gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
"""`PhotovoltaicPixel`, `PRIMA`, `PRIMA75`, `PRIMA55`, `PRIMA40`"""
from matplotlib.patches import Circle, RegularPolygon
import numpy as np
# Using or importing the ABCs from 'collections' instead of from
# 'collections.abc' is deprecated, and in 3.8 it will stop working:
from collections.abc import Sequence
from .base import ProsthesisSystem
from .electrodes import HexElectrode
from .electrode_arrays import ElectrodeGrid
class PhotovoltaicPixel(HexElectrode):
    """Photovoltaic pixel

    A hexagonal photovoltaic pixel with a circular active electrode at its
    center, used as the building block of the PRIMA family of implants.

    .. versionadded:: 0.7

    Parameters
    ----------
    x/y/z : double
        3D location of the electrode.
        The coordinate system is centered over the fovea.
        Positive ``x`` values move the electrode into the nasal retina.
        Positive ``y`` values move the electrode into the superior retina.
        Positive ``z`` values move the electrode away from the retina into the
        vitreous humor (sometimes called electrode-retina distance).
    r : double
        Disk radius in the x,y plane
    a : double
        Length of line drawn from the center of the hexagon to the midpoint of
        one of its sides.
    activated : bool
        To deactivate, set to ``False``. Deactivated electrodes cannot receive
        stimuli.
    """
    # Frozen class: User cannot add more class attributes
    __slots__ = ('r', 'a')

    def __init__(self, x, y, z, r, a, name=None, activated=True):
        # `a` (hexagon apothem) is handled by the HexElectrode base class:
        super(PhotovoltaicPixel, self).__init__(x, y, z, a, name=name,
                                                activated=activated)
        if isinstance(r, (Sequence, np.ndarray)):
            raise TypeError("Radius of the active electrode must be a scalar.")
        if r <= 0:
            # BUG FIX: the original string was missing the f-prefix, so the
            # literal text "{r}" was shown instead of the offending value:
            raise ValueError(f"Radius of the active electrode must be > 0, "
                             f"not {r}.")
        self.r = r
        # Plot two objects: hex honeycomb and circular active electrode
        self.plot_patch = [RegularPolygon, Circle]
        self.plot_kwargs = [{'radius': a, 'numVertices': 6, 'alpha': 0.2,
                             'orientation': np.radians(30),
                             'fc': 'k', 'ec': 'k'},
                            {'radius': r, 'linewidth': 0, 'color': 'k',
                             'alpha': 0.5}]
        # Deactivated pixels are drawn fainter (lower alpha):
        self.plot_deactivated_kwargs = [{'radius': a, 'numVertices': 6,
                                         'orientation': np.radians(30),
                                         'fc': 'k', 'ec': 'k', 'alpha': 0.1},
                                        {'radius': r, 'linewidth': 0,
                                         'color': 'k', 'alpha': 0.2}]

    def _pprint_params(self):
        """Return dict of class attributes to pretty-print"""
        params = super()._pprint_params()
        params.update({'r': self.r, 'a': self.a})
        return params

    def electric_potential(self, x, y, z, v0):
        # Photovoltaic pixels have no analytic potential model yet:
        raise NotImplementedError
class PRIMA(ProsthesisSystem):
    """Create a PRIMA-100 array on the retina

    This class creates a PRIMA array with 378 photovoltaic pixels (each
    100um in diameter) as used in the clinical trial [Palanker2020]_, and
    places it in the subretinal space such that the center of the array is
    located at 3D location (x,y,z), given in microns, and the array is rotated
    by rotation angle ``rot``, given in degrees.

    The device consists of 378 85um-wide pixels separated by 15um trenches,
    arranged in a 2-mm wide hexagonal pattern.
    This corresponds to a 100um pitch, with adjacent rows separated by 87um.
    The active electrode is a disk with 28um diameter.

    .. versionadded:: 0.7

    Parameters
    ----------
    x/y/z : double
        3D location of the center of the electrode array.
        The coordinate system is centered over the fovea.
        Positive ``x`` values move the electrode into the nasal retina.
        Positive ``y`` values move the electrode into the superior retina.
        Positive ``z`` values move the electrode away from the retina into the
        vitreous humor (sometimes called electrode-retina distance).
        ``z`` can either be a list with 378 entries or a scalar that is applied
        to all electrodes.
    rot : float, optional
        Rotation angle of the array (deg). Positive values denote
        counter-clock-wise (CCW) rotations in the retinal coordinate
        system.
    eye : {'RE', 'LE'}, optional
        Eye in which array is implanted.
    preprocess : bool or callable, optional
        Either True/False to indicate whether to execute the implant's default
        preprocessing method whenever a new stimulus is assigned, or a custom
        function (callable).
    safe_mode : bool, optional
        If safe mode is enabled, only charge-balanced stimuli are allowed.

    Notes
    -----
    * The diameter of the active electrode and the trench width were estimated
      from Fig.1 in [Palanker2020]_.
    """
    # Frozen class: User cannot add more class attributes
    __slots__ = ('shape', 'spacing', 'trench')

    def __init__(self, x=0, y=0, z=-100, rot=0, eye='RE', stim=None,
                 preprocess=False, safe_mode=False):
        # 85 um pixels with 15 um trenches, 28 um active electrode:
        self.trench = 15  # um
        self.spacing = 100  # um
        elec_radius = 14  # um
        # Roughly a 19x22 grid, but edges are trimmed off:
        self.shape = (19, 22)
        self.eye = eye
        self.preprocess = preprocess
        self.safe_mode = safe_mode
        # The user might provide a list of z values for each of the
        # 378 resulting electrodes, not for the 19x22 initial ones.
        # In this case, don't pass it to ElectrodeGrid, but overwrite
        # the z values later:
        overwrite_z = isinstance(z, (list, np.ndarray))
        zarr = -100 if overwrite_z else z
        self.earray = ElectrodeGrid(self.shape, self.spacing, x=x, y=y,
                                    z=zarr, rot=rot, type='hex',
                                    orientation='vertical',
                                    etype=PhotovoltaicPixel, r=elec_radius,
                                    a=(self.spacing - self.trench) / 2)
        # Remove extra electrodes to fit the actual implant:
        extra_elecs = ['A1', 'A2', 'A3', 'A4', 'A14', 'A16', 'A17',
                       'A18', 'A19', 'A20', 'A21', 'A22', 'B1',
                       'B2', 'B18', 'B19', 'B20', 'B21', 'B22',
                       'C1', 'C20', 'C21', 'C22', 'D22', 'E22', 'P1',
                       'Q1', 'Q22', 'R1', 'R2', 'R21', 'R22', 'S1',
                       'S2', 'S3', 'S5', 'S19', 'S20', 'S21', 'S22']
        for elec in extra_elecs:
            self.earray.remove_electrode(elec)
        # Adjust the z values:
        if overwrite_z:
            # Specify different height for every electrode in a list:
            z_arr = np.asarray(z).flatten()
            if z_arr.size != self.n_electrodes:
                raise ValueError(f"If `z` is a list, it must have "
                                 f"{self.n_electrodes} entries, "
                                 f"not {z_arr.size}.")
            # BUG FIX: iterate the validated, flattened array (z_arr), not the
            # raw user input; a 2D ndarray would otherwise yield whole rows:
            for elec, z_elec in zip(self.earray.electrode_objects, z_arr):
                elec.z = z_elec
        # Beware of race condition: Stim must be set last, because it requires
        # indexing into self.electrodes:
        self.stim = stim
class PRIMA75(ProsthesisSystem):
    """Create a PRIMA-75 array on the retina

    This class creates a PRIMA array with 142 photovoltaic pixels (each 75um
    in diameter) as described in [Lorach2015]_, and places it in the subretinal
    space, such that that the center of the array is located at 3D location
    (x,y,z), given in microns, and the array is rotated by rotation angle
    ``rot``, given in degrees.

    The device consists of 142 70um-wide pixels separated by 5um trenches,
    arranged in a 1-mm wide hexagonal pattern.
    This corresponds to a 75um pitch, with adjacent rows separated by 65um.
    The active electrode is a disk with 20um diameter.

    .. versionadded:: 0.7

    Parameters
    ----------
    x/y/z : double
        3D location of the center of the electrode array.
        The coordinate system is centered over the fovea.
        Positive ``x`` values move the electrode into the nasal retina.
        Positive ``y`` values move the electrode into the superior retina.
        Positive ``z`` values move the electrode away from the retina into the
        vitreous humor (sometimes called electrode-retina distance).
        ``z`` can either be a list with 142 entries or a scalar that is applied
        to all electrodes.
    rot : float, optional
        Rotation angle of the array (deg). Positive values denote
        counter-clock-wise (CCW) rotations in the retinal coordinate
        system.
    eye : {'RE', 'LE'}, optional
        Eye in which array is implanted.
    preprocess : bool or callable, optional
        Either True/False to indicate whether to execute the implant's default
        preprocessing method whenever a new stimulus is assigned, or a custom
        function (callable).
    safe_mode : bool, optional
        If safe mode is enabled, only charge-balanced stimuli are allowed.
    """
    # Frozen class: User cannot add more class attributes
    __slots__ = ('shape', 'spacing', 'trench')

    def __init__(self, x=0, y=0, z=-100, rot=0, eye='RE', stim=None,
                 preprocess=False, safe_mode=False):
        # 70 um pixels with 5 um trenches, 20 um active electrode:
        self.spacing = 75  # um
        self.trench = 5  # um
        elec_radius = 10  # um
        # Roughly a 12x15 grid, but edges are trimmed off:
        self.shape = (12, 15)
        self.eye = eye
        self.preprocess = preprocess
        self.safe_mode = safe_mode
        # The user might provide a list of z values for each of the
        # 142 resulting electrodes, not for the 12x15 initial ones.
        # In this case, don't pass it to ElectrodeGrid, but overwrite
        # the z values later:
        overwrite_z = isinstance(z, (list, np.ndarray))
        zarr = -100 if overwrite_z else z
        self.earray = ElectrodeGrid(self.shape, self.spacing, x=x, y=y,
                                    z=zarr, rot=rot, type='hex',
                                    orientation='vertical',
                                    etype=PhotovoltaicPixel, r=elec_radius,
                                    a=(self.spacing - self.trench) / 2)
        # Remove extra electrodes to fit the actual implant:
        extra_elecs = ['A1', 'B1', 'C1', 'D1', 'E1', 'I1', 'J1', 'K1', 'L1',
                       'A2', 'B2', 'C2', 'D2', 'K2', 'L2',
                       'A3', 'B3', 'L3',
                       'A4',
                       'A12',
                       'A13', 'K13', 'L13',
                       'A14', 'B14', 'C14', 'J14', 'K14', 'L14',
                       'A15', 'B15', 'C15', 'D15', 'H15', 'I15', 'J15', 'K15',
                       'L15']
        for elec in extra_elecs:
            self.earray.remove_electrode(elec)
        # Adjust the z values:
        if overwrite_z:
            # Specify different height for every electrode in a list:
            z_arr = np.asarray(z).flatten()
            if z_arr.size != self.n_electrodes:
                raise ValueError(f"If `z` is a list, it must have "
                                 f"{self.n_electrodes} entries, "
                                 f"not {z_arr.size}.")
            # BUG FIX: iterate the validated, flattened array (z_arr), not the
            # raw user input; a 2D ndarray would otherwise yield whole rows:
            for elec, z_elec in zip(self.earray.electrode_objects, z_arr):
                elec.z = z_elec
        # Beware of race condition: Stim must be set last, because it requires
        # indexing into self.electrodes:
        self.stim = stim
class PRIMA55(ProsthesisSystem):
    """Create a PRIMA-55 array on the retina

    This class creates a PRIMA array with 273 photovoltaic pixels (each 55um
    in diameter), and places it in the subretinal space, such that that the
    center of the array is located at 3D location (x,y,z), given in microns,
    and the array is rotated by rotation angle ``rot``, given in degrees.

    The device consists of 273 50um-wide pixels separated by 5um trenches,
    arranged in a 1-mm wide hexagonal pattern.
    This corresponds to a 55um pitch, with adjacent rows separated by 48um.
    The active electrode is a disk with 16um diameter.

    .. warning ::

        The exact shape of the device has not been published yet. We assume the
        array fits on a circular 1mm-diameter substrate, which leaves us with
        273 electrodes.

    .. versionadded:: 0.7

    Parameters
    ----------
    x/y/z : double
        3D location of the center of the electrode array.
        The coordinate system is centered over the fovea.
        Positive ``x`` values move the electrode into the nasal retina.
        Positive ``y`` values move the electrode into the superior retina.
        Positive ``z`` values move the electrode away from the retina into the
        vitreous humor (sometimes called electrode-retina distance).
        ``z`` can either be a list with 273 entries or a scalar that is applied
        to all electrodes.
    rot : float, optional
        Rotation angle of the array (deg). Positive values denote
        counter-clock-wise (CCW) rotations in the retinal coordinate
        system.
    eye : {'RE', 'LE'}, optional
        Eye in which array is implanted.
    preprocess : bool or callable, optional
        Either True/False to indicate whether to execute the implant's default
        preprocessing method whenever a new stimulus is assigned, or a custom
        function (callable).
    safe_mode : bool, optional
        If safe mode is enabled, only charge-balanced stimuli are allowed.
    """
    # Frozen class: User cannot add more class attributes
    __slots__ = ('shape', 'spacing', 'trench')

    def __init__(self, x=0, y=0, z=-100, rot=0, eye='RE', stim=None,
                 preprocess=False, safe_mode=False):
        # 50 um pixels with 5 um trenches, 16 um active electrode:
        self.spacing = 55  # um
        self.trench = 5  # um
        elec_radius = 8  # um
        # Roughly a 18x21 grid, but edges are trimmed off:
        self.shape = (18, 21)
        self.eye = eye
        self.preprocess = preprocess
        self.safe_mode = safe_mode
        # The user might provide a list of z values for each of the
        # 273 resulting electrodes, not for the 18x21 initial ones.
        # In this case, don't pass it to ElectrodeGrid, but overwrite
        # the z values later:
        overwrite_z = isinstance(z, (list, np.ndarray))
        zarr = -100 if overwrite_z else z
        self.earray = ElectrodeGrid(self.shape, self.spacing, x=x, y=y,
                                    z=zarr, rot=rot, type='hex',
                                    orientation='vertical',
                                    etype=PhotovoltaicPixel, r=elec_radius,
                                    a=(self.spacing - self.trench) / 2)
        # Note that the exact shape of this implant is not known. We remove
        # all electrodes that don't fit on a circular 1mm x 1mm substrate:
        extra_elec = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9',
                      'A10', 'A12', 'A14', 'A15', 'A16', 'A17', 'A18', 'A19',
                      'A20', 'A21', 'B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B16',
                      'B17', 'B18', 'B19', 'B20', 'B21', 'C1', 'C2', 'C3',
                      'C4', 'C18', 'C19', 'C20', 'C21', 'D1', 'D2', 'D3', 'D4',
                      'D20', 'D21', 'E1', 'E2', 'E20', 'E21', 'F1', 'F2',
                      'F21', 'G1', 'G2', 'G21', 'H1', 'I1', 'J1', 'K1', 'L1',
                      'L21', 'M1', 'M2', 'M21', 'N1', 'N2', 'N21', 'O1', 'O2',
                      'O3', 'O20', 'O21', 'P1', 'P2', 'P3', 'P4', 'P19', 'P20',
                      'P21', 'Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Q17', 'Q18', 'Q19',
                      'Q20', 'Q21', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7',
                      'R9', 'R13', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20',
                      'R21']
        for elec in extra_elec:
            self.earray.remove_electrode(elec)
        # Adjust the z values:
        if overwrite_z:
            # Specify different height for every electrode in a list:
            z_arr = np.asarray(z).flatten()
            if z_arr.size != self.n_electrodes:
                raise ValueError(f"If `z` is a list, it must have "
                                 f"{self.n_electrodes} entries, "
                                 f"not {z_arr.size}.")
            # BUG FIX: iterate the validated, flattened array (z_arr), not the
            # raw user input; a 2D ndarray would otherwise yield whole rows:
            for elec, z_elec in zip(self.earray.electrode_objects, z_arr):
                elec.z = z_elec
        # Beware of race condition: Stim must be set last, because it requires
        # indexing into self.electrodes:
        self.stim = stim
class PRIMA40(ProsthesisSystem):
    """Create a PRIMA-40 array on the retina

    This class creates a PRIMA array with 532 photovoltaic pixels (each 40um
    in diameter), and places it in the subretinal space, such that that the
    center of the array is located at 3D location (x,y,z), given in microns,
    and the array is rotated by rotation angle ``rot``, given in degrees.

    The device consists of 532 35um-wide pixels separated by 5um trenches,
    arranged in a 1-mm wide hexagonal pattern.
    This corresponds to a 40um pitch, with adjacent rows separated by 35um.
    The active electrode is a disk with 16um diameter.

    .. important ::

        The exact shape of the device has not been published yet. We assume the
        array fits on a circular 1mm-diameter substrate, which leaves us with
        532 electrodes.

    .. versionadded:: 0.7

    Parameters
    ----------
    x/y/z : double
        3D location of the center of the electrode array.
        The coordinate system is centered over the fovea.
        Positive ``x`` values move the electrode into the nasal retina.
        Positive ``y`` values move the electrode into the superior retina.
        Positive ``z`` values move the electrode away from the retina into the
        vitreous humor (sometimes called electrode-retina distance).
        ``z`` can either be a list with 532 entries or a scalar that is applied
        to all electrodes.
    rot : float, optional
        Rotation angle of the array (deg). Positive values denote
        counter-clock-wise (CCW) rotations in the retinal coordinate
        system.
    eye : {'LE', 'RE'}, optional
        Eye in which array is implanted.
    preprocess : bool or callable, optional
        Either True/False to indicate whether to execute the implant's default
        preprocessing method whenever a new stimulus is assigned, or a custom
        function (callable).
    safe_mode : bool, optional
        If safe mode is enabled, only charge-balanced stimuli are allowed.
    """
    # Frozen class: User cannot add more class attributes
    __slots__ = ('shape', 'spacing', 'trench')

    def __init__(self, x=0, y=0, z=-100, rot=0, eye='RE', stim=None,
                 preprocess=False, safe_mode=False):
        # 35 um pixels with 5 um trenches, 16 um active electrode:
        self.spacing = 40  # um
        self.trench = 5  # um
        elec_radius = 8  # um
        # Roughly a 25x28 grid, but edges are trimmed off:
        self.shape = (25, 28)
        self.eye = eye
        self.preprocess = preprocess
        self.safe_mode = safe_mode
        # The user might provide a list of z values for each of the
        # 532 resulting electrodes, not for the 25x28 initial ones.
        # In this case, don't pass it to ElectrodeGrid, but overwrite
        # the z values later:
        overwrite_z = isinstance(z, (list, np.ndarray))
        zarr = -100 if overwrite_z else z
        self.earray = ElectrodeGrid(self.shape, self.spacing, x=x, y=y,
                                    z=zarr, rot=rot, type='hex',
                                    orientation='vertical',
                                    etype=PhotovoltaicPixel, r=elec_radius,
                                    a=(self.spacing - self.trench) / 2)
        # Note that the exact shape of this implant is not known. We remove
        # all electrodes that don't fit on a circular 1mm x 1mm substrate:
        extra_elec = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9',
                      'A10', 'A11', 'A12', 'A14', 'A16', 'A17', 'A18', 'A19',
                      'A20', 'A21', 'A22', 'A23', 'A24', 'A25', 'A26', 'A27',
                      'A28', 'B1', 'B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8',
                      'B10', 'B20', 'B21', 'B22', 'B23', 'B24', 'B25', 'B26',
                      'B27', 'B28', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C22',
                      'C23', 'C24', 'C25', 'C26', 'C27', 'C28', 'D1', 'D2',
                      'D3', 'D4', 'D5', 'D24', 'D25', 'D26', 'D27', 'D28',
                      'E1', 'E2', 'E3', 'E4', 'E25', 'E26', 'E27', 'E28', 'F1',
                      'F2', 'F26', 'F27', 'F28', 'G1', 'G2', 'G27', 'G28',
                      'H1', 'H27', 'H28', 'I1', 'I28', 'J28', 'K28', 'P28',
                      'Q1', 'Q28', 'R1', 'R27', 'R28', 'S1', 'S27', 'S28',
                      'T1', 'T2', 'T27', 'T28', 'U1', 'U2', 'U3', 'U25', 'U26',
                      'U27', 'U28', 'V1', 'V2', 'V3', 'V4', 'V5', 'V25', 'V26',
                      'V27', 'V28', 'W1', 'W2', 'W3', 'W4', 'W5', 'W23', 'W24',
                      'W25', 'W26', 'W27', 'W28', 'X1', 'X2', 'X3', 'X4', 'X5',
                      'X6', 'X7', 'X21', 'X22', 'X23', 'X24', 'X25', 'X26',
                      'X27', 'X28', 'Y1', 'Y2', 'Y3', 'Y4', 'Y5', 'Y6', 'Y7',
                      'Y8', 'Y9', 'Y10', 'Y11', 'Y17', 'Y19', 'Y20', 'Y21',
                      'Y22', 'Y23', 'Y24', 'Y25', 'Y26', 'Y27', 'Y28']
        for elec in extra_elec:
            self.earray.remove_electrode(elec)
        # Adjust the z values:
        if overwrite_z:
            # Specify different height for every electrode in a list:
            z_arr = np.asarray(z).flatten()
            if z_arr.size != self.n_electrodes:
                raise ValueError(f"If `z` is a list, it must have "
                                 f"{self.n_electrodes} entries, "
                                 f"not {z_arr.size}.")
            # BUG FIX: iterate the validated, flattened array (z_arr), not the
            # raw user input; a 2D ndarray would otherwise yield whole rows:
            for elec, z_elec in zip(self.earray.electrode_objects, z_arr):
                elec.z = z_elec
        # Beware of race condition: Stim must be set last, because it requires
        # indexing into self.electrodes:
        self.stim = stim
|
|
import logging
import Queue
import time
from threading import Thread, current_thread, Lock
import zmq
from binder.binderd.client import BinderClient
from binder.settings import LogSettings
class LoggerClient(Thread):
    """Background thread that forwards queued log messages to the binderd
    ``log_writer`` service via a :class:`BinderClient`."""

    _singleton = None

    @staticmethod
    def getInstance():
        # Lazily create and start the shared client on first use.
        # NOTE(review): not thread-safe; two racing callers could each create
        # an instance -- confirm all callers run on the same thread.
        if not LoggerClient._singleton:
            client = LoggerClient()
            client.start()
            LoggerClient._singleton = client
        return LoggerClient._singleton

    def __init__(self):
        super(LoggerClient, self).__init__()
        # Remember the spawning thread so the worker can exit when it dies.
        self.parent = current_thread()
        self._stopped = False
        self._queue = Queue.Queue()
        self._client = BinderClient("log_writer")

    def stop(self):
        # BUG FIX: set the stop flag *before* closing the client, so the
        # worker loop observes the flag rather than sending on a closed
        # connection (the original closed first, then set the flag).
        self._stopped = True
        self._client.close()

    def _send_message(self, block=True, timeout=None):
        """Pop one message off the queue (honoring block/timeout) and send it."""
        try:
            msg = self._queue.get(block, timeout)
        except Queue.Empty:
            return
        self._client.send(msg)

    def run(self):
        # BUG FIX: poll with a timeout so the loop re-checks the stop flag
        # and the parent thread's liveness; a bare blocking get() would hang
        # forever if the parent died while the queue was empty.
        while not self._stopped and self.parent.is_alive():
            self._send_message(timeout=1.0)
        # keep logging until the queue is empty, even after the parent has died
        while not self._queue.empty():
            self._send_message(block=False)

    def _send(self, msg):
        self._queue.put(msg)

    def debug(self, tag, msg, app=None):
        self._send({'type': 'log', 'level': logging.DEBUG, 'msg': msg, 'tag': tag, 'app': app})

    def info(self, tag, msg, app=None):
        self._send({'type': 'log', 'level': logging.INFO, 'msg': msg, 'tag': tag, 'app': app})

    def warn(self, tag, msg, app=None):
        self._send({'type': 'log', 'level': logging.WARNING, 'msg': msg, 'tag': tag, 'app': app})

    # BUG FIX: stdlib-style alias. Module-level warning_log() calls
    # log.warning(), which previously raised AttributeError; the alias also
    # makes 'warning' a valid level_string for write_stream().
    warning = warn

    def error(self, tag, msg, app=None):
        self._send({'type': 'log', 'level': logging.ERROR, 'msg': msg, 'tag': tag, 'app': app})
def debug_log(tag, msg, app=None):
    """Queue a DEBUG-level message on the shared LoggerClient."""
    log = LoggerClient.getInstance()
    log.debug(tag, msg, app)

def info_log(tag, msg, app=None):
    """Queue an INFO-level message on the shared LoggerClient."""
    log = LoggerClient.getInstance()
    log.info(tag, msg, app)

def warning_log(tag, msg, app=None):
    """Queue a WARNING-level message on the shared LoggerClient."""
    log = LoggerClient.getInstance()
    # BUG FIX: LoggerClient's method is named warn(); calling log.warning()
    # raised AttributeError on every warning.
    log.warn(tag, msg, app)

def error_log(tag, msg, app=None):
    """Queue an ERROR-level message on the shared LoggerClient."""
    log = LoggerClient.getInstance()
    log.error(tag, msg, app)
def write_stream(tag, level_string, stream, app=None):
    """Spawn a worker thread that logs every line read from *stream* at the
    level named by *level_string* (e.g. 'debug', 'info')."""
    def _pump(app, stream):
        log = LoggerClient.getInstance()
        # Reject level names that are not LoggerClient methods.
        if level_string not in LoggerClient.__dict__:
            log.error("LoggerClient", "write_stream failing with unexpected level_string: {}".format(level_string))
            return
        emit = getattr(log, level_string)
        line = stream.readline()
        while line != '':
            emit(tag, line, app=app)
            line = stream.readline()
    worker = Thread(target=_pump, args=(app, stream))
    worker.start()
class PubSubStreamer(Thread):
    """Fans zmq pub/sub messages out to per-app callbacks.

    A nested SubStreamReader thread drains the zmq SUB socket into an
    internal queue; this thread pops the queue and dispatches each message
    to all callbacks registered for its topic (app name).
    """

    class SubStreamReader(Thread):
        """Reads (topic, msg) pairs off the zmq SUB socket into a buffer."""

        def __init__(self, buf):
            super(PubSubStreamer.SubStreamReader, self).__init__()
            self._stopped = False
            self._buf = buf

        def stop(self):
            self._stopped = True

        def run(self):
            context = zmq.Context()
            socket = context.socket(zmq.SUB)
            # Empty prefix -> subscribe to every topic.
            socket.setsockopt(zmq.SUBSCRIBE, b'')
            socket.connect("{}:{}".format(LogSettings.PUBSUB_HOST, LogSettings.PUBSUB_PORT))
            while not self._stopped:
                try:
                    topic, msg = socket.recv_multipart(zmq.NOBLOCK)
                    # buffer the message
                    self._buf.put((topic, msg))
                except zmq.ZMQError:
                    # BUG FIX: back off briefly instead of busy-spinning at
                    # 100% CPU while no messages are available.
                    time.sleep(0.01)
                    continue

    _singleton = None

    def __init__(self):
        super(PubSubStreamer, self).__init__()
        self._stopped = False
        self._queue = Queue.Queue()
        self._sub_reader = PubSubStreamer.SubStreamReader(self._queue)
        # app name -> list of callables invoked with each message
        self.callbacks = {}

    @staticmethod
    def get_instance():
        # NOTE(review): not thread-safe; assumes callers race-free -- confirm.
        if not PubSubStreamer._singleton:
            PubSubStreamer._singleton = PubSubStreamer()
            PubSubStreamer._singleton.start()
        return PubSubStreamer._singleton

    def add_app_callback(self, app, cb):
        """Register *cb* to be called with every message published for *app*."""
        if app in self.callbacks:
            self.callbacks[app].append(cb)
        else:
            self.callbacks[app] = [cb]

    def stop(self):
        self._stopped = True
        self._sub_reader.stop()

    def remove_app_callback(self, app, cb):
        """Unregister *cb*; a no-op if it was never registered."""
        if app in self.callbacks:
            try:
                self.callbacks[app].remove(cb)
            except ValueError:
                pass

    def run(self):
        self._sub_reader.start()
        while not self._stopped:
            # BUG FIX: poll with a timeout so stop() actually terminates this
            # loop; a bare get() would block forever on an empty queue.
            try:
                app, msg = self._queue.get(timeout=0.5)
            except Queue.Empty:
                continue
            if app in self.callbacks:
                for cb in self.callbacks[app]:
                    cb(msg)
class AppLogStreamer(Thread):
    """Streams an app's log lines to a callback: first the historical lines
    fetched from the binderd log_reader, then live lines from the pub/sub
    feed (deduplicated by timestamp)."""

    def __init__(self, app, start_time, callback):
        super(AppLogStreamer, self).__init__()
        self.daemon = True
        self._stopped = False
        self._app = app
        self._start_time = start_time
        self._cb = callback
        self._pubsub_cb = None
        # Make sure the shared pub/sub dispatcher is running.
        PubSubStreamer.get_instance()

    def stop(self):
        self._stopped = True
        if self._pubsub_cb:
            PubSubStreamer.get_instance().remove_app_callback(self._app, self._pubsub_cb)

    def run(self):
        buf = Queue.Queue()
        def buffered_cb(msg):
            buf.put(msg)
        # Subscribe to live messages *before* the historical fetch so no
        # lines are lost in between; duplicates are filtered by timestamp.
        self._pubsub_cb = buffered_cb
        PubSubStreamer.get_instance().add_app_callback(self._app, self._pubsub_cb)
        lines = []
        # BUG FIX: close the BinderClient on every path; the original leaked
        # the connection when the request failed (early return before close).
        bc = BinderClient("log_reader")
        try:
            rsp = bc.send({"type": "get", "app": self._app, "since": self._start_time})
        finally:
            bc.close()
        if rsp["type"] == "success":
            lines = rsp["msg"].split("\n")
        else:
            error_log("LoggerClient", "read_stream failure for app {}: {}".format(self._app, rsp))
            return
        # exhaust all lines from the get request
        last_time = None
        for line in lines:
            last_time = LogSettings.EXTRACT_TIME(line)
            self._cb(line)
        if last_time:
            last_time = time.strptime(last_time, LogSettings.TIME_FORMAT)
        # now start reading the subscriber output (starting strictly after last_time)
        while not self._stopped:
            try:
                timeout = 0.05
                line = buf.get(timeout=timeout)
                line_time = time.strptime(LogSettings.EXTRACT_TIME(line), LogSettings.TIME_FORMAT)
                if not last_time or line_time > last_time:
                    self._cb(line)
            except Queue.Empty:
                continue
|
|
# Create your views here.
from django.http import HttpResponse
from rango.models import Category
from rango.models import Page
from django.template import RequestContext
from django.shortcuts import render_to_response
from rango.forms import CategoryForm
from rango.forms import PageForm
from rango.forms import UserForm, UserProfileForm
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from datetime import datetime
def encode_url(name):
    """Return *name* with spaces replaced by underscores, making it safe to
    embed in a URL (e.g. 'Python Books' -> 'Python_Books').

    The parameter was renamed from ``str`` to avoid shadowing the builtin.
    """
    return name.replace(' ', '_')
def decode_url(name):
    """Inverse of encode_url: replace underscores with spaces
    (e.g. 'Python_Books' -> 'Python Books').

    The parameter was renamed from ``str`` to avoid shadowing the builtin.
    """
    return name.replace('_', ' ')
def index(request):
    """Render the homepage with the top five categories (by likes) and track
    per-session visit counts (Tango-with-Django chapter 10.6)."""
    context = RequestContext(request)
    category_list = Category.objects.order_by('-likes')[:5]
    context_dict = {'categories': category_list}
    # Attach a URL-safe name to each category for use in the template.
    for category in category_list:
        category.url = category.name.replace(' ', '_')
    # BUG FIX: the original returned here, making all the session-tracking
    # code below unreachable dead code.
    if request.session.get('last_visit'):
        last_visit_time = request.session.get('last_visit')
        visits = request.session.get('visits', 0)
        # Count at most one visit per day ([:-7] strips the microseconds).
        if (datetime.now() - datetime.strptime(last_visit_time[:-7], "%Y-%m-%d %H:%M:%S")).days > 0:
            request.session['visits'] = visits + 1
            # BUG FIX: the original overwrote 'visits' with 1 right after
            # incrementing it; refresh the timestamp instead so the next
            # visit is measured from now.
            request.session['last_visit'] = str(datetime.now())
    else:
        # First visit in this session: initialize the counters.
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = 1
    return render_to_response('rango/index.html', context_dict, context)
def about_page(request):
    """Serve a plain-text placeholder for the about page."""
    message = "Rango says this is the about page"
    return HttpResponse(message)
def category(request, category_name_url):
    """Render one category page, listing its pages if the category exists."""
    context = RequestContext(request)
    category_name = category_name_url.replace('_', ' ')
    context_dict = {'category_name': category_name,
                    'category_name_url': category_name_url}
    try:
        matched = Category.objects.get(name=category_name)
    except Category.DoesNotExist:
        # Unknown category: render the template without 'pages'/'category'.
        pass
    else:
        context_dict['pages'] = Page.objects.filter(category=matched)
        context_dict['category'] = matched
    return render_to_response('rango/category.html', context_dict, context)
def add_category(request):
    """Handle the add-category form: save on valid POST, otherwise render
    the (possibly invalid) form."""
    context = RequestContext(request)
    if request.method == 'POST':
        form = CategoryForm(request.POST)
        if form.is_valid():
            form.save(commit=True)
            return index(request)
        else:
            # FIX: print() call form is valid under both Python 2 and 3
            # (the original used the Py2-only print statement).
            print(form.errors)
    else:
        form = CategoryForm()
    return render_to_response('rango/add_category.html', {'form': form}, context)
def add_page(request, category_name_url):
    """Handle the add-page form for one category: save on valid POST and
    redisplay the category, otherwise render the form."""
    context = RequestContext(request)
    category_name = decode_url(category_name_url)
    if request.method == 'POST':
        form = PageForm(request.POST)
        if form.is_valid():
            # Delay saving until the category FK is attached.
            page = form.save(commit=False)
            # NOTE(review): raises Category.DoesNotExist if the category was
            # removed in the meantime -- confirm a 500 is acceptable here.
            cat = Category.objects.get(name=category_name)
            page.category = cat
            page.views = 0
            page.save()
            return category(request, category_name_url)
        else:
            # FIX: print() call form is valid under both Python 2 and 3
            # (the original used the Py2-only print statement).
            print(form.errors)
    else:
        form = PageForm()
    return render_to_response('rango/add_page.html',
                              {'category_name_url': category_name_url,
                               'category_name': category_name, 'form': form},
                              context)
def register(request):
    """Handle user registration: create a User plus UserProfile from the
    POSTed forms and re-render the page with a success flag."""
    context = RequestContext(request)
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileForm(data=request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            # Hash the raw password before persisting the user again.
            user.set_password(user.password)
            user.save()
            # Delay saving the profile until the user FK is attached.
            profile = profile_form.save(commit=False)
            profile.user = user
            if 'picture' in request.FILES:
                profile.picture = request.FILES['picture']
            profile.save()
            registered = True
        else:
            # FIX: print() call form is valid under both Python 2 and 3
            # (the original used the Py2-only print statement).
            print(user_form.errors)
            print(profile_form.errors)
    else:
        user_form = UserForm()
        profile_form = UserProfileForm()
    return render_to_response(
        'rango/register.html',
        {'user_form': user_form, 'profile_form': profile_form, 'registered': registered},
        context)
def user_login(request):
    """Authenticate POSTed credentials; on success log the user in and
    redirect to the Rango homepage."""
    context = RequestContext(request)
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect('/rango/')
            else:
                return HttpResponse("Your Rango account is disabled!")
        else:
            # FIX: print() call form is valid under both Python 2 and 3
            # (the original used the Py2-only print statement).
            # NOTE(review): printing the raw password is a security risk --
            # consider logging only the username.
            print("Invalid login details: {0}, {1}.".format(username, password))
            return HttpResponse("Invalid login details supplied.")
    else:
        return render_to_response('rango/login.html', {}, context)
@login_required
def restricted(request): #Ch8.6 function, not really important
    """Demo view that is only reachable by logged-in users."""
    return HttpResponse("Since you're logged in, you can see this text!")
@login_required
def user_logout(request):
    """Log the current user out and redirect to the Rango homepage."""
    logout(request)
    return HttpResponseRedirect('/rango/')
|
|
import weakref
import pytest
from pytestqt import qt_compat
from pytestqt.qt_compat import qt_api
def test_basics(qtbot):
    """
    Sanity check: a QApplication exists and a simple widget can be shown
    and queried through qtbot.
    """
    assert qt_api.QtWidgets.QApplication.instance() is not None
    w = qt_api.QtWidgets.QWidget()
    qtbot.addWidget(w)
    w.setWindowTitle("W1")
    w.show()
    assert w.isVisible()
    assert w.windowTitle() == "W1"
def test_qapp_default_name(qapp):
    """Without configuration, the plugin names the QApplication itself."""
    expected = "pytest-qt-qapp"
    assert qapp.applicationName() == expected
def test_qapp_name(testdir):
    """The qt_qapp_name ini option should set the QApplication name."""
    # Generated test only passes if the app name matches the ini setting.
    testdir.makepyfile(
        """
        def test_name(qapp):
            assert qapp.applicationName() == "frobnicator"
        """
    )
    testdir.makeini(
        """
        [pytest]
        qt_qapp_name = frobnicator
        """
    )
    # Run in a subprocess so a fresh QApplication picks up the ini option.
    res = testdir.runpytest_subprocess()
    res.stdout.fnmatch_lines("*1 passed*")
def test_key_events(qtbot, event_recorder):
    """
    Basic key events test.
    """
    def extract(key_event):
        return (
            key_event.type(),
            qt_api.QtCore.Qt.Key(key_event.key()),
            key_event.text(),
        )

    event_recorder.registerEvent(qt_api.QtGui.QKeyEvent, extract)

    # Press then release 'a', checking the recorded event each time.
    actions = (
        (qtbot.keyPress, qt_api.QtCore.QEvent.Type.KeyPress),
        (qtbot.keyRelease, qt_api.QtCore.QEvent.Type.KeyRelease),
    )
    for send_key, expected_type in actions:
        send_key(event_recorder, "a")
        assert event_recorder.event_data == (
            expected_type,
            qt_api.QtCore.Qt.Key.Key_A,
            "a",
        )
def test_mouse_events(qtbot, event_recorder):
    """
    Basic mouse events test.
    """
    def extract(mouse_event):
        return (mouse_event.type(), mouse_event.button(), mouse_event.modifiers())

    event_recorder.registerEvent(qt_api.QtGui.QMouseEvent, extract)

    press_type = qt_api.QtCore.QEvent.Type.MouseButtonPress
    left = qt_api.QtCore.Qt.MouseButton.LeftButton
    right = qt_api.QtCore.Qt.MouseButton.RightButton
    no_mod = qt_api.QtCore.Qt.KeyboardModifier.NoModifier
    alt_mod = qt_api.QtCore.Qt.KeyboardModifier.AltModifier

    # Left click with no modifier (the default).
    qtbot.mousePress(event_recorder, left)
    assert event_recorder.event_data == (press_type, left, no_mod)

    # Right click with Alt held down.
    qtbot.mousePress(event_recorder, right, alt_mod)
    assert event_recorder.event_data == (press_type, right, alt_mod)
def test_stop(qtbot, timer):
    """
    Test qtbot.stop()
    """
    w = qt_api.QtWidgets.QWidget()
    qtbot.addWidget(w)
    with qtbot.waitExposed(w):
        w.show()
    # Schedule the widget to close as soon as stop() takes over the loop.
    timer.single_shot_callback(w.close, 0)
    qtbot.stop()
@pytest.mark.parametrize("show", [True, False])
@pytest.mark.parametrize("method_name", ["waitExposed", "waitActive"])
def test_wait_window(show, method_name, qtbot):
    """
    Using one of the wait-widget methods should not raise anything if the widget
    is properly displayed, otherwise should raise a TimeoutError.
    """
    wait_method = getattr(qtbot, method_name)
    widget = qt_api.QtWidgets.QWidget()
    qtbot.add_widget(widget)
    if not show:
        # Widget is never shown, so the wait must time out.
        with pytest.raises(qtbot.TimeoutError):
            with wait_method(widget, timeout=100):
                pass
    else:
        with wait_method(widget, timeout=1000):
            widget.show()
@pytest.mark.parametrize("show", [True, False])
def test_wait_for_window_shown(qtbot, show):
    """Deprecated waitForWindowShown still reports the widget's visibility."""
    widget = qt_api.QtWidgets.QWidget()
    qtbot.add_widget(widget)
    if show:
        widget.show()
    with pytest.deprecated_call(match="waitForWindowShown is deprecated"):
        result = qtbot.waitForWindowShown(widget)
    assert result == show
@pytest.mark.parametrize("method_name", ["waitExposed", "waitActive"])
def test_wait_window_propagates_other_exception(method_name, qtbot):
    """
    Exceptions raised inside the with-statement of wait-widget methods should
    propagate properly.
    """
    widget = qt_api.QtWidgets.QWidget()
    qtbot.add_widget(widget)
    wait_method = getattr(qtbot, method_name)
    with pytest.raises(ValueError, match="some other error"):
        with wait_method(widget, timeout=100):
            widget.show()
            raise ValueError("some other error")
def test_widget_kept_as_weakref(qtbot):
    """
    Test if the widget is kept as a weak reference in QtBot
    """
    widget = qt_api.QtWidgets.QWidget()
    qtbot.add_widget(widget)
    ref = weakref.ref(widget)
    # Drop the only strong reference; CPython collects it immediately,
    # proving qtbot holds no strong reference of its own.
    del widget
    assert ref() is None
def test_event_processing_before_and_after_teardown(testdir):
    """
    Make sure events are processed before and after fixtures are torn down.
    The test works by creating a session object which pops() one of its events
    whenever a processEvents() occurs. Fixture and tests append values
    to the event list but expect the list to have been processed (by the pop())
    at each point of interest.
    https://github.com/pytest-dev/pytest-qt/issues/67
    """
    # The generated test file posts a User event for every appended value;
    # if the event loop spins at the expected points, the queue drains back
    # to empty before each assertion.
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        import pytest
        @pytest.fixture(scope='session')
        def events_queue(qapp):
            class EventsQueue(qt_api.QtCore.QObject):
                def __init__(self):
                    qt_api.QtCore.QObject.__init__(self)
                    self.events = []
                def pop_later(self):
                    qapp.postEvent(self, qt_api.QtCore.QEvent(qt_api.QtCore.QEvent.Type.User))
                def event(self, ev):
                    if ev.type() == qt_api.QtCore.QEvent.Type.User:
                        self.events.pop(-1)
                    return qt_api.QtCore.QObject.event(self, ev)
            return EventsQueue()
        @pytest.fixture
        def fix(events_queue, qapp):
            assert events_queue.events == []
            yield
            assert events_queue.events == []
            events_queue.events.append('fixture teardown')
            events_queue.pop_later()
        @pytest.mark.parametrize('i', range(3))
        def test_events(events_queue, fix, i):
            assert events_queue.events == []
            events_queue.events.append('test event')
            events_queue.pop_later()
        """
    )
    # Three parametrized runs: all must pass for the processing to be correct.
    res = testdir.runpytest()
    res.stdout.fnmatch_lines(["*3 passed in*"])
def test_header(testdir):
    """The plugin's report header must include the Qt API and versions."""
    # Replace qt_api.get_versions with a stub returning fixed versions so the
    # header line can be asserted deterministically.
    testdir.makeconftest(
        """
        from pytestqt import qt_compat
        from pytestqt.qt_compat import qt_api
        def mock_get_versions():
            return qt_compat.VersionTuple('PyQtAPI', '1.0', '2.5', '3.5')
        assert hasattr(qt_api, 'get_versions')
        qt_api.get_versions = mock_get_versions
        """
    )
    res = testdir.runpytest()
    res.stdout.fnmatch_lines(
        ["*test session starts*", "PyQtAPI 1.0 -- Qt runtime 2.5 -- Qt compiled 3.5"]
    )
def test_qvariant(tmpdir):
    """Test that QVariant works in the same way across all supported Qt bindings."""
    ini_path = str(tmpdir / "foo.ini")
    settings = qt_api.QtCore.QSettings(
        ini_path, qt_api.QtCore.QSettings.Format.IniFormat
    )
    # Store a few representative value types, then read them back.
    for key, value in [("int", 42), ("str", "Hello"), ("empty", None)]:
        settings.setValue(key, value)
    assert settings.value("int") == 42
    assert settings.value("str") == "Hello"
    assert settings.value("empty") is None
def test_widgets_closed_before_fixtures(testdir):
    """
    Ensure widgets added by "qtbot.add_widget" are closed before all other
    fixtures are teardown. (#106).
    """
    # The generated fixture asserts, during its own teardown, that the widget
    # has already received closeEvent -- proving qtbot closes widgets first.
    testdir.makepyfile(
        """
        import pytest
        from pytestqt.qt_compat import qt_api
        class Widget(qt_api.QtWidgets.QWidget):
            closed = False
            def closeEvent(self, e):
                e.accept()
                self.closed = True
        @pytest.fixture
        def widget(qtbot):
            w = Widget()
            qtbot.add_widget(w)
            yield w
            assert w.closed
        def test_foo(widget):
            pass
        """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(["*= 1 passed in *"])
def test_qtbot_wait(qtbot, stop_watch):
    """qtbot.wait() must block for roughly the requested milliseconds."""
    stop_watch.start()
    qtbot.wait(250)
    stop_watch.stop()
    # Allow some scheduling slack: require only ~90% of the requested delay.
    assert stop_watch.elapsed >= 220
@pytest.fixture
def event_recorder(qtbot):
    class EventRecorder(qt_api.QtWidgets.QWidget):
        """
        Widget that captures selected events sent to it.

        Register an event class together with an extractor via
        `registerEvent`; when an instance of that class arrives, the
        extractor's return value is stored in the `event_data` member.
        """

        def __init__(self):
            qt_api.QtWidgets.QWidget.__init__(self)
            self._extractors = {}
            self.event_data = None

        def registerEvent(self, event_type, extract_func):
            self._extractors[event_type] = extract_func

        def event(self, ev):
            for event_class, extractor in self._extractors.items():
                if isinstance(ev, event_class):
                    self.event_data = extractor(ev)
                    return True
            return False

    recorder = EventRecorder()
    qtbot.addWidget(recorder)
    return recorder
@pytest.mark.parametrize(
    "value, expected",
    [
        (True, True),
        (False, False),
        ("True", True),
        ("False", False),
        ("true", True),
        ("false", False),
    ],
)
def test_parse_ini_boolean_valid(value, expected):
    """_parse_ini_boolean accepts booleans and their string spellings."""
    import pytestqt.qtbot

    result = pytestqt.qtbot._parse_ini_boolean(value)
    assert result == expected
def test_parse_ini_boolean_invalid():
    """Anything other than a recognized boolean spelling must raise."""
    import pytestqt.qtbot

    with pytest.raises(ValueError):
        pytestqt.qtbot._parse_ini_boolean("foo")
@pytest.mark.parametrize("option_api", ["pyqt5", "pyqt6", "pyside2", "pyside6"])
def test_qt_api_ini_config(testdir, monkeypatch, option_api):
    """
    Test qt_api ini option handling.
    """
    from pytestqt.qt_compat import qt_api
    # Remove the env var so only the ini value under test is in effect.
    monkeypatch.delenv("PYTEST_QT_API", raising=False)
    testdir.makeini(
        """
        [pytest]
        qt_api={option_api}
        """.format(
            option_api=option_api
        )
    )
    testdir.makepyfile(
        """
        import pytest
        def test_foo(qtbot):
            pass
        """
    )
    result = testdir.runpytest_subprocess()
    if qt_api.pytest_qt_api == option_api:
        # The requested binding is the one actually installed: tests pass.
        result.stdout.fnmatch_lines(["* 1 passed in *"])
    else:
        # The requested binding is not installed; the import-error class name
        # depends on the Python version.
        try:
            ModuleNotFoundError
        except NameError:
            # Python < 3.6
            result.stderr.fnmatch_lines(["*ImportError:*"])
        else:
            # Python >= 3.6
            result.stderr.fnmatch_lines(["*ModuleNotFoundError:*"])
@pytest.mark.parametrize("envvar", ["pyqt5", "pyqt6", "pyside2", "pyside6"])
def test_qt_api_ini_config_with_envvar(testdir, monkeypatch, envvar):
    """ensure environment variable wins over config value if both are present"""
    # Deliberately invalid ini value: if it were honored, startup would fail.
    testdir.makeini(
        """
        [pytest]
        qt_api={option_api}
        """.format(
            option_api="piecute"
        )
    )
    monkeypatch.setenv("PYTEST_QT_API", envvar)
    testdir.makepyfile(
        """
        import pytest
        def test_foo(qtbot):
            pass
        """
    )
    result = testdir.runpytest_subprocess()
    if qt_api.pytest_qt_api == envvar:
        # Env var selected the installed binding, so the run succeeds.
        result.stdout.fnmatch_lines(["* 1 passed in *"])
    else:
        # Binding named by the env var is absent; error class depends on the
        # Python version.
        try:
            ModuleNotFoundError
        except NameError:
            # Python < 3.6
            result.stderr.fnmatch_lines(["*ImportError:*"])
        else:
            # Python >= 3.6
            result.stderr.fnmatch_lines(["*ModuleNotFoundError:*"])
def test_invalid_qt_api_envvar(testdir, monkeypatch):
    """
    Make sure the error message with an invalid PYTEST_QT_API is correct.
    """
    testdir.makepyfile(
        """
        import pytest
        def test_foo(qtbot):
            pass
        """
    )
    # "piecute" is not one of the supported binding names.
    monkeypatch.setenv("PYTEST_QT_API", "piecute")
    result = testdir.runpytest_subprocess()
    result.stderr.fnmatch_lines(
        ["* Invalid value for $PYTEST_QT_API: piecute, expected one of *"]
    )
def test_qapp_args(testdir):
    """
    Test customizing of QApplication arguments.
    """
    # Override the session-scoped qapp_args fixture so the QApplication is
    # constructed with a custom command-line argument.
    testdir.makeconftest(
        """
        import pytest
        @pytest.fixture(scope='session')
        def qapp_args():
            return ['--test-arg']
        """
    )
    testdir.makepyfile(
        """
        def test_args(qapp):
            assert '--test-arg' in list(qapp.arguments())
        """
    )
    result = testdir.runpytest_subprocess()
    result.stdout.fnmatch_lines(["*= 1 passed in *"])
def test_importerror(monkeypatch):
    """When no Qt binding is importable, a UsageError lists every attempt."""
    def _fake_import(name, *args):
        # Simulate an environment where no Qt binding can be imported.
        raise ModuleNotFoundError(f"Failed to import {name}")
    monkeypatch.delenv("PYTEST_QT_API", raising=False)
    monkeypatch.setattr(qt_compat, "_import", _fake_import)
    expected = (
        "pytest-qt requires either PySide2, PySide6, PyQt5 or PyQt6 installed.\n"
        "  PyQt5.QtCore: Failed to import PyQt5.QtCore\n"
        "  PyQt6.QtCore: Failed to import PyQt6.QtCore\n"
        "  PySide2.QtCore: Failed to import PySide2.QtCore\n"
        "  PySide6.QtCore: Failed to import PySide6.QtCore"
    )
    with pytest.raises(pytest.UsageError, match=expected):
        qt_api.set_qt_api(api=None)
def test_before_close_func(testdir):
    """
    Test the `before_close_func` argument of qtbot.addWidget.
    """
    import sys
    # The generated test stores a flag on the (shared) sys module so this
    # outer test can observe that the callback actually ran in-process.
    testdir.makepyfile(
        """
        import sys
        import pytest
        from pytestqt.qt_compat import qt_api
        def widget_closed(w):
            assert w.some_id == 'my id'
            sys.pytest_qt_widget_closed = True
        @pytest.fixture
        def widget(qtbot):
            w = qt_api.QtWidgets.QWidget()
            w.some_id = 'my id'
            qtbot.add_widget(w, before_close_func=widget_closed)
            return w
        def test_foo(widget):
            pass
        """
    )
    # Must run in-process so the sys-module flag is visible here.
    result = testdir.runpytest_inprocess()
    result.stdout.fnmatch_lines(["*= 1 passed in *"])
    assert sys.pytest_qt_widget_closed
def test_addwidget_typeerror(testdir, qtbot):
    """
    addWidget must reject plain QObjects up front instead of failing later.
    """
    non_widget = qt_api.QtCore.QObject()
    with pytest.raises(TypeError):
        qtbot.addWidget(non_widget)
|
|
# Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
import abc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder import keymgr
# Configuration options controlling how volume metadata is versioned during
# backup and how often progress notifications are emitted.
service_opts = [
    cfg.IntOpt('backup_metadata_version', default=2,
               help='Backup metadata version to be used when backing up '
                    'volume metadata. If this number is bumped, make sure the '
                    'service doing the restore supports the new version.'),
    cfg.IntOpt('backup_object_number_per_notification',
               default=10,
               help='The number of chunks or objects, for which one '
                    'Ceilometer notification will be sent'),
    cfg.IntOpt('backup_timer_interval',
               default=120,
               help='Interval, in seconds, between two progress notifications '
                    'reporting the backup status'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
# Module-level logger shared by all classes below.
LOG = logging.getLogger(__name__)
class BackupMetadataAPI(base.Base):
    """Collects, serializes and restores volume metadata for backups.

    Metadata is grouped into three typed sections (base Volume fields, user
    volume metadata and Glance image metadata), stored under the tags below
    alongside a version number that selects the restore strategy.
    """

    TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
    TYPE_TAG_VOL_META = 'volume-metadata'
    TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'

    def __init__(self, context, db_driver=None):
        super(BackupMetadataAPI, self).__init__(db_driver)
        self.context = context

    @staticmethod
    def _is_serializable(value):
        """Returns True if value is serializable."""
        try:
            jsonutils.dumps(value)
        except TypeError:
            LOG.info(_LI("Value with type=%s is not serializable"),
                     type(value))
            return False
        return True

    def _save_vol_base_meta(self, container, volume_id):
        """Save base volume metadata to container.

        This will fetch all fields from the db Volume object for volume_id and
        save them in the provided container dictionary.
        """
        type_tag = self.TYPE_TAG_VOL_BASE_META
        LOG.debug("Getting metadata type '%s'", type_tag)
        meta = self.db.volume_get(self.context, volume_id)
        if meta:
            container[type_tag] = {}
            for key, value in meta:
                # Exclude fields that are "not JSON serializable"
                if not self._is_serializable(value):
                    LOG.info(_LI("Unable to serialize field '%s' - excluding "
                                 "from backup"), key)
                    continue
                # Copy the encryption key uuid for backup.
                # BUG FIX: the original used ``key is 'encryption_key_id'``,
                # which compares object identity, not string equality.
                if key == 'encryption_key_id' and value is not None:
                    value = keymgr.API().copy_key(self.context, value)
                    LOG.debug("Copying encryption key uuid for backup.")
                container[type_tag][key] = value
            LOG.debug("Completed fetching metadata type '%s'", type_tag)
        else:
            LOG.debug("No metadata type '%s' available", type_tag)

    def _save_vol_meta(self, container, volume_id):
        """Save volume metadata to container.

        This will fetch all fields from the db VolumeMetadata object for
        volume_id and save them in the provided container dictionary.
        """
        type_tag = self.TYPE_TAG_VOL_META
        LOG.debug("Getting metadata type '%s'", type_tag)
        meta = self.db.volume_metadata_get(self.context, volume_id)
        if meta:
            container[type_tag] = {}
            for entry in meta:
                # Exclude fields that are "not JSON serializable"
                if not self._is_serializable(meta[entry]):
                    LOG.info(_LI("Unable to serialize field '%s' - excluding "
                                 "from backup"), entry)
                    continue
                container[type_tag][entry] = meta[entry]
            LOG.debug("Completed fetching metadata type '%s'", type_tag)
        else:
            LOG.debug("No metadata type '%s' available", type_tag)

    def _save_vol_glance_meta(self, container, volume_id):
        """Save volume Glance metadata to container.

        This will fetch all fields from the db VolumeGlanceMetadata object for
        volume_id and save them in the provided container dictionary.
        """
        type_tag = self.TYPE_TAG_VOL_GLANCE_META
        LOG.debug("Getting metadata type '%s'", type_tag)
        try:
            meta = self.db.volume_glance_metadata_get(self.context, volume_id)
            if meta:
                container[type_tag] = {}
                for entry in meta:
                    # Exclude fields that are "not JSON serializable"
                    if not self._is_serializable(entry.value):
                        LOG.info(_LI("Unable to serialize field '%s' - "
                                     "excluding from backup"), entry)
                        continue
                    container[type_tag][entry.key] = entry.value
                LOG.debug("Completed fetching metadata type '%s'", type_tag)
        except exception.GlanceMetadataNotFound:
            LOG.debug("No metadata type '%s' available", type_tag)

    @staticmethod
    def _filter(metadata, fields):
        """Returns set of metadata restricted to required fields.

        If fields is empty list, the full set is returned.
        """
        if fields == []:
            return metadata
        subset = {}
        for field in fields:
            if field in metadata:
                subset[field] = metadata[field]
            else:
                LOG.debug("Excluding field '%s'", field)
        return subset

    def _restore_vol_base_meta(self, metadata, volume_id, fields):
        """Restore values to Volume object for provided fields."""
        LOG.debug("Restoring volume base metadata")
        # Ignore unencrypted backups.
        key = 'encryption_key_id'
        if key in fields and key in metadata and metadata[key] is not None:
            self._restore_vol_encryption_meta(volume_id,
                                              metadata['volume_type_id'])
        metadata = self._filter(metadata, fields)
        self.db.volume_update(self.context, volume_id, metadata)

    def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
        """Restores the volume_type_id for encryption if needed.

        Only allow restoration of an encrypted backup if the destination
        volume has the same volume type as the source volume. Otherwise
        encryption will not work. If volume types are already the same,
        no action is needed.
        """
        dest_vol = self.db.volume_get(self.context, volume_id)
        if dest_vol['volume_type_id'] != src_volume_type_id:
            LOG.debug("Volume type id's do not match.")
            # If the volume types do not match, and the destination volume
            # does not have a volume type, force the destination volume
            # to have the encrypted volume type, provided it still exists.
            if dest_vol['volume_type_id'] is None:
                try:
                    self.db.volume_type_get(
                        self.context, src_volume_type_id)
                except exception.VolumeTypeNotFound:
                    LOG.debug("Volume type of source volume has been "
                              "deleted. Encrypted backup restore has "
                              "failed.")
                    msg = _("The source volume type '%s' is not "
                            "available.") % (src_volume_type_id)
                    raise exception.EncryptedBackupOperationFailed(msg)
                # Update dest volume with src volume's volume_type_id.
                LOG.debug("The volume type of the destination volume "
                          "will become the volume type of the source "
                          "volume.")
                self.db.volume_update(self.context, volume_id,
                                      {'volume_type_id': src_volume_type_id})
            else:
                # Volume type id's do not match, and destination volume
                # has a volume type. Throw exception.
                LOG.warning(_LW("Destination volume type is different from "
                                "source volume type for an encrypted volume. "
                                "Encrypted backup restore has failed."))
                msg = (_("The source volume type '%(src)s' is different "
                         "than the destination volume type '%(dest)s'.") %
                       {'src': src_volume_type_id,
                        'dest': dest_vol['volume_type_id']})
                raise exception.EncryptedBackupOperationFailed(msg)

    def _restore_vol_meta(self, metadata, volume_id, fields):
        """Restore values to VolumeMetadata object for provided fields."""
        LOG.debug("Restoring volume metadata")
        metadata = self._filter(metadata, fields)
        self.db.volume_metadata_update(self.context, volume_id, metadata, True)

    def _restore_vol_glance_meta(self, metadata, volume_id, fields):
        """Restore values to VolumeGlanceMetadata object for provided fields.

        First delete any existing metadata then save new values.
        """
        LOG.debug("Restoring volume glance metadata")
        metadata = self._filter(metadata, fields)
        self.db.volume_glance_metadata_delete_by_volume(self.context,
                                                        volume_id)
        for key, value in metadata.items():
            self.db.volume_glance_metadata_create(self.context,
                                                  volume_id,
                                                  key, value)
        # Now mark the volume as bootable
        self.db.volume_update(self.context, volume_id,
                              {'bootable': True})

    def _v1_restore_factory(self):
        """All metadata is backed up but we selectively restore.

        Returns a dictionary of the form:

            {<type tag>: (<restore function>, <fields list>)}

        Empty field list indicates that all backed up fields should be
        restored.
        """
        return {self.TYPE_TAG_VOL_META:
                (self._restore_vol_meta, []),
                self.TYPE_TAG_VOL_GLANCE_META:
                (self._restore_vol_glance_meta, [])}

    def _v2_restore_factory(self):
        """All metadata is backed up but we selectively restore.

        Returns a dictionary of the form:

            {<type tag>: (<restore function>, <fields list>)}

        Empty field list indicates that all backed up fields should be
        restored.
        """
        return {self.TYPE_TAG_VOL_BASE_META:
                (self._restore_vol_base_meta,
                 ['encryption_key_id']),
                self.TYPE_TAG_VOL_META:
                (self._restore_vol_meta, []),
                self.TYPE_TAG_VOL_GLANCE_META:
                (self._restore_vol_glance_meta, [])}

    def get(self, volume_id):
        """Get volume metadata.

        Returns a json-encoded dict containing all metadata and the restore
        version i.e. the version used to decide what actually gets restored
        from this container when doing a backup restore.
        """
        container = {'version': CONF.backup_metadata_version}
        self._save_vol_base_meta(container, volume_id)
        self._save_vol_meta(container, volume_id)
        self._save_vol_glance_meta(container, volume_id)
        if container:
            return jsonutils.dumps(container)
        else:
            return None

    def put(self, volume_id, json_metadata):
        """Restore volume metadata to a volume.

        The json container should contain a version that is supported here.

        :raises BackupMetadataUnsupportedVersion: if the container's version
            is neither 1 nor 2.
        """
        meta_container = jsonutils.loads(json_metadata)
        version = meta_container['version']
        if version == 1:
            factory = self._v1_restore_factory()
        elif version == 2:
            factory = self._v2_restore_factory()
        else:
            msg = (_("Unsupported backup metadata version (%s)") % (version))
            raise exception.BackupMetadataUnsupportedVersion(msg)
        # Loop variable renamed from 'type', which shadowed the builtin.
        for meta_type, (func, fields) in factory.items():
            if meta_type in meta_container:
                func(meta_container[meta_type], volume_id, fields)
            else:
                LOG.debug("No metadata of type '%s' to restore", meta_type)
@six.add_metaclass(abc.ABCMeta)
class BackupDriver(base.Base):
    """Abstract base class that all backup drivers must implement."""

    def __init__(self, context, db_driver=None):
        super(BackupDriver, self).__init__(db_driver)
        self.context = context
        self.backup_meta_api = BackupMetadataAPI(context, db_driver)

    def get_metadata(self, volume_id):
        """Return the json-encoded backup metadata for the given volume."""
        return self.backup_meta_api.get(volume_id)

    def put_metadata(self, volume_id, json_metadata):
        """Restore json-encoded backup metadata onto the given volume."""
        self.backup_meta_api.put(volume_id, json_metadata)

    @abc.abstractmethod
    def backup(self, backup, volume_file, backup_metadata=False):
        """Start a backup of a specified volume."""
        return

    @abc.abstractmethod
    def restore(self, backup, volume_id, volume_file):
        """Restore a saved backup."""
        return

    @abc.abstractmethod
    def delete(self, backup):
        """Delete a saved backup."""
        return

    def export_record(self, backup):
        """Export backup record.

        Default backup driver implementation: serialize the backup record
        describing the backup into a string.

        :param backup: backup entry to export
        :returns backup_url - a string describing the backup record
        """
        # NOTE(review): str.encode("base64") only exists on Python 2; under
        # Python 3 this would need the base64 module instead.
        return jsonutils.dumps(backup).encode("base64")

    def import_record(self, backup_url):
        """Import and verify backup record.

        Default backup driver implementation: de-serialize the backup record
        into a dictionary, so we can update the database.

        :param backup_url: driver specific backup record string
        :returns dictionary object with database updates
        """
        return jsonutils.loads(backup_url.decode("base64"))
@six.add_metaclass(abc.ABCMeta)
class BackupDriverWithVerify(BackupDriver):
    """Backup driver that additionally supports verifying stored backups."""

    @abc.abstractmethod
    def verify(self, backup):
        """Verify that the backup exists on the backend.

        Verify that the backup is OK, possibly following an import record
        operation.

        :param backup: backup id of the backup to verify
        :raises: InvalidBackup, NotImplementedError
        """
        return
|
|
# Copyright (c) 2012 Denis Bilenko. See LICENSE for details.
"""A simple UDP server.
For every message received, it sends a reply back.
You can use udp_client.py to send a message.
"""
from gevent.greenlet import Greenlet
from gevent.server import DatagramServer
from object import Resend
from binascii import hexlify
from msg_write import write_pull_ack, write_pull_resp, write_pull_resp_multi, get_random_token
from msg_write import write_push_ack
from msg_read import read_push_data
from object.gateway import Gateway, PullInfo
from object.device import Device, ClassType
from datetime import datetime
from const import Const
from object.message import MsgDn
from object.group import Group
from otaa import write_join_accept_data
from gevent import monkey
monkey.patch_socket()
from gevent import sleep
import gevent
import time
import json
from binascii import unhexlify
from utils.log import Logger, IDType, Action
from utils.timing_log import timing
from utils.db0 import db0, Channel0, ConstDB0
from utils.db1 import db1, Channel1
from utils.db6 import db6, CHANNEL_TX_ACK
from config import HOST
# Last datagram token seen per gateway MAC address; used by PullServer.handle
# to detect UDP retransmissions of the same packet.
g_token = {}
import sys
class PullServer(DatagramServer):
    """UDP server handling gateway datagrams (TX_ACK, PULL_DATA, PUSH_DATA).

    Answers PULL_DATA with a PULL_ACK and (deprecated) PUSH_DATA with a
    PUSH_ACK, and republishes TX_ACK tokens on a db6 pub/sub channel for any
    ReSender greenlet waiting on them.
    """

    def handle(self, data, address):
        # Datagram layout: [0] protocol version, [1:3] random token,
        # [3] packet-type identifier, [4:12] gateway MAC address.
        protocol_version = data[0]
        token = data[1:3]
        data_type = data[3]
        if data_type == Const.TX_ACK_IDENTIFIER:
            # Hand the acknowledgement token to whoever is waiting for it.
            db6.publish(CHANNEL_TX_ACK, token)
            Logger.info(action=Action.tx_ack, type=IDType.token, id=token,
                        msg='publish db6 %s: %s' % (CHANNEL_TX_ACK, token))
            return
        gateway_mac_addr = data[4:12]
        gateway = Gateway.objects.get(gateway_mac_addr)
        assert protocol_version == 1 or protocol_version == 2, 'PROTOCOL_VERSION ERROR, GOT: %s' % protocol_version
        if gateway is not None:
            # Compare with the gateway's previous token to detect UDP
            # retransmissions of the same datagram.
            p_token = g_token.get(gateway.mac_addr)
            g_token[gateway.mac_addr] = token
            gateway.set_time(time.time())
            if data_type == Const.PULL_DATA_IDENTIFIER:
                gateway.request2niot_platform()  # make a request to the
                # niot platform.
                restart = gateway.pop_restart()
                pull_ack = write_pull_ack(
                    protocol_version, token, gateway.disable, restart)
                self.socket.sendto(pull_ack, address)
                Logger.info(msg='%s' % pull_ack, type=IDType.ip_addr,
                            action=Action.pull_ack, id=address)
                if p_token != token:
                    Logger.info(msg='NEW: %s' % data, type=IDType.gateway,
                                action=Action.pull_data, id='%s' % hexlify(gateway.mac_addr).decode())
                    # Remember where (and with which protocol version) this
                    # gateway can be reached for later downlinks.
                    pull_info = PullInfo(
                        gateway.mac_addr, ip_addr=address, prot_ver=protocol_version)
                    pull_info.save()
                else:
                    Logger.info(msg='UDP_RETRANSMISSION: %s' % data, type=IDType.gateway,
                                action=Action.pull_data, id='%s' % hexlify(gateway.mac_addr).decode())
            #------------------ deprecated ------------------------#
            elif data_type == Const.PUSH_DATA_IDENTIFIER:
                try:
                    # Payload after the 12-byte header is a JSON document.
                    datagram = json.loads(data[12:].decode())
                except Exception as error:
                    Logger.error(msg='%s' % error, type=IDType.gateway,
                                 action=Action.push_data, id='%s' % hexlify(gateway.mac_addr).decode())
                    return
                push_ack = write_push_ack(token, protocol_version)
                self.socket.sendto(push_ack, address)
                Logger.info(msg='%s' % push_ack, type=IDType.gateway,
                            action=Action.push_ack, id='%s' % hexlify(gateway.mac_addr).decode())
                if p_token != token:
                    Logger.info(msg='NEW: %s' % data, type=IDType.gateway,
                                action=Action.push_data, id='%s' % hexlify(gateway.mac_addr).decode())
                    read_push_data(datagram, gateway)
                else:
                    Logger.info(msg='UDP_RETRANSMISSION %s' % data, type=IDType.gateway,
                                action=Action.push_data, id='%s' % hexlify(gateway.mac_addr).decode())
            #------------------- deprecated ------------------------#
            else:
                Logger.error(msg='Get Unknow Data_type: %s' % data_type, type=IDType.gateway,
                             action=Action.got, id='%s' % hexlify(gateway.mac_addr).decode())
        else:
            # Unknown gateway MAC: it has not been imported into the system.
            Logger.error(msg='Not Imported', type=IDType.gateway,
                         action=Action.pull_ack, id='%s' % hexlify(gateway_mac_addr).decode())
class ReSender(Greenlet):
    """Greenlet that re-transmits a downlink packet until a TX_ACK arrives.

    Subscribes to the TX_ACK pub/sub channel and re-sends ``packet`` to the
    gateway (up to three attempts) unless an acknowledgement carrying the
    packet's token is published first.
    """

    def __init__(self, pull_info, packet, server):
        Greenlet.__init__(self)
        self.ps = db6.pubsub()
        self.server = server
        self.packet = packet
        self.pull_info = pull_info
        # Bytes 1-2 of the datagram are the random token echoed in TX_ACK.
        self.token = packet[1:3]

    def _run(self):
        self.ps.subscribe(CHANNEL_TX_ACK)
        try:
            for attempt in range(0, 3):
                start = time.time()
                while time.time() - start < 0.1:
                    sleep(0.05)
                for item in self.ps.listen():
                    # NOTE(review): listen() is a blocking generator, so this
                    # loop only advances when something is published on the
                    # TX_ACK channel -- confirm this is the intended behavior.
                    Logger.debug(action=Action.resend, type=IDType.ip_addr,
                                 id=self.pull_info.ip_addr, msg='Get Publish TX, %s' % item)
                    if item is not None and item['data'] == self.token:
                        # Acknowledged: stop listening and finish.
                        Logger.info(action=Action.resend, type=IDType.ip_addr,
                                    id=self.pull_info.ip_addr, msg='Get Publish TX, %s' % item)
                        self.ps.unsubscribe()
                        self.ps.close()
                        return
                self.server.sendto(self.packet, self.pull_info.ip_addr)
                Logger.error(action=Action.resend, type=IDType.ip_addr,
                             id=self.pull_info.ip_addr, msg='Resend data %s : %s' % (attempt, self.packet))
            Logger.error(action=Action.resend, type=IDType.gateway, id=self.pull_info.ip_addr,
                         msg='No TX_ACK got, PULL_RESP may not received by gateway')
        except Exception as error:
            # BUG FIX: the original referenced the undefined name ``logger``
            # (NameError whenever an exception occurred); use the project
            # Logger helper instead.
            Logger.error(action=Action.resend, type=IDType.ip_addr,
                         id=self.pull_info.ip_addr, msg=str(error))
        finally:
            try:
                self.ps.unsubscribe()
                self.ps.close()
            except Exception as error:
                # Same BUG FIX as above: ``logger`` was undefined.
                Logger.error(action=Action.resend, type=IDType.ip_addr,
                             id=self.pull_info.ip_addr, msg=str(error))
class Sender(Greenlet):
    """Greenlet that builds and transmits a downlink for one device."""

    def __init__(self, dev_eui, server, rx_window, *args):
        Greenlet.__init__(self)
        self.server = server
        self.dev_eui = dev_eui
        # Receive window to target (values used below: 1, 2, 0).
        self.rx_window = rx_window
        # Extra positional values (timestamps) used only for timing logs.
        self.args = list(args)

    # NOTE(review): gevent Greenlet subclasses conventionally override
    # ``_run``; this class overrides ``run`` instead -- confirm intended.
    def run(self):
        device = Device.objects.get(self.dev_eui)
        if device.dev_class == ClassType.a or (device.dev_class == ClassType.c and self.rx_window == 1):
            # Single-shot send for class A, or class C targeting RX1.
            send_info = write_pull_resp(device, rx_window=self.rx_window)
            if send_info is not None:
                send_data = send_info[0]
                fcnt = send_info[1]
                pull_info = send_info[2]
                self.server.socket.sendto(send_data, pull_info.ip_addr)
                t1 = time.time()
                self.args.append(t1)
                self.args.reverse()
                timing.info(msg='PULL_RESP: DEV:%s, TIME:%s' %
                            (hexlify(device.dev_eui).decode(), self.args))
                Logger.info(msg='RX1Sender %s' % send_data, action=Action.pull_resp,
                            type=IDType.ip_addr, id='%s:%d' % pull_info.ip_addr)
                if pull_info.prot_ver == 2:
                    # Protocol v2 gateways send TX_ACK; retry until it arrives.
                    resend = ReSender(pull_info, send_data, self.server)
                    resend.start()
                # msg = MsgDn(category=ConstDB0.dev, eui=hexlify(device.dev_eui).decode(), ts=int(time.time()), fcnt=fcnt)
                # msg.save()
        else:
            # Class B / class C RX2: keep draining the downlink queue.
            while device.que_down.len() != 0 or Resend(device.dev_eui).check_exist():
                send_data, fcnt, pull_info = write_pull_resp(
                    device, rx_window=self.rx_window)
                if send_data is not None:
                    self.server.socket.sendto(send_data, pull_info.ip_addr)
                    Logger.info(msg='CLASS_B or class_c rx2 Sender %s' % send_data, action=Action.pull_resp,
                                type=IDType.ip_addr, id='%s:%d' % pull_info.ip_addr)
                    if pull_info.prot_ver == 2:
                        resend = ReSender(pull_info, send_data, self.server)
                        resend.start()
                gevent.sleep(3)
def rx_1(server):
    """Dispatch an RX1-window downlink whenever the rx1 alarm fires."""
    pubsub = db0.pubsub()
    pubsub.subscribe(Channel0.rx1_alarm)
    while True:
        for message in pubsub.listen():
            Logger.info(msg='PS Listen %s' %
                        message, type=IDType.sub, action=Action.rx1)
            if message['type'] == 'message':
                received_at = time.time()
                Sender(message['data'], server, 1, received_at).start()
def class_c(server):
    """Dispatch class-C RX2 downlinks when the class-C queue alarm fires."""
    pubsub = db0.pubsub()
    pubsub.subscribe(Channel0.que_down_alarm_c)
    while True:
        for message in pubsub.listen():
            Logger.info(msg='PS Listen %s' %
                        message, type=IDType.sub, action=Action.class_c)
            if message['type'] == 'message':
                # Channel payload looks like '<prefix>:<hex dev_eui>'.
                device_eui = unhexlify(message['data'].decode().split(':')[1])
                # sender = ClassCSender(message['data'], server)
                Sender(device_eui, server, rx_window=2).start()
def class_b(server):
    """Dispatch class-B downlinks when the class-B queue alarm fires."""
    pubsub = db0.pubsub()
    pubsub.subscribe(Channel0.que_down_alarm_b)
    while True:
        for message in pubsub.listen():
            Logger.info(msg='PS Listen %s' %
                        message, type=IDType.sub, action=Action.class_b)
            if message['type'] == 'message':
                # Channel payload looks like '<prefix>:<hex dev_eui>'.
                device_eui = unhexlify(message['data'].decode().split(':')[1])
                # sender = ClassBSender(message['data'], server)
                Sender(device_eui, server=server, rx_window=0).start()
        gevent.sleep(1)
def otaa(server):
    """Send join-accept answers published on the OTAA pattern channel."""
    pubsub = db1.pubsub()
    pubsub.psubscribe(Channel1.join_accept_alarm + '*')
    while True:
        for message in pubsub.listen():
            Logger.info(msg='PS Listen %s' %
                        message, type=IDType.sub, action=Action.otaa)
            if message['type'] == 'pmessage':
                OTAASender(message, server=server).start()
class OTAASender(Greenlet):
    """Greenlet that delivers a join-accept packet to a device's gateway."""

    def __init__(self, item, server):
        Greenlet.__init__(self)
        # The channel name suffix after the alarm prefix is the hex-encoded
        # device EUI.
        dev_eui = unhexlify(item['channel'].decode().replace(
            Channel1.join_accept_alarm, ''))
        accept = item['data'].decode()
        self.server = server
        self.dev_eui = dev_eui
        self.data = accept

    def _run(self):
        result = write_join_accept_data(self.dev_eui, self.data)
        if not result:
            Logger.error(action=Action.otaa,
                         msg='No packet, pull_info return!!!')
            return
        packet = result[0]
        pull_info = result[1]
        # The re-sender is started before the packet is transmitted, so it is
        # subscribed to TX_ACK by the time the gateway can answer.
        if pull_info.prot_ver == 2:
            resend = ReSender(pull_info, packet, self.server)
            resend.start()
        self.server.sendto(packet, pull_info.ip_addr)
        Logger.info(action=Action.otaa, type=IDType.ip_addr, id='%s:%d' %
                    pull_info.ip_addr, msg='SENT JOIN ACCEPT %s' % packet)
def group(server):
    """Broadcast multicast (group) downlinks when the multi queue alarm fires."""
    ps = db0.pubsub()
    ps.subscribe(Channel0.que_down_alarm_multi)
    Logger.info(msg='Listen IN GROUP', type=IDType.sub, action=Action.multi)
    while True:
        for item in ps.listen():
            Logger.info(msg='PS Listen %s' %
                        item, type=IDType.sub, action=Action.multi)
            if item['type'] == 'message':
                # Channel payload looks like '<prefix>:<hex group id>'.
                key_split = item['data'].decode().split(':')
                group = Group.objects.get(unhexlify(key_split[1]))
                send_packet_info = write_pull_resp_multi(group)
                if send_packet_info is not None:
                    send_packets = send_packet_info['packet']
                    pull_info_set = send_packet_info['pull_info_set']
                    fcnt = send_packet_info['fcnt']
                    # Send every packet through every known gateway so all
                    # group members are reached.
                    for pull_info in pull_info_set:
                        for send_packet in send_packets:
                            # Frame = version byte + random token +
                            # PULL_RESP identifier + JSON txpk payload.
                            send_packet = bytes([pull_info.prot_ver]) + get_random_token(
                            ) + Const.PULL_RESP_IDENTIFIER + json.dumps({'txpk': send_packet}).encode()
                            server.socket.sendto(
                                send_packet, pull_info.ip_addr)
                            Logger.info(msg='%s' % send_packet, action=Action.pull_resp,
                                        type=IDType.ip_addr, id='%s:%d' % pull_info.ip_addr)
                            # msg = MsgDn(category=ConstDB0.group, ts=int(time.time()), fcnt=fcnt, gateways=pull_info.mac_addr,
                            #             eui=hexlify(group.app.app_eui).decode() + ':' + hexlify(group.id).decode())
                            # msg.save()
# class ClassCSender(Greenlet):
# def __init__(self, msg, server):
# Greenlet.__init__(self)
# self.server = server
# self.msg = msg
#
# def _run(self):
# key_split = self.msg.decode().split(':')
# dev_eui = key_split[1]
# device = Device.objects.get(unhexlify(dev_eui))
# while device.que_down.len() != 0 or Resend(device.dev_eui).check_exist():
# send_info = write_pull_resp(device, rx_window=2)
# if send_info is not None:
# send_data = send_info[0]
# fcnt = send_info[1]
# pull_info = send_info[2]
# self.server.socket.sendto(send_data, pull_info.ip_addr)
# # msg = MsgDn(category=ConstDB0.dev, eui=dev_eui, ts=int(time.time()), fcnt=fcnt)
# # msg.save()
# Logger.info(msg='ClassCSender %s' % send_data, action=Action.pull_resp, type=IDType.ip_addr, id='%s:%d' % pull_info.ip_addr)
# gevent.sleep(3)
#
#
# class ClassBSender(Greenlet):
# def __init__(self, msg, server):
# Greenlet.__init__(self)
# self.server = server
# self.msg = msg
#
# def _run(self):
# key_split = self.msg.decode().split(':')
# dev_eui = key_split[1]
# device = Device.objects.get(unhexlify(dev_eui))
# while device.que_down.len() != 0 or Resend(device.dev_eui).check_exist():
# send_data, fcnt, pull_info = write_pull_resp(device, rx_window=0)
# if send_data is not None:
# self.server.socket.sendto(send_data, pull_info.ip_addr)
# # msg = MsgDn(category=ConstDB0.dev, eui=dev_eui, ts=int(time.time()), fcnt=fcnt)
# # msg.save()
# Logger.info(msg='ClassBSender %s' % send_data, action=Action.pull_resp, type=IDType.ip_addr, id='%s:%d' % pull_info.ip_addr)
def serve(server):
    """Run the UDP server loop forever (blocking)."""
    print('[GServer] begin to work!')
    server.serve_forever()
def handle_uncaught_except(exctype, value, traceback):
    # Looks like a sys.excepthook replacement that just prints the exception
    # details.  NOTE(review): never installed anywhere in this module --
    # confirm whether a sys.excepthook assignment is missing.
    print('??????????????????????')
    print(exctype, value, traceback)
if __name__ == '__main__':
    # BUG FIX: the original passed (9200) -- a plain int, not a tuple -- as
    # the listener.  Bind to (HOST, 9200) to match the startup message below.
    pull_server = PullServer((HOST, 9200))
    print('start serve on %s:%s' % (HOST, 9200))
    # Run the server alongside all downlink dispatch loops until they exit.
    gevent.joinall([
        gevent.spawn(serve, pull_server),
        gevent.spawn(class_c, pull_server),
        gevent.spawn(group, pull_server),
        gevent.spawn(rx_1, pull_server),
        gevent.spawn(class_b, pull_server),
        gevent.spawn(otaa, pull_server)
    ])
|
|
# -*- coding: utf-8 -*-
"""Field classes for various types of data."""
from __future__ import absolute_import, unicode_literals
import collections
import datetime as dt
import uuid
import warnings
import decimal
from operator import attrgetter
from marshmallow import validate, utils, class_registry
from marshmallow.base import FieldABC, SchemaABC
from marshmallow.utils import missing as missing_
from marshmallow.compat import text_type, basestring
from marshmallow.exceptions import ValidationError
from marshmallow.validate import Validator
# Public API of this module: names exported by ``from marshmallow.fields
# import *`` and documented as the field catalogue.
__all__ = [
    'Field',
    'Raw',
    'Nested',
    'Dict',
    'List',
    'String',
    'UUID',
    'Number',
    'Integer',
    'Decimal',
    'Boolean',
    'FormattedString',
    'Float',
    'DateTime',
    'LocalDateTime',
    'Time',
    'Date',
    'TimeDelta',
    'Url',
    'URL',
    'Email',
    'Method',
    'Function',
    'Str',
    'Bool',
    'Int',
    'Constant',
]
# Message used (via AssertionError) when Field.fail() is called with an
# error key that is absent from the field's ``error_messages`` dict.
MISSING_ERROR_MESSAGE = (
    'ValidationError raised by `{class_name}`, but error key `{key}` does '
    'not exist in the `error_messages` dictionary.'
)
# Sentinel schema name that lets a Nested field reference its own parent
# schema (recursive nesting).
_RECURSIVE_NESTED = 'self'
class Field(FieldABC):
    """Basic field from which other fields should extend. It applies no
    formatting by default, and should only be used in cases where
    data does not need to be formatted before being serialized or deserialized.
    On error, the name of the field will be returned.
    :param default: If set, this value will be used during serialization if the input value
        is missing. If not set, the field will be excluded from the serialized output if the
        input value is missing. May be a value or a callable.
    :param str attribute: The name of the attribute to get the value from. If
        `None`, assumes the attribute has the same name as the field.
    :param str load_from: Additional key to look for when deserializing. Will only
        be checked if the field's name is not found on the input dictionary. If checked,
        it will return this parameter on error.
    :param str dump_to: Field name to use as a key when serializing.
    :param callable validate: Validator or collection of validators that are called
        during deserialization. Validator takes a field's input value as
        its only parameter and returns a boolean.
        If it returns `False`, an :exc:`ValidationError` is raised.
    :param required: Raise a :exc:`ValidationError` if the field value
        is not supplied during deserialization.
    :param allow_none: Set this to `True` if `None` should be considered a valid value during
        validation/deserialization. If ``missing=None`` and ``allow_none`` is unset,
        will default to ``True``. Otherwise, the default is ``False``.
    :param bool load_only: If `True` skip this field during serialization, otherwise
        its value will be present in the serialized data.
    :param bool dump_only: If `True` skip this field during deserialization, otherwise
        its value will be present in the deserialized object. In the context of an
        HTTP API, this effectively marks the field as "read-only".
    :param missing: Default deserialization value for the field if the field is not
        found in the input data. May be a value or a callable.
    :param dict error_messages: Overrides for `Field.default_error_messages`.
    :param metadata: Extra arguments to be stored as metadata.
    .. versionchanged:: 2.0.0
        Removed `error` parameter. Use ``error_messages`` instead.
    .. versionchanged:: 2.0.0
        Added `allow_none` parameter, which makes validation/deserialization of `None`
        consistent across fields.
    .. versionchanged:: 2.0.0
        Added `load_only` and `dump_only` parameters, which allow field skipping
        during the (de)serialization process.
    .. versionchanged:: 2.0.0
        Added `missing` parameter, which indicates the value for a field if the field
        is not found during deserialization.
    .. versionchanged:: 2.0.0
        ``default`` value is only used if explicitly set. Otherwise, missing values
        inputs are excluded from serialized output.
    """
    # Some fields, such as Method fields and Function fields, are not expected
    # to exists as attributes on the objects to serialize. Set this to False
    # for those fields
    _CHECK_ATTRIBUTE = True
    _creation_index = 0  # Used for sorting
    #: Default error messages for various kinds of errors. The keys in this dictionary
    #: are passed to `Field.fail`. The values are error messages passed to
    #: :exc:`marshmallow.ValidationError`.
    default_error_messages = {
        'required': 'Missing data for required field.',
        'type': 'Invalid input type.', # used by Unmarshaller
        'null': 'Field may not be null.',
        'validator_failed': 'Invalid value.'
    }
    def __init__(self, default=missing_, attribute=None, load_from=None, dump_to=None,
                 error=None, validate=None, required=False, allow_none=None, load_only=False,
                 dump_only=False, missing=missing_, error_messages=None, **metadata):
        self.default = default
        self.attribute = attribute
        self.load_from = load_from  # this flag is used by Unmarshaller
        self.dump_to = dump_to  # this flag is used by Marshaller
        self.validate = validate
        # Normalize ``validate`` into ``self.validators``: a list-like of
        # callables. Generators are materialized so they can be iterated
        # more than once; a single callable becomes a one-element list.
        if utils.is_iterable_but_not_string(validate):
            if not utils.is_generator(validate):
                self.validators = validate
            else:
                self.validators = list(validate)
        elif callable(validate):
            self.validators = [validate]
        elif validate is None:
            self.validators = []
        else:
            raise ValueError("The 'validate' parameter must be a callable "
                             "or a collection of callables.")
        self.required = required
        # If missing=None, None should be considered valid by default
        if allow_none is None:
            if missing is None:
                self.allow_none = True
            else:
                self.allow_none = False
        else:
            self.allow_none = allow_none
        self.load_only = load_only
        self.dump_only = dump_only
        self.missing = missing
        self.metadata = metadata
        # Monotonic counter so schemas can recover field declaration order.
        self._creation_index = Field._creation_index
        Field._creation_index += 1
        # Collect default error message from self and parent classes
        messages = {}
        for cls in reversed(self.__class__.__mro__):
            messages.update(getattr(cls, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages
    def __repr__(self):
        return ('<fields.{ClassName}(default={self.default!r}, '
                'attribute={self.attribute!r}, '
                'validate={self.validate}, required={self.required}, '
                'load_only={self.load_only}, dump_only={self.dump_only}, '
                'missing={self.missing}, allow_none={self.allow_none}, '
                'error_messages={self.error_messages})>'
                .format(ClassName=self.__class__.__name__, self=self))
    def get_value(self, attr, obj, accessor=None, default=missing_):
        """Return the value for a given key from an object."""
        # NOTE: Use getattr instead of direct attribute access here so that
        # subclasses aren't required to define `attribute` member
        attribute = getattr(self, 'attribute', None)
        accessor_func = accessor or utils.get_value
        check_key = attr if attribute is None else attribute
        return accessor_func(check_key, obj, default)
    def _validate(self, value):
        """Perform validation on ``value``. Raise a :exc:`ValidationError` if validation
        does not succeed.
        """
        # All validators run even if earlier ones fail, so every error is
        # collected and reported in one ValidationError.
        errors = []
        kwargs = {}
        for validator in self.validators:
            try:
                r = validator(value)
                # Plain callables signal failure by returning False;
                # Validator instances raise ValidationError themselves.
                if not isinstance(validator, Validator) and r is False:
                    self.fail('validator_failed')
            except ValidationError as err:
                kwargs.update(err.kwargs)
                if isinstance(err.messages, dict):
                    errors.append(err.messages)
                else:
                    errors.extend(err.messages)
        if errors:
            raise ValidationError(errors, **kwargs)
    # Hat tip to django-rest-framework.
    def fail(self, key, **kwargs):
        """A helper method that simply raises a `ValidationError`.
        """
        try:
            msg = self.error_messages[key]
        except KeyError:
            # An unknown error key is a programming error, not a data
            # error, so surface it as an AssertionError.
            class_name = self.__class__.__name__
            msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)
            raise AssertionError(msg)
        if isinstance(msg, basestring):
            msg = msg.format(**kwargs)
        raise ValidationError(msg)
    def _validate_missing(self, value):
        """Validate missing values. Raise a :exc:`ValidationError` if
        `value` should be considered missing.
        """
        if value is missing_:
            if hasattr(self, 'required') and self.required:
                self.fail('required')
        if value is None:
            if hasattr(self, 'allow_none') and self.allow_none is not True:
                self.fail('null')
    def serialize(self, attr, obj, accessor=None):
        """Pulls the value for the given key from the object, applies the
        field's formatting and returns the result.
        :param str attr: The attibute or key to get from the object.
        :param str obj: The object to pull the key from.
        :param callable accessor: Function used to pull values from ``obj``.
        :raise ValidationError: In case of formatting problem
        """
        if self._CHECK_ATTRIBUTE:
            value = self.get_value(attr, obj, accessor=accessor)
            # A missing value short-circuits to ``default`` (which may be a
            # callable) without going through _serialize.
            if value is missing_:
                if hasattr(self, 'default'):
                    if callable(self.default):
                        return self.default()
                    else:
                        return self.default
        else:
            value = None
        return self._serialize(value, attr, obj)
    def deserialize(self, value, attr=None, data=None):
        """Deserialize ``value``.
        :raise ValidationError: If an invalid value is passed or if a required value
            is missing.
        """
        # Validate required fields, deserialize, then validate
        # deserialized value
        self._validate_missing(value)
        if getattr(self, 'allow_none', False) is True and value is None:
            return None
        output = self._deserialize(value, attr, data)
        self._validate(output)
        return output
    # Methods for concrete classes to override.
    def _add_to_schema(self, field_name, schema):
        """Update field with values from its parent schema. Called by
        :meth:`__set_field_attrs <marshmallow.Schema.__set_field_attrs>`.
        :param str field_name: Field name set in schema.
        :param Schema schema: Parent schema.
        """
        self.parent = self.parent or schema
        self.name = self.name or field_name
    def _serialize(self, value, attr, obj):
        """Serializes ``value`` to a basic Python datatype. Noop by default.
        Concrete :class:`Field` classes should implement this method.
        Example: ::
            class TitleCase(Field):
                def _serialize(self, value, attr, obj):
                    if not value:
                        return ''
                    return unicode(value).title()
        :param value: The value to be serialized.
        :param str attr: The attribute or key on the object to be serialized.
        :param object obj: The object the value was pulled from.
        :raise ValidationError: In case of formatting or validation failure.
        :return: The serialized value
        """
        return value
    def _deserialize(self, value, attr, data):
        """Deserialize value. Concrete :class:`Field` classes should implement this method.
        :param value: The value to be deserialized.
        :param str attr: The attribute/key in `data` to be deserialized.
        :param dict data: The raw input data passed to the `Schema.load`.
        :raise ValidationError: In case of formatting or validation failure.
        :return: The deserialized value.
        .. versionchanged:: 2.0.0
            Added ``attr`` and ``data`` parameters.
        """
        return value
    # Properties
    @property
    def context(self):
        """The context dictionary for the parent :class:`Schema`."""
        return self.parent.context
    @property
    def root(self):
        """Reference to the `Schema` that this field belongs to even if it is buried in a `List`."""
        ret = self
        while hasattr(ret, 'parent') and ret.parent:
            ret = ret.parent
        return ret
class Raw(Field):
    """Pass-through field: values are (de)serialized completely unchanged."""
class Nested(Field):
    """Allows you to nest a :class:`Schema <marshmallow.Schema>`
    inside a field.
    Examples: ::
        user = fields.Nested(UserSchema)
        user2 = fields.Nested('UserSchema')  # Equivalent to above
        collaborators = fields.Nested(UserSchema, many=True, only='id')
        parent = fields.Nested('self')
    When passing a `Schema <marshmallow.Schema>` instance as the first argument,
    the instance's ``exclude``, ``only``, and ``many`` attributes will be respected.
    Therefore, when passing the ``exclude``, ``only``, or ``many`` arguments to `fields.Nested`,
    you should pass a `Schema <marshmallow.Schema>` class (not an instance) as the first argument.
    ::
        # Yes
        author = fields.Nested(UserSchema, only=('id', 'name'))
        # No
        author = fields.Nested(UserSchema(), only=('id', 'name'))
    :param Schema nested: The Schema class or class name (string)
        to nest, or ``"self"`` to nest the :class:`Schema` within itself.
    :param default: Default value to if attribute is missing or None
    :param tuple exclude: A list or tuple of fields to exclude.
    :param required: Raise an :exc:`ValidationError` during deserialization
        if the field, *and* any required field values specified
        in the `nested` schema, are not found in the data. If not a `bool`
        (e.g. a `str`), the provided value will be used as the message of the
        :exc:`ValidationError` instead of the default message.
    :param only: A tuple or string of the field(s) to marshal. If `None`, all fields
        will be marshalled. If a field name (string) is given, only a single
        value will be returned as output instead of a dictionary.
        This parameter takes precedence over ``exclude``.
    :param bool many: Whether the field is a collection of objects.
    :param kwargs: The same keyword arguments that :class:`Field` receives.
    """
    default_error_messages = {
        'type': 'Invalid type.',
    }
    def __init__(self, nested, default=missing_, exclude=tuple(), only=None, **kwargs):
        self.nested = nested
        self.only = only
        self.exclude = exclude
        self.many = kwargs.get('many', False)
        # Name-mangled attributes (``_Nested__schema`` etc.) so subclasses
        # cannot accidentally clobber the cache.
        self.__schema = None  # Cached Schema instance
        self.__updated_fields = False
        super(Nested, self).__init__(default=default, **kwargs)
    @property
    def schema(self):
        """The nested Schema object.
        .. versionchanged:: 1.0.0
            Renamed from `serializer` to `schema`
        """
        # Ensure that only parameter is a tuple
        if isinstance(self.only, basestring):
            only = (self.only, )
        else:
            only = self.only
        # Inherit context from parent.
        context = getattr(self.parent, 'context', {})
        # Build the schema lazily on first access and cache it; ``nested``
        # may be a Schema instance, a Schema class, the 'self' sentinel, or
        # a registered class name.
        if not self.__schema:
            if isinstance(self.nested, SchemaABC):
                self.__schema = self.nested
                self.__schema.context.update(context)
            elif isinstance(self.nested, type) and \
                    issubclass(self.nested, SchemaABC):
                self.__schema = self.nested(many=self.many,
                        only=only, exclude=self.exclude, context=context)
            elif isinstance(self.nested, basestring):
                if self.nested == _RECURSIVE_NESTED:
                    parent_class = self.parent.__class__
                    self.__schema = parent_class(many=self.many, only=only,
                            exclude=self.exclude, context=context)
                else:
                    schema_class = class_registry.get_class(self.nested)
                    self.__schema = schema_class(many=self.many,
                            only=only, exclude=self.exclude, context=context)
            else:
                raise ValueError('Nested fields must be passed a '
                                 'Schema, not {0}.'.format(self.nested.__class__))
            self.__schema.ordered = getattr(self.parent, 'ordered', False)
        return self.__schema
    def _serialize(self, nested_obj, attr, obj):
        # Load up the schema first. This allows a RegistryError to be raised
        # if an invalid schema name was passed
        schema = self.schema
        if nested_obj is None:
            return None
        # Only update the schema's fields once per Nested instance.
        if not self.__updated_fields:
            schema._update_fields(obj=nested_obj, many=self.many)
            self.__updated_fields = True
        ret, errors = schema.dump(nested_obj, many=self.many,
                update_fields=not self.__updated_fields)
        if isinstance(self.only, basestring):  # self.only is a field name
            if self.many:
                return utils.pluck(ret, key=self.only)
            else:
                return ret[self.only]
        if errors:
            raise ValidationError(errors, data=ret)
        return ret
    def _deserialize(self, value, attr, data):
        if self.many and not utils.is_collection(value):
            self.fail('type', input=value, type=value.__class__.__name__)
        data, errors = self.schema.load(value)
        if errors:
            raise ValidationError(errors, data=data)
        return data
    def _validate_missing(self, value):
        """Validate missing values. Raise a :exc:`ValidationError` if
        `value` should be considered missing.
        """
        if value is missing_ and hasattr(self, 'required'):
            # For recursive nesting, required-checks of inner fields would
            # recurse forever; fail on the outer field directly.
            if self.nested == _RECURSIVE_NESTED:
                self.fail('required')
            errors = self._check_required()
            if errors:
                raise ValidationError(errors)
        else:
            super(Nested, self)._validate_missing(value)
    def _check_required(self):
        # Recursively collect per-field 'required' errors for the nested
        # schema, keyed by load_from name when set.
        errors = {}
        if self.required:
            for field_name, field in self.schema.fields.items():
                if not field.required:
                    continue
                error_field_name = field.load_from or field_name
                if (
                    isinstance(field, Nested) and
                    self.nested != _RECURSIVE_NESTED and
                    field.nested != _RECURSIVE_NESTED
                ):
                    errors[error_field_name] = field._check_required()
                else:
                    try:
                        field._validate_missing(field.missing)
                    except ValidationError as ve:
                        errors[error_field_name] = ve.messages
            if self.many and errors:
                errors = {0: errors}
            # No inner errors; just raise required error like normal
            if not errors:
                self.fail('required')
        return errors
class List(Field):
    """A list field, composed with another `Field` class or
    instance.
    Example: ::
        numbers = fields.List(fields.Float())
    :param Field cls_or_instance: A field class or instance.
    :param bool default: Default value for serialization.
    :param kwargs: The same keyword arguments that :class:`Field` receives.
    .. versionchanged:: 2.0.0
        The ``allow_none`` parameter now applies to deserialization and
        has the same semantics as the other fields.
    """
    default_error_messages = {
        'invalid': 'Not a valid list.',
    }
    def __init__(self, cls_or_instance, **kwargs):
        super(List, self).__init__(**kwargs)
        # ``container`` is the inner field that handles each list element;
        # accept either a Field class (instantiated here) or an instance.
        if isinstance(cls_or_instance, type):
            if not issubclass(cls_or_instance, FieldABC):
                raise ValueError('The type of the list elements '
                                 'must be a subclass of '
                                 'marshmallow.base.FieldABC')
            self.container = cls_or_instance()
        else:
            if not isinstance(cls_or_instance, FieldABC):
                raise ValueError('The instances of the list '
                                 'elements must be of type '
                                 'marshmallow.base.FieldABC')
            self.container = cls_or_instance
    def get_value(self, attr, obj, accessor=None):
        """Return the value for a given key from an object."""
        value = super(List, self).get_value(attr, obj, accessor=accessor)
        # If the inner field has its own ``attribute``, pull that attribute
        # from each element (or from the single value).
        if self.container.attribute:
            if utils.is_collection(value):
                return [
                    self.container.get_value(self.container.attribute, each)
                    for each in value
                ]
            return self.container.get_value(self.container.attribute, value)
        return value
    def _add_to_schema(self, field_name, schema):
        super(List, self)._add_to_schema(field_name, schema)
        # The container reports this List as its parent so error paths and
        # context resolution work.
        self.container.parent = self
        self.container.name = field_name
    def _serialize(self, value, attr, obj):
        if value is None:
            return None
        if utils.is_collection(value):
            return [self.container._serialize(each, attr, obj) for each in value]
        # A scalar is wrapped into a one-element list rather than rejected.
        return [self.container._serialize(value, attr, obj)]
    def _deserialize(self, value, attr, data):
        if not utils.is_collection(value):
            self.fail('invalid')
        # Deserialize every element, collecting per-index errors; on
        # failure the element's partial data (e.data) keeps list positions
        # aligned with the error indices.
        result = []
        errors = {}
        for idx, each in enumerate(value):
            try:
                result.append(self.container.deserialize(each))
            except ValidationError as e:
                result.append(e.data)
                errors.update({idx: e.messages})
        if errors:
            raise ValidationError(errors, data=result)
        return result
class String(Field):
    """A text field that (de)serializes values to unicode strings.

    :param kwargs: The same keyword arguments that :class:`Field` receives.
    """
    default_error_messages = {'invalid': 'Not a valid string.'}

    def _serialize(self, value, attr, obj):
        # ``None`` passes through; everything else is coerced to text.
        return None if value is None else utils.ensure_text_type(value)

    def _deserialize(self, value, attr, data):
        # Only genuine strings are accepted on load; ints, lists, etc.
        # are rejected outright.
        if isinstance(value, basestring):
            return utils.ensure_text_type(value)
        self.fail('invalid')
class UUID(String):
    """A field for ``uuid.UUID`` values, serialized as their string form."""
    default_error_messages = {
        'invalid_uuid': 'Not a valid UUID.',
        'invalid_guid': 'Not a valid UUID.' # TODO: Remove this in marshmallow 3.0
    }

    def _validated(self, value):
        """Coerce *value* to ``uuid.UUID`` or raise a :exc:`ValidationError`."""
        if value is None:
            return None
        if isinstance(value, uuid.UUID):
            return value
        try:
            return uuid.UUID(value)
        except (ValueError, AttributeError):
            self.fail('invalid_uuid')

    def _serialize(self, value, attr, obj):
        # NOTE(review): ``super(String, self)`` deliberately skips
        # String._serialize (the value is already stringified here) and
        # dispatches straight to Field._serialize -- confirm intended.
        text = str(self._validated(value)) if value is not None else None
        return super(String, self)._serialize(text, attr, obj)

    def _deserialize(self, value, attr, data):
        return self._validated(value)
class Number(Field):
    """Base class for number fields.
    :param bool as_string: If True, format the serialized value as a string.
    :param kwargs: The same keyword arguments that :class:`Field` receives.
    """
    # Subclasses override this with their numeric constructor (int,
    # decimal.Decimal, ...).
    num_type = float
    default_error_messages = {
        'invalid': 'Not a valid number.'
    }
    def __init__(self, as_string=False, **kwargs):
        self.as_string = as_string
        super(Number, self).__init__(**kwargs)
    def _format_num(self, value):
        """Return the number value for value, given this field's `num_type`."""
        if value is None:
            return None
        return self.num_type(value)
    def _validated(self, value):
        """Format the value or raise a :exc:`ValidationError` if an error occurs."""
        try:
            return self._format_num(value)
        except (TypeError, ValueError) as err:
            self.fail('invalid')
    def serialize(self, attr, obj, accessor=None):
        """Pulls the value for the given key from the object and returns the
        serialized number representation. Return a string if `self.as_string=True`,
        otherwise return this field's `num_type`. Receives the same `args` and `kwargs`
        as `Field`.
        """
        ret = Field.serialize(self, attr, obj, accessor=accessor)
        # Skip string conversion for None/missing sentinels so absent
        # values stay absent.
        return self._to_string(ret) if (self.as_string and ret not in (None, missing_)) else ret
    def _to_string(self, value):
        return str(value)
    def _serialize(self, value, attr, obj):
        return self._validated(value)
    def _deserialize(self, value, attr, data):
        return self._validated(value)
class Integer(Number):
    """A whole-number field; values are coerced with ``int``.

    :param kwargs: The same keyword arguments that :class:`Number` receives.
    """
    num_type = int
    default_error_messages = {'invalid': 'Not a valid integer.'}
class Decimal(Number):
    """A field that (de)serializes to the Python ``decimal.Decimal`` type.
    It's safe to use when dealing with money values, percentages, ratios
    or other numbers where precision is critical.
    .. warning::
        This field serializes to a `decimal.Decimal` object by default. If you need
        to render your data as JSON, keep in mind that the `json` module from the
        standard library does not encode `decimal.Decimal`. Therefore, you must use
        a JSON library that can handle decimals, such as `simplejson`, or serialize
        to a string by passing ``as_string=True``.
    .. warning::
        If a JSON `float` value is passed to this field for deserialization it will
        first be cast to its corresponding `string` value before being deserialized
        to a `decimal.Decimal` object. The default `__str__` implementation of the
        built-in Python `float` type may apply a destructive transformation upon
        its input data and therefore cannot be relied upon to preserve precision.
        To avoid this, you can instead pass a JSON `string` to be deserialized
        directly.
    :param int places: How many decimal places to quantize the value. If `None`, does
        not quantize the value.
    :param rounding: How to round the value during quantize, for example
        `decimal.ROUND_UP`. If None, uses the rounding value from
        the current thread's context.
    :param bool allow_nan: If `True`, `NaN`, `Infinity` and `-Infinity` are allowed,
        even though they are illegal according to the JSON specification.
    :param bool as_string: If True, serialize to a string instead of a Python
        `decimal.Decimal` type.
    :param kwargs: The same keyword arguments that :class:`Number` receives.
    .. versionadded:: 1.2.0
    """
    num_type = decimal.Decimal
    default_error_messages = {
        'special': 'Special numeric values are not permitted.',
    }
    def __init__(self, places=None, rounding=None, allow_nan=False, as_string=False, **kwargs):
        # Precompute the quantization exponent, e.g. places=2 ->
        # Decimal('0.01'), so quantize() gets a ready-made template.
        self.places = decimal.Decimal((0, (1,), -places)) if places is not None else None
        self.rounding = rounding
        self.allow_nan = allow_nan
        super(Decimal, self).__init__(as_string=as_string, **kwargs)
    # override Number
    def _format_num(self, value):
        if value is None:
            return None
        # Round-trip through str() so floats keep their printed value
        # instead of their full binary expansion.
        num = decimal.Decimal(str(value))
        if self.allow_nan:
            if num.is_nan():
                return decimal.Decimal('NaN')  # avoid sNaN, -sNaN and -NaN
        else:
            if num.is_nan() or num.is_infinite():
                self.fail('special')
        if self.places is not None and num.is_finite():
            num = num.quantize(self.places, rounding=self.rounding)
        return num
    # override Number
    def _validated(self, value):
        try:
            return super(Decimal, self)._validated(value)
        except decimal.InvalidOperation:
            self.fail('invalid')
    # override Number
    def _to_string(self, value):
        # 'f' format avoids scientific notation in string output.
        return format(value, 'f')
class Boolean(Field):
    """A boolean field.

    :param kwargs: The same keyword arguments that :class:`Field` receives.
    """
    #: Values that will (de)serialize to `True`. If an empty set, any non-falsy
    # value will deserialize to `True`.
    truthy = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True))
    #: Values that will (de)serialize to `False`.
    falsy = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False))
    default_error_messages = {'invalid': 'Not a valid boolean.'}

    def _serialize(self, value, attr, obj):
        if value is None:
            return None
        if value in self.truthy:
            return True
        if value in self.falsy:
            return False
        # Anything outside both sets falls back to plain truthiness.
        return bool(value)

    def _deserialize(self, value, attr, data):
        # An empty ``truthy`` set means "accept anything via bool()".
        if not self.truthy:
            return bool(value)
        try:
            if value in self.truthy:
                return True
            if value in self.falsy:
                return False
        except TypeError:
            # Unhashable input cannot belong to either set; fall through.
            pass
        self.fail('invalid')
class FormattedString(Field):
    """Interpolates attributes of the serialized object into a template
    string, using the same syntax as :meth:`str.format`. ::

        class UserSchema(Schema):
            name = fields.String()
            greeting = fields.FormattedString('Hello {name}')

        ser = UserSchema()
        res = ser.dump(user)
        res.data  # => {'name': 'Monty', 'greeting': 'Hello Monty'}
    """
    default_error_messages = {
        'format': 'Cannot format string with given data.'
    }
    # The template is not an attribute on the serialized object.
    _CHECK_ATTRIBUTE = False

    def __init__(self, src_str, *args, **kwargs):
        Field.__init__(self, *args, **kwargs)
        self.src_str = text_type(src_str)

    def _serialize(self, value, attr, obj):
        try:
            mapping = utils.to_marshallable_type(obj)
            return self.src_str.format(**mapping)
        except (TypeError, IndexError) as error:
            self.fail('format')
class Float(Number):
    """An IEEE-754 double-precision floating-point field.

    :param bool as_string: If True, format the value as a string.
    :param kwargs: The same keyword arguments that :class:`Number` receives.
    """
    num_type = float
class DateTime(Field):
    """A formatted datetime string in UTC.
    Example: ``'2014-12-22T03:12:58.019077+00:00'``
    Timezone-naive `datetime` objects are converted to
    UTC (+00:00) by :meth:`Schema.dump <marshmallow.Schema.dump>`.
    :meth:`Schema.load <marshmallow.Schema.load>` returns `datetime`
    objects that are timezone-aware.
    :param str format: Either ``"rfc"`` (for RFC822), ``"iso"`` (for ISO8601),
        or a date format string. If `None`, defaults to "iso".
    :param kwargs: The same keyword arguments that :class:`Field` receives.
    """
    # Well-known format names mapped to serialization helpers; anything
    # else is treated as a strftime/strptime pattern.
    DATEFORMAT_SERIALIZATION_FUNCS = {
        'iso': utils.isoformat,
        'iso8601': utils.isoformat,
        'rfc': utils.rfcformat,
        'rfc822': utils.rfcformat,
    }
    DATEFORMAT_DESERIALIZATION_FUNCS = {
        'iso': utils.from_iso,
        'iso8601': utils.from_iso,
        'rfc': utils.from_rfc,
        'rfc822': utils.from_rfc,
    }
    DEFAULT_FORMAT = 'iso'
    # Overridden to True by LocalDateTime.
    localtime = False
    default_error_messages = {
        'invalid': 'Not a valid datetime.',
        'format': '"{input}" cannot be formatted as a datetime.',
    }
    def __init__(self, format=None, **kwargs):
        super(DateTime, self).__init__(**kwargs)
        # Allow this to be None. It may be set later in the ``_serialize``
        # or ``_deserialize`` methods. This allows a Schema to dynamically set the
        # dateformat, e.g. from a Meta option
        self.dateformat = format
    def _add_to_schema(self, field_name, schema):
        super(DateTime, self)._add_to_schema(field_name, schema)
        self.dateformat = self.dateformat or schema.opts.dateformat
    def _serialize(self, value, attr, obj):
        if value is None:
            return None
        self.dateformat = self.dateformat or self.DEFAULT_FORMAT
        format_func = self.DATEFORMAT_SERIALIZATION_FUNCS.get(self.dateformat, None)
        if format_func:
            try:
                return format_func(value, localtime=self.localtime)
            except (AttributeError, ValueError) as err:
                self.fail('format', input=value)
        else:
            # Unknown format name: treat it as a strftime pattern.
            return value.strftime(self.dateformat)
    def _deserialize(self, value, attr, data):
        if not value:  # Falsy values, e.g. '', None, [] are not valid
            # NOTE: self.fail() raises ValidationError itself; the outer
            # ``raise`` on these lines is never reached with a value.
            raise self.fail('invalid')
        self.dateformat = self.dateformat or self.DEFAULT_FORMAT
        func = self.DATEFORMAT_DESERIALIZATION_FUNCS.get(self.dateformat)
        if func:
            try:
                return func(value)
            except (TypeError, AttributeError, ValueError):
                raise self.fail('invalid')
        elif self.dateformat:
            try:
                return dt.datetime.strptime(value, self.dateformat)
            except (TypeError, AttributeError, ValueError):
                raise self.fail('invalid')
        elif utils.dateutil_available:
            # Last resort: dateutil's permissive parser, if installed.
            try:
                return utils.from_datestring(value)
            except TypeError:
                raise self.fail('invalid')
        else:
            warnings.warn('It is recommended that you install python-dateutil '
                          'for improved datetime deserialization.')
            raise self.fail('invalid')
class LocalDateTime(DateTime):
    """A :class:`DateTime` rendered in local time relative to UTC,
    e.g. ``"Sun, 10 Nov 2013 08:23:45 -0600"``.

    Takes the same arguments as :class:`DateTime <marshmallow.fields.DateTime>`.
    """
    localtime = True
class Time(Field):
    """ISO8601-formatted time string.
    :param kwargs: The same keyword arguments that :class:`Field` receives.
    """
    default_error_messages = {
        'invalid': 'Not a valid time.',
        'format': '"{input}" cannot be formatted as a time.',
    }
    def _serialize(self, value, attr, obj):
        if value is None:
            return None
        try:
            ret = value.isoformat()
        except AttributeError:
            self.fail('format', input=value)
        if value.microsecond:
            # NOTE(review): isoformat() with microseconds is at most 15
            # chars ('HH:MM:SS.ffffff'), so ret[:15] is currently a no-op;
            # upstream truncates to milliseconds with ret[:12] -- confirm
            # the intended precision.
            return ret[:15]
        return ret
    def _deserialize(self, value, attr, data):
        """Deserialize an ISO8601-formatted time to a :class:`datetime.time` object."""
        if not value:  # falsy values are invalid
            # fail() raises ValidationError; the undefined ``raise err``
            # that used to follow here was unreachable dead code (a latent
            # NameError) and has been removed.
            self.fail('invalid')
        try:
            return utils.from_iso_time(value)
        except (AttributeError, TypeError, ValueError):
            self.fail('invalid')
class Date(Field):
    """ISO8601-formatted date string.

    :param kwargs: The same keyword arguments that :class:`Field` receives.
    """
    default_error_messages = {
        'invalid': 'Not a valid date.',
        'format': '"{input}" cannot be formatted as a date.',
    }

    def _serialize(self, value, attr, obj):
        if value is None:
            return None
        try:
            formatted = value.isoformat()
        except AttributeError:
            self.fail('format', input=value)
        else:
            return formatted
        return value

    def _deserialize(self, value, attr, data):
        """Deserialize an ISO8601-formatted date string to a
        :class:`datetime.date` object.
        """
        # Falsy input ('' / None / []) is rejected outright.
        if not value:
            self.fail('invalid')
        try:
            return utils.from_iso_date(value)
        except (AttributeError, TypeError, ValueError):
            self.fail('invalid')
class TimeDelta(Field):
    """(De)serializes a :class:`datetime.timedelta` to/from an integer
    counting days, seconds or microseconds, depending on ``precision``.

    :param str precision: Influences how the integer is interpreted during
        (de)serialization. Must be 'days', 'seconds' or 'microseconds'.
    :param str error: Error message stored upon validation failure.
    :param kwargs: The same keyword arguments that :class:`Field` receives.
    .. versionchanged:: 2.0.0
        Always serializes to an integer value to avoid rounding errors.
        Add `precision` parameter.
    """
    DAYS = 'days'
    SECONDS = 'seconds'
    MICROSECONDS = 'microseconds'
    default_error_messages = {
        'invalid': 'Not a valid period of time.',
        'format': '{input!r} cannot be formatted as a timedelta.'
    }

    def __init__(self, precision='seconds', error=None, **kwargs):
        precision = precision.lower()
        units = (self.DAYS, self.SECONDS, self.MICROSECONDS)
        if precision not in units:
            msg = 'The precision must be "{0}", "{1}" or "{2}".'.format(*units)
            raise ValueError(msg)
        self.precision = precision
        super(TimeDelta, self).__init__(error=error, **kwargs)

    def _serialize(self, value, attr, obj):
        if value is None:
            return None
        # Collapse the timedelta to a single integer in the configured
        # unit; AttributeError means the value was not timedelta-like.
        try:
            if self.precision == self.DAYS:
                return value.days
            total_seconds = value.days * 86400 + value.seconds
            if self.precision == self.SECONDS:
                return total_seconds
            return total_seconds * 10**6 + value.microseconds  # flake8: noqa
        except AttributeError:
            self.fail('format', input=value)

    def _deserialize(self, value, attr, data):
        try:
            amount = int(value)
        except (TypeError, ValueError):
            self.fail('invalid')
        try:
            return dt.timedelta(**{self.precision: amount})
        except OverflowError:
            self.fail('invalid')
class Dict(Field):
    """A dict field. Supports dicts and dict-like objects.
    .. note::
        This field is only appropriate when the structure of
        nested data is not known. For structured data, use
        `Nested`.
    .. versionadded:: 2.1.0
    """
    default_error_messages = {
        'invalid': 'Not a valid mapping type.'
    }
    def _deserialize(self, value, attr, data):
        # ``collections.Mapping`` was an alias removed in Python 3.10; the
        # ABC lives in ``collections.abc`` on Python 3 (fall back to the
        # old location on Python 2).
        try:
            from collections.abc import Mapping
        except ImportError:  # Python 2
            from collections import Mapping
        if isinstance(value, Mapping):
            return value
        else:
            self.fail('invalid')
class ValidatedField(Field):
    """Base class for fields that also validate their value when serializing."""

    def _validated(self, value):
        raise NotImplementedError('Must implement _validate method')

    def _serialize(self, value, *args, **kwargs):
        serialized = super(ValidatedField, self)._serialize(value, *args, **kwargs)
        # Re-run validation on the serialized output before returning it.
        return self._validated(serialized)
class Url(ValidatedField, String):
    """A URL field, validated during both serialization and deserialization.

    :param default: Default value for the field if the attribute is not set.
    :param str attribute: The name of the attribute to get the value from. If
        `None`, assumes the attribute has the same name as the field.
    :param bool relative: Allow relative URLs.
    :param kwargs: The same keyword arguments that :class:`String` receives.
    """
    default_error_messages = {'invalid': 'Not a valid URL.'}

    def __init__(self, relative=False, **kwargs):
        String.__init__(self, **kwargs)
        self.relative = relative
        # Prepend the URL validator so its errors accumulate alongside any
        # user-supplied validators instead of replacing them.
        url_validator = validate.URL(
            relative=self.relative,
            error=self.error_messages['invalid']
        )
        self.validators.insert(0, url_validator)

    def _validated(self, value):
        if value is None:
            return None
        checker = validate.URL(
            relative=self.relative,
            error=self.error_messages['invalid']
        )
        return checker(value)
class Email(ValidatedField, String):
    """A validated email field. Validation occurs during both serialization and
    deserialization.

    :param args: The same positional arguments that :class:`String` receives.
    :param kwargs: The same keyword arguments that :class:`String` receives.
    """
    default_error_messages = {'invalid': 'Not a valid email address.'}

    def __init__(self, *args, **kwargs):
        String.__init__(self, *args, **kwargs)
        # Prepend the email validator so multiple validation errors can be
        # stored rather than short-circuiting on the first failure.
        email_validator = validate.Email(error=self.error_messages['invalid'])
        self.validators.insert(0, email_validator)

    def _validated(self, value):
        """Validate ``value`` as an email address; ``None`` passes through."""
        if value is None:
            return None
        validator = validate.Email(error=self.error_messages['invalid'])
        return validator(value)
class Method(Field):
    """A field that takes the value returned by a `Schema` method.

    :param str method_name: The name of the Schema method from which
        to retrieve the value. The method must take an argument ``obj``
        (in addition to self) that is the object to be serialized.
    :param str deserialize: Optional name of the Schema method for deserializing
        a value The method must take a single argument ``value``, which is the
        value to deserialize.

    .. versionchanged:: 2.0.0
        Removed optional ``context`` parameter on methods. Use ``self.context`` instead.
    .. versionchanged:: 2.3.0
        Deprecated ``method_name`` parameter in favor of ``serialize`` and allow
        ``serialize`` to not be passed at all.
    """
    _CHECK_ATTRIBUTE = False

    def __init__(self, serialize=None, deserialize=None, method_name=None, **kwargs):
        if method_name is not None:
            warnings.warn('"method_name" argument of fields.Method is deprecated. '
                          'Use the "serialize" argument instead.', DeprecationWarning)
        # ``serialize`` wins over the deprecated ``method_name`` alias.
        self.serialize_method_name = self.method_name = serialize or method_name
        self.deserialize_method_name = deserialize
        super(Method, self).__init__(**kwargs)

    def _serialize(self, value, attr, obj):
        """Call the configured schema method; yield ``missing`` when unset or
        when the method raises AttributeError."""
        if not self.serialize_method_name:
            return missing_
        method = utils.callable_or_raise(
            getattr(self.parent, self.serialize_method_name, None))
        try:
            return method(obj)
        except AttributeError:
            return missing_

    def _deserialize(self, value, attr, data):
        """Call the deserialize schema method; pass ``value`` through when no
        method is configured or it raises AttributeError."""
        if not self.deserialize_method_name:
            return value
        try:
            method = utils.callable_or_raise(
                getattr(self.parent, self.deserialize_method_name, None))
            return method(value)
        except AttributeError:
            return value
class Function(Field):
    """A field that takes the value returned by a function.

    :param callable serialize: A callable from which to retrieve the value.
        The function must take a single argument ``obj`` which is the object
        to be serialized. It can also optionally take a ``context`` argument,
        which is a dictionary of context variables passed to the serializer.
        If no callable is provided then the ```load_only``` flag will be set
        to True.
    :param callable deserialize: A callable from which to retrieve the value.
        The function must take a single argument ``value`` which is the value
        to be deserialized. It can also optionally take a ``context`` argument,
        which is a dictionary of context variables passed to the deserializer.
        If no callable is provided then ```value``` will be passed through
        unchanged.
    :param callable func: This argument is to be deprecated. It exists for
        backwards compatiblity. Use serialize instead.

    .. versionchanged:: 2.3.0
        Deprecated ``func`` parameter in favor of ``serialize``.
    """
    _CHECK_ATTRIBUTE = False

    def __init__(self, serialize=None, deserialize=None, func=None, **kwargs):
        if func:
            warnings.warn('"func" argument of fields.Function is deprecated. '
                          'Use the "serialize" argument instead.', DeprecationWarning)
            serialize = func
        super(Function, self).__init__(**kwargs)
        # A falsy serialize/deserialize is stored as-is; a truthy one must be
        # callable (``callable_or_raise`` enforces this).
        self.serialize_func = self.func = (
            utils.callable_or_raise(serialize) if serialize else serialize)
        self.deserialize_func = (
            utils.callable_or_raise(deserialize) if deserialize else deserialize)

    def _serialize(self, value, attr, obj):
        try:
            return self._call_or_raise(self.serialize_func, obj, attr)
        except AttributeError:
            # The object is not expected to have the attribute.
            return missing_

    def _deserialize(self, value, attr, data):
        if not self.deserialize_func:
            return value
        return self._call_or_raise(self.deserialize_func, value, attr)

    def _call_or_raise(self, func, value, attr):
        """Invoke ``func``, passing the schema context when it accepts one."""
        if len(utils.get_func_args(func)) <= 1:
            return func(value)
        if self.parent.context is None:
            msg = 'No context available for Function field {0!r}'.format(attr)
            raise ValidationError(msg)
        return func(value, self.parent.context)
class Constant(Field):
    """A field that (de)serializes to a preset constant. If you only want the
    constant added for serialization or deserialization, you should use
    ``dump_only=True`` or ``load_only=True`` respectively.

    :param constant: The constant to return for the field attribute.

    .. versionadded:: 2.0.0
    """
    _CHECK_ATTRIBUTE = False

    def __init__(self, constant, **kwargs):
        super(Constant, self).__init__(**kwargs)
        # The same value doubles as the load default (``missing``) and the
        # dump default (``default``).
        self.constant = constant
        self.default = constant
        self.missing = constant

    def _serialize(self, value, *args, **kwargs):
        # The incoming value is deliberately ignored.
        return self.constant

    def _deserialize(self, value, *args, **kwargs):
        # The incoming value is deliberately ignored.
        return self.constant
# Aliases
# Short and legacy names kept for backwards compatibility with existing
# schema definitions.
URL = Url
Str = String
Bool = Boolean
Int = Integer
|
|
from pprint import pprint
import hashlib
import datetime
from dartsense import db, List_C
import dartsense.event
import dartsense.player
class Match:
    """A single darts match between two players at an event.

    When only ``id`` is given, the row is loaded from the ``match`` table;
    otherwise the match starts out empty and can be populated and then
    persisted with :meth:`save`.
    """

    def __init__(
        self,
        id=None,
        player_1=None,
        player_2=None,
        event=None,
        date=None
    ):
        # BUG FIX: the old signature used ``date=datetime.date.today()``.
        # A default argument is evaluated once at import time, so every
        # match created without an explicit date in a long-running process
        # silently got the process start date. ``None`` is now the sentinel
        # for "today".
        if date is None:
            date = datetime.date.today()
        self._id = id
        self._player_1_id = None
        self._player_2_id = None
        self._player_1 = None
        self._player_2 = None
        self.player_1 = player_1
        self.player_2 = player_2
        self._event_id = None
        self._event = None
        self.event = event
        self._date = date
        self.round = 0
        self.type = None
        self.player_1_score = 0
        self.player_1_180s = 0
        self.player_1_lollies = 0
        self.player_1_finishes = []
        self.player_2_score = 0
        self.player_2_180s = 0
        self.player_2_lollies = 0
        self.player_2_finishes = []
        # Load from the database only when an id (and nothing else) is given.
        if id and not (player_1 or player_2 or event):
            sql = '''
                SELECT
                    `match_id`, `event_id`, `match_start`,
                    `match_date_round`, `match_type`,
                    `player_1_id`, `player_1_id_orig`,
                    `player_1_score`, `player_1_180s`, `player_1_lollies`,
                    `player_2_id`, `player_2_id_orig`,
                    `player_2_score`, `player_2_180s`, `player_2_lollies`
                FROM `match` m
                WHERE
                    m.match_id = %s
                LIMIT 1
            '''
            res = db.exec_select(sql, [id])
            if res:
                r = res[0]
                self._player_1_id = r['player_1_id']
                self._player_2_id = r['player_2_id']
                self._event_id = r['event_id']
                self.round = r['match_date_round']
                self.type = r['match_type']
                self.player_1_score = r['player_1_score']
                self.player_1_180s = r['player_1_180s']
                self.player_1_lollies = r['player_1_lollies']
                self.player_2_score = r['player_2_score']
                self.player_2_180s = r['player_2_180s']
                self.player_2_lollies = r['player_2_lollies']

    def save(self):
        """Persist the match: UPDATE when it has an id, INSERT otherwise.

        Returns the match id, or ``None`` when an insert is impossible
        because players or event are missing.
        """
        if self._id:
            # update
            sql = '''
                UPDATE `match`
                SET
                    `event_id` = %s
                    , `match_start` = %s
                    , `match_date_round` = %s
                    , `match_type` = %s
                    , `player_1_id` = %s
                    , `player_1_score` = %s
                    , `player_1_180s` = %s
                    , `player_1_lollies` = %s
                    , `player_2_id` = %s
                    , `player_2_score` = %s
                    , `player_2_180s` = %s
                    , `player_2_lollies` = %s
                WHERE
                    match_id = %s
            '''
            db.exec_sql(sql, [
                self._event_id,
                self._date,
                self.round,
                self.type,
                self._player_1_id,
                self.player_1_score,
                self.player_1_180s,
                self.player_1_lollies,
                self._player_2_id,
                self.player_2_score,
                self.player_2_180s,
                self.player_2_lollies,
                self._id
            ])
        else:
            # A new match needs both players and an event before it can be
            # inserted.
            if not (
                self._player_1_id and
                self._player_2_id and
                self._event_id
            ):
                return None
            # insert
            sql = '''
                INSERT INTO `match`
                (
                    `event_id`, `match_start`,
                    `match_date_round`, `match_type`,
                    `player_1_id`, `player_1_id_orig`,
                    `player_1_score`, `player_1_180s`, `player_1_lollies`,
                    `player_2_id`, `player_2_id_orig`,
                    `player_2_score`, `player_2_180s`, `player_2_lollies`
                )
                VALUES
                (
                    %s, %s,
                    %s, %s,
                    %s, %s,
                    %s, %s, %s,
                    %s, %s, %s,
                    %s, %s
                )
            '''
            # The *_orig columns record the player ids at insert time, so the
            # current ids are passed twice.
            new_id = db.exec_insert(
                sql,
                [
                    self._event_id,
                    self._date,
                    self.round,
                    self.type,
                    self._player_1_id,
                    self._player_1_id,
                    self.player_1_score,
                    self.player_1_180s,
                    self.player_1_lollies,
                    self._player_2_id,
                    self._player_2_id,
                    self.player_2_score,
                    self.player_2_180s,
                    self.player_2_lollies,
                ]
            )
            self._id = new_id
        return self._id

    def _get_id(self):
        return self._id
    id = property(_get_id)

    def _get_player_1(self):
        return self._get_player(1)

    def _get_player_2(self):
        return self._get_player(2)

    def _set_player_1(self, player):
        return self._set_player(1, player)

    def _set_player_2(self, player):
        return self._set_player(2, player)

    def _get_player(self, nr):
        # Lazily load the Player object from its id when it has not been
        # materialized yet.
        player = getattr(self, '_player_' + str(nr))
        player_id = getattr(self, '_player_' + str(nr) + '_id')
        if not player and player_id:
            player = dartsense.player.Player(id=player_id)
        return player

    def _set_player(self, nr, player):
        # Accept either a Player object or a positive integer id.
        if isinstance(player, dartsense.player.Player):
            setattr(self, '_player_' + str(nr), player)
            setattr(self, '_player_' + str(nr) + '_id', player.id)
        elif isinstance(player, int) and player > 0:
            setattr(self, '_player_' + str(nr), None)
            setattr(self, '_player_' + str(nr) + '_id', player)

    def _get_event(self):
        # Lazily load the Event object from its id.
        if not self._event and self._event_id:
            self._event = dartsense.event.Event(id=self._event_id)
        return self._event

    def _set_event(self, event):
        # Accept either an Event object or a positive integer id.
        if isinstance(event, dartsense.event.Event):
            self._event = event
            self._event_id = event.id
        elif isinstance(event, int) and event > 0:
            self._event = None
            self._event_id = event

    player_1 = property(_get_player_1, _set_player_1)
    player_2 = property(_get_player_2, _set_player_2)
    event = property(_get_event, _set_event)
class MatchList(List_C):
    """Lazily populated list of :class:`Match` objects selected by filters."""

    def __init__(self, filters=None):
        # BUG FIX: the old signature used the mutable default ``filters={}``.
        # That single dict is shared by every MatchList created without
        # arguments, so a caller mutating ``self._filters`` would leak its
        # changes into later instances. ``None`` is now the sentinel.
        List_C.__init__(self)
        self._filters = {} if filters is None else filters

    def _search(self, force=False):
        """Fill ``self._elements`` from the database.

        A no-op when the list is already populated, unless ``force`` is True.
        Without any filters nothing is loaded (the full table would be huge).
        """
        if force or self._elements == []:
            self._elements = []
            args = []
            sql = """
                SELECT DISTINCT
                    m.match_id
                FROM
                    `match` m
                LEFT JOIN event e ON e.event_id = m.event_id
                WHERE
                    e.event_id > 0
            """
            if len(self._filters) > 0:
                if 'event' in self._filters:
                    sql += ' AND e.event_id = %s'
                    args.append(self._filters['event'])
            else:
                # a matchlist with no filters would be huge: don't list
                return
            res = db.exec_select(sql, args)
            for r in res:
                self._elements.append(Match(
                    id=r['match_id'],
                ))
|
|
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test interface declarations against ExtensionClass-like classes.
These tests are to make sure we do something sane in the presence of
classic ExtensionClass classes and instances.
$Id: test_odd_declarations.py 88118 2008-07-08 14:54:52Z sidnei $
"""
import unittest, odd
from zope.interface import Interface, implements, implementsOnly
from zope.interface import directlyProvides, providedBy, directlyProvidedBy
from zope.interface import classImplements, classImplementsOnly, implementedBy
# Marker interfaces used throughout the tests below; I31 extends I3.
class I1(Interface): pass
class I2(Interface): pass
class I3(Interface): pass
class I31(I3): pass
class I4(Interface): pass
class I5(Interface): pass
# ``Odd`` takes the ExtensionClass-style metaclass from the ``odd`` test
# helper module (Python 2 ``__metaclass__`` syntax). ``B`` declares I2 via
# the low-level ``__implemented__`` attribute rather than ``implements()``.
class Odd(object): __metaclass__ = odd.MetaClass
class B(Odd): __implemented__ = I2
# TODO: We are going to need more magic to make classProvides work with odd
# classes. This will work in the next iteration. For now, we'll use
# a different mechanism.
# from zope.interface import classProvides
# ``A`` declares I1 with the classic ``implements()`` directive; ``C``
# inherits from both odd classes and additionally declares I31.
class A(Odd):
    implements(I1)
class C(A, B):
    implements(I31)
class Test(unittest.TestCase):
    """Interface declarations must behave sanely on "odd" (ExtensionClass
    style) classes, whose metaclass is not the standard ``type``."""

    def test_ObjectSpecification(self):
        # Direct declarations come first, then the class declarations in
        # MRO order (C -> A -> B).
        c = C()
        directlyProvides(c, I4)
        self.assertEqual([i.getName() for i in providedBy(c)],
                         ['I4', 'I31', 'I1', 'I2']
                         )
        self.assertEqual([i.getName() for i in providedBy(c).flattened()],
                         ['I4', 'I31', 'I3', 'I1', 'I2', 'Interface']
                         )
        self.assert_(I1 in providedBy(c))
        self.failIf(I3 in providedBy(c))
        self.assert_(providedBy(c).extends(I3))
        self.assert_(providedBy(c).extends(I31))
        self.failIf(providedBy(c).extends(I5))

        # ``implementsOnly`` replaces the inherited declarations entirely.
        class COnly(A, B):
            implementsOnly(I31)
        class D(COnly):
            implements(I5)
        classImplements(D, I5)
        c = D()
        directlyProvides(c, I4)
        self.assertEqual([i.getName() for i in providedBy(c)],
                         ['I4', 'I5', 'I31'])
        self.assertEqual([i.getName() for i in providedBy(c).flattened()],
                         ['I4', 'I5', 'I31', 'I3', 'Interface'])
        self.failIf(I1 in providedBy(c))
        self.failIf(I3 in providedBy(c))
        self.assert_(providedBy(c).extends(I3))
        self.failIf(providedBy(c).extends(I1))
        self.assert_(providedBy(c).extends(I31))
        self.assert_(providedBy(c).extends(I5))

        # Same scenario, but declaring via the low-level ``__implemented__``
        # attribute must behave identically to ``implementsOnly``.
        class COnly(A, B): __implemented__ = I31
        class D(COnly):
            implements(I5)
        classImplements(D, I5)
        c = D()
        directlyProvides(c, I4)
        self.assertEqual([i.getName() for i in providedBy(c)],
                         ['I4', 'I5', 'I31'])
        self.assertEqual([i.getName() for i in providedBy(c).flattened()],
                         ['I4', 'I5', 'I31', 'I3', 'Interface'])
        self.failIf(I1 in providedBy(c))
        self.failIf(I3 in providedBy(c))
        self.assert_(providedBy(c).extends(I3))
        self.failIf(providedBy(c).extends(I1))
        self.assert_(providedBy(c).extends(I31))
        self.assert_(providedBy(c).extends(I5))

    def test_classImplements(self):
        # ``classImplements`` prepends new declarations ahead of inherited
        # ones, most recent first.
        class A(Odd):
            implements(I3)
        class B(Odd):
            implements(I4)
        class C(A, B):
            pass
        classImplements(C, I1, I2)
        self.assertEqual([i.getName() for i in implementedBy(C)],
                         ['I1', 'I2', 'I3', 'I4'])
        classImplements(C, I5)
        self.assertEqual([i.getName() for i in implementedBy(C)],
                         ['I1', 'I2', 'I5', 'I3', 'I4'])

    def test_classImplementsOnly(self):
        # ``classImplementsOnly`` discards the inherited declarations.
        class A(Odd):
            implements(I3)
        class B(Odd):
            implements(I4)
        class C(A, B):
            pass
        classImplementsOnly(C, I1, I2)
        self.assertEqual([i.__name__ for i in implementedBy(C)],
                         ['I1', 'I2'])

    def test_directlyProvides(self):
        class IA1(Interface): pass
        class IA2(Interface): pass
        class IB(Interface): pass
        class IC(Interface): pass
        class A(Odd):
            implements(IA1, IA2)
        class B(Odd):
            implements(IB)
        class C(A, B):
            implements(IC)
        ob = C()
        directlyProvides(ob, I1, I2)
        self.assert_(I1 in providedBy(ob))
        self.assert_(I2 in providedBy(ob))
        self.assert_(IA1 in providedBy(ob))
        self.assert_(IA2 in providedBy(ob))
        self.assert_(IB in providedBy(ob))
        self.assert_(IC in providedBy(ob))
        # Removing one direct declaration must leave the others intact.
        directlyProvides(ob, directlyProvidedBy(ob)-I2)
        self.assert_(I1 in providedBy(ob))
        self.failIf(I2 in providedBy(ob))
        self.failIf(I2 in providedBy(ob))
        directlyProvides(ob, directlyProvidedBy(ob), I2)
        self.assert_(I2 in providedBy(ob))

    def test_directlyProvides_fails_for_odd_class(self):
        self.assertRaises(TypeError, directlyProvides, C, I5)

    # see above
    def TODO_test_classProvides_fails_for_odd_class(self):
        try:
            class A(Odd):
                classProvides(I1)
        except TypeError:
            pass # Success
        self.assert_(False,
                     "Shouldn't be able to use directlyProvides on odd class."
                     )

    def test_implementedBy(self):
        # Declarations on a subclass shadow-but-extend those of the base.
        class I2(I1): pass
        class C1(Odd):
            implements(I2)
        class C2(C1):
            implements(I3)
        self.assertEqual([i.getName() for i in implementedBy(C2)],
                         ['I3', 'I2'])
def test_suite():
    """Build the suite: the TestCase above plus the doctests in ``odd``."""
    from zope.testing import doctest
    combined = unittest.TestSuite()
    for part in (unittest.makeSuite(Test), doctest.DocTestSuite(odd)):
        combined.addTest(part)
    return combined
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.contrib.rnn.python.ops import core_rnn_cell_impl
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor.

  The concatenation op is cached in the CONCATENATED_VARIABLES graph
  collection, so repeated calls under the same scope return the same tensor.
  """
  shards = _get_sharded_variable(name, shape, dtype, num_shards)
  if len(shards) == 1:
    return shards[0]
  concat_name = name + "/concat"
  concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  for cached in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if cached.name == concat_full_name:
      return cached
  concatenated = array_ops.concat(shards, 0, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES, concatenated)
  return concatenated
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Create `num_shards` variables that together cover `shape[0]` rows.

  The first ``shape[0] % num_shards`` shards receive one extra row so that
  the total row count is preserved exactly.
  """
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  base_rows = int(math.floor(shape[0] / num_shards))
  extra_rows = shape[0] - base_rows * num_shards
  shards = []
  for shard_idx in range(num_shards):
    shard_rows = base_rows + 1 if shard_idx < extra_rows else base_rows
    shards.append(vs.get_variable("%s_%d" % (name, shard_idx),
                                  [shard_rows] + shape[1:], dtype=dtype))
  return shards
class CoupledInputForgetGateLSTMCell(core_rnn_cell.RNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.

  The default non-peephole implementation is based on:

    http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf

  S. Hochreiter and J. Schmidhuber.
  "Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.

  The peephole implementation is based on:

    https://research.google.com/pubs/archive/43905.pdf

  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
  large scale acoustic modeling." INTERSPEECH, 2014.

  The coupling of input and forget gate is based on:

    http://arxiv.org/pdf/1503.04069.pdf

  Greff et al. "LSTM: A Search Space Odyssey"

  The class uses optional peep-hole connections, and an optional projection
  layer. Because the input gate is derived from the forget gate
  (i = 1 - f), only three gate pre-activations are computed instead of four.
  """

  def __init__(self, num_units, use_peepholes=False,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=1, num_proj_shards=1,
               forget_bias=1.0, state_is_tuple=False,
               activation=math_ops.tanh):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      proj_clip: (optional) A float value.  If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: How to split the weight matrix.  If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix.  If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  By default (False), they are concatenated
        along the column axis.  This default behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated. Use state_is_tuple=True.", self)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._initializer = initializer
    self._num_proj = num_proj
    self._proj_clip = proj_clip
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation
    # With a projection layer the emitted state/output use num_proj columns;
    # otherwise both halves of the state are num_units wide.
    if num_proj:
      self._state_size = (
          core_rnn_cell.LSTMStateTuple(num_units, num_proj)
          if state_is_tuple else num_units + num_proj)
      self._output_size = num_proj
    else:
      self._state_size = (
          core_rnn_cell.LSTMStateTuple(num_units, num_units)
          if state_is_tuple else 2 * num_units)
      self._output_size = num_units

  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: if `state_is_tuple` is False, this must be a state Tensor,
        `2-D, batch x state_size`.  If `state_is_tuple` is True, this must be a
        tuple of state Tensors, both `2-D`, with column sizes `c_state` and
        `m_state`.
      scope: VariableScope for the created subgraph; defaults to "LSTMCell".

    Returns:
      A tuple containing:
      - A `2-D, [batch x output_dim]`, Tensor representing the output of the
        LSTM after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of LSTM after reading `inputs` when
        the previous state was `state`.  Same type and shape(s) as `state`.

    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    sigmoid = math_ops.sigmoid
    num_proj = self._num_units if self._num_proj is None else self._num_proj
    # Split a concatenated state into its cell (c) and output (m) halves.
    if self._state_is_tuple:
      (c_prev, m_prev) = state
    else:
      c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
      m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
    dtype = inputs.dtype
    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    with vs.variable_scope(scope or "coupled_input_forget_gate_lstm_cell",
                           initializer=self._initializer):
      # Only 3 gate blocks (j, f, o) — the input gate is coupled to f below.
      concat_w = _get_concat_variable(
          "W", [input_size.value + num_proj, 3 * self._num_units],
          dtype, self._num_unit_shards)
      b = vs.get_variable(
          "B",
          shape=[3 * self._num_units],
          initializer=init_ops.zeros_initializer(),
          dtype=dtype)
      # j = new_input, f = forget_gate, o = output_gate
      cell_inputs = array_ops.concat([inputs, m_prev], 1)
      lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
      j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=3, axis=1)
      # Diagonal connections
      if self._use_peepholes:
        w_f_diag = vs.get_variable(
            "W_F_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "W_O_diag", shape=[self._num_units], dtype=dtype)
      if self._use_peepholes:
        f_act = sigmoid(f + self._forget_bias + w_f_diag * c_prev)
      else:
        f_act = sigmoid(f + self._forget_bias)
      # Coupled gates: the input gate is (1 - f_act), per Greff et al.
      c = (f_act * c_prev + (1 - f_act) * self._activation(j))
      if self._use_peepholes:
        m = sigmoid(o + w_o_diag * c) * self._activation(c)
      else:
        m = sigmoid(o) * self._activation(c)
      if self._num_proj is not None:
        concat_w_proj = _get_concat_variable(
            "W_P", [self._num_units, self._num_proj],
            dtype, self._num_proj_shards)
        m = math_ops.matmul(m, concat_w_proj)
        if self._proj_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
          # pylint: enable=invalid-unary-operand-type
    new_state = (core_rnn_cell.LSTMStateTuple(c, m) if self._state_is_tuple else
                 array_ops.concat([c, m], 1))
    return m, new_state
class TimeFreqLSTMCell(core_rnn_cell.RNNCell):
  """Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.

  This implementation is based on:

    Tara N. Sainath and Bo Li
    "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
    for LVCSR Tasks." submitted to INTERSPEECH, 2016.

  It uses peep-hole connections and optional cell clipping.

  The cell slides an LSTM filter of width ``feature_size`` over the input
  features with stride ``frequency_skip``; per-step outputs and states for
  every frequency block are concatenated along the column axis.
  """

  def __init__(self, num_units, use_peepholes=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_unit_shards: int, How to split the weight matrix.  If >1, the weight
        matrix is stored across num_unit_shards.
      forget_bias: float, Biases of the forget gate are initialized by default
        to 1 in order to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: int, The size of the input feature the LSTM spans over.
      frequency_skip: int, The amount the LSTM filter is shifted by in
        frequency.
    """
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_unit_shards = num_unit_shards
    self._forget_bias = forget_bias
    self._feature_size = feature_size
    self._frequency_skip = frequency_skip
    # NOTE(review): state_size is sized for a single frequency block (c, m);
    # the state actually consumed by __call__ holds one such pair per block.
    self._state_size = 2 * num_units
    self._output_size = num_units

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._state_size

  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: state Tensor, 2D, batch x state_size.
      scope: VariableScope for the created subgraph; defaults to
        "TimeFreqLSTMCell".

    Returns:
      A tuple containing:
      - A 2D, batch x output_dim, Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, batch x state_size, Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".

    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    sigmoid = math_ops.sigmoid
    tanh = math_ops.tanh
    freq_inputs = self._make_tf_features(inputs)
    dtype = inputs.dtype
    actual_input_size = freq_inputs[0].get_shape().as_list()[1]
    with vs.variable_scope(scope or "time_freq_lstm_cell",
                           initializer=self._initializer):  # "TimeFreqLSTMCell"
      # Weights are shared across all frequency blocks; the input to each
      # block is [features, m_prev (time), m_prev_freq (frequency)].
      concat_w = _get_concat_variable(
          "W", [actual_input_size + 2*self._num_units, 4 * self._num_units],
          dtype, self._num_unit_shards)
      b = vs.get_variable(
          "B",
          shape=[4 * self._num_units],
          initializer=init_ops.zeros_initializer(),
          dtype=dtype)
      # Diagonal connections
      if self._use_peepholes:
        w_f_diag = vs.get_variable(
            "W_F_diag", shape=[self._num_units], dtype=dtype)
        w_i_diag = vs.get_variable(
            "W_I_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "W_O_diag", shape=[self._num_units], dtype=dtype)
      # initialize the first freq state to be zero
      m_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
                                     self._num_units], dtype)
      # The incoming state is laid out as [c_0, m_0, c_1, m_1, ...] per
      # frequency block, as produced by the concatenation at the bottom.
      for fq in range(len(freq_inputs)):
        c_prev = array_ops.slice(state, [0, 2*fq*self._num_units],
                                 [-1, self._num_units])
        m_prev = array_ops.slice(state, [0, (2*fq+1)*self._num_units],
                                 [-1, self._num_units])
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        cell_inputs = array_ops.concat([freq_inputs[fq], m_prev, m_prev_freq],
                                       1)
        lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
        i, j, f, o = array_ops.split(
            value=lstm_matrix, num_or_size_splits=4, axis=1)
        if self._use_peepholes:
          c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
               sigmoid(i + w_i_diag * c_prev) * tanh(j))
        else:
          c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
        if self._cell_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
          # pylint: enable=invalid-unary-operand-type
        if self._use_peepholes:
          m = sigmoid(o + w_o_diag * c) * tanh(c)
        else:
          m = sigmoid(o) * tanh(c)
        m_prev_freq = m
        if fq == 0:
          state_out = array_ops.concat([c, m], 1)
          m_out = m
        else:
          state_out = array_ops.concat([state_out, c, m], 1)
          m_out = array_ops.concat([m_out, m], 1)
    return m_out, state_out

  def _make_tf_features(self, input_feat):
    """Make the frequency features.

    Args:
      input_feat: input Tensor, 2D, batch x num_units.

    Returns:
      A list of frequency features, with each element containing:
      - A 2D, batch x output_dim, Tensor representing the time-frequency feature
        for that frequency index. Here output_dim is feature_size.

    Raises:
      ValueError: if input_size cannot be inferred from static shape inference.
    """
    input_size = input_feat.get_shape().with_rank(2)[-1].value
    if input_size is None:
      raise ValueError("Cannot infer input_size from static shape inference.")
    # Number of window positions for a filter of width feature_size moved by
    # frequency_skip at a time.
    num_feats = int((input_size - self._feature_size) / (
        self._frequency_skip)) + 1
    freq_inputs = []
    for f in range(num_feats):
      cur_input = array_ops.slice(input_feat, [0, f*self._frequency_skip],
                                  [-1, self._feature_size])
      freq_inputs.append(cur_input)
    return freq_inputs
class GridLSTMCell(core_rnn_cell.RNNCell):
  """Grid Long short-term memory unit (LSTM) recurrent network cell.
  The default is based on:
  Nal Kalchbrenner, Ivo Danihelka and Alex Graves
  "Grid Long Short-Term Memory," Proc. ICLR 2016.
  http://arxiv.org/abs/1507.01526
  When peephole connections are used, the implementation is based on:
  Tara N. Sainath and Bo Li
  "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
  for LVCSR Tasks." submitted to INTERSPEECH, 2016.
  The code uses optional peephole connections, shared_weights and cell clipping.
  """
  def __init__(self, num_units, use_peepholes=False,
               share_time_frequency_weights=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None,
               num_frequency_blocks=None,
               start_freqindex_list=None,
               end_freqindex_list=None,
               couple_input_forget_gates=False,
               state_is_tuple=False):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: (optional) bool, default False. Set True to enable
        diagonal/peephole connections.
      share_time_frequency_weights: (optional) bool, default False. Set True to
        enable shared cell weights between time and frequency LSTMs.
      cell_clip: (optional) A float value, default None, if provided the cell
        state is clipped by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices, default None.
      num_unit_shards: (optional) int, default 1, How to split the weight
        matrix. If > 1, the weight matrix is stored across num_unit_shards.
      forget_bias: (optional) float, default 1.0, The initial bias of the
        forget gates, used to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: (optional) int, default None, The size of the input feature
        the LSTM spans over.
      frequency_skip: (optional) int, default None, The amount the LSTM filter
        is shifted by in frequency.
      num_frequency_blocks: [required] A list of frequency blocks needed to
        cover the whole input feature splitting defined by start_freqindex_list
        and end_freqindex_list.
      start_freqindex_list: [optional], list of ints, default None, The
        starting frequency index for each frequency block.
      end_freqindex_list: [optional], list of ints, default None. The ending
        frequency index for each frequency block.
      couple_input_forget_gates: (optional) bool, default False, Whether to
        couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
        model parameters and computation cost.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`. By default (False), they are concatenated
        along the column axis. This default behavior will soon be deprecated.
    Raises:
      ValueError: if the num_frequency_blocks list is not specified
    """
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated. Use state_is_tuple=True.", self)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._share_time_frequency_weights = share_time_frequency_weights
    self._couple_input_forget_gates = couple_input_forget_gates
    self._state_is_tuple = state_is_tuple
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_unit_shards = num_unit_shards
    self._forget_bias = forget_bias
    self._feature_size = feature_size
    self._frequency_skip = frequency_skip
    self._start_freqindex_list = start_freqindex_list
    self._end_freqindex_list = end_freqindex_list
    self._num_frequency_blocks = num_frequency_blocks
    self._total_blocks = 0
    if self._num_frequency_blocks is None:
      raise ValueError("Must specify num_frequency_blocks")
    # Total number of frequency steps over all blocks; each step contributes
    # one (c, m) pair to the state.
    for block_index in range(len(self._num_frequency_blocks)):
      self._total_blocks += int(self._num_frequency_blocks[block_index])
    if state_is_tuple:
      # Build one "<prefix>_c, <prefix>_m" namedtuple field pair per
      # frequency step of every block.
      state_names = ""
      for block_index in range(len(self._num_frequency_blocks)):
        for freq_index in range(self._num_frequency_blocks[block_index]):
          name_prefix = "state_f%02d_b%02d" % (freq_index, block_index)
          state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
      self._state_tuple_type = collections.namedtuple(
          "GridLSTMStateTuple", state_names.strip(","))
      self._state_size = self._state_tuple_type(
          *([num_units, num_units] * self._total_blocks))
    else:
      self._state_tuple_type = None
      self._state_size = num_units * self._total_blocks * 2
    # Output concatenates m_time and m_freq for every frequency step.
    self._output_size = num_units * self._total_blocks * 2
  @property
  def output_size(self):
    return self._output_size
  @property
  def state_size(self):
    return self._state_size
  @property
  def state_tuple_type(self):
    # None when state_is_tuple=False.
    return self._state_tuple_type
  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.
    Args:
      inputs: input Tensor, 2D, [batch, feature_size].
      state: Tensor or tuple of Tensors, 2D, [batch, state_size], depends on the
        flag self._state_is_tuple.
      scope: (optional) VariableScope for the created subgraph; if None, it
        defaults to "GridLSTMCell".
    Returns:
      A tuple containing:
      - A 2D, [batch, output_dim], Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, [batch, state_size], Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    batch_size = int(inputs.get_shape()[0])
    freq_inputs = self._make_tf_features(inputs)
    with vs.variable_scope(scope or "grid_lstm_cell",
                           initializer=self._initializer):  # "GridLSTMCell"
      m_out_lst = []
      state_out_lst = []
      # Each frequency block is processed independently and contributes its
      # own slice of output and state.
      for block in range(len(freq_inputs)):
        m_out_lst_current, state_out_lst_current = self._compute(
            freq_inputs[block], block, state, batch_size,
            state_is_tuple=self._state_is_tuple)
        m_out_lst.extend(m_out_lst_current)
        state_out_lst.extend(state_out_lst_current)
      if self._state_is_tuple:
        state_out = self._state_tuple_type(*state_out_lst)
      else:
        state_out = array_ops.concat(state_out_lst, 1)
      m_out = array_ops.concat(m_out_lst, 1)
    return m_out, state_out
  def _compute(self, freq_inputs, block, state, batch_size,
               state_prefix="state",
               state_is_tuple=True):
    """Run the actual computation of one step LSTM.
    Args:
      freq_inputs: list of Tensors, 2D, [batch, feature_size].
      block: int, current frequency block index to process.
      state: Tensor or tuple of Tensors, 2D, [batch, state_size], it depends on
        the flag state_is_tuple.
      batch_size: int32, batch size.
      state_prefix: (optional) string, name prefix for states, defaults to
        "state".
      state_is_tuple: boolean, indicates whether the state is a tuple or Tensor.
    Returns:
      A tuple, containing:
      - A list of [batch, output_dim] Tensors, representing the output of the
        LSTM given the inputs and state.
      - A list of [batch, state_size] Tensors, representing the LSTM state
        values given the inputs and previous state.
    """
    sigmoid = math_ops.sigmoid
    tanh = math_ops.tanh
    # With coupled gates only i, j, o are computed; f is derived as 1 - i.
    num_gates = 3 if self._couple_input_forget_gates else 4
    dtype = freq_inputs[0].dtype
    actual_input_size = freq_inputs[0].get_shape().as_list()[1]
    # Frequency-direction (F-LSTM) weights and biases.
    concat_w_f = _get_concat_variable(
        "W_f_%d" % block, [actual_input_size + 2 * self._num_units,
                           num_gates * self._num_units],
        dtype, self._num_unit_shards)
    b_f = vs.get_variable(
        "B_f_%d" % block,
        shape=[num_gates * self._num_units],
        initializer=init_ops.zeros_initializer(),
        dtype=dtype)
    if not self._share_time_frequency_weights:
      # Separate time-direction (T-LSTM) weights when not shared.
      concat_w_t = _get_concat_variable(
          "W_t_%d" % block, [actual_input_size + 2 * self._num_units,
                             num_gates * self._num_units],
          dtype, self._num_unit_shards)
      b_t = vs.get_variable(
          "B_t_%d" % block,
          shape=[num_gates * self._num_units],
          initializer=init_ops.zeros_initializer(),
          dtype=dtype)
    if self._use_peepholes:
      # Diagonal connections
      if not self._couple_input_forget_gates:
        w_f_diag_freqf = vs.get_variable(
            "W_F_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
        w_f_diag_freqt = vs.get_variable(
            "W_F_diag_freqt_%d"% block, shape=[self._num_units], dtype=dtype)
      w_i_diag_freqf = vs.get_variable(
          "W_I_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
      w_i_diag_freqt = vs.get_variable(
          "W_I_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
      w_o_diag_freqf = vs.get_variable(
          "W_O_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
      w_o_diag_freqt = vs.get_variable(
          "W_O_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
      if not self._share_time_frequency_weights:
        if not self._couple_input_forget_gates:
          w_f_diag_timef = vs.get_variable(
              "W_F_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
          w_f_diag_timet = vs.get_variable(
              "W_F_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
        w_i_diag_timef = vs.get_variable(
            "W_I_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
        w_i_diag_timet = vs.get_variable(
            "W_I_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
        w_o_diag_timef = vs.get_variable(
            "W_O_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
        w_o_diag_timet = vs.get_variable(
            "W_O_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
    # initialize the first freq state to be zero
    m_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
    c_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
    for freq_index in range(len(freq_inputs)):
      if state_is_tuple:
        name_prefix = "%s_f%02d_b%02d" % (state_prefix, freq_index, block)
        c_prev_time = getattr(state, name_prefix + "_c")
        m_prev_time = getattr(state, name_prefix + "_m")
      else:
        c_prev_time = array_ops.slice(
            state, [0, 2 * freq_index * self._num_units],
            [-1, self._num_units])
        m_prev_time = array_ops.slice(
            state, [0, (2 * freq_index + 1) * self._num_units],
            [-1, self._num_units])
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      cell_inputs = array_ops.concat(
          [freq_inputs[freq_index], m_prev_time, m_prev_freq], 1)
      # F-LSTM
      lstm_matrix_freq = nn_ops.bias_add(math_ops.matmul(cell_inputs,
                                                         concat_w_f), b_f)
      if self._couple_input_forget_gates:
        i_freq, j_freq, o_freq = array_ops.split(
            value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
        f_freq = None
      else:
        i_freq, j_freq, f_freq, o_freq = array_ops.split(
            value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
      # T-LSTM
      if self._share_time_frequency_weights:
        i_time = i_freq
        j_time = j_freq
        f_time = f_freq
        o_time = o_freq
      else:
        lstm_matrix_time = nn_ops.bias_add(math_ops.matmul(cell_inputs,
                                                           concat_w_t), b_t)
        if self._couple_input_forget_gates:
          i_time, j_time, o_time = array_ops.split(
              value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
          f_time = None
        else:
          i_time, j_time, f_time, o_time = array_ops.split(
              value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
      # F-LSTM c_freq
      # input gate activations
      if self._use_peepholes:
        i_freq_g = sigmoid(i_freq +
                           w_i_diag_freqf * c_prev_freq +
                           w_i_diag_freqt * c_prev_time)
      else:
        i_freq_g = sigmoid(i_freq)
      # forget gate activations
      if self._couple_input_forget_gates:
        f_freq_g = 1.0 - i_freq_g
      else:
        if self._use_peepholes:
          f_freq_g = sigmoid(f_freq + self._forget_bias +
                             w_f_diag_freqf * c_prev_freq +
                             w_f_diag_freqt * c_prev_time)
        else:
          f_freq_g = sigmoid(f_freq + self._forget_bias)
      # cell state
      c_freq = f_freq_g * c_prev_freq + i_freq_g * tanh(j_freq)
      if self._cell_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,
                                        self._cell_clip)
        # pylint: enable=invalid-unary-operand-type
      # T-LSTM c_freq
      # input gate activations
      if self._use_peepholes:
        if self._share_time_frequency_weights:
          i_time_g = sigmoid(i_time +
                             w_i_diag_freqf * c_prev_freq +
                             w_i_diag_freqt * c_prev_time)
        else:
          i_time_g = sigmoid(i_time +
                             w_i_diag_timef * c_prev_freq +
                             w_i_diag_timet * c_prev_time)
      else:
        i_time_g = sigmoid(i_time)
      # forget gate activations
      if self._couple_input_forget_gates:
        f_time_g = 1.0 - i_time_g
      else:
        if self._use_peepholes:
          if self._share_time_frequency_weights:
            f_time_g = sigmoid(f_time + self._forget_bias +
                               w_f_diag_freqf * c_prev_freq +
                               w_f_diag_freqt * c_prev_time)
          else:
            f_time_g = sigmoid(f_time + self._forget_bias +
                               w_f_diag_timef * c_prev_freq +
                               w_f_diag_timet * c_prev_time)
        else:
          f_time_g = sigmoid(f_time + self._forget_bias)
      # cell state
      c_time = f_time_g * c_prev_time + i_time_g * tanh(j_time)
      if self._cell_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,
                                        self._cell_clip)
        # pylint: enable=invalid-unary-operand-type
      # F-LSTM m_freq
      if self._use_peepholes:
        m_freq = sigmoid(o_freq +
                         w_o_diag_freqf * c_freq +
                         w_o_diag_freqt * c_time) * tanh(c_freq)
      else:
        m_freq = sigmoid(o_freq) * tanh(c_freq)
      # T-LSTM m_time
      if self._use_peepholes:
        if self._share_time_frequency_weights:
          m_time = sigmoid(o_time +
                           w_o_diag_freqf * c_freq +
                           w_o_diag_freqt * c_time) * tanh(c_time)
        else:
          m_time = sigmoid(o_time +
                           w_o_diag_timef * c_freq +
                           w_o_diag_timet * c_time) * tanh(c_time)
      else:
        m_time = sigmoid(o_time) * tanh(c_time)
      # This step's frequency-direction state feeds the next frequency step.
      m_prev_freq = m_freq
      c_prev_freq = c_freq
      # Concatenate the outputs for T-LSTM and F-LSTM for each shift
      if freq_index == 0:
        state_out_lst = [c_time, m_time]
        m_out_lst = [m_time, m_freq]
      else:
        state_out_lst.extend([c_time, m_time])
        m_out_lst.extend([m_time, m_freq])
    return m_out_lst, state_out_lst
  def _make_tf_features(self, input_feat, slice_offset=0):
    """Make the frequency features.
    Args:
      input_feat: input Tensor, 2D, [batch, num_units].
      slice_offset: (optional) Python int, default 0, the slicing offset is only
        used for the backward processing in the BidirectionalGridLSTMCell. It
        specifies a different starting point instead of always 0 to enable the
        forward and backward processing look at different frequency blocks.
    Returns:
      A list of frequency features, with each element containing:
      - A 2D, [batch, output_dim], Tensor representing the time-frequency
        feature for that frequency index. Here output_dim is feature_size.
    Raises:
      ValueError: if input_size cannot be inferred from static shape inference.
    """
    input_size = input_feat.get_shape().with_rank(2)[-1].value
    if input_size is None:
      raise ValueError("Cannot infer input_size from static shape inference.")
    if slice_offset > 0:
      # Padding to the end
      inputs = array_ops.pad(
          input_feat, array_ops.constant([0, 0, 0, slice_offset], shape=[2, 2],
                                         dtype=dtypes.int32),
          "CONSTANT")
    elif slice_offset < 0:
      # Padding to the front
      inputs = array_ops.pad(
          input_feat, array_ops.constant([0, 0, -slice_offset, 0], shape=[2, 2],
                                         dtype=dtypes.int32),
          "CONSTANT")
      slice_offset = 0
    else:
      inputs = input_feat
    freq_inputs = []
    if not self._start_freqindex_list:
      # Single implicit block spanning the whole feature dimension.
      if len(self._num_frequency_blocks) != 1:
        raise ValueError("Length of num_frequency_blocks"
                         " is not 1, but instead is %d",
                         len(self._num_frequency_blocks))
      num_feats = int((input_size - self._feature_size) / (
          self._frequency_skip)) + 1
      if num_feats != self._num_frequency_blocks[0]:
        raise ValueError(
            "Invalid num_frequency_blocks, requires %d but gets %d, please"
            " check the input size and filter config are correct." % (
                self._num_frequency_blocks[0], num_feats))
      block_inputs = []
      for f in range(num_feats):
        cur_input = array_ops.slice(
            inputs, [0, slice_offset + f * self._frequency_skip],
            [-1, self._feature_size])
        block_inputs.append(cur_input)
      freq_inputs.append(block_inputs)
    else:
      # Explicit per-block frequency ranges from start/end freqindex lists.
      if len(self._start_freqindex_list) != len(self._end_freqindex_list):
        raise ValueError("Length of start and end freqindex_list"
                         " does not match %d %d",
                         len(self._start_freqindex_list),
                         len(self._end_freqindex_list))
      if len(self._num_frequency_blocks) != len(self._start_freqindex_list):
        raise ValueError("Length of num_frequency_blocks"
                         " is not equal to start_freqindex_list %d %d",
                         len(self._num_frequency_blocks),
                         len(self._start_freqindex_list))
      for b in range(len(self._start_freqindex_list)):
        start_index = self._start_freqindex_list[b]
        end_index = self._end_freqindex_list[b]
        cur_size = end_index - start_index
        block_feats = int((cur_size - self._feature_size) / (
            self._frequency_skip)) + 1
        if block_feats != self._num_frequency_blocks[b]:
          raise ValueError(
              "Invalid num_frequency_blocks, requires %d but gets %d, please"
              " check the input size and filter config are correct." % (
                  self._num_frequency_blocks[b], block_feats))
        block_inputs = []
        for f in range(block_feats):
          cur_input = array_ops.slice(
              inputs, [0, start_index + slice_offset + f *
                       self._frequency_skip],
              [-1, self._feature_size])
          block_inputs.append(cur_input)
        freq_inputs.append(block_inputs)
    return freq_inputs
class BidirectionalGridLSTMCell(GridLSTMCell):
  """Bidirectional GridLstm cell.
  The bidirection connection is only used in the frequency direction, which
  hence doesn't affect the time direction's real-time processing that is
  required for online recognition systems.
  The current implementation uses different weights for the two directions.
  """
  def __init__(self, num_units, use_peepholes=False,
               share_time_frequency_weights=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None,
               num_frequency_blocks=None,
               start_freqindex_list=None,
               end_freqindex_list=None,
               couple_input_forget_gates=False,
               backward_slice_offset=0):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: (optional) bool, default False. Set True to enable
        diagonal/peephole connections.
      share_time_frequency_weights: (optional) bool, default False. Set True to
        enable shared cell weights between time and frequency LSTMs.
      cell_clip: (optional) A float value, default None, if provided the cell
        state is clipped by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices, default None.
      num_unit_shards: (optional) int, default 1, How to split the weight
        matrix. If > 1, the weight matrix is stored across num_unit_shards.
      forget_bias: (optional) float, default 1.0, The initial bias of the
        forget gates, used to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: (optional) int, default None, The size of the input feature
        the LSTM spans over.
      frequency_skip: (optional) int, default None, The amount the LSTM filter
        is shifted by in frequency.
      num_frequency_blocks: [required] A list of frequency blocks needed to
        cover the whole input feature splitting defined by start_freqindex_list
        and end_freqindex_list.
      start_freqindex_list: [optional], list of ints, default None, The
        starting frequency index for each frequency block.
      end_freqindex_list: [optional], list of ints, default None. The ending
        frequency index for each frequency block.
      couple_input_forget_gates: (optional) bool, default False, Whether to
        couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
        model parameters and computation cost.
      backward_slice_offset: (optional) int32, default 0, the starting offset to
        slice the feature for backward processing.
    """
    super(BidirectionalGridLSTMCell, self).__init__(
        num_units, use_peepholes, share_time_frequency_weights, cell_clip,
        initializer, num_unit_shards, forget_bias, feature_size, frequency_skip,
        num_frequency_blocks, start_freqindex_list, end_freqindex_list,
        couple_input_forget_gates=False,
        state_is_tuple=True)
    self._backward_slice_offset = int(backward_slice_offset)
    # Rebuild the state tuple with separate fwd/bwd entries, replacing the
    # one the parent constructor created.
    state_names = ""
    for direction in ["fwd", "bwd"]:
      for block_index in range(len(self._num_frequency_blocks)):
        for freq_index in range(self._num_frequency_blocks[block_index]):
          name_prefix = "%s_state_f%02d_b%02d" % (direction, freq_index,
                                                  block_index)
          state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
    self._state_tuple_type = collections.namedtuple(
        "BidirectionalGridLSTMStateTuple", state_names.strip(","))
    self._state_size = self._state_tuple_type(
        *([num_units, num_units] * self._total_blocks * 2))
    self._output_size = 2 * num_units * self._total_blocks * 2
  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.
    Args:
      inputs: input Tensor, 2D, [batch, num_units].
      state: tuple of Tensors, 2D, [batch, state_size].
      scope: (optional) VariableScope for the created subgraph; if None, it
        defaults to "BidirectionalGridLSTMCell".
    Returns:
      A tuple containing:
      - A 2D, [batch, output_dim], Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, [batch, state_size], Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    batch_size = int(inputs.get_shape()[0])
    fwd_inputs = self._make_tf_features(inputs)
    if self._backward_slice_offset:
      # Backward direction may look at a shifted set of frequency windows.
      bwd_inputs = self._make_tf_features(inputs, self._backward_slice_offset)
    else:
      bwd_inputs = fwd_inputs
    # Forward processing
    with vs.variable_scope(scope or "bidirectional_grid_lstm_cell",
                           initializer=self._initializer):
      with vs.variable_scope("fwd"):
        fwd_m_out_lst = []
        fwd_state_out_lst = []
        for block in range(len(fwd_inputs)):
          fwd_m_out_lst_current, fwd_state_out_lst_current = self._compute(
              fwd_inputs[block], block, state, batch_size,
              state_prefix="fwd_state", state_is_tuple=True)
          fwd_m_out_lst.extend(fwd_m_out_lst_current)
          fwd_state_out_lst.extend(fwd_state_out_lst_current)
      # Backward processing
      bwd_m_out_lst = []
      bwd_state_out_lst = []
      with vs.variable_scope("bwd"):
        for block in range(len(bwd_inputs)):
          # Reverse the blocks
          bwd_inputs_reverse = bwd_inputs[block][::-1]
          bwd_m_out_lst_current, bwd_state_out_lst_current = self._compute(
              bwd_inputs_reverse, block, state, batch_size,
              state_prefix="bwd_state", state_is_tuple=True)
          bwd_m_out_lst.extend(bwd_m_out_lst_current)
          bwd_state_out_lst.extend(bwd_state_out_lst_current)
    state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst))
    # Outputs are always concated as it is never used separately.
    m_out = array_ops.concat(fwd_m_out_lst + bwd_m_out_lst, 1)
    return m_out, state_out
# Alias the private fully-connected helper from the core RNN cell
# implementation; used by AttentionCellWrapper below.
# pylint: disable=protected-access
_linear = core_rnn_cell_impl._linear
# pylint: enable=protected-access
class AttentionCellWrapper(core_rnn_cell.RNNCell):
  """Basic attention cell wrapper.
  Implementation based on https://arxiv.org/abs/1409.0473.
  """
  def __init__(self, cell, attn_length, attn_size=None, attn_vec_size=None,
               input_size=None, state_is_tuple=False):
    """Create a cell with attention.
    Args:
      cell: an RNNCell, an attention is added to it.
      attn_length: integer, the size of an attention window.
      attn_size: integer, the size of an attention vector. Equal to
          cell.output_size by default.
      attn_vec_size: integer, the number of convolutional features calculated
          on attention state and a size of the hidden layer built from
          base cell state. Equal attn_size to by default.
      input_size: integer, the size of a hidden linear layer,
          built from inputs and attention. Derived from the input tensor
          by default.
      state_is_tuple: If True, accepted and returned states are n-tuples, where
        `n = len(cells)`.  By default (False), the states are all
        concatenated along the column axis.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if cell returns a state tuple but the flag
          `state_is_tuple` is `False` or if attn_length is zero or less.
    """
    if not isinstance(cell, core_rnn_cell.RNNCell):
      raise TypeError("The parameter cell is not RNNCell.")
    if nest.is_sequence(cell.state_size) and not state_is_tuple:
      raise ValueError("Cell returns tuple of states, but the flag "
                       "state_is_tuple is not set. State size is: %s"
                       % str(cell.state_size))
    if attn_length <= 0:
      raise ValueError("attn_length should be greater than zero, got %s"
                       % str(attn_length))
    if not state_is_tuple:
      logging.warn(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated.  Use state_is_tuple=True.", self)
    if attn_size is None:
      attn_size = cell.output_size
    if attn_vec_size is None:
      attn_vec_size = attn_size
    self._state_is_tuple = state_is_tuple
    self._cell = cell
    self._attn_vec_size = attn_vec_size
    self._input_size = input_size
    self._attn_size = attn_size
    self._attn_length = attn_length
  @property
  def state_size(self):
    # (wrapped cell state, attention vector, flattened attention window).
    size = (self._cell.state_size, self._attn_size,
            self._attn_size * self._attn_length)
    if self._state_is_tuple:
      return size
    else:
      return sum(list(size))
  @property
  def output_size(self):
    return self._attn_size
  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell with attention (LSTMA)."""
    with vs.variable_scope(scope or "attention_cell_wrapper"):
      if self._state_is_tuple:
        state, attns, attn_states = state
      else:
        # Unpack the concatenated state into its three components.
        states = state
        state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
        attns = array_ops.slice(
            states, [0, self._cell.state_size], [-1, self._attn_size])
        attn_states = array_ops.slice(
            states, [0, self._cell.state_size + self._attn_size],
            [-1, self._attn_size * self._attn_length])
      attn_states = array_ops.reshape(attn_states,
                                      [-1, self._attn_length, self._attn_size])
      input_size = self._input_size
      if input_size is None:
        input_size = inputs.get_shape().as_list()[1]
      # Mix the raw input with the previous attention vector.
      inputs = _linear([inputs, attns], input_size, True)
      lstm_output, new_state = self._cell(inputs, state)
      if self._state_is_tuple:
        new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
      else:
        new_state_cat = new_state
      new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
      with vs.variable_scope("attn_output_projection"):
        output = _linear([lstm_output, new_attns], self._attn_size, True)
      # Append this step's output to the attention window.
      new_attn_states = array_ops.concat(
          [new_attn_states, array_ops.expand_dims(output, 1)], 1)
      new_attn_states = array_ops.reshape(
          new_attn_states, [-1, self._attn_length * self._attn_size])
      new_state = (new_state, new_attns, new_attn_states)
      if not self._state_is_tuple:
        new_state = array_ops.concat(list(new_state), 1)
      return output, new_state
  def _attention(self, query, attn_states):
    # Compute the attention-weighted summary of attn_states given the query.
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh
    with vs.variable_scope("attention"):
      k = vs.get_variable(
          "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("attn_v", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      # Attention scores and normalized weights over the window.
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      # Drop the oldest entry of the window.
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states
class LayerNormBasicLSTMCell(core_rnn_cell.RNNCell):
  """LSTM unit with layer normalization and recurrent dropout.
  This class adds layer normalization and recurrent dropout to a
  basic LSTM unit. Layer normalization implementation is based on:
    https://arxiv.org/abs/1607.06450.
  "Layer Normalization"
  Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
  and is applied before the internal nonlinearities.
  Recurrent dropout is base on:
    https://arxiv.org/abs/1603.05118
  "Recurrent Dropout without Memory Loss"
  Stanislau Semeniuta, Aliaksei Severyn, Erhardt Barth.
  """
  def __init__(self, num_units, forget_bias=1.0,
               input_size=None, activation=math_ops.tanh,
               layer_norm=True, norm_gain=1.0, norm_shift=0.0,
               dropout_keep_prob=1.0, dropout_prob_seed=None):
    """Initializes the basic LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      activation: Activation function of the inner states.
      layer_norm: If `True`, layer normalization will be applied.
      norm_gain: float, The layer normalization gain initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      norm_shift: float, The layer normalization shift initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
        recurrent dropout probability value. If float and 1.0, no dropout will
        be applied.
      dropout_prob_seed: (optional) integer, the randomness seed.
    """
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation
    self._forget_bias = forget_bias
    self._keep_prob = dropout_keep_prob
    self._seed = dropout_prob_seed
    self._layer_norm = layer_norm
    self._g = norm_gain
    self._b = norm_shift
  @property
  def state_size(self):
    return core_rnn_cell.LSTMStateTuple(self._num_units, self._num_units)
  @property
  def output_size(self):
    return self._num_units
  def _norm(self, inp, scope):
    # Layer-normalize `inp`, creating per-scope gamma/beta variables.
    shape = inp.get_shape()[-1:]
    gamma_init = init_ops.constant_initializer(self._g)
    beta_init = init_ops.constant_initializer(self._b)
    with vs.variable_scope(scope):
      # Initialize beta and gamma for use by layer_norm.
      vs.get_variable("gamma", shape=shape, initializer=gamma_init)
      vs.get_variable("beta", shape=shape, initializer=beta_init)
    normalized = layers.layer_norm(inp, reuse=True, scope=scope)
    return normalized
  def _linear(self, args):
    # Project `args` to the four gate pre-activations; the bias is skipped
    # when layer normalization is on (beta plays that role).
    out_size = 4 * self._num_units
    proj_size = args.get_shape()[-1]
    weights = vs.get_variable("weights", [proj_size, out_size])
    out = math_ops.matmul(args, weights)
    if not self._layer_norm:
      bias = vs.get_variable("biases", [out_size])
      out = nn_ops.bias_add(out, bias)
    return out
  def __call__(self, inputs, state, scope=None):
    """LSTM cell with layer normalization and recurrent dropout."""
    with vs.variable_scope(scope or "layer_norm_basic_lstm_cell"):
      c, h = state
      args = array_ops.concat([inputs, h], 1)
      concat = self._linear(args)
      # i = input, j = new input, f = forget, o = output gate.
      i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
      if self._layer_norm:
        i = self._norm(i, "input")
        j = self._norm(j, "transform")
        f = self._norm(f, "forget")
        o = self._norm(o, "output")
      g = self._activation(j)
      # Recurrent dropout on the candidate values only.
      if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
        g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)
      new_c = (c * math_ops.sigmoid(f + self._forget_bias)
               + math_ops.sigmoid(i) * g)
      if self._layer_norm:
        new_c = self._norm(new_c, "state")
      new_h = self._activation(new_c) * math_ops.sigmoid(o)
      new_state = core_rnn_cell.LSTMStateTuple(new_c, new_h)
      return new_h, new_state
|
|
# plugin/plugin_base.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Testing extensions.
This module is designed to work as a testing-framework-agnostic library,
so that we can continue to support nose and also begin adding new
functionality via py.test.
"""
from __future__ import absolute_import
try:
    # unittest has a SkipTest also but pytest doesn't
# honor it unless nose is imported too...
from nose import SkipTest
except ImportError:
from _pytest.runner import Skipped as SkipTest
import sys
import re
py3k = sys.version_info >= (3, 0)
if py3k:
import configparser
else:
import ConfigParser as configparser
# late imports
# These module-level placeholders are rebound by post_begin() once coverage
# and the test plugins are running, so importing this module stays cheap.
fixtures = None
engines = None
exclusions = None
warnings = None
profiling = None
assertions = None
requirements = None
config = None
testing = None
util = None
# Populated by read_config(); holds the parsed setup.cfg/test.cfg contents.
file_config = None
# Bound to the stdlib logging module lazily by _log().
logging = None
# Engine/database options accumulated from command-line callbacks.
db_opts = {}
# The parsed option namespace, set by pre_begin()/_setup_options().
options = None
def setup_options(make_option):
    """Register this plugin's command line options.

    Args:
      make_option: a callable with an ``optparse``-style ``make_option``
        signature (flag string plus keyword arguments).
    """
    # (flag, kwargs) pairs; registered in declaration order so that help
    # output keeps the historical layout.
    option_specs = [
        ("--log-info",
         dict(action="callback", type="string", callback=_log,
              help="turn on info logging for <LOG> (multiple OK)")),
        ("--log-debug",
         dict(action="callback", type="string", callback=_log,
              help="turn on debug logging for <LOG> (multiple OK)")),
        ("--db",
         dict(action="append", type="string", dest="db",
              help="Use prefab database uri. Multiple OK, "
                   "first one is run by default.")),
        ("--dbs",
         dict(action="callback", callback=_list_dbs,
              help="List available prefab dbs")),
        ("--dburi",
         dict(action="append", type="string", dest="dburi",
              help="Database uri. Multiple OK, "
                   "first one is run by default.")),
        ("--dropfirst",
         dict(action="store_true", dest="dropfirst",
              help="Drop all tables in the target database first")),
        ("--backend-only",
         dict(action="store_true", dest="backend_only",
              help="Run only tests marked with __backend__")),
        ("--mockpool",
         dict(action="store_true", dest="mockpool",
              help="Use mock pool (asserts only one connection used)")),
        ("--low-connections",
         dict(action="store_true", dest="low_connections",
              help="Use a low number of distinct connections - "
                   "i.e. for Oracle TNS")),
        ("--reversetop",
         dict(action="store_true", dest="reversetop", default=False,
              help="Use a random-ordering set implementation in the ORM "
                   "(helps reveal dependency issues)")),
        ("--requirements",
         dict(action="callback", type="string",
              callback=_requirements_opt,
              help="requirements class for testing, overrides setup.cfg")),
        ("--with-cdecimal",
         dict(action="store_true", dest="cdecimal", default=False,
              help="Monkeypatch the cdecimal library into Python 'decimal' "
                   "for all tests")),
        ("--serverside",
         dict(action="callback", callback=_server_side_cursors,
              help="Turn on server side cursors for PG")),
        ("--mysql-engine",
         dict(action="store", dest="mysql_engine", default=None,
              help="Use the specified MySQL storage engine for all tables, "
                   "default is a db-default/InnoDB combo.")),
        ("--tableopts",
         dict(action="append", dest="tableopts", default=[],
              help="Add a dialect-specific table option, key=value")),
        ("--write-profiles",
         dict(action="store_true", dest="write_profiles", default=False,
              help="Write/update profiling data.")),
    ]
    for flag, kwargs in option_specs:
        make_option(flag, **kwargs)
def read_config():
    """Load test configuration into the module-global ``file_config``.

    Reads ``setup.cfg`` and ``test.cfg`` from the current directory;
    files that do not exist are silently skipped by ConfigParser.
    """
    global file_config
    parser = configparser.ConfigParser()
    parser.read(['setup.cfg', 'test.cfg'])
    file_config = parser
def pre_begin(opt):
    """Run early setup hooks, before coverage may have been started.

    Stores the parsed option namespace in the module-global ``options``
    and invokes every callback registered via the ``@pre`` decorator.
    """
    global options
    options = opt
    for hook in pre_configure:
        hook(options, file_config)
def set_coverage_flag(value):
    """Record on the global ``options`` whether coverage is active."""
    setattr(options, "has_coverage", value)
def post_begin():
    """things to set up later, once we know coverage is running."""
    # Lazy setup of other options (post coverage)
    for fn in post_configure:
        fn(options, file_config)

    # late imports, has to happen after config as well
    # as nose plugins like coverage
    # NOTE: the ``global`` statement rebinds these function-local imports
    # to module-level names used throughout the rest of this plugin.
    global util, fixtures, engines, exclusions, \
        assertions, warnings, profiling,\
        config, testing
    from sqlalchemy import testing
    from sqlalchemy.testing import fixtures, engines, exclusions, \
        assertions, warnings, profiling, config
    from sqlalchemy import util
def _log(opt_str, value, parser):
    # optparse callback for the --log-info / --log-debug options: raise the
    # level of the logger named by ``value``.  ``logging`` is presumably a
    # module-global placeholder (falsy until first use) that is replaced by
    # the real stdlib module here — TODO confirm against the module header.
    global logging
    if not logging:
        import logging
        logging.basicConfig()

    if opt_str.endswith('-info'):
        logging.getLogger(value).setLevel(logging.INFO)
    elif opt_str.endswith('-debug'):
        logging.getLogger(value).setLevel(logging.DEBUG)
def _list_dbs(*args):
    """optparse callback for --dbs: print known prefab URIs, then exit."""
    print("Available --db options (use --dburi to override)")
    names = sorted(file_config.options('db'))
    for name in names:
        print("%20s\t%s" % (name, file_config.get('db', name)))
    sys.exit(0)
def _server_side_cursors(opt_str, value, parser):
    """optparse callback for --serverside: enable PG server-side cursors."""
    db_opts.update(server_side_cursors=True)
def _requirements_opt(opt_str, value, parser):
    """optparse callback for --requirements: install the named class."""
    _setup_requirements(value)
# Registries of setup callbacks run before / after coverage starts up;
# populated via the ``pre`` and ``post`` decorators.
pre_configure = []
post_configure = []
def pre(fn):
    """Decorator: register *fn* to be run by :func:`pre_begin`."""
    pre_configure.append(fn)
    return fn
def post(fn):
    """Decorator: register *fn* to be run by :func:`post_begin`."""
    post_configure.append(fn)
    return fn
@pre
def _setup_options(opt, file_config):
    # Earliest hook: capture the parsed option namespace in the module
    # global so the remaining hooks can read it.
    global options
    options = opt
@pre
def _monkeypatch_cdecimal(options, file_config):
    # With --with-cdecimal, make every ``import decimal`` in the test run
    # resolve to the C implementation instead of the stdlib module.
    if options.cdecimal:
        import cdecimal
        sys.modules['decimal'] = cdecimal
@post
def _engine_uri(options, file_config):
    # Resolve which database URLs to test against: explicit --dburi values
    # first, then any --db names looked up in the [db] section of the
    # config file, falling back to the file's "default" entry.
    from sqlalchemy.testing import engines, config
    from sqlalchemy import testing

    if options.dburi:
        db_urls = list(options.dburi)
    else:
        db_urls = []

    if options.db:
        for db_token in options.db:
            # --db may be given multiple times, and each token may itself
            # hold a comma/whitespace separated list of names.
            for db in re.split(r'[,\s]+', db_token):
                if db not in file_config.options('db'):
                    raise RuntimeError(
                        "Unknown URI specifier '%s'. "
                        "Specify --dbs for known uris."
                        % db)
                else:
                    db_urls.append(file_config.get('db', db))

    if not db_urls:
        db_urls.append(file_config.get('db', 'default'))

    for db_url in db_urls:
        # Verify connectivity up front, then register the engine as one of
        # the active test configurations.
        eng = engines.testing_engine(db_url, db_opts)
        eng.connect().close()
        config.Config.register(eng, db_opts, options, file_config, testing)

    config.db_opts = db_opts
@post
def _engine_pool(options, file_config):
    # --mockpool: assert that at most one connection is checked out at a
    # time by substituting SQLAlchemy's AssertionPool.
    if options.mockpool:
        from sqlalchemy import pool
        db_opts['poolclass'] = pool.AssertionPool
@post
def _requirements(options, file_config):
    # Default requirements class comes from setup.cfg; --requirements (see
    # _requirements_opt) runs earlier and wins because _setup_requirements
    # is a no-op once config.requirements is set.
    requirement_cls = file_config.get('sqla_testing', "requirement_cls")
    _setup_requirements(requirement_cls)
def _setup_requirements(argument):
    # Resolve a "module.path:ClassName" spec and install an instance as the
    # active requirements object.  Idempotent: the first caller wins.
    from sqlalchemy.testing import config
    from sqlalchemy import testing

    if config.requirements is not None:
        return

    modname, clsname = argument.split(":")

    # importlib.import_module() only introduced in 2.7, a little
    # late
    mod = __import__(modname)
    for component in modname.split(".")[1:]:
        mod = getattr(mod, component)
    req_cls = getattr(mod, clsname)

    config.requirements = testing.requires = req_cls()
@post
def _prep_testing_database(options, file_config):
    """With --dropfirst, drop all views and tables (in the default and, if
    enabled, the ``test_schema`` schema) from every configured database."""
    from sqlalchemy.testing import config
    from sqlalchemy import schema, inspect

    if options.dropfirst:
        for cfg in config.Config.all_configs():
            e = cfg.db
            inspector = inspect(e)
            try:
                view_names = inspector.get_view_names()
            except NotImplementedError:
                # Dialect cannot enumerate views; nothing to drop.
                pass
            else:
                for vname in view_names:
                    e.execute(schema._DropView(
                        schema.Table(vname, schema.MetaData())
                    ))

            if config.requirements.schemas.enabled_for_config(cfg):
                try:
                    view_names = inspector.get_view_names(
                        schema="test_schema")
                except NotImplementedError:
                    pass
                else:
                    for vname in view_names:
                        e.execute(schema._DropView(
                            schema.Table(vname, schema.MetaData(),
                                         schema="test_schema")
                        ))

            # Drop tables in reverse foreign-key order so dependents go
            # before the tables they reference.
            for tname in reversed(inspector.get_table_names(
                    order_by="foreign_key")):
                e.execute(schema.DropTable(
                    schema.Table(tname, schema.MetaData())
                ))

            if config.requirements.schemas.enabled_for_config(cfg):
                for tname in reversed(inspector.get_table_names(
                        order_by="foreign_key", schema="test_schema")):
                    e.execute(schema.DropTable(
                        schema.Table(tname, schema.MetaData(),
                                     schema="test_schema")
                    ))
@post
def _set_table_options(options, file_config):
    """Apply --tableopts ``key=value`` pairs (and --mysql-engine) to the
    dialect-specific table options used by the test schema helpers."""
    from sqlalchemy.testing import schema

    table_options = schema.table_options
    for spec in options.tableopts:
        # Split on the first '=' only, so option values may themselves
        # contain '=' characters (the previous bare split() raised
        # ValueError for e.g. "with=a=b").
        key, value = spec.split('=', 1)
        table_options[key] = value

    if options.mysql_engine:
        table_options['mysql_engine'] = options.mysql_engine
@post
def _reverse_topological(options, file_config):
    # --reversetop: randomize unit-of-work ordering so hidden dependency
    # assumptions in the ORM tests surface.
    if options.reversetop:
        from sqlalchemy.orm.util import randomize_unitofwork
        randomize_unitofwork()
@post
def _post_setup_options(opt, file_config):
    # Mirror the option namespace and parsed config file onto the testing
    # config module so test fixtures can reach them.
    from sqlalchemy.testing import config
    config.options = options
    config.file_config = file_config
@post
def _setup_profiling(options, file_config):
    # Point the profiling plugin at the profile-stats file named in the
    # [sqla_testing] section.
    from sqlalchemy.testing import profiling
    profiling._profile_stats = profiling.ProfileStatsFile(
        file_config.get('sqla_testing', 'profile_file'))
def want_class(cls):
    """Return True if *cls* should be collected as a test class.

    Excludes non-TestBase classes, underscore-prefixed (private) classes,
    and — under --backend-only — classes not marked ``__backend__``.
    """
    if not issubclass(cls, fixtures.TestBase):
        return False
    if cls.__name__.startswith('_'):
        return False
    if config.options.backend_only and not getattr(cls, '__backend__',
                                                   False):
        return False
    return True
def generate_sub_tests(cls, module):
    """Yield the test classes to actually run for *cls*.

    A ``__backend__`` class is cloned once per eligible configuration,
    with ``__only_on__`` pinning each clone to one dialect+driver; the
    clones are also installed on *module* so collection can find them.
    Non-backend classes are yielded unchanged.
    """
    if getattr(cls, '__backend__', False):
        for cfg in _possible_configs_for_cls(cls):
            name = "%s_%s_%s" % (cls.__name__, cfg.db.name, cfg.db.driver)
            subcls = type(
                name,
                (cls, ),
                {
                    "__only_on__": ("%s+%s" % (cfg.db.name, cfg.db.driver)),
                }
            )
            setattr(module, name, subcls)
            yield subcls
    else:
        yield cls
def start_test_class(cls):
    # Before a class's tests run: skip it if no configuration supports it,
    # then install any class-specific engine.
    _do_skips(cls)
    _setup_engine(cls)
def stop_test_class(cls):
    # After a class's tests run: reap leftover connections and restore the
    # default engine configuration.
    #from sqlalchemy import inspect
    #assert not inspect(testing.db).get_table_names()
    engines.testing_reaper._stop_test_ctx()
    # Under --low-connections the global cleanup assertions are skipped,
    # since they require extra distinct connections.
    if not options.low_connections:
        assertions.global_cleanup_assertions()
    _restore_engine()
def _restore_engine():
    # Pop back to the previous engine configuration (see _setup_engine).
    config._current.reset(testing)
def _setup_engine(cls):
    # If the class declares __engine_options__, push a dedicated engine
    # built with those options for the duration of the class's tests.
    if getattr(cls, '__engine_options__', None):
        eng = engines.testing_engine(options=cls.__engine_options__)
        config._current.push_engine(eng, testing)
def before_test(test, test_module_name, test_class, test_name):
    """Reset warnings and point the profiler at the current test id."""
    # like a nose id, e.g.:
    # "test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause"
    name = test_class.__name__

    # Strip the "_<dbname>_<driver>" suffix added by generate_sub_tests so
    # profiling ids stay stable across backends.
    suffix = "_%s_%s" % (config.db.name, config.db.driver)
    if name.endswith(suffix):
        name = name[0:-(len(suffix))]

    id_ = "%s.%s.%s" % (test_module_name, name, test_name)

    warnings.resetwarnings()
    profiling._current_test = id_
def after_test(test):
    # Per-test teardown: reap test-scoped connections, reset warnings.
    engines.testing_reaper._after_test_ctx()
    warnings.resetwarnings()
def _possible_configs_for_cls(cls, reasons=None):
    """Return the set of registered configurations *cls* can run on.

    Filters by, in order: ``__unsupported_on__``, ``__only_on__``,
    ``__requires__`` (appending human-readable skip reasons to *reasons*
    if given), ``__prefer_requires__`` (soft preference — only narrows
    when something would remain), and ``__excluded_on__`` version specs.
    """
    all_configs = set(config.Config.all_configs())

    if cls.__unsupported_on__:
        spec = exclusions.db_spec(*cls.__unsupported_on__)
        for config_obj in list(all_configs):
            if spec(config_obj):
                all_configs.remove(config_obj)

    if getattr(cls, '__only_on__', None):
        spec = exclusions.db_spec(*util.to_list(cls.__only_on__))
        for config_obj in list(all_configs):
            if not spec(config_obj):
                all_configs.remove(config_obj)

    if hasattr(cls, '__requires__'):
        requirements = config.requirements
        for config_obj in list(all_configs):
            for requirement in cls.__requires__:
                check = getattr(requirements, requirement)

                skip_reasons = check.matching_config_reasons(config_obj)
                if skip_reasons:
                    all_configs.remove(config_obj)
                    if reasons is not None:
                        reasons.extend(skip_reasons)
                    break

    if hasattr(cls, '__prefer_requires__'):
        non_preferred = set()
        requirements = config.requirements
        for config_obj in list(all_configs):
            for requirement in cls.__prefer_requires__:
                check = getattr(requirements, requirement)

                if not check.enabled_for_config(config_obj):
                    non_preferred.add(config_obj)
        # Preferences only narrow the set if at least one preferred
        # configuration would survive.
        if all_configs.difference(non_preferred):
            all_configs.difference_update(non_preferred)

    # NOTE: the loop variable deliberately reuses the name ``db_spec``;
    # the exclusions.db_spec helper is not needed past this point.
    for db_spec, op, spec in getattr(cls, '__excluded_on__', ()):
        for config_obj in list(all_configs):
            if not exclusions.skip_if(
                    exclusions.SpecPredicate(db_spec, op, spec)
            ).enabled_for_config(config_obj):
                all_configs.remove(config_obj)

    return all_configs
def _do_skips(cls):
    """Raise SkipTest if *cls* cannot run, else select a configuration.

    Evaluates ``__skip_if__`` callables, raises with an explanatory
    message when no configuration survives filtering, applies the soft
    ``__prefer_backends__`` preference, and finally switches the current
    configuration to one of the survivors if needed.
    """
    reasons = []
    all_configs = _possible_configs_for_cls(cls, reasons)

    if getattr(cls, '__skip_if__', False):
        for c in getattr(cls, '__skip_if__'):
            if c():
                raise SkipTest("'%s' skipped by %s" % (
                    cls.__name__, c.__name__)
                )

    if not all_configs:
        if getattr(cls, '__backend__', False):
            msg = "'%s' unsupported for implementation '%s'" % (
                cls.__name__, cls.__only_on__)
        else:
            msg = "'%s' unsupported on any DB implementation %s%s" % (
                cls.__name__,
                ", ".join(
                    "'%s(%s)+%s'" % (
                        config_obj.db.name,
                        ".".join(
                            str(dig) for dig in
                            config_obj.db.dialect.server_version_info),
                        config_obj.db.driver
                    )
                    for config_obj in config.Config.all_configs()
                ),
                ", ".join(reasons)
            )
        raise SkipTest(msg)

    elif hasattr(cls, '__prefer_backends__'):
        non_preferred = set()
        spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__))
        for config_obj in all_configs:
            if not spec(config_obj):
                non_preferred.add(config_obj)
        # Soft preference: only narrow if a preferred config remains.
        if all_configs.difference(non_preferred):
            all_configs.difference_update(non_preferred)

    if config._current not in all_configs:
        _setup_config(all_configs.pop(), cls)
def _setup_config(config_obj, ctx):
    # Make config_obj the active configuration (popped by _restore_engine).
    config._current.push(config_obj, testing)
|
|
import unittest
from mock import MagicMock
from hazelcast.cp import LOCK_SERVICE
from hazelcast.errors import (
HazelcastRuntimeError,
LockOwnershipLostError,
LockAcquireLimitReachedError,
SessionExpiredError,
WaitKeyCancelledError,
IllegalMonitorStateError,
)
from hazelcast.future import ImmediateExceptionFuture, ImmediateFuture
from hazelcast.protocol import RaftGroupId
from hazelcast.proxy.cp.fenced_lock import FencedLock
from hazelcast.util import thread_id, AtomicInteger
class FencedLockMockTest(unittest.TestCase):
    """Unit tests for the blocking ``FencedLock`` proxy against a mocked
    session manager.

    Each test wires the proxy's session-manager calls and/or request
    methods to canned results or errors, then asserts on:

    - the value returned / exception raised by the proxy call,
    - how many times acquire/release/invalidate_session were invoked
      (``assert_call_counts``), and
    - whether a session id is recorded for the current thread in the
      proxy's ``_lock_session_ids`` bookkeeping dict.
    """

    def setUp(self):
        # Fresh mocks for the three session-manager entry points whose
        # call counts the assertions below track.
        self.acquire_session = MagicMock()
        self.release_session = MagicMock()
        self.invalidate_session = MagicMock()
        self.session_manager = MagicMock(
            acquire_session=self.acquire_session,
            release_session=self.release_session,
            invalidate_session=self.invalidate_session,
        )
        context = MagicMock(proxy_session_manager=self.session_manager)
        group_id = RaftGroupId("test", 0, 42)
        self.proxy = FencedLock(
            context, group_id, LOCK_SERVICE, "mylock@mygroup", "mylock"
        ).blocking()

    def test_lock(self):
        # Everything succeeds
        self.prepare_acquire_session(1)
        self.mock_request_lock(2)
        self.assertEqual(2, self.proxy.lock())
        self.assert_call_counts(1, 0, 0)
        self.assert_lock_session_id(1)

    def test_lock_when_acquire_session_fails(self):
        # First call to acquire session fails, should not retry
        self.prepare_acquire_session(-1, HazelcastRuntimeError("server_error"))
        with self.assertRaises(HazelcastRuntimeError):
            self.proxy.lock()
        self.assert_call_counts(1, 0, 0)
        self.assert_no_lock_session_id()

    def test_lock_when_server_closes_old_session(self):
        # Same thread issues a new lock call while holding a lock.
        # Server closes session related to the first lock, should not retry
        self.prepare_acquire_session(2)
        self.prepare_lock_session_ids(1)
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.lock()
        self.assert_call_counts(1, 1, 0)
        self.assert_no_lock_session_id()

    def test_lock_when_lock_acquire_limit_reached(self):
        # Lock acquire limit is reached, server returns invalid fence, should not retry
        self.prepare_acquire_session(1)
        self.mock_request_lock(FencedLock.INVALID_FENCE)
        with self.assertRaises(LockAcquireLimitReachedError):
            self.proxy.lock()
        self.assert_call_counts(1, 1, 0)
        self.assert_no_lock_session_id()

    def test_lock_on_session_expired_error(self):
        # Session expired error comes from the server on lock request, retries and gets valid fence
        self.prepare_acquire_session(1)
        self.mock_request_lock(2, SessionExpiredError())
        self.assertEqual(2, self.proxy.lock())
        self.assert_call_counts(2, 0, 1)
        self.assert_lock_session_id(1)

    def test_lock_on_session_expired_error_on_reentrant_lock_request(self):
        # Session expired error comes from the server on second lock request,
        # while holding a lock, should not retry
        self.prepare_acquire_session(1)
        self.prepare_lock_session_ids(1)
        self.mock_request_lock(3, SessionExpiredError())
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.lock()
        self.assert_call_counts(1, 0, 1)
        self.assert_no_lock_session_id()

    def test_lock_on_wait_key_cancelled_error(self):
        # Wait key cancelled error comes from the server, should not retry
        self.prepare_acquire_session(1)
        self.mock_request_lock(2, WaitKeyCancelledError())
        with self.assertRaises(IllegalMonitorStateError):
            self.proxy.lock()
        self.assert_call_counts(1, 1, 0)
        self.assert_no_lock_session_id()

    def test_lock_on_unspecified_error(self):
        # Server sends another error, should not retry
        self.prepare_acquire_session(1)
        self.mock_request_lock(-1, HazelcastRuntimeError("expected"))
        with self.assertRaises(HazelcastRuntimeError):
            self.proxy.lock()
        self.assert_call_counts(1, 1, 0)
        self.assert_no_lock_session_id()

    def test_try_lock(self):
        # Everything succeeds
        self.prepare_acquire_session(1)
        self.mock_request_try_lock(2)
        self.assertEqual(2, self.proxy.try_lock())
        self.assert_call_counts(1, 0, 0)
        self.assert_lock_session_id(1)

    def test_try_lock_when_acquire_session_fails(self):
        # First call to acquire session fails, should not retry
        self.prepare_acquire_session(-1, HazelcastRuntimeError("server_error"))
        with self.assertRaises(HazelcastRuntimeError):
            self.proxy.try_lock()
        self.assert_call_counts(1, 0, 0)
        self.assert_no_lock_session_id()

    def test_try_lock_when_server_closes_old_session(self):
        # Same thread issues a new lock call while holding a lock.
        # Server closes session related to the first lock, should not retry
        self.prepare_acquire_session(2)
        self.prepare_lock_session_ids(1)
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.try_lock()
        self.assert_call_counts(1, 1, 0)
        self.assert_no_lock_session_id()

    def test_try_lock_when_lock_acquire_limit_reached(self):
        # Lock acquire limit is reached, server returns invalid fence
        self.prepare_acquire_session(1)
        self.mock_request_try_lock(FencedLock.INVALID_FENCE)
        self.assertEqual(FencedLock.INVALID_FENCE, self.proxy.try_lock())
        self.assert_call_counts(1, 1, 0)
        self.assert_no_lock_session_id()

    def test_try_lock_on_session_expired_error(self):
        # Session expired error comes from the server on lock request,
        # client determines the timeout and returns invalid fence
        self.prepare_acquire_session(1)
        self.mock_request_try_lock(2, SessionExpiredError())
        self.assertEqual(FencedLock.INVALID_FENCE, self.proxy.try_lock())
        self.assert_call_counts(1, 0, 1)
        self.assert_no_lock_session_id()

    def test_try_lock_on_session_expired_error_when_not_timed_out(self):
        # Session expired error comes from the server on lock request,
        # client retries due to not reaching timeout and succeeds
        self.prepare_acquire_session(1)
        self.mock_request_try_lock(2, SessionExpiredError())
        self.assertEqual(2, self.proxy.try_lock(100))
        self.assert_call_counts(2, 0, 1)
        self.assert_lock_session_id(1)

    def test_try_lock_on_session_expired_error_on_reentrant_lock_request(self):
        # Session expired error comes from the server on second lock request,
        # while holding a lock, should not retry
        self.prepare_acquire_session(1)
        self.prepare_lock_session_ids(1)
        self.mock_request_try_lock(3, SessionExpiredError())
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.try_lock()
        self.assert_call_counts(1, 0, 1)
        self.assert_no_lock_session_id()

    def test_try_lock_on_wait_key_cancelled_error(self):
        # Wait key cancelled error comes from the server, invalid fence is returned
        self.prepare_acquire_session(1)
        self.mock_request_try_lock(2, WaitKeyCancelledError())
        self.assertEqual(FencedLock.INVALID_FENCE, self.proxy.try_lock())
        self.assert_call_counts(1, 1, 0)
        self.assert_no_lock_session_id()

    def test_try_lock_on_unspecified_error(self):
        # Server sends another error, should not retry
        self.prepare_acquire_session(1)
        self.mock_request_try_lock(-1, HazelcastRuntimeError("expected"))
        with self.assertRaises(HazelcastRuntimeError):
            self.proxy.try_lock()
        self.assert_call_counts(1, 1, 0)
        self.assert_no_lock_session_id()

    def test_unlock(self):
        # Everything succeeds
        self.prepare_get_session(2)
        self.mock_request_unlock(True)
        self.proxy.unlock()
        self.assert_call_counts(0, 1, 0)
        self.assert_lock_session_id(2)  # Server sent true, client still holds the lock after unlock

    def test_unlock_when_server_closes_old_session(self):
        # Session id is different than what we store in the
        # dict. The old session must be closed while we were
        # holding the lock.
        self.prepare_get_session(2)
        self.prepare_lock_session_ids(1)
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.unlock()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_unlock_when_there_is_no_session(self):
        # No active session for the current thread.
        self.prepare_get_session(-1)
        with self.assertRaises(IllegalMonitorStateError):
            self.proxy.unlock()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_unlock_when_client_unlocked_the_locked(self):
        # After the unlock, lock is free
        self.prepare_get_session(1)
        self.mock_request_unlock(False)
        self.proxy.unlock()
        self.assert_call_counts(0, 1, 0)
        self.assert_no_lock_session_id()

    def test_unlock_on_session_expired_error(self):
        # Server sends session expired error
        self.prepare_get_session(1)
        self.mock_request_unlock(None, SessionExpiredError())
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.unlock()
        self.assert_call_counts(0, 0, 1)
        self.assert_no_lock_session_id()

    def test_unlock_on_illegal_monitor_state_error(self):
        # Lock is not held by the current thread, but client
        # thinks that it holds it and sends the request.
        # Server sends illegal monitor state error in response.
        self.prepare_get_session(1)
        self.mock_request_unlock(None, IllegalMonitorStateError())
        with self.assertRaises(IllegalMonitorStateError):
            self.proxy.unlock()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_unlock_on_unspecified_error(self):
        # Server sends an unspecified error
        self.prepare_get_session(1)
        self.mock_request_unlock(None, HazelcastRuntimeError())
        with self.assertRaises(HazelcastRuntimeError):
            self.proxy.unlock()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_is_locked(self):
        # Everything succeeds, client holds the lock
        self.prepare_get_session(2)
        state = self.prepare_state(3, 1, 2, thread_id())
        self.mock_request_get_lock_ownership_state(state)
        self.assertTrue(self.proxy.is_locked())
        self.assert_call_counts(0, 0, 0)
        self.assert_lock_session_id(2)

    def test_is_locked_when_it_is_locked_by_another_thread(self):
        # Client is not holding the lock, but someone else does.
        self.prepare_get_session(1)
        state = self.prepare_state(3, 1, 2, thread_id() - 1)
        self.mock_request_get_lock_ownership_state(state)
        self.assertTrue(self.proxy.is_locked())
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_is_locked_when_free(self):
        # No one holds the lock
        self.prepare_get_session(1)
        state = self.prepare_state(FencedLock.INVALID_FENCE, 0, -1, -1)
        self.mock_request_get_lock_ownership_state(state)
        self.assertFalse(self.proxy.is_locked())
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_is_locked_when_server_closes_old_session(self):
        # Session id is different than what we store in the
        # dict. The old session must be closed while we were
        # holding the lock.
        self.prepare_get_session(2)
        self.prepare_lock_session_ids(1)
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.is_locked()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_is_locked_when_server_returns_a_different_thread_id_for_lock_holder(self):
        # Client thinks that it holds the lock, but server
        # says it's not.
        self.prepare_get_session(1)
        self.prepare_lock_session_ids(1)
        state = self.prepare_state(3, 1, 2, thread_id() - 1)
        self.mock_request_get_lock_ownership_state(state)
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.is_locked()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_is_locked_on_unspecified_error(self):
        # Server sends an unspecified error
        self.prepare_get_session(1)
        self.mock_request_get_lock_ownership_state(None, HazelcastRuntimeError())
        with self.assertRaises(HazelcastRuntimeError):
            self.proxy.is_locked()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_is_locked_by_current_thread(self):
        # Everything succeeds, client holds the lock
        self.prepare_get_session(2)
        state = self.prepare_state(3, 1, 2, thread_id())
        self.mock_request_get_lock_ownership_state(state)
        self.assertTrue(self.proxy.is_locked_by_current_thread())
        self.assert_call_counts(0, 0, 0)
        self.assert_lock_session_id(2)

    def test_is_locked_by_current_thread_when_it_is_locked_by_another_thread(self):
        # Client is not holding the lock, but someone else does.
        self.prepare_get_session(1)
        state = self.prepare_state(3, 1, 2, thread_id() - 1)
        self.mock_request_get_lock_ownership_state(state)
        self.assertFalse(self.proxy.is_locked_by_current_thread())
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_is_locked_by_current_thread_when_free(self):
        # No one holds the lock
        self.prepare_get_session(1)
        state = self.prepare_state(FencedLock.INVALID_FENCE, 0, -1, -1)
        self.mock_request_get_lock_ownership_state(state)
        self.assertFalse(self.proxy.is_locked_by_current_thread())
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_is_locked_by_current_thread_when_server_closes_old_session(self):
        # Session id is different than what we store in the
        # dict. The old session must be closed while we were
        # holding the lock.
        self.prepare_get_session(2)
        self.prepare_lock_session_ids(1)
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.is_locked_by_current_thread()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_is_locked_by_current_thread_when_server_returns_a_different_thread_id_for_lock_holder(
        self,
    ):
        # Client thinks that it holds the lock, but server
        # says it's not.
        self.prepare_get_session(1)
        self.prepare_lock_session_ids(1)
        state = self.prepare_state(3, 1, 2, thread_id() - 1)
        self.mock_request_get_lock_ownership_state(state)
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.is_locked_by_current_thread()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_is_locked_by_current_thread_on_unspecified_error(self):
        # Server sends an unspecified error
        self.prepare_get_session(1)
        self.mock_request_get_lock_ownership_state(None, HazelcastRuntimeError())
        with self.assertRaises(HazelcastRuntimeError):
            self.proxy.is_locked_by_current_thread()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_get_lock_count(self):
        # Everything succeeds, client holds the lock
        self.prepare_get_session(2)
        state = self.prepare_state(3, 123, 2, thread_id())
        self.mock_request_get_lock_ownership_state(state)
        self.assertEqual(123, self.proxy.get_lock_count())
        self.assert_call_counts(0, 0, 0)
        self.assert_lock_session_id(2)

    def test_get_lock_count_when_it_is_locked_by_another_thread(self):
        # Client is not holding the lock, but someone else does.
        self.prepare_get_session(1)
        state = self.prepare_state(3, 1, 2, thread_id() - 1)
        self.mock_request_get_lock_ownership_state(state)
        self.assertEqual(1, self.proxy.get_lock_count())
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_get_lock_count_when_free(self):
        # No one holds the lock
        self.prepare_get_session(1)
        state = self.prepare_state(FencedLock.INVALID_FENCE, 0, -1, -1)
        self.mock_request_get_lock_ownership_state(state)
        self.assertEqual(0, self.proxy.get_lock_count())
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_get_lock_count_when_server_closes_old_session(self):
        # Session id is different than what we store in the
        # dict. The old session must be closed while we were
        # holding the lock.
        self.prepare_get_session(2)
        self.prepare_lock_session_ids(1)
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.get_lock_count()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_get_lock_count_when_server_returns_a_different_thread_id_for_lock_holder(self):
        # Client thinks that it holds the lock, but server
        # says it's not.
        self.prepare_get_session(1)
        self.prepare_lock_session_ids(1)
        state = self.prepare_state(3, 1, 2, thread_id() - 1)
        self.mock_request_get_lock_ownership_state(state)
        with self.assertRaises(LockOwnershipLostError):
            self.proxy.get_lock_count()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def test_get_lock_count_on_unspecified_error(self):
        # Server sends an unspecified error
        self.prepare_get_session(1)
        self.mock_request_get_lock_ownership_state(None, HazelcastRuntimeError())
        with self.assertRaises(HazelcastRuntimeError):
            self.proxy.get_lock_count()
        self.assert_call_counts(0, 0, 0)
        self.assert_no_lock_session_id()

    def prepare_lock_session_ids(self, session_id):
        # Pretend the current thread already holds the lock under
        # *session_id* by seeding the proxy's bookkeeping dict.
        self.proxy._wrapped._lock_session_ids[thread_id()] = session_id

    def prepare_acquire_session(self, session_id, err=None):
        # Make acquire_session resolve to *session_id* (or fail with *err*)
        # and replace all three session-manager mocks so call counts start
        # from zero for the test body.
        if err:
            val = ImmediateExceptionFuture(err)
        else:
            val = ImmediateFuture(session_id)

        acquire_mock = MagicMock(return_value=val)
        release_mock = MagicMock()
        invalidate_mock = MagicMock()
        self.session_manager.acquire_session = acquire_mock
        self.session_manager.release_session = release_mock
        self.session_manager.invalidate_session = invalidate_mock
        self.acquire_session = acquire_mock
        self.release_session = release_mock
        self.invalidate_session = invalidate_mock

    def prepare_get_session(self, session_id):
        # Make get_session_id report *session_id* for any thread.
        self.session_manager.get_session_id = MagicMock(return_value=session_id)

    def mock_request_lock(self, fence, first_call_err=None):
        self._mock_request("_request_lock", fence, first_call_err)

    def mock_request_unlock(self, result, first_call_err=None):
        self._mock_request("_request_unlock", result, first_call_err)

    def mock_request_try_lock(self, fence, first_call_err=None):
        self._mock_request("_request_try_lock", fence, first_call_err)

    def mock_request_get_lock_ownership_state(self, state, first_call_err=None):
        self._mock_request("_request_get_lock_ownership_state", state, first_call_err)

    def _mock_request(self, method_name, result, first_call_err):
        # Replace the named request method: the first invocation fails with
        # *first_call_err* if given; every (other) invocation resolves to
        # *result*.  Used to exercise the proxy's retry paths.
        called = AtomicInteger()

        def mock(*_, **__):
            if called.get_and_increment() == 0 and first_call_err:
                return ImmediateExceptionFuture(first_call_err)
            return ImmediateFuture(result)

        setattr(self.proxy._wrapped, method_name, MagicMock(side_effect=mock))

    def assert_call_counts(self, acquire, release, invalidate):
        # Expected invocation counts of the session-manager entry points.
        self.assertEqual(acquire, self.acquire_session.call_count)
        self.assertEqual(release, self.release_session.call_count)
        self.assertEqual(invalidate, self.invalidate_session.call_count)

    def prepare_state(self, fence, lock_count, session_id, t_id):
        # Build a lock-ownership-state response as returned by the server.
        return {
            "fence": fence,
            "lock_count": lock_count,
            "session_id": session_id,
            "thread_id": t_id,
        }

    def assert_no_lock_session_id(self):
        # No thread should be recorded as holding the lock.
        self.assertEqual(0, len(self.proxy._wrapped._lock_session_ids))

    def assert_lock_session_id(self, session_id):
        # The current thread should be recorded with *session_id*.
        s_id = self.proxy._wrapped._lock_session_ids.get(thread_id(), None)
        self.assertEqual(session_id, s_id)
|
|
from __future__ import absolute_import
import six
from bson import DBRef, SON
from .base import (BaseDict, BaseList, TopLevelDocumentMetaclass, get_document)
from .fields import (ReferenceField, ListField, DictField, MapField)
from .connection import get_db
from .queryset import QuerySet
from .document import Document
class DeReference(object):
    def __call__(self, items, max_depth=1, instance=None, name=None):
        """
        Cheaply dereferences the items to a set depth.
        Also handles the conversion of complex data types.

        :param items: The iterable (dict, list, queryset) to be dereferenced.
        :param max_depth: The maximum depth to recurse to
        :param instance: The owning instance used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        :param name: The name of the field, used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        :param get: A boolean determining if being called by __get__
        """
        # Strings/bytes and None pass straight through untouched.
        if items is None or isinstance(
                items, six.string_types + (six.binary_type,)):
            return items

        # cheapest way to convert a queryset to a list
        # list(queryset) uses a count() query to determine length
        if isinstance(items, QuerySet):
            items = [i for i in items]

        self.max_depth = max_depth
        doc_type = None

        if instance and instance._fields:
            doc_type = instance._fields[name].field

            if isinstance(doc_type, ReferenceField):
                doc_type = doc_type.document_type
                # Already fully dereferenced: every item is a concrete
                # document of the expected type, nothing to fetch.
                if all([i.__class__ == doc_type for i in items]):
                    return items

        # Two passes: collect every DBRef, bulk-fetch them, then attach
        # the fetched documents back into the structure.
        self.reference_map = self._find_references(items)
        self.object_map = self._fetch_objects(doc_type=doc_type)
        return self._attach_objects(items, 0, instance, name)
    def _find_references(self, items, depth=0):
        """
        Recursively finds all db references to be dereferenced.

        Returns a mapping of {collection or document class: [ids]} for
        every DBRef / generic-reference dict found up to ``max_depth``.

        :param items: The iterable (dict, list, queryset)
        :param depth: The current depth of recursion
        """
        reference_map = {}
        if not items or depth >= self.max_depth:
            return reference_map

        # Determine the iterator to use
        if isinstance(items, dict):
            iterator = six.itervalues(items)
        else:
            iterator = items

        # Recursively find dbreferences
        depth += 1
        for item in iterator:
            if hasattr(item, '_fields'):
                # A document instance: inspect each of its raw field values.
                for field_name, field in six.iteritems(item._fields):
                    v = item._data.get(field_name, None)
                    if isinstance(v, (DBRef)):
                        reference_map.setdefault(field.document_type, []) \
                            .append(v.id)
                    elif isinstance(v, (dict, SON)) and '_ref' in v:
                        # Generic reference stored as {'_cls': ..., '_ref': DBRef}.
                        reference_map.setdefault(get_document(v['_cls']), []) \
                            .append(v['_ref'].id)
                    elif isinstance(v, (dict, list, tuple)) and \
                            depth <= self.max_depth:
                        field_cls = getattr(getattr(field, 'field', None),
                                            'document_type', None)
                        references = self._find_references(v, depth)
                        for key, refs in six.iteritems(references):
                            if isinstance(
                                    field_cls,
                                    (Document, TopLevelDocumentMetaclass)):
                                key = field_cls
                            reference_map.setdefault(key, []).extend(refs)
            elif isinstance(item, (DBRef)):
                reference_map.setdefault(item.collection, []).append(item.id)
            elif isinstance(item, (dict, SON)) and '_ref' in item:
                reference_map.setdefault(get_document(item['_cls']), []) \
                    .append(item['_ref'].id)
            elif isinstance(item, (dict, list, tuple)) and \
                    depth - 1 <= self.max_depth:
                references = self._find_references(item, depth - 1)
                for key, refs in six.iteritems(references):
                    reference_map.setdefault(key, []).extend(refs)

        return reference_map
    def _fetch_objects(self, doc_type=None):
        """Fetch all references and convert to their document objects.

        Returns a mapping of {document id: document} built from
        ``self.reference_map``.
        """
        object_map = {}
        for col, dbrefs in six.iteritems(self.reference_map):
            # NOTE(review): object_map is keyed by document ids while this
            # dedup check compares str(dbref) against those keys, so they
            # rarely match — cross-collection dedup may be ineffective.
            # Verify against upstream before changing; _attach_objects
            # depends on the id-keyed mapping.
            keys = set(object_map.keys())
            refs = list({dbref for dbref in dbrefs if str(dbref) not in keys})
            if hasattr(col, 'objects'):  # We have a document class for the refs
                references = col.objects.in_bulk(refs)
                for key, doc in six.iteritems(references):
                    object_map[key] = doc
            else:  # Generic reference: use the refs data to convert to document
                if doc_type and \
                        not isinstance(doc_type, (ListField, DictField, MapField)):  # noqa
                    references = doc_type._get_db()[col].find(
                        {'_id': {'$in': refs}})
                    for ref in references:
                        doc = doc_type._from_son(ref)
                        object_map[doc.id] = doc
                else:
                    references = get_db()[col].find({'_id': {'$in': refs}})
                    for ref in references:
                        if '_cls' in ref:
                            # Raw data carries its class name; rebuild the
                            # concrete document type.
                            doc = get_document(ref["_cls"])._from_son(ref)
                        else:
                            doc = doc_type._from_son(ref)
                        object_map[doc.id] = doc
        return object_map
    def _attach_objects(self, items, depth=0, instance=None, name=None):
        """
        Recursively finds all db references to be dereferenced

        :param items: The iterable (dict, list, queryset)
        :param depth: The current depth of recursion
        :param instance: The owning instance used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        :param name: The name of the field, used for tracking changes by
            :class:`~mongoengine.base.ComplexBaseField`
        """
        if not items:
            # Empty input: return it as-is if already a tracked container,
            # otherwise wrap it in an instance-bound container when possible.
            if isinstance(items, (BaseDict, BaseList)):
                return items

            if instance:
                if isinstance(items, dict):
                    return BaseDict(items, instance, name)
                else:
                    return BaseList(items, instance, name)

        if isinstance(items, (dict, SON)):
            if '_ref' in items:
                # Raw generic-reference SON: swap in the fetched document,
                # falling back to the raw data if it was not fetched.
                return self.object_map.get(items['_ref'].id, items)
            elif '_cls' in items:
                # Embedded document SON: materialize it, then dereference
                # its own fields in place.
                doc = get_document(items['_cls'])._from_son(items)
                doc._data = self._attach_objects(doc._data, depth, doc, name)
                return doc

        # Normalize iteration over dicts and sequences into one loop.
        if isinstance(items, dict):
            is_list = False
            iterator = six.iteritems(items)
            data = {}
        else:
            is_list = True
            iterator = enumerate(items)
            data = []

        depth += 1

        for k, v in iterator:
            if is_list:
                data.append(v)
            else:
                data[k] = v

            if k in self.object_map:
                data[k] = self.object_map[k]
            elif hasattr(v, '_fields'):
                # v is a document instance: dereference each of its fields.
                # NOTE(review): `v` is deliberately rebound to the field
                # value inside this loop.
                for field_name in six.iterkeys(v._fields):
                    v = data[k]._data.get(field_name, None)
                    if isinstance(v, (DBRef)):
                        data[k]._data[field_name] = self.object_map.get(v.id, v)
                    elif isinstance(v, (dict, SON)) and '_ref' in v:
                        data[k]._data[field_name] = \
                            self.object_map.get(v['_ref'].id, v)
                    elif isinstance(v, dict) and depth <= self.max_depth:
                        data[k]._data[field_name] = self._attach_objects(
                            v, depth, instance=instance, name=name)
                    elif isinstance(v, (list, tuple)) and \
                            depth <= self.max_depth:
                        data[k]._data[field_name] = self._attach_objects(
                            v, depth, instance=instance, name=name)
            elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
                data[k] = self._attach_objects(
                    v, depth - 1, instance=instance, name=name)
            elif hasattr(v, 'id'):
                data[k] = self.object_map.get(v.id, v)

        if instance and name:
            if is_list:
                return BaseList(data, instance, name)
            return BaseDict(data, instance, name)
        # NOTE(review): this increment has no effect on the returned value;
        # it appears to be vestigial.
        depth += 1
        return data
|
|
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking EcmaScript files for common style guide violations.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errorrules
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokens
from closure_linter import javascripttokenizer
from closure_linter import statetracker
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import htmlutil
from closure_linter.common import lintrunner
from closure_linter.common import position
from closure_linter.common import tokens
import gflags as flags
FLAGS = flags.FLAGS
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')

# TODO(robbyw): Check for extra parens on return statements
# TODO(robbyw): Check for 0px in strings
# TODO(robbyw): Ensure inline jsDoc is in {}
# TODO(robbyw): Check for valid JS types in parameter docs

# Shorthand aliases for frequently used classes from the supporting modules.
Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase):
  """EmcaScript lint style checking rules.

  Can be used to find common style errors in JavaScript, ActionScript and other
  Ecma like scripting languages. Style checkers for Ecma scripting languages
  should inherit from this style checker.
  Please do not add any state to EcmaScriptLintRules or to any subclasses.

  All state should be added to the StateTracker subclass used for a particular
  language.
  """

  # It will be initialized in constructor so the flags are initialized.
  max_line_length = -1

  # Static constants.
  # A comma immediately followed by a non-space, e.g. "f(a,b)".
  MISSING_PARAMETER_SPACE = re.compile(r',\S')

  # A space just inside a parenthesis, e.g. "( x" or "x )".
  EXTRA_SPACE = re.compile('(\(\s|\s\))')

  # Trailing whitespace at the end of a string.
  ENDS_WITH_SPACE = re.compile('\s$')

  # Any tab character.
  ILLEGAL_TAB = re.compile(r'\t')

  # Regex used to split up complex types to check for invalid use of ? and |.
  TYPE_SPLIT = re.compile(r'[,<>()]')

  # Regex for form of author lines after the @author tag.
  AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')

  # Acceptable tokens to remove for line too long testing.
  LONG_LINE_IGNORE = frozenset(['*', '//', '@see'] +
      ['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])

  # JsDoc flags that are considered complete without a trailing description.
  JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
      '@param', '@return', '@returns'])
def __init__(self):
"""Initialize this lint rule object."""
checkerbase.LintRulesBase.__init__(self)
if EcmaScriptLintRules.max_line_length == -1:
EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initialize this lint rule object before parsing a new file."""
checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
is_html)
self._indentation = indentation.IndentationRules()
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a @param tag."""
raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
def _CheckLineLength(self, last_token, state):
"""Checks whether the line is too long.
Args:
last_token: The last token in the line.
"""
# Start from the last token so that we have the flag object attached to
# and DOC_FLAG tokens.
line_number = last_token.line_number
token = last_token
# Build a representation of the string where spaces indicate potential
# line-break locations.
line = []
while token and token.line_number == line_number:
if state.IsTypeToken(token):
line.insert(0, 'x' * len(token.string))
elif token.type in (Type.IDENTIFIER, Type.NORMAL):
# Dots are acceptable places to wrap.
line.insert(0, token.string.replace('.', ' '))
else:
line.insert(0, token.string)
token = token.previous
line = ''.join(line)
line = line.rstrip('\n\r\f')
try:
length = len(unicode(line, 'utf-8'))
except:
# Unknown encoding. The line length may be wrong, as was originally the
# case for utf-8 (see bug 1735846). For now just accept the default
# length, but as we find problems we can either add test for other
# possible encodings or return without an error to protect against
# false positives at the cost of more false negatives.
length = len(line)
if length > EcmaScriptLintRules.max_line_length:
# If the line matches one of the exceptions, then it's ok.
for long_line_regexp in self.GetLongLineExceptions():
if long_line_regexp.match(last_token.line):
return
# If the line consists of only one "word", or multiple words but all
# except one are ignoreable, then it's ok.
parts = set(line.split())
# We allow two "words" (type and name) when the line contains @param
max = 1
if '@param' in parts:
max = 2
# Custom tags like @requires may have url like descriptions, so ignore
# the tag, similar to how we handle @see.
custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags)) > max):
self._HandleError(errors.LINE_TOO_LONG,
'Line too long (%d characters).' % len(line), last_token)
def _CheckJsDocType(self, token):
"""Checks the given type for style errors.
Args:
token: The DOC_FLAG token for the flag whose type to check.
"""
flag = token.attached_object
type = flag.type
if type and type is not None and not type.isspace():
pieces = self.TYPE_SPLIT.split(type)
if len(pieces) == 1 and type.count('|') == 1 and (
type.endswith('|null') or type.startswith('null|')):
self._HandleError(errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
'Prefer "?Type" to "Type|null": "%s"' % type, token)
for p in pieces:
if p.count('|') and p.count('?'):
# TODO(robbyw): We should do actual parsing of JsDoc types. As is,
# this won't report an error for {number|Array.<string>?}, etc.
self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
'JsDoc types cannot contain both "?" and "|": "%s"' % p, token)
if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token)
def _CheckForMissingSpaceBeforeToken(self, token):
"""Checks for a missing space at the beginning of a token.
Reports a MISSING_SPACE error if the token does not begin with a space or
the previous token doesn't end with a space and the previous token is on the
same line as the token.
Args:
token: The token being checked
"""
# TODO(user): Check if too many spaces?
if (len(token.string) == len(token.string.lstrip()) and
token.previous and token.line_number == token.previous.line_number and
len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
self._HandleError(
errors.MISSING_SPACE,
'Missing space before "%s"' % token.string,
token,
Position.AtBeginning())
def _ExpectSpaceBeforeOperator(self, token):
"""Returns whether a space should appear before the given operator token.
Args:
token: The operator token.
Returns:
Whether there should be a space before the token.
"""
if token.string == ',' or token.metadata.IsUnaryPostOperator():
return False
# Colons should appear in labels, object literals, the case of a switch
# statement, and ternary operator. Only want a space in the case of the
# ternary operator.
if (token.string == ':' and
token.metadata.context.type in (Context.LITERAL_ELEMENT,
Context.CASE_BLOCK,
Context.STATEMENT)):
return False
if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
return False
return True
  def CheckToken(self, token, state):
    """Checks a token, given the current parser_state, for warnings and errors.

    Dispatches on the token's type through one large if/elif chain, then runs
    two orthogonal checks (comment tabs/trailing space, implied semicolons)
    that apply regardless of the branch taken above.

    Args:
      token: The current token under consideration
      state: parser_state object that indicates the current state in the page
    """
    # Store some convenience variables
    first_in_line = token.IsFirstInLine()
    last_in_line = token.IsLastInLine()
    last_non_space_token = state.GetLastNonSpaceToken()
    # NOTE(review): `type` shadows the builtin; kept as-is to avoid churn.
    type = token.type

    # Process the line change.
    if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
      # TODO(robbyw): Support checking indentation in HTML files.
      indentation_errors = self._indentation.CheckToken(token, state)
      for indentation_error in indentation_errors:
        self._HandleError(*indentation_error)

    if last_in_line:
      self._CheckLineLength(token, state)

    if type == Type.PARAMETERS:
      # Find missing spaces in parameter lists.
      if self.MISSING_PARAMETER_SPACE.search(token.string):
        self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
            token)

      # Find extra spaces at the beginning of parameter lists.  Make sure
      # we aren't at the beginning of a continuing multi-line list.
      if not first_in_line:
        space_count = len(token.string) - len(token.string.lstrip())
        if space_count:
          self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
              token, Position(0, space_count))

    elif (type == Type.START_BLOCK and
          token.metadata.context.type == Context.BLOCK):
      self._CheckForMissingSpaceBeforeToken(token)

    elif type == Type.END_BLOCK:
      # This check is for object literal end block tokens, but there is no need
      # to test that condition since a comma at the end of any other kind of
      # block is undoubtedly a parse error.
      last_code = token.metadata.last_code
      if last_code.IsOperator(','):
        self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
            'Illegal comma at end of object literal', last_code,
            Position.All(last_code.string))

      if state.InFunction() and state.IsFunctionClose():
        is_immediately_called = (token.next and
                                 token.next.type == Type.START_PAREN)
        if state.InTopLevelFunction():
          # A semicolons should not be included at the end of a function
          # declaration.
          if not state.InAssignedFunction():
            if not last_in_line and token.next.type == Type.SEMICOLON:
              self._HandleError(errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                  'Illegal semicolon after function declaration',
                  token.next, Position.All(token.next.string))

        # A semicolon should be included at the end of a function expression
        # that is not immediately called.
        if state.InAssignedFunction():
          if not is_immediately_called and (last_in_line or
              not token.next.type == Type.SEMICOLON):
            self._HandleError(errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                'Missing semicolon after function assigned to a variable',
                token, Position.AtEnd(token.string))

        if (state.InInterfaceMethod() and last_code.type != Type.START_BLOCK):
          self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
              'Interface methods cannot contain code', last_code)

      elif (state.IsBlockClose() and
            token.next and token.next.type == Type.SEMICOLON):
        if (not last_code.metadata.context.parent.type == Context.OBJECT_LITERAL
            and not last_code.metadata.context.type == Context.OBJECT_LITERAL):
          self._HandleError(errors.REDUNDANT_SEMICOLON,
              'No semicolon is required to end a code block',
              token.next, Position.All(token.next.string))

    elif type == Type.SEMICOLON:
      if token.previous and token.previous.type == Type.WHITESPACE:
        self._HandleError(errors.EXTRA_SPACE, 'Extra space before ";"',
            token.previous, Position.All(token.previous.string))

      if token.next and token.next.line_number == token.line_number:
        if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
          # TODO(robbyw): Error about no multi-statement lines.
          pass
        elif token.next.type not in (
            Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
          self._HandleError(errors.MISSING_SPACE,
              'Missing space after ";" in for statement',
              token.next,
              Position.AtBeginning())

      last_code = token.metadata.last_code
      if last_code and last_code.type == Type.SEMICOLON:
        # Allow a single double semi colon in for loops for cases like:
        # for (;;) { }.
        # NOTE(user): This is not a perfect check, and will not throw an error
        # for cases like: for (var i = 0;; i < n; i++) {}, but then your code
        # probably won't work either.
        for_token = tokenutil.CustomSearch(last_code,
            lambda token: token.type == Type.KEYWORD and token.string == 'for',
            end_func=lambda token: token.type == Type.SEMICOLON,
            distance=None,
            reverse=True)

        if not for_token:
          self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
              token, Position.All(token.string))

    elif type == Type.START_PAREN:
      if token.previous and token.previous.type == Type.KEYWORD:
        self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
            token, Position.AtBeginning())
      elif token.previous and token.previous.type == Type.WHITESPACE:
        before_space = token.previous.previous
        if (before_space and before_space.line_number == token.line_number and
            before_space.type == Type.IDENTIFIER):
          self._HandleError(errors.EXTRA_SPACE, 'Extra space before "("',
              token.previous, Position.All(token.previous.string))

    elif type == Type.START_BRACKET:
      self._HandleStartBracket(token, last_non_space_token)
    elif type in (Type.END_PAREN, Type.END_BRACKET):
      # Ensure there is no space before closing parentheses, except when
      # it's in a for statement with an omitted section, or when it's at the
      # beginning of a line.
      if (token.previous and token.previous.type == Type.WHITESPACE and
          not token.previous.IsFirstInLine() and
          not (last_non_space_token and last_non_space_token.line_number ==
               token.line_number and
               last_non_space_token.type == Type.SEMICOLON)):
        self._HandleError(errors.EXTRA_SPACE, 'Extra space before "%s"' %
            token.string, token.previous, Position.All(token.previous.string))

      if token.type == Type.END_BRACKET:
        last_code = token.metadata.last_code
        if last_code.IsOperator(','):
          self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
              'Illegal comma at end of array literal', last_code,
              Position.All(last_code.string))

    elif type == Type.WHITESPACE:
      if self.ILLEGAL_TAB.search(token.string):
        if token.IsFirstInLine():
          if token.next:
            self._HandleError(errors.ILLEGAL_TAB,
                'Illegal tab in whitespace before "%s"' % token.next.string,
                token, Position.All(token.string))
          else:
            self._HandleError(errors.ILLEGAL_TAB,
                'Illegal tab in whitespace',
                token, Position.All(token.string))
        else:
          self._HandleError(errors.ILLEGAL_TAB,
              'Illegal tab in whitespace after "%s"' % token.previous.string,
              token, Position.All(token.string))

      # Check whitespace length if it's not the first token of the line and
      # if it's not immediately before a comment.
      if last_in_line:
        # Check for extra whitespace at the end of a line.
        self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
            token, Position.All(token.string))
      elif not first_in_line and not token.next.IsComment():
        if token.length > 1:
          self._HandleError(errors.EXTRA_SPACE, 'Extra space after "%s"' %
              token.previous.string, token,
              Position(1, len(token.string) - 1))

    elif type == Type.OPERATOR:
      last_code = token.metadata.last_code

      if not self._ExpectSpaceBeforeOperator(token):
        if (token.previous and token.previous.type == Type.WHITESPACE and
            last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)):
          self._HandleError(errors.EXTRA_SPACE,
              'Extra space before "%s"' % token.string, token.previous,
              Position.All(token.previous.string))

      elif (token.previous and
            not token.previous.IsComment() and
            token.previous.type in Type.EXPRESSION_ENDER_TYPES):
        self._HandleError(errors.MISSING_SPACE,
            'Missing space before "%s"' % token.string, token,
            Position.AtBeginning())

      # Check that binary operators are not used to start lines.
      if ((not last_code or last_code.line_number != token.line_number) and
          not token.metadata.IsUnaryOperator()):
        self._HandleError(errors.LINE_STARTS_WITH_OPERATOR,
            'Binary operator should go on previous line "%s"' % token.string,
            token)

    elif type == Type.DOC_FLAG:
      flag = token.attached_object

      if flag.flag_type == 'bug':
        # TODO(robbyw): Check for exactly 1 space on the left.
        string = token.next.string.lstrip()
        string = string.split(' ', 1)[0]

        if not string.isdigit():
          self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
              '@bug should be followed by a bug number', token)

      elif flag.flag_type == 'suppress':
        if flag.type is None:
          # A syntactically invalid suppress tag will get tokenized as a normal
          # flag, indicating an error.
          self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,
              'Invalid suppress syntax: should be @suppress {errortype}. '
              'Spaces matter.', token)
        else:
          for suppress_type in flag.type.split('|'):
            if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
              self._HandleError(errors.INVALID_SUPPRESS_TYPE,
                  'Invalid suppression type: %s' % suppress_type,
                  token)

      elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
            flag.flag_type == 'author'):
        # TODO(user): In non strict mode check the author tag for as much as
        # it exists, though the full form checked below isn't required.
        string = token.next.string
        result = self.AUTHOR_SPEC.match(string)
        if not result:
          self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
              'Author tag line should be of the form: '
              '@author foo@somewhere.com (Your Name)',
              token.next)
        else:
          # Check spacing between email address and name. Do this before
          # checking earlier spacing so positions are easier to calculate for
          # autofixing.
          num_spaces = len(result.group(2))
          if num_spaces < 1:
            self._HandleError(errors.MISSING_SPACE,
                'Missing space after email address',
                token.next, Position(result.start(2), 0))
          elif num_spaces > 1:
            self._HandleError(errors.EXTRA_SPACE,
                'Extra space after email address',
                token.next,
                Position(result.start(2) + 1, num_spaces - 1))

          # Check for extra spaces before email address. Can't be too few, if
          # not at least one we wouldn't match @author tag.
          num_spaces = len(result.group(1))
          if num_spaces > 1:
            self._HandleError(errors.EXTRA_SPACE,
                'Extra space before email address',
                token.next, Position(1, num_spaces - 1))

      elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
            not self._limited_doc_checks):
        if flag.flag_type == 'param':
          if flag.name is None:
            self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
                'Missing name in @param tag', token)

        if not flag.description or flag.description is None:
          flag_name = token.type
          if 'name' in token.values:
            flag_name = '@' + token.values['name']
          if not flag_name in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
            self._HandleError(errors.MISSING_JSDOC_TAG_DESCRIPTION,
                'Missing description in %s tag' % flag_name, token)
        else:
          self._CheckForMissingSpaceBeforeToken(flag.description_start_token)

      # Applies to every DOC_FLAG, regardless of which branch ran above.
      if flag.flag_type in state.GetDocFlag().HAS_TYPE:
        if flag.type_start_token is not None:
          self._CheckForMissingSpaceBeforeToken(
              token.attached_object.type_start_token)

        if flag.type and flag.type != '' and not flag.type.isspace():
          self._CheckJsDocType(token)

    # New if-chain: DOC_FLAG falls through to here as well as DOC_INLINE_FLAG.
    if type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
      if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
          token.values['name'] not in FLAGS.custom_jsdoc_tags):
        self._HandleError(errors.INVALID_JSDOC_TAG,
            'Invalid JsDoc tag: %s' % token.values['name'], token)

      if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
          token.values['name'] == 'inheritDoc' and
          type == Type.DOC_INLINE_FLAG):
        self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
            'Unnecessary braces around @inheritDoc',
            token)

    elif type == Type.SIMPLE_LVALUE:
      identifier = token.values['identifier']

      if ((not state.InFunction() or state.InConstructor()) and
          state.InTopLevel() and not state.InObjectLiteralDescendant()):
        jsdoc = state.GetDocComment()
        if not state.HasDocComment(identifier):
          # Only test for documentation on identifiers with .s in them to
          # avoid checking things like simple variables. We don't require
          # documenting assignments to .prototype itself (bug 1880803).
          if (not state.InConstructor() and
              identifier.find('.') != -1 and not
              identifier.endswith('.prototype') and not
              self._limited_doc_checks):
            comment = state.GetLastComment()
            if not (comment and comment.lower().count('jsdoc inherited')):
              self._HandleError(errors.MISSING_MEMBER_DOCUMENTATION,
                  "No docs found for member '%s'" % identifier,
                  token);
        elif jsdoc and (not state.InConstructor() or
                        identifier.startswith('this.')):
          # We are at the top level and the function/member is documented.
          if identifier.endswith('_') and not identifier.endswith('__'):
            # Can have a private class which inherits documentation from a
            # public superclass.
            #
            # @inheritDoc is deprecated in favor of using @override, and they
            if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
                and not ('accessControls' in jsdoc.suppressions)):
              self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,
                  '%s should not override a private member.' % identifier,
                  jsdoc.GetFlag('override').flag_token)
            if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
                and not ('accessControls' in jsdoc.suppressions)):
              self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,
                  '%s should not inherit from a private member.' % identifier,
                  jsdoc.GetFlag('inheritDoc').flag_token)
            if (not jsdoc.HasFlag('private') and
                not ('underscore' in jsdoc.suppressions) and not
                ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
                 ('accessControls' in jsdoc.suppressions))):
              self._HandleError(errors.MISSING_PRIVATE,
                  'Member "%s" must have @private JsDoc.' %
                  identifier, token)
            if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
              self._HandleError(errors.UNNECESSARY_SUPPRESS,
                  '@suppress {underscore} is not necessary with @private',
                  jsdoc.suppressions['underscore'])
          elif (jsdoc.HasFlag('private') and
                not self.InExplicitlyTypedLanguage()):
            # It is convention to hide public fields in some ECMA
            # implementations from documentation using the @private tag.
            self._HandleError(errors.EXTRA_PRIVATE,
                'Member "%s" must not have @private JsDoc' %
                identifier, token)

          # These flags are only legal on localizable message definitions;
          # such variables always begin with the prefix MSG_.
          for f in ('desc', 'hidden', 'meaning'):
            if (jsdoc.HasFlag(f)
                and not identifier.startswith('MSG_')
                and identifier.find('.MSG_') == -1):
              self._HandleError(errors.INVALID_USE_OF_DESC_TAG,
                  'Member "%s" should not have @%s JsDoc' % (identifier, f),
                  token)

      # Check for illegaly assigning live objects as prototype property values.
      index = identifier.find('.prototype.')
      # Ignore anything with additional .s after the prototype.
      if index != -1 and identifier.find('.', index + 11) == -1:
        equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
        next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
        if next_code and (
            next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
            next_code.IsOperator('new')):
          self._HandleError(errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
              'Member %s cannot have a non-primitive value' % identifier,
              token)

    elif type == Type.END_PARAMETERS:
      # Find extra space at the end of parameter lists.  We check the token
      # prior to the current one when it is a closing paren.
      if (token.previous and token.previous.type == Type.PARAMETERS
          and self.ENDS_WITH_SPACE.search(token.previous.string)):
        self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
            token.previous)

      jsdoc = state.GetDocComment()
      if state.GetFunction().is_interface:
        if token.previous and token.previous.type == Type.PARAMETERS:
          self._HandleError(errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
              'Interface constructor cannot have parameters',
              token.previous)
      elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
            and not jsdoc.InheritsDocumentation()
            and not state.InObjectLiteralDescendant() and not
            jsdoc.IsInvalidated()):
        # Compare documented params against the actual parameter list via an
        # edit script: I=insert, D=delete, S=substitute, other=equal.
        distance, edit = jsdoc.CompareParameters(state.GetParams())
        if distance:
          params_iter = iter(state.GetParams())
          docs_iter = iter(jsdoc.ordered_params)

          for op in edit:
            if op == 'I':
              # Insertion.
              # Parsing doc comments is the same for all languages
              # but some languages care about parameters that don't have
              # doc comments and some languages don't care.
              # Languages that don't allow variables to by typed such as
              # JavaScript care but languages such as ActionScript or Java
              # that allow variables to be typed don't care.
              if not self._limited_doc_checks:
                self.HandleMissingParameterDoc(token, params_iter.next())

            elif op == 'D':
              # Deletion
              self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
                  'Found docs for non-existing parameter: "%s"' %
                  docs_iter.next(), token)
            elif op == 'S':
              # Substitution
              if not self._limited_doc_checks:
                self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,
                    'Parameter mismatch: got "%s", expected "%s"' %
                    (params_iter.next(), docs_iter.next()), token)

            else:
              # Equality - just advance the iterators
              params_iter.next()
              docs_iter.next()

    elif type == Type.STRING_TEXT:
      # If this is the first token after the start of the string, but it's at
      # the end of a line, we know we have a multi-line string.
      if token.previous.type in (Type.SINGLE_QUOTE_STRING_START,
          Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
        self._HandleError(errors.MULTI_LINE_STRING,
            'Multi-line strings are not allowed', token)

    # This check is orthogonal to the ones above, and repeats some types, so
    # it is a plain if and not an elif.
    if token.type in Type.COMMENT_TYPES:
      if self.ILLEGAL_TAB.search(token.string):
        self._HandleError(errors.ILLEGAL_TAB,
            'Illegal tab in comment "%s"' % token.string, token)

      trimmed = token.string.rstrip()
      if last_in_line and token.string != trimmed:
        # Check for extra whitespace at the end of a line.
        self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
            token, Position(len(trimmed), len(token.string) - len(trimmed)))

    # This check is also orthogonal since it is based on metadata.
    if token.metadata.is_implied_semicolon:
      self._HandleError(errors.MISSING_SEMICOLON,
          'Missing semicolon at end of line', token)
def _HandleStartBracket(self, token, last_non_space_token):
"""Handles a token that is an open bracket.
Args:
token: The token to handle.
last_non_space_token: The last token that was not a space.
"""
if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
last_non_space_token and
last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
token.previous, Position.All(token.previous.string))
# If the [ token is the first token in a line we shouldn't complain
# about a missing space before [. This is because some Ecma script
# languages allow syntax like:
# [Annotation]
# class MyClass {...}
# So we don't want to blindly warn about missing spaces before [.
# In the the future, when rules for computing exactly how many spaces
# lines should be indented are added, then we can return errors for
# [ tokens that are improperly indented.
# For example:
# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
# [a,b,c];
# should trigger a proper indentation warning message as [ is not indented
# by four spaces.
elif (not token.IsFirstInLine() and token.previous and
not token.previous.type in (
[Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
Type.EXPRESSION_ENDER_TYPES)):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
token, Position.AtBeginning())
  def Finalize(self, state):
    """Runs end-of-file checks after every token has been processed.

    Args:
      state: parser_state object for the fully parsed file.
    """
    last_non_space_token = state.GetLastNonSpaceToken()
    # Check last line for ending with newline.
    if state.GetLastLine() and not (state.GetLastLine().isspace() or
        state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
      self._HandleError(
          errors.FILE_MISSING_NEWLINE,
          'File does not end with new line. (%s)' % state.GetLastLine(),
          last_non_space_token)

    try:
      self._indentation.Finalize()
    # NOTE: Python 2 except syntax, consistent with the rest of this file.
    except Exception, e:
      # Report indentation-tracker failures as a parse problem rather than
      # letting the exception escape the linter.
      self._HandleError(
          errors.FILE_DOES_NOT_PARSE,
          str(e),
          last_non_space_token)
  def GetLongLineExceptions(self):
    """Gets a list of regexps for lines which can be longer than the limit.

    Returns:
      A list of regexps, used as matches (rather than searches).
    """
    # Base implementation allows no exceptions; subclasses may override.
    return []
  def InExplicitlyTypedLanguage(self):
    """Returns whether this ecma implementation is explicitly typed."""
    # Default is dynamically typed; typed dialects override this.
    return False
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from collections import defaultdict
import os
import six
import yaml
from nailgun import consts
from nailgun.errors import errors
from nailgun.expression import Expression
from nailgun.logger import logger
from nailgun import objects
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator import tasks_templates as templates
from nailgun.settings import settings
from nailgun.utils.role_resolver import BaseRoleResolver
def get_uids_for_roles(nodes, roles):
    """Returns list of uids for nodes that matches roles

    :param nodes: list of nodes
    :param roles: list of roles or consts.TASK_ROLES.all
    :returns: list of strings
    """
    # The master node is virtual; it is addressed by a well-known uid.
    if roles == consts.TASK_ROLES.master:
        return [consts.MASTER_NODE_UID]

    matched = set()
    if roles == consts.TASK_ROLES.all:
        matched.update(n.uid for n in nodes)
    elif isinstance(roles, list):
        wanted = set(roles)
        for node in nodes:
            if wanted & set(objects.Node.all_roles(node)):
                matched.add(node.uid)
    else:
        logger.warn(
            'Wrong roles format, `roles` should be a list or "*": %s',
            roles)
    return list(matched)
class LegacyRoleResolver(BaseRoleResolver):
    """The role resolver that implements legacy behaviour."""

    # TODO(bgaifullin): remove this in 9.0

    def __init__(self, nodes):
        self.nodes = nodes

    def resolve(self, roles, policy=None):
        """Map roles to node uids; the policy argument is ignored."""
        return get_uids_for_roles(self.nodes, roles)
@six.add_metaclass(abc.ABCMeta)
class DeploymentHook(object):
    """Abstract base for all deployment hooks."""

    def should_execute(self):
        """Should be used to define conditions when task should be executed."""
        return True

    @abc.abstractmethod
    def serialize(self):
        """Serialize task in expected by orchestrator format.
        This interface should return generator, because in some cases one
        external task - should serialize several tasks internally.
        """
class ExpressionBasedTask(DeploymentHook):
    """Hook whose execution is guarded by an optional task 'condition'."""

    def __init__(self, task, cluster):
        self.task = task
        self.cluster = cluster

    @property
    def _expression_context(self):
        """Variables made visible to the condition expression."""
        return {
            'cluster': self.cluster,
            'settings':
                objects.Cluster.get_editable_attributes(self.cluster),
        }

    def should_execute(self):
        """Evaluate the task's condition; a task without one always runs."""
        try:
            condition = self.task['condition']
        except KeyError:
            return True
        return Expression(condition, self._expression_context).evaluate()
class GenericNodeHook(ExpressionBasedTask):
    """Should be used for node serialization."""

    # Concrete subclasses must set this to the task type they handle
    # (e.g. 'puppet'); abc.abstractproperty is used only as a marker here.
    hook_type = abc.abstractproperty

    def __init__(self, task, cluster, node):
        self.node = node
        super(GenericNodeHook, self).__init__(task, cluster)
class PuppetHook(GenericNodeHook):
    """Serializes a puppet task for a single node."""

    hook_type = 'puppet'

    def serialize(self):
        # The task targets only this hook's node.
        yield templates.make_puppet_task([self.node['uid']], self.task)
class StandartConfigRolesHook(ExpressionBasedTask):
    """Role hooks that serializes task based on config file only."""

    def __init__(self, task, cluster, nodes, role_resolver=None):
        super(StandartConfigRolesHook, self).__init__(task, cluster)
        self.nodes = nodes
        # Fall back to the legacy resolver when none is supplied.
        self.role_resolver = role_resolver or LegacyRoleResolver(nodes)

    def get_uids(self):
        # Tasks may address nodes via 'role' or (older format) 'groups'.
        roles = self.task.get('role', self.task.get('groups'))
        return self.role_resolver.resolve(roles)

    def serialize(self):
        uids = self.get_uids()
        if not uids:
            return
        yield templates.make_generic_task(uids, self.task)
class GenericRolesHook(StandartConfigRolesHook):
    """Base for role hooks registered under a unique task identity."""
    # Subclasses must override with their unique task id string.
    identity = abc.abstractproperty
class UploadMOSRepo(GenericRolesHook):
    """Serializes the tasks that configure OS package repos on all nodes."""

    identity = 'upload_core_repos'

    def get_uids(self):
        # Repos must be configured on every node, regardless of task roles.
        return self.role_resolver.resolve(consts.TASK_ROLES.all)

    def serialize(self):
        uids = self.get_uids()
        operating_system = self.cluster.release.operating_system
        repos = objects.Attributes.merged_attrs_values(
            self.cluster.attributes)['repo_setup']['repos']
        if operating_system == consts.RELEASE_OS.centos:
            for repo in repos:
                yield templates.make_centos_repo_task(uids, repo)
            yield templates.make_yum_clean(uids)
        elif operating_system == consts.RELEASE_OS.ubuntu:
            # NOTE(ikalnitsky):
            # We have to clear /etc/apt/sources.list, because it
            # has a lot of invalid repos right after provisioning
            # and that lead us to deployment failures.
            yield templates.make_shell_task(uids, {
                'parameters': {
                    'cmd': '> /etc/apt/sources.list',
                    'timeout': 60
                }})
            yield templates.make_ubuntu_apt_disable_ipv6(uids)
            # NOTE(kozhukalov):
            # This task is to allow installing packages from
            # unauthenticated repositories.
            yield templates.make_ubuntu_unauth_repos_task(uids)
            for repo in repos:
                yield templates.make_ubuntu_sources_task(uids, repo)
                if repo.get('priority'):
                    # do not add preferences task to task list if we can't
                    # complete it (e.g. can't retrieve or parse Release file)
                    task = templates.make_ubuntu_preferences_task(uids, repo)
                    if task is not None:
                        yield task
            yield templates.make_apt_update_task(uids)
class RsyncPuppet(GenericRolesHook):
    """Serializes the task syncing puppet manifests to every node."""

    identity = 'rsync_core_puppet'

    def get_uids(self):
        # Manifests are needed on all nodes, not just the task's roles.
        return self.role_resolver.resolve(consts.TASK_ROLES.all)

    def serialize(self):
        params = self.task['parameters']
        source = params['src'].format(
            MASTER_IP=settings.MASTER_IP,
            OPENSTACK_VERSION=self.cluster.release.version)
        yield templates.make_sync_scripts_task(
            self.get_uids(), source, params['dst'])
class GenerateKeys(GenericRolesHook):
    """Runs the configured key-generation command for the cluster."""

    identity = 'generate_keys'

    def serialize(self):
        uids = self.get_uids()
        # NOTE: substitutes the cluster id into the command template,
        # mutating the shared task dict in place.
        self.task['parameters']['cmd'] = self.task['parameters']['cmd'].format(
            CLUSTER_ID=self.cluster.id)
        yield templates.make_shell_task(uids, self.task)
class CopyKeys(GenericRolesHook):
    """Copies key files, substituting the cluster id into source paths."""

    identity = 'copy_keys'

    def serialize(self):
        # Substitutes the cluster id into each file's src path in place.
        for file_path in self.task['parameters']['files']:
            file_path['src'] = file_path['src'].format(
                CLUSTER_ID=self.cluster.id)
        uids = self.get_uids()
        yield templates.make_generic_task(
            uids, self.task)
class GenerateCephKeys(GenerateKeys):
    """Same behaviour as GenerateKeys, under a Ceph-specific task id."""
    identity = 'generate_keys_ceph'


class CopyCephKeys(CopyKeys):
    """Same behaviour as CopyKeys, under a Ceph-specific task id."""
    identity = 'copy_keys_ceph'
class GenerateHaproxyKeys(GenericRolesHook):
    """Runs the haproxy key-generation command for the cluster."""

    identity = 'generate_haproxy_keys'

    def serialize(self):
        uids = self.get_uids()
        # Substitutes the cluster id and the public SSL hostname (taken
        # from the cluster's editable attributes) into the command, in place.
        self.task['parameters']['cmd'] = self.task['parameters']['cmd'].format(
            CLUSTER_ID=self.cluster.id,
            CN_HOSTNAME=objects.Cluster.get_editable_attributes(self.cluster)
            ['public_ssl']['hostname']['value'])
        yield templates.make_shell_task(uids, self.task)
class CopyHaproxyKeys(CopyKeys):
    """Same behaviour as CopyKeys, under a haproxy-specific task id."""
    identity = 'copy_haproxy_keys'
class IronicUploadImages(GenericRolesHook):
    """Runs the Ironic image upload command when any nodes match."""

    identity = 'ironic_upload_images'

    def serialize(self):
        uids = self.get_uids()
        if uids:
            # Substitutes the cluster id into the command template in place.
            self.task['parameters']['cmd'] = \
                self.task['parameters']['cmd'].format(
                    CLUSTER_ID=self.cluster.id)
            yield templates.make_shell_task(uids, self.task)
class IronicCopyBootstrapKey(CopyKeys):
    """Same behaviour as CopyKeys, under an Ironic-specific task id."""
    identity = 'ironic_copy_bootstrap_key'
class RestartRadosGW(GenericRolesHook):
    """Restart-radosgw task; only relevant for clusters with Ceph OSDs."""

    identity = 'restart_radosgw'

    def should_execute(self):
        """Run only if at least one node in scope has the ceph-osd role."""
        # Idiomatic replacement of the manual search loop; same semantics.
        return any('ceph-osd' in node.all_roles for node in self.nodes)
class CreateVMsOnCompute(GenericRolesHook):
    """Serializes the puppet task that spawns VMs on nodes needing them."""

    identity = 'generate_vms'
    hook_type = 'puppet'

    def __init__(self, task, cluster, nodes, role_resolver=None):
        # Deliberately passes an empty node list to the parent; this hook
        # operates on the VM-spawn nodes computed below instead.
        super(CreateVMsOnCompute, self).__init__(
            task, cluster, [], role_resolver
        )
        self.vm_nodes = objects.Cluster.get_nodes_to_spawn_vms(self.cluster)

    def should_execute(self):
        # Nothing to do when no node in the cluster needs VMs spawned.
        return len(self.vm_nodes) > 0

    def get_uids(self):
        return [node.uid for node in self.vm_nodes]

    def get_nodes(self):
        return self.vm_nodes

    def serialize(self):
        uids = self.get_uids()
        yield templates.make_puppet_task(uids, self.task)
class UploadNodesInfo(GenericRolesHook):
    """Hook that uploads info about all nodes in cluster."""

    identity = 'upload_nodes_info'

    def serialize(self):
        q_nodes = objects.Cluster.get_nodes_not_for_deletion(self.cluster)
        # task can be executed only on deployed nodes
        nodes = set(q_nodes.filter_by(status=consts.NODE_STATUSES.ready))
        # add nodes scheduled for deployment since they could be filtered out
        # above and task must be run also on them
        nodes.update(self.nodes)
        uids = [n.uid for n in nodes]
        # every node must have data about every other good node in cluster
        serialized_nodes = self._serialize_nodes(nodes)
        data = yaml.safe_dump({
            'nodes': serialized_nodes,
        })
        path = self.task['parameters']['path']
        yield templates.make_upload_task(uids, path=path, data=data)

    def _serialize_nodes(self, nodes):
        # Serialize node attributes, then patch in networking data via the
        # cluster's network-provider serializer.
        serializer = deployment_serializers.get_serializer_for_cluster(
            self.cluster)
        net_serializer = serializer.get_net_provider_serializer(self.cluster)
        serialized_nodes = serializer.node_list(nodes)
        serialized_nodes = net_serializer.update_nodes_net_info(
            self.cluster, serialized_nodes)
        return serialized_nodes
class UpdateHosts(GenericRolesHook):
    """Updates hosts info on nodes in cluster."""

    identity = 'update_hosts'

    def serialize(self):
        q_nodes = objects.Cluster.get_nodes_not_for_deletion(self.cluster)
        # task can be executed only on deployed nodes
        nodes = set(q_nodes.filter_by(status=consts.NODE_STATUSES.ready))
        # add nodes scheduled for deployment since they could be filtered out
        # above and task must be run also on them
        nodes.update(self.nodes)
        uids = [n.uid for n in nodes]
        yield templates.make_puppet_task(uids, self.task)
class UploadConfiguration(GenericRolesHook):
    """Hook that uploads yaml file with configuration on nodes."""

    identity = 'upload_configuration'

    def __init__(self, task, cluster, nodes, configs=None, role_resolver=None):
        super(UploadConfiguration, self).__init__(
            task, cluster, nodes, role_resolver=role_resolver
        )
        # Optional pre-fetched configs; looked up in serialize() when None.
        self.configs = configs

    @staticmethod
    def _merge_configs(dest, src):
        """Merge configuration parameters within groups.

        Configuration uploaded to node has format::

            config_group:
                param_name: {'value': '<some_value>'}

        Merge should happen on 2nd nesting level to merge parameters
        within groups.
        """
        for group, value in six.iteritems(src):
            dest.setdefault(group, {}).update(value)

    def serialize(self):
        configs = self.configs
        if configs is None:
            configs = objects.OpenstackConfigCollection.find_configs_for_nodes(
                self.cluster, self.nodes)
        # node id -> destination name -> merged configuration dict
        node_configs = defaultdict(lambda: defaultdict(dict))
        nodes_to_update = dict((node.id, node) for node in self.nodes)
        for config in configs:
            if config.config_type == consts.OPENSTACK_CONFIG_TYPES.cluster:
                # Cluster-wide config applies to every node being updated.
                for node_id in nodes_to_update:
                    node_configs[node_id]['cluster'] = config.configuration
            elif config.config_type == consts.OPENSTACK_CONFIG_TYPES.role:
                # Role config applies to each node that carries the role.
                for node in self.nodes:
                    if config.node_role in node.roles:
                        self._merge_configs(node_configs[node.id]['role'],
                                            config.configuration)
            elif config.config_type == consts.OPENSTACK_CONFIG_TYPES.node:
                # Per-node config is stored under the node's FQDN.
                if config.node_id in nodes_to_update:
                    fqdn = objects.Node.get_node_fqdn(
                        nodes_to_update[config.node_id])
                    node_configs[config.node_id][fqdn] = config.configuration
        for node_id in node_configs:
            for config_dest in node_configs[node_id]:
                path = os.path.join(consts.OVERRIDE_CONFIG_BASE_PATH,
                                    config_dest + '.yaml')
                # Converts config from MutableDict to dict
                # needs for further serialization to yaml
                data = {
                    'configuration': dict(node_configs[node_id][config_dest])}
                node = nodes_to_update[node_id]
                yield templates.make_upload_task(
                    [node.uid], path=path, data=yaml.safe_dump(data))
class TaskSerializers(object):
    """Class serves as fabric for different types of task serializers."""

    stage_serializers = [UploadMOSRepo, RsyncPuppet, CopyKeys, RestartRadosGW,
                         UploadNodesInfo, UpdateHosts, GenerateKeys,
                         GenerateHaproxyKeys, CopyHaproxyKeys,
                         GenerateCephKeys, CopyCephKeys, IronicUploadImages,
                         IronicCopyBootstrapKey, UploadConfiguration]
    deploy_serializers = [PuppetHook, CreateVMsOnCompute]

    def __init__(self, stage_serializers=None, deploy_serializers=None):
        """TaskSerializers initializer

        Task serializers for stage (pre/post) are different from
        serializers used for main deployment.
        This should be considered as limitation of current architecture,
        and will be solved in next releases.

        :param stage_serializers: list of GenericRoleHook classes
        :param deploy_serializers: list of GenericNodeHook classes
        """
        self._stage_serializers_map = {}
        self._deploy_serializers_map = {}
        if stage_serializers is None:
            stage_serializers = self.stage_serializers
        for serializer in stage_serializers:
            self.add_stage_serializer(serializer)
        if deploy_serializers is None:
            deploy_serializers = self.deploy_serializers
        for serializer in deploy_serializers:
            self.add_deploy_serializer(serializer)

    def add_stage_serializer(self, serializer):
        """Register a stage serializer under its identity."""
        self._stage_serializers_map[serializer.identity] = serializer

    def add_deploy_serializer(self, serializer):
        """Register a deploy serializer by identity, else by hook type."""
        if getattr(serializer, 'identity', None):
            self._deploy_serializers_map[serializer.identity] = serializer
        else:
            self._deploy_serializers_map[serializer.hook_type] = serializer

    def get_deploy_serializer(self, task):
        """Return the deploy serializer for a task, by id then by type.

        :raises errors.InvalidData: if the task has no 'type'
        :raises errors.SerializerNotSupported: if nothing matches
        """
        if 'type' not in task:
            raise errors.InvalidData('Task %s should have type', task)
        # BUG FIX: deploy tasks are not required to carry an 'id' key, so
        # task['id'] raised KeyError for id-less tasks; use .get() instead.
        task_id = task.get('id')
        if task_id and task_id in self._deploy_serializers_map:
            return self._deploy_serializers_map[task_id]
        elif task['type'] in self._deploy_serializers_map:
            return self._deploy_serializers_map[task['type']]
        else:
            # Currently we are not supporting anything except puppet as main
            # deployment engine, therefore exception should be raised,
            # but it should be verified by validation as well
            raise errors.SerializerNotSupported(
                'Serialization of type {0} is not supported. Task {1}'.format(
                    task['type'], task))

    def get_stage_serializer(self, task):
        """Return the stage serializer registered for the task's id.

        Falls back to StandartConfigRolesHook for unknown ids.
        :raises errors.InvalidData: if the task has no 'id'
        """
        if 'id' not in task:
            raise errors.InvalidData('Task %s should have id', task)
        if task['id'] in self._stage_serializers_map:
            return self._stage_serializers_map[task['id']]
        else:
            return StandartConfigRolesHook
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import subprocess
import time
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute, File
import metron_service
# Wrap major operations and functionality in this class
class ParserCommands:
    """Wraps the major operations on Metron parsers: initialization,
    Kafka topic creation, and starting/stopping/monitoring the Storm
    parser topologies.
    """

    # Ambari params object supplied at construction time.
    __params = None
    # Parser names extracted from params.parsers.
    __parser_list = None
    # Whether the parsers-configured flag file already exists.
    __configured = False

    def __init__(self, params):
        if params is None:
            raise ValueError("params argument is required for initialization")
        self.__params = params
        self.__parser_list = self.__get_parsers(params)
        self.__configured = os.path.isfile(self.__params.parsers_configured_flag_file)

    # get list of parsers
    def __get_parsers(self, params):
        # e.g. "bro, snort" -> ['bro', 'snort']
        return params.parsers.replace(' ', '').split(',')

    def is_configured(self):
        return self.__configured

    def set_configured(self):
        # Touch the flag file so later runs see the parsers as configured.
        File(self.__params.parsers_configured_flag_file,
             content="",
             owner=self.__params.metron_user,
             mode=0775)

    def init_parsers(self):
        # Copy the local grok patterns into HDFS so parsers can load them.
        Logger.info(
            "Copying grok patterns from local directory '{0}' to HDFS '{1}'".format(self.__params.local_grok_patterns_dir,
                                                                                    self.__params.metron_apps_dir))
        self.__params.HdfsResource(self.__params.metron_apps_dir,
                                   type="directory",
                                   action="create_on_execute",
                                   owner=self.__params.metron_user,
                                   mode=0775,
                                   source=self.__params.local_grok_patterns_dir)
        Logger.info("Done initializing parser configuration")

    def get_parser_list(self):
        return self.__parser_list

    def setup_repo(self):
        """Configure the yum repository (local or remote) per params."""
        def local_repo():
            Logger.info("Setting up local repo")
            Execute("yum -y install createrepo")
            Execute("createrepo /localrepo")
            Execute("chmod -R o-w+r /localrepo")
            Execute("echo \"[METRON-0.2.1BETA]\n"
                    "name=Metron 0.2.1BETA packages\n"
                    "baseurl=file:///localrepo\n"
                    "gpgcheck=0\n"
                    "enabled=1\" > /etc/yum.repos.d/local.repo")

        def remote_repo():
            # Remote repos need no local setup.
            print('Using remote repo')

        # Dispatch on the configured repo type.
        yum_repo_types = {
            'local': local_repo,
            'remote': remote_repo
        }
        repo_type = self.__params.yum_repo_type
        if repo_type in yum_repo_types:
            yum_repo_types[repo_type]()
        else:
            raise ValueError("Unsupported repo type '{0}'".format(repo_type))

    def init_kafka_topics(self):
        """Create the per-parser Kafka topics plus the two error topics."""
        Logger.info('Creating Kafka topics')
        command_template = """{0}/kafka-topics.sh \
                                --zookeeper {1} \
                                --create \
                                --topic {2} \
                                --partitions {3} \
                                --replication-factor {4} \
                                --config retention.bytes={5}"""
        num_partitions = 1
        replication_factor = 1
        # Retention is configured in gigabytes; Kafka expects bytes.
        retention_gigabytes = int(self.__params.metron_topic_retention)
        retention_bytes = retention_gigabytes * 1024 * 1024 * 1024
        Logger.info("Creating main topics for parsers")
        for parser_name in self.get_parser_list():
            Logger.info("Creating topic'{0}'".format(parser_name))
            Execute(command_template.format(self.__params.kafka_bin_dir,
                                            self.__params.zookeeper_quorum,
                                            parser_name,
                                            num_partitions,
                                            replication_factor,
                                            retention_bytes))
        Logger.info("Creating topics for error handling")
        Execute(command_template.format(self.__params.kafka_bin_dir,
                                        self.__params.zookeeper_quorum,
                                        "parser_invalid",
                                        num_partitions,
                                        replication_factor,
                                        retention_bytes))
        Execute(command_template.format(self.__params.kafka_bin_dir,
                                        self.__params.zookeeper_quorum,
                                        "parser_error",
                                        num_partitions, replication_factor,
                                        retention_bytes))
        Logger.info("Done creating Kafka topics")

    def start_parser_topologies(self):
        """Submit a Storm topology for every configured parser."""
        Logger.info("Starting Metron parser topologies: {0}".format(self.get_parser_list()))
        start_cmd_template = """{0}/bin/start_parser_topology.sh \
                                    -k {1} \
                                    -z {2} \
                                    -s {3}"""
        for parser in self.get_parser_list():
            Logger.info('Starting ' + parser)
            Execute(start_cmd_template.format(self.__params.metron_home, self.__params.kafka_brokers,
                                              self.__params.zookeeper_quorum, parser))
        Logger.info('Finished starting parser topologies')

    def stop_parser_topologies(self):
        """Kill the Storm topology of every configured parser."""
        Logger.info('Stopping parsers')
        for parser in self.get_parser_list():
            Logger.info('Stopping ' + parser)
            stop_cmd = 'storm kill ' + parser
            Execute(stop_cmd)
        Logger.info('Done stopping parser topologies')

    def restart_parser_topologies(self, env):
        """Stop all parser topologies, wait until dead, then start them."""
        Logger.info('Restarting the parser topologies')
        self.stop_parser_topologies()
        attempt_count = 0
        # Storm kills asynchronously; poll until the topologies are gone.
        while self.topologies_running(env):
            if attempt_count > 2:
                raise Exception("Unable to kill topologies")
            attempt_count += 1
            time.sleep(10)
        self.start_parser_topologies()
        Logger.info('Done restarting the parser topologies')

    def topologies_exist(self):
        """Return True if any parser topology appears in `storm list`."""
        cmd_open = subprocess.Popen(["storm", "list"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = cmd_open.communicate()
        stdout_lines = stdout.splitlines()
        if stdout_lines:
            status_lines = self.__get_status_lines(stdout_lines)
            for parser in self.get_parser_list():
                for line in status_lines:
                    # Collapse whitespace; first column is the topology name.
                    items = re.sub('[\s]+', ' ', line).split()
                    if items and items[0] == parser:
                        return True
        return False

    def topologies_running(self, env):
        """Return True only if every parser topology reports as running."""
        env.set_params(self.__params)
        all_running = True
        topologies = metron_service.get_running_topologies()
        for parser in self.get_parser_list():
            parser_found = False
            is_running = False
            if parser in topologies:
                parser_found = True
                is_running = topologies[parser] in ['ACTIVE', 'REBALANCING']
            all_running &= parser_found and is_running
        return all_running

    def __get_status_lines(self, lines):
        """Extract topology status lines from `storm list` output,
        skipping the two lines after the 'Topology_name' header."""
        status_lines = []
        do_stat = False
        skipped = 0
        for line in lines:
            if line.startswith("Topology_name"):
                do_stat = True
            if do_stat and skipped == 2:
                status_lines += [line]
            elif do_stat:
                skipped += 1
        return status_lines

    def __is_running(self, status):
        # Mirrors the status check used in topologies_running().
        return status in ['ACTIVE', 'REBALANCING']
|
|
# http://djangosnippets.org/snippets/773/
import math
from django.core.paginator import \
Paginator, QuerySetPaginator, Page, InvalidPage
__all__ = (
'InvalidPage',
'ExPaginator',
'DiggPaginator',
'QuerySetDiggPaginator',
)
class ExPaginator(Paginator):
    """Adds a ``softlimit`` option to ``page()``. If True, querying a
    page number larger than max. will not fail, but instead return the
    last available page.
    This is useful when the data source can not provide an exact count
    at all times (like some search engines), meaning the user could
    possibly see links to invalid pages at some point which we wouldn't
    want to fail as 404s.
    >>> items = range(1, 1000)
    >>> paginator = ExPaginator(items, 10)
    >>> paginator.page(1000)
    Traceback (most recent call last):
    InvalidPage: That page contains no results
    >>> paginator.page(1000, softlimit=True)
    <Page 100 of 100>
    # [bug] graceful handling of non-int args
    >>> paginator.page("str")
    Traceback (most recent call last):
    InvalidPage: That page number is not an integer
    """
    def _ensure_int(self, num, e):
        # Coerce to int, re-raising the original InvalidPage on failure.
        # see Django #7307
        try:
            return int(num)
        except ValueError:
            raise e
    def page(self, number, softlimit=False):
        """Return the requested page; with softlimit, clamp to the last page."""
        try:
            return super(ExPaginator, self).page(number)
        # BUG FIX: 'except InvalidPage, e' is Python-2-only syntax removed
        # in Python 3; the 'as' form is valid on Python 2.6+ and 3.x.
        except InvalidPage as e:
            number = self._ensure_int(number, e)
            if number > self.num_pages and softlimit:
                return self.page(self.num_pages, softlimit=False)
            else:
                raise e
class DiggPaginator(ExPaginator):
    """
    Based on Django's default paginator, it adds "Digg-style" page ranges
    with a leading block of pages, an optional middle block, and another
    block at the end of the page range. They are available as attributes
    on the page:
    {# with: page = digg_paginator.page(1) #}
    {% for num in page.leading_range %} ...
    {% for num in page.main_range %} ...
    {% for num in page.trailing_range %} ...
    Additionally, ``page_range`` contains a non-numeric ``False`` element
    for every transition between two ranges.
    {% for num in page.page_range %}
    {% if not num %} ... {# literally output dots #}
    {% else %}{{ num }}
    {% endif %}
    {% endfor %}
    Additional arguments passed to the constructor allow customization of
    how those blocks are constructed:
    body=5, tail=2
    [1] 2 3 4 5 ... 91 92
    |_________| |___|
    body tail
    |_____|
    margin
    body=5, tail=2, padding=2
    1 2 ... 6 7 [8] 9 10 ... 91 92
    |_| |__|
    ^padding^
    |_| |__________| |___|
    tail body tail
    ``margin`` is the minimum number of pages required between two ranges; if
    there are less, they are combined into one.
    When ``align_left`` is set to ``True``, the paginator operates in a
    special mode that always skips the right tail, e.g. does not display the
    end block unless necessary. This is useful for situations in which the
    exact number of items/pages is not actually known.
    # odd body length
    >>> print DiggPaginator(range(1,1000), 10, body=5).page(1)
    1 2 3 4 5 ... 99 100
    >>> print DiggPaginator(range(1,1000), 10, body=5).page(100)
    1 2 ... 96 97 98 99 100
    # even body length
    >>> print DiggPaginator(range(1,1000), 10, body=6).page(1)
    1 2 3 4 5 6 ... 99 100
    >>> print DiggPaginator(range(1,1000), 10, body=6).page(100)
    1 2 ... 95 96 97 98 99 100
    # leading range and main range are combined when close; note how
    # we have varying body and padding values, and their effect.
    >>> print DiggPaginator(range(1,1000), 10, body=5, padding=2, margin=2).page(3)
    1 2 3 4 5 ... 99 100
    >>> print DiggPaginator(range(1,1000), 10, body=6, padding=2, margin=2).page(4)
    1 2 3 4 5 6 ... 99 100
    >>> print DiggPaginator(range(1,1000), 10, body=5, padding=1, margin=2).page(6)
    1 2 3 4 5 6 7 ... 99 100
    >>> print DiggPaginator(range(1,1000), 10, body=5, padding=2, margin=2).page(7)
    1 2 ... 5 6 7 8 9 ... 99 100
    >>> print DiggPaginator(range(1,1000), 10, body=5, padding=1, margin=2).page(7)
    1 2 ... 5 6 7 8 9 ... 99 100
    # the trailing range works the same
    >>> print DiggPaginator(range(1,1000), 10, body=5, padding=2, margin=2, ).page(98)
    1 2 ... 96 97 98 99 100
    >>> print DiggPaginator(range(1,1000), 10, body=6, padding=2, margin=2, ).page(97)
    1 2 ... 95 96 97 98 99 100
    >>> print DiggPaginator(range(1,1000), 10, body=5, padding=1, margin=2, ).page(95)
    1 2 ... 94 95 96 97 98 99 100
    >>> print DiggPaginator(range(1,1000), 10, body=5, padding=2, margin=2, ).page(94)
    1 2 ... 92 93 94 95 96 ... 99 100
    >>> print DiggPaginator(range(1,1000), 10, body=5, padding=1, margin=2, ).page(94)
    1 2 ... 92 93 94 95 96 ... 99 100
    # all three ranges may be combined as well
    >>> print DiggPaginator(range(1,151), 10, body=6, padding=2).page(7)
    1 2 3 4 5 6 7 8 9 ... 14 15
    >>> print DiggPaginator(range(1,151), 10, body=6, padding=2).page(8)
    1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
    >>> print DiggPaginator(range(1,151), 10, body=6, padding=1).page(8)
    1 2 3 4 5 6 7 8 9 ... 14 15
    # no leading or trailing ranges might be required if there are only
    # a very small number of pages
    >>> print DiggPaginator(range(1,80), 10, body=10).page(1)
    1 2 3 4 5 6 7 8
    >>> print DiggPaginator(range(1,80), 10, body=10).page(8)
    1 2 3 4 5 6 7 8
    >>> print DiggPaginator(range(1,12), 10, body=5).page(1)
    1 2
    # test left align mode
    >>> print DiggPaginator(range(1,1000), 10, body=5, align_left=True).page(1)
    1 2 3 4 5
    >>> print DiggPaginator(range(1,1000), 10, body=5, align_left=True).page(50)
    1 2 ... 48 49 50 51 52
    >>> print DiggPaginator(range(1,1000), 10, body=5, align_left=True).page(97)
    1 2 ... 95 96 97 98 99
    >>> print DiggPaginator(range(1,1000), 10, body=5, align_left=True).page(100)
    1 2 ... 96 97 98 99 100
    # padding: default value
    >>> DiggPaginator(range(1,1000), 10, body=10).padding
    4
    # padding: automatic reduction
    >>> DiggPaginator(range(1,1000), 10, body=5).padding
    2
    >>> DiggPaginator(range(1,1000), 10, body=6).padding
    2
    # padding: sanity check
    >>> DiggPaginator(range(1,1000), 10, body=5, padding=3)
    Traceback (most recent call last):
    ValueError: padding too large for body (max 2)
    """
    def __init__(self, *args, **kwargs):
        self.body = kwargs.pop('body', 10)
        self.tail = kwargs.pop('tail', 2)
        self.align_left = kwargs.pop('align_left', False)
        self.margin = kwargs.pop('margin', 4) # TODO: make the default relative to body?
        # validate padding value
        # Padding can never exceed half the body (minus the active page).
        max_padding = int(math.ceil(self.body/2.0)-1)
        self.padding = kwargs.pop('padding', min(4, max_padding))
        if self.padding > max_padding:
            raise ValueError('padding too large for body (max %d)'%max_padding)
        super(DiggPaginator, self).__init__(*args, **kwargs)
    def page(self, number, *args, **kwargs):
        """Return a standard ``Page`` instance with custom, digg-specific
        page ranges attached.
        """
        page = super(DiggPaginator, self).page(number, *args, **kwargs)
        number = int(number) # we know this will work
        # easier access
        num_pages, body, tail, padding, margin = \
            self.num_pages, self.body, self.tail, self.padding, self.margin
        # put active page in middle of main range
        # NOTE: relies on Python 2 ``map`` returning a list (indexed below).
        main_range = map(int, [
            math.floor(number-body/2.0)+1, # +1 = shift odd body to right
            math.floor(number+body/2.0)])
        # adjust bounds
        if main_range[0] < 1:
            main_range = map(abs(main_range[0]-1).__add__, main_range)
        if main_range[1] > num_pages:
            main_range = map((num_pages-main_range[1]).__add__, main_range)
        # Determine leading and trailing ranges; if possible and appropriate,
        # combine them with the main range, in which case the resulting main
        # block might end up considerable larger than requested. While we
        # can't guarantee the exact size in those cases, we can at least try
        # to come as close as possible: we can reduce the other boundary to
        # max padding, instead of using half the body size, which would
        # otherwise be the case. If the padding is large enough, this will
        # of course have no effect.
        # Example:
        # total pages=100, page=4, body=5, (default padding=2)
        # 1 2 3 [4] 5 6 ... 99 100
        # total pages=100, page=4, body=5, padding=1
        # 1 2 3 [4] 5 ... 99 100
        # If it were not for this adjustment, both cases would result in the
        # first output, regardless of the padding value.
        if main_range[0] <= tail+margin:
            leading = []
            main_range = [1, max(body, min(number+padding, main_range[1]))]
            main_range[0] = 1
        else:
            leading = range(1, tail+1)
        # basically same for trailing range, but not in ``left_align`` mode
        if self.align_left:
            trailing = []
        else:
            if main_range[1] >= num_pages-(tail+margin)+1:
                trailing = []
                if not leading:
                    # ... but handle the special case of neither leading nor
                    # trailing ranges; otherwise, we would now modify the
                    # main range low bound, which we just set in the previous
                    # section, again.
                    main_range = [1, num_pages]
                else:
                    main_range = [min(num_pages-body+1, max(number-padding, main_range[0])), num_pages]
            else:
                trailing = range(num_pages-tail+1, num_pages+1)
        # finally, normalize values that are out of bound; this basically
        # fixes all the things the above code screwed up in the simple case
        # of few enough pages where one range would suffice.
        main_range = [max(main_range[0], 1), min(main_range[1], num_pages)]
        # make the result of our calculations available as custom ranges
        # on the ``Page`` instance.
        page.main_range = range(main_range[0], main_range[1]+1)
        page.leading_range = leading
        page.trailing_range = trailing
        # ``False`` separators mark gaps between non-empty adjacent ranges.
        page.page_range = reduce(lambda x, y: x+((x and y) and [False])+y,
            [page.leading_range, page.main_range, page.trailing_range])
        page.__class__ = DiggPage
        return page
class DiggPage(Page):
    """Page subclass whose str() renders the digg-style page ranges."""

    def __str__(self):
        # Render each non-empty range as space-separated numbers and join
        # the pieces with " ... " gaps (empty ranges are dropped).
        parts = []
        for rng in (self.leading_range, self.main_range, self.trailing_range):
            text = " ".join(str(num) for num in rng)
            if text:
                parts.append(text)
        return " ... ".join(parts)
class QuerySetDiggPaginator(DiggPaginator, QuerySetPaginator):
    """DiggPaginator combined with Django's QuerySetPaginator."""
    pass
if __name__ == "__main__":
    # Run the doctests embedded in the paginator docstrings above.
    import doctest
    doctest.testmod()
|
|
"""
Module. Includes classes for all PTC elements.
Built on the back of TEAPOT by J. Holmes.
"""
import sys
import os
import math
# import teapot base functions from wrapper around C++ functions
from orbit.teapot_base import TPB
# import the function that creates multidimensional arrays
from orbit.utils import orbitFinalize
# import some constants
from orbit.utils import consts
# import general accelerator elements and lattice
from orbit.lattice import AccLattice, AccNode,\
AccActionsContainer, AccNodeBunchTracker
# import the teapot classes
from orbit.teapot import TEAPOT_Lattice
from orbit.teapot import BaseTEAPOT
# import the interface to PTC
from libptc_orbit import *
class PTC_Lattice(TEAPOT_Lattice):
    """
    PTC Subclass of the AccLattice class.
    Inherits from the TEAPOT lattice.
    """
    def __init__(self, name = "no name"):
        TEAPOT_Lattice.__init__(self, name)
    def readPTC(self, PTC_File):
        """
        Reads the PTC file input and initializes all structures.
        Input PTC_File is the flat PTC file.
        """
        self.setName(PTC_File)
        length_of_name = len(PTC_File)
        # The PTC Fortran interface takes the file name and its length.
        ptc_init_(PTC_File, length_of_name - 1)
        # Entrance Twiss parameters of the ring.
        (betax, betay, alphax, alphay, etax, etapx) =\
            ptc_get_twiss_init_()
        self.betax0 = betax
        self.betay0 = betay
        self.alphax0 = alphax
        self.alphay0 = alphay
        self.etax0 = etax
        self.etapx0 = etapx
        # Global ring parameters: node count, harmonic number,
        # ring length, transition gamma.
        (nNodes, nHarm, lRing, gammaT) = ptc_get_ini_params_()
        self.nNodes = nNodes
        self.nHarm = nHarm
        self.lRing = lRing
        self.gammaT = gammaT
        # Create one PTC_Node per PTC element with its local Twiss data.
        for node_index in range(nNodes):
            (length, betax, betay,\
             alphax, alphay, etax, etapx) =\
                ptc_get_twiss_for_node_(node_index)
            elem = PTC_Node("PTC_Node")
            elem.setparams(node_index, length,\
                           betax, betay, alphax, alphay,\
                           etax, etapx)
            self.addNode(elem)
        self.initialize()
class PTC_Node(BaseTEAPOT):
    """
    PTC element.
    """
    def __init__(self, name = "ptc_node"):
        """
        Constructor. Creates a PTC element.
        """
        BaseTEAPOT.__init__(self, name)
        self.setType("ptc_node")
    def setparams(self, orbit_ptc_node_index, length,\
                  betax, betay, alphax, alphay,\
                  etax, etapx):
        """
        Sets element parameters: PTC node index, element length,
        and the local Twiss parameters.
        """
        self.addParam("node_index", orbit_ptc_node_index)
        self.setLength(length)
        self.addParam("betax" , betax)
        self.addParam("betay" , betay)
        self.addParam("alphax", alphax)
        self.addParam("alphay", alphay)
        self.addParam("etax"  , etax)
        self.addParam("etapx" , etapx)
    def track(self, paramsDict):
        """
        The PTC class implementation of the
        AccNodeBunchTracker class track(probe) method.
        """
        bunch = paramsDict["bunch"]
        PhaseLength = paramsDict["length"]
        orbit_ptc_node_index = self.getParam("node_index")
        action_type = -1
        # NOTE: the call that would set action_type is commented out, so
        # the energy-change branch below can never trigger at present.
        #ptc_get_task_type_(orbit_ptc_node_index, action_type)
        if(action_type == 1):
            print "==============================="
            print "PTC_Node.track."
            print "Energy chnage actions have not been taken."
            print "STOP."
            sys.exit(1)
        ptc_trackBunch(bunch, PhaseLength, orbit_ptc_node_index)
def setBunchParamsPTC(bunch):
    """
    Sets the synchronous particle parameters of the bunch.
    """
    (mass, charge, kin_energy) = ptc_get_syncpart_()
    # PTC reports mass in units of the proton mass.
    total_mass = mass * consts.mass_proton
    sync_particle = bunch.getSyncParticle()
    sync_particle.kinEnergy(kin_energy)
    bunch.charge(charge)
    bunch.mass(total_mass)
def readAccelTablePTC(Acc_File):
    """
    Gets the information for acceleration.
    """
    # The PTC Fortran interface takes the file name plus its length.
    ptc_read_accel_table_(Acc_File, len(Acc_File) - 1)
def readScriptPTC(Script_File):
    """
    Reads a PTC Script file.
    """
    # The PTC Fortran interface takes the file name plus its length.
    name_length = len(Script_File)
    ptc_script_(Script_File, name_length - 1)
def updateParamsPTC(lattice, bunch):
    """
    Updates Twiss parameters of lattice.
    Updates element parameters.
    Updates synchronous particle parameters of the bunch.
    """
    # Entrance Twiss parameters of the ring.
    (betax, betay, alphax, alphay, etax, etapx) =\
        ptc_get_twiss_init_()
    lattice.betax0 = betax
    lattice.betay0 = betay
    lattice.alphax0 = alphax
    lattice.alphay0 = alphay
    lattice.etax0 = etax
    lattice.etapx0 = etapx
    # Global ring parameters: node count, harmonic number,
    # ring length, transition gamma.
    (nNodes, nHarm, lRing, gammaT) = ptc_get_ini_params_()
    lattice.nNodes = nNodes
    lattice.nHarm = nHarm
    lattice.lRing = lRing
    lattice.gammaT = gammaT
    for node in lattice.getNodes():
        node_index = node.getParam("node_index")
        # BUG FIX: the per-node Twiss tuple returned by
        # ptc_get_twiss_for_node_() was being discarded, so every node was
        # rewritten with the initial Twiss values above. Unpack it exactly
        # as PTC_Lattice.readPTC() does.
        (length, betax, betay,\
         alphax, alphay, etax, etapx) =\
            ptc_get_twiss_for_node_(node_index)
        node.setparams(node_index, length,\
                       betax, betay, alphax, alphay,\
                       etax, etapx)
    setBunchParamsPTC(bunch)
def synchronousSetPTC(ival):
    """
    Calls ptc_synchronous_set_.
    Requires a negative ival; exits the program otherwise.
    """
    if(ival >= 0):
        print "==============================="
        print "synchronousSetPTC requires ival < 0"
        print "STOP."
        sys.exit(1)
    ptc_synchronous_set_(ival)
def synchronousAfterPTC(ival):
    """
    Calls ptc_synchronous_after_.
    Requires a negative ival; exits the program otherwise.
    """
    if(ival >= 0):
        print "==============================="
        print "synchronousAfterPTC requires ival < 0"
        print "STOP."
        sys.exit(1)
    ptc_synchronous_after_(ival)
def trackBunchThroughLatticePTC(lattice, bunch, PhaseLength):
	"""
	Tracks a bunch through the whole lattice.

	The same params dictionary (bunch + ring phase length) is handed to
	every node's track() in lattice order.
	"""
	params = {"bunch": bunch, "length": PhaseLength}
	for node in lattice.getNodes():
		node.track(params)
def trackBunchInRangePTC(lattice, bunch, PhaseLength, indexi, indexf):
	"""
	Tracks a bunch from indexi through indexf, inclusive.

	Nodes whose "node_index" parameter lies outside [indexi, indexf]
	are skipped.
	"""
	params = {"bunch": bunch, "length": PhaseLength}
	for node in lattice.getNodes():
		if indexi <= node.getParam("node_index") <= indexf:
			node.track(params)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for broadcasting operators."""
import numpy as np
import tvm
import topi
import topi.testing
from common import get_all_backend
def verify_expand_dims(in_shape, out_shape, axis, num_newaxis):
    """Check topi.expand_dims against a plain numpy reshape on every backend."""
    A = tvm.placeholder(shape=in_shape, name="A")
    B = topi.expand_dims(A, axis, num_newaxis)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_broadcast(B)
        fn = tvm.build(s, [A, B], device, name="expand_dims")
        a_np = np.random.uniform(size=in_shape).astype(A.dtype)
        # Inserting size-1 axes is exactly a reshape on the host side.
        b_np = a_np.reshape(out_shape)
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), ctx)
        fn(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np)

    for target in get_all_backend():
        check_device(target)
def verify_reinterpret(in_shape, in_dtype, out_dtype, generator):
    """Check topi.reinterpret (bit-cast) against numpy ndarray.view."""
    A = tvm.placeholder(shape=in_shape, name="A", dtype=in_dtype)
    B = topi.reinterpret(A, out_dtype)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_elemwise(B)
        fn = tvm.build(s, [A, B], device, name="reinterpret")
        a_np = generator(in_shape).astype(in_dtype)
        # A reinterpret is a bit-level view, so results must match exactly.
        b_np = a_np.view(B.dtype)
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.array(np.empty(in_shape).astype(B.dtype), ctx)
        fn(a_nd, b_nd)
        np.testing.assert_equal(b_nd.asnumpy(), b_np)

    for target in get_all_backend():
        check_device(target)
def verify_transpose(in_shape, axes):
    """Check topi.transpose against numpy.transpose on every backend."""
    A = tvm.placeholder(shape=in_shape, name="A")
    B = topi.transpose(A, axes)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(B)
        fn = tvm.build(s, [A, B], device, name="transpose")
        a_np = np.arange(np.prod(in_shape)).reshape(in_shape).astype(A.dtype)
        b_np = a_np.transpose(axes)
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.empty(b_np.shape, ctx=ctx, dtype=B.dtype)
        fn(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np)

    for target in get_all_backend():
        check_device(target)
def verify_reshape(src_shape, dst_shape):
    """Check topi.reshape against numpy.reshape on every backend."""
    A = tvm.placeholder(shape=src_shape, name="A")
    B = topi.reshape(A, dst_shape)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(B)
        fn = tvm.build(s, [A, B], device, name="reshape")
        a_np = np.random.normal(size=src_shape).astype(A.dtype)
        b_np = np.reshape(a_np, newshape=dst_shape)
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.empty(dst_shape, ctx=ctx, dtype=B.dtype)
        fn(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np)

    for target in get_all_backend():
        check_device(target)
def verify_squeeze(src_shape, axis):
    """Check topi.squeeze against numpy.squeeze on every backend."""
    A = tvm.placeholder(shape=src_shape, name="A")
    B = topi.squeeze(A, axis=axis)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(B)
        fn = tvm.build(s, [A, B], device, name="squeeze")
        a_np = np.random.normal(size=src_shape).astype(A.dtype)
        b_np = np.squeeze(a_np, axis=axis)
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.empty(b_np.shape, ctx=ctx, dtype=B.dtype)
        fn(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np)

    for target in get_all_backend():
        check_device(target)
def verify_concatenate(shapes, axis):
    """Check topi.concatenate against numpy.concatenate on every backend."""
    tensor_l = [tvm.placeholder(shape, name="A" + str(i))
                for i, shape in enumerate(shapes)]
    out_tensor = topi.concatenate(a_tuple=tensor_l, axis=axis)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_concatenate(out_tensor)
        fn = tvm.build(s, tensor_l + [out_tensor], device, name="concatenate")
        in_nps = [np.random.normal(size=shape).astype(tensor_l[0].dtype)
                  for shape in shapes]
        out_np = np.concatenate(in_nps, axis=axis)
        in_nds = [tvm.nd.array(a, ctx) for a in in_nps]
        out_nd = tvm.nd.empty(out_np.shape, ctx=ctx, dtype=out_tensor.dtype)
        fn(*(in_nds + [out_nd]))
        tvm.testing.assert_allclose(out_nd.asnumpy(), out_np)

    for target in get_all_backend():
        check_device(target)
def verify_stack(shapes, axis):
    """Check topi.stack against numpy.stack on every backend."""
    tensor_l = [tvm.placeholder(shape, name="A" + str(i))
                for i, shape in enumerate(shapes)]
    out_tensor = topi.stack(tensor_l, axis)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_broadcast(out_tensor)
        fn = tvm.build(s, tensor_l + [out_tensor], device, name="stack")
        in_nps = [np.random.normal(size=shape).astype(tensor_l[0].dtype)
                  for shape in shapes]
        out_np = np.stack(in_nps, axis=axis)
        in_nds = [tvm.nd.array(a, ctx) for a in in_nps]
        out_nd = tvm.nd.empty(out_np.shape, ctx=ctx, dtype=out_tensor.dtype)
        fn(*(in_nds + [out_nd]))
        tvm.testing.assert_allclose(out_nd.asnumpy(), out_np)

    for target in get_all_backend():
        check_device(target)
def verify_split(src_shape, indices_or_sections, axis):
    """Check topi.split against numpy.split on every backend."""
    A = tvm.placeholder(shape=src_shape, name="A")
    outputs = topi.split(A, indices_or_sections, axis=axis)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(outputs)
        fn = tvm.build(s, [A] + list(outputs), device, name="split")
        a_np = np.random.normal(size=src_shape).astype(A.dtype)
        expected = np.split(a_np, indices_or_sections, axis=axis)
        a_nd = tvm.nd.array(a_np, ctx)
        out_nds = [tvm.nd.empty(e.shape, ctx=ctx, dtype=outputs[0].dtype)
                   for e in expected]
        fn(*([a_nd] + out_nds))
        for out_nd, e in zip(out_nds, expected):
            tvm.testing.assert_allclose(out_nd.asnumpy(), e)

    for target in get_all_backend():
        check_device(target)
def verify_expand_like(in_shape, out_shape, axis):
    """Check topi.expand_like against manual expand_dims + tiling on llvm."""
    A = tvm.placeholder(shape=in_shape, name="A")
    B = tvm.placeholder(shape=out_shape, name="B")
    C = topi.expand_like(A, B, axis)
    s = tvm.create_schedule([C.op])

    def check_device(device):
        if not tvm.module.enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        ctx = tvm.context(device, 0)
        fn = tvm.build(s, [A, B, C], device, name="expand_like")
        a_np = np.random.uniform(size=in_shape).astype(A.dtype)
        a_nd = tvm.nd.array(a_np, ctx)
        odim = len(out_shape)
        # Normalize negative axes, then reproduce expand_like on the host:
        # insert each new axis, then replicate along it to the target extent.
        real_axis = sorted(x if x >= 0 else x + odim for x in axis)
        expected = a_np
        for x in real_axis:
            expected = np.expand_dims(expected, x).astype(A.dtype)
        for x in real_axis:
            expected = np.concatenate(
                [expected] * out_shape[x], axis=x).astype(A.dtype)
        assert expected.shape == out_shape
        shape_like_nd = tvm.nd.array(np.zeros(out_shape).astype(B.dtype), ctx)
        out_nd = tvm.nd.array(np.zeros(out_shape).astype(A.dtype), ctx)
        fn(a_nd, shape_like_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.asnumpy(), expected)

    for device in ["llvm"]:
        check_device(device)
def verify_flip(in_shape, axis):
    """Check topi.flip (+1 fused on top) against numpy.flip."""
    A = tvm.placeholder(shape=in_shape, name="A")
    B = topi.flip(A, axis) + 1

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(B)
        fn = tvm.build(s, [A, B], device, name="reverse")
        a_np = np.random.uniform(size=in_shape).astype(A.dtype)
        b_np = np.flip(a_np, axis) + 1
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.empty(b_np.shape, ctx=ctx, dtype=A.dtype)
        fn(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np)

    for target in ["llvm", "cuda", "opencl", "sdaccel", "aocl_sw_emu"]:
        check_device(target)
def verify_take(src_shape, indices_src, axis=None, mode="clip"):
    """Check topi.take against numpy.take for one index set and mode."""
    src_dtype = "float32"
    indices_dtype = "int32"
    indices_src = np.array(indices_src, dtype=indices_dtype)
    A = tvm.placeholder(shape=src_shape, dtype=src_dtype, name="A")
    indices = tvm.placeholder(shape=indices_src.shape, dtype=indices_dtype,
                              name="indices")
    if axis is None:
        out_tensor = topi.take(a=A, indices=indices, mode=mode)
    else:
        out_tensor = topi.take(a=A, indices=indices, axis=axis, mode=mode)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(out_tensor)
        fn = tvm.build(s, [A, indices, out_tensor], device, name="take")
        a_np = np.arange(np.prod(src_shape),
                         dtype=src_dtype).reshape(src_shape)
        # numpy has no "fast" mode; "raise" matches its unchecked semantics
        # for in-bounds indices.
        np_mode = "raise" if mode == "fast" else mode
        if axis is None:
            expected = np.take(a_np, indices_src, mode=np_mode)
        else:
            expected = np.take(a_np, indices_src, axis=axis, mode=np_mode)
        a_nd = tvm.nd.array(a_np, ctx)
        indices_nd = tvm.nd.array(indices_src, ctx)
        out_nd = tvm.nd.empty(expected.shape, ctx=ctx, dtype=src_dtype)
        fn(a_nd, indices_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.asnumpy(), expected)

    for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
        check_device(target)
def verify_strided_slice(in_shape, begin, end, strides=None):
    """Check topi.strided_slice (+1 fused) against the python reference."""
    A = tvm.placeholder(shape=in_shape, name="A")
    strides = [1, 1, 1] if strides is None else strides
    B = topi.strided_slice(A, begin, end, strides) + 1

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(B)
        fn = tvm.build(s, [A, B], device, name="stride_slice")
        a_np = np.random.uniform(size=in_shape).astype(A.dtype)
        b_np = topi.testing.strided_slice_python(
            a_np, begin, end, strides) + 1
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.empty(b_np.shape, ctx=ctx, dtype=A.dtype)
        fn(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np)

    for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
        check_device(target)
def verify_gather_nd(src_shape, indices_src, indices_dtype):
    """Check topi.gather_nd against the topi.testing reference."""
    src_dtype = "float32"
    indices_src = np.array(indices_src, dtype=indices_dtype)
    A = tvm.placeholder(shape=src_shape, dtype=src_dtype, name="A")
    indices = tvm.placeholder(shape=indices_src.shape, dtype=indices_dtype,
                              name="indices")
    out_tensor = topi.gather_nd(a=A, indices=indices)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(out_tensor)
        fn = tvm.build(s, [A, indices, out_tensor], device, name="take")
        a_np = np.arange(np.prod(src_shape),
                         dtype=src_dtype).reshape(src_shape)
        expected = topi.testing.gather_nd_python(a_np, indices_src)
        a_nd = tvm.nd.array(a_np, ctx)
        indices_nd = tvm.nd.array(indices_src, ctx)
        out_nd = tvm.nd.empty(expected.shape, ctx=ctx, dtype=src_dtype)
        fn(a_nd, indices_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.asnumpy(), expected)

    for target in get_all_backend():
        check_device(target)
def verify_arange(start, stop, step):
    """Check topi.arange against np.arange for the same argument pattern.

    start/step may be None, mirroring the optional np.arange arguments.
    """
    # Build one argument list shared by topi and numpy so both see the
    # exact same (positional stop | start,stop) + optional step call shape.
    args = (stop,) if start is None else (start, stop)
    kwargs = {} if step is None else {"step": step}
    A = topi.arange(*args, **kwargs)
    a_np = np.arange(*args, **kwargs)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(A)
        fn = tvm.build(s, [A], device, name="arange")
        out_nd = tvm.nd.empty(a_np.shape, dtype='float32', ctx=ctx)
        fn(out_nd)
        tvm.testing.assert_allclose(out_nd.asnumpy(), a_np)

    for target in get_all_backend():
        check_device(target)
def verify_repeat(in_shape, repeats, axis):
    """Check topi.repeat against numpy.repeat on every backend."""
    A = tvm.placeholder(shape=in_shape, name="A")
    B = topi.repeat(A, repeats, axis)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_broadcast(B)
        fn = tvm.build(s, [A, B], device, name="repeat")
        a_np = np.random.uniform(size=in_shape).astype(A.dtype)
        b_np = np.repeat(a_np, repeats, axis)
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.array(np.empty(b_np.shape).astype(B.dtype), ctx)
        fn(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np)

    for target in get_all_backend():
        check_device(target)
def verify_tile(in_shape, reps):
    """Check topi.tile against numpy.tile on every backend."""
    A = tvm.placeholder(shape=in_shape, name="A")
    B = topi.tile(A, reps)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_broadcast(B)
        fn = tvm.build(s, [A, B], device, name="tile")
        a_np = np.random.uniform(size=in_shape).astype(A.dtype)
        b_np = np.tile(a_np, reps)
        a_nd = tvm.nd.array(a_np, ctx)
        b_nd = tvm.nd.array(np.empty(b_np.shape).astype(B.dtype), ctx)
        fn(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.asnumpy(), b_np)

    for target in get_all_backend():
        check_device(target)
def verify_where(in_shape):
    """Check topi.where against numpy.where on every backend."""
    Cond = tvm.placeholder(shape=in_shape, name="cond")
    dtype = Cond.dtype
    A = tvm.placeholder(shape=in_shape, name="A")
    B = tvm.placeholder(shape=in_shape, name="B")
    C = topi.where(Cond, A, B)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_broadcast(C)
        fn = tvm.build(s, [Cond, A, B, C], device, name="where")
        # Condition spans negative and positive values so both branches fire.
        cond_np = np.random.uniform(low=-1, high=1, size=in_shape).astype(dtype)
        x_np = np.random.uniform(size=in_shape).astype(dtype)
        y_np = np.random.uniform(size=in_shape).astype(dtype)
        expected = np.where(cond_np, x_np, y_np)
        cond_nd = tvm.nd.array(cond_np, ctx)
        x_nd = tvm.nd.array(x_np, ctx)
        y_nd = tvm.nd.array(y_np, ctx)
        out_nd = tvm.nd.array(np.empty(expected.shape).astype(C.dtype), ctx)
        fn(cond_nd, x_nd, y_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.asnumpy(), expected)

    for target in get_all_backend():
        check_device(target)
def verify_one_hot(indices_shape, depth, on_value, off_value, axis, dtype):
    """Check topi one_hot against the topi.testing reference."""
    indices = tvm.placeholder(shape=indices_shape, name="indices", dtype="int32")
    on_value_const = tvm.const(on_value, dtype)
    off_value_const = tvm.const(off_value, dtype)
    one_hot_result = topi.transform.one_hot(
        indices, on_value_const, off_value_const, depth, axis, dtype)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(one_hot_result)
        fn = tvm.build(s, [indices, one_hot_result], device, name="one_hot")
        idx_np = np.random.randint(0, depth, size=indices_shape).astype(indices.dtype)
        expected = topi.testing.one_hot(idx_np, on_value, off_value, depth, axis, dtype)
        idx_nd = tvm.nd.array(idx_np, ctx)
        out_nd = tvm.nd.array(
            np.empty(expected.shape).astype(one_hot_result.dtype), ctx)
        fn(idx_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.asnumpy(), expected)

    for target in get_all_backend():
        check_device(target)
def test_strided_slice():
    """Cover positive/negative begin, end, and stride combinations."""
    for shape, begin, end, strides in [
            ((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2]),
            ((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1]),
            ((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1]),
            ((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2]),
            ((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1]),
            ((3, 4, 3), [1, 1, 0], [4, 4, 3], None)]:
        verify_strided_slice(shape, begin, end, strides)
def test_expand_dims():
    """Insert new axes at a positive and at a negative position."""
    for in_shape, out_shape, axis, num_newaxis in [
            ((3, 10), (3, 10, 1, 1), 2, 2),
            ((3, 10), (1, 3, 10), -3, 1)]:
        verify_expand_dims(in_shape, out_shape, axis, num_newaxis)
def test_reinterpret():
    """Bit-cast between same-width dtypes, each with suitable random data."""
    cases = [
        ("float32", "int32", lambda shape: np.random.randn(*shape) * 1000),
        ("float16", "int16", lambda shape: np.random.randn(*shape) * 100),
        ("int16", "uint16",
         lambda shape: np.random.randint(-1000, 1000, size=shape)),
        ("uint32", "int32",
         lambda shape: np.random.randint(0, 2 ** 32 - 1, size=shape)),
        # Duplicated on purpose in the original suite; kept for parity.
        ("uint32", "int32",
         lambda shape: np.random.randint(0, 2 ** 32 - 1, size=shape)),
    ]
    for in_dtype, out_dtype, generator in cases:
        verify_reinterpret((1000,), in_dtype, out_dtype, generator)
def test_transpose():
    """Explicit axis permutations plus the default (None) reversal."""
    for in_shape, axes in [((3, 10, 2), (1, 0, 2)),
                           ((3, 10, 5), (2, 0, 1)),
                           ((3, 10), None)]:
        verify_transpose(in_shape, axes)
def test_reshape():
    """Reshapes that drop, merge, and fully flatten/refold dimensions."""
    for src, dst in [((1, 2, 3, 4), (2, 3, 4)),
                     ((4, 2, 3, 4), (2, 4, 12)),
                     ((4, 2, 3, 4), (2, 48)),
                     ((16, ), (2, 2, 2, 2))]:
        verify_reshape(src, dst)
def test_where():
    """Element-wise select on a 4-D tensor."""
    verify_where((1, 2, 3, 4))
def test_squeeze():
    """Squeeze by axis, by tuple, and fully; plus an inline-let regression."""
    for shape, axis in [((1, 2, 3, 4), 0),
                        ((1, 2, 1, 4), None),
                        ((1, 1, 1, 4), (1, 2)),
                        ((1, 1, 1, 1), None)]:
        verify_squeeze(shape, axis)

    # a special case to trigger inline let expression
    A = tvm.placeholder((2,), 'float32', 'A')
    E = topi.squeeze(A)
    C = tvm.compute((1,), lambda i: E[(2 * A[0] - 1).astype('int32')])
    for device in ['cuda', 'opencl']:
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            continue
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(C)
            # Built inside the target context on purpose: no explicit target
            # is passed, so tvm.build picks it up from the context.
            func = tvm.build(s, [A, C])
        a = tvm.nd.array(np.array((1, 2)).astype('float32'), ctx=ctx)
        c = tvm.nd.empty((1,), dtype='float32', ctx=ctx)
        func(a, c)
        assert c.asnumpy()[0] == 2
def test_concatenate():
    """Concatenate along negative, middle, and leading axes."""
    for shapes, axis in [
            ([(2,), (2,), (2,)], -1),
            ([(2, 3, 4), (2, 2, 4), (2, 5, 4)], 1),
            ([(1, 2, 4), (1, 2, 3), (1, 2, 7), (1, 2, 8), (1, 2, 1)], -1),
            ([(5, 6, 7, 3),
              (16, 6, 7, 3),
              (12, 6, 7, 3),
              (8, 6, 7, 3),
              (2, 6, 7, 3)], 0),
            ([(1, 14400), (1, 2400), (1, 640), (1, 240)], 1)]:
        verify_concatenate(shapes, axis)
def test_stack():
    """Stack 1-D through 4-D inputs along every axis position."""
    for shapes, axis in [
            ([(2,), (2,), (2,)], -1),
            ([(2,), (2,), (2,)], 1),
            ([(2,), (2,), (2,)], 0),
            ([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1),
            ([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)]:
        verify_stack(shapes, axis)
def test_split():
    """Split by section count and by explicit index lists."""
    for shape, indices_or_sections, axis in [
            ((2, 12, 3), 3, 1),
            ((2, 12, 3), [2, 4], 1),
            ((10, 12, 24), [5, 7, 9], -1)]:
        verify_split(shape, indices_or_sections, axis)
def test_flip():
    """Flip a fixed 3-D tensor along every positive and negative axis."""
    for axis in (1, 0, 2, -1, -3, -2):
        verify_flip((3, 4, 3), axis)
def test_expand_like():
    """Expand along one and two broadcast axes."""
    for in_shape, out_shape, axis in [
            ((3,), (2, 3), [0]),
            ((2,), (2, 3), [1]),
            ((3, 4), (3, 5, 4), [1]),
            ((5, 7), (5, 6, 7, 8), [1, 3])]:
        verify_expand_like(in_shape, out_shape, axis)
def test_take():
    """Flat and per-axis gathers: nested indices, negatives, all modes."""
    # Flat (axis=None) gathers, including multi-dimensional index arrays.
    verify_take((4,), [1])
    verify_take((4,), [[0, 1, 2, 3]])
    verify_take((3, 3, 3), [[11, 25]])
    verify_take((4,), [[0, 1], [2, 3]])
    # Per-axis gathers.
    verify_take((4,), [1], 0)
    verify_take((2, 2), [[[1, 0], [0, 1]]], 0)
    verify_take((2, 2), [[[1, 0], [0, 1]]], 1)
    verify_take((4, 3, 5, 6), [[2, 1, 0, 0]], -2)
    # Out-of-range indices under "clip" (default) and "wrap".
    verify_take((3, 4), [-5, 20])
    verify_take((3, 4), [-5, 20], mode="wrap")
    verify_take((3, 4), [-1, 2], axis=0)
    verify_take((3, 4), [-1, 2], axis=0, mode="wrap")
    verify_take((3, 4), [-1, 2], axis=1)
    verify_take((3, 4), [-1, 2], axis=1, mode="wrap")
    # "fast" mode assumes in-bounds indices.
    verify_take((3, 3, 3), [[11, 25]], mode="fast")
    verify_take((3, 4), [0, 2], axis=0, mode="fast")
    verify_take((3, 4), [0, 2], axis=1, mode="fast")
def test_gather_nd():
    """gather_nd over 1-D to 4-D data with int32 and float32 index dtypes."""
    for indices_dtype in ['int32', 'float32']:
        for src_shape, indices_src in [
                ((4,), [[1.8]]),
                ((4,), [[1, 3, 2]]),
                ((2, 3), [[1]]),
                ((2, 3), [[1], [0]]),
                ((2, 3), [[1, 0], [0, 2]]),
                ((2, 3, 4), [[1, 0], [0, 2]]),
                ((2, 3, 4), [[1, 0], [0, 2], [3, 1]]),
                ((2, 3, 4), [[[1, 0], [0, 1]], [[0, 2], [1, 2]],
                             [[3, 1], [0, 2]]]),
                ((2, 3, 4, 5), [[1, 0], [0, 2]]),
                ((2, 3, 4, 5), [[1, 0], [2, 1], [3, 2], [4, 2]])]:
            verify_gather_nd(src_shape, indices_src, indices_dtype)
def test_arange():
    """arange with every combination of omitted start/step, floats, and
    negative strides."""
    for start, stop, step in [
            (None, 20, None),
            (None, 20, 2),
            (1, 20, None),
            (1, 20, 2),
            (1, 20, 1.5),
            (1, 20.5, None),
            (1, 20, 3),
            (20, 1, -1),
            (20, 1, -1.5)]:
        verify_arange(start, stop, step)
def test_repeat():
    """Repeat along leading, middle, and negative axes."""
    for in_shape, repeats, axis in [
            ((2,), 1, 0),
            ((3, 2), 2, 0),
            ((3, 2, 4), 3, 1),
            ((1, 3, 2, 4), 4, -1)]:
        verify_repeat(in_shape, repeats, axis)
def test_tile():
    """Tile with reps shorter, equal, and longer than the input rank."""
    for in_shape, reps in [((3, 2), (2, 3)),
                           ((3, 2, 5), (2,)),
                           ((3, ), (2, 3, 3))]:
        verify_tile(in_shape, reps)
def test_layout_transform():
    """NCHW -> NCHW16c layout transform checked against a numpy reference."""
    in_shape = (1, 32, 8, 8)
    A = tvm.placeholder(shape=in_shape, dtype="float32", name="A")
    B = topi.layout_transform(A, "NCHW", "NCHW16c")

    a_np = np.random.uniform(size=in_shape).astype(A.dtype)
    # Reference: move C last, split it into 2 blocks of 16, put block index
    # after N -> (N, C_outer, H, W, c_inner).
    expected = np.transpose(a_np, axes=(0, 2, 3, 1))
    expected = np.reshape(expected, newshape=(1, 8, 8, 2, 16))
    expected = np.transpose(expected, axes=(0, 3, 1, 2, 4))

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        a_nd = tvm.nd.array(a_np, ctx)
        out_nd = tvm.nd.empty(expected.shape, ctx=ctx, dtype=B.dtype)
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(B)
        fn = tvm.build(s, [A, B], device, name="layout_transform")
        fn(a_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.asnumpy(), expected)

    for backend in get_all_backend():
        check_device(backend)
def test_shape():
    """topi.shape returns the static input shape as an int32 tensor."""
    in_shape = (8, 7, 13)
    dtype = "int32"
    A = tvm.placeholder(shape=in_shape, dtype="float32", name="A")
    B = topi.shape(A, dtype)

    a_np = np.random.uniform(size=in_shape).astype(A.dtype)
    expected = np.asarray(in_shape).astype(dtype)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        a_nd = tvm.nd.array(a_np, ctx)
        out_nd = tvm.nd.empty(expected.shape, ctx=ctx, dtype=dtype)
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(B)
        fn = tvm.build(s, [A, B], device, name="shape")
        fn(a_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.asnumpy(), expected)

    for backend in get_all_backend():
        check_device(backend)
def test_sequence_mask():
    """sequence_mask over both axis orders and both mask values."""
    for in_shape in (5, 10), (3, 4, 5, 4):
        for axis in [0, 1]:
            for mask_value in [0.0, 1.0]:
                max_length = in_shape[axis]
                batch_size = in_shape[1 - axis]
                A = tvm.placeholder(shape=in_shape, dtype="float32", name="A")
                B = tvm.placeholder(shape=(batch_size,), dtype="int32", name="B")
                C = topi.sequence_mask(A, B, axis=axis, mask_value=mask_value)
                a_np = np.random.normal(0, 1, in_shape).astype(np.float32)
                lengths_np = np.random.randint(
                    1, max_length, (batch_size,)).astype(np.int32)
                expected = topi.testing.sequence_mask(
                    a_np, lengths_np, mask_value, axis)

                def check_device(device):
                    ctx = tvm.context(device, 0)
                    if not ctx.exist:
                        print("Skip because %s is not enabled" % device)
                        return
                    a_nd = tvm.nd.array(a_np, ctx)
                    lengths_nd = tvm.nd.array(lengths_np, ctx)
                    out_nd = tvm.nd.empty(in_shape, ctx=ctx, dtype="float32")
                    print("Running on target: %s" % device)
                    with tvm.target.create(device):
                        s = topi.generic.schedule_injective(C)
                    fn = tvm.build(s, [A, B, C], device, name="SequenceMask")
                    fn(a_nd, lengths_nd, out_nd)
                    tvm.testing.assert_allclose(out_nd.asnumpy(), expected)

                for backend in get_all_backend():
                    check_device(backend)
def test_ndarray_size():
    """topi.ndarray_size returns the total element count as int32."""
    in_shape = (5, 11, 7)
    dtype = "int32"
    A = tvm.placeholder(shape=in_shape, dtype="float32", name="A")
    B = topi.ndarray_size(A, dtype)

    a_np = np.random.uniform(size=in_shape).astype(A.dtype)
    expected = np.asarray(np.size(a_np)).astype(dtype)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        a_nd = tvm.nd.array(a_np, ctx=ctx)
        out_nd = tvm.nd.empty((1,), ctx=ctx, dtype=B.dtype)
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(B)
        fn = tvm.build(s, [A, B], device, name="ndarray_size")
        fn(a_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.asnumpy(), expected)

    for backend in get_all_backend():
        check_device(backend)
def test_where_fusion():
    """integration test that where and zeros should be properly inlined"""
    def check_device(device):
        with tvm.target.create(device):
            ctx = tvm.context(device, 0)
            if not ctx.exist:
                print("Skip because %s is not enabled" % device)
                return
            print("Running on target: %s" % device)
            data = tvm.placeholder((2, 1, 2, 4), 'int8', 'data')
            w = tvm.placeholder((3, 1, 2, 2), 'int8', 'w')
            conv1 = topi.nn.conv2d(data, w, 1, 0, 1, out_dtype='int32')
            zeros = topi.full((2, 3, 1, 3), 'int32', tvm.const(0, dtype='int32'))
            gt = topi.greater_equal(conv1, zeros)
            one = topi.full((2, 3, 1, 3), 'int32', tvm.const(1, dtype='int32'))
            two = topi.full((2, 3, 1, 3), 'int32', tvm.const(2, dtype='int32'))
            where = topi.where(gt, one, two)
            add = topi.add(conv1, where)
            outs = [add]
            s = topi.generic.schedule_conv2d_nchw(outs)
            # Bug fix: build for the `device` parameter, not the enclosing
            # loop variable `backend` (which only worked by closure accident).
            tvm.build(s, [data, w, add], target=device)

    for backend in get_all_backend():
        check_device(backend)
def test_one_hot():
    """one_hot over int and float payloads, several axes and ranks."""
    for indices_shape, depth, on_value, off_value, axis, dtype in [
            ((3,), 3, 1, 0, -1, "int32"),
            ((3,), 3, 1.0, 0.0, -1, "float32"),
            ((2, 2), 5, 2, -2, 0, "int32"),
            ((2, 2), 5, 0.5, -0.5, 1, "float32"),
            ((3, 2, 4, 5), 6, 1, 0, 1, "int32"),
            ((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")]:
        verify_one_hot(indices_shape, depth, on_value, off_value, axis, dtype)
if __name__ == "__main__":
    # Run the whole suite when invoked as a script.
    # NOTE(review): test_reinterpret is defined above but was never invoked
    # here in the original; kept out of the list to preserve behavior —
    # confirm whether the omission is intentional.
    for _test in (
            test_strided_slice,
            test_concatenate,
            test_stack,
            test_transpose,
            test_expand_dims,
            test_reshape,
            test_where,
            test_squeeze,
            test_split,
            test_flip,
            test_expand_like,
            test_take,
            test_gather_nd,
            test_arange,
            test_layout_transform,
            test_repeat,
            test_tile,
            test_shape,
            test_sequence_mask,
            test_ndarray_size,
            test_where_fusion,
            test_one_hot):
        _test()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_linalg_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Names below are lower_case.
# pylint: disable=invalid-name
def _RegularizedGramianCholesky(matrix, l2_regularizer, first_kind):
  r"""Computes the Cholesky factor of a regularized gramian matrix.

  For each `matrix`=\\(A \in \Re^{m \times n}\\) in the batch and
  `l2_regularizer`=\\(\lambda\\):

  If `first_kind` is True, returns the Cholesky factorization \\(L\\) such that
  \\(L L^H =  A^H A + \lambda I\\).
  If `first_kind` is False, returns the Cholesky factorization \\(L\\) such
  that \\(L L^H =  A A^H + \lambda I\\).

  Args:
    matrix: `Tensor` of shape `[..., M, N]`.
    l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
    first_kind: bool. Controls what gramian matrix to factor.

  Returns:
    output: `Tensor` of shape `[..., min(M,N), min(M,N)]` whose inner-most 2
      dimensions contain the Cholesky factors \\(L\\) described above.
  """
  gramian = math_ops.matmul(
      matrix, matrix, adjoint_a=first_kind, adjoint_b=not first_kind)
  # Only add the regularizer when it is a tensor (value unknown statically)
  # or a statically-known nonzero scalar.
  if isinstance(l2_regularizer, ops.Tensor) or l2_regularizer != 0:
    matrix_shape = array_ops.shape(matrix)
    batch_shape = matrix_shape[:-2]
    small_dim = matrix_shape[-1] if first_kind else matrix_shape[-2]
    identity = eye(small_dim, batch_shape=batch_shape, dtype=matrix.dtype)
    # Propagate static shape information so downstream shape inference works.
    small_dim_static = matrix.shape[-1 if first_kind else -2]
    identity.set_shape(
        matrix.shape[:-2].concatenate([small_dim_static, small_dim_static]))
    gramian += l2_regularizer * identity
  return gen_linalg_ops.cholesky(gramian)
@tf_export('cholesky_solve', 'linalg.cholesky_solve')
def cholesky_solve(chol, rhs, name=None):
  """Solves systems of linear eqns `A X = RHS`, given Cholesky factorizations.

  ```python
  # Solve 10 separate 2x2 linear systems:
  A = ... # shape 10 x 2 x 2
  RHS = ... # shape 10 x 2 x 1
  chol = tf.cholesky(A)  # shape 10 x 2 x 2
  X = tf.cholesky_solve(chol, RHS)  # shape 10 x 2 x 1
  # tf.matmul(A, X) ~ RHS
  X[3, :, 0]  # Solution to the linear system A[3, :, :] x = RHS[3, :, 0]

  # Solve five linear systems (K = 5) for every member of the length 10 batch.
  A = ... # shape 10 x 2 x 2
  RHS = ... # shape 10 x 2 x 5
  ...
  X[3, :, 2]  # Solution to the linear system A[3, :, :] x = RHS[3, :, 2]
  ```

  Args:
    chol: A `Tensor`. Must be `float32` or `float64`, shape is `[..., M, M]`.
      Cholesky factorization of `A`, e.g. `chol = tf.cholesky(A)`. Only the
      lower triangular parts (including the diagonal) of the last two
      dimensions of `chol` are used; the strictly upper part is assumed to
      be zero and is not accessed.
    rhs: A `Tensor`, same type as `chol`, shape is `[..., M, K]`.
    name: A name to give this `Op`. Defaults to `cholesky_solve`.

  Returns:
    Solution to `A x = rhs`, shape `[..., M, K]`.
  """
  # With A = C C^H: first solve C y = rhs (forward substitution),
  # then C^H x = y (back substitution).
  with ops.name_scope(name, 'cholesky_solve', [chol, rhs]):
    y = gen_linalg_ops.matrix_triangular_solve(
        chol, rhs, adjoint=False, lower=True)
    return gen_linalg_ops.matrix_triangular_solve(
        chol, y, adjoint=True, lower=True)
@tf_export('eye', 'linalg.eye')
def eye(num_rows,
        num_columns=None,
        batch_shape=None,
        dtype=dtypes.float32,
        name=None):
  """Construct an identity matrix, or a batch of matrices.
  ```python
  # Construct one identity matrix.
  tf.eye(2)
  ==> [[1., 0.],
       [0., 1.]]
  # Construct a batch of 3 identity matricies, each 2 x 2.
  # batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
  batch_identity = tf.eye(2, batch_shape=[3])
  # Construct one 2 x 3 "identity" matrix
  tf.eye(2, num_columns=3)
  ==> [[ 1.,  0.,  0.],
       [ 0.,  1.,  0.]]
  ```
  Args:
    num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows
      in each batch matrix.
    num_columns: Optional non-negative `int32` scalar `Tensor` giving the number
      of columns in each batch matrix. Defaults to `num_rows`.
    batch_shape: A list or tuple of Python integers or a 1-D `int32` `Tensor`.
      If provided, the returned `Tensor` will have leading batch dimensions of
      this shape.
    dtype: The type of an element in the resulting `Tensor`
    name: A name for this `Op`. Defaults to "eye".
  Returns:
    A `Tensor` of shape `batch_shape + [num_rows, num_columns]`
  """
  with ops.name_scope(
      name, default_name='eye', values=[num_rows, num_columns, batch_shape]):
    # Provisional: treat the matrix as square when num_columns was omitted.
    # The static branch below refines this once num_columns is known.
    is_square = num_columns is None
    batch_shape = [] if batch_shape is None else batch_shape
    num_columns = num_rows if num_columns is None else num_columns
    if isinstance(num_rows, ops.Tensor) or isinstance(
        num_columns, ops.Tensor) or isinstance(batch_shape, ops.Tensor):
      # Dynamic branch: at least one dimension is only known at graph run
      # time, so the output shapes must be built with TF ops.
      batch_shape = ops.convert_to_tensor(
          batch_shape, name='shape', dtype=dtypes.int32)
      diag_size = math_ops.minimum(num_rows, num_columns)
      diag_shape = array_ops.concat((batch_shape, [diag_size]), 0)
      if not is_square:
        shape = array_ops.concat((batch_shape, [num_rows, num_columns]), 0)
    else:
      # Static branch: every dimension is a Python int, so shapes can be
      # computed eagerly with numpy.
      if not isinstance(num_rows, compat.integral_types) or not isinstance(
          num_columns, compat.integral_types):
        raise TypeError(
            'num_rows and num_columns must be positive integer values.')
      batch_shape = [dim for dim in batch_shape]
      # Re-check squareness: an explicitly passed num_columns may still equal
      # num_rows, which lets us take the cheaper matrix_diag path below.
      is_square = num_rows == num_columns
      diag_shape = batch_shape + [np.minimum(num_rows, num_columns)]
      if not is_square:
        shape = batch_shape + [num_rows, num_columns]
    # A square identity is just a diagonal of ones; a rectangular "identity"
    # embeds that diagonal into a zero matrix of the full shape.
    diag_ones = array_ops.ones(diag_shape, dtype=dtype)
    if is_square:
      return array_ops.matrix_diag(diag_ones)
    else:
      zero_matrix = array_ops.zeros(shape, dtype=dtype)
      return array_ops.matrix_set_diag(zero_matrix, diag_ones)
@tf_export('matrix_solve_ls', 'linalg.lstsq')
def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
  r"""Solves one or more linear least-squares problems.
  `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
  form `M`-by-`N` matrices. Rhs is a tensor of shape `[..., M, K]` whose
  inner-most 2 dimensions form `M`-by-`K` matrices. The computed output is a
  `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `N`-by-`K`
  matrices that solve the equations
  `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares
  sense.
  Below we will use the following notation for each pair of matrix and
  right-hand sides in the batch:
  `matrix`=\\(A \in \Re^{m \times n}\\),
  `rhs`=\\(B \in \Re^{m \times k}\\),
  `output`=\\(X \in \Re^{n \times k}\\),
  `l2_regularizer`=\\(\lambda\\).
  If `fast` is `True`, then the solution is computed by solving the normal
  equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
  \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares
  problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 +
  \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
  \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is
  the minimum-norm solution to the under-determined linear system, i.e.
  \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to
  \\(A Z = B\\). Notice that the fast path is only numerically stable when
  \\(A\\) is numerically full rank and has a condition number
  \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or\\(\lambda\\)
  is sufficiently large.
  If `fast` is `False` an algorithm based on the numerically robust complete
  orthogonal decomposition is used. This computes the minimum-norm
  least-squares solution, even when \\(A\\) is rank deficient. This path is
  typically 6-7 times slower than the fast path. If `fast` is `False` then
  `l2_regularizer` is ignored.
  Args:
    matrix: `Tensor` of shape `[..., M, N]`.
    rhs: `Tensor` of shape `[..., M, K]`.
    l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
    fast: bool. Defaults to `True`.
    name: string, optional name of the operation.
  Returns:
    output: `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form
      `N`-by-`K` matrices that solve the equations
      `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least
      squares sense.
  Raises:
    NotImplementedError: matrix_solve_ls is currently disabled for complex128
    and l2_regularizer != 0 due to poor accuracy.
  """
  # pylint: disable=long-lambda
  def _use_composite_impl(fast, tensor_shape):
    """Determines whether to use the composite or specialized CPU kernel.
    When the total size of the tensor is larger than the cache size and the
    batch size is large compared to the smallest matrix dimension, then the
    composite implementation is inefficient since it has to read the entire
    tensor from memory multiple times. In this case we fall back to the
    original CPU kernel, which does all the computational steps on each
    matrix separately.
    Only fast mode is supported by the composite impl, so `False` is returned
    if `fast` is `False`.
    Args:
      fast: bool indicating if fast mode in the solver was requested.
      tensor_shape: The shape of the tensor.
    Returns:
      True if the composite impl should be used. False otherwise.
    """
    if fast is False:
      return False
    batch_shape = tensor_shape[:-2]
    matrix_shape = tensor_shape[-2:]
    if not tensor_shape.is_fully_defined():
      # Unknown static shape: we cannot evaluate the heuristic, so default to
      # the composite (GPU-capable) implementation.
      return True
    tensor_size = tensor_shape.num_elements() * matrix.dtype.size
    is_io_bound = batch_shape.num_elements() > np.min(matrix_shape)
    L2_CACHE_SIZE_GUESSTIMATE = 256000
    if tensor_size > L2_CACHE_SIZE_GUESSTIMATE and is_io_bound:
      return False
    else:
      return True
  def _overdetermined(matrix, rhs, l2_regularizer):
    """Computes (A^H*A + l2_regularizer)^{-1} * A^H * rhs."""
    chol = _RegularizedGramianCholesky(
        matrix, l2_regularizer=l2_regularizer, first_kind=True)
    return cholesky_solve(chol, math_ops.matmul(matrix, rhs, adjoint_a=True))
  def _underdetermined(matrix, rhs, l2_regularizer):
    """Computes A^H * (A*A^H + l2_regularizer)^{-1} * rhs."""
    chol = _RegularizedGramianCholesky(
        matrix, l2_regularizer=l2_regularizer, first_kind=False)
    return math_ops.matmul(matrix, cholesky_solve(chol, rhs), adjoint_a=True)
  def _composite_impl(matrix, rhs, l2_regularizer):
    """Composite implementation of matrix_solve_ls that supports GPU."""
    with ops.name_scope(name, 'matrix_solve_ls', [matrix, rhs, l2_regularizer]):
      matrix_shape = matrix.get_shape()[-2:]
      if matrix_shape.is_fully_defined():
        # M >= N selects the normal-equations (first kind) path; otherwise the
        # minimum-norm (second kind) path.
        if matrix_shape[-2] >= matrix_shape[-1]:
          return _overdetermined(matrix, rhs, l2_regularizer)
        else:
          return _underdetermined(matrix, rhs, l2_regularizer)
      else:
        # We have to defer determining the shape to runtime and use
        # conditional execution of the appropriate graph.
        matrix_shape = array_ops.shape(matrix)[-2:]
        return control_flow_ops.cond(
            matrix_shape[-2] >= matrix_shape[-1],
            lambda: _overdetermined(matrix, rhs, l2_regularizer),
            lambda: _underdetermined(matrix, rhs, l2_regularizer))
  matrix = ops.convert_to_tensor(matrix, name='matrix')
  if matrix.dtype == dtypes.complex128 and l2_regularizer != 0:
    # TODO(rmlarsen): Investigate and fix accuracy bug.
    raise NotImplementedError('matrix_solve_ls is currently disabled for '
                              'complex128 and l2_regularizer != 0 due to '
                              'poor accuracy.')
  tensor_shape = matrix.get_shape()
  # Dispatch: composite graph (GPU-capable) vs. the monolithic CPU kernel.
  if _use_composite_impl(fast, tensor_shape):
    return _composite_impl(matrix, rhs, l2_regularizer)
  else:
    return gen_linalg_ops.matrix_solve_ls(
        matrix, rhs, l2_regularizer, fast=fast, name=name)
@tf_export('self_adjoint_eig', 'linalg.eigh')
def self_adjoint_eig(tensor, name=None):
  """Computes the eigen decomposition of a batch of self-adjoint matrices.

  Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices
  in `tensor` such that
  `tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1.

  Args:
    tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of
      each inner inner matrix is referenced.
    name: string, optional name of the operation.

  Returns:
    e: Eigenvalues. Shape is `[..., N]`. Sorted in non-decreasing order.
    v: Eigenvectors. Shape is `[..., N, N]`. The columns of the inner most
      matrices contain eigenvectors of the corresponding matrices in `tensor`
  """
  # compute_v=True asks the kernel for eigenvectors as well as eigenvalues.
  eigenvalues, eigenvectors = gen_linalg_ops.self_adjoint_eig_v2(
      tensor, compute_v=True, name=name)
  return eigenvalues, eigenvectors
@tf_export('self_adjoint_eigvals', 'linalg.eigvalsh')
def self_adjoint_eigvals(tensor, name=None):
  """Computes the eigenvalues of one or more self-adjoint matrices.

  Note: If your program backpropagates through this function, you should replace
  it with a call to tf.self_adjoint_eig (possibly ignoring the second output) to
  avoid computing the eigen decomposition twice. This is because the
  eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See
  _SelfAdjointEigV2Grad in linalg_grad.py.

  Args:
    tensor: `Tensor` of shape `[..., N, N]`.
    name: string, optional name of the operation.

  Returns:
    e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N`
      eigenvalues of `tensor[..., :, :]`.
  """
  # compute_v=False skips the eigenvector computation; the kernel's second
  # output is discarded.
  eigenvalues, _ = gen_linalg_ops.self_adjoint_eig_v2(
      tensor, compute_v=False, name=name)
  return eigenvalues
@tf_export('svd', 'linalg.svd')
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
  r"""Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) *
   transpose(conj(v[..., :, :]))`

  ```python
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`. The values are sorted in reverse
      order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the
      second largest, etc.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.

  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.svd, except that
    * The order of output arguments here is `s`, `u`, `v` when `compute_uv` is
      `True`, as opposed to `u`, `s`, `v` for numpy.linalg.svd.
    * full_matrices is `False` by default as opposed to `True` for
       numpy.linalg.svd.
    * tf.linalg.svd uses the standard definition of the SVD
      \\(A = U \Sigma V^H\\), such that the left singular vectors of `a` are
      the columns of `u`, while the right singular vectors of `a` are the
      columns of `v`. On the other hand, numpy.linalg.svd returns the adjoint
      \\(V^H\\) as the third output argument.
  ```python
  import tensorflow as tf
  import numpy as np
  s, u, v = tf.linalg.svd(a)
  tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_v=True))
  u, s, v_adj = np.linalg.svd(a, full_matrices=False)
  np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj))
  # tf_a_approx and np_a_approx should be numerically close.
  ```
  @end_compatibility
  """
  singular_values, left_vectors, right_vectors = gen_linalg_ops.svd(
      tensor, compute_uv=compute_uv, full_matrices=full_matrices, name=name)
  # Singular values are mathematically real; math_ops.real strips any
  # imaginary component the kernel's output dtype may carry.
  real_singular_values = math_ops.real(singular_values)
  if not compute_uv:
    return real_singular_values
  return real_singular_values, left_vectors, right_vectors
# pylint: disable=redefined-builtin
@tf_export('norm', 'linalg.norm')
@deprecation.deprecated_args(
    None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')
def norm(tensor,
         ord='euclidean',
         axis=None,
         keepdims=None,
         name=None,
         keep_dims=None):
  r"""Computes the norm of vectors, matrices, and tensors.
  This function can compute several different vector norms (the 1-norm, the
  Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and
  matrix norms (Frobenius, 1-norm, and inf-norm).
  Args:
    tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
    ord: Order of the norm. Supported values are 'fro', 'euclidean',
      `1`, `2`, `np.inf` and any positive real number yielding the corresponding
      p-norm. Default is 'euclidean' which is equivalent to Frobenius norm if
      `tensor` is a matrix and equivalent to 2-norm for vectors.
      Some restrictions apply:
        a) The Frobenius norm `fro` is not defined for vectors,
        b) If axis is a 2-tuple (matrix norm), only 'euclidean', 'fro', `1`,
           `np.inf` are supported.
      See the description of `axis` on how to compute norms for a batch of
      vectors or matrices stored in a tensor.
    axis: If `axis` is `None` (the default), the input is considered a vector
      and a single vector norm is computed over the entire set of values in the
      tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
      `norm(reshape(tensor, [-1]), ord=ord)`.
      If `axis` is a Python integer, the input is considered a batch of vectors,
      and `axis` determines the axis in `tensor` over which to compute vector
      norms.
      If `axis` is a 2-tuple of Python integers it is considered a batch of
      matrices and `axis` determines the axes in `tensor` over which to compute
      a matrix norm.
      Negative indices are supported. Example: If you are passing a tensor that
      can be either a matrix or a batch of matrices at runtime, pass
      `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
      computed.
    keepdims: If True, the axis indicated in `axis` are kept with size 1.
      Otherwise, the dimensions in `axis` are removed from the output shape.
    name: The name of the op.
    keep_dims: Deprecated alias for `keepdims`.
  Returns:
    output: A `Tensor` of the same type as tensor, containing the vector or
      matrix norms. If `keepdims` is True then the rank of output is equal to
      the rank of `tensor`. Otherwise, if `axis` is none the output is a scalar,
      if `axis` is an integer, the rank of `output` is one less than the rank
      of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less
      than the rank of `tensor`.
  Raises:
    ValueError: If `ord` or `axis` is invalid.
  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.norm.
  Not supported: ord <= 0, 2-norm for matrices, nuclear norm.
  Other differences:
    a) If axis is `None`, treats the flattened `tensor` as a vector
     regardless of rank.
    b) Explicitly supports 'euclidean' norm as the default, including for
     higher order tensors.
  @end_compatibility
  """
  keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims,
                                                    'keep_dims', keep_dims)
  if keepdims is None:
    keepdims = False
  # A 2-element axis sequence selects a matrix norm; anything else (None or a
  # single int) is treated as a vector norm.
  is_matrix_norm = isinstance(axis, (tuple, list)) and len(axis) == 2
  if is_matrix_norm:
    axis = tuple(axis)
    if (not isinstance(axis[0], int) or not isinstance(axis[1], int) or
        axis[0] == axis[1]):
      raise ValueError(
          "'axis' must be None, an integer, or a tuple of 2 unique integers")
    # TODO(rmlarsen): Implement matrix 2-norm using tf.svd().
    supported_matrix_norms = ['euclidean', 'fro', 1, np.inf]
    if ord not in supported_matrix_norms:
      raise ValueError("'ord' must be a supported matrix norm in %s, got %s" %
                       (supported_matrix_norms, ord))
  else:
    if not (isinstance(axis, int) or axis is None):
      raise ValueError(
          "'axis' must be None, an integer, or a tuple of 2 unique integers")
    supported_vector_norms = ['euclidean', 1, 2, np.inf]
    if (not np.isreal(ord) or ord <= 0) and ord not in supported_vector_norms:
      raise ValueError("'ord' must be a supported vector norm, got %s" % ord)
    # Normalize a scalar axis to a 1-tuple so the reductions below can treat
    # the vector and matrix cases uniformly.
    if axis is not None:
      axis = (axis,)
  with ops.name_scope(name, 'norm', [tensor]):
    tensor = ops.convert_to_tensor(tensor)
    if ord in ['fro', 'euclidean', 2, 2.0]:
      # TODO(rmlarsen): Move 2-norm to a separate clause once we support it for
      # matrices.
      # sqrt(sum(|x|^2)); x * conj(x) keeps this correct for complex dtypes.
      result = math_ops.sqrt(
          math_ops.reduce_sum(
              tensor * math_ops.conj(tensor), axis, keepdims=True))
    else:
      result = math_ops.abs(tensor)
      if ord == 1:
        # Vector 1-norm: sum of |x|. Matrix 1-norm: max column sum, i.e. sum
        # over rows (axis[0]) then max over columns (axis[-1]).
        sum_axis = None if axis is None else axis[0]
        result = math_ops.reduce_sum(result, sum_axis, keepdims=True)
        if is_matrix_norm:
          result = math_ops.reduce_max(result, axis[-1], keepdims=True)
      elif ord == np.inf:
        # Vector inf-norm: max of |x|. Matrix inf-norm: max row sum, i.e. sum
        # over columns (axis[1]) then max over rows (axis[0]).
        if is_matrix_norm:
          result = math_ops.reduce_sum(result, axis[1], keepdims=True)
        max_axis = None if axis is None else axis[0]
        result = math_ops.reduce_max(result, max_axis, keepdims=True)
      else:
        # General p-norms (positive p only)
        result = math_ops.pow(
            math_ops.reduce_sum(math_ops.pow(result, ord), axis, keepdims=True),
            1.0 / ord)
    # All reductions above keep the reduced dims; drop them here if requested.
    if not keepdims:
      result = array_ops.squeeze(result, axis)
    return result
# pylint: enable=invalid-name,redefined-builtin
|
|
import os
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, clear_url_caches
from django.test import TestCase
from django.test.utils import override_settings
from django.template import Template, Context
from django.utils import translation
# Shared settings: a self-contained i18n environment (local locale/template
# dirs, three languages, LocaleMiddleware) so URL reversing/resolving behaves
# the same regardless of the developer's project settings.
@override_settings(
    USE_I18N=True,
    LOCALE_PATHS=(
        os.path.join(os.path.dirname(__file__), 'locale'),
    ),
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(__file__), 'templates'),
    ),
    LANGUAGE_CODE='en',
    LANGUAGES=(
        ('nl', 'Dutch'),
        ('en', 'English'),
        ('pt-br', 'Brazilian Portuguese'),
    ),
    MIDDLEWARE_CLASSES=(
        'django.middleware.locale.LocaleMiddleware',
        'django.middleware.common.CommonMiddleware',
    ),
)
class URLTestCaseBase(TestCase):
    """
    TestCase base-class for the URL tests.
    """
    # Default URLconf; subclasses override `urls` to exercise other patterns.
    urls = 'regressiontests.i18n.patterns.urls.default'
    def setUp(self):
        # Make sure the cache is empty before we are doing our tests.
        clear_url_caches()
    def tearDown(self):
        # Make sure we will leave an empty cache for other testcases.
        clear_url_caches()
class URLPrefixTests(URLTestCaseBase):
    """
    Tests if the `i18n_patterns` is adding the prefix correctly.
    """
    def test_not_prefixed(self):
        # URLs outside i18n_patterns never get a language prefix.
        for language in ('en', 'nl'):
            with translation.override(language):
                self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
    def test_prefixed(self):
        # URLs inside i18n_patterns are prefixed with the active language.
        for language in ('en', 'nl'):
            with translation.override(language):
                self.assertEqual(reverse('prefixed'), '/%s/prefixed/' % language)
    @override_settings(ROOT_URLCONF='regressiontests.i18n.patterns.urls.wrong')
    def test_invalid_prefix_use(self):
        self.assertRaises(ImproperlyConfigured, lambda: reverse('account:register'))
class URLDisabledTests(URLTestCaseBase):
    urls = 'regressiontests.i18n.patterns.urls.disabled'
    @override_settings(USE_I18N=False)
    def test_prefixed_i18n_disabled(self):
        # With i18n off, the language prefix must vanish for every language.
        for language in ('en', 'nl'):
            with translation.override(language):
                self.assertEqual(reverse('prefixed'), '/prefixed/')
class PathUnusedTests(URLTestCaseBase):
    """
    Check that if no i18n_patterns is used in root urlconfs, then no
    language activation happens based on url prefix.
    """
    urls = 'regressiontests.i18n.patterns.urls.path_unused'
    def test_no_lang_activate(self):
        # A '/nl/' path prefix must NOT activate Dutch here; the default
        # language ('en') stays in effect.
        resp = self.client.get('/nl/foo/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context['LANGUAGE_CODE'], 'en')
        self.assertEqual(resp['content-language'], 'en')
class URLTranslationTests(URLTestCaseBase):
    """
    Tests if the pattern-strings are translated correctly (within the
    `i18n_patterns` and the normal `patterns` function).
    """
    def test_no_prefix_translated(self):
        # (language, translated base path) for the unprefixed patterns.
        cases = (
            ('en', '/translated/'),
            ('nl', '/vertaald/'),
            ('pt-br', '/traduzidos/'),
        )
        for language, base in cases:
            with translation.override(language):
                self.assertEqual(reverse('no-prefix-translated'), base)
                self.assertEqual(
                    reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}),
                    base + 'yeah/')
    def test_users_url(self):
        # Prefixed patterns translate both the prefix and the path segment.
        cases = (
            ('en', '/en/users/'),
            ('nl', '/nl/gebruikers/'),
            ('pt-br', '/pt-br/usuarios/'),
        )
        for language, expected in cases:
            with translation.override(language):
                self.assertEqual(reverse('users'), expected)
class URLNamespaceTests(URLTestCaseBase):
    """
    Tests if the translations are still working within namespaces.
    """
    def test_account_register(self):
        expectations = (
            ('en', '/en/account/register/'),
            ('nl', '/nl/profiel/registeren/'),
        )
        for language, expected_url in expectations:
            with translation.override(language):
                self.assertEqual(reverse('account:register'), expected_url)
class URLRedirectTests(URLTestCaseBase):
    """
    Tests if the user gets redirected to the right URL when there is no
    language-prefix in the request URL.
    """
    def _assert_lang_redirect(self, path, accept_language, target):
        # Request `path` with the given Accept-Language, expect a redirect to
        # `target`, and verify the target itself serves a 200.
        response = self.client.get(path, HTTP_ACCEPT_LANGUAGE=accept_language)
        self.assertRedirects(response, target)
        followed = self.client.get(response['location'])
        self.assertEqual(followed.status_code, 200)
    def test_no_prefix_response(self):
        self.assertEqual(self.client.get('/not-prefixed/').status_code, 200)
    def test_en_redirect(self):
        self._assert_lang_redirect('/account/register/', 'en',
                                   '/en/account/register/')
    def test_en_redirect_wrong_url(self):
        # A Dutch path requested by an English client must not resolve.
        response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertEqual(response.status_code, 404)
    def test_nl_redirect(self):
        self._assert_lang_redirect('/profiel/registeren/', 'nl',
                                   '/nl/profiel/registeren/')
    def test_nl_redirect_wrong_url(self):
        # An English path requested by a Dutch client must not resolve.
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='nl')
        self.assertEqual(response.status_code, 404)
    def test_pt_br_redirect(self):
        self._assert_lang_redirect('/conta/registre-se/', 'pt-br',
                                   '/pt-br/conta/registre-se/')
class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase):
    """
    Tests the redirect when the requested URL doesn't end with a slash
    (`settings.APPEND_SLASH=True`).
    """
    def test_not_prefixed_redirect(self):
        resp = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(resp, '/not-prefixed/', 301)
    def test_en_redirect(self):
        resp = self.client.get('/account/register', HTTP_ACCEPT_LANGUAGE='en',
                               follow=True)
        # target status code of 301 because of CommonMiddleware redirecting
        self.assertIn(('http://testserver/en/account/register/', 301),
                      resp.redirect_chain)
        self.assertRedirects(resp, '/en/account/register/', 302)
class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase):
    """
    Tests the redirect when the requested URL doesn't end with a slash
    (`settings.APPEND_SLASH=False`).
    """
    @override_settings(APPEND_SLASH=False)
    def test_not_prefixed_redirect(self):
        # Without APPEND_SLASH there is nothing to redirect to: plain 404.
        resp = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
        self.assertEqual(resp.status_code, 404)
    @override_settings(APPEND_SLASH=False)
    def test_en_redirect(self):
        # The language redirect still happens; only the slash-append is off.
        resp = self.client.get('/account/register-without-slash',
                               HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(resp, '/en/account/register-without-slash', 302)
        followed = self.client.get(resp['location'])
        self.assertEqual(followed.status_code, 200)
class URLResponseTests(URLTestCaseBase):
    """
    Tests if the response has the right language-code.
    """
    def _assert_language(self, path, language):
        # `path` must serve a 200 tagged with `language` in both the
        # Content-Language header and the template context.
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-language'], language)
        self.assertEqual(response.context['LANGUAGE_CODE'], language)
    def test_not_prefixed_with_prefix(self):
        self.assertEqual(self.client.get('/en/not-prefixed/').status_code, 404)
    def test_en_url(self):
        self._assert_language('/en/account/register/', 'en')
    def test_nl_url(self):
        self._assert_language('/nl/profiel/registeren/', 'nl')
    def test_wrong_en_prefix(self):
        # Dutch path under the English prefix must not resolve.
        self.assertEqual(self.client.get('/en/profiel/registeren/').status_code, 404)
    def test_wrong_nl_prefix(self):
        # English path under the Dutch prefix must not resolve.
        self.assertEqual(self.client.get('/nl/account/register/').status_code, 404)
    def test_pt_br_url(self):
        self._assert_language('/pt-br/conta/registre-se/', 'pt-br')
class URLTagTests(URLTestCaseBase):
    """
    Test if the language tag works.
    """
    def test_strings_only(self):
        # Language names given as string literals in the template.
        t = Template("""{% load i18n %}
            {% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %}
            {% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
        self.assertEqual(t.render(Context({})).strip().split(),
                         [u'/vertaald/', u'/traduzidos/'])
    def test_context(self):
        # Language names resolved from context variables.
        ctx = Context({'lang1':'nl', 'lang2':'pt-br'})
        tpl = Template("""{% load i18n %}
            {% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %}
            {% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(ctx).strip().split(),
                         [u'/vertaald/', u'/traduzidos/'])
    def test_args(self):
        # {% url %} with a positional argument inside {% language %}.
        tpl = Template("""{% load i18n %}
            {% language 'nl' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
            {% language 'pt-br' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(Context({})).strip().split(),
                         [u'/vertaald/apo/', u'/traduzidos/apo/'])
    def test_kwargs(self):
        # {% url %} with a keyword argument inside {% language %}.
        tpl = Template("""{% load i18n %}
            {% language 'nl' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
            {% language 'pt-br' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(Context({})).strip().split(),
                         [u'/vertaald/apo/', u'/traduzidos/apo/'])
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/bibsian/Desktop/git/database-development/Views/ui_dialog_site.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 API 1 exposes QString; presumably absent under API 2 / Python 3
    # builds, where Qt strings map to native str -- the except path covers that.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # No QString.fromUtf8 available: pass strings through unchanged.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # QApplication.UnicodeUTF8 missing: fall back to the translate() overload
    # without an encoding argument.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(652, 628)
Dialog.setStyleSheet(_fromUtf8(".QLabel{\n"
" background: None;\n"
" padding: 0px;\n"
" margin: 0px;\n"
"}\n"
".QComboBox {\n"
" border: 1px solid gray;\n"
" border-radius: 7px;\n"
" padding: 2px;\n"
" padding-left: 15px;\n"
" background: #EEEEEE;\n"
"}\n"
".QFrame, .QWidget{\n"
" border-radius: 7;\n"
" background: white;\n"
"} \n"
"\n"
"\n"
"\n"
".QLineEdit{\n"
" padding: 1px;\n"
" border-style: solid;\n"
" border: 1px solid black;\n"
" border-radius: 8px;\n"
" margin: 0px;\n"
"}\n"
"\n"
".QPushButton {\n"
" color: black;\n"
" background: #EEEEEE;\n"
" border-width: 1px;\n"
" border-color: black;\n"
" border-style: solid;\n"
" border-radius: 7;\n"
" margin-top: 0px;\n"
" margin-left: 5px;\n"
" margin-right:5px; \n"
" padding-left: 5px;\n"
" padding-right: 5px;\n"
" padding-top: 3px;\n"
" padding-bottom: 3px;\n"
"}\n"
"\n"
"\n"
""))
self.verticalLayout_8 = QtGui.QVBoxLayout(Dialog)
self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
self.verticalLayout_7 = QtGui.QVBoxLayout()
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.frame = QtGui.QFrame(Dialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.frame)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setSpacing(15)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
spacerItem = QtGui.QSpacerItem(18, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem)
self.label_4 = QtGui.QLabel(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setTextFormat(QtCore.Qt.PlainText)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout_4.addWidget(self.label_4)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem1)
self.gridLayout.addLayout(self.horizontalLayout_4, 0, 0, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem2 = QtGui.QSpacerItem(18, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem2)
self.label = QtGui.QLabel(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
font.setKerning(True)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem3)
self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
spacerItem4 = QtGui.QSpacerItem(18, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem4)
self.lnedSiteID = QtGui.QLineEdit(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lnedSiteID.sizePolicy().hasHeightForWidth())
self.lnedSiteID.setSizePolicy(sizePolicy)
self.lnedSiteID.setFocusPolicy(QtCore.Qt.StrongFocus)
self.lnedSiteID.setObjectName(_fromUtf8("lnedSiteID"))
self.horizontalLayout_5.addWidget(self.lnedSiteID)
self.btnSiteID = QtGui.QPushButton(self.frame)
font = QtGui.QFont()
font.setPointSize(8)
self.btnSiteID.setFont(font)
self.btnSiteID.setObjectName(_fromUtf8("btnSiteID"))
self.horizontalLayout_5.addWidget(self.btnSiteID)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem5)
self.gridLayout.addLayout(self.horizontalLayout_5, 2, 0, 1, 1)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
spacerItem6 = QtGui.QSpacerItem(21, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem6)
self.ckCreate = QtGui.QCheckBox(self.frame)
self.ckCreate.setObjectName(_fromUtf8("ckCreate"))
self.horizontalLayout_6.addWidget(self.ckCreate)
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem7)
self.gridLayout.addLayout(self.horizontalLayout_6, 3, 0, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.splitter = QtGui.QSplitter(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.splitter.sizePolicy().hasHeightForWidth())
self.splitter.setSizePolicy(sizePolicy)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.layoutWidget_3 = QtGui.QWidget(self.splitter)
self.layoutWidget_3.setObjectName(_fromUtf8("layoutWidget_3"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.layoutWidget_3)
self.verticalLayout_3.setContentsMargins(5, -1, -1, -1)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem8)
self.label_2 = QtGui.QLabel(self.layoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout_3.addWidget(self.label_2)
spacerItem9 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem9)
self.verticalLayout_3.addLayout(self.horizontalLayout_3)
self.listviewSiteLabels = QtGui.QTableView(self.layoutWidget_3)
self.listviewSiteLabels.setFocusPolicy(QtCore.Qt.ClickFocus)
self.listviewSiteLabels.setFrameShape(QtGui.QFrame.WinPanel)
self.listviewSiteLabels.setFrameShadow(QtGui.QFrame.Plain)
self.listviewSiteLabels.setObjectName(_fromUtf8("listviewSiteLabels"))
self.verticalLayout_3.addWidget(self.listviewSiteLabels)
self.layoutWidget_2 = QtGui.QWidget(self.splitter)
self.layoutWidget_2.setObjectName(_fromUtf8("layoutWidget_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.layoutWidget_2)
self.verticalLayout_2.setContentsMargins(0, -1, 5, -1)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
spacerItem10 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem10)
self.label_3 = QtGui.QLabel(self.layoutWidget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_2.addWidget(self.label_3)
spacerItem11 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem11)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.tabviewDbSiteQuery = QtGui.QTableView(self.layoutWidget_2)
self.tabviewDbSiteQuery.setFocusPolicy(QtCore.Qt.ClickFocus)
self.tabviewDbSiteQuery.setFrameShape(QtGui.QFrame.WinPanel)
self.tabviewDbSiteQuery.setFrameShadow(QtGui.QFrame.Plain)
self.tabviewDbSiteQuery.setObjectName(_fromUtf8("tabviewDbSiteQuery"))
self.verticalLayout_2.addWidget(self.tabviewDbSiteQuery)
self.verticalLayout.addWidget(self.splitter)
self.verticalLayout_6.addLayout(self.verticalLayout)
self.verticalLayout_7.addWidget(self.frame)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.btnChange = QtGui.QPushButton(Dialog)
self.btnChange.setObjectName(_fromUtf8("btnChange"))
self.horizontalLayout_7.addWidget(self.btnChange)
self.btnUpdate = QtGui.QPushButton(Dialog)
self.btnUpdate.setEnabled(True)
self.btnUpdate.setFocusPolicy(QtCore.Qt.ClickFocus)
self.btnUpdate.setObjectName(_fromUtf8("btnUpdate"))
self.horizontalLayout_7.addWidget(self.btnUpdate)
spacerItem12 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem12)
self.btnSaveClose = QtGui.QPushButton(Dialog)
self.btnSaveClose.setFocusPolicy(QtCore.Qt.ClickFocus)
self.btnSaveClose.setObjectName(_fromUtf8("btnSaveClose"))
self.horizontalLayout_7.addWidget(self.btnSaveClose)
self.btnSkip = QtGui.QPushButton(Dialog)
self.btnSkip.setObjectName(_fromUtf8("btnSkip"))
self.horizontalLayout_7.addWidget(self.btnSkip)
self.verticalLayout_7.addLayout(self.horizontalLayout_7)
self.verticalLayout_8.addLayout(self.verticalLayout_7)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Site Information", None))
self.label_4.setText(_translate("Dialog", "Identify Site Information", None))
self.label.setText(_translate("Dialog", "Site Column", None))
self.btnSiteID.setText(_translate("Dialog", "Submit", None))
self.ckCreate.setText(_translate("Dialog", "Add Study Site Name", None))
self.label_2.setText(_translate("Dialog", "Site List", None))
self.label_3.setText(_translate("Dialog", "Site Database Query", None))
self.btnChange.setText(_translate("Dialog", "Change Labels", None))
self.btnUpdate.setText(_translate("Dialog", "Check Database", None))
self.btnSaveClose.setText(_translate("Dialog", "Save && Close", None))
self.btnSkip.setText(_translate("Dialog", "Canel", None))
|
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import time
import duplicity.backend
from duplicity import globals
from duplicity import log
from duplicity.errors import * #@UnusedWildImport
from duplicity.util import exception_traceback
from duplicity.backend import retry
from duplicity import progress
# Oldest boto release this backend is known to work with; checked in __init__.
BOTO_MIN_VERSION = "2.0"
class BotoBackend(duplicity.backend.Backend):
"""
Backend for Amazon's Simple Storage System, (aka Amazon S3), though
the use of the boto module, (http://code.google.com/p/boto/).
To make use of this backend you must set aws_access_key_id
and aws_secret_access_key in your ~/.boto or /etc/boto.cfg
with your Amazon Web Services key id and secret respectively.
Alternatively you can export the environment variables
AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
"""
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
import boto
assert boto.Version >= BOTO_MIN_VERSION
# This folds the null prefix and all null parts, which means that:
# //MyBucket/ and //MyBucket are equivalent.
# //MyBucket//My///My/Prefix/ and //MyBucket/My/Prefix are equivalent.
self.url_parts = filter(lambda x: x != '', parsed_url.path.split('/'))
if self.url_parts:
self.bucket_name = self.url_parts.pop(0)
else:
# Duplicity hangs if boto gets a null bucket name.
# HC: Caught a socket error, trying to recover
raise BackendException('Boto requires a bucket name.')
self.scheme = parsed_url.scheme
if self.url_parts:
self.key_prefix = '%s/' % '/'.join(self.url_parts)
else:
self.key_prefix = ''
self.straight_url = duplicity.backend.strip_auth_from_url(parsed_url)
self.parsed_url = parsed_url
# duplicity and boto.storage_uri() have different URI formats.
# boto uses scheme://bucket[/name] and specifies hostname on connect()
self.boto_uri_str = '://'.join((parsed_url.scheme[:2],
parsed_url.path.lstrip('/')))
self.storage_uri = boto.storage_uri(self.boto_uri_str)
self.resetConnection()
def resetConnection(self):
self.bucket = None
self.conn = None
try:
from boto.s3.connection import S3Connection
from boto.s3.key import Key
assert hasattr(S3Connection, 'lookup')
# Newer versions of boto default to using
# virtual hosting for buckets as a result of
# upstream deprecation of the old-style access
# method by Amazon S3. This change is not
# backwards compatible (in particular with
# respect to upper case characters in bucket
# names); so we default to forcing use of the
# old-style method unless the user has
# explicitly asked us to use new-style bucket
# access.
#
# Note that if the user wants to use new-style
# buckets, we use the subdomain calling form
# rather than given the option of both
# subdomain and vhost. The reason being that
# anything addressable as a vhost, is also
# addressable as a subdomain. Seeing as the
# latter is mostly a convenience method of
# allowing browse:able content semi-invisibly
# being hosted on S3, the former format makes
# a lot more sense for us to use - being
# explicit about what is happening (the fact
# that we are talking to S3 servers).
try:
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import SubdomainCallingFormat
cfs_supported = True
calling_format = OrdinaryCallingFormat()
except ImportError:
cfs_supported = False
calling_format = None
if globals.s3_use_new_style:
if cfs_supported:
calling_format = SubdomainCallingFormat()
else:
log.FatalError("Use of new-style (subdomain) S3 bucket addressing was"
"requested, but does not seem to be supported by the "
"boto library. Either you need to upgrade your boto "
"library or duplicity has failed to correctly detect "
"the appropriate support.",
log.ErrorCode.boto_old_style)
else:
if cfs_supported:
calling_format = OrdinaryCallingFormat()
else:
calling_format = None
except ImportError:
log.FatalError("This backend (s3) requires boto library, version %s or later, "
"(http://code.google.com/p/boto/)." % BOTO_MIN_VERSION,
log.ErrorCode.boto_lib_too_old)
if not self.parsed_url.hostname:
# Use the default host.
self.conn = self.storage_uri.connect(
is_secure=(not globals.s3_unencrypted_connection))
else:
assert self.scheme == 's3'
self.conn = self.storage_uri.connect(
host=self.parsed_url.hostname,
is_secure=(not globals.s3_unencrypted_connection))
if hasattr(self.conn, 'calling_format'):
if calling_format is None:
log.FatalError("It seems we previously failed to detect support for calling "
"formats in the boto library, yet the support is there. This is "
"almost certainly a duplicity bug.",
log.ErrorCode.boto_calling_format)
else:
self.conn.calling_format = calling_format
else:
# Duplicity hangs if boto gets a null bucket name.
# HC: Caught a socket error, trying to recover
raise BackendException('Boto requires a bucket name.')
self.bucket = self.conn.lookup(self.bucket_name)
def put(self, source_path, remote_filename=None):
from boto.s3.connection import Location
if globals.s3_european_buckets:
if not globals.s3_use_new_style:
log.FatalError("European bucket creation was requested, but not new-style "
"bucket addressing (--s3-use-new-style)",
log.ErrorCode.s3_bucket_not_style)
#Network glitch may prevent first few attempts of creating/looking up a bucket
for n in range(1, globals.num_retries+1):
if self.bucket:
break
if n > 1:
time.sleep(30)
try:
try:
self.bucket = self.conn.get_bucket(self.bucket_name, validate=True)
except Exception, e:
if "NoSuchBucket" in str(e):
if globals.s3_european_buckets:
self.bucket = self.conn.create_bucket(self.bucket_name,
location=Location.EU)
else:
self.bucket = self.conn.create_bucket(self.bucket_name)
else:
raise e
except Exception, e:
log.Warn("Failed to create bucket (attempt #%d) '%s' failed (reason: %s: %s)"
"" % (n, self.bucket_name,
e.__class__.__name__,
str(e)))
self.resetConnection()
if not remote_filename:
remote_filename = source_path.get_filename()
key = self.bucket.new_key(self.key_prefix + remote_filename)
for n in range(1, globals.num_retries+1):
if n > 1:
# sleep before retry (new connection to a **hopeful** new host, so no need to wait so long)
time.sleep(10)
if globals.s3_use_rrs:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = 'STANDARD'
log.Info("Uploading %s/%s to %s Storage" % (self.straight_url, remote_filename, storage_class))
try:
key.set_contents_from_filename(source_path.name, {'Content-Type': 'application/octet-stream',
'x-amz-storage-class': storage_class},
cb=progress.report_transfer,
num_cb=(max(2, 8 * globals.volsize / (1024 * 1024)))
) # Max num of callbacks = 8 times x megabyte
key.close()
self.resetConnection()
return
except Exception, e:
log.Warn("Upload '%s/%s' failed (attempt #%d, reason: %s: %s)"
"" % (self.straight_url,
remote_filename,
n,
e.__class__.__name__,
str(e)))
log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
self.resetConnection()
log.Warn("Giving up trying to upload %s/%s after %d attempts" %
(self.straight_url, remote_filename, globals.num_retries))
raise BackendException("Error uploading %s/%s" % (self.straight_url, remote_filename))
def get(self, remote_filename, local_path):
for n in range(1, globals.num_retries+1):
if n > 1:
# sleep before retry (new connection to a **hopeful** new host, so no need to wait so long)
time.sleep(10)
log.Info("Downloading %s/%s" % (self.straight_url, remote_filename))
try:
key_name = self.key_prefix + remote_filename
key = self.bucket.get_key(key_name)
if key is None:
raise BackendException("%s: key not found" % key_name)
key.get_contents_to_filename(local_path.name)
local_path.setdata()
self.resetConnection()
return
except Exception, e:
log.Warn("Download %s/%s failed (attempt #%d, reason: %s: %s)"
"" % (self.straight_url,
remote_filename,
n,
e.__class__.__name__,
str(e)), 1)
log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
self.resetConnection()
log.Warn("Giving up trying to download %s/%s after %d attempts" %
(self.straight_url, remote_filename, globals.num_retries))
raise BackendException("Error downloading %s/%s" % (self.straight_url, remote_filename))
def _list(self):
if not self.bucket:
raise BackendException("No connection to backend")
for n in range(1, globals.num_retries+1):
if n > 1:
# sleep before retry
time.sleep(30)
log.Info("Listing %s" % self.straight_url)
try:
return self._list_filenames_in_bucket()
except Exception, e:
log.Warn("List %s failed (attempt #%d, reason: %s: %s)"
"" % (self.straight_url,
n,
e.__class__.__name__,
str(e)), 1)
log.Debug("Backtrace of previous error: %s" % (exception_traceback(),))
log.Warn("Giving up trying to list %s after %d attempts" %
(self.straight_url, globals.num_retries))
raise BackendException("Error listng %s" % self.straight_url)
def _list_filenames_in_bucket(self):
# We add a 'd' to the prefix to make sure it is not null (for boto) and
# to optimize the listing of our filenames, which always begin with 'd'.
# This will cause a failure in the regression tests as below:
# FAIL: Test basic backend operations
# <tracback snipped>
# AssertionError: Got list: []
# Wanted: ['testfile']
# Because of the need for this optimization, it should be left as is.
#for k in self.bucket.list(prefix = self.key_prefix + 'd', delimiter = '/'):
filename_list = []
for k in self.bucket.list(prefix = self.key_prefix, delimiter = '/'):
try:
filename = k.key.replace(self.key_prefix, '', 1)
filename_list.append(filename)
log.Debug("Listed %s/%s" % (self.straight_url, filename))
except AttributeError:
pass
return filename_list
def delete(self, filename_list):
for filename in filename_list:
self.bucket.delete_key(self.key_prefix + filename)
log.Debug("Deleted %s/%s" % (self.straight_url, filename))
@retry
def _query_file_info(self, filename, raise_errors=False):
try:
key = self.bucket.lookup(self.key_prefix + filename)
if key is None:
return {'size': -1}
return {'size': key.size}
except Exception, e:
log.Warn("Query %s/%s failed: %s"
"" % (self.straight_url,
filename,
str(e)))
self.resetConnection()
if raise_errors:
raise e
else:
return {'size': None}
# Register this backend for all three schemes boto.storage_uri can serve:
# Google Storage ("gs") and Amazon S3 via HTTPS ("s3") or plain HTTP ("s3+http").
duplicity.backend.register_backend("gs", BotoBackend)
duplicity.backend.register_backend("s3", BotoBackend)
duplicity.backend.register_backend("s3+http", BotoBackend)
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The ActionChains implementation,
"""
import time
from selenium.webdriver.remote.command import Command
from .utils import keys_to_typing
from .actions.action_builder import ActionBuilder
class ActionChains(object):
    """
    ActionChains are a way to automate low level interactions such as
    mouse movements, mouse button actions, key press, and context menu interactions.
    This is useful for doing more complex actions like hover over and drag and drop.

    Generate user actions.
    When you call methods for actions on the ActionChains object,
    the actions are stored in a queue in the ActionChains object.
    When you call perform(), the events are fired in the order they
    are queued up.

    ActionChains can be used in a chain pattern::

        menu = driver.find_element_by_css_selector(".nav")
        hidden_submenu = driver.find_element_by_css_selector(".nav #submenu1")
        ActionChains(driver).move_to_element(menu).click(hidden_submenu).perform()

    Or actions can be queued up one by one, then performed.::

        menu = driver.find_element_by_css_selector(".nav")
        hidden_submenu = driver.find_element_by_css_selector(".nav #submenu1")
        actions = ActionChains(driver)
        actions.move_to_element(menu)
        actions.click(hidden_submenu)
        actions.perform()

    Either way, the actions are performed in the order they are called, one after
    another.

    NOTE: every builder method below has two code paths — a W3C path that
    queues events on self.w3c_actions (an ActionBuilder) and a legacy path
    that appends lambdas to self._actions for replay in perform(). The extra
    key_action.pause()/pointer_action.pause() calls on the W3C path appear to
    keep the two input sources the same length; their exact counts per method
    are significant, so do not change them — TODO confirm against the W3C
    Actions specification.
    """

    def __init__(self, driver):
        """
        Creates a new ActionChains.

        :Args:
         - driver: The WebDriver instance which performs user actions.
        """
        self._driver = driver
        # Legacy (non-W3C) path: queued callables, replayed by perform().
        self._actions = []
        if self._driver.w3c:
            # W3C path: a dedicated builder accumulates the action sequence.
            # NOTE(review): w3c_actions is only created when driver.w3c is
            # true; every method guards on the same flag before using it.
            self.w3c_actions = ActionBuilder(driver)

    def perform(self):
        """
        Performs all stored actions.
        """
        if self._driver.w3c:
            self.w3c_actions.perform()
        else:
            # Replay the queued lambdas in insertion order.
            for action in self._actions:
                action()

    def reset_actions(self):
        """
        Clears actions that are already stored on the remote end.
        """
        if self._driver.w3c:
            self._driver.execute(Command.W3C_CLEAR_ACTIONS)
        else:
            self._actions = []

    def click(self, on_element=None):
        """
        Clicks an element.

        :Args:
         - on_element: The element to click.
           If None, clicks on current mouse position.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.click(on_element)
            # Two key pauses — presumably matching the two pointer events
            # (down + up) a click expands to; TODO confirm.
            self.w3c_actions.key_action.pause()
            self.w3c_actions.key_action.pause()
        else:
            if on_element:
                self.move_to_element(on_element)
            self._actions.append(lambda: self._driver.execute(
                Command.CLICK, {'button': 0}))
        return self

    def click_and_hold(self, on_element=None):
        """
        Holds down the left mouse button on an element.

        :Args:
         - on_element: The element to mouse down.
           If None, clicks on current mouse position.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.click_and_hold(on_element)
            self.w3c_actions.key_action.pause()
            # Extra pause when a target element is given — presumably because
            # the pointer then also emits a move event; TODO confirm.
            if on_element:
                self.w3c_actions.key_action.pause()
        else:
            if on_element:
                self.move_to_element(on_element)
            self._actions.append(lambda: self._driver.execute(
                Command.MOUSE_DOWN, {}))
        return self

    def context_click(self, on_element=None):
        """
        Performs a context-click (right click) on an element.

        :Args:
         - on_element: The element to context-click.
           If None, clicks on current mouse position.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.context_click(on_element)
            self.w3c_actions.key_action.pause()
        else:
            if on_element:
                self.move_to_element(on_element)
            # Legacy protocol: button 2 is the right mouse button.
            self._actions.append(lambda: self._driver.execute(
                Command.CLICK, {'button': 2}))
        return self

    def double_click(self, on_element=None):
        """
        Double-clicks an element.

        :Args:
         - on_element: The element to double-click.
           If None, clicks on current mouse position.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.double_click(on_element)
            # Four key pauses — presumably one per pointer event of the
            # double click (down/up twice); TODO confirm.
            for _ in range(4):
                self.w3c_actions.key_action.pause()
        else:
            if on_element:
                self.move_to_element(on_element)
            self._actions.append(lambda: self._driver.execute(
                Command.DOUBLE_CLICK, {}))
        return self

    def drag_and_drop(self, source, target):
        """
        Holds down the left mouse button on the source element,
        then moves to the target element and releases the mouse button.

        :Args:
         - source: The element to mouse down.
         - target: The element to mouse up.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.click_and_hold(source) \
                            .move_to(target) \
                            .release()
            # Three key pauses — one per queued pointer event above.
            for _ in range(3):
                self.w3c_actions.key_action.pause()
        else:
            self.click_and_hold(source)
            self.release(target)
        return self

    def drag_and_drop_by_offset(self, source, xoffset, yoffset):
        """
        Holds down the left mouse button on the source element,
        then moves to the target offset and releases the mouse button.

        :Args:
         - source: The element to mouse down.
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.click_and_hold(source) \
                            .move_by(xoffset, yoffset) \
                            .release()
            # Three key pauses — one per queued pointer event above.
            for _ in range(3):
                self.w3c_actions.key_action.pause()
        else:
            self.click_and_hold(source)
            self.move_by_offset(xoffset, yoffset)
            self.release()
        return self

    def key_down(self, value, element=None):
        """
        Sends a key press only, without releasing it.
        Should only be used with modifier keys (Control, Alt and Shift).

        :Args:
         - value: The modifier key to send. Values are defined in `Keys` class.
         - element: The element to send keys.
           If None, sends a key to current focused element.

        Example, pressing ctrl+c::

            ActionChains(driver).key_down(Keys.CONTROL).send_keys('c').key_up(Keys.CONTROL).perform()
        """
        if element:
            # Click first to focus the target element.
            self.click(element)
        if self._driver.w3c:
            self.w3c_actions.key_action.key_down(value)
            self.w3c_actions.pointer_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.SEND_KEYS_TO_ACTIVE_ELEMENT,
                {"value": keys_to_typing(value)}))
        return self

    def key_up(self, value, element=None):
        """
        Releases a modifier key.

        :Args:
         - value: The modifier key to send. Values are defined in Keys class.
         - element: The element to send keys.
           If None, sends a key to current focused element.

        Example, pressing ctrl+c::

            ActionChains(driver).key_down(Keys.CONTROL).send_keys('c').key_up(Keys.CONTROL).perform()
        """
        if element:
            # Click first to focus the target element.
            self.click(element)
        if self._driver.w3c:
            self.w3c_actions.key_action.key_up(value)
            self.w3c_actions.pointer_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.SEND_KEYS_TO_ACTIVE_ELEMENT,
                {"value": keys_to_typing(value)}))
        return self

    def move_by_offset(self, xoffset, yoffset):
        """
        Moving the mouse to an offset from current mouse position.

        :Args:
         - xoffset: X offset to move to, as a positive or negative integer.
         - yoffset: Y offset to move to, as a positive or negative integer.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.move_by(xoffset, yoffset)
            self.w3c_actions.key_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.MOVE_TO, {
                    'xoffset': int(xoffset),
                    'yoffset': int(yoffset)}))
        return self

    def move_to_element(self, to_element):
        """
        Moving the mouse to the middle of an element.

        :Args:
         - to_element: The WebElement to move to.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.move_to(to_element)
            self.w3c_actions.key_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.MOVE_TO, {'element': to_element.id}))
        return self

    def move_to_element_with_offset(self, to_element, xoffset, yoffset):
        """
        Move the mouse by an offset of the specified element.
        Offsets are relative to the top-left corner of the element.

        :Args:
         - to_element: The WebElement to move to.
         - xoffset: X offset to move to.
         - yoffset: Y offset to move to.
        """
        if self._driver.w3c:
            self.w3c_actions.pointer_action.move_to(to_element, xoffset, yoffset)
            self.w3c_actions.key_action.pause()
        else:
            self._actions.append(
                lambda: self._driver.execute(Command.MOVE_TO, {
                    'element': to_element.id,
                    'xoffset': int(xoffset),
                    'yoffset': int(yoffset)}))
        return self

    def pause(self, seconds):
        """ Pause all inputs for the specified duration in seconds """
        if self._driver.w3c:
            # Pause both input sources so they stay in lockstep.
            self.w3c_actions.pointer_action.pause(seconds)
            self.w3c_actions.key_action.pause(seconds)
        else:
            # Legacy path pauses locally rather than on the remote end.
            self._actions.append(lambda: time.sleep(seconds))
        return self

    def release(self, on_element=None):
        """
        Releasing a held mouse button on an element.

        :Args:
         - on_element: The element to mouse up.
           If None, releases on current mouse position.
        """
        if on_element:
            self.move_to_element(on_element)
        if self._driver.w3c:
            self.w3c_actions.pointer_action.release()
            self.w3c_actions.key_action.pause()
        else:
            self._actions.append(lambda: self._driver.execute(Command.MOUSE_UP, {}))
        return self

    def send_keys(self, *keys_to_send):
        """
        Sends keys to current focused element.

        :Args:
         - keys_to_send: The keys to send.  Modifier keys constants can be found in the
           'Keys' class.
        """
        if self._driver.w3c:
            self.w3c_actions.key_action.send_keys(keys_to_send)
        else:
            self._actions.append(lambda: self._driver.execute(
                Command.SEND_KEYS_TO_ACTIVE_ELEMENT, {'value': keys_to_typing(keys_to_send)}))
        return self

    def send_keys_to_element(self, element, *keys_to_send):
        """
        Sends keys to an element.

        :Args:
         - element: The element to send keys.
         - keys_to_send: The keys to send.  Modifier keys constants can be found in the
           'Keys' class.
        """
        if self._driver.w3c:
            self.w3c_actions.key_action.send_keys(keys_to_send, element=element)
        else:
            self._actions.append(lambda: element.send_keys(*keys_to_send))
        return self

    # Context manager so ActionChains can be used in a 'with .. as' statements.
    def __enter__(self):
        return self  # Return created instance of self.

    def __exit__(self, _type, _value, _traceback):
        pass  # Do nothing, does not require additional cleanup.
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
    T = TypeVar('T')
    # Signature of the optional ``cls`` response hook each operation accepts:
    # (pipeline response, deserialized body, response headers) -> anything.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitsOperations(object):
"""ExpressRouteCircuitsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, serializers and client configuration.

        Instantiated by the generated service client, not by user code.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the raw DELETE request for an express route circuit.

        Internal helper behind ``begin_delete``: performs one non-polling
        request and raises HttpResponseError on any unexpected status.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Statuses the caller-supplied error_map can override are merged in.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are the accepted responses for a delete; anything else
        # is mapped through error_map and raised as an ARM-formatted error.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # NOTE: the pops below must happen before the initial call so these
        # keywords are not forwarded to the pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial DELETE when starting fresh; a continuation
        # token means we are resuming an already-started operation.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                # This cls makes the initial call return the raw
                # PipelineResponse, which the poller needs to start polling.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request (if any); drop so they are not
        # passed on to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # The delete operation has no body; only invoke a custom cls.
            if cls:
                return cls(pipeline_response, None, {})

        # Forwarded to ARMPolling so it can format any polling URLs it
        # follows with the same path parameters.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARMPolling; False -> no polling; any other
        # value is treated as a user-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuit"
"""Gets information about the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuit, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuit
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
parameters, # type: "_models.ExpressRouteCircuit"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuit"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRouteCircuit')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        parameters,  # type: "_models.ExpressRouteCircuit"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ExpressRouteCircuit"]
        """Creates or updates an express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the circuit.
        :type circuit_name: str
        :param parameters: Parameters supplied to the create or update express route circuit operation.
        :type parameters: ~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuit
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExpressRouteCircuit or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuit]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-control keywords before forwarding kwargs to the pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                parameters=parameters,
                # Return the raw PipelineResponse so the poller can use it.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; don't forward to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the circuit model.
            deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Forwarded to ARMPolling for formatting any polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # True -> default ARMPolling; False -> NoPolling; else a user object.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuit"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
    def begin_update_tags(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ExpressRouteCircuit"]
        """Updates an express route circuit tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the circuit.
        :type circuit_name: str
        :param parameters: Parameters supplied to update express route circuit tags.
        :type parameters: ~azure.mgmt.network.v2018_08_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExpressRouteCircuit or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuit]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-control keywords before forwarding kwargs to the pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PATCH when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                parameters=parameters,
                # Return the raw PipelineResponse so the poller can use it.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; don't forward to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the circuit model.
            deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Forwarded to ARMPolling for formatting any polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # True -> default ARMPolling; False -> NoPolling; else a user object.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
def _list_arp_table_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ExpressRouteCircuitsArpTableListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsArpTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self._list_arp_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_arp_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
    def begin_list_arp_table(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        device_path,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ExpressRouteCircuitsArpTableListResult"]
        """Gets the currently advertised ARP table associated with the express route circuit in a resource
        group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExpressRouteCircuitsArpTableListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuitsArpTableListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-control keywords before forwarding kwargs to the pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsArpTableListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial POST when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._list_arp_table_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                peering_name=peering_name,
                device_path=device_path,
                # Return the raw PipelineResponse so the poller can use it.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; don't forward to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the result model.
            deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Forwarded to ARMPolling for formatting any polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # True -> default ARMPolling; False -> NoPolling; else a user object.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'}  # type: ignore
def _list_routes_table_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self._list_routes_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
    def begin_list_routes_table(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        device_path,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ExpressRouteCircuitsRoutesTableListResult"]
        """Gets the currently advertised routes table associated with the express route circuit in a
        resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExpressRouteCircuitsRoutesTableListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuitsRoutesTableListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-control keywords before forwarding kwargs to the pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsRoutesTableListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial POST when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._list_routes_table_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                peering_name=peering_name,
                device_path=device_path,
                # Return the raw PipelineResponse so the poller can use it.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; don't forward to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the result model.
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Forwarded to ARMPolling for formatting any polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # True -> default ARMPolling; False -> NoPolling; else a user object.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'}  # type: ignore
def _list_routes_table_summary_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
device_path, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self._list_routes_table_summary_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_summary_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
    def begin_list_routes_table_summary(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        device_path,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
        """Gets the currently advertised routes table summary associated with the express route circuit in
        a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExpressRouteCircuitsRoutesTableSummaryListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-control keywords before forwarding kwargs to the pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial POST when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._list_routes_table_summary_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                peering_name=peering_name,
                device_path=device_path,
                # Return the raw PipelineResponse so the poller can use it.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; don't forward to polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the result model.
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Forwarded to ARMPolling for formatting any polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # True -> default ARMPolling; False -> NoPolling; else a user object.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}  # type: ignore
def get_stats(
self,
resource_group_name, # type: str
circuit_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitStats"
"""Gets all the stats from an express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitStats, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuitStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitStats"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get_stats.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats'} # type: ignore
def get_peering_stats(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitStats"
"""Gets all stats from an express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitStats, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuitStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitStats"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get_peering_stats.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_peering_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats'} # type: ignore
    def list(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ExpressRouteCircuitListResult"]
        """Gets all the express route circuits in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuitListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitListResult"]
        # Map well-known HTTP failure codes onto azure-core exceptions;
        # callers may extend/override the mapping via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Builds the GET request: the templated list URL for the first
            # page, or the service-provided 'next_link' URL for later pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The next_link URL already carries its own query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserializes one page; returns (link to next page or None, items).
            deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetches a single page, raising a mapped error for non-200 replies.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits'}  # type: ignore
    def list_all(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ExpressRouteCircuitListResult"]
        """Gets all the express route circuits in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.ExpressRouteCircuitListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitListResult"]
        # Map well-known HTTP failure codes onto azure-core exceptions;
        # callers may extend/override the mapping via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Builds the GET request: the templated list URL for the first
            # page, or the service-provided 'next_link' URL for later pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The next_link URL already carries its own query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserializes one page; returns (link to next page or None, items).
            deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetches a single page, raising a mapped error for non-200 replies.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits'}  # type: ignore
|
|
#
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Base class for targets."""
import json
import os
from time import sleep
from .. import host_tests_plugins as ht_plugins
from mbed_lstools.main import create
from .. import DEFAULT_BAUD_RATE
from ..host_tests_logger import HtrunLogger
class TargetBase:
    """TargetBase class for a host driven test.

    This class stores information necessary to communicate with the device
    under test. It is responsible for managing serial port communication
    between the host and the device.
    """

    def __init__(self, options):
        """Initialise common target attributes.

        Args:
            options: Parsed command-line options; attributes read here include
                port, micro, disk, target_id, image_path, copy_method,
                retry_copy, program_cycle_s, polling_timeout, baud_rate and
                json_test_configuration.
        """
        self.options = options
        self.logger = HtrunLogger("Greentea")
        # Options related to copy / reset the connected target device
        self.port = self.options.port
        self.mcu = self.options.micro
        self.disk = self.options.disk
        self.target_id = self.options.target_id
        # Strip surrounding double quotes a shell may have left on the path.
        self.image_path = (
            self.options.image_path.strip('"')
            if self.options.image_path is not None
            else ""
        )
        self.copy_method = self.options.copy_method
        self.retry_copy = self.options.retry_copy
        # Seconds to wait after flashing before the device is assumed ready.
        self.program_cycle_s = float(
            self.options.program_cycle_s
            if self.options.program_cycle_s is not None
            else 2.0
        )
        self.polling_timeout = self.options.polling_timeout
        # Serial port settings
        self.serial_baud = DEFAULT_BAUD_RATE
        self.serial_timeout = 1
        # Users can use command to pass port speeds together with port name. E.g.
        # COM4:115200:1
        # Format is PORT:SPEED:TIMEOUT
        # Fix: fall back to an empty list (not "") so port_config is always a
        # list, matching the len() checks below.
        port_config = self.port.split(":") if self.port else []
        if len(port_config) == 2:
            # -p COM4:115200
            self.port = port_config[0]
            self.serial_baud = int(port_config[1])
        elif len(port_config) == 3:
            # -p COM4:115200:0.5
            self.port = port_config[0]
            self.serial_baud = int(port_config[1])
            self.serial_timeout = float(port_config[2])
        # Overriding baud rate value with command line specified value
        self.serial_baud = (
            self.options.baud_rate if self.options.baud_rate else self.serial_baud
        )
        # Test configuration in JSON format
        self.test_cfg = None
        if self.options.json_test_configuration is not None:
            # We need to normalize path before we open file
            json_test_configuration_path = self.options.json_test_configuration.strip(
                "\"'"
            )
            try:
                self.logger.prn_inf(
                    "Loading test configuration from '%s'..."
                    % json_test_configuration_path
                )
                with open(json_test_configuration_path) as data_file:
                    self.test_cfg = json.load(data_file)
            except IOError as e:
                # I/O problems are reported but not fatal: test_cfg stays None.
                self.logger.prn_err(
                    "Test configuration JSON file '{0}' I/O error({1}): {2}".format(
                        json_test_configuration_path, e.errno, e.strerror
                    )
                )
            except Exception as e:
                # NOTE(review): prn_err is called with two positional args here
                # and below — confirm HtrunLogger.prn_err accepts a second one.
                self.logger.prn_err("Test configuration JSON Unexpected error:", str(e))
                raise

    def copy_image(
        self,
        image_path=None,
        disk=None,
        copy_method=None,
        port=None,
        mcu=None,
        retry_copy=5,
    ):
        """Copy an image to a target.

        Args:
            image_path: Path of the firmware image; defaults to self.image_path.
            disk: Destination mount point; defaults to self.disk.
            copy_method: Copy plugin name; defaults to self.copy_method.
            port: Serial port name; defaults to self.port.
            mcu: MCU name; defaults to self.mcu.
            retry_copy: Number of copy attempts; falls back to self.retry_copy
                when falsy.

        Returns:
            True if the copy succeeded, otherwise False.
        """

        def get_remount_count(disk_path, tries=2):
            """Get the remount count from 'DETAILS.TXT' file.

            Returns:
                Remount count, or None if not available.
            """
            # In case of no disk path, nothing to do
            if disk_path is None:
                return None
            for cur_try in range(1, tries + 1):
                try:
                    files_on_disk = [x.upper() for x in os.listdir(disk_path)]
                    if "DETAILS.TXT" in files_on_disk:
                        with open(
                            os.path.join(disk_path, "DETAILS.TXT"), "r"
                        ) as details_txt:
                            for line in details_txt.readlines():
                                if "Remount count:" in line:
                                    return int(line.replace("Remount count: ", ""))
                        # Remount count not found in file
                        return None
                    # 'DETAILS.TXT' file not found
                    else:
                        return None
                except OSError as e:
                    self.logger.prn_err(
                        "Failed to get remount count due to OSError.", str(e)
                    )
                    self.logger.prn_inf(
                        "Retrying in 1 second (try %s of %s)" % (cur_try, tries)
                    )
                    sleep(1)
            # Failed to get remount count
            return None

        def check_flash_error(target_id, disk, initial_remount_count):
            """Check for flash errors.

            Returns:
                False if FAIL.TXT present, else True.
            """
            if not target_id:
                self.logger.prn_wrn(
                    "Target ID not found: Skipping flash check and retry"
                )
                return True
            if copy_method not in ["shell", "default"]:
                # We're using a "copy method" that may not necessarily require
                # an "Mbed Enabled" device. In this case we shouldn't use
                # mbedls.detect to attempt to rediscover the mount point, as
                # mbedls.detect is only compatible with Mbed Enabled devices.
                # It's best just to return `True` and continue here. This will
                # avoid the inevitable 2.5s delay caused by us repeatedly
                # attempting to enumerate Mbed Enabled devices in the code
                # below when none are connected. The user has specified a
                # non-Mbed plugin copy method, so we shouldn't delay them by
                # trying to check for Mbed Enabled devices.
                return True
            bad_files = set(["FAIL.TXT"])
            # Re-try at max 5 times with 0.5 sec in delay
            for i in range(5):
                # mbed_lstools.main.create() should be done inside the loop. Otherwise
                # it will loop on same data.
                mbeds = create()
                mbed_list = mbeds.list_mbeds()  # list of mbeds present
                # get first item in list with a matching target_id, if present
                mbed_target = next(
                    (x for x in mbed_list if x["target_id"] == target_id), None
                )
                if mbed_target is not None:
                    if (
                        "mount_point" in mbed_target
                        and mbed_target["mount_point"] is not None
                    ):
                        if initial_remount_count is not None:
                            # If the board has not remounted yet the filesystem
                            # listing may be stale; wait and poll again.
                            new_remount_count = get_remount_count(disk)
                            if (
                                new_remount_count is not None
                                and new_remount_count == initial_remount_count
                            ):
                                sleep(0.5)
                                continue
                        common_items = []
                        try:
                            items = set(
                                [
                                    x.upper()
                                    for x in os.listdir(mbed_target["mount_point"])
                                ]
                            )
                            common_items = bad_files.intersection(items)
                        except OSError:
                            print("Failed to enumerate disk files, retrying")
                            continue
                        for common_item in common_items:
                            full_path = os.path.join(
                                mbed_target["mount_point"], common_item
                            )
                            self.logger.prn_err("Found %s" % (full_path))
                            bad_file_contents = "[failed to read bad file]"
                            try:
                                with open(full_path, "r") as bad_file:
                                    bad_file_contents = bad_file.read()
                            except IOError as error:
                                self.logger.prn_err(
                                    "Error opening '%s': %s" % (full_path, error)
                                )
                            self.logger.prn_err(
                                "Error file contents:\n%s" % bad_file_contents
                            )
                        if common_items:
                            return False
                sleep(0.5)
            return True

        # Set-up closure environment
        if not image_path:
            image_path = self.image_path
        if not disk:
            disk = self.disk
        if not copy_method:
            copy_method = self.copy_method
        if not port:
            port = self.port
        if not mcu:
            mcu = self.mcu
        if not retry_copy:
            retry_copy = self.retry_copy
        target_id = self.target_id
        if not image_path:
            self.logger.prn_err("Error: image path not specified")
            return False
        if not os.path.isfile(image_path):
            self.logger.prn_err("Error: image file (%s) not found" % image_path)
            return False
        # Fix: initialise 'result' so the final return cannot raise
        # UnboundLocalError when retry_copy resolves to 0 or a negative value.
        result = False
        for count in range(0, retry_copy):
            initial_remount_count = get_remount_count(disk)
            # Call proper copy method
            result = self.copy_image_raw(image_path, disk, copy_method, port, mcu)
            sleep(self.program_cycle_s)
            if not result:
                continue
            result = check_flash_error(target_id, disk, initial_remount_count)
            if result:
                break
        return result

    def copy_image_raw(
        self, image_path=None, disk=None, copy_method=None, port=None, mcu=None
    ):
        """Copy a firmware image to disk with the given copy_method.

        Handles exception and return code from shell copy commands.

        Args:
            image_path: Path to the firmware image to copy/flash.
            disk: Destination path for the firmware image.
            copy_method: Copy plugin name to use.
            port: Serial COM port.
            mcu: Name of the MCU being targeted.

        Returns:
            True if copy succeeded, otherwise False.
        """
        # image_path - Where is binary with target's firmware
        # Select copy_method
        # We override 'default' method with 'shell' method
        copy_method = {
            None: "shell",
            "default": "shell",
        }.get(copy_method, copy_method)
        result = ht_plugins.call_plugin(
            "CopyMethod",
            copy_method,
            image_path=image_path,
            mcu=mcu,
            serial=port,
            destination_disk=disk,
            target_id=self.target_id,
            pooling_timeout=self.polling_timeout,
            format=self.options.format,
        )
        return result

    def hw_reset(self):
        """Perform hardware reset of target device.

        Returns:
            True if the reset succeeded, otherwise False.
        """
        device_info = {}
        result = ht_plugins.call_plugin(
            "ResetMethod",
            "power_cycle",
            target_id=self.target_id,
            device_info=device_info,
            format=self.options.format,
        )
        if result:
            # The plugin fills device_info with the (possibly new) serial port
            # and mount point after the power cycle.
            self.port = device_info["serial_port"]
            self.disk = device_info["mount_point"]
        return result
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations to emit summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.summary import gen_summary_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import summary_op_util
from tensorflow.python.training import training_util
from tensorflow.python.util import tf_contextlib
# Name for a collection which is expected to have at most a single boolean
# Tensor. If this tensor is True the summary ops will record summaries.
_SHOULD_RECORD_SUMMARIES_NAME = "ShouldRecordSummaries"
_SUMMARY_COLLECTION_NAME = "_SUMMARY_V2"
_SUMMARY_WRITER_INIT_COLLECTION_NAME = "_SUMMARY_WRITER_V2"
def should_record_summaries():
  """Returns boolean Tensor which is true if summaries should be recorded.

  An empty _SHOULD_RECORD_SUMMARIES_NAME collection means "do not record".

  Raises:
    ValueError: if the collection contains more than one entry.
  """
  entries = ops.get_collection(_SHOULD_RECORD_SUMMARIES_NAME)
  if not entries:
    return False
  if len(entries) > 1:
    raise ValueError(
        "More than one tensor specified for whether summaries "
        "should be recorded: %s" % entries)
  return entries[0]
# TODO(apassos) consider how to handle local step here.
@tf_contextlib.contextmanager
def record_summaries_every_n_global_steps(n):
  """Sets the should_record_summaries Tensor to true if global_step % n == 0."""
  collection_ref = ops.get_collection_ref(_SHOULD_RECORD_SUMMARIES_NAME)
  # Remember the previous recording condition so it can be reinstated.
  saved = list(collection_ref)
  with ops.device("cpu:0"):
    condition = math_ops.equal(training_util.get_global_step() % n, 0)
    collection_ref[:] = [condition]
  yield
  collection_ref[:] = saved
@tf_contextlib.contextmanager
def always_record_summaries():
  """Sets the should_record_summaries Tensor to always true."""
  collection_ref = ops.get_collection_ref(_SHOULD_RECORD_SUMMARIES_NAME)
  # Remember the previous recording condition so it can be reinstated.
  previous = list(collection_ref)
  collection_ref[:] = [True]
  yield
  collection_ref[:] = previous
@tf_contextlib.contextmanager
def never_record_summaries():
  """Sets the should_record_summaries Tensor to always false."""
  collection_ref = ops.get_collection_ref(_SHOULD_RECORD_SUMMARIES_NAME)
  # Remember the previous recording condition so it can be reinstated.
  previous = list(collection_ref)
  collection_ref[:] = [False]
  yield
  collection_ref[:] = previous
class SummaryWriter(object):
  """Encapsulates a summary writer.

  Wraps the summary-writer resource tensor and manages installing it as the
  current context's default writer; a None resource acts as a no-op writer.
  """
  def __init__(self, resource):
    # Handle to the summary-writer resource, or None for the no-op writer
    # returned by create_summary_file_writer(logdir=None).
    self._resource = resource
    if context.in_eager_mode():
      # In eager mode there is no session to own the resource, so attach a
      # deleter that frees it when this wrapper is garbage collected.
      self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._resource, handle_device="cpu:0")
  def set_as_default(self):
    # Install this writer as the context's default summary writer.
    context.context().summary_writer_resource = self._resource
  @tf_contextlib.contextmanager
  def as_default(self):
    # Temporarily install this writer as the default; the previous writer is
    # restored (and this one flushed) on exit. A None resource is a no-op.
    if self._resource is None:
      yield
    else:
      old = context.context().summary_writer_resource
      context.context().summary_writer_resource = self._resource
      yield
      # Flushes the summary writer in eager mode or in graph functions, but not
      # in legacy graph mode (you're on your own there).
      with ops.device("cpu:0"):
        gen_summary_ops.flush_summary_writer(self._resource)
      context.context().summary_writer_resource = old
def create_summary_file_writer(logdir,
                               max_queue=None,
                               flush_secs=None,
                               filename_suffix=None,
                               name=None):
  """Creates a summary file writer in the current context.

  Args:
    logdir: a string, or None. If a string, creates a summary file writer
      which writes to the directory named by the string. If None, returns
      a mock object which acts like a summary writer but does nothing,
      useful to use as a context manager.
    max_queue: the largest number of summaries to keep in a queue; will
      flush once the queue gets bigger than this.
    flush_secs: the largest interval (in seconds) between flushes.
    filename_suffix: optional suffix for the event file name.
    name: name for the summary writer.

  Returns:
    Either a summary writer or an empty object which can be used as a
    summary writer.
  """
  # A None logdir yields a no-op writer usable as a context manager.
  if logdir is None:
    return SummaryWriter(None)
  with ops.device("cpu:0"):
    # Fill in defaults for any unspecified tuning knobs.
    if max_queue is None:
      max_queue = constant_op.constant(10)
    if flush_secs is None:
      flush_secs = constant_op.constant(120)
    if filename_suffix is None:
      filename_suffix = constant_op.constant("")
    writer_resource = gen_summary_ops.summary_writer(shared_name=name)
    # TODO(apassos) ensure the initialization op runs when in graph mode;
    # consider calling session.run here.
    init_op = gen_summary_ops.create_summary_file_writer(
        writer_resource, logdir, max_queue, flush_secs, filename_suffix)
    ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, init_op)
    return SummaryWriter(writer_resource)
def _nothing():
  """Convenient else branch for when summaries do not record."""
  # smart_cond branches must return values of matching type; this mirrors the
  # constant(True) returned by the recording branch.
  return constant_op.constant(False)
def all_summary_ops():
  """Graph-mode only. Returns all summary ops."""
  # Summary ops are collected in a graph collection; eager mode has no graph.
  if not context.in_eager_mode():
    return ops.get_collection(_SUMMARY_COLLECTION_NAME)
  raise RuntimeError(
      "tf.contrib.summary.all_summary_ops is only supported in graph mode.")
def summary_writer_initializer_op():
  """Graph-mode only. Returns the list of ops to create all summary writers."""
  # Writer-initializer ops live in a graph collection; eager mode has no graph.
  if not context.in_eager_mode():
    return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)
  raise RuntimeError(
      "tf.contrib.summary.summary_writer_initializer_op is only "
      "supported in graph mode.")
def summary_writer_function(name, tensor, function, family=None):
  """Helper function to write summaries.
  Args:
    name: name of the summary
    tensor: main tensor to form the summary
    function: function taking a tag and a scope which writes the summary
    family: optional, the summary's family
  Returns:
    The result of writing the summary.
  """
  def record():
    # Run 'function' inside a summary scope; the control dependency forces
    # the write to execute before the returned constant, keeping the side
    # effect alive in the graph.
    with summary_op_util.summary_scope(
        name, family, values=[tensor]) as (tag, scope):
      with ops.control_dependencies([function(tag, scope)]):
        return constant_op.constant(True)
  # No default writer installed: recording is a no-op.
  if context.context().summary_writer_resource is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    # Only perform the write when should_record_summaries() is true.
    op = utils.smart_cond(
        should_record_summaries(), record, _nothing, name="")
    ops.add_to_collection(_SUMMARY_COLLECTION_NAME, op)
  return op
def generic(name, tensor, metadata, family=None):
  """Writes a tensor summary if possible."""
  def _record_fn(tag, scope):
    # identity() moves the tensor to the CPU before writing.
    return gen_summary_ops.write_summary(
        context.context().summary_writer_resource,
        training_util.get_global_step(), array_ops.identity(tensor),
        tag, metadata, name=scope)
  return summary_writer_function(name, tensor, _record_fn, family=family)
def scalar(name, tensor, family=None):
  """Writes a scalar summary if possible."""
  def _record_fn(tag, scope):
    # identity() moves the tensor to the CPU before writing.
    return gen_summary_ops.write_scalar_summary(
        context.context().summary_writer_resource,
        training_util.get_global_step(), tag, array_ops.identity(tensor),
        name=scope)
  return summary_writer_function(name, tensor, _record_fn, family=family)
def histogram(name, tensor, family=None):
  """Writes a histogram summary if possible."""
  def _record_fn(tag, scope):
    # identity() moves the tensor to the CPU before writing.
    return gen_summary_ops.write_histogram_summary(
        context.context().summary_writer_resource,
        training_util.get_global_step(), tag, array_ops.identity(tensor),
        name=scope)
  return summary_writer_function(name, tensor, _record_fn, family=family)
def image(name, tensor, bad_color=None, max_images=3, family=None):
  """Writes an image summary if possible.

  Args:
    name: name of the summary.
    tensor: image tensor to record.
    bad_color: color (uint8 RGBA) used for pixels with non-finite values;
      defaults to opaque red when None.
    max_images: maximum number of images to emit per summary.
    family: optional, the summary's family.

  Returns:
    The result of writing the summary.
  """
  def function(tag, scope):
    # Fix: the original only assigned bad_color_ in the None branch, so any
    # caller-supplied bad_color raised UnboundLocalError at write time.
    bad_color_ = (constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
                  if bad_color is None else bad_color)
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_image_summary(
        context.context().summary_writer_resource,
        training_util.get_global_step(), tag, array_ops.identity(tensor),
        bad_color_,
        max_images, name=scope)
  return summary_writer_function(name, tensor, function, family=family)
def audio(name, tensor, sample_rate, max_outputs, family=None):
  """Writes an audio summary if possible."""
  def _record_fn(tag, scope):
    # identity() moves the tensor to the CPU before writing.
    cpu_tensor = array_ops.identity(tensor)
    return gen_summary_ops.write_audio_summary(
        context.context().summary_writer_resource,
        training_util.get_global_step(),
        tag,
        cpu_tensor,
        sample_rate=sample_rate,
        max_outputs=max_outputs,
        name=scope)
  return summary_writer_function(name, tensor, _record_fn, family=family)
def eval_dir(model_dir, name=None):
  """Construct a logdir for an eval summary writer."""
  # A falsy name yields plain "eval"; otherwise the name is suffixed.
  subdir = "eval" if not name else "eval_" + name
  return os.path.join(model_dir, subdir)
|
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from datetime import date, time
from ...compatibility import StringIO
from ...worksheet import Worksheet
from ..helperfunctions import _xml_to_list
class TestWriteDataValidations(unittest.TestCase):
"""
Test the Worksheet _write_data_validations() method.
"""
    def setUp(self):
        # Show full diffs on assertion failures (the XML strings are long).
        self.maxDiff = None
        # Redirect the worksheet's XML output into an in-memory handle so the
        # tests can inspect exactly what _write_data_validations() emits.
        self.fh = StringIO()
        self.worksheet = Worksheet()
        self.worksheet._set_filehandle(self.fh)
def test_write_data_validations_1(self):
"""
Test 1 Integer between 1 and 10.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_2(self):
"""
Test 2 Integer not between 1 and 10.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'not between',
'minimum': 1,
'maximum': 10,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="notBetween" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_3(self):
"""
Test 3,4,5 Integer == 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'equal to',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_4(self):
"""
Test 3,4,5 Integer == 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '=',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_5(self):
"""
Test 3,4,5 Integer == 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '==',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_6(self):
    """Test 6,7,8 Integer != 1."""
    options = {'validate': 'integer', 'criteria': 'not equal to', 'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="notEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_7(self):
    """Test 6,7,8 Integer != 1."""
    options = {'validate': 'integer', 'criteria': '<>', 'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="notEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_8(self):
    """Test 6,7,8 Integer != 1."""
    options = {'validate': 'integer', 'criteria': '!=', 'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="notEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_9(self):
    """Test 9,10 Integer > 1."""
    options = {'validate': 'integer', 'criteria': 'greater than', 'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="greaterThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_10(self):
    """Test 9,10 Integer > 1."""
    options = {'validate': 'integer', 'criteria': '>', 'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="greaterThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_11(self):
    """Test 11,12 Integer < 1."""
    options = {'validate': 'integer', 'criteria': 'less than', 'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="lessThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_12(self):
    """Test 11,12 Integer < 1."""
    options = {'validate': 'integer', 'criteria': '<', 'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="lessThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_13(self):
    """Test 13,14 Integer >= 1."""
    options = {'validate': 'integer',
               'criteria': 'greater than or equal to',
               'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="greaterThanOrEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_14(self):
    """Test 13,14 Integer >= 1."""
    options = {'validate': 'integer', 'criteria': '>=', 'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="greaterThanOrEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_15(self):
    """Test 15,16 Integer <= 1."""
    options = {'validate': 'integer',
               'criteria': 'less than or equal to',
               'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="lessThanOrEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_16(self):
    """Test 15,16 Integer <= 1."""
    options = {'validate': 'integer', 'criteria': '<=', 'value': 1}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" operator="lessThanOrEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_17(self):
    """Test 17 Integer between 1 and 10 (same as test 1) + Ignore blank off."""
    options = {'validate': 'integer',
               'criteria': 'between',
               'minimum': 1,
               'maximum': 10,
               'ignore_blank': 0}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    # ignore_blank=0 drops the allowBlank="1" attribute.
    exp = '<dataValidations count="1"><dataValidation type="whole" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_18(self):
    """Test 18 Integer between 1 and 10 (same as test 1) + Error style == warning."""
    options = {'validate': 'integer',
               'criteria': 'between',
               'minimum': 1,
               'maximum': 10,
               'error_type': 'warning'}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" errorStyle="warning" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_19(self):
    """Test 19 Integer between 1 and 10 (same as test 1) + Error style == info."""
    options = {'validate': 'integer',
               'criteria': 'between',
               'minimum': 1,
               'maximum': 10,
               'error_type': 'information'}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" errorStyle="information" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_20(self):
    """Test 20 Integer between 1 and 10 (same as test 1) + input title."""
    options = {'validate': 'integer',
               'criteria': 'between',
               'minimum': 1,
               'maximum': 10,
               'input_title': 'Input title January'}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" promptTitle="Input title January" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_21(self):
    """Test 21 Integer between 1 and 10 (same as test 1) + input title + input message."""
    options = {'validate': 'integer',
               'criteria': 'between',
               'minimum': 1,
               'maximum': 10,
               'input_title': 'Input title January',
               'input_message': 'Input message February'}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" promptTitle="Input title January" prompt="Input message February" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_22(self):
    """Test 22 Integer between 1 and 10 (same as test 1) + input title + input message + error title."""
    options = {'validate': 'integer',
               'criteria': 'between',
               'minimum': 1,
               'maximum': 10,
               'input_title': 'Input title January',
               'input_message': 'Input message February',
               'error_title': 'Error title March'}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" errorTitle="Error title March" promptTitle="Input title January" prompt="Input message February" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_23(self):
    """Test 23 Integer between 1 and 10 (same as test 1) + input title + input message + error title + error message."""
    options = {'validate': 'integer',
               'criteria': 'between',
               'minimum': 1,
               'maximum': 10,
               'input_title': 'Input title January',
               'input_message': 'Input message February',
               'error_title': 'Error title March',
               'error_message': 'Error message April'}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" errorTitle="Error title March" error="Error message April" promptTitle="Input title January" prompt="Input message February" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_24(self):
    """Test 24: as test 23 but with the input message box disabled."""
    options = {'validate': 'integer',
               'criteria': 'between',
               'minimum': 1,
               'maximum': 10,
               'input_title': 'Input title January',
               'input_message': 'Input message February',
               'error_title': 'Error title March',
               'error_message': 'Error message April',
               'show_input': 0}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    # show_input=0 drops showInputMessage="1".
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showErrorMessage="1" errorTitle="Error title March" error="Error message April" promptTitle="Input title January" prompt="Input message February" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_25(self):
    """Test 25: as test 23 but with both input and error message boxes disabled."""
    options = {'validate': 'integer',
               'criteria': 'between',
               'minimum': 1,
               'maximum': 10,
               'input_title': 'Input title January',
               'input_message': 'Input message February',
               'error_title': 'Error title March',
               'error_message': 'Error message April',
               'show_input': 0,
               'show_error': 0}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    # Neither showInputMessage nor showErrorMessage is emitted.
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" errorTitle="Error title March" error="Error message April" promptTitle="Input title January" prompt="Input message February" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_26(self):
    """Test 26 'Any' shouldn't produce a DV record if there are no messages."""
    self.worksheet.data_validation('B5', {'validate': 'any'})
    self.worksheet._write_data_validations()
    # Nothing should have been written at all.
    self.assertEqual(self.fh.getvalue(), '')
def test_write_data_validations_27(self):
    """Test 27 Decimal = 1.2345"""
    options = {'validate': 'decimal', 'criteria': '==', 'value': 1.2345}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="decimal" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1.2345</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_28(self):
    """Test 28 List = a,bb,ccc"""
    options = {'validate': 'list', 'source': ['a', 'bb', 'ccc']}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="list" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>"a,bb,ccc"</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_29(self):
    """Test 29 List = a,bb,ccc, No dropdown"""
    options = {'validate': 'list', 'source': ['a', 'bb', 'ccc'], 'dropdown': 0}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    # Note: disabling the dropdown is expressed as showDropDown="1".
    exp = '<dataValidations count="1"><dataValidation type="list" allowBlank="1" showDropDown="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>"a,bb,ccc"</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_30(self):
    """Test 30 List = $D$1:$D$5"""
    options = {'validate': 'list', 'source': '=$D$1:$D$5'}
    self.worksheet.data_validation('A1:A1', options)
    self.worksheet._write_data_validations()
    # A single-cell range collapses to "A1" and the leading '=' is stripped.
    exp = '<dataValidations count="1"><dataValidation type="list" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="A1"><formula1>$D$1:$D$5</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_31(self):
    """Test 31 Date = 39653 (2008-07-24)"""
    options = {'validate': 'date', 'criteria': '==', 'value': date(2008, 7, 24)}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="date" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>39653</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_32(self):
    """Test 32 Date = 2008-07-25T"""
    options = {'validate': 'date', 'criteria': '==', 'value': date(2008, 7, 25)}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="date" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>39654</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_33(self):
    """Test 33 Date between ranges."""
    options = {'validate': 'date',
               'criteria': 'between',
               'minimum': date(2008, 1, 1),
               'maximum': date(2008, 12, 12)}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="date" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>39448</formula1><formula2>39794</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_34(self):
    """Test 34 Time = 0.5 (12:00:00)"""
    options = {'validate': 'time', 'criteria': '==', 'value': time(12)}
    self.worksheet.data_validation('B5:B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="time" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>0.5</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_35(self):
    """Test 35 Time = T12:00:00"""
    options = {'validate': 'time', 'criteria': '==', 'value': time(12, 0, 0)}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="time" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>0.5</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_36(self):
    """Test 36 Custom == 10."""
    options = {'validate': 'custom', 'criteria': '==', 'value': 10}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    # Custom validations have no operator attribute.
    exp = '<dataValidations count="1"><dataValidation type="custom" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>10</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_37(self):
    """Test 37 Check the row/col processing: single A1 style cell."""
    options = {'validate': 'integer', 'criteria': 'between',
               'minimum': 1, 'maximum': 10}
    self.worksheet.data_validation('B5', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_38(self):
    """Test 38 Check the row/col processing: single A1 style range."""
    options = {'validate': 'integer', 'criteria': 'between',
               'minimum': 1, 'maximum': 10}
    self.worksheet.data_validation('B5:B10', options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5:B10"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_39(self):
    """Test 39 Check the row/col processing: single (row, col) style cell."""
    options = {'validate': 'integer', 'criteria': 'between',
               'minimum': 1, 'maximum': 10}
    self.worksheet.data_validation(4, 1, 4, 1, options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_40(self):
    """Test 40 Check the row/col processing: single (row, col) style range."""
    options = {'validate': 'integer', 'criteria': 'between',
               'minimum': 1, 'maximum': 10}
    self.worksheet.data_validation(4, 1, 9, 1, options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5:B10"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_41(self):
    """Test 41 Check the row/col processing: multiple (row, col) style cells."""
    options = {'validate': 'integer', 'criteria': 'between',
               'minimum': 1, 'maximum': 10,
               'other_cells': [[4, 3, 4, 3]]}
    self.worksheet.data_validation(4, 1, 4, 1, options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5 D5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_42(self):
    """Test 42 Check the row/col processing: multiple (row, col) style cells."""
    options = {'validate': 'integer', 'criteria': 'between',
               'minimum': 1, 'maximum': 10,
               'other_cells': [[6, 1, 6, 1], [8, 1, 8, 1]]}
    self.worksheet.data_validation(4, 1, 4, 1, options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5 B7 B9"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_43(self):
    """Test 43 Check the row/col processing: multiple (row, col) style cells."""
    options = {'validate': 'integer', 'criteria': 'between',
               'minimum': 1, 'maximum': 10,
               'other_cells': [[3, 3, 3, 3]]}
    self.worksheet.data_validation(4, 1, 8, 1, options)
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5:B9 D4"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_44(self):
    """Test 44 Multiple validations."""
    self.worksheet.data_validation(
        'B5', {'validate': 'integer', 'criteria': '>', 'value': 10})
    self.worksheet.data_validation(
        'C10', {'validate': 'integer', 'criteria': '<', 'value': 10})
    self.worksheet._write_data_validations()
    exp = '<dataValidations count="2"><dataValidation type="whole" operator="greaterThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>10</formula1></dataValidation><dataValidation type="whole" operator="lessThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="C10"><formula1>10</formula1></dataValidation></dataValidations>'
    self.assertEqual(_xml_to_list(self.fh.getvalue()), _xml_to_list(exp))
def test_write_data_validations_45(self):
    """
    Test 45 Test 'any' with input messages.

    Renamed from ``test_write_data_validations_21``: the original name
    duplicated the method defined earlier in the class, so Python silently
    replaced the real test 21 and it was never executed.
    """
    self.worksheet.data_validation('B5', {'validate': 'any',
                                          'input_title': 'Input title January',
                                          'input_message': 'Input message February',
                                          })
    self.worksheet._write_data_validations()
    # 'any' with messages still emits a (self-closing) dataValidation element.
    exp = '<dataValidations count="1"><dataValidation allowBlank="1" showInputMessage="1" showErrorMessage="1" promptTitle="Input title January" prompt="Input message February" sqref="B5"/></dataValidations>'
    got = self.fh.getvalue()
    exp = _xml_to_list(exp)
    got = _xml_to_list(got)
    self.assertEqual(got, exp)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pprint
from model_mommy import mommy
import mock
import responses
import re
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import override_settings
from rest_framework import status
from rest_framework.test import APITestCase
import ujson
from metrics.values import Stats
from tasks.models import TaskStatus
from tests.mocks.ssh import SSHConnectionMock, SSHConnectionMockBuilder
from tests.mocks.requests import post_response, get_response, put_response, patch_response
from tests.mocks.phantomas import PhantomasMock
from tests.mocks.chrome import ChromeMock
def time_time_mock(*_args, **_kwargs):
    """Deterministic stand-in for elapsed-time measurement: always 2."""
    return 2
@override_settings(CELERY_ALWAYS_EAGER=True)
@override_settings(API_SERVER_URL='http://testserver')
class TestResultTestCase(APITestCase):
    """CRUD tests plus one end-to-end run of a measurement task.

    Celery runs eagerly and all outside-world interaction (SSH deploy,
    phantomas, Chrome/selenium, HTTP back-calls to the API) is mocked, so
    ``test_run_task`` executes the whole pipeline synchronously in-process.
    """

    def setUp(self):
        self.test_run = mommy.make('test_runs.TestRun')
        self.task_to_delete = mommy.make('tasks.Task')
        self.result = mommy.make('results.TestResult')
        self.response_data = []
        # Fake MediaWiki page body; the trailing HTML comment carries the
        # profiler table that the mw_profiler metrics are extracted from.
        self.mw_content = """
<html>
Some stuff here
<!-- Some comments here
And more comments
-->
</html>
<!--
100.00% 1.869824 1 - -total
90.10% 1.684765 1 - MediaWiki::main
80.50% 1.505257 1 - MediaWiki::finalCleanup
80.49% 1.504998 1 - OutputPage::output
18.72% 0.350065 314 - DatabaseBase::query-master
0.72% 0.013514 11 - DatabaseBase::query
4.63% 0.086566 170 - MWMemcached::get
0.35% 0.006459 1 - MWMemcached::get::NavigationModel:wikicities:en:message-global-navigation-hubs-menu-7-4-4-:1
0.01% 0.000109 4 - MWMemcached::get::wikicities:WikiFactoryHub::getAllVerticals !DUPE
0.01% 0.000125 1 - MWMemcached::get::muppet:newtalk:ip:94.42.104.162 !MISS
-->
"""

    def test_create_task(self):
        """POST to the task list creates a pending task."""
        url = reverse('task-list')
        payload = {
            'test_run': reverse('testrun-detail', args=[self.test_run.id, ]),
            'status': TaskStatus.PENDING,
            'results': [],
        }
        response = self.client.post(url, payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED,
                         msg='Create failed: {0}'.format(response.data))

    def test_read_task(self):
        """GET of an existing task detail succeeds."""
        url = reverse('task-detail', args=[self.task_to_delete.id, ])
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK,
                         msg='Read failed: {0}'.format(response.data))

    @mock.patch('testrunner.tasks.deploy.SSHConnection', SSHConnectionMockBuilder('1'))
    @mock.patch('testrunner.tasks.phantomas_get.phantomas.Phantomas', PhantomasMock)
    @mock.patch('testrunner.tasks.selenium_get.webdriver.Chrome', ChromeMock.create)
    @mock.patch('selenium.webdriver.support.wait.WebDriverWait', mock.MagicMock())
    @mock.patch('testrunner.tasks.http_get.HttpGet._elapsed_time', time_time_mock)
    @responses.activate
    @post_response
    @get_response
    @put_response
    def test_run_task(self, post_callback, get_callback, put_callback):
        """Run a task end-to-end and verify the metric series it stores."""

        def extract_values_from_results(origin, metric_id):
            # ``results`` is a closure over the list fetched further below,
            # after the task has run. One matching series is expected per
            # (origin, metric id) pair; originally 10 entries are asserted.
            # Internal params renamed (was ``origin, id``/``id, origin`` in
            # the two helpers, which swapped meanings between them and
            # shadowed the ``id`` builtin); behavior is unchanged.
            series = [x for x in results
                      if x['context']['id'] == metric_id
                      if x['context']['origin'] == origin]
            self.assertEqual(len(series), 10, '{}:{} gave {} value series (10 expected)'.format(origin, metric_id, len(series)))
            return [x['value'] for x in series[0]['values']]

        def get_stats_from_requests(origin, metric_id):
            # Thin wrapper: collect the values of one series into a Stats
            # aggregate (count/min/max/...).
            values = extract_values_from_results(origin, metric_id)
            return Stats(values)

        # TODO: Fix this to use better mocking mechanism
        patch_calls = []

        def patch_callback(request):
            # Proxy PATCH requests back into the Django test client and
            # record every status update the task makes.
            api_response = self.client.patch(request.url, data=ujson.decode(request.body), headers=request.headers)
            patch_calls.append(api_response.data)
            return api_response.status_code, {}, api_response.content

        url = reverse('task-run', args=[self.task_to_delete.id, ])
        api_uri = re.compile(r'https?://testserver')
        # mocking MW responses
        responses.add(responses.GET, self.task_to_delete.test_run.test_run_uri,
                      body=self.mw_content, status=status.HTTP_200_OK,
                      adding_headers={'X-Backend-Response-Time': '123', })
        # mocking task status update requests
        responses.add_callback(responses.PATCH, api_uri, callback=patch_callback,
                               content_type='application/json')
        # mocking API results calls
        responses.add_callback(responses.POST, api_uri, callback=post_callback,
                               content_type='application/json')
        responses.add_callback(responses.PUT, api_uri, callback=put_callback,
                               content_type='application/json')
        responses.add_callback(responses.GET, api_uri, callback=get_callback,
                               content_type='application/json')

        response = self.client.post(url)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED,
                         msg='Run task failed: {0}'.format(response.content))
        # fetching data for a task
        url = reverse('task-detail', args=[self.task_to_delete.id, ])
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK,
                         msg='Get task failed: {0}'.format(response.content))
        # now get result
        url = response.data['results'][0]
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK,
                         msg='Get result failed: {0}'.format(response.content))
        results = response.data['results']

        response_times = get_stats_from_requests('requests', 'server.app.response_time')
        self.assertEqual(response_times.count, 10)
        self.assertEqual(response_times.min, 2.0)
        self.assertEqual(response_times.max, 2.0)

        memcached_dupes = get_stats_from_requests('mw_profiler', 'server.app.memcached.dupe_count')
        memcached_misses = get_stats_from_requests('mw_profiler', 'server.app.memcached.miss_count')
        queries_master = get_stats_from_requests('mw_profiler', 'server.app.database.queries.master_count')
        queries_slave = get_stats_from_requests('mw_profiler', 'server.app.database.queries.slave_count')
        queries_time = get_stats_from_requests('mw_profiler', 'server.app.database.queries.time')
        response_times = get_stats_from_requests('mw_profiler', 'server.app.response_time')
        self.assertEqual(memcached_dupes.count, 10)
        self.assertEqual(memcached_dupes.max, 4.0)
        self.assertEqual(memcached_misses.count, 10)
        self.assertEqual(memcached_misses.max, 1.0)
        self.assertEqual(queries_master.max, 314.0)
        self.assertEqual(queries_slave.max, 11.0)
        self.assertEqual(queries_time.max, 0.363579)
        self.assertEqual(response_times.max, 1.869824)

        js_size = get_stats_from_requests('phantomas', 'browser.assets.js.size')
        content_length = get_stats_from_requests('phantomas', 'raw.phantomas.contentLength')
        css_count = get_stats_from_requests('phantomas', 'browser.assets.css.count')
        body_size = get_stats_from_requests('phantomas', 'browser.assets.total_size')
        html_size = get_stats_from_requests('phantomas', 'browser.assets.html.size')
        other_count = get_stats_from_requests('phantomas', 'browser.assets.other.count')
        self.assertEqual(js_size.max, 927221.0)
        self.assertEqual(content_length.max, 1922742.0)
        self.assertEqual(css_count.max, 5.0)
        self.assertEqual(body_size.max, 1304562.0)
        self.assertEqual(html_size.max, 124541.0)
        self.assertEqual(other_count.max, 19.0)

        # status update (7 * IN_PROGRESS + 1 * DONE)
        self.assertEqual(len(patch_calls), 8)
        # starting -> IN PROGRESS
        self.assertEqual(patch_calls[0]['status'], TaskStatus.IN_PROGRESS)
        # finished -> DONE
        self.assertEqual(patch_calls[-1]['status'], TaskStatus.DONE)

    def test_update_result(self):
        """PATCH of a task's status round-trips the new value."""
        url = reverse('task-detail', args=[self.task_to_delete.id, ])
        payload = {'status': TaskStatus.ERROR}
        response = self.client.patch(url, payload)
        self.assertEqual(response.status_code, status.HTTP_200_OK, msg='Update failed: {0}'.format(response.data))
        self.assertEqual(response.data['status'], payload['status'])

    def test_delete_result(self):
        """DELETE of a task returns 204."""
        url = reverse('task-detail', args=[self.task_to_delete.id, ])
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT,
                         msg='Delete failed: {0}'.format(response.data))

    @responses.activate
    def test_update_task_status(self):
        """PATCH of the status field alone is reflected in the response."""
        url = reverse('task-detail', args=[self.task_to_delete.id, ])
        api_uri = re.compile(r'https?://testserver')
        response = self.client.patch(url, {'status': 1})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        results = response.data
        self.assertEqual(results['status'], 1)
|
|
#
# Copyright (C) 2001-2004 greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" The "parser" for compound descriptors.
I almost hesitate to document this, because it's not the prettiest
thing the world has ever seen... but it does work (for at least some
definitions of the word).
Rather than getting into the whole mess of writing a parser for the
compound descriptor expressions, I'm just using string substitutions
and python's wonderful ability to *eval* code.
It would probably be a good idea at some point to replace this with a
real parser, if only for the flexibility and intelligent error
messages that would become possible.
The general idea is that we're going to deal with expressions where
atomic descriptors have some kind of method applied to them which
reduces them to a single number for the entire composition. Compound
descriptors (those applicable to the compound as a whole) are not
operated on by anything in particular (except for standard math stuff).
Here's the general flow of things:
1) Composition descriptor references ($a, $b, etc.) are replaced with the
corresponding descriptor names using string substitution.
(*_SubForCompoundDescriptors*)
2) Atomic descriptor references ($1, $2, etc) are replaced with lookups
into the atomic dict with "DEADBEEF" in place of the atom name.
(*_SubForAtomicVars*)
3) Calls to Calculator Functions are augmented with a reference to
the composition and atomic dictionary
(*_SubMethodArgs*)
**NOTE:**
anytime we don't know the answer for a descriptor, rather than
throwing a (completely incomprehensible) exception, we just return
-666. So bad descriptor values should stand out like sore thumbs.
"""
# Module-level debug switch: when true, substitution/eval failures raise
# RuntimeError instead of silently returning the -666 sentinel.
__DEBUG=0
from rdkit import RDConfig
import string
# we do this to allow the use of stuff in the math module
from math import *
#----------------------
# atomic descriptor section
#----------------------
# these are the methods which can be applied to ATOMIC descriptors.
# _SubMethodArgs() rewrites calls to these names so they receive the
# composition and the atomic dictionary as extra arguments.
knownMethods = ['SUM','MIN','MAX','MEAN','AVG','DEV','HAS']
def HAS(strArg, composList, atomDict):
    """ *Calculator Method*

    does a string search

    **Arguments**

      - strArg: the arguments in string form: "<expr>,<what>"; every
        DEADBEEF in <expr> is replaced by each atom symbol before eval

      - composList: the composition vector (sequence of (atom, count) pairs)

      - atomDict: the atomic dictionary (referenced by name in the eval'd
        expression)

    **Returns**

      1 or 0 (-666 when no second argument was supplied)

    """
    # NOTE: eval() of caller-supplied text -- only for trusted input.
    # Fixed for Python 3: string.split/replace/find were removed; the
    # equivalent str methods work on both Python 2 and 3.
    splitArgs = strArg.split(',')
    if len(splitArgs) > 1:
        for atom, num in composList:
            tStr = splitArgs[0].replace('DEADBEEF', atom)
            where = eval(tStr)
            what = eval(splitArgs[1])
            # `what in where` is equivalent to string.find(where, what) != -1
            if what in where:
                return 1
        return 0
    else:
        return -666
def SUM(strArg, composList, atomDict):
    """ *Calculator Method*

    calculates the sum of a descriptor across a composition

    **Arguments**

      - strArg: the expression in string form (DEADBEEF is the atom
        placeholder)

      - composList: the composition vector (sequence of (atom, count) pairs)

      - atomDict: the atomic dictionary (referenced by name in the eval'd
        expression)

    **Returns**

      a float: sum over atoms of eval(expr) weighted by the atom count

    """
    # Fixed for Python 3: string.replace() was removed; str.replace works
    # on both Python 2 and 3.
    accum = 0.0
    for atom, num in composList:
        tStr = strArg.replace('DEADBEEF', atom)
        accum = accum + eval(tStr) * num
    return accum
def MEAN(strArg, composList, atomDict):
    """ *Calculator Method*

    calculates the average of a descriptor across a composition

    **Arguments**

      - strArg: the expression in string form (DEADBEEF is the atom
        placeholder)

      - composList: the composition vector (sequence of (atom, count) pairs)

      - atomDict: the atomic dictionary (referenced by name in the eval'd
        expression)

    **Returns**

      a float: the count-weighted average of eval(expr) over the atoms

    """
    # Fixed for Python 3: string.replace() was removed; str.replace works
    # on both Python 2 and 3.
    accum = 0.0
    nSoFar = 0
    for atom, num in composList:
        tStr = strArg.replace('DEADBEEF', atom)
        accum = accum + eval(tStr) * num
        nSoFar = nSoFar + num
    return accum / nSoFar


# historical alias
AVG = MEAN
def DEV(strArg, composList, atomDict):
    """ *Calculator Method*

    calculates the average deviation of a descriptor across a composition

    **Arguments**

      - strArg: the expression in string form (DEADBEEF is the atom
        placeholder)

      - composList: the composition vector (sequence of (atom, count) pairs)

      - atomDict: the atomic dictionary (referenced by name in the eval'd
        expression)

    **Returns**

      a float: count-weighted mean absolute deviation from MEAN()

    """
    # Fixed for Python 3: string.replace() was removed; str.replace works
    # on both Python 2 and 3.
    avg = MEAN(strArg, composList, atomDict)
    accum = 0.0
    nSoFar = 0.0
    for atom, num in composList:
        tStr = strArg.replace('DEADBEEF', atom)
        accum = accum + abs(eval(tStr) - avg) * num
        nSoFar = nSoFar + num
    return accum / nSoFar
def MIN(strArg, composList, atomDict):
    """ *Calculator Method*

    calculates the minimum value of a descriptor across a composition

    **Arguments**

      - strArg: the expression in string form (DEADBEEF is the atom
        placeholder)

      - composList: the composition vector (sequence of (atom, count) pairs)

      - atomDict: the atomic dictionary (referenced by name in the eval'd
        expression)

    **Returns**

      a float (raises ValueError on an empty composition, as before)

    """
    # Fixed for Python 3: string.replace() was removed; str.replace works
    # on both Python 2 and 3.
    accum = []
    for atom, num in composList:
        tStr = strArg.replace('DEADBEEF', atom)
        accum.append(eval(tStr))
    return min(accum)
def MAX(strArg, composList, atomDict):
    """ *Calculator Method*

    calculates the maximum value of a descriptor across a composition

    **Arguments**

      - strArg: the expression in string form (DEADBEEF is the atom
        placeholder)

      - composList: the composition vector (sequence of (atom, count) pairs)

      - atomDict: the atomic dictionary (referenced by name in the eval'd
        expression)

    **Returns**

      a float (raises ValueError on an empty composition, as before)

    """
    # Fixed for Python 3: string.replace() was removed; str.replace works
    # on both Python 2 and 3.
    accum = []
    for atom, num in composList:
        tStr = strArg.replace('DEADBEEF', atom)
        accum.append(eval(tStr))
    return max(accum)
#------------------
# string replacement routines
# these are not intended to be called by clients
#------------------
def _SubForAtomicVars(cExpr,varList,dictName):
""" replace atomic variables with the appropriate dictionary lookup
*Not intended for client use*
"""
for i in xrange(len(varList)):
cExpr = string.replace(cExpr,'$%d'%(i+1),
'%s["DEADBEEF"]["%s"]'%(dictName,varList[i]))
return cExpr
def _SubForCompoundDescriptors(cExpr,varList,dictName):
""" replace compound variables with the appropriate list index
*Not intended for client use*
"""
for i in xrange(len(varList)):
cExpr = string.replace(cExpr,'$%s'%chr(ord('a')+i),
'%s["%s"]'%(dictName,varList[i]))
return cExpr
def _SubMethodArgs(cExpr,knownMethods):
""" alters the arguments of calls to calculator methods
*Not intended for client use*
This is kind of putrid (and the code ain't so pretty either)
The general idea is that the various special methods for atomic
descriptors need two extra arguments (the composition and the atomic
dict). Rather than make the user type those in, we just find
invocations of these methods and fill out the function calls using
string replacements.
"""
res = cExpr
for method in knownMethods:
p = 0
while p != -1 and p < len(res):
p = string.find(res,method,p)
if p != -1:
p = p + len(method) + 1
start = p
parenCount = 1
while parenCount and p < len(res):
if res[p] == ')':
parenCount = parenCount - 1
elif res[p] == '(':
parenCount = parenCount + 1
p = p + 1
if p <= len(res):
res = res[0:start]+"'%s',compos,atomDict"%(res[start:p-1])+res[p-1:]
return res
def CalcSingleCompoundDescriptor(compos, argVect, atomDict, propDict):
    """ calculates the value of the descriptor for a single compound

    **ARGUMENTS:**

      - compos: a vector/tuple containing the composition
        information... in the form:
        '[("Fe",1.),("Pt",2.),("Rh",0.02)]'

      - argVect: a vector/tuple with three elements:

        1) AtomicDescriptorNames: a list/tuple of the names of the
           atomic descriptors being used. These determine the
           meaning of $1, $2, etc. in the expression

        2) CompoundDescriptorNames: a list/tuple of the names of the
           compound descriptors being used. These determine the
           meaning of $a, $b, etc. in the expression

        3) Expr: a string containing the expression to be used to
           evaluate the final result.

      - atomDict: a dictionary of atomic descriptors. Each atomic entry is
        another dictionary containing the individual descriptors
        and their values

      - propDict: a dictionary of descriptors for the composition.

    **RETURNS:**

      the value of the descriptor, -666 if a problem was encountered

    **NOTE:**

      - because it takes rather a lot of work to get everything set
        up to calculate a descriptor, if you are calculating the
        same descriptor for multiple compounds, you probably want to
        be calling _CalcMultipleCompoundsDescriptor()_.
    """
    # Fixed for Python 3: print statements and `raise E, msg` were syntax
    # errors; bare excepts narrowed to Exception.
    # Stage 1: textual substitutions turning the user expression into
    # something eval()-able (see the module docstring).
    try:
        atomVarNames = argVect[0]
        compositionVarNames = argVect[1]
        formula = argVect[2]
        formula = _SubForCompoundDescriptors(formula, compositionVarNames, 'propDict')
        formula = _SubForAtomicVars(formula, atomVarNames, 'atomDict')
        evalTarget = _SubMethodArgs(formula, knownMethods)
    except Exception:
        if __DEBUG:
            import traceback
            print('Sub Failure!')
            traceback.print_exc()
            # (the original also printed evalTarget here, which may be
            # unbound at this point; dropped to avoid a secondary NameError)
            print(propDict)
            raise RuntimeError('Failure 1')
        else:
            return -666
    # Stage 2: evaluate.  eval() of user-derived text -- trusted input only.
    try:
        v = eval(evalTarget)
    except Exception:
        if __DEBUG:
            import traceback
            with open(RDConfig.RDCodeDir + '/ml/descriptors/log.txt', 'a+') as outF:
                outF.write('#------------------------------\n')
                outF.write('formula: %s\n' % repr(formula))
                outF.write('target: %s\n' % repr(evalTarget))
                outF.write('propDict: %s\n' % (repr(propDict)))
                try:
                    outF.write('keys: %s\n' % (repr(atomDict.keys())))
                except Exception:
                    outF.write('no atomDict\n')
            print('ick!')
            print('formula:', formula)
            print('target:', evalTarget)
            print('propDict:', propDict)
            print('keys:', atomDict.keys())
            traceback.print_exc()
            raise RuntimeError('Failure 2')
        else:
            v = -666
    return v
def CalcMultipleCompoundsDescriptor(composVect, argVect, atomDict, propDictList):
    """ calculates the value of the descriptor for a list of compounds

    **ARGUMENTS:**

      - composVect: a vector of vector/tuple containing the composition
        information.
        See _CalcSingleCompoundDescriptor()_ for an explanation of the
        elements.

      - argVect: a vector/tuple with three elements:

        1) AtomicDescriptorNames: a list/tuple of the names of the
           atomic descriptors being used. These determine the
           meaning of $1, $2, etc. in the expression

        2) CompoundDescriptorNames: a list/tuple of the names of the
           compound descriptors being used. These determine the
           meaning of $a, $b, etc. in the expression

        3) Expr: a string containing the expression to be used to
           evaluate the final result.

      - atomDict: a dictionary of atomic descriptors. Each atomic entry is
        another dictionary containing the individual descriptors
        and their values

      - propDictList: a vector of descriptor dictionaries, one per
        composition.

    **RETURNS:**

      a vector containing the values of the descriptor for each
      compound. Any given entry will be -666 if problems were
      encountered
    """
    # Fixed for Python 3: xrange was removed; bare excepts narrowed.
    res = [-666] * len(composVect)
    try:
        atomVarNames = argVect[0]
        compositionVarNames = argVect[1]
        formula = argVect[2]
        formula = _SubForCompoundDescriptors(formula, compositionVarNames, 'propDict')
        formula = _SubForAtomicVars(formula, atomVarNames, 'atomDict')
        evalTarget = _SubMethodArgs(formula, knownMethods)
    except Exception:
        return res
    for i in range(len(composVect)):
        # NOTE: compos and propDict look unused but are referenced by name
        # inside the eval()'d expression.
        propDict = propDictList[i]
        compos = composVect[i]
        try:
            v = eval(evalTarget)
        except Exception:
            v = -666
        res[i] = v
    return res
#------------
# Demo/testing code
#------------
if __name__ == '__main__':
    # Smoke-test the parser over a spread of expressions (the final 'foo'
    # deliberately exercises the -666 failure path).
    # Fixed for Python 3: print statements -> print() calls.
    piece1 = [['d1', 'd2'], ['d1', 'd2']]
    aDict = {'Fe': {'d1': 1., 'd2': 2.}, 'Pt': {'d1': 10., 'd2': 20.}}
    pDict = {'d1': 100., 'd2': 200.}
    compos = [('Fe', 1), ('Pt', 1)]
    cExprs = ["SUM($1)", "SUM($1)+SUM($2)", "SUM($1)+SUM($1)", "MEAN($1)", "DEV($2)", "MAX($1)", "MIN($1)/MAX($1)",
              "MIN($2)", "SUM($1)/$a", "sqrt($a+$b)", "SUM((3.*$1)/($2))", "foo"]
    for cExpr in cExprs:
        argVect = piece1 + [cExpr]
        print(cExpr)
        print(CalcSingleCompoundDescriptor(compos, argVect, aDict, pDict))
        print(CalcMultipleCompoundsDescriptor([compos, compos], argVect, aDict, [pDict, pDict]))
|
|
####################################################################
# Class CONTROLLER: control scan                                   #
####################################################################
# External modules
import os
import random
import time
from math import log10
import math
# Internal modules
import auxfun as af
class CONTROLLER:
    """Scan controller.

    Holds the configuration parsed from the input file (scan method, result
    folder, number of points, seed, ...) plus the bookkeeping dictionaries
    for the input/fixed/output parameters of all managed programs.
    """

    def __init__(self):
        self._FolderName = 'test'
        self._PointNum = 2
        self._ScanMethod = 'random'
        self._RandomSeed = -1
        self._PrintNum = 10
        self._AccepRate = 0.25
        self._FlagTuneR = False
        self._Prog = {}
        self.AllPar = {}
        self.InPar = {}
        self.FixedPar = {}
        self.OutPar = {}
        self.InputPar = {}  # Different to InPar, including prior, value
        self.GridBin = {}  # Number of bins
        self.MCMCss = {}  # Step size
        self.MCMCiv = {}  # Initial value
        self.MNOutputFile = 'test/MultiNestData/'
        self._Count = 0

    def setScanMethod(self, method):
        """Normalise the scan method to upper case and validate it."""
        self._ScanMethod = method.upper()
        if self._ScanMethod not in af._all:
            af.ErrorStop('%s is not a supported scan method' % method)
        af.Info('Scan method = %s' % self._ScanMethod)

    def backup_result(self, FolderName, cp=False):
        """Move (or copy when cp=True) a result folder into ./Backup with a
        timestamp suffix."""
        if not os.path.exists(os.path.join(af.CurrentPath, "Backup")):
            os.mkdir(os.path.join(af.CurrentPath, "Backup"))
        BackupTime = time.strftime("_%Y_%m_%d_%H_%M_%S", time.localtime())
        name = os.path.basename(os.path.normpath(FolderName))
        BackupPath = os.path.join(af.CurrentPath, "Backup/%s%s" % (name, BackupTime))
        action = r'cp -r ' if cp else r'mv '
        af.Info('Back up previous result into %s.' % BackupPath)
        # NOTE(review): paths are interpolated into a shell command; fine for
        # trusted local config, but breaks on paths containing spaces.
        os.system(action + r" %s %s" % (FolderName, BackupPath))

    def setFolderName(self, name):
        """Resolve the result folder path and prepare it for the chosen scan
        method: post-process/plot reuse an existing folder; other methods
        create a fresh one (optionally backing up or removing a previous
        result after asking the user)."""
        # Turn the result folder path into absolute path
        # NOTE(review): only '/home...' and '~...' are treated as absolute
        # here -- presumably os.path.isabs was intended; confirm upstream.
        if name.startswith('/home') or name.startswith('~'):
            self._FolderName = name
        else:
            self._FolderName = os.path.join(af.CurrentPath, name)
        if self._ScanMethod in af._post:
            if not os.path.exists(self._FolderName):
                af.ErrorStop("The result folder %s does not exist." % self._FolderName)
            if self._ScanMethod == af._postprocess:
                # Backup previous results
                self.backup_result(self._FolderName, cp=True)
                # rm Figure and SavedFile folder
                os.system(r"find %s -type f -name '*' | xargs rm" % os.path.join(self._FolderName, 'SavedFile'))
                os.system(r"find %s -type f -name '*' | xargs rm" % os.path.join(self._FolderName, 'Figures'))
                # rename data file
                if not os.path.exists(os.path.join(self._FolderName, af.ResultFile_post)):
                    if not os.path.exists(os.path.join(self._FolderName, af.ResultFile)):
                        af.ErrorStop("No result data file in %s." % self._FolderName)
                    else:
                        os.system(r"mv %s %s" % (os.path.join(self._FolderName, af.ResultFile), os.path.join(self._FolderName, af.ResultFile_post)))
                else:
                    if af.flag_resume:
                        # TODO Add resume
                        af.ErrorStop("no resume function")
            else:  # af._plot
                if not os.path.exists(os.path.join(self._FolderName, af.ResultFile)):
                    af.ErrorStop("No result data file in %s." % self._FolderName)
        else:
            # Deal with the situation that the result folder already exists.
            if os.path.exists(self._FolderName):
                af.Info(("* The Result file [%s] already exists." % name))
                while True:
                    c = input("Choose: (r)replace, (b)backup, (s)stop\n")
                    if c == "r":
                        os.system(r"rm -r %s" % self._FolderName)
                        break
                    elif c == "b":
                        self.backup_result(self._FolderName)
                        break
                    elif c == "s":
                        exit(1)
                    else:
                        af.Info("Wrong input! Please type in one of ('r','b','s')")
            # Create result folder
            os.mkdir(self._FolderName)
            os.mkdir(os.path.join(self._FolderName, 'SavedFile'))
            if self._ScanMethod == af._multinest:
                self.MNOutputFile = os.path.join(self._FolderName, "MultiNestData/")
                os.mkdir(self.MNOutputFile)
        af.Info('...............................................')
        af.Info('Result file name = %s' % self._FolderName)

    def setPointNum(self, ntot):
        """Set the total number of scan points (must be >= 1)."""
        self._PointNum = int(ntot)
        if self._PointNum < 1:
            af.ErrorStop('"Number of points" should larger than 0')
        af.Info('Number of points = %s' % self._PointNum)

    def setRandomSeed(self, iseed):
        """Seed the basic random number generator."""
        self._RandomSeed = int(iseed)
        # If iseed is provided in the input file, initialize the basic random number generator
        # Otherwise, it will be initialized by current system time, and self._RandomSeed = -1,
        # which means also initialized by current system time in MultiNest
        # NOTE(review): seeding with -1 here is still deterministic for the
        # Python generator; only MultiNest treats -1 as "use system time".
        random.seed(self._RandomSeed)
        af.Info('Random seed = %s' % self._RandomSeed)

    def setAccepRate(self, AccepRate):
        """Set the target MCMC acceptance rate (0 < rate < 1) and enable
        step-size tuning."""
        self._AccepRate = float(AccepRate)
        if self._AccepRate >= 1 or self._AccepRate <= 0:
            af.ErrorStop('"Acceptance rate" must be in [0,1]. The suggest value is 0.5 for d<=2, 0.25 otherwise.')
        self._FlagTuneR = True
        af.Info('Acceptance rate = %s' % self._AccepRate)

    def setPrintNum(self, nprint):
        """Set how often progress is printed (every nprint points)."""
        self._PrintNum = int(nprint)
        if self._PrintNum < 1:
            af.ErrorStop('"Interval of print" should be larger than 0')
        af.Info('Interval of print = %s' % self._PrintNum)
        af.Info(' If all elements defined in "Output variable" are read by easyscan successfully,')
        af.Info(' easyscan would show information as following every interval')
        af.Info(' ------------ Num: # ------------')
        af.Info(' Input - = ')
        af.Info(' Output - = ')
        af.Info(' LnLike = ')
        af.Info(' Accepted Num = ')
        af.Info(' Total Num = ')

    def InputCheck(self, name, num, items):
        """Build the standard "wrong number of items" message text."""
        return 'Input parameter "%s" need %i iterms [ID, Prior, %s]' % (name, num, items)

    def setInputPar(self, inputvar):
        """Parse the "Input parameters" block; fills InPar/FixedPar/InputPar
        and the per-method settings (GridBin, MCMCss, MCMCiv)."""
        inputvar = af.string2nestlist(inputvar)
        # inputvar is list of list of input parameters define in section [scan]
        af.Info('Input parameters = ')
        for ii in inputvar:
            lenii = len(ii)
            if self._ScanMethod == af._postprocess:
                self.InPar[ii[0]] = af.NaN
                self.AllPar[ii[0]] = af.NaN
                af.Info(' ID= %s, read from previous ' % (ii[0]))
                continue
            if lenii < 3:
                af.ErrorStop(self.InputCheck(ii[0], 3, "Value"))
            # Set fixed par
            if ii[1].upper() == "FIXED":
                if lenii > 3:
                    af.WarningNoWait(self.InputCheck(ii[0], 3, "Value"))
                    af.WarningWait("The rest %i values will be ignore." % (lenii - 3))
                af.Info(' ID= %s\tPrior= %s\t =%f' % (ii[0], ii[1], ii[2]))
                self.FixedPar[ii[0]] = ii[2]
                self.AllPar[ii[0]] = ii[2]
                continue
            # Initialize other input par to NaN
            self.InputPar[ii[0]] = ii
            self.InPar[ii[0]] = af.NaN
            self.AllPar[ii[0]] = af.NaN
            if lenii < 4:
                af.ErrorStop(self.InputCheck(ii[0], 4, "Minimum, Maximum"))
            if self._ScanMethod in [af._random, af._multinest]:
                if lenii > 4:
                    af.WarningNoWait(self.InputCheck(ii[0], 4, "Minimum, Maximum"))
                    af.WarningWait("The rest %i values will be ignore." % (lenii - 4))
                af.Info(' ID= %s\tPrior= %s\tMin= %f\tMax= %f' % (ii[0], ii[1], ii[2], ii[3]))
                continue
            if self._ScanMethod == af._grid:
                if lenii == 4:
                    self.GridBin[ii[0]] = 20
                    af.WarningNoWait(self.InputCheck(ii[0], 5, "Minimum, Maximum, Number of bins"))
                    af.WarningWait("'Number of bins' will take default value, 20.")
                else:
                    self.GridBin[ii[0]] = ii[4]
                    if self.GridBin[ii[0]] < 0 or type(ii[4]) != int:
                        # BUG FIX: was a bare InputCheck(...) call -> NameError
                        # whenever this branch was hit; must go through self.
                        af.WarningNoWait(self.InputCheck(ii[0], 5, "Minimum, Maximum, Number of bins"))
                        af.ErrorStop("'Number of bins' is not a positive integer.")
                    if lenii > 5:
                        af.WarningNoWait(self.InputCheck(ii[0], 5, "Minimum, Maximum, Number of bins"))
                        af.WarningWait("The rest %i values will be ignore." % (lenii - 5))
                af.Info(' ID= %s\tPrior= %s\tMin= %f\tMax= %f\tNbin=%i' % (ii[0], ii[1], ii[2], ii[3], self.GridBin[ii[0]]))
                continue
            if self._ScanMethod == af._mcmc:
                if lenii < 6:
                    af.WarningNoWait(self.InputCheck(ii[0], 6, "Minimum, Maximum, Interval, Initial value"))
                    self.MCMCiv[ii[0]] = 1. / 2.
                    IniV = float(ii[3] + ii[2]) / 2.
                    # NOTE(review): message text says "(Max-Min)/2" but the
                    # value computed above is the midpoint (Max+Min)/2.
                    af.WarningWait("'Initial value' will take default value, (Max-Min)/2.")
                if lenii < 5:
                    self.MCMCss[ii[0]] = 1. / 30.
                    Step = float(ii[3] - ii[2]) / 30.
                    af.WarningWait("'Interval' will take default value, (Max-Min)/30.")
                else:
                    # The scan range is normalized to 1
                    self.MCMCss[ii[0]] = 1.0 / float(ii[4])
                    Step = float(ii[3] - ii[2]) / float(ii[4])
                    # NOTE(review): for lenii == 5 the lines below index
                    # ii[5], which does not exist -> IndexError; confirm
                    # the intended handling upstream.
                    if ii[1].lower() == 'flat':
                        self.MCMCiv[ii[0]] = float(ii[5] - ii[2]) / float(ii[3] - ii[2])
                    elif ii[1].lower() == 'log':
                        self.MCMCiv[ii[0]] = (log10(ii[5]) - log10(ii[2])) / (log10(ii[3]) - log10(ii[2]))
                    IniV = ii[5]
                if lenii > 6:
                    af.WarningNoWait(self.InputCheck(ii[0], 6, "Minimum, Maximum, Interval, Initial value"))
                    af.WarningWait("The rest %i values will be ignore." % (lenii - 6))
                af.Info(' ID= %s\tPrior= %s\tMin= %f\tMax= %f\tStep=%f\tIniV=%f' % (ii[0], ii[1], ii[2], ii[3], Step, self.MCMCiv[ii[0]]))
                continue

    def setProgram(self, prog):
        """Register the program objects and merge their declared variables
        into the AllPar/OutPar bookkeeping dictionaries."""
        self._Prog = prog
        # Copy input vars of prog into allvars
        for ii in prog:
            af.Debug('Programe ID', ii)
            af.Debug('Corresponding output vars', prog[ii].outvar)
            # save all parameters
            for jj in prog[ii].outvar:
                self.AllPar[jj] = prog[ii].outvar[jj]
                self.OutPar[jj] = prog[ii].outvar[jj]
            for jj in prog[ii].invar:
                if jj not in list(self.AllPar.keys()):
                    self.AllPar[jj] = prog[ii].invar[jj]
                    self.OutPar[jj] = prog[ii].invar[jj]
            for jj in prog[ii].boundvar:
                if jj not in list(self.AllPar.keys()):
                    self.AllPar[jj] = prog[ii].boundvar[jj]
                    self.OutPar[jj] = prog[ii].boundvar[jj]
            for jj in prog[ii].cgauvar:
                if jj not in list(self.AllPar.keys()):
                    self.AllPar[jj] = prog[ii].cgauvar[jj]
                    self.OutPar[jj] = prog[ii].cgauvar[jj]
            for jj in prog[ii].cffchi2var:
                if jj not in list(self.AllPar.keys()):
                    self.AllPar[jj] = prog[ii].cffchi2var[jj]
                    self.OutPar[jj] = prog[ii].cffchi2var[jj]
        # Order parameters
        self.InPar = af.sortDic(self.InPar)
        self.FixedPar = af.sortDic(self.FixedPar)
        self.OutPar = af.sortDic(self.OutPar)
        af.Debug('All vars: ', self.AllPar)
        af.Debug('Input Pars: ', self.InPar)
        af.Debug('Fixed Pars: ', self.FixedPar)
        af.Debug('Output Pars:', self.OutPar)

    # -- simple accessors ---------------------------------------------------

    def getFolderName(self):
        return self._FolderName

    def getPointNum(self):
        return self._PointNum

    def getScanMethod(self):
        return self._ScanMethod

    def getRandomSeed(self):
        return self._RandomSeed

    def getPrintNum(self):
        return self._PrintNum

    def getDebugFlag(self):
        # NOTE(review): self._DebugFlag is never assigned in __init__, so
        # calling this raises AttributeError -- confirm where it is meant
        # to be set.
        return self._DebugFlag

    def getStepSize(self):
        return self.MCMCss

    def getInitialValue(self):
        return self.MCMCiv

    def getFlagTuneR(self):
        return self._FlagTuneR

    def getAccepRate(self):
        return self._AccepRate
|
|
"""
Contains the querying interface.
Starting with :class:`~tinydb.queries.Query` you can construct complex
queries:
>>> ((where('f1') == 5) & (where('f2') != 2)) | where('s').matches(r'^\w+$')
(('f1' == 5) and ('f2' != 2)) or ('s' ~= ^\w+$ )
Queries are executed by using the ``__call__``:
>>> q = where('val') == 5
>>> q({'val': 5})
True
>>> q({'val': 1})
False
"""
import re
import sys
from .utils import catch_warning, freeze
__all__ = ('Query', 'where')
def is_sequence(obj):
    """Return True when *obj* is iterable (i.e. exposes ``__iter__``)."""
    return hasattr(obj, '__iter__')
class QueryImpl(object):
    """
    A concrete, callable query.

    Wraps a test function that is run when the instance is called with a
    document. Instances compose with ``&``, ``|`` and ``~`` and hash on a
    structural description of the query, so structurally equal queries
    compare and hash equal.
    """

    def __init__(self, test, hashval):
        self.test = test          # callable(document) -> truthy/falsy
        self.hashval = hashval    # hashable structural description

    def __call__(self, value):
        """Evaluate the query against *value*."""
        return self.test(value)

    def __hash__(self):
        return hash(self.hashval)

    def __repr__(self):
        return 'QueryImpl{0}'.format(self.hashval)

    def __eq__(self, other):
        # Structural equality: two queries are equal iff their hash
        # descriptions are equal.
        return self.hashval == other.hashval

    # --- Query modifiers -----------------------------------------------------

    def __and__(self, other):
        # AND is commutative (a & b == b & a), so hash on a frozenset.
        def _both(value):
            return self(value) and other(value)
        return QueryImpl(_both, ('and', frozenset([self.hashval, other.hashval])))

    def __or__(self, other):
        # OR is commutative (a | b == b | a), so hash on a frozenset.
        def _either(value):
            return self(value) or other(value)
        return QueryImpl(_either, ('or', frozenset([self.hashval, other.hashval])))

    def __invert__(self):
        def _negated(value):
            return not self(value)
        return QueryImpl(_negated, ('not', self.hashval))
class Query(object):
    """
    TinyDB Queries.

    Allows to build queries for TinyDB databases. There are two main ways of
    using queries:

    1) ORM-like usage:

    >>> User = Query()
    >>> db.search(User.name == 'John Doe')
    >>> db.search(User['logged-in'] == True)

    2) Classical usage:

    >>> db.search(where('value') == True)

    Note that ``where(...)`` is a shorthand for ``Query(...)`` allowing for
    a more fluent syntax.

    Besides the methods documented here you can combine queries using the
    binary AND and OR operators:

    >>> db.search(where('field1').exists() & where('field2') == 5)  # Binary AND
    >>> db.search(where('field1').exists() | where('field2') == 5)  # Binary OR

    Queries are executed by calling the resulting object. They expect to get
    the document to test as the first argument and return ``True`` or
    ``False`` depending on whether the documents matches the query or not.
    """

    def __init__(self):
        # Key path accumulated through attribute/item access,
        # e.g. Query().a.b -> ['a', 'b'].
        self._path = []

    def __getattr__(self, item):
        # Return a *new* Query with the path extended so partially built
        # queries can be reused without mutating each other.
        query = Query()
        query._path = self._path + [item]
        return query

    # Item access (query['key']) behaves exactly like attribute access.
    __getitem__ = __getattr__

    def _generate_test(self, test, hashval):
        """
        Generate a query based on a test function.

        :param test: The test the query executes.
        :param hashval: The hash of the query.
        :return: A :class:`~tinydb.queries.QueryImpl` object
        :raises ValueError: when no key path has been built yet
        """
        if not self._path:
            raise ValueError('Query has no path')

        def impl(value):
            try:
                # Resolve the path
                for part in self._path:
                    value = value[part]
            except (KeyError, TypeError):
                # Missing key or non-subscriptable value -> no match.
                return False
            else:
                return test(value)

        return QueryImpl(impl, hashval)

    def __eq__(self, rhs):
        """
        Test a dict value for equality.

        >>> Query().f1 == 42

        :param rhs: The value to compare against
        """
        if sys.version_info <= (3, 0):  # pragma: no cover
            # Special UTF-8 handling on Python 2
            def test(value):
                with catch_warning(UnicodeWarning):
                    try:
                        return value == rhs
                    except UnicodeWarning:
                        # Dealing with a case, where 'value' or 'rhs'
                        # is unicode and the other is a byte string.
                        if isinstance(value, str):
                            return value.decode('utf-8') == rhs
                        elif isinstance(rhs, str):
                            return value == rhs.decode('utf-8')

        else:  # pragma: no cover
            def test(value):
                return value == rhs

        return self._generate_test(lambda value: test(value),
                                   ('==', tuple(self._path), freeze(rhs)))

    def __ne__(self, rhs):
        """
        Test a dict value for inequality.

        >>> Query().f1 != 42

        :param rhs: The value to compare against
        """
        return self._generate_test(lambda value: value != rhs,
                                   ('!=', tuple(self._path), freeze(rhs)))

    def __lt__(self, rhs):
        """
        Test a dict value for being lower than another value.

        >>> Query().f1 < 42

        :param rhs: The value to compare against
        """
        return self._generate_test(lambda value: value < rhs,
                                   ('<', tuple(self._path), rhs))

    def __le__(self, rhs):
        """
        Test a dict value for being lower than or equal to another value.

        >>> where('f1') <= 42

        :param rhs: The value to compare against
        """
        return self._generate_test(lambda value: value <= rhs,
                                   ('<=', tuple(self._path), rhs))

    def __gt__(self, rhs):
        """
        Test a dict value for being greater than another value.

        >>> Query().f1 > 42

        :param rhs: The value to compare against
        """
        return self._generate_test(lambda value: value > rhs,
                                   ('>', tuple(self._path), rhs))

    def __ge__(self, rhs):
        """
        Test a dict value for being greater than or equal to another value.

        >>> Query().f1 >= 42

        :param rhs: The value to compare against
        """
        return self._generate_test(lambda value: value >= rhs,
                                   ('>=', tuple(self._path), rhs))

    def exists(self):
        """
        Test for a dict where a provided key exists.

        >>> Query().f1.exists()
        """
        return self._generate_test(lambda _: True,
                                   ('exists', tuple(self._path)))

    def matches(self, regex):
        r"""
        Run a regex test against a dict value (whole string has to match).

        >>> Query().f1.matches(r'^\w+$')

        :param regex: The regular expression to use for matching
        """
        return self._generate_test(lambda value: re.match(regex, value),
                                   ('matches', tuple(self._path), regex))

    def search(self, regex):
        r"""
        Run a regex test against a dict value (only substring string has to
        match).

        >>> Query().f1.search(r'^\w+$')

        :param regex: The regular expression to use for matching
        """
        return self._generate_test(lambda value: re.search(regex, value),
                                   ('search', tuple(self._path), regex))

    def test(self, func, *args):
        """
        Run a user-defined test function against a dict value.

        >>> def test_func(val):
        ...     return val == 42
        ...
        >>> Query().f1.test(test_func)

        :param func: The function to call, passing the dict as the first
                     argument
        :param args: Additional arguments to pass to the test function
        """
        return self._generate_test(lambda value: func(value, *args),
                                   ('test', tuple(self._path), func, args))

    def any(self, cond):
        """
        Checks if a condition is met by any document in a list,
        where a condition can also be a sequence (e.g. list).

        >>> Query().f1.any(Query().f2 == 1)

        Matches::

            {'f1': [{'f2': 1}, {'f2': 0}]}

        >>> Query().f1.any([1, 2, 3])

        Matches::

            {'f1': [1, 2]}
            {'f1': [3, 4, 5]}

        :param cond: Either a query that at least one document has to match or
                     a list of which at least one document has to be contained
                     in the tested document.
        """
        if callable(cond):
            def _cmp(value):
                return is_sequence(value) and any(cond(e) for e in value)

        else:
            def _cmp(value):
                return is_sequence(value) and any(e in cond for e in value)

        return self._generate_test(lambda value: _cmp(value),
                                   ('any', tuple(self._path), freeze(cond)))

    def all(self, cond):
        """
        Checks if a condition is met by all documents in a list,
        where a condition can also be a sequence (e.g. list).

        >>> Query().f1.all(Query().f2 == 1)

        Matches::

            {'f1': [{'f2': 1}, {'f2': 1}]}

        >>> Query().f1.all([1, 2, 3])

        Matches::

            {'f1': [1, 2, 3, 4, 5]}

        :param cond: Either a query that all documents have to match or a list
                     which has to be contained in the tested document.
        """
        if callable(cond):
            def _cmp(value):
                return is_sequence(value) and all(cond(e) for e in value)

        else:
            def _cmp(value):
                return is_sequence(value) and all(e in value for e in cond)

        return self._generate_test(lambda value: _cmp(value),
                                   ('all', tuple(self._path), freeze(cond)))
def where(key):
    """Shorthand for ``Query()[key]`` -- start a query at the given key."""
    query = Query()
    return query[key]
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import defaultdict as dd
from omw.common_sql import (
qs,
connect_omw,
connect_admin,
query_omw,
query_omw_direct,
write_omw,
blk_write_omw
)
import sys
from omw import app
#ntsense=namedtuple('Sense', ['lemma', 'y'], verbose=True)
import gwadoc
with app.app_context():
def init_omw_db():
    """Create the OMW schema by executing schemas/omw.sql."""
    con = connect_omw()
    with app.open_resource('schemas/omw.sql') as script:
        con.executescript(script.read().decode('utf-8'))
def init_admin_db():
    """Create the admin schema by executing schemas/admin.sql."""
    con = connect_admin()
    with app.open_resource('schemas/admin.sql') as script:
        con.executescript(script.read().decode('utf-8'))
def l2q(l):
    """Return a comma-separated string of SQL placeholders ('?'),
    one per element of *l* -- handy for parameterised IN (...) queries."""
    return ",".join("?" for _ in l)
def fetch_langs():
    """Return (lang_id, lang_code): per-id attributes/names, and a reverse
    lookup from bcp47/iso639 code to language id."""
    lang_id = dd(lambda: dd())
    lang_code = dd(lambda: dd())
    rows = query_omw("""SELECT id, bcp47, iso639, in_lang_id, name
                        FROM lang JOIN lang_name
                        ON id = lang_id""")
    for row in rows:
        key = row['id']
        lang_id[key]['bcp47'] = row['bcp47']
        lang_id[key]['iso639'] = row['iso639']
        lang_id[key][row['in_lang_id']] = row['name']
        lang_code['code'][row['bcp47']] = key
        lang_code['code'][row['iso639']] = key
    return lang_id, lang_code
def fetch_key_bcp_lang_code(lang):
    """ There should be only one id for either lang code """
    rows = query_omw("""SELECT id
                        FROM lang
                        WHERE (bcp47 = ?)""", [lang])
    for row in rows:
        # return the first (and only) match; None when there is no match
        return row['id']
def fetch_proj():
    """Return a mapping of project id -> project code."""
    return {row['id']: row['code']
            for row in query_omw("""SELECT id, code FROM proj""")}
def insert_new_project(code, u):
    """ Adds a new project to the system

    Returns True on success, False when the insert fails (e.g. duplicate
    code or a None code whose .strip() is skipped but DB rejects it).
    """
    try:
        # strip whitespace, but pass falsy values (None, '') through untouched
        proj_code = code.strip() if code else code
        write_omw("""INSERT INTO proj (code, u)
                     VALUES (?,?) """, [proj_code, u])
        return True
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); still deliberately best-effort.
        return False
def f_proj_id_by_code(code):
    """Return the project id for a given project code (None if absent)."""
    rows = query_omw("""SELECT id, code
                        FROM proj
                        WHERE code = ?""", [code])
    for row in rows:
        return row['id']
def fetch_src():
    """
    Return a mapping of {src.id: (proj.code, src.version)}

    Example:
        >>> fetch_src()[1]
        ('pwn', '3.0')
    """
    # NOTE: docstring example fixed -- the function returns a dict, so the
    # original `next(fetch_src())` example would raise TypeError.
    proj_id = fetch_proj()
    src_id = dict()
    for r in query_omw("""SELECT id, proj_id, version FROM src"""):
        src_id[r['id']] = (proj_id[r['proj_id']], r['version'])
    return src_id
def insert_src(proj_id, version, u):
    """Insert a (proj_id, version) source row; returns write_omw's result."""
    return write_omw("""INSERT INTO src (proj_id, version, u)
                        VALUES (?,?,?)""",
                     [proj_id, version, u])
def f_src_id_by_proj_id_ver(proj_id, version):
    """Return the src id for a (proj_id, version) pair (None if absent)."""
    rows = query_omw("""SELECT id, proj_id, version
                        FROM src
                        WHERE proj_id = ? AND version = ?""",
                     [proj_id, version])
    for row in rows:
        return row['id']
def f_src_id_by_proj_ver(proj, version):
    """Return the src id for a (project code, version) pair (None if absent)."""
    rows = query_omw("""SELECT src.id
                        FROM src JOIN proj
                        ON src.proj_id=proj.id
                        WHERE proj.code= ? AND src.version = ?""",
                     [proj, version])
    for row in rows:
        return row['id']
def fetch_src_meta():
    """Return src_id -> {attr: val} built from the src_meta table."""
    src_meta = dd(lambda: dd(str))
    for row in query_omw("""SELECT src_id, attr, val, u, t FROM src_meta"""):
        src_meta[row['src_id']][row['attr']] = row['val']
    return src_meta
def fetch_src_id_pos_stats(src_id):
    """Per-POS counts of distinct synsets/words/senses for one source
    (wordnet), keyed by the POS tag."""
    stats = dd(lambda: dd(int))
    pos = fetch_pos()
    rows = query_omw_direct("""
        SELECT pos_id, count(distinct s.ss_id),
               count(distinct s.w_id), count(distinct s.id)
        FROM s JOIN s_src
        ON s.id=s_src.s_id
        JOIN ss ON s.ss_id=ss.id
        WHERE s_src.src_id=? group by pos_id""", (src_id,))
    for (pos_id, n_synsets, n_words, n_senses) in rows:
        tag = pos['id'][pos_id]
        stats[tag]['id'] = pos_id
        stats[tag]['synsets'] = n_synsets
        stats[tag]['words'] = n_words
        stats[tag]['senses'] = n_senses
    return stats
def fetch_pos_id_ss_mf(pos_ids, num=3, src_id=0):
    """
    get the most frequent num synsets per POS
    pos_ids is the list of pos_ids you want
    num is how many examples
    src_id is the id of the wordnet (or 0 for all)
    pos_exe[pos_id] = [(ss_id1, freq1), (ss_id2, freq2),
                       ..., (ss_idn, freqn)]
    """
    # get the examples for the POS
    pos_exe = dd(list)
    if src_id:  # get for only one wordnet
        ## Randomly ordered (as there may be no frequency);
        ## freq is reported as 0 for this branch.
        # (removed a leftover debug print of (src_id, p) to stdout)
        for p in pos_ids:
            for r in query_omw_direct("""
               SELECT ss_id
               FROM s JOIN ss
               ON ss.id = s.ss_id
               WHERE pos_id = ?
               AND s.id IN (
                  SELECT s_id
                  FROM s_src
                  WHERE src_id = ?)
               LIMIT ?""", (p, src_id, num)):
                pos_exe[p].append((r[0], 0))
    else:  # get for all wordnets, ordered by summed frequency (smt_id=1)
        for p in pos_ids:
            for r in query_omw_direct("""
              SELECT ss_id, sum(sml_id) AS freq
              FROM s JOIN sm ON s.id = sm.s_id
              JOIN ss ON ss.id = s.ss_id
              WHERE smt_id =1 AND pos_id = ?
              GROUP BY ss.id
              ORDER BY freq DESC LIMIT ?""", (p, num)):
                pos_exe[p].append((r[0], r[1]))
    return pos_exe
def fetch_pos_id_freq():
    """Return {pos_id: number of distinct synsets} over all senses."""
    freq_by_pos = dd(int)
    rows = query_omw_direct("""
         SELECT pos_id, count(distinct s.ss_id)
         FROM s JOIN ss ON s.ss_id=ss.id
         GROUP BY pos_id;""")
    for (pos_id, freq) in rows:
        freq_by_pos[pos_id] = freq
    return freq_by_pos
def fetch_ssrel_stats(src_id):
    """ Return the number of links for each type for synset-synset links
    for a wordnet with the given src_id.

    Keys are relation names plus the pseudo keys 'TOTAL' and
    'CONSTITUTIVE' (links whose relation is in the list below).
    """
    # relations counted toward the CONSTITUTIVE total
    constitutive = ['instance_hyponym','instance_hypernym',
                    'hypernym', 'hyponym',
                    'synonym', 'antonym',
                    'mero_part', 'holo_part',
                    'mero_member', 'holo_member',
                    'mero_substance', 'holo_substance' ]
    src_ssrel_stats = dd(int)
    ssrl=fetch_ssrel()
    for r in query_omw("""
                        SELECT ssrel_id, count(ssrel_id)
                        FROM sslink JOIN sslink_src
                        ON sslink.id=sslink_src.sslink_id
                        WHERE sslink_src.src_id=?
                        GROUP BY ssrel_id""", [src_id]):
        # ssrl['id'][id] = (relation name, definition)
        link = ssrl['id'][r['ssrel_id']]
        # rows are keyed by the literal column expression name
        src_ssrel_stats[link[0]] = r['count(ssrel_id)']
        src_ssrel_stats['TOTAL'] += r['count(ssrel_id)']
        if link[0] in constitutive:
            src_ssrel_stats['CONSTITUTIVE'] += r['count(ssrel_id)']
    return src_ssrel_stats
def fetch_srel_stats(src_id):
    """ Return the number of links for each type for sense-sense links
    for a wordnet with the given src_id.

    Keys are relation names plus the pseudo key 'TOTAL'.
    """
    src_srel_stats = dd(int)
    srl=fetch_srel()
    for r in query_omw("""
                        SELECT srel_id, count(srel_id)
                        FROM slink JOIN slink_src
                        ON slink.id=slink_src.slink_id
                        WHERE slink_src.src_id=?
                        GROUP BY srel_id""", [src_id]):
        # srl['id'][id] = (relation name, definition)
        link = srl['id'][r['srel_id']]
        # rows are keyed by the literal column expression name
        src_srel_stats[link[0]] = r['count(srel_id)']
        src_srel_stats['TOTAL'] += r['count(srel_id)']
    return src_srel_stats
def fetch_src_id_stats(src_id):
    """Summary statistics for one wordnet source.

    Returns a defaultdict(int) with keys: 'synsets', 'senses', 'forms',
    'words', 'core' (only if a 'core' resource exists), 'in_ili', 'def',
    'ssexe', 'freq_token', 'freq_type'.
    """
    src_id_stats=dd(int)
    # synset and sense counts for this source
    for r in query_omw("""
                        SELECT count(distinct s.ss_id), count(distinct s.id)
                        FROM s JOIN s_src
                        ON s.id=s_src.s_id
                        WHERE s_src.src_id=?""", [src_id]):
        src_id_stats['synsets'] = r['count(distinct s.ss_id)']
        src_id_stats['senses'] = r['count(distinct s.id)']
    # word and form counts from the word-form link table
    for r in query_omw("""
                        SELECT count(distinct w_id), count(distinct f_id)
                        FROM wf_link WHERE src_id=?""", [src_id]):
        src_id_stats['forms'] = r['count(distinct f_id)']
        src_id_stats['words'] = r['count(distinct w_id)']
    # 'core' synsets: those cross-linked to the 'core' resource (if defined)
    cid = query_omw('select id from resource where code = ?', ('core',), one=True)
    if cid:
        core_id = cid['id']
        for r in query_omw("""select count(distinct ss.id)
        FROM ss JOIN ss_src ON ss.id=ss_src.ss_id JOIN ssxl ON ssxl.ss_id=ss.id
        WHERE ss_src.src_id = ? AND ssxl.resource_id = ?""", [src_id, core_id]):
            src_id_stats['core'] = r['count(distinct ss.id)']
    ## synsets that are used in a sense and linked to an ili
    for r in query_omw("""
                        SELECT count(distinct id)
                        FROM ss
                        WHERE ss.ili_id is not NULL
                        AND id IN
                        (SELECT s.ss_id FROM s
                        WHERE s.id IN
                        (SELECT s_id FROM s_src
                        WHERE s_src.src_id=?))""", [src_id]):
        src_id_stats['in_ili'] = r['count(distinct id)']
    ### Definitions
    for r in query_omw("""
                        SELECT count(distinct ss_id)
                        FROM def WHERE id IN
                        (SELECT def_id FROM def_src
                        WHERE src_id =?)""", [src_id]):
        src_id_stats['def'] = r['count(distinct ss_id)']
    ### Examples
    for r in query_omw("""
                        SELECT count(distinct ss_id)
                        FROM ssexe WHERE id in
                        (SELECT ssexe_id FROM ssexe_src
                        WHERE src_id =?)""", [src_id]):
        src_id_stats['ssexe'] = r['count(distinct ss_id)']
    ### Frequency
    # NOTE(review): 'freq_token' gets the row count and 'freq_type' the
    # summed frequency -- these look swapped; confirm intended semantics.
    for r in query_omw("""
                        SELECT COALESCE(sum(sml_id),0) as sum, count(sml_id) FROM sm
                        WHERE smt_id = 1 AND sm.s_id IN
                        (SELECT s_id FROM s_src
                        WHERE src_id =?)""", [src_id]):
        src_id_stats['freq_token'] = r['count(sml_id)']
        src_id_stats['freq_type'] = r['sum']
    return src_id_stats
def fetch_src_for_s_id(s_ids):
    """Return {s_id: [(src_id, conf), ...]} for each requested sense id."""
    by_sense = dd(list)
    sql = """SELECT s_id, src_id, conf
                          FROM s_src WHERE s_id in (%s)""" % qs(s_ids)
    for row in query_omw(sql, s_ids):
        by_sense[row['s_id']].append((row['src_id'], row['conf']))
    return by_sense
def fetch_src_for_ss_id(s_ids):
    """Return {ss_id: [(src_id, src_key, conf), ...]} for the given synset ids."""
    by_synset = dd(list)
    sql = """SELECT ss_id, src_key, src_id, conf
                          FROM ss_src WHERE ss_id in (%s)""" % qs(s_ids)
    for row in query_omw(sql, s_ids):
        by_synset[row['ss_id']].append((row['src_id'], row['src_key'], row['conf']))
    return by_synset
def insert_src_meta(src_id, attr, val, u):
    """Insert one (attr, val) metadata row for a source; return the new row id."""
    params = [src_id, attr, val, u]
    return write_omw("""INSERT INTO src_meta (src_id, attr, val, u)
                          VALUES (?,?,?,?)""", params)
def blk_insert_src_meta(tuple_list):
    """ tuple_list must be of format [(src_id, attr, val, u), ...] """
    return blk_write_omw("""INSERT INTO src_meta (src_id, attr, val, u)
                          VALUES (?,?,?,?)""", tuple_list)
def fetch_kind():
    """Return {id: kind} for every row of the kind table."""
    return {row['id']: row['kind']
            for row in query_omw("""SELECT id, kind FROM kind""")}
def fetch_status():
    """Return {id: status} for every row of the status table."""
    return {row['id']: row['status']
            for row in query_omw("""SELECT id, status FROM status""")}
def fetch_max_ili_id():
    """Return the largest ili.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM ili"""):
        return row['MAX(id)']
def fetch_max_ss_id():
    """Return the largest ss.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM ss"""):
        return row['MAX(id)']
def fetch_max_def_id():
    """Return the largest def.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM def"""):
        return row['MAX(id)']
def fetch_max_ssexe_id():
    """Return the largest ssexe.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM ssexe"""):
        return row['MAX(id)']
def fetch_max_sslink_id():
    """Return the largest sslink.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM sslink"""):
        return row['MAX(id)']
def fetch_max_slink_id():
    """Return the largest slink.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM slink"""):
        return row['MAX(id)']
def fetch_max_ssslink_id():
    """Return the largest ssslink.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM ssslink"""):
        return row['MAX(id)']
def fetch_max_f_id():
    """Return the largest f.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM f"""):
        return row['MAX(id)']
def fetch_max_w_id():
    """Return the largest w.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM w"""):
        return row['MAX(id)']
def fetch_max_s_id():
    """Return the largest s.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM s"""):
        return row['MAX(id)']
def fetch_max_sm_id():
    """Return the largest sm.id (None when the table is empty)."""
    for row in query_omw("""SELECT MAX(id) FROM sm"""):
        return row['MAX(id)']
def insert_into_ili(kind, definition, status, src, key, u):
    """Insert one ILI concept row; return the new ili.id."""
    params = [kind, definition, status, src, key, u]
    return write_omw("""INSERT INTO ili
                        (kind_id, def, status_id, origin_src_id, src_key, u)
                        VALUES (?,?,?,?,?,?)""", params)
def blk_insert_into_ili(tuple_list):
    """Bulk-insert explicit-id ILI rows:
    tuple_list = [(id, kind_id, def, status_id, origin_src_id, src_key, u), ...]."""
    sql = """INSERT INTO ili
                        (id, kind_id, def, status_id, origin_src_id, src_key, u)
                        VALUES (?,?,?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def fetch_rate_id(ili_ids, u=None):
    """
    This function takes a list of ili ids and, optionally a username.
    It returns a dictionary with the ratings filtered by the ids and,
    if provided, for that specific user.

    rating[ili_id] = [(rating, user, timestamp), ...]
    """
    rating = dd(list)
    # (placeholder string "?,?,...", the id list) for the IN clause
    ili_list = (",".join("?" for s in ili_ids), ili_ids)
    if u:
        # sys.stderr.write('\n USER MODE \n') #TEST
        for r in query_omw("""SELECT id, ili_id, rating, u, t
                              FROM ili_rating
                              WHERE ili_id in ({})
                              AND u = ?""".format(ili_list[0]),
                           ili_list[1]+[u]):
            rating[r['ili_id']].append((r['rating'], r['u'], r['t']))
    else:
        # sys.stderr.write('\n NON USER MODE \n') #TEST
        for r in query_omw("""SELECT id, ili_id, rating, u, t
                              FROM ili_rating
                              WHERE ili_id in ({})
                           """.format(ili_list[0]),
                           ili_list[1]):
            rating[r['ili_id']].append((r['rating'], r['u'], r['t']))
    return rating
def f_rate_summary(ili_ids):
    """
    Summarise votes for a list of ili ids.

    Returns (counts, up_who, down_who): counts[id]['up'/'down'] are vote
    tallies; up_who/down_who map each id to the users who voted that way.
    """
    counts = dd(lambda: dd(int))
    up_who = dd(list)
    down_who = dd(list)
    for ili_id, votes in fetch_rate_id(ili_ids).items():
        key = int(ili_id)
        for (rate, user, _time) in votes:
            if rate == 1:
                counts[key]['up'] += 1
                up_who[key].append(user)
            elif rate == -1:
                counts[key]['down'] += 1
                down_who[key].append(user)
    return counts, up_who, down_who
def rate_ili_id(ili_id, rate, u):
    """
    This function is used to give a +1 or -1 rating to ili ids. It only updates
    these values when necessary, overwriting the previous rating if it was
    different. Returns True on update and None if no changes were necessary.
    """
    ili_id = int(ili_id)
    # only this user's existing rating (if any) for this id
    rating = fetch_rate_id([ili_id], u)
    if ili_id in rating:
        if rating[ili_id][0][0] == rate:
            # unchanged vote: nothing to write
            return None
        else:
            write_omw("""UPDATE ili_rating
                         SET rating = ?
                         WHERE ili_id = ?
                         AND u = ?
                      """, [rate, ili_id, u])
            return True
    else:
        write_omw("""INSERT INTO ili_rating (ili_id, rating, u)
                     VALUES (?,?,?)
                  """, [ili_id, rate, u])
        return True
def comment_ili_id(ili_id, comment, u):
    """Post *comment* by user *u* against an ili id; always returns True."""
    body = comment.strip() if comment else comment
    write_omw("""INSERT INTO ili_com (ili_id, com, u)
                 VALUES (?,?,?)
              """, [int(ili_id), body, u])
    return True
def fetch_comment_id(ili_ids, u=None):
    """
    This function takes a list of ili ids and, optionally a username.
    It returns a dictionary with the comments filtered by the ids and,
    if provided, for that specific user.

    comments[ili_id] = [(comment, user, timestamp), ...]
    """
    comments = dd(list)
    # (placeholder string "?,?,...", the id list) for the IN clause
    ili_list = (",".join("?" for s in ili_ids), ili_ids)
    if u:
        for r in query_omw("""SELECT id, ili_id, com, u, t
                              FROM ili_com
                              WHERE ili_id in ({})
                              AND u = ?""".format(ili_list[0]),
                           ili_list[1]+[u]):
            comments[r['ili_id']].append((r['com'], r['u'], r['t']))
    else:
        for r in query_omw("""SELECT id, ili_id, com, u, t
                              FROM ili_com
                              WHERE ili_id in ({})
                           """.format(ili_list[0]),
                           ili_list[1]):
            comments[r['ili_id']].append((r['com'], r['u'], r['t']))
    return comments
def fetch_ili(ili_ids=None):
    """Fetch ILI rows (all of them, or just *ili_ids* when given).

    Returns (ili, ili_defs):
    ili[id] = (kind, def, (proj, version), src_key, status,
               superseded_by_id, t)
    ili_defs[def_text] = id  -- reverse lookup by definition text
    (later rows overwrite earlier ones on duplicate definitions)
    """
    src_id = fetch_src()
    kind_id = fetch_kind()
    status_id = fetch_status()
    ili = dict()
    ili_defs = dict()
    if ili_ids:
        # restrict to the requested ids; the two branches differ only
        # in the WHERE clause
        ili_list = (",".join("?" for s in ili_ids), ili_ids)
        for c in query_omw("""SELECT * FROM ili WHERE id in (%s)
                           """ % (ili_list[0]), ili_list[1]):
            ili[c['id']] = (kind_id[c['kind_id']], c['def'],
                            src_id[c['origin_src_id']], c['src_key'],
                            status_id[c['status_id']], c['superseded_by_id'],
                            c['t'])
            ili_defs[c['def']]=c['id']
    else:
        for c in query_omw("""SELECT * FROM ili """):
            ili[c['id']] = (kind_id[c['kind_id']], c['def'],
                            src_id[c['origin_src_id']], c['src_key'],
                            status_id[c['status_id']], c['superseded_by_id'],
                            c['t'])
            ili_defs[c['def']]=c['id']
    return ili, ili_defs
def fetch_ili_status(status):
    """Return ILI rows with the given status_id, keyed by ili id:
    ili[id] = (kind, def, (proj, version), src_key, status,
               superseded_by_id, t)."""
    src_id = fetch_src()
    kind_id = fetch_kind()
    status_id = fetch_status()
    ili = dict()
    rows = query_omw("""SELECT * FROM ili WHERE status_id = ?""", [status])
    for row in rows:
        ili[row['id']] = (kind_id[row['kind_id']], row['def'],
                          src_id[row['origin_src_id']], row['src_key'],
                          status_id[row['status_id']], row['superseded_by_id'],
                          row['t'])
    return ili
def insert_new_language(bcp, iso, name, u):
    """
    Add a new language (and its English name) to the system.
    Requires a bcp47 code, a display name and a user; the bcp47 code
    must not already be registered.  Returns True on success, else False.
    """
    # reject missing fields or an already-known bcp47 code
    if not (bcp and name and u) or fetch_key_bcp_lang_code(bcp):
        return False
    try:
        bcp = bcp.strip()
        iso = iso.strip() if iso else iso
        name = name.strip()
        lastid = write_omw("""INSERT INTO lang (bcp47, iso639, u)
                              VALUES (?,?,?)
                           """, [bcp, iso, u])
        # in_lang_id=1: the display name is stored as the name in language 1
        write_omw("""INSERT INTO lang_name (lang_id, in_lang_id,
                                            name, u)
                     VALUES (?,?,?,?)
                  """, [lastid, 1, name, u])
        return True
    except Exception:
        # a bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # report any DB failure (e.g. constraint violation) as False
        return False
# OMW
def fetch_pos():
    """POS lookup table:
    pos['id'][id] = tag, pos['tag'][tag] = id, pos['def'][id] = definition."""
    lookup = dd(lambda: dd())
    for row in query_omw("""SELECT id, tag, def FROM pos"""):
        lookup['id'][row['id']] = row['tag']
        lookup['tag'][row['tag']] = row['id']
        lookup['def'][row['id']] = row['def']
    return lookup
def fetch_ssrel():
    """Look up the relation and definition for synset level links,
    indexed both ways:
    ssrel['id'][1] = ('agent', 'the undertaker of an action')
    ssrel['rel']['agent'] = (1, 'the undertaker of an action')"""
    lookup = dd(lambda: dd())
    for row in query_omw("""SELECT id, rel, def FROM ssrel"""):
        lookup['id'][row['id']] = (row['rel'], row['def'])
        lookup['rel'][row['rel']] = (row['id'], row['def'])
    return lookup
def fetch_sssrel():
    """look up the relation and definition for sense-synset level links
    index by an 'id' or from a relation name
    sssrel['id'][1] = ('agent', 'the undertaker of an action')
    sssrel['rel']['agent'] = (1, 'the undertaker of an action')
    FIXME link to GWADOC """
    # NOTE(review): this queries the ssrel table, the same as fetch_ssrel();
    # confirm whether a dedicated sssrel table should be used instead.
    sssrel_dict = dd(lambda: dd())
    for r in query_omw("""SELECT id, rel, def FROM ssrel"""):
        # fixed: the original populated an undefined name (ssrel_dict)
        # and never returned the dictionary it declared
        sssrel_dict['id'][r['id']] = (r['rel'], r['def'])
        sssrel_dict['rel'][r['rel']] = (r['id'], r['def'])
    return sssrel_dict
def fetch_srel():
    """Look up the relation and definition for sense level links,
    indexed both ways:
    srel['id'][1] = ('antonym', 'a sense with the opposite meaning')
    srel['rel']['antonym'] = (1, 'a sense with the opposite meaning')"""
    lookup = dd(lambda: dd())
    for row in query_omw("""SELECT id, rel, def FROM srel"""):
        lookup['id'][row['id']] = (row['rel'], row['def'])
        lookup['rel'][row['rel']] = (row['id'], row['def'])
    return lookup
def fetch_def_by_ssid_lang_text(ss_id, lang_id, d):
    """Return def.id matching (synset, language, definition text), or None."""
    rows = query_omw(""" SELECT id, ss_id, lang_id, def FROM def
                         WHERE ss_id = ?
                         AND lang_id = ?
                         AND def = ?""",
                     [ss_id, lang_id, d])
    for row in rows:
        return row['id']
def fetch_ssexe_by_ssid_lang_text(ss_id, lang_id, e):
    """Return ssexe.id matching (synset, language, example text), or None."""
    rows = query_omw(""" SELECT id, ss_id, lang_id, ssexe FROM ssexe
                         WHERE ss_id = ?
                         AND lang_id = ?
                         AND ssexe = ?""",
                     [ss_id, lang_id, e])
    for row in rows:
        return row['id']
def fetch_s_freq(sense_list):
    """get the total frequency of each sense
    frequency is in sm:
    (s_id, smt_id, sml_id)
    smt_id=1 (hard coded)
    sml_id is the frequency
    the source of the frequency should be in sm_src
    senses can have multiple rows of frequency
    :returns: a dd with key sense id and value frequency
    :rtype: defaultdict(int)
    sfreq[s_id] = freq
    """
    sfreq = dd(int) # defaults to zero
    # sum all frequency rows (smt_id=1) per sense; senses without any
    # frequency rows stay at the defaultdict's 0
    for r in query_omw("""SELECT s_id, SUM(sml_id) as freq FROM sm
                          WHERE s_id in (%s) and smt_id=1
                          GROUP BY s_id"""
                       % l2q(sense_list), sense_list):
        sfreq[r['s_id']] = r['freq']
    return sfreq
def fetch_ss_basic(synset_list):
    """Fetch the core display data for a list of synset ids.

    Returns (ss, senses, defs, exes, links):
    ss[ss_id] = (ili_id, pos_id)
    senses[ss_id][lang_id] = [(s_id, lemma, freq), ...] sorted by freq desc
    defs[ss_id][lang_id] = [def, ...]
    exes[ss_id][lang_id] = [example, ...]
    links[ss1_id][ssrel_id] = [ss2_id, ...]
    """
    synset_list = list(synset_list)
    # (placeholder string "?,?,...", the id list) for the IN clauses
    ss_list = (",".join("?" for s in synset_list), synset_list)
    ss = dict() # ss[ss_id][s_id] = [wid, fid, lang_id, pos_id]
    for r in query_omw(""" SELECT id, ili_id, pos_id FROM ss
                           WHERE id in (%s) """ % (ss_list[0]), ss_list[1]):
        ss[r['id']] = (r['ili_id'], r['pos_id'])
    senses = dd(lambda: dd(list)) # senses[ss_id][lang] = [(s_id, lemma, freq), ]
    s_tmp = list()
    s_list = list()
    # join each sense to its word's canonical form to get the lemma
    for r in query_omw("""
                        SELECT lang_id, lemma, w_id, canon, ss_id, s_id
                        FROM ( SELECT w_id, canon, ss_id, s_id
                        FROM ( SELECT id as s_id, ss_id, w_id FROM s
                        WHERE ss_id in (%s)) as sense
                        JOIN w ON w_id = w.id ) as word
                        JOIN f ON canon = f.id
                        """ % (ss_list[0]), ss_list[1]):
        s_tmp.append((r['ss_id'], r['lang_id'], r['s_id'], r['lemma']))
        s_list.append(r['s_id'])
    # attach frequencies, then sort each language's senses most-frequent first
    sfreq = fetch_s_freq(s_list)
    for (ss_id, lang_id, s_id, lemma) in s_tmp:
        senses[ss_id][lang_id].append((s_id, lemma, sfreq[s_id]))
    for ss_id in senses:
        for lang_id in senses[ss_id]:
            senses[ss_id][lang_id].sort(key=lambda x: x[2], reverse=True)
    defs = dd(lambda: dd(list)) # defs[ss_id][lang] = [def, def2]
    for r in query_omw(""" SELECT ss_id, lang_id, def FROM def
                           WHERE ss_id in (%s) """ % (ss_list[0]), ss_list[1]):
        defs[r['ss_id']][r['lang_id']].append(r['def'])
    exes = dd(lambda: dd(list)) # exs[ss_id][lang] = [ex, ex2]
    for r in query_omw(""" SELECT ss_id, lang_id, ssexe FROM ssexe
                           WHERE ss_id in (%s) """ % (ss_list[0]), ss_list[1]):
        exes[r['ss_id']][r['lang_id']].append(r['ssexe'])
    links = dd(lambda: dd(list)) # links[ss1_id][ssrel] = [ss2_id, ...]
    for r in query_omw(""" SELECT ss1_id, ssrel_id, ss2_id FROM sslink
                           WHERE ss1_id in (%s) """ % (ss_list[0]), ss_list[1]):
        links[r['ss1_id']][r['ssrel_id']].append(r['ss2_id'])
    return ss, senses, defs, exes, links
def fetch_core():
    """return sets of core synsets as OMW synsets and ILIs"""
    core_ss = set()
    core_ili = set()
    res = query_omw('select id from resource where code = ?', ('core',), one=True)
    if res:
        rows = query_omw("""SELECT ss_id, x1 FROM ssxl WHERE resource_id=?""",
                         (res['id'],))
        for row in rows:
            core_ss.add(row['ss_id'])
            core_ili.add(row['x1'])
    return core_ss, core_ili
def fetch_cili_tsv():
    """output the ili as tsv, with lists of linked synsets
    the data is accessible at "/cili.tsv"
    and documented on the CILI welcome page
    """
    ### get projects linked to ili
    srcs = fetch_src()
    src = dd(list)
    # "proj-version:src_key" strings for every synset's sources
    r = query_omw_direct("SELECT ss_id, src_id, src_key from ss_src")
    for (ss_id, src_id, src_key) in r:
        src[ss_id].append("{}-{}:{}".format(srcs[src_id][0],
                                            srcs[src_id][1],
                                            src_key))
    ### prepare headers
    tsv=["\t".join(["ili_id",
                    "status",
                    "superseded_by",
                    "origin",
                    "used_by",
                    "definition"])]
    ### get the data
    # LEFT JOIN so ILIs with no synset still appear (ss_id is then NULL)
    r = query_omw_direct("""SELECT ss.id, ili.id, def,
                            status_id, superseded_by_id, origin_src_id, src_key
                            FROM ili LEFT JOIN ss on ss.ili_id = ili.id""")
    for (ss_id, ili_id, dfn, status_id,
         superseded_by_id, origin_src_id, src_key) in r:
        # ili ids are rendered with an 'i' prefix, e.g. i1234
        tsv.append("\t".join(['i' + str(ili_id),
                              str(status_id),
                              'i' + str(superseded_by_id) if superseded_by_id else '',
                              "{}-{}:{}".format(srcs[origin_src_id][0],
                                                srcs[ origin_src_id][1],
                                                src_key),
                              ";".join(src[ss_id]),
                              dfn]))
    return "\n".join(tsv)+"\n"
def fetch_sense_links(s_ids):
    """ return information about the links to a list of senses
    slinks[s_id_from][srel] = [s_id_to, ...]
    """
    links = dd(lambda: dd(list))
    sql = """ SELECT s1_id, srel_id, s2_id FROM slink
                           WHERE s1_id in ({})""".format(l2q(s_ids))
    for row in query_omw(sql, s_ids):
        links[row['s1_id']][row['srel_id']].append(row['s2_id'])
    return links
def fetch_sense(s_id):
    """ return information about the sense

    Returns [lemma, pos_id, freq, w_id, ss_id, ili_id], or an empty
    list when the sense id does not exist.
    """
    # sense = (lemma, pos, freq, w_id, ss_id, ili_id)
    sense=[]
    # walk sense -> word -> canonical form, then join the synset for pos/ili
    for r in query_omw("""
                        SELECT lemma, w_id, canon, ss_id, pos_id, ili_id
                        FROM ( SELECT lemma, w_id, canon, ss_id
                        FROM ( SELECT w_id, canon, ss_id
                        FROM ( SELECT ss_id, w_id FROM s
                        WHERE id=? ) as sense
                        JOIN w ON w_id = w.id ) as word
                        JOIN f ON canon = f.id ) as thing
                        JOIN ss on ss.id=ss_id
                        """, (s_id,)):
        sense = [r['lemma'], r['pos_id'], 0,
                 r['w_id'], r['ss_id'], r['ili_id']]
    ### NOTE may want to show the different sources of frequencies
    # fill in the frequency placeholder (index 2)
    sfreq = fetch_s_freq([s_id])
    sense[2] = sfreq[s_id]
    return sense
def fetch_forms(w_id):
    """return the forms of all variants
    FIXME: should include meta data
    """
    variants = []
    rows = query_omw("""
                      SELECT lemma, id as f_id
                      FROM (SELECT f_id FROM wf_link
                            WHERE w_id = ?)
                      JOIN f on f.id=f_id""", (w_id,))
    for row in rows:
        variants.append(row['lemma'])
    return variants
def fetch_form_meta_attr():
    """
    Fetch the form metadata attribute tags:
    fma['attr'][id] = tag, fma['name'][id] = name, fma['id'][tag] = id.
    """
    fma = dd(dict)
    rows = query_omw("""
                      SELECT id as fma_id, tag, name
                      FROM fmt""")
    for row in rows:
        fma['attr'][row['fma_id']] = row['tag']
        fma['name'][row['fma_id']] = row['name']
        fma['id'][row['tag']] = row['fma_id']
    return fma
def fetch_form_meta_val():
    """
    Fetch the form metadata value labels:
    fmv['val'][id] = label, fmv['name'][id] = name, fmv['id'][label] = id.
    """
    fmv = dd(dict)
    rows = query_omw("""
                      SELECT id as fmv_id, label, name
                      FROM fml""")
    for row in rows:
        fmv['val'][row['fmv_id']] = row['label']
        fmv['name'][row['fmv_id']] = row['name']
        fmv['id'][row['label']] = row['fmv_id']
    return fmv
def fetch_labels(lang_id, sss):
    """return a dict with lang_id labels for the synsets in sss"""
    labels = dict()
    sql = """SELECT ss_id, label FROM label
                          WHERE lang_id = ? AND ss_id in (%s)""" % l2q(sss)
    for row in query_omw(sql, [lang_id] + list(sss)):
        labels[row['ss_id']] = row['label']
    return labels
def fetch_sense_labels(s_ids):
    """return just the string for the canonical form for each of a list of sense ids
    slabel[s_id] = lemma (s_id is the id of the sense)
    slabel[127262] = 'driver' """
    slabel = dict()
    # sense -> word -> canonical form (f.lemma)
    for r in query_omw("""SELECT lemma, s_id, canon
                          FROM ( SELECT w_id, canon, s_id
                          FROM ( SELECT id as s_id, w_id FROM s
                          WHERE id in ({}) ) as sense
                          JOIN w ON w_id = w.id ) as word
                          JOIN f ON canon = f.id""".format(l2q(s_ids)),
                       s_ids):
        slabel[r['s_id']] = r['lemma']
    return slabel
def fetch_ss_id_by_src_orginalkey(src_id, originalkey):
    """Return the ss_id mapped to (src_id, src_key), or None when absent.

    Previously this raised UnboundLocalError when no row matched; it now
    returns None instead (the last match wins if several rows exist).
    """
    ss = None
    for r in query_omw(""" SELECT ss_id, src_id, src_key FROM ss_src
                           WHERE src_id = ? and src_key = ? """, [src_id, originalkey]):
        ss = r['ss_id']
    return ss
def fetch_defs_by_sense(s_ids):
    """given a list of senses, return a dictionary of definitions
    defs[s_id][lang_id] = definition"""
    ### FIXME: find the sense level definition when defined
    defs = dd(lambda: dict())
    sql = """ SELECT s_id, lang_id, def
              FROM (SELECT id AS s_id, ss_id FROM s
                    WHERE id IN ({})) as sense
              JOIN def ON sense.ss_id = def.ss_id""".format(l2q(s_ids))
    for row in query_omw(sql, s_ids):
        defs[row['s_id']][row['lang_id']] = row['def']
    return defs
def fetch_all_defs_by_ss_lang_text():
    """Index every definition row: defs[ss_id][(lang_id, def_text)] = def.id."""
    index = dd(lambda: dd())
    for row in query_omw("""SELECT id, ss_id, lang_id, def FROM def"""):
        index[row['ss_id']][(row['lang_id'], row['def'])] = row['id']
    return index
def fetch_all_ssexe_by_ss_lang_text():
    """Index every example row: exes[ss_id][(lang_id, example_text)] = ssexe.id."""
    index = dd(lambda: dd())
    for row in query_omw("""SELECT id, ss_id, lang_id, ssexe FROM ssexe"""):
        index[row['ss_id']][(row['lang_id'], row['ssexe'])] = row['id']
    return index
def fetch_all_ssrels_by_ss_rel_trgt():
    """Index every synset link: links[ss1_id][(ssrel_id, ss2_id)] = sslink.id."""
    index = dd(lambda: dd())
    sql = """SELECT id, ss1_id, ssrel_id, ss2_id
                          FROM sslink"""
    for row in query_omw(sql):
        index[row['ss1_id']][(row['ssrel_id'], row['ss2_id'])] = row['id']
    return index
def fetch_all_srels_by_s_rel_trgt():
    """Index every sense link: links[s1_id][(srel_id, s2_id)] = slink.id."""
    index = dd(lambda: dd())
    sql = """SELECT id, s1_id, srel_id, s2_id
                         FROM slink"""
    for row in query_omw(sql):
        index[row['s1_id']][(row['srel_id'], row['s2_id'])] = row['id']
    return index
def fetch_all_sssrels_by_s_rel_trgt():
    """Index every sense-synset link: links[s_id][(srel_id, ss_id)] = ssslink.id."""
    index = dd(lambda: dd())
    sql = """SELECT id, s_id, srel_id, ss_id
                           FROM ssslink"""
    for row in query_omw(sql):
        index[row['s_id']][(row['srel_id'], row['ss_id'])] = row['id']
    return index
def fetch_all_forms_by_lang_pos_lemma():
    """Index every form: forms[lang_id][(pos_id, lemma)] = f.id."""
    index = dd(lambda: dd())
    sql = """SELECT id, lang_id, pos_id, lemma
                          FROM f"""
    for row in query_omw(sql):
        index[row['lang_id']][(row['pos_id'], row['lemma'])] = row['id']
    return index
def f_ss_id_by_ili_id(ili_id):
    """ Return a list of ss_ids from an ili_id """
    rows = query_omw("""SELECT id FROM ss
                          WHERE ili_id = ?""", [ili_id])
    return [row['id'] for row in rows]
def f_ili_ss_id_map():
    """ Returns a dictionary linking ili_ids and ss_ids. It is possible
    that one ili_id links to multiple ss_ids, but one ss_id can only
    link to a single ili_id. """
    mapping = dd(lambda: dd(list))
    for row in query_omw("""SELECT id, ili_id, pos_id FROM ss"""):
        mapping['ili'][row['ili_id']].append((row['id'], row['pos_id']))
        mapping['ss'][row['id']] = row['ili_id']
    return mapping
def f_sslink_id_by_ss1_rel_ss2(ss1, rel, ss2):
    """Return the sslink.id for (ss1, ssrel, ss2), or None when no such link."""
    rows = query_omw("""SELECT id
                        FROM sslink
                        WHERE ss1_id = ?
                        AND ssrel_id = ?
                        AND ss2_id = ?""",
                     [ss1, rel, ss2])
    for row in rows:
        return row['id']
def insert_omw_ss(ili_id, pos_id, u):
    """Insert a synset row; return the new ss.id."""
    params = [ili_id, pos_id, u]
    return write_omw("""INSERT INTO ss (ili_id, pos_id, u)
                         VALUES (?,?,?)""", params)
def blk_insert_omw_ss(tuple_list):
    """Bulk-insert synsets: tuple_list = [(id, ili_id, pos_id, u), ...]."""
    sql = """INSERT INTO ss (id, ili_id, pos_id, u)
                          VALUES (?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_ss_src(ss_id, src_id, src_key, conf, u):
    """Record the source of a synset; return the new ss_src row id."""
    params = [ss_id, src_id, src_key, conf, u]
    return write_omw("""INSERT INTO ss_src (ss_id, src_id, src_key, conf, u)
                         VALUES (?,?,?,?,?)""", params)
def blk_insert_omw_ss_src(tuple_list):
    """Bulk-insert synset sources: [(ss_id, src_id, src_key, conf, u), ...]."""
    sql = """INSERT INTO ss_src (ss_id, src_id, src_key, conf, u)
                          VALUES (?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_def(ss_id, lang_id, d, u):
    """Insert one definition for a synset; return the new def.id."""
    params = [ss_id, lang_id, d, u]
    return write_omw("""INSERT INTO def (ss_id, lang_id, def, u)
                         VALUES (?,?,?,?)""", params)
def blk_insert_omw_def(tuple_list):
    """Bulk-insert definitions: [(id, ss_id, lang_id, def, u), ...]."""
    sql = """INSERT INTO def (id, ss_id, lang_id, def, u)
                          VALUES (?, ?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_def_src(def_id, src_id, conf, u):
    """Record the source of a definition; return the new def_src row id."""
    params = [def_id, src_id, conf, u]
    return write_omw("""INSERT INTO def_src (def_id, src_id, conf, u)
                      VALUES (?,?,?,?)""", params)
def blk_insert_omw_def_src(tuple_list):
    """Bulk-insert definition sources: [(def_id, src_id, conf, u), ...]."""
    sql = """INSERT INTO def_src (def_id, src_id, conf, u)
                          VALUES (?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_ssexe(ss_id, lang_id, e, u):
    """Insert one example for a synset; return the new ssexe.id."""
    params = [ss_id, lang_id, e, u]
    return write_omw("""INSERT INTO ssexe (ss_id, lang_id, ssexe, u)
                         VALUES (?,?,?,?)""", params)
def blk_insert_omw_ssexe(tuple_list):
    """Bulk-insert examples: [(id, ss_id, lang_id, ssexe, u), ...]."""
    sql = """INSERT INTO ssexe (id, ss_id, lang_id, ssexe, u)
                           VALUES (?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_ssexe_src(ssexe_id, src_id, conf, u):
    """Record the source of an example; return the new ssexe_src row id."""
    params = [ssexe_id, src_id, conf, u]
    return write_omw("""INSERT INTO ssexe_src (ssexe_id, src_id, conf, u)
                      VALUES (?,?,?,?)""", params)
def blk_insert_omw_ssexe_src(tuple_list):
    """Bulk-insert example sources: [(ssexe_id, src_id, conf, u), ...]."""
    sql = """INSERT INTO ssexe_src (ssexe_id, src_id, conf, u)
                           VALUES (?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_sslink(ss1_id, ssrel_id, ss2_id, u):
    """Insert one synset-synset link; return the new sslink.id."""
    params = [ss1_id, ssrel_id, ss2_id, u]
    return write_omw("""INSERT INTO sslink (ss1_id, ssrel_id, ss2_id, u)
                         VALUES (?,?,?,?)""", params)
def blk_insert_omw_sslink(tuple_list):
    """
    Insert a list of (id, ss1_id, ssrel_id, ss2_id, u) into sslink;
    often followed by insert_omw_sslink_src().
    If id is NULL, it will autoincrement.
    """
    sql = """INSERT INTO sslink (id, ss1_id, ssrel_id, ss2_id, u)
                          VALUES (?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_sslink_src(sslink_id, src_id, conf, lang_id, u):
    """Record the source of a synset link; return the new sslink_src row id."""
    params = [sslink_id, src_id, conf, lang_id, u]
    return write_omw("""INSERT INTO sslink_src (sslink_id, src_id, conf, lang_id, u)
                         VALUES (?,?,?,?,?)""", params)
def blk_insert_omw_sslink_src(tuple_list):
    """Bulk-insert synset-link sources: [(sslink_id, src_id, conf, lang_id, u), ...]."""
    sql = """INSERT INTO sslink_src (sslink_id, src_id, conf, lang_id, u)
                           VALUES (?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_slink(s1_id, srel_id, s2_id, u):
    """Insert one sense-sense link; return the new slink.id."""
    params = [s1_id, srel_id, s2_id, u]
    return write_omw("""INSERT INTO slink (s1_id, srel_id, s2_id, u)
                         VALUES (?,?,?,?)""", params)
def blk_insert_omw_slink(tuple_list):
    """Bulk-insert sense links: [(id, s1_id, srel_id, s2_id, u), ...]."""
    sql = """INSERT INTO slink (id, s1_id, srel_id, s2_id, u)
                          VALUES (?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_slink_src(slink_id, src_id, conf, u):
    """Record the source of a sense link; return the new slink_src row id."""
    params = [slink_id, src_id, conf, u]
    return write_omw("""INSERT INTO slink_src (slink_id, src_id, conf, u)
                      VALUES (?,?,?,?)""", params)
def blk_insert_omw_slink_src(tuple_list):
    """Bulk-insert sense-link sources: [(slink_id, src_id, conf, u), ...]."""
    sql = """INSERT INTO slink_src (slink_id, src_id, conf, u)
                          VALUES (?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_ssslink(s_id, srel_id, ss_id, u):
    """Insert one sense-synset link; return the new ssslink.id."""
    params = [s_id, srel_id, ss_id, u]
    return write_omw("""INSERT INTO ssslink (s_id, srel_id, ss_id, u)
                         VALUES (?,?,?,?)""", params)
def blk_insert_omw_ssslink(tuple_list):
    """Bulk-insert sense-synset links: [(id, s_id, srel_id, ss_id, u), ...]."""
    sql = """INSERT INTO ssslink (id, s_id, srel_id, ss_id, u)
                          VALUES (?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def insert_omw_ssslink_src(ssslink_id, src_id, conf, lang_id, u):
    """Record the source of a sense-synset link; return the new row id."""
    params = [ssslink_id, src_id, conf, lang_id, u]
    return write_omw("""INSERT INTO ssslink_src (ssslink_id, src_id, conf, lang_id, u)
                         VALUES (?,?,?,?,?)""", params)
def blk_insert_omw_ssslink_src(tuple_list):
    """Bulk-insert sense-synset-link sources:
    [(ssslink_id, src_id, conf, lang_id, u), ...]."""
    sql = """INSERT INTO ssslink_src (ssslink_id, src_id, conf, lang_id, u)
                           VALUES (?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
# FORM
def insert_omw_f(lang_id, pos_id, form, u):
    """Insert a form (lemma) row; return the new f.id."""
    params = [lang_id, pos_id, form, u]
    return write_omw("""INSERT INTO f (lang_id, pos_id, lemma, u)
                        VALUES (?,?,?,?)""", params)
# BLK FORM
def blk_insert_omw_f(tuple_list):
    """Bulk-insert forms: [(id, lang_id, pos_id, lemma, u), ...]."""
    sql = """INSERT INTO f (id, lang_id, pos_id, lemma, u)
                          VALUES (?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
# FORM SRC
def insert_omw_f_src(f_id, src_id, conf, u):
    """Record the source of a form; return the new f_src row id."""
    params = [f_id, src_id, conf, u]
    return write_omw("""INSERT INTO f_src (f_id, src_id, conf, u)
                        VALUES (?,?,?,?)""", params)
# BLK FORM SRC
def blk_insert_omw_f_src(tuple_list):
    """Bulk-insert form sources: [(f_id, src_id, conf, u), ...]."""
    sql = """INSERT INTO f_src (f_id, src_id, conf, u)
                          VALUES (?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
# WORD
def insert_omw_w(canon, u):
    """Insert a word with its canonical form id; return the new w.id."""
    params = [canon, u]
    return write_omw("""INSERT INTO w (canon, u)
                        VALUES (?,?)""", params)
# BLK WORD
def blk_insert_omw_w(tuple_list):
    """Bulk-insert words: [(id, canon, u), ...]."""
    sql = """INSERT INTO w (id, canon, u)
                          VALUES (?,?,?)"""
    return blk_write_omw(sql, tuple_list)
# WORD-FORM LINK
def insert_omw_wf_link(w_id, f_id, src_id, conf, u):
    """Link a word to a form with its source; return the new wf_link row id."""
    params = [w_id, f_id, src_id, conf, u]
    return write_omw("""INSERT INTO wf_link (w_id, f_id, src_id, conf, u)
                        VALUES (?,?,?,?,?)""", params)
# BLK WORD-FORM LINK
def blk_insert_omw_wf_link(tuple_list):
    """Bulk-insert word-form links: [(w_id, f_id, src_id, conf, u), ...]."""
    sql = """INSERT INTO wf_link (w_id, f_id, src_id, conf, u)
                           VALUES (?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
# SENSE
def insert_omw_s(ss_id, w_id, u):
    """Insert a sense linking a synset and a word; return the new s.id."""
    params = [ss_id, w_id, u]
    return write_omw("""INSERT INTO s (ss_id, w_id, u)
                        VALUES (?,?,?)""", params)
# BLK SENSE
def blk_insert_omw_s(tuple_list):
    """Bulk-insert senses: [(id, ss_id, w_id, u), ...]."""
    sql = """INSERT INTO s (id, ss_id, w_id, u)
                          VALUES (?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
# SENSE SRC
def insert_omw_s_src(s_id, src_id, conf, u):
    """Record the source of a sense; return the new s_src row id."""
    params = [s_id, src_id, conf, u]
    return write_omw("""INSERT INTO s_src (s_id, src_id, conf, u)
                        VALUES (?,?,?,?)""", params)
# BLK SENSE SRC
def blk_insert_omw_s_src(tuple_list):
    """Bulk-insert sense sources: [(s_id, src_id, conf, u), ...]."""
    sql = """INSERT INTO s_src (s_id, src_id, conf, u)
                          VALUES (?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
# INSERT SENSE META
def insert_omw_sm(s_id, smt_id, sml_id, u):
    """
    Insert one sense-meta row; return the new sm.id.
    smt_id = sense-meta-tag_id (db restricted to the ones stored in smt)
    sml_id = labels (not restricted, but meanings available in sml)
    """
    params = [s_id, smt_id, sml_id, u]
    return write_omw("""INSERT INTO sm (s_id, smt_id, sml_id, u)
                        VALUES (?,?,?,?)""", params)
# BLK INSERT SENSE META
def blk_insert_omw_sm(tuple_list):
    """Bulk-insert sense meta rows: [(id, s_id, smt_id, sml_id, u), ...]."""
    sql = """INSERT INTO sm (id, s_id, smt_id, sml_id, u)
                          VALUES (?,?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
# INSERT SENSE META SRC
def insert_omw_sm_src(sm_id, src_id, conf, u):
    """Record the source of a sense-meta row; return the new sm_src row id."""
    params = [sm_id, src_id, conf, u]
    return write_omw("""INSERT INTO sm_src (sm_id, src_id, conf, u)
                       VALUES (?,?,?,?)""", params)
# BLK SENSE META SRC
def blk_insert_omw_sm_src(tuple_list):
    """Bulk-insert sense-meta sources: [(sm_id, src_id, conf, u), ...]."""
    sql = """INSERT INTO sm_src (sm_id, src_id, conf, u)
                          VALUES (?,?,?,?)"""
    return blk_write_omw(sql, tuple_list)
def fetch_graph(ili_labels=False):
    """
    get the complete hypernym graph (including instances)
    if the node is in ili, its name is iXXXX
    else the node is oXXXX
    return a dictionary of sets
    graph[hype] = {hypo, hypo, hypo, ...}
    """
    if ili_labels:
        # map synset ids to 'i<ili_id>' labels where an ili link exists
        ss2ili = dict()
        for r in query_omw("""SELECT id, ili_id FROM ss WHERE ili_id IS NOT Null"""):
            ss2ili[r['id']] = 'i{}'.format(r['ili_id'])
    graph = dd(set)
    # NOTE(review): ssrel ids 34, 37, 35, 38 are hard-coded; per the branch
    # below 35/38 are the hypo directions (comment says "hypo, ihypo") and
    # 34/37 the hyper directions -- confirm against the ssrel table.
    for r in query_omw("""SELECT ss1_id, ssrel_id, ss2_id
                          FROM sslink
                          WHERE ssrel_id in (34,37, 35, 38)"""):
        ss1_id = r['ss1_id']
        ss2_id = r['ss2_id']
        if ili_labels:
            if ss1_id in ss2ili:
                ss1_id = ss2ili[ss1_id]
            if ss2_id in ss2ili:
                ss2_id = ss2ili[ss2_id]
        # normalise both directions so edges always go hypernym -> hyponym
        if r['ssrel_id'] in [35,38]: # hypo, ihypo
            graph[ss2_id].add(ss1_id)
        else:
            graph[ss1_id].add(ss2_id)
    #        if ss1_id in ('i46538', 'i46539'):
    #            print(ss1_id, ss2_id, file=sys.stderr)
    return graph
# UPDATE LABELS
def updateLabels():
    """This functions is to be run after a new wordnet is uploaded
    so that concept labels for that language are created and visible
    as concept names.

    Rebuilds the whole `label` table: for every synset and language it
    picks the best lemma (or borrows one from another language, or
    '?????' as a last resort).  Returns True.
    """
    # get frequency for everything
    # similar to fetch_s_freq(s_list), but no need for a list
    sfreq = dd(int) # defaults to zero
    for r in query_omw("""SELECT s_id, SUM(sml_id) as freq FROM sm
                          WHERE smt_id=1
                          GROUP BY s_id"""):
        sfreq[r['s_id']] = r['freq']
    senses =dd(lambda: dd(list))
    #senses[ss_id][lang_id]=[(ls_id, lemma, freq), ...]
    forms = dd(lambda: dd(int))
    #forms[lang][word] = freq
    eng_id=1 ### we know this :-)
    # collect every sense's lemma per synset/language, and count how
    # often each lemma occurs in each language (for the uniqueness key)
    for r in query_omw("""SELECT s_id, ss_id, lemma, lang_id
                          FROM (SELECT w_id, canon, ss_id, s_id
                          FROM (SELECT id as s_id, ss_id, w_id FROM s)
                          JOIN w ON w_id = w.id )
                          JOIN f ON canon = f.id"""):
        senses[r['ss_id']][r['lang_id']].append((r['s_id'], r['lemma'], sfreq[r['s_id']]))
        forms[r['lang_id']][r['lemma']] += 1
    # make the best label for each language that has lemmas
    for ss in senses:
        for l in senses[ss]:
            senses[ss][l].sort(key=lambda x: (-x[2], ### sense freq (freq is good)
                                              forms[l][x[1]], ### uniqueness (freq is bad)
                                              len(x[1]), ### length (short is good)
                                              x[1])) ### lemma (so it is the same)
    lgs=[]
    cv = query_omw_direct("SELECT id FROM lang ORDER BY id") ### English first!
    for (lid,) in cv:
        lgs.append(lid)
    # make the labels
    label = dd(lambda: dd(str))
    values=list()
    for ss in senses:
        for l in lgs:
            if senses[ss][l]:
                # this language has its own lemma: use the best one
                label[ss][l]=senses[ss][l][0][1]
            else:
                for lx in lgs: ### start with eng and go through till you find one
                    if senses[ss][lx]:
                        label[ss][l]=senses[ss][lx][0][1]
                        break
                else:
                    # no language has a lemma for this synset at all
                    label[ss][l]="?????"
            values.append((ss, l, label[ss][l]))
    # write the labels (delete old ones first)
    write_omw("""DELETE FROM label""")
    blk_write_omw("""INSERT INTO label(ss_id, lang_id, label, u)
                     VALUES (?,?,?,"omw")""", values)
    return True
|
|
# coding: utf-8
import dataset
import csv
import os
from default_settings import DB_RESULTS_URL
# Module-level dataset connection; assigned in the __main__ guard.
db = None
# NOTE(review): `dir` shadows the builtin of the same name; harmless here
# but worth renaming if the module grows.
dir = os.path.dirname(__file__)
# Input data directories, relative to this file.
REL_POLLING_PATH = os.path.join(dir, '../data/maestras')
REL_RESULTS_PATH = os.path.join(dir, '../data/resultados')
# Input CSV file names.
POLLING_STATIONS_DATA_FILE = 'caba_est_2015.csv'
POLLING_TABLES_DATA_FILE = 'mesas.csv'
RESULTS_DATA_FILE = 'resultados_partido_lista.csv'
RELATIONS_DATA_FILE = 'relaciones.csv'
# Each SCHEMA_*_NUMERIC map translates a numeric CSV column name (key)
# to the DB column name it is stored under (value); the import functions
# cast these columns to int (empty string -> None).
SCHEMA_POLLING_STATION_NUMERIC = {
    "caba_id": "id_caba",
    "distrito_id": "id_distrito",
    "seccion_id": "id_seccion",
    "mesa_desde": "mesa_desde",
    "mesa_hasta": "mesa_hasta",
    "num_mesas": "num_mesas"
}
SCHEMA_POLLING_TABLE_NUMERIC = {
    "id_mesa": "id_mesa",
    "id_establecimiento": "id_establecimiento_gob",
    "id_centro_de_distribucion": "id_centro",
    "ciudadanos_habilitados": "electores"
}
SCHEMA_RESULTS_NUMERIC = {
    "id_mesa": "id_mesa",
    "cantidad_votantes": "electores",
    "sobres_utilizados": "votantes",
    "JEF": "votos"
}
SCHEMA_RELATION_NUMERIC = {
    "id_establecimiento": "id_establecimiento",
    "id_agrupado": "id_agrupado"
}
# Non-party ballot codes: blank (BLC), null (NUL), contested (IMP),
# appealed (REC).  Values mark them for special handling downstream.
SPECIAL_PARTIES = {
    "BLC": 0,
    "NUL": 1,
    "IMP": 1,
    "REC": 1
}
def connect_dataset():
    """Open and return a `dataset` connection to the results database."""
    connection = dataset.connect(DB_RESULTS_URL)
    return connection
def clearDB():
''' Clears the DB to make the script idempotent '''
for t in db.tables:
print t
db.get_table(t).drop()
def import_poll_stations(fname):
    """Import geolocated polling stations from CSV into the `locales` table.

    Columns listed in SCHEMA_POLLING_STATION_NUMERIC are renamed and cast
    to int (empty values become None); `geom` is stored verbatim and every
    other column is decoded from UTF-8.
    """
    t = db['locales']
    # `with` guarantees the file is closed (the original leaked the handle)
    with open(fname, 'r') as f:
        # consume the header line ourselves and feed it to DictReader
        fields = f.readline().strip().split(',')
        c = csv.DictReader(f, fields)
        results = []
        for row in c:
            t_results = {}
            for k, v in row.iteritems():
                if k in SCHEMA_POLLING_STATION_NUMERIC:
                    kt = SCHEMA_POLLING_STATION_NUMERIC[k]
                    t_results[kt] = int(v) if v else None
                elif k == "geom":
                    # geometry column is kept as raw bytes/WKT
                    t_results[k] = v
                else:
                    t_results[k] = v.decode('utf-8')
            results.append(t_results)
    t.insert_many(results)
def import_poll_tables(fname):
    """Import polling tables from CSV into the `mesas` table.

    Columns listed in SCHEMA_POLLING_TABLE_NUMERIC are renamed and cast to
    int (empty values become None); all other columns are decoded as UTF-8.
    """
    t = db['mesas']
    # `with` guarantees the file is closed (the original leaked the handle)
    with open(fname, 'r') as f:
        fields = f.readline().strip().split(',')
        c = csv.DictReader(f, fields)
        results = []
        for row in c:
            t_results = {}
            for k, v in row.iteritems():
                if k in SCHEMA_POLLING_TABLE_NUMERIC:
                    kt = SCHEMA_POLLING_TABLE_NUMERIC[k]
                    t_results[kt] = int(v) if v else None
                else:
                    t_results[k] = v.decode('utf-8')
            results.append(t_results)
    t.insert_many(results, chunk_size=1000)
def import_results(fname):
    """Import per-table election results from CSV into `resultados`.

    Columns listed in SCHEMA_RESULTS_NUMERIC are renamed and cast to int
    (empty values become None); all other columns are decoded as UTF-8.
    """
    t = db['resultados']
    # `with` guarantees the file is closed (the original leaked the handle)
    with open(fname, 'r') as f:
        fields = f.readline().strip().split(',')
        c = csv.DictReader(f, fields)
        results = []
        for row in c:
            t_results = {}
            for k, v in row.iteritems():
                if k in SCHEMA_RESULTS_NUMERIC:
                    kt = SCHEMA_RESULTS_NUMERIC[k]
                    t_results[kt] = int(v) if v else None
                else:
                    t_results[k] = v.decode('utf-8')
            results.append(t_results)
    t.insert_many(results, chunk_size=10000)
def import_relations(fname):
    """Import the station-grouping relations CSV into `relaciones`.

    These relations later deduplicate polling stations that share one
    physical location.  Columns in SCHEMA_RELATION_NUMERIC are cast to int
    (empty values become None); all other columns are decoded as UTF-8.
    """
    t = db['relaciones']
    # `with` guarantees the file is closed (the original leaked the handle)
    with open(fname, 'r') as f:
        fields = f.readline().strip().split(',')
        c = csv.DictReader(f, fields)
        results = []
        for row in c:
            t_results = {}
            for k, v in row.iteritems():
                if k in SCHEMA_RELATION_NUMERIC:
                    kt = SCHEMA_RELATION_NUMERIC[k]
                    t_results[kt] = int(v) if v else None
                else:
                    t_results[k] = v.decode('utf-8')
            results.append(t_results)
    t.insert_many(results, chunk_size=10000)
def create_locales_tmp(table_polling='locales',
                       table_rel='relaciones'):
    """Join each polling station with its grouping id into `locales_tmp`."""
    query = '''
    SELECT l.*, r.id_agrupado
    FROM %s l, %s r
    WHERE l.id = r.id_establecimiento
    ''' % (table_polling, table_rel)
    rows = db.query(query)
    db['locales_tmp'].insert_many(rows)
def create_locales_loc(table_polling='locales_tmp'):
    """Keep one station per grouped location (lowest id) in `locales_loc`."""
    # Self-join trick: a row survives only when no other row of the same
    # id_agrupado has a smaller id.
    query = '''
    SELECT t1.*
    FROM %s as t1
    LEFT OUTER JOIN %s as t2
    ON t1.id_agrupado = t2.id_agrupado
    AND t1.id > t2.id
    WHERE t2.id_agrupado IS NULL;
    ''' % (table_polling, table_polling)
    rows = db.query(query)
    db['locales_loc'].insert_many(rows)
def aggregate_results_by_poll_station(table_polling='locales',
                                      table_votes='resultados'):
    """Sum votes per party over each station's mesa range into
    `votos_establecimiento`."""
    rows = []
    for station in db[table_polling]:
        query = '''
        SELECT id_partido, SUM(votos) as votos
        FROM "%s"
        WHERE id_mesa BETWEEN %d AND %d
        GROUP BY id_partido
        ''' % (table_votes,
               int(station['mesa_desde']),
               int(station['mesa_hasta']))
        for record in db.query(query):
            record['id_establecimiento'] = station['id']
            record['mesa_desde'] = station['mesa_desde']
            record['mesa_hasta'] = station['mesa_hasta']
            record['id_distrito'] = station['id_distrito']
            record['id_seccion'] = station['id_seccion']
            record['votos'] = int(record['votos'])
            rows.append(record)
    db['votos_establecimiento'].insert_many(rows)
def aggregate_census_by_poll_station(table_polling='locales',
                                     table_census='mesas'):
    """Sum registered voters over each station's mesa range into
    `censo_establecimiento`."""
    rows = []
    for station in db[table_polling]:
        query = '''
        SELECT id_establecimiento_gob, SUM(electores) as total
        FROM "%s"
        WHERE id_mesa BETWEEN %d AND %d
        GROUP BY id_establecimiento_gob
        ''' % (table_census,
               int(station['mesa_desde']),
               int(station['mesa_hasta']))
        for record in db.query(query):
            record['id_establecimiento'] = station['id']
            record['id_caba'] = station['id_caba']
            record['mesa_desde'] = station['mesa_desde']
            record['mesa_hasta'] = station['mesa_hasta']
            record['id_distrito'] = station['id_distrito']
            record['id_seccion'] = station['id_seccion']
            record['total'] = int(record['total'])
            rows.append(record)
    db['censo_establecimiento'].insert_many(rows)
def aggregate_totals_by_poll_station(table_votes='votos_establecimiento'):
    """Compute blank/valid/positive/invalid vote totals per station into
    `totales_establecimiento`."""
    query = '''
    SELECT id_establecimiento,
    SUM(CASE WHEN id_partido = 'BLC'
    THEN votos else 0 end) as blancos,
    SUM(CASE WHEN id_partido not in ('NUL', 'REC', 'IMP')
    THEN votos else 0 end) as validos,
    SUM(CASE WHEN id_partido not in ('BLC', 'NUL', 'REC', 'IMP')
    THEN votos else 0 end) as positivos,
    SUM(CASE WHEN id_partido in ('NUL', 'REC', 'IMP')
    THEN votos else 0 end) as invalidos
    FROM "%s"
    GROUP BY id_establecimiento
    ''' % (table_votes)
    rows = []
    for record in db.query(query):
        # normalise every aggregate column to a plain int
        for field in ('id_establecimiento', 'blancos', 'validos',
                      'positivos', 'invalidos'):
            record[field] = int(record[field])
        rows.append(record)
    db['totales_establecimiento'].insert_many(rows)
def aggregate_results_by_location(table_votes='votos_establecimiento',
                                  table_rel='relaciones'):
    """Re-aggregate party votes per grouped location into `votos_loc`
    (collapses stations that share one physical location)."""
    query = '''
    SELECT a.id_agrupado, v.id_partido,
    sum(v.votos) as votos
    FROM %s as v, %s as a
    WHERE v.id_establecimiento = a.id_establecimiento
    GROUP BY a.id_agrupado, v.id_partido, v.id_distrito, v.id_seccion
    ''' % (table_votes, table_rel)
    rows = []
    for record in db.query(query):
        record['id_agrupado'] = int(record['id_agrupado'])
        record['votos'] = int(record['votos'])
        rows.append(record)
    db['votos_loc'].insert_many(rows)
def aggregate_totals_by_location(table_totals='totales_establecimiento',
                                 table_rel='relaciones'):
    """Re-aggregate vote totals per grouped location into `totales_loc`
    (collapses stations that share one physical location)."""
    query = '''
    SELECT a.id_agrupado, sum(t.blancos) as blancos,
    sum(t.validos) as validos, sum(t.invalidos) as invalidos,
    sum(t.positivos) as positivos
    FROM %s as t, %s as a
    WHERE t.id_establecimiento = a.id_establecimiento
    GROUP BY a.id_agrupado
    ''' % (table_totals, table_rel)
    rows = []
    for record in db.query(query):
        # normalise every aggregate column to a plain int
        for field in ('id_agrupado', 'blancos', 'validos',
                      'invalidos', 'positivos'):
            record[field] = int(record[field])
        rows.append(record)
    db['totales_loc'].insert_many(rows)
def aggregate_census_by_location(table_census='censo_establecimiento',
                                 table_rel='relaciones'):
    """Re-aggregate census totals per grouped location into `censo_loc`
    (collapses stations that share one physical location)."""
    query = '''
    SELECT a.id_agrupado, sum(c.total) as total
    FROM %s as c, %s as a
    WHERE c.id_establecimiento = a.id_establecimiento
    GROUP BY a.id_agrupado
    ''' % (table_census, table_rel)
    rows = []
    for record in db.query(query):
        record['id_agrupado'] = int(record['id_agrupado'])
        record['total'] = int(record['total'])
        rows.append(record)
    db['censo_loc'].insert_many(rows)
def make_cache_table(table_polling='locales',
                     table_votes='votos_establecimiento',
                     table_census='censo_establecimiento',
                     table_totals='totales_establecimiento'):
    """Build the denormalized `cache_votos_paso_2015` table for CartoDB.

    One row per polling station: location/geometry, census total, turnout
    figures, and the winning party with its margin of victory.
    """
    # The `winner` CTE ranks parties within each station by votes
    # (rank 1 = winner); margin_victory is the lead over the runner-up
    # computed with lead(votos,1,0).  The final join keeps only rank 1.
    q = '''
    WITH %(winner)s AS (SELECT id_establecimiento, id_partido, votos,
    row_number() over(partition by id_establecimiento
    ORDER BY votos DESC) as rank,
    (votos - lead(votos,1,0) over(partition by id_establecimiento
    ORDER BY votos DESC)) as margin_victory
    FROM %(table_votes)s
    ORDER BY id_establecimiento, rank)
    SELECT c.id_establecimiento_gob as id_establecimiento_gob,
    l.id as id_establecimiento,
    l.id_distrito, l.id_seccion,
    l.mesa_desde, l.mesa_hasta, l.num_mesas, l.geom,
    l.circuito, l.direccion, l.nombre,
    c.total as electores,
    t.positivos, sqrt(t.positivos) as sqrt_positivos,
    (t.validos + t.invalidos) as votantes,
    w.id_partido, w.votos, w.margin_victory
    FROM %(table_polling)s l
    INNER JOIN %(winner)s w ON l.id = w.id_establecimiento
    INNER JOIN %(table_census)s c ON l.id = c.id_establecimiento
    INNER JOIN %(table_totals)s t ON l.id = t.id_establecimiento
    AND w.rank = 1;
    ''' % {'table_polling': table_polling,
           'table_votes': table_votes,
           'table_census': table_census,
           'table_totals': table_totals,
           'winner': 'winner'}
    results = db.query(q)
    cache_table = db['cache_votos_paso_2015']
    cache_table.insert_many(results)
def make_cache_table_loc(table_polling='locales_loc',
                         table_votes='votos_loc',
                         table_census='censo_loc',
                         table_totals='totales_loc'):
    """Build `cache_votos_paso_2015_loc`, the per-location counterpart of
    `make_cache_table` (same shape, keyed by id_agrupado instead of the
    raw station id).
    """
    # Same winner/margin CTE as make_cache_table, partitioned by the
    # grouped-location id.
    q = '''
    WITH %(winner)s AS (SELECT id_agrupado, id_partido, votos,
    row_number() over(partition by id_agrupado
    ORDER BY votos DESC) as rank,
    (votos - lead(votos,1,0) over(partition by id_agrupado
    ORDER BY votos DESC)) as margin_victory
    FROM %(table_votes)s
    ORDER BY id_agrupado, rank)
    SELECT l.id_agrupado as id_establecimiento,
    l.id_distrito, l.id_seccion,
    l.direccion, l.nombre, l.geom,
    c.total as electores,
    t.positivos, sqrt(t.positivos) as sqrt_positivos,
    (t.validos + t.invalidos) as votantes,
    w.id_partido, w.votos, w.margin_victory
    FROM %(table_polling)s l
    INNER JOIN %(winner)s w ON l.id_agrupado = w.id_agrupado
    INNER JOIN %(table_census)s c ON l.id_agrupado = c.id_agrupado
    INNER JOIN %(table_totals)s t ON l.id_agrupado = t.id_agrupado
    AND w.rank = 1;
    ''' % {'table_polling': table_polling,
           'table_votes': table_votes,
           'table_census': table_census,
           'table_totals': table_totals,
           'winner': 'winner'}
    results = db.query(q)
    cache_table = db['cache_votos_paso_2015_loc']
    cache_table.insert_many(results)
def process_CABA():
print "clear DB"
clearDB()
print "import polling station data"
import_poll_stations('%s/%s'
% (REL_POLLING_PATH, POLLING_STATIONS_DATA_FILE))
print "import polling tables data"
import_poll_tables('%s/%s'
% (REL_POLLING_PATH, POLLING_TABLES_DATA_FILE))
print "import results"
import_results('%s/%s'
% (REL_RESULTS_PATH, RESULTS_DATA_FILE))
print "import relations"
import_relations('%s/%s'
% (REL_POLLING_PATH, RELATIONS_DATA_FILE))
print "create polling stations tmp"
create_locales_tmp()
print "create polling stations assigned to location"
create_locales_loc()
print "aggregate census data by polling station"
aggregate_census_by_poll_station()
print "aggregate results by polling station and party"
aggregate_results_by_poll_station()
print "aggregate totals by polling station"
aggregate_totals_by_poll_station()
print "aggregate census data by location"
aggregate_census_by_location()
print "aggregate results by location"
aggregate_results_by_location()
print "aggregate totals by location"
aggregate_totals_by_location()
print "create unnormalized table for cartodb performance"
make_cache_table()
print "create unnormalized table for cartodb performance by location"
make_cache_table_loc()
if __name__ == "__main__":
    # Connect once (module-global `db`) and run the whole pipeline.
    db = connect_dataset()
    process_CABA()
|
|
"""
python generate_sparsetools.py
Generate manual wrappers for C++ sparsetools code.
Type codes used:
'i': integer scalar
'I': integer array
'T': data array
'B': boolean array
'V': std::vector<integer>*
'W': std::vector<data>*
'*': indicates that the next argument is an output argument
'v': void
'l': 64-bit integer scalar
See sparsetools.cxx for more details.
"""
import optparse
import os
from distutils.dep_util import newer
#
# List of all routines and their argument types.
#
# The first code indicates the return value, the rest the arguments.
#
# bsr.h
BSR_ROUTINES = """
bsr_diagonal v iiiiiIIT*T
bsr_tocsr v iiiiIIT*I*I*T
bsr_scale_rows v iiiiII*TT
bsr_scale_columns v iiiiII*TT
bsr_sort_indices v iiii*I*I*T
bsr_transpose v iiiiIIT*I*I*T
bsr_matmat v iiiiiiIITIIT*I*I*T
bsr_matvec v iiiiIITT*T
bsr_matvecs v iiiiiIITT*T
bsr_elmul_bsr v iiiiIITIIT*I*I*T
bsr_eldiv_bsr v iiiiIITIIT*I*I*T
bsr_plus_bsr v iiiiIITIIT*I*I*T
bsr_minus_bsr v iiiiIITIIT*I*I*T
bsr_maximum_bsr v iiiiIITIIT*I*I*T
bsr_minimum_bsr v iiiiIITIIT*I*I*T
bsr_ne_bsr v iiiiIITIIT*I*I*B
bsr_lt_bsr v iiiiIITIIT*I*I*B
bsr_gt_bsr v iiiiIITIIT*I*I*B
bsr_le_bsr v iiiiIITIIT*I*I*B
bsr_ge_bsr v iiiiIITIIT*I*I*B
"""
# csc.h
CSC_ROUTINES = """
csc_diagonal v iiiIIT*T
csc_tocsr v iiIIT*I*I*T
csc_matmat_maxnnz l iiIIII
csc_matmat v iiIITIIT*I*I*T
csc_matvec v iiIITT*T
csc_matvecs v iiiIITT*T
csc_elmul_csc v iiIITIIT*I*I*T
csc_eldiv_csc v iiIITIIT*I*I*T
csc_plus_csc v iiIITIIT*I*I*T
csc_minus_csc v iiIITIIT*I*I*T
csc_maximum_csc v iiIITIIT*I*I*T
csc_minimum_csc v iiIITIIT*I*I*T
csc_ne_csc v iiIITIIT*I*I*B
csc_lt_csc v iiIITIIT*I*I*B
csc_gt_csc v iiIITIIT*I*I*B
csc_le_csc v iiIITIIT*I*I*B
csc_ge_csc v iiIITIIT*I*I*B
"""
# csr.h
CSR_ROUTINES = """
csr_matmat_maxnnz l iiIIII
csr_matmat v iiIITIIT*I*I*T
csr_diagonal v iiiIIT*T
csr_tocsc v iiIIT*I*I*T
csr_tobsr v iiiiIIT*I*I*T
csr_todense v iiIIT*T
csr_matvec v iiIITT*T
csr_matvecs v iiiIITT*T
csr_elmul_csr v iiIITIIT*I*I*T
csr_eldiv_csr v iiIITIIT*I*I*T
csr_plus_csr v iiIITIIT*I*I*T
csr_minus_csr v iiIITIIT*I*I*T
csr_maximum_csr v iiIITIIT*I*I*T
csr_minimum_csr v iiIITIIT*I*I*T
csr_ne_csr v iiIITIIT*I*I*B
csr_lt_csr v iiIITIIT*I*I*B
csr_gt_csr v iiIITIIT*I*I*B
csr_le_csr v iiIITIIT*I*I*B
csr_ge_csr v iiIITIIT*I*I*B
csr_scale_rows v iiII*TT
csr_scale_columns v iiII*TT
csr_sort_indices v iI*I*T
csr_eliminate_zeros v ii*I*I*T
csr_sum_duplicates v ii*I*I*T
get_csr_submatrix v iiIITiiii*V*V*W
csr_row_index v iIIIT*I*T
csr_row_slice v iiiIIT*I*T
csr_column_index1 v iIiiII*I*I
csr_column_index2 v IIiIT*I*T
csr_sample_values v iiIITiII*T
csr_count_blocks i iiiiII
csr_sample_offsets i iiIIiII*I
csr_hstack v iiIIIT*I*I*T
expandptr v iI*I
test_throw_error i
csr_has_sorted_indices i iII
csr_has_canonical_format i iII
"""
# coo.h, dia.h, csgraph.h
OTHER_ROUTINES = """
coo_tocsr v iiiIIT*I*I*T
coo_todense v iilIIT*Ti
coo_matvec v lIITT*T
dia_matvec v iiiiITT*T
cs_graph_components i iII*I
"""
# List of compilation units
COMPILATION_UNITS = [
('bsr', BSR_ROUTINES),
('csr', CSR_ROUTINES),
('csc', CSC_ROUTINES),
('other', OTHER_ROUTINES),
]
#
# List of the supported index typenums and the corresponding C++ types
#
I_TYPES = [
('NPY_INT32', 'npy_int32'),
('NPY_INT64', 'npy_int64'),
]
#
# List of the supported data typenums and the corresponding C++ types
#
T_TYPES = [
('NPY_BOOL', 'npy_bool_wrapper'),
('NPY_BYTE', 'npy_byte'),
('NPY_UBYTE', 'npy_ubyte'),
('NPY_SHORT', 'npy_short'),
('NPY_USHORT', 'npy_ushort'),
('NPY_INT', 'npy_int'),
('NPY_UINT', 'npy_uint'),
('NPY_LONG', 'npy_long'),
('NPY_ULONG', 'npy_ulong'),
('NPY_LONGLONG', 'npy_longlong'),
('NPY_ULONGLONG', 'npy_ulonglong'),
('NPY_FLOAT', 'npy_float'),
('NPY_DOUBLE', 'npy_double'),
('NPY_LONGDOUBLE', 'npy_longdouble'),
('NPY_CFLOAT', 'npy_cfloat_wrapper'),
('NPY_CDOUBLE', 'npy_cdouble_wrapper'),
('NPY_CLONGDOUBLE', 'npy_clongdouble_wrapper'),
]
#
# Code templates
#
THUNK_TEMPLATE = """
static PY_LONG_LONG %(name)s_thunk(int I_typenum, int T_typenum, void **a)
{
%(thunk_content)s
}
"""
METHOD_TEMPLATE = """
NPY_VISIBILITY_HIDDEN PyObject *
%(name)s_method(PyObject *self, PyObject *args)
{
return call_thunk('%(ret_spec)s', "%(arg_spec)s", %(name)s_thunk, args);
}
"""
GET_THUNK_CASE_TEMPLATE = """
static int get_thunk_case(int I_typenum, int T_typenum)
{
%(content)s;
return -1;
}
"""
#
# Code generation
#
def get_thunk_type_set():
    """
    Get a list containing cartesian product of data types, plus a getter routine.

    Returns
    -------
    i_types : list [(j, I_typenum, None, I_type, None), ...]
        Pairing of index type numbers and the corresponding C++ types,
        and an unique index `j`. This is for routines that are parameterized
        only by I but not by T.
    it_types : list [(j, I_typenum, T_typenum, I_type, T_type), ...]
        Same as `i_types`, but for routines parameterized both by T and I.
    getter_code : str
        C++ code for a function that takes I_typenum, T_typenum and returns
        the unique index corresponding to the lists, or -1 if no match was
        found.
    """
    i_types = []
    it_types = []
    # Build the C++ dispatch chain as separate chunks, joined at the end.
    chunks = [" if (0) {}"]
    j = 0
    for I_typenum, I_type in I_TYPES:
        # T_typenum == -1 marks an I-only routine.
        chunks.append("""
        else if (I_typenum == %(I_typenum)s) {
            if (T_typenum == -1) { return %(j)s; }"""
                      % dict(I_typenum=I_typenum, j=j))
        i_types.append((j, I_typenum, None, I_type, None))
        j += 1
        for T_typenum, T_type in T_TYPES:
            chunks.append("""
            else if (T_typenum == %(T_typenum)s) { return %(j)s; }"""
                          % dict(T_typenum=T_typenum, j=j))
            it_types.append((j, I_typenum, T_typenum, I_type, T_type))
            j += 1
        chunks.append("""
        }""")
    getter_code = "".join(chunks)
    return i_types, it_types, GET_THUNK_CASE_TEMPLATE % dict(content=getter_code)
def parse_routine(name, args, types):
    """
    Generate thunk and method code for a given routine.

    Parameters
    ----------
    name : str
        Name of the C++ routine
    args : str
        Argument list specification (in format explained above)
    types : list
        List of types to instantiate, as returned `get_thunk_type_set`

    Returns
    -------
    (thunk_code, method_code) : (str, str)
        C++ source for the type-dispatching thunk and the Python-visible
        method wrapper.
    """
    # First spec character is the return type; the rest describe arguments.
    ret_spec = args[0]
    arg_spec = args[1:]

    def get_arglist(I_type, T_type):
        """
        Generate argument list for calling the C++ function
        """
        args = []
        next_is_writeable = False
        j = 0  # index into the void** argument array
        for t in arg_spec:
            # '*' marks the FOLLOWING argument as output (non-const)
            const = '' if next_is_writeable else 'const '
            next_is_writeable = False
            if t == '*':
                next_is_writeable = True
                continue  # '*' consumes no slot in a[]
            elif t == 'i':
                args.append("*(%s*)a[%d]" % (const + I_type, j))
            elif t == 'I':
                args.append("(%s*)a[%d]" % (const + I_type, j))
            elif t == 'T':
                args.append("(%s*)a[%d]" % (const + T_type, j))
            elif t == 'B':
                args.append("(npy_bool_wrapper*)a[%d]" % (j,))
            elif t == 'V':
                if const:
                    raise ValueError("'V' argument must be an output arg")
                args.append("(std::vector<%s>*)a[%d]" % (I_type, j,))
            elif t == 'W':
                if const:
                    raise ValueError("'W' argument must be an output arg")
                args.append("(std::vector<%s>*)a[%d]" % (T_type, j,))
            elif t == 'l':
                args.append("*(%snpy_int64*)a[%d]" % (const, j))
            else:
                raise ValueError("Invalid spec character %r" % (t,))
            j += 1
        return ", ".join(args)

    # Generate thunk code: a giant switch statement with different
    # type combinations inside.
    thunk_content = """int j = get_thunk_case(I_typenum, T_typenum);
    switch (j) {"""
    for j, I_typenum, T_typenum, I_type, T_type in types:
        arglist = get_arglist(I_type, T_type)
        piece = """
        case %(j)s:"""
        if ret_spec == 'v':
            # void routines: discard the result, return 0 to the caller
            piece += """
            (void)%(name)s(%(arglist)s);
            return 0;"""
        else:
            piece += """
            return %(name)s(%(arglist)s);"""
        thunk_content += piece % dict(j=j, I_type=I_type, T_type=T_type,
                                      I_typenum=I_typenum, T_typenum=T_typenum,
                                      arglist=arglist, name=name)
    thunk_content += """
    default:
        throw std::runtime_error("internal error: invalid argument typenums");
    }"""
    thunk_code = THUNK_TEMPLATE % dict(name=name,
                                       thunk_content=thunk_content)

    # Generate method code
    method_code = METHOD_TEMPLATE % dict(name=name,
                                         ret_spec=ret_spec,
                                         arg_spec=arg_spec)

    return thunk_code, method_code
def main():
    """Parse CLI options and regenerate the sparsetools *_impl.h headers."""
    # NOTE(review): optparse and distutils.dep_util are both deprecated
    # (distutils is removed in Python 3.12) — kept as-is here; migrating to
    # argparse + an mtime check would be a separate change.
    p = optparse.OptionParser(usage=(__doc__ or '').strip())
    p.add_option("--no-force", action="store_false",
                 dest="force", default=True)
    p.add_option("-o", "--outdir", type=str,
                 help="Relative path to the output directory")
    options, args = p.parse_args()

    names = []  # all routine names, across every compilation unit

    i_types, it_types, getter_code = get_thunk_type_set()

    # Generate *_impl.h for each compilation unit
    for unit_name, routines in COMPILATION_UNITS:
        thunks = []
        methods = []

        # Generate thunks and methods for all routines
        for line in routines.splitlines():
            line = line.strip()
            if not line or line.startswith('#'):
                continue

            try:
                name, args = line.split(None, 1)
            except ValueError as e:
                raise ValueError("Malformed line: %r" % (line,)) from e

            args = "".join(args.split())
            # Routines with a data ('T') code need the full IxT product;
            # the rest are index-only.  ('t' appears unused by the current
            # spec strings — kept for safety.)
            if 't' in args or 'T' in args:
                thunk, method = parse_routine(name, args, it_types)
            else:
                thunk, method = parse_routine(name, args, i_types)

            if name in names:
                raise ValueError("Duplicate routine %r" % (name,))

            names.append(name)
            thunks.append(thunk)
            methods.append(method)

        # Produce output
        if options.outdir:
            # Used by Meson (options.outdir == scipy/sparse/sparsetools)
            outdir = os.path.join(os.getcwd(), options.outdir)
        else:
            # Used by setup.py
            outdir = os.path.join(os.path.dirname(__file__), 'sparsetools')
        dst = os.path.join(outdir,
                           unit_name + '_impl.h')
        # Regenerate only when this script is newer than the output
        # (or when --no-force was not given, i.e. force is True).
        if newer(__file__, dst) or options.force:
            if not options.outdir:
                # Be silent if we're using Meson. TODO: add --verbose option
                print("[generate_sparsetools] generating %r" % (dst,))
            with open(dst, 'w') as f:
                write_autogen_blurb(f)
                f.write(getter_code)
                for thunk in thunks:
                    f.write(thunk)
                for method in methods:
                    f.write(method)
        else:
            if not options.outdir:
                # Be silent if we're using Meson
                print("[generate_sparsetools] %r already up-to-date" % (dst,))

    # Generate code for method struct
    method_defs = ""
    for name in names:
        method_defs += "NPY_VISIBILITY_HIDDEN PyObject *%s_method(PyObject *, PyObject *);\n" % (name,)

    method_struct = """\nstatic struct PyMethodDef sparsetools_methods[] = {"""
    for name in names:
        method_struct += """
        {"%(name)s", (PyCFunction)%(name)s_method, METH_VARARGS, NULL},""" % dict(name=name)
    method_struct += """
        {NULL, NULL, 0, NULL}
    };"""

    # Produce sparsetools_impl.h
    # (reuses `outdir` from the last loop iteration above)
    dst = os.path.join(outdir, 'sparsetools_impl.h')
    if newer(__file__, dst) or options.force:
        if not options.outdir:
            # Be silent if we're using Meson.
            print("[generate_sparsetools] generating %r" % (dst,))
        with open(dst, 'w') as f:
            write_autogen_blurb(f)
            f.write(method_defs)
            f.write(method_struct)
    else:
        if not options.outdir:
            # Be silent if we're using Meson
            print("[generate_sparsetools] %r already up-to-date" % (dst,))
def write_autogen_blurb(stream):
    """Write the standard do-not-edit header comment to *stream*."""
    blurb = ("/* This file is autogenerated by generate_sparsetools.py\n"
             " * Do not edit manually or check into VCS.\n"
             " */\n")
    stream.write(blurb)
if __name__ == "__main__":
main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class AccessLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Access level; member lookup is case-insensitive (CaseInsensitiveEnumMeta)."""

    NONE = "None"
    READ = "Read"
class AggregatedReplicationState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The aggregated replication status based on all the regional replication
    status flags."""

    UNKNOWN = "Unknown"
    IN_PROGRESS = "InProgress"
    COMPLETED = "Completed"
    FAILED = "Failed"
class AvailabilitySetSkuTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the sku of an Availability Set. Use 'Aligned' for virtual
    machines with managed disks and 'Classic' for virtual machines with
    unmanaged disks. Default value is 'Classic'."""

    CLASSIC = "Classic"
    ALIGNED = "Aligned"
class CachingTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the caching requirements. Possible values are: **None**,
    **ReadOnly**, **ReadWrite**. Default: **None for Standard storage,
    ReadOnly for Premium storage**."""

    NONE = "None"
    READ_ONLY = "ReadOnly"
    READ_WRITE = "ReadWrite"
class DiffDiskOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the ephemeral disk option for operating system disk."""

    LOCAL = "Local"
class DiskCreateOption(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """This enumerates the possible sources of a disk's creation."""

    EMPTY = "Empty"
    ATTACH = "Attach"
    FROM_IMAGE = "FromImage"
    # member named IMPORT_ENUM (not IMPORT) because "import" is a Python keyword
    IMPORT_ENUM = "Import"
    COPY = "Copy"
    RESTORE = "Restore"
class DiskCreateOptionTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies how the virtual machine should be created. Possible values:
    **Attach** \u2013 used when creating the VM from a specialized disk;
    **FromImage** \u2013 used when creating the VM from an image (with the
    imageReference element and, for marketplace images, the plan element)."""

    FROM_IMAGE = "FromImage"
    EMPTY = "Empty"
    ATTACH = "Attach"
class DiskStorageAccountTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The sku name (managed disk storage account type)."""

    STANDARD_LRS = "Standard_LRS"
    PREMIUM_LRS = "Premium_LRS"
    STANDARD_SSD_LRS = "StandardSSD_LRS"
    ULTRA_SSD_LRS = "UltraSSD_LRS"
class GalleryImagePropertiesProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The provisioning state, which only appears in the response."""

    CREATING = "Creating"
    UPDATING = "Updating"
    FAILED = "Failed"
    SUCCEEDED = "Succeeded"
    DELETING = "Deleting"
    MIGRATING = "Migrating"
class GalleryImageVersionPropertiesProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The provisioning state, which only appears in the response."""

    CREATING = "Creating"
    UPDATING = "Updating"
    FAILED = "Failed"
    SUCCEEDED = "Succeeded"
    DELETING = "Deleting"
    MIGRATING = "Migrating"
class GalleryPropertiesProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The provisioning state, which only appears in the response."""

    CREATING = "Creating"
    UPDATING = "Updating"
    FAILED = "Failed"
    SUCCEEDED = "Succeeded"
    DELETING = "Deleting"
    MIGRATING = "Migrating"
class HostCaching(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The host caching of the disk. Valid values are 'None', 'ReadOnly',
    and 'ReadWrite'."""

    NONE = "None"
    READ_ONLY = "ReadOnly"
    READ_WRITE = "ReadWrite"
class IntervalInMins(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Interval value in minutes used to create LogAnalytics call rate logs."""

    THREE_MINS = "ThreeMins"
    FIVE_MINS = "FiveMins"
    THIRTY_MINS = "ThirtyMins"
    SIXTY_MINS = "SixtyMins"
class IPVersion(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Available from Api-Version 2017-03-30 onwards; whether the specific
    ipconfiguration is IPv4 or IPv6. Default is IPv4. Possible values:
    'IPv4' and 'IPv6'."""

    # member names are generator-produced splits of "IPv4"/"IPv6"
    I_PV4 = "IPv4"
    I_PV6 = "IPv6"
class MaintenanceOperationResultCodeTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The Last Maintenance Operation Result Code."""

    NONE = "None"
    RETRY_LATER = "RetryLater"
    MAINTENANCE_ABORTED = "MaintenanceAborted"
    MAINTENANCE_COMPLETED = "MaintenanceCompleted"
class OperatingSystemStateTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The OS State."""

    GENERALIZED = "Generalized"
    SPECIALIZED = "Specialized"
class OperatingSystemTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The operating system of the osDiskImage."""

    WINDOWS = "Windows"
    LINUX = "Linux"
class ProtocolTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the protocol of the listener. Possible values: **http**,
    **https**."""

    HTTP = "Http"
    HTTPS = "Https"
class ProximityPlacementGroupType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the type of the proximity placement group. Possible values:
    **Standard** : Co-locate resources within an Azure region or
    Availability Zone. **Ultra** : For future use."""

    STANDARD = "Standard"
    ULTRA = "Ultra"
class ReplicationState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """This is the regional replication state."""

    UNKNOWN = "Unknown"
    REPLICATING = "Replicating"
    COMPLETED = "Completed"
    FAILED = "Failed"
class ReplicationStatusTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Replication status type; single supported value."""

    REPLICATION_STATUS = "ReplicationStatus"
class ResourceIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of identity used for the virtual machine. The type
    'SystemAssigned, UserAssigned' includes both an implicitly created
    identity and a set of user assigned identities. The type 'None' will
    remove any identities from the virtual machine."""

    SYSTEM_ASSIGNED = "SystemAssigned"
    USER_ASSIGNED = "UserAssigned"
    # combined identity: note the value is a single comma-separated string
    SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
    NONE = "None"
class RollingUpgradeActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The last action performed on the rolling upgrade."""

    START = "Start"
    CANCEL = "Cancel"
class RollingUpgradeStatusCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Code indicating the current status of the upgrade."""

    ROLLING_FORWARD = "RollingForward"
    CANCELLED = "Cancelled"
    COMPLETED = "Completed"
    FAULTED = "Faulted"
class SettingNames(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the name of the setting to which the content applies.
    Possible values are: FirstLogonCommands and AutoLogon."""

    AUTO_LOGON = "AutoLogon"
    FIRST_LOGON_COMMANDS = "FirstLogonCommands"
class SnapshotStorageAccountTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The sku name (snapshot storage account type)."""

    STANDARD_LRS = "Standard_LRS"
    PREMIUM_LRS = "Premium_LRS"
    STANDARD_ZRS = "Standard_ZRS"
class StatusLevelTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The level code."""

    INFO = "Info"
    WARNING = "Warning"
    ERROR = "Error"
class StorageAccountTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the storage account type for the managed disk.
    NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used
    with OS Disk."""

    STANDARD_LRS = "Standard_LRS"
    PREMIUM_LRS = "Premium_LRS"
    STANDARD_SSD_LRS = "StandardSSD_LRS"
    ULTRA_SSD_LRS = "UltraSSD_LRS"
class UpgradeMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the mode of an upgrade to virtual machines in the scale set.
    Possible values: **Manual** - you control the application of updates via
    the manualUpgrade action; **Automatic** - all virtual machines in the
    scale set are automatically updated at the same time."""

    AUTOMATIC = "Automatic"
    MANUAL = "Manual"
    ROLLING = "Rolling"
class UpgradeOperationInvoker(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Invoker of the Upgrade Operation."""

    UNKNOWN = "Unknown"
    USER = "User"
    PLATFORM = "Platform"
class UpgradeState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Code indicating the current status of the upgrade."""

    ROLLING_FORWARD = "RollingForward"
    CANCELLED = "Cancelled"
    COMPLETED = "Completed"
    FAULTED = "Faulted"
class VirtualMachineEvictionPolicyTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the eviction policy for virtual machines in a low priority
    scale set. Minimum api-version: 2017-10-30-preview."""

    DEALLOCATE = "Deallocate"
    DELETE = "Delete"
class VirtualMachinePriorityTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the priority for the virtual machines in the scale set.
    Minimum api-version: 2017-10-30-preview."""

    REGULAR = "Regular"
    LOW = "Low"
class VirtualMachineScaleSetSkuScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The scale type applicable to the sku.
    """

    AUTOMATIC = "Automatic"
    NONE = "None"
class VirtualMachineSizeTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the size of the virtual machine. For more information about virtual machine sizes,
    see `Sizes for virtual machines
    <https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-sizes?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json>`_.
    :code:`<br>`:code:`<br>` The available VM sizes depend on region and availability set. For a
    list of available sizes use these APIs: :code:`<br>`:code:`<br>` `List all available virtual
    machine sizes in an availability set
    <https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes>`_
    :code:`<br>`:code:`<br>` `List all available virtual machine sizes in a region
    <https://docs.microsoft.com/rest/api/compute/virtualmachinesizes/list>`_
    :code:`<br>`:code:`<br>` `List all available virtual machine sizes for resizing
    <https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes>`_
    """

    # Basic tier A-series.
    BASIC_A0 = "Basic_A0"
    BASIC_A1 = "Basic_A1"
    BASIC_A2 = "Basic_A2"
    BASIC_A3 = "Basic_A3"
    BASIC_A4 = "Basic_A4"
    # Standard A-series (v1).
    STANDARD_A0 = "Standard_A0"
    STANDARD_A1 = "Standard_A1"
    STANDARD_A2 = "Standard_A2"
    STANDARD_A3 = "Standard_A3"
    STANDARD_A4 = "Standard_A4"
    STANDARD_A5 = "Standard_A5"
    STANDARD_A6 = "Standard_A6"
    STANDARD_A7 = "Standard_A7"
    STANDARD_A8 = "Standard_A8"
    STANDARD_A9 = "Standard_A9"
    STANDARD_A10 = "Standard_A10"
    STANDARD_A11 = "Standard_A11"
    # A-series v2.
    STANDARD_A1_V2 = "Standard_A1_v2"
    STANDARD_A2_V2 = "Standard_A2_v2"
    STANDARD_A4_V2 = "Standard_A4_v2"
    STANDARD_A8_V2 = "Standard_A8_v2"
    STANDARD_A2_M_V2 = "Standard_A2m_v2"
    STANDARD_A4_M_V2 = "Standard_A4m_v2"
    STANDARD_A8_M_V2 = "Standard_A8m_v2"
    # Burstable B-series.
    STANDARD_B1_S = "Standard_B1s"
    STANDARD_B1_MS = "Standard_B1ms"
    STANDARD_B2_S = "Standard_B2s"
    STANDARD_B2_MS = "Standard_B2ms"
    STANDARD_B4_MS = "Standard_B4ms"
    STANDARD_B8_MS = "Standard_B8ms"
    # General purpose D-series (v1).
    STANDARD_D1 = "Standard_D1"
    STANDARD_D2 = "Standard_D2"
    STANDARD_D3 = "Standard_D3"
    STANDARD_D4 = "Standard_D4"
    STANDARD_D11 = "Standard_D11"
    STANDARD_D12 = "Standard_D12"
    STANDARD_D13 = "Standard_D13"
    STANDARD_D14 = "Standard_D14"
    # D-series v2.
    STANDARD_D1_V2 = "Standard_D1_v2"
    STANDARD_D2_V2 = "Standard_D2_v2"
    STANDARD_D3_V2 = "Standard_D3_v2"
    STANDARD_D4_V2 = "Standard_D4_v2"
    STANDARD_D5_V2 = "Standard_D5_v2"
    # D-series v3.
    STANDARD_D2_V3 = "Standard_D2_v3"
    STANDARD_D4_V3 = "Standard_D4_v3"
    STANDARD_D8_V3 = "Standard_D8_v3"
    STANDARD_D16_V3 = "Standard_D16_v3"
    STANDARD_D32_V3 = "Standard_D32_v3"
    STANDARD_D64_V3 = "Standard_D64_v3"
    # Ds-series v3 (premium storage capable).
    STANDARD_D2_S_V3 = "Standard_D2s_v3"
    STANDARD_D4_S_V3 = "Standard_D4s_v3"
    STANDARD_D8_S_V3 = "Standard_D8s_v3"
    STANDARD_D16_S_V3 = "Standard_D16s_v3"
    STANDARD_D32_S_V3 = "Standard_D32s_v3"
    STANDARD_D64_S_V3 = "Standard_D64s_v3"
    STANDARD_D11_V2 = "Standard_D11_v2"
    STANDARD_D12_V2 = "Standard_D12_v2"
    STANDARD_D13_V2 = "Standard_D13_v2"
    STANDARD_D14_V2 = "Standard_D14_v2"
    STANDARD_D15_V2 = "Standard_D15_v2"
    # DS-series (premium storage).
    STANDARD_DS1 = "Standard_DS1"
    STANDARD_DS2 = "Standard_DS2"
    STANDARD_DS3 = "Standard_DS3"
    STANDARD_DS4 = "Standard_DS4"
    STANDARD_DS11 = "Standard_DS11"
    STANDARD_DS12 = "Standard_DS12"
    STANDARD_DS13 = "Standard_DS13"
    STANDARD_DS14 = "Standard_DS14"
    # DS-series v2, including constrained-vCPU variants (e.g. DS13-4).
    STANDARD_DS1_V2 = "Standard_DS1_v2"
    STANDARD_DS2_V2 = "Standard_DS2_v2"
    STANDARD_DS3_V2 = "Standard_DS3_v2"
    STANDARD_DS4_V2 = "Standard_DS4_v2"
    STANDARD_DS5_V2 = "Standard_DS5_v2"
    STANDARD_DS11_V2 = "Standard_DS11_v2"
    STANDARD_DS12_V2 = "Standard_DS12_v2"
    STANDARD_DS13_V2 = "Standard_DS13_v2"
    STANDARD_DS14_V2 = "Standard_DS14_v2"
    STANDARD_DS15_V2 = "Standard_DS15_v2"
    STANDARD_DS13_4_V2 = "Standard_DS13-4_v2"
    STANDARD_DS13_2_V2 = "Standard_DS13-2_v2"
    STANDARD_DS14_8_V2 = "Standard_DS14-8_v2"
    STANDARD_DS14_4_V2 = "Standard_DS14-4_v2"
    # Memory optimized E-series v3.
    STANDARD_E2_V3 = "Standard_E2_v3"
    STANDARD_E4_V3 = "Standard_E4_v3"
    STANDARD_E8_V3 = "Standard_E8_v3"
    STANDARD_E16_V3 = "Standard_E16_v3"
    STANDARD_E32_V3 = "Standard_E32_v3"
    STANDARD_E64_V3 = "Standard_E64_v3"
    STANDARD_E2_S_V3 = "Standard_E2s_v3"
    STANDARD_E4_S_V3 = "Standard_E4s_v3"
    STANDARD_E8_S_V3 = "Standard_E8s_v3"
    STANDARD_E16_S_V3 = "Standard_E16s_v3"
    STANDARD_E32_S_V3 = "Standard_E32s_v3"
    STANDARD_E64_S_V3 = "Standard_E64s_v3"
    STANDARD_E32_16_V3 = "Standard_E32-16_v3"
    STANDARD_E32_8_S_V3 = "Standard_E32-8s_v3"
    STANDARD_E64_32_S_V3 = "Standard_E64-32s_v3"
    STANDARD_E64_16_S_V3 = "Standard_E64-16s_v3"
    # Compute optimized F-series.
    STANDARD_F1 = "Standard_F1"
    STANDARD_F2 = "Standard_F2"
    STANDARD_F4 = "Standard_F4"
    STANDARD_F8 = "Standard_F8"
    STANDARD_F16 = "Standard_F16"
    STANDARD_F1_S = "Standard_F1s"
    STANDARD_F2_S = "Standard_F2s"
    STANDARD_F4_S = "Standard_F4s"
    STANDARD_F8_S = "Standard_F8s"
    STANDARD_F16_S = "Standard_F16s"
    STANDARD_F2_S_V2 = "Standard_F2s_v2"
    STANDARD_F4_S_V2 = "Standard_F4s_v2"
    STANDARD_F8_S_V2 = "Standard_F8s_v2"
    STANDARD_F16_S_V2 = "Standard_F16s_v2"
    STANDARD_F32_S_V2 = "Standard_F32s_v2"
    STANDARD_F64_S_V2 = "Standard_F64s_v2"
    STANDARD_F72_S_V2 = "Standard_F72s_v2"
    # Memory and storage optimized G/GS-series.
    STANDARD_G1 = "Standard_G1"
    STANDARD_G2 = "Standard_G2"
    STANDARD_G3 = "Standard_G3"
    STANDARD_G4 = "Standard_G4"
    STANDARD_G5 = "Standard_G5"
    STANDARD_GS1 = "Standard_GS1"
    STANDARD_GS2 = "Standard_GS2"
    STANDARD_GS3 = "Standard_GS3"
    STANDARD_GS4 = "Standard_GS4"
    STANDARD_GS5 = "Standard_GS5"
    STANDARD_GS4_8 = "Standard_GS4-8"
    STANDARD_GS4_4 = "Standard_GS4-4"
    STANDARD_GS5_16 = "Standard_GS5-16"
    STANDARD_GS5_8 = "Standard_GS5-8"
    # High performance compute H-series.
    STANDARD_H8 = "Standard_H8"
    STANDARD_H16 = "Standard_H16"
    STANDARD_H8_M = "Standard_H8m"
    STANDARD_H16_M = "Standard_H16m"
    STANDARD_H16_R = "Standard_H16r"
    STANDARD_H16_MR = "Standard_H16mr"
    # Storage optimized L-series.
    STANDARD_L4_S = "Standard_L4s"
    STANDARD_L8_S = "Standard_L8s"
    STANDARD_L16_S = "Standard_L16s"
    STANDARD_L32_S = "Standard_L32s"
    # Large-memory M-series, including constrained-vCPU variants.
    STANDARD_M64_S = "Standard_M64s"
    STANDARD_M64_MS = "Standard_M64ms"
    STANDARD_M128_S = "Standard_M128s"
    STANDARD_M128_MS = "Standard_M128ms"
    STANDARD_M64_32_MS = "Standard_M64-32ms"
    STANDARD_M64_16_MS = "Standard_M64-16ms"
    STANDARD_M128_64_MS = "Standard_M128-64ms"
    STANDARD_M128_32_MS = "Standard_M128-32ms"
    # GPU NC-series (v1/v2/v3).
    STANDARD_NC6 = "Standard_NC6"
    STANDARD_NC12 = "Standard_NC12"
    STANDARD_NC24 = "Standard_NC24"
    STANDARD_NC24_R = "Standard_NC24r"
    STANDARD_NC6_S_V2 = "Standard_NC6s_v2"
    STANDARD_NC12_S_V2 = "Standard_NC12s_v2"
    STANDARD_NC24_S_V2 = "Standard_NC24s_v2"
    STANDARD_NC24_RS_V2 = "Standard_NC24rs_v2"
    STANDARD_NC6_S_V3 = "Standard_NC6s_v3"
    STANDARD_NC12_S_V3 = "Standard_NC12s_v3"
    STANDARD_NC24_S_V3 = "Standard_NC24s_v3"
    STANDARD_NC24_RS_V3 = "Standard_NC24rs_v3"
    # GPU ND-series.
    STANDARD_ND6_S = "Standard_ND6s"
    STANDARD_ND12_S = "Standard_ND12s"
    STANDARD_ND24_S = "Standard_ND24s"
    STANDARD_ND24_RS = "Standard_ND24rs"
    # Visualization NV-series.
    STANDARD_NV6 = "Standard_NV6"
    STANDARD_NV12 = "Standard_NV12"
    STANDARD_NV24 = "Standard_NV24"
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import numpy as np
import mxnet as mx
import random
import itertools
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
import unittest
def test_box_nms_op():
    """Exercise mx.contrib box_nms (forward and backward) on hand-built cases.

    Covers: forced/unforced suppression, score threshold, topk, multiple
    batch dims, corner/center encodings, and arbitrary column layouts.
    """
    def test_box_nms_forward(data, expected, thresh=0.5, valid=0, topk=-1, coord=2, score=1, cid=0,
                             force=False, in_format='corner', out_format='corner'):
        # Convert the *original* numpy input for each dtype.  The previous
        # code rebound ``data`` inside the loop, so the float32/float64 runs
        # were converted from an already-quantized float16 array.
        for dtype in ['float16', 'float32', 'float64']:
            arr = mx.nd.array(data, dtype=dtype)
            out = mx.contrib.nd.box_nms(arr, overlap_thresh=thresh, valid_thresh=valid, topk=topk,
                                        coord_start=coord, score_index=score, id_index=cid,
                                        force_suppress=force, in_format=in_format, out_format=out_format)
            assert_almost_equal(out.asnumpy(), expected.astype(dtype), rtol=1e-3, atol=1e-3)

    def test_box_nms_backward(data, grad, expected, thresh=0.5, valid=0, topk=-1, coord=2, score=1,
                              cid=0, force=False, in_format='corner', out_format='corner'):
        # Symbolic graph so that gradients can be propagated back through
        # the reordering performed by box_nms.
        in_var = mx.sym.Variable('data')
        arr_data = mx.nd.array(data)
        arr_grad = mx.nd.empty(arr_data.shape)
        op = mx.contrib.sym.box_nms(in_var, overlap_thresh=thresh, valid_thresh=valid, topk=topk,
                                    coord_start=coord, score_index=score, id_index=cid,
                                    force_suppress=force, in_format=in_format, out_format=out_format)
        exe = op.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
        exe.forward(is_train=True)
        exe.backward(mx.nd.array(grad))
        assert_almost_equal(arr_grad.asnumpy(), expected)

    def corner_to_center(data):
        """Convert (id, score, x1, y1, x2, y2) rows to center/size encoding."""
        out = np.reshape(data, (-1, 6)).copy()
        out[:, 2] = (data[:, 2] + data[:, 4]) / 2.0
        out[:, 3] = (data[:, 3] + data[:, 5]) / 2.0
        out[:, 4] = data[:, 4] - data[:, 2]
        out[:, 5] = data[:, 5] - data[:, 3]
        invalid = np.where(data[:, 0] < 0)[0]
        out[invalid, :] = -1
        return out

    def center_to_corner(data):
        """Convert (id, score, cx, cy, w, h) rows back to corner encoding."""
        # BUG FIX: the original rebound the input to ``data`` and then wrote
        # into an undefined name ``out``, raising NameError when called.
        # Mirror corner_to_center: copy into ``out``, read from ``data``.
        out = np.reshape(data, (-1, 6)).copy()
        out[:, 2] = data[:, 2] - data[:, 4] / 2.0
        out[:, 3] = data[:, 3] - data[:, 5] / 2.0
        out[:, 4] = data[:, 2] + data[:, 4] / 2.0
        out[:, 5] = data[:, 3] + data[:, 5] / 2.0
        invalid = np.where(data[:, 0] < 0)[0]
        out[invalid, :] = -1
        return out

    def swap_position(data, expected, coord=2, score=1, cid=0, new_col=0):
        """Shuffle the coord/score/id columns into random positions,
        optionally padding with ``new_col`` extra columns of -1."""
        data = np.reshape(data, (-1, 6))
        expected = np.reshape(expected, (-1, 6))
        new_coord = random.randint(0, 6 + new_col - 4)
        others = list(range(new_coord)) + list(range(new_coord + 4, 6 + new_col))
        random.shuffle(others)
        new_score = others[0]
        new_cid = others[1]
        new_data = np.full((data.shape[0], data.shape[1] + new_col), -1.0)
        new_expected = np.full((expected.shape[0], expected.shape[1] + new_col), -1.0)
        new_data[:, new_coord:new_coord+4] = data[:, coord:coord+4]
        new_data[:, new_score] = data[:, score]
        new_data[:, new_cid] = data[:, cid]
        new_expected[:, new_coord:new_coord+4] = expected[:, coord:coord+4]
        new_expected[:, new_score] = expected[:, score]
        new_expected[:, new_cid] = expected[:, cid]
        return new_data, new_expected, new_coord, new_score, new_cid

    # manually set up test cases: rows are (id, score, x1, y1, x2, y2)
    boxes = [[0, 0.5, 0.1, 0.1, 0.2, 0.2], [1, 0.4, 0.1, 0.1, 0.2, 0.2],
             [0, 0.3, 0.1, 0.1, 0.14, 0.14], [2, 0.6, 0.5, 0.5, 0.7, 0.8]]

    # case1: forced suppression across class ids
    force = True
    thresh = 0.5
    expected = [[2, 0.6, 0.5, 0.5, 0.7, 0.8], [0, 0.5, 0.1, 0.1, 0.2, 0.2],
                [0, 0.3, 0.1, 0.1, 0.14, 0.14], [-1, -1, -1, -1, -1, -1]]
    grad = np.random.rand(4, 6)
    expected_in_grad = grad[(1, 3, 2, 0), :]
    expected_in_grad[1, :] = 0
    test_box_nms_forward(np.array(boxes), np.array(expected), force=force, thresh=thresh)
    test_box_nms_backward(np.array(boxes), grad, expected_in_grad, force=force, thresh=thresh)

    # case2: multi batch
    boxes2 = [boxes] * 3
    expected2 = [expected] * 3
    grad2 = np.array([grad.tolist()] * 3)
    expected_in_grad2 = np.array([expected_in_grad.tolist()] * 3)
    test_box_nms_forward(np.array(boxes2), np.array(expected2), force=force, thresh=thresh)
    test_box_nms_backward(np.array(boxes2), grad2, expected_in_grad2, force=force, thresh=thresh)
    # another new dim
    boxes2 = [boxes2] * 2
    expected2 = [expected2] * 2
    grad2 = np.array([grad2.tolist()] * 2)
    expected_in_grad2 = np.array([expected_in_grad2.tolist()] * 2)
    test_box_nms_forward(np.array(boxes2), np.array(expected2), force=force, thresh=thresh)
    test_box_nms_backward(np.array(boxes2), grad2, expected_in_grad2, force=force, thresh=thresh)

    # case3: lower overlap threshold suppresses more boxes
    thresh = 0.1
    boxes3 = boxes
    expected3 = [[2, 0.6, 0.5, 0.5, 0.7, 0.8], [0, 0.5, 0.1, 0.1, 0.2, 0.2],
                 [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]
    grad3 = np.random.rand(4, 6)
    expected_in_grad3 = grad3[(1, 3, 2, 0), :]
    expected_in_grad3[(1, 2), :] = 0
    test_box_nms_forward(np.array(boxes3), np.array(expected3), force=force, thresh=thresh)
    test_box_nms_backward(np.array(boxes3), grad3, expected_in_grad3, force=force, thresh=thresh)

    # case4: non-force (per-class) suppression keeps the class-1 box
    boxes4 = boxes
    force = False
    expected4 = [[2, 0.6, 0.5, 0.5, 0.7, 0.8], [0, 0.5, 0.1, 0.1, 0.2, 0.2],
                 [1, 0.4, 0.1, 0.1, 0.2, 0.2], [-1, -1, -1, -1, -1, -1]]
    grad4 = np.random.rand(4, 6)
    expected_in_grad4 = grad4[(1, 2, 3, 0), :]
    expected_in_grad4[2, :] = 0
    test_box_nms_forward(np.array(boxes4), np.array(expected4), force=force, thresh=thresh)
    test_box_nms_backward(np.array(boxes4), grad4, expected_in_grad4, force=force, thresh=thresh)

    # case5: center in/out encodings
    boxes5 = corner_to_center(np.array(boxes4))
    test_box_nms_forward(np.array(boxes5), np.array(expected4), force=force, thresh=thresh,
                         in_format='center')
    expected5 = corner_to_center(np.array(expected4))
    test_box_nms_forward(np.array(boxes4), np.array(expected5), force=force, thresh=thresh,
                         out_format='center')
    test_box_nms_forward(np.array(boxes5), np.array(expected5), force=force, thresh=thresh,
                         in_format='center', out_format='center')

    # case6: randomized column positions
    boxes6, expected6, new_coord, new_score, new_id = swap_position(np.array(boxes4),
                                                                    np.array(expected4), new_col=2)
    test_box_nms_forward(np.array(boxes6), np.array(expected6), force=force, thresh=thresh,
                         coord=new_coord, score=new_score, cid=new_id)

    # case7: no id column (cid=-1) behaves the same as force=True
    force = False
    thresh = 0.5
    test_box_nms_forward(np.array(boxes), np.array(expected), force=force, thresh=thresh, cid=-1)

    # case8: multi-batch valid_thresh + topk
    boxes8 = [[[1, 1, 0, 0, 10, 10], [1, 0.4, 0, 0, 10, 10], [1, 0.3, 0, 0, 10, 10]],
              [[2, 1, 0, 0, 10, 10], [2, 0.4, 0, 0, 10, 10], [2, 0.3, 0, 0, 10, 10]],
              [[3, 1, 0, 0, 10, 10], [3, 0.4, 0, 0, 10, 10], [3, 0.3, 0, 0, 10, 10]]]
    expected8 = [[[1, 1, 0, 0, 10, 10], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]],
                 [[2, 1, 0, 0, 10, 10], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]],
                 [[3, 1, 0, 0, 10, 10], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]]
    grad8 = np.random.rand(3, 3, 6)
    expected_in_grad8 = np.zeros((3, 3, 6))
    expected_in_grad8[(0, 1, 2), (0, 0, 0), :] = grad8[(0, 1, 2), (0, 0, 0), :]
    force = False
    thresh = 0.5
    valid = 0.5
    topk = 2
    test_box_nms_forward(np.array(boxes8), np.array(expected8), force=force, thresh=thresh, valid=valid, topk=topk)
    test_box_nms_backward(np.array(boxes8), grad8, expected_in_grad8, force=force, thresh=thresh, valid=valid, topk=topk)
def test_box_iou_op():
    """Compare mx.nd.contrib.box_iou against a numpy reference for random
    box sets with random batch ranks, in both corner and center formats."""
    def numpy_box_iou(a, b, fmt='corner'):
        # Reference IOU: broadcast every box in `a` against every box in `b`.
        def area(left, top, right, bottom):
            # Degenerate (inverted) boxes clamp to zero area.
            return np.maximum(0, right - left) * np.maximum(0, bottom - top)
        assert a.shape[-1] == 4
        assert b.shape[-1] == 4
        # Output keeps both inputs' leading dims: a.shape[:-1] x b.shape[:-1].
        oshape = a.shape[:-1] + b.shape[:-1]
        a = a.reshape((-1, 4))
        ashape = a.shape
        b = b.reshape((-1, 4))
        # Tile so row i*len(b)+j pairs a[i] with b[j].
        a = np.tile(a, reps=[1, b.shape[0]]).reshape((-1, 4))
        b = np.tile(b, reps=[ashape[0], 1]).reshape((-1, 4))
        if fmt == 'corner':
            al, at, ar, ab = np.split(a, 4, axis=-1)
            bl, bt, br, bb = np.split(b, 4, axis=-1)
        elif fmt == 'center':
            # Convert center/size to corner coordinates first.
            ax, ay, aw, ah = np.split(a, 4, axis=-1)
            bx, by, bw, bh = np.split(b, 4, axis=-1)
            al, at, ar, ab = ax - aw / 2, ay - ah / 2, ax + aw / 2, ay + ah / 2
            bl, bt, br, bb = bx - bw / 2, by - bh / 2, bx + bw / 2, by + bh / 2
        else:
            raise NotImplementedError("Fmt {} not supported".format(fmt))
        width = np.maximum(0, np.minimum(ar, br) - np.maximum(al, bl))
        height = np.maximum(0, np.minimum(ab, bb) - np.maximum(at, bt))
        intersect = width * height
        union = area(al, at, ar, ab) + area(bl, bt, br, bb) - intersect
        # Avoid 0/0 for disjoint boxes: 0 / 1e-12 == 0.
        union[np.where(intersect <= 0)] = 1e-12
        iou = intersect / union
        return iou.reshape(oshape)

    def generate_boxes(dims):
        # Random scale/offset so boxes land in varied coordinate ranges.
        s1, off1, s2, off2 = np.random.rand(4) * 100
        xy = np.random.rand(*(dims + [2])) * s1 + off1
        wh = np.random.rand(*(dims + [2])) * s2 + off2
        # Return the same boxes in both encodings.
        xywh = np.concatenate([xy, wh], axis=-1)
        ltrb = np.concatenate([xy - wh / 2, xy + wh / 2], axis=-1)
        return xywh, ltrb

    # Sweep batch ranks 1..5 for both operands.
    for ndima in range(1, 6):
        for ndimb in range(1, 6):
            dims_a = np.random.randint(low=1, high=3, size=ndima).tolist()
            dims_b = np.random.randint(low=1, high=3, size=ndimb).tolist()
            # generate left, top, right, bottom
            xywh_a, ltrb_a = generate_boxes(dims_a)
            xywh_b, ltrb_b = generate_boxes(dims_b)

            iou_np = numpy_box_iou(ltrb_a, ltrb_b, fmt='corner')
            iou_np2 = numpy_box_iou(xywh_a, xywh_b, fmt='center')
            iou_mx = mx.nd.contrib.box_iou(mx.nd.array(ltrb_a), mx.nd.array(ltrb_b), format='corner')
            iou_mx2 = mx.nd.contrib.box_iou(mx.nd.array(xywh_a), mx.nd.array(xywh_b), format='center')
            # Reference must agree with itself across formats and with mxnet.
            assert_allclose(iou_np, iou_np2, rtol=1e-5, atol=1e-5)
            assert_allclose(iou_np, iou_mx.asnumpy(), rtol=1e-5, atol=1e-5)
            assert_allclose(iou_np, iou_mx2.asnumpy(), rtol=1e-5, atol=1e-5)
def test_bipartite_matching_op():
    """Check mx.nd.contrib.bipartite_matching on two small score matrices,
    in descending and ascending match order."""
    def assert_match(inputs, x, y, threshold, is_ascend=False):
        # Convert the *original* input for each dtype.  The previous code
        # rebound ``inputs`` inside the loop, so the float32/float64 runs
        # were converted from an already-quantized float16 array.
        for dtype in ['float16', 'float32', 'float64']:
            arr = mx.nd.array(inputs, dtype=dtype)
            expected_x = np.array(x, dtype=dtype)
            expected_y = np.array(y, dtype=dtype)
            a, b = mx.nd.contrib.bipartite_matching(arr, threshold=threshold, is_ascend=is_ascend)
            assert_array_equal(a.asnumpy().astype('int64'), expected_x.astype('int64'))
            assert_array_equal(b.asnumpy().astype('int64'), expected_y.astype('int64'))
    assert_match([[0.5, 0.6], [0.1, 0.2], [0.3, 0.4]], [1, -1, 0], [2, 0], 1e-12, False)
    assert_match([[0.5, 0.6], [0.1, 0.2], [0.3, 0.4]], [-1, 0, 1], [1, 2], 100, True)
def test_multibox_target_op():
    """Check mx.nd.contrib.MultiBoxTarget on one anchor pair and one label."""
    anchors = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]], ctx=default_context()).reshape((1, -1, 4))
    cls_pred = mx.nd.array(list(range(10)), ctx=default_context()).reshape((1, -1, 2))
    # One ground-truth box: (class=1, x1, y1, x2, y2).
    label = mx.nd.array([1, 0.1, 0.1, 0.5, 0.6], ctx=default_context()).reshape((1, -1, 5))

    loc_target, loc_mask, cls_target = \
        mx.nd.contrib.MultiBoxTarget(anchors, label, cls_pred,
                                     overlap_threshold=0.5,
                                     negative_mining_ratio=3,
                                     negative_mining_thresh=0.4)
    # Hand-computed regression targets for the matched first anchor; the
    # second anchor is unmatched, so its targets and mask are zero.
    expected_loc_target = np.array([[5.0, 2.5000005, 3.4657357, 4.581454, 0., 0., 0., 0.]])
    expected_loc_mask = np.array([[1, 1, 1, 1, 0, 0, 0, 0]])
    # Class targets are label class + 1 (0 is background).
    expected_cls_target = np.array([[2, 0]])
    assert_allclose(loc_target.asnumpy(), expected_loc_target, rtol=1e-5, atol=1e-5)
    assert_array_equal(loc_mask.asnumpy(), expected_loc_mask)
    assert_array_equal(cls_target.asnumpy(), expected_cls_target)
def test_gradient_multiplier_op():
    """Check gradientmultiplier: identity forward, gradient scaled by m."""
    # We use the quadratic function in combination with gradient multiplier
    def f(x, a, b, c):
        return a * x**2 + b * x + c

    # Random quadratic coefficients and a signed multiplier.
    a = np.random.random_sample()
    b = np.random.random_sample()
    c = np.random.random_sample()
    m = np.random.random_sample() - 0.5

    data = mx.symbol.Variable('data')
    quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
    gr_q_sym = mx.sym.contrib.gradientmultiplier(quad_sym, scalar=m)

    for dtype in [np.float16, np.float32, np.float64]:
        for ndim in range(1, 6):
            shape = rand_shape_nd(ndim, 5)
            data_np = np.random.randn(*shape).astype(dtype)
            # Forward is unchanged by gradientmultiplier; backward is the
            # quadratic's derivative scaled by m.
            expected = f(data_np, a, b, c)
            backward_expected = (2 * a * data_np + b) * m

            # check imperative forward
            output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
            output = mx.nd.contrib.gradientmultiplier(output, scalar=m)
            # Looser tolerances for float16.
            assert_almost_equal(output.asnumpy(), expected,
                                rtol=1e-2 if dtype is np.float16 else 1e-5,
                                atol=1e-2 if dtype is np.float16 else 1e-5)
            # check forward
            check_symbolic_forward(gr_q_sym, [data_np], [expected],
                                   rtol=1e-2 if dtype is np.float16 else 1e-5,
                                   atol=1e-2 if dtype is np.float16 else 1e-5)
            # check backward
            check_symbolic_backward(gr_q_sym, [data_np], [np.ones(expected.shape)],
                                    [backward_expected],
                                    rtol=1e-2 if dtype is np.float16 else 1e-5,
                                    atol=1e-2 if dtype is np.float16 else 1e-5)
if __name__ == '__main__':
    # Run every test in this module through the nose test runner.
    import nose
    nose.runmodule()
|
|
"""
For compatibility with numpy libraries, pandas functions or
methods have to accept '*args' and '**kwargs' parameters to
accommodate numpy arguments that are not actually used or
respected in the pandas implementation.
To ensure that users do not abuse these parameters, validation
is performed in 'validators.py' to make sure that any extra
parameters passed correspond ONLY to those in the numpy signature.
Part of that validation includes whether or not the user attempted
to pass in non-default values for these extraneous parameters. As we
want to discourage users from relying on these parameters when calling
the pandas implementation, we want them only to pass in the default values
for these parameters.
This module provides a set of commonly used default arguments for functions
and methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
from numpy import ndarray
from pandas.util.validators import (validate_args, validate_kwargs,
validate_args_and_kwargs)
from pandas.core.common import is_integer
from pandas.compat import OrderedDict
class CompatValidator(object):
    """
    Callable that validates numpy-compat '*args'/'**kwargs' against a
    mapping of allowed parameter defaults.

    Instance-level settings ('fname', 'method', 'max_fname_arg_count')
    act as defaults and can be overridden per call.
    """

    def __init__(self, defaults, fname=None, method=None,
                 max_fname_arg_count=None):
        self.fname = fname
        self.method = method
        self.defaults = defaults
        self.max_fname_arg_count = max_fname_arg_count

    def __call__(self, args, kwargs, fname=None,
                 max_fname_arg_count=None, method=None):
        # Fall back to the instance-level configuration for anything the
        # caller did not override.
        if fname is None:
            fname = self.fname
        if max_fname_arg_count is None:
            max_fname_arg_count = self.max_fname_arg_count
        if method is None:
            method = self.method

        # Dispatch on which side(s) of the signature need checking.
        if method == 'args':
            validate_args(fname, args, max_fname_arg_count, self.defaults)
        elif method == 'kwargs':
            validate_kwargs(fname, kwargs, self.defaults)
        elif method == 'both':
            validate_args_and_kwargs(fname, args, kwargs,
                                     max_fname_arg_count,
                                     self.defaults)
        else:
            raise ValueError("invalid validation method "
                             "'{method}'".format(method=method))
# numpy's argmin/argmax accept only 'out'; one positional arg is allowed
# before it (the 'axis'-style slot handled by the *_with_skipna wrappers).
ARGMINMAX_DEFAULTS = dict(out=None)
validate_argmin = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmin',
                                  method='both', max_fname_arg_count=1)
validate_argmax = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmax',
                                  method='both', max_fname_arg_count=1)
def process_skipna(skipna, args):
    """
    Disentangle numpy's 'out' argument from pandas' 'skipna'.

    When numpy calls a pandas reduction, its 'out' value (an ndarray or
    None) can land in the 'skipna' slot.  In that case, shift the value
    onto 'args' and restore 'skipna' to its default of True.
    """
    looks_like_out = skipna is None or isinstance(skipna, ndarray)
    if looks_like_out:
        return True, (skipna,) + args
    return skipna, args
def validate_argmin_with_skipna(skipna, args, kwargs):
    """
    Validate 'Series.argmin' when invoked via the numpy library.

    The third numpy parameter is 'out' (an ndarray or None); if such a
    value landed in 'skipna', shift it into the positional arguments and
    restore the boolean default before validating.
    """
    skipna, extra_args = process_skipna(skipna, args)
    validate_argmin(extra_args, kwargs)
    return skipna
def validate_argmax_with_skipna(skipna, args, kwargs):
    """
    Validate 'Series.argmax' when invoked via the numpy library.

    The third numpy parameter is 'out' (an ndarray or None); if such a
    value landed in 'skipna', shift it into the positional arguments and
    restore the boolean default before validating.
    """
    skipna, extra_args = process_skipna(skipna, args)
    validate_argmax(extra_args, kwargs)
    return skipna
# numpy's argsort signature: axis=-1, kind='quicksort', order=None.
# OrderedDict keeps the error-message parameter order stable.
ARGSORT_DEFAULTS = OrderedDict()
ARGSORT_DEFAULTS['axis'] = -1
ARGSORT_DEFAULTS['kind'] = 'quicksort'
ARGSORT_DEFAULTS['order'] = None
validate_argsort = CompatValidator(ARGSORT_DEFAULTS, fname='argsort',
                                   max_fname_arg_count=0, method='both')
def validate_argsort_with_ascending(ascending, args, kwargs):
    """
    Validate 'Categorical.argsort' when invoked via the numpy library.

    numpy's first parameter is 'axis' (an integer or None); if such a
    value landed in 'ascending', shift it into the positional arguments
    and restore the boolean default of True before validating.
    """
    if ascending is None or is_integer(ascending):
        args = (ascending,) + args
        ascending = True
    validate_argsort(args, kwargs, max_fname_arg_count=1)
    return ascending
# numpy's clip accepts min, max and out; only 'out' must stay default.
CLIP_DEFAULTS = dict(out=None)
validate_clip = CompatValidator(CLIP_DEFAULTS, fname='clip',
                                method='both', max_fname_arg_count=3)
def validate_clip_with_axis(axis, args, kwargs):
    """
    Validate 'NDFrame.clip' when invoked via the numpy library.

    numpy's third parameter is 'out', which can be an ndarray; if such a
    value landed in 'axis', shift it into the positional arguments and
    reset 'axis' to None (its only other legal kind is an integer).
    """
    axis_holds_out = isinstance(axis, ndarray)
    if axis_holds_out:
        args = (axis,) + args
        axis = None
    validate_clip(args, kwargs)
    return axis
# numpy's compress: axis=None, out=None.
COMPRESS_DEFAULTS = OrderedDict()
COMPRESS_DEFAULTS['axis'] = None
COMPRESS_DEFAULTS['out'] = None
validate_compress = CompatValidator(COMPRESS_DEFAULTS, fname='compress',
                                    method='both', max_fname_arg_count=1)

# Cumulative reductions (cumsum/cumprod/...) share dtype/out defaults.
CUM_FUNC_DEFAULTS = OrderedDict()
CUM_FUNC_DEFAULTS['dtype'] = None
CUM_FUNC_DEFAULTS['out'] = None
validate_cum_func = CompatValidator(CUM_FUNC_DEFAULTS, method='kwargs')
validate_cumsum = CompatValidator(CUM_FUNC_DEFAULTS, fname='cumsum',
                                  method='both', max_fname_arg_count=1)

# Logical reductions (any/all) accept only 'out'.
LOGICAL_FUNC_DEFAULTS = dict(out=None)
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method='kwargs')

# min/max accept only 'out'.
MINMAX_DEFAULTS = dict(out=None)
validate_min = CompatValidator(MINMAX_DEFAULTS, fname='min',
                               method='both', max_fname_arg_count=1)
validate_max = CompatValidator(MINMAX_DEFAULTS, fname='max',
                               method='both', max_fname_arg_count=1)

# numpy's reshape takes order='C'.
RESHAPE_DEFAULTS = dict(order='C')
validate_reshape = CompatValidator(RESHAPE_DEFAULTS, fname='reshape',
                                   method='both', max_fname_arg_count=1)

# numpy's repeat takes axis=None.
REPEAT_DEFAULTS = dict(axis=None)
validate_repeat = CompatValidator(REPEAT_DEFAULTS, fname='repeat',
                                  method='both', max_fname_arg_count=1)

# numpy's round takes out=None (plus the 'decimals' positional).
ROUND_DEFAULTS = dict(out=None)
validate_round = CompatValidator(ROUND_DEFAULTS, fname='round',
                                 method='both', max_fname_arg_count=1)

# numpy's sort signature: axis=-1, kind='quicksort', order=None.
SORT_DEFAULTS = OrderedDict()
SORT_DEFAULTS['axis'] = -1
SORT_DEFAULTS['kind'] = 'quicksort'
SORT_DEFAULTS['order'] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname='sort',
                                method='kwargs')
# Statistical reductions (sum/mean/...) share dtype/out defaults.
STAT_FUNC_DEFAULTS = OrderedDict()
STAT_FUNC_DEFAULTS['dtype'] = None
STAT_FUNC_DEFAULTS['out'] = None
validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS,
                                     method='kwargs')
# BUG FIX: fname was 'sort' (copy-paste from validate_sort above), so
# validation errors for 'sum' blamed the wrong function in their message.
validate_sum = CompatValidator(STAT_FUNC_DEFAULTS, fname='sum',
                               method='both', max_fname_arg_count=1)
validate_mean = CompatValidator(STAT_FUNC_DEFAULTS, fname='mean',
                                method='both', max_fname_arg_count=1)
# Reductions taking a ddof argument (std/var/sem) share dtype/out defaults.
STAT_DDOF_FUNC_DEFAULTS = OrderedDict()
STAT_DDOF_FUNC_DEFAULTS['dtype'] = None
STAT_DDOF_FUNC_DEFAULTS['out'] = None
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS,
                                          method='kwargs')

# Currently, numpy (v1.11) has backwards compatibility checks
# in place so that this 'kwargs' parameter is technically
# unnecessary, but in the long-run, this will be needed.
SQUEEZE_DEFAULTS = dict(axis=None)
validate_squeeze = CompatValidator(SQUEEZE_DEFAULTS, fname='squeeze',
                                   method='kwargs')

# numpy's take: out=None, mode='raise'.
TAKE_DEFAULTS = OrderedDict()
TAKE_DEFAULTS['out'] = None
TAKE_DEFAULTS['mode'] = 'raise'
validate_take = CompatValidator(TAKE_DEFAULTS, fname='take',
                                method='kwargs')
def validate_take_with_convert(convert, args, kwargs):
    """
    Validate 'take' when invoked via the numpy library.

    numpy's third parameter is 'axis' (an ndarray or None); if such a
    value landed in 'convert', shift it into the positional arguments
    and restore the boolean default of True before validating.
    """
    if convert is None or isinstance(convert, ndarray):
        args = (convert,) + args
        convert = True
    validate_take(args, kwargs, max_fname_arg_count=3, method='both')
    return convert
# numpy's transpose takes axes=None; pandas accepts no positional args here.
TRANSPOSE_DEFAULTS = dict(axes=None)
validate_transpose = CompatValidator(TRANSPOSE_DEFAULTS, fname='transpose',
                                     method='both', max_fname_arg_count=0)
def validate_transpose_for_generic(inst, kwargs):
    """
    Validate a 'transpose' call on a generic NDFrame instance,
    re-raising with the instance's class name attached where relevant.
    """
    try:
        validate_transpose(tuple(), kwargs)
    except ValueError as e:
        msg = str(e)
        # the Panel class actually relies on the 'axes' parameter if called
        # via the 'numpy' library, so make sure the error is specific about
        # saying that the parameter is not supported for particular
        # implementations of 'transpose'
        if "the 'axes' parameter is not supported" in msg:
            klass = type(inst).__name__
            msg += " for {klass} instances".format(klass=klass)
        raise ValueError(msg)
|
|
from django.conf import settings
from django.core import mail
from django.core.mail import EmailMessage
from django.utils import translation
import mock
from nose.tools import eq_
import mkt.users.notifications
from amo.tests import TestCase
from mkt.site.mail import send_mail, send_html_mail_jinja
from mkt.site.models import FakeEmail
from mkt.users.models import UserNotification, UserProfile
from mkt.zadmin.models import set_config
class TestSendMail(TestCase):
fixtures = ['base/users']
    def setUp(self):
        """Snapshot settings.EMAIL_BLACKLIST so tests can mutate it freely."""
        self._email_blacklist = list(getattr(settings, 'EMAIL_BLACKLIST', []))
    def tearDown(self):
        """Restore the locale and the EMAIL_BLACKLIST snapshot from setUp."""
        translation.activate('en_US')
        settings.EMAIL_BLACKLIST = self._email_blacklist
    def test_send_string(self):
        """A bare string recipient_list (instead of a list) is rejected."""
        to = 'f@f.com'
        with self.assertRaises(ValueError):
            send_mail('subj', 'body', recipient_list=to)
    def test_blacklist(self):
        """Blacklisted recipients are dropped; the rest still get the mail."""
        to = 'nobody@mozilla.org'
        to2 = 'somebody@mozilla.org'
        settings.EMAIL_BLACKLIST = (to,)
        success = send_mail('test subject', 'test body',
                            recipient_list=[to, to2], fail_silently=False)

        assert success
        eq_(len(mail.outbox), 1)
        eq_(mail.outbox[0].to, [to2])
    def test_blacklist_flag(self):
        """use_blacklist=True explicitly enables blacklist filtering."""
        to = 'nobody@mozilla.org'
        to2 = 'somebody@mozilla.org'
        settings.EMAIL_BLACKLIST = (to,)
        success = send_mail('test subject', 'test body',
                            recipient_list=[to, to2], fail_silently=False,
                            use_blacklist=True)

        assert success
        eq_(len(mail.outbox), 1)
        eq_(mail.outbox[0].to, [to2])
    def test_blacklist_flag_off(self):
        """use_blacklist=False delivers even to blacklisted recipients."""
        to = 'nobody@mozilla.org'
        to2 = 'somebody@mozilla.org'
        settings.EMAIL_BLACKLIST = (to,)
        success = send_mail('test subject', 'test_blacklist_flag_off',
                            recipient_list=[to, to2], fail_silently=False,
                            use_blacklist=False)

        assert success
        eq_(len(mail.outbox), 1)
        eq_(mail.outbox[0].to, [to, to2])
        assert 'test_blacklist_flag_off' in mail.outbox[0].body
    @mock.patch.object(settings, 'SEND_REAL_EMAIL', False)
    def test_real_list(self):
        """With SEND_REAL_EMAIL off, only whitelisted addresses get real
        mail; everyone else's copy is captured as a FakeEmail record."""
        to = 'nooobody@mozilla.org'
        to2 = 'somebody@mozilla.org'
        to3 = 'reallywantsemail@mozilla.org'
        set_config('real_email_whitelist', to3)
        success = send_mail('test subject', 'test_real_list',
                            recipient_list=[to, to2, to3], fail_silently=False)

        assert success
        eq_(len(mail.outbox), 1)
        eq_(mail.outbox[0].to, [to3])
        assert 'test_real_list' in mail.outbox[0].body
        eq_(FakeEmail.objects.count(), 1)  # Only one mail, two recipients.
        fakeemail = FakeEmail.objects.get()
        eq_(fakeemail.message.endswith('test_real_list'), True)
        assert ('To: %s, %s' % (to, to2)) in fakeemail.message
    def test_user_setting_default(self):
        """Mail is sent when no UserNotification row exists and the
        notification's default is checked."""
        user = UserProfile.objects.all()[0]
        to = user.email

        # Confirm there's nothing in the DB and we're using the default
        eq_(UserNotification.objects.count(), 0)

        # Make sure that this is True by default
        setting = mkt.users.notifications.NOTIFICATIONS_BY_SHORT['reply']
        eq_(setting.default_checked, True)

        success = send_mail('test subject', 'test body', perm_setting='reply',
                            recipient_list=[to], fail_silently=False)

        assert success, "Email wasn't sent"
        eq_(len(mail.outbox), 1)
    def test_user_setting_checked(self):
        """Mail is sent when the user's stored notification is enabled."""
        user = UserProfile.objects.all()[0]
        to = user.email
        n = mkt.users.notifications.NOTIFICATIONS_BY_SHORT['reply']
        UserNotification.objects.get_or_create(notification_id=n.id,
                                               user=user, enabled=True)

        # Confirm we're reading from the database
        eq_(UserNotification.objects.filter(notification_id=n.id).count(), 1)

        success = send_mail('test subject', 'test body', perm_setting='reply',
                            recipient_list=[to], fail_silently=False)

        assert success, "Email wasn't sent"
        eq_(len(mail.outbox), 1)
    def test_user_mandatory(self):
        """Mandatory notifications are always delivered."""
        user = UserProfile.objects.all()[0]
        to = user.email
        n = mkt.users.notifications.NOTIFICATIONS_BY_SHORT['individual_contact']

        UserNotification.objects.get_or_create(notification_id=n.id,
                                               user=user, enabled=True)

        assert n.mandatory, "Notification isn't mandatory"

        success = send_mail('test subject', 'test body', perm_setting=n,
                            recipient_list=[to], fail_silently=False)

        assert success, "Email wasn't sent"
        eq_(len(mail.outbox), 1)
    def test_user_setting_unchecked(self):
        """No mail is sent when the user has disabled the notification."""
        user = UserProfile.objects.all()[0]
        to = user.email
        n = mkt.users.notifications.NOTIFICATIONS_BY_SHORT['reply']
        UserNotification.objects.get_or_create(notification_id=n.id,
                                               user=user, enabled=False)

        # Confirm we're reading from the database.
        eq_(UserNotification.objects.filter(notification_id=n.id).count(), 1)

        success = send_mail('test subject', 'test body', perm_setting='reply',
                            recipient_list=[to], fail_silently=False)

        assert success, "Email wasn't sent"
        eq_(len(mail.outbox), 0)
    @mock.patch.object(settings, 'EMAIL_BLACKLIST', ())
    def test_success_real_mail(self):
        # With an empty blacklist a real mail lands in Django's outbox,
        # with the subject/body appearing at the start of the message.
        assert send_mail('test subject', 'test body',
                         recipient_list=['nobody@mozilla.org'],
                         fail_silently=False)
        eq_(len(mail.outbox), 1)
        eq_(mail.outbox[0].subject.find('test subject'), 0)
        eq_(mail.outbox[0].body.find('test body'), 0)
    @mock.patch.object(settings, 'EMAIL_BLACKLIST', ())
    @mock.patch.object(settings, 'SEND_REAL_EMAIL', False)
    def test_success_fake_mail(self):
        # With SEND_REAL_EMAIL off nothing reaches the outbox; the message
        # is recorded as a FakeEmail row instead.
        assert send_mail('test subject', 'test body',
                         recipient_list=['nobody@mozilla.org'],
                         fail_silently=False)
        eq_(len(mail.outbox), 0)
        eq_(FakeEmail.objects.count(), 1)
        eq_(FakeEmail.objects.get().message.endswith('test body'), True)
    def test_send_html_mail_jinja(self):
        # Renders both templates and sends a single multipart/alternative
        # mail: text/plain part first, text/html part second.
        emails = ['omg@org.yes']
        subject = u'Test'
        html_template = 'purchase/receipt.html'
        text_template = 'purchase/receipt.ltxt'
        send_html_mail_jinja(subject, html_template, text_template,
                             context={}, recipient_list=emails,
                             from_email=settings.NOBODY_EMAIL,
                             use_blacklist=False,
                             perm_setting='individual_contact',
                             headers={'Reply-To': settings.EDITORS_EMAIL})

        msg = mail.outbox[0]
        message = msg.message()

        # Envelope/headers come through unchanged.
        eq_(msg.to, emails)
        eq_(msg.subject, subject)
        eq_(msg.from_email, settings.NOBODY_EMAIL)
        eq_(msg.extra_headers['Reply-To'], settings.EDITORS_EMAIL)

        eq_(message.is_multipart(), True)
        eq_(message.get_content_type(), 'multipart/alternative')
        eq_(message.get_default_type(), 'text/plain')

        payload = message.get_payload()
        eq_(payload[0].get_content_type(), 'text/plain')
        eq_(payload[1].get_content_type(), 'text/html')

        # The text part must not contain markup; the HTML part must.
        message1 = payload[0].as_string()
        message2 = payload[1].as_string()
        assert '<A HREF' not in message1, 'text-only email contained HTML!'
        assert '<A HREF' in message2, 'HTML email did not contain HTML!'
    def test_send_multilines_subjects(self):
        # Newlines in the subject are collapsed to a single space
        # (header-injection guard), so 'test\nsubject' -> 'test subject'.
        send_mail('test\nsubject', 'test body', from_email='a@example.com',
                  recipient_list=['b@example.com'])
        eq_('test subject', mail.outbox[0].subject, 'Subject not stripped')
def make_backend_class(self, error_order):
throw_error = iter(error_order)
def make_backend(*args, **kwargs):
if next(throw_error):
class BrokenMessage(object):
def __init__(*args, **kwargs):
pass
def send(*args, **kwargs):
raise RuntimeError('uh oh')
def attach_alternative(*args, **kwargs):
pass
backend = BrokenMessage()
else:
backend = EmailMessage(*args, **kwargs)
return backend
return make_backend
    @mock.patch('mkt.site.tasks.EmailMessage')
    def test_async_will_retry(self, backend):
        # Backend fails twice, then succeeds. A synchronous send surfaces
        # the error immediately; an async send retries until it succeeds.
        # NOTE(review): 'async' as a keyword argument is Python 2 only —
        # it is a reserved word in Python 3.7+.
        backend.side_effect = self.make_backend_class([True, True, False])
        with self.assertRaises(RuntimeError):
            send_mail('test subject',
                      'test body',
                      recipient_list=['somebody@mozilla.org'])
        assert send_mail('test subject',
                         'test body',
                         async=True,
                         recipient_list=['somebody@mozilla.org'])
    @mock.patch('mkt.site.tasks.EmailMessage')
    def test_async_will_stop_retrying(self, backend):
        # Backend fails more times than max_retries allows, so even the
        # async path eventually re-raises the backend error.
        # NOTE(review): 'async' as a keyword argument is Python 2 only.
        backend.side_effect = self.make_backend_class([True, True])
        with self.assertRaises(RuntimeError):
            send_mail('test subject',
                      'test body',
                      async=True,
                      max_retries=1,
                      recipient_list=['somebody@mozilla.org'])
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
# Re-export private helpers from core TensorFlow so the static_* functions
# below can reuse them. These are internal APIs and may change between
# TensorFlow versions.
_state_size_with_prefix = rnn_cell_impl._state_size_with_prefix
_infer_state_dtype = rnn._infer_state_dtype
_reverse_seq = rnn._reverse_seq
_rnn_step = rnn._rnn_step
# pylint: enable=protected-access
def static_rnn(cell, inputs, initial_state=None, dtype=None,
               sequence_length=None, scope=None):
  """Creates a recurrent neural network specified by RNNCell `cell`.

  The simplest form of RNN network generated is:

  ```python
    state = cell.zero_state(...)
    outputs = []
    for input_ in inputs:
      output, state = cell(input_, state)
      outputs.append(output)
    return (outputs, state)
  ```

  However, a few other options are available:

  An initial state can be provided.
  If the sequence_length vector is provided, dynamic calculation is performed.
  This method of calculation does not compute the RNN steps past the maximum
  sequence length of the minibatch (thus saving computational time),
  and properly propagates the state at an example's sequence length
  to the final state output.

  The dynamic calculation performed is, at time `t` for batch row `b`,

  ```python
    (output, state)(b, t) =
      (t >= sequence_length(b))
        ? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
        : cell(input(b, t), state(b, t - 1))
  ```

  Args:
    cell: An instance of RNNCell.
    inputs: A length T list of inputs, each a `Tensor` of shape
      `[batch_size, input_size]`, or a nested tuple of such elements.
    initial_state: (optional) An initial state for the RNN.
      If `cell.state_size` is an integer, this must be
      a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
      If `cell.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell.state_size`.
    dtype: (optional) The data type for the initial state and expected output.
      Required if initial_state is not provided or RNN state has a
      heterogeneous dtype.
    sequence_length: Specifies the length of each sequence in inputs.
      An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.
    scope: VariableScope for the created subgraph; defaults to "rnn".

  Returns:
    A pair (outputs, state) where:

    - outputs is a length T list of outputs (one for each input), or a nested
      tuple of such elements.
    - state is the final state

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If `inputs` is `None` or an empty list, or if the input depth
      (column size) cannot be inferred from inputs via shape inference.
  """
  if not isinstance(cell, core_rnn_cell.RNNCell):
    raise TypeError("cell must be an instance of RNNCell")
  if not nest.is_sequence(inputs):
    raise TypeError("inputs must be a sequence")
  if not inputs:
    raise ValueError("inputs must not be empty")

  outputs = []
  # Create a new scope in which the caching device is either
  # determined by the parent scope, or is set to place the cached
  # Variable using the same placement as for the rest of the RNN.
  with vs.variable_scope(scope or "rnn") as varscope:
    if varscope.caching_device is None:
      varscope.set_caching_device(lambda op: op.device)

    # Obtain the first sequence of the input (descend through any nesting).
    first_input = inputs
    while nest.is_sequence(first_input):
      first_input = first_input[0]

    # Temporarily avoid EmbeddingWrapper and seq2seq badness
    # TODO(lukaszkaiser): remove EmbeddingWrapper
    if first_input.get_shape().ndims != 1:

      input_shape = first_input.get_shape().with_rank_at_least(2)
      fixed_batch_size = input_shape[0]

      # Verify every flat input shares the same static batch size and that
      # all non-batch dimensions are statically known.
      flat_inputs = nest.flatten(inputs)
      for flat_input in flat_inputs:
        input_shape = flat_input.get_shape().with_rank_at_least(2)
        batch_size, input_size = input_shape[0], input_shape[1:]
        fixed_batch_size.merge_with(batch_size)
        for i, size in enumerate(input_size):
          if size.value is None:
            raise ValueError(
                "Input size (dimension %d of inputs) must be accessible via "
                "shape inference, but saw value None." % i)
    else:
      fixed_batch_size = first_input.get_shape().with_rank_at_least(1)[0]

    # Fall back to a dynamic batch size when it is not statically known.
    if fixed_batch_size.value:
      batch_size = fixed_batch_size.value
    else:
      batch_size = array_ops.shape(first_input)[0]
    if initial_state is not None:
      state = initial_state
    else:
      if not dtype:
        raise ValueError("If no initial_state is provided, "
                         "dtype must be specified")
      state = cell.zero_state(batch_size, dtype)

    if sequence_length is not None:  # Prepare variables
      sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if sequence_length.get_shape().ndims not in (None, 1):
        raise ValueError(
            "sequence_length must be a vector of length batch_size")

      def _create_zero_output(output_size):
        # convert int to TensorShape if necessary
        size = _state_size_with_prefix(output_size, prefix=[batch_size])
        output = array_ops.zeros(
            array_ops.stack(size), _infer_state_dtype(dtype, state))
        shape = _state_size_with_prefix(
            output_size, prefix=[fixed_batch_size.value])
        output.set_shape(tensor_shape.TensorShape(shape))
        return output

      # Zero outputs are emitted for rows past their sequence length.
      output_size = cell.output_size
      flat_output_size = nest.flatten(output_size)
      flat_zero_output = tuple(
          _create_zero_output(size) for size in flat_output_size)
      zero_output = nest.pack_sequence_as(structure=output_size,
                                          flat_sequence=flat_zero_output)

      sequence_length = math_ops.to_int32(sequence_length)
      min_sequence_length = math_ops.reduce_min(sequence_length)
      max_sequence_length = math_ops.reduce_max(sequence_length)

    # Unroll the RNN: one cell call per time step, sharing variables
    # after the first step.
    for time, input_ in enumerate(inputs):
      if time > 0: varscope.reuse_variables()
      # pylint: disable=cell-var-from-loop
      call_cell = lambda: cell(input_, state)
      # pylint: enable=cell-var-from-loop
      if sequence_length is not None:
        (output, state) = _rnn_step(
            time=time,
            sequence_length=sequence_length,
            min_sequence_length=min_sequence_length,
            max_sequence_length=max_sequence_length,
            zero_output=zero_output,
            state=state,
            call_cell=call_cell,
            state_size=cell.state_size)
      else:
        (output, state) = call_cell()

      outputs.append(output)

    return (outputs, state)
def static_state_saving_rnn(cell, inputs, state_saver, state_name,
                            sequence_length=None, scope=None):
  """RNN that accepts a state saver for time-truncated RNN calculation.

  Args:
    cell: An instance of `RNNCell`.
    inputs: A length T list of inputs, each a `Tensor` of shape
      `[batch_size, input_size]`.
    state_saver: A state saver object with methods `state` and `save_state`.
    state_name: Python string or tuple of strings.  The name to use with the
      state_saver. If the cell returns tuples of states (i.e.,
      `cell.state_size` is a tuple) then `state_name` should be a tuple of
      strings having the same length as `cell.state_size`.  Otherwise it should
      be a single string.
    sequence_length: (optional) An int32/int64 vector size [batch_size].
      See the documentation for rnn() for more details about sequence_length.
    scope: VariableScope for the created subgraph; defaults to "rnn".

  Returns:
    A pair (outputs, state) where:
      outputs is a length T list of outputs (one for each input)
      states is the final state

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If `inputs` is `None` or an empty list, or if the arity and
      type of `state_name` does not match that of `cell.state_size`.
  """
  state_size = cell.state_size
  state_is_tuple = nest.is_sequence(state_size)
  state_name_tuple = nest.is_sequence(state_name)

  # state_name must mirror the structure of cell.state_size so each piece
  # of state can be looked up/saved by name.
  if state_is_tuple != state_name_tuple:
    raise ValueError(
        "state_name should be the same type as cell.state_size.  "
        "state_name: %s, cell.state_size: %s"
        % (str(state_name), str(state_size)))

  if state_is_tuple:
    state_name_flat = nest.flatten(state_name)
    state_size_flat = nest.flatten(state_size)
    if len(state_name_flat) != len(state_size_flat):
      raise ValueError("#elems(state_name) != #elems(state_size): %d vs. %d"
                       % (len(state_name_flat), len(state_size_flat)))

    initial_state = nest.pack_sequence_as(
        structure=state_size,
        flat_sequence=[state_saver.state(s) for s in state_name_flat])
  else:
    initial_state = state_saver.state(state_name)

  (outputs, state) = static_rnn(cell, inputs, initial_state=initial_state,
                                sequence_length=sequence_length, scope=scope)

  # Build the save_state ops for the final state.
  if state_is_tuple:
    flat_state = nest.flatten(state)
    state_name = nest.flatten(state_name)
    save_state = [state_saver.save_state(name, substate)
                  for name, substate in zip(state_name, flat_state)]
  else:
    save_state = [state_saver.save_state(state_name, state)]

  # Tie the save_state ops to the last output via a control dependency so
  # that fetching any output also persists the final state.
  with ops.control_dependencies(save_state):
    last_output = outputs[-1]
    flat_last_output = nest.flatten(last_output)
    flat_last_output = [
        array_ops.identity(output) for output in flat_last_output]
    outputs[-1] = nest.pack_sequence_as(structure=last_output,
                                        flat_sequence=flat_last_output)

  return (outputs, state)
def static_bidirectional_rnn(cell_fw, cell_bw, inputs,
                             initial_state_fw=None, initial_state_bw=None,
                             dtype=None, sequence_length=None, scope=None):
  """Creates a bidirectional recurrent neural network.

  Similar to the unidirectional case above (rnn) but takes input and builds
  independent forward and backward RNNs with the final forward and backward
  outputs depth-concatenated, such that the output will have the format
  [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
  forward and backward cell must match. The initial state for both directions
  is zero by default (but can be set optionally) and no intermediate states are
  ever returned -- the network is fully unrolled for the given (passed in)
  length(s) of the sequence(s) or completely unrolled if length(s) is not
  given.

  Args:
    cell_fw: An instance of RNNCell, to be used for forward direction.
    cell_bw: An instance of RNNCell, to be used for backward direction.
    inputs: A length T list of inputs, each a tensor of shape
      [batch_size, input_size], or a nested tuple of such elements.
    initial_state_fw: (optional) An initial state for the forward RNN.
      This must be a tensor of appropriate type and shape
      `[batch_size, cell_fw.state_size]`.
      If `cell_fw.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
    initial_state_bw: (optional) Same as for `initial_state_fw`, but using
      the corresponding properties of `cell_bw`.
    dtype: (optional) The data type for the initial state.  Required if
      either of the initial states are not provided.
    sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences.
    scope: VariableScope for the created subgraph; defaults to
      "bidirectional_rnn"

  Returns:
    A tuple (outputs, output_state_fw, output_state_bw) where:
      outputs is a length `T` list of outputs (one for each input), which
        are depth-concatenated forward and backward outputs.
      output_state_fw is the final state of the forward rnn.
      output_state_bw is the final state of the backward rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
    ValueError: If inputs is None or an empty list.
  """
  if not isinstance(cell_fw, core_rnn_cell.RNNCell):
    raise TypeError("cell_fw must be an instance of RNNCell")
  if not isinstance(cell_bw, core_rnn_cell.RNNCell):
    raise TypeError("cell_bw must be an instance of RNNCell")
  if not nest.is_sequence(inputs):
    raise TypeError("inputs must be a sequence")
  if not inputs:
    raise ValueError("inputs must not be empty")

  with vs.variable_scope(scope or "bidirectional_rnn"):
    # Forward direction
    with vs.variable_scope("fw") as fw_scope:
      output_fw, output_state_fw = static_rnn(
          cell_fw, inputs, initial_state_fw, dtype,
          sequence_length, scope=fw_scope)

    # Backward direction: reverse the inputs (respecting sequence_length),
    # run a normal RNN, then reverse its outputs back to forward time order.
    with vs.variable_scope("bw") as bw_scope:
      reversed_inputs = _reverse_seq(inputs, sequence_length)
      tmp, output_state_bw = static_rnn(
          cell_bw, reversed_inputs, initial_state_bw,
          dtype, sequence_length, scope=bw_scope)

  output_bw = _reverse_seq(tmp, sequence_length)
  # Concat each of the forward/backward outputs along the feature axis.
  flat_output_fw = nest.flatten(output_fw)
  flat_output_bw = nest.flatten(output_bw)

  flat_outputs = tuple(
      array_ops.concat([fw, bw], 1)
      for fw, bw in zip(flat_output_fw, flat_output_bw))

  outputs = nest.pack_sequence_as(structure=output_fw,
                                  flat_sequence=flat_outputs)

  return (outputs, output_state_fw, output_state_bw)
|
|
# -*- coding: utf-8 -*-
"""
celery.worker.buckets
~~~~~~~~~~~~~~~~~~~~~
This module implements the rate limiting of tasks,
by having a token bucket queue for each task type.
When a task is allowed to be processed it's moved
over to the ``ready_queue``
The :mod:`celery.worker.mediator` is then responsible
for moving tasks from the ``ready_queue`` to the worker pool.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import with_statement
import threading
from collections import deque
from time import time, sleep
from Queue import Queue, Empty
from kombu.utils.limits import TokenBucket
from celery.utils import timeutils
from celery.utils.compat import zip_longest, chain_from_iterable
class RateLimitExceeded(Exception):
    """The token bucket's rate limit has been exceeded."""
class TaskBucket(object):
    """This is a collection of token buckets, each task type having
    its own token bucket.  If the task type doesn't have a rate limit,
    it will have a plain :class:`~Queue.Queue` object instead of a
    :class:`TokenBucketQueue`.

    The :meth:`put` operation forwards the task to its appropriate bucket,
    while the :meth:`get` operation iterates over the buckets and retrieves
    the first available item.

    Say we have three types of tasks in the registry: `twitter.update`,
    `feed.refresh` and `video.compress`, the TaskBucket will consist
    of the following items::

        {"twitter.update": TokenBucketQueue(fill_rate=300),
         "feed.refresh": Queue(),
         "video.compress": TokenBucketQueue(fill_rate=2)}

    The get operation will iterate over these until one of the buckets
    is able to return an item.  The underlying datastructure is a `dict`,
    so the order is ignored here.

    :param task_registry: The task registry used to get the task
        type class for a given task name.

    """

    def __init__(self, task_registry):
        self.task_registry = task_registry
        self.buckets = {}
        self.init_with_registry()
        # Items already allowed through are cached here so one busy bucket
        # cannot starve the others (see :meth:`_get`).
        self.immediate = deque()
        self.mutex = threading.Lock()
        self.not_empty = threading.Condition(self.mutex)

    def put(self, request):
        """Put a :class:`~celery.worker.job.Request` into
        the appropriate bucket."""
        if request.name not in self.buckets:
            self.add_bucket_for_type(request.name)
        self.buckets[request.name].put_nowait(request)
        with self.mutex:
            self.not_empty.notify()
    put_nowait = put

    def _get_immediate(self):
        # Pop a cached ready item; normalize IndexError to Queue.Empty
        # so callers only have to handle one exception type.
        try:
            return self.immediate.popleft()
        except IndexError:
            raise Empty()

    def _get(self):
        # If the first bucket is always returning items, we would never
        # get to fetch items from the other buckets. So we always iterate over
        # all the buckets and put any ready items into a queue called
        # "immediate". This queue is always checked for cached items first.
        try:
            return 0, self._get_immediate()
        except Empty:
            pass
        remaining_times = []
        for bucket in self.buckets.values():
            remaining = bucket.expected_time()
            if not remaining:
                try:
                    # Just put any ready items into the immediate queue.
                    self.immediate.append(bucket.get_nowait())
                except Empty:
                    pass
                except RateLimitExceeded:
                    remaining_times.append(bucket.expected_time())
            else:
                remaining_times.append(remaining)

        # Try the immediate queue again.
        try:
            return 0, self._get_immediate()
        except Empty:
            if not remaining_times:
                # No items in any of the buckets.
                raise

            # There's items, but have to wait before we can retrieve them,
            # return the shortest remaining time.
            return min(remaining_times), None

    def get(self, block=True, timeout=None):
        """Retrieve the task from the first available bucket.

        Available as in, there is an item in the queue and you can
        consume tokens from it.

        """
        tstart = time()
        get = self._get
        not_empty = self.not_empty

        with not_empty:
            while 1:
                try:
                    remaining_time, item = get()
                except Empty:
                    if not block or (timeout and time() - tstart > timeout):
                        raise
                    not_empty.wait(timeout)
                    continue
                if remaining_time:
                    # All buckets are rate limited; sleep until a token
                    # should be available (bounded by the caller's timeout).
                    if not block or (timeout and time() - tstart > timeout):
                        raise Empty()
                    sleep(min(remaining_time, timeout or 1))
                else:
                    return item

    def get_nowait(self):
        return self.get(block=False)

    def init_with_registry(self):
        """Initialize with buckets for all the task types in the registry."""
        for task in self.task_registry.keys():
            self.add_bucket_for_type(task)

    def refresh(self):
        """Refresh rate limits for all task types in the registry."""
        for task in self.task_registry.keys():
            self.update_bucket_for_type(task)

    def get_bucket_for_type(self, task_name):
        """Get the bucket for a particular task type."""
        if task_name not in self.buckets:
            return self.add_bucket_for_type(task_name)
        return self.buckets[task_name]

    def _get_queue_for_type(self, task_name):
        # Unwrap a TokenBucketQueue down to its plain backing queue.
        bucket = self.buckets[task_name]
        if isinstance(bucket, TokenBucketQueue):
            return bucket.queue
        return bucket

    def update_bucket_for_type(self, task_name):
        """(Re)build the bucket for `task_name` from its current rate
        limit, reusing the existing backing queue so pending tasks
        are not lost."""
        task_type = self.task_registry[task_name]
        rate_limit = getattr(task_type, "rate_limit", None)
        rate_limit = timeutils.rate(rate_limit)
        if task_name in self.buckets:
            task_queue = self._get_queue_for_type(task_name)
        else:
            task_queue = FastQueue()

        if rate_limit:
            task_queue = TokenBucketQueue(rate_limit, queue=task_queue)

        self.buckets[task_name] = task_queue
        return task_queue

    def add_bucket_for_type(self, task_name):
        """Add a bucket for a task type.

        Will read the tasks rate limit and create a :class:`TokenBucketQueue`
        if it has one.  If the task doesn't have a rate limit
        :class:`FastQueue` will be used instead.

        """
        if task_name not in self.buckets:
            return self.update_bucket_for_type(task_name)

    def qsize(self):
        """Get the total size of all the queues."""
        return sum(bucket.qsize() for bucket in self.buckets.values())

    def empty(self):
        """Returns :const:`True` if all of the buckets are empty."""
        return all(bucket.empty() for bucket in self.buckets.values())

    def clear(self):
        """Delete the data in all of the buckets."""
        for bucket in self.buckets.values():
            bucket.clear()

    @property
    def items(self):
        """Flattens the data in all of the buckets into a single list."""
        # for queues with contents [(1, 2), (3, 4), (5, 6), (7, 8)]
        # zips and flattens to [1, 3, 5, 7, 2, 4, 6, 8]
        return filter(None, chain_from_iterable(zip_longest(*[bucket.items
                                for bucket in self.buckets.values()])))
class FastQueue(Queue):
    """A plain :class:`Queue.Queue` that also speaks the extra
    :class:`TokenBucketQueue` interface, used for task types that
    have no rate limit."""

    def expected_time(self, tokens=1):
        """A plain queue never rate-limits, so items are always ready."""
        return 0

    def wait(self, block=True):
        """No token to wait for; simply return the next item."""
        return self.get(block=block)

    def clear(self):
        """Discard every buffered item."""
        return self.queue.clear()

    @property
    def items(self):
        """The underlying deque of buffered items."""
        return self.queue
class TokenBucketQueue(object):
    """Queue with rate limited get operations.

    This uses the token bucket algorithm to rate limit the queue on get
    operations.

    :param fill_rate: The rate in tokens/second that the bucket will
      be refilled.
    :keyword capacity: Maximum number of tokens in the bucket.
      Default is 1.

    """
    RateLimitExceeded = RateLimitExceeded

    def __init__(self, fill_rate, queue=None, capacity=1):
        # The kombu TokenBucket does the actual token accounting.
        self._bucket = TokenBucket(fill_rate, capacity)
        self.queue = queue
        if not self.queue:
            self.queue = Queue()

    def put(self, item, block=True):
        """Put an item onto the queue."""
        # Note: puts are never rate limited, only gets are.
        self.queue.put(item, block=block)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        :raises Queue.Full: If a free slot is not immediately available.

        """
        return self.put(item, block=False)

    def get(self, block=True):
        """Remove and return an item from the queue.

        :raises RateLimitExceeded: If a token could not be consumed from the
                                   token bucket (consuming from the queue
                                   too fast).
        :raises Queue.Empty: If an item is not immediately available.

        """
        # Pre-2.5-style conditional: blocking get if `block` else get_nowait.
        get = block and self.queue.get or self.queue.get_nowait

        if not block and not self.items:
            raise Empty()

        # Consume a token *before* reading from the queue; if the bucket is
        # empty the caller must wait and retry.
        if not self._bucket.can_consume(1):
            raise RateLimitExceeded()

        return get()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        :raises RateLimitExceeded: If a token could not be consumed from the
                                   token bucket (consuming from the queue
                                   too fast).
        :raises Queue.Empty: If an item is not immediately available.

        """
        return self.get(block=False)

    def qsize(self):
        """Returns the size of the queue."""
        return self.queue.qsize()

    def empty(self):
        """Returns :const:`True` if the queue is empty."""
        return self.queue.empty()

    def clear(self):
        """Delete all data in the queue."""
        # `items` is the backing deque, so this bypasses the Queue locks.
        return self.items.clear()

    def wait(self, block=False):
        """Wait until a token can be retrieved from the bucket and return
        the next item."""
        get = self.get
        expected_time = self.expected_time
        while 1:
            remaining = expected_time()
            if not remaining:
                return get(block=block)
            sleep(remaining)

    def expected_time(self, tokens=1):
        """Returns the expected time in seconds of when a new token should be
        available."""
        if not self.items:
            return 0
        return self._bucket.expected_time(tokens)

    @property
    def items(self):
        """Underlying data.  Do not modify."""
        return self.queue.queue
|
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
"""
Convert the fluid program to distributed data-parallelism programs.
"""
import paddle.fluid.io as io
from paddle.fluid.communicator import Communicator
from paddle.fluid.framework import default_main_program
from paddle.fluid.framework import default_startup_program
from paddle.fluid.framework import Program
from paddle.fluid.compiler import CompiledProgram
from paddle.fluid.executor import Executor
from paddle.fluid.parallel_executor import ParallelExecutor
from paddle.fluid.optimizer import Optimizer
from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspiler as OriginTranspiler
from paddle.fluid.transpiler.geo_sgd_transpiler import GeoSgdTranspiler
from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig
from paddle.fluid.incubate.fleet.base.fleet_base import DistributedOptimizer
from paddle.fluid.incubate.fleet.base.fleet_base import Fleet
from paddle.fluid.incubate.fleet.base.fleet_base import Mode
from paddle.fluid.incubate.fleet.base.role_maker import MPISymetricRoleMaker
class DistributedTranspiler(Fleet):
"""
A subclass for compatibility with fluid.transpiler.DistributeTranspiler.
"""
def __init__(self):
super(DistributedTranspiler, self).__init__(Mode.TRANSPILER)
self._transpile_config = None
self._transpiler = None
self._origin_program = None
self.startup_program = None
self.main_program = None
self._communicator = None
def init_worker(self):
"""
`init_worker` has many many functions to do before training,
first, wait for all parameter servers launch completely.
second, run executor to initialize startup program
third, wait for all worker initialize completely.
Returns:
None
"""
# if MPISymetricRoleMaker is defined
# we suppose a user wants to submit job on mpi cluster
if isinstance(self._role_maker, MPISymetricRoleMaker):
# check whether server has been initialized
from paddle.fluid.transpiler.details.checkport import wait_server_ready
wait_server_ready(fleet.server_endpoints(to_string=False))
if not self._transpile_config.sync_mode:
if self._transpile_config.geo_sgd_mode:
self._communicator = Communicator(
self.main_program, self.vars_info,
fleet.worker_num(),
self._transpile_config.geo_sgd_need_push_nums)
else:
self._communicator = Communicator(self.main_program)
if not self._communicator.is_running():
self._communicator.start()
else:
warnings.warn("communicator has been initialized, skip")
def init_server(self, model_dir=None):
"""
`init_server` has many many functions to do before start pserver,
first, run executor to initialize startup program,
second, if the `model_dir` is not empty, it will load parameters from it for increment training.
Args:
model_dir(str): The directory path.
Returns:
None
"""
if not self.startup_program:
raise ValueError(
"startup_program is None, need invoke DistributedOptimizer.minimize first"
)
self._executor.run(self.startup_program)
if model_dir:
if not os.path.isdir(model_dir):
raise ValueError("There is no directory named '%s'", model_dir)
io.load_persistables(self._executor, model_dir, self.main_program)
def run_server(self):
"""
`run_server` execute executor to start pserver main program.
Returns:
None
"""
if not self.main_program:
raise ValueError(
"main_program is None, need invoke DistributedOptimizer.minimize first"
)
self._executor.run(self.main_program)
def stop_worker(self):
"""
Close this executor.
For the distributed training, this method would free the resource on PServers related to
the current Trainer.
Returns:
None
"""
if not self._transpile_config.sync_mode:
self._communicator.stop()
self._executor.close()
if isinstance(self._role_maker, MPISymetricRoleMaker):
self._role_maker._finalize()
def distributed_optimizer(self, optimizer, strategy=None):
"""
Optimizer for distributed training.
For the distributed training, this method would rebuild a new instance of DistributedOptimizer.
Which has basic Optimizer function and special features for distributed training.
Args:
optimizer(Optimizer): The executor to run for init server.
strategy(DistributeTranspilerConfig): Extra properties for distributed optimizer.
Returns:
TranspilerOptimizer: subclass of DistributedOptimizer.
"""
if not isinstance(optimizer, Optimizer):
raise ValueError("optimizer must be an instance of Optimizer")
self._optimizer = TranspilerOptimizer(optimizer, strategy)
return self._optimizer
def save_inference_model(self,
executor,
dirname,
feeded_var_names,
target_vars,
main_program=None,
export_for_deployment=True):
"""
Prune the given `main_program` to build a new program especially for inference,
and then save it and all related parameters to given `dirname` by the `executor`.
"""
if isinstance(executor, ParallelExecutor):
raise TypeError(
"in fleet.save_inference_model() function, executor must be as Executor type, ParallelExecutor is not allowed"
)
if not isinstance(executor, Executor):
raise TypeError(
"in fleet.save_inference_model() function, executor must be as Executor type"
)
if main_program is not None:
if isinstance(main_program, CompiledProgram):
raise TypeError(
"in fleet.save_inference_model() function, main_program must be as Program type, CompiledProgram is not allowed"
)
io.save_inference_model(dirname, feeded_var_names, target_vars,
executor, main_program, None, None,
export_for_deployment)
else:
io.save_inference_model(dirname, feeded_var_names, target_vars,
executor, self._origin_program, None, None,
export_for_deployment, True)
model_basename = "__model__"
model_filename = os.path.join(dirname, model_basename)
with open(model_filename, "rb") as f:
program_desc_str = f.read()
program = Program.parse_from_string(program_desc_str)
program._copy_dist_param_info_from(self.main_program)
self.save_persistables(executor, dirname, program)
def save_persistables(self, executor, dirname, main_program=None):
"""
This function filters out all variables with `persistable==True` from the
give `main_program` and then saves these variables to the folder `dirname`
or file `filename`.
The `dirname` is used to specify the folder where persistable variables
are going to be saved. If you would like to save variables in separate
files, set `filename` None; if you would like to save all variables in a
single file, use `filename` to specify the file name.
"""
if isinstance(executor, ParallelExecutor):
raise TypeError(
"in fleet.save_persistables() function, executor must be as Executor type, ParallelExecutor is not allowed"
)
if not isinstance(executor, Executor):
raise TypeError(
"in fleet.save_persistables() function, executor must be as Executor type"
)
if main_program is None:
main_program = self.main_program
if isinstance(main_program, CompiledProgram):
raise TypeError(
"in fleet.save_persistables() function, main_program must be as Program type, CompiledProgram is not allowed"
)
if not main_program._is_distributed:
raise ValueError(
"main_program is for local, may not use fleet.save_persistables")
io.save_persistables(executor, dirname, main_program, None)
    def _transpile(self, config):
        """Transpile the default main program for distributed execution.

        Workers get a trainer program; servers get pserver main/startup
        programs. Side effects: sets self._origin_program,
        self._transpile_config, self._transpiler, self.main_program,
        self.startup_program and (geo-sgd only) self.vars_info.
        """
        if not isinstance(config, DistributeTranspilerConfig):
            raise TypeError(
                "config must be an instance of DistributeTranspilerConfig")
        # Async mode requires splitting send/recv ops at runtime.
        if not config.sync_mode:
            config.runtime_split_send_recv = True
        # _origin_program is a deep copy for default_main_program, for inference
        self._origin_program = default_main_program().clone(for_test=False)
        self._transpile_config = config
        # Geo-SGD uses its own transpiler implementation.
        if config.geo_sgd_mode:
            self._transpiler = GeoSgdTranspiler(config)
        else:
            self._transpiler = OriginTranspiler(config)
        if self.is_worker():
            self._transpiler.transpile(
                trainer_id=fleet.worker_index(),
                pservers=fleet.server_endpoints(to_string=True),
                trainers=fleet.worker_num(),
                sync_mode=config.sync_mode)
            # Under MPI role management, ports are coordinated externally,
            # so the trainer must not wait on them.
            if isinstance(self._role_maker, MPISymetricRoleMaker):
                config.wait_port = False
            self.main_program = self._transpiler.get_trainer_program(
                wait_port=config.wait_port)
            self.startup_program = default_startup_program()
            if self._transpile_config.geo_sgd_mode:
                self.vars_info = self._transpiler._get_vars_info()
                self.startup_program = self._transpiler.trainer_startup_program
        else:
            # Server side: transpile with this server's own endpoint, then
            # fetch the matching pserver main/startup programs.
            self._transpiler.transpile(
                trainer_id=fleet.worker_index(),
                pservers=fleet.server_endpoints(to_string=True),
                trainers=fleet.worker_num(),
                sync_mode=config.sync_mode,
                current_endpoint=self.server_endpoints()[self.server_index()])
            self.main_program, self.startup_program = \
                self._transpiler.get_pserver_programs(
                    self.server_endpoints()[self.server_index()])
# Module-level singleton: the shared fleet instance that TranspilerOptimizer
# (below) and user code operate on.
fleet = DistributedTranspiler()
class TranspilerOptimizer(DistributedOptimizer):
    """
    A DistributeTranspiler-backed wrapper around a paddle.fluid optimizer.

    Users hand a regular ``paddle.fluid.optimizer`` instance to this class;
    calling :meth:`minimize` runs the wrapped optimizer and then transpiles
    the program for distributed execution. The transpiled programs are
    stored on the global ``fleet`` singleton, which holds the shared state
    for the current distributed training run.

    Args:
        optimizer(Optimizer): subclass of Optimizer.
        strategy(DistributeTranspilerConfig): instance of DistributeTranspilerConfig.
    Returns:
        None
    """

    def __init__(self, optimizer, strategy=None):
        super(TranspilerOptimizer, self).__init__(optimizer, strategy)
        # Normalize the strategy: default config when omitted, type-checked
        # otherwise.
        if not strategy:
            self._strategy = DistributeTranspilerConfig()
        elif isinstance(strategy, DistributeTranspilerConfig):
            self._strategy = strategy
        else:
            raise TypeError(
                "In {} mode, strategy must be an instance of DistributeTranspilerConfig".
                format(fleet._mode))

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        First part of `minimize`: append backward ops for the current
        program via auto-differentiation.

        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables to update.
            no_grad_set (set|None): set of Variables should be ignored.
            callbacks (list|None): list of callables to run when appending backward
                operator for one parameter.
        Return:
            list: list of (param, grad) pair, grad is the output of backward.
        """
        # Pure delegation to the wrapped optimizer.
        return self._optimizer.backward(loss, startup_program, parameter_list,
                                        no_grad_set, callbacks)

    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`: append optimization operators for the
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.
        Returns:
            list: A list of operators appended to the current program.
        """
        return self._optimizer.apply_gradients(params_grads)

    def minimize(self,
                 loss,
                 scopes=None,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        Add operations to minimize `loss` by updating `parameter_list`,
        then transpile the resulting program for distributed execution.

        Args:
            loss (Variable): loss variable to run optimizations.
            scopes (None): TranspilerOptimizer doesn't need scope parameter.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables to update.
            no_grad_set (set|None): set of Variables should be ignored.
        Returns:
            tuple: (optimize_ops, params_grads) which are, list of operators appended;
            and list of (param, grad) Variables pair for optimization.
        """
        # Lists are rejected: only a single loss / program is supported.
        if isinstance(loss, list):
            raise TypeError(
                "DistributedTranspiler's minimize can not accept loss with list")
        if isinstance(startup_program, list):
            raise TypeError(
                "DistributedTranspiler's minimize can not accept program with list"
            )
        ops, grads = self._optimizer.minimize(loss, startup_program,
                                              parameter_list, no_grad_set)
        # Transpile AFTER the optimizer has appended its ops, so the
        # distributed program includes them.
        fleet._transpile(config=self._strategy)
        return ops, grads
|
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import sys
import numpy as np
from ...core.parameterization.parameterized import Parameterized
from paramz.caching import Cache_this
from .kernel_slice_operations import KernCallsViaSlicerMeta
from functools import reduce
import six
@six.add_metaclass(KernCallsViaSlicerMeta)
class Kern(Parameterized):
    """
    Abstract base class for GPy kernels (covariance functions).

    Subclasses implement K / Kdiag and the gradient methods; this base
    class provides input-dimension slicing, psi-statistic plumbing for
    variational inference, and operator overloading (+, *, **) for
    combining kernels.
    """
    #===========================================================================
    # This adds input slice support. The rather ugly code for slicing can be
    # found in kernel_slice_operations
    # __metaclass__ is ignored in Python 3 - needs to be put in the class definition
    # __metaclass__ = KernCallsViaSlicerMeta
    # Here, we use the Python module six to support Py3 and Py2 simultaneously
    #===========================================================================
    # Subclasses with a GPU implementation override this with True.
    _support_GPU = False
    def __init__(self, input_dim, active_dims, name, useGPU=False, *a, **kw):
        """
        The base class for a kernel: a positive definite function
        which forms of a covariance function (kernel).
        input_dim:
            is the number of dimensions to work on. Make sure to give the
            tight dimensionality of inputs.
            You most likely want this to be the integer telling the number of
            input dimensions of the kernel.
        active_dims:
            is the active_dimensions of inputs X we will work on.
            All kernels will get sliced Xes as inputs, if _all_dims_active is not None
            Only positive integers are allowed in active_dims!
            if active_dims is None, slicing is switched off and all X will be passed through as given.
        :param int input_dim: the number of input dimensions to the function
        :param array-like|None active_dims: list of indices on which dimensions this kernel works on, or none if no slicing
        Do not instantiate.
        """
        super(Kern, self).__init__(name=name, *a, **kw)
        self.input_dim = int(input_dim)
        if active_dims is None:
            # Default: operate on the first input_dim columns of X.
            active_dims = np.arange(input_dim, dtype=np.int_)
        self.active_dims = np.atleast_1d(np.asarray(active_dims, np.int_))
        self._all_dims_active = np.atleast_1d(self.active_dims).astype(np.int_)
        assert self.active_dims.size == self.input_dim, "input_dim={} does not match len(active_dim)={}".format(self.input_dim, self._all_dims_active.size)
        self._sliced_X = 0
        # GPU is only used when both the class supports it and the caller asked.
        self.useGPU = self._support_GPU and useGPU
        from .psi_comp import PSICOMP_GH
        # Gauss-Hermite quadrature engine for the psi statistics below.
        self.psicomp = PSICOMP_GH()
    def _save_to_input_dict(self):
        # Serialize the constructor arguments shared by all kernels.
        input_dict = {}
        input_dict["input_dim"] = self.input_dim
        if isinstance(self.active_dims, np.ndarray):
            input_dict["active_dims"] = self.active_dims.tolist()
        else:
            input_dict["active_dims"] = self.active_dims
        input_dict["name"] = self.name
        input_dict["useGPU"] = self.useGPU
        return input_dict
    def to_dict(self):
        # Subclasses must provide their own serialization.
        raise NotImplementedError
    @staticmethod
    def from_dict(input_dict):
        """
        Instantiate an object of a derived class using the information
        in input_dict (built by the to_dict method of the derived class).
        More specifically, after reading the derived class from input_dict,
        it calls the method _build_from_input_dict of the derived class.
        Note: This method should not be overridden in the derived class. In case
        it is needed, please override _build_from_input_dict instead.
        :param dict input_dict: Dictionary with all the information needed to
           instantiate the object.
        """
        import copy
        # Deep-copy so the pops below do not mutate the caller's dict.
        input_dict = copy.deepcopy(input_dict)
        kernel_class = input_dict.pop('class')
        input_dict["name"] = str(input_dict["name"])
        import GPy
        # NOTE(review): eval() on the serialized class path executes
        # arbitrary code -- only load dictionaries from trusted sources.
        kernel_class = eval(kernel_class)
        return kernel_class._build_from_input_dict(kernel_class, input_dict)
    @staticmethod
    def _build_from_input_dict(kernel_class, input_dict):
        # Default reconstruction: pass the saved dict as keyword arguments.
        return kernel_class(**input_dict)
    def __setstate__(self, state):
        # Rebuild _all_dims_active for unpickled objects before restoring
        # the rest of the state.
        self._all_dims_active = np.arange(0, max(state['active_dims']) + 1)
        super(Kern, self).__setstate__(state)
    @property
    def _effective_input_dim(self):
        # Number of input columns this kernel actually looks at.
        return np.size(self._all_dims_active)
    @Cache_this(limit=3)
    def _slice_X(self, X):
        # Select the active columns; the float cast can fail for object
        # arrays (e.g. mixed-type inputs), in which case slice only.
        try:
            return X[:, self._all_dims_active].astype('float')
        except:
            return X[:, self._all_dims_active]
    def _project_dim(self, dim):
        # Map a global input dimension to this kernel's local index,
        # or None if the kernel does not use that dimension.
        try:
            return np.where(self._all_dims_active == dim)[0][0]
        except:
            return None
    def K(self, X, X2):
        """
        Compute the kernel function.
        .. math::
            K_{ij} = k(X_i, X_j)
        :param X: the first set of inputs to the kernel
        :param X2: (optional) the second set of arguments to the kernel. If X2
                   is None, this is passed through to the 'part' object, which
                   handles this as X2 == X.
        """
        raise NotImplementedError
    def Kdiag(self, X):
        """
        The diagonal of the kernel matrix K
        .. math::
            Kdiag_{i} = k(X_i, X_i)
        """
        raise NotImplementedError
    def psi0(self, Z, variational_posterior):
        r"""
        .. math::
            \psi_0 = \sum_{i=0}^{n}E_{q(X)}[k(X_i, X_i)]
        """
        return self.psicomp.psicomputations(self, Z, variational_posterior)[0]
    def psi1(self, Z, variational_posterior):
        r"""
        .. math::
            \psi_1^{n,m} = E_{q(X)}[k(X_n, Z_m)]
        """
        return self.psicomp.psicomputations(self, Z, variational_posterior)[1]
    def psi2(self, Z, variational_posterior):
        r"""
        .. math::
            \psi_2^{m,m'} = \sum_{i=0}^{n}E_{q(X)}[ k(Z_m, X_i) k(X_i, Z_{m'})]
        """
        return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=False)[2]
    def psi2n(self, Z, variational_posterior):
        r"""
        .. math::
            \psi_2^{n,m,m'} = E_{q(X)}[ k(Z_m, X_n) k(X_n, Z_{m'})]
        Thus, we do not sum out n, compared to psi2
        """
        return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=True)[2]
    def gradients_X(self, dL_dK, X, X2):
        r"""
        .. math::
            \frac{\partial L}{\partial X} = \frac{\partial L}{\partial K}\frac{\partial K}{\partial X}
        """
        raise NotImplementedError
    def gradients_X_X2(self, dL_dK, X, X2):
        # Gradients w.r.t. both inputs; the X2 gradient reuses gradients_X
        # with the roles (and dL_dK) transposed.
        return self.gradients_X(dL_dK, X, X2), self.gradients_X(dL_dK.T, X2, X)
    def gradients_XX(self, dL_dK, X, X2, cov=True):
        r"""
        .. math::
            \frac{\partial^2 L}{\partial X\partial X_2} = \frac{\partial L}{\partial K}\frac{\partial^2 K}{\partial X\partial X_2}
        """
        raise NotImplementedError("This is the second derivative of K wrt X and X2, and not implemented for this kernel")
    def gradients_XX_diag(self, dL_dKdiag, X, cov=True):
        """
        The diagonal of the second derivative w.r.t. X and X2
        """
        raise NotImplementedError("This is the diagonal of the second derivative of K wrt X and X2, and not implemented for this kernel")
    def gradients_X_diag(self, dL_dKdiag, X):
        """
        The diagonal of the derivative w.r.t. X
        """
        raise NotImplementedError
    def update_gradients_diag(self, dL_dKdiag, X):
        """ update the gradients of all parameters when using only the diagonal elements of the covariance matrix"""
        raise NotImplementedError
    def update_gradients_full(self, dL_dK, X, X2):
        """Set the gradients of all parameters when doing full (N) inference."""
        raise NotImplementedError
    def reset_gradients(self):
        raise NotImplementedError
    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        r"""
        Set the gradients of all parameters when doing inference with
        uncertain inputs, using expectations of the kernel.
        The essential maths is
        .. math::
            \frac{\partial L}{\partial \theta_i} & = \frac{\partial L}{\partial \psi_0}\frac{\partial \psi_0}{\partial \theta_i}\\
                & \quad + \frac{\partial L}{\partial \psi_1}\frac{\partial \psi_1}{\partial \theta_i}\\
                & \quad + \frac{\partial L}{\partial \psi_2}\frac{\partial \psi_2}{\partial \theta_i}
        Thus, we push the different derivatives through the gradients of the psi
        statistics. Be sure to set the gradients for all kernel
        parameters here.
        """
        dtheta = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[0]
        self.gradient[:] = dtheta
    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
                                 psi0=None, psi1=None, psi2=None):
        """
        Returns the derivative of the objective wrt Z, using the chain rule
        through the expectation variables.
        """
        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[1]
    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """
        Compute the gradients wrt the parameters of the variational
        distribution q(X), chain-ruling via the expectations of the kernel
        """
        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2:]
    def plot(self, x=None, fignum=None, ax=None, title=None, plot_limits=None, resolution=None, **mpl_kwargs):
        """
        plot this kernel.
        :param x: the value to use for the other kernel argument (kernels are a function of two variables!)
        :param fignum: figure number of the plot
        :param ax: matplotlib axis to plot on
        :param title: the matplotlib title
        :param plot_limits: the range over which to plot the kernel
        :resolution: the resolution of the lines used in plotting
        :mpl_kwargs avalid keyword arguments to pass through to matplotlib (e.g. lw=7)
        """
        # matplotlib must already have been imported by the caller/package.
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ...plotting.matplot_dep import kernel_plots
        kernel_plots.plot(self, x, fignum, ax, title, plot_limits, resolution, **mpl_kwargs)
    def input_sensitivity(self, summarize=True):
        """
        Returns the sensitivity for each dimension of this kernel.
        This is an arbitrary measurement based on the parameters
        of the kernel per dimension and scaling in general.
        Use this as relative measurement, not for absolute comparison between
        kernels.
        """
        # Base class default: no sensitivity information.
        return np.zeros(self.input_dim)
    def get_most_significant_input_dimensions(self, which_indices=None):
        """
        Determine which dimensions should be plotted
        Returns the top three most significant input dimensions
        if less than three dimensions, the non existing dimensions are
        labeled as None, so for a 1 dimensional input this returns
        (0, None, None).
        :param which_indices: force the indices to be the given indices.
        :type which_indices: int or tuple(int,int) or tuple(int,int,int)
        """
        if which_indices is None:
            # Most sensitive dimensions first; keep at most three.
            which_indices = np.argsort(self.input_sensitivity())[::-1][:3]
        try:
            input_1, input_2, input_3 = which_indices
        except ValueError:
            # which indices is tuple or int
            try:
                input_3 = None
                input_1, input_2 = which_indices
            except TypeError:
                # which_indices is an int
                input_1, input_2 = which_indices, None
            except ValueError:
                # which_indices was a list or array like with only one int
                input_1, input_2 = which_indices[0], None
        return input_1, input_2, input_3
    def __add__(self, other):
        """ Overloading of the '+' operator. for more control, see self.add """
        return self.add(other)
    def __iadd__(self, other):
        # '+=' behaves like '+': returns a new Add kernel.
        return self.add(other)
    def add(self, other, name='sum'):
        """
        Add another kernel to this one.
        :param other: the other kernel to be added
        :type other: GPy.kern
        """
        assert isinstance(other, Kern), "only kernels can be added to kernels..."
        from .add import Add
        return Add([self, other], name=name)
    def __mul__(self, other):
        """ Here we overload the '*' operator. See self.prod for more information"""
        return self.prod(other)
    def __imul__(self, other):
        """ Here we overload the '*' operator. See self.prod for more information"""
        return self.prod(other)
    def __pow__(self, other):
        """
        Shortcut for tensor `prod`.
        """
        assert np.all(self._all_dims_active == range(self.input_dim)), "Can only use kernels, which have their input_dims defined from 0"
        assert np.all(other._all_dims_active == range(other.input_dim)), "Can only use kernels, which have their input_dims defined from 0"
        # NOTE: mutates `other` so its dimensions follow self's in the
        # tensor-product input space.
        other._all_dims_active += self.input_dim
        return self.prod(other)
    def prod(self, other, name='mul'):
        """
        Multiply two kernels (either on the same space, or on the tensor
        product of the input space).
        :param other: the other kernel to be added
        :type other: GPy.kern
        """
        assert isinstance(other, Kern), "only kernels can be multiplied to kernels..."
        from .prod import Prod
        # kernels = []
        # if isinstance(self, Prod): kernels.extend(self.parameters)
        # else: kernels.append(self)
        # if isinstance(other, Prod): kernels.extend(other.parameters)
        # else: kernels.append(other)
        return Prod([self, other], name)
    def _check_input_dim(self, X):
        # Without explicit active_dims, X must have exactly input_dim columns.
        assert X.shape[1] == self.input_dim, "{} did not specify active_dims and X has wrong shape: X_dim={}, whereas input_dim={}".format(self.name, X.shape[1], self.input_dim)
    def _check_active_dims(self, X):
        # X only needs enough columns to cover the active dimensions.
        assert X.shape[1] >= len(self._all_dims_active), "At least {} dimensional X needed, X.shape={!s}".format(len(self._all_dims_active), X.shape)
class CombinationKernel(Kern):
    """
    Abstract super class for combination kernels.
    A combination kernel combines (a list of) kernels and works on those.
    Examples are the HierarchicalKernel or Add and Prod kernels.
    """
    # NOTE(review): extra_dims=[] is a mutable default argument; it is never
    # mutated here (immediately converted via np.asarray), but callers
    # should not rely on its identity.
    def __init__(self, kernels, name, extra_dims=[], link_parameters=True):
        """
        Abstract super class for combination kernels.
        A combination kernel combines (a list of) kernels and works on those.
        Examples are the HierarchicalKernel or Add and Prod kernels.
        :param list kernels: List of kernels to combine (can be only one element)
        :param str name: name of the combination kernel
        :param array-like extra_dims: if needed extra dimensions for the combination kernel to work on
        """
        assert all([isinstance(k, Kern) for k in kernels])
        extra_dims = np.asarray(extra_dims, dtype=int)
        # Union of all sub-kernels' active dimensions plus any extra dims.
        active_dims = reduce(np.union1d, (np.r_[x.active_dims] for x in kernels), extra_dims)
        input_dim = active_dims.size
        # initialize the kernel with the full input_dim
        super(CombinationKernel, self).__init__(input_dim, active_dims, name)
        # Highest dimension index any sub-kernel touches, plus one.
        effective_input_dim = reduce(max, (k._all_dims_active.max() for k in kernels)) + 1
        self._all_dims_active = np.array(np.concatenate((np.arange(effective_input_dim), extra_dims if extra_dims is not None else [])), dtype=int)
        self.extra_dims = extra_dims
        if link_parameters:
            # Register sub-kernels as parameters of this combination.
            self.link_parameters(*kernels)
    def _save_to_input_dict(self):
        # Extend the base serialization with one entry per sub-kernel.
        input_dict = super(CombinationKernel, self)._save_to_input_dict()
        input_dict["parts"] = {}
        for ii in range(len(self.parts)):
            input_dict["parts"][ii] = self.parts[ii].to_dict()
        return input_dict
    @staticmethod
    def _build_from_input_dict(kernel_class, input_dict):
        # Rebuild each sub-kernel first, then the combination around them.
        parts = input_dict.pop('parts', None)
        subkerns = []
        for pp in parts:
            subkerns.append(Kern.from_dict(parts[pp]))
        return kernel_class(subkerns)
    @property
    def parts(self):
        # The combined sub-kernels are stored as this object's parameters.
        return self.parameters
    # NOTE(review): method name is misspelled ("ative" for "active") but is
    # kept as-is since external callers may depend on it.
    def _set_all_dims_ative(self):
        self._all_dims_active = np.atleast_1d(self.active_dims).astype(int)
    def input_sensitivity(self, summarize=True):
        """
        If summarize is true, we want to get the summarized view of the sensitivities,
        otherwise put everything into an array with shape (#kernels, input_dim)
        in the order of appearance of the kernels in the parameterized object.
        """
        if not summarize:
            num_params = [0]
            parts = []
            # Collect every leaf (non-combination) kernel in traversal order;
            # num_params is a one-element list so the closure can mutate it.
            def sum_params(x):
                if (not isinstance(x, CombinationKernel)) and isinstance(x, Kern):
                    num_params[0] += 1
                    parts.append(x)
            self.traverse(sum_params)
            i_s = np.zeros((num_params[0], self.input_dim))
            from operator import setitem
            # One row per leaf kernel, filled only at its active dimensions.
            [setitem(i_s, (i, k._all_dims_active), k.input_sensitivity(summarize)) for i, k in enumerate(parts)]
            return i_s
        else:
            raise NotImplementedError("Choose the kernel you want to get the sensitivity for. "
                                      "You need to override the default behaviour for getting "
                                      "the input sensitivity to be able to get the input sensitivity. "
                                      "For sum kernel it is the sum of all sensitivities, "
                                      "TODO: product kernel? Other kernels?, also "
                                      "TODO: shall we return all the sensitivities here in the combination "
                                      "kernel? So we can combine them however we want? "
                                      "This could lead to just plot all the sensitivities here...")
    def _check_active_dims(self, X):
        # Combination kernels delegate dimension checks to their parts.
        return
    def _check_input_dim(self, X):
        # As combination kernels cannot always know, what their inner kernels have as input dims, the check will be done inside them, respectively
        return
|
|
# logisticpredict.py
#
# Based on logisticleave1out.py which was based on
# parallel_crossvalidate.py from the paceofchange repo.
#
# Reads all volumes meeting a given set of criteria,
# and uses a leave-one-out strategy to distinguish
# reviewed volumes (class 1) from random
# (class 0). In cases where an author occurs more
# than once in the dataset, it leaves out all
# volumes by that author whenever making a prediction
# about one of them.
#
# This version differs from parallel_crossvalidate
# in using a different metadata structure, and
# especially a multi-tag folksonomic system for
# identifying the positive and negative classes.
# In other words, volumes aren't explicitly divided
# into positive and negative classes in the metadata;
# they can carry any number of tags; you decide, when
# you run the model, which tags you want to group as
# positive and negative classes. The code will ensure
# that no volumes with a positive tag are present in
# the negative class, and also ensure that the two
# groups have roughly similar distributions across
# the timeline.
#
# The main class here is create_model().
# It accepts three parameters, each of which is a tuple
# that gets unpacked.
#
# There are unfortunately a lot of details in those tuples,
# because I've written this script to be very flexible and
# permit a lot of different kinds of modeling.
#
# paths unpacks into
# sourcefolder, extension, metadatapath, outputpath, vocabpath
# where
# sourcefolder is the directory with data files
# extension is the extension those files end with
# metadatapath is the path to a metadata csv
# outputpath is the path to a csv of results to be written
# and vocabpath is the path to a file of words to be used
# as features for all models
#
# exclusions unpacks into
# excludeif, excludeifnot, excludebelow, excludeabove, sizecap
# where
# all the "excludes" are dictionaries pairing a key (the name of a metadata
# column) with a value that should be excluded -- if it's present,
# absent, lower than this, or higher than this.
# sizecap limits the number of vols in the positive class; randomly
# sampled if greater.
#
# classifyconditions unpacks into:
# positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions
# where
# positive_tags is a list of tags to be included in the positive class
# negative_tags is a list of tags to be selected for the negative class
# (unless volume also has a positive_tag, and note that the negative class
# is always selected to match the chronological distribution of the positive
# as closely as possible)
# datetype is the date column to be used for chronological distribution
# numfeatures can be used to limit the features in this model to top N;
# it is in practice not functional right now because I'm using all
# features in the vocab file -- originally selected by doc frequency in
# the whole corpus
# regularization is a constant to be handed to scikit-learn (I'm using one
# established in previous experiments on a different corpus)
# and testconditions ... is complex.
#
# The variable testconditions will be a set of tags. It may contain tags for classes
# that are to be treated as a test set. Positive volumes will be assigned to
# this set if they have no positive tags that are *not* in testconditions.
# A corresponding group of negative volumes will at the same time
# be assigned. It can also contain two integers to be interpreted as dates, a
# pastthreshold and futurethreshold. Dates outside these thresholds will not
# be used for training. If date thresholds are provided they must be provided
# as a pair to clarify which one is the pastthreshold and which the future.
# If you're only wanting to exclude volumes in the future, provide a past
# threshold like "1."
# All of these conditions exclude volumes from the training set, and place them
# in a set that is used only for testing. But also note that these
# exclusions are always IN ADDITION TO leave-one-out crossvalidation by author.
# In other words, if an author w/ multiple volumes has only some of them excluded
# from training by testconditions, it is *still* the case that the author will never
# be in a training set when her own volumes are being predicted.
import numpy as np
import pandas as pd
import csv, os, random, sys, datetime
from collections import Counter
from multiprocessing import Pool
from sklearn.linear_model import LogisticRegression
# from scipy.stats import norm
import matplotlib.pyplot as plt
import modelingprocess
import metafilter
import metautils
usedate = False
# Leave this flag false unless you plan major
# surgery to reactivate the currently-deprecated
# option to use "date" as a predictive feature.
# There are three different date types we can use.
# Choose which here.
# FUNCTIONS GET DEFINED BELOW.
def get_features(wordcounts, wordlist):
    '''Return a float vector of counts for each word in wordlist,
    in wordlist order; words absent from wordcounts get zero.
    '''
    counts = [wordcounts[w] if w in wordcounts else 0 for w in wordlist]
    return np.array(counts, dtype=float)
# In an earlier version of this script, we sometimes used
# "publication date" as a feature, to see what would happen.
# In the current version, we don't. Some of the functions
# and features remain, but they are deprecated. E.g.:
def get_features_with_date(wordcounts, wordlist, date, totalcount):
    '''Deprecated. Return word counts normalized by totalcount, with the
    (unnormalized) publication date appended as a final feature.
    '''
    counts = [wordcounts[w] if w in wordcounts else 0 for w in wordlist]
    # Small epsilon keeps an empty volume from dividing by zero.
    scaled = np.asarray(counts, dtype=float) / (totalcount + 0.0001)
    return np.append(scaled, date)
def sliceframe(dataframe, yvals, excludedrows, testrow):
    '''Split dataframe/yvals into a training set (with excludedrows
    removed) plus the single test row at positional index testrow.

    NB: excludedrows must already be sorted in descending order, or the
    positional deletions below will remove the wrong y values.
    '''
    remaining = list(yvals)
    for rowidx in excludedrows:
        del remaining[rowidx]
    trainingset = dataframe.drop(dataframe.index[excludedrows])
    testset = dataframe.iloc[testrow]
    return trainingset, np.array(remaining), testset
def normalizearray(featurearray, usedate):
    '''Normalize featurearray in place: center each column on its mean and
    scale by its standard deviation. Returns the (mutated) array plus the
    per-column means and stdevs so they can be pickled for later reuse.

    When usedate is True the final column is the date feature: it is
    centered but scaled by a fixed small stdev of 0.1 instead.
    '''
    nrows, ncols = featurearray.shape
    means = []
    stdevs = []
    datecol = ncols - 1
    for col in range(ncols):
        column = featurearray.iloc[:, col]
        mu = np.mean(column)
        sigma = np.std(column)
        means.append(mu)
        if usedate and col == datecol:
            # Date column: keep the centering but force a small fixed scale.
            print('FLAG')
            sigma = 0.1
        stdevs.append(sigma)
        featurearray.iloc[:, col] = (column - mu) / sigma
    return featurearray, means, stdevs
def binormal_select(vocablist, positivecounts, negativecounts, totalpos, totalneg, k):
    ''' Bi-normal separation (BNS) feature selection, after Forman (2003).
    Not currently in use in the main pipeline.

    Scores each word in vocablist by |ppf(tpr) - ppf(fpr)|, writes the
    ranked scores to bnsscores.tsv, and returns the k top-scoring words.

    positivecounts/negativecounts map word -> list of per-volume counts;
    totalpos/totalneg are the class sizes; missing volumes count as zero.
    '''
    # The module-level "from scipy.stats import norm" is commented out, so
    # import locally; without this the function raised NameError when called.
    from scipy.stats import norm
    all_scores = np.zeros(len(vocablist))
    for idx, word in enumerate(vocablist):
        # For each word we create a vector the length of vols in each class
        # that contains real counts, plus zeroes for all those vols not
        # represented.
        positives = np.zeros(totalpos, dtype='int64')
        if word in positivecounts:
            positives[0: len(positivecounts[word])] = positivecounts[word]
        negatives = np.zeros(totalneg, dtype='int64')
        if word in negativecounts:
            negatives[0: len(negativecounts[word])] = negativecounts[word]
        featuremean = np.mean(np.append(positives, negatives))
        # Binarize at the pooled mean, then compute the two rates.
        tp = sum(positives > featuremean)
        fp = sum(positives <= featuremean)
        tn = sum(negatives > featuremean)
        fn = sum(negatives <= featuremean)
        tpr = tp / (tp + fn)  # true positive ratio
        fpr = fp / (fp + tn)  # false positive ratio
        bns_score = abs(norm.ppf(tpr) - norm.ppf(fpr))
        # See Forman; ppf(0)/ppf(1) are infinite, so clamp degenerate scores.
        if np.isinf(bns_score) or np.isnan(bns_score):
            bns_score = 0
        all_scores[idx] = bns_score
    zipped = [x for x in zip(all_scores, vocablist)]
    zipped.sort(reverse=True)
    with open('bnsscores.tsv', mode='w', encoding='utf-8') as f:
        for score, word in zipped:
            f.write(word + '\t' + str(score) + '\n')
    return [x[1] for x in zipped[0:k]]
def confirm_testconditions(testconditions, positive_tags):
    '''Validate each element of testconditions, echoing what the special
    directives mean; exits the program on an unrecognized element.
    '''
    for condition in testconditions:
        # Empty strings, dates, and known positive tags are all legal.
        if condition == '' or condition.isdigit() or condition in positive_tags:
            continue
        if condition == 'donotmatch':
            print("You have instructed me that positive volumes matching only a")
            print("positive tag in the test-but-not-train group should not be matched")
            print("with negative volumes.")
        elif condition.startswith('limit=='):
            cap = condition.replace('limit==', '')
            print()
            print("You have instructed me to allow only "+ cap)
            print("volumes in the do-not-train set.")
            print()
        else:
            print('Illegal element in testconditions.')
            sys.exit(0)
def get_thresholds(testconditions):
    ''' Extract the (pastthreshold, futurethreshold) date pair from
    testconditions, whose elements may mix date strings and genre tags.
    A pair of thresholds is returned only when exactly two dates are
    present; otherwise the defaults (0, 3000) are returned, which no
    real volume falls outside. Do not use this code for predicting
    volumes dated after 3000 AD; at that point, the whole thing is
    deprecated.
    '''
    dates = sorted(int(elem) for elem in testconditions if elem.isdigit())
    if len(dates) == 2:
        return dates[0], dates[1]
    # zero or not-exactly-two dates: fall back to the permissive defaults
    return 0, 3000
def get_volume_lists(volumeIDs, volumepaths, IDsToUse):
    '''
    Filter the parallel volumeIDs/volumepaths sequences down to the IDs in
    IDsToUse, preserving order. Returns the list of (id, path) tuples and
    the matching ordered list of ids.
    '''
    volspresent = [(vid, vpath)
                   for vid, vpath in zip(volumeIDs, volumepaths)
                   if vid in IDsToUse]
    orderedIDs = [vid for vid, _ in volspresent]
    return volspresent, orderedIDs
def first_and_last(idset, metadict, datetype):
    '''Return the earliest and latest datetype values across idset.
    An empty idset yields the sentinels (3000, 0), which the caller uses
    to detect "nothing here".
    '''
    dates = [metadict[anid][datetype] for anid in idset]
    # Seeding with the sentinels reproduces the classic running-min/max
    # behavior (avoids shadowing the min/max builtins with locals).
    return min(dates + [3000]), max(dates + [0])
def describe_donttrainset(donttrainset, classdictionary, metadict, datetype):
    '''Print a summary of the positive and negative volumes that have been
    withheld from training, with their date ranges.
    '''
    positivedonts = []
    negativedonts = []
    for anid in donttrainset:
        label = classdictionary[anid]
        if label == 1:
            positivedonts.append(anid)
        elif label == 0:
            negativedonts.append(anid)
        else:
            print('Anomaly in classdictionary.')
    # first_and_last returns the sentinel (3000, 0) for an empty set, so a
    # positive earliest date means there is something to report.
    earliest, latest = first_and_last(positivedonts, metadict, datetype)
    if earliest > 0:
        print("The set of volumes not to be trained on includes " + str(len(positivedonts)))
        print("positive volumes, ranging from " + str(earliest) + " to " + str(latest) + ".")
        print()
    earliest, latest = first_and_last(negativedonts, metadict, datetype)
    if earliest > 0:
        print("And also includes " + str(len(negativedonts)))
        print("negative volumes, ranging from " + str(earliest) + " to " + str(latest) + ".")
        print()
def record_trainflags(metadict, donttrainset):
    ''' This function records, for each volume, whether it is or is not
    to be used in training. Important to run it after add_matching_negs so
    that we know which volumes in the negative set were or weren't used
    in training.
    '''
    for docid, metadata in metadict.items():
        # trainflag: 1 = eligible for training, 0 = test-only
        metadata['trainflag'] = 0 if docid in donttrainset else 1
def make_vocablist(sourcedir, n, vocabpath):
    '''
    Compute document frequency for every word in the files under sourcedir,
    write the top n words (with their docfreqs) to vocabpath as CSV, and
    return the list of those top-n words.

    Each source file is tab-separated (word, count) lines; a word is
    counted once per file that contains it (document frequency), not by
    its within-file count. Hidden files and malformed lines are skipped.

    Fixes: removed the unused local ``count = int(fields[1])`` (its only
    effect was crashing on non-numeric counts); simplified the malformed-
    line test to ``len(fields) != 2``.
    '''
    sourcefiles = [x for x in os.listdir(sourcedir) if not x.startswith('.')]
    wordcounts = Counter()
    for afile in sourcefiles:
        path = sourcedir + afile
        with open(path, encoding='utf-8') as f:
            for line in f:
                fields = line.strip().split('\t')
                if len(fields) != 2:
                    # malformed line; skip it
                    continue
                word = fields[0]
                if len(word) > 0 and word[0].isalpha():
                    # docfreq: +1 per file, ignoring the within-file count
                    wordcounts[word] += 1
    with open(vocabpath, mode='w', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['word', 'docfreq'])
        for word, count in wordcounts.most_common(n):
            writer.writerow([word, count])
    return [x[0] for x in wordcounts.most_common(n)]
def get_vocablist(vocabpath, sourcedir, wordcounts, useall, n):
    '''
    Load the vocabulary stored at vocabpath, or — if that file doesn't
    exist yet — build it from sourcedir via make_vocablist and store it
    there. At most n words are returned; unless useall is set, words whose
    docfrequency is 2 or lower are dropped.
    '''
    vocablist = []
    if not os.path.isfile(vocabpath):
        vocablist = make_vocablist(sourcedir, n, vocabpath)
    else:
        with open(vocabpath, encoding='utf-8') as f:
            for rownum, row in enumerate(csv.DictReader(f), start=1):
                if rownum > n:
                    # this allows us to limit how deep we go
                    break
                word = row['word'].strip()
                if wordcounts[word] > 2 or useall:
                    vocablist.append(word)
    return vocablist[:n]
def get_docfrequency(volspresent, donttrainset):
    '''
    Count document frequency for words across the training volumes. These
    wordcounts don't necessarily define a feature set for modeling: at
    present, the limits of that set are defined primarily by a fixed list
    shared across all models (top10k).

    Volumes in donttrainset are skipped entirely; each qualifying line in
    a volume adds 1 to its word's count, regardless of the count field.
    '''
    wordcounts = Counter()
    for volid, volpath in volspresent:
        if volid in donttrainset:
            continue
        with open(volpath, encoding='utf-8') as f:
            for line in f:
                fields = line.strip().split('\t')
                # malformed lines exist but are rare enough to ignore
                if len(fields) != 2:
                    continue
                word = fields[0]
                if word and word[0].isalpha():
                    # docfrequency, not occurrence count: add 1 per line,
                    # never the within-file count
                    wordcounts[word] += 1
    return wordcounts
def create_model(paths, exclusions, classifyconditions):
    ''' This is the main function in the module.
    It can be called externally; it's also called
    if the module is run directly.

    paths: (sourcefolder, extension, metadatapath, outputpath, vocabpath).
    exclusions: (excludeif, excludeifnot, excludebelow, excludeabove, sizecap).
    classifyconditions: (positive_tags, negative_tags, datetype,
        numfeatures, regularization, testconditions).

    Returns (accuracy, allvolumes, coefficientuples): the raw leave-one-out
    accuracy, one output row per volume, and (coefficient, normalized
    coefficient, word) triples from a final model fit on all trainable
    volumes. Also writes a per-volume CSV to outputpath and a coefficient
    CSV alongside it.
    '''
    sourcefolder, extension, metadatapath, outputpath, vocabpath = paths
    excludeif, excludeifnot, excludebelow, excludeabove, sizecap = exclusions
    positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions = classifyconditions
    verbose = False
    holdout_authors = True
    # If you want reliable results, always run this with holdout_authors
    # set to True. The only reason to set it to False is to confirm that
    # this flag is actually making a difference. If you do that, it
    # disables the code that keeps other works by the author being predicted
    # out of the training set.
    # The following function confirms that the testconditions are legal.
    confirm_testconditions(testconditions, positive_tags)
    if not sourcefolder.endswith('/'):
        sourcefolder = sourcefolder + '/'
        # This just makes things easier.
    # Get a list of files.
    allthefiles = os.listdir(sourcefolder)
    # random.shuffle(allthefiles)
    volumeIDs = list()
    volumepaths = list()
    for filename in allthefiles:
        if filename.endswith(extension):
            volID = filename.replace(extension, "")
            # The volume ID is basically the filename minus its extension.
            # Extensions are likely to be long enough that there is little
            # danger of accidental occurrence inside a filename. E.g.
            # '.fic.tsv'
            path = sourcefolder + filename
            volumeIDs.append(volID)
            volumepaths.append(path)
    metadict = metafilter.get_metadata(metadatapath, volumeIDs, excludeif, excludeifnot, excludebelow, excludeabove)
    # Now that we have a list of volumes with metadata, we can select the groups of IDs
    # that we actually intend to contrast.
    IDsToUse, classdictionary, donttrainset = metafilter.label_classes(metadict, "tagset", positive_tags, negative_tags, sizecap, datetype, excludeif, testconditions)
    print()
    # NOTE(review): these locals deliberately shadow the builtins min/max;
    # first_and_last returns sentinels (3000, 0) when IDsToUse is empty,
    # so "min > 0" doubles as a non-emptiness check.
    min, max = first_and_last(IDsToUse, metadict, datetype)
    if min > 0:
        print("The whole corpus involved here includes " + str(len(IDsToUse)))
        print("volumes, ranging in date from " + str(min) + " to " + str(max) + ".")
        print()
    # We now create an ordered list of id-path tuples for later use, and identify a set of
    # positive ids that should never be used in training.
    volspresent, orderedIDs = get_volume_lists(volumeIDs, volumepaths, IDsToUse)
    # Extend the set of ids not to be used in training by identifying negative volumes that match
    # the distribution of positive volumes.
    describe_donttrainset(donttrainset, classdictionary, metadict, datetype)
    # Create a flag for each volume that indicates whether it was used in training
    record_trainflags(metadict, donttrainset)
    # Get a count of docfrequency for all words in the corpus. This is probably not needed and
    # might be deprecated later.
    wordcounts = get_docfrequency(volspresent, donttrainset)
    # The feature list we use is defined by the top 10,000 words (by document
    # frequency) in the whole corpus, and it will be the same for all models.
    vocablist = get_vocablist(vocabpath, sourcefolder, wordcounts, useall = True, n = numfeatures)
    # This function either gets the vocabulary list already stored in vocabpath, or
    # creates a list of the top 10k words in all files, and stores it there.
    # N is a parameter that could be altered right here.
    # Useall is a parameter that you basically don't need to worry about unless
    # you're changing / testing code. If you set it to false, the vocablist will
    # exclude words that occur very rarely. This shouldn't be necessary; the
    # crossvalidation routine is designed not to include features that occur
    # zero times in the training set. But if you get div-by-zero errors in the
    # training process, you could fiddle with this parameter as part of a
    # troubleshooting process.
    numfeatures = len(vocablist)
    # For each volume, we're going to create a list of volumes that should be
    # excluded from the training set when it is to be predicted. More precisely,
    # we're going to create a list of their *indexes*, so that we can easily
    # remove rows from the training matrix.
    # This list will include for ALL volumes, the indexes of vols in the donttrainset.
    donttrainon = [orderedIDs.index(x) for x in donttrainset]
    authormatches = [list(donttrainon) for x in range(len(orderedIDs))]
    # Now we proceed to enlarge that list by identifying, for each volume,
    # a set of indexes that have the same author. Obvs, there will always be at least one.
    # We exclude a vol from it's own training set.
    if holdout_authors:
        for idx1, anid in enumerate(orderedIDs):
            thisauthor = metadict[anid]['author']
            for idx2, anotherid in enumerate(orderedIDs):
                otherauthor = metadict[anotherid]['author']
                if thisauthor == otherauthor and not idx2 in authormatches[idx1]:
                    authormatches[idx1].append(idx2)
    else:
        # This code only runs if we're testing the effect of
        # holdout_authors by disabling it.
        for idx1, anid in enumerate(orderedIDs):
            if idx1 not in authormatches[idx1]:
                authormatches[idx1].append(idx1)
    # The purpose of everything that follows is to
    # balance negative and positive instances in each
    # training set.
    trainingpositives = set()
    trainingnegatives = set()
    for anid, thisclass in classdictionary.items():
        if anid in donttrainset:
            continue
        if thisclass == 1:
            trainingpositives.add(orderedIDs.index(anid))
        else:
            trainingnegatives.add(orderedIDs.index(anid))
    print('Training positives: ' + str(len(trainingpositives)))
    print('Training negatives: ' + str(len(trainingnegatives)))
    # The code below was intended to balance the size of positive and
    # negative in spite of same-author exclusions. But it could
    # have grossly unintended effects when there were many donttrainon
    # exclusions.
    # for alist in authormatches:
    #     numpositive = 0
    #     numnegative = 0
    #     for anidx in alist:
    #         anid = orderedIDs[anidx]
    #         thisclass = classdictionary[anid]
    #         if thisclass == 1:
    #             numpositive += 1
    #         else:
    #             numnegative += 1
    #     if numpositive > numnegative:
    #         difference = numpositive - numnegative
    #         remaining = trainingnegatives - set(alist)
    #         alist.extend(random.sample(remaining, difference))
    #     elif numpositive < numnegative:
    #         difference = numnegative - numpositive
    #         remaining = trainingpositives - set(alist)
    #         alist.extend(random.sample(remaining, difference))
    #     else:
    #         difference = 0
    # Let's record, for each volume, the size of its training set.
    trainingsizes = []
    numvolumes = len(orderedIDs)
    for idx, anid in enumerate(orderedIDs):
        excluded = len(authormatches[idx])
        metadict[anid]['trainsize'] = numvolumes - excluded
        trainingsizes.append(metadict[anid]['trainsize'])
    averagetrainingsize = sum(trainingsizes) / len(trainingsizes)
    for alist in authormatches:
        alist.sort(reverse = True)
    # I am reversing the order of indexes so that I can delete them from
    # back to front, without changing indexes yet to be deleted.
    # This will become important in the modelingprocess module.
    volsizes = dict()
    voldata = list()
    classvector = list()
    for volid, volpath in volspresent:
        with open(volpath, encoding = 'utf-8') as f:
            voldict = dict()
            totalcount = 0
            for line in f:
                fields = line.strip().split('\t')
                if len(fields) > 2 or len(fields) < 2:
                    continue
                word = fields[0]
                count = int(fields[1])
                voldict[word] = count
                totalcount += count
        date = metautils.infer_date(metadict[volid], datetype)
        date = date - 1700
        if date < 0:
            date = 0
        # NOTE(review): `usedate` is read here (and again below) but never
        # assigned in this function -- presumably a module-level flag
        # defined elsewhere in the file; confirm it exists.
        if usedate:
            features = get_features_with_date(voldict, vocablist, date, totalcount)
            voldata.append(features)
        else:
            features = get_features(voldict, vocablist)
            if totalcount == 0:
                # avoid division by zero for empty volumes
                totalcount = .00001
            voldata.append(features / totalcount)
        volsizes[volid] = totalcount
        classflag = classdictionary[volid]
        classvector.append(classflag)
    data = pd.DataFrame(voldata)
    sextuplets = list()
    for i, volid in enumerate(orderedIDs):
        listtoexclude = authormatches[i]
        asixtuple = data, classvector, listtoexclude, i, usedate, regularization
        sextuplets.append(asixtuple)
    # Now do leave-one-out predictions.
    print('Beginning multiprocessing.')
    pool = Pool(processes = 11)
    res = pool.map_async(modelingprocess.model_one_volume, sextuplets)
    # After all files are processed, write metadata, errorlog, and counts of phrases.
    res.wait()
    resultlist = res.get()
    assert len(resultlist) == len(orderedIDs)
    logisticpredictions = dict()
    for i, volid in enumerate(orderedIDs):
        logisticpredictions[volid] = resultlist[i]
    pool.close()
    pool.join()
    print('Multiprocessing concluded.')
    truepositives = 0
    truenegatives = 0
    falsepositives = 0
    falsenegatives = 0
    allvolumes = list()
    with open(outputpath, mode = 'w', encoding = 'utf-8') as f:
        writer = csv.writer(f)
        header = ['volid', 'dateused', 'pubdate', 'birthdate', 'firstpub', 'gender', 'nation', 'allwords', 'logistic', 'realclass', 'trainflag', 'trainsize', 'author', 'title', 'genretags']
        writer.writerow(header)
        for volid in IDsToUse:
            metadata = metadict[volid]
            dateused = metadata[datetype]
            pubdate = metadata['pubdate']
            birthdate = metadata['birthdate']
            firstpub = metadata['firstpub']
            gender = metadata['gender']
            nation = metadata['nation']
            author = metadata['author']
            title = metadata['title']
            allwords = volsizes[volid]
            logistic = logisticpredictions[volid]
            realclass = classdictionary[volid]
            trainflag = metadata['trainflag']
            trainsize = metadata['trainsize']
            genretags = ' | '.join(metadata['tagset'])
            outrow = [volid, dateused, pubdate, birthdate, firstpub, gender, nation, allwords, logistic, realclass, trainflag, trainsize, author, title, genretags]
            writer.writerow(outrow)
            allvolumes.append(outrow)
            # Tally the confusion matrix; a prediction of exactly 0.5
            # (or a NaN that fails all three comparisons) is broken randomly.
            if logistic == 0.5:
                print("equals!")
                predictedpositive = random.sample([True, False], 1)[0]
            elif logistic > 0.5:
                predictedpositive = True
            elif logistic < 0.5:
                predictedpositive = False
            else:
                print('Oh, joy. A fundamental floating point error.')
                predictedpositive = random.sample([True, False], 1)[0]
            if predictedpositive and classdictionary[volid] > 0.5:
                truepositives += 1
            elif not predictedpositive and classdictionary[volid] < 0.5:
                truenegatives += 1
            elif not predictedpositive and classdictionary[volid] > 0.5:
                falsenegatives += 1
            elif predictedpositive and classdictionary[volid] < 0.5:
                falsepositives += 1
            else:
                print("Wait a second, boss.")
    # Fit one final model on all trainable volumes to extract
    # interpretable coefficients for the vocabulary.
    donttrainon.sort(reverse = True)
    trainingset, yvals, testset = sliceframe(data, classvector, donttrainon, 0)
    trainingset, testset = modelingprocess.remove_zerocols(trainingset, testset)
    newmodel = LogisticRegression(C = regularization)
    trainingset, means, stdevs = normalizearray(trainingset, usedate)
    newmodel.fit(trainingset, yvals)
    coefficients = newmodel.coef_[0] * 100
    coefficientuples = list(zip(coefficients, (coefficients / np.array(stdevs)), vocablist + ['pub.date']))
    coefficientuples.sort()
    if verbose:
        for coefficient, normalizedcoef, word in coefficientuples:
            print(word + " : " + str(coefficient))
    print()
    totalevaluated = truepositives + truenegatives + falsepositives + falsenegatives
    if totalevaluated != len(IDsToUse):
        print("Total evaluated = " + str(totalevaluated))
        print("But we've got " + str(len(IDsToUse)))
    accuracy = (truepositives + truenegatives) / totalevaluated
    print('True positives ' + str(truepositives))
    print('True negatives ' + str(truenegatives))
    print('False positives ' + str(falsepositives))
    print('False negatives ' + str(falsenegatives))
    print()
    print('The average size of the training set was ' + str(averagetrainingsize))
    print()
    precision = truepositives / (truepositives + falsepositives)
    recall = truepositives / (truepositives + falsenegatives)
    F1 = 2 * (precision * recall) / (precision + recall)
    print("F1 : " + str(F1))
    coefficientpath = outputpath.replace('.csv', '.coefs.csv')
    with open(coefficientpath, mode = 'w', encoding = 'utf-8') as f:
        writer = csv.writer(f)
        for triple in coefficientuples:
            coef, normalizedcoef, word = triple
            writer.writerow([word, coef, normalizedcoef])
    return accuracy, allvolumes, coefficientuples
def diachronic_tilt(allvolumes, modeltype, datelimits):
    ''' Takes a set of predictions produced by a model that knows nothing about date,
    and divides it along a line with a diachronic tilt. We need to do this in a way
    that doesn't violate crossvalidation. I.e., we shouldn't "know" anything
    that the model didn't know. We tried a couple of different ways to do this, but
    the simplest and actually most reliable is to divide the whole dataset along a
    linear central trend line for the data!

    allvolumes: rows as written by create_model (date at index 1, logistic
    prediction at index 8, real class at index 9).
    modeltype: only 'linear' is currently functional; 'logistic' is
    deprecated (see the NOTE below).
    Returns the accuracy obtained by classifying each volume by whether
    its prediction falls above the fitted trend line. Also shows a plot.
    '''
    listofrows = list()
    classvector = list()
    # DEPRECATED
    # if modeltype == 'logistic' and len(datelimits) == 2:
    #     # In this case we construct a subset of data to model on.
    #     tomodeldata = list()
    #     tomodelclasses = list()
    #     pastthreshold, futurethreshold = datelimits
    for volume in allvolumes:
        date = volume[1]
        logistic = volume[8]
        realclass = volume[9]
        listofrows.append([logistic, date])
        classvector.append(realclass)
        # DEPRECATED
        # if modeltype == 'logistic' and len(datelimits) == 2:
        #     if date >= pastthreshold and date <= futurethreshold:
        #         tomodeldata.append([logistic, date])
        #         tomodelclasses.append(realclass)
    # unzip: y is the predictions, x the dates
    y, x = [a for a in zip(*listofrows)]
    plt.axis([min(x) - 2, max(x) + 2, min(y) - 0.02, max(y) + 0.02])
    reviewedx = list()
    reviewedy = list()
    randomx = list()
    randomy = list()
    for idx, reviewcode in enumerate(classvector):
        if reviewcode == 1:
            reviewedx.append(x[idx])
            reviewedy.append(y[idx])
        else:
            randomx.append(x[idx])
            randomy.append(y[idx])
    plt.plot(reviewedx, reviewedy, 'ro')
    plt.plot(randomx, randomy, 'k+')
    if modeltype == 'logistic':
        # all this is DEPRECATED
        print("Hey, you're attempting to use the logistic-tilt option")
        print("that we deactivated. Go in and uncomment the code.")
        # NOTE(review): in this branch p, slope and intercept are never
        # assigned, so the plt.plot(x, p(x), ...) and dividingline lines
        # below will raise NameError. The branch survives only as a warning.
        # if len(datelimits) == 2:
        #     data = pd.DataFrame(tomodeldata)
        #     responsevariable = tomodelclasses
        # else:
        #     data = pd.DataFrame(listofrows)
        #     responsevariable = classvector
        # newmodel = LogisticRegression(C = 100000)
        # newmodel.fit(data, responsevariable)
        # coefficients = newmodel.coef_[0]
        # intercept = newmodel.intercept_[0] / (-coefficients[0])
        # slope = coefficients[1] / (-coefficients[0])
        # p = np.poly1d([slope, intercept])
    elif modeltype == 'linear':
        # what we actually do: fit a degree-1 trend line to the predictions
        z = np.polyfit(x, y, 1)
        p = np.poly1d(z)
        slope = z[0]
        intercept = z[1]
    plt.plot(x,p(x),"b-")
    plt.show()
    x = np.array(x, dtype='float64')
    y = np.array(y, dtype='float64')
    classvector = np.array(classvector)
    # a volume is "predicted reviewed" when its prediction lies above the
    # trend line evaluated at its date
    dividingline = intercept + (x * slope)
    predicted_as_reviewed = (y > dividingline)
    really_reviewed = (classvector == 1)
    accuracy = sum(predicted_as_reviewed == really_reviewed) / len(classvector)
    return accuracy
if __name__ == '__main__':
    # If this class is called directly, it creates a single model using the default
    # settings set below. Inputs are gathered interactively from stdin.
    ## PATHS.
    # sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/texts/'
    # extension = '.fic.tsv'
    # metadatapath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/masterficmeta.csv'
    # outputpath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/predictions.csv'
    sourcefolder = '../newdata/'
    extension = '.fic.tsv'
    metadatapath = '../meta/finalmeta.csv'
    vocabpath = '../lexicon/new10k.csv'
    modelname = input('Name of model? ')
    # results file is stamped with today's date to keep runs distinct
    outputpath = '../results/' + modelname + str(datetime.date.today()) + '.csv'
    # We can simply exclude volumes from consideration on the basis on any
    # metadata category we want, using the dictionaries defined below.
    ## EXCLUSIONS.
    excludeif = dict()
    excludeifnot = dict()
    excludeabove = dict()
    excludebelow = dict()
    daterange = input('Range of dates to use in the model? ')
    if ',' in daterange:
        dates = [int(x.strip()) for x in daterange.split(',')]
        dates.sort()
        if len(dates) == 2:
            assert dates[0] < dates[1]
            excludebelow['firstpub'] = dates[0]
            excludeabove['firstpub'] = dates[1]
    # allstewgenres = {'cozy', 'hardboiled', 'det100', 'chimyst', 'locdetective', 'lockandkey', 'crime', 'locdetmyst', 'blcrime', 'anatscifi', 'locscifi', 'chiscifi', 'femscifi', 'stangothic', 'pbgothic', 'lochorror', 'chihorror', 'locghost'}
    # excludeif['negatives'] = allstewgenres
    sizecap = 160
    # CLASSIFY CONDITIONS
    # We ask the user for a list of categories to be included in the positive
    # set, as well as a list for the negative set. Default for the negative set
    # is to include all the "random"ly selected categories. Note that random volumes
    # can also be tagged with various specific genre tags; they are included in the
    # negative set only if they lack tags from the positive set.
    tagphrase = input("Comma-separated list of tags to include in the positive class: ")
    positive_tags = [x.strip() for x in tagphrase.split(',')]
    tagphrase = input("Comma-separated list of tags to include in the negative class: ")
    # An easy default option.
    if tagphrase == 'r':
        negative_tags = ['random', 'grandom', 'chirandom']
    else:
        negative_tags = [x.strip() for x in tagphrase.split(',')]
    # We also ask the user to specify categories of texts to be used only for testing.
    # These exclusions from training are in addition to ordinary crossvalidation.
    print()
    print("You can also specify positive tags to be excluded from training, and/or a pair")
    print("of integer dates outside of which vols should be excluded from training.")
    print("If you add 'donotmatch' to the list of tags, these volumes will not be")
    print("matched with corresponding negative volumes.")
    print()
    testphrase = input("Comma-separated list of such tags: ")
    testconditions = set([x.strip() for x in testphrase.split(',') if len(x) > 0])
    datetype = "firstpub"
    numfeatures = 10000
    regularization = .000075
    # bundle arguments the way create_model expects them
    paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
    exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
    classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
    rawaccuracy, allvolumes, coefficientuples = create_model(paths, exclusions, classifyconditions)
    print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
    tiltaccuracy = diachronic_tilt(allvolumes, 'linear', [])
    print("Divided with a line fit to the data trend, it's ", str(tiltaccuracy))
|
|
#!/usr/bin/env python
import base64
import os
import struct
from itertools import groupby
from pprint import pprint
def split_list(l, n):
    """Split sequence *l* into consecutive chunks of at most *n* items.

    Fixes: the original bound a lambda to a name (PEP 8 E731) and used
    Python-2-only ``xrange``; ``range`` behaves identically here on both
    Python 2 and 3.
    """
    return [l[x: x + n] for x in range(0, len(l), n)]
def get_image_info(data):
    """Return (width, height) parsed from raw PNG file data.

    Technique from:
    http://stackoverflow.com/questions/8032642/how-to-obtain-image-size
    """
    if not is_png(data):
        raise Exception('not a png image')
    # IHDR stores width and height as consecutive big-endian 32-bit ints
    w, h = struct.unpack('>LL', data[16:24])
    return int(w), int(h)
def is_png(data):
    """True when *data* starts with the PNG signature followed by an IHDR
    chunk at the expected offset.

    Fix: the magic values are now bytes literals. The file is read in
    binary mode, so on Python 3 the original str comparisons could never
    match; on Python 2 ``b'...'`` is identical to ``'...'``.
    """
    return data[:8] == b'\211PNG\r\n\032\n' and data[12:16] == b'IHDR'
def scale_image(width, height, new_height):
    """Return (new_width, new_height) after shrinking the box to new_height.

    NOTE(review): the width is reduced by the same absolute number of
    pixels the height drops, not proportionally -- presumably intentional
    for these icons; confirm before reusing elsewhere.
    """
    shrink = height - new_height
    return (width - shrink, new_height)
def compile_payment_icons():
    # Build build/ai-paymenticons.css (icons inlined as base64 data URIs)
    # from icons/pi/*.png, and print a markdown table documenting the
    # generated CSS classes. Python 2 code (see the `print final` below).
    # define locations
    rootdir = os.path.dirname(os.path.abspath(__file__))
    iconsdir = os.path.join(rootdir, "icons", "pi")
    builddir = os.path.join(rootdir, "build/")
    files = sorted(os.listdir(iconsdir))
    # generate list of files
    items = {}
    for f in files:
        fp = os.path.join(iconsdir, f)
        # filenames look like <name>-<varient>-<size>.png
        name, varient, end = f.rsplit("-", 2)
        size = end.split(".")[0]
        # fetch image
        with open(fp, 'rb') as fh:
            data = fh.read()
        width, height = get_image_info(data)
        data = base64.b64encode(data)
        name = "pi-%s-%s" % ( name, varient, )
        obj = dict(name=name, size=size, filepath=fp, width=width,
                   height=height, data=data)
        # every source icon is expected to be 128px tall; names are unique
        assert obj['height'] == 128
        assert name not in items
        items[name] = obj
    def css_tmpl(obj):
        # Render the CSS rules for one icon: a base class with the inlined
        # image, plus one width/height override per size suffix.
        sizetable = {
            16 : '1x',
            32 : '2x',
            64 : '3x',
            96 : '4x',
            128 : '5x',
        }
        # create image data
        t = ".ai.ai-%(name)s" % obj
        t += " { \r\n background-image: url(data:image/png;base64,%(data)s); \r\n" % obj
        t += " width:%(width)spx; height:%(height)spx; \r\n}\r\n" % obj
        for height in (128, 96, 64, 32, 16):
            new_width, new_height = scale_image(obj['width'], obj['height'], height)
            sizename = sizetable[height]
            t += ".ai.ai-%(name)s.ai-%(sizename)s" % dict(name=obj['name'], sizename=sizename)
            t += " { width: %(width)spx; height: %(height)spx; } \r\n" % dict(width=new_width,
                height=new_height)
        return t
    data = ""
    for obj in items.values():
        nd = css_tmpl(obj)
        data += nd
    data += ".ai { display:inline-block; background-repeat: no-repeat; background-size:contain; }"
    # create css
    css_file = os.path.join(builddir, "ai-paymenticons.css")
    with open(css_file, 'wb') as fh:
        fh.write(data)
    # create docs
    final = ""
    td = items.values()
    td = sorted(td, key=lambda x: x['name'])
    #for x in td:
    #    print x['name']
    #return
    # lay the icons out three per markdown-table row
    chunks = split_list(td, 3)
    final += "| - | - | - |\r\n"
    final += "| ----- |:------------------:| -----:|\r\n"
    for chunk in chunks:
        h = []
        d = []
        for i in chunk:
            linkpath = "/icons/pi/%s" % ( os.path.basename(i['filepath']), )
            cssname = "ai-%s" % ( i['name'], )
            h.append("**%s**" % ( cssname, ))
            # NOTE(review): this format string contains no conversion
            # specifier yet is given an argument -- the markdown image
            # markup appears to have been lost from the literal, and as
            # written this line raises TypeError at runtime. Restore the
            # original image markup before using.
            d.append("  " % ( linkpath, ))
        d = "|".join(d)
        d = "| %s |\r\n" % ( d, )
        h = " | ".join(h)
        h = "| %s |\r\n" % ( h, )
        final += h
        final += d
    #final += "\r\n"
    print final
if __name__ == '__main__':
    # Script entry point: build the payment-icon CSS and print the docs table.
    compile_payment_icons()
# NOTE: everything inside the triple-quoted string below is dead legacy
# code kept for reference only; it is never executed.
'''
# break files up into logical css groups
css_data = []
keyfunc = lambda x: (x['name'], x['varient'])
data = sorted(items, key=keyfunc)
for k, g in groupby(data, keyfunc):
# create name
name = "%s-%s" % ( k[0], k[1], )
# group sizes
keyfunc = lambda x: x['size']
g = sorted(g, key=keyfunc)
sizes = []
for size, i in groupby(g, keyfunc):
i = list(i)
assert len(i) == 1
sizes.append((size, i[0]))
sizes = dict(sizes)
def css_tmpl(name, sizes, data):
# write b64 data
css_output = []
items = []
sizeitems = []
for sizename, sizeinfo in sizes.iteritems():
width, height = sizeinfo['width'], sizeinfo['height']
obj = dict(name=name, sizename=sizename, width=width, height=height)
t = ".ai.ai-%(name)s.ai-%(sizename)s" % obj
items.append(t)
sd = " { width: %(width)spx; height: %(height)spx; } " % obj
sd = "%s %s" % ( t, sd)
css_output.append(sd)
css_start = ", ".join(items)
css_start += " { \r\n background-image: url(data:image/png;base64,%s); \r\n}\r\n" % data
css_output.append(css_start)
return "\r\n".join(css_output)
# create 16px
sizes_1x_2x = {
'1x' : {
'width' : sizes['32px']['width'] / 2,
'height' : sizes['32px']['height'] / 2,
},
'2x' : {
'width' : sizes['32px']['width'],
'height' : sizes['32px']['height'],
},
}
sizes_1x_2x = {
'1x' : {
'width' : sizes['32px']['width'] / 2,
'height' : sizes['32px']['height'] / 2,
},
'2x' : {
'width' : sizes['32px']['width'],
'height' : sizes['32px']['height'],
},
}
# store to css
print css_tmpl(name=name, sizes=sizes_1x_2x, data=sizes['32px']['data'])
#css_tmpl(name=name, sizes=(16,32), )
#css_tmpl(name=name, sizes=(64, ), filepath=sizes['64px']['filepath'])
#css_tmpl(name=name, sizes=(96, 128), filepath=sizes['128px']['filepath'])
#print name
#print list(g)
#css_tmpl(name=name)
#css_data.append()
#print i
#print (k,i)
def css_tmpl(name, sizes, filepath):
"""
Some messy/undocumented magic to create CSS
"""
obj = dict(name=name, sizename=sizename, size=size, data=data, height=height,
width=width)
t = ".ai.ai-%(name)s.%(sizename)s" % obj
sd.append(t)
sd = ", ".join(sd)
sd += """ {
background-image: url(data:image/png;base64,%(data)s);
height: %(height)spx;
width: %(width)spx;
}
""" % obj
print sd
.%(name)s.%(varient)s
"""
XXX: Really sorry for the ugly code, this was written to get out of the door
as quickly as possible. It works, ship it.
"""
import os
import base64
css_tmpl = """.bpi.%(name)s-%(itype)s-%(height)s {
background-image: url(data:image/png;base64,%(data)s);
height: %(height)s;
width: %(width)s;
}
"""
docs_tmpl = """
| %(name)s-%(itype)s-32px | %(name)s-%(itype)s-64px | %(name)s-%(itype)s-128px |
| ------------- |:-------------:| -----:|
| s-%(itype)s-32px.png) | s-%(itype)s-64px.png) |s-%(itype)s-128px.png)
"""
def flatten_list(seq):
merged = []
for s in seq:
for x in s:
merged.append(x)
return merged
def convert_to_base64(path):
with open("yourfile.ext", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
def compile():
rootdir = os.path.dirname(os.path.abspath(__file__))
iconsdir = os.path.join(rootdir, "icons/")
builddir = os.path.join(rootdir, "build/")
files = sorted(os.listdir(iconsdir))
if not os.path.exists(builddir):
os.mkdir(builddir)
doclines = []
lines = {}
for f in files:
# convert to base64
iconpath = os.path.join(rootdir, "icons/", f)
with open(iconpath, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
# determine icon based on file name
name, itype, end = f.rsplit("-", 2)
size = end.split(".")[0]
# magic size calculation
if size == '32px':
width = '51px'
elif size == '64px':
width = '102px'
elif size == '128px':
width = '203px'
else:
raise ValueError, "unsupported size: %s" % ( size, )
#encoded_string = None
context = dict(name=name, itype=itype, height=size, width=width, data=encoded_string)
line = css_tmpl % context
if size not in lines:
lines[size] = []
lines[size].append(line)
if name not in doclines:
doclines.append(name)
for name in doclines:
print docs_tmpl % dict(name=name, itype="curved")
print docs_tmpl % dict(name=name, itype="straight")
# write individual files
for k,v in lines.iteritems():
data = "\r\n".join(v)
fp = os.path.join(builddir, "standalone-bpi-%s.css" % ( k, ))
with open(fp, "wb") as f:
f.write(".bpi { display:inline-block; }\r\n")
f.write(data)
print "Written %s bytes to %s" % ( len(data), fp, )
# write big file
linesall = [ v for k,v in lines.iteritems() ]
linesall = flatten_list(linesall)
data = "\r\n".join(linesall)
fp = os.path.join(builddir, "standalone-bpi-all.css")
with open(fp, "wb") as f:
f.write(".bpi { display:inline-block; }\r\n")
f.write(data)
print "Written %s bytes to %s" % ( len(data), fp, )
if __name__ == '__main__':
compile()
'''
|
|
''' models for the puzzleboard concept '''
import json
import random
import string
from .puzzle import Puzzle, puzzle_from_dict
from .redis import redis_client
# Tunable knobs for board generation.
config = {
    'retries': 2048,          # max board-generation attempts (see generate_puzzleboard)
    'diagonal_ratio': 0.095,  # min share of placed solutions that must run diagonally (see valid)
    'random_factor': 32,      # multiplier in the per-board placement-attempt cap
    'word_density': 0.6       # min fraction of cells covered by word letters (see has_density)
}
class Point:
    '''Represents a cell on the puzzle board.

    Fix: the original defined __eq__ without __hash__, which made Point
    unhashable (violating the eq/hash contract); __hash__ and __repr__
    are added, hashing the same state __eq__ compares.
    '''
    def __init__(self, x: int, y: int):
        self.x = x
        self.y = y
    def __eq__(self, other):
        return self.x == other.x and self.y == other.y
    def __hash__(self):
        return hash((self.x, self.y))
    def __repr__(self):
        return f'Point(x={self.x}, y={self.y})'
    def __iter__(self):
        ''' make class iterable so that transformation is easier via dict protocol '''
        yield 'x', self.x
        yield 'y', self.y
def point_from_dict(d: dict) -> Point:
    '''Rebuild a Point from its dict form (inverse of dict(Point)).'''
    return Point(x=d['x'], y=d['y'])
# Unit offsets for each of the eight compass directions a word can run in.
direction_offsets: dict[str, Point] = {
    'NW': Point(-1, -1),
    'N': Point(0, -1),
    'NE': Point(1, -1),
    'E': Point(1, 0),
    'SE': Point(1, 1),
    'S': Point(0, 1),
    'SW': Point(-1, 1),
    'W': Point(-1, 0)
}
# Compass directions grouped by orientation; values are lists of keys
# into direction_offsets (annotation corrected from dict[str, str]).
direction_hints: dict[str, list[str]] = {
    'vertical': ['N', 'S'],
    'horizontal': ['E', 'W'],
    'diagonal': ['NW', 'NE', 'SE', 'SW']
}
def is_direction(direction_hint: str):
    '''Build a predicate testing whether a placed WordSolution runs in one
    of the directions grouped under direction_hint.'''
    def matches(sol: WordSolution):
        '''True when sol is placed and its direction fits the hint group.'''
        if not sol.placed:
            return False
        return sol.direction in direction_hints[direction_hint]
    return matches
def word_points(word: str, origin: Point, direction: str) -> list[Point]:
    '''Given a word, a direction and point of placement, return the points
    on the board involved — one per letter, starting at origin.

    Fix: the original advanced ``origin`` itself (``origin.x += ...``),
    mutating the caller's Point; e.g. try_place_word then recorded a
    solution origin one cell past the end of the word. Coordinates are now
    tracked in locals, leaving the caller's Point untouched.
    '''
    step = direction_offsets[direction]
    x, y = origin.x, origin.y
    points = []
    for _ in word:
        points.append(Point(x, y))
        x += step.x
        y += step.y
    return points
class WordSolution:
    '''A word together with its (attempted) placement on the board.

    Fix: the original default ``origin: Point = Point(0, 0)`` was a single
    mutable Point shared by every WordSolution constructed without an
    explicit origin; both defaults now use the None-sentinel idiom.
    '''
    def __init__(self, word, placed=False, origin: Point = None, direction=None, points: list[Point] = None):
        self.word = word
        self.placed = placed
        # fresh Point per instance instead of a shared default object
        self.origin = Point(0, 0) if origin is None else origin
        self.direction = direction
        self.points = [] if points is None else points
    def __iter__(self):
        ''' make class iterable so that transformation is easier via dict protocol '''
        yield 'word', self.word
        yield 'placed', self.placed
        yield 'origin', dict(self.origin)
        yield 'direction', self.direction
        yield 'points', [dict(p) for p in self.points]
    def __lt__(self, other):
        '''Supports sorting by word'''
        return self.word < other.word
def wordsolution_from_dict(d: dict) -> WordSolution:
return WordSolution(
d['word'],
d['placed'],
point_from_dict(d['origin']),
d['direction'],
[point_from_dict(p) for p in d['points']]
)
class PuzzleBoard:
    '''A height-by-width grid of letters plus the WordSolutions placed on it.'''
    def __init__(self,
                 height: int, width: int,
                 letters: list[list[str]] = None,
                 solutions: list[WordSolution] = None,
                 puzzle: Puzzle = None):
        # (letters annotation corrected: rows of cells, each None or a letter)
        self.height = height
        self.width = width
        if letters is None:
            # don't use * to build up all dimensions - see:
            # https://docs.python.org/3/faq/programming.html#how-do-i-create-a-multidimensional-list
            letters = [[None] * self.width for r in range(self.height)]
        self.letters = letters
        if solutions is None:
            solutions = []
        self.solutions = solutions
        self.puzzle = puzzle
    def __iter__(self):
        ''' make class iterable so that transformation is easier via dict protocol '''
        yield 'height', self.height
        yield 'width', self.width
        yield 'letters', self.letters
        yield 'solutions', [dict(s) for s in self.solutions]
        yield 'puzzle', dict(self.puzzle)
    def fill_with_random_letters(self):
        '''Fills all empty cells on the board with random upper case letters'''
        for y in range(self.height):
            for x in range(self.width):
                if self.letters[y][x] is None:
                    self.letters[y][x] = random.choice(string.ascii_uppercase)
    def has_density(self) -> bool:
        ''' Count of placed word letters / total grid letter count >= config['word_density'] '''
        # (annotation corrected: this returns the bool comparison result,
        # not the ratio itself)
        word_letters = sum([len(sol.word) for sol in self.solutions])
        total = self.height * self.width
        return word_letters / total >= config['word_density']
    def is_full(self):
        '''Tests if all the cells of the board are full'''
        return not any([cell is None for row in self.letters for cell in row])
    def place(self, solution: WordSolution):
        ''' Commit the solution to the board '''
        for letter, point in zip(solution.word, solution.points):
            self.letters[point.y][point.x] = letter
        self.solutions.append(solution)
    def placed_all_words(self) -> bool:
        # True when every word in the puzzle has a placed solution.
        placed_words = set(sorted([sol.word for sol in self.solutions]))
        words = set(self.puzzle.words)
        return placed_words == words
    def try_letter_solution(self, letter: str, point: Point) -> bool:
        '''Tests if a letter can be placed in the location requested'''
        # make sure point is in the grid; off-grid points leave value as
        # '' which fails both checks below, so they return False
        value = ''
        if point.x >= 0 and point.x < self.width and point.y >= 0 and point.y < self.height:
            value = self.letters[point.y][point.x]
        return value is None or value == letter
    def try_place_word(self, word: str, origin: Point, direction: str) -> WordSolution:
        '''tests if word can be placed on the board, and if so returns WordSolution

        Returns None when the board is already full or any letter clashes.
        (annotation corrected: word is a str, not int)
        '''
        solution = None
        if not self.is_full():
            points = word_points(word, origin, direction)
            if all([self.try_letter_solution(letter, point) for letter, point in zip(word, points)]):
                solution = WordSolution(word=word,
                                        placed=True,
                                        origin=origin,
                                        direction=direction,
                                        points=points)
        return solution
    def valid(self):
        '''Validates the quality of the generated board'''
        rc = False
        # must have at least config['diagonal_ratio'] diagonal solutions
        if len(self.solutions) > 0:
            rc = len(list(filter(is_direction('diagonal'), self.solutions))) / len(self.solutions) >= config['diagonal_ratio']
        # must have at least one vertical solution
        rc &= len(list(filter(is_direction('vertical'), self.solutions))) >= 1
        # must have at least one horizontal solution
        rc &= len(list(filter(is_direction('horizontal'), self.solutions))) >= 1
        # If there are not enough words in the puzzle to fulfill the requirements, and all were placed - accept it
        rc |= self.placed_all_words()
        return rc
    def words_to_place(self):
        '''Generator providing words from the puzzle

        Yields each puzzle word exactly once, in random order, by
        rejection-sampling from puzzle.words until every word is seen.
        NOTE(review): assumes puzzle.words is non-empty; random.choice
        raises IndexError on an empty sequence.
        '''
        words_set = set(self.puzzle.words)
        seen = set()
        while True:
            word = random.choice(self.puzzle.words)
            if word not in seen:
                seen.add(word)
                yield word
            if words_set == seen:
                break
def generate_puzzleboard(height: int, width: int, puzzle: Puzzle) -> PuzzleBoard:
    '''Generate a puzzleboard for the puzzle and dimensions passed.

    Retries board generation up to config['retries'] times until a board
    passes the quality check; the last attempt is returned even if invalid.
    '''
    # Upper bound on random placement attempts per word.
    maxtries = len(direction_offsets.keys()) * width * height * config['random_factor']
    for retry in range(config['retries']):
        # initialize an instance
        pboard = PuzzleBoard(height, width, puzzle=puzzle)
        assert not pboard.is_full()
        for word in pboard.words_to_place():
            for curr_try in range(maxtries):
                # randrange keeps origins strictly inside the grid; the old
                # randint(0, width/height) inclusive upper bound produced
                # origins at x == width / y == height that could never host
                # a word and silently wasted placement attempts.
                origin = Point(random.randrange(pboard.width), random.randrange(pboard.height))
                direction = random.choice(list(direction_offsets.keys()))
                solution = pboard.try_place_word(word, origin, direction)
                if solution:
                    pboard.place(solution)
                    break
            # do not need any more words
            if pboard.has_density():
                break
        pboard.solutions.sort()
        # board quality check
        if pboard.valid():
            # fill rest of board with random letters
            pboard.fill_with_random_letters()
            break
    return pboard
def puzzleboard_from_json(j: str) -> PuzzleBoard:
    """Deserialize a PuzzleBoard from its JSON representation."""
    data = json.loads(j)
    solutions = [wordsolution_from_dict(ws) for ws in data['solutions']]
    return PuzzleBoard(data['height'],
                       data['width'],
                       data['letters'],
                       solutions,
                       puzzle_from_dict(data['puzzle']))
def puzzleboard_urn(name: str) -> str:
    """Build the redis universal resource name for a puzzleboard list."""
    return 'puzzleboard:{}'.format(name)
def clear_puzzleboards() -> None:
    '''Delete all puzzleboard lists'''
    r = redis_client()
    keys = r.keys(puzzleboard_urn('*'))
    # redis DEL requires at least one key; calling r.delete() with an empty
    # match set raises, so skip when nothing matched.
    if keys:
        r.delete(*keys)
def pop_puzzleboard(name: str) -> PuzzleBoard:
    '''Pop a board from the cache; signal consumption'''
    client = redis_client()
    payload = client.lpop(puzzleboard_urn(name))
    return puzzleboard_from_json(payload)
def push_puzzleboard(name: str, pboard: PuzzleBoard):
    '''Place the board in the cache for usage'''
    payload = json.dumps(dict(pboard))
    redis_client().rpush(puzzleboard_urn(name), payload)
|
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from unittest import mock
from google.cloud import datacatalog
from looker_sdk import models
from looker_sdk.rtl import serialize
from google.datacatalog_connectors.looker import entities, prepare
class AssembledEntryFactoryTest(unittest.TestCase):
    """Tests for prepare.AssembledEntryFactory.

    The inner entry/tag factories are replaced with mocks in setUp, so each
    test wires the mock methods it needs and checks the assembled output.
    """
    __PREPARE_PACKAGE = 'google.datacatalog_connectors.looker.prepare'
    @mock.patch(f'{__PREPARE_PACKAGE}.datacatalog_tag_factory'
                f'.DataCatalogTagFactory')
    @mock.patch(f'{__PREPARE_PACKAGE}.datacatalog_entry_factory'
                f'.DataCatalogEntryFactory')
    def setUp(self, mock_entry_factory, mock_tag_factory):
        # Patch the factories the constructor instantiates, keeping handles
        # to the mock instances so tests can stub their methods.
        self.__assembled_data_factory = prepare.AssembledEntryFactory(
            'project-id', 'location-id', 'entry_group_id', 'user_system',
            'https://test.server.com')
        self.__entry_factory = mock_entry_factory.return_value
        self.__tag_factory = mock_tag_factory.return_value
    def test_constructor_should_set_instance_attributes(self):
        """Constructor stores both factories as private attributes."""
        attrs = self.__assembled_data_factory.__dict__
        self.assertIsNotNone(
            attrs['_AssembledEntryFactory__datacatalog_entry_factory'])
        self.assertIsNotNone(
            attrs['_AssembledEntryFactory__datacatalog_tag_factory'])
    def test_make_assembled_entries_list_should_process_folders(self):
        """A lone folder yields one assembled entry with its folder tag."""
        entry_factory = self.__entry_factory
        entry_factory.make_entry_for_folder = self.__mock_make_entry
        tag_factory = self.__tag_factory
        tag_factory.make_tag_for_folder = self.__mock_make_tag
        tag_templates_dict = {
            'looker_folder_metadata': {
                'name': 'tagTemplates/looker_folder_metadata',
            }
        }
        assembled_entries = \
            self.__assembled_data_factory.make_assembled_entries_list(
                [self.__make_fake_folder()], [], tag_templates_dict)
        self.assertEqual(1, len(assembled_entries))
        assembled_entry = assembled_entries[0]
        self.assertEqual('test_folder', assembled_entry.entry_id)
        self.assertEqual('fake_entries/test_folder',
                         assembled_entry.entry.name)
        tags = assembled_entry.tags
        self.assertEqual(1, len(tags))
        self.assertEqual('tagTemplates/looker_folder_metadata',
                         tags[0].template)
    def test_make_assembled_entries_list_should_process_dashboards(self):
        """A dashboard inside a folder yields folder + dashboard entries."""
        entry_factory = self.__entry_factory
        entry_factory.make_entry_for_folder = self.__mock_make_entry
        entry_factory.make_entry_for_dashboard = self.__mock_make_entry
        tag_factory = self.__tag_factory
        tag_factory.make_tag_for_dashboard = self.__mock_make_tag
        dashboard_data = {
            'id': 'test_dashboard',
        }
        dashboard = serialize.deserialize31(data=json.dumps(dashboard_data),
                                            structure=models.Dashboard)
        tag_templates_dict = {
            'looker_dashboard_metadata': {
                'name': 'tagTemplates/looker_dashboard_metadata',
            }
        }
        assembled_entries = \
            self.__assembled_data_factory.make_assembled_entries_list(
                [self.__make_fake_folder(dashboard=dashboard)], [],
                tag_templates_dict)
        self.assertEqual(2, len(assembled_entries))
        # The first entry refers to the folder.
        dashboard_assembled_entry = assembled_entries[1]
        self.assertEqual('test_dashboard', dashboard_assembled_entry.entry_id)
        self.assertEqual('fake_entries/test_dashboard',
                         dashboard_assembled_entry.entry.name)
        tags = dashboard_assembled_entry.tags
        self.assertEqual(1, len(tags))
        self.assertEqual('tagTemplates/looker_dashboard_metadata',
                         tags[0].template)
    def test_make_assembled_entries_list_should_process_dashboard_tiles(self):
        """Dashboard elements produce a third entry after folder/dashboard."""
        entry_factory = self.__entry_factory
        entry_factory.make_entry_for_folder = self.__mock_make_entry
        entry_factory.make_entry_for_dashboard = self.__mock_make_entry
        entry_factory.make_entry_for_dashboard_element = self.__mock_make_entry
        tag_factory = self.__tag_factory
        # Element tags receive the parent dashboard as an extra argument.
        tag_factory.make_tag_for_dashboard_element = \
            self.__mock_make_tag_parent_dep
        dashboard_data = {
            'id': 'test_dashboard',
            'dashboard_elements': [{
                'id': 194,
            }],
        }
        dashboard = serialize.deserialize31(data=json.dumps(dashboard_data),
                                            structure=models.Dashboard)
        folder = self.__make_fake_folder()
        folder.dashboards = [dashboard]
        tag_templates_dict = {
            'looker_dashboard_element_metadata': {
                'name': 'tagTemplates/looker_dashboard_element_metadata',
            }
        }
        assembled_entries = \
            self.__assembled_data_factory.make_assembled_entries_list(
                [folder], [], tag_templates_dict)
        self.assertEqual(3, len(assembled_entries))
        # The first entry refers to the folder and the second to the dashboard.
        element_assembled_entry = assembled_entries[2]
        self.assertEqual('194', element_assembled_entry.entry_id)
        self.assertEqual('fake_entries/194',
                         element_assembled_entry.entry.name)
        tags = element_assembled_entry.tags
        self.assertEqual(1, len(tags))
        self.assertEqual('tagTemplates/looker_dashboard_element_metadata',
                         tags[0].template)
    def test_make_assembled_entries_list_should_skip_empty_title(self):
        """Elements whose entry factory returns (None, None) are skipped."""
        entry_factory = self.__entry_factory
        entry_factory.make_entry_for_folder = self.__mock_make_entry
        entry_factory.make_entry_for_dashboard = self.__mock_make_entry
        entry_factory.make_entry_for_dashboard_element.return_value = \
            None, None
        dashboard_data = {
            'id': 'test_dashboard',
            'dashboard_elements': [{
                'id': 194,
            }],
        }
        dashboard = serialize.deserialize31(data=json.dumps(dashboard_data),
                                            structure=models.Dashboard)
        folder = self.__make_fake_folder()
        folder.dashboards = [dashboard]
        assembled_entries = \
            self.__assembled_data_factory.make_assembled_entries_list(
                [folder], [], {})
        # The first entry refers to the folder and the second to the dashboard.
        self.assertEqual(2, len(assembled_entries))
    def test_make_assembled_entries_list_should_process_looks(self):
        """A look inside a folder yields folder + look entries."""
        entry_factory = self.__entry_factory
        entry_factory.make_entry_for_folder = self.__mock_make_entry
        entry_factory.make_entry_for_look = self.__mock_make_entry
        tag_factory = self.__tag_factory
        tag_factory.make_tag_for_look = self.__mock_make_tag
        look_data = {
            'id': 10,
        }
        look = serialize.deserialize31(data=json.dumps(look_data),
                                       structure=models.Look)
        tag_templates_dict = {
            'looker_look_metadata': {
                'name': 'tagTemplates/looker_look_metadata',
            }
        }
        assembled_entries = \
            self.__assembled_data_factory.make_assembled_entries_list(
                [self.__make_fake_folder(look=look)], [],
                tag_templates_dict)
        self.assertEqual(2, len(assembled_entries))
        # The first entry refers to the folder.
        dashboard_assembled_entry = assembled_entries[1]
        self.assertEqual(10, dashboard_assembled_entry.entry_id)
        self.assertEqual('fake_entries/10',
                         dashboard_assembled_entry.entry.name)
        tags = dashboard_assembled_entry.tags
        self.assertEqual(1, len(tags))
        self.assertEqual('tagTemplates/looker_look_metadata', tags[0].template)
    def test_make_assembled_entries_list_should_process_queries(self):
        """Query metadata passed in the second argument is also assembled."""
        entry_factory = self.__entry_factory
        entry_factory.make_entry_for_query = self.__mock_make_entry
        tag_factory = self.__tag_factory
        tag_factory.make_tag_for_query = self.__mock_make_tag
        tag_templates_dict = {
            'looker_query_metadata': {
                'name': 'tagTemplates/looker_query_metadata',
            }
        }
        query_metadata = entities.AssembledQueryMetadata(
            self.__make_fake_query(), 'select *', {}, {})
        assembled_entries = \
            self.__assembled_data_factory.make_assembled_entries_list(
                [], [query_metadata], tag_templates_dict)
        self.assertEqual(1, len(assembled_entries))
        assembled_entry = assembled_entries[0]
        self.assertEqual(837, assembled_entry.entry_id)
        self.assertEqual('fake_entries/837', assembled_entry.entry.name)
        tags = assembled_entry.tags
        self.assertEqual(1, len(tags))
        self.assertEqual('tagTemplates/looker_query_metadata',
                         tags[0].template)
    @classmethod
    def __make_fake_folder(cls, dashboard=None, look=None):
        """Build a models.Folder, optionally nesting a dashboard and a look."""
        dashboard_data = json.loads(
            serialize.serialize(dashboard)) if dashboard else None
        look_data = json.loads(serialize.serialize(look)) if look else None
        folder_data = {
            'id': 'test_folder',
            'name': 'Test folder',
            'parent_id': '',
            'dashboards': [dashboard_data] if dashboard_data else None,
            'looks': [look_data] if look_data else None,
        }
        return serialize.deserialize31(data=json.dumps(folder_data),
                                       structure=models.Folder)
    @classmethod
    def __make_fake_query(cls):
        """Build a minimal models.Query with a fixed id."""
        query_data = {
            'id': 837,
            'model': '',
            'view': '',
        }
        return serialize.deserialize31(data=json.dumps(query_data),
                                       structure=models.Query)
    @classmethod
    def __mock_make_entry(cls, asset):
        """Stand-in entry maker: returns (asset.id, Entry named after it)."""
        entry = datacatalog.Entry()
        entry_id = asset.id
        entry.name = f'fake_entries/{entry_id}'
        return entry_id, entry
    @classmethod
    def __mock_make_tag(cls, tag_template_dict, asset):
        """Stand-in tag maker: returns a Tag pointing at the given template."""
        tag = datacatalog.Tag()
        tag.template = tag_template_dict['name']
        return tag
    @classmethod
    def __mock_make_tag_parent_dep(cls, tag_template_dict, asset, parent):
        """Same as __mock_make_tag but accepts the parent asset argument."""
        tag = datacatalog.Tag()
        tag.template = tag_template_dict['name']
        return tag
|
|
"""
This script is used to write `sqf/database.py`, which contains all valid SQF expressions.
It reads a file from here:
https://raw.githubusercontent.com/intercept/intercept/master/src/client/headers/client/sqf_pointers_declaration.hpp
"""
import urllib.request
from sqf.interpreter_types import ForType, IfType, SwitchType, WhileType, TryType, WithType
from sqf.types import Code, Array, Boolean, Number, Type, Nothing, Anything, String, Namespace, \
Object, Config, Script, Control, Group, Display, Side, Task, Location, NetObject, DiaryReport, TeamMember
# The mapping of SQF types to our types
STRING_TO_TYPE = {
    'array': Array,
    'scalar': Number,
    'bool': Boolean,
    'code': Code,
    'string': String,
    'text': String,
    'namespace': Namespace,
    'config': Config,
    'location': Location,
    'object': Object,
    'group': Group,
    'member': TeamMember,  # team_member gets split
    'control': Control,
    'display': Display,
    'exception': TryType,
    'for': ForType,
    'if': IfType,
    'switch': SwitchType,
    'while': WhileType,
    'with': WithType,
    'side': Side,
    'task': Task,
    'script': Script,
    'nan': Number,
    'nothing': Nothing,
    'netobject': NetObject,
    'any': Type,
    'diary': DiaryReport  # diary_record gets split
}
# the argument the type is initialized with
TYPE_TO_INIT_ARGS = {
    Namespace: "'missionNamespace'",
}
# The return type "ANY" means that we do not know it, so it is Nothing()
STRING_TO_TYPE_RETURN = STRING_TO_TYPE.copy()
STRING_TO_TYPE_RETURN['any'] = Anything
# Operators whose declared return type in the upstream header is known to be
# wrong; these hardcoded types take precedence over the parsed declaration.
WRONG_RETURN_TYPES = {
    'attachedto': Object,
    'getclientstatenumber': Number,
    'handgunmagazine': Array,
    'ammoonpylon': Anything
}
def _parse_type_names(type_names):
# Alternative types separated by _ char
types_names = type_names.split('_')
# Never care about NaN type (covered by scalar)
if 'nan' in types_names:
types_names.remove('nan')
# Remove parts of types that also get split
if 'team' in types_names:
types_names.remove('team')
if 'record' in types_names:
types_names.remove('record')
return types_names
def _parse_return_type_names(return_type_names):
    """Map a return-type fragment (e.g. 'scalar_nothing') to a Type class.

    Multiple alternatives collapse to 'any' (unknown); a sole 'nothing'
    alternative is dropped first so it never masks a real type.
    """
    return_type_names = _parse_type_names(return_type_names)
    if len(return_type_names) > 1 and 'nothing' in return_type_names:
        return_type_names.remove('nothing')
    if not return_type_names:
        # _parse_type_names strips 'nan'; a bare 'nan' return would leave an
        # empty list and the old code raised IndexError — fall back to scalar.
        return_type_name = 'nan'
    elif len(return_type_names) > 1:
        return_type_name = 'any'
    else:
        return_type_name = return_type_names[0]
    return STRING_TO_TYPE_RETURN[return_type_name]
# Upstream header that declares every SQF operator pointer; each 'static'
# line encodes an operator's name and argument/return types.
url = 'https://raw.githubusercontent.com/intercept/intercept/master/src/' \
      'client/headers/client/sqf_pointers_declaration.hpp'
# Download once and split into lines for the parsing loop below.
data = urllib.request.urlopen(url).read().decode('utf-8').split('\n')
# Parse each declaration into the source text of an Expression constructor.
expressions = []
for line in data:
    # Only the 'static ...' pointer declarations carry operator signatures.
    if not line.startswith('static '):
        continue
    sections = line.split('__')
    num_sections = len(sections)
    # 4/5/6 sections correspond to nullary/unary/binary operators.
    if num_sections not in [4, 5, 6]:
        # typo fix: was "Could't read line"
        print('Couldn\'t read line: ', line)
        continue
    # Name always comes first
    op_name = sections[1]
    # Return type always comes last (some operators have incorrect values for whatever reason)
    if op_name in WRONG_RETURN_TYPES:
        return_type = WRONG_RETURN_TYPES[op_name]
    else:
        return_type = _parse_return_type_names(sections[num_sections-1][:-1])
    # Adds any relevant initialization argument for the return type
    init_code = ''
    # Number of sections allows us to classify the operation
    if num_sections == 6:
        if return_type in TYPE_TO_INIT_ARGS:
            init_code = ', action=lambda lhs, rhs, i: %s' % TYPE_TO_INIT_ARGS[return_type]
        # Binary: emit one expression per (lhs, rhs) type combination.
        for lhs_type_name in _parse_type_names(sections[2]):
            lhs_type = STRING_TO_TYPE[lhs_type_name]
            for rhs_type_name in _parse_type_names(sections[3]):
                rhs_type = STRING_TO_TYPE[rhs_type_name]
                expression = 'BinaryExpression(' \
                             '{lhs_type}, ' \
                             'Keyword(\'{keyword}\'), ' \
                             '{rhs_type}, {return_type}{init_code})'.format(
                    lhs_type=lhs_type.__name__,
                    keyword=op_name,
                    rhs_type=rhs_type.__name__,
                    return_type=return_type.__name__,
                    init_code=init_code
                )
                expressions.append(expression)
    elif num_sections == 5:
        if return_type in TYPE_TO_INIT_ARGS:
            init_code = ', action=lambda rhs, i: %s' % TYPE_TO_INIT_ARGS[return_type]
        # Unary: one expression per rhs type alternative.
        for rhs_type_name in _parse_type_names(sections[2]):
            rhs_type = STRING_TO_TYPE[rhs_type_name]
            expression = 'UnaryExpression(' \
                         'Keyword(\'{keyword}\'), ' \
                         '{rhs_type}, {return_type}{init_code})'.format(
                keyword=op_name,
                rhs_type=rhs_type.__name__,
                return_type=return_type.__name__,
                init_code=init_code
            )
            expressions.append(expression)
    else:
        if return_type in TYPE_TO_INIT_ARGS:
            init_code = ', action=lambda i: %s' % TYPE_TO_INIT_ARGS[return_type]
        # Nullary: the operator takes no arguments.
        expression = 'NullExpression(' \
                     'Keyword(\'{keyword}\'), ' \
                     '{return_type}{init_code})'.format(
            keyword=op_name,
            return_type=return_type.__name__,
            init_code=init_code
        )
        expressions.append(expression)
preamble = r'''# This file is generated automatically by `build_database.py`. Change it there.
from sqf.expressions import BinaryExpression, UnaryExpression, NullExpression
from sqf.types import Keyword, Type, Nothing, Anything, String, Code, Array, Number, Boolean, Namespace, \
Object, Config, Script, Control, Group, Display, Side, Task, Location, NetObject, DiaryReport, TeamMember
from sqf.interpreter_types import WhileType, \
ForType, SwitchType, IfType, TryType, WithType'''
# Expressions that use symbols are hardcoded since they aren't present in the parsed file
symbols = r'''
EXPRESSIONS = [
BinaryExpression(Array, Keyword('#'), Number, Anything),
BinaryExpression(Number, Keyword('!='), Number, Boolean),
BinaryExpression(String, Keyword('!='), String, Boolean),
BinaryExpression(Object, Keyword('!='), Object, Boolean),
BinaryExpression(Group, Keyword('!='), Group, Boolean),
BinaryExpression(Side, Keyword('!='), Side, Boolean),
BinaryExpression(String, Keyword('!='), String, Boolean),
BinaryExpression(Config, Keyword('!='), Config, Boolean),
BinaryExpression(Display, Keyword('!='), Display, Boolean),
BinaryExpression(Control, Keyword('!='), Control, Boolean),
BinaryExpression(TeamMember, Keyword('!='), TeamMember, Boolean),
BinaryExpression(NetObject, Keyword('!='), NetObject, Boolean),
BinaryExpression(Task, Keyword('!='), Task, Boolean),
BinaryExpression(Location, Keyword('!='), Location, Boolean),
BinaryExpression(Number, Keyword('%'), Number, Number),
BinaryExpression(Boolean, Keyword('&&'), Boolean, Boolean),
BinaryExpression(Boolean, Keyword('&&'), Code, Boolean),
BinaryExpression(Number, Keyword('*'), Number, Number),
BinaryExpression(Number, Keyword('+'), Number, Number),
BinaryExpression(String, Keyword('+'), String, String),
BinaryExpression(Array, Keyword('+'), Array, Array),
BinaryExpression(Number, Keyword('-'), Number, Number),
BinaryExpression(Array, Keyword('-'), Array, Array),
BinaryExpression(Number, Keyword('/'), Number, Number),
BinaryExpression(Config, Keyword('/'), String, Config),
BinaryExpression(SwitchType, Keyword(':'), Code, Nothing),
BinaryExpression(Number, Keyword('<'), Number, Boolean),
BinaryExpression(Number, Keyword('<='), Number, Boolean),
BinaryExpression(Number, Keyword('=='), Number, Boolean),
BinaryExpression(String, Keyword('=='), String, Boolean),
BinaryExpression(Object, Keyword('=='), Object, Boolean),
BinaryExpression(Group, Keyword('=='), Group, Boolean),
BinaryExpression(Side, Keyword('=='), Side, Boolean),
BinaryExpression(String, Keyword('=='), String, Boolean),
BinaryExpression(Config, Keyword('=='), Config, Boolean),
BinaryExpression(Display, Keyword('=='), Display, Boolean),
BinaryExpression(Control, Keyword('=='), Control, Boolean),
BinaryExpression(TeamMember, Keyword('=='), TeamMember, Boolean),
BinaryExpression(NetObject, Keyword('=='), NetObject, Boolean),
BinaryExpression(Task, Keyword('=='), Task, Boolean),
BinaryExpression(Location, Keyword('=='), Location, Boolean),
BinaryExpression(Number, Keyword('>'), Number, Boolean),
BinaryExpression(Number, Keyword('>='), Number, Boolean),
BinaryExpression(Config, Keyword('>>'), String, Config),
BinaryExpression(Number, Keyword('^'), Number, Number),
BinaryExpression(Boolean, Keyword('||'), Boolean, Boolean),
BinaryExpression(Boolean, Keyword('||'), Code, Boolean),
UnaryExpression(Keyword('!'), Boolean, Boolean),
UnaryExpression(Keyword('+'), Number, Number),
UnaryExpression(Keyword('+'), Array, Array),
UnaryExpression(Keyword('-'), Number, Number),
'''
# Emit the generated module: preamble, hardcoded symbol expressions, then
# the expressions parsed from the upstream header, closing the list.
with open('sqf/database.py', 'w') as f:
    f.write(preamble + '\n\n')
    f.write(symbols + ' ')
    f.write(',\n '.join(expressions))
    f.write('\n]\n')
|
|
from fabric.api import task, local, env, settings
from fabric.operations import put
import cuisine
from cuisine import run, sudo, package_ensure
import re
import socket
from server.config import get_settings_prod
import paramiko
from fabric.contrib.files import sed, append
# System packages provisioned on the target Ubuntu host.
PACKAGES_TO_INSTALL = [
    'libpq-dev',
    'python-dev',
    'nginx-extras',
    'python-virtualenv',
    'supervisor',
    'postgresql-9.1',
    'fail2ban',
    'vim',
    'tmux',
]
DD_API_KEY = '1234567890abcd'  # Datadog api key.
AWS_HOST = 'api.mobackapp.com'  # Amazon EC2 URL.
APP_NAME = 'moback'  # Name of the application.
USER_NAME = APP_NAME  # Username in which the app should be running.
# Say Hello
@task
def hello_world():
    '''Just a test task to test connectivity'''
    run('echo "Hello World"')
# Vagrant
@task
def vagrant():
    '''Point fabric's env at the local Vagrant box via `vagrant ssh-config`.'''
    box_host = '127.0.0.1'
    box_port = '2222'
    ssh_config = local('vagrant ssh-config', capture=True)
    for line in ssh_config.split('\n'):
        hostname_m = re.search(r'Hostname\s+(\S+)', line)
        user_m = re.search(r'User\s+(\S+)', line)
        port_m = re.search(r'Port\s+(\S+)', line)
        key_m = re.search(r'IdentityFile\s(.+)', line)
        if hostname_m:
            box_host = hostname_m.group(1)
        elif user_m:
            env.user = user_m.group(1)
        elif port_m:
            box_port = port_m.group(1)
        elif key_m:
            env.key_filename = key_m.group(1)
    env.hosts = ['{0}:{1}'.format(box_host, box_port)]
@task
def aws():
    '''AWS config'''
    # Target the production EC2 host over standard SSH with the app keypair.
    host = AWS_HOST
    port = 22
    env.hosts = ['{0}:{1}'.format(host, port)]
    env.user = 'ubuntu'
    env.key_filename = 'moback.pem'
    # NOTE: Python 2 print statement — this fabfile targets Python 2.
    print 'HOST: ' + AWS_HOST
def update():
    """Update package list"""
    # warn_only: a transient apt failure should not abort the whole deploy.
    with settings(linewise=True, warn_only=True):
        sudo('apt-get -y update')
def upgrade():
    """Upgrade packages"""
    with settings(linewise=True, warn_only=True):
        sudo('aptitude -y upgrade')
def ensure_packages():
    """Install every package in PACKAGES_TO_INSTALL (no-op if present)."""
    for pkg in PACKAGES_TO_INSTALL:
        package_ensure(pkg)
def install_datadog():
    """Install the Datadog agent via its official one-line installer."""
    sudo('DD_API_KEY=%s bash -c "$(curl'
         ' -L http://dtdg.co/agent-install-ubuntu)"' % (DD_API_KEY, ))
def create_user():
    """Create the application user and add it to the www-data group."""
    cuisine.user_ensure(
        USER_NAME, home='/home/%s' % USER_NAME, shell='/bin/bash')
    cuisine.group_user_ensure('www-data', USER_NAME)
def create_virtualenv():
    """Create the app's virtualenv at ~/ENV unless it already exists."""
    if not cuisine.dir_exists('/home/%s/ENV' % USER_NAME):
        sudo('virtualenv -q --distribute '
             '/home/%s/ENV' % (
                 USER_NAME), user=USER_NAME)
def copy_source():
    '''archive the git source and copy it'''
    # Archive the current git HEAD locally, then upload and unpack on the host.
    local('git archive $(git symbolic-ref HEAD 2>/dev/null)'
          ' | bzip2 > /tmp/%s.tar.bz2' % APP_NAME)
    remote_filename = '/tmp/%s.tar.bz2' % APP_NAME
    code_dir = '/home/%s/CODE' % APP_NAME
    sudo('rm -rf %s' % code_dir)
    if cuisine.file_exists(remote_filename):
        sudo('rm %s' % remote_filename)
    # NOTE(review): local and remote tarball paths are intentionally the same
    # '/tmp/<app>.tar.bz2' — verify cuisine.file_upload argument order (remote, local).
    cuisine.file_upload(
        remote_filename, '/tmp/%s.tar.bz2' % APP_NAME)
    with cuisine.mode_sudo():
        run('mkdir -p %s' % code_dir)
        cuisine.file_attribs(remote_filename)
        run('tar jxf %s -C %s' % (remote_filename, code_dir))
        run('rm %s' % (remote_filename,))
def install_python_reqs():
    """Install requirements.txt inside the app virtualenv, as the app user."""
    sudo('. /home/%s/ENV/bin/activate &&'
         ' pip install -r /home/%s/CODE/requirements.txt'
         % (USER_NAME, USER_NAME), user=USER_NAME)
def copy_confs():
    """Upload nginx/supervisor/postgres config files and fix their ownership."""
    nginx_conf_path = '/etc/nginx/sites-available/default'
    sprvsr_conf_path = '/etc/supervisor/conf.d/moback.conf'
    pghba_path = '/etc/postgresql/9.1/main/pg_hba.conf'
    put('confs/nginx_default.conf',
        nginx_conf_path, True)
    # 0644 is a Python 2 octal literal (another py2 marker in this file).
    put('confs/supervisord.conf', sprvsr_conf_path,
        True, mode=0644)
    put('confs/pg_hba.conf', pghba_path, True)
    with cuisine.mode_sudo():
        cuisine.file_attribs(
            nginx_conf_path, owner='root', group='root')
        cuisine.file_attribs(
            sprvsr_conf_path, owner='root', group='root')
        cuisine.file_attribs(
            pghba_path, owner='root', group='root')
def setup_db():
    """Create the app's postgres role, grants, database and schema.

    Each statement is best-effort: reruns are expected to fail on
    already-existing users/databases, so failures are deliberately ignored.
    """
    cfg = get_settings_prod()
    dbname = cfg["db"]["database"]
    dbuser = cfg["db"]["username"]
    dbpasswd = cfg["db"]["password"]
    queries = [
        "psql -d postgres -c "
        "\"CREATE USER {dbuser} WITH PASSWORD"
        " '{dbpasswd}';\"",
        "psql -d postgres -c"
        " \"GRANT SELECT, INSERT, UPDATE, DELETE ON ALL "
        "TABLES IN SCHEMA public TO {dbuser};\"",
        "psql -d postgres -c"
        " \"GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA"
        " public TO {dbuser};\"",
        "psql -d postgres -c "
        "\"GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public"
        " TO {dbuser};\"",
        "psql -c \"CREATE DATABASE"
        " {dbname} WITH owner={dbuser};\"",
    ]
    for q in queries:
        try:
            query = q.format(dbuser=dbuser, dbpasswd=dbpasswd, dbname=dbname)
            sudo(query, user='postgres')
        except Exception:
            # was a bare `except:` — that also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort semantics but narrower.
            pass
    try:
        sudo("psql -d moback -f /home/%s/CODE/extras/schema.sql" % USER_NAME,
             user='moback')
    except Exception:
        pass
def reboot():
    """Reboot the remote host immediately."""
    sudo("shutdown -r 0")
def is_host_up(host, counter=0):
print('%d : Attempting connection to host: %s' %
(counter, host))
original_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(1)
host_up = True
try:
paramiko.Transport((host, 22))
except Exception, e:
host_up = False
print('%s down, %s' % (host, e))
finally:
socket.setdefaulttimeout(original_timeout)
return host_up
def ping_untill_starts():
    """Block until env.host answers on port 22 (name typo kept: public API)."""
    counter = 0
    while not is_host_up(env.host, counter):
        counter += 1
@task
def restart_server():
    # Restart the app process managed by supervisor.
    sudo('supervisorctl restart moback')
@task
def deploy_code():
    '''Deploy latest commit and restart uwsgi'''
    copy_source()
    restart_server()
@task
def configure_ebs():
    '''configure ebs volume for postgres; before executing this method run:
    sudo su -
    yes | mkfs -t ext3 /dev/xvdf
    mkdir /data
    mount /dev/xvdf /data
    '''
    # Move the postgres data directory onto the EBS volume mounted at /data.
    with settings(warn_only=True):
        with cuisine.mode_sudo():
            run('service postgresql stop')
            run('rm -rf /data/lost+found')
            run('mkdir /data/lib/')
            run('mkdir /data/lib/PostgreSQL')
            run('mkdir /data/lib/PostgreSQL/9.1')
            run('mkdir /data/lib/PostgreSQL/9.1/main')
            run('cp -r /var/lib/postgresql/9.1/main /data/lib/PostgreSQL/9.1')
            run('chown -R postgres:postgres /data/lib/PostgreSQL')
            # postgres refuses to start unless the data dir is 0700.
            run('chmod 0700 /data/lib/PostgreSQL/9.1/main')
            run('touch /data/moback.log')
            run('chmod 777 /data/moback.log')
    # Persist the mount and point postgres at the new data directory.
    append(
        '/etc/fstab',
        '/dev/xvdf /data ext3 defaults 0 0', use_sudo=True)
    sed('/etc/postgresql/9.1/main/postgresql.conf',
        '/var/lib/postgresql/9.1/main',
        '/data/lib/PostgreSQL/9.1/main',
        use_sudo=True)
    append(
        '/etc/postgresql/9.1/main/postgresql.conf',
        "listen_addresses = '*'", True)
    with settings(warn_only=True):
        with cuisine.mode_sudo():
            run('service postgresql start')
            run('service postgresql reload')
@task
def deploy_full(production='false'):
    '''Fresh and full deploy'''
    # NOTE(review): the `production` parameter is currently unused — confirm
    # whether environment-specific behavior was intended here.
    update()
    upgrade()
    ensure_packages()
    install_datadog()
    create_user()
    create_virtualenv()
    copy_source()
    install_python_reqs()
    copy_confs()
    setup_db()
    configure_ebs()
    restart_server()
    reboot()
    ping_untill_starts()
@task
def ssh():
    '''Open up ssh console'''
    try:
        if env.host == AWS_HOST:
            local('ssh -i moback.pem ubuntu@' + AWS_HOST)
    except Exception:
        # was a bare `except:`; keep best-effort behavior (a non-zero ssh
        # exit is not a deploy error) without masking KeyboardInterrupt.
        pass
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
"""All feets base tests"""
# =============================================================================
# IMPORTS
# =============================================================================
from feets import (
Extractor,
ExtractorContractError,
FeatureNotFound,
FeatureSet,
FeatureSpace,
FeatureSpaceError,
extractors,
register_extractor,
)
from matplotlib import axes
import numpy as np
import pandas as pd
import pytest
from pytest_unordered import unordered
# =============================================================================
# CONSTANTS
# =============================================================================
TIME_SERIE = dict.fromkeys(extractors.DATAS)
# =============================================================================
# RESULTS
# =============================================================================
def test_invalid_feature():
    """Building a FeatureSet for an unknown feature raises FeatureNotFound."""
    with pytest.raises(FeatureNotFound):
        FeatureSet(
            extractors={},
            timeserie=TIME_SERIE,
            values={"fail": 1},
            features_names=["Fail"],
        )
def test_iter(foo_extractor):
    """Unpacking a FeatureSet yields its (names, values) pair."""
    feature_set = FeatureSet(
        extractors={"foo": foo_extractor},
        timeserie=TIME_SERIE,
        values={"foo": 1},
        features_names=["foo"],
    )
    names, vals = feature_set
    assert list(names) == unordered(["foo"])
    assert list(vals) == unordered([1])
def test_getitem(foo_extractor):
    """Indexing returns the stored value; unknown names raise KeyError."""
    feature_set = FeatureSet(
        extractors={"foo": foo_extractor},
        timeserie=TIME_SERIE,
        values={"foo": 1},
        features_names=["foo"],
    )
    assert feature_set["foo"] == 1
    with pytest.raises(KeyError):
        feature_set["faaa"]
def test_as_array(foo_extractor):
    """as_arrays() splits the set into parallel (names, values) arrays."""
    feature_set = FeatureSet(
        extractors={"foo": foo_extractor},
        timeserie=TIME_SERIE,
        values={"foo": 1},
        features_names=["foo"],
    )
    names, vals = feature_set.as_arrays()
    assert list(names) == unordered(["foo"])
    assert list(vals) == unordered([1])
def test_as_dict(foo_extractor):
    """as_dict() maps feature names to their values."""
    feature_set = FeatureSet(
        extractors={"foo": foo_extractor},
        timeserie=TIME_SERIE,
        values={"foo": 1},
        features_names=["foo"],
    )
    assert feature_set.as_dict() == {"foo": 1}
def test_as_dataframe(foo_extractor):
    """as_dataframe() returns a single-row frame of the feature values."""
    feature_set = FeatureSet(
        extractors={"foo": foo_extractor},
        timeserie=TIME_SERIE,
        values={"foo": 1},
        features_names=["foo"],
    )
    expected_frame = pd.DataFrame([{"foo": 1.0}])
    assert feature_set.as_dataframe().equals(expected_frame)
def test_repr(foo_extractor):
    """repr()/str() list the features and the non-empty timeserie keys."""
    ts = TIME_SERIE.copy()
    ts.update(time=1, error=2)
    feature_set = FeatureSet(
        extractors={"foo": foo_extractor},
        timeserie=ts,
        values={"foo": 1},
        features_names=["foo"],
    )
    expected = "FeatureSet(features=<foo>, timeserie=<time, error>)"
    assert repr(feature_set) == str(feature_set) == expected
def test_plot(foo_extractor):
    """plot() returns a matplotlib Axes for the requested feature."""
    feature_set = FeatureSet(
        extractors={"foo": foo_extractor},
        timeserie=TIME_SERIE,
        values={"foo": 1},
        features_names=["foo"],
    )
    assert isinstance(feature_set.plot("foo"), axes.Axes)
# =============================================================================
# SPACE
# =============================================================================
def test_extract():
    """Amplitude extraction over a fixed magnitude sample matches the
    known reference value."""
    space = FeatureSpace(only=["Amplitude"])
    # Fixed fixture: 30 pseudo-random magnitudes; the expected amplitude
    # below was computed once from this exact array.
    magnitude = np.array(
        [
            0.46057565,
            0.51372940,
            0.70136533,
            0.21454228,
            0.54792300,
            0.33433717,
            0.44879870,
            0.55571062,
            0.24388037,
            0.44793366,
            0.30175873,
            0.88326381,
            0.12208977,
            0.37088649,
            0.59457310,
            0.74705894,
            0.24551664,
            0.36009236,
            0.80661981,
            0.04961063,
            0.87747311,
            0.97388975,
            0.95775496,
            0.34195989,
            0.54201036,
            0.87854618,
            0.07388174,
            0.21543205,
            0.59295337,
            0.56771493,
        ]
    )
    features, values = space.extract(magnitude=magnitude)
    assert len(features) == 1 and features[0] == "Amplitude"
    np.testing.assert_allclose(values[features == "Amplitude"], 0.45203809)
def test_features_order(mock_extractors_register):
    """Extracted values keep a deterministic order regardless of input order."""
    @register_extractor
    class ReturnSame(Extractor):
        # Echo extractor: 'Same' is always the first input magnitude.
        data = ["magnitude"]
        features = ["Same"]
        def fit(self, magnitude):
            return {"Same": magnitude[0]}
    space = FeatureSpace(only=["Same"])
    # Repeat with many shuffles to make ordering bugs likely to surface.
    for _ in range(200):
        data = np.unique(np.random.randint(1, 1000, 10))
        np.random.shuffle(data)
        features, values_col = space.extract(magnitude=data)
        np.testing.assert_array_equal(data[0], values_col)
def test_features_kwargs():
    """Extractor kwargs are validated against the extractor's parameters."""
    # A known parameter is accepted.
    FeatureSpace(only=["CAR_sigma"], CAR={"minimize_method": "powell"})
    # An unknown parameter is rejected ...
    with pytest.raises(ExtractorContractError):
        FeatureSpace(only=["CAR_sigma"], CAR={"o": 1})
    # ... even when it is mixed with a valid one.
    with pytest.raises(ExtractorContractError):
        FeatureSpace(
            only=["CAR_sigma"], CAR={"minimize_method": "powell", "o": 1}
        )
def test_remove_by_dependencies(mock_extractors_register):
    """Excluding a feature must also drop every feature that depends on it."""
    @register_extractor
    class A(Extractor):
        data = ["magnitude"]
        features = ["test_a", "test_a2"]
        def fit(self, *args):
            pass
    @register_extractor
    class B1(Extractor):
        data = ["magnitude"]
        features = ["test_b1"]
        dependencies = ["test_a"]
        def fit(self, *args):
            pass
    @register_extractor
    class C(Extractor):
        data = ["magnitude"]
        features = ["test_c"]
        def fit(self, *args):
            pass
    # test_a is excluded directly and test_b1 falls with its dependency;
    # test_a2 (same extractor, independent feature) and test_c survive.
    fs = FeatureSpace(exclude=["test_a"])
    assert list(fs.features_) == unordered(["test_c", "test_a2"])
def test_with_optional_data(mock_extractors_register):
    """An extractor with optional data is selectable without that data."""
    @register_extractor
    class A(Extractor):
        data = ["magnitude", "time"]
        optional = ["magnitude"]
        features = ["test_a"]
        def fit(self, *args):
            pass
    # "time" alone satisfies A because "magnitude" is optional.
    fs = FeatureSpace(data=["time"])
    assert len(fs.features_extractors_) == 1
    assert isinstance(list(fs.features_extractors_)[0], A)
    fs = FeatureSpace(data=["time", "magnitude"])
    assert len(fs.features_extractors_) == 1
    assert isinstance(list(fs.features_extractors_)[0], A)
    # "magnitude" alone cannot: "time" is required.
    with pytest.raises(FeatureSpaceError):
        fs = FeatureSpace(data=["magnitude"])
def test_with_optional_data_call(mock_extractors_register):
    """Optional data is forwarded when given and passed as None when omitted."""
    @register_extractor
    class A(Extractor):
        data = ["magnitude", "time"]
        optional = ["magnitude"]
        features = ["time_arg", "magnitude_arg"]
        def fit(self, time, magnitude):
            return {"time_arg": time, "magnitude_arg": magnitude}
    time, magnitude = [1, 2, 3], [4, 5, 6]
    fs = FeatureSpace(data=["time"])
    result = fs.extract(time=time, magnitude=magnitude)
    np.testing.assert_array_equal(result["time_arg"], time)
    np.testing.assert_array_equal(result["magnitude_arg"], magnitude)
    result = fs.extract(time=time)
    np.testing.assert_array_equal(result["time_arg"], time)
    # The omitted optional input must arrive as None.
    np.testing.assert_array_equal(result["magnitude_arg"], None)
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Tyler Williams on 2010-04-25.
Utility functions to support the Echo Nest web API interface.
"""
import urllib
import urllib2
import httplib
import config
import logging
import socket
import re
import time
import os
import subprocess
import traceback
from types import StringType, UnicodeType
try:
import json
except ImportError:
import simplejson as json
logger = logging.getLogger(__name__)
# (prefix, name) pairs for every Echo Nest identifier type.
TYPENAMES = (
    ('AR', 'artist'),
    ('SO', 'song'),
    ('RE', 'release'),
    ('TR', 'track'),
    ('PE', 'person'),
    ('DE', 'device'),
    ('LI', 'listener'),
    ('ED', 'editor'),
    ('TW', 'tweditor'),
    ('CA', 'catalog'),
)
# Matches foreign ids like "<catalog>:<typename>:<id>" with an optional "^score".
foreign_regex = re.compile(r'^.+?:(%s):([^^]+)\^?([0-9\.]+)?' % r'|'.join(n[1] for n in TYPENAMES))
# Matches short ids: 2-letter type prefix + 16 alphanumerics, optional "^score".
short_regex = re.compile(r'^((%s)[0-9A-Z]{16})\^?([0-9\.]+)?' % r'|'.join(n[0] for n in TYPENAMES))
# Matches long "music://id.echonest.com/.../<TY>/<TY><id>" identifiers.
long_regex = re.compile(r'music://id.echonest.com/.+?/(%s)/(%s)[0-9A-Z]{16}\^?([0-9\.]+)?' % (r'|'.join(n[0] for n in TYPENAMES), r'|'.join(n[0] for n in TYPENAMES)))
# Default headers attached to every outgoing API request.
headers = [('User-Agent', 'Pyechonest %s' % (config.__version__,))]
class MyBaseHandler(urllib2.BaseHandler):
    """urllib2 handler that logs and timestamps every outgoing request."""

    def default_open(self, request):
        # Optionally trace the call, stamp the start time (read later by
        # MyErrorProcessor), then let the next handler open the request.
        if config.TRACE_API_CALLS:
            logger.info("%s", request.get_full_url())
        request.start_time = time.time()
        return None
class MyErrorProcessor(urllib2.HTTPErrorProcessor):
    """HTTP error processor that lets 2xx/4xx/5xx responses through.

    The Echo Nest API encodes its own error details in the JSON body of
    4xx/5xx responses, so those must reach the caller unchanged instead of
    being converted into urllib2 HTTPErrors.
    """

    def http_response(self, request, response):
        code = response.code
        if config.TRACE_API_CALLS:
            logger.info("took %2.2fs: (%i)" % (time.time() - request.start_time, code))
        # Explicit floor division: same behavior on Python 2, and still
        # correct if the file is ever ported to Python 3.
        if code // 100 in (2, 4, 5):
            return response
        # BUG FIX: the original dropped the parent's result, so non-2/4/5xx
        # responses (e.g. redirects) made this method return None.
        return urllib2.HTTPErrorProcessor.http_response(self, request, response)
# Shared opener: times/logs each request and lets API error bodies through.
opener = urllib2.build_opener(MyBaseHandler(), MyErrorProcessor())
opener.addheaders = headers
class EchoNestException(Exception):
    """
    Parent exception class. Catches API and URL/HTTP errors.
    """

    def __init__(self, code, message, headers):
        # A missing code means we know nothing about the failure at all.
        if code is None:
            code = -1
            message = 'Echo Nest Unknown Error'
        text = message if message is not None else 'Echo Nest Error %d' % code
        super(EchoNestException, self).__init__(text,)
        self.headers = headers
        self.code = code
class EchoNestAPIError(EchoNestException):
    """
    API Specific Errors.
    """

    def __init__(self, code, message, headers, http_status):
        # Append the transport-level status, when known, to the API error.
        http_status_message_part = ' [HTTP %d]' % http_status if http_status else ''
        self.http_status = http_status
        # NOTE: the trailing comma deliberately makes this a 1-tuple,
        # preserving the message shape callers of the original saw.
        formatted_message = ('Echo Nest API Error %d: %s%s' %
                             (code, message, http_status_message_part),)
        super(EchoNestAPIError, self).__init__(code, formatted_message, headers)
class EchoNestIOError(EchoNestException):
    """
    URL and HTTP errors.

    ``error`` is the underlying reason (e.g. ``IOError.reason``); ``headers``
    defaults to the module-level request headers.
    """
    def __init__(self, code=None, error=None, headers=headers):
        # BUG FIX: the message used to interpolate ``headers`` instead of the
        # actual ``error``, which made IO errors unreadable.
        formatted_message = ('Echo Nest IOError: %s' % error,)
        super(EchoNestIOError, self).__init__(code, formatted_message, headers)
def get_successful_response(raw_json):
    """Parse an API HTTP response and return its payload dict.

    ``raw_json`` is a file-like response object; its JSON body must contain
    a ``response.status`` block with ``code`` and ``message``. A non-zero
    status code is surfaced as :class:`EchoNestAPIError`; on success the
    status block is stripped from the returned dict.
    """
    if hasattr(raw_json, 'headers'):
        headers = raw_json.headers
    else:
        headers = {'Headers':'No Headers'}
    if hasattr(raw_json, 'getcode'):
        http_status = raw_json.getcode()
    else:
        http_status = None
    raw_json = raw_json.read()
    try:
        response_dict = json.loads(raw_json)
        status_dict = response_dict['response']['status']
        code = int(status_dict['code'])
        message = status_dict['message']
        if (code != 0):
            # The API reported a failure; propagate its code and message.
            # (EchoNestAPIError is not caught by the clause below.)
            raise EchoNestAPIError(code, message, headers, http_status)
        del response_dict['response']['status']
        return response_dict
    except (ValueError, KeyError):
        # ROBUSTNESS FIX: also catch KeyError -- a body that parses as JSON
        # but lacks the expected status block is just as "unknown" as one
        # that does not parse at all (previously the KeyError escaped raw).
        logger.debug(traceback.format_exc())
        raise EchoNestAPIError(-1, "Unknown error.", headers, http_status)
def callm(method, param_dict, POST=False, socket_timeout=None, data=None):
    """
    Call the api!
    Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params
    put them in a list.
    ** note, if we require 2.6, we can get rid of this timeout munging.

    Returns the parsed response dict from get_successful_response(); raises
    EchoNestAPIError for API failures and EchoNestIOError for network errors.
    """
    try:
        param_dict['api_key'] = config.ECHO_NEST_API_KEY
        param_list = []
        if not socket_timeout:
            socket_timeout = config.CALL_TIMEOUT
        # Flatten params: a list value becomes one (key, subval) pair per
        # element, None values are dropped, unicode is UTF-8 encoded so
        # urlencode does not choke on non-ASCII.
        for key,val in param_dict.iteritems():
            if isinstance(val, list):
                param_list.extend( [(key,subval) for subval in val] )
            elif val is not None:
                if isinstance(val, unicode):
                    val = val.encode('utf-8')
                param_list.append( (key,val) )
        params = urllib.urlencode(param_list)
        # Temporarily install the per-call timeout process-wide (pre-2.6
        # urllib2 has no per-request timeout -- see docstring note).
        orig_timeout = socket.getdefaulttimeout()
        socket.setdefaulttimeout(socket_timeout)
        if(POST):
            if (not method == 'track/upload') or ((method == 'track/upload') and 'url' in param_dict):
                """
                this is a normal POST call
                """
                url = 'http://%s/%s/%s/%s' % (config.API_HOST, config.API_SELECTOR,
                                            config.API_VERSION, method)
                if data is None:
                    data = ''
                data = urllib.urlencode(data)
                # The extra params ride in the POST body alongside `data`.
                data = "&".join([data, params])
                f = opener.open(url, data=data)
            else:
                """
                upload with a local file is special, as the body of the request is the content of the file,
                and the other parameters stay on the URL
                """
                url = '/%s/%s/%s?%s' % (config.API_SELECTOR, config.API_VERSION,
                                        method, params)
                if ':' in config.API_HOST:
                    host, port = config.API_HOST.split(':')
                else:
                    host = config.API_HOST
                    port = 80
                if config.TRACE_API_CALLS:
                    logger.info("%s/%s" % (host+':'+str(port), url,))
                # Raw httplib request: the file content is the entire body.
                conn = httplib.HTTPConnection(host, port = port)
                conn.request('POST', url, body = data, headers = dict([('Content-Type', 'application/octet-stream')]+headers))
                f = conn.getresponse()
        else:
            """
            just a normal GET call
            """
            url = 'http://%s/%s/%s/%s?%s' % (config.API_HOST, config.API_SELECTOR, config.API_VERSION,
                                            method, params)
            f = opener.open(url)
        # NOTE(review): if opener.open() raises, the default timeout is never
        # restored -- a try/finally would be safer; confirm before changing.
        socket.setdefaulttimeout(orig_timeout)
        # try/except
        response_dict = get_successful_response(f)
        return response_dict
    except IOError, e:
        # Map urllib2/socket failures onto the package's exception hierarchy.
        if hasattr(e, 'reason'):
            raise EchoNestIOError(error=e.reason)
        elif hasattr(e, 'code'):
            raise EchoNestIOError(code=e.code)
        else:
            raise
def oauthgetm(method, param_dict, socket_timeout=None):
    """Issue a GET call to the API signed with OAuth 1.0 (HMAC-SHA1)."""
    try:
        import oauth2 # lazy import this so oauth2 is not a hard dep
    except ImportError:
        raise Exception("You must install the python-oauth2 library to use this method.")
    """
    Call the api! With Oauth!
    Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params
    put them in a list.
    ** note, if we require 2.6, we can get rid of this timeout munging.
    """
    def build_request(url):
        # Sign the full URL (query string included) with the consumer key.
        params = {
            'oauth_version': "1.0",
            'oauth_nonce': oauth2.generate_nonce(),
            'oauth_timestamp': int(time.time())
        }
        consumer = oauth2.Consumer(key=config.ECHO_NEST_CONSUMER_KEY, secret=config.ECHO_NEST_SHARED_SECRET)
        params['oauth_consumer_key'] = config.ECHO_NEST_CONSUMER_KEY
        req = oauth2.Request(method='GET', url=url, parameters=params)
        signature_method = oauth2.SignatureMethod_HMAC_SHA1()
        req.sign_request(signature_method, consumer, None)
        return req
    param_dict['api_key'] = config.ECHO_NEST_API_KEY
    param_list = []
    if not socket_timeout:
        socket_timeout = config.CALL_TIMEOUT
    # Same param flattening as callm(): lists fan out, Nones drop, unicode
    # is UTF-8 encoded for urlencode.
    for key,val in param_dict.iteritems():
        if isinstance(val, list):
            param_list.extend( [(key,subval) for subval in val] )
        elif val is not None:
            if isinstance(val, unicode):
                val = val.encode('utf-8')
            param_list.append( (key,val) )
    params = urllib.urlencode(param_list)
    # Temporarily install the per-call timeout process-wide (see docstring).
    orig_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(socket_timeout)
    """
    just a normal GET call
    """
    url = 'http://%s/%s/%s/%s?%s' % (config.API_HOST, config.API_SELECTOR, config.API_VERSION,
                                    method, params)
    req = build_request(url)
    f = opener.open(req.to_url())
    socket.setdefaulttimeout(orig_timeout)
    # try/except
    response_dict = get_successful_response(f)
    return response_dict
def postChunked(host, selector, fields, files):
    """
    Attempt to replace postMultipart() with nearly-identical interface.
    (The files tuple no longer requires the filename, and we only return
    the response body.)
    Uses the urllib2_file.py originally from
    http://fabien.seisen.org which was also drawn heavily from
    http://code.activestate.com/recipes/146306/ .
    This urllib2_file.py is more desirable because of the chunked
    uploading from a file pointer (no need to read entire file into
    memory) and the ability to work from behind a proxy (due to its
    basis on urllib2).
    """
    # Regular fields travel on the query string; the file payload is the body.
    query = urllib.urlencode(fields)
    url = 'http://%s%s?%s' % (host, selector, query)
    response = urllib2.urlopen(url, files)
    body = response.read()
    # Close every file pointer we were handed before returning.
    for _key, fp in files:
        fp.close()
    return body
def fix(x):
    """Coerce the (possibly unicode) keys of dict *x* to plain strings."""
    # Downstream code passes these dicts as **kwargs, which requires str keys.
    assert isinstance(x, dict)
    return {str(k): v for (k, v) in x.iteritems()}
def map_idspace(input_idspace):
    """Collapse Spotify's region-specific idspaces onto plain 'spotify'."""
    if input_idspace in ('spotify-WW', 'spotifyv2-ZZ'):
        return 'spotify'
    return input_idspace
|
|
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for voiceover services."""
from __future__ import annotations
from core import feconf
from core.constants import constants
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import opportunity_services
from core.domain import question_services
from core.domain import rights_manager
from core.domain import story_domain
from core.domain import story_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.domain import voiceover_services
from core.platform import models
from core.tests import test_utils
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
class VoiceoverApplicationServicesUnitTests(test_utils.GenericTestBase):
    """Provides testing of the voiceover services."""

    # Credentials for the user who files voiceover applications in tests.
    APPLICANT_USERNAME = 'applicant'
    APPLICANT_EMAIL = 'applicant@example.com'

    def setUp(self):
        """Create admin/owner/applicant users, two published explorations,
        and a published topic+story wired to exploration '0' so that '0'
        surfaces as a voiceover opportunity.
        """
        super(VoiceoverApplicationServicesUnitTests, self).setUp()
        self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.APPLICANT_EMAIL, self.APPLICANT_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.applicant_id = self.get_user_id_from_email(self.APPLICANT_EMAIL)
        self.applicant = user_services.get_user_actions_info(self.applicant_id)
        self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.TOPIC_ID = 'topic'
        self.STORY_ID = 'story'
        self.USER_ID = 'user'
        self.SKILL_ID = 'skill'
        self.QUESTION_ID = question_services.get_new_question_id()
        # Two published explorations with ids '0' and '1'; only '0' is
        # attached to the story below, so only it becomes an opportunity.
        explorations = [self.save_new_valid_exploration(
            '%s' % i,
            self.owner_id,
            title='title %d' % i,
            category='category%d' % i,
            end_state_name='End State',
            correctness_feedback_enabled=True
        ) for i in range(2)]
        for exp in explorations:
            self.publish_exploration(self.owner_id, exp.id)
        topic = topic_domain.Topic.create_default_topic(
            self.TOPIC_ID, 'topic', 'abbrev', 'description')
        topic.thumbnail_filename = 'thumbnail.svg'
        topic.thumbnail_bg_color = '#C6DCDA'
        topic.subtopics = [
            topic_domain.Subtopic(
                1, 'Title', ['skill_id_1'], 'image.svg',
                constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0], 21131,
                'dummy-subtopic-three')]
        topic.next_subtopic_id = 2
        topic_services.save_new_topic(self.owner_id, topic)
        topic_services.publish_topic(self.TOPIC_ID, self.admin_id)
        story = story_domain.Story.create_default_story(
            self.STORY_ID, 'A story', 'Description', self.TOPIC_ID,
            'a-story')
        story_services.save_new_story(self.owner_id, story)
        topic_services.add_canonical_story(
            self.owner_id, self.TOPIC_ID, self.STORY_ID)
        topic_services.publish_story(
            self.TOPIC_ID, self.STORY_ID, self.admin_id)
        # Link exploration '0' to the story node, creating the opportunity.
        story_services.update_story(
            self.owner_id, self.STORY_ID, [story_domain.StoryChange({
                'cmd': 'add_story_node',
                'node_id': 'node_1',
                'title': 'Node1',
            }), story_domain.StoryChange({
                'cmd': 'update_story_node_property',
                'property_name': 'exploration_id',
                'node_id': 'node_1',
                'old_value': None,
                'new_value': '0'
            })], 'Changes.')
        self.add_user_role(
            self.CURRICULUM_ADMIN_USERNAME, feconf.ROLE_ID_VOICEOVER_ADMIN)

    def test_voiceover_application_creation(self):
        """A created application shows up in the applicant's submissions."""
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        self.assertEqual(user_voiceover_applications, [])
        voiceover_services.create_new_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '',
            'audio_file.mp3', self.applicant_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(user_voiceover_applications[0].target_id, '0')

    def test_get_voiceover_application_from_model_with_invalid_type_raise_error(
            self):
        """Loading a stored model with an unknown target_type must raise."""
        suggestion_models.GeneralVoiceoverApplicationModel(
            id='application_id',
            target_type='exploration',
            target_id='0',
            status='review',
            author_id='author_id',
            final_reviewer_id=None,
            language_code='en',
            filename='filename.mp3',
            content='<p>content</p>',
            rejection_message=None).put()
        voiceover_application_model = (
            suggestion_models.GeneralVoiceoverApplicationModel.get_by_id(
                'application_id'))
        # Corrupt the storage model directly; the domain layer must reject it.
        voiceover_application_model.target_type = 'invalid_type'
        voiceover_application_model.update_timestamps()
        voiceover_application_model.put()
        with self.assertRaisesRegexp(
            Exception,
            'Invalid target type for voiceover application: invalid_type'):
            voiceover_services.get_voiceover_application_by_id('application_id')

    def test_newly_created_voiceover_application_have_in_review_status(self):
        """A fresh application starts out in the in-review state."""
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        self.assertEqual(user_voiceover_applications, [])
        voiceover_services.create_new_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '',
            'audio_file.mp3', self.applicant_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_IN_REVIEW)

    def test_get_reviewable_voiceover_applications(self):
        """Admins see submitted applications in their review queue."""
        voiceover_applications = (
            voiceover_services.get_reviewable_voiceover_applications(
                self.admin_id))
        self.assertEqual(voiceover_applications, [])
        voiceover_services.create_new_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '',
            'audio_file.mp3', self.applicant_id)
        voiceover_applications = (
            voiceover_services.get_reviewable_voiceover_applications(
                self.admin_id))
        self.assertEqual(len(voiceover_applications), 1)
        self.assertEqual(
            voiceover_applications[0].status,
            suggestion_models.STATUS_IN_REVIEW)

    def test_accept_application_assigns_role_to_entity(self):
        """Accepting an application grants the applicant voiceover rights."""
        voiceover_services.create_new_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '',
            'audio_file.mp3', self.applicant_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_IN_REVIEW)
        voiceover_services.accept_voiceover_application(
            user_voiceover_applications[0].voiceover_application_id,
            self.admin_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id, status=suggestion_models.STATUS_ACCEPTED))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_ACCEPTED)
        # The applicant can now voiceover the target exploration.
        exploration_rights = rights_manager.get_exploration_rights('0')
        can_voiceover = rights_manager.check_can_voiceover_activity(
            self.applicant, exploration_rights)
        self.assertTrue(can_voiceover)

    def test_accept_application_removes_exploration_voiceover_opportunity(self):
        """Accepting an application removes its voiceover opportunity."""
        voiceover_services.create_new_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '',
            'audio_file.mp3', self.applicant_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_IN_REVIEW)
        opportunities, _, more = (
            opportunity_services.get_voiceover_opportunities('en', None))
        self.assertEqual(len(opportunities), 1)
        voiceover_services.accept_voiceover_application(
            user_voiceover_applications[0].voiceover_application_id,
            self.admin_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id, status=suggestion_models.STATUS_ACCEPTED))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_ACCEPTED)
        opportunities, _, more = (
            opportunity_services.get_voiceover_opportunities('en', None))
        self.assertEqual(len(opportunities), 0)
        self.assertFalse(more)

    def test_accept_application_removes_rejectes_other_similar_applications(
            self):
        """Accepting one application auto-rejects competing ones.

        NOTE: 'rejectes' in the method name is a historical typo, kept to
        avoid renaming the public test identifier.
        """
        # Two different users apply for the same opportunity.
        voiceover_services.create_new_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '',
            'audio_file.mp3', self.applicant_id)
        voiceover_services.create_new_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '',
            'audio_file.mp3', self.owner_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_IN_REVIEW)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.owner_id))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_IN_REVIEW)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        voiceover_services.accept_voiceover_application(
            user_voiceover_applications[0].voiceover_application_id,
            self.admin_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id, status=suggestion_models.STATUS_ACCEPTED))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_ACCEPTED)
        # The competing application is rejected with an explanatory message.
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.owner_id))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_REJECTED)
        self.assertEqual(
            user_voiceover_applications[0].rejection_message,
            'We have to reject your application as another application for the '
            'same opportunity got accepted.')

    def test_author_accepts_own_voiceover_application_raise_exception(self):
        """Applicants must not be able to accept their own applications."""
        voiceover_services.create_new_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '',
            'audio_file.mp3', self.applicant_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        with self.assertRaisesRegexp(
            Exception, 'Applicants are not allowed to review their own '
            'voiceover application.'):
            voiceover_services.accept_voiceover_application(
                user_voiceover_applications[0].voiceover_application_id,
                self.applicant_id)

    def test_reject_voiceover_application(self):
        """Rejection marks the application but keeps the opportunity open."""
        voiceover_services.create_new_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '',
            'audio_file.mp3', self.applicant_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_IN_REVIEW)
        opportunities, _, _ = (
            opportunity_services.get_voiceover_opportunities('en', None))
        self.assertEqual(len(opportunities), 1)
        voiceover_services.reject_voiceover_application(
            user_voiceover_applications[0].voiceover_application_id,
            self.admin_id, 'Rejection message')
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        self.assertEqual(len(user_voiceover_applications), 1)
        self.assertEqual(
            user_voiceover_applications[0].status,
            suggestion_models.STATUS_REJECTED)
        opportunities, _, _ = (
            opportunity_services.get_voiceover_opportunities('en', None))
        self.assertEqual(len(opportunities), 1)

    def test_author_rejects_own_voiceover_application_raise_exception(self):
        """Applicants must not be able to reject their own applications."""
        voiceover_services.create_new_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en', '',
            'audio_file.mp3', self.applicant_id)
        user_voiceover_applications = (
            voiceover_services.get_user_submitted_voiceover_applications(
                self.applicant_id))
        with self.assertRaisesRegexp(
            Exception, 'Applicants are not allowed to review their own '
            'voiceover application.'):
            voiceover_services.reject_voiceover_application(
                user_voiceover_applications[0].voiceover_application_id,
                self.applicant_id, 'Testing rejection')

    def test_get_text_to_create_voiceover_application(self):
        """The application text is the exploration's init-state content."""
        exp_services.update_exploration(
            self.owner_id, '0', [
                exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                    'property_name': (
                        exp_domain.STATE_PROPERTY_CONTENT),
                    'state_name': 'Introduction',
                    'new_value': {
                        'content_id': 'content',
                        'html': '<p>The new content to voiceover</p>'
                    }
                })], 'Adds new content to init state')
        content = voiceover_services.get_text_to_create_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'en')
        self.assertEqual(content, '<p>The new content to voiceover</p>')

    def test_get_text_to_create_voiceover_application_in_diff_language(self):
        """For a non-default language the written translation is returned."""
        exp_services.update_exploration(
            self.owner_id, '0', [
                exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                    'property_name': (
                        exp_domain.STATE_PROPERTY_CONTENT),
                    'state_name': 'Introduction',
                    'new_value': {
                        'content_id': 'content',
                        'html': '<p>The new content to voiceover</p>'
                    }
                }), exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
                    'state_name': 'Introduction',
                    'content_id': 'content',
                    'language_code': 'hi',
                    'content_html': '<p>The new content to voiceover</p>',
                    'translation_html': '<p>Translation in Hindi</p>',
                    'data_format': 'html'
                })], 'Adds new content to init state and its translation')
        content = voiceover_services.get_text_to_create_voiceover_application(
            feconf.ENTITY_TYPE_EXPLORATION, '0', 'hi')
        self.assertEqual(content, '<p>Translation in Hindi</p>')

    def test_get_text_to_create_voiceover_application_for_invalid_type(self):
        """An unknown target type is rejected with an explicit error."""
        with self.assertRaisesRegexp(
            Exception, 'Invalid target type: invalid_type'):
            voiceover_services.get_text_to_create_voiceover_application(
                'invalid_type', '0', 'hi')
|
|
# encoding: utf-8
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.views.generic import ListView
from django.utils.datastructures import MultiValueDictKeyError
from django import forms
from django.core.urlresolvers import reverse
from twython import Twython, TwythonAuthError, TwythonError
import datetime as date
from oauth_hook import OAuthHook
import requests
from urlparse import parse_qs
import json
from pytz import timezone
from news.models import *
from news.utils import from4id, get_absolute_uri, format_email
from settings import FACEBOOK_APP_SECRET, FACEBOOK_APP_ID, MESSAGE_FORMAT
import logging
logger = logging.getLogger(__file__)
class NewsForm(forms.Form):
    """Form used on the index/new pages for entering a news item.

    NOTE(review): ``forms.Form`` ignores the inner ``Meta`` entirely, so no
    model fields are generated; ``forms.ModelForm`` looks intended. Confirm
    against the templates before changing -- they may rely on the current
    (field-less) behavior.
    """
    class Meta:
        model = News
        # default_email_to presumably comes from the ``news.models`` star
        # import -- verify.
        emailto = default_email_to
class NewsList(ListView):
    """List view showing unpublished news (newest first), then published."""
    #context_object_name="news_list"
    template_name = 'news/index.html'
    def get_queryset(self):
        # 'modifed' (sic) matches the model field's actual spelling.
        q1 = News.objects.filter(publishid__isnull=True).order_by('-modifed')
        q2 = News.objects.filter(publishid__isnull=False)
        # Combined queryset: unpublished items first, then published ones.
        return q1 | q2
def index(request, **kwargs):
    """Render the news index with an empty form and all unpublished items."""
    news_items = News.objects.filter(publishid__isnull=True).order_by('-modifed')
    context = {'form': NewsForm(), 'news_list': news_items}
    return render_to_response('news/index.html', context,
                              context_instance=RequestContext(request))
def edit(request, **kwargs):
    """Create, update, delete, publish or unpublish a News item.

    POST with a valid ``newsid`` updates that item; POST without a resolvable
    id creates a new one. The ``delete``/``unpublish``/``publish`` POST keys
    select the corresponding action. GET falls through to the edit form.
    """
    notice = None
    if 'mid' in kwargs:
        mid = kwargs['mid']
    else:
        mid = None
    if request.method == 'POST':
        tofacebook = False
        twitter = False
        # NOTE(review): ``email`` is assigned but never used below.
        email = False
        try:
            subject = request.POST['subject']
            message = request.POST['message']
            creator = request.POST['name']
            toemail = request.POST['emailto']
            email_message = request.POST['email']
            if 'tofacebook' in request.POST:
                tofacebook = True
            if 'totwitter' in request.POST:
                twitter = True
            # NOTE(review): ``id`` shadows the builtin of the same name.
            id = request.POST['newsid']
        except MultiValueDictKeyError as error:
            # A required POST field is missing: re-render the form.
            return new(request, **kwargs)
        try:
            news = News.objects.get(id__exact=int(id))
        except (News.DoesNotExist, ValueError) as error:
            # Unknown or non-numeric id: treat the submission as a new item.
            news = News(subject=subject, message=message, creator=creator,
                        toemail=toemail, totwitter=twitter,
                        tofacebook = tofacebook)
            try:
                news.full_clean()
            except ValidationError:
                kwargs['error'] = 'Unvalid xml detected!!!!'
                # NOTE(review): ``form`` is undefined here -- this line would
                # raise NameError; ``news`` was probably intended. Confirm.
                kwargs['form'] = form
                return new(request, **kwargs)
            news.save()
            #return HttpResponseRedirect(reverse('new', news.id,
            #                            kwargs={'message' : message}))
            #return render_to_response('news/new.html',{'form': news,
            #                          'message' : notice},
            #                          context_instance=RequestContext(request))
            kwargs['mid'] = news.id
            return new(request, **kwargs)
        if 'delete' in request.POST:
            news.delete()
            return redirect('list')
        if 'unpublish' in request.POST:
            news.unpublish()
            return new(request, **kwargs)
        # Existing item: apply the submitted values before validation.
        news.subject = subject
        news.message = message.strip()
        news.creator = creator
        news.tofacebook = tofacebook
        news.totwitter = twitter
        news.toemail = toemail
        try:
            news.full_clean()
        except ValidationError:
            kwargs['error'] = 'Unvalid xml detected!!!!'
            kwargs['form'] = news
            return new(request, **kwargs)
        news.save()
        notice = 'Saved successfully!\n'
        if 'publish' in request.POST:
            news.publish(email_message=email_message)
        kwargs['message'] = notice
        kwargs['form'] = news
        return new(request, **kwargs)
    if mid:
        # GET with an id: 404 for unknown items, then show the edit form.
        news = get_object_or_404(News, id__exact=mid)
        return new(request, **kwargs)
    else:
        return redirect(new)
def new(request, **kwargs):
    """Render the news edit form, loading the item when ``mid`` is given.

    Callers may pre-populate ``kwargs`` (``form``, ``log``, ``message``,
    ``error``); anything missing is filled in here before rendering.
    """
    if 'mid' in kwargs:
        mid = kwargs['mid']
        if 'log' not in kwargs:
            # Publishing history for this item, newest entry first.
            kwargs['log'] = Logs.objects.filter(news_id__exact=mid).order_by(
                '-date').all()
        if 'form' not in kwargs:
            kwargs['form'] = get_object_or_404(News, id__exact=mid)
    if 'form' not in kwargs:
        # No item context at all: present an empty form.
        kwargs['form'] = NewsForm()
    return render_to_response('news/new.html', kwargs,
                              context_instance=RequestContext(request))
def FBRenewToken(request, **kwargs):
    """Send the user through Facebook's OAuth dialog to mint a new token.

    See https://developers.facebook.com/docs/authentication/applications/
    and https://developers.facebook.com/docs/authentication/ .
    """
    redirect_uri = '%stoken/' % get_absolute_uri(request)
    url = ('https://www.facebook.com/dialog/oauth?client_id=%s'
           '&scope=publish_actions&redirect_uri=%s&response_type=token'
           % (FACEBOOK_APP_ID, redirect_uri))
    return redirect(url)
def FBGetToken(request, **kwargs):
    """Collect and store a Facebook access token.

    GET shows the confirmation page (token/expiry from the OAuth redirect);
    POST verifies the token against the Graph API and stores it.
    """
    token = ''
    if request.method == 'GET' and 'access_token' in request.GET:
        token = request.GET['access_token']
    if request.method == 'POST' and 'token' in request.POST and 'expires' in request.POST:
        token = request.POST['token']
        expires = request.POST['expires']
        # Validate the token by asking the Graph API who it belongs to.
        userid_req = requests.get("https://graph.facebook.com/me?access_token=%s" % token)
        #print("%s" % userid_req.text)
        userid_json = json.loads(userid_req.text)
        if 'id' in userid_json:
            userid = userid_json['id']
        else:
            # NOTE(review): ``_`` (gettext) is not imported in this module --
            # this branch would raise NameError. Confirm and add the import.
            return render_to_response('news/facebook.html',
                                      {'token' : token, 'expires' : expires,
                                       'message' : _("Failed to add Facebook token!")},
                                      context_instance=RequestContext(request))
        # addFBToken presumably comes from the ``news.models`` star import.
        addFBToken(token, expires, userid)
        return render_to_response('news/facebook.html',
                                  {'token' : token, 'expires' : expires,
                                   'message': 'Added successfully!'},
                                  context_instance=RequestContext(request))
    else:
        # Default expiry: 60 days from now (TIME_ZONE/datetime presumably
        # come from the star imports -- verify).
        expires = datetime.now(tz=timezone(TIME_ZONE)) + date.timedelta(days=60)
        if 'expires_in' in request.GET:
            # NOTE(review): request.GET values are strings; timedelta(seconds=str)
            # raises TypeError -- an int() conversion looks missing. Confirm.
            expires = datetime.now(tz=timezone(TIME_ZONE)) + date.timedelta(seconds=request.GET['expires_in'])
        return render_to_response('news/facebook.html', {'token' : token,
                                  'expires' : expires},
                                  context_instance=RequestContext(request))
def getTwitterToken(request, **kwargs):
    """Start the Twitter OAuth dance and redirect the user to authorize.

    The request token is stashed in the session for add_twitter_token().
    """
    twitter = Twython(
        twitter_token = TWITTER_CONSUMER_KEY,
        twitter_secret = TWITTER_CONSUMER_SECRET,
        callback_url = request.build_absolute_uri(
            reverse('news.views.add_twitter_token'))
    )
    try:
        auth_props = twitter.get_authentication_tokens()
    except TwythonAuthError as e:
        kwargs['error'] = e
        return render_to_response('news/twitter_error.html', kwargs,
                                  context_instance=RequestContext(request))
    # Saved for the callback view, which needs the same request token pair.
    request.session['request_token'] = auth_props
    return HttpResponseRedirect(auth_props['auth_url'])
def add_twitter_token(request, **kwargs):
    """OAuth callback: exchange the verifier for tokens and store them.

    Only GET (Twitter's redirect) is accepted; anything else is a 404.
    """
    if request.method == 'GET':
        # Rebuild the client with the request token saved by getTwitterToken().
        twitter = Twython(
            twitter_token = TWITTER_CONSUMER_KEY,
            twitter_secret = TWITTER_CONSUMER_SECRET,
            oauth_token = request.session['request_token']['oauth_token'],
            oauth_token_secret = request.session['request_token']['oauth_token_secret'],
        )
        oauth_verifier = request.GET['oauth_verifier']
        try:
            authorized_tokens = twitter.get_authorized_tokens(oauth_verifier)
            if 'oauth_token' not in authorized_tokens:
                # Twitter returned an error payload instead of tokens.
                raise TwythonError(authorized_tokens)
            oauth_token = authorized_tokens['oauth_token']
            oauth_secret = authorized_tokens['oauth_token_secret']
            # addTwitterToken presumably comes from the ``news.models`` star
            # import -- verify.
            tw = addTwitterToken(token=oauth_token, secret=oauth_secret)
        except TwythonError as e:
            kwargs['error'] = e
            return render_to_response('news/twitter_error.html', kwargs,
                                      context_instance=RequestContext(request))
        return render_to_response('news/twitter.html', {},
                                  context_instance=RequestContext(request))
    else:
        raise Http404
def message_json(request, **kwargs):
    """Render a formatted plain-text email from POSTed fields.

    Requires POST with 'message' and 'subject'; 'creator' is optional and
    defaults to the empty string.  Anything else raises 404.
    """
    if request.method != 'POST':
        raise Http404
    post = request.POST
    if 'message' not in post or 'subject' not in post:
        raise Http404
    body = format_email({
        'message': post['message'],
        'subject': post['subject'],
        'creator': post.get('creator', ""),
    })
    return HttpResponse(body, mimetype="text/plain")
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""A Relay implementation of graph packing."""
from tvm import relay
from tvm.relay import op, transform
from tvm.relay import ExprMutator
def run_opt_pass(expr, opt_pass):
    """Apply a single Relay pass to *expr* and return the transformed result.

    Wraps the expression in a fresh module, runs the pass, and unwraps the
    "main" function — or just its body when *expr* was not a Function.
    """
    assert isinstance(opt_pass, transform.Pass)
    mod = opt_pass(relay.Module.from_expr(expr))
    main = mod["main"]
    if isinstance(expr, relay.Function):
        return main
    return main.body
def _to_shape(shape):
return tuple(int(sh) for sh in shape)
def _pack_batch_channel(data, dshape, bfactor, cfactor):
    """Fold the batch and channel axes of NCHW data into packed NCHWnc form."""
    batch, channel = int(dshape[0]), int(dshape[1])
    assert batch % bfactor == 0
    assert channel % cfactor == 0
    height, width = int(dshape[2]), int(dshape[3])
    reshaped = op.reshape(
        data,
        newshape=(batch // bfactor, bfactor,
                  channel // cfactor, cfactor,
                  height, width))
    # NnCcHW -> NCHWnc
    return op.transpose(reshaped, axes=(0, 2, 4, 5, 1, 3))
def _unpack_batch_channel(data, old_shape):
    """Inverse of ``_pack_batch_channel``: restore the original layout."""
    # NCHWnc -> NnCcHW, then flatten back to old_shape.
    unpacked = op.transpose(data, axes=(0, 4, 1, 5, 2, 3))
    return op.reshape(unpacked, newshape=old_shape)
def _pack_weight(data, dshape, cfactor):
    """Pack a 4-D conv2d weight into the packed OIHWoi-style layout."""
    assert len(dshape) == 4
    out_ch, in_ch = int(dshape[0]), int(dshape[1])
    assert out_ch % cfactor == 0
    assert in_ch % cfactor == 0
    reshaped = op.reshape(
        data,
        newshape=(out_ch // cfactor, cfactor,
                  in_ch // cfactor, cfactor,
                  int(dshape[2]), int(dshape[3])))
    # OoIiHW -> OIHWoi
    return op.transpose(reshaped, axes=(0, 2, 4, 5, 1, 3))
def _pack_weight_conv2d_transpose(data, dshape, cfactor):
    """Pack a 4-D conv2d_transpose weight into its packed layout."""
    shape = _to_shape(dshape)
    assert len(shape) == 4
    assert shape[0] % cfactor == 0
    assert shape[1] % cfactor == 0
    reshaped = op.reshape(
        data,
        newshape=(shape[0] // cfactor, cfactor,
                  shape[1] // cfactor, cfactor,
                  shape[2], shape[3]))
    # Note the axis order differs from _pack_weight: the first two logical
    # axes are swapped for the transposed-convolution kernel.
    return op.transpose(reshaped, axes=(2, 0, 4, 5, 3, 1))
def _pack_const(data, dshape, dtype, bfactor, cfactor):
    """Pack a 3-D (channel, height, width) constant so it broadcasts
    against packed activation data."""
    shape = _to_shape(dshape)
    assert len(shape) == 3
    channels, height, width = shape
    assert channels % cfactor == 0
    reshaped = op.reshape(
        data,
        newshape=(channels // cfactor, cfactor, height, width, 1))
    packed = op.transpose(reshaped, axes=(0, 2, 3, 4, 1))
    # Broadcast the trailing singleton batch axis up to bfactor lanes.
    return op.broadcast_to(
        packed,
        shape=(channels // cfactor, height, width, bfactor, cfactor))
def _get_shape(node):
    """Return *node*'s inferred (checked) shape as a tuple of ints."""
    checked = node.checked_type
    return _to_shape(checked.shape)
class ExprPack(ExprMutator):
    """Visitor to perform graph packing on an AST.

    Rewrites calls between the ``annotation.bitpack_start`` and
    ``annotation.bitpack_end`` markers so data and weights flow through the
    packed NCHWnc / packed-kernel layouts.

    Fixes over the previous revision (all behavior-preserving):

    * removed the redundant ``if self.start_pack:`` nested inside the
      conv2d_transpose branch — it is always True there because the whole
      operator section is already guarded by ``if self.start_pack:``;
    * removed the equally redundant ``self.start_pack and`` prefixes on the
      bias_add and cast branches;
    * dropped unused locals (``data_shape`` in both conv branches and
      ``w_lanes`` in the conv2d_transpose branch) and the dead
      ``else: pass`` on the bitpack_end case.
    """

    def __init__(self, bfactor, cfactor, weight_bits):
        self.bfactor = bfactor
        self.cfactor = cfactor
        self.weight_bits = weight_bits
        # True while we are between bitpack_start and bitpack_end markers.
        self.start_pack = False
        # Cache the operators the algorithm matches against.
        self.bitpack_start = op.op.get('annotation.bitpack_start')
        self.bitpack_end = op.op.get('annotation.bitpack_end')
        self.conv2d = op.op.get("nn.conv2d")
        self.conv2d_transpose = op.op.get("nn.conv2d_transpose")
        self.add = op.op.get("add")
        self.multiply = op.op.get("multiply")
        self.bias_add = op.op.get("nn.bias_add")
        self.number_of_conv2d = 0
        super().__init__()

    def visit_call(self, call):
        """Rewrite one call node, packing operands when inside the
        bitpack_start/bitpack_end region."""
        # First visit the children.
        oshape = _get_shape(call)
        odtype = call.checked_type.dtype
        input_types = [arg.checked_type for arg in call.args]
        args = [self.visit(arg) for arg in call.args]

        # Start and stop cases.
        if call.op == self.bitpack_start:
            assert not self.start_pack
            self.start_pack = True
            return _pack_batch_channel(args[0], oshape, self.bfactor,
                                       self.cfactor)
        elif call.op == self.bitpack_end:
            if self.start_pack:
                self.start_pack = False
                data = args[0]
                data_shape = _get_shape(call.args[0])
                return _unpack_batch_channel(data, data_shape)
            # A bitpack_end outside a packing region falls through to the
            # generic rebuild at the bottom.

        if self.start_pack:
            # Operator cases
            if call.op == self.conv2d and odtype == 'int32':
                self.number_of_conv2d += 1
                assert 8 % self.weight_bits == 0
                w_lanes = 8 // self.weight_bits
                data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
                kernel_layout = "OIHW%do%di" % (self.cfactor, self.cfactor)
                data, weight = args
                kernel_shape = _to_shape(input_types[1].shape)
                kernel = _pack_weight(weight, kernel_shape, self.cfactor)
                # insert bit packing when necessary
                if w_lanes != 1:
                    assert 8 % w_lanes == 0
                    kernel = op.bitpack(kernel, lanes=w_lanes)
                return op.nn.conv2d(
                    data,
                    kernel,
                    strides=call.attrs.strides,
                    padding=call.attrs.padding,
                    dilation=call.attrs.dilation,
                    groups=call.attrs.groups,
                    channels=call.attrs.channels,
                    kernel_size=call.attrs.kernel_size,
                    data_layout=data_layout,
                    kernel_layout=kernel_layout,
                    out_dtype=call.attrs.out_dtype)
            elif call.op == self.conv2d_transpose and odtype == 'int32':
                self.number_of_conv2d += 1
                # Same weight-bit sanity check as conv2d; note no bitpack
                # call is inserted here (preserved from the original code).
                assert 8 % self.weight_bits == 0
                data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
                kernel_layout = "IOHW%di%do" % (self.cfactor, self.cfactor)
                data, weight = args
                kernel_shape = _to_shape(input_types[1].shape)
                kernel = _pack_weight_conv2d_transpose(weight, kernel_shape,
                                                       self.cfactor)
                return op.nn.conv2d_transpose(
                    data,
                    kernel,
                    strides=call.attrs.strides,
                    padding=call.attrs.padding,
                    dilation=call.attrs.dilation,
                    groups=call.attrs.groups,
                    channels=call.attrs.channels,
                    kernel_size=call.attrs.kernel_size,
                    data_layout=data_layout,
                    kernel_layout=kernel_layout,
                    output_padding=call.attrs.output_padding,
                    out_dtype=call.attrs.out_dtype)
            elif call.op == self.add and \
                    tuple(input_types[0].shape) == tuple(input_types[1].shape):
                # Elementwise add of two identically-shaped (already packed)
                # tensors needs no rewrite; fall through to generic rebuild.
                pass
            elif call.op == self.add and len(input_types[1].shape) == 3:
                data, const = args
                const = _pack_const(const,
                                    _to_shape(input_types[1].shape),
                                    input_types[1].dtype,
                                    self.bfactor,
                                    self.cfactor)
                return relay.Call(self.add, [data, const])
            elif call.op == self.multiply and \
                    tuple(input_types[0].shape) == tuple(input_types[1].shape):
                pass
            elif call.op == self.multiply and len(input_types[1].shape) == 3:
                data, const = args
                const = _pack_const(const,
                                    _to_shape(input_types[1].shape),
                                    input_types[1].dtype,
                                    self.bfactor,
                                    self.cfactor)
                return relay.Call(self.multiply, [data, const])
            elif call.op == self.bias_add:
                # bias_add becomes a packed-constant add.
                data, bias = args
                bias = _pack_const(bias,
                                   _to_shape(input_types[1].shape),
                                   input_types[1].dtype,
                                   self.bfactor,
                                   self.cfactor)
                return relay.Call(self.add, [data, bias])
            elif call.op == op.op.get('cast') and \
                    input_types[0].dtype == 'int32':
                cast = relay.Call(op.op.get('cast'), [args[0]], call.attrs)
                return relay.Call(op.op.get('copy'), [cast])

        # Default: rebuild the call with visited children.
        return relay.Call(
            self.visit(call.op),
            args,
            call.attrs)
class BT(Exception):
    """Internal control-flow marker raised by ``get_subgraph``'s recursion
    when the stop operator is encountered in the let-chain."""
def get_subgraph(expr, start_name, stop_name):
    """ We assume stop_name only appears once for simplicity.
        This constraint will be lifted in the future.
        bitpack_start and bitpack_end are both inclusive.

    Converts *expr* to A-normal form, then walks the let-chain inserting
    an ``annotation.bitpack_start`` marker around the first call to
    *start_name* and an ``annotation.bitpack_end`` marker around the call
    to *stop_name*, and finally converts back to graph normal form.
    """
    bitpack_start = op.op.get('annotation.bitpack_start')
    bitpack_end = op.op.get('annotation.bitpack_end')
    anf = run_opt_pass(expr, transform.ToANormalForm())

    def _recursion(anf, start_found, stop_found):
        """ Helper to obtain the subgraph.

        Recurses down the let-chain; uses the BT exception as an upward
        signal that *stop_name* was seen, so the caller one frame up can
        wrap that value in bitpack_end without recursing further.
        """
        if isinstance(anf, relay.expr.Function):
            return relay.expr.Function(anf.params,
                                       _recursion(anf.body, start_found, stop_found),
                                       anf.ret_type, anf.type_params, anf.attrs)
        elif isinstance(anf, relay.expr.Let):
            value = anf.value
            if isinstance(value, relay.expr.Call):
                if isinstance(value.op, relay.op.Op):
                    # Only the FIRST occurrence of start_name is wrapped.
                    if value.op.name == start_name and not start_found:
                        value = relay.expr.Call(bitpack_start, [value])
                        start_found = True
                    elif value.op.name == stop_name:
                        # Signal the caller: this let-binding is the stop.
                        raise BT()
            try:
                return relay.expr.Let(anf.var, value, _recursion(anf.body, start_found, stop_found))
            except BT:
                # The NEXT binding held stop_name: wrap the current value
                # in bitpack_end and keep the remaining body untouched.
                assert start_found
                assert not stop_found
                stop_found = True
                value = relay.expr.Call(bitpack_end, [value])
                # todo: check anf.body has no more stop_name beside that one
                return relay.expr.Let(anf.var, value, anf.body)
        else:
            # Leaf of the chain: reachable only when both markers were
            # placed (stop_found is threaded by value, so reaching here
            # without them indicates a malformed graph).
            assert start_found
            assert stop_found
            return anf
    annotated = _recursion(anf, False, False)
    return run_opt_pass(annotated, transform.ToGraphNormalForm())
def graph_pack(expr,
               bfactor,
               cfactor,
               weight_bits,
               start_name="nn.max_pool2d",
               stop_name="nn.global_avg_pool2d"):
    """Pack the graph into batch&channel packed format.

    Parameters
    ----------
    expr : relay.Expr
        The input program.

    bfactor : int
        The packing factor in batch.

    cfactor : int
        The packing factor in channel.

    weight_bits : int
        The bit-width of the weights.

    start_name : str, optional
        Start packing from this known node onward.

    stop_name : str, optional
        Stop packing at this known node.

    Returns
    -------
    expr : Expr
        The transformed expression.
    """
    assert isinstance(expr, relay.Function)
    # Isolate the region between start_name and stop_name, then re-infer
    # types so the packer can read checked shapes on every node.
    subgraph = get_subgraph(expr, start_name, stop_name)
    subgraph = run_opt_pass(subgraph, transform.InferType())
    packer = ExprPack(bfactor, cfactor, weight_bits)
    packed = packer.visit(subgraph)
    # Every bitpack_start must have been matched by a bitpack_end.
    assert not packer.start_pack
    return run_opt_pass(packed, transform.InferType())
|
|
# sqlalchemy/pool.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import time
import traceback
import weakref
from . import exc, log, event, interfaces, util
from .util import queue as sqla_queue
from .util import threading, memoized_property, \
chop_traceback
from collections import deque
# Module-level registry mapping DB-API modules to their _DBProxy wrappers;
# populated by manage() and emptied by clear_managers().
proxies = {}
def manage(module, **params):
    """Return a proxy for a DB-API module that automatically
    pools connections.

    Given a DB-API 2.0 module and pool management parameters, returns
    a proxy for the module that will automatically pool connections,
    creating new connection pools for each distinct set of connection
    arguments sent to the decorated module's connect() function.

    :param module: a DB-API 2.0 database module

    :param poolclass: the class used by the pool module to provide
     pooling.  Defaults to :class:`.QueuePool`.

    :param \*\*params: will be passed through to *poolclass*

    """
    # Reuse an existing proxy when one has already been registered for
    # this module; otherwise build one and record it.
    if module in proxies:
        return proxies[module]
    return proxies.setdefault(module, _DBProxy(module, **params))
def clear_managers():
    """Remove all current DB-API 2.0 managers.

    Every registered proxy is closed (disposing its pools and
    connections) and the registry is emptied.
    """
    for proxy in proxies.values():
        proxy.close()
    proxies.clear()
# Canonical symbols for the Pool "reset_on_return" behavior: roll back,
# commit, or do nothing when a connection is returned to the pool.
reset_rollback = util.symbol('reset_rollback')
reset_commit = util.symbol('reset_commit')
reset_none = util.symbol('reset_none')
class _ConnDialect(object):
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`.Pool` is combined with an :class:`.Engine`,
the :class:`.Engine` replaces this with its own
:class:`.Dialect`.
"""
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
class Pool(log.Identified):
    """Abstract base class for connection pools."""

    # Default placeholder dialect; an Engine swaps in the real one.
    _dialect = _ConnDialect()

    def __init__(self,
                 creator, recycle=-1, echo=None,
                 use_threadlocal=False,
                 logging_name=None,
                 reset_on_return=True,
                 listeners=None,
                 events=None,
                 _dispatch=None,
                 _dialect=None):
        """
        Construct a Pool.

        :param creator: a callable function that returns a DB-API
          connection object. The function will be called with
          parameters.

        :param recycle: If set to non -1, number of seconds between
          connection recycling, which means upon checkout, if this
          timeout is surpassed the connection will be closed and
          replaced with a newly opened connection. Defaults to -1.

        :param logging_name: String identifier which will be used within
          the "name" field of logging records generated within the
          "sqlalchemy.pool" logger. Defaults to a hexstring of the object's
          id.

        :param echo: If True, connections being pulled and retrieved
          from the pool will be logged to the standard output, as well
          as pool sizing information. Echoing can also be achieved by
          enabling logging for the "sqlalchemy.pool"
          namespace. Defaults to False.

        :param use_threadlocal: If set to True, repeated calls to
          :meth:`connect` within the same application thread will be
          guaranteed to return the same connection object, if one has
          already been retrieved from the pool and has not been
          returned yet. Offers a slight performance advantage at the
          cost of individual transactions by default. The
          :meth:`.Pool.unique_connection` method is provided to return
          a consistenty unique connection to bypass this behavior
          when the flag is set.

          .. warning:: The :paramref:`.Pool.use_threadlocal` flag
             **does not affect the behavior** of :meth:`.Engine.connect`.
             :meth:`.Engine.connect` makes use of the
             :meth:`.Pool.unique_connection` method which **does not use thread
             local context**. To produce a :class:`.Connection` which refers
             to the :meth:`.Pool.connect` method, use
             :meth:`.Engine.contextual_connect`.

             Note that other SQLAlchemy connectivity systems such as
             :meth:`.Engine.execute` as well as the orm
             :class:`.Session` make use of
             :meth:`.Engine.contextual_connect` internally, so these functions
             are compatible with the :paramref:`.Pool.use_threadlocal` setting.

          .. seealso::

            :ref:`threadlocal_strategy` - contains detail on the
            "threadlocal" engine strategy, which provides a more comprehensive
            approach to "threadlocal" connectivity for the specific
            use case of using :class:`.Engine` and :class:`.Connection` objects
            directly.

        :param reset_on_return: Determine steps to take on
          connections as they are returned to the pool.
          reset_on_return can have any of these values:

          * ``"rollback"`` - call rollback() on the connection,
            to release locks and transaction resources.
            This is the default value. The vast majority
            of use cases should leave this value set.
          * ``True`` - same as 'rollback', this is here for
            backwards compatibility.
          * ``"commit"`` - call commit() on the connection,
            to release locks and transaction resources.
            A commit here may be desirable for databases that
            cache query plans if a commit is emitted,
            such as Microsoft SQL Server. However, this
            value is more dangerous than 'rollback' because
            any data changes present on the transaction
            are committed unconditionally.
          * ``None`` - don't do anything on the connection.
            This setting should only be made on a database
            that has no transaction support at all,
            namely MySQL MyISAM. By not doing anything,
            performance can be improved. This
            setting should **never be selected** for a
            database that supports transactions,
            as it will lead to deadlocks and stale
            state.
          * ``False`` - same as None, this is here for
            backwards compatibility.

          .. versionchanged:: 0.7.6
              :paramref:`.Pool.reset_on_return` accepts ``"rollback"``
              and ``"commit"`` arguments.

        :param events: a list of 2-tuples, each of the form
         ``(callable, target)`` which will be passed to :func:`.event.listen`
         upon construction. Provided here so that event listeners
         can be assigned via :func:`.create_engine` before dialect-level
         listeners are applied.

        :param listeners: Deprecated. A list of
          :class:`~sqlalchemy.interfaces.PoolListener`-like objects or
          dictionaries of callables that receive events when DB-API
          connections are created, checked out and checked in to the
          pool. This has been superseded by
          :func:`~sqlalchemy.event.listen`.

        """
        if logging_name:
            self.logging_name = self._orig_logging_name = logging_name
        else:
            self._orig_logging_name = None

        log.instance_logger(self, echoflag=echo)
        self._threadconns = threading.local()
        self._creator = creator
        self._recycle = recycle
        self._invalidate_time = 0
        self._use_threadlocal = use_threadlocal
        # Normalize reset_on_return (strings / booleans / symbols) to one
        # of the three canonical symbols.
        if reset_on_return in ('rollback', True, reset_rollback):
            self._reset_on_return = reset_rollback
        elif reset_on_return in (None, False, reset_none):
            self._reset_on_return = reset_none
        elif reset_on_return in ('commit', reset_commit):
            self._reset_on_return = reset_commit
        else:
            raise exc.ArgumentError(
                "Invalid value for 'reset_on_return': %r"
                % reset_on_return)

        self.echo = echo
        if _dispatch:
            self.dispatch._update(_dispatch, only_propagate=False)
        if _dialect:
            self._dialect = _dialect
        if events:
            for fn, target in events:
                event.listen(self, target, fn)
        if listeners:
            util.warn_deprecated(
                "The 'listeners' argument to Pool (and "
                "create_engine()) is deprecated. Use event.listen().")
            for l in listeners:
                self.add_listener(l)

    def _close_connection(self, connection):
        # Close a DBAPI connection via the dialect; log (but never
        # propagate) anything other than exiting exceptions.
        self.logger.debug("Closing connection %r", connection)
        try:
            self._dialect.do_close(connection)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            self.logger.error("Exception closing connection %r",
                              connection, exc_info=True)

    @util.deprecated(
        2.7, "Pool.add_listener is deprecated. Use event.listen()")
    def add_listener(self, listener):
        """Add a :class:`.PoolListener`-like object to this pool.

        ``listener`` may be an object that implements some or all of
        PoolListener, or a dictionary of callables containing implementations
        of some or all of the named methods in PoolListener.

        """
        interfaces.PoolListener._adapt_listener(self, listener)

    def unique_connection(self):
        """Produce a DBAPI connection that is not referenced by any
        thread-local context.

        This method is equivalent to :meth:`.Pool.connect` when the
        :paramref:`.Pool.use_threadlocal` flag is not set to True.
        When :paramref:`.Pool.use_threadlocal` is True, the
        :meth:`.Pool.unique_connection` method provides a means of bypassing
        the threadlocal context.

        """
        return _ConnectionFairy._checkout(self)

    def _create_connection(self):
        """Called by subclasses to create a new ConnectionRecord."""
        return _ConnectionRecord(self)

    def _invalidate(self, connection, exception=None):
        """Mark all connections established within the generation
        of the given connection as invalidated.

        If this pool's last invalidate time is before when the given
        connection was created, update the timestamp til now. Otherwise,
        no action is performed.

        Connections with a start time prior to this pool's invalidation
        time will be recycled upon next checkout.
        """
        rec = getattr(connection, "_connection_record", None)
        if not rec or self._invalidate_time < rec.starttime:
            self._invalidate_time = time.time()
        # Also invalidate the triggering connection itself, if still valid.
        if getattr(connection, 'is_valid', False):
            connection.invalidate(exception)

    def recreate(self):
        """Return a new :class:`.Pool`, of the same class as this one
        and configured with identical creation arguments.

        This method is used in conjunction with :meth:`dispose`
        to close out an entire :class:`.Pool` and create a new one in
        its place.

        """
        raise NotImplementedError()

    def dispose(self):
        """Dispose of this pool.

        This method leaves the possibility of checked-out connections
        remaining open, as it only affects connections that are
        idle in the pool.

        See also the :meth:`Pool.recreate` method.

        """
        raise NotImplementedError()

    def connect(self):
        """Return a DBAPI connection from the pool.

        The connection is instrumented such that when its
        ``close()`` method is called, the connection will be returned to
        the pool.

        """
        if not self._use_threadlocal:
            return _ConnectionFairy._checkout(self)

        # Threadlocal mode: reuse this thread's live fairy when present.
        try:
            rec = self._threadconns.current()
        except AttributeError:
            # no connection has been checked out on this thread yet
            pass
        else:
            if rec is not None:
                return rec._checkout_existing()

        return _ConnectionFairy._checkout(self, self._threadconns)

    def _return_conn(self, record):
        """Given a _ConnectionRecord, return it to the :class:`.Pool`.

        This method is called when an instrumented DBAPI connection
        has its ``close()`` method called.

        """
        if self._use_threadlocal:
            # Drop this thread's cached reference, if any.
            try:
                del self._threadconns.current
            except AttributeError:
                pass
        self._do_return_conn(record)

    def _do_get(self):
        """Implementation for :meth:`get`, supplied by subclasses."""
        raise NotImplementedError()

    def _do_return_conn(self, conn):
        """Implementation for :meth:`return_conn`, supplied by subclasses."""
        raise NotImplementedError()

    def status(self):
        # Subclasses return a human-readable summary of pool state.
        raise NotImplementedError()
class _ConnectionRecord(object):
    """Internal object which maintains an individual DBAPI connection
    referenced by a :class:`.Pool`.

    The :class:`._ConnectionRecord` object always exists for any particular
    DBAPI connection whether or not that DBAPI connection has been
    "checked out". This is in contrast to the :class:`._ConnectionFairy`
    which is only a public facade to the DBAPI connection while it is checked
    out.

    A :class:`._ConnectionRecord` may exist for a span longer than that
    of a single DBAPI connection. For example, if the
    :meth:`._ConnectionRecord.invalidate`
    method is called, the DBAPI connection associated with this
    :class:`._ConnectionRecord`
    will be discarded, but the :class:`._ConnectionRecord` may be used again,
    in which case a new DBAPI connection is produced when the :class:`.Pool`
    next uses this record.

    The :class:`._ConnectionRecord` is delivered along with connection
    pool events, including :meth:`.PoolEvents.connect` and
    :meth:`.PoolEvents.checkout`, however :class:`._ConnectionRecord` still
    remains an internal object whose API and internals may change.

    .. seealso::

        :class:`._ConnectionFairy`

    """

    def __init__(self, pool):
        self.__pool = pool
        self.connection = self.__connect()
        self.finalize_callback = deque()

        # Fire first_connect exactly once per pool, then the per-connection
        # connect event for this new connection.
        pool.dispatch.first_connect.\
            for_modify(pool.dispatch).\
            exec_once(self.connection, self)
        pool.dispatch.connect(self.connection, self)

    connection = None
    """A reference to the actual DBAPI connection being tracked.

    May be ``None`` if this :class:`._ConnectionRecord` has been marked
    as invalidated; a new DBAPI connection may replace it if the owning
    pool calls upon this :class:`._ConnectionRecord` to reconnect.

    """

    @util.memoized_property
    def info(self):
        """The ``.info`` dictionary associated with the DBAPI connection.

        This dictionary is shared among the :attr:`._ConnectionFairy.info`
        and :attr:`.Connection.info` accessors.

        """
        return {}

    @classmethod
    def checkout(cls, pool):
        # Pull a record from the pool and wrap it in a fairy. A weakref
        # callback returns the connection to the pool if the fairy is
        # garbage collected without being closed.
        rec = pool._do_get()
        try:
            dbapi_connection = rec.get_connection()
        except:
            # put the record back before re-raising the connect failure
            rec.checkin()
            raise
        echo = pool._should_log_debug()
        fairy = _ConnectionFairy(dbapi_connection, rec, echo)
        # `_finalize_fairy and` guards against the module-global being
        # torn down during interpreter shutdown.
        rec.fairy_ref = weakref.ref(
            fairy,
            lambda ref: _finalize_fairy and
            _finalize_fairy(
                dbapi_connection,
                rec, pool, ref, echo)
        )
        _refs.add(rec)
        if echo:
            pool.logger.debug("Connection %r checked out from pool",
                              dbapi_connection)
        return fairy

    def checkin(self):
        # Return this record to the pool: run any finalizers queued on the
        # fairy, fire the checkin event, then hand back to the pool.
        self.fairy_ref = None
        connection = self.connection
        pool = self.__pool
        while self.finalize_callback:
            finalizer = self.finalize_callback.pop()
            finalizer(connection)
        if pool.dispatch.checkin:
            pool.dispatch.checkin(connection, self)
        pool._return_conn(self)

    def close(self):
        # Close the underlying DBAPI connection, if one is present.
        if self.connection is not None:
            self.__close()

    def invalidate(self, e=None):
        """Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`.

        This method is called for all connection invalidations, including
        when the :meth:`._ConnectionFairy.invalidate` or
        :meth:`.Connection.invalidate` methods are called, as well as when any
        so-called "automatic invalidation" condition occurs.

        .. seealso::

            :ref:`pool_connection_invalidation`

        """
        # already invalidated
        if self.connection is None:
            return
        self.__pool.dispatch.invalidate(self.connection, self, e)
        if e is not None:
            self.__pool.logger.info(
                "Invalidate connection %r (reason: %s:%s)",
                self.connection, e.__class__.__name__, e)
        else:
            self.__pool.logger.info(
                "Invalidate connection %r", self.connection)
        self.__close()
        self.connection = None

    def get_connection(self):
        # Return the live DBAPI connection, reconnecting when it was
        # invalidated, and recycling it when it is older than the pool's
        # recycle timeout or predates the pool's last invalidation.
        recycle = False
        if self.connection is None:
            self.connection = self.__connect()
            self.info.clear()
            if self.__pool.dispatch.connect:
                self.__pool.dispatch.connect(self.connection, self)
        elif self.__pool._recycle > -1 and \
                time.time() - self.starttime > self.__pool._recycle:
            self.__pool.logger.info(
                "Connection %r exceeded timeout; recycling",
                self.connection)
            recycle = True
        elif self.__pool._invalidate_time > self.starttime:
            self.__pool.logger.info(
                "Connection %r invalidated due to pool invalidation; " +
                "recycling",
                self.connection
            )
            recycle = True

        if recycle:
            self.__close()
            self.connection = self.__connect()
            self.info.clear()
            if self.__pool.dispatch.connect:
                self.__pool.dispatch.connect(self.connection, self)
        return self.connection

    def __close(self):
        # Delegate so the pool's dialect-aware close/logging logic applies.
        self.__pool._close_connection(self.connection)

    def __connect(self):
        # Create a brand-new DBAPI connection via the pool's creator,
        # recording starttime for the recycle/invalidate checks above.
        try:
            self.starttime = time.time()
            connection = self.__pool._creator()
            self.__pool.logger.debug("Created new connection %r", connection)
            return connection
        except Exception as e:
            self.__pool.logger.debug("Error on connect(): %s", e)
            raise
def _finalize_fairy(connection, connection_record,
                    pool, ref, echo, fairy=None):
    """Cleanup for a :class:`._ConnectionFairy` whether or not it's already
    been garbage collected.

    Called either directly from :meth:`._ConnectionFairy._checkin` (with
    *fairy* set and *ref* None) or from the weakref callback installed at
    checkout time (with *ref* set and *fairy* None).
    """
    _refs.discard(connection_record)

    # From the weakref path: bail out if the record has since been handed
    # to a newer fairy — this callback is stale.
    if ref is not None and \
            connection_record.fairy_ref is not ref:
        return

    if connection is not None:
        if connection_record and echo:
            pool.logger.debug("Connection %r being returned to pool",
                              connection)

        try:
            # Rebuild a fairy when arriving via garbage collection so the
            # reset-on-return logic can run against the raw connection.
            fairy = fairy or _ConnectionFairy(
                connection, connection_record, echo)
            assert fairy.connection is connection
            fairy._reset(pool)

            # Immediately close detached instances
            if not connection_record:
                pool._close_connection(connection)
        except Exception as e:
            pool.logger.error(
                "Exception during reset or similar", exc_info=True)
            if connection_record:
                connection_record.invalidate(e=e)
            # exiting exceptions must still propagate
            if isinstance(e, (SystemExit, KeyboardInterrupt)):
                raise

    if connection_record:
        connection_record.checkin()
# Strong references to checked-out _ConnectionRecord objects, so they are
# not collected while their weakref-tracked fairy is alive.
_refs = set()
class _ConnectionFairy(object):
"""Proxies a DBAPI connection and provides return-on-dereference
support.
This is an internal object used by the :class:`.Pool` implementation
to provide context management to a DBAPI connection delivered by
that :class:`.Pool`.
The name "fairy" is inspired by the fact that the
:class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
only for the length of a specific DBAPI connection being checked out from
the pool, and additionally that as a transparent proxy, it is mostly
invisible.
.. seealso::
:class:`._ConnectionRecord`
"""
def __init__(self, dbapi_connection, connection_record, echo):
self.connection = dbapi_connection
self._connection_record = connection_record
self._echo = echo
connection = None
"""A reference to the actual DBAPI connection being tracked."""
_connection_record = None
"""A reference to the :class:`._ConnectionRecord` object associated
with the DBAPI connection.
This is currently an internal accessor which is subject to change.
"""
_reset_agent = None
"""Refer to an object with a ``.commit()`` and ``.rollback()`` method;
if non-None, the "reset-on-return" feature will call upon this object
rather than directly against the dialect-level do_rollback() and
do_commit() methods.
In practice, a :class:`.Connection` assigns a :class:`.Transaction` object
to this variable when one is in scope so that the :class:`.Transaction`
takes the job of committing or rolling back on return if
:meth:`.Connection.close` is called while the :class:`.Transaction`
still exists.
This is essentially an "event handler" of sorts but is simplified as an
instance variable both for performance/simplicity as well as that there
can only be one "reset agent" at a time.
"""
@classmethod
def _checkout(cls, pool, threadconns=None, fairy=None):
if not fairy:
fairy = _ConnectionRecord.checkout(pool)
fairy._pool = pool
fairy._counter = 0
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
if fairy.connection is None:
raise exc.InvalidRequestError("This connection is closed")
fairy._counter += 1
if not pool.dispatch.checkout or fairy._counter != 1:
return fairy
# Pool listeners can trigger a reconnection on checkout
attempts = 2
while attempts > 0:
try:
pool.dispatch.checkout(fairy.connection,
fairy._connection_record,
fairy)
return fairy
except exc.DisconnectionError as e:
pool.logger.info(
"Disconnection detected on checkout: %s", e)
fairy._connection_record.invalidate(e)
fairy.connection = fairy._connection_record.get_connection()
attempts -= 1
pool.logger.info("Reconnection attempts exhausted on checkout")
fairy.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def _checkout_existing(self):
return _ConnectionFairy._checkout(self._pool, fairy=self)
def _checkin(self):
_finalize_fairy(self.connection, self._connection_record,
self._pool, None, self._echo, fairy=self)
self.connection = None
self._connection_record = None
_close = _checkin
def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
if self._echo:
pool.logger.debug("Connection %s rollback-on-return%s",
self.connection,
", via agent"
if self._reset_agent else "")
if self._reset_agent:
self._reset_agent.rollback()
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
if self._echo:
pool.logger.debug("Connection %s commit-on-return%s",
self.connection,
", via agent"
if self._reset_agent else "")
if self._reset_agent:
self._reset_agent.commit()
else:
pool._dialect.do_commit(self)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
"""Return True if this :class:`._ConnectionFairy` still refers
to an active DBAPI connection."""
return self.connection is not None
@util.memoized_property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.ConnectionFairy`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`._ConnectionFairy`. It is shared
with the :attr:`._ConnectionRecord.info` and :attr:`.Connection.info`
accessors.
"""
return self._connection_record.info
def invalidate(self, e=None):
"""Mark this connection as invalidated.
This method can be called directly, and is also called as a result
of the :meth:`.Connection.invalidate` method. When invoked,
the DBAPI connection is immediately closed and discarded from
further use by the pool. The invalidation mechanism proceeds
via the :meth:`._ConnectionRecord.invalidate` internal method.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.connection is None:
util.warn("Can't invalidate an already-closed connection.")
return
if self._connection_record:
self._connection_record.invalidate(e=e)
self.connection = None
self._checkin()
def cursor(self, *args, **kwargs):
"""Return a new DBAPI cursor for the underlying connection.
This method is a proxy for the ``connection.cursor()`` DBAPI
method.
"""
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
    def detach(self):
        """Separate this connection from its Pool.
        This means that the connection will no longer be returned to the
        pool when closed, and will instead be literally closed. The
        containing ConnectionRecord is separated from the DB-API connection,
        and will create a new connection when next used.
        Note that any overall connection limiting constraints imposed by a
        Pool implementation may be violated after a detach, as the detached
        connection is removed from the pool's knowledge and control.
        """
        if self._connection_record is not None:
            _refs.remove(self._connection_record)
            # Sever the record from this fairy and from the DBAPI
            # connection; the record will open a new one when next used.
            self._connection_record.fairy_ref = None
            self._connection_record.connection = None
            # TODO: should this be _return_conn?
            self._pool._do_return_conn(self._connection_record)
            # Give the detached connection its own copy of the info dict,
            # independent of the (recycled) record's dict.
            self.info = self.info.copy()
            self._connection_record = None
def close(self):
self._counter -= 1
if self._counter == 0:
self._checkin()
class SingletonThreadPool(Pool):
    """A Pool that maintains one connection per thread.
    Maintains one connection per each thread, never moving a connection to a
    thread other than the one which it was created in.
    Options are the same as those of :class:`.Pool`, as well as:
    :param pool_size: The number of threads in which to maintain connections
        at once. Defaults to five.
    :class:`.SingletonThreadPool` is used by the SQLite dialect
    automatically when a memory-based database is used.
    See :ref:`sqlite_toplevel`.
    """
    def __init__(self, creator, pool_size=5, **kw):
        # This pool is inherently thread-scoped: force threadlocal checkout
        # behavior regardless of what the caller passed.
        kw['use_threadlocal'] = True
        Pool.__init__(self, creator, **kw)
        # Per-thread slot holding a weakref to that thread's connection.
        self._conn = threading.local()
        # Strong references to every connection created across all threads,
        # so dispose()/_cleanup() can reach them.
        self._all_conns = set()
        self.size = pool_size
    def recreate(self):
        self.logger.info("Pool recreating")
        return self.__class__(self._creator,
                              pool_size=self.size,
                              recycle=self._recycle,
                              echo=self.echo,
                              logging_name=self._orig_logging_name,
                              use_threadlocal=self._use_threadlocal,
                              reset_on_return=self._reset_on_return,
                              _dispatch=self.dispatch,
                              _dialect=self._dialect)
    def dispose(self):
        """Dispose of this pool."""
        for conn in self._all_conns:
            try:
                conn.close()
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                # pysqlite won't even let you close a conn from a thread
                # that didn't create it
                pass
        self._all_conns.clear()
    def _cleanup(self):
        # Evict arbitrary connections until we are back under the limit.
        while len(self._all_conns) >= self.size:
            c = self._all_conns.pop()
            c.close()
    def status(self):
        return "SingletonThreadPool id:%d size: %d" % \
            (id(self), len(self._all_conns))
    def _do_return_conn(self, conn):
        # Connections stay bound to their creating thread; nothing to do
        # on return.
        pass
    def _do_get(self):
        try:
            # self._conn.current is a weakref to this thread's connection;
            # calling it yields the connection or None if collected.
            c = self._conn.current()
            if c:
                return c
        except AttributeError:
            # No connection has been created in this thread yet.
            pass
        c = self._create_connection()
        self._conn.current = weakref.ref(c)
        if len(self._all_conns) >= self.size:
            self._cleanup()
        self._all_conns.add(c)
        return c
class QueuePool(Pool):
    """A :class:`.Pool` that imposes a limit on the number of open connections.
    :class:`.QueuePool` is the default pooling implementation used for
    all :class:`.Engine` objects, unless the SQLite dialect is in use.
    """
    def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30,
                 **kw):
        """
        Construct a QueuePool.
        :param creator: a callable function that returns a DB-API
          connection object, same as that of :paramref:`.Pool.creator`.
        :param pool_size: The size of the pool to be maintained,
          defaults to 5. This is the largest number of connections that
          will be kept persistently in the pool. Note that the pool
          begins with no connections; once this number of connections
          is requested, that number of connections will remain.
          ``pool_size`` can be set to 0 to indicate no size limit; to
          disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
          instead.
        :param max_overflow: The maximum overflow size of the
          pool. When the number of checked-out connections reaches the
          size set in pool_size, additional connections will be
          returned up to this limit. When those additional connections
          are returned to the pool, they are disconnected and
          discarded. It follows then that the total number of
          simultaneous connections the pool will allow is pool_size +
          `max_overflow`, and the total number of "sleeping"
          connections the pool will allow is pool_size. `max_overflow`
          can be set to -1 to indicate no overflow limit; no limit
          will be placed on the total number of concurrent
          connections. Defaults to 10.
        :param timeout: The number of seconds to wait before giving up
          on returning a connection. Defaults to 30.
        :param \**kw: Other keyword arguments including
          :paramref:`.Pool.recycle`, :paramref:`.Pool.echo`,
          :paramref:`.Pool.reset_on_return` and others are passed to the
          :class:`.Pool` constructor.
        """
        Pool.__init__(self, creator, **kw)
        self._pool = sqla_queue.Queue(pool_size)
        # _overflow counts connections beyond pool_size; it starts at
        # -pool_size so that "overflow >= max_overflow" is only true once
        # pool_size + max_overflow connections exist.
        self._overflow = 0 - pool_size
        self._max_overflow = max_overflow
        self._timeout = timeout
        self._overflow_lock = threading.Lock()
    def _do_return_conn(self, conn):
        try:
            self._pool.put(conn, False)
        except sqla_queue.Full:
            # Queue is full, so this was an overflow connection: close it
            # and release the overflow slot even if close() raises.
            try:
                conn.close()
            finally:
                self._dec_overflow()
    def _do_get(self):
        use_overflow = self._max_overflow > -1
        try:
            # Block on the queue only when no further overflow is allowed;
            # otherwise fail fast and create an overflow connection below.
            wait = use_overflow and self._overflow >= self._max_overflow
            return self._pool.get(wait, self._timeout)
        except sqla_queue.Empty:
            if use_overflow and self._overflow >= self._max_overflow:
                if not wait:
                    # Raced from "overflow available" to "at limit" between
                    # the two checks; retry, which will now block.
                    return self._do_get()
                else:
                    raise exc.TimeoutError(
                        "QueuePool limit of size %d overflow %d reached, "
                        "connection timed out, timeout %d" %
                        (self.size(), self.overflow(), self._timeout))
            # Reserve an overflow slot before creating the connection,
            # rolling the slot back if creation fails.
            if self._inc_overflow():
                try:
                    return self._create_connection()
                except:
                    self._dec_overflow()
                    raise
            else:
                # Lost the race for the last overflow slot; start over.
                return self._do_get()
    def _inc_overflow(self):
        if self._max_overflow == -1:
            # Unlimited overflow: no need for the lock or the limit check.
            self._overflow += 1
            return True
        with self._overflow_lock:
            if self._overflow < self._max_overflow:
                self._overflow += 1
                return True
            else:
                return False
    def _dec_overflow(self):
        if self._max_overflow == -1:
            self._overflow -= 1
            return True
        with self._overflow_lock:
            self._overflow -= 1
            return True
    def recreate(self):
        self.logger.info("Pool recreating")
        return self.__class__(self._creator, pool_size=self._pool.maxsize,
                              max_overflow=self._max_overflow,
                              timeout=self._timeout,
                              recycle=self._recycle, echo=self.echo,
                              logging_name=self._orig_logging_name,
                              use_threadlocal=self._use_threadlocal,
                              reset_on_return=self._reset_on_return,
                              _dispatch=self.dispatch,
                              _dialect=self._dialect)
    def dispose(self):
        # Drain and close every pooled connection, then reset the overflow
        # counter to its initial (-pool_size) value.
        while True:
            try:
                conn = self._pool.get(False)
                conn.close()
            except sqla_queue.Empty:
                break
        self._overflow = 0 - self.size()
        self.logger.info("Pool disposed. %s", self.status())
    def status(self):
        return "Pool size: %d Connections in pool: %d "\
            "Current Overflow: %d Current Checked out "\
            "connections: %d" % (self.size(),
                                 self.checkedin(),
                                 self.overflow(),
                                 self.checkedout())
    def size(self):
        return self._pool.maxsize
    def checkedin(self):
        return self._pool.qsize()
    def overflow(self):
        return self._overflow
    def checkedout(self):
        # Checked out = slots missing from the queue plus overflow in use.
        return self._pool.maxsize - self._pool.qsize() + self._overflow
class NullPool(Pool):
    """A Pool which does not pool connections.
    Instead it literally opens and closes the underlying DB-API connection
    per each connection open/close.
    Reconnect-related functions such as ``recycle`` and connection
    invalidation are not supported by this Pool implementation, since
    no connections are held persistently.
    .. versionchanged:: 0.7
        :class:`.NullPool` is used by the SQlite dialect automatically
        when a file-based database is used. See :ref:`sqlite_toplevel`.
    """
    def status(self):
        return "NullPool"
    def _do_return_conn(self, conn):
        # Nothing is pooled: a returned connection is simply closed.
        conn.close()
    def _do_get(self):
        # Every checkout opens a brand-new DBAPI connection.
        return self._create_connection()
    def recreate(self):
        self.logger.info("Pool recreating")
        cls = self.__class__
        return cls(self._creator,
                   recycle=self._recycle,
                   echo=self.echo,
                   logging_name=self._orig_logging_name,
                   use_threadlocal=self._use_threadlocal,
                   reset_on_return=self._reset_on_return,
                   _dispatch=self.dispatch,
                   _dialect=self._dialect)
    def dispose(self):
        # No persistent state to dispose of.
        pass
class StaticPool(Pool):
    """A Pool of exactly one connection, used for all requests.
    Reconnect-related functions such as ``recycle`` and connection
    invalidation (which is also used to support auto-reconnect) are not
    currently supported by this Pool implementation but may be implemented
    in a future release.
    """
    @memoized_property
    def _conn(self):
        # Created lazily on first access; memoized_property then caches the
        # connection on the instance (in self.__dict__).
        return self._creator()
    @memoized_property
    def connection(self):
        return _ConnectionRecord(self)
    def status(self):
        return "StaticPool"
    def dispose(self):
        # '_conn' only appears in __dict__ once the memoized property has
        # actually been evaluated; this check avoids creating a connection
        # just to close it.
        if '_conn' in self.__dict__:
            self._conn.close()
            self._conn = None
    def recreate(self):
        self.logger.info("Pool recreating")
        return self.__class__(creator=self._creator,
                              recycle=self._recycle,
                              use_threadlocal=self._use_threadlocal,
                              reset_on_return=self._reset_on_return,
                              echo=self.echo,
                              logging_name=self._orig_logging_name,
                              _dispatch=self.dispatch,
                              _dialect=self._dialect)
    def _create_connection(self):
        # Always hand out the single shared connection.
        return self._conn
    def _do_return_conn(self, conn):
        pass
    def _do_get(self):
        return self.connection
class AssertionPool(Pool):
    """A :class:`.Pool` permitting no more than one checked-out connection
    at any moment.
    Checking out a second connection while one is already outstanding
    raises an ``AssertionError``; useful for flushing out code that uses
    more connections than intended.
    .. versionchanged:: 0.7
        :class:`.AssertionPool` also logs a traceback of where
        the original connection was checked out, and reports
        this in the assertion error raised.
    """
    def __init__(self, *args, **kw):
        self._conn = None
        self._checked_out = False
        # Must be popped before Pool.__init__ receives the kwargs.
        self._store_traceback = kw.pop('store_traceback', True)
        self._checkout_traceback = None
        Pool.__init__(self, *args, **kw)
    def status(self):
        return "AssertionPool"
    def _do_return_conn(self, conn):
        if not self._checked_out:
            raise AssertionError("connection is not checked out")
        self._checked_out = False
        assert conn is self._conn
    def dispose(self):
        self._checked_out = False
        if self._conn:
            self._conn.close()
    def recreate(self):
        self.logger.info("Pool recreating")
        return self.__class__(self._creator, echo=self.echo,
                              logging_name=self._orig_logging_name,
                              _dispatch=self.dispatch,
                              _dialect=self._dialect)
    def _do_get(self):
        if self._checked_out:
            # Include the stored checkout traceback, if any, so the error
            # points at the offending call site.
            suffix = ''
            if self._checkout_traceback:
                suffix = ' at:\n%s' % ''.join(
                    chop_traceback(self._checkout_traceback))
            raise AssertionError("connection is already checked out" + suffix)
        if not self._conn:
            self._conn = self._create_connection()
        self._checked_out = True
        if self._store_traceback:
            self._checkout_traceback = traceback.format_stack()
        return self._conn
class _DBProxy(object):
    """Layers connection pooling behavior on top of a standard DB-API module.
    Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
    specific connect parameters. Other functions and attributes are delegated
    to the underlying DB-API module.
    """
    def __init__(self, module, poolclass=QueuePool, **kw):
        """Initializes a new proxy.
        module
          a DB-API 2.0 module
        poolclass
          a Pool class, defaulting to QueuePool
        Other parameters are sent to the Pool object's constructor.
        """
        self.module = module
        self.kw = kw
        self.poolclass = poolclass
        # connect-args key -> Pool instance
        self.pools = {}
        self._create_pool_mutex = threading.Lock()
    def close(self):
        # Drop all pool references; the pools themselves are not disposed
        # here (remaining connections are closed as the pools are
        # garbage-collected), matching the original per-key deletion.
        self.pools.clear()
    def __del__(self):
        self.close()
    def __getattr__(self, key):
        # Delegate any unknown attribute to the underlying DB-API module.
        return getattr(self.module, key)
    def get_pool(self, *args, **kw):
        """Return the pool keyed to the given connect arguments, creating
        it under a mutex if it does not yet exist."""
        key = self._serialize(*args, **kw)
        try:
            return self.pools[key]
        except KeyError:
            # Double-checked creation: re-test under the lock so two racing
            # threads do not each build a pool for the same key.  Using the
            # lock as a context manager guarantees release on any exit path
            # (the original used explicit acquire()/release()).
            with self._create_pool_mutex:
                if key not in self.pools:
                    kw.pop('sa_pool_key', None)
                    pool = self.poolclass(
                        lambda: self.module.connect(*args, **kw), **self.kw)
                    self.pools[key] = pool
                    return pool
                else:
                    return self.pools[key]
    def connect(self, *args, **kw):
        """Activate a connection to the database.
        Connect to the database using this DBProxy's module and the given
        connect arguments. If the arguments match an existing pool, the
        connection will be returned from the pool's current thread-local
        connection instance, or if there is no thread-local connection
        instance it will be checked out from the set of pooled connections.
        If the pool has no available connections and allows new connections
        to be created, a new database connection will be made.
        """
        return self.get_pool(*args, **kw).connect()
    def dispose(self, *args, **kw):
        """Dispose the pool referenced by the given connect arguments."""
        # pop() with a default replaces the try/except KeyError dance.
        self.pools.pop(self._serialize(*args, **kw), None)
    def _serialize(self, *args, **kw):
        # An explicit 'sa_pool_key' overrides derivation from connect args;
        # otherwise build a hashable key from args + sorted kwargs.
        if "sa_pool_key" in kw:
            return kw['sa_pool_key']
        return tuple(
            list(args) +
            [(k, kw[k]) for k in sorted(kw)]
        )
|
|
import discord
from discord.ext import commands
import json
import os
import sys
from modules import utilities
from utilities import checks
sys.path.insert(0, "..")
from units.files import create_folder
sys.path.pop(0)
def setup(bot):
	"""discord.py extension entry point: register the Battlerite cog."""
	cog = Battlerite(bot)
	bot.add_cog(cog)
class Battlerite(commands.Cog):
	'''
	Deprecated now that the Battlerite API is closed/shut down:
	https://developer.battlerite.com/
	https://twitter.com/Battlerite/status/1151092200106876933
	'''
	def __init__(self, bot):
		self.bot = bot
		# stackable ID (str) -> {"Name": ..., "Type": ...}; populated
		# asynchronously by the startup task below.
		self.mappings = {}
		self.bot.loop.create_task(self.load_mappings_and_emoji(), name = "Load Battlerite mappings and emoji")
	async def load_mappings_and_emoji(self):
		# Emoji lookup depends on champion names from the mappings, so the
		# two loads must run in this order.
		await self.load_mappings()
		await self.load_emoji()
	async def load_emoji(self):
		# Bind one custom emoji attribute per champion, e.g.
		# self.lucie_emoji, falling back to "" when the emoji is missing.
		# TODO: Check only within Emoji Server emojis?
		champions = filter(lambda m: m["Type"] == "Characters", self.mappings.values())
		champions = set(c["Name"].lower().replace(' ', '_') for c in champions)
		champions.discard("random_champion")
		champions.discard("egg_bakko")  # For Easter Event Egg Brawl
		champions.discard("egg_raigon")  # For Easter Event Egg Brawl
		champions.discard("rabbit")  # For Battlerite Royale
		await self.bot.wait_until_ready()
		for champion in champions:
			setattr(self, champion + "_emoji", discord.utils.get(self.bot.emojis, name = "battlerite_" + champion) or "")
	async def load_mappings(self):
		# Load cached mappings if present; otherwise download the revision
		# 67104 stackables + English localization from GitHub, join them,
		# and cache the result as mappings.json.
		create_folder(self.bot.data_path + "/battlerite")
		if os.path.isfile(self.bot.data_path + "/battlerite/mappings.json"):
			with open(self.bot.data_path + "/battlerite/mappings.json", 'r') as mappings_file:
				self.mappings = json.load(mappings_file)
			return
		if not os.path.isfile(self.bot.data_path + "/battlerite/stackables.json"):
			url = ("https://raw.githubusercontent.com/StunlockStudios/battlerite-assets/master/mappings/"
					"67104/stackables.json")
			async with self.bot.aiohttp_session.get(url) as resp:
				data = await resp.content.read()
			with open(self.bot.data_path + "/battlerite/stackables.json", "wb") as stackables_file:
				stackables_file.write(data)
		if not os.path.isfile(self.bot.data_path + "/battlerite/English.ini"):
			url = ("https://raw.githubusercontent.com/StunlockStudios/battlerite-assets/master/mappings/"
					"67104/Localization/English.ini")
			async with self.bot.aiohttp_session.get(url) as resp:
				data = await resp.content.read()
			with open(self.bot.data_path + "/battlerite/English.ini", "wb") as localization_file:
				localization_file.write(data)
		with open(self.bot.data_path + "/battlerite/stackables.json", 'r') as stackables_file:
			stackables = json.load(stackables_file)
		localization = {}
		# NOTE(review): assumes every line of English.ini contains '=';
		# a line without it would raise IndexError here — confirm upstream
		# file format before hardening.
		with open(self.bot.data_path + "/battlerite/English.ini", 'r', encoding = "UTF-16") as localization_file:
			for line in localization_file:
				id_name = line.strip().split('=', maxsplit = 1)
				localization[id_name[0]] = id_name[1]
		self.mappings = {}
		for item in stackables["Mappings"]:
			# Prefer the localized name; fall back to the dev name when no
			# localization entry exists.
			name = localization.get(item.get("LocalizedName"), item["DevName"])
			self.mappings[str(item["StackableId"])] = {"Name": name, "Type": item["StackableRangeName"]}
		with open(self.bot.data_path + "/battlerite/mappings.json", 'w') as mappings_file:
			json.dump(self.mappings, mappings_file, indent = 4)
	async def cog_check(self, ctx):
		# Applies to every command in this cog.
		return await checks.not_forbidden().predicate(ctx)
	@commands.group(hidden = True,
					invoke_without_command = True, case_insensitive = True)
	async def battlerite(self, ctx):
		'''
		Battlerite
		Deprecated now that the Battlerite API is closed/shut down:
		https://developer.battlerite.com/
		https://twitter.com/Battlerite/status/1151092200106876933
		Was previously using revision 67104 mappings
		'''
		await ctx.send_help(ctx.command)
	# TODO: Make converter?
	async def get_player(self, player):
		# Look up a player by name via the (now defunct) official API;
		# returns the first matching player object, or None if not found.
		url = "https://api.developer.battlerite.com/shards/global/players"
		headers = {"Authorization": self.bot.BATTLERITE_API_KEY, "Accept": "application/vnd.api+json"}
		params = {"filter[playerNames]": player}
		async with self.bot.aiohttp_session.get(url, headers = headers, params = params) as resp:
			data = await resp.json()
		# TODO: Raise and handle error if not found?
		return next(iter(data["data"]), None)
	# TODO: Handle missing Battlerite Arena stats
	# TODO: Get values safely + handle division by zero
	# TODO: Get values by type name
	@battlerite.group(enabled = False, hidden = True,
						invoke_without_command = True, case_insensitive = True)
	async def player(self, ctx, player: str):
		'''Player'''
		data = await self.get_player(player)
		if not data:
			return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: Player not found")
		stats = data["attributes"]["stats"]
		'''
		# Code to print/list mappings:
		for stat, value in stats.items():
			if stat in self.mappings:
				print(f"{self.mappings[stat]['Name']} {self.mappings[stat]['Type']} ({stat}): {value}")
			else:
				print(f"Missing Mapping ({stat}): {value}")
		'''
		# Numeric keys are stackable IDs, presumably: 26 = account level,
		# 25 = account XP, 8 = time played, 2/3 = total wins/losses,
		# 10-17 = casual/ranked 2v2 and 3v3 wins/losses — verify against
		# mappings.json.
		fields = (("Account Level", stats["26"]), ("Account XP", f"{stats['25']:,}"),
					("Time Played", utilities.secs_to_letter_format(stats['8'], limit = 3600)),
					("Wins", stats['2']), ("Losses", stats['3']),
					("Winrate", f"{stats['2'] / (stats['2'] + stats['3']) * 100:.2f}%"),
					("Ranked 2v2 Wins - Losses (Winrate)",
						f"{stats['14']} - {stats['15']} ({stats['14'] / (stats['14'] + stats['15']) * 100:.2f}%)"),
					("Ranked 3v3 Wins - Losses (Winrate)",
						f"{stats['16']} - {stats['17']} ({stats['16'] / (stats['16'] + stats['17']) * 100:.2f}%)"),
					("Casual 2v2 Wins - Losses (Winrate)",
						f"{stats['10']} - {stats['11']} ({stats['10'] / (stats['10'] + stats['11']) * 100:.2f}%)"),
					("Casual 3v3 Wins - Losses (Winrate)",
						f"{stats['12']} - {stats['13']} ({stats['12'] / (stats['12'] + stats['13']) * 100:.2f}%)"))
		await ctx.embed_reply(f"ID: {data['id']}", title = data["attributes"]["name"], fields = fields)
	# TODO: Handle 25+ fields
	# TODO: Dynamic champion commands?
	# TODO: Battle Season Level and XP?
	# Casual and Ranked 2v2 and 3v3 subcommands blocked by https://github.com/StunlockStudios/battlerite-assets/issues/8
	@player.command(enabled = False, hidden = True, name = "brawl")
	async def player_brawl(self, ctx, player: str):
		'''Brawl'''
		data = await self.get_player(player)
		if not data:
			return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: Player not found")
		stats = data["attributes"]["stats"]
		wins = stats.get("18", 0)
		losses = stats.get("19", 0)
		fields = [("Brawl Wins", wins), ("Brawl Losses", losses)]
		# Only show a winrate when there is at least one game played.
		if wins + losses:
			fields.append(("Brawl Winrate", f"{wins / (wins + losses) * 100:.2f}%"))
		await ctx.embed_reply(f"ID: {data['id']}", title = data["attributes"]["name"], fields = fields)
	@player.group(enabled = False, hidden = True,
					name = "casual", aliases = ["unranked"], case_insensitive = True)
	async def player_casual(self, ctx, player: str):
		'''Casual'''
		data = await self.get_player(player)
		if not data:
			return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: Player not found")
		stats = data["attributes"]["stats"]
		wins_2v2 = stats.get("10", 0)
		losses_2v2 = stats.get("11", 0)
		wins_3v3 = stats.get("12", 0)
		losses_3v3 = stats.get("13", 0)
		fields = [("Casual 2v2 Wins", wins_2v2), ("Casual 2v2 Losses", losses_2v2)]
		# Show "N/A" for an unplayed queue only if the other queue has
		# games, so a completely empty profile stays compact.
		if wins_2v2 + losses_2v2:
			fields.append(("Casual 2v2 Winrate", f"{wins_2v2 / (wins_2v2 + losses_2v2) * 100:.2f}%"))
		elif wins_3v3 + losses_3v3:
			fields.append(("Casual 2v2 Winrate", "N/A"))
		fields.extend((("Casual 3v3 Wins", wins_3v3), ("Casual 3v3 Losses", losses_3v3)))
		if wins_3v3 + losses_3v3:
			fields.append(("Casual 3v3 Winrate", f"{wins_3v3 / (wins_3v3 + losses_3v3) * 100:.2f}%"))
		elif wins_2v2 + losses_2v2:
			fields.append(("Casual 3v3 Winrate", "N/A"))
		await ctx.embed_reply(f"ID: {data['id']}", title = data["attributes"]["name"], fields = fields)
	@player.command(enabled = False, hidden = True,
					name = "levels", aliases = ["level", "xp", "exp", "experience"])
	async def player_levels(self, ctx, player: str):
		'''Levels'''
		data = await self.get_player(player)
		if not data:
			return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: Player not found")
		stats = data["attributes"]["stats"]
		fields = [("Account Level", f"{stats['26']} ({stats['25']:,} XP)", False)]
		levels = {}
		xp = {}
		# Bucket per-champion stats by mapping type; champions are then
		# listed by descending XP.
		for stat, value in stats.items():
			if self.mappings.get(stat, {}).get("Type") == "Level":
				levels[self.mappings[stat]["Name"]] = value
			elif self.mappings.get(stat, {}).get("Type") == "XP":
				xp[self.mappings[stat]["Name"]] = value
		# levels.pop("Random Champion", None)
		xp = sorted(xp.items(), key = lambda x: x[1], reverse = True)
		for name, value in xp:
			emoji = getattr(self, name.lower().replace(' ', '_') + "_emoji", "")
			fields.append((f"{emoji} {name}", f"{levels[name]} ({value:,} XP)"))
		await ctx.embed_reply(f"ID: {data['id']}", title = data["attributes"]["name"], fields = fields)
	@player.group(enabled = False, hidden = True,
					name = "ranked", aliases = ["comp", "competitive", "league"], case_insensitive = True)
	async def player_ranked(self, ctx, player: str):
		'''Ranked'''
		data = await self.get_player(player)
		if not data:
			return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: Player not found")
		stats = data["attributes"]["stats"]
		wins_2v2 = stats.get("14", 0)
		losses_2v2 = stats.get("15", 0)
		wins_3v3 = stats.get("16", 0)
		losses_3v3 = stats.get("17", 0)
		fields = [("Ranked 2v2 Wins", wins_2v2), ("Ranked 2v2 Losses", losses_2v2)]
		# Same "N/A" logic as the casual subcommand.
		if wins_2v2 + losses_2v2:
			fields.append(("Ranked 2v2 Winrate", f"{wins_2v2 / (wins_2v2 + losses_2v2) * 100:.2f}%"))
		elif wins_3v3 + losses_3v3:
			fields.append(("Ranked 2v2 Winrate", "N/A"))
		fields.extend((("Ranked 3v3 Wins", wins_3v3), ("Ranked 3v3 Losses", losses_3v3)))
		if wins_3v3 + losses_3v3:
			fields.append(("Ranked 3v3 Winrate", f"{wins_3v3 / (wins_3v3 + losses_3v3) * 100:.2f}%"))
		elif wins_2v2 + losses_2v2:
			fields.append(("Ranked 3v3 Winrate", "N/A"))
		await ctx.embed_reply(f"ID: {data['id']}", title = data["attributes"]["name"], fields = fields)
	@player.command(enabled = False, hidden = True,
					name = "time", aliases = ["played"])
	async def player_time(self, ctx, player: str):
		'''Time Played'''
		data = await self.get_player(player)
		if not data:
			return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: Player not found")
		stats = data["attributes"]["stats"]
		fields = [("Total Time Played", utilities.secs_to_letter_format(stats['8'], limit = 3600), False)]
		time_played = {}
		for stat, value in stats.items():
			if self.mappings.get(stat, {}).get("Type") == "CharacterTimePlayed":
				time_played[self.mappings[stat]["Name"]] = value
		time_played.pop("Random Champion", None)
		# Champions listed by descending time played.
		time_played = sorted(time_played.items(), key = lambda x: x[1], reverse = True)
		for name, value in time_played:
			emoji = getattr(self, name.lower().replace(' ', '_') + "_emoji", "")
			fields.append((f"{emoji} {name}", utilities.secs_to_letter_format(value, limit = 3600)))
		await ctx.embed_reply(f"ID: {data['id']}", title = data["attributes"]["name"], fields = fields)
	@player.command(enabled = False, hidden = True,
					name = "wins", aliases = ["losses"])
	async def player_wins(self, ctx, player: str):
		'''Wins/Losses'''
		data = await self.get_player(player)
		if not data:
			return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: Player not found")
		stats = data["attributes"]["stats"]
		field_value = f"{stats['2']} - {stats['3']} ({stats['2'] / (stats['2'] + stats['3']) * 100:.2f}%)"
		# TODO: Handle division by 0
		fields = [("Total Wins - Losses (Winrate)", field_value, False)]
		wins = {}
		losses = {}
		for stat, value in stats.items():
			if self.mappings.get(stat, {}).get("Type") == "CharacterWins":
				wins[self.mappings[stat]["Name"]] = value
			elif self.mappings.get(stat, {}).get("Type") == "CharacterLosses":
				losses[self.mappings[stat]["Name"]] = value
		# Champions sorted by total games (wins + losses), descending.
		wins = sorted(wins.items(), key = lambda x: losses.get(x[0], 0) + x[1], reverse = True)
		# TODO: Handle character with losses and no wins
		for name, value in wins:
			emoji = getattr(self, name.lower().replace(' ', '_') + "_emoji", "")
			field_value = f"{value} - {losses.get(name, 0)} ({value / (value + losses.get(name, 0)) * 100:.2f}%)"
			# TODO: Handle division by 0
			fields.append((f"{emoji} {name}", field_value))
		await ctx.embed_reply(f"ID: {data['id']}", title = data["attributes"]["name"], fields = fields)
	@battlerite.group(enabled = False, hidden = True,
						invoke_without_command = True, case_insensitive = True)
	async def royale(self, ctx):
		'''Battlerite Royale'''
		await ctx.send_help(ctx.command)
	@royale.group(enabled = False, hidden = True,
					name = "player", invoke_without_command = True, case_insensitive = True)
	async def royale_player(self, ctx, player: str):
		'''Player'''
		data = await self.get_player(player)
		if not data:
			return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: Player not found")
		stats = data["attributes"]["stats"]
		fields = []
		# Royale stat IDs are resolved dynamically from the mappings by
		# type name, unlike the hard-coded Arena IDs above.
		level_id = discord.utils.find(lambda m: m[1]["Type"] == "RoyaleAccountLevel", self.mappings.items())[0]
		if level_id in stats:
			fields.append(("Account Level", stats[level_id]))
		xp_id = discord.utils.find(lambda m: m[1]["Type"] == "RoyaleAccountXP", self.mappings.items())[0]
		if xp_id in stats:
			fields.append(("Account XP", f"{stats[xp_id]:,}"))
		time_id = discord.utils.find(lambda m: m[1]["Type"] == "RoyaleTimePlayed", self.mappings.items())[0]
		if time_id in stats:
			fields.append(("Time Played", utilities.secs_to_letter_format(stats[time_id], limit = 3600)))
		await ctx.embed_reply(f"ID: {data['id']}", title = data["attributes"]["name"], fields = fields)
	@royale_player.command(enabled = False, hidden = True,
							name = "levels", aliases = ["level", "xp", "exp", "experience"])
	async def royale_player_levels(self, ctx, player: str):
		'''Levels'''
		data = await self.get_player(player)
		if not data:
			return await ctx.embed_reply(f"{ctx.bot.error_emoji} Error: Player not found")
		stats = data["attributes"]["stats"]
		fields = []
		account_level_id = discord.utils.find(lambda m: m[1]["Type"] == "RoyaleAccountLevel", self.mappings.items())[0]
		account_xp_id = discord.utils.find(lambda m: m[1]["Type"] == "RoyaleAccountXP", self.mappings.items())[0]
		if account_level_id in stats and account_xp_id in stats:
			fields.append(("Account Level", f"{stats[account_level_id]} ({stats[account_xp_id]:,} XP)", False))
		levels = {}
		xp = {}
		for stat, value in stats.items():
			if self.mappings.get(stat, {}).get("Type") == "RoyaleChampionLevel":
				levels[self.mappings[stat]["Name"]] = value
			elif self.mappings.get(stat, {}).get("Type") == "RoyaleChampionXP":
				xp[self.mappings[stat]["Name"]] = value
		# Champions listed by descending XP.
		xp = sorted(xp.items(), key = lambda x: x[1], reverse = True)
		for name, value in xp:
			emoji = getattr(self, name.lower().replace(' ', '_') + "_emoji", "")
			fields.append((f"{emoji} {name}", f"{levels[name]} ({value:,} XP)"))
		await ctx.embed_reply(f"ID: {data['id']}", title = data["attributes"]["name"], fields = fields)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ReduceJoin op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def _input_array(num_dims):
  """Creates an ndarray where each element is the binary of its linear index.
  Args:
    num_dims: The number of dimensions to create.
  Returns:
    An ndarray of shape [2] * num_dims.
  """
  fmt = "{:0%db}" % num_dims
  flat = np.array([fmt.format(index) for index in xrange(2**num_dims)],
                  dtype="S%d" % num_dims)
  return flat.reshape([2] * num_dims)
def _joined_array(num_dims, reduce_dim):
  """Creates an ndarray with the result from reduce_join on input_array.
  Args:
    num_dims: The number of dimensions of the original input array.
    reduce_dim: The dimension to reduce.
  Returns:
    An ndarray of shape [2] * (num_dims - 1).
  """
  fmt = "{:0%db}" % (num_dims - 1)
  joined = np.zeros(shape=[2] * (num_dims - 1), dtype="S%d" % (2 * num_dims))
  flat = joined.ravel()
  for index in xrange(2**(num_dims - 1)):
    bits = fmt.format(index)
    # Splice the reduced dimension's bit back in at reduce_dim for each of
    # its two values, then concatenate the results.
    template = bits[:reduce_dim] + "%d" + bits[reduce_dim:]
    flat[index] = "".join([template % j for j in xrange(2)])
  return joined
class UnicodeTestCase(test.TestCase):
  """Test case with Python3-compatible string comparator."""
  def assertAllEqualUnicode(self, truth, actual):
    # Normalize both sides to unicode dtype so bytes vs. str differences
    # between Python 2 and 3 do not cause spurious mismatches.
    expected = np.array(truth).astype("U")
    observed = np.array(actual).astype("U")
    self.assertAllEqual(expected, observed)
class ReduceJoinTestHelperTest(UnicodeTestCase):
  """Tests for helper functions."""
  def testInputArray(self):
    num_dims = 3
    expected = ["{:03b}".format(index) for index in xrange(2**num_dims)]
    flattened = _input_array(num_dims).reshape([-1])
    self.assertAllEqualUnicode(expected, flattened)
  def testJoinedArray(self):
    num_dims = 3
    # Expected joins for reduce_dim 0, 1, and 2 respectively.
    expected_per_dim = [[["000100", "001101"], ["010110", "011111"]],
                        [["000010", "001011"], ["100110", "101111"]],
                        [["000001", "010011"], ["100101", "110111"]]]
    for dim in xrange(3):
      self.assertAllEqualUnicode(expected_per_dim[dim],
                                 _joined_array(num_dims, reduce_dim=dim))
class ReduceJoinTest(UnicodeTestCase):
  def _testReduceJoin(self,
                      input_array,
                      truth,
                      truth_shape,
                      reduction_indices,
                      keep_dims=False,
                      separator=""):
    """Compares the output of reduce_join to an expected result.
    Args:
      input_array: The string input to be joined.
      truth: An array or np.array of the expected result.
      truth_shape: An array or np.array of the expected shape.
      reduction_indices: The indices to reduce over.
      keep_dims: Whether or not to retain reduced dimensions.
      separator: The separator to use for joining.
    """
    with self.test_session():
      output = string_ops.reduce_join(
          inputs=input_array,
          reduction_indices=reduction_indices,
          keep_dims=keep_dims,
          separator=separator)
      output_array = output.eval()
    # Check both the evaluated values and the statically inferred shape.
    self.assertAllEqualUnicode(truth, output_array)
    self.assertAllEqual(truth_shape, output.get_shape())
  def _testMultipleReduceJoin(self,
                              input_array,
                              reduction_indices,
                              separator=" "):
    """Tests reduce_join for one input and multiple reduction_indices.
    Does so by comparing the output to that from nested reduce_string_joins.
    The correctness of single-dimension reduce_join is verified by other
    tests below using _testReduceJoin.
    Args:
      input_array: The input to test.
      reduction_indices: The indices to reduce.
      separator: The separator to use when joining.
    """
    num_dims = len(input_array.shape)
    # Falsy reduction_indices (None/empty) means "reduce all dimensions";
    # reversed order matches the op's reduction order.
    truth_red_indices = reduction_indices or list(reversed(xrange(num_dims)))
    with self.test_session():
      output = string_ops.reduce_join(
          inputs=input_array,
          reduction_indices=reduction_indices,
          keep_dims=False,
          separator=separator)
      output_keep_dims = string_ops.reduce_join(
          inputs=input_array,
          reduction_indices=reduction_indices,
          keep_dims=True,
          separator=separator)
      # Ground truth: reduce one index at a time with keep_dims=True, then
      # squeeze the reduced axes to model keep_dims=False.
      truth = input_array
      for index in truth_red_indices:
        truth = string_ops.reduce_join(
            inputs=truth,
            reduction_indices=index,
            keep_dims=True,
            separator=separator)
      truth_squeezed = array_ops.squeeze(truth, squeeze_dims=truth_red_indices)
      output_array = output.eval()
      output_keep_dims_array = output_keep_dims.eval()
      truth_array = truth.eval()
      truth_squeezed_array = truth_squeezed.eval()
    # Compare evaluated values and static shapes for both variants.
    self.assertAllEqualUnicode(truth_array, output_keep_dims_array)
    self.assertAllEqualUnicode(truth_squeezed_array, output_array)
    self.assertAllEqual(truth.get_shape(), output_keep_dims.get_shape())
    self.assertAllEqual(truth_squeezed.get_shape(), output.get_shape())
def testRankOne(self):
input_array = ["this", "is", "a", "test"]
truth = "thisisatest"
truth_shape = []
self._testReduceJoin(input_array, truth, truth_shape, reduction_indices=0)
def testRankTwo(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
truth_shape_dim_zero = [4]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape_dim_one = [2]
self._testReduceJoin(
input_array, truth_dim_zero, truth_shape_dim_zero, reduction_indices=0)
self._testReduceJoin(
input_array, truth_dim_one, truth_shape_dim_one, reduction_indices=1)
def testRankFive(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
self._testReduceJoin(
input_array, truths[i], truth_shape, reduction_indices=i)
def testNegative(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
self._testReduceJoin(
input_array, truths[i], truth_shape, reduction_indices=i - 5)
def testSingletonDimension(self):
input_arrays = [
_input_array(num_dims=5).reshape([2] * i + [1] + [2] * (5 - i))
for i in xrange(6)
]
truth = _input_array(num_dims=5)
truth_shape = [2] * 5
for i in xrange(6):
self._testReduceJoin(
input_arrays[i], truth, truth_shape, reduction_indices=i)
def testSeparator(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["this please", "is do", "a not", "test panic"]
truth_shape_dim_zero = [4]
truth_dim_one = ["this is a test", "please do not panic"]
truth_shape_dim_one = [2]
self._testReduceJoin(
input_array,
truth_dim_zero,
truth_shape_dim_zero,
reduction_indices=0,
separator=" ")
self._testReduceJoin(
input_array,
truth_dim_one,
truth_shape_dim_one,
reduction_indices=1,
separator=" ")
def testUnknownShape(self):
input_array = [["a"], ["b"]]
truth = ["ab"]
truth_shape = None
with self.test_session():
placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
reduced = string_ops.reduce_join(placeholder, reduction_indices=0)
output_array = reduced.eval(feed_dict={placeholder.name: input_array})
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, reduced.get_shape())
def testUnknownIndices(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape = None
with self.test_session():
placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
reduced = string_ops.reduce_join(
input_array, reduction_indices=placeholder)
output_array_dim_zero = reduced.eval(feed_dict={placeholder.name: [0]})
output_array_dim_one = reduced.eval(feed_dict={placeholder.name: [1]})
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
self.assertAllEqual(truth_shape, reduced.get_shape())
def testKeepDims(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = [["thisplease", "isdo", "anot", "testpanic"]]
truth_shape_dim_zero = [1, 4]
truth_dim_one = [["thisisatest"], ["pleasedonotpanic"]]
truth_shape_dim_one = [2, 1]
self._testReduceJoin(
input_array,
truth_dim_zero,
truth_shape_dim_zero,
reduction_indices=0,
keep_dims=True)
self._testReduceJoin(
input_array,
truth_dim_one,
truth_shape_dim_one,
reduction_indices=1,
keep_dims=True)
def testMultiIndex(self):
num_dims = 3
input_array = _input_array(num_dims=num_dims)
# Also tests [].
for i in xrange(num_dims + 1):
for permutation in itertools.permutations(xrange(num_dims), i):
self._testMultipleReduceJoin(input_array, reduction_indices=permutation)
def testInvalidReductionIndices(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "Invalid reduction dim"):
string_ops.reduce_join(inputs="", reduction_indices=0)
with self.assertRaisesRegexp(ValueError,
"Invalid reduction dimension -3"):
string_ops.reduce_join(inputs=[[""]], reduction_indices=-3)
with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
string_ops.reduce_join(inputs=[[""]], reduction_indices=2)
with self.assertRaisesRegexp(ValueError,
"Invalid reduction dimension -3"):
string_ops.reduce_join(inputs=[[""]], reduction_indices=[0, -3])
with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
string_ops.reduce_join(inputs=[[""]], reduction_indices=[0, 2])
with self.assertRaisesRegexp(ValueError, "Duplicate reduction index 0"):
string_ops.reduce_join(inputs=[[""]], reduction_indices=[0, 0])
def testZeroDims(self):
valid_truth_shape = [0]
with self.test_session():
inputs = np.zeros([0, 1], dtype=str)
with self.assertRaisesRegexp(ValueError, "dimension 0 with size 0"):
string_ops.reduce_join(inputs=inputs, reduction_indices=0)
valid = string_ops.reduce_join(inputs=inputs, reduction_indices=1)
valid_array_shape = valid.eval().shape
self.assertAllEqualUnicode(valid_truth_shape, valid_array_shape)
def testInvalidArgsUnknownShape(self):
with self.test_session():
placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
index_too_high = string_ops.reduce_join(placeholder, reduction_indices=1)
duplicate_index = string_ops.reduce_join(
placeholder, reduction_indices=[-1, 1])
with self.assertRaisesOpError("Invalid reduction dimension 1"):
index_too_high.eval(feed_dict={placeholder.name: [""]})
with self.assertRaisesOpError("Duplicate reduction dimension 1"):
duplicate_index.eval(feed_dict={placeholder.name: [[""]]})
def testInvalidArgsUnknownIndices(self):
with self.test_session():
placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
reduced = string_ops.reduce_join(
["test", "test2"], reduction_indices=placeholder)
with self.assertRaisesOpError("reduction dimension -2"):
reduced.eval(feed_dict={placeholder.name: -2})
with self.assertRaisesOpError("reduction dimension 2"):
reduced.eval(feed_dict={placeholder.name: 2})
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
|
|
import numpy as np
import pandas as pd
import json
from tl_algs import tl_alg
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import euclidean_distances
def _kmeans_cluster(test_set_X, train_pool_X, train_pool_y, cluster_factor,
                    rand_seed):
    """
    Partition dataset into clusters using k-means. The number of clusters is
    the number of training and test instances divided by the cluster factor.

    Args:
        test_set_X: DataFrame representing feature matrix for test set.
        train_pool_X: DataFrame representing feature matrix for training set.
        train_pool_y: Series representing label vector for training set.
        cluster_factor: Ratio of instances to clusters. For example, a cluster
            factor of 100 yields approximately 100 instances per cluster.
        rand_seed: Random seed passed to sklearn.cluster.KMeans.

    Returns:
        List of dictionaries with keys 'X_train', a DataFrame of training
        instances; 'y_train', a Series of labels for X_train; and 'X_test', a
        DataFrame of test instances. This list has the property that its ith
        entry is the ith cluster of training and test instances.
    """
    master_X_df = train_pool_X.append(test_set_X)
    # KMeans requires an integral n_clusters.  The original true division
    # ('/') produces a float under Python 3, which breaks both
    # KMeans(n_clusters=...) and the range() call below.  Use floor division
    # and never request fewer than one cluster.
    num_clust = max(1, master_X_df.shape[0] // cluster_factor)
    kmeans = KMeans(n_clusters=num_clust, random_state=rand_seed)
    cluster_model = kmeans.fit(master_X_df)
    clusters = [{
        'X_train': pd.DataFrame(),
        'y_train': pd.Series(),
        'X_test': pd.DataFrame(),
        'y_test': pd.Series()
    } for i in range(num_clust)]
    # Populate clusters based on test data.
    X_test_clusters = cluster_model.predict(test_set_X)
    for i, clust in enumerate(X_test_clusters):
        clusters[clust]['X_test'] = clusters[clust]['X_test']. \
            append(test_set_X.iloc[i, ])
    # Populate clusters based on training data.
    X_train_clusters = cluster_model.predict(train_pool_X)
    for i, clust in enumerate(X_train_clusters):
        clusters[clust]['X_train'] = clusters[clust]['X_train']. \
            append(train_pool_X.iloc[i, ])
        clusters[clust]['y_train'] = clusters[clust]['y_train'] \
            .append(pd.Series([train_pool_y.iloc[i]]))
    # Remove clusters that lack either test or training instances; Burak
    # filtering within such a cluster would be meaningless.
    to_remove = [
        i for (i, d) in enumerate(clusters)
        if (d['X_test'].shape[0] == 0 or d['X_train'].shape[0] == 0)
    ]
    # Delete from the highest index down so earlier indices stay valid.
    to_remove.reverse()
    for i in to_remove:
        del clusters[i]
    return clusters
class Burak(tl_alg.Base_Transfer):
    """
    Implements Burak's algorithm [1]. First, the training set is filtered using
    k-nearest neighbors, so that for each test instance, the k unique nearest
    training instances by Euclidean distance are retained. Next, the classifier
    is trained on the filtered training instances. Since computing the distance
    between every test instance and every training instance is computationally
    expensive, this implementation clusters training instances using k-means
    and applies Burak's algorithm within each cluster [2].

    [1] Burak Turhan, Tim Menzies, Ayse B. Bener, and Justin Di Stefano. 2009.
    "On the Relative Value of Cross-Company and Within-Company Data for Defect
    Prediction." Empirical Software Engineering 14, 5, 540-78.

    [2] Fayola Peters, Tim Menzies, and Andrian Marcus. 2013. "Better Cross-
    Company Defect Prediction." IEEE International Working Conference on Mining
    Software Repositories, 409-18.

    Attributes:
        k: The number of unique nearest training instances to retain for each
            target test instance. Following [1], the default value is 10.
        cluster_factor: The ratio of instances to clusters for k-means. For
            example, a cluster factor of 100 yields approximately 100 instances
            per cluster.
    """

    def __init__(self, test_set_X, test_set_domain, train_pool_X, train_pool_y,
                 train_pool_domain, Base_Classifier, classifier_params={},
                 rand_seed=None, k=10, cluster_factor=100):
        # NOTE: classifier_params uses a mutable default; it is only ever
        # read (forwarded as **kwargs), never mutated, so this is harmless.
        super(Burak, self).__init__(
            test_set_X,
            test_set_domain,
            train_pool_X,
            train_pool_y,
            train_pool_domain,
            Base_Classifier,
            rand_seed=rand_seed,
            classifier_params=classifier_params
        )
        self.k = k
        self.cluster_factor = cluster_factor

    def filter_instances(self, train_pool_X, train_pool_y, X_test, k):
        """
        For each test instance, retain the k unique nearest training instances
        by Euclidean distance.

        Args:
            train_pool_X: DataFrame representing training set feature matrix.
            train_pool_y: Series representing training set label vector.
            X_test: DataFrame representing test set feature matrix.
            k: For each test instance, the number of unique nearest training
                instances to retain.

        Returns:
            train_X: Feature matrix for filtered training set.
            train_y: Label vector for filtered training set.
        """
        filtered_X = pd.DataFrame()
        filtered_y = []
        # Positional lookups below assume a 0..n-1 index, so discard whatever
        # index the caller passed in.
        working_X = train_pool_X.reset_index(drop=True)
        working_y = list(train_pool_y)
        # For each instance in the test set...
        for (__, row) in X_test.iterrows():
            # ...compute distances to every instance in the training pool...
            distances = euclidean_distances([row], train_pool_X)[0]
            # ...rank training instances from nearest to farthest...
            sorted_distance_indexes = [
                index for index, __ in
                sorted(enumerate(distances), key=lambda x: x[1])
            ]
            # ...and keep the k closest instances not already retained.
            for i in sorted_distance_indexes[:k]:
                if i not in filtered_X.index:
                    filtered_X = filtered_X.append(working_X.iloc[i, :])
                    filtered_y.append(working_y[i])
        return filtered_X, pd.Series(filtered_y)

    def burak_filter(self, test_set_X, test_set_domain, train_pool_X,
                     train_pool_y, train_pool_domain, Base_Classifier, k=10,
                     rand_seed=None, classifier_params={}):
        """
        Train classifier on filtered training data and return class predictions
        and predicted-class probabilities. See class documentation for more
        information on the form of this method's arguments.

        Returns:
            confidence: List of class-prediction probabilities, the ith entry
                of which gives the confidence for the ith prediction.
            predictions: List of class predictions.
        """
        X_filtered, y_filtered = self.filter_instances(
            train_pool_X,
            train_pool_y,
            test_set_X,
            k
        )
        classifier = Base_Classifier(
            random_state=rand_seed,
            **classifier_params
        )
        f = classifier.fit(X_filtered, y_filtered)
        # Use the probability of the last class.  Indexing with -1 (instead
        # of the previous a[1]) matches batch_burak_filter and avoids an
        # IndexError when the filtered training data contains only a single
        # class; for the usual binary case the value is identical.
        confidence = [a[-1] for a in f.predict_proba(test_set_X)]
        predictions = f.predict(test_set_X)
        return confidence, predictions

    def batch_burak_filter(self, test_set_X, test_set_domain, train_pool_X,
                           train_pool_y, train_pool_domain, Base_Classifier,
                           k=10, rand_seed=None, cluster_factor=100,
                           classifier_params={}):
        """
        Train classifier on filtered training data using the k-means heuristic
        and return class predictions and class-prediction probabilities. See
        class documentation for more information on the form of this method's
        arguments.

        Returns:
            confidence: List of class-prediction probabilities, the ith entry
                of which gives the confidence for the ith prediction.
            predictions: List of class predictions.
        """
        clusters = _kmeans_cluster(
            test_set_X,
            train_pool_X,
            train_pool_y,
            cluster_factor,
            rand_seed
        )
        X_train_filtered = pd.DataFrame()
        y_train_filtered = pd.Series()
        # Apply Burak filter within each cluster, accumulating the kept
        # instances into one training set.
        for d in clusters:
            more_X_train, more_y_train = self.filter_instances(
                d['X_train'],
                d['y_train'],
                d['X_test'],
                k
            )
            X_train_filtered = X_train_filtered.append(more_X_train)
            y_train_filtered = y_train_filtered.append(more_y_train)
        classifier = Base_Classifier(
            random_state=rand_seed,
            **classifier_params
        )
        f = classifier.fit(X_train_filtered, y_train_filtered.tolist())
        confidence = [a[-1] for a in f.predict_proba(test_set_X)]
        predictions = f.predict(test_set_X)
        return confidence, predictions

    def train_filter_test(self):
        """
        Train classifier on filtered training data using the k-means heuristic
        and return class predictions and class-prediction probabilities. This
        method calls batch_burak_filter with class attributes.

        Returns:
            confidence: List of class-prediction probabilities, the ith entry
                of which gives the confidence for the ith prediction.
            predictions: List of class predictions.
        """
        return self.batch_burak_filter(
            self.test_set_X,
            self.test_set_domain,
            self.train_pool_X,
            self.train_pool_y,
            self.train_pool_domain,
            self.Base_Classifier,
            k=self.k,
            rand_seed=self.rand_seed,
            cluster_factor=self.cluster_factor,
            classifier_params=self.classifier_params
        )

    def json_encode(self):
        """Extend the base JSON encoding with this algorithm's parameters."""
        base = tl_alg.Base_Transfer.json_encode(self)
        base.update({"cluster_factor": self.cluster_factor})
        return base
|
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Commander gathers Variables to set them with an UpdateList.
The command process can be AllSetsForEach (i.e. a map of the update successively for each)
or an EachSetForAll (i.e. each set is a map of each).
NOTE : the walk and self attributes are always reset to False after a call of command.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Itemizers.Pather"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
from ShareYourSystem.Standards.Itemizers import Getter,Setter
#</ImportSpecificModules>
#<DefineLocals>
# Sigil strings recognized by mimic_set/mimic_get when parsing keys:
CommandPrefixStr="--"   # marks a setting key as a command
CommandWalkStr="..."    # requests a recursive walk over commanded values
CommandSelfStr="/"      # also applies the command to self
CommandAddStr="+"       # mimic_get: sum the values of several get keys
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class CommanderClass(BaseClass):
    """Itemizer that gathers Variables (via get) and applies a list of
    settings to them, optionally walking recursively before/after the
    command and optionally including itself (see the module docstring)."""

    def default_init(
                self,
                _CommandTopDeriveCommanderRigidVariable=None,  # top commander of a walk chain
                _CommandingKeyVariable=None,        # what to get/command
                _CommandingSetVariable=None,        # what to set on each commanded value
                _CommandingOrderStr="AllSetsForEachGet",  # iteration order of sets vs gets
                _CommandingBeforeWalkRigidBool=False,   # walk children before commanding
                _CommandingAfterWalkRigidBool=False,    # walk children after commanding
                _CommandingBeforeSelfRigidBool=False,   # apply sets to self before children
                _CommandingAfterSelfRigidBool=False,    # apply sets to self after children
                _CommandingGetRigidBool=True,       # resolve KeyVariable through SYS.GetList
                _CommandingSetRigidBool=True,       # normalize SetVariable through SYS.SetList
                _CommandingSetAttrOrCallRigidBool=False,  # use setAttrOrCall instead of set
                _CommandingExtraKeyVariable=None,   # extra values that only take part in walks
                _CommandedValueVariablesList=None,  # resolved commanded values (output)
                _CommandedSetVariablesList=None,    # normalized set items (output)
                _CommandedExtraValueVariablesList=None,  # resolved extra values (output)
                **_KwargVariablesDict
            ):

        #Call the parent __init__ method
        BaseClass.__init__(self,**_KwargVariablesDict)
    def do_command(self):
        """Resolve the commanded values, optionally walk before/after,
        optionally apply the sets to self before/after, apply the sets to
        every commanded value in the configured order, then reset the
        one-shot walk/self flags.

        NOTE(review): written for Python 2 — the bare map(...) calls below
        rely on map returning (and eagerly evaluating) a list.
        """

        #/####################/#
        # Determine the top Commander
        #

        #debug
        '''
        self.debug(
            [
                'First determine the CommandTopDeriveCommanderRigidVariable',
                ('self.',self,['CommandTopDeriveCommanderRigidVariable'])
            ]
        )
        '''

        #Check
        # If nobody set a top commander yet, this instance is the top.
        if self.CommandTopDeriveCommanderRigidVariable==None:
            self.CommandTopDeriveCommanderRigidVariable=self

        #/####################/#
        # Adapt maybe the type for getting things to command
        #

        #Check
        if self.CommandingGetRigidBool:

            #debug
            '''
            self.debug(
                [
                    'Adapt the type for getting things to command',
                    ("self.",self,[
                        'CommandingKeyVariable',
                        'CommandingSetVariable',
                        'CommandingBeforeWalkRigidBool',
                        'CommandingBeforeSelfRigidBool'
                    ])
                ]
            )
            '''

            #init
            # Resolve the key variable into actual value objects.
            self.CommandedValueVariablesList=SYS.GetList(
                self.CommandingKeyVariable,
                self
            )

            #init
            self.CommandedExtraValueVariablesList=SYS.GetList(
                self.CommandingExtraKeyVariable,
                self
            )

        else:

            #init
            # Caller already supplied resolved values; alias them directly.
            self.CommandedValueVariablesList=self.CommandingKeyVariable

            #init
            self.CommandedExtraValueVariablesList=self.CommandingExtraKeyVariable

        #debug
        '''
        self.debug(
            [
                ('self.',self,['CommandingKeyVariable']),
                'in the end, self.CommandedValueVariablesList is ',
                SYS._str(self.CommandedValueVariablesList)
            ]
        )
        '''

        #/###################/#
        # Inform the getted values who is the top
        #

        #debug
        '''
        self.debug(
            [
                'We inform the commanded values who is the top commander'
            ]
        )
        '''

        #map
        map(
            lambda __CommandedValueVariable:
            setattr(
                __CommandedValueVariable,
                'CommandTopDeriveCommanderRigidVariable',
                self.CommandTopDeriveCommanderRigidVariable
            ),
            self.CommandedValueVariablesList
        )

        #/###################/#
        # Check if we have to walk before
        #

        #Check
        if self.CommandingBeforeWalkRigidBool:

            #debug
            '''
            self.debug(
                [
                    'we are going to walk before the command',
                    'before we setCommand'
                ]
            )
            '''

            #set
            # Propagate this command's arguments onto the children first.
            self.setCommand()

            #Debug
            '''
            for __CommandedValueVariable in CommandedValueVariablesList:

                #debug
                self.debug(
                        '__CommandedValueVariable is '+SYS._str( __CommandedValueVariable)
                    )

                #set
                __CommandedValueVariable.set(
                        'GettingNewBool',False
                    ).command(
                        *self.getDoing().values()
                    ).set(
                        'GettingNewBool',True
                    )
            '''

            #debug
            '''
            self.debug(
                [
                    'Ok we can setAttr now'
                ]
            )
            '''

            #map the recursion but pay watch to not set new things to walk in...it is an infinite walk either !
            map(
                lambda __CommandedValueVariable:
                __CommandedValueVariable.setAttr(
                    'GettingNewBool',False
                ).command(
                ).setAttr(
                    'GettingNewBool',True
                ),
                self.CommandedValueVariablesList+self.CommandedExtraValueVariablesList
            )

        #/####################/#
        # Adapt maybe the type for setting things in the commanded variables
        #

        #Check
        if self.CommandingSetRigidBool:

            #debug
            '''
            self.debug(
                [
                    'Adapt the type for setting things in the commanded variables',
                    ("self.",self,['CommandingSetVariable'])
                ]
            )
            '''

            #inits
            # Normalize the set variable into a list of set items.
            self.CommandedSetVariablesList=SYS.SetList(self.CommandingSetVariable)

        else:

            #alias direct
            self.CommandedSetVariablesList=self.CommandingSetVariable

        #debug
        '''
        self.debug(
            [
                'in the end, CommandedSetVariablesList is ',
                SYS._str(CommandedSetVariablesList)
            ]
        )
        '''

        #/###################/#
        # Ok now we command locally
        #

        #Check
        # Maybe apply the sets to self before touching the children.
        if self.CommandingBeforeSelfRigidBool:

            #debug
            '''
            self.debug(
                [
                    'We command before self here',
                    ('self.',self,[
                        'CommandingSetRigidBool',
                        'CommandingSetAttrOrCallRigidBool'
                    ])
                ]
            )
            '''

            #Check
            if self.CommandingSetAttrOrCallRigidBool==False:

                #add
                self.mapSet(
                    self.CommandedSetVariablesList
                )

            else:

                #add
                map(
                    lambda __ElementVariable:
                    self.setAttrOrCall(
                        __ElementVariable
                    ),
                    self.CommandedSetVariablesList
                )

        #Check for the order
        # AllSetsForEachGet: outer loop over values, inner loop over sets.
        if self.CommandingOrderStr=="AllSetsForEachGet":

            #debug
            '''
            self.debug(
                [
                    'Ok now we do a AllSetsForEachGet'
                ]
            )
            '''

            #Debug
            """
            for __CommandedValueVariable in CommandedValueVariablesList:

                #debug
                self.debug(
                    [
                        '__CommandedValueVariable is ',
                        SYS._str(__CommandedValueVariable),
                        'CommandedSetVariablesList is ',
                        SYS._str(CommandedSetVariablesList)
                    ]
                )

                #map
                map(
                    lambda __CommandedSetVariable:
                    __CommandedValueVariable.set(
                        *__CommandedSetVariable
                    ),
                    CommandedSetVariablesList
                )
            """

            #Check
            if self.CommandingSetAttrOrCallRigidBool:

                #debug
                '''
                self.debug(
                    [
                        'map a SetAttrOrCallBool',
                        ('self.',self,[
                            'CommandedValueVariablesList',
                            'CommandedSetVariablesList'
                        ])
                    ]
                )
                '''

                #map
                map(
                    lambda __CommandedValueVariable:
                    map(
                        lambda __CommandedSetVariable:
                        __CommandedValueVariable.setAttrOrCall(
                            __CommandedSetVariable
                        ),
                        self.CommandedSetVariablesList
                    ),
                    self.CommandedValueVariablesList
                )

                #debug
                '''
                self.debug(
                    [
                        'Ok end of SetAttrOrCallBool'
                    ]
                )
                '''

            else:

                #debug
                '''
                self.debug(
                    [
                        'We call a map set',
                        ('self.',self,[
                            'CommandedValueVariablesList',
                            'CommandedSetVariablesList'
                        ])
                    ]
                )
                '''

                #map
                # Values without a set method are silently skipped.
                map(
                    lambda __CommandedValueVariable:
                    map(
                        lambda __CommandedSetVariable:
                        __CommandedValueVariable.set(
                            *__CommandedSetVariable
                        )
                        if hasattr(__CommandedValueVariable,'set')
                        else None,
                        self.CommandedSetVariablesList
                    ),
                    self.CommandedValueVariablesList
                )

        # EachSetForAllGets: outer loop over sets, inner loop over values.
        elif self.CommandingOrderStr=="EachSetForAllGets":

            #Check
            if self.CommandingSetAttrOrCallRigidBool:

                #map
                map(
                    lambda __CommandedSetVariable:
                    map(
                        lambda __CommandedValueVariable:
                        __CommandedValueVariable.setAttrOrCall(
                            __CommandedSetVariable
                        ),
                        self.CommandedValueVariablesList
                    ),
                    self.CommandedSetVariablesList
                )

            else:

                #map
                map(
                    lambda __CommandedSetVariable:
                    map(
                        lambda __CommandedValueVariable:
                        __CommandedValueVariable.set(
                            *__CommandedSetVariable
                        ),
                        self.CommandedValueVariablesList
                    ),
                    self.CommandedSetVariablesList
                )

        #Check
        # Maybe apply the sets to self after the children.
        if self.CommandingAfterSelfRigidBool:

            #debug
            self.debug(
                [
                    'We command after self here'
                ]
            )

            #Check
            if self.CommandingSetAttrOrCallRigidBool==False:

                #add
                self.mapSet(
                    self.CommandedSetVariablesList
                )

            else:

                #add
                map(
                    lambda __ElementVariable:
                    self.setAttrOrCall(
                        __ElementVariable
                    ),
                    self.CommandedSetVariablesList
                )

        #/###################/#
        # And we check for a walk after
        #

        #Check
        if self.CommandingAfterWalkRigidBool:

            #debug
            '''
            self.debug(
                [
                    'we are going to walk after the command',
                    #'self.CommandedValueVariablesList is '+SYS._str(
                    #   self.CommandedValueVariablesList),
                    #('self.',self,['CommandingKeyVariable']),
                    'We have to determine the things to propagate',
                    'CommandingKeyVariable and CommandingSetVariable notably ',
                    'if it is None in the commanded value"
                ]
            )
            '''

            #set
            self.setCommand()

            #Debug
            '''
            for __CommandedValueVariable in CommandedValueVariablesList:

                #debug
                self.debug(
                        '__CommandedValueVariable is '+SYS._str( __CommandedValueVariable)
                    )

                #set
                __CommandedValueVariable.set(
                        'GettingNewBool',False
                    ).command(
                        *self.getDoing().values()
                    ).set(
                        'GettingNewBool',True
                    )
            '''

            #debug
            self.debug(
                [
                    'Ok we can command now',
                    ('self.',self,[
                        'CommandedValueVariablesList',
                        'CommandedExtraValueVariablesList'
                    ])
                ]
            )

            #map the recursion but pay watch to not set new things to walk in...it is an infinite walk either !
            map(
                lambda __CommandedValueVariable:
                __CommandedValueVariable.setAttr(
                    'GettingNewBool',False
                ).command(
                ).setAttr(
                    'GettingNewBool',True
                )
                if hasattr(
                    __CommandedValueVariable,
                    'command'
                )
                else None,
                self.CommandedValueVariablesList+self.CommandedExtraValueVariablesList
            )

        #/#######################/#
        # Reset always these values to False
        #

        #set
        # One-shot flags: every command call must re-request walk/self
        # behavior explicitly (see module docstring NOTE).
        self.CommandingBeforeWalkRigidBool=False
        self.CommandingAfterWalkRigidBool=False
        self.CommandingBeforeSelfRigidBool=False
        self.CommandingAfterSelfRigidBool=False
        self.CommandingSetAttrOrCallRigidBool=False
        self.CommandingGetRigidBool=True
        self.CommandingSetRigidBool=True
        self.CommandTopDeriveCommanderRigidVariable=None

        #debug
        '''
        self.debug(
            [
                'End of the command'
            ]
        )
        '''
    def setCommand(self):
        """Propagate this commander's own do_command arguments onto each
        commanded value, so a subsequent walk re-commands the children with
        the same settings (Key/Set variables only where the child has none).

        NOTE(review): Python 2 only — relies on dict .keys()/.values()
        returning sliceable lists and zip() returning a list.
        """

        #/##############/#
        # Get all the commanding attributes
        #

        #set
        CommandedOrderedDict=self.getDoing(
            SYS.CommanderClass
        )
        # Children must not re-trigger a before-self command of their own.
        CommandedOrderedDict['CommandingBeforeSelfRigidBool']=False
        CommandedLiargVariablesList=CommandedOrderedDict.values()

        #/##############/#
        # Special get for KeyVariable and SetVariable
        #

        #get
        # The first two doing attributes are the key and set variables.
        CommandedNewKeyVariable=CommandedLiargVariablesList[0]

        #get
        CommandedNewSetVariable=CommandedLiargVariablesList[1]

        #get
        # Remaining attributes are forwarded as (name, value) pairs.
        CommandedNewTuplesList=zip(
            CommandedOrderedDict.keys()[2:],
            CommandedLiargVariablesList[2:]
        )

        #/##############/#
        # Map a setAttr
        #

        #map
        # Give each commanded value the key variable, only if it has none.
        map(
            lambda __CommandedValueVariable:
            __CommandedValueVariable.setAttr(
                'CommandingKeyVariable',
                CommandedNewKeyVariable
            )
            if __CommandedValueVariable.CommandingKeyVariable==None
            else None,
            self.CommandedValueVariablesList
        )

        #map
        # Same rule for the set variable.
        map(
            lambda __CommandedValueVariable:
            __CommandedValueVariable.setAttr(
                'CommandingSetVariable',
                CommandedNewSetVariable
            )
            if __CommandedValueVariable.CommandingSetVariable==None
            else None,
            self.CommandedValueVariablesList
        )

        #map
        # Forward all remaining commanding attributes unconditionally.
        map(
            lambda __CommandedValueVariable:
            __CommandedValueVariable.mapSetAttr(
                CommandedNewTuplesList
            ),
            self.CommandedValueVariablesList
        )
    def mimic_get(self):
        """Intercept get: keys prefixed with CommandAddStr ('+') are split
        into '+Key1+Key2...' sub keys; each is got and the results summed.
        Any other key falls through to the base get."""

        #debug
        '''
        self.debug(
            ('self.',self,[
                    'GettingKeyVariable',
                ])
        )
        '''

        #Check
        if type(self.GettingKeyVariable)==str:

            #Check
            if self.GettingKeyVariable.startswith(CommandAddStr):

                #split
                # Drop the leading empty element produced by the prefix.
                AddGetKeyStrsList=self.GettingKeyVariable.split(CommandAddStr)[1:]

                #debug
                '''
                self.debug(
                    [
                        'We map get',
                        'AddGetKeyStrsList is '+str(AddGetKeyStrsList)
                    ]
                )
                '''

                #map get
                AddVariablesList=self[
                    Getter.GetMapStr
                ](*AddGetKeyStrsList).ItemizedMapValueVariablesList

                #debug
                '''
                self.debug(
                    [
                        'We sum now',
                        'AddVariablesList is '+SYS._str(AddVariablesList)
                    ]
                )
                '''

                #map get
                self.GettedValueVariable=SYS.sum(AddVariablesList)

                #return
                # Stop the base get hook from running.
                return {'HookingIsBool':False}

        #return
        return BaseClass.get(self)
    def mimic_set(self):
        """Intercept set: keys carrying the command sigils ('--', '...',
        '/') are routed to self.command() with the matching walk/self
        flags; anything else falls through to the base set."""

        #debug
        '''
        self.debug(
            ('self.',self,[
                    'SettingKeyVariable',
                    'SettingValueVariable'
                ])
        )
        '''

        #Check
        if type(self.SettingKeyVariable)==str:

            #Check
            # '--...' / '--/...' / '--' : command now, maybe walk after.
            if self.SettingKeyVariable.startswith(
                CommandPrefixStr
            ):

                #debug
                '''
                self.debug(
                        'We command here'
                    )
                '''

                #deprefix
                CommandGetKeyStr=SYS.deprefix(
                    self.SettingKeyVariable,
                    CommandPrefixStr
                )

                #Check
                if CommandGetKeyStr.startswith(CommandWalkStr):

                    #debug
                    '''
                    self.debug(
                            'We command-walk here'
                        )
                    '''

                    #command
                    self.command(
                        SYS.deprefix(
                            CommandGetKeyStr,
                            CommandWalkStr
                        ),
                        self.SettingValueVariable,
                        _AfterWalkRigidBool=True
                    )

                    #stop the setting
                    return {'HookingIsBool':False}

                elif CommandGetKeyStr.startswith(CommandSelfStr+CommandWalkStr):

                    #debug
                    '''
                    self.debug(
                            'We command-self-walk here'
                        )
                    '''

                    #command
                    self.command(
                        SYS.deprefix(
                            CommandGetKeyStr,
                            CommandSelfStr+CommandWalkStr
                        ),
                        self.SettingValueVariable,
                        _AfterWalkRigidBool=True,
                        _SelfBool=True
                    )

                    #stop the setting
                    return {'HookingIsBool':False}

                else:

                    #command
                    # Plain '--Key': command immediately, no walking.
                    self.command(
                        CommandGetKeyStr,
                        self.SettingValueVariable
                    )

                    #stop the setting
                    return {'HookingIsBool':False}

            #Check
            # '...--' / '.../' : walk before commanding.
            elif self.SettingKeyVariable.startswith(
                CommandWalkStr
            ):

                #debug
                '''
                self.debug(
                        'We walk-command here'
                    )
                '''

                CommandGetKeyStr=SYS.deprefix(
                    self.SettingKeyVariable,
                    CommandWalkStr
                )

                #Check
                if CommandGetKeyStr.startswith(CommandPrefixStr):

                    #command
                    self.command(
                        SYS.deprefix(
                            CommandGetKeyStr,
                            CommandPrefixStr
                        ),
                        self.SettingValueVariable,
                        _BeforeWalkRigidBool=True
                    )

                    #stop the setting
                    return {'HookingIsBool':False}

                elif CommandGetKeyStr.startswith(CommandSelfStr):

                    # NOTE(review): this branch tests for CommandSelfStr
                    # ('/') but deprefixes CommandSelfStr+CommandPrefixStr
                    # ('/--'); a key like '.../X' without '--' would be
                    # mishandled — confirm the intended key grammar.

                    #command
                    self.command(
                        SYS.deprefix(
                            CommandGetKeyStr,
                            CommandSelfStr+CommandPrefixStr
                        ),
                        self.SettingValueVariable,
                        _BeforeWalkRigidBool=True,
                        _AfterSelfRigidBool=True
                    )

                    #stop the setting
                    return {'HookingIsBool':False}

            #Check
            # '/...--' : command self first, then walk.
            elif self.SettingKeyVariable.startswith(
                CommandSelfStr+CommandWalkStr+CommandPrefixStr
            ):

                #command
                self.command(
                    SYS.deprefix(
                        self.SettingKeyVariable,
                        CommandSelfStr+CommandWalkStr+CommandPrefixStr
                    ),
                    self.SettingValueVariable,
                    _BeforeWalkRigidBool=True,
                    _BeforeSelfRigidBool=True
                )

                #stop the setting
                return {'HookingIsBool':False}

        #debug
        '''
        self.debug(
            [
                'Call the base set method',
                'BaseClass is '+str(BaseClass),
                ('self.',self,['SettingKeyVariable'])
            ]
        )
        '''

        #Call the base method
        BaseClass.set(self)
#</DefineClass>
#<DefinePrint>
# Keep the verbose command bookkeeping attributes out of printed reprs.
CommanderClass.PrintingClassSkipKeyStrsList.extend(
    [
        'CommandTopDeriveCommanderRigidVariable',
        'CommandingKeyVariable',
        'CommandingSetVariable',
        'CommandingOrderStr',
        'CommandingBeforeWalkRigidBool',
        'CommandingAfterWalkRigidBool',
        'CommandingBeforeSelfRigidBool',
        'CommandingAfterSelfRigidBool',
        'CommandingGetRigidBool',
        'CommandingSetRigidBool',
        'CommandingSetAttrOrCallRigidBool',
        'CommandingExtraKeyVariable',
        'CommandedValueVariablesList',
        'CommandedSetVariablesList',
        'CommandedLiargVariablesList',
        'CommandedExtraValueVariablesList'
    ]
)
#</DefinePrint>
|
|
from fontTools.misc.py23 import byteord, tostr
import re
from bisect import bisect_right
try:
# use unicodedata backport compatible with python2:
# https://github.com/mikekap/unicodedata2
from unicodedata2 import *
except ImportError: # pragma: no cover
# fall back to built-in unicodedata (possibly outdated)
from unicodedata import *
from . import Blocks, Scripts, ScriptExtensions, OTTags
__all__ = [tostr(s) for s in (
    # names from built-in unicodedata module
    "lookup",
    "name",
    "decimal",
    "digit",
    "numeric",
    "category",
    "bidirectional",
    "combining",
    "east_asian_width",
    "mirrored",
    "decomposition",
    "normalize",
    "unidata_version",
    "ucd_3_2_0",
    # additional functions defined in this module
    "block",
    "script",
    "script_extension",
    "script_name",
    "script_code",
    "script_horizontal_direction",
    "ot_tags_from_script",
    "ot_tag_to_script",
)]
def script(char):
    """ Return the four-letter script code assigned to the Unicode character
    'char' as string.

    >>> script("a")
    'Latn'
    >>> script(",")
    'Zyyy'
    >>> script(chr(0x10FFFF))
    'Zzzz'
    """
    code = byteord(char)
    # 'bisect_right(a, x, lo=0, hi=len(a))' returns an insertion point which
    # comes after (to the right of) any existing entries of x in a, and it
    # partitions array a into two halves so that, for the left side
    # all(val <= x for val in a[lo:i]), and for the right side
    # all(val > x for val in a[i:hi]).
    # Our 'SCRIPT_RANGES' is a sorted list of ranges (only their starting
    # breakpoints); we want to use `bisect_right` to look up the range that
    # contains the given codepoint: i.e. whose start is less than or equal
    # to the codepoint. Thus, we subtract 1 from the index returned.
    i = bisect_right(Scripts.RANGES, code)
    return Scripts.VALUES[i-1]
def script_extension(char):
    """ Return the script extension property assigned to the Unicode character
    'char' as a set of string.

    >>> script_extension("a") == {'Latn'}
    True
    >>> script_extension(chr(0x060C)) == {'Rohg', 'Syrc', 'Yezi', 'Arab', 'Thaa'}
    True
    >>> script_extension(chr(0x10FFFF)) == {'Zzzz'}
    True
    """
    codepoint = byteord(char)
    index = bisect_right(ScriptExtensions.RANGES, codepoint)
    extensions = ScriptExtensions.VALUES[index - 1]
    # Code points without an explicit Script Extensions entry fall back to
    # their plain Script property value.
    return {script(char)} if extensions is None else extensions
def script_name(code, default=KeyError):
    """ Return the long, human-readable script name given a four-letter
    Unicode script code.

    If no matching name is found, a KeyError is raised by default.

    You can use the 'default' argument to return a fallback value (e.g.
    'Unknown' or None) instead of throwing an error.
    """
    try:
        long_name = Scripts.NAMES[code]
    except KeyError:
        # A KeyError *class* as default means "propagate the error".
        if isinstance(default, type) and issubclass(default, KeyError):
            raise
        return default
    return str(long_name.replace("_", " "))
_normalize_re = re.compile(r"[-_ ]+")
def _normalize_property_name(string):
"""Remove case, strip space, '-' and '_' for loose matching."""
return _normalize_re.sub("", string).lower()
# Inverse of Scripts.NAMES: normalized long script name -> four-letter code.
_SCRIPT_CODES = {_normalize_property_name(v): k
                 for k, v in Scripts.NAMES.items()}
def script_code(script_name, default=KeyError):
    """Returns the four-letter Unicode script code from its long name

    If no matching script code is found, a KeyError is raised by default.

    You can use the 'default' argument to return a fallback string (e.g.
    'Zzzz' or None) instead of throwing an error.
    """
    lookup_key = _normalize_property_name(script_name)
    try:
        return _SCRIPT_CODES[lookup_key]
    except KeyError:
        # A KeyError *class* as default means "propagate the error".
        if not (isinstance(default, type) and issubclass(default, KeyError)):
            return default
        raise
# The data on script direction is taken from CLDR 37:
# https://github.com/unicode-org/cldr/blob/release-37/common/properties/scriptMetadata.txt
# Scripts written right-to-left; consulted by script_horizontal_direction().
RTL_SCRIPTS = {
    # Unicode-1.1 additions
    'Arab',  # Arabic
    'Hebr',  # Hebrew

    # Unicode-3.0 additions
    'Syrc',  # Syriac
    'Thaa',  # Thaana

    # Unicode-4.0 additions
    'Cprt',  # Cypriot

    # Unicode-4.1 additions
    'Khar',  # Kharoshthi

    # Unicode-5.0 additions
    'Phnx',  # Phoenician
    'Nkoo',  # Nko

    # Unicode-5.1 additions
    'Lydi',  # Lydian

    # Unicode-5.2 additions
    'Avst',  # Avestan
    'Armi',  # Imperial Aramaic
    'Phli',  # Inscriptional Pahlavi
    'Prti',  # Inscriptional Parthian
    'Sarb',  # Old South Arabian
    'Orkh',  # Old Turkic
    'Samr',  # Samaritan

    # Unicode-6.0 additions
    'Mand',  # Mandaic

    # Unicode-6.1 additions
    'Merc',  # Meroitic Cursive
    'Mero',  # Meroitic Hieroglyphs

    # Unicode-7.0 additions
    'Mani',  # Manichaean
    'Mend',  # Mende Kikakui
    'Nbat',  # Nabataean
    'Narb',  # Old North Arabian
    'Palm',  # Palmyrene
    'Phlp',  # Psalter Pahlavi

    # Unicode-8.0 additions
    'Hatr',  # Hatran
    'Hung',  # Old Hungarian

    # Unicode-9.0 additions
    'Adlm',  # Adlam

    # Unicode-11.0 additions
    'Rohg',  # Hanifi Rohingya
    'Sogo',  # Old Sogdian
    'Sogd',  # Sogdian

    # Unicode-12.0 additions
    'Elym',  # Elymaic

    # Unicode-13.0 additions
    'Chrs',  # Chorasmian
    'Yezi',  # Yezidi
}
def script_horizontal_direction(script_code, default=KeyError):
    """Return "RTL" for scripts that contain right-to-left characters
    according to the Bidi_Class property, "LTR" otherwise.

    Unknown script codes raise KeyError unless a fallback 'default' is given.
    """
    if script_code in Scripts.NAMES:
        return "RTL" if script_code in RTL_SCRIPTS else "LTR"
    if isinstance(default, type) and issubclass(default, KeyError):
        raise default(script_code)
    return default
def block(char):
    """Return the name of the Unicode block assigned to the character 'char'.

    >>> block("a")
    'Basic Latin'
    >>> block(chr(0x060C))
    'Arabic'
    >>> block(chr(0xEFFFF))
    'No_Block'
    """
    code = byteord(char)
    # Blocks.RANGES holds sorted block start points; the block containing
    # 'code' is the last one whose start does not exceed it.
    index = bisect_right(Blocks.RANGES, code) - 1
    return Blocks.VALUES[index]
def ot_tags_from_script(script_code):
    """Return a list of OpenType script tags associated with a given
    Unicode script code.

    Returns ['DFLT'] for invalid/unknown script codes.
    """
    if script_code not in Scripts.NAMES:
        return [OTTags.DEFAULT_SCRIPT]
    # The default mapping simply lowercases the first letter of the code;
    # a handful of scripts use registered exception tags instead.
    algorithmic_tag = script_code[0].lower() + script_code[1:]
    tags = [OTTags.SCRIPT_EXCEPTIONS.get(script_code, algorithmic_tag)]
    tags.extend(OTTags.NEW_SCRIPT_TAGS.get(script_code, ()))
    tags.reverse()  # last in, first out
    return tags
def ot_tag_to_script(tag):
    """Return the Unicode script code for the given OpenType script tag, or
    None for the "DFLT" tag or if there is no Unicode script associated
    with it.

    Raises ValueError if the tag is invalid.
    """
    tag = tostr(tag).strip()
    if not tag or " " in tag or len(tag) > 4:
        raise ValueError("invalid OpenType tag: %r" % tag)
    tag = tag.ljust(4)  # pad with spaces up to four characters

    if tag == OTTags.DEFAULT_SCRIPT:
        # it's unclear which Unicode script the "DFLT" OpenType tag maps to,
        # so here we return None
        return None
    if tag in OTTags.NEW_SCRIPT_TAGS_REVERSED:
        return OTTags.NEW_SCRIPT_TAGS_REVERSED[tag]

    # This side of the conversion is fully algorithmic: uppercase the first
    # letter, and replace any trailing space by repeating the previous
    # letter.  E.g. 'nko ' -> 'Nkoo'.
    script_code = tag[0].upper() + tag[1]
    for ch in tag[2:]:
        script_code += script_code[-1] if ch == " " else ch

    return script_code if script_code in Scripts.NAMES else None
|
|
"Test posix functions"
from test import support
# Skip these tests if there is no posix module.
posix = support.import_module('posix')
import errno
import sys
import time
import os
import platform
import pwd
import stat
import tempfile
import unittest
import warnings
# Path of a scratch symlink used by the lchown/lchflags tests.
_DUMMY_SYMLINK = os.path.join(tempfile.gettempdir(),
                              support.TESTFN + '-dummy-symlink')
class PosixTester(unittest.TestCase):
def setUp(self):
# create empty file
fp = open(support.TESTFN, 'w+')
fp.close()
self.teardown_files = [ support.TESTFN ]
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings('ignore', '.* potential security risk .*',
RuntimeWarning)
    def tearDown(self):
        # Remove every file the test registered, then restore warning filters.
        for teardown_file in self.teardown_files:
            support.unlink(teardown_file)
        self._warnings_manager.__exit__(None, None, None)
    def testNoArgFunctions(self):
        # test posix functions which take no arguments and have
        # no side-effects which we need to cleanup (e.g., fork, wait, abort)
        NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdb", "uname",
                             "times", "getloadavg",
                             "getegid", "geteuid", "getgid", "getgroups",
                             "getpid", "getpgrp", "getppid", "getuid", "sync",
                           ]
        for name in NO_ARG_FUNCTIONS:
            posix_func = getattr(posix, name, None)
            if posix_func is not None:
                posix_func()
                # Passing any argument must raise, since these take none.
                self.assertRaises(TypeError, posix_func, 1)
    @unittest.skipUnless(hasattr(posix, 'getresuid'),
                         'test needs posix.getresuid()')
    def test_getresuid(self):
        # getresuid() returns the (real, effective, saved) uid triple.
        user_ids = posix.getresuid()
        self.assertEqual(len(user_ids), 3)
        for val in user_ids:
            self.assertGreaterEqual(val, 0)
    @unittest.skipUnless(hasattr(posix, 'getresgid'),
                         'test needs posix.getresgid()')
    def test_getresgid(self):
        # getresgid() returns the (real, effective, saved) gid triple.
        group_ids = posix.getresgid()
        self.assertEqual(len(group_ids), 3)
        for val in group_ids:
            self.assertGreaterEqual(val, 0)
    @unittest.skipUnless(hasattr(posix, 'setresuid'),
                         'test needs posix.setresuid()')
    def test_setresuid(self):
        # Re-setting the current ids is a no-op and must succeed.
        current_user_ids = posix.getresuid()
        self.assertIsNone(posix.setresuid(*current_user_ids))
        # -1 means don't change that value.
        self.assertIsNone(posix.setresuid(-1, -1, -1))
    @unittest.skipUnless(hasattr(posix, 'setresuid'),
                         'test needs posix.setresuid()')
    def test_setresuid_exception(self):
        # Don't do this test if someone is silly enough to run us as root.
        current_user_ids = posix.getresuid()
        if 0 not in current_user_ids:
            # A non-root process cannot change its real uid.
            new_user_ids = (current_user_ids[0]+1, -1, -1)
            self.assertRaises(OSError, posix.setresuid, *new_user_ids)
    @unittest.skipUnless(hasattr(posix, 'setresgid'),
                         'test needs posix.setresgid()')
    def test_setresgid(self):
        # Re-setting the current ids is a no-op and must succeed.
        current_group_ids = posix.getresgid()
        self.assertIsNone(posix.setresgid(*current_group_ids))
        # -1 means don't change that value.
        self.assertIsNone(posix.setresgid(-1, -1, -1))
    @unittest.skipUnless(hasattr(posix, 'setresgid'),
                         'test needs posix.setresgid()')
    def test_setresgid_exception(self):
        # Don't do this test if someone is silly enough to run us as root.
        current_group_ids = posix.getresgid()
        if 0 not in current_group_ids:
            # A non-root process cannot change its real gid.
            new_group_ids = (current_group_ids[0]+1, -1, -1)
            self.assertRaises(OSError, posix.setresgid, *new_group_ids)
    @unittest.skipUnless(hasattr(posix, 'initgroups'),
                         "test needs os.initgroups()")
    def test_initgroups(self):
        # It takes a string and an integer; check that it raises a TypeError
        # for other argument lists.
        self.assertRaises(TypeError, posix.initgroups)
        self.assertRaises(TypeError, posix.initgroups, None)
        self.assertRaises(TypeError, posix.initgroups, 3, "foo")
        self.assertRaises(TypeError, posix.initgroups, "foo", 3, object())

        # If a non-privileged user invokes it, it should fail with OSError
        # EPERM.
        if os.getuid() != 0:
            try:
                name = pwd.getpwuid(posix.getuid()).pw_name
            except KeyError:
                # the current UID may not have a pwd entry
                raise unittest.SkipTest("need a pwd entry")
            try:
                posix.initgroups(name, 13)
            except OSError as e:
                self.assertEqual(e.errno, errno.EPERM)
            else:
                self.fail("Expected OSError to be raised by initgroups")
    @unittest.skipUnless(hasattr(posix, 'statvfs'),
                         'test needs posix.statvfs()')
    def test_statvfs(self):
        # statvfs on an existing directory returns a truthy result object.
        self.assertTrue(posix.statvfs(os.curdir))
    @unittest.skipUnless(hasattr(posix, 'fstatvfs'),
                         'test needs posix.fstatvfs()')
    def test_fstatvfs(self):
        # Both fstatvfs() and statvfs() accept an open file descriptor.
        fp = open(support.TESTFN)
        try:
            self.assertTrue(posix.fstatvfs(fp.fileno()))
            self.assertTrue(posix.statvfs(fp.fileno()))
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'ftruncate'),
                         'test needs posix.ftruncate()')
    def test_ftruncate(self):
        fp = open(support.TESTFN, 'w+')
        try:
            # we need to have some data to truncate
            fp.write('test')
            fp.flush()
            posix.ftruncate(fp.fileno(), 0)
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'truncate'), "test needs posix.truncate()")
    def test_truncate(self):
        # truncate() by path: shrink the freshly written file back to zero.
        with open(support.TESTFN, 'w') as fp:
            fp.write('test')
            fp.flush()
            posix.truncate(support.TESTFN, 0)
    @unittest.skipUnless(getattr(os, 'execve', None) in os.supports_fd, "test needs execve() to support the fd parameter")
    @unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
    @unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
    def test_fexecve(self):
        # Exercise execve() with a file descriptor as the program argument.
        fp = os.open(sys.executable, os.O_RDONLY)
        try:
            pid = os.fork()
            if pid == 0:
                # Child: replace ourselves with 'python -c pass'.
                # NOTE(review): if execve() itself raises, the child keeps
                # running the test suite — confirm whether an os._exit()
                # safety net is wanted here.
                os.chdir(os.path.split(sys.executable)[0])
                posix.execve(fp, [sys.executable, '-c', 'pass'], os.environ)
            else:
                # Parent: the child should exit cleanly with status 0.
                self.assertEqual(os.waitpid(pid, 0), (pid, 0))
        finally:
            os.close(fp)
    @unittest.skipUnless(hasattr(posix, 'waitid'), "test needs posix.waitid()")
    @unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
    def test_waitid(self):
        # Fork a short-lived child and reap it with waitid().
        pid = os.fork()
        if pid == 0:
            os.chdir(os.path.split(sys.executable)[0])
            posix.execve(sys.executable, [sys.executable, '-c', 'pass'], os.environ)
        else:
            res = posix.waitid(posix.P_PID, pid, posix.WEXITED)
            self.assertEqual(pid, res.si_pid)
    @unittest.skipUnless(hasattr(posix, 'lockf'), "test needs posix.lockf()")
    def test_lockf(self):
        # Lock then unlock the first four bytes of the test file.
        fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
        try:
            os.write(fd, b'test')
            os.lseek(fd, 0, os.SEEK_SET)
            posix.lockf(fd, posix.F_LOCK, 4)
            # section is locked
            posix.lockf(fd, posix.F_ULOCK, 4)
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'pread'), "test needs posix.pread()")
    def test_pread(self):
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            os.write(fd, b'test')
            os.lseek(fd, 0, os.SEEK_SET)
            # pread() reads at an explicit offset...
            self.assertEqual(b'es', posix.pread(fd, 2, 1))
            # the first pread() shouldn't disturb the file offset
            self.assertEqual(b'te', posix.read(fd, 2))
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'pwrite'), "test needs posix.pwrite()")
    def test_pwrite(self):
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            os.write(fd, b'test')
            os.lseek(fd, 0, os.SEEK_SET)
            # pwrite() writes at offset 1 without moving the file offset.
            posix.pwrite(fd, b'xx', 1)
            self.assertEqual(b'txxt', posix.read(fd, 4))
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'posix_fallocate'),
                         "test needs posix.posix_fallocate()")
    def test_posix_fallocate(self):
        # Pre-allocate 10 bytes of disk space for the test file.
        fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
        try:
            posix.posix_fallocate(fd, 0, 10)
        except OSError as inst:
            # issue10812, ZFS doesn't appear to support posix_fallocate,
            # so skip Solaris-based since they are likely to have ZFS.
            if inst.errno != errno.EINVAL or not sys.platform.startswith("sunos"):
                raise
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'posix_fadvise'),
                         "test needs posix.posix_fadvise()")
    def test_posix_fadvise(self):
        # Advising the kernel about access patterns must not raise.
        fd = os.open(support.TESTFN, os.O_RDONLY)
        try:
            posix.posix_fadvise(fd, 0, 0, posix.POSIX_FADV_WILLNEED)
        finally:
            os.close(fd)
    @unittest.skipUnless(os.utime in os.supports_fd, "test needs fd support in os.utime")
    def test_utime_with_fd(self):
        now = time.time()
        fd = os.open(support.TESTFN, os.O_RDONLY)
        try:
            # No-time forms are allowed and set the times to "now".
            posix.utime(fd)
            posix.utime(fd, None)
            # Both members of the 'times' tuple must be given.
            self.assertRaises(TypeError, posix.utime, fd, (None, None))
            self.assertRaises(TypeError, posix.utime, fd, (now, None))
            self.assertRaises(TypeError, posix.utime, fd, (None, now))
            posix.utime(fd, (int(now), int(now)))
            posix.utime(fd, (now, now))
            # 'times' and 'ns' are mutually exclusive.
            self.assertRaises(ValueError, posix.utime, fd, (now, now), ns=(now, now))
            self.assertRaises(ValueError, posix.utime, fd, (now, 0), ns=(None, None))
            self.assertRaises(ValueError, posix.utime, fd, (None, None), ns=(now, 0))
            posix.utime(fd, (int(now), int((now - int(now)) * 1e9)))
            posix.utime(fd, ns=(int(now), int((now - int(now)) * 1e9)))
        finally:
            os.close(fd)
    @unittest.skipUnless(os.utime in os.supports_follow_symlinks, "test needs follow_symlinks support in os.utime")
    def test_utime_nofollow_symlinks(self):
        # Same contract as plain utime(), but with follow_symlinks=False.
        now = time.time()
        posix.utime(support.TESTFN, None, follow_symlinks=False)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), follow_symlinks=False)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), follow_symlinks=False)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), follow_symlinks=False)
        posix.utime(support.TESTFN, (int(now), int(now)), follow_symlinks=False)
        posix.utime(support.TESTFN, (now, now), follow_symlinks=False)
        posix.utime(support.TESTFN, follow_symlinks=False)
    @unittest.skipUnless(hasattr(posix, 'writev'), "test needs posix.writev()")
    def test_writev(self):
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            # Gather-write three buffers and read them back as one blob.
            n = os.writev(fd, (b'test1', b'tt2', b't3'))
            self.assertEqual(n, 10)

            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(b'test1tt2t3', posix.read(fd, 10))

            # Issue #20113: empty list of buffers should not crash
            try:
                size = posix.writev(fd, [])
            except OSError:
                # writev(fd, []) raises OSError(22, "Invalid argument")
                # on OpenIndiana
                pass
            else:
                self.assertEqual(size, 0)
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'readv'), "test needs posix.readv()")
    def test_readv(self):
        fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
        try:
            os.write(fd, b'test1tt2t3')
            os.lseek(fd, 0, os.SEEK_SET)
            # Scatter-read back into three buffers of sizes 5, 3 and 2.
            buf = [bytearray(i) for i in [5, 3, 2]]
            self.assertEqual(posix.readv(fd, buf), 10)
            self.assertEqual([b'test1', b'tt2', b't3'], [bytes(i) for i in buf])

            # Issue #20113: empty list of buffers should not crash
            try:
                size = posix.readv(fd, [])
            except OSError:
                # readv(fd, []) raises OSError(22, "Invalid argument")
                # on OpenIndiana
                pass
            else:
                self.assertEqual(size, 0)
        finally:
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'dup'),
                         'test needs posix.dup()')
    def test_dup(self):
        # dup() returns a new integer file descriptor.
        fp = open(support.TESTFN)
        try:
            fd = posix.dup(fp.fileno())
            self.assertIsInstance(fd, int)
            os.close(fd)
        finally:
            fp.close()
@unittest.skipUnless(hasattr(posix, 'confstr'),
'test needs posix.confstr()')
def test_confstr(self):
self.assertRaises(ValueError, posix.confstr, "CS_garbage")
self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
    @unittest.skipUnless(hasattr(posix, 'dup2'),
                         'test needs posix.dup2()')
    def test_dup2(self):
        # dup2() onto an already-open descriptor must succeed.
        fp1 = open(support.TESTFN)
        fp2 = open(support.TESTFN)
        try:
            posix.dup2(fp1.fileno(), fp2.fileno())
        finally:
            fp1.close()
            fp2.close()
    @unittest.skipUnless(hasattr(os, 'O_CLOEXEC'), "needs os.O_CLOEXEC")
    @support.requires_linux_version(2, 6, 23)
    def test_oscloexec(self):
        # O_CLOEXEC at open() time makes the descriptor non-inheritable.
        fd = os.open(support.TESTFN, os.O_RDONLY|os.O_CLOEXEC)
        self.addCleanup(os.close, fd)
        self.assertFalse(os.get_inheritable(fd))
    @unittest.skipUnless(hasattr(posix, 'O_EXLOCK'),
                         'test needs posix.O_EXLOCK')
    def test_osexlock(self):
        # An exclusive lock blocks a second non-blocking exclusive open.
        fd = os.open(support.TESTFN,
                     os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
        self.assertRaises(OSError, os.open, support.TESTFN,
                          os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
        os.close(fd)

        if hasattr(posix, "O_SHLOCK"):
            # ...and a shared lock also blocks an exclusive open.
            fd = os.open(support.TESTFN,
                         os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
            self.assertRaises(OSError, os.open, support.TESTFN,
                              os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'O_SHLOCK'),
                         'test needs posix.O_SHLOCK')
    def test_osshlock(self):
        # Two shared locks on the same file may coexist.
        fd1 = os.open(support.TESTFN,
                      os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
        fd2 = os.open(support.TESTFN,
                      os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
        os.close(fd2)
        os.close(fd1)

        if hasattr(posix, "O_EXLOCK"):
            # A shared lock blocks a non-blocking exclusive open.
            fd = os.open(support.TESTFN,
                         os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
            self.assertRaises(OSError, os.open, support.TESTFN,
                              os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
            os.close(fd)
    @unittest.skipUnless(hasattr(posix, 'fstat'),
                         'test needs posix.fstat()')
    def test_fstat(self):
        # Both fstat() and stat() accept a file descriptor; a float fd
        # must be rejected.
        fp = open(support.TESTFN)
        try:
            self.assertTrue(posix.fstat(fp.fileno()))
            self.assertTrue(posix.stat(fp.fileno()))

            self.assertRaisesRegex(TypeError,
                    'should be string, bytes, os.PathLike or integer, not',
                    posix.stat, float(fp.fileno()))
        finally:
            fp.close()
    @unittest.skipUnless(hasattr(posix, 'stat'),
                         'test needs posix.stat()')
    def test_stat(self):
        # str and bytes paths are accepted; bytearray is deprecated and
        # every other type is a TypeError.
        self.assertTrue(posix.stat(support.TESTFN))
        self.assertTrue(posix.stat(os.fsencode(support.TESTFN)))

        self.assertWarnsRegex(DeprecationWarning,
                'should be string, bytes, os.PathLike or integer, not',
                posix.stat, bytearray(os.fsencode(support.TESTFN)))
        self.assertRaisesRegex(TypeError,
                'should be string, bytes, os.PathLike or integer, not',
                posix.stat, None)
        self.assertRaisesRegex(TypeError,
                'should be string, bytes, os.PathLike or integer, not',
                posix.stat, list(support.TESTFN))
        self.assertRaisesRegex(TypeError,
                'should be string, bytes, os.PathLike or integer, not',
                posix.stat, list(os.fsencode(support.TESTFN)))
    @unittest.skipUnless(hasattr(posix, 'mkfifo'), "don't have mkfifo()")
    def test_mkfifo(self):
        # mkfifo() creates a FIFO special file at the given path.
        support.unlink(support.TESTFN)
        posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR)
        self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
    @unittest.skipUnless(hasattr(posix, 'mknod') and hasattr(stat, 'S_IFIFO'),
                         "don't have mknod()/S_IFIFO")
    def test_mknod(self):
        # Test using mknod() to create a FIFO (the only use specified
        # by POSIX).
        support.unlink(support.TESTFN)
        mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
        try:
            posix.mknod(support.TESTFN, mode, 0)
        except OSError as e:
            # Some old systems don't allow unprivileged users to use
            # mknod(), or only support creating device nodes.
            self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
        else:
            self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))

        # Keyword arguments are also supported
        support.unlink(support.TESTFN)
        try:
            posix.mknod(path=support.TESTFN, mode=mode, device=0,
                dir_fd=None)
        except OSError as e:
            self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
    @unittest.skipUnless(hasattr(posix, 'stat'), 'test needs posix.stat()')
    @unittest.skipUnless(hasattr(posix, 'makedev'), 'test needs posix.makedev()')
    def test_makedev(self):
        # major()/minor() decompose a device number; makedev() recombines it.
        st = posix.stat(support.TESTFN)
        dev = st.st_dev
        self.assertIsInstance(dev, int)
        self.assertGreaterEqual(dev, 0)

        major = posix.major(dev)
        self.assertIsInstance(major, int)
        self.assertGreaterEqual(major, 0)
        self.assertEqual(posix.major(dev), major)
        self.assertRaises(TypeError, posix.major, float(dev))
        self.assertRaises(TypeError, posix.major)
        self.assertRaises((ValueError, OverflowError), posix.major, -1)

        minor = posix.minor(dev)
        self.assertIsInstance(minor, int)
        self.assertGreaterEqual(minor, 0)
        self.assertEqual(posix.minor(dev), minor)
        self.assertRaises(TypeError, posix.minor, float(dev))
        self.assertRaises(TypeError, posix.minor)
        self.assertRaises((ValueError, OverflowError), posix.minor, -1)

        # Round-trip: recombining major and minor yields the original dev.
        self.assertEqual(posix.makedev(major, minor), dev)
        self.assertRaises(TypeError, posix.makedev, float(major), minor)
        self.assertRaises(TypeError, posix.makedev, major, float(minor))
        self.assertRaises(TypeError, posix.makedev, major)
        self.assertRaises(TypeError, posix.makedev)
def _test_all_chown_common(self, chown_func, first_param, stat_func):
"""Common code for chown, fchown and lchown tests."""
def check_stat(uid, gid):
if stat_func is not None:
stat = stat_func(first_param)
self.assertEqual(stat.st_uid, uid)
self.assertEqual(stat.st_gid, gid)
uid = os.getuid()
gid = os.getgid()
# test a successful chown call
chown_func(first_param, uid, gid)
check_stat(uid, gid)
chown_func(first_param, -1, gid)
check_stat(uid, gid)
chown_func(first_param, uid, -1)
check_stat(uid, gid)
if uid == 0:
# Try an amusingly large uid/gid to make sure we handle
# large unsigned values. (chown lets you use any
# uid/gid you like, even if they aren't defined.)
#
# This problem keeps coming up:
# http://bugs.python.org/issue1747858
# http://bugs.python.org/issue4591
# http://bugs.python.org/issue15301
# Hopefully the fix in 4591 fixes it for good!
#
# This part of the test only runs when run as root.
# Only scary people run their tests as root.
big_value = 2**31
chown_func(first_param, big_value, big_value)
check_stat(big_value, big_value)
chown_func(first_param, -1, -1)
check_stat(big_value, big_value)
chown_func(first_param, uid, gid)
check_stat(uid, gid)
elif platform.system() in ('HP-UX', 'SunOS'):
# HP-UX and Solaris can allow a non-root user to chown() to root
# (issue #5113)
raise unittest.SkipTest("Skipping because of non-standard chown() "
"behavior")
else:
# non-root cannot chown to root, raises OSError
self.assertRaises(OSError, chown_func, first_param, 0, 0)
check_stat(uid, gid)
self.assertRaises(OSError, chown_func, first_param, 0, -1)
check_stat(uid, gid)
if 0 not in os.getgroups():
self.assertRaises(OSError, chown_func, first_param, -1, 0)
check_stat(uid, gid)
# test illegal types
for t in str, float:
self.assertRaises(TypeError, chown_func, first_param, t(uid), gid)
check_stat(uid, gid)
self.assertRaises(TypeError, chown_func, first_param, uid, t(gid))
check_stat(uid, gid)
    @unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
    def test_chown(self):
        # raise an OSError if the file does not exist
        os.unlink(support.TESTFN)
        self.assertRaises(OSError, posix.chown, support.TESTFN, -1, -1)

        # re-create the file
        support.create_empty_file(support.TESTFN)
        self._test_all_chown_common(posix.chown, support.TESTFN,
                                    getattr(posix, 'stat', None))
    @unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
    def test_fchown(self):
        os.unlink(support.TESTFN)

        # re-create the file
        test_file = open(support.TESTFN, 'w')
        try:
            fd = test_file.fileno()
            self._test_all_chown_common(posix.fchown, fd,
                                        getattr(posix, 'fstat', None))
        finally:
            test_file.close()
    @unittest.skipUnless(hasattr(posix, 'lchown'), "test needs os.lchown()")
    def test_lchown(self):
        os.unlink(support.TESTFN)
        # create a symlink
        os.symlink(_DUMMY_SYMLINK, support.TESTFN)
        self._test_all_chown_common(posix.lchown, support.TESTFN,
                                    getattr(posix, 'lstat', None))
    @unittest.skipUnless(hasattr(posix, 'chdir'), 'test needs posix.chdir()')
    def test_chdir(self):
        # chdir to the current directory works; chdir to a regular file fails.
        posix.chdir(os.curdir)
        self.assertRaises(OSError, posix.chdir, support.TESTFN)
    def test_listdir(self):
        # The test file must show up in a listing of the current directory.
        self.assertTrue(support.TESTFN in posix.listdir(os.curdir))
    def test_listdir_default(self):
        # When listdir is called without argument,
        # it's the same as listdir(os.curdir).
        self.assertTrue(support.TESTFN in posix.listdir())
    def test_listdir_bytes(self):
        # When listdir is called with a bytes object,
        # the returned strings are of type bytes.
        self.assertTrue(os.fsencode(support.TESTFN) in posix.listdir(b'.'))
    @unittest.skipUnless(posix.listdir in os.supports_fd,
                         "test needs fd support for posix.listdir()")
    def test_listdir_fd(self):
        # listdir() on a directory fd must match listdir('.').
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        self.addCleanup(posix.close, f)
        self.assertEqual(
            sorted(posix.listdir('.')),
            sorted(posix.listdir(f))
            )
        # Check that the fd offset was reset (issue #13739)
        self.assertEqual(
            sorted(posix.listdir('.')),
            sorted(posix.listdir(f))
            )
    @unittest.skipUnless(hasattr(posix, 'access'), 'test needs posix.access()')
    def test_access(self):
        # We own the test file, so it must be readable.
        self.assertTrue(posix.access(support.TESTFN, os.R_OK))
    @unittest.skipUnless(hasattr(posix, 'umask'), 'test needs posix.umask()')
    def test_umask(self):
        # umask() returns the previous mask; restore it afterwards.
        old_mask = posix.umask(0)
        self.assertIsInstance(old_mask, int)
        posix.umask(old_mask)
    @unittest.skipUnless(hasattr(posix, 'strerror'),
                         'test needs posix.strerror()')
    def test_strerror(self):
        # strerror(0) returns a non-empty message string.
        self.assertTrue(posix.strerror(0))
    @unittest.skipUnless(hasattr(posix, 'pipe'), 'test needs posix.pipe()')
    def test_pipe(self):
        # pipe() returns a (read, write) pair of file descriptors.
        reader, writer = posix.pipe()
        os.close(reader)
        os.close(writer)
    @unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
    @support.requires_linux_version(2, 6, 27)
    def test_pipe2(self):
        # pipe2() takes a single integer flags argument.
        self.assertRaises(TypeError, os.pipe2, 'DEADBEEF')
        self.assertRaises(TypeError, os.pipe2, 0, 0)

        # try calling with flags = 0, like os.pipe()
        r, w = os.pipe2(0)
        os.close(r)
        os.close(w)

        # test flags
        r, w = os.pipe2(os.O_CLOEXEC|os.O_NONBLOCK)
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        self.assertFalse(os.get_inheritable(r))
        self.assertFalse(os.get_inheritable(w))
        self.assertFalse(os.get_blocking(r))
        self.assertFalse(os.get_blocking(w))
        # try reading from an empty pipe: this should fail, not block
        self.assertRaises(OSError, os.read, r, 1)
        # try a write big enough to fill-up the pipe: this should either
        # fail or perform a partial write, not block
        try:
            os.write(w, b'x' * support.PIPE_MAX_SIZE)
        except OSError:
            pass
    @support.cpython_only
    @unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
    @support.requires_linux_version(2, 6, 27)
    def test_pipe2_c_limits(self):
        # Issue 15989: flags larger than a C int/unsigned must overflow.
        import _testcapi
        self.assertRaises(OverflowError, os.pipe2, _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, os.pipe2, _testcapi.UINT_MAX + 1)
    @unittest.skipUnless(hasattr(posix, 'utime'), 'test needs posix.utime()')
    def test_utime(self):
        # Basic utime() by path; partial 'times' tuples are rejected.
        now = time.time()
        posix.utime(support.TESTFN, None)
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None))
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None))
        self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now))
        posix.utime(support.TESTFN, (int(now), int(now)))
        posix.utime(support.TESTFN, (now, now))
def _test_chflags_regular_file(self, chflags_func, target_file, **kwargs):
st = os.stat(target_file)
self.assertTrue(hasattr(st, 'st_flags'))
# ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
flags = st.st_flags | stat.UF_IMMUTABLE
try:
chflags_func(target_file, flags, **kwargs)
except OSError as err:
if err.errno != errno.EOPNOTSUPP:
raise
msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
self.skipTest(msg)
try:
new_st = os.stat(target_file)
self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
try:
fd = open(target_file, 'w+')
except OSError as e:
self.assertEqual(e.errno, errno.EPERM)
finally:
posix.chflags(target_file, st.st_flags)
    @unittest.skipUnless(hasattr(posix, 'chflags'), 'test needs os.chflags()')
    def test_chflags(self):
        # Exercise the shared chflags logic on a regular file.
        self._test_chflags_regular_file(posix.chflags, support.TESTFN)
    @unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
    def test_lchflags_regular_file(self):
        # On a regular file lchflags and chflags(follow_symlinks=False)
        # behave the same.
        self._test_chflags_regular_file(posix.lchflags, support.TESTFN)
        self._test_chflags_regular_file(posix.chflags, support.TESTFN, follow_symlinks=False)
    @unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
    def test_lchflags_symlink(self):
        # Changing flags on a symlink must not touch the link target.
        testfn_st = os.stat(support.TESTFN)

        self.assertTrue(hasattr(testfn_st, 'st_flags'))

        os.symlink(support.TESTFN, _DUMMY_SYMLINK)
        self.teardown_files.append(_DUMMY_SYMLINK)
        dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)

        def chflags_nofollow(path, flags):
            return posix.chflags(path, flags, follow_symlinks=False)

        for fn in (posix.lchflags, chflags_nofollow):
            # ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
            flags = dummy_symlink_st.st_flags | stat.UF_IMMUTABLE
            try:
                fn(_DUMMY_SYMLINK, flags)
            except OSError as err:
                if err.errno != errno.EOPNOTSUPP:
                    raise
                msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
                self.skipTest(msg)
            try:
                new_testfn_st = os.stat(support.TESTFN)
                new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)

                # The target's flags are untouched; the link's got the bit.
                self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
                self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
                                 new_dummy_symlink_st.st_flags)
            finally:
                fn(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
    def test_environ(self):
        # posix.environ holds str items on Windows and bytes elsewhere.
        if os.name == "nt":
            item_type = str
        else:
            item_type = bytes
        for k, v in posix.environ.items():
            self.assertEqual(type(k), item_type)
            self.assertEqual(type(v), item_type)
    @unittest.skipUnless(hasattr(posix, 'getcwd'), 'test needs posix.getcwd()')
    def test_getcwd_long_pathnames(self):
        # Recursively build a very deep directory tree and make sure
        # getcwd() still works at each level.
        dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
        curdir = os.getcwd()
        base_path = os.path.abspath(support.TESTFN) + '.getcwd'

        try:
            os.mkdir(base_path)
            os.chdir(base_path)
        except:
            # Just returning nothing instead of the SkipTest exception, because
            # the test results in Error in that case.  Is that ok?
            #  raise unittest.SkipTest("cannot create directory for testing")
            return

        def _create_and_do_getcwd(dirname, current_path_length = 0):
            try:
                os.mkdir(dirname)
            except:
                # Deliberately broad: any failure to go deeper is a skip.
                raise unittest.SkipTest("mkdir cannot create directory sufficiently deep for getcwd test")

            os.chdir(dirname)
            try:
                os.getcwd()
                if current_path_length < 1027:
                    _create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
            finally:
                os.chdir('..')
                os.rmdir(dirname)

        _create_and_do_getcwd(dirname)

        finally:
            os.chdir(curdir)
            support.rmtree(base_path)
    @unittest.skipUnless(hasattr(posix, 'getgrouplist'), "test needs posix.getgrouplist()")
    @unittest.skipUnless(hasattr(pwd, 'getpwuid'), "test needs pwd.getpwuid()")
    @unittest.skipUnless(hasattr(os, 'getuid'), "test needs os.getuid()")
    def test_getgrouplist(self):
        # A user's primary group must appear in its group list.
        user = pwd.getpwuid(os.getuid())[0]
        group = pwd.getpwuid(os.getuid())[3]
        self.assertIn(group, posix.getgrouplist(user, group))
    @unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
    def test_getgroups(self):
        # Compare os.getgroups() against the external 'id -G' command.
        with os.popen('id -G 2>/dev/null') as idg:
            groups = idg.read().strip()
            ret = idg.close()

        if ret is not None or not groups:
            raise unittest.SkipTest("need working 'id -G'")

        # Issues 16698: OS X ABIs prior to 10.6 have limits on getgroups()
        if sys.platform == 'darwin':
            import sysconfig
            dt = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') or '10.0'
            if tuple(int(n) for n in dt.split('.')[0:2]) < (10, 6):
                raise unittest.SkipTest("getgroups(2) is broken prior to 10.6")

        # 'id -G' and 'os.getgroups()' should return the same
        # groups, ignoring order and duplicates.
        # #10822 - it is implementation defined whether posix.getgroups()
        # includes the effective gid so we include it anyway, since id -G does
        self.assertEqual(
                set([int(x) for x in groups.split()]),
                set(posix.getgroups() + [posix.getegid()]))
# tests for the posix *at functions follow
    @unittest.skipUnless(os.access in os.supports_dir_fd, "test needs dir_fd support for os.access()")
    def test_access_dir_fd(self):
        # access() resolves a relative path against the given dir_fd.
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            self.assertTrue(posix.access(support.TESTFN, os.R_OK, dir_fd=f))
        finally:
            posix.close(f)
    @unittest.skipUnless(os.chmod in os.supports_dir_fd, "test needs dir_fd support in os.chmod()")
    def test_chmod_dir_fd(self):
        # chmod() relative to a directory fd changes the file's mode bits.
        os.chmod(support.TESTFN, stat.S_IRUSR)
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.chmod(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
            s = posix.stat(support.TESTFN)
            self.assertEqual(s[0] & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR)
        finally:
            posix.close(f)
    @unittest.skipUnless(os.chown in os.supports_dir_fd, "test needs dir_fd support in os.chown()")
    def test_chown_dir_fd(self):
        # chown() to our own uid/gid relative to a directory fd must succeed.
        support.unlink(support.TESTFN)
        support.create_empty_file(support.TESTFN)
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.chown(support.TESTFN, os.getuid(), os.getgid(), dir_fd=f)
        finally:
            posix.close(f)
    @unittest.skipUnless(os.stat in os.supports_dir_fd, "test needs dir_fd support in os.stat()")
    def test_stat_dir_fd(self):
        # stat() with dir_fd gives the same result as a plain stat();
        # dir_fd itself must be an int or None.
        support.unlink(support.TESTFN)
        with open(support.TESTFN, 'w') as outfile:
            outfile.write("testline\n")

        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            s1 = posix.stat(support.TESTFN)
            s2 = posix.stat(support.TESTFN, dir_fd=f)
            self.assertEqual(s1, s2)
            s2 = posix.stat(support.TESTFN, dir_fd=None)
            self.assertEqual(s1, s2)
            self.assertRaisesRegex(TypeError, 'should be integer or None, not',
                    posix.stat, support.TESTFN, dir_fd=posix.getcwd())
            self.assertRaisesRegex(TypeError, 'should be integer or None, not',
                    posix.stat, support.TESTFN, dir_fd=float(f))
            self.assertRaises(OverflowError,
                    posix.stat, support.TESTFN, dir_fd=10**20)
        finally:
            posix.close(f)
    @unittest.skipUnless(os.utime in os.supports_dir_fd, "test needs dir_fd support in os.utime()")
    def test_utime_dir_fd(self):
        # utime() relative to a directory fd, in all its argument flavors.
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            now = time.time()
            posix.utime(support.TESTFN, None, dir_fd=f)
            posix.utime(support.TESTFN, dir_fd=f)
            self.assertRaises(TypeError, posix.utime, support.TESTFN, now, dir_fd=f)
            self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), dir_fd=f)
            self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), dir_fd=f)
            self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), dir_fd=f)
            self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, "x"), dir_fd=f)
            posix.utime(support.TESTFN, (int(now), int(now)), dir_fd=f)
            posix.utime(support.TESTFN, (now, now), dir_fd=f)
            posix.utime(support.TESTFN,
                    (int(now), int((now - int(now)) * 1e9)), dir_fd=f)
            posix.utime(support.TESTFN, dir_fd=f,
                            times=(int(now), int((now - int(now)) * 1e9)))

            # try dir_fd and follow_symlinks together
            if os.utime in os.supports_follow_symlinks:
                try:
                    posix.utime(support.TESTFN, follow_symlinks=False, dir_fd=f)
                except ValueError:
                    # whoops!  using both together not supported on this platform.
                    pass
        finally:
            posix.close(f)
    @unittest.skipUnless(os.link in os.supports_dir_fd, "test needs dir_fd support in os.link()")
    def test_link_dir_fd(self):
        # Hard-link via src_dir_fd/dst_dir_fd; both names share one inode.
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.link(support.TESTFN, support.TESTFN + 'link', src_dir_fd=f, dst_dir_fd=f)
            # should have same inodes
            self.assertEqual(posix.stat(support.TESTFN)[1],
                posix.stat(support.TESTFN + 'link')[1])
        finally:
            posix.close(f)
            support.unlink(support.TESTFN + 'link')
    @unittest.skipUnless(os.mkdir in os.supports_dir_fd, "test needs dir_fd support in os.mkdir()")
    def test_mkdir_dir_fd(self):
        # mkdir() relative to a directory fd creates a visible directory.
        f = posix.open(posix.getcwd(), posix.O_RDONLY)
        try:
            posix.mkdir(support.TESTFN + 'dir', dir_fd=f)
            posix.stat(support.TESTFN + 'dir') # should not raise exception
        finally:
            posix.close(f)
            support.rmtree(support.TESTFN + 'dir')
@unittest.skipUnless((os.mknod in os.supports_dir_fd) and hasattr(stat, 'S_IFIFO'),
                     "test requires both stat.S_IFIFO and dir_fd support for os.mknod()")
def test_mknod_dir_fd(self):
    # Test using mknodat() to create a FIFO (the only use specified
    # by POSIX).
    support.unlink(support.TESTFN)
    mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.mknod(support.TESTFN, mode, 0, dir_fd=f)
    except OSError as e:
        # Some old systems don't allow unprivileged users to use
        # mknod(), or only support creating device nodes.
        self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
    else:
        self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
    finally:
        posix.close(f)
@unittest.skipUnless(os.open in os.supports_dir_fd, "test needs dir_fd support in os.open()")
def test_open_dir_fd(self):
    """open() with dir_fd opens the file relative to that directory fd."""
    support.unlink(support.TESTFN)
    with open(support.TESTFN, 'w') as outfile:
        outfile.write("testline\n")
    a = posix.open(posix.getcwd(), posix.O_RDONLY)
    b = posix.open(support.TESTFN, posix.O_RDONLY, dir_fd=a)
    try:
        res = posix.read(b, 9).decode(encoding="utf-8")
        self.assertEqual("testline\n", res)
    finally:
        posix.close(a)
        posix.close(b)
@unittest.skipUnless(os.readlink in os.supports_dir_fd, "test needs dir_fd support in os.readlink()")
def test_readlink_dir_fd(self):
    """readlink() gives the same target with and without dir_fd."""
    os.symlink(support.TESTFN, support.TESTFN + 'link')
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        self.assertEqual(posix.readlink(support.TESTFN + 'link'),
                         posix.readlink(support.TESTFN + 'link', dir_fd=f))
    finally:
        support.unlink(support.TESTFN + 'link')
        posix.close(f)
@unittest.skipUnless(os.rename in os.supports_dir_fd, "test needs dir_fd support in os.rename()")
def test_rename_dir_fd(self):
    """rename() with src_dir_fd/dst_dir_fd renames relative to those fds."""
    support.unlink(support.TESTFN)
    support.create_empty_file(support.TESTFN + 'ren')
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.rename(support.TESTFN + 'ren', support.TESTFN, src_dir_fd=f, dst_dir_fd=f)
    except:
        # rename failed: restore a usable state, then re-raise
        posix.rename(support.TESTFN + 'ren', support.TESTFN)
        raise
    else:
        posix.stat(support.TESTFN) # should not raise exception
    finally:
        posix.close(f)
@unittest.skipUnless(os.symlink in os.supports_dir_fd, "test needs dir_fd support in os.symlink()")
def test_symlink_dir_fd(self):
    """symlink() with dir_fd creates the link relative to that fd."""
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.symlink(support.TESTFN, support.TESTFN + 'link', dir_fd=f)
        self.assertEqual(posix.readlink(support.TESTFN + 'link'), support.TESTFN)
    finally:
        posix.close(f)
        support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.unlink in os.supports_dir_fd, "test needs dir_fd support in os.unlink()")
def test_unlink_dir_fd(self):
    """unlink() with dir_fd removes the file relative to that directory fd."""
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    support.create_empty_file(support.TESTFN + 'del')
    posix.stat(support.TESTFN + 'del') # should not raise exception
    try:
        posix.unlink(support.TESTFN + 'del', dir_fd=f)
    except:
        # unlink failed: clean the file up ourselves, then re-raise
        support.unlink(support.TESTFN + 'del')
        raise
    else:
        # BUG FIX: the original asserted on TESTFN + 'link', a file that was
        # never created, so the OSError was raised regardless of whether the
        # unlink worked and the check was vacuous. Verify the actual file.
        self.assertRaises(OSError, posix.stat, support.TESTFN + 'del')
    finally:
        posix.close(f)
@unittest.skipUnless(os.mkfifo in os.supports_dir_fd, "test needs dir_fd support in os.mkfifo()")
def test_mkfifo_dir_fd(self):
    """mkfifo() with dir_fd creates a FIFO relative to that directory fd."""
    support.unlink(support.TESTFN)
    f = posix.open(posix.getcwd(), posix.O_RDONLY)
    try:
        posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
        self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
    finally:
        posix.close(f)
# Shared skip decorators for the sched_* tests below: the posix module only
# exposes the sched_* family when the platform provides the corresponding
# <sched.h> functionality.
requires_sched_h = unittest.skipUnless(hasattr(posix, 'sched_yield'),
                                       "don't have scheduling support")
requires_sched_affinity = unittest.skipUnless(hasattr(posix, 'sched_setaffinity'),
                                              "don't have sched affinity support")
@requires_sched_h
def test_sched_yield(self):
    """sched_yield() simply runs without error."""
    # This has no error conditions (at least on Linux).
    posix.sched_yield()
@requires_sched_h
@unittest.skipUnless(hasattr(posix, 'sched_get_priority_max'),
                     "requires sched_get_priority_max()")
def test_sched_priority(self):
    """Priority bounds for SCHED_RR are ints with min <= max."""
    # Round-robin usually has interesting priorities.
    pol = posix.SCHED_RR
    lo = posix.sched_get_priority_min(pol)
    hi = posix.sched_get_priority_max(pol)
    self.assertIsInstance(lo, int)
    self.assertIsInstance(hi, int)
    self.assertGreaterEqual(hi, lo)
    # OSX evidently just returns 15 without checking the argument.
    if sys.platform != "darwin":
        self.assertRaises(OSError, posix.sched_get_priority_min, -23)
        self.assertRaises(OSError, posix.sched_get_priority_max, -23)
@unittest.skipUnless(hasattr(posix, 'sched_setscheduler'), "can't change scheduler")
def test_get_and_set_scheduler_and_param(self):
    """Round-trip sched_{get,set}scheduler / sched_{get,set}param and
    check their error behavior for bad pids and bad param objects."""
    possible_schedulers = [sched for name, sched in posix.__dict__.items()
                           if name.startswith("SCHED_")]
    mine = posix.sched_getscheduler(0)
    self.assertIn(mine, possible_schedulers)
    try:
        parent = posix.sched_getscheduler(os.getppid())
    except OSError as e:
        # querying another process may legitimately be forbidden
        if e.errno != errno.EPERM:
            raise
    else:
        self.assertIn(parent, possible_schedulers)
    self.assertRaises(OSError, posix.sched_getscheduler, -1)
    self.assertRaises(OSError, posix.sched_getparam, -1)
    param = posix.sched_getparam(0)
    self.assertIsInstance(param.sched_priority, int)
    # POSIX states that calling sched_setparam() or sched_setscheduler() on
    # a process with a scheduling policy other than SCHED_FIFO or SCHED_RR
    # is implementation-defined: NetBSD and FreeBSD can return EINVAL.
    if not sys.platform.startswith(('freebsd', 'netbsd')):
        try:
            posix.sched_setscheduler(0, mine, param)
            posix.sched_setparam(0, param)
        except OSError as e:
            if e.errno != errno.EPERM:
                raise
        self.assertRaises(OSError, posix.sched_setparam, -1, param)
    self.assertRaises(OSError, posix.sched_setscheduler, -1, mine, param)
    self.assertRaises(TypeError, posix.sched_setscheduler, 0, mine, None)
    self.assertRaises(TypeError, posix.sched_setparam, 0, 43)
    param = posix.sched_param(None)
    self.assertRaises(TypeError, posix.sched_setparam, 0, param)
    large = 214748364700
    param = posix.sched_param(large)
    self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
    param = posix.sched_param(sched_priority=-large)
    self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
@unittest.skipUnless(hasattr(posix, "sched_rr_get_interval"), "no function")
def test_sched_rr_get_interval(self):
    """sched_rr_get_interval(0) yields a float in [0, 1) for RR processes."""
    try:
        interval = posix.sched_rr_get_interval(0)
    except OSError as e:
        # This likely means that sched_rr_get_interval is only valid for
        # processes with the SCHED_RR scheduler in effect.
        if e.errno != errno.EINVAL:
            raise
        self.skipTest("only works on SCHED_RR processes")
    self.assertIsInstance(interval, float)
    # Reasonable constraints, I think.
    self.assertGreaterEqual(interval, 0.)
    self.assertLess(interval, 1.)
@requires_sched_affinity
def test_sched_getaffinity(self):
    """sched_getaffinity(0) returns a non-empty set of plausible CPU ids."""
    mask = posix.sched_getaffinity(0)
    self.assertIsInstance(mask, set)
    self.assertGreaterEqual(len(mask), 1)
    self.assertRaises(OSError, posix.sched_getaffinity, -1)
    for cpu in mask:
        self.assertIsInstance(cpu, int)
        self.assertGreaterEqual(cpu, 0)
        self.assertLess(cpu, 1 << 32)
@requires_sched_affinity
def test_sched_setaffinity(self):
    """sched_setaffinity() round-trips and rejects invalid masks/pids."""
    mask = posix.sched_getaffinity(0)
    if len(mask) > 1:
        # Empty masks are forbidden
        mask.pop()
    posix.sched_setaffinity(0, mask)
    self.assertEqual(posix.sched_getaffinity(0), mask)
    self.assertRaises(OSError, posix.sched_setaffinity, 0, [])
    self.assertRaises(ValueError, posix.sched_setaffinity, 0, [-10])
    self.assertRaises(OverflowError, posix.sched_setaffinity, 0, [1<<128])
    self.assertRaises(OSError, posix.sched_setaffinity, -1, mask)
def test_rtld_constants(self):
    # check presence of major RTLD_* constants
    # (bare attribute access raises AttributeError if one is missing)
    posix.RTLD_LAZY
    posix.RTLD_NOW
    posix.RTLD_GLOBAL
    posix.RTLD_LOCAL
@unittest.skipUnless(hasattr(os, 'SEEK_HOLE'),
                     "test needs an OS that reports file holes")
def test_fs_holes(self):
    # Even if the filesystem doesn't report holes,
    # if the OS supports it the SEEK_* constants
    # will be defined and will have a consistent
    # behaviour:
    # os.SEEK_DATA = current position
    # os.SEEK_HOLE = end of file position
    with open(support.TESTFN, 'r+b') as fp:
        fp.write(b"hello")
        fp.flush()
        size = fp.tell()
        fno = fp.fileno()
        try :
            for i in range(size):
                self.assertEqual(i, os.lseek(fno, i, os.SEEK_DATA))
                self.assertLessEqual(size, os.lseek(fno, i, os.SEEK_HOLE))
            self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_DATA)
            self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_HOLE)
        except OSError :
            # Some OSs claim to support SEEK_HOLE/SEEK_DATA
            # but it is not true.
            # For instance:
            # http://lists.freebsd.org/pipermail/freebsd-amd64/2012-January/014332.html
            raise unittest.SkipTest("OSError raised!")
def test_path_error2(self):
    """
    Test functions that call path_error2(), providing two filenames in their exceptions.
    """
    for name in ("rename", "replace", "link"):
        function = getattr(os, name, None)
        if function is None:
            continue
        for dst in ("noodly2", support.TESTFN):
            try:
                function('doesnotexistfilename', dst)
            except OSError as e:
                # both the source and destination must appear in the message
                self.assertIn("'doesnotexistfilename' -> '{}'".format(dst), str(e))
                break
        else:
            self.fail("No valid path_error2() test for os." + name)
def test_path_with_null_character(self):
    """str paths containing NUL must raise ValueError and touch nothing."""
    fn = support.TESTFN
    fn_with_NUL = fn + '\0'
    self.addCleanup(support.unlink, fn)
    support.unlink(fn)
    fd = None
    try:
        with self.assertRaises(ValueError):
            fd = os.open(fn_with_NUL, os.O_WRONLY | os.O_CREAT) # raises
    finally:
        if fd is not None:
            os.close(fd)
    self.assertFalse(os.path.exists(fn))
    self.assertRaises(ValueError, os.mkdir, fn_with_NUL)
    self.assertFalse(os.path.exists(fn))
    open(fn, 'wb').close()
    self.assertRaises(ValueError, os.stat, fn_with_NUL)
def test_path_with_null_byte(self):
    """bytes paths containing NUL must raise ValueError and touch nothing."""
    fn = os.fsencode(support.TESTFN)
    fn_with_NUL = fn + b'\0'
    self.addCleanup(support.unlink, fn)
    support.unlink(fn)
    fd = None
    try:
        with self.assertRaises(ValueError):
            fd = os.open(fn_with_NUL, os.O_WRONLY | os.O_CREAT) # raises
    finally:
        if fd is not None:
            os.close(fd)
    self.assertFalse(os.path.exists(fn))
    self.assertRaises(ValueError, os.mkdir, fn_with_NUL)
    self.assertFalse(os.path.exists(fn))
    open(fn, 'wb').close()
    self.assertRaises(ValueError, os.stat, fn_with_NUL)
class PosixGroupsTester(unittest.TestCase):
    """Tests for posix.getgroups/setgroups/initgroups.

    Requires root privileges; saved group state is restored in tearDown.
    """

    def setUp(self):
        # group manipulation needs root, getgroups itself, and a working
        # getgroups(2) (known broken on OSX)
        if posix.getuid() != 0:
            raise unittest.SkipTest("not enough privileges")
        if not hasattr(posix, 'getgroups'):
            raise unittest.SkipTest("need posix.getgroups")
        if sys.platform == 'darwin':
            raise unittest.SkipTest("getgroups(2) is broken on OSX")
        self.saved_groups = posix.getgroups()

    def tearDown(self):
        # restore the group list recorded in setUp
        if hasattr(posix, 'setgroups'):
            posix.setgroups(self.saved_groups)
        elif hasattr(posix, 'initgroups'):
            name = pwd.getpwuid(posix.getuid()).pw_name
            posix.initgroups(name, self.saved_groups[0])

    @unittest.skipUnless(hasattr(posix, 'initgroups'),
                         "test needs posix.initgroups()")
    def test_initgroups(self):
        # find missing group
        g = max(self.saved_groups or [0]) + 1
        name = pwd.getpwuid(posix.getuid()).pw_name
        posix.initgroups(name, g)
        self.assertIn(g, posix.getgroups())

    @unittest.skipUnless(hasattr(posix, 'setgroups'),
                         "test needs posix.setgroups()")
    def test_setgroups(self):
        # setgroups() round-trips through getgroups()
        for groups in [[0], list(range(16))]:
            posix.setgroups(groups)
            self.assertListEqual(groups, posix.getgroups())
def test_main():
    """Run the posix test cases, always reaping child processes afterwards."""
    try:
        support.run_unittest(PosixTester, PosixGroupsTester)
    finally:
        support.reap_children()

if __name__ == '__main__':
    test_main()
|
|
#!/usr/bin/python
import sys
import os
import time
import ConfigParser
import tempfile
import codecs
from StringIO import StringIO
from vt_manager_kvm.communication.sfa.util.xml import XML
# Contents written to a freshly created config file when none exists yet.
# NOTE(review): intentionally empty here — Config.create() writes this
# verbatim, producing an empty file.
default_config = \
"""
"""
def isbool(v):
    """Return True when the string `v` spells a boolean ("true"/"false")."""
    lowered = v.lower()
    return lowered == "true" or lowered == "false"
def str2bool(v):
    """Interpret the string `v` as a boolean: "true" or "1" mean True."""
    return v.lower() in {"true", "1"}
class Config:
    """Wrapper around ConfigParser that loads SFA configuration from an
    ini-style, shell-style or XML file and exposes every option both as
    ``section_option`` and ``SECTION_OPTION`` attributes.

    Fixes over the previous revision:
    - ``dump()`` called ``output_python()`` without ``self.`` (NameError).
    - ``output_php()`` misspelled its first parameter as ``selfi``.
    - ``is_xml()`` referenced the undefined name ``Xml`` (the import is
      ``XML``), so the bare ``except`` made it always return False.
    - ``__getattr__`` used a py2-only print statement; now uses ``print()``
      which is valid on both Python 2 and 3.
    - ``dump()`` no longer uses a mutable default argument.
    """

    # def __init__(self, config_file='/etc/sfa/sfa_config'):
    # def __init__(self, config_file='/opt/ofelia/vt_manager_kvm/src/python/vt_manager_kvm/communication/sfa/configs/config'):
    def __init__(self, config_file=None):
        # default to the packaged config next to this module
        if not config_file:
            config_file = os.path.join(os.path.dirname(__file__), "../sfa_config/config.py")
        self._files = []
        self.config_path = os.path.dirname(config_file)
        self.config = ConfigParser.ConfigParser()
        self.filename = config_file
        if not os.path.isfile(self.filename):
            self.create(self.filename)
        self.load(self.filename)

    def _header(self):
        """Return the generated-file warning header as a list of lines."""
        header = """
DO NOT EDIT. This file was automatically generated at
%s from:
%s
""" % (time.asctime(), os.linesep.join(self._files))
        # Get rid of the surrounding newlines
        return header.strip().split(os.linesep)

    def create(self, filename):
        """Write `default_config` to `filename`, creating parent dirs."""
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        configfile = open(filename, 'w')
        configfile.write(default_config)
        configfile.close()

    def load(self, filename):
        """Load `filename` as ini, falling back to XML or shell syntax."""
        if filename:
            try:
                self.config.read(filename)
            except ConfigParser.MissingSectionHeaderError:
                # not an ini file: dispatch on extension
                if filename.endswith('.xml'):
                    self.load_xml(filename)
                else:
                    self.load_shell(filename)
            self._files.append(filename)
            self.set_attributes()

    def load_xml(self, filename):
        """Load categories/variables from an SFA XML configuration file."""
        xml = XML(filename)
        categories = xml.xpath('//configuration/variables/category')
        for category in categories:
            section_name = category.get('id')
            if not self.config.has_section(section_name):
                self.config.add_section(section_name)
            options = category.xpath('./variablelist/variable')
            for option in options:
                option_name = option.get('id')
                value = option.xpath('./value')[0].text
                if not value:
                    value = ""
                self.config.set(section_name, option_name, value)

    def load_shell(self, filename):
        """Load KEY=value lines from a shell-style configuration file."""
        f = open(filename, 'r')
        for line in f:
            try:
                if line.startswith('#'):
                    continue
                parts = line.strip().split("=")
                if len(parts) < 2:
                    continue
                option = parts[0]
                value = parts[1].replace('"', '').replace("'", "")
                section, var = self.locate_varname(option, strict=False)
                if section and var:
                    self.set(section, var, value)
            except:
                # best-effort parsing: skip malformed lines
                pass
        f.close()

    def locate_varname(self, varname, strict=True):
        """Split a SECTION_OPTION style name into (section, option).

        The longest matching section name wins. With strict=True an
        unknown option raises ConfigParser.NoOptionError.
        """
        varname = varname.lower()
        sections = self.config.sections()
        section_name = ""
        var_name = ""
        for section in sections:
            if varname.startswith(section.lower()) and len(section) > len(section_name):
                section_name = section.lower()
                var_name = varname.replace(section_name, "")[1:]
        if strict and not self.config.has_option(section_name, var_name):
            raise ConfigParser.NoOptionError(var_name, section_name)
        return (section_name, var_name)

    def set_attributes(self):
        """Mirror every option as section_option and SECTION_OPTION attrs,
        coercing "true"/"false" to bool and digit strings to int."""
        sections = self.config.sections()
        for section in sections:
            for item in self.config.items(section):
                name = "%s_%s" % (section, item[0])
                value = item[1]
                if isbool(value):
                    value = str2bool(value)
                elif value.isdigit():
                    value = int(value)
                setattr(self, name, value)
                setattr(self, name.upper(), value)

    def variables(self):
        """
        Return all variables.

        Returns:
            variables = { 'category_id': (category, variablelist) }
            category = { 'id': "category_identifier",
                         'name': "Category name",
                         'description': "Category description" }
            variablelist = { 'variable_id': variable }
            variable = { 'id': "variable_identifier",
                         'type': "variable_type",
                         'value': "variable_value",
                         'name': "Variable name",
                         'description': "Variable description" }
        """
        variables = {}
        for section in self.config.sections():
            category = {
                'id': section,
                'name': section,
                'description': section,
            }
            variable_list = {}
            for item in self.config.items(section):
                var_name = item[0]
                name = "%s_%s" % (section, var_name)
                value = item[1]
                if isbool(value):
                    value_type = bool
                elif value.isdigit():
                    value_type = int
                else:
                    value_type = str
                variable = {
                    'id': var_name,
                    'type': value_type,
                    'value': value,
                    'name': name,
                    'description': name,
                }
                variable_list[name] = variable
            variables[section] = (category, variable_list)
        return variables

    def verify(self, config1, config2, validate_method):
        # stub: no cross-config verification implemented
        return True

    def validate_type(self, var_type, value):
        # stub: no type validation implemented
        return True

    @staticmethod
    def is_xml(config_file):
        """Return True when `config_file` parses as SFA XML."""
        try:
            # BUG FIX: was `Xml(config_file)` (undefined name), which the
            # bare except swallowed, so this always returned False.
            x = XML(config_file)
            return True
        except:
            return False

    @staticmethod
    def is_ini(config_file):
        """Return True when `config_file` parses as an ini file."""
        try:
            c = ConfigParser.ConfigParser()
            c.read(config_file)
            return True
        except ConfigParser.MissingSectionHeaderError:
            return False

    def dump(self, sections=None):
        """Write the configuration to stdout in python/ini form."""
        # BUG FIX: was `output_python()` without `self.` (NameError);
        # also replaced the mutable default argument `sections=[]`.
        sys.stdout.write(self.output_python())

    def output_python(self, encoding="utf-8"):
        """Return the configuration as ini-style text."""
        buf = codecs.lookup(encoding)[3](StringIO())
        buf.writelines(["# " + line + os.linesep for line in self._header()])
        # sections()/items() are delegated to the ConfigParser via __getattr__
        for section in self.sections():
            buf.write("[%s]%s" % (section, os.linesep))
            for (name, value) in self.items(section):
                buf.write("%s=%s%s" % (name, value, os.linesep))
            buf.write(os.linesep)
        return buf.getvalue()

    def output_shell(self, show_comments=True, encoding="utf-8"):
        """
        Return variables as a shell script.
        """
        buf = codecs.lookup(encoding)[3](StringIO())
        buf.writelines(["# " + line + os.linesep for line in self._header()])
        for section in self.sections():
            for (name, value) in self.items(section):
                # bash does not have the concept of NULL
                if value:
                    option = "%s_%s" % (section.upper(), name.upper())
                    if isbool(value):
                        value = str(str2bool(value))
                    elif not value.isdigit():
                        value = '"%s"' % value
                    buf.write(option + "=" + value + os.linesep)
        return buf.getvalue()

    def output_php(self, encoding="utf-8"):
        """
        Return variables as a PHP script.
        """
        # BUG FIX: first parameter was misspelled `selfi`, so every
        # `self.` reference in the body raised NameError.
        buf = codecs.lookup(encoding)[3](StringIO())
        buf.write("<?php" + os.linesep)
        buf.writelines(["// " + line + os.linesep for line in self._header()])
        for section in self.sections():
            for (name, value) in self.items(section):
                option = "%s_%s" % (section, name)
                buf.write(os.linesep)
                buf.write("// " + option + os.linesep)
                if value is None:
                    value = 'NULL'
                buf.write("define('%s', %s);" % (option, value) + os.linesep)
        buf.write("?>" + os.linesep)
        return buf.getvalue()

    def output_xml(self, encoding="utf-8"):
        # stub: XML output not implemented
        pass

    def output_variables(self, encoding="utf-8"):
        """
        Return list of all variable names.
        """
        buf = codecs.lookup(encoding)[3](StringIO())
        for section in self.sections():
            for (name, value) in self.items(section):
                option = "%s_%s" % (section, name)
                buf.write(option + os.linesep)
        return buf.getvalue()

    def write(self, filename=None):
        """Write the current configuration back to `filename` (default:
        the file it was loaded from)."""
        if not filename:
            filename = self.filename
        configfile = open(filename, 'w')
        self.config.write(configfile)

    def save(self, filename=None):
        """Alias for write()."""
        self.write(filename)

    def get_trustedroots_dir(self):
        """Return the trusted-roots directory next to the config file."""
        return self.config_path + os.sep + 'trusted_roots'

    def get_openflow_aggrMgr_info(self):
        """Return (ip, port) of the OpenFlow aggregate manager, with
        defaults ('localhost', 2603) when unconfigured."""
        aggr_mgr_ip = 'localhost'
        if (hasattr(self, 'openflow_aggregate_manager_ip')):
            aggr_mgr_ip = self.OPENFLOW_AGGREGATE_MANAGER_IP
        aggr_mgr_port = 2603
        if (hasattr(self, 'openflow_aggregate_manager_port')):
            aggr_mgr_port = self.OPENFLOW_AGGREGATE_MANAGER_PORT
        return (aggr_mgr_ip, aggr_mgr_port)

    def get_interface_hrn(self):
        """Return the configured SFA interface HRN, defaulting to "plc"."""
        if (hasattr(self, 'sfa_interface_hrn')):
            return self.SFA_INTERFACE_HRN
        else:
            return "plc"

    def __getattr__(self, attr):
        # delegate unknown attributes to the underlying ConfigParser
        # NOTE(review): debug print left in place to preserve observable
        # behavior; uses print() which is valid on Python 2 and 3.
        print(self.config.__dict__.keys())
        return getattr(self.config, attr)
if __name__ == '__main__':
    # Optional argv[1] selects the config file; otherwise use the default.
    filename = None
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        config = Config(filename)
    else:
        config = Config()
    config.dump()
|
|
import os
import sys
import json
import types
import shutil
import logging
import inspect
import tempfile
import contextlib
from pyblish import api
from . import lib, schema
from . import (
_registered_families,
_registered_data,
_registered_formats,
_registered_loaders_paths,
_registered_host,
_registered_root,
)
from .vendor import six
# Treat this module as a singleton object: `self` is the module itself, so
# module-level state (log, _is_installed) can be read and mutated uniformly.
self = sys.modules[__name__]
self.log = logging.getLogger("pyblish-mindbender")
self._is_installed = False
def install(host):
    """Install `host` into the running Python session.

    Arguments:
        host (module): A Python module containing the Pyblish
            mindbender host-interface.
    """
    # Optional host install function
    if hasattr(host, "install"):
        host.install()
    register_host(host)
    register_plugins()
    # project root comes from the environment by convention
    register_root(os.getenv("PROJECTDIR"))
    self._is_installed = True
    self.log.info("Successfully installed Pyblish Mindbender!")
def uninstall():
    """Undo install(): uninstall the host and deregister plug-ins."""
    try:
        registered_host().uninstall()
    except AttributeError:
        # the registered host has no optional uninstall() hook
        pass
    deregister_host()
    deregister_plugins()
    self.log.info("Successfully uninstalled Pyblish Mindbender!")
def is_installed():
    """Return state of installation

    Returns:
        True if installed, False otherwise

    """
    return self._is_installed
class Loader(object):
    """Base class for asset loaders discovered by discover_loaders().

    Subclasses list the families they handle in `families` and implement
    process().
    """

    # families this loader can handle; empty means none
    families = list()

    def process(self, asset, subset, version, representation):
        # no-op in the base class; subclasses perform the actual load
        pass
def discover_loaders():
    """Find and return available loaders

    Scans every registered loader directory, executes each ``*.py`` file
    as a throwaway module and collects Loader subclasses from it.

    Returns:
        list: Loader subclasses, deduplicated by class name.
    """

    loaders = dict()

    # Include plug-ins from registered paths
    for path in _registered_loaders_paths:
        path = os.path.normpath(path)

        assert os.path.isdir(path), "%s is not a directory" % path

        for fname in os.listdir(path):
            abspath = os.path.join(path, fname)

            if not os.path.isfile(abspath):
                continue

            mod_name, mod_ext = os.path.splitext(fname)

            if not mod_ext == ".py":
                continue

            module = types.ModuleType(mod_name)
            module.__file__ = abspath

            try:
                with open(abspath) as f:
                    six.exec_(f.read(), module.__dict__)

                # Store reference to original module, to avoid
                # garbage collection from collecting it's global
                # imports, such as `import os`.
                sys.modules[mod_name] = module

            except Exception as err:
                # BUG FIX: printf-style arguments were passed to print(),
                # which never formats them; interpolate explicitly.
                print("Skipped: \"%s\" (%s)" % (mod_name, err))
                continue

            for plugin in loaders_from_module(module):
                if plugin.__name__ in loaders:
                    # BUG FIX: same printf-style misuse as above.
                    print("Duplicate plug-in found: %s" % plugin)
                    continue

                loaders[plugin.__name__] = plugin

    return list(loaders.values())
def loaders_from_module(module):
    """Return plug-ins from module

    Arguments:
        module (types.ModuleType): Imported module from which to
            parse valid Pyblish plug-ins.

    Returns:
        List of plug-ins, or empty list if none is found.

    """
    # A module attribute qualifies when it is a class deriving from Loader.
    return [
        attr
        for attr in (getattr(module, name) for name in dir(module))
        if inspect.isclass(attr) and issubclass(attr, Loader)
    ]
def register_loaders_path(path):
    """Register a directory to be scanned for Loader plug-ins."""
    path = os.path.normpath(path)
    _registered_loaders_paths.add(path)


def registered_loaders_paths():
    """Return the registered loader directories as a list."""
    return list(_registered_loaders_paths)


def deregister_loaders_path(path):
    """Deregister a previously registered loader directory.

    BUG FIX: `_registered_loaders_paths` is a set and `set.pop()` takes no
    arguments, so the old `.pop(path)` always raised TypeError. Normalize
    the path (mirroring register_loaders_path) and remove it; raises
    KeyError if the path was never registered.
    """
    _registered_loaders_paths.remove(os.path.normpath(path))
def ls(root=None):
    """List available assets

    Return a list of available assets.

    The interface of this function, along with its schema, is designed
    to facilitate a potential transition into database-driven queries.

    Arguments:
        root (str, optional): Absolute path to asset directory

    A note on performance:
        This function is a generator, it scans the system one asset
        at a time. However, scanning implies both listing directories
        and opening files - one per asset per version.

        Therefore, performance drops combinatorially for each new
        version added to the project.

        In small pipelines - e.g. 100s of assets, with 10s of versions -
        this should not pose a problem.

        In large pipelines - e.g. 1000s of assets, with 100s of versions -
        this would likely become unbearable and manifest itself in
        surrounding areas of the pipeline where disk-access is
        critical; such as saving or loading files.

    ..note: The order of the list is undefined, but is typically alphabetical
        due to dependence on os.listdir()

    ..note: The order of versions returned is guaranteed to be sorted, so
        as to simplify retrieving the latest one via `versions[-1]`

    """

    assetsdir = root or registered_root()
    assert assetsdir is not None, ("No registered root.")

    for asset in lib.listdir(assetsdir):
        assetdir = os.path.join(assetsdir, asset)
        publishdir = lib.format_shared_dir(assetdir)

        asset_entry = {
            "schema": "pyblish-mindbender:asset-1.0",
            "name": asset,
            "subsets": list()
        }

        for subset in lib.listdir(publishdir):
            subsetdir = os.path.join(publishdir, subset)

            subset_entry = {
                "schema": "pyblish-mindbender:subset-1.0",
                "name": subset,
                "versions": list(),
            }

            asset_entry["subsets"].append(subset_entry)

            for version in lib.listdir(subsetdir):
                versiondir = os.path.join(subsetdir, version)
                fname = os.path.join(versiondir, ".metadata.json")

                try:
                    with open(fname) as f:
                        version_entry = json.load(f)

                except IOError:
                    # versions without metadata are skipped, not fatal
                    self.log.warning("\"%s\" not found." % fname)
                    continue

                if version_entry.get("schema") != ("pyblish-mindbender"
                                                   ":version-1.0"):
                    self.log.warning("\"%s\" unsupported schema." % fname)
                    continue

                subset_entry["versions"].append(version_entry)

            # Sort versions by integer
            subset_entry["versions"].sort(key=lambda v: v["version"])

        # validate before handing the entry to the caller
        schema.validate(asset_entry, "asset")

        yield asset_entry
def any_representation(version):
    """Pick any compatible representation.

    Arguments:
        version ("pyblish-mindbender:version-1.0"): Version from which
            to pick a representation, based on currently registered formats.

    Raises:
        ValueError: When no representation matches a registered format.

    """
    supported_formats = registered_formats()

    # Return the first representation whose format is supported and which
    # is not the raw source file.
    for representation in version["representations"]:
        if (representation["format"] in supported_formats and
                representation["path"] != "{dirname}/source{format}"):
            return representation

    available = [r["format"] for r in version["representations"]]
    raise ValueError(
        "No supported representations.\n\n"
        "Available representations:\n%s\n\n"
        "Supported representations:\n%s"
        % ("\n- ".join(available),
           "\n- ".join(supported_formats))
    )
@contextlib.contextmanager
def fixture(assets=["Asset1"], subsets=["animRig"], versions=1):
    """Build transient fixture of `assets` and `versions`

    Generate a temporary fixture of customisable assets
    with current metadata schema. This function is intended
    for use in tests and tutorials.

    Arguments:
        assets (list, optional): Names of assets to create,
            defaults to one asset named "Asset1"
        subsets (list, optional): Names of subsets to create for each
            asset, defaults to ["animRig"]
        versions (int, optional): Number of versions of each subset,
            defaults to 1 version.

    Thread Safety:
        This function modifies globals state and is
        therefore not thread-safe.

    Usage:
        >>> with fixture(assets=["MyAsset1"]):
        ...    for asset in ls():
        ...        assert asset["name"] == "MyAsset1"
        ...

    """
    tempdir = tempfile.mkdtemp()

    for asset in assets:
        assetdir = os.path.join(tempdir, asset)
        shareddir = lib.format_shared_dir(assetdir)
        os.makedirs(shareddir)

        for subset in subsets:
            subsetdir = os.path.join(shareddir, subset)
            os.makedirs(subsetdir)

            for version in range(versions):
                version = lib.format_version(version + 1)
                versiondir = os.path.join(subsetdir, version)
                os.makedirs(versiondir)

                # touch an empty .ma payload, then write its metadata
                fname = os.path.join(versiondir, asset + ".ma")
                open(fname, "w").close()  # touch

                fname = os.path.join(versiondir, ".metadata.json")

                with open(fname, "w") as f:
                    json.dump({
                        "schema": "pyblish-mindbender:version-1.0",
                        "version": lib.parse_version(version),
                        "path": versiondir,
                        "time": "",
                        "author": "mottosso",
                        "source": os.path.join(
                            "{project}",
                            "maya",
                            "scenes",
                            "scene.ma"
                        ),
                        "representations": [
                            {
                                "schema": ("pyblish-mindbender:"
                                           "representation-1.0"),
                                "format": ".ma",
                                "path": os.path.join(
                                    "{dirname}",
                                    "%s{format}" % asset
                                ),
                            },
                        ]
                    }, f)

    # Keep track of original root
    _ = _registered_root["_"]

    try:
        # temporarily point the registered root at the fixture
        _registered_root["_"] = tempdir
        yield tempdir
    finally:
        # always restore the original root and remove the fixture
        _registered_root["_"] = _
        shutil.rmtree(tempdir)
def register_root(path):
    """Register currently active root"""
    self.log.info("Registering root: %s" % path)
    _registered_root["_"] = path


def registered_root():
    """Return currently registered root"""
    return _registered_root["_"]
def register_format(format):
    """Register a supported format

    A supported format is used to determine which of any available
    representations are relevant to the currently registered host.

    """
    _registered_formats.append(format)
def register_host(host):
    """Register `host` after verifying it implements the host interface.

    The host must expose "load", "create" and "ls"; otherwise an
    AssertionError naming the missing members is raised.
    """
    required = ("load", "create", "ls")
    missing = [member for member in required if not hasattr(host, member)]

    assert not missing, (
        "Incomplete interface for host: '%s'\n"
        "Missing: %s" % (host, ", ".join(missing))
    )

    _registered_host["_"] = host
def register_plugins():
    """Register accompanying plugins"""
    # plug-ins live in the "plugins" directory next to this module
    module_path = sys.modules[__name__].__file__
    package_path = os.path.dirname(module_path)
    plugins_path = os.path.join(package_path, "plugins")
    api.register_plugin_path(plugins_path)
def register_data(key, value, help=None):
    """Register new default attribute

    Arguments:
        key (str): Name of data
        value (object): Arbitrary value of data
        help (str, optional): Briefly describe

    """
    _registered_data[key] = value


def deregister_data(key):
    """Remove a previously registered default attribute."""
    _registered_data.pop(key)
def register_family(name,
                    label=None,
                    data=None,
                    help=None,
                    loader=None):
    """Register family and attributes for family

    Arguments:
        name (str): Name of family, e.g. mindbender.model
        label (str): Nice name for family, e.g. Model
        data (dict, optional): Additional data, see
            :func:`register_data` for docstring on members
        help (str, optional): Briefly describe this family
        loader (optional): Loader associated with this family

    """
    _registered_families[name] = {
        "name": name,
        "label": label,
        "data": data or {},
        "help": help or "",
        "loader": loader
    }


def deregister_family(name):
    """Remove a previously registered family."""
    _registered_families.pop(name)
def registered_formats():
    # return a copy so callers cannot mutate the registry
    return _registered_formats[:]


def registered_families():
    # shallow copy; family dicts themselves are shared
    return _registered_families.copy()


def registered_data():
    # shallow copy; values themselves are shared
    return _registered_data.copy()


def registered_host():
    """Return the currently registered host module."""
    return _registered_host["_"]
def deregister_plugins():
    """Deregister the accompanying plug-in path, tolerating absence."""
    from . import plugins
    plugin_path = os.path.dirname(plugins.__file__)

    try:
        api.deregister_plugin_path(plugin_path)
    except ValueError:
        # the path was never registered; nothing to undo
        self.log.warning("pyblish-mindbender plug-ins not registered.")
def deregister_host():
    """Replace the registered host with the built-in default host."""
    _registered_host["_"] = default_host()


def default_host():
    """A default host, in place of anything better

    This may be considered as reference for the
    interface a host must implement. It also ensures
    that the system runs, even when nothing is there
    to support it.

    """
    host = types.ModuleType("defaultHost")

    def ls():
        return []

    def load(subset, version=-1, representation=None):
        return None

    def create(name, family, nodes=None):
        return "my_instance"

    host.__dict__.update({
        "ls": ls,
        "load": load,
        "create": create,
    })

    return host
def debug_host():
    """A debug host, useful to debugging features that depend on a host"""
    host = types.ModuleType("debugHost")

    def _load(subset, version=-1, representation=None):
        # echo the request to stdout as pretty-printed JSON
        return sys.stdout.write(json.dumps({
            "subset": subset,
            "version": version,
            "representation": representation
        }, indent=4) + "\n")

    def _create(name, family, nodes=None):
        # echo the request to stdout as pretty-printed JSON
        return sys.stdout.write(json.dumps({
            "name": name,
            "family": family,
        }, indent=4))

    host.__dict__.update({
        "ls": lambda: [],
        "load": _load,
        "create": _create,
    })

    return host
|
|
#
# DrawingMixin.py -- enable drawing capabilities.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import time
from ginga import trcalc
from ginga.misc.Bunch import Bunch
from .CanvasMixin import CanvasMixin
__all__ = ['DrawingMixin']
class DrawingMixin(object):
"""The DrawingMixin is a mixin class that adds drawing capability for
some of the basic CanvasObject-derived types. The set_surface method is
used to associate a ImageViewCanvas object for layering on.
"""
def __init__(self):
    """Initialize drawing/editing/picking state and callbacks.

    Must be mixed in with CanvasMixin and a Callback.Callbacks subclass.
    """
    assert isinstance(self, CanvasMixin), "Missing CanvasMixin class"
    from .CanvasObject import drawCatalog

    # For interactive drawing
    self.candraw = False
    self.dc = drawCatalog
    # canvas objects which we know how to draw have an "idraw"
    # class method
    self.drawtypes = [key for key in self.dc.keys()
                      if hasattr(self.dc[key], 'idraw')]
    self.drawtypes.sort()
    self.t_drawtype = 'point'
    self.t_drawparams = {}
    # holds the drawing context
    self._draw_cxt = None

    # For interactive editing
    self.canedit = False
    # Set to False to disable drag moves except from move control pt
    self.easymove = True
    self._start_x = 0
    self._start_y = 0
    self._cp_index = None
    self._edit_obj = None
    self._edit_status = False
    self._edit_detail = {}
    self._pick_cur_objs = set([])
    self._pick_sel_objs = set([])

    # For modes
    self._mode = 'draw'
    self._mode_tbl = Bunch()
    self.add_draw_mode(None)
    self.add_draw_mode('draw', down=self.draw_start,
                       move=self.draw_motion, up=self.draw_stop,
                       poly_add=self.draw_poly_add,
                       poly_delete=self.draw_poly_delete)
    self.add_draw_mode('edit', down=self.edit_start,
                       move=self.edit_motion, up=self.edit_stop,
                       poly_add=self.edit_poly_add,
                       poly_delete=self.edit_poly_delete)
    self.add_draw_mode('pick', down=self.pick_start,
                       move=self.pick_motion, up=self.pick_stop,
                       hover=self.pick_hover, key=self.pick_key,
                       poly_add=self.edit_poly_add,
                       poly_delete=self.edit_poly_delete)

    # For selection
    self._selected = []
    self.multi_select_ok = False

    # this controls whether an object is automatically selected for
    # editing immediately after being drawn
    self.edit_follows_draw = False

    self._process_time = 0.0
    # time delta threshold for deciding whether to update the image
    self._delta_time = 0.020
    self._draw_obj = None

    # NOTE: must be mixed in with a Callback.Callbacks
    for name in ('draw-event', 'draw-down', 'draw-move', 'draw-up',
                 'cursor-down', 'cursor-up', 'cursor-move',
                 'draw-scroll', 'keydown-poly_add', 'keydown-poly_del',
                 'keydown-edit_del', 'edit-event',
                 'edit-select', 'drag-drop'):
        self.enable_callback(name)

    for name in ['key-down', 'key-up', 'btn-down', 'btn-move', 'btn-up',
                 'scroll', 'pinch', 'pan']:
        self.enable_callback('%s-none' % (name))
    def set_surface(self, viewer):
        """Attach this canvas to `viewer` and register event callbacks.

        Stores the viewer and wires the legacy 'draw-*' events plus the
        poly-add/poly-delete/edit-delete key events to our handlers.
        Assumes this mixin is combined with a canvas providing
        ``add_callback``.
        """
        self.viewer = viewer
        # Register this canvas for events of interest.
        # Assumes we are mixed in with a canvas
        canvas = self
        # for legacy drawing via draw mode in Bindmap
        canvas.add_callback('draw-down', self.draw_start, viewer)
        canvas.add_callback('draw-move', self.draw_motion, viewer)
        canvas.add_callback('draw-up', self.draw_stop, viewer)
        # key/poly events are routed through the mode dispatcher
        canvas.add_callback('key-down-none', self._draw_op, 'key', viewer)
        canvas.add_callback('keydown-poly_add', self._draw_op, 'poly_add',
                            viewer)
        canvas.add_callback('keydown-poly_del', self._draw_op, 'poly_delete',
                            viewer)
        canvas.add_callback('keydown-edit_del', self.edit_delete_cb, viewer)
        # scroll-wheel rotate/scale hooks are intentionally disabled
        #canvas.add_callback('draw-scroll', self._edit_rotate_cb, viewer)
        #canvas.add_callback('draw-scroll', self._edit_scale_cb, viewer)
def register_for_cursor_drawing(self, viewer):
canvas = self
canvas.add_callback('cursor-down', self._draw_op, 'down', viewer)
canvas.add_callback('cursor-move', self._draw_op, 'move', viewer)
canvas.add_callback('cursor-up', self._draw_op, 'up', viewer)
canvas.set_callback('none-move', self._draw_op, 'hover', viewer)
##### MODE LOGIC #####
def add_draw_mode(self, name, **kwargs):
try:
bnch = self._mode_tbl[name]
except KeyError:
bnch = Bunch(name=name, **kwargs)
self._mode_tbl[name] = bnch
return bnch
def set_draw_mode(self, mode):
if mode not in self._mode_tbl:
modes = list(self._mode_tbl.keys())
raise ValueError("mode must be one of: %s" % (str(modes)))
self._mode = mode
if mode != 'edit':
self.clear_selected()
self.update_canvas()
    def get_draw_mode(self):
        """Return the name of the current canvas mode (e.g. 'draw', 'edit')."""
        return self._mode
    def _draw_op(self, canvas, event, data_x, data_y, opn, viewer):
        """Dispatch operation `opn` ('down', 'move', 'up', 'hover', ...) to
        the handler registered for the current mode.

        Returns the handler's result, or False if the event is for a
        different viewer, the mode/op has no handler, or the handler
        entry is None.
        """
        if viewer != event.viewer:
            return False
        mode = self._mode
        # Hack to handle legacy drawing using draw mode in Bindmap
        if self.is_drawing():
            mode = 'draw'
        try:
            method = self._mode_tbl[mode][opn]
        except KeyError:
            return False
        if method is not None:
            return method(canvas, event, data_x, data_y, viewer)
        return False
##### DRAWING LOGIC #####
    def _draw_update(self, data_x, data_y, cxt, force_update=False):
        """Re-create the in-progress object from drawing context `cxt` at
        the new cursor position and (throttled) refresh the display.

        Returns False if the context has no draw class, True otherwise.
        Redraws happen at most every `self._delta_time` seconds unless
        `force_update` is set.
        """
        obj = None
        # update the context with current position
        x, y = cxt.crdmap.data_to((data_x, data_y))
        cxt.setvals(x=x, y=y, data_x=data_x, data_y=data_y)
        draw_class = cxt.draw_class
        if draw_class is None:
            return False
        # idraw() builds a fresh object for the current context state
        obj = draw_class.idraw(self, cxt)
        # update display every delta_time secs
        if obj is not None:
            obj.initialize(self, cxt.viewer, self.logger)
            self._draw_obj = obj
            if force_update or (time.time() - self._process_time > self._delta_time):
                self.process_drawing()
        return True
    def draw_start(self, canvas, event, data_x, data_y, viewer):
        """Begin drawing a new object of the current draw type at the
        cursor position; returns False when drawing is disabled.

        Builds the drawing context (`self._draw_cxt`) used by subsequent
        draw_motion/draw_stop/poly_add calls.
        """
        if not self.candraw:
            return False
        self._draw_obj = None
        self.clear_selected()
        # get the drawing coordinate type (default 'data')
        crdtype = self.t_drawparams.get('coord', 'data')
        crdmap = viewer.get_coordmap(crdtype)
        x, y = crdmap.data_to((data_x, data_y))
        # NOTE: klass may be None for an unregistered type; _draw_update
        # then simply does nothing
        klass = self.dc.get(self.t_drawtype, None)
        # create the drawing context
        self._draw_cxt = Bunch(start_x=x, start_y=y, points=[(x, y)],
                               x=x, y=y, data_x=data_x, data_y=data_y,
                               drawparams=self.t_drawparams,
                               crdmap=crdmap, viewer=viewer,
                               draw_class=klass, logger=self.logger)
        self._draw_update(data_x, data_y, self._draw_cxt, force_update=True)
        return True
    def draw_stop(self, canvas, event, data_x, data_y, viewer):
        """Finish the current draw: add the completed object to the canvas
        and fire 'draw-event' (optionally selecting it for editing).

        NOTE(review): when no object was completed the else-branch falls
        through and returns None rather than a bool -- presumably
        deliberate for callback chaining; confirm before changing.
        """
        if not self.candraw:
            return False
        self._draw_update(data_x, data_y, self._draw_cxt)
        obj, self._draw_obj = self._draw_obj, None
        if obj is not None:
            objtag = self.add(obj)
            self.make_callback('draw-event', objtag)
            if self.edit_follows_draw:
                #self.set_draw_mode('edit')
                self.edit_select(obj)
                self.make_callback('edit-select', self._edit_obj)
            return True
        else:
            self.process_drawing()
def draw_motion(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
self._draw_update(data_x, data_y, self._draw_cxt)
return True
def draw_poly_add(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
cxt = self._draw_cxt
if self.t_drawtype in ('polygon', 'freepolygon', 'path', 'freepath'):
x, y = cxt.crdmap.data_to((data_x, data_y))
cxt.points.append((x, y))
elif self.t_drawtype == 'beziercurve' and len(cxt.points) < 3:
x, y = cxt.crdmap.data_to((data_x, data_y))
cxt.points.append((x, y))
self._draw_update(data_x, data_y, cxt, force_update=True)
return True
def draw_poly_delete(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
cxt = self._draw_cxt
if self.t_drawtype in ('polygon', 'freepolygon', 'path',
'freepath', 'beziercurve'):
if len(cxt.points) > 0:
cxt.points.pop()
self._draw_update(data_x, data_y, cxt, force_update=True)
return True
    def is_drawing(self):
        """Return True while an object is actively being drawn."""
        return self._draw_obj is not None
    def enable_draw(self, tf):
        """Enable (True) or disable (False) interactive drawing."""
        self.candraw = tf
    def set_drawcolor(self, colorname):
        """Set the default color used for newly drawn objects."""
        self.t_drawparams['color'] = colorname
    def set_drawtype(self, drawtype, **drawparams):
        """Select the object type and default parameters for future draws.

        Passing ``None`` for `drawtype` keeps the current type but still
        replaces the draw parameters.
        """
        if drawtype is not None:
            drawtype = drawtype.lower()
            # NOTE(review): assert-based validation is stripped under
            # `python -O`; kept as-is since callers may catch AssertionError.
            assert drawtype in self.drawtypes, \
                ValueError("Bad drawing type '%s': must be one of %s" % (
                    drawtype, self.drawtypes))
            self.t_drawtype = drawtype
        # copy so later mutation of the caller's dict cannot affect us
        self.t_drawparams = drawparams.copy()
    def get_drawtypes(self):
        """Return the (sorted) list of registered drawable type names."""
        return self.drawtypes
    def get_drawtype(self):
        """Return the name of the currently selected draw type."""
        return self.t_drawtype
def get_draw_class(self, drawtype):
drawtype = drawtype.lower()
klass = self.dc[drawtype]
return klass
    def get_draw_classes(self):
        """Return the catalog mapping type names to drawing classes."""
        return self.dc
    def get_drawparams(self):
        """Return a copy of the current default draw parameters."""
        return self.t_drawparams.copy()
    def process_drawing(self):
        """Redraw the canvas and record the time, which throttles
        subsequent motion-driven updates (see _draw_update/_edit_update)."""
        self._process_time = time.time()
        #self.redraw(whence=3)
        self.update_canvas()
def register_canvas_type(self, name, klass):
drawtype = name.lower()
self.dc[drawtype] = klass
if drawtype not in self.drawtypes:
self.drawtypes.append(drawtype)
self.drawtypes.sort()
##### EDITING LOGIC #####
    def get_edit_object(self):
        """Return the object currently selected for editing, or None."""
        return self._edit_obj
def is_editing(self):
return self.get_edit_obj() is not None
    def enable_edit(self, tf):
        """Enable (True) or disable (False) interactive editing."""
        self.canedit = tf
def _rot_xlate(self, obj, data_x, data_y):
# translate point back into non-rotated form
rot_deg = - obj.rot_deg
xoff, yoff = obj.get_center_pt()
data_x, data_y = trcalc.rotate_pt(data_x, data_y, rot_deg,
xoff=xoff, yoff=yoff)
return data_x, data_y
    def _edit_update(self, data_x, data_y, viewer):
        """Apply the current drag position to the edited object.

        A control-point index < 0 means "move whole object" (index 0 is
        the object's move point); otherwise the indexed control point is
        updated.  Display refresh is throttled by `_delta_time`.
        Returns False if editing is disabled or no drag is in progress.
        """
        if (not self.canedit) or (self._cp_index is None):
            return False
        x, y = data_x, data_y
        if self._cp_index < 0:
            if self.easymove:
                # drag offset recorded in _prepare_to_move
                self._edit_obj.set_edit_point(0, (x - self._start_x,
                                                  y - self._start_y),
                                              self._edit_detail)
        else:
            # special hack for objects that have rot_deg attribute
            if hasattr(self._edit_obj, 'rot_deg') and (self._cp_index > 0):
                x, y = self._rot_xlate(self._edit_obj, x, y)
            self._edit_obj.set_edit_point(self._cp_index, (x, y),
                                          self._edit_detail)
        #self._edit_obj.sync_state()
        if time.time() - self._process_time > self._delta_time:
            self.process_drawing()
        return True
    def _is_editable(self, obj, pt, is_inside):
        """Selection test used by select_items_at(): an object qualifies
        when the point is inside it and it is marked editable."""
        return is_inside and obj.editable
def _prepare_to_move(self, obj, data_x, data_y):
#print(("moving an object", obj.editable))
self.edit_select(obj)
self._cp_index = -1
ref_x, ref_y = self._edit_obj.get_reference_pt()
self._start_x, self._start_y = data_x - ref_x, data_y - ref_y
#print(("end moving an object", obj.editable))
    def edit_start(self, canvas, event, data_x, data_y, viewer):
        """Button-press handler in edit mode.

        Resolution order: (1) nothing selected -> select/move the topmost
        editable object under the cursor; (2) something selected -> check
        each selected object's control points for a hit, else check the
        selected objects' bounding boxes; (3) neither -> clear the
        selection and try a fresh pick at this location.  Returns False
        only when editing is disabled or nothing is hit in case (1).
        """
        if not self.canedit:
            return False
        self._edit_tmp = self._edit_obj
        self._edit_status = False
        self._edit_detail = Bunch(viewer=viewer)
        self._cp_index = None
        # NOTE(review): shift-select is stubbed off; the modifiers check
        # is commented out, so shift_held is always False here.
        #shift_held = 'shift' in event.modifiers
        shift_held = False
        selects = self.get_selected()
        if len(selects) == 0:
            #print("no objects already selected")
            # <-- no objects already selected
            # check for objects at this location
            #print("getting items")
            objs = canvas.select_items_at(viewer, (data_x, data_y),
                                          test=self._is_editable)
            #print("items: %s" % (str(objs)))
            if len(objs) == 0:
                # <-- no objects under cursor
                return False
            # pick top object
            obj = objs[-1]
            self._prepare_to_move(obj, data_x, data_y)
        else:
            self._edit_status = True
            # Ugh. Check each selected object's control points
            # for a match
            contains = []
            for obj in selects:
                #print("editing: checking for cp")
                edit_pts = obj.get_edit_points(viewer)
                #print((self._edit_obj, edit_pts))
                idx = obj.get_pt(viewer, edit_pts, (data_x, data_y),
                                 obj.cap_radius)
                if len(idx) > 0:
                    i = idx[0]
                    #print("editing cp #%d" % (i))
                    # editing a control point from an existing object
                    self._edit_obj = obj
                    self._cp_index = i
                    # undo object rotation so the drag tracks correctly
                    if hasattr(obj, 'rot_deg'):
                        x, y = self._rot_xlate(self._edit_obj, data_x, data_y)
                    else:
                        x, y = data_x, data_y
                    self._edit_detail.start_pos = (x, y)
                    obj.setup_edit(self._edit_detail)
                    self._edit_update(data_x, data_y, viewer)
                    return True
                i = None
                ## if obj.contains_pt((data_x, data_y)):
                ##     contains.append(obj)
                # update: check if objects bbox contains this point
                x1, y1, x2, y2 = obj.get_llur()
                if (x1 <= data_x <= x2) and (y1 <= data_y <= y2):
                    contains.append(obj)
            # <-- no control points match, is there an object that contains
            # this point?
            if len(contains) > 0:
                # TODO?: make a compound object of contains and move it?
                obj = contains[-1]
                if self.is_selected(obj) and shift_held:
                    # deselecting object
                    self.select_remove(obj)
                else:
                    self._prepare_to_move(obj, data_x, data_y)
                ## Compound = self.get_draw_class('compoundobject')
                ## c_obj = Compound(*self.get_selected())
                ## c_obj.inherit_from(obj)
                ## self._prepare_to_move(c_obj, data_x, data_y)
            else:
                # <-- user clicked outside any selected item's control pt
                # and outside any selected item
                if not shift_held:
                    self.clear_selected()
                # see now if there is an unselected item at this location
                objs = canvas.select_items_at(viewer, (data_x, data_y),
                                              test=self._is_editable)
                #print("new items: %s" % (str(objs)))
                if len(objs) > 0:
                    # pick top object
                    obj = objs[-1]
                    #print(("top object", obj))
                    if self.num_selected() > 0:
                        #print("there are previously selected items")
                        # if there are already some selected items, then
                        # add this object to the selection, make a compound
                        # object
                        self.edit_select(obj)
                        Compound = self.get_draw_class('compoundobject')
                        c_obj = Compound(*self.get_selected())
                        c_obj.inherit_from(obj)
                        self._prepare_to_move(c_obj, data_x, data_y)
                    else:
                        # otherwise just start over with this new object
                        #print(("starting over"))
                        self._prepare_to_move(obj, data_x, data_y)
        self.process_drawing()
        return True
    def edit_stop(self, canvas, event, data_x, data_y, viewer):
        """Button-release handler in edit mode.

        Fires 'edit-select' when the editing/selection status changed
        since edit_start, then applies the final drag position and fires
        'edit-event' plus the object's own 'edited' callback.
        """
        if not self.canedit:
            return False
        if (self._edit_tmp != self._edit_obj) or (
                (self._edit_obj is not None) and
                (self._edit_status != self.is_selected(self._edit_obj))):
            # <-- editing status has changed
            #print("making edit-select callback")
            self.make_callback('edit-select', self._edit_obj)
        if (self._edit_obj is not None) and (self._cp_index is not None):
            # <-- an object has been edited
            self._edit_update(data_x, data_y, viewer)
            self._cp_index = None
            self.make_callback('edit-event', self._edit_obj)
            self._edit_obj.make_callback('edited')
        return True
def edit_motion(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
if (self._edit_obj is not None) and (self._cp_index is not None):
self._edit_update(data_x, data_y, viewer)
return True
return False
    def edit_poly_add(self, canvas, event, data_x, data_y, viewer):
        """Insert a vertex into the edited polygon/path at the segment
        nearest the cursor (within 8 px of a line), if any."""
        if not self.canedit:
            return False
        obj = self._edit_obj
        if ((obj is not None) and self.is_selected(obj) and
            (obj.kind in ('polygon', 'path'))):
            self.logger.debug("checking points")
            # determine which line we are adding a point to
            points = list(obj.get_data_points())
            if obj.kind == 'polygon':
                # close the ring so the last->first segment is tested too
                points = points + [points[0]]
            x0, y0 = points[0]
            insert = None
            for i in range(1, len(points[1:]) + 1):
                x1, y1 = points[i]
                self.logger.debug("checking line %d" % (i))
                if obj.within_line(viewer, (data_x, data_y),
                                   (x0, y0), (x1, y1), 8):
                    insert = i
                    break
                x0, y0 = x1, y1
            if insert is not None:
                self.logger.debug("inserting point")
                # Point near a line
                pt = obj.crdmap.data_to((data_x, data_y))
                obj.insert_pt(insert, pt)
                self.process_drawing()
            else:
                self.logger.debug("cursor not near a line")
        return True
    def edit_poly_delete(self, canvas, event, data_x, data_y, viewer):
        """Delete the vertex of the edited polygon/path nearest the
        cursor (within 8 px of a point), if any."""
        if not self.canedit:
            return False
        obj = self._edit_obj
        if ((obj is not None) and self.is_selected(obj) and
            (obj.kind in ('polygon', 'path'))):
            self.logger.debug("checking points")
            # determine which point we are deleting
            points = list(obj.get_data_points())
            delete = None
            for i in range(len(points)):
                x1, y1 = points[i]
                self.logger.debug("checking vertex %d" % (i))
                if obj.within_radius(viewer, (data_x, data_y), (x1, y1),
                                     8):
                    delete = i
                    break
            if delete is not None:
                self.logger.debug("deleting point")
                obj.delete_pt(delete)
                self.process_drawing()
            else:
                self.logger.debug("cursor not near a point")
        return True
    def edit_rotate(self, delta_deg, viewer):
        """Rotate the edited object by `delta_deg` degrees and fire
        'edit-event'; returns False if nothing is being edited.

        `viewer` is accepted for callback-signature compatibility but is
        not used in the body.
        """
        if self._edit_obj is None:
            return False
        # NOTE(review): rotate_by_deg is given a 1-element list --
        # presumably it accepts a sequence of angles; confirm against
        # the canvas-object API before changing.
        self._edit_obj.rotate_by_deg([delta_deg])
        self.process_drawing()
        self.make_callback('edit-event', self._edit_obj)
        return True
def _edit_rotate_cb(self, canvas, event, viewer, msg=True):
if not self.canedit or (viewer != event.viewer):
return False
bd = viewer.get_bindings()
amount = event.amount
if bd.get_direction(event.direction) == 'down':
amount = - amount
return self.edit_rotate(amount)
    def edit_scale(self, delta_x, delta_y, viewer):
        """Scale the edited object by the given x/y factors and fire
        'edit-event'; returns False if nothing is being edited.

        `viewer` is accepted for callback-signature compatibility but is
        not used in the body.
        """
        if self._edit_obj is None:
            return False
        self._edit_obj.scale_by_factors((delta_x, delta_y))
        self.process_drawing()
        self.make_callback('edit-event', self._edit_obj)
        return True
def _edit_scale_cb(self, canvas, event, viewer, msg=True):
if not self.canedit or (viewer != event.viewer):
return False
bd = viewer.get_bindings()
if bd.get_direction(event.direction) == 'down':
amount = 0.9
else:
amount = 1.1
return self.edit_scale(amount, amount)
def edit_delete(self):
if (self._edit_obj is not None) and self.is_selected(self._edit_obj):
self.select_remove(self._edit_obj)
obj, self._edit_obj = self._edit_obj, None
self.delete_object(obj)
self.make_callback('edit-event', self._edit_obj)
return True
def edit_delete_cb(self, canvas, event, data_x, data_y, viewer):
if not self.canedit or (viewer != event.viewer):
return False
return self.edit_delete()
def edit_select(self, newobj):
if not self.canedit:
return False
if not self.multi_select_ok:
self.clear_selected()
# add new object to selection
self.select_add(newobj)
self._edit_obj = newobj
return True
##### SELECTION LOGIC #####
    def _is_selectable(self, obj, x, y, is_inside):
        """Selection test: point inside an editable object qualifies."""
        return is_inside and obj.editable
        #return is_inside
    def is_selected(self, obj):
        """Return True if `obj` is in the current selection."""
        return obj in self._selected
    def get_selected(self):
        """Return the list of selected objects.

        NOTE: this is the internal list, not a copy; callers that iterate
        while mutating the selection should copy it first (as draw() does).
        """
        return self._selected
    def num_selected(self):
        """Return the number of currently selected objects."""
        return len(self._selected)
    def clear_selected(self):
        """Empty the selection (rebinds the list, so snapshots taken via
        get_selected() before this call are unaffected)."""
        self._selected = []
def select_remove(self, obj):
try:
self._selected.remove(obj)
except Exception:
pass
def select_add(self, obj):
if obj not in self._selected:
self._selected.append(obj)
##### PICK LOGIC #####
    def _do_pick(self, canvas, event, data_x, data_y, ptype, viewer):
        """Common pick-mode handler for `ptype` in
        ('down', 'move', 'up', 'hover', 'key').

        Tracks enter/leave transitions across pickable objects (firing
        'pick-enter'/'pick-leave'), then delivers 'pick-<ptype>' to the
        currently picked set.  Returns True if any callback handled it.
        """
        # check for objects at this location
        objs = canvas.select_items_at(viewer, (data_x, data_y))
        picked = set(filter(lambda obj: obj.pickable, objs))
        newly_out = self._pick_cur_objs - picked
        newly_in = picked - self._pick_cur_objs
        self._pick_cur_objs = picked
        # 'move'/'up' keep delivering to the set picked at press time
        if ptype not in ('move', 'up'):
            self._pick_sel_objs = picked
        # leaving an object
        for obj in newly_out:
            pt = obj.crdmap.data_to((data_x, data_y))
            obj.make_callback('pick-leave', canvas, event, pt)
        # entering an object
        for obj in newly_in:
            pt = obj.crdmap.data_to((data_x, data_y))
            obj.make_callback('pick-enter', canvas, event, pt)
        # pick down/up
        res = False
        for obj in self._pick_sel_objs:
            cb_name = 'pick-%s' % (ptype)
            self.logger.debug("%s event in %s obj at x, y = %d, %d" % (
                cb_name, obj.kind, data_x, data_y))
            pt = obj.crdmap.data_to((data_x, data_y))
            if obj.make_callback(cb_name, canvas, event, pt):
                res = True
        return res
def pick_start(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'down', viewer)
def pick_motion(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'move', viewer)
def pick_hover(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'hover', viewer)
def pick_key(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'key', viewer)
def pick_stop(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'up', viewer)
# The canvas drawing
    def draw(self, viewer):
        """Render the canvas: all committed objects (via the superclass),
        then the in-progress drawing object, then edit control points on
        every selected object."""
        # Draw everything else as usual
        super(DrawingMixin, self).draw(viewer)
        # Draw our current drawing object, if any
        if self._draw_obj:
            self._draw_obj.draw(viewer)
        # Draw control points on edited objects
        # (snapshot the list in case callbacks mutate the selection)
        selected = list(self.get_selected())
        if len(selected) > 0:
            for obj in selected:
                cr = viewer.renderer.setup_cr(obj)
                obj.draw_edit(cr, viewer)
### NON-PEP8 EQUIVALENTS -- TO BE DEPRECATED ###
setSurface = set_surface
getDrawClass = get_draw_class
# END
|
|
"""Simple HTTP Server.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.6"
__all__ = ["SimpleMsHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
import mimetypes
from StringIO import StringIO
import MsTimestampServer
import sys
import SocketServer
class SimpleMsHTTPRequestHandler(MsTimestampServer.MsHTTPRequestHandler):

    """Simple HTTP request handler with GET and HEAD commands.

    This serves files from the current directory and any of its
    subdirectories.  It assumes that all files are plain text files
    unless they have the extension ".html" in which case it assumes
    they are HTML files.

    The GET and HEAD requests are identical except that the HEAD
    request omits the actual contents of the file.

    NOTE(review): this module is Python 2 only (BaseHTTPServer,
    StringIO, dict.has_key, cmp-based sort).
    """

    # advertised in the Server response header
    server_version = "SimpleHTTP/" + __version__

    def do_GET(self):
        """Serve a GET request."""
        f = self.send_head()
        if f:
            # stream the body, then always release the file object
            self.copyfile(f, self.wfile)
            f.close()

    def do_HEAD(self):
        """Serve a HEAD request."""
        f = self.send_head()
        if f:
            # headers already sent by send_head(); no body for HEAD
            f.close()

    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.
        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            # prefer an index file; otherwise emit a directory listing
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        # text files are opened in text mode, everything else binary
        if ctype.startswith('text/'):
            mode = 'r'
        else:
            mode = 'rb'
        try:
            f = open(path, mode)
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        self.end_headers()
        return f

    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error).  In either case, the headers are sent, making the
        interface the same as for send_head().
        """
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        # case-insensitive sort (Python 2 cmp-style comparator)
        list.sort(lambda a, b: cmp(a.lower(), b.lower()))
        f = StringIO()
        f.write("<title>Directory listing for %s</title>\n" % self.path)
        f.write("<h2>Directory listing for %s</h2>\n" % self.path)
        f.write("<hr>\n<ul>\n")
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name = cgi.escape(name)
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
            # Note: a link to a directory displays with @ and links with /
            f.write('<li><a href="%s">%s</a>\n' % (linkname, displayname))
        f.write("</ul>\n<hr>\n")
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        return f

    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)
        """
        path = posixpath.normpath(urllib.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        # always resolve relative to the server's working directory
        path = os.getcwd()
        for word in words:
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            # drop '.' and '..' components to confine paths to the cwd
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        return path

    def copyfile(self, source, outputfile):
        """Copy all data between two file objects.

        The SOURCE argument is a file object open for reading
        (or anything with a read() method) and the DESTINATION
        argument is a file object open for writing (or
        anything with a write() method).

        The only reason for overriding this would be to change
        the block size or perhaps to replace newlines by CRLF
        -- note however that this the default server uses this
        to copy binary data as well.
        """
        shutil.copyfileobj(source, outputfile)

    def guess_type(self, path):
        """Guess the type of a file.

        Argument is a PATH (a filename).

        Return value is a string of the form type/subtype,
        usable for a MIME Content-type header.

        The default implementation looks the file's extension
        up in the table self.extensions_map, using text/plain
        as a default; however it would be permissible (if
        slow) to look inside the data to make a better guess.
        """
        base, ext = posixpath.splitext(path)
        # exact-case match first, then lowercase, then the '' default
        if self.extensions_map.has_key(ext):
            return self.extensions_map[ext]
        ext = ext.lower()
        if self.extensions_map.has_key(ext):
            return self.extensions_map[ext]
        else:
            return self.extensions_map['']

    # system MIME table, with a few source types forced to text/plain
    extensions_map = mimetypes.types_map.copy()
    extensions_map.update({
        '': 'application/octet-stream', # Default
        '.py': 'text/plain',
        '.c': 'text/plain',
        '.h': 'text/plain',
        })
def test(HandlerClass=SimpleMsHTTPRequestHandler,
         ServerClass=BaseHTTPServer.HTTPServer):
    """Run BaseHTTPServer's command-line smoke test with our handler."""
    BaseHTTPServer.test(HandlerClass, ServerClass)
def run(listeningPort):
    """Serve SimpleMsHTTPRequestHandler forever on the given TCP port."""
    httpd = SocketServer.TCPServer(("", listeningPort),
                                   SimpleMsHTTPRequestHandler)
    print("Listening for HTTP requests on port " + str(listeningPort) + "...")
    httpd.serve_forever()
if __name__ == '__main__':
    # optional first argument overrides the default port (9002)
    listeningPort = int(sys.argv[1]) if len(sys.argv) > 1 else 9002
    run(listeningPort)
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of Policy Engine For Neutron"""
import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
import six.moves.urllib.request as urlrequest
import neutron
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions
from neutron import context
from neutron import manager
from neutron import policy
from neutron.tests import base
class PolicyFileTestCase(base.BaseTestCase):
    """Verify that policy.refresh() picks up on-disk rule changes."""

    def setUp(self):
        super(PolicyFileTestCase, self).setUp()
        self.context = context.Context('fake', 'fake', is_admin=False)
        self.target = {'tenant_id': 'fake'}

    def test_modified_policy_reloads(self):
        """A rule rewritten on disk takes effect after refresh()."""
        rules_path = self.get_temp_file_path('policy')
        action = "example:test"
        # permissive rule: enforcement succeeds
        with open(rules_path, "w") as fp:
            fp.write("""{"example:test": ""}""")
        policy.refresh(policy_file=rules_path)
        policy.enforce(self.context, action, self.target)
        # deny-all rule: enforcement must now raise
        with open(rules_path, "w") as fp:
            fp.write("""{"example:test": "!"}""")
        policy.refresh(policy_file=rules_path)
        self.target = {'tenant_id': 'fake_tenant'}
        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)
class PolicyTestCase(base.BaseTestCase):
    """Exercise policy.enforce()/check() against an in-memory rule set."""

    def setUp(self):
        super(PolicyTestCase, self).setUp()
        # NOTE(vish): preload rules to circumvent reloading from file
        rules = {
            "true": '@',
            "example:allowed": '@',
            "example:denied": '!',
            "example:get_http": "http:http://www.example.com",
            "example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s",
            "example:early_and_fail": "! and @",
            "example:early_or_success": "@ or !",
            "example:lowercase_admin": "role:admin or role:sysadmin",
            "example:uppercase_admin": "role:ADMIN or role:sysadmin",
        }
        policy.refresh()
        # NOTE(vish): then overload underlying rules
        policy.set_rules(oslo_policy.Rules.from_dict(rules))
        self.context = context.Context('fake', 'fake', roles=['member'])
        self.target = {}

    def test_enforce_nonexistent_action_throws(self):
        """enforce() on an unknown action raises (no default rule set)."""
        action = "example:noexist"
        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)

    def test_enforce_bad_action_throws(self):
        """enforce() on an explicitly denied ('!') action raises."""
        action = "example:denied"
        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)

    def test_check_bad_action_noraise(self):
        """check() on a denied action returns False instead of raising."""
        action = "example:denied"
        result = policy.check(self.context, action, self.target)
        self.assertEqual(result, False)

    def test_check_non_existent_action(self):
        """check() on an unknown action: False normally, True with
        might_not_exist=True."""
        action = "example:idonotexist"
        result_1 = policy.check(self.context, action, self.target)
        self.assertFalse(result_1)
        result_2 = policy.check(self.context, action, self.target,
                                might_not_exist=True)
        self.assertTrue(result_2)

    def test_enforce_good_action(self):
        """enforce() on an allowed ('@') action returns True."""
        action = "example:allowed"
        result = policy.enforce(self.context, action, self.target)
        self.assertEqual(result, True)

    @mock.patch.object(urlrequest, 'urlopen',
                       return_value=six.StringIO("True"))
    def test_enforce_http_true(self, mock_urlrequest):
        """An http: rule passes when the remote endpoint answers 'True'."""
        action = "example:get_http"
        target = {}
        result = policy.enforce(self.context, action, target)
        self.assertEqual(result, True)

    def test_enforce_http_false(self):
        """An http: rule fails when the remote endpoint answers 'False'."""

        def fakeurlopen(url, post_data):
            return six.StringIO("False")

        with mock.patch.object(urlrequest, 'urlopen', new=fakeurlopen):
            action = "example:get_http"
            target = {}
            self.assertRaises(oslo_policy.PolicyNotAuthorized,
                              policy.enforce, self.context,
                              action, target)

    def test_templatized_enforcement(self):
        """%(tenant_id)s substitution matches only the owner's target."""
        target_mine = {'tenant_id': 'fake'}
        target_not_mine = {'tenant_id': 'another'}
        action = "example:my_file"
        policy.enforce(self.context, action, target_mine)
        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
                          self.context, action, target_not_mine)

    def test_early_AND_enforcement(self):
        """'! and @' short-circuits to a denial."""
        action = "example:early_and_fail"
        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
                          self.context, action, self.target)

    def test_early_OR_enforcement(self):
        """'@ or !' short-circuits to success."""
        action = "example:early_or_success"
        policy.enforce(self.context, action, self.target)

    def test_ignore_case_role_check(self):
        """Role matching is case-insensitive."""
        lowercase_action = "example:lowercase_admin"
        uppercase_action = "example:uppercase_admin"
        # NOTE(dprince) we mix case in the Admin role here to ensure
        # case is ignored
        admin_context = context.Context('admin', 'fake', roles=['AdMiN'])
        policy.enforce(admin_context, lowercase_action, self.target)
        policy.enforce(admin_context, uppercase_action, self.target)
class DefaultPolicyTestCase(base.BaseTestCase):
    """Exercise fallback to the "default" rule for unknown actions."""

    def setUp(self):
        super(DefaultPolicyTestCase, self).setUp()
        rules_path = self.get_temp_file_path('policy.json')
        self.rules = {
            "default": '',
            "example:exist": '!',
        }
        with open(rules_path, "w") as fp:
            jsonutils.dump(self.rules, fp)
        policy.refresh(policy_file=rules_path)
        self.context = context.Context('fake', 'fake')

    def test_policy_called(self):
        """An explicitly denied action raises PolicyNotAuthorized."""
        self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
                          self.context, "example:exist", {})

    def test_not_found_policy_calls_default(self):
        """Unknown actions fall through to the permissive default rule."""
        policy.enforce(self.context, "example:noexist", {})
# Names of the fabricated API resources used by NeutronPolicyTestCase.
FAKE_RESOURCE_NAME = 'fake_resource'
FAKE_SPECIAL_RESOURCE_NAME = 'fake_policy'
# Attribute maps shaped like entries in attributes.RESOURCE_ATTRIBUTE_MAP.
# Each fake resource declares one dict-valued attribute 'attr' with two
# string sub-attributes, so sub-attribute policies can be exercised.
FAKE_RESOURCES = {"%ss" % FAKE_RESOURCE_NAME:
                  {'attr': {'allow_post': True,
                            'allow_put': True,
                            'is_visible': True,
                            'default': None,
                            'enforce_policy': True,
                            'validate': {'type:dict':
                                         {'sub_attr_1': {'type:string': None},
                                          'sub_attr_2': {'type:string': None}}}
                            }},
                  # special plural name ('fake_policy' -> 'fake_policies')
                  "%s" % FAKE_SPECIAL_RESOURCE_NAME.replace('y', 'ies'):
                  {'attr': {'allow_post': True,
                            'allow_put': True,
                            'is_visible': True,
                            'default': None,
                            'enforce_policy': True,
                            'validate': {'type:dict':
                                         {'sub_attr_1': {'type:string': None},
                                          'sub_attr_2': {'type:string': None}}}
                            }}}
class NeutronPolicyTestCase(base.BaseTestCase):
def fakepolicyinit(self, **kwargs):
enf = policy._ENFORCER
enf.set_rules(oslo_policy.Rules(self.rules))
    def setUp(self):
        """Install fake resources/rules and patch policy.init and the
        plugin manager for the duration of each test."""
        super(NeutronPolicyTestCase, self).setUp()
        policy.refresh()
        # Add Fake resources to RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCES)
        self._set_rules()

        # NOTE(review): only the plural of FAKE_RESOURCE_NAME is removed
        # on cleanup; the special 'fake_policies' entry appears to be
        # left behind -- confirm whether that is intentional.
        def remove_fake_resource():
            del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME]

        # route policy.init through fakepolicyinit so self.rules is used
        self.patcher = mock.patch.object(neutron.policy,
                                         'init',
                                         new=self.fakepolicyinit)
        self.patcher.start()
        self.addCleanup(remove_fake_resource)
        self.context = context.Context('fake', 'fake', roles=['user'])
        plugin_klass = importutils.import_class(
            "neutron.db.db_base_plugin_v2.NeutronDbPluginV2")
        self.manager_patcher = mock.patch('neutron.manager.NeutronManager')
        fake_manager = self.manager_patcher.start()
        fake_manager_instance = fake_manager.return_value
        fake_manager_instance.plugin = plugin_klass()
    def _set_rules(self, **kwargs):
        """Build self.rules from a baseline rule dict, letting tests
        override or extend individual rules via kwargs."""
        rules_dict = {
            "context_is_admin": "role:admin",
            "context_is_advsvc": "role:advsvc",
            "admin_or_network_owner": "rule:context_is_admin or "
                                      "tenant_id:%(network:tenant_id)s",
            "admin_or_owner": ("rule:context_is_admin or "
                               "tenant_id:%(tenant_id)s"),
            "admin_only": "rule:context_is_admin",
            "regular_user": "role:user",
            "shared": "field:networks:shared=True",
            "external": "field:networks:router:external=True",
            "default": '@',

            "create_network": "rule:admin_or_owner",
            "create_network:shared": "rule:admin_only",
            "update_network": '@',
            "update_network:shared": "rule:admin_only",
            "get_network": "rule:admin_or_owner or rule:shared or "
                           "rule:external or rule:context_is_advsvc",
            "create_subnet": "rule:admin_or_network_owner",
            "create_port:mac": "rule:admin_or_network_owner or "
                               "rule:context_is_advsvc",
            "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
            "get_port": "rule:admin_or_owner or rule:context_is_advsvc",
            "delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
            "create_fake_resource": "rule:admin_or_owner",
            "create_fake_resource:attr": "rule:admin_or_owner",
            "create_fake_resource:attr:sub_attr_1": "rule:admin_or_owner",
            "create_fake_resource:attr:sub_attr_2": "rule:admin_only",

            # NOTE(review): trailing colon in this key looks odd but is
            # kept verbatim; it presumably matches a sub-attribute-policy
            # lookup -- confirm before "fixing".
            "create_fake_policy:": "rule:admin_or_owner",
            "get_firewall_policy": "rule:admin_or_owner or "
                            "rule:shared",
            "get_firewall_rule": "rule:admin_or_owner or "
                            "rule:shared",

            "insert_rule": "rule:admin_or_owner",
            "remove_rule": "rule:admin_or_owner",
        }
        rules_dict.update(**kwargs)
        self.rules = oslo_policy.Rules.from_dict(rules_dict)
def test_firewall_policy_insert_rule_with_admin_context(self):
action = "insert_rule"
target = {}
result = policy.check(context.get_admin_context(), action, target)
self.assertTrue(result)
def test_firewall_policy_insert_rule_with_owner(self):
action = "insert_rule"
target = {"tenant_id": "own_tenant"}
user_context = context.Context('', "own_tenant", roles=['user'])
result = policy.check(user_context, action, target)
self.assertTrue(result)
def test_firewall_policy_remove_rule_without_admin_or_owner(self):
action = "remove_rule"
target = {"firewall_rule_id": "rule_id", "tenant_id": "tenantA"}
user_context = context.Context('', "another_tenant", roles=['user'])
result = policy.check(user_context, action, target)
self.assertFalse(result)
def _test_action_on_attr(self, context, action, obj, attr, value,
exception=None, **kwargs):
action = "%s_%s" % (action, obj)
target = {'tenant_id': 'the_owner', attr: value}
if kwargs:
target.update(kwargs)
if exception:
self.assertRaises(exception, policy.enforce,
context, action, target)
else:
result = policy.enforce(context, action, target)
self.assertEqual(result, True)
def _test_nonadmin_action_on_attr(self, action, attr, value,
exception=None, **kwargs):
user_context = context.Context('', "user", roles=['user'])
self._test_action_on_attr(user_context, action, "network", attr,
value, exception, **kwargs)
def _test_advsvc_action_on_attr(self, action, obj, attr, value,
exception=None, **kwargs):
user_context = context.Context('', "user",
roles=['user', 'advsvc'])
self._test_action_on_attr(user_context, action, obj, attr,
value, exception, **kwargs)
def test_nonadmin_write_on_private_fails(self):
self._test_nonadmin_action_on_attr('create', 'shared', False,
oslo_policy.PolicyNotAuthorized)
def test_nonadmin_read_on_private_fails(self):
self._test_nonadmin_action_on_attr('get', 'shared', False,
oslo_policy.PolicyNotAuthorized)
def test_nonadmin_write_on_shared_fails(self):
self._test_nonadmin_action_on_attr('create', 'shared', True,
oslo_policy.PolicyNotAuthorized)
def test_advsvc_get_network_works(self):
self._test_advsvc_action_on_attr('get', 'network', 'shared', False)
def test_advsvc_create_network_fails(self):
self._test_advsvc_action_on_attr('create', 'network', 'shared', False,
oslo_policy.PolicyNotAuthorized)
def test_advsvc_create_port_works(self):
self._test_advsvc_action_on_attr('create', 'port:mac', 'shared', False)
def test_advsvc_get_port_works(self):
self._test_advsvc_action_on_attr('get', 'port', 'shared', False)
def test_advsvc_update_port_works(self):
kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
self._test_advsvc_action_on_attr('update', 'port', 'shared', True,
**kwargs)
def test_advsvc_delete_port_works(self):
self._test_advsvc_action_on_attr('delete', 'port', 'shared', False)
def test_advsvc_create_subnet_fails(self):
self._test_advsvc_action_on_attr('create', 'subnet', 'shared', False,
oslo_policy.PolicyNotAuthorized)
def test_nonadmin_read_on_shared_succeeds(self):
self._test_nonadmin_action_on_attr('get', 'shared', True)
def test_check_is_admin_with_admin_context_succeeds(self):
admin_context = context.get_admin_context()
# explicitly set roles as this test verifies user credentials
# with the policy engine
admin_context.roles = ['admin']
self.assertTrue(policy.check_is_admin(admin_context))
def test_check_is_admin_with_user_context_fails(self):
self.assertFalse(policy.check_is_admin(self.context))
def test_check_is_admin_with_no_admin_policy_fails(self):
del self.rules[policy.ADMIN_CTX_POLICY]
admin_context = context.get_admin_context()
self.assertFalse(policy.check_is_admin(admin_context))
def test_check_is_advsvc_with_admin_context_fails(self):
admin_context = context.get_admin_context()
self.assertFalse(policy.check_is_advsvc(admin_context))
def test_check_is_advsvc_with_svc_context_succeeds(self):
svc_context = context.Context('', 'svc', roles=['advsvc'])
self.assertTrue(policy.check_is_advsvc(svc_context))
def test_check_is_advsvc_with_no_advsvc_policy_fails(self):
del self.rules[policy.ADVSVC_CTX_POLICY]
svc_context = context.Context('', 'svc', roles=['advsvc'])
self.assertFalse(policy.check_is_advsvc(svc_context))
def test_check_is_advsvc_with_user_context_fails(self):
self.assertFalse(policy.check_is_advsvc(self.context))
def _test_enforce_adminonly_attribute(self, action, **kwargs):
admin_context = context.get_admin_context()
target = {'shared': True}
if kwargs:
target.update(kwargs)
result = policy.enforce(admin_context, action, target)
self.assertEqual(result, True)
def test_enforce_adminonly_attribute_create(self):
self._test_enforce_adminonly_attribute('create_network')
def test_enforce_adminonly_attribute_update(self):
kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
self._test_enforce_adminonly_attribute('update_network', **kwargs)
def test_reset_adminonly_attr_to_default_fails(self):
kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']}
self._test_nonadmin_action_on_attr('update', 'shared', False,
oslo_policy.PolicyNotAuthorized,
**kwargs)
def test_enforce_adminonly_attribute_nonadminctx_returns_403(self):
action = "create_network"
target = {'shared': True, 'tenant_id': 'somebody_else'}
self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def _test_build_subattribute_match_rule(self, validate_value):
bk = FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate']
FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = (
validate_value)
action = "create_" + FAKE_RESOURCE_NAME
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}}
self.assertFalse(policy._build_subattr_match_rule(
'attr',
FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr'],
action,
target))
FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk
def test_build_subattribute_match_rule_empty_dict_validator(self):
self._test_build_subattribute_match_rule({})
def test_build_subattribute_match_rule_wrong_validation_info(self):
self._test_build_subattribute_match_rule(
{'type:dict': 'wrong_stuff'})
def test_build_match_rule_special_pluralized(self):
action = "create_" + FAKE_SPECIAL_RESOURCE_NAME
pluralized = "create_fake_policies"
target = {}
result = policy._build_match_rule(action, target, pluralized)
self.assertEqual("rule:" + action, str(result))
def test_build_match_rule_normal_pluralized_when_create(self):
action = "create_" + FAKE_RESOURCE_NAME
target = {}
result = policy._build_match_rule(action, target, None)
self.assertEqual("rule:" + action, str(result))
def test_enforce_subattribute(self):
action = "create_" + FAKE_RESOURCE_NAME
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}}
result = policy.enforce(self.context, action, target, None)
self.assertEqual(result, True)
def test_enforce_admin_only_subattribute(self):
action = "create_" + FAKE_RESOURCE_NAME
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x',
'sub_attr_2': 'y'}}
result = policy.enforce(context.get_admin_context(),
action, target, None)
self.assertEqual(result, True)
def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self):
action = "create_" + FAKE_RESOURCE_NAME
target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x',
'sub_attr_2': 'y'}}
self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce,
self.context, action, target, None)
def test_enforce_regularuser_on_read(self):
action = "get_network"
target = {'shared': True, 'tenant_id': 'somebody_else'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_firewall_policy_shared(self):
action = "get_firewall_policy"
target = {'shared': True, 'tenant_id': 'somebody_else'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_firewall_rule_shared(self):
action = "get_firewall_rule"
target = {'shared': True, 'tenant_id': 'somebody_else'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_tenant_id_check(self):
# Trigger a policy with rule admin_or_owner
action = "create_network"
target = {'tenant_id': 'fake'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_tenant_id_check_parent_resource(self):
def fakegetnetwork(*args, **kwargs):
return {'tenant_id': 'fake'}
action = "create_port:mac"
with mock.patch.object(manager.NeutronManager.get_instance().plugin,
'get_network', new=fakegetnetwork):
target = {'network_id': 'whatever'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_enforce_plugin_failure(self):
def fakegetnetwork(*args, **kwargs):
raise NotImplementedError('Blast!')
# the policy check and plugin method we use in this test are irrelevant
# so long that we verify that, if *f* blows up, the behavior of the
# policy engine to propagate the exception is preserved
action = "create_port:mac"
with mock.patch.object(manager.NeutronManager.get_instance().plugin,
'get_network', new=fakegetnetwork):
target = {'network_id': 'whatever'}
self.assertRaises(NotImplementedError,
policy.enforce,
self.context,
action,
target)
def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self):
def fakegetnetwork(*args, **kwargs):
return {'tenant_id': 'fake'}
self._set_rules(
admin_or_network_owner="role:admin or "
"tenant_id:%(network_tenant_id)s")
action = "create_port:mac"
with mock.patch.object(manager.NeutronManager.get_instance().plugin,
'get_network', new=fakegetnetwork):
target = {'network_id': 'whatever'}
result = policy.enforce(self.context, action, target)
self.assertTrue(result)
def test_tenant_id_check_no_target_field_raises(self):
# Try and add a bad rule
self.assertRaises(
exceptions.PolicyInitError,
oslo_policy.Rules.from_dict,
{'test_policy': 'tenant_id:(wrong_stuff)'})
def _test_enforce_tenant_id_raises(self, bad_rule):
self._set_rules(admin_or_owner=bad_rule)
# Trigger a policy with rule admin_or_owner
action = "create_network"
target = {'tenant_id': 'fake'}
self.fakepolicyinit()
self.assertRaises(exceptions.PolicyCheckError,
policy.enforce,
self.context, action, target)
def test_enforce_tenant_id_check_malformed_target_field_raises(self):
self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s')
def test_enforce_tenant_id_check_invalid_parent_resource_raises(self):
self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s')
def test_process_rules(self):
action = "create_" + FAKE_RESOURCE_NAME
# Construct RuleChecks for an action, attribute and subattribute
match_rule = oslo_policy.RuleCheck('rule', action)
attr_rule = oslo_policy.RuleCheck(
'rule', '%s:%ss' % (action, FAKE_RESOURCE_NAME))
sub_attr_rules = [oslo_policy.RuleCheck(
'rule', '%s:%s:%s' % (action, 'attr', 'sub_attr_1'))]
# Build an AndCheck from the given RuleChecks
# Make the checks nested to better check the recursion
sub_attr_rules = oslo_policy.AndCheck(sub_attr_rules)
attr_rule = oslo_policy.AndCheck(
[attr_rule, sub_attr_rules])
match_rule = oslo_policy.AndCheck([match_rule, attr_rule])
# Assert that the rules are correctly extracted from the match_rule
rules = policy._process_rules_list([], match_rule)
self.assertEqual(['create_fake_resource',
'create_fake_resource:fake_resources',
'create_fake_resource:attr:sub_attr_1'], rules)
@mock.patch.object(policy.LOG, 'isEnabledFor', return_value=True)
@mock.patch.object(policy.LOG, 'debug')
def test_log_rule_list(self, mock_debug, mock_is_e):
policy.log_rule_list(oslo_policy.RuleCheck('rule', 'create_'))
self.assertTrue(mock_is_e.called)
self.assertTrue(mock_debug.called)
|
|
import os
import pytest
from .common import * # NOQA
# Shared fixture state populated once per module by create_project_client().
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
# Name of the target cluster (empty string lets the helpers pick a default).
CLUSTER_NAME = os.environ.get("CLUSTER_NAME", "")
# NOTE(review): read from the environment but not referenced in this chunk —
# presumably consumed by shared cleanup helpers; confirm before removing.
RANCHER_CLEANUP_PROJECT = os.environ.get("RANCHER_CLEANUP_PROJECT", "True")
@pytest.fixture
def ns_default_quota():
    """Default per-namespace quota a project hands to new namespaces."""
    return {
        "limit": {
            "pods": "5",
            "requestsCpu": "500m",
        }
    }
@pytest.fixture
def default_project_quota():
    """Project-wide resource quota used by these tests."""
    return {
        "limit": {
            "pods": "20",
            "requestsCpu": "2000m",
        }
    }
def ns_quota():
    """Explicit quota used when creating namespaces directly."""
    return {
        "limit": {
            "pods": "10",
            "requestsCpu": "500m",
        }
    }
def test_create_project_quota():
    """Create a project with a resource quota and verify both the project
    quota and the namespace-default quota propagate to a new namespace."""
    cluster = namespace["cluster"]
    client = get_user_client()
    c_client = namespace["c_client"]
    project_quota = default_project_quota()
    default_ns_quota = ns_default_quota()
    proj = client.create_project(
        name='test-' + random_str(),
        clusterId=cluster.id,
        resourceQuota=project_quota,
        namespaceDefaultResourceQuota=default_ns_quota)
    proj = client.wait_success(proj)
    assert proj.resourceQuota is not None
    assert proj.resourceQuota.limit.pods == project_quota["limit"]["pods"]
    assert (proj.resourceQuota.limit.requestsCpu ==
            project_quota["limit"]["requestsCpu"])
    assert proj.namespaceDefaultResourceQuota is not None
    assert (proj.namespaceDefaultResourceQuota.limit.pods ==
            default_ns_quota["limit"]["pods"])
    # A namespace created without an explicit quota inherits the default.
    ns = create_ns(c_client, cluster, proj)
    print(ns)
    assert ns is not None
    assert ns.resourceQuota is not None
    assert ns.resourceQuota.limit.pods == default_ns_quota["limit"]["pods"]
    assert (ns.resourceQuota.limit.requestsCpu ==
            default_ns_quota["limit"]["requestsCpu"])
    validate_resoucequota_thru_kubectl(ns)
def test_resource_quota_create_namespace_with_ns_quota():
    """Namespaces with explicit quotas are allowed up to the project quota;
    one more namespace beyond the allotment must be rejected.

    BUG FIX: the original wrapped the over-quota creation in a bare
    try/except, so the test silently passed when no exception was raised
    at all.  pytest.raises makes the failure mandatory.
    """
    cluster = namespace["cluster"]
    client = get_user_client()
    c_client = namespace["c_client"]
    quota = default_project_quota()
    nsquota = ns_quota()
    proj = client.create_project(name='test-' + random_str(),
                                 clusterId=cluster.id,
                                 resourceQuota=quota,
                                 namespaceDefaultResourceQuota=quota)
    proj = client.wait_success(proj)
    assert proj.resourceQuota is not None
    # Create a namespace carrying its own resource quota.
    ns = c_client.create_namespace(name=random_str(),
                                   projectId=proj.id,
                                   resourceQuota=ns_quota())
    ns = c_client.wait_success(ns)
    assert ns is not None
    assert ns.resourceQuota is not None
    assert ns.resourceQuota.limit.pods == nsquota["limit"]["pods"]
    assert ns.resourceQuota.limit.requestsCpu == \
        nsquota["limit"]["requestsCpu"]
    validate_resoucequota_thru_kubectl(ns)
    # A second namespace still fits within the project quota.
    ns1 = c_client.create_namespace(name=random_str(),
                                    projectId=proj.id,
                                    resourceQuota=nsquota)
    ns1 = c_client.wait_success(ns1)
    print(ns1)
    assert ns1 is not None
    assert ns1.resourceQuota is not None
    assert ns1.resourceQuota.limit.pods == nsquota["limit"]["pods"]
    assert ns1.resourceQuota.limit.requestsCpu == \
        nsquota["limit"]["requestsCpu"]
    validate_resoucequota_thru_kubectl(ns1)
    # A third namespace would exceed the project allotment and MUST fail.
    with pytest.raises(Exception) as excinfo:
        c_client.create_namespace(name=random_str(),
                                  projectId=proj.id,
                                  resourceQuota=ns_quota())
    print(str(excinfo.value))
    assert "MaxLimitExceeded" in str(excinfo.value)
def test_namespace_quota_edit(remove_resource):
    """A namespace quota can be raised above the project default after the
    namespace in the other project has been moved out of its project."""
    client, cluster = get_global_admin_client_and_cluster()
    project_a = client.create_project(
        name=random_str(),
        clusterId=cluster.id,
        resourceQuota={"limit": {"limitsCpu": "20m"}},
        namespaceDefaultResourceQuota={"limit": {"limitsCpu": "10m"}})
    project_b = client.create_project(
        name=random_str(),
        clusterId=cluster.id,
        resourceQuota={"limit": {"limitsCpu": "15m"}},
        namespaceDefaultResourceQuota={"limit": {"limitsCpu": "15m"}})
    admin_c_client = get_cluster_client_for_token(cluster, ADMIN_TOKEN)
    ns_a = admin_c_client.create_namespace(name=random_str(),
                                           clusterId=cluster.id,
                                           projectId=project_a.id)
    ns_b = admin_c_client.create_namespace(name=random_str(),
                                           clusterId=cluster.id,
                                           projectId=project_b.id)
    ns_a = admin_c_client.wait_success(ns_a)
    ns_b = admin_c_client.wait_success(ns_b)
    # Detach the second namespace from its project before editing.
    admin_c_client.action(obj=ns_b,
                          action_name="move",
                          projectId=None)
    ns_a = admin_c_client.update(
        ns_a, resourceQuota={"limit": {"limitsCpu": "11m"}})
    assert ns_a.resourceQuota.limit.limitsCpu == "11m"
    remove_resource(project_a)
    remove_resource(project_b)
    remove_resource(ns_b)
def validate_resoucequota_thru_kubectl(namespace):
    """Cross-check the API-assigned quota against ``kubectl get quota``.

    Fetches the ResourceQuota object living in the given namespace and
    asserts its hard limits match the namespace's resourceQuota as
    reported by the Rancher API.
    """
    command = "get quota --namespace " + namespace['id']
    print(command)
    result = execute_kubectl_cmd(command, json_out=True)
    print("Kubectl command result")
    print(result)
    expected = namespace['resourceQuota']
    items = result["items"]
    assert "spec" in items[0]
    hard_limits = items[0]["spec"]["hard"]
    assert hard_limits['pods'] == expected['limit']['pods']
    assert hard_limits['requests.cpu'] == expected['limit']['requestsCpu']
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
    """Module-scoped setup: build a project/namespace plus user-, cluster-
    and project-level clients, and delete the project on teardown."""
    client, cluster = get_user_client_and_cluster()
    create_kubeconfig(cluster)
    p, ns = create_project_and_ns(USER_TOKEN, cluster, "testworkload")
    namespace["p_client"] = get_project_client_for_token(p, USER_TOKEN)
    namespace["c_client"] = get_cluster_client_for_token(cluster, USER_TOKEN)
    namespace["ns"] = ns
    namespace["cluster"] = cluster
    namespace["project"] = p

    def fin():
        # Deleting the project cascades to its namespaces/workloads.
        get_user_client().delete(namespace["project"])

    request.addfinalizer(fin)
|
|
#
# Copyright (c) 2016, SUSE LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ceph-auto-aws nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import unittest
from handson import main
from handson.test_setup import SetUp
from mock import patch
from yaml.parser import ParserError
def mock_connect(*_):
    """Stand-in for a boto connection factory; ignores all arguments."""
    return 'DummyValue'
def mock_connect_ec2(*_):
    """Stand-in for boto.connect_ec2; ignores all arguments."""
    return 'DummyValue'
class MockMyYaml(object):
    """Minimal stub of the YAML wrapper; write() always succeeds."""

    def write(self):
        return True
class MockVPCConnection(object):
    """Stub VPC connection returning canned values."""

    def create_vpc(self, *_):
        return {'id': 'DummyID', 'cidr_block': '10.0.0.0/16'}

    def get_all_vpcs(self, *_):
        return ['DummyValue']
class TestHandsOn(SetUp, unittest.TestCase):
    """End-to-end tests for the ``handson`` command-line front end.

    Subcommands are expected to finish by raising SystemExit(0);
    delegate-spec validation failures surface as AssertionError or
    ValueError from the argument parser.
    """

    def test_init(self):
        """``-h`` and ``--version`` must exit cleanly with status 0."""
        m = main.HandsOn()
        with self.assertRaises(SystemExit) as cm:
            m.parser.parse_args(['-h'])
        self.assertEqual(cm.exception.code, 0)
        with self.assertRaises(SystemExit) as cm:
            m.parser.parse_args(['--version'])
        self.assertEqual(cm.exception.code, 0)

    def test_install_01(self):
        """Delegate ranges beyond the configured maximum are rejected."""
        m = main.HandsOn()
        with self.assertRaises(AssertionError):
            m.run(['-v', 'install', 'delegates', '1-50'])

    def test_install_02(self):
        """A single out-of-range delegate number is rejected."""
        m = main.HandsOn()
        with self.assertRaises(AssertionError):
            m.run(['install', 'delegates', '51'])

    def test_install_03(self):
        """A non-numeric delegate spec raises ValueError."""
        m = main.HandsOn()
        with self.assertRaises(ValueError):
            m.run(['install', 'delegates', 'FartOnTheWater'])

    def test_install_04(self):
        """Delegate 0 is not a valid member of a list spec."""
        m = main.HandsOn()
        with self.assertRaises(AssertionError):
            m.run(['install', 'delegates', '0,1,3'])

    def test_install_05(self):
        """Descending ranges such as 3-2 are rejected."""
        m = main.HandsOn()
        with self.assertRaises(AssertionError):
            m.run(['install', 'delegates', '1,3-2'])

    def test_install_06(self):
        """Omitting the delegate spec exits cleanly."""
        m = main.HandsOn()
        with self.assertRaises(SystemExit) as cm:
            m.run(['install', 'delegates'])
        self.assertEqual(cm.exception.code, 0)

    # BUG FIX: the keyword was misspelled ``side_effects``; mock.patch
    # silently stores unknown keywords as plain attributes on the
    # MagicMock, so the intended side effect was never installed.
    @patch('boto.connect_ec2', side_effect=mock_connect_ec2)
    def test_probe_aws(self, mock_connect_ec2):
        """'probe aws' exits 0, and -v flips the logger to DEBUG."""
        m = main.HandsOn()
        with self.assertRaises(SystemExit) as cm:
            m.run(['-v', 'probe', 'aws'])
        self.assertEqual(cm.exception.code, 0)
        logger = logging.getLogger('handson')
        # assertEqual rather than assertIs: comparing log levels by
        # object identity only works due to CPython small-int caching.
        self.assertEqual(logger.getEffectiveLevel(), logging.DEBUG)
        with self.assertRaises(SystemExit) as cm:
            m.run(['probe', 'aws'])
        self.assertEqual(cm.exception.code, 0)
        logger = logging.getLogger('handson')
        self.assertEqual(logger.getEffectiveLevel(), logging.INFO)

    def test_probe_region(self):
        """'probe region' exits cleanly."""
        m = main.HandsOn()
        with self.assertRaises(SystemExit) as cm:
            m.run(['probe', 'region'])
        self.assertEqual(cm.exception.code, 0)

    def test_probe_subnets(self):
        """'probe subnets' exits cleanly, with and without --retag."""
        m = main.HandsOn()
        with self.assertRaises(SystemExit) as cm:
            m.run(['probe', 'subnets'])
        self.assertEqual(cm.exception.code, 0)
        with self.assertRaises(SystemExit) as cm:
            m.run(['probe', 'subnets', '--retag'])
        self.assertEqual(cm.exception.code, 0)

    def test_probe_vpc(self):
        """'probe vpc' exits cleanly."""
        m = main.HandsOn()
        with self.assertRaises(SystemExit) as cm:
            m.run(['probe', 'vpc'])
        self.assertEqual(cm.exception.code, 0)

    def test_probe_yaml(self):
        """'probe yaml' exits 0 for valid YAML and raises for bad files."""
        m = main.HandsOn()
        with self.assertRaises(SystemExit) as cm:
            m.run(['probe', 'yaml'])
        self.assertEqual(cm.exception.code, 0)
        # A non-YAML file must blow up inside the YAML parser.
        with self.assertRaises(ParserError):
            m.run(['-y', './bootstrap', 'probe', 'yaml'])
        # Structurally bogus YAML fails validation.
        with self.assertRaises(AssertionError):
            m.run(['-y', './data/bogus.yaml', 'probe', 'yaml'])
|
|
"""
HTML forms.
"""
import re
import collections
from werkzeug.datastructures import OrderedMultiDict
from robobrowser.compat import iteritems, encode_if_py2
from . import fields
from .. import helpers
from .. import exceptions
# Form-control tag names we extract from a parsed document.
_tags = ['input', 'textarea', 'select']
# Case-insensitive pattern matching any of those tag names; handed to
# BeautifulSoup's find_all() as a name filter.
_tag_ptn = re.compile(
    '|'.join(_tags),
    re.I
)
def _group_flat_tags(tag, tags):
"""Extract tags sharing the same name as the provided tag. Used to collect
options for radio and checkbox inputs.
:param Tag tag: BeautifulSoup tag
:param list tags: List of tags
:return: List of matching tags
"""
grouped = [tag]
name = tag.get('name', '').lower()
while tags and tags[0].get('name', '').lower() == name:
grouped.append(tags.pop(0))
return grouped
def _parse_field(tag, tags):
    """Construct the appropriate field object for a form-control tag.

    Radio and checkbox inputs consume their sibling options from *tags*
    via _group_flat_tags.

    :param Tag tag: BeautifulSoup tag to convert
    :param list tags: Remaining tags (mutated for grouped inputs)
    :return: A field instance, or None for an unrecognized tag name
    """
    kind = tag.name.lower()
    if kind == 'textarea':
        return fields.Textarea(tag)
    if kind == 'select':
        if tag.get('multiple') is not None:
            return fields.MultiSelect(tag)
        return fields.Select(tag)
    if kind == 'input':
        input_type = tag.get('type', '').lower()
        if input_type == 'submit':
            return fields.Submit(tag)
        if input_type == 'file':
            return fields.FileInput(tag)
        if input_type == 'radio':
            return fields.Radio(_group_flat_tags(tag, tags))
        if input_type == 'checkbox':
            return fields.Checkbox(_group_flat_tags(tag, tags))
        return fields.Input(tag)
def _parse_fields(parsed):
    """Parse form fields from HTML.

    :param BeautifulSoup parsed: Parsed HTML
    :return list: Field objects in document order
    """
    # Named `parsed_fields` to avoid shadowing the `fields` module.
    parsed_fields = []
    pending = parsed.find_all(_tag_ptn)
    for tag in pending:
        helpers.lowercase_attr_names(tag)
    # Consume from the front so grouped inputs (radio/checkbox) can pull
    # their sibling options out of the queue.
    while pending:
        current = pending.pop(0)
        try:
            parsed_field = _parse_field(current, pending)
        except exceptions.InvalidNameError:
            continue
        if parsed_field is not None:
            parsed_fields.append(parsed_field)
    return parsed_fields
def _filter_fields(fields, predicate):
    """Return a new OrderedMultiDict keeping entries whose value passes
    *predicate*, preserving multi-value ordering."""
    kept = OrderedMultiDict()
    for key, value in fields.items(multi=True):
        if predicate(value):
            kept.add(key, value)
    return kept
class Payload(object):
    """Container for serialized form outputs that knows how to export to
    the format expected by Requests.  Plain form values live in `data`;
    values routed to a specific keyword (e.g. files) live in `options`.
    """

    def __init__(self):
        self.data = OrderedMultiDict()
        self.options = collections.defaultdict(OrderedMultiDict)

    @classmethod
    def from_fields(cls, fields):
        """Build a payload from a multidict of fields, skipping disabled
        ones.

        :param OrderedMultiDict fields:
        """
        payload = cls()
        for _, field in fields.items(multi=True):
            if field.disabled:
                continue
            payload.add(field.serialize(), field.payload_key)
        return payload

    def add(self, data, key=None):
        """Merge serialized field values into the container.

        :param dict data: Serialized values for a field
        :param str key: Optional option bucket; values go to `self.data`
            when omitted.
        """
        sink = self.data if key is None else self.options[key]
        for name, value in iteritems(data):
            sink.add(name, value)

    def to_requests(self, method='get'):
        """Export as keyword arguments for `requests.request`.

        :param str method: Request method (GET payloads go in `params`)
        :return: Dict of keyword arguments
        """
        out = {}
        out['params' if method.lower() == 'get' else 'data'] = self.data
        out.update(self.options)
        return dict(
            (name, list(multidict.items(multi=True)))
            for name, multidict in iteritems(out)
        )
def prepare_fields(all_fields, submit_fields, submit):
    """Choose which fields to serialize, honoring the clicked submit.

    With more than one submit field, *submit* must be one of them; every
    other submit field is filtered out of the result.  Raises
    InvalidSubmitError when *submit* is missing or unknown.
    """
    submit_count = len(list(submit_fields.items(multi=True)))
    if submit_count <= 1:
        return all_fields
    if not submit:
        raise exceptions.InvalidSubmitError()
    if submit not in submit_fields.getlist(submit.name):
        raise exceptions.InvalidSubmitError()
    return _filter_fields(
        all_fields,
        lambda f: not isinstance(f, fields.Submit) or f == submit
    )
class Form(object):
    """Representation of an HTML form."""

    def __init__(self, parsed):
        soup = helpers.ensure_soup(parsed)
        if soup.name != 'form':
            soup = soup.find('form')
        self.parsed = soup
        self.action = self.parsed.get('action')
        self.method = self.parsed.get('method', 'get')
        self.fields = OrderedMultiDict()
        for parsed_field in _parse_fields(self.parsed):
            self.add_field(parsed_field)

    def add_field(self, field):
        """Register a field under its name.

        :param field: Field to add
        :raise: ValueError if `field` is not an instance of `BaseField`.
        """
        if not isinstance(field, fields.BaseField):
            raise ValueError('Argument "field" must be an instance of '
                             'BaseField')
        self.fields.add(field.name, field)

    @property
    def submit_fields(self):
        """Only the submit-type fields of this form."""
        return _filter_fields(
            self.fields,
            lambda field: isinstance(field, fields.Submit)
        )

    @encode_if_py2
    def __repr__(self):
        pairs = [
            u'{0}={1}'.format(name, field.value)
            for name, field in self.fields.items(multi=True)
        ]
        if pairs:
            return u'<RoboForm {0}>'.format(u', '.join(pairs))
        return u'<RoboForm>'

    def keys(self):
        return self.fields.keys()

    def __getitem__(self, item):
        return self.fields[item]

    def __setitem__(self, key, value):
        self.fields[key].value = value

    def serialize(self, submit=None):
        """Serialize each form field to a Payload container.

        :param Submit submit: Optional `Submit` to click, if form includes
            multiple submits
        :return: Payload instance
        """
        chosen = prepare_fields(self.fields, self.submit_fields, submit)
        return Payload.from_fields(chosen)
|
|
import datetime
from django.test import TestCase
# Create your tests here.
from py_yr.config.settings import FORECAST_TYPE_STANDARD, FORECAST_TYPE_HOURLY
from api.helper import is_valid_location, is_valid_language, is_valid_forecast_type, time_is_less_then_x_minutes_ago
from api.models import TimeZone, Location, Sun, Credit, Forecast, Precipitation, Pressure, Symbol, Temperature, \
WindDirection, WindSpeed, Time
class TestHelperValidation(TestCase):
    """Validation helpers: location, language and forecast-type checks."""

    def setUp(self):
        self.valid_location = 'norway/hordaland/bergen/bergen'
        self.invalid_location_to_many = 'norway/hordaland/bergen/bergen/bergen'
        self.valid_language_en = 'en'
        self.valid_language_nb = 'nb'
        # BUG FIX: this was 'nb', so the Nynorsk case merely duplicated
        # the Bokmål one and 'nn' itself was never exercised.
        self.valid_language_nn = 'nn'
        self.invalid_language = 'ru'
        self.valid_forecast_type_hourly = FORECAST_TYPE_HOURLY
        self.valid_forecast_type_standard = FORECAST_TYPE_STANDARD
        self.invalid_forecast_type_hourly = 'invalid'
        self.invalid_forecast_type_standard = 'invalid'

    def test_is_valid_location(self) -> None:
        """Locations with too many path segments are rejected."""
        self.assertTrue(is_valid_location(self.valid_location)[0])
        self.assertFalse(is_valid_location(self.invalid_location_to_many)[0])

    def test_is_valid_language(self) -> None:
        """Only the supported language codes pass validation."""
        self.assertTrue(is_valid_language(self.valid_language_en)[0])
        self.assertTrue(is_valid_language(self.valid_language_nb)[0])
        self.assertTrue(is_valid_language(self.valid_language_nn)[0])
        self.assertFalse(is_valid_language(self.invalid_language)[0])

    def test_is_valid_forecast_type(self) -> None:
        """Only the known forecast types pass validation."""
        self.assertTrue(
            is_valid_forecast_type(self.valid_forecast_type_hourly)[0])
        self.assertTrue(
            is_valid_forecast_type(self.valid_forecast_type_standard)[0])
        self.assertFalse(
            is_valid_forecast_type(self.invalid_forecast_type_hourly)[0])
        self.assertFalse(
            is_valid_forecast_type(self.invalid_forecast_type_standard)[0])

    def test_validate_search_request(self) -> None:
        """A request is valid only when all three checks pass."""
        self.assertTrue(is_valid_language(self.valid_language_en)[0]
                        and is_valid_location(self.valid_location)[0]
                        and is_valid_forecast_type(
                            self.valid_forecast_type_hourly)[0])
        self.assertFalse(is_valid_language(self.invalid_language)[0]
                         and is_valid_location(self.valid_location)[0]
                         and is_valid_forecast_type(
                             self.valid_forecast_type_hourly)[0])
        self.assertFalse(is_valid_language(self.valid_language_en)[0]
                         and is_valid_location(self.valid_location)[0]
                         and is_valid_forecast_type(
                             self.invalid_forecast_type_standard)[0])

    def test_location_special_case(self):
        """Trailing slashes are tolerated; wrong segment counts are not."""
        self.assertTrue(is_valid_location('norge/hordaland/bergen/bergen/')[0])
        self.assertTrue(is_valid_location('norge/hordaland/bergen/bergen')[0])
        self.assertTrue(is_valid_location('spain/catelonia/barcelona')[0])
        self.assertTrue(is_valid_location('spain/catelonia/barcelona/')[0])
        self.assertFalse(
            is_valid_location('norge/hordaland/bergen/bergen/bergen')[0])
        self.assertFalse(
            is_valid_location('norge/hordaland/bergen/bergen/bergen/')[0])
        self.assertFalse(is_valid_location('norge/oslo')[0])
        self.assertFalse(is_valid_location('norge/oslo/')[0])
        self.assertFalse(is_valid_location('norge')[0])
        self.assertFalse(is_valid_location('norge/')[0])
        self.assertFalse(is_valid_location('/')[0])
        self.assertFalse(is_valid_location('')[0])
        self.assertFalse(is_valid_location('//')[0])
        self.assertFalse(is_valid_location('///')[0])
        self.assertFalse(is_valid_location('///')[0])
class TestHelperSaving(TestCase):
def setUp(self):
self.timezone = TimeZone(
utcoffsetMinutes=60,
zone='utc'
)
self.location = Location(
timezone=self.timezone,
name='norway/hordaland/bergen/bergen',
country='Norway',
type='standard'
)
self.sun = Sun(
rise=datetime.datetime.now(),
set=datetime.datetime.now()
)
self.credit = Credit(
url='credit url',
text='much credit'
)
self.forecast = Forecast(
sun=self.sun,
credit=self.credit,
location=self.location,
search='norway/hordaland/bergen/bergen',
forecast_type='standard',
language='en'
)
self.rain = Precipitation(
value=2.4,
min_value=0.0,
max_value=0.2)
self.pressure = Pressure(
unit='hpa',
value=1024
)
self.symbol = Symbol(
name='sun',
var='01d',
number=1
)
self.temp = Temperature(
unit='Celsius',
value=24
)
self.direction = WindDirection(
degree=137.5,
code='N',
name='North'
)
self.speed = WindSpeed(
mps=2,
name='Breeze'
)
self.time = Time(
start=datetime.datetime.now(),
end=datetime.datetime.now(),
period=3,
forecast=self.forecast,
precipitation=self.rain,
symbol=self.symbol,
wind_direction=self.direction,
wind_speed=self.speed,
temperature=self.temp,
pressure=self.pressure
)
def test_save_location(self):
locations = Location.objects.all()
self.assertTrue(len(locations) is 0)
self.timezone.save()
self.location.timezone = self.timezone
self.location.save()
locations = Location.objects.all()
self.assertFalse(len(locations) is 0)
def test_save_timezone(self):
timezones = TimeZone.objects.all()
self.assertTrue(len(timezones) is 0)
self.timezone.save()
timezones = TimeZone.objects.all()
self.assertFalse(len(timezones) is 0)
def test_save_sun(self):
suns = Sun.objects.all()
self.assertTrue(len(suns) is 0)
self.sun.save()
suns = Sun.objects.all()
self.assertFalse(len(suns) is 0)
def test_save_credit(self):
creditz = Credit.objects.all()
self.assertTrue(len(creditz) is 0)
self.credit.save()
creditz = Credit.objects.all()
self.assertFalse(len(creditz) is 0)
def test_save_forecast(self):
pass
def test_save_rain(self):
rains = Precipitation.objects.all()
self.assertTrue(len(rains) is 0)
self.rain.save()
rains = Precipitation.objects.all()
self.assertFalse(len(rains) is 0)
def test_save_pressure(self):
pressures = Pressure.objects.all()
self.assertTrue(len(pressures) is 0)
self.pressure.save()
pressures = Pressure.objects.all()
self.assertFalse(len(pressures) is 0)
def test_save_temperature(self):
temperatures = Temperature.objects.all()
self.assertTrue(len(temperatures) is 0)
self.temp.save()
temperatures = Temperature.objects.all()
self.assertFalse(len(temperatures) is 0)
def test_save_wind_speed(self):
speeds = WindSpeed.objects.all()
self.assertTrue(len(speeds) is 0)
self.speed.save()
speeds = WindSpeed.objects.all()
self.assertFalse(len(speeds) is 0)
def test_save_wind_direction(self):
directions = WindDirection.objects.all()
self.assertTrue(len(directions) is 0)
self.direction.save()
directions = WindDirection.objects.all()
self.assertFalse(len(directions) is 0)
def test_save_symbol(self):
symbols = Symbol.objects.all()
self.assertTrue(len(symbols) is 0)
self.symbol.save()
symbols = Symbol.objects.all()
self.assertFalse(len(symbols) is 0)
def test_save_time(self):
    """A Time row persists once every model it depends on is saved and wired up."""
    # Value comparison instead of `is 0` (identity on an int literal).
    self.assertEqual(Time.objects.count(), 0)
    # Persist every model Time depends on, directly or transitively.
    self.pressure.save()
    self.rain.save()
    self.temp.save()
    self.speed.save()
    self.direction.save()
    self.symbol.save()
    self.credit.save()
    self.sun.save()
    self.timezone.save()
    self.location.timezone = self.timezone
    self.location.save()
    # Forecast requires location/credit/sun before it can be saved.
    self.forecast.location = self.location
    self.forecast.credit_id = self.credit.id
    self.forecast.sun_id = self.sun.id
    self.forecast.save()
    # Wire up all of Time's FKs, then save it.
    self.time.wind_direction_id = self.direction.id
    self.time.symbol_id = self.symbol.id
    self.time.pressure_id = self.pressure.id
    self.time.precipitation_id = self.rain.id
    self.time.wind_speed_id = self.speed.id
    self.time.temperature_id = self.temp.id
    self.time.forecast = self.forecast
    self.time.save()
    self.assertNotEqual(Time.objects.count(), 0)
class TestHelper(TestCase):
    """Tests for the time_is_less_then_x_minutes_ago helper."""

    def test_is_younger_then(self):
        in_two_days = datetime.datetime.now() + datetime.timedelta(days=2)
        # A timestamp in the future is not "less than 10 minutes ago".
        self.assertFalse(time_is_less_then_x_minutes_ago(in_two_days, 10))
        # "Right now" is within the last 10 minutes.
        self.assertTrue(time_is_less_then_x_minutes_ago(datetime.datetime.now(), 10))
        # Nonsensical window values are all rejected.
        self.assertFalse(time_is_less_then_x_minutes_ago(datetime.datetime.now(), -10))
        self.assertFalse(time_is_less_then_x_minutes_ago(datetime.datetime.now(), None))
        self.assertFalse(time_is_less_then_x_minutes_ago(datetime.datetime.now(), ''))
|
|
from __future__ import print_function
from typing import cast, Any, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Text
from confirmation.models import Confirmation, create_confirmation_link
from django.conf import settings
from django.template import loader
from django.utils.timezone import now as timezone_now
from zerver.decorator import statsd_increment
from zerver.lib.send_email import send_future_email, \
send_email_from_dict, FromAddress
from zerver.lib.queue import queue_json_publish
from zerver.models import (
Recipient,
ScheduledEmail,
UserMessage,
Stream,
get_display_recipient,
UserProfile,
get_user,
get_user_profile_by_id,
receives_offline_notifications,
get_context_for_message,
Message,
Realm,
)
import datetime
from email.utils import formataddr
import re
import subprocess
import ujson
from six.moves import urllib
from collections import defaultdict
def one_click_unsubscribe_link(user_profile, email_type):
    # type: (UserProfile, str) -> str
    """
    Generate a unique link that a logged-out user can visit to unsubscribe from
    Zulip e-mails without having to first log in.
    """
    realm_host = user_profile.realm.host
    return create_confirmation_link(user_profile, realm_host,
                                    Confirmation.UNSUBSCRIBE,
                                    url_args={'email_type': email_type})
def hash_util_encode(string):
    # type: (Text) -> Text
    """Encode a narrow component exactly like the frontend's
    hash_util.encodeHashComponent."""
    # Percent-encode everything: `safe` defaults to "/", but we want
    # slashes encoded too, hence safe=b"".
    quoted = urllib.parse.quote(string.encode("utf-8"), safe=b"")
    # Literal dots become %2E first, then every remaining '%' becomes '.'.
    return quoted.replace(".", "%2E").replace("%", ".")
def pm_narrow_url(realm, participants):
    # type: (Realm, List[Text]) -> Text
    """Absolute #narrow URL for a PM conversation with `participants`.

    NOTE: sorts `participants` in place, matching the original behavior.
    """
    participants.sort()
    encoded = hash_util_encode(",".join(participants))
    return u"%s/#narrow/pm-with/%s" % (realm.uri, encoded)
def stream_narrow_url(realm, stream):
    # type: (Realm, Text) -> Text
    """Absolute #narrow URL for a stream."""
    return u"%s/#narrow/stream/%s" % (realm.uri, hash_util_encode(stream))
def topic_narrow_url(realm, stream, topic):
    # type: (Realm, Text, Text) -> Text
    """Absolute #narrow URL for a topic within a stream."""
    return u"%s/#narrow/stream/%s/topic/%s" % (
        realm.uri, hash_util_encode(stream), hash_util_encode(topic))
def build_message_list(user_profile, messages):
    # type: (UserProfile, List[Message]) -> List[Dict[str, Any]]
    """
    Builds the message list object for the missed message email template.
    The messages are collapsed into per-recipient and per-sender blocks, like
    our web interface
    """
    messages_to_render = []  # type: List[Dict[str, Any]]

    def sender_string(message):
        # type: (Message) -> Text
        # Only stream/huddle messages show a sender name; 1:1 PMs leave it
        # blank (the header already names the other party).
        if message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE):
            return message.sender.full_name
        else:
            return ''

    def relative_to_full_url(content):
        # type: (Text) -> Text
        # Rewrite the relative URLs Zulip renders into absolute ones, since
        # email clients have no notion of the server's base URL.
        #
        # URLs for uploaded content are of the form
        # "/user_uploads/abc.png". Make them full paths.
        #
        # There's a small chance of colliding with non-Zulip URLs containing
        # "/user_uploads/", but we don't have much information about the
        # structure of the URL to leverage.
        content = re.sub(
            r"/user_uploads/(\S*)",
            user_profile.realm.uri + r"/user_uploads/\1", content)
        # Our proxying user-uploaded images seems to break inline images in HTML
        # emails, so scrub the image but leave the link.
        content = re.sub(
            r"<img src=(\S+)/user_uploads/(\S+)>", "", content)
        # URLs for emoji are of the form
        # "static/generated/emoji/images/emoji/snowflake.png".
        content = re.sub(
            r"/static/generated/emoji/images/emoji/",
            user_profile.realm.uri + r"/static/generated/emoji/images/emoji/",
            content)
        # Realm emoji should use absolute URLs when referenced in missed-message emails.
        content = re.sub(
            r"/user_avatars/(\d+)/emoji/",
            user_profile.realm.uri + r"/user_avatars/\1/emoji/", content)
        # Stream links need to be converted from relative to absolute. They
        # have href values in the form of "/#narrow/stream/...".
        content = re.sub(
            r"/#narrow/stream/",
            user_profile.realm.uri + r"/#narrow/stream/",
            content)
        return content

    def fix_plaintext_image_urls(content):
        # type: (Text) -> Text
        # Replace image URLs in plaintext content of the form
        #     [image name](image url)
        # with a simple hyperlink.
        return re.sub(r"\[(\S*)\]\((\S*)\)", r"\2", content)

    def fix_emoji_sizes(html):
        # type: (Text) -> Text
        # Force a fixed emoji height; email clients ignore our CSS classes.
        return html.replace(' class="emoji"', ' height="20px"')

    def build_message_payload(message):
        # type: (Message) -> Dict[str, Text]
        # Produce both plaintext and HTML renderings of one message body.
        plain = message.content
        plain = fix_plaintext_image_urls(plain)
        plain = relative_to_full_url(plain)
        assert message.rendered_content is not None
        html = message.rendered_content
        html = relative_to_full_url(html)
        html = fix_emoji_sizes(html)
        return {'plain': plain, 'html': html}

    def build_sender_payload(message):
        # type: (Message) -> Dict[str, Any]
        # A sender block: one sender name plus a list of their messages.
        sender = sender_string(message)
        return {'sender': sender,
                'content': [build_message_payload(message)]}

    def message_header(user_profile, message):
        # type: (UserProfile, Message) -> Dict[str, Any]
        # Build the per-recipient header (plain + HTML) for a message:
        # "You and X" for PMs/huddles, "stream > topic" for streams.
        disp_recipient = get_display_recipient(message.recipient)
        if message.recipient.type == Recipient.PERSONAL:
            header = u"You and %s" % (message.sender.full_name,)
            html_link = pm_narrow_url(user_profile.realm, [message.sender.email])
            header_html = u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
        elif message.recipient.type == Recipient.HUDDLE:
            # For huddles, get_display_recipient returns a list of user dicts.
            assert not isinstance(disp_recipient, Text)
            other_recipients = [r['full_name'] for r in disp_recipient
                                if r['email'] != user_profile.email]
            header = u"You and %s" % (", ".join(other_recipients),)
            html_link = pm_narrow_url(user_profile.realm, [r["email"] for r in disp_recipient
                                                           if r["email"] != user_profile.email])
            header_html = u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
        else:
            # Stream message: header links to both the stream and the topic.
            assert isinstance(disp_recipient, Text)
            header = u"%s > %s" % (disp_recipient, message.topic_name())
            stream_link = stream_narrow_url(user_profile.realm, disp_recipient)
            topic_link = topic_narrow_url(user_profile.realm, disp_recipient, message.subject)
            header_html = u"<a href='%s'>%s</a> > <a href='%s'>%s</a>" % (
                stream_link, disp_recipient, topic_link, message.subject)
        return {"plain": header,
                "html": header_html,
                "stream_message": message.recipient.type_name() == "stream"}

    # # Collapse message list to
    # [
    #    {
    #       "header": {
    #                   "plain":"header",
    #                   "html":"htmlheader"
    #                 }
    #       "senders":[
    #          {
    #             "sender":"sender_name",
    #             "content":[
    #                {
    #                   "plain":"content",
    #                   "html":"htmlcontent"
    #                }
    #                {
    #                   "plain":"content",
    #                   "html":"htmlcontent"
    #                }
    #             ]
    #          }
    #       ]
    #    },
    # ]

    # Chronological order, then greedily merge consecutive messages that
    # share a header (recipient/topic) and, within that, a sender.
    messages.sort(key=lambda message: message.pub_date)
    for message in messages:
        header = message_header(user_profile, message)
        # If we want to collapse into the previous recipient block
        if len(messages_to_render) > 0 and messages_to_render[-1]['header'] == header:
            sender = sender_string(message)
            sender_block = messages_to_render[-1]['senders']
            # Same message sender, collapse again
            if sender_block[-1]['sender'] == sender:
                sender_block[-1]['content'].append(build_message_payload(message))
            else:
                # Start a new sender block
                sender_block.append(build_sender_payload(message))
        else:
            # New recipient and sender block
            recipient_block = {'header': header,
                               'senders': [build_sender_payload(message)]}
            messages_to_render.append(recipient_block)
    return messages_to_render
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events_reply_in_zulip(user_profile, missed_messages, message_count):
    # type: (UserProfile, List[Message], int) -> None
    """
    Send a reminder email to a user if she's missed some PMs by being offline.

    The email will have its reply to address set to a limited used email
    address that will send a zulip message to the correct recipient. This
    allows the user to respond to missed PMs, huddles, and @-mentions directly
    from the email.

    `user_profile` is the user to send the reminder to
    `missed_messages` is a list of Message objects to remind about they should
    all have the same recipient and subject

    The actual send is deferred to the missedmessage_email_senders queue.
    """
    from zerver.context_processors import common_context
    # Disabled missedmessage emails internally
    if not user_profile.enable_offline_email_notifications:
        return
    # Enforce the documented precondition: one (recipient, subject) pair.
    recipients = set((msg.recipient_id, msg.subject) for msg in missed_messages)
    if len(recipients) != 1:
        raise ValueError(
            'All missed_messages must have the same recipient and subject %r' %
            recipients
        )
    unsubscribe_link = one_click_unsubscribe_link(user_profile, "missed_messages")
    context = common_context(user_profile)
    context.update({
        'name': user_profile.full_name,
        'messages': build_message_list(user_profile, missed_messages),
        'message_count': message_count,
        'mention': missed_messages[0].recipient.type == Recipient.STREAM,
        'unsubscribe_link': unsubscribe_link,
    })
    # If this setting (email mirroring integration) is enabled, only then
    # can users reply to email to send message to Zulip. Thus, one must
    # ensure to display warning in the template.
    if settings.EMAIL_GATEWAY_PATTERN:
        context.update({
            'reply_warning': False,
            'reply_to_zulip': True,
        })
    else:
        context.update({
            'reply_warning': True,
            'reply_to_zulip': False,
        })
    from zerver.lib.email_mirror import create_missed_message_address
    reply_to_address = create_missed_message_address(user_profile, missed_messages[0])
    if reply_to_address == FromAddress.NOREPLY:
        reply_to_name = None
    else:
        reply_to_name = "Zulip"
    senders = list(set(m.sender for m in missed_messages))
    if (missed_messages[0].recipient.type == Recipient.HUDDLE):
        display_recipient = get_display_recipient(missed_messages[0].recipient)
        # Make sure that this is a list of strings, not a string.
        assert not isinstance(display_recipient, Text)
        other_recipients = [r['full_name'] for r in display_recipient
                            if r['id'] != user_profile.id]
        context.update({'group_pm': True})
        # Human-readable group name: "A and B", "A, B, and C", or
        # "A, B, and N others" for larger huddles.
        if len(other_recipients) == 2:
            huddle_display_name = u"%s" % (" and ".join(other_recipients))
            context.update({'huddle_display_name': huddle_display_name})
        elif len(other_recipients) == 3:
            huddle_display_name = u"%s, %s, and %s" % (other_recipients[0], other_recipients[1], other_recipients[2])
            context.update({'huddle_display_name': huddle_display_name})
        else:
            huddle_display_name = u"%s, and %s others" % (', '.join(other_recipients[:2]), len(other_recipients) - 2)
            context.update({'huddle_display_name': huddle_display_name})
    elif (missed_messages[0].recipient.type == Recipient.PERSONAL):
        context.update({'private_message': True})
    else:
        # Keep only the senders who actually mentioned the user
        #
        # TODO: When we add wildcard mentions that send emails, add
        # them to the filter here.
        senders = list(set(m.sender for m in missed_messages if
                           UserMessage.objects.filter(message=m, user_profile=user_profile,
                                                      flags=UserMessage.flags.mentioned).exists()))
        context.update({'at_mention': True})
    context.update({
        'sender_str': ", ".join(sender.full_name for sender in senders),
        'realm_str': user_profile.realm.name,
    })
    from_name = "Zulip missed messages"  # type: Text
    from_address = FromAddress.NOREPLY
    if len(senders) == 1 and settings.SEND_MISSED_MESSAGE_EMAILS_AS_USER:
        # If this setting is enabled, you can reply to the Zulip
        # missed message emails directly back to the original sender.
        # However, one must ensure the Zulip server is in the SPF
        # record for the domain, or there will be spam/deliverability
        # problems.
        sender = senders[0]
        from_name, from_address = (sender.full_name, sender.email)
        context.update({
            'reply_warning': False,
            'reply_to_zulip': False,
        })
    email_dict = {
        'template_prefix': 'zerver/emails/missed_message',
        'to_user_id': user_profile.id,
        'from_name': from_name,
        'from_address': from_address,
        'reply_to_email': formataddr((reply_to_name, reply_to_address)),
        'context': context}
    queue_json_publish("missedmessage_email_senders", email_dict, send_email_from_dict)
    # Record the send so rate-limiting of reminders can key off it.
    user_profile.last_reminder = timezone_now()
    user_profile.save(update_fields=['last_reminder'])
def handle_missedmessage_emails(user_profile_id, missed_email_events):
    # type: (int, Iterable[Dict[str, Any]]) -> None
    """Group queued missed-message events and send one reminder email per
    (recipient, topic) pair via do_send_missedmessage_events_reply_in_zulip."""
    message_ids = [event.get('message_id') for event in missed_email_events]
    user_profile = get_user_profile_by_id(user_profile_id)
    if not receives_offline_notifications(user_profile):
        return
    # Only messages still unread for this user.
    # NOTE(review): user_profile_id is given the full model instance rather
    # than its id — Django coerces this via the pk, but it reads oddly;
    # confirm intent.
    messages = Message.objects.filter(usermessage__user_profile_id=user_profile,
                                      id__in=message_ids,
                                      usermessage__flags=~UserMessage.flags.read)
    # Cancel missed-message emails for deleted messages
    messages = [um for um in messages if um.content != "(deleted)"]
    if not messages:
        return
    # Bucket by (recipient, topic); each bucket becomes one email.
    messages_by_recipient_subject = defaultdict(list)  # type: Dict[Tuple[int, Text], List[Message]]
    for msg in messages:
        messages_by_recipient_subject[(msg.recipient_id, msg.topic_name())].append(msg)
    # Remember the true unread count per bucket before context messages are
    # mixed in below.
    message_count_by_recipient_subject = {
        recipient_subject: len(msgs)
        for recipient_subject, msgs in messages_by_recipient_subject.items()
    }
    # For stream messages, pull in surrounding conversation context around
    # the earliest missed message.
    for msg_list in messages_by_recipient_subject.values():
        msg = min(msg_list, key=lambda msg: msg.pub_date)
        if msg.recipient.type == Recipient.STREAM:
            msg_list.extend(get_context_for_message(msg))
    # Send an email per recipient subject pair
    for recipient_subject, msg_list in messages_by_recipient_subject.items():
        # De-duplicate: context messages can overlap the missed ones.
        unique_messages = {m.id: m for m in msg_list}
        do_send_missedmessage_events_reply_in_zulip(
            user_profile,
            list(unique_messages.values()),
            message_count_by_recipient_subject[recipient_subject],
        )
def clear_scheduled_emails(user_id, email_type=None):
    # type: (int, Optional[int]) -> None
    """Delete a user's pending ScheduledEmail rows, optionally restricted to
    a single email type."""
    pending = ScheduledEmail.objects.filter(user_id=user_id)
    if email_type is not None:
        pending = pending.filter(type=email_type)
    pending.delete()
def log_digest_event(msg):
    # type: (Text) -> None
    """Append one line to the digest log file."""
    # NOTE(review): basicConfig configures the *root* logger and is a no-op
    # after its first call; a module-level named logger would be cleaner,
    # but behavior is left as-is.
    import logging
    logging.basicConfig(filename=settings.DIGEST_LOG_PATH, level=logging.INFO)
    logging.info(msg)
def enqueue_welcome_emails(user_id):
    # type: (int) -> None
    """Schedule the day-1 and day-2 onboarding followup emails for a user."""
    from zerver.context_processors import common_context
    if settings.WELCOME_EMAIL_SENDER is not None:
        # line break to avoid triggering lint rule
        from_name = settings.WELCOME_EMAIL_SENDER['name']
        from_address = settings.WELCOME_EMAIL_SENDER['email']
    else:
        from_name = None
        from_address = FromAddress.SUPPORT
    user_profile = get_user_profile_by_id(user_id)
    unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
    context = common_context(user_profile)
    context.update({
        'unsubscribe_link': unsubscribe_link,
        # BUG FIX: a stray '%s' was previously concatenated into this URL,
        # producing "<uri>%s/help/..."; the link is just realm.uri + path.
        'organization_setup_advice_link':
            user_profile.realm.uri + '/help/getting-your-organization-started-with-zulip',
        'is_realm_admin': user_profile.is_realm_admin,
    })
    # Deliver with a delay so the emails arrive after initial signup noise.
    send_future_email(
        "zerver/emails/followup_day1", to_user_id=user_id, from_name=from_name,
        from_address=from_address, context=context, delay=datetime.timedelta(hours=1))
    send_future_email(
        "zerver/emails/followup_day2", to_user_id=user_id, from_name=from_name,
        from_address=from_address, context=context, delay=datetime.timedelta(days=1))
def convert_html_to_markdown(html):
    # type: (Text) -> Text
    """Convert an HTML fragment to markdown by shelling out to html2text.

    On Linux, the tool installs as html2markdown, and there's a command called
    html2text that does something totally different. On OSX, the tool installs
    as html2text — so try both names.

    Raises OSError if neither command can be launched.
    """
    commands = ["html2markdown", "html2text"]
    p = None
    for command in commands:
        try:
            # A body width of 0 means do not try to wrap the text for us.
            p = subprocess.Popen(
                [command, "--body-width=0"], stdout=subprocess.PIPE,
                stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
            break
        except OSError:
            continue
    if p is None:
        # BUG FIX: previously `p` was left unbound here, so a missing tool
        # surfaced as an UnboundLocalError; fail with a meaningful error.
        raise OSError("Could not find any of the commands: %s" % (", ".join(commands),))
    markdown = p.communicate(input=html.encode('utf-8'))[0].decode('utf-8').strip()
    # We want images to get linked and inline previewed, but html2text will turn
    # them into links of the form ``, which is
    # ugly. Run a regex over the resulting description, turning links of the
    # form `` into
    # `[image.png](http://foo.com/image.png)`.
    return re.sub(u"!\\[\\]\\((\\S*)/(\\S*)\\?(\\S*)\\)",
                  u"[\\2](\\1/\\2)", markdown)
|
|
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import SimpleTestCase, TestCase
from django_dynamic_fixture import G
from mock import patch, Mock
from restraint import core, constants
from restraint.models import PermSet, Perm, PermLevel, PermAccess
import restraint.tests.configuration as test_configuration
class TestGetRestraintConfig(SimpleTestCase):
    """get_restraint_config must expose the perm sets from the registered
    test configuration."""

    def test_get_configuration(self):
        expected = test_configuration.get_configuration()['perm_sets']
        actual = core.get_restraint_config()['perm_sets']
        self.assertEqual(actual, expected)
class TestRestraintLoadPerms(TestCase):
    """Verifies which perms Restraint loads for different kinds of users.

    Uses assertEqual throughout: assertEquals is a deprecated alias and the
    file already uses assertEqual elsewhere.
    """

    def setUp(self):
        core.update_restraint_db()

    def test_individual_user(self):
        # Create an individual user
        user = G(User, is_superuser=False, is_staff=False)
        # Get the users restraints
        restraints = core.Restraint(user)
        perms = restraints.perms
        self.assertEqual(
            perms,
            {
                'can_edit_stuff': {
                    'some_stuff': test_configuration.user_some_stuff_id_filter
                }
            }
        )

    def test_super_user(self):
        # Make a user that is a superuser and verify they get all proper permissions
        user = G(User, is_superuser=True)
        restraints = core.Restraint(user)
        perms = restraints.perms
        self.assertEqual(
            perms,
            {
                'can_edit_stuff': {
                    'all_stuff': None,
                    'some_stuff': test_configuration.user_some_stuff_id_filter,
                },
                'can_view_stuff': {
                    '': None
                },
                'can_access_users_named_foo': {
                    '': None
                }
            }
        )

    def test_user_with_additional_perms(self):
        # Make an individual user and verify they get all of the perms.
        # Also, add an individual permission to the user and verify they get that too
        user = G(User, is_superuser=False, is_staff=False)
        pa = G(PermAccess, perm_user_id=user.id, perm_user_type=ContentType.objects.get_for_model(user))
        pa.perm_levels.add(PermLevel.objects.get(name='all_stuff'))
        r = core.Restraint(user)
        perms = r.perms
        self.assertEqual(
            perms,
            {
                'can_edit_stuff': {
                    'some_stuff': test_configuration.user_some_stuff_id_filter,
                    'all_stuff': None
                }
            }
        )

    def test_load_some_perms(self):
        # Restrict loading to a subset of perms.
        user = G(User, is_superuser=True)
        restraints = core.Restraint(user, ['can_edit_stuff'])
        perms = restraints.perms
        self.assertEqual(
            perms,
            {
                'can_edit_stuff': {
                    'all_stuff': None,
                    'some_stuff': test_configuration.user_some_stuff_id_filter
                }
            }
        )
class TestRestraintHasPerms(SimpleTestCase):
    """Exercises Restraint.has_perm against an injected permission dict."""

    @staticmethod
    def _restraint_with_canned_perms():
        # _load_perms is patched out in each test, so construction does not
        # hit the database; we inject the fixture permissions directly.
        r = core.Restraint(Mock())
        r._perms = {
            'can_view_stuff': {
                '': None,
            },
            'can_edit_stuff': {
                'all_stuff': None,
                'some_stuff': None,
            }
        }
        return r

    @patch.object(core.Restraint, '_load_perms', spec_set=True)
    def test_has_perm_w_level_true(self, mock_load_perms):
        r = self._restraint_with_canned_perms()
        self.assertTrue(r.has_perm('can_edit_stuff', 'all_stuff'))

    @patch.object(core.Restraint, '_load_perms', spec_set=True)
    def test_has_perm_w_level_false(self, mock_load_perms):
        r = self._restraint_with_canned_perms()
        self.assertFalse(r.has_perm('can_edit_stuff', 'no_stuff'))

    @patch.object(core.Restraint, '_load_perms', spec_set=True)
    def test_has_perm_wo_level_true(self, mock_load_perms):
        r = self._restraint_with_canned_perms()
        self.assertTrue(r.has_perm('can_edit_stuff'))

    @patch.object(core.Restraint, '_load_perms', spec_set=True)
    def test_has_perm_wo_level_false(self, mock_load_perms):
        r = self._restraint_with_canned_perms()
        self.assertFalse(r.has_perm('can_mess_with_stuff'))
class TestRestraintFilterQSet(TestCase):
    """Verifies Restraint.filter_qset narrows querysets per the user's perms.

    Uses assertEqual throughout: assertEquals is a deprecated alias.
    """

    def setUp(self):
        core.update_restraint_db()

    def test_filter_qset_global_access(self):
        # Make a user that is a superuser and verify they get all of the
        # super user perms
        u = G(User, is_superuser=True)
        # Make another user that they will be able to edit
        u2 = G(User)
        r = core.Restraint(u)
        filtered_qset = r.filter_qset(User.objects.all(), 'can_edit_stuff')
        self.assertEqual(set(filtered_qset), {u, u2})

    def test_filter_qset_local_access(self):
        # Make a user that is not a superuser
        u = G(User, is_superuser=False)
        # Make another user that they will not be able to edit
        G(User)
        r = core.Restraint(u)
        filtered_qset = r.filter_qset(User.objects.all(), 'can_edit_stuff')
        self.assertEqual(set(filtered_qset), {u})

    def test_filter_qset_multiple_local_access(self):
        # Make a user that is staff
        u = G(User, is_superuser=False, is_staff=True)
        # Make another user that they will not be able to edit
        G(User)
        # Make another super user that they will be able to edit
        u2 = G(User, is_superuser=True)
        r = core.Restraint(u)
        filtered_qset = r.filter_qset(User.objects.all(), 'can_edit_stuff')
        self.assertEqual(set(filtered_qset), {u, u2})

    def test_filter_qset_no_perms(self):
        # Make a user that is staff
        u = G(User, is_superuser=False, is_staff=True)
        # Load permissions that will not give them access to edit any accounts
        r = core.Restraint(u, ['bad_perm'])
        filtered_qset = r.filter_qset(User.objects.all(), 'can_edit_stuff')
        self.assertEqual(set(filtered_qset), set())

    def test_filter_qset_restrict_subset(self):
        models = [
            G(User, first_name='foo', last_name='foofington'),
            G(User, first_name='bar', last_name='barski'),
            G(User, first_name='foo', last_name='foogeelala'),
        ]
        # Make a user that is a superuser and verify they get all of the
        # super user perms
        u = G(User, is_superuser=True)
        r = core.Restraint(u)
        filtered_qset = r.filter_qset(
            User.objects.all(), 'can_access_users_named_foo', restrict_kwargs={'first_name': 'foo'})
        self.assertEqual(set(filtered_qset), set(models + [u]))

    def test_filter_qset_restrict_subset_no_perms(self):
        models = [
            G(User, first_name='foo', last_name='foofington'),
            G(User, first_name='bar', last_name='barski'),
            G(User, first_name='foo', last_name='foogeelala'),
        ]
        # Make a user that is a superuser and verify they get all of the
        # super user perms
        u = G(User, is_superuser=False)
        r = core.Restraint(u)
        filtered_qset = r.filter_qset(
            User.objects.all(), 'can_access_users_named_foo', restrict_kwargs={'first_name': 'foo'})
        self.assertEqual(set(filtered_qset), set([models[1]] + [u]))
class UpdateRestraintDbTest(TestCase):
    """End-to-end tests of core.update_restraint_db syncing config to the DB.

    Uses assertEqual throughout: assertEquals is a deprecated alias.
    """

    def add_custom_permission_set(self):
        """Create a non-private perm set outside the config, to verify that
        syncs never clobber user-created sets or their access rows."""
        # Setup a custom permission set
        custom_permission_set = PermSet.objects.create(
            name='custom',
            display_name='Custom',
            is_private=False
        )
        # Add some custom default access levels
        PermAccess.objects.set_default(
            permission_set_name=custom_permission_set.name,
            permission_name='can_edit_stuff',
            levels=['all_stuff']
        )

    @patch.object(core, 'get_restraint_config')
    def test_full_update_scenario_not_flush_default_access(self, mock_get_restraint_config):
        """Running the sync twice with the same config must be idempotent."""
        mock_get_restraint_config.return_value = {
            'perm_sets': {
                'global': {
                    'display_name': 'Global',
                },
                'restricted': {
                    'display_name': 'Restricted',
                },
            },
            'perms': {
                'can_edit_stuff': {
                    'display_name': 'Can Edit Stuff',
                    'levels': {
                        'all_stuff': {
                            'display_name': 'All Stuff',
                            'id_filter': None,
                        },
                        'some_stuff': {
                            'display_name': 'Some Stuff',
                            'id_filter': None,
                        },
                    },
                },
                'can_view_stuff': {
                    'display_name': 'Can View Stuff',
                    'levels': constants.BOOLEAN_LEVELS_CONFIG,
                },
            },
            'default_access': {
                'global': {
                    'can_edit_stuff': ['all_stuff', 'some_stuff'],
                    'can_view_stuff': [constants.BOOLEAN_LEVELS_NAME],
                },
                'restricted': {
                    'can_edit_stuff': ['some_stuff'],
                }
            }
        }
        core.update_restraint_db()
        core.update_restraint_db()

    @patch.object(core, 'get_restraint_config')
    def test_full_update_scenario_not_flush_default_access_update_new_perm(self, mock_get_restraint_config):
        """
        Verifies that existing permission set is given access to new permission
        """
        config = {
            'perm_sets': {
                'global': {
                    'display_name': 'Global',
                },
                'restricted': {
                    'display_name': 'Restricted',
                },
            },
            'perms': {
                'can_edit_stuff': {
                    'display_name': 'Can Edit Stuff',
                    'levels': {
                        'all_stuff': {
                            'display_name': 'All Stuff',
                            'id_filter': None,
                        },
                        'some_stuff': {
                            'display_name': 'Some Stuff',
                            'id_filter': None,
                        },
                    },
                },
                'can_view_stuff': {
                    'display_name': 'Can View Stuff',
                    'levels': constants.BOOLEAN_LEVELS_CONFIG,
                },
            },
            'default_access': {
                'global': {
                    'can_edit_stuff': ['all_stuff', 'some_stuff'],
                    'can_view_stuff': [constants.BOOLEAN_LEVELS_NAME],
                },
                'restricted': {
                    'can_edit_stuff': ['some_stuff'],
                }
            }
        }
        mock_get_restraint_config.return_value = config
        core.update_restraint_db()
        self.add_custom_permission_set()
        # add permission
        config['perms']['can_do_stuff'] = {
            'display_name': 'Can Do Stuff',
            'levels': {
                'all_stuff': {
                    'display_name': 'All Stuff',
                    'id_filter': None,
                },
                'this_thing': {
                    'display_name': 'This Thing',
                    'id_filter': None,
                },
            },
        }
        config['perms']['can_alter_stuff'] = {
            'display_name': 'Can Alter Stuff',
            'levels': constants.BOOLEAN_LEVELS_CONFIG
        }
        config['default_access']['global']['can_alter_stuff'] = [constants.BOOLEAN_LEVELS_NAME]
        config['default_access']['restricted']['can_do_stuff'] = ['all_stuff', 'this_thing']
        mock_get_restraint_config.return_value = config
        # update again
        core.update_restraint_db()
        # Config-declared sets are private; the user-created one is not.
        self.assertEqual(
            set(PermSet.objects.filter(is_private=True).values_list('name', flat=True)),
            {'global', 'restricted'}
        )
        self.assertEqual(
            set(PermSet.objects.all().values_list('name', flat=True)),
            {'global', 'restricted', 'custom'}
        )
        self.assertEqual(
            set(Perm.objects.values_list('name', flat=True)),
            {'can_view_stuff', 'can_edit_stuff', 'can_do_stuff', 'can_alter_stuff'}
        )
        self.assertEqual(
            list(PermLevel.objects.order_by(
                'perm__name',
                'name'
            ).values_list(
                'perm__name',
                'name'
            )),
            [
                ('can_alter_stuff', ''),
                ('can_do_stuff', 'all_stuff'),
                ('can_do_stuff', 'this_thing'),
                ('can_edit_stuff', 'all_stuff'),
                ('can_edit_stuff', 'some_stuff'),
                ('can_view_stuff', '')
            ]
        )
        self.assertEqual(
            list(
                PermAccess.objects.order_by(
                    'perm_set__name',
                    'perm_levels__perm__name',
                    'perm_levels__name'
                ).values_list(
                    'perm_set__name',
                    'perm_levels__perm__name',
                    'perm_levels__name'
                )
            ),
            [
                ('custom', 'can_edit_stuff', 'all_stuff'),
                ('global', 'can_alter_stuff', ''),
                ('global', 'can_edit_stuff', 'all_stuff'),
                ('global', 'can_edit_stuff', 'some_stuff'),
                ('global', 'can_view_stuff', ''),
                ('restricted', 'can_do_stuff', 'all_stuff'),
                ('restricted', 'can_do_stuff', 'this_thing'),
                ('restricted', 'can_edit_stuff', 'some_stuff')
            ]
        )

    @patch.object(core, 'get_restraint_config')
    def test_full_update_scenario_flush_default_access(self, mock_get_restraint_config):
        """With flush_default_access=True, removed config perms/levels are
        deleted and default access is rebuilt, while the custom set survives."""
        config = {
            'perm_sets': {
                'global': {
                    'display_name': 'Global',
                },
                'restricted': {
                    'display_name': 'Restricted',
                },
            },
            'perms': {
                'can_edit_stuff': {
                    'display_name': 'Can Edit Stuff',
                    'levels': {
                        'all_stuff': {
                            'display_name': 'All Stuff',
                            'id_filter': None,
                        },
                        'some_stuff': {
                            'display_name': 'Some Stuff',
                            'id_filter': None,
                        },
                    },
                },
                'can_view_stuff': {
                    'display_name': 'Can View Stuff',
                    'levels': constants.BOOLEAN_LEVELS_CONFIG,
                },
            },
            'default_access': {
                'global': {
                    'can_edit_stuff': ['all_stuff', 'some_stuff'],
                    'can_view_stuff': [constants.BOOLEAN_LEVELS_NAME],
                },
                'restricted': {
                    'can_edit_stuff': ['some_stuff'],
                }
            }
        }
        mock_get_restraint_config.return_value = config
        core.update_restraint_db()
        self.add_custom_permission_set()
        config = {
            'perm_sets': {
                'global': {
                    'display_name': 'Global',
                },
                'restricted': {
                    'display_name': 'Restricted',
                },
            },
            'perms': {
                'can_edit_stuff': {
                    'display_name': 'Can Edit Stuff',
                    'levels': {
                        'all_stuff': {
                            'display_name': 'All Stuff',
                            'id_filter': None,
                        },
                        'some_stuff': {
                            'display_name': 'Some Stuff',
                            'id_filter': None,
                        },
                    },
                },
                'can_view_stuff': {
                    'display_name': 'Can View Stuff',
                    'levels': constants.BOOLEAN_LEVELS_CONFIG,
                },
            },
            'default_access': {
                'global': {
                    'can_edit_stuff': ['all_stuff'],
                },
            }
        }
        mock_get_restraint_config.return_value = config
        core.update_restraint_db(flush_default_access=True)
        self.assertEqual(
            set(PermSet.objects.filter(is_private=True).values_list('name', flat=True)),
            {'global', 'restricted'}
        )
        self.assertEqual(
            set(PermSet.objects.all().values_list('name', flat=True)),
            {'custom', 'global', 'restricted'}
        )
        self.assertEqual(
            set(Perm.objects.values_list('name', flat=True)),
            {'can_view_stuff', 'can_edit_stuff'}
        )
        self.assertEqual(
            list(PermLevel.objects.order_by(
                'perm__name',
                'name'
            ).values_list(
                'perm__name',
                'name'
            )),
            [
                ('can_edit_stuff', 'all_stuff'),
                ('can_edit_stuff', 'some_stuff'),
                ('can_view_stuff', '')
            ]
        )
        self.assertEqual(
            list(
                PermAccess.objects.order_by(
                    'perm_set__name',
                    'perm_levels__perm__name',
                    'perm_levels__name'
                ).values_list(
                    'perm_set__name',
                    'perm_levels__perm__name',
                    'perm_levels__name'
                )
            ),
            [
                ('custom', 'can_edit_stuff', 'all_stuff'),
                ('global', 'can_edit_stuff', 'all_stuff'),
                ('restricted', None, None)
            ]
        )
|
|
import json
import os
import re
import uuid
from json import JSONEncoder
import parser
import yaml
# Format template used to derive a generated region's display name from its
# identifier.
DEFAULT_REGIONATOR_NAME = '{0} - Generated by Regionator'
# Maps a Habitat orientation keyword to the numeric id used in region JSON.
ORIENTATION_TO_ID = {
    'FACE_WEST': 0,
    'FACE_NORTH': 1,
    'FACE_EAST': 2,
    'FACE_SOUTH': 3,
}
def _default(self, obj):
    # Fallback serializer: if the object's class defines to_json(), call it;
    # otherwise defer to the original JSONEncoder.default (which raises
    # TypeError for unserializable types).
    return getattr(obj.__class__, "to_json", _default.default)(obj)
# Monkeypatch json's encoder globally so json.dumps() transparently
# serializes any object exposing a to_json() method (e.g. Mod below).
_default.default = JSONEncoder().default  # keep a handle on the original
JSONEncoder.default = _default
# Lookup tables for the DOL -> Neohabitat conversion, loaded once at import
# time. yaml.safe_load replaces the previous bare yaml.load calls: these
# files are plain data, the full loader is needlessly unsafe, and PyYAML >= 6
# requires an explicit Loader argument for yaml.load() (so the old calls
# fail there).
MOD_INDEX = {}
with open('./mod_index.yml', 'r') as mod_index:
    MOD_INDEX = yaml.safe_load(mod_index)
MOD_DEFAULTS = {}
with open('./mod_defaults.yml', 'r') as mod_defaults:
    MOD_DEFAULTS = yaml.safe_load(mod_defaults)
MOD_RENAMES = {}
with open('./mod_renames.yml', 'r') as mod_renames:
    MOD_RENAMES = yaml.safe_load(mod_renames)
class Mod(object):
def __init__(self, region, identifier, params={}, additional_params={},
contained_mods=[], parent=None):
self.region = region
self.identifier = identifier
self.params = params
self.additional_params = additional_params
self.contained_mods = contained_mods
self.id = str(uuid.uuid4())[:4]
self.parent = parent
for contained_mod in self.contained_mods:
contained_mod.parent = self
def __repr__(self):
return '<Mod(identifier="{0}", params={1})>'.format(self.identifier, self.params)
@property
def neohabitat_mod(self):
mod_json = {
'type': self.neohabitat_name,
'x': int(self.params['x']),
'y': int(self.params['y']),
'orientation': int(self.params['or']),
}
if self.neohabitat_name in MOD_DEFAULTS:
mod_json.update(MOD_DEFAULTS[self.neohabitat_name])
if 'style' in self.params:
mod_json['style'] = int(self.params['style'])
if 'gr_state' in self.params:
mod_json['gr_state'] = int(self.params['gr_state'])
# Handles the conversion of numerical mod parameters to their respective Neohabitat
# Mod fields.
if self.additional_params and self.neohabitat_name in MOD_INDEX:
chomp_start_key = 8
translator = MOD_INDEX[self.neohabitat_name]
for index, mod_key in translator.items():
if str(index) != 'CHOMP':
if str(index) in self.additional_params:
mod_json[mod_key] = int(self.additional_params[str(index)])
chomp_start_key += 1
# If a CHOMP key has been specified, chomps all remaining additional params into
# the specified field.
if 'CHOMP' in translator:
chomp_key = translator['CHOMP']
mod_json[chomp_key] = self._chomped_params(chomp_start_key)
if self.neohabitat_name == 'Vendo_front':
display_item = 0
slot_bools = [False for _ in range(10)]
for contained_mod in self.contained_mods:
slot_bools[int(contained_mod.params['y'])] = True
for slot in range(len(slot_bools)):
if not slot_bools[slot]:
display_item = slot + 1
break
mod_json['display_item'] = display_item
elif self.neohabitat_name == 'Door':
mod_json['connection'] = self.find_connection_context('front')
elif self.neohabitat_name == 'Building':
mod_json['connection'] = self.find_connection_context('interior')
elif self.neohabitat_name in ('Teleport', 'Elevator'):
# Add in an empty address string, which will be filled in separately
mod_json['address'] = ''
return mod_json
@property
def neohabitat_name(self):
    """The Neohabitat type name: an explicit rename if one exists, otherwise
    the capitalized RDL identifier."""
    return MOD_RENAMES.get(self.identifier, self.identifier.capitalize())
@property
def neohabitat_ref(self):
    """Globally unique Elko item ref: item-<Type>.<short uuid>.<region>."""
    region_part = self.region.name.replace('-', '.')
    return 'item-{0}.{1}.{2}'.format(self.neohabitat_name, self.id, region_part)
def find_connection_context(self, suffix):
    '''
    Look for a region parameter value of the form ``<name>_<suffix>`` and
    return it as a context ref. This is used to connect a Building or Door
    object to their destination. Returns '' when no parameter matches.
    '''
    pattern = r'(\w+_{})'.format(suffix)
    for location in self.region.params.values():
        match = re.match(pattern, location)
        if match:
            return 'context-{}'.format(match.group(1))
    return ''
def _chomped_params(self, start_index=8):
    """Collect additional params whose numeric key is >= start_index, as a
    list of ints ordered by key (typically an ASCII-code text payload)."""
    numeric_params = {}
    for key, value in self.additional_params.items():
        numeric_params[int(key)] = value
    ascii_codes = []
    for key in sorted(numeric_params):
        if key >= start_index:
            ascii_codes.append(int(numeric_params[key]))
    return ascii_codes
def to_json(self):
    """Serialize this mod as an Elko item record; `in` points at the parent
    item when contained, otherwise at the region context."""
    if self.parent is not None:
        container_ref = self.parent.neohabitat_ref
    else:
        container_ref = self.region.neohabitat_context
    return {
        'type': 'item',
        'ref': self.neohabitat_ref,
        'name': self.neohabitat_name,
        'mods': [self.neohabitat_mod],
        'in': container_ref,
    }
class Region(object):
    """A single Habitat region: a name, region-level parameters (compass
    neighbors, orientation, ...) and the tree of Mods it contains.

    Usually constructed via ``from_rdl_file`` / ``from_parse_results``, which
    populate ``params`` and ``mods`` from a pyparsing parse of an RDL file.
    """
    def __init__(self, name, params=None, mods=None, parse_results=None):
        self.name = name
        if params is None:
            self.params = {}
        else:
            self.params = params
        if mods is None:
            self.mods = []
        else:
            self.mods = mods
        if parse_results is not None:
            # It's much easier to work with the pure Python representation of a
            # pyparsing.ParseResults, hence this horrible hack.
            # NOTE(review): repr()-round-tripping the parse tree through exec
            # is fragile (and unsafe on untrusted input) — confirm before
            # reusing this pattern elsewhere.
            exec('self.raw_results = ' + parse_results.__repr__())
            self.results_dict = self.raw_results[1]

    def __repr__(self):
        return '<Region(name="{0}", params={1}, mods={2})>'.format(self.name, self.params,
                                                                   self.mods)

    @classmethod
    def from_rdl_file(cls, rdl_file):
        """Read and parse an .rdl file from disk, returning a Region."""
        # For now, we'll assume a 1-to-1 mapping between the region file name and the name
        # of the region
        region_name = os.path.basename(rdl_file.split('.')[-2])
        with open(rdl_file, 'r') as rdlfile:
            rdlfile_text = rdlfile.read()
        results = parser.region.parseString(rdlfile_text)
        return cls.from_parse_results(region_name, results)

    @classmethod
    def from_parse_results(cls, name, parse_results):
        """Build a Region (params + mod tree) from a pyparsing result tree."""
        region = cls(name=name, parse_results=parse_results)
        region._parse_params_from_results()
        region._parse_mods_from_results()
        return region

    @property
    def neohabitat_context(self):
        # Elko context ref for this region.
        return 'context-{0}'.format(self.name)

    def _parse_params_from_results(self):
        # Region-level "name: value;" pairs live in the first token group.
        self.params = self._parse_params(self.results_dict['region_params'][0][0])

    def _parse_params(self, param_tokens):
        """Fold a flat token stream of ``name : value ;`` triples into a dict.

        The tokenizer emits names, ':', values, ';' and newlines as separate
        tokens; this walks them with a tiny name/value state machine.
        """
        params = {}
        param_name = None
        param_value = None
        on_name = True
        for token in param_tokens:
            if token == '\n':
                pass
            elif ':' in token:
                # Separator between a name and its value.
                on_name = False
            elif token == ';':
                # End of one pair: commit it and expect a new name.
                params[param_name] = param_value
                on_name = True
            elif on_name:
                param_name = token
            else:
                param_value = token
        return params

    def _parse_mods_from_results(self):
        """Build the Mod tree (up to three levels of containment) from the
        parse results."""
        mods = self.results_dict['mods']
        for mod in mods[0][1]['mod']:
            mod_dict = mod[1]
            mod_identifier = mod_dict['mod_identifier'][0]
            mod_params = {}
            if 'mod_params' in mod_dict:
                mod_params.update(self._parse_params(mod_dict['mod_params'][0][0]))
            mod_params_additional = {}
            if 'mod_params_additional' in mod_dict:
                mod_params_additional.update(
                    self._parse_params(mod_dict['mod_params_additional'][0][0]))
            # Handles the parsing of contained mods using the power of Hack Mode 7.
            contained_mods = []
            if 'inner_mod_1' in mod_dict:
                for _ in range(len(mod_dict['inner_mod_1'])):
                    # pop(0) consumes the shared token list so entries are not
                    # re-processed for subsequent top-level mods; identifiers
                    # are indexed by how many children were built so far.
                    inner_mod_1 = mod_dict['inner_mod_1'].pop(0)
                    inner_mod_1_dict = inner_mod_1[1]
                    inner_mod_1_identifier = mod_dict['inner_mod_1_identifier'][len(contained_mods)]
                    inner_mod_1_params = {}
                    if 'inner_mod_1_params' in inner_mod_1_dict:
                        inner_mod_1_params.update(
                            self._parse_params(inner_mod_1_dict['inner_mod_1_params'][0][0]))
                    inner_mod_1_params_additional = {}
                    if 'inner_mod_1_params_additional' in inner_mod_1_dict:
                        inner_mod_1_params_additional.update(
                            self._parse_params(inner_mod_1_dict['inner_mod_1_params_additional'][0][0]))
                    inner_mod_1_contained_mods = []
                    if 'inner_mod_2' in mod_dict:
                        # Same consume-as-you-go pattern, one level deeper.
                        for _ in range(len(mod_dict['inner_mod_2'])):
                            inner_mod_2 = mod_dict['inner_mod_2'].pop(0)
                            inner_mod_2_dict = inner_mod_2[1]
                            inner_mod_2_identifier = mod_dict['inner_mod_2_identifier'][len(inner_mod_1_contained_mods)]
                            inner_mod_2_params = {}
                            if 'inner_mod_2_params' in inner_mod_2_dict:
                                inner_mod_2_params.update(
                                    self._parse_params(inner_mod_2_dict['inner_mod_2_params'][0][0]))
                            inner_mod_2_params_additional = {}
                            if 'inner_mod_2_params_additional' in inner_mod_2_dict:
                                inner_mod_2_params_additional.update(
                                    self._parse_params(inner_mod_2_dict['inner_mod_2_params_additional'][0][0]))
                            inner_mod_2_contained_mods = []
                            if 'inner_mod_3' in mod_dict:
                                # Deepest supported containment level.
                                for _ in range(len(mod_dict['inner_mod_3'])):
                                    inner_mod_3 = mod_dict['inner_mod_3'].pop(0)
                                    inner_mod_3_dict = inner_mod_3[1]
                                    inner_mod_3_identifier = mod_dict['inner_mod_3_identifier'][len(inner_mod_2_contained_mods)]
                                    inner_mod_3_params = {}
                                    if 'inner_mod_3_params' in inner_mod_3_dict:
                                        inner_mod_3_params.update(
                                            self._parse_params(inner_mod_3_dict['inner_mod_3_params'][0][0]))
                                    inner_mod_3_params_additional = {}
                                    if 'inner_mod_3_params_additional' in inner_mod_3_dict:
                                        inner_mod_3_params_additional.update(
                                            self._parse_params(inner_mod_3_dict['inner_mod_3_params_additional'][0][0]))
                                    inner_mod_2_contained_mods.append(
                                        Mod(region=self, identifier=inner_mod_3_identifier,
                                            params=inner_mod_3_params,
                                            additional_params=inner_mod_3_params_additional))
                            inner_mod_1_contained_mods.append(
                                Mod(region=self, identifier=inner_mod_2_identifier,
                                    params=inner_mod_2_params,
                                    additional_params=inner_mod_2_params_additional,
                                    contained_mods=inner_mod_2_contained_mods))
                    contained_mods.append(
                        Mod(region=self, identifier=inner_mod_1_identifier,
                            params=inner_mod_1_params,
                            additional_params=inner_mod_1_params_additional,
                            contained_mods=inner_mod_1_contained_mods))
            self.mods.append(Mod(region=self, identifier=mod_identifier, params=mod_params,
                                 additional_params=mod_params_additional, contained_mods=contained_mods))

    def to_json(self):
        """Serialize the region as a list: the Elko context record followed by
        every contained Mod (children before their containers, depth-first)."""
        region_mod = {
            'town_dir': '',
            'port_dir': '',
            'type': 'Region',
            'nitty_bits': 3,
            'neighbors': ['', '', '', ''],
        }
        # Compass neighbors map to fixed slots: N, E, S, W.
        if 'north' in self.params:
            region_mod['neighbors'][0] = 'context-{0}'.format(
                self.params['north'].split('.')[0])
        if 'east' in self.params:
            region_mod['neighbors'][1] = 'context-{0}'.format(
                self.params['east'].split('.')[0])
        if 'south' in self.params:
            region_mod['neighbors'][2] = 'context-{0}'.format(
                self.params['south'].split('.')[0])
        if 'west' in self.params:
            region_mod['neighbors'][3] = 'context-{0}'.format(
                self.params['west'].split('.')[0])
        if 'region_orientation' in self.params and self.params['region_orientation'] in ORIENTATION_TO_ID:
            region_mod['orientation'] = ORIENTATION_TO_ID[self.params['region_orientation']]
        region_context = {
            'type': 'context',
            'ref': self.neohabitat_context,
            'capacity': 64,
            'name': DEFAULT_REGIONATOR_NAME.format(self.name),
            'mods': [region_mod]
        }
        region_contents = [region_context]
        # Performs a depth-first search through the containership tree of all mods.
        def _dfs_mods(cur_mods):
            for mod in cur_mods:
                _dfs_mods(mod.contained_mods)
                region_contents.append(mod)
        _dfs_mods(self.mods)
        return region_contents
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import RestException
from desktop.models import Document2, Document
from libsolr.api import SolrApi
from indexer.management.commands import indexer_setup
from search.api import _guess_gap, _zoom_range_facet, _new_range_facet
from search.conf import SOLR_URL, LATEST
from search.data_export import download as export_download
from search.decorators import allow_owner_only, allow_viewer_only
from search.management.commands import search_setup
from search.models import Collection2, augment_solr_response, augment_solr_exception, pairwise2
from search.search_controller import SearchController
LOG = logging.getLogger(__name__)
def index(request):
    """Render the dashboard selected by the ``collection`` GET parameter, or
    fall back to the admin listing when nothing is selected."""
    available = SearchController(request.user).get_search_collections()
    requested_id = request.GET.get('collection')
    if not available or not requested_id:
        return admin_collections(request, True)
    try:
        dashboard_doc = Document2.objects.get(id=requested_id)
        dashboard_doc.doc.get().can_read_or_exception(request.user)
        dashboard = Collection2(request.user, document=dashboard_doc)
    except Exception as e:
        raise PopupException(e, title=_("Dashboard does not exist or you don't have the permission to access it."))
    context = {
        'collection': dashboard,
        'query': {'qs': [{'q': ''}], 'fqs': [], 'start': 0},
        'initial': json.dumps({'collections': [], 'layout': [], 'is_latest': LATEST.get()}),
        'is_owner': dashboard_doc.doc.get().can_write(request.user)
    }
    return render('search.mako', request, context)
def new_search(request):
    """Open a blank dashboard over the first available index."""
    collections = SearchController(request.user).get_all_indexes()
    if not collections:
        return no_collections(request)
    # Default two-column layout: empty facet column + filter bar and grid.
    filter_bar = {"size": 12, "name": "Filter Bar", "widgetType": "filter-widget",
                  "id": "99923aef-b233-9420-96c6-15d48293532b",
                  "properties": {}, "offset": 0, "isLoading": True, "klass": "card card-widget span12"}
    grid_results = {"size": 12, "name": "Grid Results", "widgetType": "resultset-widget",
                    "id": "14023aef-b233-9420-96c6-15d48293532b",
                    "properties": {}, "offset": 0, "isLoading": True, "klass": "card card-widget span12"}
    default_layout = [
        {"size": 2, "rows": [{"widgets": []}], "drops": ["temp"], "klass": "card card-home card-column span2"},
        {"size": 10, "rows": [{"widgets": [filter_bar]}, {"widgets": [grid_results]}],
         "drops": ["temp"], "klass": "card card-home card-column span10"},
    ]
    return render('search.mako', request, {
        'collection': Collection2(user=request.user, name=collections[0]),
        'query': {'qs': [{'q': ''}], 'fqs': [], 'start': 0},
        'initial': json.dumps({
            'collections': collections,
            'layout': default_layout,
            'is_latest': LATEST.get()
        }),
        'is_owner': True
    })
def browse(request, name):
    """Open an auto-loading grid view over the index *name*."""
    collections = SearchController(request.user).get_all_indexes()
    if not collections:
        return no_collections(request)
    grid_row = {"widgets": [
        {"size": 12, "name": "Grid Results", "id": "52f07188-f30f-1296-2450-f77e02e1a5c0",
         "widgetType": "resultset-widget", "properties": {}, "offset": 0,
         "isLoading": True, "klass": "card card-widget span12"}]}
    layout = [{"size": 12, "rows": [grid_row], "drops": ["temp"],
               "klass": "card card-home card-column span10"}]
    return render('search.mako', request, {
        'collection': Collection2(user=request.user, name=name),
        'query': {'qs': [{'q': ''}], 'fqs': [], 'start': 0},
        'initial': json.dumps({
            'autoLoad': True,
            'collections': collections,
            'layout': layout,
            'is_latest': LATEST.get()
        }),
        'is_owner': True
    })
@allow_viewer_only
def search(request):
    """Run the posted Solr query against the posted collection and return the
    (augmented) Solr response as JSON; errors are reported in ``error``."""
    response = {}
    collection = json.loads(request.POST.get('collection', '{}'))
    query = json.loads(request.POST.get('query', '{}'))
    query['download'] = 'download' in request.POST
    if collection:
        try:
            response = SolrApi(SOLR_URL.get(), request.user).query(collection, query)
            response = augment_solr_response(response, collection, query)
        except RestException as e:
            # Prefer the structured Solr error message when the payload is JSON.
            try:
                response['error'] = json.loads(e.message)['error']['msg']
            except Exception:
                response['error'] = force_unicode(e)
        except Exception as e:
            # Bug fix: an unreachable `response['error'] = force_unicode(e)`
            # followed this raise in the original; removed as dead code.
            raise PopupException(e, title=_('Error while accessing Solr'))
    else:
        response['error'] = _('There is no collection to search.')
    if 'error' in response:
        augment_solr_exception(response, collection)
    return JsonResponse(response)
@allow_owner_only
def save(request):
    """Create or update a search dashboard document from the posted
    collection definition and widget layout."""
    response = {'status': -1}
    collection = json.loads(request.POST.get('collection', '{}'))
    layout = json.loads(request.POST.get('layout', '{}'))
    if collection:
        # Escape user-supplied extra code before persisting. Bug fix: the
        # original escaped before the `if collection:` guard, raising
        # KeyError('template') on an empty payload instead of returning the
        # friendly message below.
        collection['template']['extracode'] = escape(collection['template']['extracode'])
        if collection['id']:
            dashboard_doc = Document2.objects.get(id=collection['id'])
        else:
            dashboard_doc = Document2.objects.create(name=collection['name'], uuid=collection['uuid'], type='search-dashboard', owner=request.user, description=collection['label'])
            Document.objects.link(dashboard_doc, owner=request.user, name=collection['name'], description=collection['label'], extra='search-dashboard')
        dashboard_doc.update_data({
            'collection': collection,
            'layout': layout
        })
        dashboard_doc.name = collection['label']
        dashboard_doc.description = collection['description']
        dashboard_doc.save()
        response['status'] = 0
        response['id'] = dashboard_doc.id
        response['message'] = _('Page saved !')
    else:
        response['message'] = _('There is no collection to search.')
    return JsonResponse(response)
@allow_owner_only
def save_definition(request):
    """Persist a search definition document tied to a collection.

    NOTE(review): this endpoint looks unfinished upstream — see the inline
    notes; only the clearly-dead duplicate assignment was removed.
    """
    response = {'status': -1}
    collection = json.loads(request.POST.get('collection', '{}'))  # id
    query = json.loads(request.POST.get('query', '{}'))
    # Placeholder metadata. Bug fix: query['name'] was assigned the same
    # value twice in the original; duplicate removed.
    query['name'] = 'My def'
    query['uuid'] = 'uuid'
    if collection and query:
        # NOTE(review): `Collection` is not among this module's imports (only
        # `Collection2` is) — this raises NameError when reached; confirm the
        # intended model before changing.
        collection = Collection.objects.get(id=collection['id'])
        if query['id']:
            # NOTE(review): looks up by collection['id']; probably should be
            # query['id'] — confirm with callers before changing.
            definition_doc = Document2.objects.get(id=collection['id'])
        else:
            definition_doc = Document2.objects.create(name=query['name'], uuid=query['uuid'], type='search-definition', owner=request.user, dependencies=[collection])
        #Document.objects.link(coordinator_doc, owner=coordinator_doc.owner, name=coordinator_doc.name, description=coordinator_doc.description, extra='coordinator2')
        definition_doc.update_data(query)
        definition_doc.save()
        response['status'] = 0
        response['id'] = definition_doc.id
        response['message'] = _('Definition saved !')
    else:
        response['message'] = _('There is no collection to search.')
    return JsonResponse(response)
@allow_viewer_only
def download(request):
    """Export the current search results as csv, xls or json (the POST body
    flags pick the format)."""
    try:
        if 'csv' in request.POST:
            file_format = 'csv'
        elif 'xls' in request.POST:
            file_format = 'xls'
        else:
            file_format = 'json'
        response = search(request)
        if file_format == 'json':
            docs = json.loads(response.content)['response']['docs']
            resp = JsonResponse(docs, safe=False)
            resp['Content-Disposition'] = 'attachment; filename=%s.%s' % ('query_result', file_format)
            return resp
        collection = json.loads(request.POST.get('collection', '{}'))
        return export_download(json.loads(response.content), file_format, collection)
    except Exception as e:
        raise PopupException(_("Could not download search results: %s") % e)
def no_collections(request):
    """Render the landing page shown when no search index is available."""
    return render('no_collections.mako', request, {})
def admin_collections(request, is_redirect=False):
    """List the user's dashboards; returns JSON when ``format=json`` is in
    the query string, otherwise the admin page."""
    existing_hue_collections = SearchController(request.user).get_search_collections()
    if request.GET.get('format') == 'json':
        payload = []
        for hue_collection in existing_hue_collections:
            entry = hue_collection.to_dict()
            entry['isOwner'] = hue_collection.doc.get().can_write(request.user)
            payload.append(entry)
        return JsonResponse(payload, safe=False)
    return render('admin_collections.mako', request, {
        'existing_hue_collections': existing_hue_collections,
        'is_redirect': is_redirect
    })
def admin_collection_delete(request):
    """Delete the posted dashboards; POST only."""
    if request.method != 'POST':
        raise PopupException(_('POST request required.'))
    posted = json.loads(request.POST.get('collections'))
    deleted = SearchController(request.user).delete_collections(
        [entry['id'] for entry in posted])
    return JsonResponse({'result': deleted})
def admin_collection_copy(request):
    """Copy the posted dashboards; POST only."""
    if request.method != 'POST':
        raise PopupException(_('POST request required.'))
    posted = json.loads(request.POST.get('collections'))
    copied = SearchController(request.user).copy_collections(
        [entry['id'] for entry in posted])
    return JsonResponse({'result': copied})
def query_suggest(request, collection_id, query=""):
    """Ask Solr for suggestions on *query* against the given collection.

    NOTE(review): `Collection` is not among this module's imports (only
    `Collection2` is) — this lookup looks like it would raise NameError when
    reached; confirm the intended model.
    """
    hue_collection = Collection.objects.get(id=collection_id)
    result = {'status': -1, 'message': 'Error'}
    solr_query = {}
    solr_query['collection'] = hue_collection.name
    solr_query['q'] = query
    try:
        response = SolrApi(SOLR_URL.get(), request.user).suggest(solr_query, hue_collection)
        result['message'] = response
        result['status'] = 0
    except Exception, e:
        result['message'] = force_unicode(e)
    return JsonResponse(result)
def index_fields_dynamic(request):
    """Return the dynamic fields of a Solr index as field descriptors plus
    matching grid-layout header entries."""
    result = {'status': -1, 'message': 'Error'}
    try:
        index_name = request.POST['name']
        dynamic_fields = SolrApi(SOLR_URL.get(), request.user).luke(index_name)
        # Materialize the dynamic subset once instead of scanning twice.
        dynamic_only = [(field_name, props)
                        for field_name, props in dynamic_fields['fields'].iteritems()
                        if 'dynamicBase' in props]
        result['message'] = ''
        result['fields'] = [Collection2._make_field(field_name, props)
                            for field_name, props in dynamic_only]
        result['gridlayout_header_fields'] = [
            Collection2._make_gridlayout_header_field({'name': field_name}, True)
            for field_name, props in dynamic_only]
        result['status'] = 0
    except Exception as e:
        result['message'] = force_unicode(e)
    return JsonResponse(result)
@allow_viewer_only
def get_document(request):
    """Fetch a single Solr document by id for the posted collection."""
    result = {'status': -1, 'message': 'Error'}
    try:
        collection = json.loads(request.POST.get('collection', '{}'))
        doc_id = request.POST.get('id')
        if not doc_id:
            result['message'] = _('This document does not have any index id.')
            result['status'] = 1
        else:
            result['doc'] = SolrApi(SOLR_URL.get(), request.user).get(collection['name'], doc_id)
            if result['doc']['doc']:
                result['status'] = 0
                result['message'] = ''
            else:
                result['status'] = 1
                result['message'] = _('No document was returned by Solr.')
    except Exception as e:
        result['message'] = force_unicode(e)
    return JsonResponse(result)
@allow_viewer_only
def get_stats(request):
    """Return Solr stats for one analyzed field, optionally faceted."""
    result = {'status': -1, 'message': 'Error'}
    try:
        collection = json.loads(request.POST.get('collection', '{}'))
        query = json.loads(request.POST.get('query', '{}'))
        analysis = json.loads(request.POST.get('analysis', '{}'))
        stats_field = analysis['name']
        stats_facet = analysis['stats']['facet']
        api = SolrApi(SOLR_URL.get(), request.user)
        result['stats'] = api.stats(collection['name'], [stats_field], query, stats_facet)
        result['status'] = 0
        result['message'] = ''
    except Exception as e:
        result['message'] = force_unicode(e)
        # Solr reports unsupported field types with this phrase; surface it
        # as a soft failure rather than a hard error.
        if 'not currently supported' in result['message']:
            result['status'] = 1
            result['message'] = _('This field does not support stats')
    return JsonResponse(result)
@allow_viewer_only
def get_terms(request):
    """Return the top terms (with counts) of one field of the collection,
    optionally restricted by prefix."""
    result = {'status': -1, 'message': 'Error'}
    try:
        collection = json.loads(request.POST.get('collection', '{}'))
        analysis = json.loads(request.POST.get('analysis', '{}'))
        field = analysis['name']
        properties = {
            'terms.limit': 25,
            'terms.prefix': analysis['terms']['prefix']
            # lower
            # limit
            # mincount
            # maxcount
        }
        result['terms'] = SolrApi(SOLR_URL.get(), request.user).terms(collection['name'], field, properties)
        result['terms'] = pairwise2(field, [], result['terms']['terms'][field])
        result['status'] = 0
        result['message'] = ''
    except Exception as e:
        result['message'] = force_unicode(e)
        if 'not currently supported' in result['message']:
            result['status'] = 1
            # Bug fix: the message previously said "stats", copy-pasted from
            # get_stats(); this endpoint serves terms.
            result['message'] = _('This field does not support terms')
    return JsonResponse(result)
@allow_viewer_only
def get_timeline(request):
    """Compute one timeline series (histogram counts) for a query or a single
    facet value.

    ``multiQ`` selects the mode: 'query' plots one of the dashboard's
    sub-queries; otherwise a single facet range bucket or discrete value is
    isolated into the filter list and plotted.
    """
    result = {'status': -1, 'message': 'Error'}
    try:
        collection = json.loads(request.POST.get('collection', '{}'))
        query = json.loads(request.POST.get('query', '{}'))
        facet = json.loads(request.POST.get('facet', '{}'))
        qdata = json.loads(request.POST.get('qdata', '{}'))
        multiQ = request.POST.get('multiQ', 'query')
        if multiQ == 'query':
            # Plot a single sub-query on its own.
            label = qdata['q']
            query['qs'] = [qdata]
        elif facet['type'] == 'range':
            # Plot one bucket of a range facet; qdata carries the bucket start.
            _prop = filter(lambda prop: prop['from'] == qdata, facet['properties'])[0]
            label = '%(from)s - %(to)s ' % _prop
            facet_id = facet['id']
            # Only care about our current field:value filter
            for fq in query['fqs']:
                if fq['id'] == facet_id:
                    fq['properties'] = [_prop]
        else:
            # Plot one discrete facet value.
            label = qdata
            facet_id = facet['id']
            # Only care about our current field:value filter
            for fq in query['fqs']:
                if fq['id'] == facet_id:
                    fq['filter'] = [{'value': qdata, 'exclude': False}]
        # Remove other facets from collection for speed
        collection['facets'] = filter(lambda f: f['widgetType'] == 'histogram-widget', collection['facets'])
        response = SolrApi(SOLR_URL.get(), request.user).query(collection, query)
        response = augment_solr_response(response, collection, query)
        label += ' (%s) ' % response['response']['numFound']
        result['series'] = {'label': label, 'counts': response['normalized_facets'][0]['counts']}
        result['status'] = 0
        result['message'] = ''
    except Exception, e:
        result['message'] = force_unicode(e)
    return JsonResponse(result)
@allow_viewer_only
def new_facet(request):
    """Create the default facet definition for a newly dropped widget."""
    result = {'status': -1, 'message': 'Error'}
    try:
        collection = json.loads(request.POST.get('collection', '{}'))
        result['message'] = ''
        result['facet'] = _create_facet(
            collection,
            request.user,
            request.POST['id'],
            request.POST['label'],
            request.POST['field'],
            request.POST['widget_type'],
        )
        result['status'] = 0
    except Exception as e:
        result['message'] = force_unicode(e)
    return JsonResponse(result)
def _create_facet(collection, user, facet_id, facet_label, facet_field, widget_type):
    """Build the default facet dictionary for a dashboard widget.

    The facet ``type`` is derived from the widget type (pivot, function,
    nested, range or plain field — the last two decided by asking Solr), and
    ``properties`` starts from shared defaults then gets per-widget tweaks.
    """
    pivot_widgets = ('tree-widget', 'heatmap-widget', 'map-widget')
    properties = {
        'sort': 'desc',
        'canRange': False,
        'stacked': False,
        'limit': 10,
        'mincount': 0,
        'isDate': False,
        'aggregate': 'unique'
    }
    if widget_type in pivot_widgets:
        facet_type = 'pivot'
    elif widget_type == 'hit-widget':
        facet_type = 'function'
    else:
        # Ask Solr whether the field can be rendered as a range facet.
        range_properties = _new_range_facet(
            SolrApi(SOLR_URL.get(), user), collection, facet_field, widget_type)
        if range_properties:
            facet_type = 'range'
            properties.update(range_properties)
            # Remember the initial window so zooming can be reset later.
            properties['initial_gap'] = properties['gap']
            properties['initial_start'] = properties['start']
            properties['initial_end'] = properties['end']
        else:
            facet_type = 'field'
    if widget_type == 'bucket-widget':
        facet_type = 'nested'
        properties['facets_form'] = {'field': '', 'mincount': 1, 'limit': 10, 'aggregate': 'count'}
        properties['facets'] = []
        properties['scope'] = 'stack'
    properties['timelineChartType'] = 'bar'
    if widget_type in pivot_widgets:
        properties['mincount'] = 1
        properties['facets'] = []
        properties['stacked'] = True
        properties['facets_form'] = {'field': '', 'mincount': 1, 'limit': 5}
        if widget_type == 'map-widget':
            properties['scope'] = 'world'
            properties['limit'] = 100
        else:
            properties['scope'] = 'stack' if widget_type == 'heatmap-widget' else 'tree'
    return {
        'id': facet_id,
        'label': facet_label,
        'field': facet_field,
        'type': facet_type,
        'widgetType': widget_type,
        'properties': properties
    }
@allow_viewer_only
def get_range_facet(request):
    """Adjust a range facet's buckets: re-guess the gap on 'select', zoom out
    for any other action."""
    result = {'status': -1, 'message': ''}
    try:
        collection = json.loads(request.POST.get('collection', '{}'))
        facet = json.loads(request.POST.get('facet', '{}'))
        action = request.POST.get('action', 'select')
        solr_api = SolrApi(SOLR_URL.get(), request.user)
        if action != 'select':
            # Zoom out
            new_properties = _zoom_range_facet(solr_api, collection, facet)
        else:
            new_properties = _guess_gap(solr_api, collection, facet,
                                        facet['properties']['start'], facet['properties']['end'])
        result['properties'] = new_properties
        result['status'] = 0
    except Exception as e:
        result['message'] = force_unicode(e)
    return JsonResponse(result)
def get_collection(request):
    """Return the full JSON definition of one index/collection by name."""
    result = {'status': -1, 'message': ''}
    try:
        index_name = request.POST['name']
        collection = Collection2(request.user, name=index_name)
        result['collection'] = json.loads(collection.get_json(request.user))
        result['status'] = 0
    except Exception as e:
        result['message'] = force_unicode(e)
    return JsonResponse(result)
def get_collections(request):
    """List all index names, degrading to just the posted collection's name
    when the user lacks listing privileges."""
    result = {'status': -1, 'message': ''}
    try:
        show_all = json.loads(request.POST.get('show_all'))
        result['collection'] = SearchController(request.user).get_all_indexes(show_all=show_all)
        result['status'] = 0
    except Exception as e:
        if 'does not have privileges' in str(e):
            # Listing is forbidden: fall back to the one collection posted.
            result['status'] = 0
            result['collection'] = [json.loads(request.POST.get('collection'))['name']]
        else:
            result['message'] = force_unicode(e)
    return JsonResponse(result)
def install_examples(request):
    """Install the search and indexer example collections. Superuser + POST
    only; returns a JSON status."""
    result = {'status': -1, 'message': ''}
    if not request.user.is_superuser:
        # Bug fix: the original *returned* the PopupException instance (not a
        # valid HttpResponse) instead of raising it.
        raise PopupException(_("You must be a superuser."))
    if request.method != 'POST':
        result['message'] = _('A POST request is required.')
    else:
        try:
            search_setup.Command().handle_noargs()
            indexer_setup.Command().handle_noargs()
            result['status'] = 0
        except Exception as e:
            LOG.exception(e)
            result['message'] = str(e)
    return JsonResponse(result)
|
|
import re
import socket
import base64
import hashlib
import os
import sys
try:
# Python 3
from http.client import HTTPS_PORT
from urllib.request import parse_keqv_list, parse_http_list
except (ImportError):
# Python 2
from httplib import HTTPS_PORT
from urllib2 import parse_keqv_list, parse_http_list
from ..console_write import console_write
from .debuggable_https_response import DebuggableHTTPSResponse
from .debuggable_http_connection import DebuggableHTTPConnection
from .invalid_certificate_exception import InvalidCertificateException
# The following code is wrapped in a try because the Linux versions of Sublime
# Text do not include the ssl module due to the fact that different distros
# have different versions
try:
import ssl
class ValidatingHTTPSConnection(DebuggableHTTPConnection):
"""
A custom HTTPConnection class that validates SSL certificates, and
allows proxy authentication for HTTPS connections.
"""
default_port = HTTPS_PORT
response_class = DebuggableHTTPSResponse
_debug_protocol = 'HTTPS'
def __init__(self, host, port=None, key_file=None, cert_file=None,
             ca_certs=None, **kwargs):
    """
    Set up the connection, forwarding only the kwargs the base class
    understands, and record the SSL material used later by connect().
    """
    forwarded = {}
    for key in ('timeout', 'debug'):
        if key in kwargs:
            forwarded[key] = kwargs[key]
    DebuggableHTTPConnection.__init__(self, host, port, **forwarded)
    self.passwd = kwargs.get('passwd')
    self.key_file = key_file
    self.cert_file = cert_file
    self.ca_certs = ca_certs
    if 'user_agent' in kwargs:
        self.user_agent = kwargs['user_agent']
    # Certificates are only verified when a CA bundle was provided.
    self.cert_reqs = ssl.CERT_REQUIRED if self.ca_certs else ssl.CERT_NONE
def get_valid_hosts_for_cert(self, cert):
    """
    Returns the hostnames an SSL certificate is valid for: the DNS entries
    of subjectAltName when present, otherwise the commonName values from
    the subject.

    :param cert: A dict from SSLSocket.getpeercert()

    :return: An array of hostnames
    """
    if 'subjectAltName' in cert:
        hosts = []
        for field_type, field_value in cert['subjectAltName']:
            if field_type.lower() == 'dns':
                hosts.append(field_value)
        return hosts
    hosts = []
    for rdn in cert['subject']:
        if rdn[0][0].lower() == 'commonname':
            hosts.append(rdn[0][1])
    return hosts
def validate_cert_host(self, cert, hostname):
"""
Checks if the cert is valid for the hostname
:param cert: A dict from SSLSocket.getpeercert()
:param hostname: A string hostname to check
:return: A boolean if the cert is valid for the hostname
"""
hosts = self.get_valid_hosts_for_cert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
def _tunnel(self):
    """
    This custom _tunnel method allows us to read and print the debug
    log for the whole response before throwing an error, and adds
    support for proxy authentication
    """
    # Remember the proxy endpoint, then point host/port at the final
    # destination so the CONNECT request line targets the real server.
    self._proxy_host = self.host
    self._proxy_port = self.port
    self._set_hostport(self._tunnel_host, self._tunnel_port)
    self._tunnel_headers['Host'] = u"%s:%s" % (self.host, self.port)
    self._tunnel_headers['User-Agent'] = self.user_agent
    self._tunnel_headers['Proxy-Connection'] = 'Keep-Alive'
    request = "CONNECT %s:%d HTTP/1.1\r\n" % (self.host, self.port)
    for header, value in self._tunnel_headers.items():
        request += "%s: %s\r\n" % (header, value)
    request += "\r\n"
    # HTTP headers are latin-1 on the wire under Python 3.
    if sys.version_info >= (3,):
        request = bytes(request, 'iso-8859-1')
    self.send(request)
    response = self.response_class(self.sock, method=self._method)
    (version, code, message) = response._read_status()
    status_line = u"%s %s %s" % (version, code, message.rstrip())
    headers = [status_line]
    if self.debuglevel in [-1, 5]:
        console_write(u'Urllib %s Debug Read' % self._debug_protocol, True)
        console_write(u" %s" % status_line)
    content_length = 0
    close_connection = False
    # Drain the proxy's response headers, tracking the body length and
    # whether the proxy intends to close the connection.
    while True:
        line = response.fp.readline()
        if sys.version_info >= (3,):
            line = str(line, encoding='iso-8859-1')
        if line == '\r\n':
            break
        headers.append(line.rstrip())
        parts = line.rstrip().split(': ', 1)
        name = parts[0].lower()
        value = parts[1].lower().strip()
        if name == 'content-length':
            content_length = int(value)
        if name in ['connection', 'proxy-connection'] and value == 'close':
            close_connection = True
        if self.debuglevel in [-1, 5]:
            console_write(u" %s" % line.rstrip())
    # Handle proxy auth for SSL connections since regular urllib punts on this
    if code == 407 and self.passwd and 'Proxy-Authorization' not in self._tunnel_headers:
        if content_length:
            # Consume the 407 body so the socket is positioned for a retry.
            response._safe_read(content_length)
        supported_auth_methods = {}
        for line in headers:
            parts = line.split(': ', 1)
            if parts[0].lower() != 'proxy-authenticate':
                continue
            details = parts[1].split(' ', 1)
            supported_auth_methods[details[0].lower()] = details[1] if len(details) > 1 else ''
        username, password = self.passwd.find_user_password(None, "%s:%s" % (
            self._proxy_host, self._proxy_port))
        # Prefer digest auth over basic when the proxy offers both.
        if 'digest' in supported_auth_methods:
            response_value = self.build_digest_response(
                supported_auth_methods['digest'], username, password)
            if response_value:
                self._tunnel_headers['Proxy-Authorization'] = u"Digest %s" % response_value
        elif 'basic' in supported_auth_methods:
            response_value = u"%s:%s" % (username, password)
            # NOTE(review): b64encode of a unicode string raises TypeError on
            # Python 3 — confirm whether basic proxy auth is exercised there.
            response_value = base64.b64encode(response_value).strip()
            self._tunnel_headers['Proxy-Authorization'] = u"Basic %s" % response_value
        if 'Proxy-Authorization' in self._tunnel_headers:
            # Point back at the proxy and retry the CONNECT, now carrying
            # the Proxy-Authorization header (recursion terminates because
            # the header-present check above prevents a second retry).
            self.host = self._proxy_host
            self.port = self._proxy_port
            # If the proxy wanted the connection closed, we need to make a new connection
            if close_connection:
                self.sock.close()
                self.sock = socket.create_connection((self.host, self.port), self.timeout)
            return self._tunnel()
    if code != 200:
        self.close()
        raise socket.error("Tunnel connection failed: %d %s" % (code,
                                                                message.strip()))
def build_digest_response(self, fields, username, password):
"""
Takes a Proxy-Authenticate: Digest header and creates a response
header
:param fields:
The string portion of the Proxy-Authenticate header after
"Digest "
:param username:
The username to use for the response
:param password:
The password to use for the response
:return:
None if invalid Proxy-Authenticate header, otherwise the
string of fields for the Proxy-Authorization: Digest header
"""
fields = parse_keqv_list(parse_http_list(fields))
realm = fields.get('realm')
nonce = fields.get('nonce')
qop = fields.get('qop')
algorithm = fields.get('algorithm')
if algorithm:
algorithm = algorithm.lower()
opaque = fields.get('opaque')
if algorithm in ['md5', None]:
def md5hash(string):
return hashlib.md5(string).hexdigest()
hash = md5hash
elif algorithm == 'sha':
def sha1hash(string):
return hashlib.sha1(string).hexdigest()
hash = sha1hash
else:
return None
host_port = u"%s:%s" % (self.host, self.port)
a1 = "%s:%s:%s" % (username, realm, password)
a2 = "CONNECT:%s" % host_port
ha1 = hash(a1)
ha2 = hash(a2)
if qop == None:
response = hash(u"%s:%s:%s" % (ha1, nonce, ha2))
elif qop == 'auth':
nc = '00000001'
cnonce = hash(os.urandom(8))[:8]
response = hash(u"%s:%s:%s:%s:%s:%s" % (ha1, nonce, nc, cnonce, qop, ha2))
else:
return None
response_fields = {
'username': username,
'realm': realm,
'nonce': nonce,
'response': response,
'uri': host_port
}
if algorithm:
response_fields['algorithm'] = algorithm
if qop == 'auth':
response_fields['nc'] = nc
response_fields['cnonce'] = cnonce
response_fields['qop'] = qop
if opaque:
response_fields['opaque'] = opaque
return ', '.join([u"%s=\"%s\"" % (field, response_fields[field]) for field in response_fields])
def connect(self):
    """
    Adds debugging and SSL certification validation

    Opens the TCP connection (tunneling through the proxy when
    configured), upgrades it to SSL and, when certificate checking is
    enabled, validates the server certificate hostname.
    """

    if self.debuglevel == -1:
        console_write(u"Urllib HTTPS Debug General", True)
        console_write(u" Connecting to %s on port %s" % (self.host, self.port))

    self.sock = socket.create_connection((self.host, self.port), self.timeout)
    if self._tunnel_host:
        self._tunnel()

    if self.debuglevel == -1:
        console_write(u"Urllib HTTPS Debug General", True)
        console_write(u" Connecting to %s on port %s" % (self.host, self.port))
        console_write(u" CA certs file at %s" % (self.ca_certs.decode(sys.getfilesystemencoding())))

    self.sock = ssl.wrap_socket(self.sock, keyfile=self.key_file,
        certfile=self.cert_file, cert_reqs=self.cert_reqs,
        ca_certs=self.ca_certs)

    if self.debuglevel == -1:
        console_write(u" Successfully upgraded connection to %s:%s with SSL" % (
            self.host, self.port))

    # This debugs and validates the SSL certificate
    if self.cert_reqs & ssl.CERT_REQUIRED:
        cert = self.sock.getpeercert()

        if self.debuglevel == -1:
            # X.509 subject field name -> short abbreviation used in output.
            # (The original literal listed 'commonName' twice; the duplicate
            # entry has been removed - dict literals silently keep the last.)
            subjectMap = {
                'organizationName': 'O',
                'commonName': 'CN',
                'organizationalUnitName': 'OU',
                'countryName': 'C',
                'serialNumber': 'serialNumber',
                'localityName': 'L',
                'stateOrProvinceName': 'S'
            }
            subject_list = list(cert['subject'])
            subject_list.reverse()
            subject_parts = []
            for pair in subject_list:
                if pair[0][0] in subjectMap:
                    field_name = subjectMap[pair[0][0]]
                else:
                    field_name = pair[0][0]
                subject_parts.append(field_name + '=' + pair[0][1])

            console_write(u" Server SSL certificate:")
            console_write(u" subject: " + ','.join(subject_parts))
            if 'subjectAltName' in cert:
                console_write(u" common name: " + cert['subjectAltName'][0][1])
            if 'notAfter' in cert:
                console_write(u" expire date: " + cert['notAfter'])

        # BUG FIX: split(':', 0) performed NO split (maxsplit=0), so a host of
        # the form "example.com:443" was never stripped of its port before
        # hostname validation.
        hostname = self.host.split(':')[0]

        if not self.validate_cert_host(cert, hostname):
            if self.debuglevel == -1:
                console_write(u" Certificate INVALID")

            raise InvalidCertificateException(hostname, cert,
                'hostname mismatch')

        if self.debuglevel == -1:
            console_write(u" Certificate validated for %s" % hostname)
except (ImportError):
pass
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the ``storybase_user`` app.

    Adds publication-workflow fields (``status``/``published``) to the
    Project and Organization models, plus ``contact_info`` on
    Organization.  The ``models`` attribute below is the South-generated
    frozen ORM snapshot and should not be edited by hand.
    """

    def forwards(self, orm):
        """Apply the migration: add the new columns."""
        # Adding field 'Project.status'
        db.add_column('storybase_user_project', 'status',
                      self.gf('django.db.models.fields.CharField')(default=u'draft', max_length=10),
                      keep_default=False)

        # Adding field 'Project.published'
        db.add_column('storybase_user_project', 'published',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Organization.status'
        db.add_column('storybase_user_organization', 'status',
                      self.gf('django.db.models.fields.CharField')(default=u'draft', max_length=10),
                      keep_default=False)

        # Adding field 'Organization.published'
        db.add_column('storybase_user_organization', 'published',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Organization.contact_info'
        db.add_column('storybase_user_organization', 'contact_info',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Reverse the migration: drop the columns added in forwards()."""
        # Deleting field 'Project.status'
        db.delete_column('storybase_user_project', 'status')

        # Deleting field 'Project.published'
        db.delete_column('storybase_user_project', 'published')

        # Deleting field 'Organization.status'
        db.delete_column('storybase_user_organization', 'status')

        # Deleting field 'Organization.published'
        db.delete_column('storybase_user_organization', 'published')

        # Deleting field 'Organization.contact_info'
        db.delete_column('storybase_user_organization', 'contact_info')

    # South frozen-ORM snapshot (auto-generated; do not edit by hand)
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'storybase_asset.asset': {
            'Meta': {'object_name': 'Asset'},
            'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'asset_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'license': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'storybase_asset.dataset': {
            'Meta': {'object_name': 'DataSet'},
            'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'dataset_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'links_to_file': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
        },
        'storybase_geo.geolevel': {
            'Meta': {'object_name': 'GeoLevel'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'storybase_geo.location': {
            'Meta': {'object_name': 'Location'},
            'address': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
            'address2': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'location_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['auth.User']"}),
            'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
            'postcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'raw': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'storybase_geo.place': {
            'Meta': {'object_name': 'Place'},
            'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
            'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_geo.Place']", 'null': 'True', 'through': "orm['storybase_geo.PlaceRelation']", 'blank': 'True'}),
            'geolevel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'places'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('storybase.fields.ShortTextField', [], {}),
            'place_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
        },
        'storybase_geo.placerelation': {
            'Meta': {'unique_together': "(('parent', 'child'),)", 'object_name': 'PlaceRelation'},
            'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_parent'", 'to': "orm['storybase_geo.Place']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_child'", 'to': "orm['storybase_geo.Place']"})
        },
        'storybase_story.story': {
            'Meta': {'object_name': 'Story'},
            'allow_connected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
            'byline': ('django.db.models.fields.TextField', [], {}),
            'contact_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
            'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'license': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
            'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_geo.Location']"}),
            'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
            'places': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_geo.Place']"}),
            'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'related_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'related_to'", 'blank': 'True', 'through': "orm['storybase_story.StoryRelation']", 'to': "orm['storybase_story.Story']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
            'story_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'template_story': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'template_for'", 'null': 'True', 'to': "orm['storybase_story.Story']"}),
            'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_taxonomy.Category']"})
        },
        'storybase_story.storyrelation': {
            'Meta': {'object_name': 'StoryRelation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'relation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'relation_type': ('django.db.models.fields.CharField', [], {'default': "'connected'", 'max_length': '25'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'target'", 'to': "orm['storybase_story.Story']"}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source'", 'to': "orm['storybase_story.Story']"})
        },
        'storybase_taxonomy.category': {
            'Meta': {'object_name': 'Category'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_taxonomy.Category']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'storybase_taxonomy.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
            'tag_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
        },
        'storybase_taxonomy.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'storybase_taxonomy_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['storybase_taxonomy.Tag']"})
        },
        'storybase_user.organization': {
            'Meta': {'object_name': 'Organization'},
            'contact_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
            'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_organizations'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationMembership']", 'to': "orm['auth.User']"}),
            'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'organization_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
            'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'storybase_user.organizationmembership': {
            'Meta': {'object_name': 'OrganizationMembership'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'member_type': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '140'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'storybase_user.organizationstory': {
            'Meta': {'object_name': 'OrganizationStory'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
            'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
            'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'storybase_user.organizationtranslation': {
            'Meta': {'unique_together': "(('organization', 'language'),)", 'object_name': 'OrganizationTranslation'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('storybase.fields.ShortTextField', [], {}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
            'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
        },
        'storybase_user.project': {
            'Meta': {'object_name': 'Project'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
            'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_projects'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectMembership']", 'to': "orm['auth.User']"}),
            'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
            'project_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
            'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'storybase_user.projectmembership': {
            'Meta': {'object_name': 'ProjectMembership'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'member_type': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '140'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'storybase_user.projectstory': {
            'Meta': {'object_name': 'ProjectStory'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
            'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
            'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'storybase_user.projecttranslation': {
            'Meta': {'unique_together': "(('project', 'language'),)", 'object_name': 'ProjectTranslation'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
            'name': ('storybase.fields.ShortTextField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
            'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
        },
        'storybase_user.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notify_admin': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'notify_digest': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'notify_story_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'notify_story_featured': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'notify_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'profile_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }

    # Apps whose frozen models are fully described above
    complete_apps = ['storybase_user']
|
|
import itertools
from eth_utils import (
encode_hex,
to_tuple,
is_list_like,
coerce_return_to_text,
event_abi_to_log_topic,
)
from eth_abi import (
decode_abi,
decode_single,
encode_single,
)
from eth_abi.abi import (
process_type,
)
from .abi import (
exclude_indexed_event_inputs,
get_abi_input_names,
get_indexed_event_inputs,
map_abi_data,
normalize_event_input_types,
)
from web3.utils.normalizers import (
BASE_RETURN_NORMALIZERS,
)
@coerce_return_to_text
def construct_event_topic_set(event_abi, arguments=None):
    """Build the list of topic filter sets for *event_abi*.

    Each returned entry starts with the event signature topic, followed by
    one encoded value for every indexed input (when any argument for that
    permutation is constrained).
    """
    if arguments is None:
        arguments = {}
    if isinstance(arguments, (list, tuple)):
        if len(arguments) != len(event_abi['inputs']):
            raise ValueError(
                "When passing an argument list, the number of arguments must "
                "match the event constructor."
            )
        arguments = {
            arg['name']: [arg_value]
            for arg, arg_value in zip(event_abi['inputs'], arguments)
        }

    # Every argument value becomes a list of candidate values
    candidate_values = {
        name: values if is_list_like(values) else [values]
        for name, values in arguments.items()
    }

    event_topic = encode_hex(event_abi_to_log_topic(event_abi))

    # For each indexed input, hex-encode every candidate (None = wildcard)
    encoded_option_lists = []
    for arg in get_indexed_event_inputs(event_abi):
        options = candidate_values.get(arg['name'], [None])
        encoded_option_lists.append([
            None if option is None else encode_hex(encode_single(arg['type'], option))
            for option in options
        ])

    topics = []
    for permutation in itertools.product(*encoded_option_lists):
        if any(item is not None for item in permutation):
            topics.append([event_topic] + list(permutation))
        else:
            topics.append([event_topic])
    return topics
@coerce_return_to_text
def construct_event_data_set(event_abi, arguments=None):
    """Build the candidate data-section value sets for *event_abi*.

    Mirrors ``construct_event_topic_set`` but operates on the
    NON-indexed event inputs (the log ``data`` section) and omits the
    event signature topic.

    :param event_abi: the ABI dict describing the event
    :param arguments: dict (or positional list) mapping input names to a
        value or list of candidate values; missing names are wildcards
    :return: list of encoded-value permutations (empty list entry when a
        permutation is entirely unconstrained)
    """
    if arguments is None:
        arguments = {}
    if isinstance(arguments, (list, tuple)):
        if len(arguments) != len(event_abi['inputs']):
            raise ValueError(
                "When passing an argument list, the number of arguments must "
                "match the event constructor."
            )
        arguments = {
            arg['name']: [arg_value]
            for arg, arg_value
            in zip(event_abi['inputs'], arguments)
        }

    normalized_args = {
        key: value if is_list_like(value) else [value]
        for key, value in arguments.items()
    }

    # FIX (naming): these are the NON-indexed inputs - the previous local
    # name `indexed_args` said the opposite of what the value holds.
    non_indexed_args = exclude_indexed_event_inputs(event_abi)
    zipped_abi_and_args = [
        (arg, normalized_args.get(arg['name'], [None]))
        for arg in non_indexed_args
    ]
    encoded_args = [
        [
            None if option is None else encode_hex(encode_single(arg['type'], option))
            for option in arg_options]
        for arg, arg_options in zipped_abi_and_args
    ]

    # Likewise renamed from `topics`: this is data-section content
    data = [
        list(permutation)
        if any(value is not None for value in permutation)
        else []
        for permutation in itertools.product(*encoded_args)
    ]
    return data
def is_dynamic_sized_type(_type):
    """Return True when the ABI type string denotes a dynamically-sized type.

    Arrays, ``string`` and unsized ``bytes`` are dynamic; everything else
    (e.g. ``uint256``, ``bytes32``) has a fixed encoded size.
    """
    base_type, type_size, arrlist = process_type(_type)
    return bool(
        arrlist
        or base_type == 'string'
        or (base_type == 'bytes' and type_size == '')
    )
@to_tuple
def get_event_abi_types_for_decoding(event_inputs):
    """
    Event logs use the `sha3(value)` for indexed inputs of type `bytes` or
    `string`.  Because of this we need to modify the types so that we can
    decode the log entries using the correct types.
    """
    for input_abi in event_inputs:
        # Indexed dynamic values are stored as their 32-byte hash
        if not input_abi['indexed']:
            yield input_abi['type']
        elif is_dynamic_sized_type(input_abi['type']):
            yield 'bytes32'
        else:
            yield input_abi['type']
def get_event_data(event_abi, log_entry):
    """
    Given an event ABI and a log entry for that event, return the decoded
    event arguments together with the log entry metadata.
    """
    # Anonymous events do not reserve topics[0] for the event signature
    if event_abi['anonymous']:
        topics = log_entry['topics']
    else:
        topics = log_entry['topics'][1:]

    # Indexed inputs arrive via the log topics
    indexed_inputs = get_indexed_event_inputs(event_abi)
    topic_types = get_event_abi_types_for_decoding(
        normalize_event_input_types(indexed_inputs)
    )
    topic_names = get_abi_input_names({'inputs': indexed_inputs})

    if len(topics) != len(topic_types):
        raise ValueError("Expected {0} log topics. Got {1}".format(
            len(topic_types),
            len(topics),
        ))

    # Non-indexed inputs arrive ABI-encoded in the data section
    data_inputs = exclude_indexed_event_inputs(event_abi)
    data_types = get_event_abi_types_for_decoding(
        normalize_event_input_types(data_inputs)
    )
    data_names = get_abi_input_names({'inputs': data_inputs})

    # sanity check that there are not name intersections between the topic
    # names and the data argument names - merging them below would silently
    # drop one of the values.
    duplicate_names = set(topic_names).intersection(data_names)
    if duplicate_names:
        raise ValueError(
            "Invalid Event ABI: The following argument names are duplicated "
            "between event inputs: '{0}'".format(', '.join(duplicate_names))
        )

    normalized_data = map_abi_data(
        BASE_RETURN_NORMALIZERS,
        data_types,
        decode_abi(data_types, log_entry['data']),
    )
    normalized_topics = map_abi_data(
        BASE_RETURN_NORMALIZERS,
        topic_types,
        [
            decode_single(topic_type, topic_value)
            for topic_type, topic_value in zip(topic_types, topics)
        ],
    )

    event_args = dict(itertools.chain(
        zip(topic_names, normalized_topics),
        zip(data_names, normalized_data),
    ))

    return {
        'args': event_args,
        'event': event_abi['name'],
        'logIndex': log_entry['logIndex'],
        'transactionIndex': log_entry['transactionIndex'],
        'transactionHash': log_entry['transactionHash'],
        'address': log_entry['address'],
        'blockHash': log_entry['blockHash'],
        'blockNumber': log_entry['blockNumber'],
    }
|
|
__all__ = ['fixed_quad','quadrature','romberg','trapz','simps','romb',
'cumtrapz','newton_cotes','composite']
from scipy.special.orthogonal import p_roots
from scipy.special import gammaln
from numpy import sum, ones, add, diff, isinf, isscalar, \
asarray, real, trapz, arange, empty
import numpy as np
def fixed_quad(func, a, b, args=(), n=5):
    """Compute a definite integral using fixed-order Gaussian quadrature.

    Integrate func from a to b using Gaussian quadrature of order n.

    Inputs:

      func -- a Python function or method to integrate
              (must accept vector inputs)
      a -- lower limit of integration
      b -- upper limit of integration
      args -- extra arguments to pass to function.
      n -- order of quadrature integration.

    Outputs: (val, None)

      val -- Gaussian quadrature approximation to the integral.
             Exact for polynomials of degree 2*n - 1 or lower.

    See also:

      quad - adaptive quadrature using QUADPACK
      dblquad, tplquad - double and triple integrals
      romberg - adaptive Romberg quadrature
      quadrature - adaptive Gaussian quadrature
      romb, simps, trapz - integrators for sampled data
      cumtrapz - cumulative integration for sampled data
      ode, odeint - ODE integrators
    """
    # Gauss-Legendre sample points and weights on [-1, 1]
    [x, w] = p_roots(n)
    x = real(x)
    ainf, binf = map(isinf, (a, b))
    if ainf or binf:
        # Call form instead of `raise E, msg` so this also parses on Python 3
        raise ValueError("Gaussian quadrature is only available for "
                         "finite limits.")
    # Affine map of the nodes from [-1, 1] onto [a, b]
    y = (b-a)*(x+1)/2.0 + a
    return (b-a)/2.0*sum(w*func(y, *args), 0), None
def vectorize1(func, args=(), vec_func=False):
    """Vectorize the call to a function.

    This is an internal utility function used by `romberg` and
    `quadrature` to create a vectorized version of a function.

    If `vec_func` is True, the function `func` is assumed to take vector
    arguments.

    Parameters
    ----------
    func : callable
        User defined function.
    args : tuple
        Extra arguments for the function.
    vec_func : bool
        True if the function func takes vector arguments.

    Returns
    -------
    vfunc : callable
        A function that will take a vector argument and return the
        result.
    """
    if vec_func:
        def vfunc(x):
            return func(x, *args)
    else:
        def vfunc(x):
            if isscalar(x):
                return func(x, *args)
            x = asarray(x)
            # call with first point to get output type
            y0 = func(x[0], *args)
            n = len(x)
            if hasattr(y0, 'dtype'):
                output = empty((n,), dtype=y0.dtype)
            else:
                output = empty((n,), dtype=type(y0))
            output[0] = y0
            # `range` replaces the Python-2-only `xrange` (a NameError on
            # Python 3; behavior here is identical on either version).
            for i in range(1, n):
                output[i] = func(x[i], *args)
            return output
    return vfunc
def quadrature(func,a,b,args=(),tol=1.49e-8,maxiter=50, vec_func=True):
    """Compute a definite integral using fixed-tolerance Gaussian quadrature.

    Integrates ``func`` from ``a`` to ``b``, raising the quadrature order
    until successive estimates differ by less than ``tol`` (or ``maxiter``
    orders have been tried).

    Parameters
    ----------
    func : callable
        A Python function or method to integrate.
    a, b : float
        Lower / upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to ``func``.
    tol : float, optional
        Iteration stops when the error between the last two iterates is
        less than this tolerance.
    maxiter : int, optional
        Maximum number of iterations (quadrature orders tried).
    vec_func : bool, optional
        True if ``func`` handles arrays as arguments (is a "vector"
        function).  Default is True.

    Returns
    -------
    val : float
        Gaussian quadrature approximation (within tolerance) to integral.
    err : float
        Difference between the last two estimates of the integral.

    See also:

      romberg - adaptive Romberg quadrature
      fixed_quad - fixed-order Gaussian quadrature
      quad - adaptive quadrature using QUADPACK
      dblquad, tplquad - double and triple integrals
      romb, simps, trapz - integrators for sampled data
      cumtrapz - cumulative integration for sampled data
      ode, odeint - ODE integrators
    """
    err = 100.0
    val = err
    n = 1
    vfunc = vectorize1(func, args, vec_func=vec_func)
    while (err > tol) and (n < maxiter):
        newval = fixed_quad(vfunc, a, b, (), n)[0]
        err = abs(newval-val)
        val = newval
        n = n + 1
    if n == maxiter:
        # print() call form works on both Python 2 and 3 (was a
        # Python-2-only print statement).
        print("maxiter (%d) exceeded. Latest difference = %e" % (n, err))
    return val, err
def tupleset(t, i, value):
    """Return a copy of tuple ``t`` with position ``i`` replaced by ``value``."""
    items = list(t)
    items[i] = value
    return tuple(items)
def cumtrapz(y, x=None, dx=1.0, axis=-1):
    """Cumulatively integrate y(x) along the given axis using the composite
    trapezoidal rule.  If x is None, uniform spacing ``dx`` is assumed.

    See also:

      quad - adaptive quadrature using QUADPACK
      romberg - adaptive Romberg quadrature
      quadrature - adaptive Gaussian quadrature
      fixed_quad - fixed-order Gaussian quadrature
      dblquad, tplquad - double and triple integrals
      romb, trapz - integrators for sampled data
      cumtrapz - cumulative integration for sampled data
      ode, odeint - ODE integrators
    """
    y = asarray(y)
    spacing = dx if x is None else diff(x, axis=axis)
    nd = len(y.shape)
    # Index the upper and lower endpoints of each interval along `axis`.
    upper = [slice(None)] * nd
    lower = [slice(None)] * nd
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    segments = spacing * (y[tuple(upper)] + y[tuple(lower)]) / 2.0
    return add.accumulate(segments, axis)
def _basic_simps(y,start,stop,x,dx,axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
all = (slice(None),)*nd
slice0 = tupleset(all, axis, slice(start, stop, step))
slice1 = tupleset(all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(all, axis, slice(start+2, stop+2, step))
if x is None: # Even spaced Simpson's rule.
result = add.reduce(dx/3.0* (y[slice0]+4*y[slice1]+y[slice2]),
axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = diff(x,axis=axis)
sl0 = tupleset(all, axis, slice(start, stop, step))
sl1 = tupleset(all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
result = add.reduce(hsum/6.0*(y[slice0]*(2-1.0/h0divh1) + \
y[slice1]*hsum*hsum/hprod + \
y[slice2]*(2-h0divh1)),axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
    """Integrate y(x) using samples along the given axis and the composite
    Simpson's rule.  If x is None, spacing of dx is assumed.

    If there are an even number of samples, N, then there are an odd
    number of intervals (N-1), but Simpson's rule requires an even number
    of intervals.  The parameter 'even' controls how this is handled as
    follows:

      even='avg': Average two results: 1) use the first N-2 intervals with
        a trapezoidal rule on the last interval and 2) use the last
        N-2 intervals with a trapezoidal rule on the first interval
      even='first': Use Simpson's rule for the first N-2 intervals with
        a trapezoidal rule on the last interval.
      even='last': Use Simpson's rule for the last N-2 intervals with a
        trapezoidal rule on the first interval.

    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less.  If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.

    Raises
    ------
    ValueError
        If ``x`` has an incompatible shape, or ``even`` is not one of
        'avg', 'last', 'first'.

    See also:

      quad - adaptive quadrature using QUADPACK
      romberg - adaptive Romberg quadrature
      quadrature - adaptive Gaussian quadrature
      fixed_quad - fixed-order Gaussian quadrature
      dblquad, tplquad - double and triple integrals
      romb, trapz - integrators for sampled data
      cumtrapz - cumulative integration for sampled data
      ode, odeint - ODE integrators
    """
    y = asarray(y)
    nd = len(y.shape)
    N = y.shape[axis]
    last_dx = dx
    first_dx = dx
    returnshape = 0
    if x is not None:
        x = asarray(x)
        if len(x.shape) == 1:
            # Reshape 1-d x so it broadcasts against y along `axis`.
            # Integer dims are required: ones(nd) alone yields floats,
            # which modern numpy's reshape rejects.
            shapex = ones(nd, dtype=int)
            shapex[axis] = x.shape[0]
            saveshape = x.shape
            returnshape = 1
            x = x.reshape(tuple(shapex))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-d or the "
                             "same as y.")
        if x.shape[axis] != N:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")
    if N % 2 == 0:
        val = 0.0
        result = 0.0
        slice1 = (slice(None),)*nd
        slice2 = (slice(None),)*nd
        if even not in ['avg', 'last', 'first']:
            raise ValueError(
                "Parameter 'even' must be 'avg', 'last', or 'first'.")
        # Compute using Simpson's rule on first intervals
        if even in ['avg', 'first']:
            slice1 = tupleset(slice1, axis, -1)
            slice2 = tupleset(slice2, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            # Trapezoid on the final interval.
            val += 0.5*last_dx*(y[slice1]+y[slice2])
            result = _basic_simps(y, 0, N-3, x, dx, axis)
        # Compute using Simpson's rule on last set of intervals
        if even in ['avg', 'last']:
            slice1 = tupleset(slice1, axis, 0)
            slice2 = tupleset(slice2, axis, 1)
            if x is not None:
                first_dx = x[slice2] - x[slice1]
            # Trapezoid on the first interval.
            val += 0.5*first_dx*(y[slice2]+y[slice1])
            result += _basic_simps(y, 1, N-2, x, dx, axis)
        if even == 'avg':
            val /= 2.0
            result /= 2.0
        result = result + val
    else:
        result = _basic_simps(y, 0, N-2, x, dx, axis)
    if returnshape:
        x = x.reshape(saveshape)
    return result
def romb(y, dx=1.0, axis=-1, show=False):
"""Romberg integration using samples of a function
Inputs:
y - a vector of 2**k + 1 equally-spaced samples of a fucntion
dx - the sample spacing.
axis - the axis along which to integrate
show - When y is a single 1-d array, then if this argument is True
print the table showing Richardson extrapolation from the
samples.
Output: ret
ret - The integrated result for each axis.
See also:
quad - adaptive quadrature using QUADPACK
romberg - adaptive Romberg quadrature
quadrature - adaptive Gaussian quadrature
fixed_quad - fixed-order Gaussian quadrature
dblquad, tplquad - double and triple integrals
simps, trapz - integrators for sampled data
cumtrapz - cumulative integration for sampled data
ode, odeint - ODE integrators
"""
y = asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError, \
"Number of samples must be one plus a non-negative power of 2."
R = {}
all = (slice(None),) * nd
slice0 = tupleset(all, axis, 0)
slicem1 = tupleset(all, axis, -1)
h = Ninterv*asarray(dx)*1.0
R[(1,1)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = all
start = stop = step = Ninterv
for i in range(2,k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start,stop,step))
step >>= 1
R[(i,1)] = 0.5*(R[(i-1,1)] + h*add.reduce(y[slice_R],axis))
for j in range(2,i+1):
R[(i,j)] = R[(i,j-1)] + \
(R[(i,j-1)]-R[(i-1,j-1)]) / ((1 << (2*(j-1)))-1)
h = h / 2.0
if show:
if not isscalar(R[(1,1)]):
print "*** Printing table only supported for integrals" + \
" of a single data set."
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%" + str(width) + '.' + str(precis)+'f'
print "\n Richardson Extrapolation Table for Romberg Integration "
print "===================================================================="
for i in range(1,k+1):
for j in range(1,i+1):
print formstr % R[(i,j)],
print
print "====================================================================\n"
return R[(k,k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h;
points = lox + h * arange(0, numtosum)
s = sum(function(points),0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print 'Romberg integration of', `function`,
print 'from', interval
print ''
print '%6s %9s %9s' % ('Steps', 'StepSize', 'Results')
for i in range(len(resmat)):
print '%6d %9f' % (2**i, (interval[1]-interval[0])/(i+1.0)),
for j in range(i+1):
print '%9f' % (resmat[i][j]),
print ''
print ''
print 'The final result is', resmat[i][j],
print 'after', 2**(len(resmat)-1)+1, 'function evaluations.'
def romberg(function, a, b, args=(), tol=1.48E-8, show=False,
            divmax=10, vec_func=False):
    """Romberg integration of a callable function or method.

    Returns the integral of ``function`` (a function of one variable)
    from ``a`` to ``b``, computed with Romberg integration until two
    successive estimates differ by less than ``tol`` or ``divmax``
    interval subdivisions have been performed.  If ``show`` is true, the
    triangular array of intermediate results is printed.  If
    ``vec_func`` is True (default is False), ``function`` is assumed to
    support vector arguments.

    Raises ValueError for infinite integration limits.

    See also:

      quad - adaptive quadrature using QUADPACK
      quadrature - adaptive Gaussian quadrature
      fixed_quad - fixed-order Gaussian quadrature
      dblquad, tplquad - double and triple integrals
      romb, simps, trapz - integrators for sampled data
      cumtrapz - cumulative integration for sampled data
      ode, odeint - ODE integrators
    """
    if isinf(a) or isinf(b):
        raise ValueError("Romberg integration only available for finite limits.")
    vfunc = vectorize1(function, args, vec_func=vec_func)
    interval = [a, b]
    intrange = b - a
    n = 1
    ordsum = _difftrap(vfunc, interval, n)
    result = intrange * ordsum
    resmat = [[result]]
    # Seed so that the first convergence test always fails.
    lastresult = result + 2.0 * tol
    i = 1
    while abs(result - lastresult) > tol and i <= divmax:
        n *= 2
        ordsum += _difftrap(vfunc, interval, n)
        # New row: refined trapezoid estimate, then successive
        # Richardson extrapolations against the previous row.
        row = [intrange * ordsum / n]
        for level in range(i):
            row.append(_romberg_diff(resmat[i - 1][level], row[level], level + 1))
        resmat.append(row)
        result = row[i]
        lastresult = resmat[i - 1][i - 1]
        i += 1
    if show:
        _printresmat(vfunc, interval, resmat)
    return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
# Precomputed Newton-Cotes table keyed by order N.  Each entry is
# (num_a, den_a, int_a, num_B, den_B): the weights are
# a = num_a*array(int_a)/den_a and the error coefficient is
# B = num_B/den_B (see the derivation notes in the comment block above).
_builtincoeffs = {
    1:(1,2,[1,1],-1,12),
    2:(1,3,[1,4,1],-1,90),
    3:(3,8,[1,3,3,1],-3,80),
    4:(2,45,[7,32,12,32,7],-8,945),
    5:(5,288,[19,75,50,50,75,19],-275,12096),
    6:(1,140,[41,216,27,272,27,216,41],-9,1400),
    7:(7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
    8:(4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
       -2368,467775),
    9:(9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
                15741,2857], -4671, 394240),
    10:(5,299376,[16067,106300,-48525,272400,-260550,427368,
                  -260550,272400,-48525,106300,16067],
        -673175, 163459296),
    11:(11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
                     15493566,15493566,-9595542,25226685,-3237113,
                     13486539,2171465], -2224234463, 237758976000),
    12:(1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
                     87516288,-87797136,87516288,-51491295,35725120,
                     -7587864,9903168,1364651], -3012, 875875),
    13:(13, 402361344000,[8181904909, 56280729661, -31268252574,
                          156074417954,-151659573325,206683437987,
                          -43111992612,-43111992612,206683437987,
                          -151659573325,156074417954,-31268252574,
                          56280729661,8181904909], -2639651053,
        344881152000),
    14:(7, 2501928000, [90241897,710986864,-770720657,3501442784,
                        -6625093363,12630121616,-16802270373,19534438464,
                        -16802270373,12630121616,-6625093363,3501442784,
                        -770720657,710986864,90241897], -3740727473,
        1275983280000)
    }
def newton_cotes(rn,equal=0):
    r"""Return weights and error coefficient for Newton-Cotes integration.

    Suppose we have (N+1) samples of f at the positions
    x_0, x_1, ..., x_N.  Then an N-point Newton-Cotes formula for the
    integral between x_0 and x_N is:

    $\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
                               + B_N (\Delta x)^{N+2} f^{N+1} (\xi)$

    where $\xi \in [x_0,x_N]$ and $\Delta x = \frac{x_N-x_0}{N}$ is the
    average sample spacing.

    If the samples are equally-spaced and N is even, then the error
    term is $B_N (\Delta x)^{N+3} f^{N+2}(\xi)$.

    Normally, the Newton-Cotes rules are used on smaller integration
    regions and a composite rule is used to return the total integral.

    Parameters
    ----------
    rn : int or sequence
        The integer order for equally-spaced data, or the relative
        positions of the samples with the first sample at 0 and the
        last at N, where N+1 is the length of rn.  N is the order of
        the Newton-Cotes integration.
    equal : int
        Set to 1 to enforce equally spaced data.

    Returns
    -------
    an : ndarray
        1-d array of weights to apply to the function at the provided
        sample positions.
    B : float
        Error coefficient.

    Raises
    ------
    ValueError
        If the sample positions do not start at 0 and end at N.
    """
    try:
        N = len(rn) - 1
        # Convert early so later arithmetic (rn / float(N), rn ** power)
        # also works when a plain Python list is passed.
        rn = np.asarray(rn)
        if equal:
            rn = np.arange(N + 1)
        elif np.all(np.diff(rn) == 1):
            equal = 1
    except TypeError:
        # rn is a scalar order: generate equally spaced positions 0..N.
        # (Narrowed from a bare `except:` which hid unrelated errors.)
        N = rn
        rn = np.arange(N + 1)
        equal = 1
    if equal and N in _builtincoeffs:
        na, da, vi, nb, db = _builtincoeffs[N]
        return na * np.array(vi, float) / da, float(nb) / db
    if (rn[0] != 0) or (rn[-1] != N):
        raise ValueError("The sample positions must start at 0"
                         " and end at N")
    yi = rn / float(N)
    ti = 2.0 * yi - 1
    nvec = np.arange(0, N + 1)
    # Vandermonde-style system in the Chebyshev-like variable ti.
    # Plain ndarrays with explicit np.dot replace the np.mat/.I/.A
    # matrix API, which was removed in NumPy 2.0.
    C = ti ** nvec[:, np.newaxis]
    Cinv = np.linalg.inv(C)
    # improve precision of result: two Newton polishing steps on the inverse
    Cinv = 2 * Cinv - np.dot(Cinv, np.dot(C, Cinv))
    Cinv = 2 * Cinv - np.dot(Cinv, np.dot(C, Cinv))
    vec = 2.0 / (nvec[::2] + 1)
    ai = np.dot(Cinv[:, ::2], vec) * N / 2
    if (N % 2 == 0) and equal:
        BN = N / (N + 3.)
        power = N + 2
    else:
        BN = N / (N + 2.)
        power = N + 1
    BN = BN - np.dot(yi ** power, ai)
    p1 = power + 1
    # np.log/np.exp: the original referenced `math`, which this module
    # never imported (a latent NameError on this code path).
    fac = power * np.log(N) - gammaln(p1)
    fac = np.exp(fac)
    return ai, BN * fac
# Should only use if samples are forced on you
def composite(f,x=None,dx=1,axis=-1,n=5):
    # Placeholder listed in __all__ but never implemented: composite
    # integration of sampled data is currently a no-op returning None.
    pass
|
|
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###########################################################################
# This module is responsible for creating the overall debugger controls
# as well as setting up the UI's basic tabbed interface.
#
# Its additions are managed by the debug_overlay object, which contains all the
# MainWindow additions relating to debugger control.
import pygtk
pygtk.require('2.0')
import gtk
import os.path
import code
import exceptions
import sys
import dbus
import dbus.service
from util import *
from resources import Resources
from call_stack_tab import CallStackTab
from thread_tab import ThreadTab
from process_tab import ProcessTab
from breakpoint_tab import *
from breakpoint_persistence_manager import *
from stack_explorer import StackExplorer
from output_tab import OutputTab
from interactive_tab import InteractiveTab
from python_tab import PythonTab
from attach_to_process_dialog import AttachToProcessDialog
from butter_bar import *
from main_options_dialog import *
from debugger import *
UI_STATUS_NOT_DEBUGGING = 'UI Status Not Debugging'
UI_STATUS_BREAK = 'UI Status Break'
UI_STATUS_RUNNING = 'UI Status Running'
UI_LAYOUT_EDIT = "UIEditMode"
UI_LAYOUT_RUN = "UIRunMode"
class MainControlBase(dbus.service.Object):
  # Read-only accessors over the private main-window / debugger state.
  mw = property(lambda self: self._mw)
  always_overlay = property(lambda self: self._always_overlay)
  when_not_debugging_overlay = property(lambda self: self._when_not_debugging_overlay)
  when_debugging_overlay = property(lambda self: self._when_debugging_overlay)
  when_break_overlay = property(lambda self: self._when_break_overlay)
  when_running_overlay = property(lambda self: self._when_running_overlay)
  debugger = property(lambda self: self._debugger)
  editor = property(lambda self: self._editor)
  filemanager = property(lambda self: self._filemanager)
  # These two delegate to the main window rather than to our own fields.
  resources = property(lambda self: self._mw.resources)
  settings = property(lambda self: self._settings)
  butter_bar_collection = property(lambda self: self._mw.butter_bar_collection)
  def new_overlay(self,name):
    # Create a named overlay on the main window; overlays group UI
    # additions so they can be shown/hidden together by debugger state.
    return self._mw.new_overlay(name)
def __init__(self, settings, mw):
self._settings = settings
self._mw = mw
self._registered_process = RegisteredProcess()
dbus.service.Object.__init__(self, dbus.SessionBus(), "/MainControl")
self._mw.connect('show', self._on_show)
self._when_running_overlay = self.new_overlay("Run only overlay")
self._when_break_overlay = self.new_overlay("Break only overlay")
self._when_not_debugging_overlay = self.new_overlay("Not debugging overlay")
self._when_debugging_overlay = self.new_overlay("Debugging only overlay")
self._always_overlay = self.new_overlay("Always-on overlay")
self._debugger = Debugger()
self._filemanager = FileManager(self.settings, self.debugger)
assert self.ui_status == UI_STATUS_NOT_DEBUGGING
self._mw.layout = UI_LAYOUT_EDIT
self._init_launcher()
self._init_bbar_system()
# center stage setup... this doesn't go through an overlay yet, sigh
if self.settings.Editor == "SourceViewEditor":
log1("Initializing SourceViewEditor")
sourceviewmodule = __import__("ui.source_view_editor",fromlist=[True])
editor = sourceviewmodule.SourceViewEditor(self)
elif self.settings.Editor == "GVimEditor":
log1("Initializing GVimEditor")
gvimeditormodule = __import__("ui.gvim_editor",fromlist=[True])
editor = gvimeditormodule.GVimEditor(self)
elif self.settings.Editor == "EmacsEditor":
log1("Initializing EmacsEditor")
emacseditormodule = __import__("ui.emacs_editor",fromlist=[True])
editor = emacseditormodule.EmacsEditor(self)
else:
raise Exception("~/.ndbg : Editor=%s is not one of the recognieed settings." % self.settings.Editor)
mw.add_center_stage(editor.widget)
self._editor = editor
self._always_overlay.add_tabs_menu_item("tabs.editor", lambda x,y: self.focus_editor())
# persistence managers
###########################################################################
bpm = BreakpointPersistenceManager(self)
self._bpm = bpm
# tabs and overlay setup
###########################################################################
cs = CallStackTab(self)
self._when_break_overlay.add_tab(cs,"tabpage.call_stack")
self._when_break_overlay.add_tabs_menu_item("tabs.call_stack", lambda x,y: self._focus_tab(cs)),
ot = OutputTab(self)
self._always_overlay.add_tab(ot,"tabpage.output")
self._always_overlay.add_tabs_menu_item("tabs.output", lambda x,y: self._focus_tab(ot))
bt = BreakpointTab(self)
self._always_overlay.add_tab(bt,"tabpage.breakpoints")
self._always_overlay.add_tabs_menu_item("tabs.breakpoints", lambda x,y: self._focus_tab(bt)),
# sx = StackExplorer(self)
# self._when_break_overlay.add_tab(sx)
tt = ThreadTab(self)
self._when_break_overlay.add_tab(tt,"tabpage.threads")
self._when_break_overlay.add_tabs_menu_item("tabs.threads", lambda x,y: self._focus_tab(tt)),
pt = ProcessTab(self)
self._when_debugging_overlay.add_tab(pt,"tabpage.processes")
self._when_debugging_overlay.add_tabs_menu_item("tabs.processes", lambda x,y: self._focus_tab(pt))
it = InteractiveTab(self)
self._when_break_overlay.add_tab(it,"tabpage.interactive")
def focus_interactive_tab():
self._focus_tab(it)
it.focus_entry()
self._when_break_overlay.add_tabs_menu_item("tabs.interactive", lambda x,y: focus_interactive_tab())
pyt = PythonTab(self)
self._always_overlay.add_tab(pyt,"tabpage.python")
def focus_python_tab():
self._focus_tab(pyt)
pyt.focus_entry()
self._always_overlay.add_tabs_menu_item("tabs.python", lambda x,y: focus_python_tab())
self._always_overlay.add_tools_menu_item('tools.options', self._on_tools_options_clicked)
# debug menu items
self._when_running_overlay.add_debug_menu_item("debug.break", self._on_break)
self._when_break_overlay.add_debug_menu_item('debug.step_over', self._on_step_over)
self._when_break_overlay.add_debug_menu_item('debug.step_into', self._on_step_into)
self._when_break_overlay.add_debug_menu_item('debug.step_out', self._on_step_out)
self._when_break_overlay.add_debug_menu_item('debug.continue', self._on_continue)
self._always_overlay.add_debug_menu_item('debug.launch_process', lambda *args: self._launch_process())
self._always_overlay.add_debug_menu_item('debug.attach_to_process', lambda *args: self._attach_to_pids())
self._when_debugging_overlay.add_debug_menu_item('debug.end_debugging', self._end_debugging)
# previous debugging
settings.register("RunPrimaryExecutableMode", str, "active")
self._when_not_debugging_overlay.add_debug_menu_item('debug.run_primary_executable', lambda *args: self._run_primary_executable())
self._when_not_debugging_overlay.add_debug_menu_item('debug.run_primary_executable_suspended', lambda *args: self._run_primary_executable_suspended())
self._primary_executable = None
# event listening
self._previous_ui_status = self.ui_status
self._widget_that_had_focus = { UI_STATUS_NOT_DEBUGGING: None, UI_STATUS_BREAK: None, UI_STATUS_RUNNING: None }
self._debugger.status_changed.add_listener(self._on_status_changed)
self._debugger.passive_processes.changed.add_listener(lambda: self._update_title())
# get it going
mw.show()
self._update_title()
self._debugger.fire_all_listeners()
for panel in mw.panels.values():
self.resources.apply_small_fontsize(panel)
# apply fontsize tweak
for ovl in mw.overlays:
for tab in ovl.tabs:
self.resources.apply_small_fontsize(tab)
def _on_tools_options_clicked(self,*args):
dlg = MainOptionsDialog(self.settings)
if self._primary_executable:
dlg.primary_executable = self._primary_executable
res = dlg.run()
if res == gtk.RESPONSE_OK:
changed = ProcessUtils.shlex_join(self._primary_executable) != ProcessUtils.shlex_join(dlg.primary_executable)
if changed and self.debugger.num_processes_of_all_types != 0:
b = ButterBar("Changes to primary exectuable arguments will not take effect until you end debugging.")
b.set_stock_icon(gtk.STOCK_DIALOG_WARNING)
def on_restart():
suspended = self.debugger.status == STATUS_BREAK
self._end_debugging()
self._run_primary_executable(suspended)
b.add_button("Re-run program", on_restart)
b.add_close_button()
self.butter_bar_collection.add_bar(b)
self._primary_executable = dlg.primary_executable
self._on_status_changed()
  def D(self):
    # Developer helper (callable from the Python tab): toggle the gdb
    # backend's debug window.
    import debugger.gdb_backend
    debugger.gdb_backend.gdb_toggle_enable_debug_window()
def focus_editor(self):
if hasattr(self._editor,'grab_focus'):
log1("Focusing Editor via editor.grab_focus")
self._editor.grab_focus()
else:
log1("Focusing Editor via editor.widget.grab_focus")
self._editor.widget.grab_focus()
def _on_load(self):
log1("Main control: processing command line arguments...")
if self._settings.ExecLaunch != None:
self._primary_executable = self._settings.ExecLaunch
self._run_primary_executable(suspended=True)
elif self._settings.ExecAttach != -1:
self._attach_to_pids([self._settings.ExecAttach])
  def _on_show(self,*args):
    # Defer command-line processing until after the window is mapped so
    # the UI appears immediately; _on_load returns None, so the timeout
    # fires only once.
    log2("Main control: window shown, scheduling load in 200ms")
    glib.timeout_add(200, self._on_load)
  def _end_debugging(self,*args):
    """Stop all debugging: launchable processes stop being debugged,
    passive and debugged processes we launched are killed, and processes
    we merely attached to are detached."""
    for proc in list(self.debugger.launchable_processes):
      proc.end_debugging()
    for proc in list(self.debugger.passive_processes):
      if proc.was_launched:
        proc.kill()
      else:
        proc.detach()
    if self._debugger.status == STATUS_RUNNING:
      # The debugger must be stopped before processes can be killed or
      # detached; wait for the interrupt to complete.
      self._debugger.begin_interrupt().wait()
    procs = list(self._debugger.processes) # copy it 'cause we're about to muck around
    for proc in procs:
      if proc.was_launched:
        proc.kill()
      else:
        proc.detach()
def _run_primary_executable(self, suspended = False):
if self.settings.RunPrimaryExecutableMode == "active":
self._run_primary_executable_active(suspended)
elif self.settings.RunPrimaryExecutableMode == "passive":
self._run_primary_executable_passive()
else:
raise Exception("Unrecognized rerun mode.")
  def _run_primary_executable_suspended(self):
    """Run the primary executable stopped at startup.  Passive mode cannot
    suspend, so fall back to active debugging and tell the user via a
    self-closing butter bar."""
    if self.settings.RunPrimaryExecutableMode == "active":
      self._run_primary_executable_active(True)
    elif self.settings.RunPrimaryExecutableMode == "passive":
      b = ButterBar("You have passive debugging selected. Debugging process anyway...")
      b.set_stock_icon(gtk.STOCK_DIALOG_INFO)
      b.add_close_button()
      self.butter_bar_collection.add_bar(b)
      def autoclose():
        # Dismiss the notice after 5s unless the user already closed it.
        if b.get_parent():
          self.butter_bar_collection.close_bar(b)
      MessageLoop.add_delayed_message(autoclose, 5000)
      self._run_primary_executable_active(True)
    else:
      raise Exception("Unrecognized rerun mode.")
def _run_primary_executable_active(self,suspended = False):
if self._primary_executable:
self.find_tab(OutputTab).on_rerun()
self._launch_process(self._primary_executable,suspended = suspended)
else:
self._launch_process(suspended = True) # prompt for program...
def _run_primary_executable_passive(self):
if self._primary_executable:
args=self._primary_executable
log2("Launching %s", args)
sub = subprocess.Popen(args)
proc = DPassiveProcess(sub, was_launched=True)
self.debugger.passive_processes.append(proc)
self._rerun_passive_process = proc
  def destroy(self):
    """Shut the application down in dependency order: end debugging first,
    then tear down the editor, main window, debugger and file manager."""
    self._end_debugging()
    log2("Shutting down editor")
    self._editor.destroy()
    log2("Shutting down mw")
    self._mw.destroy()
    log2("Shutting down debugger")
    self._debugger.shutdown()
    log2("Destroying filemanager")
    self._filemanager.shutdown()
    log2("Destroy complete")
    self._registered_process = None
def _launch_process(self,launch_args = None, suspended = True):
if launch_args == None:
dlg = gtk.Dialog("Launch process",
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_OK,gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
hbox = gtk.HBox()
label = gtk.Label("Command line:")
entry = gtk.Entry()
entry.set_size_request(400,-1)
entry.set_activates_default(True)
hbox.pack_start(label,False,False,4)
hbox.pack_start(entry,True,True,0)
hbox.show_all()
dlg.get_content_area().pack_start(hbox,False,False,0)
dlg.set_default_response(gtk.RESPONSE_OK)
resp = dlg.run()
dlg.hide()
if resp != gtk.RESPONSE_OK or entry.get_text() == "":
return
launch_args = shlex.split(entry.get_text())
# do launch
self._focus_tab(self.find_tab(OutputTab))
status_dlg = StatusDialog("Status")
was_running = self.debugger.status == STATUS_RUNNING
if was_running:
status_dlg.status = "Stopping other processes..."
self.debugger.begin_interrupt().wait()
self._primary_executable = launch_args
status_dlg.status = "Beginning launch..."
launch_done = self.debugger.begin_launch_suspended(launch_args)
status_dlg.status = "Loading symbols..."
def on_done(proc):
log2("Launch Done")
status_dlg.destroy_please()
if was_running or suspended == False:
assert self.debugger.status == STATUS_BREAK
self.debugger.active_thread.begin_resume()
launch_done.when_done(on_done)
def get_hidden_pids(self):
hidden_pids = []
# ui
hidden_pids += [os.getpid()]
# debuggers
hidden_pids += [backend.debugger_pid for backend in self.debugger._backends if backend.debugger_pid]
# currently attached processes
hidden_pids += [proc.backend_info.pid for proc in self.debugger.processes if proc.backend_info]
return hidden_pids
def _attach_to_pids(self,pids = []):
if type(pids) != list:
raise "Expected pids to be a list"
if len(pids) == 0:
hidden_pids = self.get_hidden_pids()
dlg = AttachToProcessDialog(self._settings, hidden_pids)
resp = dlg.run()
dlg.hide()
if resp != gtk.RESPONSE_OK:
return
pids = dlg.selected_pids
if len(pids) == 0:
raise Exception("pids should be nonzero in length.")
self._focus_tab(self.find_tab(OutputTab))
status_dlg = StatusDialog("Debugger Status")
was_running = self.debugger.status == STATUS_RUNNING
if was_running:
status_dlg.status = "Stopping other processes..."
self.debugger.begin_interrupt().wait()
try:
cmdline = ProcessUtils.get_pid_full_cmdline(pids[0])
status_dlg.status = "Attaching to pid %s:\n%s" % (pids[0], cmdline)
except Exception, ex:
status_dlg.status = "Attaching to process #%s" % pids[0]
pid_attached = self.debugger.begin_attach_to_pid(pids[0])
def do_next_pid(proc):
del pids[0]
if len(pids) == 0:
log2("no more pids")
status_dlg.hide()
if was_running:
self.debugger.active_thread.begin_resume()
else:
try:
cmdline = ProcessUtils.get_pid_full_cmdline(pids[0])
status_dlg.status = "Attaching to pid %s:\n%s" % (pids[0], cmdline)
except Exception, ex:
status_dlg.status = "Attaching to process #%s" % pids[0]
pid_attached = self.debugger.begin_attach_to_pid(pids[0])
pid_attached.when_done(do_next_pid)
pid_attached.when_done(do_next_pid)
def find_tab_by_id(self,tab_id):
for ovl in self._mw.overlays:
t = ovl.find_tab_by_id(tab_id)
if t:
return t
return None
def find_tab(self,tabType):
for ovl in self._mw.overlays:
t = ovl.find_tab(tabType)
if t:
return t
return None
def _focus_tab(self,tab):
book = tab.get_parent()
book_pages = book.get_children()
for i in range(0,len(book_pages)):
if book_pages[i] == tab:
book.set_current_page(i)
if hasattr(tab, 'special_grab_focus'):
tab.special_grab_focus()
return
firstChild = tab.get_children()[0]
if type(firstChild) == gtk.ScrolledWindow:
target = firstChild.get_children()[0]
target.grab_focus()
else:
firstChild.grab_focus()
def focus_location(self,l):
self._editor.focus_location(l)
self.focus_editor()
def _on_break(self,*args):
self.debugger.begin_interrupt()
def _on_continue(self,*args):
self.debugger.active_thread.begin_resume()
def _on_step_into(self,*args):
assert(self.debugger.status == STATUS_BREAK)
self.debugger.active_thread.begin_step_into()
def _on_step_over(self,*args):
assert(self.debugger.status == STATUS_BREAK)
self.debugger.active_thread.begin_step_over()
def _on_step_out(self,*args):
assert(self.debugger.status == STATUS_BREAK)
self.debugger.active_thread.begin_step_out()
###########################################################################
def _on_status_changed(self):
self._update_title()
self._update_overlays()
  def _update_title(self):
    """Recompute and set the main-window title from debugger state.

    The title names the first attached process (plus a "+N more" count),
    or the configured primary executable when nothing is attached, with a
    [Running]/[Stopped]/... sub-status suffix.
    """
    debugger = self.debugger
    name = None
    if len(debugger.processes):
      assert len(debugger.processes) != 0
      assert debugger.first_added_process != None
      first_valid_name = None
      n_others = 0
      # Name after the first process with live backend info; count the rest.
      for proc in debugger.processes:
        if proc.backend_info:
          if not first_valid_name:
            first_valid_name = os.path.basename(proc.target_exe)
          else:
            n_others += 1
      if n_others > 0:
        name = "%s (+%i more)" % (first_valid_name, n_others)
      elif first_valid_name:
        name = first_valid_name
      else:
        name = None
    else:
      # Nothing attached: fall back to the primary executable, if configured.
      if self._primary_executable:
        app = self._primary_executable[0]
        name = os.path.basename(app)
      else:
        name = None
    import ndbg
    # Debug python runtimes get a pid prefix -- presumably to tell multiple
    # ndbg instances apart; confirm.
    if ndbg.is_debug_python_runtime():
      prefix = "%i - " % os.getpid()
    else:
      prefix = ""
    if self.debugger.status == STATUS_RUNNING:
      self.mw.set_title("%sNicer Debugger - %s [Running]" % (prefix, name))
    else:
      if len(self.debugger.processes):
        self.mw.set_title("%sNicer Debugger - %s [Stopped]" % (prefix, name))
      else:
        if len(self.debugger.passive_processes):
          substat = "Background processes running";
        else:
          substat = "Not debugging";
        if name:
          self.mw.set_title("%sNicer Debugger - %s [%s]" % (prefix, name, substat))
        else:
          self.mw.set_title("%sNicer Debugger - [%s]" % (prefix, substat))
@property
def ui_status(self):
if self.debugger.status == STATUS_RUNNING:
return UI_STATUS_RUNNING
else:
if self._debugger.num_processes_of_all_types:
return UI_STATUS_BREAK
else:
return UI_STATUS_NOT_DEBUGGING
  def _update_overlays(self):
    """Show/hide the mode overlays to match ui_status and restore focus.

    Remembers which widget had focus in the previous UI state and, on a
    state change, restores focus to the widget recorded for the new state.
    """
    if self._previous_ui_status != self.ui_status:
      self._widget_that_had_focus[self._previous_ui_status] = self._mw.get_focus()
    if self.ui_status == UI_STATUS_RUNNING:
      self._when_not_debugging_overlay.visible = False
      self._when_debugging_overlay.visible = True
      self._when_running_overlay.visible = True
      # break overlay stays visible but disabled while running
      self._when_break_overlay.visible = True
      self._when_break_overlay.enabled = False
    elif self.ui_status == UI_STATUS_BREAK:
      self._when_not_debugging_overlay.visible = False
      self._when_debugging_overlay.visible = True
      self._when_running_overlay.visible = False
      self._when_break_overlay.visible = True
      self._when_break_overlay.enabled = True
    else:
      assert self.ui_status == UI_STATUS_NOT_DEBUGGING
      self._when_not_debugging_overlay.visible = True
      self._when_debugging_overlay.visible = False
      self._when_running_overlay.visible = False
      self._when_break_overlay.visible = False
    # Edit layout only when not debugging; run layout otherwise.
    if self.ui_status == UI_STATUS_NOT_DEBUGGING:
      self._mw.layout = UI_LAYOUT_EDIT
    else:
      self._mw.layout = UI_LAYOUT_RUN
    if self._previous_ui_status != self.ui_status:
      if self._widget_that_had_focus[self.ui_status] != None:
        self._widget_that_had_focus[self.ui_status].grab_focus()
      # set mainwindow layout to the new mode...
      self._previous_ui_status = self.ui_status
# bbar system puts up butterbars for passive and launchable processes
###########################################################################
def _init_bbar_system(self):
self._butter_bars_by_process = {}
  def _on_launchable_process_added(self, idx, proc):
    """Show a butter bar asking the user whether to launch proc.

    The bar stays up until the launchable process goes away.
    """
    # create butter bar, keep it up until the process is gone
    b = ButterBar("Request to launch a new process: %s" % " ".join(proc.target_full_cmdline))
    b.set_stock_icon(gtk.STOCK_DIALOG_INFO)
    def on_accept():
      proc.launch()
    def on_ignore():
      proc.ignore_launch()
    b.add_button("_Accept and Launch", on_accept)
    b.add_button("Ignor_e", on_ignore)
    # the window-close button counts as ignoring the launch
    b.add_close_button(on_ignore)
    self._butter_bars_by_process[proc] = b
    self.butter_bar_collection.add_bar(b)
def _on_launchable_process_deleted(self, idx, proc):
if self._butter_bars_by_process.has_key(proc):
b = self._butter_bars_by_process[proc]
self.butter_bar_collection.close_bar(b)
del self._butter_bars_by_process[proc]
def _on_passive_process_added(self, idx, proc):
# create butter bar, but set timeout to remove it in 5 seconds
cmdline = proc.target_full_cmdline
if cmdline:
title = "Process %i is running: %s" % (proc.pid, " ".join(cmdline))
else:
title = "Process %i is running" % (proc.pid)
b = ButterBar(title)
b.set_stock_icon(gtk.STOCK_DIALOG_INFO)
def on_accept():
proc.attach()
def on_ignore_or_timeout():
if not self._butter_bars_by_process.has_key(proc):
return
self.butter_bar_collection.close_bar(b)
del self._butter_bars_by_process[proc]
MessageLoop.add_delayed_message(on_ignore_or_timeout, 5000)
b.add_button("_Attach", on_accept)
b.add_button("Ignor_e", on_ignore_or_timeout)
b.add_close_button(on_ignore_or_timeout)
self._butter_bars_by_process[proc] = b
self.butter_bar_collection.add_bar(b)
self._on_status_changed()
def _on_passive_process_deleted(self, idx, proc):
log2("passive process deleted %s", proc);
if self._butter_bars_by_process.has_key(proc):
b = self._butter_bars_by_process[proc]
self.butter_bar_collection.close_bar(b)
del self._butter_bars_by_process[proc]
# pass message down to launcher, which needs to know this too
self._launcher_on_passive_process_deleted(proc)
self._on_status_changed()
# backend control for ndbg -e flow
###########################################################################
@staticmethod
def get_all_remote_instances():
"""Presents user with a list of MainControl's that are running, and lets them pick one."""
remote_dbus_names = RegisteredProcess.find_registered_dbus_names()
bus = dbus.SessionBus()
log1("Found existing processes: %s", remote_dbus_names)
mcs = [bus.get_object(remote_bus_name, "/MainControl") for remote_bus_name in remote_dbus_names]
return mcs
@dbus.service.method(dbus_interface='ndbg.MainControl')
def get_title(self):
return self._mw.get_title()
def _init_launcher(self):
self._debugger.launchable_processes.item_added.add_listener(self._on_launchable_process_added)
self._debugger.launchable_processes.item_deleted.add_listener(self._on_launchable_process_deleted)
self._debugger.passive_processes.item_added.add_listener(self._on_passive_process_added)
self._debugger.passive_processes.item_deleted.add_listener(self._on_passive_process_deleted)
self._next_launcher_id = 0
self._launchable_processes_by_launcher_id = {}
self._passive_processes_by_launcher_id = {}
@dbus.service.method(dbus_interface='ndbg.MainControl', sender_keyword="sender")
def add_launchable_process(self, cmdline, sender):
launcher = dbus.SessionBus().get_object(sender, "/Launcher")
cmdline = [str(x) for x in cmdline]
id = self._next_launcher_id
id = "%s/%s" % (dbus.SessionBus().get_unique_name(), self._next_launcher_id)
self._next_launcher_id += 1
log1("Add launchable process for %s", cmdline)
def on_launch():
launcher.on_accept_launch(id)
del self._launchable_processes_by_launcher_id[id]
def on_ignore_launch():
launcher.on_ignore_launch(id)
def on_detach():
launcher.on_kill_launch(id)
proc = DLaunchableProcess(cmdline, on_launch, on_ignore_launch, on_detach)
self._debugger.launchable_processes.append(proc)
# watch the launcher --- if it disappears, remove this launchable process
launcher_pid = BoxedObject()
def check_launcher_aliveness():
if launcher_pid.get() == None:
print "getting launcher pid"
launcher_pid.set(launcher.get_pid())
MessageLoop.add_delayed_message(check_launcher_aliveness, 250)
if not self._launchable_processes_by_launcher_id.has_key(id):
return False
if not ProcessUtils.is_proc_alive(launcher_pid.get()):
log1("Launchable process host %s gone. Removing launched process.", id)
del self._launchable_processes_by_launcher_id[id]
if proc in self._debugger.launchable_processes:
self._debugger.launchable_processes.remove(proc)
return False
return True
MessageLoop.add_message(check_launcher_aliveness)
# return the id of this process
log1("Added launchable process %s", id)
self._launchable_processes_by_launcher_id[id] = proc
return id
@dbus.service.method(dbus_interface='ndbg.MainControl')
def attach_to_launched_pid(self, launched_pid):
launched_pid = int(launched_pid)
log1("on_accept_launch_complete(%i)", launched_pid);
self._attach_to_pids([launched_pid])
@dbus.service.method(dbus_interface='ndbg.MainControl', sender_keyword="sender")
def remove_launchable_process(self, id, sender):
log1("Remove launchable process %s", id)
proc = self._launchable_processes_by_launcher_id[id]
del self._launchable_processes_by_launcher_id[id]
if proc in self.debugger.launchable_processes:
self.debugger.launchable_processes.remove(proc)
  @dbus.service.method(dbus_interface='ndbg.MainControl', sender_keyword="sender")
  def add_passive_process(self, pid, was_launched, sender):
    """D-Bus: register an already-running process the launcher knows about.

    Creates a DPassiveProcess whose attach notification is routed back to
    the caller's /Launcher object; returns a launcher-scoped id for it.
    """
    launcher = dbus.SessionBus().get_object(sender, "/Launcher")
    pid = int(pid)
    # ids are unique per ndbg instance: "<bus unique name>/<counter>"
    id = "%s/%s" % (dbus.SessionBus().get_unique_name(), self._next_launcher_id)
    self._next_launcher_id += 1
    log1("Add passive process for %i", pid)
    def on_attach():
      launcher.notify_of_attach(id)
    proc = DPassiveProcess(pid, on_attach, was_launched)
    self.debugger.passive_processes.append(proc)
    self._passive_processes_by_launcher_id[id] = proc
    return id
def _launcher_on_passive_process_deleted(self, proc):
for id in self._passive_processes_by_launcher_id:
if self._passive_processes_by_launcher_id[id] == proc:
del self._passive_processes_by_launcher_id[id]
break
@dbus.service.method(dbus_interface='ndbg.MainControl')
def remove_passive_process(self, id):
proc = self._passive_processes_by_launcher_id[id]
self._debugger.passive_processes.remove(proc)
|
|
from datetime import datetime
import decimal
import random
from faker import data
from faker import Faker
from faker.utils import uk_postcode, bothify
from anonymizer import replacers
randrange = random.SystemRandom().randrange

# All ASCII letters and digits, built in one pass with join rather than
# repeated string concatenation.
alphanumeric = "".join(
    chr(c)
    for first, last in (('A', 'Z'), ('a', 'z'), ('0', '9'))
    for c in range(ord(first), ord(last) + 1)
)
# Characters considered safe for generic text fields.
general_chars = alphanumeric + " _-"
class DjangoFaker(object):
    """
    Class that provides fake data, using Django specific knowledge to ensure
    acceptable data for Django models.
    """
    # Shared Faker instance; unknown attribute lookups are delegated to it
    # (see __getattr__).
    faker = Faker()
    def __init__(self):
        # Maps field -> set of values already in the DB or already generated;
        # used by get_allowed_value() to enforce uniqueness.
        self.init_values = {}
    def _prep_init(self, field):
        # Lazily seed the used-value set for a field from the database.
        if field in self.init_values:
            return
        field_vals = set(x[0] for x in field.model._default_manager.values_list(field.name))
        self.init_values[field] = field_vals
    def get_allowed_value(self, source, field):
        """Draw a value from source(), honoring the field's 'unique' and
        'max_length' constraints when the field defines them."""
        retval = source()
        if field is None:
            return retval
        # Enforce unique. Ensure we don't set the same values, as either
        # any of the existing values, or any of the new ones we make up.
        unique = getattr(field, 'unique', None)
        if unique:
            self._prep_init(field)
            used = self.init_values[field]
            # Retry a bounded number of times before giving up.
            for i in xrange(0, 10):
                if retval in used:
                    retval = source()
                else:
                    break
            if retval in used:
                raise Exception("Cannot generate unique data for field %s. Last value tried %s" % (field, retval))
            used.add(retval)
        # Enforce max_length
        # NOTE(review): truncation happens after the uniqueness check, so two
        # values unique before truncation may collide after it -- confirm.
        max_length = getattr(field, 'max_length', None)
        if max_length is not None:
            retval = retval[:max_length]
        return retval
    ### Public interface ###
    def varchar(self, field=None):
        """
        Returns a chunk of text, of maximum length 'max_length'
        """
        assert field is not None, "The field parameter must be passed to the 'varchar' method."
        max_length = field.max_length
        def source():
            length = random.choice(range(0, max_length + 1))
            return "".join(random.choice(general_chars) for i in xrange(length))
        return self.get_allowed_value(source, field)
    def simple_pattern(self, pattern, field=None):
        """
        Use a simple pattern to make the field - # is replaced with a random number,
        ? with a random letter.
        """
        source = lambda: bothify(pattern)
        return self.get_allowed_value(source, field)
    def bool(self, field=None):
        """
        Returns a random boolean
        """
        source = lambda: bool(randrange(0, 2))
        return self.get_allowed_value(source, field)
    def integer(self, field=None):
        """Returns a random integer in [-1000000, 1000000]."""
        source = lambda: random.randint(-1000000, 1000000)
        return self.get_allowed_value(source, field)
    def positive_integer(self, field=None):
        """Returns a random integer in [0, 1000000]."""
        source = lambda: random.randint(0, 1000000)
        return self.get_allowed_value(source, field)
    def small_integer(self, field=None):
        """Returns a random integer within the 16-bit signed range."""
        source = lambda: random.randint(-32768, +32767)
        return self.get_allowed_value(source, field)
    def positive_small_integer(self, field=None):
        """Returns a random integer in [0, 32767]."""
        source = lambda: random.randint(0, 32767)
        return self.get_allowed_value(source, field)
    def datetime(self, field=None, val=None):
        """
        Returns a random datetime. If 'val' is passed, a datetime within two
        years of that date will be returned.
        """
        if val is None:
            source = lambda: datetime.fromtimestamp(randrange(1, 2100000000))
        else:
            source = lambda: datetime.fromtimestamp(int(val.strftime("%s")) +
                randrange(-365*24*3600*2, 365*24*3600*2))
        return self.get_allowed_value(source, field)
    def date(self, field=None, val=None):
        """
        Like datetime, but truncated to be a date only
        """
        d = self.datetime(field=field, val=val)
        return d.date()
    def decimal(self, field=None, val=None):
        """Returns a random Decimal scaled down to field.decimal_places."""
        source = lambda: decimal.Decimal(random.randrange(0, 100000))/(10**field.decimal_places)
        return self.get_allowed_value(source, field)
    def uk_postcode(self, field=None):
        """Returns a random UK-format postcode."""
        return self.get_allowed_value(uk_postcode, field)
    def uk_county(self, field=None):
        """Returns a random UK county name."""
        source = lambda: random.choice(data.UK_COUNTIES)
        return self.get_allowed_value(source, field)
    def uk_country(self, field=None):
        """Returns a random UK country name."""
        source = lambda: random.choice(data.UK_COUNTRIES)
        return self.get_allowed_value(source, field)
    def lorem(self, field=None, val=None):
        """
        Returns lorem ipsum text. If val is provided, the lorem ipsum text will
        be the same length as the original text, and with the same pattern of
        line breaks.
        """
        if val is not None:
            def generate(length):
                # Get lorem ipsum of a specific length.
                collect = ""
                while len(collect) < length:
                    collect += self.faker.lorem()
                collect = collect[:length]
                return collect
            # We want to match the pattern of the text - linebreaks
            # in the same places.
            def source():
                parts = val.split("\n")
                for i, p in enumerate(parts):
                    # Replace each bit with lorem ipsum of the same length
                    parts[i] = generate(len(p))
                return "\n".join(parts)
        else:
            source = self.faker.lorem
        return self.get_allowed_value(source, field)
    def choice(self, field=None):
        """Returns a random value from the field's declared 'choices'."""
        assert field is not None, "The field parameter must be passed to the 'choice' method."
        choices = [c[0] for c in field.choices]
        source = lambda: random.choice(choices)
        return self.get_allowed_value(source, field)
    ## Other attributes provided by 'Faker':
    # username
    # first_name
    # last_name
    # name
    # email
    # full_address
    # phonenumber
    # street_address
    # city
    # state
    # zip_code
    # company
    def __getattr__(self, name):
        # we delegate most calls to faker, but add checks
        source = getattr(self.faker, name)
        def func(*args, **kwargs):
            # Accept an optional 'field' kwarg so constraints still apply.
            field = kwargs.get('field', None)
            return self.get_allowed_value(source, field)
        return func
class Anonymizer(object):
    """
    Base class for all anonymizers. When executed with the ``run()`` method,
    it will anonymize the data for a specific model.
    """
    # The model class whose rows will be anonymized; must be set by subclasses.
    model = None
    # attributes is a sequence of (attribute_name, replacer) pairs, where
    # replacer is a callable that takes as arguments this Anonymizer instance,
    # the object to be altered, the field to be altered, and the current field
    # value, and returns a replacement value. (alter_object() and validate()
    # both unpack two-tuples, so it must be a sequence of pairs, not a dict.)
    # This signature is designed to be useful for making lambdas that call the
    # 'faker' instance provided on this class, but it can be used with any
    # function.
    attributes = None
    # To impose an order on Anonymizers within a module, this can be set - lower
    # values are done first.
    order = 0
    faker = DjangoFaker()
    def get_queryset(self):
        """
        Returns the QuerySet to be manipulated
        """
        if self.model is None:
            raise Exception("'model' attribute must be set")
        qs = self.model._default_manager.get_queryset()
        # Order by 'id' (when present) for a deterministic pass.
        if len([f for f in self.model._meta.fields if f.name == 'id']) == 1:
            qs = qs.order_by('id')
        return qs
    def get_attributes(self):
        # Fail loudly when a subclass forgot to declare its attribute list.
        if self.attributes is None:
            raise Exception("'attributes' attribute must be set")
        return self.attributes
    def alter_object(self, obj):
        """
        Alters all the attributes in an individual object.
        If it returns False, the object will not be saved
        """
        attributes = self.get_attributes()
        for attname, replacer in attributes:
            # The literal "SKIP" marks fields to leave untouched.
            if replacer == "SKIP":
                continue
            self.alter_object_attribute(obj, attname, replacer)
    def alter_object_attribute(self, obj, attname, replacer):
        """
        Alters a single attribute in an object.
        """
        currentval = getattr(obj, attname)
        field = obj._meta.get_field(attname)
        if isinstance(replacer, str):
            # 'email' is shortcut for: replacers.email
            replacer = getattr(replacers, replacer)
        elif not callable(replacer):
            raise Exception("Expected callable or string to be passed, got %r." % replacer)
        replacement = replacer(self, obj, field, currentval)
        setattr(obj, attname, replacement)
    def run(self):
        """Validate the attribute list, then anonymize and save every row."""
        self.validate()
        for obj in self.get_queryset().iterator():
            retval = self.alter_object(obj)
            # alter_object may veto saving by returning False.
            if retval is not False:
                obj.save()
    def validate(self):
        """Check that 'attributes' covers the model's fields exactly;
        raises ValueError listing missing and unexpected field names."""
        attributes = self.get_attributes()
        model_attrs = set(f.attname for f in self.model._meta.fields)
        given_attrs = set(name for name,replacer in attributes)
        if model_attrs != given_attrs:
            msg = ""
            missing_attrs = model_attrs - given_attrs
            if missing_attrs:
                msg += "The following fields are missing: %s. " % ", ".join(missing_attrs)
                msg += "Add the replacer \"SKIP\" to skip these fields."
            extra_attrs = given_attrs - model_attrs
            if extra_attrs:
                msg += "The following non-existent fields were supplied: %s." % ", ".join(extra_attrs)
            raise ValueError("The attributes list for %s does not match the complete list of fields for that model. %s" % (self.model.__name__, msg))
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. This is the
# source httplib connection of whatever it is attached to.
# It is used when early termination of reading from the connection should
# happen, such as when a range request is satisfied but there's still more the
# source connection would like to send. To prevent having to read all the data
# that could be left, the source connection can be .close() and then reads
# commence to empty out any buffers.
# These shenanigans are to ensure all related objects can be garbage
# collected. We've seen objects hang around forever otherwise.
import time
import functools
from eventlet import spawn_n, GreenPile, Timeout
from eventlet.queue import Queue, Empty, Full
from eventlet.timeout import Timeout
from swift.common.utils import normalize_timestamp, TRUE_VALUES, public
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import MAX_ACCOUNT_NAME_LENGTH
from swift.common.exceptions import ChunkReadTimeout, ConnectionTimeout
from swift.common.http import is_informational, is_success, is_redirection, \
is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \
HTTP_INSUFFICIENT_STORAGE
from swift.common.swob import Request, Response, status_map
def update_headers(response, headers):
    """
    Helper function to update headers in the response.

    :param response: swob.Response object
    :param headers: dictionary headers (or a sequence of (name, value) pairs)
    """
    pairs = headers.items() if hasattr(headers, 'items') else headers
    # Hop-by-hop / length-related headers are managed by swob itself.
    skipped = ('date', 'content-length', 'content-type',
               'connection', 'x-put-timestamp', 'x-delete-after')
    for name, value in pairs:
        if name == 'etag':
            # etags are stored unquoted
            response.headers[name] = value.replace('"', '')
        elif name not in skipped:
            response.headers[name] = value
def source_key(resp):
    """
    Provide the timestamp of the swift http response as a floating
    point value. Used as a sort key.

    :param resp: httplib response object
    """
    # Prefer x-put-timestamp; fall back to x-timestamp, then 0.
    timestamp = (resp.getheader('x-put-timestamp') or
                 resp.getheader('x-timestamp') or 0)
    return float(timestamp)
def delay_denial(func):
    """
    Decorator to declare which methods should have any swift.authorize call
    delayed. This is so the method can load the Request object up with
    additional information that may be needed by the authorization system.

    :param func: function for which authorization will be delayed
    """
    # Mark the original; functools.wraps copies __dict__, so the wrapper
    # carries the delay_denial marker too.
    func.delay_denial = True
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapped
def get_account_memcache_key(account):
    """Memcache key under which account info is cached."""
    return 'account/%s' % (account,)
def get_container_memcache_key(account, container):
    """Memcache key under which container info is cached."""
    return 'container/%s/%s' % (account, container)
class Controller(object):
    """Base WSGI controller class for the proxy"""
    # Lowercased by transfer_headers() to build the x-<type>-meta / x-remove-*
    # header prefixes; also used in backend error logging. Subclasses override.
    server_type = 'Base'
    # Ensure these are all lowercase
    pass_through_headers = []
    def __init__(self, app):
        # Filled in later (by subclasses/dispatch) once the path is parsed.
        self.account_name = None
        self.app = app
        # placeholder transaction id until one is assigned
        self.trans_id = '-'
def transfer_headers(self, src_headers, dst_headers):
st = self.server_type.lower()
x_remove = 'x-remove-%s-meta-' % st
x_remove_read = 'x-remove-%s-read' % st
x_remove_write = 'x-remove-%s-write' % st
x_meta = 'x-%s-meta-' % st
dst_headers.update((k.lower().replace('-remove', '', 1), '')
for k in src_headers
if k.lower().startswith(x_remove) or
k.lower() in (x_remove_read, x_remove_write))
dst_headers.update((k.lower(), v)
for k, v in src_headers.iteritems()
if k.lower() in self.pass_through_headers or
k.lower().startswith(x_meta))
def error_increment(self, node):
"""
Handles incrementing error counts when talking to nodes.
:param node: dictionary of node to increment the error count for
"""
node['errors'] = node.get('errors', 0) + 1
node['last_error'] = time.time()
def error_occurred(self, node, msg):
"""
Handle logging, and handling of errors.
:param node: dictionary of node to handle errors for
:param msg: error message
"""
self.error_increment(node)
self.app.logger.error(_('%(msg)s %(ip)s:%(port)s'),
{'msg': msg, 'ip': node['ip'],
'port': node['port']})
def exception_occurred(self, node, typ, additional_info):
"""
Handle logging of generic exceptions.
:param node: dictionary of node to log the error for
:param typ: server type
:param additional_info: additional information to log
"""
self.app.logger.exception(
_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: '
'%(info)s'),
{'type': typ, 'ip': node['ip'], 'port': node['port'],
'device': node['device'], 'info': additional_info})
def error_limited(self, node):
"""
Check if the node is currently error limited.
:param node: dictionary of node to check
:returns: True if error limited, False otherwise
"""
now = time.time()
if not 'errors' in node:
return False
if 'last_error' in node and node['last_error'] < \
now - self.app.error_suppression_interval:
del node['last_error']
if 'errors' in node:
del node['errors']
return False
limited = node['errors'] > self.app.error_suppression_limit
if limited:
self.app.logger.debug(
_('Node error limited %(ip)s:%(port)s (%(device)s)'), node)
return limited
def error_limit(self, node):
"""
Mark a node as error limited.
:param node: dictionary of node to error limit
"""
node['errors'] = self.app.error_suppression_limit + 1
node['last_error'] = time.time()
    def account_info(self, account, autocreate=False):
        """
        Get account information, and also verify that the account exists.

        :param account: name of the account to get the info for
        :param autocreate: if True and the account is missing, PUT it to the
                           account servers before giving up
        :returns: tuple of (account partition, account nodes, container_count)
                  or (None, None, None) if it does not exist
        """
        partition, nodes = self.app.account_ring.get_nodes(account)
        # 0 = no responses, 200 = found, 404 = not found, -1 = mixed responses
        if self.app.memcache:
            cache_key = get_account_memcache_key(account)
            cache_value = self.app.memcache.get(cache_key)
            # Older cache entries were a bare status code; newer ones a dict.
            if not isinstance(cache_value, dict):
                result_code = cache_value
                container_count = 0
            else:
                result_code = cache_value['status']
                container_count = cache_value['container_count']
            if result_code == HTTP_OK:
                return partition, nodes, container_count
            elif result_code == HTTP_NOT_FOUND and not autocreate:
                return None, None, None
        result_code = 0
        container_count = 0
        attempts_left = len(nodes)
        path = '/%s' % account
        headers = {'x-trans-id': self.trans_id, 'Connection': 'close'}
        # HEAD account servers (primaries then handoffs) until one answers
        # definitively or we run out of attempts.
        iternodes = self.iter_nodes(partition, nodes, self.app.account_ring)
        while attempts_left > 0:
            try:
                node = iternodes.next()
            except StopIteration:
                break
            attempts_left -= 1
            try:
                with ConnectionTimeout(self.app.conn_timeout):
                    conn = http_connect(node['ip'], node['port'],
                                        node['device'], partition, 'HEAD',
                                        path, headers)
                with Timeout(self.app.node_timeout):
                    resp = conn.getresponse()
                    body = resp.read()
                    if is_success(resp.status):
                        result_code = HTTP_OK
                        container_count = int(
                            resp.getheader('x-account-container-count') or 0)
                        break
                    elif resp.status == HTTP_NOT_FOUND:
                        # 404 only counts if nothing contradicts it.
                        if result_code == 0:
                            result_code = HTTP_NOT_FOUND
                        elif result_code != HTTP_NOT_FOUND:
                            result_code = -1
                    elif resp.status == HTTP_INSUFFICIENT_STORAGE:
                        self.error_limit(node)
                        continue
                    else:
                        result_code = -1
            except (Exception, Timeout):
                self.exception_occurred(node, _('Account'),
                                        _('Trying to get account info for %s')
                                        % path)
        # Optionally create the account on a confirmed 404.
        if result_code == HTTP_NOT_FOUND and autocreate:
            if len(account) > MAX_ACCOUNT_NAME_LENGTH:
                return None, None, None
            headers = {'X-Timestamp': normalize_timestamp(time.time()),
                       'X-Trans-Id': self.trans_id,
                       'Connection': 'close'}
            resp = self.make_requests(Request.blank('/v1' + path),
                                      self.app.account_ring, partition, 'PUT',
                                      path, [headers] * len(nodes))
            if not is_success(resp.status_int):
                self.app.logger.warning('Could not autocreate account %r' %
                                        path)
                return None, None, None
            result_code = HTTP_OK
        # Cache definitive answers; negative results for 1/10th the time.
        if self.app.memcache and result_code in (HTTP_OK, HTTP_NOT_FOUND):
            if result_code == HTTP_OK:
                cache_timeout = self.app.recheck_account_existence
            else:
                cache_timeout = self.app.recheck_account_existence * 0.1
            self.app.memcache.set(cache_key,
                                  {'status': result_code,
                                   'container_count': container_count},
                                  timeout=cache_timeout)
        if result_code == HTTP_OK:
            return partition, nodes, container_count
        return None, None, None
    def container_info(self, account, container, account_autocreate=False):
        """
        Get container information and thereby verify container existence.
        This will also make a call to account_info to verify that the
        account exists.

        :param account: account name for the container
        :param container: container name to look up
        :param account_autocreate: passed through to account_info()
        :returns: dict containing at least container partition ('partition'),
                  container nodes ('containers'), container read
                  acl ('read_acl'), container write acl ('write_acl'),
                  and container sync key ('sync_key').
                  Values are set to None if the container does not exist.
        """
        part, nodes = self.app.container_ring.get_nodes(account, container)
        path = '/%s/%s' % (account, container)
        # Defaults returned when the container cannot be confirmed.
        container_info = {'status': 0, 'read_acl': None,
                          'write_acl': None, 'sync_key': None,
                          'count': None, 'bytes': None,
                          'versions': None, 'partition': None,
                          'nodes': None}
        if self.app.memcache:
            cache_key = get_container_memcache_key(account, container)
            cache_value = self.app.memcache.get(cache_key)
            if isinstance(cache_value, dict):
                # Older cache entries used 'container_size' for the count.
                if 'container_size' in cache_value:
                    cache_value['count'] = cache_value['container_size']
                if is_success(cache_value['status']):
                    container_info.update(cache_value)
                    container_info['partition'] = part
                    container_info['nodes'] = nodes
                return container_info
        # No point HEADing containers if the account itself is missing.
        if not self.account_info(account, autocreate=account_autocreate)[1]:
            return container_info
        attempts_left = len(nodes)
        headers = {'x-trans-id': self.trans_id, 'Connection': 'close'}
        for node in self.iter_nodes(part, nodes, self.app.container_ring):
            try:
                with ConnectionTimeout(self.app.conn_timeout):
                    conn = http_connect(node['ip'], node['port'],
                                        node['device'], part, 'HEAD',
                                        path, headers)
                with Timeout(self.app.node_timeout):
                    resp = conn.getresponse()
                    body = resp.read()
                if is_success(resp.status):
                    container_info.update({
                        'status': HTTP_OK,
                        'read_acl': resp.getheader('x-container-read'),
                        'write_acl': resp.getheader('x-container-write'),
                        'sync_key': resp.getheader('x-container-sync-key'),
                        'count': resp.getheader('x-container-object-count'),
                        'bytes': resp.getheader('x-container-bytes-used'),
                        'versions': resp.getheader('x-versions-location')})
                    break
                elif resp.status == HTTP_NOT_FOUND:
                    container_info['status'] = HTTP_NOT_FOUND
                else:
                    container_info['status'] = -1
                    if resp.status == HTTP_INSUFFICIENT_STORAGE:
                        self.error_limit(node)
            except (Exception, Timeout):
                self.exception_occurred(
                    node, _('Container'),
                    _('Trying to get container info for %s') % path)
            attempts_left -= 1
            if attempts_left <= 0:
                break
        # Cache definitive answers; 404s for 1/10th the normal time.
        if self.app.memcache:
            if container_info['status'] == HTTP_OK:
                self.app.memcache.set(
                    cache_key, container_info,
                    timeout=self.app.recheck_container_existence)
            elif container_info['status'] == HTTP_NOT_FOUND:
                self.app.memcache.set(
                    cache_key, container_info,
                    timeout=self.app.recheck_container_existence * 0.1)
        if container_info['status'] == HTTP_OK:
            container_info['partition'] = part
            container_info['nodes'] = nodes
        return container_info
    def iter_nodes(self, partition, nodes, ring):
        """
        Node iterator that will first iterate over the normal nodes for a
        partition and then the handoff partitions for the node.

        Error-limited nodes are skipped in both phases.

        :param partition: partition to iterate nodes for
        :param nodes: list of node dicts from the ring
        :param ring: ring to get handoff nodes from
        """
        for node in nodes:
            if not self.error_limited(node):
                yield node
        # Primaries exhausted (or error limited): fall back to handoffs.
        handoffs = 0
        for node in ring.get_more_nodes(partition):
            if not self.error_limited(node):
                handoffs += 1
                if self.app.log_handoffs:
                    self.app.logger.increment('handoff_count')
                    self.app.logger.warning(
                        'Handoff requested (%d)' % handoffs)
                    # Record when as many handoffs as primaries were needed.
                    if handoffs == len(nodes):
                        self.app.logger.increment('handoff_all_count')
                yield node
    def _make_request(self, nodes, part, method, path, headers, query,
                      logger_thread_locals):
        """
        Try one backend request against successive nodes; return the first
        definitive answer as a (status, reason, body) tuple.

        Informational (1xx) and server-error (5xx) responses cause the next
        node to be tried; implicitly returns None if every node fails.

        :param nodes: iterator of node dicts to draw nodes from
        :param part: ring partition
        :param method: HTTP method
        :param path: backend request path
        :param headers: headers dict for this backend request
        :param query: query string
        :param logger_thread_locals: thread-local logger state (transaction
                                     logging information) to restore on this
                                     green thread
        """
        self.app.logger.thread_locals = logger_thread_locals
        for node in nodes:
            try:
                with ConnectionTimeout(self.app.conn_timeout):
                    conn = http_connect(node['ip'], node['port'],
                                        node['device'], part, method, path,
                                        headers=headers, query_string=query)
                    conn.node = node
                with Timeout(self.app.node_timeout):
                    resp = conn.getresponse()
                    if not is_informational(resp.status) and \
                        not is_server_error(resp.status):
                        return resp.status, resp.reason, resp.read()
                    elif resp.status == HTTP_INSUFFICIENT_STORAGE:
                        self.error_limit(node)
            except (Exception, Timeout):
                self.exception_occurred(node, self.server_type,
                                        _('Trying to %(method)s %(path)s') %
                                        {'method': method, 'path': path})
    def make_requests(self, req, ring, part, method, path, headers,
                      query_string=''):
        """
        Sends an HTTP request to multiple nodes and aggregates the results.
        It attempts the primary nodes concurrently, then iterates over the
        handoff nodes as needed.

        :param req: the swob.Request the aggregate response is built for
        :param ring: ring used to look up backend nodes
        :param part: the ring partition
        :param method: HTTP method for the backend requests
        :param path: backend request path
        :param headers: a list of dicts, where each dict represents one
                        backend request that should be made.
        :param query_string: optional query string sent to each backend
        :returns: a swob.Response object
        """
        start_nodes = ring.get_part_nodes(part)
        # All green threads share this iterator, so each request grabs the
        # next untried (non-error-limited) node.
        nodes = self.iter_nodes(part, start_nodes, ring)
        pile = GreenPile(len(start_nodes))
        for head in headers:
            pile.spawn(self._make_request, nodes, part, method, path,
                       head, query_string, self.app.logger.thread_locals)
        # _make_request yields None when it exhausts its nodes; drop those
        # and pad with 503s so the quorum math sees every request.
        response = [resp for resp in pile if resp]
        while len(response) < len(start_nodes):
            response.append((HTTP_SERVICE_UNAVAILABLE, '', ''))
        statuses, reasons, bodies = zip(*response)
        return self.best_response(req, statuses, reasons, bodies,
                                  '%s %s' % (self.server_type, req.method))
def best_response(self, req, statuses, reasons, bodies, server_type,
                  etag=None):
    """
    Given a list of responses from several servers, choose the best to
    return to the API.

    :param req: swob.Request object
    :param statuses: list of statuses returned
    :param reasons: list of reasons for each status
    :param bodies: bodies of each response
    :param server_type: type of server the responses came from
    :param etag: etag
    :returns: swob.Response object with the correct status, body, etc. set
    """
    resp = Response(request=req)
    if len(statuses):
        # Look for a quorum (strict majority) within each status class,
        # preferring success (2xx), then redirection (3xx), then client
        # error (4xx); the highest status in the winning class is used.
        for hundred in (HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST):
            hstatuses = \
                [s for s in statuses if hundred <= s < hundred + 100]
            if len(hstatuses) > len(statuses) / 2:
                status = max(hstatuses)
                status_index = statuses.index(status)
                resp.status = '%s %s' % (status, reasons[status_index])
                resp.body = bodies[status_index]
                resp.content_type = 'text/html'
                if etag:
                    resp.headers['etag'] = etag.strip('"')
                return resp
    # No quorum in any class: report service unavailable.
    self.app.logger.error(_('%(type)s returning 503 for %(statuses)s'),
                          {'type': server_type, 'statuses': statuses})
    # Bug fix: 503's reason phrase is 'Service Unavailable';
    # 'Internal Server Error' is the phrase for status 500.
    resp.status = '503 Service Unavailable'
    return resp
@public
def GET(self, req):
    """Handler for HTTP GET requests.

    Delegates to ``self.GETorHEAD``, which holds the logic shared
    between GET and HEAD.

    :param req: swob.Request object
    :returns: the response from ``GETorHEAD``
    """
    return self.GETorHEAD(req)
@public
def HEAD(self, req):
    """Handler for HTTP HEAD requests.

    Delegates to ``self.GETorHEAD``, which holds the logic shared
    between GET and HEAD.

    :param req: swob.Request object
    :returns: the response from ``GETorHEAD``
    """
    return self.GETorHEAD(req)
def _make_app_iter_reader(self, node, source, queue, logger_thread_locals):
    """
    Reads from the source and places data in the queue. It expects
    something else be reading from the queue and, if nothing does within
    self.app.client_timeout seconds, the process will be aborted.

    :param node: The node dict that the source is connected to, for
                 logging/error-limiting purposes.
    :param source: The httplib.Response object to read from.
    :param queue: The eventlet.queue.Queue to place read source data into.
    :param logger_thread_locals: The thread local values to be set on the
                                 self.app.logger to retain transaction
                                 logging information.
    """
    self.app.logger.thread_locals = logger_thread_locals
    success = True
    try:
        try:
            while True:
                # Each read from the backend is bounded by node_timeout.
                with ChunkReadTimeout(self.app.node_timeout):
                    chunk = source.read(self.app.object_chunk_size)
                if not chunk:
                    break
                # Blocks until the consumer drains the queue; raises Full
                # if the client fails to read within client_timeout.
                queue.put(chunk, timeout=self.app.client_timeout)
        except Full:
            self.app.logger.warn(
                _('Client did not read from queue within %ss') %
                self.app.client_timeout)
            self.app.logger.increment('client_timeouts')
            success = False
        except (Exception, Timeout):
            self.exception_occurred(node, _('Object'),
                                    _('Trying to read during GET'))
            success = False
    finally:
        # Ensure the queue getter gets a terminator.
        # Resizing guarantees the final put cannot block even if the
        # consumer has gone away.
        queue.resize(2)
        # The boolean success flag is the terminator the consumer checks.
        queue.put(success)
        # Close-out the connection as best as possible.
        if getattr(source, 'swift_conn', None):
            self.close_swift_conn(source)
def _make_app_iter(self, node, source):
    """
    Returns an iterator over the contents of the source (via its read
    func). There is also quite a bit of cleanup to ensure garbage
    collection works and the underlying socket of the source is closed.

    :param source: The httplib.Response object this iterator should read
                   from.
    :param node: The node the source is reading from, for logging purposes.
    :raises ChunkReadTimeout: if no chunk arrives within node_timeout
    """
    try:
        # Spawn reader to read from the source and place in the queue.
        # We then drop any reference to the source or node, for garbage
        # collection purposes.
        queue = Queue(1)
        spawn_n(self._make_app_iter_reader, node, source, queue,
                self.app.logger.thread_locals)
        source = node = None
        while True:
            # The reader enqueues data chunks, then a final boolean
            # success flag as a terminator.
            chunk = queue.get(timeout=self.app.node_timeout)
            if isinstance(chunk, bool):  # terminator
                success = chunk
                if not success:
                    raise Exception(_('Failed to read all data'
                                      ' from the source'))
                break
            yield chunk
    except Empty:
        # queue.get timed out waiting for the reader.
        raise ChunkReadTimeout()
    except (GeneratorExit, Timeout):
        # Client went away mid-stream; log but don't propagate.
        self.app.logger.warn(_('Client disconnected on read'))
    except Exception:
        self.app.logger.exception(_('Trying to send to client'))
        raise
def close_swift_conn(self, src):
    """Best-effort teardown of a backend response and its connection.

    Closes the underlying ``swift_conn``, drains any unread response
    data, and closes the response object itself. Every step swallows
    exceptions so teardown always runs to completion.

    :param src: the response object to close out
    """
    # Close the raw backend connection first; ignore any failure.
    try:
        src.swift_conn.close()
    except Exception:
        pass
    src.swift_conn = None
    # Drain whatever response data is still unread.
    chunk_size = self.app.object_chunk_size
    try:
        chunk = src.read(chunk_size)
        while chunk:
            chunk = src.read(chunk_size)
    except Exception:
        pass
    # Finally close the response object itself.
    try:
        src.close()
    except Exception:
        pass
def GETorHEAD_base(self, req, server_type, partition, nodes, path,
                   attempts):
    """
    Base handler for HTTP GET or HEAD requests.

    Polls backend nodes until a usable source is found (or, with the
    ``X-Newest`` header set, until all attempts are exhausted so the
    best source can be chosen among them).

    :param req: swob.Request object
    :param server_type: server type
    :param partition: partition
    :param nodes: nodes
    :param path: path for the request
    :param attempts: number of attempts to try
    :returns: swob.Response object
    """
    statuses = []
    reasons = []
    bodies = []
    sources = []
    # X-Newest forces us to query every node instead of stopping at the
    # first good source.
    newest = req.headers.get('x-newest', 'f').lower() in TRUE_VALUES
    nodes = iter(nodes)
    while len(statuses) < attempts:
        try:
            # Python 2 iterator protocol (.next() rather than next()).
            node = nodes.next()
        except StopIteration:
            break
        if self.error_limited(node):
            continue
        try:
            with ConnectionTimeout(self.app.conn_timeout):
                headers = dict(req.headers)
                headers['Connection'] = 'close'
                conn = http_connect(
                    node['ip'], node['port'], node['device'], partition,
                    req.method, path, headers=headers,
                    query_string=req.query_string)
            with Timeout(self.app.node_timeout):
                possible_source = conn.getresponse()
                # See NOTE: swift_conn at top of file about this.
                possible_source.swift_conn = conn
        except (Exception, Timeout):
            self.exception_occurred(
                node, server_type, _('Trying to %(method)s %(path)s') %
                {'method': req.method, 'path': req.path})
            continue
        if is_success(possible_source.status) or \
                is_redirection(possible_source.status):
            # 404 if we know we don't have a synced copy
            if not float(possible_source.getheader('X-PUT-Timestamp', 1)):
                statuses.append(HTTP_NOT_FOUND)
                reasons.append('')
                bodies.append('')
                self.close_swift_conn(possible_source)
            else:
                statuses.append(possible_source.status)
                reasons.append(possible_source.reason)
                bodies.append('')
                sources.append(possible_source)
                if not newest:  # one good source is enough
                    break
        else:
            # Error-class response: record it and apply error limiting.
            statuses.append(possible_source.status)
            reasons.append(possible_source.reason)
            bodies.append(possible_source.read())
            if possible_source.status == HTTP_INSUFFICIENT_STORAGE:
                self.error_limit(node)
            elif is_server_error(possible_source.status):
                self.error_occurred(node, _('ERROR %(status)d %(body)s '
                                            'From %(type)s Server') %
                                    {'status': possible_source.status,
                                     'body': bodies[-1][:1024],
                                     'type': server_type})
    if sources:
        # Pick the best source per source_key (defined elsewhere in this
        # file); the remaining sources are closed out.
        sources.sort(key=source_key)
        source = sources.pop()
        for src in sources:
            self.close_swift_conn(src)
        res = Response(request=req, conditional_response=True)
        if req.method == 'GET' and \
                source.status in (HTTP_OK, HTTP_PARTIAL_CONTENT):
            # Stream the body through the app-iter reader machinery.
            res.app_iter = self._make_app_iter(node, source)
            # See NOTE: swift_conn at top of file about this.
            res.swift_conn = source.swift_conn
        res.status = source.status
        update_headers(res, source.getheaders())
        if not res.environ:
            res.environ = {}
        res.environ['swift_x_timestamp'] = \
            source.getheader('x-timestamp')
        res.accept_ranges = 'bytes'
        res.content_length = source.getheader('Content-Length')
        if source.getheader('Content-Type'):
            res.charset = None
            res.content_type = source.getheader('Content-Type')
        return res
    # No usable source at all: synthesize the best error response.
    return self.best_response(req, statuses, reasons, bodies,
                              '%s %s' % (server_type, req.method))
|
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
import warnings
from . import _ni_support
from . import _nd_image
from ._ni_docstrings import docdict
from scipy._lib import doccer
# Change the default 'reflect' to 'constant' via modifying a copy of docdict
docdict_copy = docdict.copy()
del docdict
docdict_copy['mode'] = docdict_copy['mode'].replace("Default is 'reflect'",
"Default is 'constant'")
docfiller = doccer.filldoc(docdict_copy)
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
@docfiller
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64,
                    mode='mirror'):
    """
    Calculate a one-dimensional spline filter along the given axis.

    The lines of the array along the given axis are filtered by a
    spline filter. The order of the spline must be >= 2 and <= 5.

    Parameters
    ----------
    %(input)s
    order : int, optional
        The order of the spline, default is 3.
    axis : int, optional
        The axis along which the spline filter is applied. Default is the
        last axis.
    output : ndarray or dtype, optional
        The array in which to place the output, or the dtype of the
        returned array. Default is ``numpy.float64``.
    %(mode)s

    Returns
    -------
    spline_filter1d : ndarray
        The filtered input.

    Notes
    -----
    All of the interpolation functions in `ndimage` do spline
    interpolation of the input image. For b-splines with `order > 1` the
    image values must first be converted to b-spline coefficients, which
    is done by applying this one-dimensional filter sequentially along
    every axis of the input. Functions that need b-spline coefficients
    filter their inputs automatically, a behavior controllable with the
    `prefilter` keyword argument. For functions that accept a `mode`
    parameter, the result will only be correct if it matches the `mode`
    used when filtering.
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    data = numpy.asarray(input)
    if numpy.iscomplexobj(data):
        raise TypeError('Complex type not supported')
    output = _ni_support._get_output(output, data)
    if order in (0, 1):
        # Orders 0 and 1 are identity filters: just copy the data over.
        output[...] = numpy.array(data)
    else:
        # Translate the boundary mode and normalize the axis before
        # handing off to the C implementation.
        mode_code = _ni_support._extend_mode_to_code(mode)
        axis = _ni_support._check_axis(axis, data.ndim)
        _nd_image.spline_filter1d(data, order, axis, output, mode_code)
    return output
def spline_filter(input, order=3, output=numpy.float64, mode='mirror'):
    """
    Multi-dimensional spline filter.

    For more details, see `spline_filter1d`.

    See Also
    --------
    spline_filter1d

    Notes
    -----
    The multi-dimensional filter is implemented as a sequence of
    one-dimensional spline filters. The intermediate arrays are stored
    in the same data type as the output. Therefore, for output types
    with a limited precision, the results may be imprecise because
    intermediate results may be stored with insufficient precision.
    """
    if order < 2 or order > 5:
        raise RuntimeError('spline order not supported')
    data = numpy.asarray(input)
    if numpy.iscomplexobj(data):
        raise TypeError('Complex type not supported')
    output = _ni_support._get_output(output, data)
    if order not in (0, 1) and data.ndim > 0:
        # Filter sequentially along each axis, reusing the output array
        # as the input of every subsequent pass.
        for axis in range(data.ndim):
            spline_filter1d(data, order, axis, output=output, mode=mode)
            data = output
    else:
        output[...] = data[...]
    return output
@docfiller
def geometric_transform(input, mapping, output_shape=None,
                        output=None, order=3,
                        mode='constant', cval=0.0, prefilter=True,
                        extra_arguments=(), extra_keywords={}):
    """
    Apply an arbitrary geometric transform.
    The given mapping function is used to find, for each point in the
    output, the corresponding coordinates in the input. The value of the
    input at those coordinates is determined by spline interpolation of
    the requested order.
    Parameters
    ----------
    %(input)s
    mapping : {callable, scipy.LowLevelCallable}
        A callable object that accepts a tuple of length equal to the output
        array rank, and returns the corresponding input coordinates as a tuple
        of length equal to the input array rank.
    output_shape : tuple of ints, optional
        Shape tuple.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode)s
    %(cval)s
    %(prefilter)s
    extra_arguments : tuple, optional
        Extra arguments passed to `mapping`.
    extra_keywords : dict, optional
        Extra keywords passed to `mapping`.
    Returns
    -------
    output : ndarray
        The filtered input.
    See Also
    --------
    map_coordinates, affine_transform, spline_filter1d
    Notes
    -----
    This function also accepts low-level callback functions with one
    the following signatures and wrapped in `scipy.LowLevelCallable`:
    .. code:: c
       int mapping(npy_intp *output_coordinates, double *input_coordinates,
                   int output_rank, int input_rank, void *user_data)
       int mapping(intptr_t *output_coordinates, double *input_coordinates,
                   int output_rank, int input_rank, void *user_data)
    The calling function iterates over the elements of the output array,
    calling the callback function at each element. The coordinates of the
    current output element are passed through ``output_coordinates``. The
    callback function must return the coordinates at which the input must
    be interpolated in ``input_coordinates``. The rank of the input and
    output arrays are given by ``input_rank`` and ``output_rank``
    respectively. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.
    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.
    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.ndimage import geometric_transform
    >>> a = np.arange(12.).reshape((4, 3))
    >>> def shift_func(output_coords):
    ...     return (output_coords[0] - 0.5, output_coords[1] - 0.5)
    ...
    >>> geometric_transform(a, shift_func)
    array([[ 0.   ,  0.   ,  0.   ],
           [ 0.   ,  1.362,  2.738],
           [ 0.   ,  4.812,  6.187],
           [ 0.   ,  8.263,  9.637]])
    >>> b = [1, 2, 3, 4, 5]
    >>> def shift_func(output_coords):
    ...     return (output_coords[0] - 3,)
    ...
    >>> geometric_transform(b, shift_func, mode='constant')
    array([0, 0, 0, 1, 2])
    >>> geometric_transform(b, shift_func, mode='nearest')
    array([1, 1, 1, 1, 2])
    >>> geometric_transform(b, shift_func, mode='reflect')
    array([3, 2, 1, 1, 2])
    >>> geometric_transform(b, shift_func, mode='wrap')
    array([2, 3, 4, 1, 2])
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    # The output defaults to the same shape as the input.
    if output_shape is None:
        output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    mode = _ni_support._extend_mode_to_code(mode)
    # Convert image values to b-spline coefficients unless suppressed.
    if prefilter and order > 1:
        filtered = spline_filter(input, order, output=numpy.float64)
    else:
        filtered = input
    output = _ni_support._get_output(output, input, shape=output_shape)
    # The C backend calls `mapping` per output element; the three None
    # slots select the generic-callback code path.
    _nd_image.geometric_transform(filtered, mapping, None, None, None, output,
                                  order, mode, cval, extra_arguments,
                                  extra_keywords)
    return output
@docfiller
def map_coordinates(input, coordinates, output=None, order=3,
                    mode='constant', cval=0.0, prefilter=True):
    """
    Map the input array to new coordinates by interpolation.

    The array of coordinates is used to find, for each point in the output,
    the corresponding coordinates in the input. The value of the input at
    those coordinates is determined by spline interpolation of the
    requested order.

    The shape of the output is derived from that of the coordinate
    array by dropping the first axis. The values of the array along
    the first axis are the coordinates in the input array at which the
    output value is found.

    Parameters
    ----------
    %(input)s
    coordinates : array_like
        The coordinates at which `input` is evaluated.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    map_coordinates : ndarray
        The result of transforming the input. The shape of the output is
        derived from that of `coordinates` by dropping the first axis.

    See Also
    --------
    spline_filter, geometric_transform, scipy.interpolate

    Examples
    --------
    >>> from scipy import ndimage
    >>> a = np.arange(12.).reshape((4, 3))
    >>> a
    array([[  0.,   1.,   2.],
           [  3.,   4.,   5.],
           [  6.,   7.,   8.],
           [  9.,  10.,  11.]])
    >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2.,  7.])

    Above, the interpolated value of a[0.5, 0.5] gives output[0], while
    a[2, 1] is output[1].

    >>> inds = np.array([[0.5, 2], [0.5, 4]])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
    array([  2. , -33.3])
    >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
    array([ 2.,  8.])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    data = numpy.asarray(input)
    if numpy.iscomplexobj(data):
        raise TypeError('Complex type not supported')
    coords = numpy.asarray(coordinates)
    if numpy.iscomplexobj(coords):
        raise TypeError('Complex type not supported')
    # The output shape is the coordinate array's shape minus its first
    # axis, which must index exactly one coordinate per input dimension.
    output_shape = coords.shape[1:]
    if data.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    if coords.shape[0] != data.ndim:
        raise RuntimeError('invalid shape for coordinate array')
    mode_code = _ni_support._extend_mode_to_code(mode)
    # Convert image values to b-spline coefficients unless suppressed.
    if prefilter and order > 1:
        filtered = spline_filter(data, order, output=numpy.float64)
    else:
        filtered = data
    output = _ni_support._get_output(output, data, shape=output_shape)
    _nd_image.geometric_transform(filtered, None, coords, None, None,
                                  output, order, mode_code, cval, None, None)
    return output
@docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
                     output=None, order=3,
                     mode='constant', cval=0.0, prefilter=True):
    """
    Apply an affine transformation.
    Given an output image pixel index vector ``o``, the pixel value
    is determined from the input image at position
    ``np.dot(matrix, o) + offset``.
    Parameters
    ----------
    %(input)s
    matrix : ndarray
        The inverse coordinate transformation matrix, mapping output
        coordinates to input coordinates. If ``ndim`` is the number of
        dimensions of ``input``, the given matrix must have one of the
        following shapes:
            - ``(ndim, ndim)``: the linear transformation matrix for each
              output coordinate.
            - ``(ndim,)``: assume that the 2D transformation matrix is
              diagonal, with the diagonal specified by the given value. A more
              efficient algorithm is then used that exploits the separability
              of the problem.
            - ``(ndim + 1, ndim + 1)``: assume that the transformation is
              specified using homogeneous coordinates [1]_. In this case, any
              value passed to ``offset`` is ignored.
            - ``(ndim, ndim + 1)``: as above, but the bottom row of a
              homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
              and may be omitted.
    offset : float or sequence, optional
        The offset into the array where the transform is applied. If a float,
        `offset` is the same for each axis. If a sequence, `offset` should
        contain one value for each axis.
    output_shape : tuple of ints, optional
        Shape tuple.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode)s
    %(cval)s
    %(prefilter)s
    Returns
    -------
    affine_transform : ndarray
        The transformed input.
    Notes
    -----
    The given matrix and offset are used to find for each point in the
    output the corresponding coordinates in the input by an affine
    transformation. The value of the input at those coordinates is
    determined by spline interpolation of the requested order. Points
    outside the boundaries of the input are filled according to the given
    mode.
    .. versionchanged:: 0.18.0
        Previously, the exact interpretation of the affine transformation
        depended on whether the matrix was supplied as a one-dimensional or
        two-dimensional array. If a one-dimensional array was supplied
        to the matrix parameter, the output pixel value at index ``o``
        was determined from the input image at position
        ``matrix * (o + offset)``.
    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    # The output defaults to the same shape as the input.
    if output_shape is None:
        output_shape = input.shape
    if input.ndim < 1 or len(output_shape) < 1:
        raise RuntimeError('input and output rank must be > 0')
    mode = _ni_support._extend_mode_to_code(mode)
    # Convert image values to b-spline coefficients unless suppressed.
    if prefilter and order > 1:
        filtered = spline_filter(input, order, output=numpy.float64)
    else:
        filtered = input
    output = _ni_support._get_output(output, input,
                                     shape=output_shape)
    matrix = numpy.asarray(matrix, dtype=numpy.float64)
    if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
        raise RuntimeError('no proper affine matrix provided')
    # Detect the homogeneous-coordinate forms: (ndim+1, ndim+1) or
    # (ndim, ndim+1); the offset is then taken from the last column.
    if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
            (matrix.shape[0] in [input.ndim, input.ndim + 1])):
        if matrix.shape[0] == input.ndim + 1:
            # A full homogeneous matrix must end with [0, ..., 0, 1].
            exptd = [0] * input.ndim + [1]
            if not numpy.all(matrix[input.ndim] == exptd):
                msg = ('Expected homogeneous transformation matrix with '
                       'shape %s for image shape %s, but bottom row was '
                       'not equal to %s' % (matrix.shape, input.shape, exptd))
                raise ValueError(msg)
        # assume input is homogeneous coordinate transformation matrix
        offset = matrix[:input.ndim, input.ndim]
        matrix = matrix[:input.ndim, :input.ndim]
    if matrix.shape[0] != input.ndim:
        raise RuntimeError('affine matrix has wrong number of rows')
    if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
        raise RuntimeError('affine matrix has wrong number of columns')
    # The C backend requires contiguous arrays.
    if not matrix.flags.contiguous:
        matrix = matrix.copy()
    offset = _ni_support._normalize_sequence(offset, input.ndim)
    offset = numpy.asarray(offset, dtype=numpy.float64)
    if offset.ndim != 1 or offset.shape[0] < 1:
        raise RuntimeError('no proper offset provided')
    if not offset.flags.contiguous:
        offset = offset.copy()
    if matrix.ndim == 1:
        # Diagonal matrix: use the faster separable zoom/shift code path.
        warnings.warn(
            "The behaviour of affine_transform with a one-dimensional "
            "array supplied for the matrix parameter has changed in "
            "scipy 0.18.0."
        )
        _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
                             mode, cval)
    else:
        _nd_image.geometric_transform(filtered, None, None, matrix, offset,
                                      output, order, mode, cval, None, None)
    return output
@docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
          prefilter=True):
    """
    Shift an array.

    The array is shifted using spline interpolation of the requested order.
    Points outside the boundaries of the input are filled according to the
    given mode.

    Parameters
    ----------
    %(input)s
    shift : float or sequence
        The shift along the axes. If a float, `shift` is the same for each
        axis. If a sequence, `shift` should contain one value for each axis.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    shift : ndarray
        The shifted input.
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    data = numpy.asarray(input)
    if numpy.iscomplexobj(data):
        raise TypeError('Complex type not supported')
    if data.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    mode_code = _ni_support._extend_mode_to_code(mode)
    # Convert image values to b-spline coefficients unless suppressed.
    if prefilter and order > 1:
        filtered = spline_filter(data, order, output=numpy.float64)
    else:
        filtered = data
    output = _ni_support._get_output(output, data)
    amounts = _ni_support._normalize_sequence(shift, data.ndim)
    # The per-axis shifts are negated before being handed to the backend.
    amounts = numpy.asarray([-value for value in amounts],
                            dtype=numpy.float64)
    if not amounts.flags.contiguous:
        amounts = amounts.copy()
    _nd_image.zoom_shift(filtered, None, amounts, output, order, mode_code,
                         cval)
    return output
@docfiller
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
         prefilter=True):
    """
    Zoom an array.
    The array is zoomed using spline interpolation of the requested order.
    Parameters
    ----------
    %(input)s
    zoom : float or sequence
        The zoom factor along the axes. If a float, `zoom` is the same for each
        axis. If a sequence, `zoom` should contain one value for each axis.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode)s
    %(cval)s
    %(prefilter)s
    Returns
    -------
    zoom : ndarray
        The zoomed input.
    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.zoom(ascent, 3.0)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    >>> print(ascent.shape)
    (512, 512)
    >>> print(result.shape)
    (1536, 1536)
    """
    if order < 0 or order > 5:
        raise RuntimeError('spline order not supported')
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    if input.ndim < 1:
        raise RuntimeError('input and output rank must be > 0')
    mode = _ni_support._extend_mode_to_code(mode)
    # Convert image values to b-spline coefficients unless suppressed.
    if prefilter and order > 1:
        filtered = spline_filter(input, order, output=numpy.float64)
    else:
        filtered = input
    zoom = _ni_support._normalize_sequence(zoom, input.ndim)
    # Output shape is computed with round(); the int() version is kept
    # only to detect inputs whose result changed in scipy 0.13.0.
    output_shape = tuple(
        [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
    output_shape_old = tuple(
        [int(ii * jj) for ii, jj in zip(input.shape, zoom)])
    if output_shape != output_shape_old:
        warnings.warn(
            "From scipy 0.13.0, the output shape of zoom() is calculated "
            "with round() instead of int() - for these inputs the size of "
            "the returned array has changed.", UserWarning)
    # Recompute the effective per-axis zoom from the rounded output
    # shape so that the first and last samples map onto each other.
    zoom_div = numpy.array(output_shape, float) - 1
    # Zooming to infinite values is unpredictable, so just choose
    # zoom factor 1 instead
    zoom = numpy.divide(numpy.array(input.shape) - 1, zoom_div,
                        out=numpy.ones_like(input.shape, dtype=numpy.float64),
                        where=zoom_div != 0)
    output = _ni_support._get_output(output, input,
                                     shape=output_shape)
    # The C backend requires a contiguous zoom-factor array.
    zoom = numpy.ascontiguousarray(zoom)
    _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
    return output
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
@docfiller
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
           mode='constant', cval=0.0, prefilter=True):
    """
    Rotate an array.

    The array is rotated in the plane defined by the two axes given by the
    `axes` parameter using spline interpolation of the requested order.

    Parameters
    ----------
    %(input)s
    angle : float
        The rotation angle in degrees.
    axes : tuple of 2 ints, optional
        The two axes that define the plane of rotation. Default is the first
        two axes.
    reshape : bool, optional
        If `reshape` is true, the output shape is adapted so that the input
        array is contained completely in the output. Default is True.
    %(output)s
    order : int, optional
        The order of the spline interpolation, default is 3.
        The order has to be in the range 0-5.
    %(mode)s
    %(cval)s
    %(prefilter)s

    Returns
    -------
    rotate : ndarray
        The rotated input.

    Raises
    ------
    RuntimeError
        If either entry of `axes` is outside ``[-input.ndim, input.ndim)``.
    """
    input = numpy.asarray(input)
    axes = list(axes)
    rank = input.ndim
    # Normalize negative axis indices.
    if axes[0] < 0:
        axes[0] += rank
    if axes[1] < 0:
        axes[1] += rank
    # Bug fix: the check previously used `> rank`, which let an axis
    # equal to `rank` slip through validation and fail later with an
    # obscure IndexError when indexing input.shape. Valid indices are
    # 0 .. rank-1, so reject anything >= rank.
    if axes[0] < 0 or axes[1] < 0 or axes[0] >= rank or axes[1] >= rank:
        raise RuntimeError('invalid rotation plane specified')
    # Keep the plane axes in ascending order.
    if axes[0] > axes[1]:
        axes = axes[1], axes[0]
    angle = numpy.pi / 180 * angle
    m11 = math.cos(angle)
    m12 = math.sin(angle)
    m21 = -math.sin(angle)
    m22 = math.cos(angle)
    # Inverse (output -> input) rotation matrix used for resampling.
    matrix = numpy.array([[m11, m12],
                          [m21, m22]], dtype=numpy.float64)
    iy = input.shape[axes[0]]
    ix = input.shape[axes[1]]
    if reshape:
        # Rotate the input's corner coordinates with the forward matrix
        # to find the bounding box that holds the whole rotated image.
        mtrx = numpy.array([[m11, -m21],
                            [-m12, m22]], dtype=numpy.float64)
        minc = [0, 0]
        maxc = [0, 0]
        coor = numpy.dot(mtrx, [0, ix])
        minc, maxc = _minmax(coor, minc, maxc)
        coor = numpy.dot(mtrx, [iy, 0])
        minc, maxc = _minmax(coor, minc, maxc)
        coor = numpy.dot(mtrx, [iy, ix])
        minc, maxc = _minmax(coor, minc, maxc)
        oy = int(maxc[0] - minc[0] + 0.5)
        ox = int(maxc[1] - minc[1] + 0.5)
    else:
        oy = input.shape[axes[0]]
        ox = input.shape[axes[1]]
    # Offset chosen so the rotation happens about the image center.
    offset = numpy.zeros((2,), dtype=numpy.float64)
    offset[0] = float(oy) / 2.0 - 0.5
    offset[1] = float(ox) / 2.0 - 0.5
    offset = numpy.dot(matrix, offset)
    tmp = numpy.zeros((2,), dtype=numpy.float64)
    tmp[0] = float(iy) / 2.0 - 0.5
    tmp[1] = float(ix) / 2.0 - 0.5
    offset = tmp - offset
    output_shape = list(input.shape)
    output_shape[axes[0]] = oy
    output_shape[axes[1]] = ox
    output_shape = tuple(output_shape)
    output = _ni_support._get_output(output, input,
                                     shape=output_shape)
    if input.ndim <= 2:
        affine_transform(input, matrix, offset, output_shape, output,
                         order, mode, cval, prefilter)
    else:
        # For rank > 2, rotate each 2-D slice in the specified plane.
        coordinates = []
        # numpy.prod replaces the deprecated numpy.product alias
        # (removed in NumPy 2.0); behavior is identical.
        size = numpy.prod(input.shape, axis=0)
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
        for ii in range(input.ndim):
            if ii not in axes:
                coordinates.append(0)
            else:
                coordinates.append(slice(None, None, None))
        iter_axes = list(range(input.ndim))
        iter_axes.reverse()
        iter_axes.remove(axes[0])
        iter_axes.remove(axes[1])
        os = (output_shape[axes[0]], output_shape[axes[1]])
        for ii in range(size):
            ia = input[tuple(coordinates)]
            oa = output[tuple(coordinates)]
            affine_transform(ia, matrix, offset, os, oa, order, mode,
                             cval, prefilter)
            # Advance the multi-index over the non-rotated axes
            # (odometer-style increment).
            for jj in iter_axes:
                if coordinates[jj] < input.shape[jj] - 1:
                    coordinates[jj] += 1
                    break
                else:
                    coordinates[jj] = 0
    return output
|
|
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light weight EcmaScript state tracker that reads tokens and tracks state."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import re
import javascripttokenizer
import javascripttokens
import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class DocFlag(object):
  """Generic doc flag object.

  Parses a single JSDoc flag (e.g. ``@param {string} name Description``)
  out of the token stream, extracting the type, name and description parts
  where the flag kind supports them.

  Attribute:
    flag_type: param, return, define, type, etc.
    flag_token: The flag token.
    type_start_token: The first token specifying the flag type,
      including braces.
    type_end_token: The last token specifying the flag type,
      including braces.
    type: The type spec.
    name_token: The token specifying the flag name.
    name: The flag name
    description_start_token: The first token in the description.
    description_end_token: The end token in the description.
    description: The description.
  """
  # Please keep these lists alphabetized.
  # The list of standard jsdoc tags is from the Closure annotation
  # reference (the source URL was dropped from this copy of the comment).
  STANDARD_DOC = frozenset([
      'author',
      'bug',
      'classTemplate',
      'consistentIdGenerator',
      'const',
      'constructor',
      'define',
      'deprecated',
      'dict',
      'enum',
      'export',
      'expose',
      'extends',
      'externs',
      'fileoverview',
      'idGenerator',
      'implements',
      'implicitCast',
      'interface',
      'lends',
      'license',
      'ngInject',  # This annotation is specific to AngularJS.
      'noalias',
      'nocompile',
      'nosideeffects',
      'override',
      'owner',
      'param',
      'preserve',
      'private',
      'protected',
      'public',
      'return',
      'see',
      'stableIdGenerator',
      'struct',
      'supported',
      'template',
      'this',
      'type',
      'typedef',
      'wizaction',  # This annotation is specific to Wiz.
      'wizmodule',  # This annotation is specific to Wiz.
      ])

  ANNOTATION = frozenset(['preserveTry', 'suppress'])

  LEGAL_DOC = STANDARD_DOC | ANNOTATION

  # Includes all Closure Compiler @suppress types.
  # Not all of these annotations are interpreted by Closure Linter.
  #
  # Specific cases:
  # - accessControls is supported by the compiler at the expression
  #   and method level to suppress warnings about private/protected
  #   access (method level applies to all references in the method).
  #   The linter mimics the compiler behavior.
  SUPPRESS_TYPES = frozenset([
      'accessControls',
      'ambiguousFunctionDecl',
      'checkRegExp',
      'checkStructDictInheritance',
      'checkTypes',
      'checkVars',
      'const',
      'constantProperty',
      'deprecated',
      'duplicate',
      'es5Strict',
      'externsValidation',
      'extraProvide',
      'extraRequire',
      'fileoverviewTags',
      'globalThis',
      'internetExplorerChecks',
      'invalidCasts',
      'missingProperties',
      'missingProvide',
      'missingRequire',
      'missingReturn',
      'nonStandardJsDocs',
      'strictModuleDepCheck',
      'tweakValidation',
      'typeInvalidation',
      'undefinedNames',
      'undefinedVars',
      'underscore',
      'unknownDefines',
      'unusedPrivateMembers',
      'uselessCode',
      'visibility',
      'with'])

  # Flag kinds that carry a free-form description after the type/name.
  HAS_DESCRIPTION = frozenset([
      'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param',
      'preserve', 'return', 'supported'])

  # Flag kinds that may carry a {type} expression.
  HAS_TYPE = frozenset([
      'define', 'enum', 'extends', 'implements', 'param', 'return', 'type',
      'suppress', 'const'])

  # Flag kinds whose payload is a type and nothing else.
  TYPE_ONLY = frozenset(['enum', 'extends', 'implements', 'suppress', 'type',
                         'const'])

  # Flag kinds that carry a name (currently only @param).
  HAS_NAME = frozenset(['param'])

  # Matches a comment line containing only a doc prefix and/or whitespace.
  EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
  # Matches a string made up only of whitespace.
  EMPTY_STRING = re.compile(r'^\s*$')

  def __init__(self, flag_token):
    """Creates the DocFlag object and attaches it to the given start token.

    Args:
      flag_token: The starting token of the flag.
    """
    self.flag_token = flag_token
    # '@param' -> 'param'; surrounding whitespace is not significant.
    self.flag_type = flag_token.string.strip().lstrip('@')

    # Extract type, if applicable.
    self.type = None
    self.type_start_token = None
    self.type_end_token = None
    if self.flag_type in self.HAS_TYPE:
      # Look for a brace-delimited type, e.g. {string}, before the flag ends.
      brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
                                    Type.FLAG_ENDING_TYPES)
      if brace:
        end_token, contents = _GetMatchingEndBraceAndContents(brace)
        self.type = contents
        self.type_start_token = brace
        self.type_end_token = end_token
      elif (self.flag_type in self.TYPE_ONLY and
            flag_token.next.type not in Type.FLAG_ENDING_TYPES and
            flag_token.line_number == flag_token.next.line_number):
        # b/10407058. If the flag is expected to be followed by a type then
        # search for type in same line only. If no token after flag in same
        # line then conclude that no type is specified.
        self.type_start_token = flag_token.next
        self.type_end_token, self.type = _GetEndTokenAndContents(
            self.type_start_token)
        if self.type is not None:
          self.type = self.type.strip()

    # Extract name, if applicable.
    self.name_token = None
    self.name = None
    if self.flag_type in self.HAS_NAME:
      # Handle bad case, name could be immediately after flag token.
      self.name_token = _GetNextPartialIdentifierToken(flag_token)

      # Handle good case, if found token is after type start, look for
      # a identifier (substring to cover cases like [cnt] b/4197272) after
      # type end, since types contain identifiers.
      if (self.type and self.name_token and
          tokenutil.Compare(self.name_token,
                            self.type_start_token) > 0):
        self.name_token = _GetNextPartialIdentifierToken(
            self.type_end_token)

      if self.name_token:
        self.name = self.name_token.string

    # Extract description, if applicable.
    self.description_start_token = None
    self.description_end_token = None
    self.description = None
    if self.flag_type in self.HAS_DESCRIPTION:
      # The description starts after whichever of name/type appears last.
      search_start_token = flag_token
      if self.name_token and self.type_end_token:
        if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
          search_start_token = self.type_end_token
        else:
          search_start_token = self.name_token
      elif self.name_token:
        search_start_token = self.name_token
      elif self.type:
        search_start_token = self.type_end_token

      interesting_token = tokenutil.Search(search_start_token,
          Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
      if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
        self.description_start_token = interesting_token
        self.description_end_token, self.description = (
            _GetEndTokenAndContents(interesting_token))
class DocComment(object):
  """JavaScript doc comment object.

  Attributes:
    ordered_params: Ordered list of parameters documented.
    start_token: The token that starts the doc comment.
    end_token: The token that ends the doc comment.
    suppressions: Map of suppression type to the token that added it.
  """

  def __init__(self, start_token):
    """Create the doc comment object.

    Args:
      start_token: The first token in the doc comment.
    """
    # Private flag list; access via AddFlag/GetFlag/HasFlag/GetDocFlags.
    self.__flags = []
    self.start_token = start_token
    self.end_token = None
    self.suppressions = {}
    self.invalidated = False

  @property
  def ordered_params(self):
    """Gives the list of parameter names as a list of strings."""
    params = []
    for flag in self.__flags:
      if flag.flag_type == 'param' and flag.name:
        params.append(flag.name)
    return params

  def Invalidate(self):
    """Indicate that the JSDoc is well-formed but we had problems parsing it.

    This is a short-circuiting mechanism so that we don't emit false
    positives about well-formed doc comments just because we don't support
    hot new syntaxes.
    """
    self.invalidated = True

  def IsInvalidated(self):
    """Test whether Invalidate() has been called."""
    return self.invalidated

  def AddSuppression(self, token):
    """Add a new error suppression flag.

    Args:
      token: The suppression flag token.
    """
    #TODO(user): Error if no braces
    brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE],
                                  [Type.DOC_FLAG])
    if brace:
      end_token, contents = _GetMatchingEndBraceAndContents(brace)
      # One @suppress flag may list several types separated by '|'.
      for suppression in contents.split('|'):
        self.suppressions[suppression] = token

  def SuppressionOnly(self):
    """Returns whether this comment contains only suppression flags."""
    # An empty comment is not "suppression only".
    if not self.__flags:
      return False
    for flag in self.__flags:
      if flag.flag_type != 'suppress':
        return False
    return True

  def AddFlag(self, flag):
    """Add a new document flag.

    Args:
      flag: DocFlag object.
    """
    self.__flags.append(flag)

  def InheritsDocumentation(self):
    """Test if the jsdoc implies documentation inheritance.

    Returns:
      True if documentation may be pulled off the superclass.
    """
    return self.HasFlag('inheritDoc') or self.HasFlag('override')

  def HasFlag(self, flag_type):
    """Test if the given flag has been set.

    Args:
      flag_type: The type of the flag to check.

    Returns:
      True if the flag is set.
    """
    for flag in self.__flags:
      if flag.flag_type == flag_type:
        return True
    return False

  def GetFlag(self, flag_type):
    """Gets the last flag of the given type.

    Args:
      flag_type: The type of the flag to get.

    Returns:
      The last instance of the given flag type in this doc comment, or
      None (implicitly) if no such flag exists.
    """
    for flag in reversed(self.__flags):
      if flag.flag_type == flag_type:
        return flag

  def GetDocFlags(self):
    """Return the doc flags for this comment."""
    # Return a copy so callers cannot mutate the private list.
    return list(self.__flags)

  def _YieldDescriptionTokens(self):
    # Yields the tokens forming the comment's free-form description,
    # stopping at the comment end, the first doc flag, or any non-comment
    # token; structural comment tokens are filtered out.
    for token in self.start_token:
      if (token is self.end_token or
          token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or
          token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES):
        return
      if token.type not in [
          javascripttokens.JavaScriptTokenType.START_DOC_COMMENT,
          javascripttokens.JavaScriptTokenType.END_DOC_COMMENT,
          javascripttokens.JavaScriptTokenType.DOC_PREFIX]:
        yield token

  @property
  def description(self):
    """The comment's free-form description, rendered as a string."""
    return tokenutil.TokensToString(
        self._YieldDescriptionTokens())

  def GetTargetIdentifier(self):
    """Returns the identifier (as a string) that this is a comment for.

    Note that this method uses GetIdentifierForToken to get the full
    identifier, even if broken up by whitespace, newlines, or comments,
    and thus could be longer than GetTargetToken().string.

    Returns:
      The identifier for the token this comment is for.
    """
    token = self.GetTargetToken()
    if token:
      return tokenutil.GetIdentifierForToken(token)

  def GetTargetToken(self):
    """Get this comment's target token.

    Returns:
      The token that is the target of this comment, or None if there isn't one.
    """
    # File overviews describe the file, not a token.
    if self.HasFlag('fileoverview'):
      return

    skip_types = frozenset([
        Type.WHITESPACE,
        Type.BLANK_LINE,
        Type.START_PAREN])

    target_types = frozenset([
        Type.FUNCTION_NAME,
        Type.IDENTIFIER,
        Type.SIMPLE_LVALUE])

    token = self.end_token.next
    while token:
      if token.type in target_types:
        return token

      # Handles the case of a comment on "var foo = ..."
      if token.IsKeyword('var'):
        next_code_token = tokenutil.CustomSearch(
            token,
            lambda t: t.type not in Type.NON_CODE_TYPES)

        if (next_code_token and
            next_code_token.IsType(Type.SIMPLE_LVALUE)):
          return next_code_token

        return

      # Handles the case of a comment on "function foo () {}"
      if token.type is Type.FUNCTION_DECLARATION:
        next_code_token = tokenutil.CustomSearch(
            token,
            lambda t: t.type not in Type.NON_CODE_TYPES)

        if next_code_token.IsType(Type.FUNCTION_NAME):
          return next_code_token

        return

      # Skip types will end the search.
      if token.type not in skip_types:
        return

      token = token.next

  def CompareParameters(self, params):
    """Computes the edit distance and list from the function params to the docs.

    Uses the Levenshtein edit distance algorithm, with code modified from
    http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python

    Args:
      params: The parameter list for the function declaration.

    Returns:
      The edit distance, the edit list.
    """
    source_len, target_len = len(self.ordered_params), len(params)
    edit_lists = [[]]
    distance = [[]]
    # Base row: transforming an empty source into params[0..i] is i inserts.
    for i in range(target_len + 1):
      edit_lists[0].append(['I'] * i)
      distance[0].append(i)
    # Base column: transforming source[0..j] into nothing is j deletes.
    for j in range(1, source_len + 1):
      edit_lists.append([['D'] * j])
      distance.append([j])
    for i in range(source_len):
      for j in range(target_len):
        cost = 1
        if self.ordered_params[i] == params[j]:
          cost = 0
        deletion = distance[i][j + 1] + 1
        insertion = distance[i + 1][j] + 1
        substitution = distance[i][j] + cost
        edit_list = None
        best = None
        # NOTE(review): unlike the insertion/substitution branches, the
        # deletion branch below does not append edit_list to
        # edit_lists[i + 1], yet later iterations index edit_lists[i + 1][j].
        # Verify against upstream whether this asymmetry is intentional.
        if deletion <= insertion and deletion <= substitution:
          # Deletion is best.
          best = deletion
          edit_list = list(edit_lists[i][j + 1])
          edit_list.append('D')
        elif insertion <= substitution:
          # Insertion is best.
          best = insertion
          edit_list = list(edit_lists[i + 1][j])
          edit_list.append('I')
          edit_lists[i + 1].append(edit_list)
        else:
          # Substitution is best.
          best = substitution
          edit_list = list(edit_lists[i][j])
          if cost:
            edit_list.append('S')
          else:
            edit_list.append('=')
          edit_lists[i + 1].append(edit_list)
        distance[i + 1].append(best)
    return distance[source_len][target_len], edit_lists[source_len][
        target_len]

  def __repr__(self):
    """Returns a string representation of this object.

    Returns:
      A string representation of this object.
    """
    return '<DocComment: %s, %s>' % (
        str(self.ordered_params), str(self.__flags))
#
# Helper methods used by DocFlag and DocComment to parse out flag information.
#
def _GetMatchingEndBraceAndContents(start_brace):
  """Returns the matching end brace and contents between the two braces.

  If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then
  that token is used as the matching ending token. Contents will have all
  comment prefixes stripped out of them, and all comment prefixes in between the
  start and end tokens will be split out into separate DOC_PREFIX tokens.

  Args:
    start_brace: The DOC_START_BRACE token immediately before desired contents.

  Returns:
    The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
    of the contents between the matching tokens, minus any comment prefixes.
  """
  # Braces may nest (e.g. {function({string}): number}), so count both.
  open_count = 1
  close_count = 0
  contents = []
  # We don't consider the start brace part of the type string.
  token = start_brace.next
  while open_count != close_count:
    if token.type == Type.DOC_START_BRACE:
      open_count += 1
    elif token.type == Type.DOC_END_BRACE:
      close_count += 1
    if token.type != Type.DOC_PREFIX:
      contents.append(token.string)
    if token.type in Type.FLAG_ENDING_TYPES:
      break
    token = token.next
  # Both exits (balanced brace or flag-ending token) leave `token` on the
  # terminating token; step back over it and drop its string from contents.
  #Don't include the end token (end brace, end doc comment, etc.) in type.
  token = token.previous
  contents = contents[:-1]
  return token, ''.join(contents)
def _GetNextPartialIdentifierToken(start_token):
  """Returns the first token having identifier as substring after a token.

  Walks the token stream forward from the token after start_token, giving
  up as soon as a FLAG_ENDING_TYPE token (or the end of the stream) is
  reached.  A token qualifies when it is a COMMENT token whose string
  contains a match for the tokenizer's IDENTIFIER pattern.

  Args:
    start_token: The token to start searching after.

  Returns:
    The token found containing identifier, None otherwise.
  """
  identifier_pattern = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER
  cursor = start_token.next
  while cursor:
    if cursor.type in Type.FLAG_ENDING_TYPES:
      # A flag-ending token bounds the search.
      return None
    if cursor.type == Type.COMMENT and identifier_pattern.search(cursor.string):
      return cursor
    cursor = cursor.next
  return None
def _GetEndTokenAndContents(start_token):
  """Returns last content token and all contents before FLAG_ENDING_TYPE token.

  Comment prefixes are split into DOC_PREFIX tokens and stripped from the
  returned contents.

  Args:
    start_token: The token immediately before the first content token.

  Returns:
    The last content token and a string of all contents including start and
    end tokens, with comment prefixes stripped.
  """
  iterator = start_token
  last_line = iterator.line_number
  last_token = None
  contents = ''
  # Tracks nesting inside inline doc flags like {@link ...}.
  doc_depth = 0
  while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
    if (iterator.IsFirstInLine() and
        DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
      # If we have a blank comment line, consider that an implicit
      # ending of the description. This handles a case like:
      #
      # * @return {boolean} True
      # *
      # * Note: This is a sentence.
      #
      # The note is not part of the @return description, but there was
      # no definitive ending token. Rather there was a line containing
      # only a doc comment prefix or whitespace.
      break

    # b/2983692
    # don't prematurely match against a @flag if inside a doc flag
    # need to think about what is the correct behavior for unterminated
    # inline doc flags
    if (iterator.type == Type.DOC_START_BRACE and
        iterator.next.type == Type.DOC_INLINE_FLAG):
      doc_depth += 1
    elif (iterator.type == Type.DOC_END_BRACE and
          doc_depth > 0):
      doc_depth -= 1

    if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
      contents += iterator.string
      last_token = iterator

    iterator = iterator.next
    # Preserve line structure: emit a newline whenever the stream advances
    # to a new source line.
    if iterator.line_number != last_line:
      contents += '\n'
      last_line = iterator.line_number

  end_token = last_token
  if DocFlag.EMPTY_STRING.match(contents):
    contents = None
  else:
    # Strip trailing newline.
    contents = contents[:-1]

  return end_token, contents
class Function(object):
  """Data about a JavaScript function.

  Attributes:
    block_depth: Block depth the function began at.
    doc: The DocComment associated with the function.
    has_return: If the function has a return value.
    has_this: If the function references the 'this' object.
    is_assigned: If the function is part of an assignment.
    is_constructor: If the function is a constructor.
    name: The name of the function, whether given in the function keyword or
        as the lvalue the function is assigned to.
    start_token: First token of the function (the function' keyword token).
    end_token: Last token of the function (the closing '}' token).
    parameters: List of parameter names.
  """

  def __init__(self, block_depth, is_assigned, doc, name):
    # Facts supplied directly by the state tracker.
    self.block_depth = block_depth
    self.is_assigned = is_assigned
    self.doc = doc
    self.name = name
    # Derived from the doc comment.  `doc and ...` deliberately propagates
    # a falsy doc (e.g. None) unchanged instead of coercing to bool.
    self.is_constructor = doc and doc.HasFlag('constructor')
    self.is_interface = doc and doc.HasFlag('interface')
    # Facts discovered later while walking the function body.
    for body_flag in ('has_return', 'has_throw', 'has_this'):
      setattr(self, body_flag, False)
    # Filled in by the tracker once the corresponding tokens are seen.
    for deferred in ('start_token', 'end_token', 'parameters'):
      setattr(self, deferred, None)
class StateTracker(object):
  """EcmaScript state tracker.

  Tracks block depth, function names, etc. within an EcmaScript token stream.
  """

  # Block-type markers stored in self._block_types.
  OBJECT_LITERAL = 'o'
  CODE = 'c'

  def __init__(self, doc_flag=DocFlag):
    """Initializes a JavaScript token stream state tracker.

    Args:
      doc_flag: An optional custom DocFlag used for validating
          documentation flags.
    """
    self._doc_flag = doc_flag
    self.Reset()

  def Reset(self):
    """Resets the state tracker to prepare for processing a new page."""
    self._block_depth = 0
    self._is_block_close = False
    self._paren_depth = 0
    self._function_stack = []
    self._functions_by_name = {}
    self._last_comment = None
    self._doc_comment = None
    self._cumulative_params = None
    self._block_types = []
    self._last_non_space_token = None
    self._last_line = None
    self._first_token = None
    self._documented_identifiers = set()
    self._variables_in_scope = []

  def InFunction(self):
    """Returns true if the current token is within a function.

    Returns:
      True if the current token is within a function.
    """
    return bool(self._function_stack)

  def InConstructor(self):
    """Returns true if the current token is within a constructor.

    Returns:
      True if the current token is within a constructor.
    """
    return self.InFunction() and self._function_stack[-1].is_constructor

  def InInterfaceMethod(self):
    """Returns true if the current token is within an interface method.

    Returns:
      True if the current token is within an interface method.
    """
    if self.InFunction():
      if self._function_stack[-1].is_interface:
        return True
      else:
        # A prototype method of a class marked @interface is also an
        # interface method.
        name = self._function_stack[-1].name
        prototype_index = name.find('.prototype.')
        if prototype_index != -1:
          class_function_name = name[0:prototype_index]
          if (class_function_name in self._functions_by_name and
              self._functions_by_name[
                  class_function_name].is_interface):
            return True
    return False

  def InTopLevelFunction(self):
    """Returns true if the current token is within a top level function.

    Returns:
      True if the current token is within a top level function.
    """
    return len(self._function_stack) == 1 and self.InTopLevel()

  def InAssignedFunction(self):
    """Returns true if the current token is within a function variable.

    Returns:
      True if the current token is within a function variable.
    """
    return self.InFunction() and self._function_stack[-1].is_assigned

  def IsFunctionOpen(self):
    """Returns true if the current token is a function block open.

    Returns:
      True if the current token is a function block open.
    """
    return (self._function_stack and
            self._function_stack[-1].block_depth == self._block_depth - 1)

  def IsFunctionClose(self):
    """Returns true if the current token is a function block close.

    Returns:
      True if the current token is a function block close.
    """
    return (self._function_stack and
            self._function_stack[-1].block_depth == self._block_depth)

  def InBlock(self):
    """Returns true if the current token is within a block.

    Returns:
      True if the current token is within a block.
    """
    return bool(self._block_depth)

  def IsBlockClose(self):
    """Returns true if the current token is a block close.

    Returns:
      True if the current token is a block close.
    """
    return self._is_block_close

  def InObjectLiteral(self):
    """Returns true if the current token is within an object literal.

    Returns:
      True if the current token is within an object literal.
    """
    return self._block_depth and self._block_types[
        -1] == self.OBJECT_LITERAL

  def InObjectLiteralDescendant(self):
    """Returns true if the current token has an object literal ancestor.

    Returns:
      True if the current token has an object literal ancestor.
    """
    return self.OBJECT_LITERAL in self._block_types

  def InParentheses(self):
    """Returns true if the current token is within parentheses.

    Returns:
      True if the current token is within parentheses.
    """
    return bool(self._paren_depth)

  def ParenthesesDepth(self):
    """Returns the number of parens surrounding the token.

    Returns:
      The number of parenthesis surrounding the token.
    """
    return self._paren_depth

  def BlockDepth(self):
    """Returns the number of blocks in which the token is nested.

    Returns:
      The number of blocks in which the token is nested.
    """
    return self._block_depth

  def FunctionDepth(self):
    """Returns the number of functions in which the token is nested.

    Returns:
      The number of functions in which the token is nested.
    """
    return len(self._function_stack)

  def InTopLevel(self):
    """Whether we are at the top level in the class.

    This function call is language specific.  In some languages like
    JavaScript, a function is top level if it is not inside any parenthesis.
    In languages such as ActionScript, a function is top level if it is directly
    within a class.
    """
    raise TypeError('Abstract method InTopLevel not implemented')

  def GetBlockType(self, token):
    """Determine the block type given a START_BLOCK token.

    Code blocks come after parameters, keywords like else, and closing parens.

    Args:
      token: The current token. Can be assumed to be type START_BLOCK.

    Returns:
      Code block type for current token.
    """
    raise TypeError('Abstract method GetBlockType not implemented')

  def GetParams(self):
    """Returns the accumulated input params as an array.

    In some EcmaScript languages, input params are specified like
    (param:Type, param2:Type2, ...)
    in other they are specified just as
    (param, param2)
    We handle both formats for specifying parameters here and leave
    it to the compilers for each language to detect compile errors.
    This allows more code to be reused between lint checkers for various
    EcmaScript languages.

    Returns:
      The accumulated input params as an array.
    """
    params = []
    if self._cumulative_params:
      # Remove all whitespace, then split on commas.
      params = re.compile(r'\s+').sub('', self._cumulative_params).split(
          ',')
      # Strip out the type from parameters of the form name:Type.
      # NOTE(review): under Python 3 `map` returns an iterator, not a list;
      # this code appears to assume Python 2 semantics — verify.
      params = map(lambda param: param.split(':')[0], params)

    return params

  def GetLastComment(self):
    """Return the last plain comment that could be used as documentation.

    Returns:
      The last plain comment that could be used as documentation.
    """
    return self._last_comment

  def GetDocComment(self):
    """Return the most recent applicable documentation comment.

    Returns:
      The last applicable documentation comment.
    """
    return self._doc_comment

  def HasDocComment(self, identifier):
    """Returns whether the identifier has been documented yet.

    Args:
      identifier: The identifier.

    Returns:
      Whether the identifier has been documented yet.
    """
    return identifier in self._documented_identifiers

  def InDocComment(self):
    """Returns whether the current token is in a doc comment.

    Returns:
      Whether the current token is in a doc comment.
    """
    return self._doc_comment and self._doc_comment.end_token is None

  def GetDocFlag(self):
    """Returns the current documentation flags.

    Returns:
      The current documentation flags.
    """
    return self._doc_flag

  def IsTypeToken(self, t):
    # Returns True when token `t` lies strictly inside the {...} type
    # portion of a doc flag within the current (unclosed) doc comment.
    if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
                                              Type.DOC_FLAG,
                                              Type.DOC_INLINE_FLAG,
                                              Type.DOC_PREFIX):
      f = tokenutil.SearchUntil(t, [Type.DOC_FLAG],
                                [Type.START_DOC_COMMENT],
                                None, True)
      if (f and f.attached_object.type_start_token is not None and
          f.attached_object.type_end_token is not None):
        return (
            tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
            tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
    return False

  def GetFunction(self):
    """Return the function the current code block is a part of.

    Returns:
      The current Function object, or None (implicitly) if not in a function.
    """
    if self._function_stack:
      return self._function_stack[-1]

  def GetBlockDepth(self):
    """Return the block depth.

    Returns:
      The current block depth.
    """
    return self._block_depth

  def GetLastNonSpaceToken(self):
    """Return the last non whitespace token."""
    return self._last_non_space_token

  def GetLastLine(self):
    """Return the last line."""
    return self._last_line

  def GetFirstToken(self):
    """Return the very first token in the file."""
    return self._first_token

  def IsVariableInScope(self, token_string):
    """Checks if string is variable in current scope.

    For given string it checks whether the string is a defined variable
    (including function param) in current state.

    E.g. if variables defined (variables in current scope) is docs
    then docs, docs.length etc will be considered as variable in current
    scope. This will help in avoiding extra goog.require for variables.

    Args:
      token_string: String to check if it is a variable in current scope.

    Returns:
      true if given string is a variable in current scope.
    """
    for variable in self._variables_in_scope:
      # A dotted reference to a scoped variable also counts as in scope.
      if (token_string == variable
          or token_string.startswith(variable + '.')):
        return True
    return False

  def HandleToken(self, token, last_non_space_token):
    """Handles the given token and updates state.

    Args:
      token: The token to handle.
      last_non_space_token: The last non-whitespace token seen.  Not used by
          this base implementation; kept for subclass overrides.
    """
    self._is_block_close = False

    if not self._first_token:
      self._first_token = token

    # Track block depth.
    # NOTE: `type` intentionally shadows the builtin within this method,
    # preserved from the original code.
    type = token.type
    if type == Type.START_BLOCK:
      self._block_depth += 1

      # Subclasses need to handle block start very differently because
      # whether a block is a CODE or OBJECT_LITERAL block varies significantly
      # by language.
      self._block_types.append(self.GetBlockType(token))

      # When entering a function body, record its parameters.
      if self.InFunction():
        function = self._function_stack[-1]
        if self._block_depth == function.block_depth + 1:
          function.parameters = self.GetParams()

    # Track block depth.
    elif type == Type.END_BLOCK:
      self._is_block_close = not self.InObjectLiteral()
      self._block_depth -= 1
      self._block_types.pop()

    # Track parentheses depth.
    elif type == Type.START_PAREN:
      self._paren_depth += 1

    # Track parentheses depth.
    elif type == Type.END_PAREN:
      self._paren_depth -= 1

    elif type == Type.COMMENT:
      self._last_comment = token.string

    elif type == Type.START_DOC_COMMENT:
      self._last_comment = None
      self._doc_comment = DocComment(token)

    elif type == Type.END_DOC_COMMENT:
      self._doc_comment.end_token = token

    elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
      flag = self._doc_flag(token)
      token.attached_object = flag
      self._doc_comment.AddFlag(flag)

      if flag.flag_type == 'suppress':
        self._doc_comment.AddSuppression(token)

    elif type == Type.FUNCTION_DECLARATION:
      last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
                                         True)
      doc = None
      # Only functions outside of parens are eligible for documentation.
      if not self._paren_depth:
        doc = self._doc_comment

      name = ''
      is_assigned = last_code and (last_code.IsOperator('=') or
                                   last_code.IsOperator(
                                       '||') or last_code.IsOperator(
                                           '&&') or
                                   (last_code.IsOperator(
                                       ':') and not self.InObjectLiteral()))
      if is_assigned:
        # TODO(robbyw): This breaks for x[2] = ...
        # Must use loop to find full function name in the case of line-wrapped
        # declarations (bug 1220601) like:
        # my.function.foo.
        #   bar = function() ...
        identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE,
                                      None, True)
        while identifier and identifier.type in (
            Type.IDENTIFIER, Type.SIMPLE_LVALUE):
          name = identifier.string + name

          # Traverse behind us, skipping whitespace and comments.
          while True:
            identifier = identifier.previous
            if not identifier or not identifier.type in Type.NON_CODE_TYPES:
              break

      else:
        next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
        while next_token and next_token.IsType(Type.FUNCTION_NAME):
          name += next_token.string
          next_token = tokenutil.Search(next_token,
                                        Type.FUNCTION_NAME, 2)

      function = Function(self._block_depth, is_assigned, doc, name)
      function.start_token = token

      self._function_stack.append(function)
      self._functions_by_name[name] = function

      # Add a delimiter in stack for scope variables to define start of
      # function. This helps in popping variables of this function when
      # function declaration ends.
      self._variables_in_scope.append('')

    elif type == Type.START_PARAMETERS:
      self._cumulative_params = ''

    elif type == Type.PARAMETERS:
      self._cumulative_params += token.string
      self._variables_in_scope.extend(self.GetParams())

    elif type == Type.KEYWORD and token.string == 'return':
      next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
      # `return;` alone does not count as returning a value.
      if not next_token.IsType(Type.SEMICOLON):
        function = self.GetFunction()
        if function:
          function.has_return = True

    elif type == Type.KEYWORD and token.string == 'throw':
      function = self.GetFunction()
      if function:
        function.has_throw = True

    elif type == Type.KEYWORD and token.string == 'var':
      function = self.GetFunction()
      next_token = tokenutil.Search(token, [Type.IDENTIFIER,
                                            Type.SIMPLE_LVALUE])

      if next_token:
        if next_token.type == Type.SIMPLE_LVALUE:
          self._variables_in_scope.append(
              next_token.values['identifier'])
        else:
          self._variables_in_scope.append(next_token.string)

    elif type == Type.SIMPLE_LVALUE:
      identifier = token.values['identifier']
      jsdoc = self.GetDocComment()
      if jsdoc:
        self._documented_identifiers.add(identifier)

      self._HandleIdentifier(identifier, True)

    elif type == Type.IDENTIFIER:
      self._HandleIdentifier(token.string, False)

      # Detect documented non-assignments.
      next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
      if next_token and next_token.IsType(Type.SEMICOLON):
        if (self._last_non_space_token and
            self._last_non_space_token.IsType(
                Type.END_DOC_COMMENT)):
          self._documented_identifiers.add(token.string)

  def _HandleIdentifier(self, identifier, is_assignment):
    """Process the given identifier.

    Currently checks if it references 'this' and annotates the function
    accordingly.

    Args:
      identifier: The identifier to process.
      is_assignment: Whether the identifier is being written to.
    """
    if identifier == 'this' or identifier.startswith('this.'):
      function = self.GetFunction()
      if function:
        function.has_this = True

  def HandleAfterToken(self, token):
    """Handle updating state after a token has been checked.

    This function should be used for destructive state changes such as
    deleting a tracked object.

    Args:
      token: The token to handle.
    """
    # NOTE: `type` intentionally shadows the builtin within this method,
    # preserved from the original code.
    type = token.type
    if type == Type.SEMICOLON or type == Type.END_PAREN or (
        type == Type.END_BRACKET and
        self._last_non_space_token.type not in (
            Type.SINGLE_QUOTE_STRING_END,
            Type.DOUBLE_QUOTE_STRING_END)):
      # We end on any numeric array index, but keep going for string based
      # array indices so that we pick up manually exported identifiers.
      self._doc_comment = None
      self._last_comment = None

    elif type == Type.END_BLOCK:
      self._doc_comment = None
      self._last_comment = None

      if self.InFunction() and self.IsFunctionClose():
        # TODO(robbyw): Detect the function's name for better errors.
        function = self._function_stack.pop()
        function.end_token = token

        # Pop all variables till delimiter ('') those were defined in the
        # function being closed so make them out of scope.
        while self._variables_in_scope and self._variables_in_scope[-1]:
          self._variables_in_scope.pop()

        # Pop delimiter
        if self._variables_in_scope:
          self._variables_in_scope.pop()

    elif type == Type.END_PARAMETERS and self._doc_comment:
      self._doc_comment = None
      self._last_comment = None

    if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
      self._last_non_space_token = token

    self._last_line = token.line
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import datetime
import mock
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib import context as n_context
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from sqlalchemy import orm
import testscenarios
import testtools
from neutron.common import rpc as n_rpc
from neutron.db import db_base_plugin_v2 as db_v2
from neutron.db import l3_db
from neutron.db import l3_dvr_db
from neutron.db import l3_dvr_ha_scheduler_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron.extensions import l3agentscheduler as l3agent
from neutron import manager
from neutron.objects import agent as agent_obj
from neutron.objects import l3_hamode
from neutron.objects import l3agent as rb_obj
from neutron.scheduler import l3_agent_scheduler
from neutron.tests import base
from neutron.tests.common import helpers
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
# the below code is required for the following reason
# (as documented in testscenarios)
"""Multiply tests depending on their 'scenarios' attribute.
This can be assigned to 'load_tests' in any test module to make this
automatically work across tests in the module.
"""
load_tests = testscenarios.load_tests_apply_scenarios

# Host names used when registering DVR / DVR-SNAT mode L3 agents below.
HOST_DVR = 'my_l3_host_dvr'
HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
# Fake compute-owned device_owner values built from the standard prefix.
DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
DEVICE_OWNER_COMPUTE_NOVA = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova'
class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler):
    """Minimal concrete L3Scheduler whose abstract hooks all do nothing."""

    def schedule(self):
        """No-op scheduling entry point (returns None)."""

    def _choose_router_agent(self):
        """No-op single-agent selection hook (returns None)."""

    def _choose_router_agents_for_ha(self):
        """No-op HA agent selection hook (returns None)."""
class FakePortDB(object):
    """In-memory stand-in for the port DB supporting filtered queries.

    Ports are plain dicts; filters map a field name to a list of accepted
    values. List-valued port fields are matched recursively.
    """

    def __init__(self, port_list):
        self._port_list = port_list

    def _get_query_answer(self, port_list, filters):
        """Return every port in port_list satisfying all filter entries."""
        matches = []
        for candidate in port_list:
            is_match = True
            for field, wanted in filters.items():
                value = candidate.get(field, None)
                if not value:
                    # Missing or falsy field: cannot satisfy the filter.
                    is_match = False
                elif isinstance(value, list):
                    # Recurse into list-valued fields (e.g. fixed_ips).
                    is_match = bool(self._get_query_answer(value, wanted))
                else:
                    is_match = value in wanted
                if not is_match:
                    break
            if is_match:
                matches.append(candidate)
        return matches

    def get_port(self, context, port_id):
        """Return the port with port_id if visible to context, else None."""
        for port in self._port_list:
            if port['id'] != port_id:
                continue
            # Only the owning tenant or an admin may see the port.
            if port['tenant_id'] == context.tenant_id or context.is_admin:
                return port
            return None
        return None

    def get_ports(self, context, filters=None):
        """Return all ports matching filters, tenant-scoped unless admin."""
        effective = dict(filters) if filters else {}
        if not context.is_admin:
            effective['tenant_id'] = [context.tenant_id]
        return self._get_query_answer(self._port_list, effective)
class L3SchedulerBaseTestCase(base.BaseTestCase):
    """Unit tests for the scheduler base-class helpers, fully mocked."""

    def setUp(self):
        super(L3SchedulerBaseTestCase, self).setUp()
        # Concrete-but-inert scheduler; the plugin is a plain Mock.
        self.scheduler = FakeL3Scheduler()
        self.plugin = mock.Mock()

    def _test__get_routers_can_schedule(self, routers, agent, target_routers):
        # The candidate list returned by the plugin drives the result.
        self.plugin.get_l3_agent_candidates.return_value = agent
        result = self.scheduler._get_routers_can_schedule(
            self.plugin, mock.ANY, routers, mock.ANY)
        self.assertEqual(target_routers, result)

    def test__get_routers_can_schedule_with_compat_agent(self):
        routers = [{'id': 'foo_router'}]
        self._test__get_routers_can_schedule(routers, mock.ANY, routers)

    def test__get_routers_can_schedule_with_no_compat_agent(self):
        routers = [{'id': 'foo_router'}]
        self._test__get_routers_can_schedule(routers, None, [])

    def test__bind_routers_centralized(self):
        routers = [{'id': 'foo_router'}]
        agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid())
        with mock.patch.object(self.scheduler, 'bind_router') as mock_bind:
            self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent)
        # Non-HA routers go through the plain bind_router path.
        mock_bind.assert_called_once_with(mock.ANY, mock.ANY,
                                          'foo_router', agent.id)

    def _test__bind_routers_ha(self, has_binding):
        # HA routers only get a new HA port+binding when none exists yet.
        routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}]
        agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid())
        with mock.patch.object(self.scheduler,
                               '_router_has_binding',
                               return_value=has_binding) as mock_has_binding,\
                mock.patch.object(self.scheduler,
                                  'create_ha_port_and_bind') as mock_bind:
            self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent)
        mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router',
                                                 agent.id)
        self.assertEqual(not has_binding, mock_bind.called)

    def test__bind_routers_ha_has_binding(self):
        self._test__bind_routers_ha(has_binding=True)

    def test__bind_routers_ha_no_binding(self):
        self._test__bind_routers_ha(has_binding=False)

    def test__get_candidates_iterable_on_early_returns(self):
        # Early-return paths must still yield an iterable result.
        plugin = mock.MagicMock()
        # non-distributed router already hosted
        plugin.get_l3_agents_hosting_routers.return_value = [{'id': 'a1'}]
        router = {'distributed': False, 'id': 'falafel'}
        iter(self.scheduler._get_candidates(plugin, mock.MagicMock(), router))
        # distributed router but no agents
        router['distributed'] = True
        plugin.get_l3_agents.return_value = []
        iter(self.scheduler._get_candidates(plugin, mock.MagicMock(), router))
        self.assertFalse(plugin.get_l3_agent_candidates.called)

    def test__get_candidates_skips_get_l3_agent_candidates_if_dvr_scheduled(
            self):
        plugin = mock.MagicMock()
        # distributed router already hosted
        plugin.get_l3_agents_hosting_routers.return_value = [{'id': 'a1'}]
        router = {'distributed': True, 'id': uuidutils.generate_uuid()}
        plugin.get_l3_agents.return_value = ['a1']
        self.scheduler._get_candidates(plugin, mock.MagicMock(), router)
        # Already-scheduled DVR routers must not be re-evaluated.
        self.assertFalse(plugin.get_l3_agent_candidates.called)
class L3SchedulerBaseMixin(object):
    """Agent-registration and router-fixture helpers shared by the
    scheduler test cases.

    Host classes must provide ``self.plugin``, ``self.adminContext``,
    ``self.fmt`` and the ``_make_router`` / gateway helpers.
    """

    def _register_l3_agents(self, plugin=None):
        # Two legacy-mode agents; ids are cached for direct use in tests.
        self.agent1 = helpers.register_l3_agent(
            'host_1', constants.L3_AGENT_MODE_LEGACY)
        self.agent_id1 = self.agent1.id
        self.agent2 = helpers.register_l3_agent(
            'host_2', constants.L3_AGENT_MODE_LEGACY)
        self.agent_id2 = self.agent2.id

    def _register_l3_dvr_agents(self):
        self.l3_dvr_agent = helpers.register_l3_agent(
            HOST_DVR, constants.L3_AGENT_MODE_DVR)
        self.l3_dvr_agent_id = self.l3_dvr_agent.id
        self.l3_dvr_snat_agent = helpers.register_l3_agent(
            HOST_DVR_SNAT, constants.L3_AGENT_MODE_DVR_SNAT)
        self.l3_dvr_snat_id = self.l3_dvr_snat_agent.id

    def _set_l3_agent_admin_state(self, context, agent_id, state=True):
        update = {'agent': {'admin_state_up': state}}
        self.plugin.update_agent(context, agent_id, update)

    def _set_l3_agent_dead(self, agent_id):
        # Back-date the heartbeat by an hour so the agent is considered
        # dead by the liveness check.
        update = {
            'agent': {
                'heartbeat_timestamp':
                timeutils.utcnow() - datetime.timedelta(hours=1)}}
        self.plugin.update_agent(self.adminContext, agent_id, update)

    @contextlib.contextmanager
    def router_with_ext_gw(self, name='router1', admin_state_up=True,
                           fmt=None, tenant_id=None,
                           external_gateway_info=None,
                           subnet=None, set_context=False,
                           **kwargs):
        """Yield a router attached to an external gateway; always clean up.

        Fixes two defects of the original:
        * ``tenant_id=uuidutils.generate_uuid()`` in the signature was
          evaluated once at import time, so every caller relying on the
          default silently shared a single tenant id. ``None`` now means
          "generate a fresh id per call" (backward compatible for callers
          passing an explicit tenant_id).
        * Cleanup now runs in a ``finally`` block, so the gateway is
          detached and the router deleted even when the test body raises.
        """
        if tenant_id is None:
            tenant_id = uuidutils.generate_uuid()
        router = self._make_router(fmt or self.fmt, tenant_id, name,
                                   admin_state_up, external_gateway_info,
                                   set_context, **kwargs)
        self._add_external_gateway_to_router(
            router['router']['id'],
            subnet['subnet']['network_id'])
        try:
            yield router
        finally:
            self._remove_external_gateway_from_router(
                router['router']['id'], subnet['subnet']['network_id'])
            self._delete('routers', router['router']['id'])
class L3SchedulerTestBaseMixin(object):
    """Scheduler behaviour tests shared by the concrete scheduler test
    cases.

    Relies on the host class providing ``self.plugin``,
    ``self.adminContext``, the agents registered by L3SchedulerBaseMixin
    and the ``_make_router`` helper.
    """

    def _test_add_router_to_l3_agent(self,
                                     distributed=False,
                                     already_scheduled=False,
                                     external_gw=None):
        # Legacy agent by default; DVR-SNAT agent for distributed routers.
        agent_id = self.agent_id1
        agent = self.agent1
        if distributed:
            self._register_l3_dvr_agents()
            agent_id = self.l3_dvr_snat_id
            agent = self.l3_dvr_snat_agent
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r1')
        router['router']['distributed'] = distributed
        router['router']['external_gateway_info'] = external_gw
        if already_scheduled:
            self._test_schedule_bind_router(agent, router)
        with mock.patch.object(self.plugin,
                               "validate_agent_router_combination"),\
                mock.patch.object(self.plugin,
                                  "create_router_to_agent_binding") as auto_s,\
                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                           return_value=router['router']):
            self.plugin.add_router_to_l3_agent(self.adminContext, agent_id,
                                               router['router']['id'])
            # A binding is only created when not already scheduled.
            self.assertNotEqual(already_scheduled, auto_s.called)

    def test__unbind_router_removes_binding(self):
        agent_id = self.agent_id1
        agent = self.agent1
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r1')
        self._test_schedule_bind_router(agent, router)
        self.plugin._unbind_router(self.adminContext,
                                   router['router']['id'],
                                   agent_id)
        bindings = rb_obj.RouterL3AgentBinding.get_l3_agents_by_router_ids(
            self.adminContext, [router['router']['id']])
        self.assertEqual(0, len(bindings))

    def _create_router_for_l3_agent_dvr_test(self,
                                             distributed=False,
                                             external_gw=None):
        # Build a router dict (not scheduled) for the DVR move tests.
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r1')
        router['router']['distributed'] = distributed
        router['router']['external_gateway_info'] = external_gw
        return router

    def _prepare_l3_agent_dvr_move_exceptions(self,
                                              distributed=False,
                                              external_gw=None,
                                              agent_id=None,
                                              expected_exception=None):
        # Assert that moving the router onto the given agent raises.
        router = self._create_router_for_l3_agent_dvr_test(
            distributed=distributed, external_gw=external_gw)
        with mock.patch.object(self.plugin, "create_router_to_agent_binding"),\
                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                           return_value=router['router']):
            self.assertRaises(expected_exception,
                              self.plugin.add_router_to_l3_agent,
                              self.adminContext, agent_id,
                              router['router']['id'])

    def test__schedule_router_skips_unschedulable_routers(self):
        mock.patch.object(self.plugin, 'router_supports_scheduling',
                          return_value=False).start()
        scheduler = l3_agent_scheduler.ChanceScheduler()
        self.assertIsNone(scheduler._schedule_router(self.plugin,
                                                     self.adminContext,
                                                     'router_id'))

    def test_add_router_to_l3_agent_mismatch_error_dvr_to_legacy(self):
        self._register_l3_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            distributed=True,
            agent_id=self.agent_id1,
            expected_exception=l3agent.RouterL3AgentMismatch)

    def test_add_router_to_l3_agent_mismatch_error_legacy_to_dvr(self):
        self._register_l3_dvr_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            agent_id=self.l3_dvr_agent_id,
            expected_exception=l3agent.DVRL3CannotAssignToDvrAgent)

    def test_add_router_to_l3_agent_mismatch_error_dvr_to_dvr(self):
        self._register_l3_dvr_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            distributed=True,
            agent_id=self.l3_dvr_agent_id,
            expected_exception=l3agent.DVRL3CannotAssignToDvrAgent)

    def test_add_router_to_l3_agent_dvr_to_snat(self):
        external_gw_info = {
            "network_id": uuidutils.generate_uuid(),
            "enable_snat": True
        }
        self._register_l3_dvr_agents()
        agent_id = self.l3_dvr_snat_id
        router = self._create_router_for_l3_agent_dvr_test(
            distributed=True,
            external_gw=external_gw_info)
        with mock.patch.object(self.plugin,
                               "validate_agent_router_combination"),\
                mock.patch.object(
                    self.plugin,
                    "create_router_to_agent_binding") as rtr_agent_binding,\
                mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                           return_value=router['router']):
            self.plugin.add_router_to_l3_agent(self.adminContext, agent_id,
                                               router['router']['id'])
            rtr_agent_binding.assert_called_once_with(
                self.adminContext, mock.ANY, router['router'])

    def test_add_router_to_l3_agent(self):
        self._test_add_router_to_l3_agent()

    def test_add_distributed_router_to_l3_agent(self):
        external_gw_info = {
            "network_id": uuidutils.generate_uuid(),
            "enable_snat": True
        }
        self._test_add_router_to_l3_agent(distributed=True,
                                          external_gw=external_gw_info)

    def test_add_router_to_l3_agent_already_scheduled(self):
        self._test_add_router_to_l3_agent(already_scheduled=True)

    def test_add_distributed_router_to_l3_agent_already_scheduled(self):
        external_gw_info = {
            "network_id": uuidutils.generate_uuid(),
            "enable_snat": True
        }
        self._test_add_router_to_l3_agent(distributed=True,
                                          already_scheduled=True,
                                          external_gw=external_gw_info)

    def test_remove_router_from_l3_agent_in_dvr_mode(self):
        self._register_l3_dvr_agents()
        self.assertRaises(l3agent.DVRL3CannotRemoveFromDvrAgent,
                          self.plugin.remove_router_from_l3_agent,
                          self.adminContext,
                          self.l3_dvr_agent_id,
                          mock.ANY)

    def test_remove_router_from_l3_agent_in_dvr_snat_mode(self):
        self._register_l3_dvr_agents()
        router = self._create_router_for_l3_agent_dvr_test(
            distributed=True)
        agent_id = self.l3_dvr_snat_id
        l3_notifier = mock.Mock()
        self.plugin.agent_notifiers = {constants.AGENT_TYPE_L3: l3_notifier}
        self.plugin.remove_router_from_l3_agent(self.adminContext, agent_id,
                                                router['router']['id'])
        # The agent hosting the router must be notified of the removal.
        l3_notifier.router_removed_from_agent.assert_called_once_with(
            self.adminContext, router['router']['id'],
            self.l3_dvr_snat_agent.host)

    def _prepare_schedule_dvr_tests(self):
        # One live agent that is both the candidate and the host.
        scheduler = l3_agent_scheduler.ChanceScheduler()
        agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid())
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        plugin = mock.Mock()
        plugin.get_l3_agents_hosting_routers.return_value = []
        plugin.get_l3_agents.return_value = [agent]
        plugin.get_l3_agent_candidates.return_value = [agent]
        return scheduler, agent, plugin

    def test_schedule_dvr_router_without_snatbinding_and_no_gw(self):
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {
            'id': 'foo_router_id',
            'distributed': True
        }
        plugin.get_router.return_value = sync_router
        with mock.patch.object(scheduler, 'bind_router'),\
                mock.patch.object(plugin,
                                  'get_snat_bindings',
                                  return_value=False):
            scheduler._schedule_router(
                plugin, self.adminContext, 'foo_router_id', None)
        # Verify the full candidate-lookup call sequence on the plugin.
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.get_l3_agents_hosting_routers(
                mock.ANY, ['foo_router_id'], admin_state_up=True),
            mock.call.get_l3_agents(mock.ANY, active=True),
            mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
        ]
        plugin.assert_has_calls(expected_calls)

    def test_schedule_router_distributed(self):
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {
            'id': 'foo_router_id',
            'distributed': True,
            'external_gateway_info': {
                'network_id': uuidutils.generate_uuid(),
                'enable_snat': True
            }
        }
        plugin.get_router.return_value = sync_router
        with mock.patch.object(scheduler, 'bind_router'):
            scheduler._schedule_router(
                plugin, self.adminContext, 'foo_router_id', None)
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.get_l3_agents_hosting_routers(
                mock.ANY, ['foo_router_id'], admin_state_up=True),
            mock.call.get_l3_agents(mock.ANY, active=True),
            mock.call.get_l3_agent_candidates(mock.ANY, sync_router,
                                              [agent]),
        ]
        plugin.assert_has_calls(expected_calls)

    def _test_schedule_bind_router(self, agent, router):
        # Bind and verify the binding exists in the DB.
        ctx = self.adminContext
        scheduler = l3_agent_scheduler.ChanceScheduler()
        rid = router['router']['id']
        scheduler.bind_router(self.plugin, ctx, rid, agent.id)
        results = rb_obj.RouterL3AgentBinding.get_objects(ctx, router_id=rid)
        self.assertGreater(len(results), 0)
        self.assertIn(agent.id, [bind.l3_agent_id for bind in results])

    def test_bind_new_router(self):
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r1')
        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
            self._test_schedule_bind_router(self.agent1, router)
            self.assertEqual(1, flog.call_count)
            args, kwargs = flog.call_args
            self.assertIn('is scheduled', args[0])

    def test_bind_absent_router(self):
        scheduler = l3_agent_scheduler.ChanceScheduler()
        # checking that bind_router() is not throwing
        # when supplied with router_id of non-existing router
        scheduler.bind_router(self.plugin, self.adminContext,
                              uuidutils.generate_uuid(), self.agent_id1)

    def test_bind_existing_router(self):
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r2')
        self._test_schedule_bind_router(self.agent1, router)
        # Re-binding logs "already scheduled" instead of creating a dup.
        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
            self._test_schedule_bind_router(self.agent1, router)
            self.assertEqual(1, flog.call_count)
            args, kwargs = flog.call_args
            self.assertIn('has already been scheduled', args[0])

    def _check_get_l3_agent_candidates(
            self, router, agent_list, exp_host, count=1):
        # Shared assertion helper: candidate count and (optionally) host.
        candidates = self.plugin.get_l3_agent_candidates(self.adminContext,
                                                         router, agent_list)
        self.assertEqual(count, len(candidates))
        if count:
            self.assertEqual(exp_host, candidates[0]['host'])

    def test_get_l3_agent_candidates_legacy(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = uuidutils.generate_uuid()
        agent_list = [self.agent1, self.l3_dvr_agent]
        # test legacy agent_mode case: only legacy agent should be candidate
        router['distributed'] = False
        exp_host = 'host_1'
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)

    def test_get_l3_agent_candidates_dvr(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = uuidutils.generate_uuid()
        agent_list = [self.agent1, self.l3_dvr_agent]
        # test dvr agent_mode case no candidates
        router['distributed'] = True
        self.get_subnet_ids_on_router = mock.Mock()
        self._check_dvr_serviceable_ports_on_host = mock.Mock(
            return_value=True)
        self._check_get_l3_agent_candidates(router, agent_list, None, count=0)

    def test_get_l3_agent_candidates_dvr_no_vms(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = uuidutils.generate_uuid()
        agent_list = [self.agent1, self.l3_dvr_agent]
        router['distributed'] = True
        # Test no VMs present case
        self.get_subnet_ids_on_router = mock.Mock()
        self._check_dvr_serviceable_ports_on_host = mock.Mock(
            return_value=False)
        self._check_get_l3_agent_candidates(
            router, agent_list, HOST_DVR, count=0)

    def test_get_l3_agent_candidates_dvr_snat(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = uuidutils.generate_uuid()
        router['distributed'] = True
        agent_list = [self.l3_dvr_snat_agent]
        self.get_subnet_ids_on_router = mock.Mock()
        self._check_dvr_serviceable_ports_on_host = mock.Mock(
            return_value=True)
        self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT)

    def test_get_l3_agent_candidates_dvr_snat_no_vms(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = uuidutils.generate_uuid()
        router['distributed'] = True
        agent_list = [self.l3_dvr_snat_agent]
        self._check_dvr_serviceable_ports_on_host = mock.Mock(
            return_value=False)
        # Test no VMs present case
        self.get_subnet_ids_on_router = mock.Mock()
        # NOTE(review): this re-assignment duplicates the return_value set
        # two statements above; harmless but redundant.
        self._check_dvr_serviceable_ports_on_host.return_value = False
        self._check_get_l3_agent_candidates(
            router, agent_list, HOST_DVR_SNAT, count=1)

    def test_get_l3_agent_candidates_dvr_ha_snat_no_vms(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = uuidutils.generate_uuid()
        router['distributed'] = True
        router['ha'] = True
        agent_list = [self.l3_dvr_snat_agent]
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
        # Test no VMs present case
        self.check_ports_exist_on_l3agent.return_value = False
        self.get_subnet_ids_on_router = mock.Mock(return_value=set())
        self._check_get_l3_agent_candidates(
            router, agent_list, HOST_DVR_SNAT, count=1)

    def test_get_l3_agent_candidates_centralized(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = uuidutils.generate_uuid()
        # check centralized test case
        router['distributed'] = False
        agent_list = [self.l3_dvr_snat_agent]
        self._check_get_l3_agent_candidates(router, agent_list, HOST_DVR_SNAT)

    def test_get_l3_agents_hosting_routers(self):
        agent = helpers.register_l3_agent('host_6')
        router = self._make_router(self.fmt,
                                   tenant_id=uuidutils.generate_uuid(),
                                   name='r1')
        ctx = self.adminContext
        router_id = router['router']['id']
        self.plugin.router_scheduler.bind_router(self.plugin, ctx,
                                                 router_id, agent.id)
        agents = self.plugin.get_l3_agents_hosting_routers(ctx,
                                                           [router_id])
        self.assertEqual([agent.id], [agt.id for agt in agents])
        agents = self.plugin.get_l3_agents_hosting_routers(ctx,
                                                           [router_id],
                                                           admin_state_up=True)
        self.assertEqual([agent.id], [agt.id for agt in agents])
        # Disabling the agent hides it only from the admin_state_up query.
        self._set_l3_agent_admin_state(ctx, agent.id, False)
        agents = self.plugin.get_l3_agents_hosting_routers(ctx,
                                                           [router_id])
        self.assertEqual([agent.id], [agt.id for agt in agents])
        agents = self.plugin.get_l3_agents_hosting_routers(ctx,
                                                           [router_id],
                                                           admin_state_up=True)
        self.assertEqual([], agents)
class L3SchedulerTestCaseMixin(test_l3.L3NatTestCaseMixin,
                               L3SchedulerBaseMixin,
                               L3SchedulerTestBaseMixin):
    """Wires the shared scheduler tests to a real plugin + DB fixture."""

    def setUp(self):
        self.mock_rescheduling = False
        ext_mgr = test_l3.L3TestExtensionManager()
        plugin_str = ('neutron.tests.unit.extensions.test_l3.'
                      'TestL3NatIntAgentSchedulingPlugin')
        super(L3SchedulerTestCaseMixin, self).setUp(plugin=plugin_str,
                                                    ext_mgr=ext_mgr)
        self.adminContext = n_context.get_admin_context()
        self.plugin = directory.get_plugin()
        # Default to the chance scheduler; subclasses may override.
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
        )
        self._register_l3_agents()
class L3AgentChanceSchedulerTestCase(L3SchedulerTestCaseMixin,
                                     test_db_base_plugin_v2.
                                     NeutronDbPluginV2TestCase):
    """Tests for the random (chance) L3 scheduler."""

    def setUp(self):
        super(L3AgentChanceSchedulerTestCase, self).setUp()
        # Removes MissingAuthPlugin exception from logs
        self.patch_notifier = mock.patch(
            'neutron.notifiers.batch_notifier.BatchNotifier._notify')
        self.patch_notifier.start()
        # The original never stopped this patch; ensure it is undone even
        # when a test fails.
        self.addCleanup(self.patch_notifier.stop)

    def test_random_scheduling(self):
        random_patch = mock.patch('random.choice')
        random_mock = random_patch.start()
        # Stop via cleanup: the original called random_patch.stop() only
        # on the success path, leaking the patched random.choice into
        # subsequent tests when an assertion failed.
        self.addCleanup(random_patch.stop)

        def side_effect(seq):
            # Deterministic "random" choice: always the first candidate.
            return seq[0]
        random_mock.side_effect = side_effect

        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                agents = self.plugin.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual(1, len(agents))
                self.assertEqual(1, random_mock.call_count)
                with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
                    agents = self.plugin.get_l3_agents_hosting_routers(
                        self.adminContext, [r2['router']['id']],
                        admin_state_up=True)
                    # (expected, actual) argument order, consistent with
                    # every other assertion in this class.
                    self.assertEqual(1, len(agents))
                    self.assertEqual(2, random_mock.call_count)

    def test_scheduler_auto_schedule_when_agent_added(self):
        # With every agent disabled the router stays unscheduled...
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id1, False)
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)
        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                agents = self.plugin.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual(0, len(agents))
                # ...until an agent is re-enabled and auto-scheduling runs.
                self._set_l3_agent_admin_state(self.adminContext,
                                               self.agent_id1, True)
                self.plugin.auto_schedule_routers(self.adminContext,
                                                  'host_1')
                agents = self.plugin.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual('host_1', agents[0]['host'])
class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCaseMixin,
                                           test_db_base_plugin_v2.
                                           NeutronDbPluginV2TestCase):
    """Tests for the least-loaded (fewest routers) L3 scheduler."""

    def setUp(self):
        super(L3AgentLeastRoutersSchedulerTestCase, self).setUp()
        # Replace the default chance scheduler set up by the mixin.
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
        )

    def test_scheduler(self):
        # disable one agent to force the scheduling to the only one.
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)
        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                agents = self.plugin.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual(1, len(agents))
                agent_id1 = agents[0]['id']
                with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
                    agents = self.plugin.get_l3_agents_hosting_routers(
                        self.adminContext, [r2['router']['id']],
                        admin_state_up=True)
                    self.assertEqual(1, len(agents))
                    agent_id2 = agents[0]['id']
                    # Both routers land on the only enabled agent.
                    self.assertEqual(agent_id1, agent_id2)
                    # re-enable the second agent to see whether the next router
                    # spawned will be on this one.
                    self._set_l3_agent_admin_state(self.adminContext,
                                                   self.agent_id2, True)
                    with self.router_with_ext_gw(name='r3',
                                                 subnet=subnet) as r3:
                        agents = self.plugin.get_l3_agents_hosting_routers(
                            self.adminContext, [r3['router']['id']],
                            admin_state_up=True)
                        self.assertEqual(1, len(agents))
                        agent_id3 = agents[0]['id']
                        # The idle agent must win the third router.
                        self.assertNotEqual(agent_id1, agent_id3)
class L3DvrScheduler(l3_db.L3_NAT_db_mixin,
                     l3_dvrscheduler_db.L3_DVRsch_db_mixin):
    """Device under test: NAT DB mixin combined with the DVR scheduler
    DB mixin, with no extra behaviour of its own."""
    pass
class L3DvrSchedulerTestCase(L3SchedulerBaseMixin,
test_db_base_plugin_v2.
NeutronDbPluginV2TestCase):
l3_plugin = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatAgentSchedulingServicePlugin')
    def setUp(self):
        """Set up an ml2 core plugin plus the configured L3 service plugin."""
        if self.l3_plugin:
            service_plugins = {
                'l3_plugin_name': self.l3_plugin,
                'flavors_plugin_name': 'neutron.services.flavors.'
                                       'flavors_plugin.FlavorsPlugin'
            }
        else:
            service_plugins = None
        super(L3DvrSchedulerTestCase, self).setUp('ml2',
            service_plugins=service_plugins)
        self.setup_coreplugin('ml2')
        self.adminContext = n_context.get_admin_context()
        # Composite scheduler mixin instance under test.
        self.dut = L3DvrScheduler()
        self.l3plugin = directory.get_plugin(plugin_constants.L3)
def test__notify_l3_agent_update_port_with_allowed_address_pairs_revert(
self):
port_id = uuidutils.generate_uuid()
kwargs = {
'context': self.adminContext,
'port': {
'id': port_id,
'admin_state_up': False,
portbindings.HOST_ID: 'vm-host',
'device_id': 'vm-id',
'allowed_address_pairs': [
{'ip_address': '10.1.0.201',
'mac_address': 'aa:bb:cc:dd:ee:ff'}],
'device_owner': DEVICE_OWNER_COMPUTE,
},
'original_port': {
'id': port_id,
'admin_state_up': True,
portbindings.HOST_ID: 'vm-host',
'device_id': 'vm-id',
'allowed_address_pairs': [
{'ip_address': '10.1.0.201',
'mac_address': 'aa:bb:cc:dd:ee:ff'}],
'device_owner': DEVICE_OWNER_COMPUTE,
},
}
port = kwargs.get('original_port')
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
'port', 'after_update', mock.ANY, **kwargs)
l3plugin._get_allowed_address_pair_fixed_ips.return_value = (
['10.1.0.21'])
self.assertFalse(
l3plugin.update_arp_entry_for_dvr_service_port.called)
l3plugin.delete_arp_entry_for_dvr_service_port.\
assert_called_once_with(
self.adminContext,
port,
fixed_ips_to_delete=mock.ANY)
    def test__notify_l3_agent_update_port_with_allowed_address_pairs(self):
        """Adding allowed address pairs to a bound port updates ARP entries."""
        port_id = uuidutils.generate_uuid()
        kwargs = {
            'context': self.adminContext,
            'port': {
                'id': port_id,
                portbindings.HOST_ID: 'vm-host',
                'allowed_address_pairs': [
                    {'ip_address': '10.1.0.201',
                     'mac_address': 'aa:bb:cc:dd:ee:ff'}],
                'device_id': 'vm-id',
                'device_owner': DEVICE_OWNER_COMPUTE,
                'admin_state_up': True,
            },
            'original_port': {
                'id': port_id,
                portbindings.HOST_ID: 'vm-host',
                'device_id': 'vm-id',
                'device_owner': DEVICE_OWNER_COMPUTE,
                'admin_state_up': True,
            },
        }
        l3plugin = mock.Mock()
        directory.add_plugin(plugin_constants.L3, l3plugin)
        l3_dvrscheduler_db._notify_l3_agent_port_update(
            'port', 'after_update', mock.ANY, **kwargs)
        self.assertTrue(
            l3plugin.update_arp_entry_for_dvr_service_port.called)
    def test__notify_l3_agent_when_unbound_port_migrates_to_bound_host(self):
        """An unbound port gaining a host binding triggers DVR service-port
        handling with unbound_migrate=True."""
        port_id = 'fake-port'
        kwargs = {
            'context': self.adminContext,
            'original_port': {
                'id': port_id,
                portbindings.HOST_ID: '',
                'device_owner': '',
                'admin_state_up': True,
            },
            'port': {
                'id': port_id,
                portbindings.HOST_ID: 'vm-host',
                'device_owner': DEVICE_OWNER_COMPUTE,
                'mac_address': '02:04:05:17:18:19'
            },
        }
        port = kwargs.get('port')
        plugin = directory.get_plugin()
        l3plugin = mock.Mock()
        l3plugin.supported_extension_aliases = [
            'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
            constants.L3_DISTRIBUTED_EXT_ALIAS
        ]
        directory.add_plugin(plugin_constants.L3, l3plugin)
        l3_dvrscheduler_db._notify_l3_agent_port_update(
            'port', 'after_update', plugin, **kwargs)
        l3plugin.dvr_handle_new_service_port.assert_called_once_with(
            self.adminContext, port, unbound_migrate=True)
    def test__notify_l3_agent_update_port_no_removing_routers(self):
        """An update with no new port (port=None) must not touch ARP entries
        or remove routers from agents."""
        port_id = 'fake-port'
        kwargs = {
            'context': self.adminContext,
            'port': None,
            'original_port': {
                'id': port_id,
                portbindings.HOST_ID: 'vm-host',
                'device_id': 'vm-id',
                'device_owner': DEVICE_OWNER_COMPUTE,
                'mac_address': '02:04:05:17:18:19'
            },
            'mac_address_updated': True
        }
        plugin = directory.get_plugin()
        l3plugin = mock.Mock()
        l3plugin.supported_extension_aliases = [
            'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
            constants.L3_DISTRIBUTED_EXT_ALIAS
        ]
        directory.add_plugin(plugin_constants.L3, l3plugin)
        l3_dvrscheduler_db._notify_l3_agent_port_update(
            'port', 'after_update', plugin, **kwargs)
        self.assertFalse(
            l3plugin.update_arp_entry_for_dvr_service_port.called)
        self.assertFalse(
            l3plugin.dvr_handle_new_service_port.called)
        self.assertFalse(l3plugin.remove_router_from_l3_agent.called)
        self.assertFalse(l3plugin.get_dvr_routers_to_remove.called)
    def test__notify_l3_agent_new_port_action(self):
        """A new compute-owned port triggers ARP update and DVR handling."""
        kwargs = {
            'context': self.adminContext,
            'original_port': None,
            'port': {
                'device_owner': DEVICE_OWNER_COMPUTE,
            },
        }
        l3plugin = mock.Mock()
        directory.add_plugin(plugin_constants.L3, l3plugin)
        l3_dvrscheduler_db._notify_l3_agent_new_port(
            'port', 'after_create', mock.ANY, **kwargs)
        l3plugin.update_arp_entry_for_dvr_service_port.\
            assert_called_once_with(
                self.adminContext, kwargs.get('port'))
        l3plugin.dvr_handle_new_service_port.assert_called_once_with(
            self.adminContext, kwargs.get('port'))
    def test__notify_l3_agent_new_port_no_action(self):
        """Network-owned ports are not DVR-serviceable: no notification."""
        kwargs = {
            'context': self.adminContext,
            'original_port': None,
            'port': {
                'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX + 'None',
            }
        }
        l3plugin = mock.Mock()
        directory.add_plugin(plugin_constants.L3, l3plugin)
        l3_dvrscheduler_db._notify_l3_agent_new_port(
            'port', 'after_create', mock.ANY, **kwargs)
        self.assertFalse(
            l3plugin.update_arp_entry_for_dvr_service_port.called)
        self.assertFalse(
            l3plugin.dvr_handle_new_service_port.called)
    def test__notify_l3_agent_update_port_with_migration_port_profile(self):
        """A live-migration profile triggers DVR handling on the target
        host while refreshing the ARP entry."""
        kwargs = {
            'context': self.adminContext,
            'original_port': {
                portbindings.HOST_ID: 'vm-host',
                'device_owner': DEVICE_OWNER_COMPUTE,
            },
            'port': {
                portbindings.HOST_ID: 'vm-host',
                'device_owner': DEVICE_OWNER_COMPUTE,
                portbindings.PROFILE: {'migrating_to': 'vm-host2'},
            },
        }
        l3plugin = mock.Mock()
        directory.add_plugin(plugin_constants.L3, l3plugin)
        l3_dvrscheduler_db._notify_l3_agent_port_update(
            'port', 'after_update', mock.ANY, **kwargs)
        l3plugin.dvr_handle_new_service_port.assert_called_once_with(
            self.adminContext, kwargs.get('port'), dest_host='vm-host2')
        l3plugin.update_arp_entry_for_dvr_service_port.\
            assert_called_once_with(
                self.adminContext, kwargs.get('port'))
    def test__notify_l3_agent_update_port_no_action(self):
        """An update that changes nothing relevant produces no L3 calls."""
        kwargs = {
            'context': self.adminContext,
            'original_port': {
                portbindings.HOST_ID: 'vm-host',
                'device_owner': DEVICE_OWNER_COMPUTE,
            },
            'port': {
                portbindings.HOST_ID: 'vm-host',
                'device_owner': DEVICE_OWNER_COMPUTE,
            },
        }
        l3plugin = mock.Mock()
        directory.add_plugin(plugin_constants.L3, l3plugin)
        l3_dvrscheduler_db._notify_l3_agent_port_update(
            'port', 'after_update', mock.ANY, **kwargs)
        self.assertFalse(
            l3plugin.update_arp_entry_for_dvr_service_port.called)
        self.assertFalse(
            l3plugin.dvr_handle_new_service_port.called)
        self.assertFalse(l3plugin.remove_router_from_l3_agent.called)
        self.assertFalse(l3plugin.get_dvr_routers_to_remove.called)
def test__notify_l3_agent_update_port_with_mac_address_update(self):
kwargs = {
'context': self.adminContext,
'original_port': {
portbindings.HOST_ID: 'vm-host',
'device_owner': DEVICE_OWNER_COMPUTE,
'mac_address': '02:04:05:17:18:19'
},
'port': {
portbindings.HOST_ID: 'vm-host',
'device_owner': DEVICE_OWNER_COMPUTE,
'mac_address': '02:04:05:17:18:29'
},
'mac_address_updated': True
}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
'port', 'after_update', mock.ANY, **kwargs)
l3plugin.update_arp_entry_for_dvr_service_port.\
assert_called_once_with(
self.adminContext, kwargs.get('port'))
self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
def test__notify_l3_agent_update_port_with_ip_update(self):
kwargs = {
'context': self.adminContext,
'original_port': {
portbindings.HOST_ID: 'vm-host',
'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
'fixed_ips': [{'ip_address': '1.1.1.1'}],
'mac_address': '02:04:05:17:18:19'
},
'port': {
portbindings.HOST_ID: 'vm-host',
'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
'fixed_ips': [{'ip_address': '2.2.2.2'}],
'mac_address': '02:04:05:17:18:19'
},
'mac_address_updated': False
}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
'port', 'after_update', mock.ANY, **kwargs)
l3plugin.update_arp_entry_for_dvr_service_port.\
assert_called_once_with(
self.adminContext, kwargs.get('port'))
self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
def test__notify_l3_agent_update_port_without_ip_change(self):
kwargs = {
'context': self.adminContext,
'original_port': {
portbindings.HOST_ID: 'vm-host',
'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
'fixed_ips': [{'ip_address': '1.1.1.1'}],
},
'port': {
portbindings.HOST_ID: 'vm-host',
'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
'fixed_ips': [{'ip_address': '1.1.1.1'}],
},
}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
'port', 'after_update', mock.ANY, **kwargs)
self.assertFalse(l3plugin.update_arp_entry_for_dvr_service_port.called)
self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
def test__notify_l3_agent_port_binding_change(self):
self._test__notify_l3_agent_port_binding_change()
def test__notify_l3_agent_port_binding_change_removed_routers(self):
router_to_remove = [{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': 'vm-host1'}]
self._test__notify_l3_agent_port_binding_change(router_to_remove)
def test__notify_l3_agent_port_binding_change_removed_routers_fip(self):
fip = {'router_id': 'router_id'}
router_to_remove = [{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': 'vm-host1'}]
self._test__notify_l3_agent_port_binding_change(router_to_remove, fip)
def test__notify_l3_agent_port_binding_change_with_fip(self):
fip = {'router_id': 'router_id'}
self._test__notify_l3_agent_port_binding_change(None, fip)
def test__notify_l3_agent_port_binding_change_fip_dvr(self):
fip = {'router_id': 'router_id'}
is_distributed = True
self._test__notify_l3_agent_port_binding_change(None,
fip, is_distributed)
def test__notify_l3_agent_port_binding_change_fip_dvr_rmrt(self):
fip = {'router_id': 'router_id'}
router_to_remove = [{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': 'vm-host1'}]
is_distributed = True
self._test__notify_l3_agent_port_binding_change(router_to_remove,
fip, is_distributed)
def test__notify_l3_agent_port_binding_change_fip_dvr_on_rmrt(self):
fip = {'router_id': 'foo_id'}
router_to_remove = [{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': 'vm-host1'}]
is_distributed = True
self._test__notify_l3_agent_port_binding_change(router_to_remove,
fip, is_distributed)
def _test__notify_l3_agent_port_binding_change(self,
routers_to_remove=None,
fip=None,
is_distributed=False):
source_host = 'vm-host1'
kwargs = {
'context': self.adminContext,
'original_port': {
'id': uuidutils.generate_uuid(),
portbindings.HOST_ID: source_host,
'device_owner': DEVICE_OWNER_COMPUTE,
},
'port': {
portbindings.HOST_ID: 'vm-host2',
'device_owner': DEVICE_OWNER_COMPUTE,
},
}
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
with mock.patch.object(l3plugin, 'get_dvr_routers_to_remove',
return_value=routers_to_remove
if routers_to_remove else []),\
mock.patch.object(l3plugin, '_get_floatingips_by_port_id',
return_value=[fip] if fip else []),\
mock.patch.object(l3_dvr_db, 'is_distributed_router',
return_value=is_distributed):
l3_dvrscheduler_db._notify_l3_agent_port_update(
'port', 'after_update', mock.ANY, **kwargs)
if routers_to_remove:
(l3plugin.l3_rpc_notifier.router_removed_from_agent.
assert_called_once_with(mock.ANY, 'foo_id', source_host))
self.assertEqual(
1,
l3plugin.delete_arp_entry_for_dvr_service_port.call_count)
if fip and is_distributed and not (routers_to_remove and
fip['router_id'] is routers_to_remove[0]['router_id']):
(l3plugin.l3_rpc_notifier.routers_updated_on_host.
assert_called_once_with(mock.ANY, ['router_id'], source_host))
self.assertEqual(
1, l3plugin.update_arp_entry_for_dvr_service_port.call_count)
l3plugin.dvr_handle_new_service_port.assert_called_once_with(
self.adminContext, kwargs.get('port'), dest_host=None)
def test__notify_l3_agent_update_port_removing_routers(self):
port_id = 'fake-port'
source_host = 'vm-host'
kwargs = {
'context': self.adminContext,
'port': {
'id': port_id,
portbindings.HOST_ID: None,
'device_id': '',
'device_owner': ''
},
'mac_address_updated': False,
'original_port': {
'id': port_id,
portbindings.HOST_ID: source_host,
'device_id': 'vm-id',
'device_owner': DEVICE_OWNER_COMPUTE
}
}
plugin = directory.get_plugin()
l3plugin = mock.Mock()
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
]
directory.add_plugin(plugin_constants.L3, l3plugin)
with mock.patch.object(l3plugin, 'get_dvr_routers_to_remove',
return_value=[{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': source_host}]),\
mock.patch.object(l3plugin, '_get_floatingips_by_port_id',
return_value=[]):
l3_dvrscheduler_db._notify_l3_agent_port_update(
'port', 'after_update', plugin, **kwargs)
self.assertEqual(
1, l3plugin.delete_arp_entry_for_dvr_service_port.call_count)
l3plugin.delete_arp_entry_for_dvr_service_port.\
assert_called_once_with(
self.adminContext, mock.ANY)
self.assertFalse(
l3plugin.dvr_handle_new_service_port.called)
(l3plugin.l3_rpc_notifier.router_removed_from_agent.
assert_called_once_with(mock.ANY, 'foo_id', source_host))
def test__notify_port_delete(self):
plugin = directory.get_plugin()
l3plugin = mock.Mock()
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
]
directory.add_plugin(plugin_constants.L3, l3plugin)
port = {
'id': uuidutils.generate_uuid(),
'device_id': 'abcd',
'device_owner': DEVICE_OWNER_COMPUTE_NOVA,
portbindings.HOST_ID: 'host1',
}
kwargs = {
'context': self.adminContext,
'port': port,
'removed_routers': [
{'agent_id': 'foo_agent', 'router_id': 'foo_id'},
],
}
removed_routers = [{'agent_id': 'foo_agent',
'router_id': 'foo_id',
'host': 'foo_host'}]
l3plugin.get_dvr_routers_to_remove.return_value = removed_routers
l3_dvrscheduler_db._notify_port_delete(
'port', 'after_delete', plugin, **kwargs)
l3plugin.delete_arp_entry_for_dvr_service_port.\
assert_called_once_with(
self.adminContext, mock.ANY)
(l3plugin.l3_rpc_notifier.router_removed_from_agent.
assert_called_once_with(mock.ANY, 'foo_id', 'foo_host'))
def test_dvr_handle_new_service_port(self):
port = {
'id': 'port1',
'device_id': 'abcd',
'device_owner': DEVICE_OWNER_COMPUTE_NOVA,
portbindings.HOST_ID: 'host1',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.3'
}
]
}
dvr_ports = [
{
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
},
{
'id': 'dvr_port2',
'device_id': 'r2',
'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.123'
}
]
}
]
agent_on_host = {'id': 'agent1'}
with mock.patch(
'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports',
return_value=dvr_ports),\
mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
'.L3AgentNotifyAPI'),\
mock.patch.object(
self.dut, 'get_l3_agents',
return_value=[agent_on_host]) as get_l3_agents,\
mock.patch.object(
self.dut, 'get_hosts_to_notify',
return_value=['other_host']),\
mock.patch.object(
self.dut, '_check_for_rtr_serviceable_ports',
return_value=True):
self.dut.dvr_handle_new_service_port(
self.adminContext, port)
get_l3_agents.assert_called_once_with(
self.adminContext,
filters={'host': [port[portbindings.HOST_ID]]})
self.dut.l3_rpc_notifier.routers_updated_on_host.assert_has_calls(
[mock.call(self.adminContext, {'r1', 'r2'}, 'host1'),
mock.call(self.adminContext, {'r1', 'r2'}, 'other_host')],
any_order=True)
self.assertFalse(self.dut.l3_rpc_notifier.routers_updated.called)
def test_get_dvr_routers_by_subnet_ids(self):
subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0'
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
'fixed_ips': [
{
'subnet_id': subnet_id,
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with mock.patch(
'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_port',
return_value=dvr_port),\
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port]):
router_id = self.dut.get_dvr_routers_by_subnet_ids(
self.adminContext, [subnet_id])
self.assertEqual(r1['id'], router_id.pop())
def test_get_subnet_ids_on_router(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with mock.patch(
'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports',
return_value=[dvr_port]):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
self.assertEqual(sub_ids.pop(),
dvr_port.get('fixed_ips').pop(0).get('subnet_id'))
def test_get_subnet_ids_on_router_no_subnet(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE,
'fixed_ips': []
}
r1 = {
'id': 'r1',
'distributed': True,
}
with mock.patch.object(db_v2.NeutronDbPluginV2, 'get_ports',
return_value=[dvr_port]):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
self.assertEqual(0, len(sub_ids))
def test__check_dvr_serviceable_ports_on_host(self):
# HOST_DVR = 'my_l3_host_dvr'
# HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
# HOST_DVR is a sub-string of HOST_DVR_SNAT
self._register_l3_dvr_agents()
host_args = {'admin_state_up': True,
portbindings.PROFILE: {'migrating to': HOST_DVR_SNAT}}
with self.network() as network:
with self.subnet(network=network) as subnet:
subnet_ids = []
subnet_ids.append(subnet['subnet']['id'])
with self.port(subnet=subnet,
device_owner=DEVICE_OWNER_COMPUTE,
arg_list=('admin_state_up',
portbindings.PROFILE,), **host_args):
# Check DVR serviceable ports on HOST_DVR_SNAT.
# Should find existence since it is an exact match to the
# target host name of the port binding profile.
result0 = self.l3plugin. \
_check_dvr_serviceable_ports_on_host(self.adminContext,
self.l3_dvr_snat_agent['host'], subnet_ids)
# Check DVR serviceable ports on HOST_DVR.
# Should not find existence since the sub-string won't get
# matched with the target host.
result1 = self.l3plugin. \
_check_dvr_serviceable_ports_on_host(self.adminContext,
self.l3_dvr_agent['host'], subnet_ids)
self.assertTrue(result0)
self.assertFalse(result1)
def _prepare_schedule_snat_tests(self):
agent = agent_obj.Agent(mock.ANY, id=uuidutils.generate_uuid())
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
router = {
'id': 'foo_router_id',
'distributed': True,
'external_gateway_info': {
'network_id': uuidutils.generate_uuid(),
'enable_snat': True
}
}
return agent, router
class L3HAPlugin(db_v2.NeutronDbPluginV2,
l3_hamode_db.L3_HA_NAT_db_mixin,
l3_hascheduler_db.L3_HA_scheduler_db_mixin):
supported_extension_aliases = ["l3-ha", "router_availability_zone"]
@classmethod
def get_plugin_type(cls):
return plugin_constants.L3
def get_plugin_description(self):
return "L3 Routing Service Plugin for testing"
class L3HATestCaseMixin(testlib_api.SqlTestCase,
L3SchedulerBaseMixin):
def setUp(self):
super(L3HATestCaseMixin, self).setUp()
self.adminContext = n_context.get_admin_context()
mock.patch.object(n_rpc, 'get_client').start()
self.setup_coreplugin('ml2', load_plugins=False)
cfg.CONF.set_override('service_plugins',
['neutron.tests.unit.scheduler.'
'test_l3_agent_scheduler.L3HAPlugin'])
cfg.CONF.set_override('max_l3_agents_per_router', 0)
manager.init()
self.plugin = directory.get_plugin(plugin_constants.L3)
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
)
self._register_l3_agents()
@staticmethod
def get_router_l3_agent_binding(context, router_id, l3_agent_id=None,
binding_index=None):
args = {'router_id': router_id}
if l3_agent_id:
args['l3_agent_id'] = l3_agent_id
if binding_index:
args['binding_index'] = binding_index
return rb_obj.RouterL3AgentBinding.get_objects(context, **args)
def _create_ha_router(self, ha=True, tenant_id='tenant1', az_hints=None):
self.adminContext.tenant_id = tenant_id
router = {'name': 'router1', 'admin_state_up': True,
'tenant_id': tenant_id}
if ha is not None:
router['ha'] = ha
if az_hints is None:
az_hints = []
router['availability_zone_hints'] = az_hints
return self.plugin.create_router(self.adminContext,
{'router': router})
def test_create_ha_port_and_bind_catch_integrity_error(self):
router = self._create_ha_router(tenant_id='foo_tenant')
self.plugin.schedule_router(self.adminContext, router['id'])
agent = {'id': 'foo_agent'}
orig_fn = orm.Session.add
def db_ref_err_for_add_haportbinding(s, instance):
if instance.__class__.__name__ == 'L3HARouterAgentPortBinding':
instance.router_id = 'nonexistent_router'
return orig_fn(s, instance)
with mock.patch.object(self.plugin.router_scheduler, 'bind_router'):
with mock.patch.object(
orm.Session, 'add',
side_effect=db_ref_err_for_add_haportbinding,
autospec=True):
self.plugin.router_scheduler.create_ha_port_and_bind(
self.plugin, self.adminContext,
router['id'], router['tenant_id'], agent)
def test_create_ha_port_and_bind_wont_create_redundant_ports(self):
# When migrating from HA to DVR+HA router, create_ha_port_and_bind
# should create only one network:router_ha_interface port on a router
# when binding to same agent. So we need only one agent for testing
# (preferably with dvr_snat mode).
agent_obj.Agent.update_objects(
self.adminContext, {'admin_state_up': False})
l3_dvr_snat_agent = helpers.register_l3_agent(
'fake_l3_host_dvr_snat', constants.L3_AGENT_MODE_DVR_SNAT)
router = self._create_ha_router(tenant_id='foo_tenant')
self.plugin.schedule_router(self.adminContext, router['id'])
router['admin_state_up'] = False
updated_router1 = self.plugin.update_router(
self.adminContext, router['id'], {'router': router})
updated_router1['distributed'] = True
self.plugin.update_router(
self.adminContext, router['id'], {'router': updated_router1})
self.plugin.router_scheduler.create_ha_port_and_bind(
self.plugin, self.adminContext, router['id'],
router['tenant_id'], l3_dvr_snat_agent)
filters = {'device_owner': ['network:router_ha_interface'],
'device_id': [router['id']]}
self.core_plugin = directory.get_plugin()
ports = self.core_plugin.get_ports(
self.adminContext, filters=filters)
self.assertEqual(1, len(ports))
def test_create_ha_port_and_bind_catch_router_not_found(self):
router = self._create_ha_router(tenant_id='foo_tenant')
self.plugin.schedule_router(self.adminContext, router['id'])
agent = {'id': 'foo_agent'}
with mock.patch.object(self.plugin.router_scheduler, 'bind_router'):
with mock.patch.object(
self.plugin, 'add_ha_port',
side_effect=l3_exc.RouterNotFound(
router_id='foo_router')),\
mock.patch.object(
self.plugin, 'safe_delete_ha_network') as sd_ha_net:
self.plugin.router_scheduler.create_ha_port_and_bind(
self.plugin, self.adminContext,
router['id'], router['tenant_id'], agent)
self.assertTrue(sd_ha_net.called)
def test_create_ha_port_and_bind_bind_router_returns_None(self):
router = self._create_ha_router(tenant_id='foo_tenant')
agent = {'id': 'foo_agent'}
with mock.patch.object(self.plugin.router_scheduler, 'bind_router',
return_value=None):
with mock.patch.object(self.plugin, 'add_ha_port') as add_ha_port:
self.plugin.router_scheduler.create_ha_port_and_bind(
self.plugin, self.adminContext,
router['id'], router['tenant_id'], agent)
self.assertFalse(add_ha_port.called)
class VacantBindingIndexTestCase(L3HATestCaseMixin):
"""Test various scenarios for get_vacant_binding_index().
binding_index
The binding_index we want to delete/unschedule.
is_manual_scheduling
Whether or not this is a scheduling requested by the user
(`neutron l3-agent-router-add`) or by some worker (scheduler or RPC
from agent). If this is a manual scheduling we should always
comply.
"""
binding_scenarios = [
('Delete first binding_index',
dict(binding_index=1)),
('Delete middle binding_index',
dict(binding_index=2)),
('Delete last binding_index',
dict(binding_index=3)),
('Do not remove any bindings',
dict(binding_index=None)),
]
manual_scheduling_scenarios = [
('with manual scheduling',
dict(is_manual_scheduling=True)),
('without manual scheduling',
dict(is_manual_scheduling=False)),
]
scenarios = testscenarios.multiply_scenarios(
binding_scenarios, manual_scheduling_scenarios)
def test_get_vacant_binding_index(self):
helpers.register_l3_agent('host_3')
cfg.CONF.set_override('max_l3_agents_per_router', 3)
router = self._create_ha_router()
if self.binding_index:
bindings = self.get_router_l3_agent_binding(
self.adminContext, router['id'],
binding_index=self.binding_index)
self.assertEqual(1, len(bindings))
bindings[0].delete()
vacant_binding_index = self.plugin.get_vacant_binding_index(
self.adminContext, router['id'], self.is_manual_scheduling)
if self.binding_index:
self.assertEqual(self.binding_index, vacant_binding_index)
else:
if self.is_manual_scheduling:
# If this is a manual scheduling, the user requested the
# binding so we should always provide a new one.
self.assertEqual(cfg.CONF.max_l3_agents_per_router + 1,
vacant_binding_index)
else:
# Else, we already have 3 so -1 is the 'error' value.
self.assertEqual(-1, vacant_binding_index)
class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
def _register_l3_agents(self, plugin=None):
super(L3_HA_scheduler_db_mixinTestCase,
self)._register_l3_agents(plugin=plugin)
self.agent3 = helpers.register_l3_agent(host='host_3')
self.agent_id3 = self.agent3.id
self.agent4 = helpers.register_l3_agent(host='host_4')
self.agent_id4 = self.agent4.id
def test_get_routers_l3_agents_count(self):
router1 = self._create_ha_router()
cfg.CONF.set_override('max_l3_agents_per_router', 2)
router2 = self._create_ha_router()
router3 = self._create_ha_router(ha=False)
result = self.plugin.get_routers_l3_agents_count(self.adminContext)
self.assertEqual(3, len(result))
check_result = [(router['id'], agents) for router, agents in result]
self.assertIn((router1['id'], 4), check_result)
self.assertIn((router2['id'], 2), check_result)
self.assertIn((router3['id'], 0), check_result)
def test_get_ordered_l3_agents_by_num_routers(self):
# Mock scheduling so that the test can control it explicitly
mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
'_notify_router_updated').start()
with mock.patch.object(self.plugin, 'schedule_router'):
router1 = self._create_ha_router()
router2 = self._create_ha_router()
router3 = self._create_ha_router(ha=False)
router4 = self._create_ha_router(ha=False)
# Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will
# host 2, and agent 4 will host 3.
self.plugin.schedule_router(self.adminContext, router1['id'],
candidates=[self.agent2, self.agent4])
self.plugin.schedule_router(self.adminContext, router2['id'],
candidates=[self.agent3, self.agent4])
self.plugin.schedule_router(self.adminContext, router3['id'],
candidates=[self.agent3])
self.plugin.schedule_router(self.adminContext, router4['id'],
candidates=[self.agent4])
agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3,
self.agent_id4]
result = self.plugin.get_l3_agents_ordered_by_num_routers(
self.adminContext, agent_ids)
self.assertEqual(agent_ids, [record['id'] for record in result])
class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin):
def _setup_ha_router(self):
router = self._create_ha_router()
agents = self._get_agents_scheduled_for_router(router)
return router, agents
def test_reschedule_ha_routers_from_down_agents(self):
agents = self._setup_ha_router()[1]
self.assertEqual(2, len(agents))
self._set_l3_agent_dead(self.agent_id1)
with mock.patch.object(self.plugin, 'reschedule_router') as reschedule:
self.plugin.reschedule_routers_from_down_agents()
self.assertFalse(reschedule.called)
def test_list_l3_agents_hosting_ha_router(self):
router = self._create_ha_router()
agents = self.plugin.list_l3_agents_hosting_router(
self.adminContext, router['id'])['agents']
for agent in agents:
self.assertEqual('standby', agent['ha_state'])
self.plugin.update_routers_states(
self.adminContext, {router['id']: 'active'}, self.agent1.host)
agents = self.plugin.list_l3_agents_hosting_router(
self.adminContext, router['id'])['agents']
for agent in agents:
expected_state = ('active' if agent['host'] == self.agent1.host
else 'standby')
self.assertEqual(expected_state, agent['ha_state'])
def test_list_l3_agents_hosting_legacy_router(self):
router = self._create_ha_router(ha=False)
self.plugin.schedule_router(self.adminContext, router['id'])
agent = self.plugin.list_l3_agents_hosting_router(
self.adminContext, router['id'])['agents'][0]
self.assertIsNone(agent['ha_state'])
def test_get_agents_dict_for_router_unscheduled_returns_empty_list(self):
self.assertEqual({'agents': []},
self.plugin._get_agents_dict_for_router([]))
def test_router_doesnt_support_scheduling(self):
with mock.patch.object(self.plugin, 'router_supports_scheduling',
return_value=False):
agent = helpers.register_l3_agent(host='myhost_3')
with testtools.ExpectedException(
l3agent.RouterDoesntSupportScheduling):
self.plugin.add_router_to_l3_agent(
self.adminContext, agent.id, 'router_id')
def test_manual_add_ha_router_to_agent(self):
cfg.CONF.set_override('max_l3_agents_per_router', 2)
router, agents = self._setup_ha_router()
self.assertEqual(2, len(agents))
agent = helpers.register_l3_agent(host='myhost_3')
# We allow to exceed max l3 agents per router via manual scheduling
self.plugin.add_router_to_l3_agent(
self.adminContext, agent.id, router['id'])
agents = self._get_agents_scheduled_for_router(router)
self.assertIn(agent.id, [_agent.id for _agent in agents])
self.assertEqual(3, len(agents))
def test_manual_remove_ha_router_from_agent(self):
router, agents = self._setup_ha_router()
self.assertEqual(2, len(agents))
agent = agents.pop()
# Remove router from agent and make sure it is removed
self.plugin.remove_router_from_l3_agent(
self.adminContext, agent.id, router['id'])
agents = self._get_agents_scheduled_for_router(router)
self.assertEqual(1, len(agents))
self.assertNotIn(agent.id, [_agent.id for _agent in agents])
def test_manual_remove_ha_router_from_all_agents(self):
router, agents = self._setup_ha_router()
self.assertEqual(2, len(agents))
agent = agents.pop()
self.plugin.remove_router_from_l3_agent(
self.adminContext, agent.id, router['id'])
agent = agents.pop()
self.plugin.remove_router_from_l3_agent(
self.adminContext, agent.id, router['id'])
agents = self._get_agents_scheduled_for_router(router)
self.assertEqual(0, len(agents))
def _get_agents_scheduled_for_router(self, router):
return self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
def test_delete_ha_interfaces_from_agent(self):
router, agents = self._setup_ha_router()
agent = agents.pop()
self.plugin.remove_router_from_l3_agent(
self.adminContext, agent.id, router['id'])
objs = l3_hamode.L3HARouterAgentPortBinding.get_objects(
self.adminContext, router_id=router['id'])
results = [binding.l3_agent_id for binding in objs]
self.assertNotIn(agent.id, results)
def test_add_ha_interface_to_l3_agent(self):
agent = self.plugin.get_agents_db(self.adminContext)[0]
router = self._create_ha_router()
self.plugin.add_router_to_l3_agent(self.adminContext, agent.id,
router['id'])
# Verify agent has HA interface
ha_ports = self.plugin.get_ha_router_port_bindings(self.adminContext,
[router['id']])
self.assertIn(agent.id, [ha_port.l3_agent_id for ha_port in ha_ports])
def test_schedule_routers_unique_binding_indices(self):
cfg.CONF.set_override('max_l3_agents_per_router', 2)
router = self._create_ha_router()
bindings = self.get_router_l3_agent_binding(self.adminContext,
router['id'])
binding_indices = [binding.binding_index for binding in bindings]
self.assertEqual(list(range(1, cfg.CONF.max_l3_agents_per_router + 1)),
binding_indices)
def test_bind_router_twice_for_non_ha(self):
router = self._create_ha_router(ha=False)
self.plugin.router_scheduler.bind_router(self.plugin,
self.adminContext,
router['id'],
self.agent_id1)
self.plugin.router_scheduler.bind_router(self.plugin,
self.adminContext,
router['id'],
self.agent_id2)
# Make sure the second bind_router call didn't schedule the router to
# more agents than allowed.
agents = self.plugin.get_l3_agents_hosting_routers(self.adminContext,
[router['id']])
self.assertEqual(1, len(agents))
# Moreover, make sure that the agent that did get bound, only got bound
# once.
bindings = self.get_router_l3_agent_binding(
self.adminContext, router['id'], l3_agent_id=agents[0]['id'])
self.assertEqual(1, len(bindings))
class L3HAChanceSchedulerTestCase(L3HATestCaseMixin):
def test_scheduler_with_ha_enabled(self):
router = self._create_ha_router()
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
for agent in agents:
sync_data = self.plugin.get_ha_sync_data_for_host(
self.adminContext, router_ids=[router['id']],
host=agent.host, agent=agent)
self.assertEqual(1, len(sync_data))
interface = sync_data[0][constants.HA_INTERFACE_KEY]
self.assertIsNotNone(interface)
def test_auto_schedule(self):
# Mock scheduling so that the test can control it explicitly
mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
'_notify_router_updated').start()
router = self._create_ha_router()
self.plugin.auto_schedule_routers(self.adminContext, self.agent1.host)
self.plugin.auto_schedule_routers(self.adminContext, self.agent2.host)
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']])
self.assertEqual(2, len(agents))
def test_auto_schedule_specific_router_when_agent_added(self):
self._auto_schedule_when_agent_added(True)
def test_auto_schedule_all_routers_when_agent_added(self):
self._auto_schedule_when_agent_added(False)
def test_auto_schedule_ha_router_when_incompatible_agent_exist(self):
handle_internal_only_routers_agent = helpers.register_l3_agent(
'host_3', constants.L3_AGENT_MODE_LEGACY, internal_only=False)
router = self._create_ha_router()
self.plugin.auto_schedule_routers(
self.adminContext, handle_internal_only_routers_agent.host)
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
agent_ids = [agent['id'] for agent in agents]
self.assertEqual(2, len(agents))
self.assertNotIn(handle_internal_only_routers_agent.id, agent_ids)
def test_auto_schedule_ha_router_when_dvr_agent_exist(self):
dvr_agent = helpers.register_l3_agent(
HOST_DVR, constants.L3_AGENT_MODE_DVR)
router = self._create_ha_router()
self.plugin.auto_schedule_routers(self.adminContext, dvr_agent.host)
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
agent_ids = [agent['id'] for agent in agents]
self.assertEqual(2, len(agents))
self.assertNotIn(dvr_agent.id, agent_ids)
def _auto_schedule_when_agent_added(self, specific_router):
router = self._create_ha_router()
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id1, agent_ids)
self.assertIn(self.agent_id2, agent_ids)
agent = helpers.register_l3_agent(host='host_3')
self.agent_id3 = agent.id
self.plugin.auto_schedule_routers(self.adminContext, 'host_3')
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(3, len(agents))
# Simulate agent restart to make sure we don't try to re-bind
self.plugin.auto_schedule_routers(self.adminContext, 'host_3')
class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin):
def _register_l3_agents(self, plugin=None):
super(L3HALeastRoutersSchedulerTestCase,
self)._register_l3_agents(plugin=plugin)
agent = helpers.register_l3_agent(host='host_3')
self.agent_id3 = agent.id
agent = helpers.register_l3_agent(host='host_4')
self.agent_id4 = agent.id
def setUp(self):
super(L3HALeastRoutersSchedulerTestCase, self).setUp()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
)
def test_scheduler(self):
cfg.CONF.set_override('max_l3_agents_per_router', 2)
# disable the third agent to be sure that the router will
# be scheduled of the two firsts
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id3, False)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id4, False)
r1 = self._create_ha_router()
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id1, agent_ids)
self.assertIn(self.agent_id2, agent_ids)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id3, True)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id4, True)
r2 = self._create_ha_router()
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r2['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id3, agent_ids)
self.assertIn(self.agent_id4, agent_ids)
class TestGetL3AgentsWithFilter(testlib_api.SqlTestCase,
                                L3SchedulerBaseMixin):
    """Test cases to test get_l3_agents.

    6 l3 agents are registered in the order - legacy, dvr_snat, dvr,
    dvr_no_external, fake_mode and legacy
    """

    # testscenarios-style matrix: each entry supplies the filter inputs
    # (agent_modes / host) and the agents a filtered get_l3_agents()
    # call is expected to return for those inputs.
    scenarios = [
        ('no filter',
         dict(agent_modes=[],
              host=['host_1'],
              expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
                                    'dvr_no_external', 'fake_mode',
                                    'legacy'],
              expected_host=['host_1'])),
        ('legacy',
         dict(agent_modes=['legacy'],
              host=['host_1'],
              expected_agent_modes=['legacy', 'legacy'],
              expected_host=['host_1'])),
        ('dvr_snat',
         dict(agent_modes=['dvr_snat'],
              host=['host_2'],
              expected_agent_modes=['dvr_snat'],
              expected_host=['host_2'])),
        ('dvr',
         dict(agent_modes=['dvr'],
              host=['host_3'],
              expected_agent_modes=['dvr'],
              expected_host=['host_3'])),
        ('dvr_no_external',
         dict(agent_modes=['dvr_no_external'],
              host=['host_4'],
              expected_agent_modes=['dvr_no_external'],
              expected_host=['host_4'])),
        ('dvr_snat and dvr',
         dict(agent_modes=['dvr_snat', 'dvr'],
              host=['host_5'],
              expected_agent_modes=['dvr_snat', 'dvr'],
              expected_host=['host_5'])),
        ('dvr_snat and dvr_no_external',
         dict(agent_modes=['dvr_snat', 'dvr_no_external'],
              host=['host_5'],
              expected_agent_modes=['dvr_snat', 'dvr_no_external'],
              expected_host=['host_5'])),
        ('dvr_snat, dvr and dvr_no_external',
         dict(agent_modes=['dvr_snat', 'dvr', 'dvr_no_external'],
              host=['host_6'],
              expected_agent_modes=['dvr_snat', 'dvr', 'dvr_no_external'],
              expected_host=['host_6'])),
        ('invalid',
         dict(agent_modes=['invalid'],
              host=['host_invalid'],
              expected_agent_modes=[],
              expected_host=[])),
    ]

    def setUp(self):
        super(TestGetL3AgentsWithFilter, self).setUp()
        self.plugin = L3HAPlugin()
        self.setup_coreplugin('ml2')
        self.adminContext = n_context.get_admin_context()
        # One agent per host; hosts and modes are paired positionally so
        # the scenarios above can filter on either dimension.
        hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5', 'host_6']
        agent_modes = ['legacy', 'dvr_snat', 'dvr', 'dvr_no_external',
                       'fake_mode', 'legacy']
        for host, agent_mode in zip(hosts, agent_modes):
            helpers.register_l3_agent(host, agent_mode)
class TestGetL3AgentsWithAgentModeFilter(TestGetL3AgentsWithFilter):
    """Test cases to test get_l3_agents 'agent_mode'.

    This class tests the L3AgentSchedulerDbMixin.get_l3_agents()
    for the 'agent_mode' filter with various values.
    """

    def _get_agent_mode(self, agent):
        """Return the configured agent_mode of *agent* ('None' if unset)."""
        agent_conf = self.plugin.get_configuration_dict(agent)
        return agent_conf.get('agent_mode', 'None')

    def test_get_l3_agents(self):
        l3_agents = self.plugin.get_l3_agents(
            self.adminContext, filters={'agent_modes': self.agent_modes})
        self.assertEqual(len(self.expected_agent_modes), len(l3_agents))
        returned_agent_modes = [self._get_agent_mode(agent)
                                for agent in l3_agents]
        # Bug fix: assertItemsEqual exists only in Python 2's unittest;
        # it was renamed assertCountEqual in Python 3 (same semantics:
        # order-insensitive element comparison).
        self.assertCountEqual(self.expected_agent_modes, returned_agent_modes)
class TestGetL3AgentsWithHostFilter(TestGetL3AgentsWithFilter):
    """Test cases to test get_l3_agents 'hosts'.

    This class tests the L3AgentSchedulerDbMixin.get_l3_agents()
    for the 'host' filter with various values.
    """

    def _get_host(self, agent):
        """Return the host of *agent*, or 'None' when it is missing."""
        return agent.get('host', 'None')

    def test_get_l3_agents(self):
        filters = {'host': self.host}
        l3_agents = self.plugin.get_l3_agents(self.adminContext,
                                              filters=filters)
        self.assertEqual(len(self.expected_host), len(l3_agents))
        returned_host = [self._get_host(l3_agent) for l3_agent in l3_agents]
        self.assertEqual(self.expected_host, returned_host)
class L3AgentAZLeastRoutersSchedulerTestCase(L3HATestCaseMixin):
    """Tests for the availability-zone aware 'least routers' L3 scheduler."""

    def setUp(self):
        super(L3AgentAZLeastRoutersSchedulerTestCase, self).setUp()
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.AZLeastRoutersScheduler')
        # Mock scheduling so that the test can control it explicitly
        mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
                          '_notify_router_updated').start()
        # Removes MissingAuthPlugin exception from logs
        self.patch_notifier = mock.patch(
            'neutron.notifiers.batch_notifier.BatchNotifier._notify')
        self.patch_notifier.start()

    def _register_l3_agents(self):
        # Two agents in each of three availability zones.
        self.agent1 = helpers.register_l3_agent(host='az1-host1', az='az1')
        self.agent2 = helpers.register_l3_agent(host='az1-host2', az='az1')
        self.agent3 = helpers.register_l3_agent(host='az2-host1', az='az2')
        self.agent4 = helpers.register_l3_agent(host='az2-host2', az='az2')
        self.agent5 = helpers.register_l3_agent(host='az3-host1', az='az3')
        self.agent6 = helpers.register_l3_agent(host='az3-host2', az='az3')

    def test_az_scheduler_auto_schedule(self):
        # Auto-scheduling on a host inside the hinted AZ hosts the router.
        r1 = self._create_ha_router(ha=False, az_hints=['az1'])
        self.plugin.auto_schedule_routers(self.adminContext, 'az1-host2')
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']])
        self.assertEqual(1, len(agents))
        self.assertEqual('az1-host2', agents[0]['host'])

    def test_az_scheduler_auto_schedule_no_match(self):
        # A host outside the hinted AZ must not pick up the router.
        r1 = self._create_ha_router(ha=False, az_hints=['az1'])
        self.plugin.auto_schedule_routers(self.adminContext, 'az2-host1')
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']])
        self.assertEqual(0, len(agents))

    def test_az_scheduler_default_az(self):
        # Without az_hints, routers land in the configured default AZ.
        cfg.CONF.set_override('default_availability_zones', ['az2'])
        r1 = self._create_ha_router(ha=False)
        r2 = self._create_ha_router(ha=False)
        r3 = self._create_ha_router(ha=False)
        self.plugin.schedule_router(self.adminContext, r1['id'])
        self.plugin.schedule_router(self.adminContext, r2['id'])
        self.plugin.schedule_router(self.adminContext, r3['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id'], r2['id'], r3['id']])
        self.assertEqual(3, len(agents))
        expected_hosts = set(['az2-host1', 'az2-host2'])
        hosts = set([a['host'] for a in agents])
        self.assertEqual(expected_hosts, hosts)

    def test_az_scheduler_az_hints(self):
        # An explicit az_hint overrides any default AZ.
        r1 = self._create_ha_router(ha=False, az_hints=['az3'])
        r2 = self._create_ha_router(ha=False, az_hints=['az3'])
        r3 = self._create_ha_router(ha=False, az_hints=['az3'])
        self.plugin.schedule_router(self.adminContext, r1['id'])
        self.plugin.schedule_router(self.adminContext, r2['id'])
        self.plugin.schedule_router(self.adminContext, r3['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id'], r2['id'], r3['id']])
        self.assertEqual(3, len(agents))
        expected_hosts = set(['az3-host1', 'az3-host2'])
        hosts = set([a['host'] for a in agents])
        self.assertEqual(expected_hosts, hosts)

    def test_az_scheduler_least_routers(self):
        # Four routers over the two az1 agents: expect an even 2/2 spread.
        r1 = self._create_ha_router(ha=False, az_hints=['az1'])
        r2 = self._create_ha_router(ha=False, az_hints=['az1'])
        r3 = self._create_ha_router(ha=False, az_hints=['az1'])
        r4 = self._create_ha_router(ha=False, az_hints=['az1'])
        self.plugin.schedule_router(self.adminContext, r1['id'])
        self.plugin.schedule_router(self.adminContext, r2['id'])
        self.plugin.schedule_router(self.adminContext, r3['id'])
        self.plugin.schedule_router(self.adminContext, r4['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id'], r2['id'], r3['id'], r4['id']])
        host_num = collections.defaultdict(int)
        for agent in agents:
            host_num[agent['host']] += 1
        self.assertEqual(2, host_num['az1-host1'])
        self.assertEqual(2, host_num['az1-host2'])

    def test_az_scheduler_ha_az_hints(self):
        # An HA router hinted at two AZs gets one instance in each AZ.
        cfg.CONF.set_override('max_l3_agents_per_router', 2)
        r1 = self._create_ha_router(az_hints=['az1', 'az3'])
        self.plugin.schedule_router(self.adminContext, r1['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']])
        self.assertEqual(2, len(agents))
        expected_azs = set(['az1', 'az3'])
        azs = set([a['availability_zone'] for a in agents])
        self.assertEqual(expected_azs, azs)

    def test_az_scheduler_ha_auto_schedule(self):
        cfg.CONF.set_override('max_l3_agents_per_router', 3)
        # Disable one agent in az1 and one in az3 so only two agents can
        # initially host the router.
        self._set_l3_agent_admin_state(self.adminContext, self.agent2['id'],
                                       state=False)
        self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'],
                                       state=False)
        r1 = self._create_ha_router(az_hints=['az1', 'az3'])
        self.plugin.schedule_router(self.adminContext, r1['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']])
        self.assertEqual(2, len(agents))
        hosts = set([a['host'] for a in agents])
        self.assertEqual(set(['az1-host1', 'az3-host1']), hosts)
        # Re-enabling the az3 agent and auto-scheduling on its host adds
        # the third router instance there.
        self._set_l3_agent_admin_state(self.adminContext, self.agent6['id'],
                                       state=True)
        self.plugin.auto_schedule_routers(self.adminContext, 'az3-host2')
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']])
        self.assertEqual(3, len(agents))
        expected_hosts = set(['az1-host1', 'az3-host1', 'az3-host2'])
        hosts = set([a['host'] for a in agents])
        self.assertEqual(expected_hosts, hosts)

    def test__get_routers_can_schedule_with_no_target_routers(self):
        # An empty router list must short-circuit to an empty result.
        result = self.plugin.router_scheduler._get_routers_can_schedule(
            self.plugin, mock.ANY, [], mock.ANY)
        self.assertEqual([], result)
class L3DVRHAPlugin(db_v2.NeutronDbPluginV2,
                    l3_hamode_db.L3_HA_NAT_db_mixin,
                    l3_dvr_ha_scheduler_db.L3_DVR_HA_scheduler_db_mixin):
    """Test plugin combining the core DB, L3 HA and DVR+HA scheduler mixins."""
    pass
class L3DVRHATestCaseMixin(testlib_api.SqlTestCase,
                           L3SchedulerBaseMixin):
    """Base mixin providing an admin context and a DVR+HA L3 plugin."""

    def setUp(self):
        super(L3DVRHATestCaseMixin, self).setUp()
        # The two attributes are independent of each other.
        self.plugin = L3DVRHAPlugin()
        self.adminContext = n_context.get_admin_context()
|
|
import torch
from torch_geometric.loader import DataLoader
import torch.optim as optim
import torch.nn.functional as F
from torchvision import transforms
from gnn import GNN
from tqdm import tqdm
import argparse
import time
import numpy as np
import pandas as pd
import os
### importing OGB
from ogb.graphproppred import PygGraphPropPredDataset, Evaluator
### importing utils
from utils import ASTNodeEncoder, get_vocab_mapping
### for data transform
from utils import augment_edge, encode_y_to_arr, decode_arr_to_seq
# Per-position loss for sequence prediction: each output position is a
# multi-class classification over the sub-token vocabulary.
multicls_criterion = torch.nn.CrossEntropyLoss()
def train(model, device, loader, optimizer):
    """Run one training epoch over *loader* and print the mean loss."""
    model.train()

    total_loss = 0
    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = batch.to(device)

        # Skip degenerate batches (a single node, or a single graph).
        if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
            continue

        pred_list = model(batch)
        optimizer.zero_grad()

        # Average the per-position cross-entropy over the target sequence.
        loss = 0
        for position, pred in enumerate(pred_list):
            loss = loss + multicls_criterion(pred.to(torch.float32),
                                             batch.y_arr[:, position])
        loss = loss / len(pred_list)

        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    print('Average training loss: {}'.format(total_loss / (step + 1)))
def eval(model, device, loader, evaluator, arr_to_seq):
    """Evaluate *model* on *loader* and return the evaluator's metric dict.

    NOTE: the name shadows the ``eval`` builtin but is kept unchanged for
    backward compatibility with existing callers.
    """
    model.eval()

    seq_ref_list = []
    seq_pred_list = []

    for step, batch in enumerate(tqdm(loader, desc="Iteration")):
        batch = batch.to(device)

        # Skip single-node graphs, mirroring the training loop.
        if batch.x.shape[0] == 1:
            continue

        with torch.no_grad():
            pred_list = model(batch)

        # Greedy decoding: take the argmax token at every position and
        # stack the columns into a (num_graphs, seq_len) matrix.
        columns = [torch.argmax(pred, dim=1).view(-1, 1)
                   for pred in pred_list]
        mat = torch.cat(columns, dim=1)

        seq_ref_list.extend(batch.y[i] for i in range(len(batch.y)))
        seq_pred_list.extend(arr_to_seq(arr) for arr in mat)

    input_dict = {"seq_ref": seq_ref_list, "seq_pred": seq_pred_list}
    return evaluator.eval(input_dict)
def main():
    """Train and evaluate a GNN baseline on the ogbg-code2 task.

    Parses the command line, builds the dataset, vocabulary and model,
    trains for the requested number of epochs and reports the scores of
    the epoch with the best validation performance.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='GNN baselines on ogbg-code2 data with Pytorch Geometrics')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--gnn', type=str, default='gcn-virtual',
                        help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gcn-virtual)')
    parser.add_argument('--drop_ratio', type=float, default=0,
                        help='dropout ratio (default: 0)')
    parser.add_argument('--max_seq_len', type=int, default=5,
                        help='maximum sequence length to predict (default: 5)')
    parser.add_argument('--num_vocab', type=int, default=5000,
                        help='the number of vocabulary used for sequence prediction (default: 5000)')
    parser.add_argument('--num_layer', type=int, default=5,
                        help='number of GNN message passing layers (default: 5)')
    parser.add_argument('--emb_dim', type=int, default=300,
                        help='dimensionality of hidden units in GNNs (default: 300)')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='input batch size for training (default: 128)')
    parser.add_argument('--epochs', type=int, default=25,
                        help='number of epochs to train (default: 25)')
    parser.add_argument('--random_split', action='store_true')
    parser.add_argument('--num_workers', type=int, default=0,
                        help='number of workers (default: 0)')
    parser.add_argument('--dataset', type=str, default="ogbg-code2",
                        help='dataset name (default: ogbg-code2)')
    parser.add_argument('--filename', type=str, default="",
                        help='filename to output result (default: )')
    args = parser.parse_args()
    print(args)

    device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")

    ### automatic dataloading and splitting
    dataset = PygGraphPropPredDataset(name=args.dataset)

    seq_len_list = np.array([len(seq) for seq in dataset.data.y])
    # Bug fix: the value is a fraction but the message reports '%', so it
    # must be scaled by 100.
    print('Target seqence less or equal to {} is {}%.'.format(
        args.max_seq_len,
        100.0 * np.sum(seq_len_list <= args.max_seq_len) / len(seq_len_list)))

    split_idx = dataset.get_idx_split()

    if args.random_split:
        print('Using random split')
        # Keep the official split sizes but shuffle which graphs land in
        # which split.
        perm = torch.randperm(len(dataset))
        num_train, num_valid, num_test = len(split_idx['train']), len(split_idx['valid']), len(split_idx['test'])
        split_idx['train'] = perm[:num_train]
        split_idx['valid'] = perm[num_train:num_train + num_valid]
        split_idx['test'] = perm[num_train + num_valid:]

        assert len(split_idx['train']) == num_train
        assert len(split_idx['valid']) == num_valid
        assert len(split_idx['test']) == num_test

    ### building vocabulary for sequence prediction. Only use training data.
    vocab2idx, idx2vocab = get_vocab_mapping(
        [dataset.data.y[i] for i in split_idx['train']], args.num_vocab)

    ### set the transform function
    # augment_edge: add next-token edge as well as inverse edges. add edge attributes.
    # encode_y_to_arr: add y_arr to PyG data object, indicating the array representation of a sequence.
    dataset.transform = transforms.Compose(
        [augment_edge, lambda data: encode_y_to_arr(data, vocab2idx, args.max_seq_len)])

    ### automatic evaluator. takes dataset name as input
    evaluator = Evaluator(args.dataset)

    train_loader = DataLoader(dataset[split_idx["train"]], batch_size=args.batch_size,
                              shuffle=True, num_workers=args.num_workers)
    valid_loader = DataLoader(dataset[split_idx["valid"]], batch_size=args.batch_size,
                              shuffle=False, num_workers=args.num_workers)
    test_loader = DataLoader(dataset[split_idx["test"]], batch_size=args.batch_size,
                             shuffle=False, num_workers=args.num_workers)

    nodetypes_mapping = pd.read_csv(os.path.join(dataset.root, 'mapping', 'typeidx2type.csv.gz'))
    nodeattributes_mapping = pd.read_csv(os.path.join(dataset.root, 'mapping', 'attridx2attr.csv.gz'))

    print(nodeattributes_mapping)

    ### Encoding node features into emb_dim vectors.
    ### The following three node features are used.
    # 1. node type
    # 2. node attribute
    # 3. node depth
    node_encoder = ASTNodeEncoder(args.emb_dim,
                                  num_nodetypes=len(nodetypes_mapping['type']),
                                  num_nodeattributes=len(nodeattributes_mapping['attr']),
                                  max_depth=20)

    # Map the --gnn flag onto (gnn_type, virtual_node) instead of four
    # near-identical construction branches.
    gnn_variants = {
        'gin': ('gin', False),
        'gin-virtual': ('gin', True),
        'gcn': ('gcn', False),
        'gcn-virtual': ('gcn', True),
    }
    if args.gnn not in gnn_variants:
        raise ValueError('Invalid GNN type')
    gnn_type, virtual_node = gnn_variants[args.gnn]
    model = GNN(num_vocab=len(vocab2idx), max_seq_len=args.max_seq_len,
                node_encoder=node_encoder, num_layer=args.num_layer,
                gnn_type=gnn_type, emb_dim=args.emb_dim,
                drop_ratio=args.drop_ratio,
                virtual_node=virtual_node).to(device)

    optimizer = optim.Adam(model.parameters(), lr=0.001)

    print(f'#Params: {sum(p.numel() for p in model.parameters())}')

    # Single decoder shared by all three evaluation calls (the original
    # rebuilt an identical lambda per call).
    def arr_to_seq(arr):
        return decode_arr_to_seq(arr, idx2vocab)

    valid_curve = []
    test_curve = []
    train_curve = []

    for epoch in range(1, args.epochs + 1):
        print("=====Epoch {}".format(epoch))
        print('Training...')
        train(model, device, train_loader, optimizer)

        print('Evaluating...')
        train_perf = eval(model, device, train_loader, evaluator, arr_to_seq=arr_to_seq)
        valid_perf = eval(model, device, valid_loader, evaluator, arr_to_seq=arr_to_seq)
        test_perf = eval(model, device, test_loader, evaluator, arr_to_seq=arr_to_seq)

        print({'Train': train_perf, 'Validation': valid_perf, 'Test': test_perf})

        train_curve.append(train_perf[dataset.eval_metric])
        valid_curve.append(valid_perf[dataset.eval_metric])
        test_curve.append(test_perf[dataset.eval_metric])

    print('F1')
    # Report the epoch with the best validation score.
    best_val_epoch = np.argmax(np.array(valid_curve))
    best_train = max(train_curve)
    print('Finished training!')
    print('Best validation score: {}'.format(valid_curve[best_val_epoch]))
    print('Test score: {}'.format(test_curve[best_val_epoch]))

    if not args.filename == '':
        result_dict = {'Val': valid_curve[best_val_epoch],
                       'Test': test_curve[best_val_epoch],
                       'Train': train_curve[best_val_epoch],
                       'BestTrain': best_train}
        torch.save(result_dict, args.filename)
# Script entry point.
if __name__ == "__main__":
    main()
|
|
"""Support for HomeMatic devices."""
from datetime import timedelta, datetime
from functools import partial
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_MODE,
ATTR_NAME,
CONF_HOST,
CONF_HOSTS,
CONF_PASSWORD,
CONF_PLATFORM,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
EVENT_HOMEASSISTANT_STOP,
STATE_UNKNOWN,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)

DOMAIN = "homematic"

# Polling intervals for the hub entity and its system variables.
SCAN_INTERVAL_HUB = timedelta(seconds=300)
SCAN_INTERVAL_VARIABLES = timedelta(seconds=30)

# Discovery identifiers used to dispatch found devices to the platforms.
DISCOVER_SWITCHES = "homematic.switch"
DISCOVER_LIGHTS = "homematic.light"
DISCOVER_SENSORS = "homematic.sensor"
DISCOVER_BINARY_SENSORS = "homematic.binary_sensor"
DISCOVER_COVER = "homematic.cover"
DISCOVER_CLIMATE = "homematic.climate"
DISCOVER_LOCKS = "homematic.locks"
DISCOVER_BATTERY = "homematic.battery"

# Attribute names used in service-call and event payloads.
ATTR_DISCOVER_DEVICES = "devices"
ATTR_PARAM = "param"
ATTR_CHANNEL = "channel"
ATTR_ADDRESS = "address"
ATTR_VALUE = "value"
ATTR_VALUE_TYPE = "value_type"
ATTR_INTERFACE = "interface"
ATTR_ERRORCODE = "error"
ATTR_MESSAGE = "message"
ATTR_TIME = "time"
ATTR_UNIQUE_ID = "unique_id"
ATTR_PARAMSET_KEY = "paramset_key"
ATTR_PARAMSET = "paramset"
ATTR_DISCOVERY_TYPE = "discovery_type"
ATTR_LOW_BAT = "LOW_BAT"
ATTR_LOWBAT = "LOWBAT"

# Events fired on the Home Assistant bus by this component.
EVENT_KEYPRESS = "homematic.keypress"
EVENT_IMPULSE = "homematic.impulse"
EVENT_ERROR = "homematic.error"

# Names of the services registered in setup().
SERVICE_VIRTUALKEY = "virtualkey"
SERVICE_RECONNECT = "reconnect"
SERVICE_SET_VARIABLE_VALUE = "set_variable_value"
SERVICE_SET_DEVICE_VALUE = "set_device_value"
SERVICE_SET_INSTALL_MODE = "set_install_mode"
SERVICE_PUT_PARAMSET = "put_paramset"
# Map each discovery type to the pyhomematic device class names it covers.
# A class may appear under several types (e.g. "Motion" is both a sensor
# and a binary sensor).
HM_DEVICE_TYPES = {
    DISCOVER_SWITCHES: [
        "Switch",
        "SwitchPowermeter",
        "IOSwitch",
        "IPSwitch",
        "RFSiren",
        "IPSwitchPowermeter",
        "HMWIOSwitch",
        "Rain",
        "EcoLogic",
        "IPKeySwitchPowermeter",
        "IPGarage",
        "IPKeySwitch",
        "IPKeySwitchLevel",
        "IPMultiIO",
    ],
    DISCOVER_LIGHTS: [
        "Dimmer",
        "KeyDimmer",
        "IPKeyDimmer",
        "IPDimmer",
        "ColorEffectLight",
        "IPKeySwitchLevel",
    ],
    DISCOVER_SENSORS: [
        "SwitchPowermeter",
        "Motion",
        "MotionV2",
        "RemoteMotion",
        "MotionIP",
        "ThermostatWall",
        "AreaThermostat",
        "RotaryHandleSensor",
        "WaterSensor",
        "PowermeterGas",
        "LuxSensor",
        "WeatherSensor",
        "WeatherStation",
        "ThermostatWall2",
        "TemperatureDiffSensor",
        "TemperatureSensor",
        "CO2Sensor",
        "IPSwitchPowermeter",
        "HMWIOSwitch",
        "FillingLevel",
        "ValveDrive",
        "EcoLogic",
        "IPThermostatWall",
        "IPSmoke",
        "RFSiren",
        "PresenceIP",
        "IPAreaThermostat",
        "IPWeatherSensor",
        "RotaryHandleSensorIP",
        "IPPassageSensor",
        "IPKeySwitchPowermeter",
        "IPThermostatWall230V",
        "IPWeatherSensorPlus",
        "IPWeatherSensorBasic",
        "IPBrightnessSensor",
        "IPGarage",
        "UniversalSensor",
        "MotionIPV2",
        "IPMultiIO",
        "IPThermostatWall2",
    ],
    DISCOVER_CLIMATE: [
        "Thermostat",
        "ThermostatWall",
        "MAXThermostat",
        "ThermostatWall2",
        "MAXWallThermostat",
        "IPThermostat",
        "IPThermostatWall",
        "ThermostatGroup",
        "IPThermostatWall230V",
        "IPThermostatWall2",
    ],
    DISCOVER_BINARY_SENSORS: [
        "ShutterContact",
        "Smoke",
        "SmokeV2",
        "Motion",
        "MotionV2",
        "MotionIP",
        "RemoteMotion",
        "WeatherSensor",
        "TiltSensor",
        "IPShutterContact",
        "HMWIOSwitch",
        "MaxShutterContact",
        "Rain",
        "WiredSensor",
        "PresenceIP",
        "IPWeatherSensor",
        "IPPassageSensor",
        "SmartwareMotion",
        "IPWeatherSensorPlus",
        "MotionIPV2",
        "WaterIP",
        "IPMultiIO",
        "TiltIP",
        "IPShutterContactSabotage",
    ],
    DISCOVER_COVER: ["Blind", "KeyBlind", "IPKeyBlind", "IPKeyBlindTilt"],
    DISCOVER_LOCKS: ["KeyMatic"],
}
# Parameter nodes that do not create entities during discovery ...
HM_IGNORE_DISCOVERY_NODE = ["ACTUAL_TEMPERATURE", "ACTUAL_HUMIDITY"]

# ... except for these device classes, where the node is still wanted.
HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS = {
    "ACTUAL_TEMPERATURE": [
        "IPAreaThermostat",
        "IPWeatherSensor",
        "IPWeatherSensorPlus",
        "IPWeatherSensorBasic",
        "IPThermostatWall",
        "IPThermostatWall2",
    ]
}

# HomeMatic parameter -> [state attribute name, value remapping table].
# An empty remap table means the raw value is exposed unchanged.
HM_ATTRIBUTE_SUPPORT = {
    "LOWBAT": ["battery", {0: "High", 1: "Low"}],
    "LOW_BAT": ["battery", {0: "High", 1: "Low"}],
    "ERROR": ["error", {0: "No"}],
    "ERROR_SABOTAGE": ["sabotage", {0: "No", 1: "Yes"}],
    "SABOTAGE": ["sabotage", {0: "No", 1: "Yes"}],
    "RSSI_PEER": ["rssi_peer", {}],
    "RSSI_DEVICE": ["rssi_device", {}],
    "VALVE_STATE": ["valve", {}],
    "LEVEL": ["level", {}],
    "BATTERY_STATE": ["battery", {}],
    "CONTROL_MODE": [
        "mode",
        {0: "Auto", 1: "Manual", 2: "Away", 3: "Boost", 4: "Comfort", 5: "Lowering"},
    ],
    "POWER": ["power", {}],
    "CURRENT": ["current", {}],
    "VOLTAGE": ["voltage", {}],
    "OPERATING_VOLTAGE": ["voltage", {}],
    "WORKING": ["working", {0: "No", 1: "Yes"}],
    "STATE_UNCERTAIN": ["state_uncertain", {}],
}

# Device events translated into EVENT_KEYPRESS / EVENT_IMPULSE.
HM_PRESS_EVENTS = [
    "PRESS_SHORT",
    "PRESS_LONG",
    "PRESS_CONT",
    "PRESS_LONG_RELEASE",
    "PRESS",
]

HM_IMPULSE_EVENTS = ["SEQUENCE_OK"]

# Valid values for the per-interface 'resolvenames' option.
CONF_RESOLVENAMES_OPTIONS = ["metadata", "json", "xml", False]

# Keys under which component state is stored in hass.data.
DATA_HOMEMATIC = "homematic"
DATA_STORE = "homematic_store"
DATA_CONF = "homematic_conf"

# YAML configuration keys.
CONF_INTERFACES = "interfaces"
CONF_LOCAL_IP = "local_ip"
CONF_LOCAL_PORT = "local_port"
CONF_PORT = "port"
CONF_PATH = "path"
CONF_CALLBACK_IP = "callback_ip"
CONF_CALLBACK_PORT = "callback_port"
CONF_RESOLVENAMES = "resolvenames"
CONF_JSONPORT = "jsonport"
CONF_VARIABLES = "variables"
CONF_DEVICES = "devices"
CONF_PRIMARY = "primary"

# Configuration defaults.
DEFAULT_LOCAL_IP = "0.0.0.0"
DEFAULT_LOCAL_PORT = 0
DEFAULT_RESOLVENAMES = False
DEFAULT_JSONPORT = 80
DEFAULT_PORT = 2001
DEFAULT_PATH = ""
DEFAULT_USERNAME = "Admin"
DEFAULT_PASSWORD = ""
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
DEFAULT_CHANNEL = 1
# Schema for a manually configured HomeMatic entity on a platform.
DEVICE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_PLATFORM): "homematic",
        vol.Required(ATTR_NAME): cv.string,
        vol.Required(ATTR_ADDRESS): cv.string,
        vol.Required(ATTR_INTERFACE): cv.string,
        vol.Optional(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
        vol.Optional(ATTR_PARAM): cv.string,
        vol.Optional(ATTR_UNIQUE_ID): cv.string,
    }
)

# Component configuration: XML-RPC 'interfaces' plus JSON-RPC 'hosts'.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_INTERFACES, default={}): {
                    cv.match_all: {
                        vol.Required(CONF_HOST): cv.string,
                        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                        vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
                        vol.Optional(
                            CONF_RESOLVENAMES, default=DEFAULT_RESOLVENAMES
                        ): vol.In(CONF_RESOLVENAMES_OPTIONS),
                        vol.Optional(CONF_JSONPORT, default=DEFAULT_JSONPORT): cv.port,
                        vol.Optional(
                            CONF_USERNAME, default=DEFAULT_USERNAME
                        ): cv.string,
                        vol.Optional(
                            CONF_PASSWORD, default=DEFAULT_PASSWORD
                        ): cv.string,
                        vol.Optional(CONF_CALLBACK_IP): cv.string,
                        vol.Optional(CONF_CALLBACK_PORT): cv.port,
                        vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
                        vol.Optional(
                            CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL
                        ): cv.boolean,
                    }
                },
                vol.Optional(CONF_HOSTS, default={}): {
                    cv.match_all: {
                        vol.Required(CONF_HOST): cv.string,
                        vol.Optional(
                            CONF_USERNAME, default=DEFAULT_USERNAME
                        ): cv.string,
                        vol.Optional(
                            CONF_PASSWORD, default=DEFAULT_PASSWORD
                        ): cv.string,
                    }
                },
                vol.Optional(CONF_LOCAL_IP, default=DEFAULT_LOCAL_IP): cv.string,
                vol.Optional(CONF_LOCAL_PORT): cv.port,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# Payload schemas for the services registered in setup().
SCHEMA_SERVICE_VIRTUALKEY = vol.Schema(
    {
        vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
        vol.Required(ATTR_CHANNEL): vol.Coerce(int),
        vol.Required(ATTR_PARAM): cv.string,
        vol.Optional(ATTR_INTERFACE): cv.string,
    }
)

SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema(
    {
        vol.Required(ATTR_NAME): cv.string,
        vol.Required(ATTR_VALUE): cv.match_all,
        vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    }
)

SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema(
    {
        vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
        vol.Required(ATTR_CHANNEL): vol.Coerce(int),
        vol.Required(ATTR_PARAM): vol.All(cv.string, vol.Upper),
        vol.Required(ATTR_VALUE): cv.match_all,
        vol.Optional(ATTR_VALUE_TYPE): vol.In(
            ["boolean", "dateTime.iso8601", "double", "int", "string"]
        ),
        vol.Optional(ATTR_INTERFACE): cv.string,
    }
)

SCHEMA_SERVICE_RECONNECT = vol.Schema({})

SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema(
    {
        vol.Required(ATTR_INTERFACE): cv.string,
        vol.Optional(ATTR_TIME, default=60): cv.positive_int,
        vol.Optional(ATTR_MODE, default=1): vol.All(vol.Coerce(int), vol.In([1, 2])),
        vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
    }
)

SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema(
    {
        vol.Required(ATTR_INTERFACE): cv.string,
        vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
        vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper),
        vol.Required(ATTR_PARAMSET): dict,
    }
)
def setup(hass, config):
    """Set up the Homematic component.

    Builds the pyhomematic connection from the YAML config, starts its
    server thread, creates hub entities and registers the component
    services.  Returns True on completion.
    """
    # Imported lazily so the dependency is only loaded when the component
    # is actually configured.
    from pyhomematic import HMConnection

    conf = config[DOMAIN]
    hass.data[DATA_CONF] = remotes = {}
    hass.data[DATA_STORE] = set()

    # Create hosts-dictionary for pyhomematic
    # XML-RPC interfaces: full connections that deliver device events.
    for rname, rconfig in conf[CONF_INTERFACES].items():
        remotes[rname] = {
            "ip": rconfig.get(CONF_HOST),
            "port": rconfig.get(CONF_PORT),
            "path": rconfig.get(CONF_PATH),
            "resolvenames": rconfig.get(CONF_RESOLVENAMES),
            "jsonport": rconfig.get(CONF_JSONPORT),
            "username": rconfig.get(CONF_USERNAME),
            "password": rconfig.get(CONF_PASSWORD),
            "callbackip": rconfig.get(CONF_CALLBACK_IP),
            "callbackport": rconfig.get(CONF_CALLBACK_PORT),
            "ssl": rconfig.get(CONF_SSL),
            "verify_ssl": rconfig.get(CONF_VERIFY_SSL),
            "connect": True,
        }

    # Hub hosts: no device connection ("connect": False), used for
    # system variables only.
    for sname, sconfig in conf[CONF_HOSTS].items():
        remotes[sname] = {
            "ip": sconfig.get(CONF_HOST),
            "port": DEFAULT_PORT,
            "username": sconfig.get(CONF_USERNAME),
            "password": sconfig.get(CONF_PASSWORD),
            "connect": False,
        }

    # Create server thread
    bound_system_callback = partial(_system_callback_handler, hass, config)
    hass.data[DATA_HOMEMATIC] = homematic = HMConnection(
        local=config[DOMAIN].get(CONF_LOCAL_IP),
        localport=config[DOMAIN].get(CONF_LOCAL_PORT, DEFAULT_LOCAL_PORT),
        remotes=remotes,
        systemcallback=bound_system_callback,
        interface_id="homeassistant",
    )

    # Start server thread, connect to hosts, initialize to receive events
    homematic.start()

    # Stops server when HASS is shutting down
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, hass.data[DATA_HOMEMATIC].stop)

    # Init homematic hubs
    entity_hubs = []
    for hub_name in conf[CONF_HOSTS].keys():
        entity_hubs.append(HMHub(hass, homematic, hub_name))

    def _hm_service_virtualkey(service):
        """Service to handle virtualkey servicecalls."""
        address = service.data.get(ATTR_ADDRESS)
        channel = service.data.get(ATTR_CHANNEL)
        param = service.data.get(ATTR_PARAM)

        # Device not found
        hmdevice = _device_from_servicecall(hass, service)
        if hmdevice is None:
            _LOGGER.error("%s not found for service virtualkey!", address)
            return

        # Parameter doesn't exist for device
        if param not in hmdevice.ACTIONNODE:
            _LOGGER.error("%s not datapoint in hm device %s", param, address)
            return

        # Channel doesn't exist for device
        if channel not in hmdevice.ACTIONNODE[param]:
            _LOGGER.error("%i is not a channel in hm device %s", channel, address)
            return

        # Call parameter
        hmdevice.actionNodeData(param, True, channel)

    hass.services.register(
        DOMAIN,
        SERVICE_VIRTUALKEY,
        _hm_service_virtualkey,
        schema=SCHEMA_SERVICE_VIRTUALKEY,
    )

    def _service_handle_value(service):
        """Service to call setValue method for HomeMatic system variable."""
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        name = service.data[ATTR_NAME]
        value = service.data[ATTR_VALUE]

        # Without entity_ids the variable is set on every hub.
        if entity_ids:
            entities = [
                entity for entity in entity_hubs if entity.entity_id in entity_ids
            ]
        else:
            entities = entity_hubs

        if not entities:
            _LOGGER.error("No HomeMatic hubs available")
            return

        for hub in entities:
            hub.hm_set_variable(name, value)

    hass.services.register(
        DOMAIN,
        SERVICE_SET_VARIABLE_VALUE,
        _service_handle_value,
        schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE,
    )

    def _service_handle_reconnect(service):
        """Service to reconnect all HomeMatic hubs."""
        homematic.reconnect()

    hass.services.register(
        DOMAIN,
        SERVICE_RECONNECT,
        _service_handle_reconnect,
        schema=SCHEMA_SERVICE_RECONNECT,
    )

    def _service_handle_device(service):
        """Service to call setValue method for HomeMatic devices."""
        address = service.data.get(ATTR_ADDRESS)
        channel = service.data.get(ATTR_CHANNEL)
        param = service.data.get(ATTR_PARAM)
        value = service.data.get(ATTR_VALUE)
        value_type = service.data.get(ATTR_VALUE_TYPE)

        # Convert value into correct XML-RPC Type.
        # https://docs.python.org/3/library/xmlrpc.client.html#xmlrpc.client.ServerProxy
        if value_type:
            if value_type == "int":
                value = int(value)
            elif value_type == "double":
                value = float(value)
            elif value_type == "boolean":
                value = bool(value)
            elif value_type == "dateTime.iso8601":
                value = datetime.strptime(value, "%Y%m%dT%H:%M:%S")
            else:
                # Default is 'string'
                value = str(value)

        # Device not found
        hmdevice = _device_from_servicecall(hass, service)
        if hmdevice is None:
            _LOGGER.error("%s not found!", address)
            return

        hmdevice.setValue(param, value, channel)

    hass.services.register(
        DOMAIN,
        SERVICE_SET_DEVICE_VALUE,
        _service_handle_device,
        schema=SCHEMA_SERVICE_SET_DEVICE_VALUE,
    )

    def _service_handle_install_mode(service):
        """Service to set interface into install mode."""
        interface = service.data.get(ATTR_INTERFACE)
        mode = service.data.get(ATTR_MODE)
        time = service.data.get(ATTR_TIME)
        address = service.data.get(ATTR_ADDRESS)

        homematic.setInstallMode(interface, t=time, mode=mode, address=address)

    hass.services.register(
        DOMAIN,
        SERVICE_SET_INSTALL_MODE,
        _service_handle_install_mode,
        schema=SCHEMA_SERVICE_SET_INSTALL_MODE,
    )

    def _service_put_paramset(service):
        """Service to call the putParamset method on a HomeMatic connection."""
        interface = service.data.get(ATTR_INTERFACE)
        address = service.data.get(ATTR_ADDRESS)
        paramset_key = service.data.get(ATTR_PARAMSET_KEY)
        # When passing in the paramset from a YAML file we get an OrderedDict
        # here instead of a dict, so add this explicit cast.
        # The service schema makes sure that this cast works.
        paramset = dict(service.data.get(ATTR_PARAMSET))

        _LOGGER.debug(
            "Calling putParamset: %s, %s, %s, %s",
            interface,
            address,
            paramset_key,
            paramset,
        )
        homematic.putParamset(interface, address, paramset_key, paramset)

    hass.services.register(
        DOMAIN,
        SERVICE_PUT_PARAMSET,
        _service_put_paramset,
        schema=SCHEMA_SERVICE_PUT_PARAMSET,
    )

    return True
def _system_callback_handler(hass, config, src, *args):
    """System callback handler.

    Dispatches pyhomematic system callbacks: ``newDevices`` triggers
    entity discovery and event registration, ``error`` is re-fired on the
    Home Assistant bus as EVENT_ERROR.
    """
    # New devices available at hub
    if src == "newDevices":
        (interface_id, dev_descriptions) = args
        interface = interface_id.split("-")[-1]

        # Device support active?  Hub-only ("connect": False) remotes are
        # skipped.
        if not hass.data[DATA_CONF][interface]["connect"]:
            return

        # Collect only addresses not seen before (DATA_STORE dedupes
        # repeated callbacks); channel suffixes after ':' are stripped.
        addresses = []
        for dev in dev_descriptions:
            address = dev["ADDRESS"].split(":")[0]
            if address not in hass.data[DATA_STORE]:
                hass.data[DATA_STORE].add(address)
                addresses.append(address)

        # Register EVENTS
        # Search all devices with an EVENTNODE that includes data
        bound_event_callback = partial(_hm_event_handler, hass, interface)
        for dev in addresses:
            hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(dev)

            if hmdevice.EVENTNODE:
                hmdevice.setEventCallback(callback=bound_event_callback, bequeath=True)

        # Create HASS entities
        if addresses:
            for component_name, discovery_type in (
                ("switch", DISCOVER_SWITCHES),
                ("light", DISCOVER_LIGHTS),
                ("cover", DISCOVER_COVER),
                ("binary_sensor", DISCOVER_BINARY_SENSORS),
                ("sensor", DISCOVER_SENSORS),
                ("climate", DISCOVER_CLIMATE),
                ("lock", DISCOVER_LOCKS),
                ("binary_sensor", DISCOVER_BATTERY),
            ):
                # Get all devices of a specific type
                found_devices = _get_devices(hass, discovery_type, addresses, interface)

                # When devices of this type are found
                # they are setup in HASS and a discovery event is fired
                if found_devices:
                    discovery.load_platform(
                        hass,
                        component_name,
                        DOMAIN,
                        {
                            ATTR_DISCOVER_DEVICES: found_devices,
                            ATTR_DISCOVERY_TYPE: discovery_type,
                        },
                        config,
                    )

    # Homegear error message
    elif src == "error":
        _LOGGER.error("Error: %s", args)
        (interface_id, errorcode, message) = args
        hass.bus.fire(EVENT_ERROR, {ATTR_ERRORCODE: errorcode, ATTR_MESSAGE: message})
def _get_devices(hass, discovery_type, keys, interface):
    """Get the HomeMatic devices for given discovery_type.

    Builds one validated device-config dict per (device, parameter, channel)
    combination; invalid configs are logged and skipped.
    """
    device_arr = []
    for key in keys:
        device = hass.data[DATA_HOMEMATIC].devices[interface][key]
        class_name = device.__class__.__name__
        metadata = {}
        # Class not supported by discovery type
        if (
            discovery_type != DISCOVER_BATTERY
            and class_name not in HM_DEVICE_TYPES[discovery_type]
        ):
            continue
        # Load metadata needed to generate a parameter list
        if discovery_type == DISCOVER_SENSORS:
            metadata.update(device.SENSORNODE)
        elif discovery_type == DISCOVER_BINARY_SENSORS:
            metadata.update(device.BINARYNODE)
        elif discovery_type == DISCOVER_BATTERY:
            if ATTR_LOWBAT in device.ATTRIBUTENODE:
                metadata.update({ATTR_LOWBAT: device.ATTRIBUTENODE[ATTR_LOWBAT]})
            elif ATTR_LOW_BAT in device.ATTRIBUTENODE:
                metadata.update({ATTR_LOW_BAT: device.ATTRIBUTENODE[ATTR_LOW_BAT]})
            else:
                continue
        else:
            metadata.update({None: device.ELEMENT})
        # Generate options for 1...n elements with 1...n parameters
        for param, channels in metadata.items():
            if (
                param in HM_IGNORE_DISCOVERY_NODE
                and class_name not in HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS.get(param, [])
            ):
                continue
            # Filter on a copy: list.remove() would mutate the channel list
            # stored on the pyhomematic device (e.g. device.ELEMENT), which
            # raises ValueError when discovery runs a second time for the
            # same device.
            if discovery_type == DISCOVER_SWITCHES and class_name == "IPKeySwitchLevel":
                channels = [channel for channel in channels if channel not in (8, 12)]
            if discovery_type == DISCOVER_LIGHTS and class_name == "IPKeySwitchLevel":
                channels = [channel for channel in channels if channel != 4]
            # Add devices
            _LOGGER.debug(
                "%s: Handling %s: %s: %s", discovery_type, key, param, channels
            )
            for channel in channels:
                name = _create_ha_id(
                    name=device.NAME, channel=channel, param=param, count=len(channels)
                )
                unique_id = _create_ha_id(
                    name=key, channel=channel, param=param, count=len(channels)
                )
                device_dict = {
                    CONF_PLATFORM: "homematic",
                    ATTR_ADDRESS: key,
                    ATTR_INTERFACE: interface,
                    ATTR_NAME: name,
                    ATTR_CHANNEL: channel,
                    ATTR_UNIQUE_ID: unique_id,
                }
                if param is not None:
                    device_dict[ATTR_PARAM] = param
                # Add new device
                try:
                    DEVICE_SCHEMA(device_dict)
                    device_arr.append(device_dict)
                except vol.MultipleInvalid as err:
                    _LOGGER.error("Invalid device config: %s", str(err))
    return device_arr
def _create_ha_id(name, channel, param, count):
"""Generate a unique entity id."""
# HMDevice is a simple device
if count == 1 and param is None:
return name
# Has multiple elements/channels
if count > 1 and param is None:
return f"{name} {channel}"
# With multiple parameters on first channel
if count == 1 and param is not None:
return f"{name} {param}"
# Multiple parameters with multiple channels
if count > 1 and param is not None:
return f"{name} {channel} {param}"
def _hm_event_handler(hass, interface, device, caller, attribute, value):
    """Handle all pyhomematic device events."""
    try:
        # device is "ADDRESS:CHANNEL"
        parts = device.split(":")
        channel = int(parts[1])
        hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(parts[0])
    except (TypeError, ValueError):
        _LOGGER.error("Event handling channel convert error!")
        return
    # Ignore attributes the device does not expose as events
    if attribute not in hmdevice.EVENTNODE:
        return
    _LOGGER.debug("Event %s for %s channel %i", attribute, hmdevice.NAME, channel)
    if attribute in HM_PRESS_EVENTS:
        # Keypress event
        hass.bus.fire(
            EVENT_KEYPRESS,
            {ATTR_NAME: hmdevice.NAME, ATTR_PARAM: attribute, ATTR_CHANNEL: channel},
        )
    elif attribute in HM_IMPULSE_EVENTS:
        # Impulse event
        hass.bus.fire(EVENT_IMPULSE, {ATTR_NAME: hmdevice.NAME, ATTR_CHANNEL: channel})
    else:
        _LOGGER.warning("Event is unknown and not forwarded")
def _device_from_servicecall(hass, service):
    """Extract HomeMatic device from service call."""
    address = service.data.get(ATTR_ADDRESS)
    interface = service.data.get(ATTR_INTERFACE)
    # The virtual BidCoS-RF device is frequently given in the wrong case
    if address == "BIDCOS-RF":
        address = "BidCoS-RF"
    if interface:
        return hass.data[DATA_HOMEMATIC].devices[interface].get(address)
    # No interface given: search every known interface for the address
    for device_map in hass.data[DATA_HOMEMATIC].devices.values():
        if address in device_map:
            return device_map[address]
    return None
class HMHub(Entity):
    """The HomeMatic hub. (CCU2/HomeGear)."""

    def __init__(self, hass, homematic, name):
        """Initialize HomeMatic hub."""
        self.hass = hass
        self.entity_id = "{}.{}".format(DOMAIN, name.lower())
        self._homematic = homematic
        self._variables = {}
        self._name = name
        self._state = None
        # Schedule periodic refreshes and trigger an immediate first update
        self.hass.helpers.event.track_time_interval(self._update_hub, SCAN_INTERVAL_HUB)
        self.hass.add_job(self._update_hub, None)
        self.hass.helpers.event.track_time_interval(
            self._update_variables, SCAN_INTERVAL_VARIABLES
        )
        self.hass.add_job(self._update_variables, None)

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def should_poll(self):
        """Return false. HomeMatic Hub object updates variables."""
        return False

    @property
    def state(self):
        """Return the state of the entity."""
        return self._state

    @property
    def state_attributes(self):
        """Return the state attributes."""
        return dict(self._variables)

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return "mdi:gradient"

    def _update_hub(self, now):
        """Retrieve latest state."""
        messages = self._homematic.getServiceMessages(self._name)
        new_state = None if messages is None else len(messages)
        # Only push an update when the state actually changed
        if self._state == new_state:
            return
        self._state = new_state
        self.schedule_update_ha_state()

    def _update_variables(self, now):
        """Retrieve all variable data and update hmvariable states."""
        variables = self._homematic.getAllSystemVariables(self._name)
        if variables is None:
            return
        has_changes = False
        for key, value in variables.items():
            if key in self._variables and self._variables[key] == value:
                continue
            has_changes = True
            self._variables[key] = value
        if has_changes:
            self.schedule_update_ha_state()

    def hm_set_variable(self, name, value):
        """Set variable value on CCU/Homegear."""
        if name not in self._variables:
            _LOGGER.error("Variable %s not found on %s", name, self.name)
            return
        # Coerce the new value to the type the variable currently has
        old_value = self._variables.get(name)
        if isinstance(old_value, bool):
            value = cv.boolean(value)
        else:
            value = float(value)
        self._homematic.setSystemVariable(self.name, name, value)
        self._variables[name] = value
        self.schedule_update_ha_state()
class HMDevice(Entity):
    """The HomeMatic device base object.

    Subclasses declare the datapoints they need via ``_init_data_struct``;
    values are pushed into ``self._data`` through pyhomematic callbacks.
    """

    def __init__(self, config):
        """Initialize a generic HomeMatic device.

        config is the discovery dict produced by ``_get_devices``.
        """
        self._name = config.get(ATTR_NAME)
        self._address = config.get(ATTR_ADDRESS)
        self._interface = config.get(ATTR_INTERFACE)
        self._channel = config.get(ATTR_CHANNEL)
        # Name of the main HomeMatic parameter (e.g. a datapoint key) --
        # despite the name this is NOT a HASS state value.
        self._state = config.get(ATTR_PARAM)
        self._unique_id = config.get(ATTR_UNIQUE_ID)
        # Datapoint name -> last known value
        self._data = {}
        self._homematic = None
        self._hmdevice = None
        self._connected = False
        self._available = False
        # Set parameter to uppercase
        if self._state:
            self._state = self._state.upper()

    async def async_added_to_hass(self):
        """Load data init callbacks."""
        await self.hass.async_add_job(self.link_homematic)

    @property
    def unique_id(self):
        """Return unique ID. HomeMatic entity IDs are unique by default."""
        return self._unique_id.replace(" ", "_")

    @property
    def should_poll(self):
        """Return false. HomeMatic states are pushed by the XML-RPC Server."""
        return False

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def available(self):
        """Return true if device is available."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        attr = {}
        # Generate a dictionary with attributes
        for node, data in HM_ATTRIBUTE_SUPPORT.items():
            # Is an attribute and exists for this object
            if node in self._data:
                # data is (attribute name, mapping of raw value -> display value);
                # fall back to the raw value when no mapping entry exists
                value = data[1].get(self._data[node], self._data[node])
                attr[data[0]] = value
        # Static attributes
        attr["id"] = self._hmdevice.ADDRESS
        attr["interface"] = self._interface
        return attr

    def link_homematic(self):
        """Connect to HomeMatic.

        Idempotent: returns True immediately when already connected.
        NOTE(review): on the first successful run this returns None, not
        True -- callers appear to ignore the return value.
        """
        if self._connected:
            return True
        # Initialize
        self._homematic = self.hass.data[DATA_HOMEMATIC]
        self._hmdevice = self._homematic.devices[self._interface][self._address]
        self._connected = True
        try:
            # Initialize datapoints of this object
            self._init_data()
            self._load_data_from_hm()
            # Link events from pyhomematic
            self._subscribe_homematic_events()
            self._available = not self._hmdevice.UNREACH
        except Exception as err:  # pylint: disable=broad-except
            # Roll back the connected flag so a later call can retry
            self._connected = False
            _LOGGER.error("Exception while linking %s: %s", self._address, str(err))

    def _hm_event_callback(self, device, caller, attribute, value):
        """Handle all pyhomematic device events."""
        _LOGGER.debug("%s received event '%s' value: %s", self._name, attribute, value)
        has_changed = False
        # Is data needed for this instance?
        if attribute in self._data:
            # Did data change?
            if self._data[attribute] != value:
                self._data[attribute] = value
                has_changed = True
        # Availability has changed
        if self.available != (not self._hmdevice.UNREACH):
            self._available = not self._hmdevice.UNREACH
            has_changed = True
        # If it has changed data point, update HASS
        if has_changed:
            self.schedule_update_ha_state()

    def _subscribe_homematic_events(self):
        """Subscribe all required events to handle job."""
        channels_to_sub = set()
        # Push data to channels_to_sub from hmdevice metadata
        for metadata in (
            self._hmdevice.SENSORNODE,
            self._hmdevice.BINARYNODE,
            self._hmdevice.ATTRIBUTENODE,
            self._hmdevice.WRITENODE,
            self._hmdevice.EVENTNODE,
            self._hmdevice.ACTIONNODE,
        ):
            for node, channels in metadata.items():
                # Data is needed for this instance
                if node in self._data:
                    # chan is current channel: a single-channel node pins the
                    # channel, otherwise use this entity's configured channel
                    if len(channels) == 1:
                        channel = channels[0]
                    else:
                        channel = self._channel
                    # Prepare for subscription
                    try:
                        channels_to_sub.add(int(channel))
                    except (ValueError, TypeError):
                        _LOGGER.error("Invalid channel in metadata from %s", self._name)
        # Set callbacks
        for channel in channels_to_sub:
            _LOGGER.debug("Subscribe channel %d from %s", channel, self._name)
            self._hmdevice.setEventCallback(
                callback=self._hm_event_callback, bequeath=False, channel=channel
            )

    def _load_data_from_hm(self):
        """Load first value from pyhomematic."""
        if not self._connected:
            return False
        # Read data from pyhomematic: each (metadata, getter) pair covers one
        # node category; only nodes this entity tracks are fetched
        for metadata, funct in (
            (self._hmdevice.ATTRIBUTENODE, self._hmdevice.getAttributeData),
            (self._hmdevice.WRITENODE, self._hmdevice.getWriteData),
            (self._hmdevice.SENSORNODE, self._hmdevice.getSensorData),
            (self._hmdevice.BINARYNODE, self._hmdevice.getBinaryData),
        ):
            for node in metadata:
                if metadata[node] and node in self._data:
                    self._data[node] = funct(name=node, channel=self._channel)
        return True

    def _hm_set_state(self, value):
        """Set data to main datapoint."""
        if self._state in self._data:
            self._data[self._state] = value

    def _hm_get_state(self):
        """Get data from main datapoint."""
        if self._state in self._data:
            return self._data[self._state]
        return None

    def _init_data(self):
        """Generate a data dict (self._data) from the HomeMatic metadata."""
        # Add all attributes to data dictionary
        for data_note in self._hmdevice.ATTRIBUTENODE:
            self._data.update({data_note: STATE_UNKNOWN})
        # Initialize device specific data
        self._init_data_struct()

    def _init_data_struct(self):
        """Generate a data dictionary from the HomeMatic device metadata."""
        raise NotImplementedError
|
|
from __future__ import annotations
import os
import heapq
import bisect
import shutil
import asyncio
import logging
import contextlib
import regex
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.base as s_base
import synapse.lib.coro as s_coro
import synapse.lib.slabseqn as s_slabseqn
import synapse.lib.lmdbslab as s_lmdbslab
from typing import List, Tuple, Dict, Optional, Any, AsyncIterator
logger = logging.getLogger(__name__)
seqnslabre = regex.compile(r'^seqn([0-9a-f]{16})\.lmdb$')
class MultiSlabSeqn(s_base.Base):
    '''
    An append-optimized sequence of byte blobs stored across multiple slabs for fast rotating/culling
    '''
    async def __anit__(self,  # type: ignore
                       dirn: str,
                       opts: Optional[Dict] = None,
                       slabopts: Optional[Dict] = None):
        '''
        Args:
            dirn (str): directory where to store the slabs
            opts (Optional[Dict]): options for this multislab
            slabopts (Optional[Dict]): options to pass through to the slab creation
        '''
        await s_base.Base.__anit__(self)
        if opts is None:
            opts = {}
        # Pending offset waiters as (offs, counter, Event) tuples, kept as a heap
        self.offsevents: List[Tuple[int, int, asyncio.Event]] = []  # as a heap
        self._waitcounter = 0
        self.dirn: str = dirn
        s_common.gendir(self.dirn)
        self.slabopts: Dict[str, Any] = {} if slabopts is None else slabopts
        # The last/current slab
        self.tailslab: Optional[s_lmdbslab.Slab] = None
        self.tailseqn: Optional[s_slabseqn.SlabSeqn] = None
        # The most recently accessed slab/seqn that isn't the tail
        self._cacheslab: Optional[s_lmdbslab.Slab] = None
        self._cacheseqn: Optional[s_slabseqn.SlabSeqn] = None
        self._cacheridx: Optional[int] = None
        # A startidx -> (Slab, Seqn) dict for all open Slabs, so we don't accidentally open the same Slab twice
        self._openslabs: Dict[int, Tuple[s_lmdbslab.Slab, s_slabseqn.SlabSeqn]] = {}
        # Lock to avoid an open race
        self._openlock = asyncio.Lock()
        await self._discoverRanges()

        async def fini():
            for slab, _ in list(self._openslabs.values()):
                # We incref the slabs, so might have to fini multiple times
                count = 1
                while count:
                    count = await slab.fini()

        self.onfini(fini)

    def __repr__(self):
        return f'MultiSlabSeqn: {self.dirn!r}'

    @staticmethod
    def _getFirstIndx(slab) -> Optional[int]:
        # Read the persisted lowest index from the slab's 'info' db; 0 if unset
        db = slab.initdb('info')
        bytz = slab.get(b'firstindx', db=db)
        if bytz is None:
            return 0
        return s_common.int64un(bytz)

    @staticmethod
    def _setFirstIndx(slab, indx) -> bool:
        # Persist the lowest index into the slab's 'info' db
        db = slab.initdb('info')
        return slab.put(b'firstindx', s_common.int64en(indx), db=db)

    async def _discoverRanges(self):
        '''
        Go through the slabs and get the starting indices of the sequence in each slab
        '''
        fnstartidx = 0
        lastidx = None
        self._ranges: List[int] = []  # Starting offsets of all the slabs in order
        self.firstindx = 0  # persistently-stored indicator of lowest index
        self.indx = 0  # The next place an add() will go
        lowindx = None
        # Make sure the files are in order
        for fn in sorted(s_common.listdir(self.dirn, glob='*seqn' + '[abcdef01234567890]' * 16 + '.lmdb')):
            if not os.path.isdir(fn):
                logger.warning(f'Found a non-directory {fn} where a directory should be')
                continue
            match = seqnslabre.match(os.path.basename(fn))
            assert match
            # The filename encodes the slab's starting index in hex
            newstartidx = int(match.group(1), 16)
            assert newstartidx >= fnstartidx
            fnstartidx = newstartidx
            if lowindx is None:
                lowindx = fnstartidx
            if lastidx is not None:
                if fnstartidx <= lastidx:
                    mesg = f'Multislab: overlapping files ({fn}). Previous last index is {lastidx}.'
                    raise s_exc.BadCoreStore(mesg=mesg)
                if fnstartidx != lastidx + 1:
                    logger.debug(f'Multislab: gap in indices at {fn}. Previous last index is {lastidx}.')
            async with await s_lmdbslab.Slab.anit(fn, **self.slabopts) as slab:
                self.firstindx = self._getFirstIndx(slab)
                # We use the old name of the sequence to ease migration from the old system
                seqn = slab.getSeqn('nexuslog')
                firstitem = seqn.first()
                if firstitem is None:
                    self.indx = fnstartidx
                else:
                    self.indx = seqn.indx
                    firstidx = firstitem[0]  # might not match the separately stored first index due to culling
                    if firstidx < fnstartidx:
                        raise s_exc.BadCoreStore('Multislab: filename inconsistent with contents')
                    lastidx = seqn.index() - 1
            self._ranges.append(fnstartidx)
        # An admin might have manually culled by rm'ing old slabs. Update firstidx accordingly.
        if lowindx is not None and lowindx > self.firstindx:
            self.firstindx = lowindx
        if self.firstindx > self.indx:
            raise s_exc.BadCoreStore('Invalid firstindx value')
        # Re-open (or create) the newest slab as the writable tail
        await self._initTailSlab(fnstartidx)

    @staticmethod
    def slabFilename(dirn: str, indx: int):
        # 16 hex digits so lexicographic file order matches numeric order
        return s_common.genpath(dirn, f'seqn{indx:016x}.lmdb')

    async def _initTailSlab(self, indx: int) -> int:
        # Replace the current tail slab with the slab starting at indx
        if self.tailslab:
            await self.tailslab.fini()
        self.tailslab, self.tailseqn = await self._makeSlab(indx)
        # A missing 'info' db marks a brand-new slab: seed it and record its range
        if not self.tailslab.dbexists('info'):
            self._setFirstIndx(self.tailslab, self.firstindx)
            self.tailseqn.indx = indx
            self._ranges.append(indx)
        return indx

    def _wake_waiters(self) -> None:
        # Fire every event registered for an offset that has now been written
        while self.offsevents and self.offsevents[0][0] < self.indx:
            _, _, evnt = heapq.heappop(self.offsevents)
            evnt.set()

    async def rotate(self) -> int:
        '''
        Rotate the Nexus log at the current index.

        Note:
            After this executes the tailseqn will be empty.
            Waiting for this indx to be written will indicate
            when it is possible to cull 1 minus the return value
            such that the rotated seqn is deleted.

        Returns:
            int: The starting index of the new seqn
        '''
        assert self.tailslab and self.tailseqn and self._ranges
        # Don't rotate an empty tail; just report its starting index
        if self.indx <= self._ranges[-1]:
            logger.info('Seqn %s at indx %d is empty', self.tailslab.path, self.indx)
            return self._ranges[-1]
        logger.info('Rotating %s at indx %d', self.tailslab.path, self.indx)
        return await self._initTailSlab(self.indx)

    async def cull(self, offs: int) -> bool:
        '''
        Remove entries up to (and including) the given offset.
        '''
        logger.info('Culling %s at offs %d', self.dirn, offs)
        # Note: we don't bother deleting the rows from inside a partially culled slab. We just update self.firstindx
        # so nothing will return those rows anymore. We only delete from disk entire slabs once they are culled.
        if offs < self.firstindx:
            logger.warning('Unable to cull %s; offs (%d) < starting indx (%d)', self.dirn, offs, self.firstindx)
            return False
        # We keep at least one entry; this avoids offsets possibly going lower after a restart
        if offs >= self.indx - 1:
            logger.warning('Unable to cull %s at offs %d; must keep at least one entry', self.dirn, offs)
            return False
        # Drop the cached non-tail slab before deleting files it may reference
        if self._cacheridx is not None:
            self._cacheridx = None
            assert self._cacheslab
            await self._cacheslab.fini()
            self._cacheslab = self._cacheseqn = None
        del_ridx = None
        # Walk all slabs except the tail; delete each one fully covered by offs
        for ridx in range(len(self._ranges) - 1):
            startidx = self._ranges[ridx]
            if self._openslabs.get(startidx):
                raise s_exc.SlabInUse(mesg='Attempt to cull while another task is still using it')
            fn = self.slabFilename(self.dirn, startidx)
            if offs < self._ranges[ridx + 1] - 1:
                logger.warning('Log %s will not be deleted since offs is less than last indx', fn)
                break
            optspath = s_common.switchext(fn, ext='.opts.yaml')
            try:
                os.unlink(optspath)
            except FileNotFoundError:  # pragma: no cover
                pass
            logger.info('Removing log %s with startidx %d', fn, startidx)
            shutil.rmtree(fn)
            del_ridx = ridx
            await asyncio.sleep(0)
        self.firstindx = offs + 1
        self._setFirstIndx(self.tailslab, offs + 1)
        if del_ridx is not None:
            del self._ranges[:del_ridx + 1]
        # Log if there was an attempt to cull into the tailseqn
        if offs >= self._ranges[-1]:
            fn = self.tailslab.path
            logger.warning('Log %s will not be deleted since offs is in the currently active log', fn)
        return True

    async def _makeSlab(self, startidx: int) -> Tuple[s_lmdbslab.Slab, s_slabseqn.SlabSeqn]:
        async with self._openlock:  # Avoid race in two tasks making the same slab
            item = self._openslabs.get(startidx)
            if item is not None:
                # Already open: bump the refcount and share the instance
                item[0].incref()
                return item
            fn = self.slabFilename(self.dirn, startidx)
            slab = await s_lmdbslab.Slab.anit(fn, **self.slabopts)
            seqn = slab.getSeqn('nexuslog')
            self._openslabs[startidx] = slab, seqn

            def fini():
                self._openslabs.pop(startidx, None)

            slab.onfini(fini)
            return slab, seqn

    @contextlib.asynccontextmanager
    async def _getSeqn(self, ridx: int) -> AsyncIterator[s_slabseqn.SlabSeqn]:
        '''
        Get the sequence corresponding to an index into self._ranges
        '''
        if ridx == len(self._ranges) - 1:
            # The tail is always open
            assert self.tailslab and self.tailseqn
            slab, seqn = self.tailslab, self.tailseqn
        elif ridx == self._cacheridx:
            assert self._cacheslab and self._cacheseqn
            slab, seqn = self._cacheslab, self._cacheseqn
        else:
            # Cache miss: evict the old cached slab, open and cache this one
            startidx = self._ranges[ridx]
            self._cacheridx = None
            if self._cacheslab is not None:
                await self._cacheslab.fini()
            slab, seqn = self._cacheslab, self._cacheseqn = await self._makeSlab(startidx)
            self._cacheridx = ridx
        # Hold a reference for the duration of the context
        slab.incref()
        try:
            yield seqn
        finally:
            await slab.fini()

    async def add(self, item: Any, indx=None) -> int:
        '''
        Add a single item to the sequence.
        '''
        advances = True
        if indx is not None:
            if indx < self.firstindx:
                raise s_exc.BadIndxValu(mesg=f'indx lower than first index in sequence {self.firstindx}')
            if indx < self._ranges[-1]:
                # Write lands in a rotated (non-tail) slab
                ridx = self._getRangeIndx(indx)
                assert ridx is not None
                async with self._getSeqn(ridx) as seqn:
                    seqn.add(item, indx=indx)
                return indx
            if indx >= self.indx:
                self.indx = indx
            else:
                # Overwrite of an existing tail entry: the head does not move
                advances = False
        else:
            indx = self.indx
        assert self.tailseqn
        retn = self.tailseqn.add(item, indx=indx)
        if advances:
            self.indx += 1
            self._wake_waiters()
        return retn

    async def last(self) -> Optional[Tuple[int, Any]]:
        ridx = self._getRangeIndx(self.indx - 1)
        if ridx is None:
            return None
        async with self._getSeqn(ridx) as seqn:
            return seqn.last()

    def index(self) -> int:
        '''
        Return the current index to be used
        '''
        return self.indx

    def setIndex(self, indx: int) -> None:
        self.indx = indx

    def _getRangeIndx(self, offs: int) -> Optional[int]:
        '''
        Return the index into self._ranges that contains the offset
        '''
        if offs < self.firstindx:
            return None
        indx = bisect.bisect_right(self._ranges, offs)
        assert indx
        return indx - 1

    async def iter(self, offs: int) -> AsyncIterator[Tuple[int, Any]]:
        '''
        Iterate over items in a sequence from a given offset.

        Args:
            offs (int): The offset to begin iterating from.

        Yields:
            (indx, valu): The index and valu of the item.
        '''
        offs = max(offs, self.firstindx)
        ri = ridx = self._getRangeIndx(offs)
        assert ridx is not None
        # ranges could get appended while iterating due to a rotation
        while ri < len(self._ranges):
            if ri > ridx:
                # Subsequent slabs are read from their starting offset
                offs = self._ranges[ri]
            async with self._getSeqn(ri) as seqn:
                for item in seqn.iter(offs):
                    yield item
            ri += 1

    async def gets(self, offs, wait=True) -> AsyncIterator[Tuple[int, Any]]:
        '''
        Just like iter, but optionally waits for new entries once the end is reached.
        '''
        while True:
            async for (indx, valu) in self.iter(offs):
                yield (indx, valu)
                offs = indx + 1
            if not wait:
                return
            await self.waitForOffset(self.indx)

    async def get(self, offs: int) -> Any:
        '''
        Retrieve a single row by offset
        '''
        ridx = self._getRangeIndx(offs)
        if ridx is None:
            raise s_exc.BadIndxValu(mesg=f'offs lower than first index {self.firstindx}')
        async with self._getSeqn(ridx) as seqn:
            return seqn.get(offs)

    def getOffsetEvent(self, offs: int) -> asyncio.Event:
        '''
        Returns an asyncio Event that will be set when the particular offset is written. The event will be set if the
        offset has already been reached.
        '''
        evnt = asyncio.Event()
        if offs < self.indx:
            evnt.set()
            return evnt
        # We add a simple counter to the tuple to cause stable (and FIFO) sorting and prevent ties
        heapq.heappush(self.offsevents, (offs, self._waitcounter, evnt))
        self._waitcounter += 1
        return evnt

    async def waitForOffset(self, offs: int, timeout=None) -> bool:
        '''
        Returns:
            true if the event got set, False if timed out
        '''
        if offs < self.indx:
            return True
        evnt = self.getOffsetEvent(offs)
        return await s_coro.event_wait(evnt, timeout=timeout)
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from desktop.lib.django_util import render, JsonResponse
from desktop.lib.json_utils import JSONEncoderForHTML
from desktop.models import Document2, Document
from notebook.decorators import check_document_access_permission, check_document_modify_permission
from notebook.connectors.base import Notebook, get_api
from notebook.management.commands.notebook_setup import Command
from notebook.connectors.spark_shell import SparkApi
from notebook.conf import get_interpreters
LOG = logging.getLogger(__name__)
@check_document_access_permission()
def notebook(request):
    """Render the notebook page.

    Loads an existing notebook when a ``notebook`` id is passed as a GET
    parameter, otherwise starts a blank one.
    """
    notebook_id = request.GET.get('notebook')
    if notebook_id:
        notebook = Notebook(document=Document2.objects.get(id=notebook_id))
    else:
        notebook = Notebook()
    is_yarn_mode = False
    try:
        from spark.conf import LIVY_SERVER_SESSION_KIND
        is_yarn_mode = LIVY_SERVER_SESSION_KIND.get()
    except Exception:
        # Was a bare "except:", which also swallows SystemExit and
        # KeyboardInterrupt. Spark being unconfigured is the expected case.
        LOG.exception('Spark is not enabled')
    return render('notebook.mako', request, {
        'notebooks_json': json.dumps([notebook.get_data()]),
        'options_json': json.dumps({
            'languages': get_interpreters(request.user),
            'session_properties': SparkApi.PROPERTIES,
        }),
        'is_yarn_mode': is_yarn_mode
    })
@check_document_access_permission()
def editor(request):
    """Open the SQL editor, resuming a saved query when an id is given."""
    editor_id = request.GET.get('editor')
    editor_type = request.GET.get('type', 'hive')
    document = Document2.objects.get(id=editor_id) if editor_id else None
    editor = Notebook(document=document) if document else Notebook()
    # Re-label the notebook as a typed editor query
    data = editor.get_data()
    data['name'] = 'Untitled %s Query' % editor_type.title()
    data['type'] = 'query-%s' % editor_type
    editor.data = json.dumps(data)
    options = {
        'languages': [{"name": "%s SQL" % editor_type.title(), "type": editor_type}],
        'mode': 'editor',
    }
    return render('editor.mako', request, {
        'notebooks_json': json.dumps([editor.get_data()]),
        'options_json': json.dumps(options),
        'editor_type': editor_type,
    })
def new(request):
    """Open a new blank notebook; simply delegates to the ``notebook`` view."""
    return notebook(request)
def browse(request):
    """Open the editor pre-loaded with a ready-to-execute browse snippet."""
    database = request.GET.get('database', 'default')
    table = request.GET.get('table')
    editor_type = request.GET.get('type', 'hive')
    # NOTE(review): ``table`` is read but never used below, and the executed
    # statement is hard-coded -- presumably placeholder behavior; confirm intent.
    snippet = {
        'status':'ready-execute',
        'id':'e8b323b3-88ef-3a84-6264-af11fa5fbefb',
        'statement_raw':'select * from %(database)s',
        'statement':'select * from sample_07',
        'type':'hive',
        'properties':{
            'files':[
            ],
            'settings':[
            ]
        },
        'name': 'Browse',
        'database':'default',
        'result':{ }
    }
    editor = Notebook()
    editor.data = json.dumps({
        'description':'',
        'sessions':[
            {
                'type':'hive',
                'properties':[
                ],
                'id':None
            }
        ],
        'selectedSnippet':'hive',
        'type': 'query-%s' % editor_type,
        'snippets':[snippet],
        'name':'Browse'
    })
    return render('editor.mako', request, {
        'notebooks_json': json.dumps([editor.get_data()]),
        'options_json': json.dumps({
            'languages': [{"name": "%s SQL" % editor_type.title(), "type": editor_type}],
            'mode': 'editor',
        }),
        'editor_type': editor_type,
    })
def notebooks(request):
    """List the user's notebooks and saved queries, excluding history entries."""
    docs = Document.objects.get_docs(
        request.user,
        Document2,
        qfilter=Q(extra='notebook') | Q(extra__startswith='query'),
    )
    notebooks = [
        doc.content_object.to_dict()
        for doc in docs
        if not doc.content_object.is_history
    ]
    return render('notebooks.mako', request, {
        'notebooks_json': json.dumps(notebooks, cls=JSONEncoderForHTML)
    })
@check_document_modify_permission()
def delete(request):
    """Delete the posted notebooks (both Document2 and its Document shadow)."""
    for item in json.loads(request.POST.get('notebooks', '[]')):
        doc2 = Document2.objects.get(uuid=item['uuid'])
        doc = doc2.doc.get()
        # Raises when the user lacks write access
        doc.can_write_or_exception(request.user)
        doc.delete()
        doc2.delete()
    return JsonResponse({})
@check_document_access_permission()
def copy(request):
    """Duplicate the posted notebooks, owned by the requesting user."""
    for item in json.loads(request.POST.get('notebooks', '[]')):
        source = Document2.objects.get(uuid=item['uuid'])
        doc = source.doc.get()
        name = source.name + '-copy'
        # Copy the Document2 first, then point the shadow Document at it
        copied = source.copy(name=name, owner=request.user)
        doc.copy(content_object=copied, name=name, owner=request.user)
    return JsonResponse({})
@check_document_access_permission()
def download(request):
    """Stream a snippet's result set in the requested file format."""
    notebook = json.loads(request.POST.get('notebook', '{}'))
    snippet = json.loads(request.POST.get('snippet', '{}'))
    file_format = request.POST.get('format', 'csv')
    api = get_api(request.user, snippet, request.fs, request.jt)
    return api.download(notebook, snippet, file_format)
def install_examples(request):
    """Install the notebook example documents for the requesting user.

    POST only; returns a JSON status payload ({'status': 0} on success).
    """
    response = {'status': -1, 'message': ''}
    if request.method != 'POST':
        response['message'] = _('A POST request is required.')
        return JsonResponse(response)
    try:
        Command().handle(user=request.user)
        response['status'] = 0
    except Exception as err:
        # "except Exception, err" is Python-2-only syntax; "as err" works on
        # Python 2.6+ and Python 3.
        LOG.exception(err)
        response['message'] = str(err)
    return JsonResponse(response)
|
|
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
import unittest
from etch.python.Types import *
from etch.binding.msg.StructValue import *
from etch.binding.msg.Type import Type
from etch.binding.msg.Field import Field
from etch.binding.msg.IdName import IdName
#from etch.binding.support.ComboValidator import ComboValidator
from etch.binding.support.Validator_int import Validator_int
from etch.binding.support.Validator_boolean import Validator_boolean
from etch.binding.support.Validator_string import Validator_string
class Test_EtchMsgStructValue(unittest.TestCase):
def setUp(self):
#print " "
pass
mt1 = Type("one")
mt2 = Type("two")
mf1 = Field("f1")
mf2 = Field("f2")
mf3 = Field("f3")
mf4 = Field("f4")
mf5 = Field("f5")
mf6 = Field("f6")
mt1.putValidator(mf1, Validator_boolean.get(0))
mt1.putValidator(mf2, Validator_boolean.get(1))
mt1.putValidator(mf3, Validator_int.get(0))
mt1.putValidator(mf4, Validator_int.get(1))
mt1.putValidator(mf5, Validator_string.get(0))
mt1.putValidator(mf6, Validator_string.get(1))
self._mt1 = mt1
self._mt2 = mt2
self._mf1 = mf1
self._mf2 = mf2
self._mf3 = mf3
self._mf4 = mf4
self._mf5 = mf5
self._mf6 = mf6
def test_toString(self):
sv = StructValue(self._mt1)
self.assertEquals("one(785945377): {}", repr(sv))
sv = StructValue(self._mt2)
self.assertEquals("two(827843303): {}", repr(sv))
sv = StructValue(self._mt1)
sv.put(self._mf1, True)
self.assertEquals("one(785945377): {f1(1512176592): True}", repr(sv))
sv = StructValue(self._mt1)
sv.put(self._mf3, 23)
self.assertEquals("one(785945377): {f3(1512176594): 23}", repr(sv))
sv = StructValue(self._mt1)
sv.put(self._mf1, False)
sv.put(self._mf3, 74)
self.assertEqual(True, "one(785945377): {f1(1512176592): False, f3(1512176594): 74}" == repr(sv) or "one(785945377): {f3(1512176594): 74, f1(1512176592): False}" == repr(sv) )
def test_getType(self):
sv = StructValue(self._mt1)
self.assertEqual(self._mt1, sv.type())
sv = StructValue(self._mt2)
self.assertEqual(self._mt2, sv.type())
def test_isType(self):
sv = StructValue(self._mt1)
self.assertEqual(True, sv.isType(self._mt1))
self.assertEqual(False, sv.isType(self._mt2))
sv = StructValue(self._mt2)
self.assertEqual(True, sv.isType(self._mt2))
self.assertEqual(False, sv.isType(self._mt1))
def test_checkType1(self):
StructValue(self._mt1).checkType(self._mt1)
StructValue(self._mt2).checkType(self._mt2)
def test_checkType2(self):
self.assertRaises(IllegalArgumentException, StructValue(self._mt1).checkType, self._mt2)
def test_checkType3(self):
self.assertRaises(IllegalArgumentException, StructValue(self._mt2).checkType, self._mt1)
def test_get(self):
sv = StructValue(self._mt1)
self.assertEqual(True, sv.isEmpty())
self.assertEqual(0, sv.size())
self.assertEqual(None, sv.get(self._mf1))
sv.put(self._mf1, True)
self.assertEqual(False, sv.isEmpty())
self.assertEqual(1, sv.size())
self.assertEqual(True, sv.get(self._mf1))
sv.put(self._mf1, False)
self.assertEqual(False, sv.isEmpty())
self.assertEqual(1, sv.size())
self.assertEqual(False, sv.get(self._mf1))
sv.put(self._mf1, None)
self.assertEqual(True, sv.isEmpty())
self.assertEqual(0, sv.size())
self.assertEqual(None, sv.get(self._mf1))
def test_remove(self):
sv = StructValue(self._mt1)
self.assertEqual(True, sv.isEmpty())
self.assertEqual(0, sv.size())
self.assertEqual(None, sv.get(self._mf1))
sv.remove(self._mf1)
self.assertEqual(True, sv.isEmpty())
self.assertEqual(0, sv.size())
self.assertEqual(None, sv.get(self._mf1))
sv.put(self._mf1, True)
self.assertEqual(False, sv.isEmpty())
self.assertEqual(1, sv.size())
self.assertEqual(True, sv.get(self._mf1))
sv.remove(self._mf1)
self.assertEqual(True, sv.isEmpty())
self.assertEqual(0, sv.size())
self.assertEqual(None, sv.get(self._mf1))
sv.remove(self._mf1)
self.assertEqual(True, sv.isEmpty())
self.assertEqual(0, sv.size())
self.assertEqual(None, sv.get(self._mf1))
def test_put1(self):
sv = StructValue(self._mt1)
sv.put(self._mf1, False)
sv.put(self._mf2, [True, False])
sv.put(self._mf3, 1)
sv.put(self._mf4, [1,2])
sv.put(self._mf5, "a")
sv.put(self._mf6, ["a", "b"])
self.assertEqual(6, sv.size())
def test_put2(self):
sv = StructValue(self._mt1)
sv.put(self._mf1, None)
sv.put(self._mf2, None)
sv.put(self._mf3, None)
sv.put(self._mf4, None)
sv.put(self._mf5, None)
sv.put(self._mf6, None)
self.assertEqual(0, sv.size())
def test_put3(self):
sv = StructValue(self._mt1)
sv.put(self._mf1, False)
sv.put(self._mf2, [True, False])
sv.put(self._mf3, 1)
sv.put(self._mf4, [1,2])
sv.put(self._mf5, "a")
sv.put(self._mf6, ["a", "b"])
self.assertEqual(6, sv.size())
# Now remove them
sv.put(self._mf1, None)
sv.put(self._mf2, None)
sv.put(self._mf3, None)
sv.put(self._mf4, None)
sv.put(self._mf5, None)
sv.put(self._mf6, None)
self.assertEqual(0, sv.size())
def test_put5(self):
sv = StructValue(self._mt1)
self.assertRaises(IllegalArgumentException, sv.put, self._mf1, Byte(1))
def test_put6(self):
sv = StructValue(self._mt1)
self.assertRaises(IllegalArgumentException, sv.put, self._mf1, Short(1))
def test_put7(self):
sv = StructValue(self._mt1)
self.assertRaises(IllegalArgumentException, sv.put, self._mf1, 1)
def test_put8(self):
sv = StructValue(self._mt1)
self.assertRaises(IllegalArgumentException, sv.put, self._mf1, 1L)
def test_put9(self):
sv = StructValue(self._mt1)
self.assertRaises(IllegalArgumentException, sv.put, self._mf1, 1.2)
def test_put10(self):
sv = StructValue(self._mt1)
self.assertRaises(IllegalArgumentException, sv.put, self._mf1, Float(1.2))
def test_put11(self):
sv = StructValue(self._mt1)
self.assertRaises(IllegalArgumentException, sv.put, self._mf1, "a")
def test_put12(self):
sv = StructValue(self._mt1)
sv.put(self._mf2, None)
def test_put13(self):
sv = StructValue(self._mt1)
self.assertRaises(IllegalArgumentException, sv.put, self._mf2, [])
def test_put14(self):
sv = StructValue(self._mt1)
self.assertRaises(IllegalArgumentException, sv.put, self._mf2, True)
def test_put15(self):
sv = StructValue(self._mt1)
self.assertRaises(IllegalArgumentException, sv.put, self._mf2, [[]])
def test_iterator(self):
# TODO - implement test_iterator
pass
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
# Copyright 2014-2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_config import cfg
from oslo_utils import encodeutils
from nova import context
from nova import exception
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
# Route all libvirt access in the modules under test through the fake
# libvirt implementation so no real hypervisor connection is made.
host.libvirt = fakelibvirt
libvirt_guest.libvirt = fakelibvirt
CONF = cfg.CONF
# Python 3 has no separate ``long`` type; alias it so tests below can
# build large integers identically on both major versions.
if sys.version_info > (3,):
    long = int
class GuestTestCase(test.NoDBTestCase):
    """Tests for nova.virt.libvirt.guest.Guest against a mocked virDomain."""

    def setUp(self):
        super(GuestTestCase, self).setUp()
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.host = host.Host("qemu:///system")
        self.context = context.get_admin_context()
        # The Guest under test wraps a mock domain so every libvirt call
        # can be asserted without a hypervisor.
        self.domain = mock.Mock(spec=fakelibvirt.virDomain)
        self.guest = libvirt_guest.Guest(self.domain)

    def test_repr(self):
        self.domain.ID.return_value = 99
        self.domain.UUIDString.return_value = "UUID"
        self.domain.name.return_value = "foo"
        self.assertEqual("<Guest 99 foo UUID>", repr(self.guest))

    @mock.patch.object(fakelibvirt.Connection, 'defineXML')
    def test_create(self, mock_define):
        libvirt_guest.Guest.create("xml", self.host)
        mock_define.assert_called_once_with("xml")

    @mock.patch.object(fakelibvirt.Connection, 'defineXML')
    def test_create_exception(self, mock_define):
        mock_define.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          libvirt_guest.Guest.create,
                          "foo", self.host)

    def test_launch(self):
        self.guest.launch()
        self.domain.createWithFlags.assert_called_once_with(0)

    def test_launch_and_pause(self):
        self.guest.launch(pause=True)
        self.domain.createWithFlags.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_START_PAUSED)

    @mock.patch.object(encodeutils, 'safe_decode')
    def test_launch_exception(self, mock_safe_decode):
        self.domain.createWithFlags.side_effect = test.TestingException
        mock_safe_decode.return_value = "</xml>"
        self.assertRaises(test.TestingException, self.guest.launch)
        # ``Mock.called`` is a bool; assert it directly rather than
        # comparing it with the integer 1.
        self.assertTrue(mock_safe_decode.called)

    @mock.patch.object(utils, 'execute')
    @mock.patch.object(libvirt_guest.Guest, 'get_interfaces')
    def test_enable_hairpin(self, mock_get_interfaces, mock_execute):
        mock_get_interfaces.return_value = ["vnet0", "vnet1"]
        self.guest.enable_hairpin()
        mock_execute.assert_has_calls([
            mock.call(
                'tee', '/sys/class/net/vnet0/brport/hairpin_mode',
                run_as_root=True, process_input='1', check_exit_code=[0, 1]),
            mock.call(
                'tee', '/sys/class/net/vnet1/brport/hairpin_mode',
                run_as_root=True, process_input='1', check_exit_code=[0, 1])])

    @mock.patch.object(encodeutils, 'safe_decode')
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(libvirt_guest.Guest, 'get_interfaces')
    def test_enable_hairpin_exception(self, mock_get_interfaces,
                                      mock_execute, mock_safe_decode):
        mock_get_interfaces.return_value = ["foo"]
        mock_execute.side_effect = test.TestingException('oops')
        self.assertRaises(test.TestingException, self.guest.enable_hairpin)
        # ``Mock.called`` is a bool; assert it directly rather than
        # comparing it with the integer 1.
        self.assertTrue(mock_safe_decode.called)

    def test_get_interfaces(self):
        self.domain.XMLDesc.return_value = """<domain>
  <devices>
    <interface type="network">
      <target dev="vnet0"/>
    </interface>
    <interface type="network">
      <target dev="vnet1"/>
    </interface>
  </devices>
</domain>"""
        self.assertEqual(["vnet0", "vnet1"], self.guest.get_interfaces())

    def test_get_interfaces_exception(self):
        # Unparsable XML degrades to an empty interface list.
        self.domain.XMLDesc.return_value = "<bad xml>"
        self.assertEqual([], self.guest.get_interfaces())

    def test_poweroff(self):
        self.guest.poweroff()
        self.domain.destroy.assert_called_once_with()

    def test_resume(self):
        self.guest.resume()
        self.domain.resume.assert_called_once_with()

    def test_get_vcpus_info(self):
        self.domain.vcpus.return_value = ([(0, 1, long(10290000000), 2)],
                                          [(True, True)])
        vcpus = list(self.guest.get_vcpus_info())
        self.assertEqual(0, vcpus[0].id)
        self.assertEqual(2, vcpus[0].cpu)
        self.assertEqual(1, vcpus[0].state)
        self.assertEqual(long(10290000000), vcpus[0].time)

    def test_delete_configuration(self):
        self.guest.delete_configuration()
        self.domain.undefineFlags.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)

    def test_delete_configuration_exception(self):
        # undefineFlags failure falls back to plain undefine().
        self.domain.undefineFlags.side_effect = fakelibvirt.libvirtError(
            'oops')
        self.domain.ID.return_value = 1
        self.guest.delete_configuration()
        self.domain.undefine.assert_called_once_with()

    def test_attach_device(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.attach_device(conf)
        self.domain.attachDeviceFlags.assert_called_once_with(
            "</xml>", flags=0)

    def test_attach_device_persistent(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.attach_device(conf, persistent=True)
        self.domain.attachDeviceFlags.assert_called_once_with(
            "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)

    def test_attach_device_live(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.attach_device(conf, live=True)
        self.domain.attachDeviceFlags.assert_called_once_with(
            "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

    def test_attach_device_persistent_live(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.attach_device(conf, persistent=True, live=True)
        self.domain.attachDeviceFlags.assert_called_once_with(
            "</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

    def test_detach_device(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.detach_device(conf)
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=0)

    def test_detach_device_persistent(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.detach_device(conf, persistent=True)
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG)

    def test_detach_device_live(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.detach_device(conf, live=True)
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

    def test_detach_device_persistent_live(self):
        conf = mock.Mock(spec=vconfig.LibvirtConfigGuestDevice)
        conf.to_xml.return_value = "</xml>"
        self.guest.detach_device(conf, persistent=True, live=True)
        self.domain.detachDeviceFlags.assert_called_once_with(
            "</xml>", flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

    def test_get_xml_desc(self):
        self.guest.get_xml_desc()
        self.domain.XMLDesc.assert_called_once_with(flags=0)

    def test_get_xml_desc_dump_inactive(self):
        self.guest.get_xml_desc(dump_inactive=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_INACTIVE)

    def test_get_xml_desc_dump_sensitive(self):
        self.guest.get_xml_desc(dump_sensitive=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_SECURE)

    def test_get_xml_desc_dump_inactive_dump_sensitive(self):
        self.guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                   fakelibvirt.VIR_DOMAIN_XML_SECURE))

    def test_get_xml_desc_dump_migratable(self):
        self.guest.get_xml_desc(dump_migratable=True)
        self.domain.XMLDesc.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)

    def test_has_persistent_configuration(self):
        self.assertTrue(
            self.guest.has_persistent_configuration())
        self.domain.isPersistent.assert_called_once_with()

    def test_save_memory_state(self):
        self.guest.save_memory_state()
        self.domain.managedSave.assert_called_once_with(0)

    def test_get_block_device(self):
        disk = 'vda'
        gblock = self.guest.get_block_device(disk)
        self.assertEqual(disk, gblock._disk)
        self.assertEqual(self.guest, gblock._guest)

    def test_set_user_password(self):
        self.guest.set_user_password("foo", "123")
        self.domain.setUserPassword.assert_called_once_with("foo", "123", 0)

    def test_get_devices(self):
        xml = """
<domain type='qemu'>
  <name>QEMUGuest1</name>
  <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
  <memory unit='KiB'>219136</memory>
  <currentMemory unit='KiB'>219136</currentMemory>
  <vcpu placement='static'>1</vcpu>
  <os>
    <type arch='i686' machine='pc'>hvm</type>
    <boot dev='hd'/>
  </os>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/bin/qemu</emulator>
    <disk type='block' device='disk'>
      <driver name='qemu' type='raw'/>
      <source dev='/dev/HostVG/QEMUGuest2'/>
      <target dev='hda' bus='ide'/>
      <address type='drive' controller='0' bus='0' target='0' unit='0'/>
    </disk>
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw'/>
      <auth username='myname'>
        <secret type='iscsi' usage='mycluster_myname'/>
      </auth>
      <source protocol='iscsi' name='iqn.1992-01.com.example'>
        <host name='example.org' port='6000'/>
      </source>
      <target dev='vda' bus='virtio'/>
    </disk>
    <disk type='network' device='disk'>
      <driver name='qemu' type='raw'/>
      <source protocol='iscsi' name='iqn.1992-01.com.example/1'>
        <host name='example.org' port='6000'/>
      </source>
      <target dev='vdb' bus='virtio'/>
    </disk>
    <hostdev mode='subsystem' type='pci' managed='yes'>
      <source>
        <address domain='0x0000' bus='0x06' slot='0x12' function='0x5'/>
      </source>
    </hostdev>
    <hostdev mode='subsystem' type='pci' managed='yes'>
      <source>
        <address domain='0x0000' bus='0x06' slot='0x12' function='0x6'/>
      </source>
    </hostdev>
    <controller type='usb' index='0'/>
    <controller type='pci' index='0' model='pci-root'/>
    <memballoon model='none'/>
  </devices>
</domain>
"""
        self.domain.XMLDesc.return_value = xml
        devs = self.guest.get_all_devices()
        # Only currently parse <disk> and <hostdev> elements
        # hence we're not counting the controller/memballoon
        self.assertEqual(5, len(devs))
        self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[3], vconfig.LibvirtConfigGuestHostdev)
        self.assertIsInstance(devs[4], vconfig.LibvirtConfigGuestHostdev)
        devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(3, len(devs))
        self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)
        devs = self.guest.get_all_disks()
        self.assertEqual(3, len(devs))
        self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)
        devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestHostdev)
        self.assertEqual(2, len(devs))
        self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestHostdev)
        self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestHostdev)

    def test_get_info(self):
        self.domain.info.return_value = (1, 2, 3, 4, 5)
        self.domain.ID.return_value = 6
        info = self.guest.get_info(self.host)
        self.domain.info.assert_called_once_with()
        self.assertEqual(1, info.state)
        self.assertEqual(2, info.max_mem_kb)
        self.assertEqual(3, info.mem_kb)
        self.assertEqual(4, info.num_cpu)
        self.assertEqual(5, info.cpu_time_ns)
        self.assertEqual(6, info.id)
class GuestBlockTestCase(test.NoDBTestCase):
def setUp(self):
super(GuestBlockTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.host = host.Host("qemu:///system")
self.context = context.get_admin_context()
self.domain = mock.Mock(spec=fakelibvirt.virDomain)
self.guest = libvirt_guest.Guest(self.domain)
self.gblock = self.guest.get_block_device('vda')
def test_abort_job(self):
self.gblock.abort_job()
self.domain.blockJobAbort.assert_called_once_with('vda', flags=0)
def test_abort_job_async(self):
self.gblock.abort_job(async=True)
self.domain.blockJobAbort.assert_called_once_with(
'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)
def test_abort_job_pivot(self):
self.gblock.abort_job(pivot=True)
self.domain.blockJobAbort.assert_called_once_with(
'vda', flags=fakelibvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
def test_get_job_info(self):
self.domain.blockJobInfo.return_value = {
"type": 1,
"bandwidth": 18,
"cur": 66,
"end": 100}
info = self.gblock.get_job_info()
self.assertEqual(1, info.job)
self.assertEqual(18, info.bandwidth)
self.assertEqual(66, info.cur)
self.assertEqual(100, info.end)
self.domain.blockJobInfo.assert_called_once_with('vda', flags=0)
def test_resize(self):
self.gblock.resize(10)
self.domain.blockResize.assert_called_once_with('vda', 10)
def test_rebase(self):
self.gblock.rebase("foo")
self.domain.blockRebase.assert_called_once_with(
'vda', "foo", 0, flags=0)
def test_rebase_shallow(self):
self.gblock.rebase("foo", shallow=True)
self.domain.blockRebase.assert_called_once_with(
'vda', "foo", 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
def test_rebase_reuse_ext(self):
self.gblock.rebase("foo", reuse_ext=True)
self.domain.blockRebase.assert_called_once_with(
'vda', "foo", 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
def test_rebase_copy(self):
self.gblock.rebase("foo", copy=True)
self.domain.blockRebase.assert_called_once_with(
'vda', "foo", 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY)
def test_rebase_relative(self):
self.gblock.rebase("foo", relative=True)
self.domain.blockRebase.assert_called_once_with(
'vda', "foo", 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
def test_commit(self):
self.gblock.commit("foo", "top")
self.domain.blockCommit.assert_called_once_with(
'vda', "foo", "top", 0, flags=0)
def test_commit_relative(self):
self.gblock.commit("foo", "top", relative=True)
self.domain.blockCommit.assert_called_once_with(
'vda', "foo", "top", 0,
flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
def test_wait_for_job(self):
self.domain.blockJobInfo.return_value = {
"type": 4,
"bandwidth": 18,
"cur": 95,
"end": 100}
in_progress = self.gblock.wait_for_job()
self.assertTrue(in_progress)
self.domain.blockJobInfo.return_value = {
"type": 4,
"bandwidth": 18,
"cur": 100,
"end": 100}
in_progress = self.gblock.wait_for_job()
self.assertFalse(in_progress)
self.domain.blockJobInfo.return_value = {"type": 0}
in_progress = self.gblock.wait_for_job(wait_for_job_clean=True)
self.assertFalse(in_progress)
def test_wait_for_job_arbort_on_error(self):
self.domain.blockJobInfo.return_value = -1
self.assertRaises(
exception.NovaException,
self.gblock.wait_for_job, abort_on_error=True)
|
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# compactsnoop Trace compact zone and print details including issuing PID.
# For Linux, uses BCC, eBPF.
#
# This uses in-kernel eBPF maps to cache process details (PID and comm) by
# compact zone begin, as well as a starting timestamp for calculating
# latency.
#
# Copyright (c) 2019 Wenbo Zhang
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 11-NOV-2019 Wenbo Zhang Created this.
from __future__ import print_function
from bcc import BPF
import argparse
import platform
from datetime import datetime, timedelta
# arguments
examples = """examples:
    ./compactsnoop          # trace all compact stall
    ./compactsnoop -T       # include timestamps
    ./compactsnoop -d 10    # trace for 10 seconds only
    ./compactsnoop -K       # output kernel stack trace
    ./compactsnoop -e       # show extended fields
"""
parser = argparse.ArgumentParser(
    description="Trace compact zone",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples,
)
parser.add_argument("-T", "--timestamp", action="store_true",
                    help="include timestamp on output")
parser.add_argument("-p", "--pid", help="trace this PID only")
parser.add_argument("-d", "--duration",
                    help="total duration of trace in seconds")
parser.add_argument("-K", "--kernel-stack", action="store_true",
                    help="output kernel stack trace")
parser.add_argument("-e", "--extended_fields", action="store_true",
                    help="show system memory state")
# --ebpf is an internal flag (dump the generated BPF C and exit).
parser.add_argument("--ebpf", action="store_true", help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0
# Normalize --duration to a timedelta for the poll loop at the bottom.
if args.duration:
    args.duration = timedelta(seconds=int(args.duration))
# Macro toggles prepended to the BPF program. The macro was originally
# misspelled "EXTNEDED_FIELDS"; it is renamed to EXTENDED_FIELDS here and in
# every occurrence inside bpf_text below (a purely internal, consistent
# rename of the generated C).
NO_EXTENDED = """
#ifdef EXTENDED_FIELDS
#undef EXTENDED_FIELDS
#endif
"""
EXTENDED = """
#define EXTENDED_FIELDS 1
"""
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
struct node;
#include <linux/compaction.h>
struct compact_control {
    struct list_head freepages; /* List of free pages to migrate to */
    struct list_head migratepages; /* List of pages being migrated */
    unsigned long nr_freepages; /* Number of isolated free pages */
    unsigned long nr_migratepages; /* Number of pages to migrate */
    unsigned long free_pfn; /* isolate_freepages search base */
    unsigned long migrate_pfn; /* isolate_migratepages search base */
    bool sync; /* Synchronous migration */
};
struct val_t {
    int nid;
    int idx;
    int order;
    int sync;
#ifdef EXTENDED_FIELDS
    int fragindex;
    int low;
    int min;
    int high;
    int free;
#endif
    u64 ts; // compaction begin time
};
struct data_t {
    u32 pid;
    u32 tid;
    int nid;
    int idx;
    int order;
    u64 delta;
    u64 ts; // compaction end time
    int sync;
#ifdef EXTENDED_FIELDS
    int fragindex;
    int low;
    int min;
    int high;
    int free;
#endif
    int status;
    int stack_id;
    char comm[TASK_COMM_LEN];
};
BPF_HASH(start, u64, struct val_t);
BPF_PERF_OUTPUT(events);
BPF_STACK_TRACE(stack_traces, 2048);
#ifdef CONFIG_NUMA
static inline int zone_to_nid_(struct zone *zone)
{
    int node;
    bpf_probe_read(&node, sizeof(node), &zone->node);
    return node;
}
#else
static inline int zone_to_nid_(struct zone *zone)
{
    return 0;
}
#endif
// #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
static inline int zone_idx_(struct zone *zone)
{
    struct pglist_data *zone_pgdat = NULL;
    bpf_probe_read(&zone_pgdat, sizeof(zone_pgdat), &zone->zone_pgdat);
    return zone - zone_pgdat->node_zones;
}
#ifdef EXTENDED_FIELDS
static inline void get_all_wmark_pages(struct zone *zone, struct val_t *valp)
{
    u64 watermark[NR_WMARK] = {};
    u64 watermark_boost = 0;
    bpf_probe_read(&watermark, sizeof(watermark), &zone->watermark);
    valp->min = watermark[WMARK_MIN];
    valp->low = watermark[WMARK_LOW];
    valp->high = watermark[WMARK_HIGH];
    bpf_probe_read(&valp->free, sizeof(valp->free),
        &zone->vm_stat[NR_FREE_PAGES]);
}
#endif
int trace_compact_zone_entry(struct pt_regs *ctx, struct zone *zone,
    struct compact_control *cc)
{
#ifdef EXTENDED_FIELDS
    struct val_t val = { .fragindex=-1000 };
#else
    struct val_t val = { };
#endif
    u64 id = bpf_get_current_pid_tgid();
    PID_FILTER
    val.sync = cc->sync;
    start.update(&id, &val);
    return 0;
}
int trace_compaction_suitable_entry(struct pt_regs *ctx, struct zone *zone,
    int order)
{
    u64 id = bpf_get_current_pid_tgid();
    struct val_t *valp = start.lookup(&id);
    if (valp == NULL) {
        // missed entry
        return 0;
    }
    valp->nid = zone_to_nid_(zone);
    valp->idx = zone_idx_(zone);
    valp->order = order;
#ifdef EXTENDED_FIELDS
    get_all_wmark_pages(zone, valp);
#endif
    return 0;
}
int trace_fragmentation_index_return(struct pt_regs *ctx)
{
    int ret = PT_REGS_RC(ctx);
    u64 id = bpf_get_current_pid_tgid();
    struct val_t *valp = start.lookup(&id);
    if (valp == NULL) {
        // missed entry
        return 0;
    }
#ifdef EXTENDED_FIELDS
    valp->fragindex = ret;
#endif
    return 0;
}
int trace_compaction_suitable_return(struct pt_regs *ctx)
{
    int ret = PT_REGS_RC(ctx);
    u64 id = bpf_get_current_pid_tgid();
    struct val_t *valp = start.lookup(&id);
    if (valp == NULL) {
        // missed entry
        return 0;
    }
    if (ret != COMPACT_CONTINUE)
        start.delete(&id);
    else
        valp->ts = bpf_ktime_get_ns();
    return 0;
}
int trace_compact_zone_return(struct pt_regs *ctx)
{
    int ret = PT_REGS_RC(ctx);
    struct data_t data = {};
    u64 ts = bpf_ktime_get_ns();
    u64 id = bpf_get_current_pid_tgid();
    struct val_t *valp = start.lookup(&id);
    if (valp == NULL) {
        // missed entry or unsuitable
        return 0;
    }
    data.delta = ts - valp->ts;
    data.ts = ts / 1000;
    data.pid = id >> 32;
    data.tid = id;
    bpf_get_current_comm(&data.comm, sizeof(data.comm));
    data.nid = valp->nid;
    data.idx = valp->idx;
    data.order = valp->order;
    data.sync = valp->sync;
#ifdef EXTENDED_FIELDS
    data.fragindex = valp->fragindex;
    data.min = valp->min;
    data.low = valp->low;
    data.high = valp->high;
    data.free = valp->free;
#endif
    data.status = ret;
    data.stack_id = stack_traces.get_stackid(ctx, BPF_F_REUSE_STACKID);
    events.perf_submit(ctx, &data, sizeof(data));
    start.delete(&id);
    return 0;
}
"""
# Only the x86_64 zone layout (DMA/DMA32/NORMAL) is encoded below; refuse
# to run elsewhere rather than print wrong zone names. The message is fixed
# to reference the actual function name, zone_idx_to_str (was
# "zone_idex_to_str").
if platform.machine() != 'x86_64':
    print("""
    Currently only support x86_64 servers, if you want to use it on
    other platforms, please refer include/linux/mmzone.h to modify
    zone_idx_to_str to get the right zone type
""")
    exit()
# Prepend the macro block that enables/disables the extended fields in the
# generated BPF C program.
if args.extended_fields:
    bpf_text = EXTENDED + bpf_text
else:
    bpf_text = NO_EXTENDED + bpf_text
# Substitute the optional PID filter into the entry probe.
if args.pid:
    bpf_text = bpf_text.replace(
        "PID_FILTER", "if (id >> 32 != %s) { return 0; }" % args.pid)
else:
    bpf_text = bpf_text.replace("PID_FILTER", "")
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()
# load BPF program
b = BPF(text=bpf_text)
b.attach_kprobe(event="compact_zone", fn_name="trace_compact_zone_entry")
b.attach_kretprobe(event="compact_zone", fn_name="trace_compact_zone_return")
b.attach_kprobe(
    event="compaction_suitable", fn_name="trace_compaction_suitable_entry"
)
b.attach_kretprobe(
    event="fragmentation_index", fn_name="trace_fragmentation_index_return"
)
b.attach_kretprobe(
    event="compaction_suitable", fn_name="trace_compaction_suitable_return"
)
stack_traces = b.get_table("stack_traces")
# First event timestamp, used as the zero point for -T output.
initial_ts = 0
def zone_idx_to_str(idx):
    """Map a zone index to its x86_64 zone name, or str(idx) if unknown."""
    # from include/linux/mmzone.h
    # NOTICE: consider only x86_64 servers
    names = {
        0: "ZONE_DMA",
        1: "ZONE_DMA32",
        2: "ZONE_NORMAL",
    }
    return names.get(idx, str(idx))
def compact_result_to_str(status):
    """Map a compact_zone() return code to a short status word.

    From include/linux/compaction.h:
      0 skipped  - compaction didn't start as it was not possible or
                   direct reclaim was more suitable
      1 continue - compaction should continue to another pageblock
      2 partial  - direct compaction partially compacted a zone and
                   there are suitable pages
      3 complete - the full zone was compacted
    Unknown codes are rendered as their decimal string.
    """
    names = {
        0: "skipped",
        1: "continue",
        2: "partial",
        3: "complete",
    }
    return names.get(status, str(status))
# header
# Column widths here must match the per-event formats in print_event below.
if args.timestamp:
    print("%-14s" % ("TIME(s)"), end=" ")
print(
    "%-14s %-6s %-4s %-12s %-5s %-7s"
    % ("COMM", "PID", "NODE", "ZONE", "ORDER", "MODE"),
    end=" ",
)
if args.extended_fields:
    print("%-8s %-8s %-8s %-8s %-8s" %
          ("FRAGIDX", "MIN", "LOW", "HIGH", "FREE"), end=" ")
print("%9s %16s" % ("LAT(ms)", "STATUS"))
# process event
def print_event(cpu, data, size):
    """Perf-buffer callback: decode one compaction event and print a row."""
    event = b["events"].event(data)
    global initial_ts
    # First event seen defines time zero for the -T column.
    if not initial_ts:
        initial_ts = event.ts
    if args.timestamp:
        delta = event.ts - initial_ts
        # event.ts is in microseconds (set in the BPF program); show seconds.
        print("%-14.9f" % (float(delta) / 1000000), end=" ")
    print("%-14.14s %-6s %-4s %-12s %-5s %-7s" % (
        event.comm.decode("utf-8", "replace"),
        event.pid,
        event.nid,
        zone_idx_to_str(event.idx),
        event.order,
        "SYNC" if event.sync else "ASYNC"), end=" ")
    if args.extended_fields:
        # fragindex is scaled by 1000 on the kernel side.
        print("%-8.3f %-8s %-8s %-8s %-8s" % (
            float(event.fragindex) / 1000,
            event.min,
            event.low,
            event.high,
            event.free), end=" ")
    print("%9.3f %16s" % (
        float(event.delta) / 1000000, compact_result_to_str(event.status)))
    if args.kernel_stack:
        # Resolve and print the captured kernel stack, one frame per line.
        for addr in stack_traces.walk(event.stack_id):
            sym = b.ksym(addr, show_offset=True)
            print("\t%s" % sym)
        print("")
# loop with callback to print_event
b["events"].open_perf_buffer(print_event, page_cnt=64)
start_time = datetime.now()
# Poll until the optional --duration elapses; Ctrl-C exits cleanly.
while not args.duration or datetime.now() - start_time < args.duration:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()
|
|
"""."""
from copy import deepcopy as _dcopy
import multiprocessing as mp_
import numpy as _np
from mathphys.functions import save_pickle as _save_pickle, \
load_pickle as _load_pickle
import pyaccel as _pyaccel
class LOCOUtils:
"""LOCO utils."""
    @staticmethod
    def save_data(fname, jlocodict, overwrite=True):
        """Pickle the LOCO Jacobian dictionary *jlocodict* to *fname*.

        The overwrite flag is passed through to mathphys' save_pickle.
        """
        _save_pickle(jlocodict, fname, overwrite=overwrite)
    @staticmethod
    def load_data(fname):
        """Load and return a pickled LOCO data dictionary from *fname*."""
        return _load_pickle(fname)
@staticmethod
def get_idx(indcs):
"""."""
return _np.array([idx[0] for idx in indcs])
@staticmethod
def respm_calc(model, respm, use_dispersion):
"""."""
respm.model = _dcopy(model)
matrix = respm.get_respm()
if not use_dispersion:
matrix[:, -1] *= 0
return matrix
@staticmethod
def apply_bpm_gain(matrix, gain):
"""."""
return gain[:, None] * matrix
@staticmethod
def apply_bpm_roll(matrix, roll):
"""."""
cos_mat = _np.diag(_np.cos(roll))
sin_mat = _np.diag(_np.sin(roll))
r_alpha = _np.hstack((cos_mat, sin_mat))
r_alpha = _np.vstack((r_alpha, _np.hstack((-sin_mat, cos_mat))))
return _np.dot(r_alpha, matrix)
@staticmethod
def apply_corr_gain(matrix, gain):
"""."""
matrix[:, :-1] *= gain[None, :]
return matrix
@staticmethod
def apply_all_gain(matrix, gain_bpm, roll_bpm, gain_corr):
"""."""
matrix = LOCOUtils.apply_bpm_gain(matrix, gain_bpm)
matrix = LOCOUtils.apply_bpm_roll(matrix, roll_bpm)
matrix = LOCOUtils.apply_corr_gain(matrix, gain_corr)
return matrix
@staticmethod
def apply_bpm_weight(matrix, weight_bpm):
"""."""
return weight_bpm * matrix
@staticmethod
def apply_corr_weight(matrix, weight_corr):
"""."""
return matrix * weight_corr[None, :]
@staticmethod
def apply_all_weight(matrix, weight_bpm, weight_corr):
"""."""
matrix = LOCOUtils.apply_bpm_weight(matrix, weight_bpm)
matrix = LOCOUtils.apply_corr_weight(matrix, weight_corr)
return matrix
@staticmethod
def remove_offdiagonal(matrix_in, nr_bpm, nr_ch, nr_cv):
"""."""
matrix_out = _np.zeros(matrix_in.shape)
matrix_out[:nr_bpm, :nr_ch] = matrix_in[:nr_bpm, :nr_ch]
matrix_out[nr_bpm:, nr_ch:nr_ch+nr_cv] = \
matrix_in[nr_bpm:, nr_ch:nr_ch+nr_cv]
matrix_out[:nr_bpm, -1] = matrix_in[:nr_bpm, -1]
return matrix_out
@staticmethod
def remove_diagonal(matrix_in, nr_bpm, nr_ch):
"""."""
matrix_out = _np.zeros(matrix_in.shape)
matrix_out[:nr_bpm, nr_ch:-1] = matrix_in[:nr_bpm, nr_ch:-1]
matrix_out[nr_bpm:, :nr_ch] = matrix_in[nr_bpm:, :nr_ch]
matrix_out[nr_bpm:, -1] = matrix_in[nr_bpm:, -1]
return matrix_out
@staticmethod
def add_dispersion_to_respm(matrix, energy_shift, dispersion):
"""."""
matrix_out = _dcopy(matrix)
matrix_out[:, :-1] += dispersion[:, None] * energy_shift[None, :]
return matrix_out
@staticmethod
def get_quads_strengths(model, indices):
"""."""
kquads = []
for qidx in indices:
kquads.append(_pyaccel.lattice.get_attribute(
model, 'KL', qidx))
return _np.array(kquads)
@staticmethod
def set_quadmag_kdelta(model, idx_mag, kvalues, kdelta):
"""."""
for idx, idx_seg in enumerate(idx_mag):
_pyaccel.lattice.set_attribute(
model, 'KL', idx_seg, kvalues[idx] + kdelta/len(idx_mag))
@staticmethod
def set_quadset_kdelta(model, idx_set, kvalues, kdelta):
"""."""
for idx, idx_mag in enumerate(idx_set):
LOCOUtils.set_quadmag_kdelta(
model, idx_mag, kvalues[idx], kdelta)
@staticmethod
def set_dipmag_kdelta(model, idx_mag, kvalues, kdelta):
"""."""
ktotal = _np.sum(kvalues)
if ktotal:
newk = [kval*(1+kdelta/ktotal) for kval in kvalues]
_pyaccel.lattice.set_attribute(
model, 'KL', idx_mag, newk)
else:
newk = [kval + kdelta/len(idx_mag) for kval in kvalues]
_pyaccel.lattice.set_attribute(
model, 'KL', idx_mag, kvalues + kdelta/len(idx_mag))
@staticmethod
def set_dipset_kdelta(model, idx_set, kvalues, kdelta):
"""."""
for idx, idx_mag in enumerate(idx_set):
LOCOUtils.set_dipmag_kdelta(
model, idx_mag, kvalues[idx], kdelta)
@staticmethod
def set_quadmag_ksdelta(model, idx_mag, ksvalues, ksdelta):
"""."""
_pyaccel.lattice.set_attribute(
model, 'KsL', idx_mag, ksvalues + ksdelta)
@staticmethod
def set_dipmag_ksdelta(model, idx_mag, ksvalues, ksdelta):
"""."""
kstotal = _np.sum(ksvalues)
if kstotal:
newks = [ksval*(1+ksdelta/kstotal) for ksval in ksvalues]
_pyaccel.lattice.set_attribute(
model, 'KsL', idx_mag, newks)
else:
newks = [ksval + ksdelta/len(idx_mag) for ksval in ksvalues]
_pyaccel.lattice.set_attribute(
model, 'KsL', idx_mag, newks)
@staticmethod
def set_dipmag_kick(model, idx_mag, kick_values, kick_delta):
"""."""
angle = _np.array(
_pyaccel.lattice.get_attribute(model, 'angle', idx_mag))
angle /= _np.sum(angle)
_pyaccel.lattice.set_attribute(
model, 'hkick_polynom', idx_mag, kick_values + kick_delta * angle)
@staticmethod
def set_girders_long_shift(model, girders, ds_shift):
"""."""
for i, inds in enumerate(girders):
if ds_shift[i]:
model[inds[0]-1].length += ds_shift[i]
model[inds[1]+1].length -= ds_shift[i]
return model
    @staticmethod
    def jloco_calc_linear(config, matrix):
        """Analytic Jacobians of the response matrix wrt linear knobs.

        Returns a tuple (dmdg_bpm, dmdalpha_bpm, dmdg_corr) with the
        derivatives wrt BPM gains, BPM roll angles and corrector gains.
        Each output has one column per knob; rows are the flattened
        response matrix.
        """
        nbpm = config.nr_bpm
        nch = config.nr_ch
        ncv = config.nr_cv
        ncorr = nch + ncv
        shape0 = matrix.shape[0]
        shape1 = matrix.shape[1]
        # sanity checks: rows are 2*nbpm (x then y), columns are the
        # correctors plus an optional RF/dispersion column
        if shape0 != 2*nbpm:
            raise Exception('Problem with BPM number in matrix')
        if shape1 not in (ncorr, ncorr + 1):
            raise Exception('Problem with correctors number in matrix')
        if shape1 < ncorr + 1 and config.use_dispersion:
            raise Exception('There is no dispersion line in the matrix')
        # default gains are unity, default rolls are zero
        if config.gain_bpm is not None:
            g_bpm = config.gain_bpm
        else:
            g_bpm = _np.ones(2*nbpm)
        if config.roll_bpm is not None:
            alpha_bpm = config.roll_bpm
        else:
            alpha_bpm = _np.zeros(nbpm)
        # block rotation matrix R(alpha) for BPM rolls and its derivative
        # dR/dalpha, built from diagonal cos/sin matrices
        cos_mat = _np.diag(_np.cos(alpha_bpm))
        sin_mat = _np.diag(_np.sin(alpha_bpm))
        r_alpha = _np.hstack((cos_mat, sin_mat))
        r_alpha = _np.vstack((r_alpha, _np.hstack((-sin_mat, cos_mat))))
        dr_alpha = _np.hstack((-sin_mat, cos_mat))
        dr_alpha = _np.vstack((dr_alpha, _np.hstack((-cos_mat, sin_mat))))
        # derivative wrt each BPM gain: select one BPM row via a
        # kronecker matrix, rotate, and flatten
        dmdg_bpm = _np.zeros((shape0*shape1, 2*nbpm))
        for num in range(shape0):
            kron = LOCOUtils.kronecker(num, num, shape0)
            dbmat = _np.dot(r_alpha, kron)
            dmdg_bpm[:, num] = _np.dot(dbmat, matrix).ravel()
        # derivative wrt each BPM roll: one angle affects both the x and
        # y rows of that BPM, hence the (2, 2) tiling
        dmdalpha_bpm = _np.zeros((shape0*shape1, nbpm))
        for idx in range(shape0//2):
            kron = LOCOUtils.kronecker(idx, idx, shape0//2)
            kron = _np.tile(kron, (2, 2))
            drmat = _np.dot(kron, dr_alpha)
            dbmat = drmat * g_bpm[:, None]
            dmdalpha_bpm[:, idx] = _np.dot(dbmat, matrix).ravel()
        # derivative wrt each corrector gain: select one corrector column
        dmdg_corr = _np.zeros((shape0*shape1, ncorr))
        for idx in range(ncorr):
            kron = LOCOUtils.kronecker(idx, idx, shape1)
            dmdg_corr[:, idx] = _np.dot(matrix, kron).ravel()
        return dmdg_bpm, dmdalpha_bpm, dmdg_corr
@staticmethod
def _parallel_base(config, model, indices, func, magtype=None):
if not config.parallel:
mat = func(config, model, indices)
else:
slcs = LOCOUtils._get_slices_multiprocessing(
config.parallel, len(indices))
with mp_.Pool(processes=len(slcs)) as pool:
res = []
for slc in slcs:
res.append(pool.apply_async(
func,
(config, model, indices[slc], magtype)))
mat = [re.get() for re in res]
mat = _np.concatenate(mat, axis=1)
return mat
@staticmethod
def _get_slices_multiprocessing(parallel, npart):
nrproc = mp_.cpu_count() - 3
nrproc = nrproc if parallel is True else parallel
nrproc = max(nrproc, 1)
nrproc = min(nrproc, npart)
np_proc = (npart // nrproc)*_np.ones(nrproc, dtype=int)
np_proc[:(npart % nrproc)] += 1
parts_proc = _np.r_[0, _np.cumsum(np_proc)]
return [slice(parts_proc[i], parts_proc[i+1]) for i in range(nrproc)]
@staticmethod
def jloco_calc_k_dip(config, model):
"""."""
if config.use_dip_families:
dip_indices = []
for fam_name in config.famname_dipset:
dip_indices.append(config.respm.fam_data[fam_name]['index'])
else:
dip_indices = config.respm.fam_data['BN']['index']
magtype = 'dipole'
dip_matrix = LOCOUtils._parallel_base(
config, model, dip_indices,
LOCOUtils._jloco_calc_k_matrix, magtype)
return dip_matrix
@staticmethod
def jloco_calc_k_quad(config, model):
"""."""
if config.use_quad_families:
kindices = []
for fam_name in config.famname_quadset:
kindices.append(config.respm.fam_data[fam_name]['index'])
else:
kindices = config.respm.fam_data['QN']['index']
magtype = 'quadrupole'
kmatrix = LOCOUtils._parallel_base(
config, model, kindices,
LOCOUtils._jloco_calc_k_matrix, magtype)
return kmatrix
@staticmethod
def jloco_calc_k_sext(config, model):
"""."""
if config.use_sext_families:
sindices = []
for fam_name in config.famname_sextset:
sindices.append(config.respm.fam_data[fam_name]['index'])
else:
sindices = config.respm.fam_data['SN']['index']
magtype = 'sextupole'
smatrix = LOCOUtils._parallel_base(
config, model, sindices,
LOCOUtils._jloco_calc_k_matrix, magtype)
return smatrix
    @staticmethod
    def _jloco_calc_k_matrix(config, model, indices, magtype=None):
        """Numerical Jacobian of the response matrix wrt KL strengths.

        Produces one column per entry of indices; each entry is either a
        whole family or a single magnet, depending on the corresponding
        config.use_*_families flag for this magtype.
        """
        matrix_nominal = LOCOUtils.respm_calc(
            model, config.respm, config.use_dispersion)
        # decide whether the knobs are families or individual magnets
        famtype = None
        if magtype == 'quadrupole':
            famtype = config.use_quad_families
        elif magtype == 'sextupole':
            famtype = config.use_sext_families
        elif magtype == 'dipole':
            famtype = config.use_dip_families
        if famtype:
            kvalues = LOCOUtils.get_quads_strengths(
                model, indices)
            # dipoles distribute the delta proportionally to strength
            if magtype == 'dipole':
                set_quad_kdelta = LOCOUtils.set_dipset_kdelta
            else:
                set_quad_kdelta = LOCOUtils.set_quadset_kdelta
        else:
            kvalues = _np.array(
                _pyaccel.lattice.get_attribute(model, 'KL', indices))
            if magtype == 'dipole':
                set_quad_kdelta = LOCOUtils.set_dipmag_kdelta
            else:
                set_quad_kdelta = LOCOUtils.set_quadmag_kdelta
        kmatrix = _np.zeros((matrix_nominal.size, len(indices)))
        # work on a copy so the caller's model is left untouched
        model_this = _dcopy(model)
        for idx, idx_set in enumerate(indices):
            # forward finite difference with step DEFAULT_DELTA_K
            set_quad_kdelta(
                model_this, idx_set,
                kvalues[idx], config.DEFAULT_DELTA_K)
            matrix_this = LOCOUtils.respm_calc(
                model_this, config.respm, config.use_dispersion)
            dmatrix = (matrix_this - matrix_nominal)/config.DEFAULT_DELTA_K
            kmatrix[:, idx] = dmatrix.ravel()
            # restore the nominal strength before the next knob
            set_quad_kdelta(model_this, idx_set, kvalues[idx], 0)
        return kmatrix
    @staticmethod
    def _jloco_calc_ks_matrix(config, model, indices, magtype=None):
        """Numerical Jacobian of the response matrix wrt skew strengths KsL.

        One column per magnet in indices, via forward finite differences
        with step config.DEFAULT_DELTA_KS.
        """
        matrix_nominal = LOCOUtils.respm_calc(
            model, config.respm, config.use_dispersion)
        ksvalues = _np.array(
            _pyaccel.lattice.get_attribute(model, 'KsL', indices))
        # dipoles distribute the delta proportionally to segment strength
        if magtype == 'dipole':
            set_quad_ksdelta = LOCOUtils.set_dipmag_ksdelta
        else:
            set_quad_ksdelta = LOCOUtils.set_quadmag_ksdelta
        ksmatrix = _np.zeros((matrix_nominal.size, len(indices)))
        # work on a copy so the caller's model is left untouched
        model_this = _dcopy(model)
        for idx, idx_set in enumerate(indices):
            set_quad_ksdelta(
                model_this, idx_set, ksvalues[idx], config.DEFAULT_DELTA_KS)
            matrix_this = LOCOUtils.respm_calc(
                model_this, config.respm, config.use_dispersion)
            dmatrix = (matrix_this - matrix_nominal)/config.DEFAULT_DELTA_KS
            ksmatrix[:, idx] = dmatrix.ravel()
            # restore the nominal value before the next magnet
            set_quad_ksdelta(model_this, idx_set, ksvalues[idx], 0)
        return ksmatrix
@staticmethod
def jloco_calc_ks_dipoles(config, model):
"""."""
ksindices = config.respm.fam_data['BN']['index']
ksmatrix = LOCOUtils._parallel_base(
config, model, ksindices,
LOCOUtils._jloco_calc_ks_matrix, magtype='dipole')
return ksmatrix
@staticmethod
def jloco_calc_ks_quad(config, model):
"""."""
ksindices = config.respm.fam_data['QN']['index']
ksmatrix = LOCOUtils._parallel_base(
config, model, ksindices,
LOCOUtils._jloco_calc_ks_matrix)
return ksmatrix
@staticmethod
def jloco_calc_ks_skewquad(config, model):
"""."""
config.update_skew_quad_knobs()
ksindices = config.skew_quad_indices
ksmatrix = LOCOUtils._parallel_base(
config, model, ksindices,
LOCOUtils._jloco_calc_ks_matrix)
return ksmatrix
@staticmethod
def jloco_calc_ks_sextupoles(config, model):
"""."""
ksindices = config.respm.fam_data['SN']['index']
ksmatrix = LOCOUtils._parallel_base(
config, model, ksindices,
LOCOUtils._jloco_calc_ks_matrix)
return ksmatrix
@staticmethod
def jloco_calc_kick_dipoles(config, model):
"""."""
dip_indices = config.respm.fam_data['BN']['index']
kick_matrix = LOCOUtils._parallel_base(
config, model, dip_indices,
LOCOUtils._jloco_calc_kick_dip)
return kick_matrix
    @staticmethod
    def _jloco_calc_kick_dip(config, model, dip_indices, magtype=None):
        """Numerical derivative of the response matrix wrt dipole kicks,
        returned as a single Jacobian column.

        NOTE(review): the kick delta is applied cumulatively — each loop
        iteration kicks one more dipole without undoing the previous ones,
        and the single output column is overwritten every iteration. The
        returned column therefore reflects all dipoles kicked together
        (a 'family' kick knob) scaled by the last magnet's segment count.
        Confirm this is the intended behavior.
        """
        dip_kick_values = _np.array(_pyaccel.lattice.get_attribute(
            model, 'hkick_polynom', dip_indices))
        set_dip_kick = LOCOUtils.set_dipmag_kick
        matrix_nominal = LOCOUtils.respm_calc(
            model, config.respm, config.use_dispersion)
        dip_kick_matrix = _np.zeros((matrix_nominal.size, 1))
        delta_kick = config.DEFAULT_DELTA_DIP_KICK
        # work on a copy so the caller's model is left untouched
        model_this = _dcopy(model)
        for idx, idx_set in enumerate(dip_indices):
            set_dip_kick(
                model_this, idx_set,
                dip_kick_values[idx], delta_kick)
            nmags = len(idx_set)
            matrix_this = LOCOUtils.respm_calc(
                model_this, config.respm, config.use_dispersion)
            dmatrix = (matrix_this - matrix_nominal) / delta_kick / nmags
            dip_kick_matrix[:, 0] = dmatrix.ravel()
        # restore the nominal kicks applied above
        for idx, idx_set in enumerate(dip_indices):
            set_dip_kick(model_this, idx_set, dip_kick_values[idx], 0)
        return dip_kick_matrix
@staticmethod
def jloco_calc_energy_shift(config, model):
"""."""
matrix0 = LOCOUtils.respm_calc(
model, config.respm, config.use_dispersion)
energy_shift = _np.zeros(config.nr_corr + 1)
dm_energy_shift = _np.zeros((matrix0.size, config.nr_corr))
for cnum in range(config.nr_corr):
energy_shift[cnum] = 1
matrix_shift = config.measured_dispersion[:, None] * \
energy_shift[None, :]
dm_energy_shift[:, cnum] = matrix_shift.ravel()
energy_shift[cnum] = 0
return dm_energy_shift
@staticmethod
def jloco_calc_girders(config, model):
"""."""
gindices = config.gir_indices
gir_matrix = LOCOUtils._parallel_base(
config, model, gindices,
LOCOUtils._jloco_girders_shift)
return gir_matrix
@staticmethod
def _jloco_girders_shift(config, model, gindices, magtype=None):
matrix_nominal = LOCOUtils.respm_calc(
model, config.respm, config.use_dispersion)
gmatrix = _np.zeros((matrix_nominal.size, len(gindices)))
model_this = _dcopy(model)
ds_shift = _np.zeros(gindices.shape[0])
for idx, _ in enumerate(gindices):
ds_shift[idx] = config.DEFAULT_GIRDER_SHIFT
LOCOUtils.set_girders_long_shift(
model_this, gindices, ds_shift)
matrix_this = LOCOUtils.respm_calc(
model_this, config.respm, config.use_dispersion)
dmatrix = (matrix_this - matrix_nominal)
dmatrix /= config.DEFAULT_GIRDER_SHIFT
gmatrix[:, idx] = dmatrix.ravel()
ds_shift[idx] = 0
LOCOUtils.set_girders_long_shift(
model_this, gindices, ds_shift)
return gmatrix
    @staticmethod
    def jloco_merge_linear(
            config, km_quad, km_sext, km_dip,
            ksm_quad, ksm_sext, ksm_dip,
            dmdg_bpm, dmdalpha_bpm, dmdg_corr,
            kick_dip, energy_shift, ks_skewquad,
            girder_shift):
        """Assemble the full LOCO Jacobian from the individual blocks.

        Columns are stacked in a fixed order (quad KL, sext KL, dip KL,
        quad KsL, sext KsL, dip KsL, BPM gains, BPM rolls, corrector
        gains, dipole kick, energy shift, skew quads, girder shifts),
        including only the blocks enabled by the config.fit_* flags.
        param_select() must slice parameters in this same order.
        """
        nbpm = config.nr_bpm
        nch = config.nr_ch
        ncv = config.nr_cv
        # --- count columns per knob group ---
        # NOTE(review): gradient/coupling counts use 'is not None' here
        # but the fill below uses the fit_* flags; a block passed in with
        # its flag off leaves zero columns allocated but unused — confirm
        # callers always keep the two consistent.
        knobs_k = 0
        knobs_ks = 0
        knobs_linear = 0
        knobs_skewquad = 0
        knobs_gir = 0
        if km_quad is not None:
            knobs_k += km_quad.shape[1]
        if km_sext is not None:
            knobs_k += km_sext.shape[1]
        if km_dip is not None:
            knobs_k += km_dip.shape[1]
        if ksm_quad is not None:
            knobs_ks += ksm_quad.shape[1]
        if ksm_sext is not None:
            knobs_ks += ksm_sext.shape[1]
        if ksm_dip is not None:
            knobs_ks += ksm_dip.shape[1]
        if ks_skewquad is not None:
            knobs_skewquad += ks_skewquad.shape[1]
        if config.fit_gain_bpm:
            knobs_linear += 2*nbpm
        if config.fit_roll_bpm:
            knobs_linear += nbpm
        if config.fit_gain_corr:
            knobs_linear += nch + ncv
        if config.fit_energy_shift:
            knobs_linear += nch + ncv
        if config.fit_dipoles_kick:
            # NOTE(review): 3 columns are reserved here, but the fill
            # below uses kick_dip.shape[1] — confirm they always agree.
            knobs_linear += 3
        if config.fit_girder_shift:
            knobs_gir += girder_shift.shape[1]
        nknobs = knobs_k + knobs_ks + knobs_skewquad
        nknobs += knobs_linear
        nknobs += knobs_gir
        # rows: flattened response matrix (2*nbpm BPMs, correctors + RF)
        jloco = _np.zeros(
            (2*nbpm*(nch+ncv+1), nknobs))
        # --- fill columns in the canonical order ---
        idx = 0
        if config.fit_quadrupoles:
            num = km_quad.shape[1]
            jloco[:, idx:idx+num] = km_quad
            idx += num
        if config.fit_sextupoles:
            num = km_sext.shape[1]
            jloco[:, idx:idx+num] = km_sext
            idx += num
        if config.fit_dipoles:
            num = km_dip.shape[1]
            jloco[:, idx:idx+num] = km_dip
            idx += num
        if config.fit_quadrupoles_coupling:
            num = ksm_quad.shape[1]
            jloco[:, idx:idx+num] = ksm_quad
            idx += num
        if config.fit_sextupoles_coupling:
            num = ksm_sext.shape[1]
            jloco[:, idx:idx+num] = ksm_sext
            idx += num
        if config.fit_dipoles_coupling:
            num = ksm_dip.shape[1]
            jloco[:, idx:idx+num] = ksm_dip
            idx += num
        if config.fit_gain_bpm:
            num = dmdg_bpm.shape[1]
            jloco[:, idx:idx+num] = dmdg_bpm
            idx += num
        if config.fit_roll_bpm:
            num = dmdalpha_bpm.shape[1]
            jloco[:, idx:idx+num] = dmdalpha_bpm
            idx += num
        if config.fit_gain_corr:
            num = dmdg_corr.shape[1]
            jloco[:, idx:idx+num] = dmdg_corr
            idx += num
        if config.fit_dipoles_kick:
            num = kick_dip.shape[1]
            jloco[:, idx:idx+num] = kick_dip
            idx += num
        if config.fit_energy_shift:
            num = energy_shift.shape[1]
            jloco[:, idx:idx+num] = energy_shift
            idx += num
        if config.fit_skew_quadrupoles:
            num = knobs_skewquad
            jloco[:, idx:idx+num] = ks_skewquad
            idx += num
        if config.fit_girder_shift:
            num = knobs_gir
            jloco[:, idx:idx+num] = girder_shift
            idx += num
        return jloco
@staticmethod
def jloco_apply_weight(jloco, weight_bpm, weight_corr):
"""."""
weight = (weight_bpm * weight_corr[None, :]).ravel()
return weight[:, None] * jloco
    @staticmethod
    def param_select(config, param):
        """Slice the flat fitted-parameter vector into a dict per knob
        group, in the same column order used by jloco_merge_linear.

        Only the groups enabled by config.fit_* flags are present in the
        returned dict; sizes come from the corresponding index lists.
        """
        idx = 0
        param_dict = dict()
        if config.fit_quadrupoles:
            size = len(config.quad_indices)
            param_dict['quadrupoles_gradient'] = param[idx:idx+size]
            idx += size
        if config.fit_sextupoles:
            size = len(config.sext_indices)
            param_dict['sextupoles_gradient'] = param[idx:idx+size]
            idx += size
        if config.fit_dipoles:
            size = len(config.dip_indices)
            param_dict['dipoles_gradient'] = param[idx:idx+size]
            idx += size
        if config.fit_quadrupoles_coupling:
            size = len(config.quad_indices_ks)
            param_dict['quadrupoles_coupling'] = param[idx:idx+size]
            idx += size
        if config.fit_sextupoles_coupling:
            size = len(config.sext_indices)
            param_dict['sextupoles_coupling'] = param[idx:idx+size]
            idx += size
        if config.fit_dipoles_coupling:
            size = len(config.dip_indices_ks)
            param_dict['dipoles_coupling'] = param[idx:idx+size]
            idx += size
        if config.fit_gain_bpm:
            # one gain per BPM plane
            size = 2*config.nr_bpm
            param_dict['gain_bpm'] = param[idx:idx+size]
            idx += size
        if config.fit_roll_bpm:
            size = config.nr_bpm
            param_dict['roll_bpm'] = param[idx:idx+size]
            idx += size
        if config.fit_gain_corr:
            size = config.nr_corr
            param_dict['gain_corr'] = param[idx:idx+size]
            idx += size
        if config.fit_dipoles_kick:
            size = len(config.dip_indices)
            param_dict['dipoles_kick'] = param[idx:idx+size]
            idx += size
        if config.fit_energy_shift:
            size = config.nr_corr
            param_dict['energy_shift'] = param[idx:idx+size]
            idx += size
        if config.fit_skew_quadrupoles:
            size = len(config.skew_quad_indices)
            param_dict['skew_quadrupoles'] = param[idx:idx+size]
            idx += size
        if config.fit_girder_shift:
            size = config.gir_indices.shape[0]
            param_dict['girders_shift'] = param[idx:idx+size]
            idx += size
        return param_dict
@staticmethod
def kronecker(i, j, size):
"""."""
kron = _np.zeros((size, size))
if i == j:
kron[i, i] = 1
else:
kron[i, j] = 1
kron[j, i] = 1
return kron
|
|
import asyncio
import os
import traceback
from functools import partial
from inspect import isawaitable
from multiprocessing import Process
from signal import (
SIGTERM, SIGINT,
signal as signal_func,
Signals
)
from socket import (
socket,
SOL_SOCKET,
SO_REUSEADDR,
)
from time import time
from httptools import HttpRequestParser
from httptools.parser.errors import HttpParserError
try:
import uvloop as async_loop
except ImportError:
async_loop = asyncio
from sanic.log import log, netlog
from sanic.response import HTTPResponse
from sanic.request import Request
from sanic.exceptions import (
RequestTimeout, PayloadTooLarge, InvalidUsage, ServerError)
# Wall-clock timestamp cached once per second by update_current_time();
# read by connections when refreshing keep-alive timeouts.
current_time = None
class Signal:
    """Shared shutdown flag between the server loop and its connections."""

    # Set to True during shutdown so keep-alive connections stop reusing.
    stopped = False
class CIDict(dict):
    """Case Insensitive dict where all keys are converted to lowercase

    This does not maintain the inputted case when calling items() or keys()
    in favor of speed, since headers are case insensitive
    """

    def get(self, key, default=None):
        return super().get(key.casefold(), default)

    def __getitem__(self, key):
        return super().__getitem__(key.casefold())

    def __setitem__(self, key, value):
        return super().__setitem__(key.casefold(), value)

    def __contains__(self, key):
        return super().__contains__(key.casefold())

    def __delitem__(self, key):
        # BUGFIX: deletion previously used dict.__delitem__ unmodified,
        # so `del d['X-Foo']` missed keys stored casefolded by
        # __setitem__; casefold on deletion as well.
        return super().__delitem__(key.casefold())
class HttpProtocol(asyncio.Protocol):
    """Per-connection HTTP protocol.

    Feeds incoming bytes to an httptools HttpRequestParser, builds a
    Request from the parser callbacks, and dispatches it to the
    configured request handler. Also manages request timeouts,
    keep-alive reuse and access/error logging.
    """

    __slots__ = (
        # event loop, connection
        'loop', 'transport', 'connections', 'signal',
        # request params
        'parser', 'request', 'url', 'headers',
        # request config
        'request_handler', 'request_timeout', 'request_max_size',
        'request_class', 'is_request_stream', 'router',
        # enable or disable access log / error log purpose
        'has_log',
        # connection management
        '_total_request_size', '_timeout_handler', '_last_communication_time',
        '_is_stream_handler')
    # NOTE(review): several attributes assigned in __init__ (error_handler,
    # _last_request_time, _request_handler_task, _request_stream_task,
    # _keep_alive, _header_fragment, state, _debug) are missing from
    # __slots__. This only works because asyncio.Protocol defines no
    # __slots__, so instances still carry a __dict__ — confirm intended.

    def __init__(self, *, loop, request_handler, error_handler,
                 signal=Signal(), connections=set(), request_timeout=60,
                 request_max_size=None, request_class=None, has_log=True,
                 keep_alive=True, is_request_stream=False, router=None,
                 state=None, debug=False, **kwargs):
        # NOTE(review): the mutable defaults (signal=Signal(),
        # connections=set()) are shared by all instances constructed
        # without those arguments — verify this sharing is intended.
        self.loop = loop
        self.transport = None
        self.request = None
        self.parser = None
        self.url = None
        self.headers = None
        self.router = router
        self.signal = signal
        self.has_log = has_log
        self.connections = connections
        self.request_handler = request_handler
        self.error_handler = error_handler
        self.request_timeout = request_timeout
        self.request_max_size = request_max_size
        self.request_class = request_class or Request
        self.is_request_stream = is_request_stream
        self._is_stream_handler = False
        self._total_request_size = 0
        self._timeout_handler = None
        self._last_request_time = None
        self._request_handler_task = None
        self._request_stream_task = None
        self._keep_alive = keep_alive
        self._header_fragment = b''
        self.state = state if state else {}
        if 'requests_count' not in self.state:
            self.state['requests_count'] = 0
        self._debug = debug

    @property
    def keep_alive(self):
        # Reuse the connection only while the client allows it and the
        # server is not shutting down.
        return (
            self._keep_alive and
            not self.signal.stopped and
            self.parser.should_keep_alive())

    # -------------------------------------------- #
    # Connection
    # -------------------------------------------- #

    def connection_made(self, transport):
        # Register the connection and arm the request timeout.
        self.connections.add(self)
        self._timeout_handler = self.loop.call_later(
            self.request_timeout, self.connection_timeout)
        self.transport = transport
        self._last_request_time = current_time

    def connection_lost(self, exc):
        self.connections.discard(self)
        self._timeout_handler.cancel()

    def connection_timeout(self):
        # Close the connection if it has been idle for request_timeout
        # seconds; otherwise re-arm the check for the remaining time.
        time_elapsed = current_time - self._last_request_time
        if time_elapsed < self.request_timeout:
            time_left = self.request_timeout - time_elapsed
            self._timeout_handler = (
                self.loop.call_later(time_left, self.connection_timeout))
        else:
            if self._request_stream_task:
                self._request_stream_task.cancel()
            if self._request_handler_task:
                self._request_handler_task.cancel()
            exception = RequestTimeout('Request Timeout')
            self.write_error(exception)

    # -------------------------------------------- #
    # Parsing
    # -------------------------------------------- #

    def data_received(self, data):
        # Check for the request itself getting too large and exceeding
        # memory limits
        self._total_request_size += len(data)
        if self._total_request_size > self.request_max_size:
            # NOTE(review): no return here — the payload keeps being fed
            # to the parser after the error is written; confirm intended.
            exception = PayloadTooLarge('Payload Too Large')
            self.write_error(exception)
        # Create parser if this is the first time we're receiving data
        if self.parser is None:
            assert self.request is None
            self.headers = []
            self.parser = HttpRequestParser(self)
            # requests count
            self.state['requests_count'] = self.state['requests_count'] + 1
        # Parse request chunk or close connection
        try:
            self.parser.feed_data(data)
        except HttpParserError:
            message = 'Bad Request'
            if self._debug:
                message += '\n' + traceback.format_exc()
            exception = InvalidUsage(message)
            self.write_error(exception)

    def on_url(self, url):
        # httptools may deliver the URL in several chunks; accumulate.
        if not self.url:
            self.url = url
        else:
            self.url += url

    def on_header(self, name, value):
        # Header names can also arrive fragmented; buffer until a value
        # callback signals the name is complete.
        self._header_fragment += name
        if value is not None:
            if self._header_fragment == b'Content-Length' \
                    and int(value) > self.request_max_size:
                exception = PayloadTooLarge('Payload Too Large')
                self.write_error(exception)
            self.headers.append(
                (self._header_fragment.decode().casefold(),
                 value.decode()))
            self._header_fragment = b''

    def on_headers_complete(self):
        # All headers parsed: build the Request object.
        self.request = self.request_class(
            url_bytes=self.url,
            headers=CIDict(self.headers),
            version=self.parser.get_http_version(),
            method=self.parser.get_method().decode(),
            transport=self.transport
        )
        if self.is_request_stream:
            self._is_stream_handler = self.router.is_stream_handler(
                self.request)
            if self._is_stream_handler:
                # streaming handlers start before the body is complete
                self.request.stream = asyncio.Queue()
                self.execute_request_handler()

    def on_body(self, body):
        if self.is_request_stream and self._is_stream_handler:
            self._request_stream_task = self.loop.create_task(
                self.request.stream.put(body))
            return
        self.request.body.append(body)

    def on_message_complete(self):
        if self.is_request_stream and self._is_stream_handler:
            # None sentinel marks end-of-body for the streaming handler
            self._request_stream_task = self.loop.create_task(
                self.request.stream.put(None))
            return
        self.request.body = b''.join(self.request.body)
        self.execute_request_handler()

    def execute_request_handler(self):
        self._request_handler_task = self.loop.create_task(
            self.request_handler(
                self.request,
                self.write_response,
                self.stream_response))

    # -------------------------------------------- #
    # Responding
    # -------------------------------------------- #

    def write_response(self, response):
        """
        Writes response content synchronously to the transport.
        """
        # NOTE(review): if self.keep_alive raises before assignment,
        # 'keep_alive' is unbound in the finally block — confirm.
        try:
            keep_alive = self.keep_alive
            self.transport.write(
                response.output(
                    self.request.version, keep_alive,
                    self.request_timeout))
            if self.has_log:
                netlog.info('', extra={
                    'status': response.status,
                    'byte': len(response.body),
                    'host': '{0}:{1}'.format(self.request.ip[0],
                                             self.request.ip[1]),
                    'request': '{0} {1}'.format(self.request.method,
                                                self.request.url)
                })
        except AttributeError:
            log.error(
                ('Invalid response object for url {}, '
                 'Expected Type: HTTPResponse, Actual Type: {}').format(
                    self.url, type(response)))
            self.write_error(ServerError('Invalid response type'))
        except RuntimeError:
            log.error(
                'Connection lost before response written @ {}'.format(
                    self.request.ip))
        except Exception as e:
            self.bail_out(
                "Writing response failed, connection closed {}".format(
                    repr(e)))
        finally:
            if not keep_alive:
                self.transport.close()
            else:
                # reset the idle clock and prepare for the next request
                self._last_request_time = current_time
                self.cleanup()

    async def stream_response(self, response):
        """
        Streams a response to the client asynchronously. Attaches
        the transport to the response so the response consumer can
        write to the response as needed.
        """
        try:
            keep_alive = self.keep_alive
            response.transport = self.transport
            await response.stream(
                self.request.version, keep_alive, self.request_timeout)
            if self.has_log:
                netlog.info('', extra={
                    'status': response.status,
                    'byte': -1,
                    'host': '{0}:{1}'.format(self.request.ip[0],
                                             self.request.ip[1]),
                    'request': '{0} {1}'.format(self.request.method,
                                                self.request.url)
                })
        except AttributeError:
            log.error(
                ('Invalid response object for url {}, '
                 'Expected Type: HTTPResponse, Actual Type: {}').format(
                    self.url, type(response)))
            self.write_error(ServerError('Invalid response type'))
        except RuntimeError:
            log.error(
                'Connection lost before response written @ {}'.format(
                    self.request.ip))
        except Exception as e:
            self.bail_out(
                "Writing response failed, connection closed {}".format(
                    repr(e)))
        finally:
            if not keep_alive:
                self.transport.close()
            else:
                self._last_request_time = current_time
                self.cleanup()

    def write_error(self, exception):
        # Render the exception through the error handler and always close
        # the transport afterwards.
        # NOTE(review): if error_handler.response raises, 'response' is
        # unbound in the finally block — confirm.
        try:
            response = self.error_handler.response(self.request, exception)
            version = self.request.version if self.request else '1.1'
            self.transport.write(response.output(version))
        except RuntimeError:
            log.error(
                'Connection lost before error written @ {}'.format(
                    self.request.ip if self.request else 'Unknown'))
        except Exception as e:
            self.bail_out(
                "Writing error failed, connection closed {}".format(repr(e)),
                from_error=True)
        finally:
            if self.has_log:
                extra = {
                    'status': response.status,
                    'host': '',
                    'request': str(self.request) + str(self.url)
                }
                if response and isinstance(response, HTTPResponse):
                    extra['byte'] = len(response.body)
                else:
                    extra['byte'] = -1
                if self.request:
                    # BUGFIX: a trailing comma previously made 'host' a
                    # 1-tuple instead of the formatted string.
                    extra['host'] = '%s:%d' % self.request.ip
                    extra['request'] = '%s %s' % (self.request.method,
                                                  self.url)
                netlog.info('', extra=extra)
            self.transport.close()

    def bail_out(self, message, from_error=False):
        # Last-resort error path; avoid recursing when the transport is
        # already gone or we are already handling an error.
        if from_error or self.transport.is_closing():
            log.error(
                ("Transport closed @ {} and exception "
                 "experienced during error handling").format(
                    self.transport.get_extra_info('peername')))
            log.debug(
                'Exception:\n{}'.format(traceback.format_exc()))
        else:
            exception = ServerError(message)
            self.write_error(exception)
            log.error(message)

    def cleanup(self):
        """Reset per-request state so the connection can be reused."""
        self.parser = None
        self.request = None
        self.url = None
        self.headers = None
        self._request_handler_task = None
        self._request_stream_task = None
        self._total_request_size = 0
        self._is_stream_handler = False

    def close_if_idle(self):
        """Close the connection if a request is not being sent or received

        :return: boolean - True if closed, false if staying open
        """
        if not self.parser:
            self.transport.close()
            return True
        return False

    def close(self):
        """
        Force close the connection.
        """
        if self.transport is not None:
            self.transport.close()
            self.transport = None
def update_current_time(loop):
    """Cache the current time, since it is needed at the end of every
    keep-alive request to update the request timeout time

    :param loop: event loop used to schedule the next refresh
    :return: None
    """
    global current_time
    now = time()
    current_time = now
    # re-arm the once-per-second refresh
    loop.call_later(1, partial(update_current_time, loop))
def trigger_events(events, loop):
    """Trigger event callbacks (functions or async)

    :param events: one or more sync or async functions to execute, or
        None/empty for a no-op
    :param loop: event loop used to run awaitable results to completion
    """
    # Robustness: serve() defaults its hook lists (before_start, etc.) to
    # None; treat that as "no events" instead of raising TypeError when
    # iterating.
    if not events:
        return
    for event in events:
        result = event(loop)
        if isawaitable(result):
            loop.run_until_complete(result)
def serve(host, port, request_handler, error_handler, before_start=None,
          after_start=None, before_stop=None, after_stop=None, debug=False,
          request_timeout=60, ssl=None, sock=None, request_max_size=None,
          reuse_port=False, loop=None, protocol=HttpProtocol, backlog=100,
          register_sys_signals=True, run_async=False, connections=None,
          signal=Signal(), request_class=None, has_log=True, keep_alive=True,
          is_request_stream=False, router=None, websocket_max_size=None,
          websocket_max_queue=None, state=None,
          graceful_shutdown_timeout=15.0):
    """Start asynchronous HTTP Server on an individual process.
    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param has_log: disable/enable access log and error log
    :param is_request_stream: disable/enable Request.stream
    :param router: Router object
    :return: Nothing
    """
    if not run_async:
        loop = async_loop.new_event_loop()
        asyncio.set_event_loop(loop)
    if debug:
        loop.set_debug(debug)
    connections = connections if connections is not None else set()
    # protocol factory: one HttpProtocol instance per accepted connection
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        has_log=has_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        state=state,
        debug=debug,
    )
    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog
    )
    # Instead of pulling time at the end of every request,
    # pull it once per minute
    loop.call_soon(partial(update_current_time, loop))
    if run_async:
        return server_coroutine
    trigger_events(before_start, loop)
    try:
        http_server = loop.run_until_complete(server_coroutine)
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt raised during startup.
        log.exception("Unable to start server")
        return
    trigger_events(after_start, loop)
    # Register signals for graceful termination
    if register_sys_signals:
        for _signal in (SIGINT, SIGTERM):
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                # BUGFIX: Logger.warn is a deprecated alias of warning.
                log.warning(
                    'Sanic tried to use loop.add_signal_handler but it is'
                    ' not implemented on this platform.')
    pid = os.getpid()
    try:
        log.info('Starting worker [{}]'.format(pid))
        loop.run_forever()
    finally:
        log.info("Stopping worker [{}]".format(pid))
        # Run the on_stop function if provided
        trigger_events(before_stop, loop)
        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())
        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()
        # Gracefully shutdown timeout.
        # We should provide graceful_shutdown_timeout,
        # instead of letting connection hangs forever.
        # Let's roughly calcucate time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1
        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection(force=True))
            else:
                conn.close()
        # NOTE(review): the loop= argument to asyncio.gather was removed
        # in Python 3.10; keep as-is for the Python versions this module
        # targets, but revisit before upgrading.
        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)
        trigger_events(after_stop, loop)
        loop.close()
def serve_multiple(server_settings, workers):
    """Start multiple server processes simultaneously. Stop on interrupt
    and terminate signals, and drain connections when complete.

    :param server_settings: kw arguments to be passed to the serve function
    :param workers: number of workers to launch
    :param stop_event: if provided, is used as a stop signal
    :return:
    """
    server_settings['reuse_port'] = True
    # Handling when custom socket is not provided.
    if server_settings.get('sock') is None:
        # Bind one shared, inheritable socket so every worker process
        # can accept on it; host/port are cleared so serve() uses it.
        sock = socket()
        sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        sock.bind((server_settings['host'], server_settings['port']))
        sock.set_inheritable(True)
        server_settings['sock'] = sock
        server_settings['host'] = None
        server_settings['port'] = None

    def sig_handler(signal, frame):
        # Forward the shutdown signal to every worker process.
        # NOTE(review): 'processes' is the closure variable assigned
        # below; a signal arriving before that assignment would raise
        # NameError — confirm acceptable.
        log.info("Received signal {}. Shutting down.".format(
            Signals(signal).name))
        for process in processes:
            os.kill(process.pid, SIGINT)

    signal_func(SIGINT, lambda s, f: sig_handler(s, f))
    signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
    processes = []
    for _ in range(workers):
        process = Process(target=serve, kwargs=server_settings)
        process.daemon = True
        process.start()
        processes.append(process)
    for process in processes:
        process.join()
    # the above processes will block this until they're stopped
    for process in processes:
        process.terminate()
    server_settings.get('sock').close()
|
|
'''
A wrapper around Google Cloud SQL.
The MySQLdb module is thread safe, but the connections to the database are not,
so the recommendation is that each thread have an independent connection. Currently,
each database access uses its own connection and closes it at the end of the method.
If this becomes expensive time-wise, a mapping of thread to connection can be utilized.
Copyright 2015, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from collections import OrderedDict
import isbcgc_cloudsql_model
class ISBCGC_database_helper(isbcgc_cloudsql_model.ISBCGC_database_helper):
    """
    this class manages the cloud sql metadata upload for the TARGET program

    Each class-level dict below is a declarative table specification that the
    base class consumes:
      'table_name':        name of the CloudSQL table to create/load
      'primary_key_name':  name of the surrogate primary-key column
      'columns':           [column name, SQL type, 'NULL'/'NOT NULL'] triples
      'indices_defs':      one index per entry (list of that index's columns)
      'natural_key_cols':  columns that logically identify a row
      'foreign_keys':      [column, referenced table, referenced column]
    (NOTE(review): exact handling of these keys lives in
    isbcgc_cloudsql_model, which is not visible here -- verify there.)
    """
    # Project-level metadata: one row per TARGET project with rolled-up
    # case/file summary counts.
    TARGET_metadata_project = {
        'table_name': 'TARGET_metadata_project',
        'primary_key_name': 'metadata_project_id',
        'columns': [
            ['project_short_name', 'VARCHAR(40)', 'NOT NULL'],
            ['name', 'VARCHAR(80)', 'NOT NULL'],
            ['program_name', 'VARCHAR(40)', 'NULL'],
            ['primary_site', 'VARCHAR(20)', 'NULL'],
            ['dbgap_accession_number', 'VARCHAR(12)', 'NULL'],
            ['disease_type', 'VARCHAR(120)', 'NULL'],
            ['summary_case_count', 'INTEGER', 'NULL'],
            ['summary_file_count', 'INTEGER', 'NULL'],
            ['summary_file_size', 'BIGINT', 'NULL'],
            ['endpoint_type', 'VARCHAR(8)', 'NULL'],
        ],
        # 'natural_key_cols': [
        #     'case_barcode'
        # ],
        'indices_defs': [
            ['primary_site'],
            ['disease_type'],
            ['project_short_name'],
            ['program_name'],
            ['endpoint_type'],
        ]
    }
    # Clinical (per-case) metadata.
    TARGET_metadata_clinical = {
        'table_name': 'TARGET_metadata_clinical',
        'primary_key_name': 'metadata_clinical_id',
        'columns': [
            ['endpoint_type', 'VARCHAR(10)', 'NOT NULL'],
            ['case_gdc_id', 'VARCHAR(36)', 'NOT NULL'],
            ['case_barcode', 'VARCHAR(40)', 'NOT NULL'],
            ['program_name', 'VARCHAR(30)', 'NOT NULL'],
            ['project_short_name', 'VARCHAR(30)', 'NOT NULL'],
            ['disease_code', 'VARCHAR(30)', 'NOT NULL'],
            ['summary_file_count', 'INT', 'NOT NULL'],
            ['gender', 'VARCHAR(6)', 'NULL'],
            ['vital_status', 'VARCHAR(5)', 'NULL'],
            ['race', 'VARCHAR(41)', 'NULL'],
            ['ethnicity', 'VARCHAR(22)', 'NULL'],
            ['first_event', 'VARCHAR(25)', 'NULL'],
            ['days_to_birth', 'INT', 'NULL'],
            ['days_to_last_followup', 'INT', 'NULL'],
            ['days_to_last_known_alive', 'INT', 'NULL'],
            ['days_to_death', 'INT', 'NULL'],
            ['age_at_diagnosis', 'INT', 'NULL'],
            ['year_of_diagnosis', 'INT', 'NULL'],
            ['year_of_last_follow_up', 'INT', 'NULL'],
            ['event_free_survival', 'INT', 'NULL'],
            ['wbc_at_diagnosis', 'FLOAT', 'NULL'],
            ['protocol', 'VARCHAR(70)', 'NULL']
        ],
        # 'natural_key_cols': [
        #     'case_barcode'
        # ],
        # NOTE(review): every column is individually indexed here --
        # presumably to support arbitrary faceted filtering; verify that this
        # is intended given the write-amplification cost.
        'indices_defs': [
            ['endpoint_type'],
            ['case_gdc_id'],
            ['case_barcode'],
            ['program_name'],
            ['project_short_name'],
            ['disease_code'],
            ['summary_file_count'],
            ['gender'],
            ['vital_status'],
            ['race'],
            ['ethnicity'],
            ['first_event'],
            ['days_to_birth'],
            ['days_to_last_followup'],
            ['days_to_last_known_alive'],
            ['days_to_death'],
            ['age_at_diagnosis'],
            ['year_of_diagnosis'],
            ['year_of_last_follow_up'],
            ['event_free_survival'],
            ['wbc_at_diagnosis'],
            ['protocol']
        ]
    }
    # Biospecimen (per-sample) metadata; links samples to their case.
    TARGET_metadata_biospecimen = {
        'table_name': 'TARGET_metadata_biospecimen',
        'primary_key_name': 'metadata_biospecimen_id',
        'columns': [
            ['endpoint_type', 'VARCHAR(10)', 'NOT NULL'],
            ['sample_gdc_id', 'VARCHAR(36)', 'NOT NULL'],
            ['sample_barcode', 'VARCHAR(40)', 'NOT NULL'],
            ['case_gdc_id', 'VARCHAR(36)', 'NOT NULL'],
            ['case_barcode', 'VARCHAR(40)', 'NOT NULL'],
            ['program_name', 'VARCHAR(30)', 'NOT NULL'],
            ['project_short_name', 'VARCHAR(30)', 'NOT NULL'],
            ['disease_code', 'VARCHAR(30)', 'NOT NULL'],
            ['sample_type', 'VARCHAR(2)', 'NOT NULL'],
            ['tumor_code', 'VARCHAR(2)', 'NOT NULL']
        ],
        # 'natural_key_cols': [
        #     'sample_barcode'
        # ],
        'indices_defs': [
            ['endpoint_type'],
            ['sample_gdc_id'],
            ['sample_barcode'],
            ['case_gdc_id'],
            ['case_barcode'],
            ['program_name'],
            ['project_short_name'],
            ['disease_code'],
            ['sample_type'],
            ['tumor_code']
        ],
        # 'foreign_key': [
        #     'case_barcode',
        #     'metadata_clinical',
        #     'case_barcode'
        # ]
    }
    # Denormalized per-sample table combining biospecimen and clinical fields.
    TARGET_metadata_samples = {
        'table_name': 'TARGET_metadata_samples',
        'primary_key_name': 'metadata_samples_id', # todo: define this?
        'columns': [
            ['case_gdc_id', 'VARCHAR(36)', 'NULL'],
            ['case_barcode', 'VARCHAR(45)', 'NOT NULL'],
            ['project_short_name', 'VARCHAR(40)', 'NOT NULL'],
            ['disease_code', 'VARCHAR(30)', 'NOT NULL'],
            ['program_name', 'VARCHAR(40)', 'NOT NULL'],
            ['sample_gdc_id', 'VARCHAR(36)', 'NULL'],
            ['sample_barcode', 'VARCHAR(45)', 'NOT NULL'],
            ['sample_type', 'VARCHAR(2)', 'NOT NULL'],
            ['endpoint_type', 'VARCHAR(8)', 'NULL'],
            ['age_at_diagnosis', 'INT', 'NULL'],
            ['gender', 'VARCHAR(6)', 'NULL'],
            ['vital_status', 'VARCHAR(5)', 'NULL'],
            ['race', 'VARCHAR(41)', 'NULL'],
            ['ethnicity', 'VARCHAR(22)', 'NULL'],
            ['first_event', 'VARCHAR(25)', 'NULL'],
            ['days_to_birth', 'INT', 'NULL'],
            ['days_to_last_followup', 'INT', 'NULL'],
            ['days_to_last_known_alive', 'INT', 'NULL'],
            ['days_to_death', 'INT', 'NULL'],
            ['year_of_diagnosis', 'INT', 'NULL'],
            ['event_free_survival', 'INT', 'NULL'],
            ['wbc_at_diagnosis', 'FLOAT', 'NULL'],
            ['protocol', 'VARCHAR(70)', 'NULL']
        ],
        'indices_defs': [
            ['case_gdc_id'],
            ['case_barcode'],
            ['sample_gdc_id'],
            ['sample_barcode'],
            ['project_short_name'],
            ['disease_code'],
            ['program_name'],
            ['endpoint_type'],
            ['gender'],
            ['vital_status'],
            ['race'],
            ['ethnicity'],
            ['first_event'],
            ['days_to_birth'],
            ['days_to_last_followup'],
            ['days_to_last_known_alive'],
            ['days_to_death'],
            ['age_at_diagnosis'],
            ['year_of_diagnosis'],
            ['event_free_survival'],
            ['wbc_at_diagnosis'],
            ['protocol']
        ],
        'natural_key_cols': ['sample_barcode'],
        # 'foreign_key': [
        #     'sample_barcode',
        # ]
    }
    # Attribute dictionary: per-attribute display/spec codes.
    TARGET_metadata_attrs = {
        'table_name': 'TARGET_metadata_attrs',
        'primary_key_name': 'metadata_attrs_id', # todo: define this?
        'columns': [
            ['attribute', 'VARCHAR(70)', 'NOT NULL'],
            ['code', 'VARCHAR(1)', 'NOT NULL'],
            ['spec', 'VARCHAR(4)', 'NOT NULL']
        ],
        'indices_defs': [
            ['attribute'],
            ['code'],
            ['spec']
        ]
    }
    # Per-file metadata for the HG19 (legacy) genomic build.
    TARGET_metadata_data_HG19 = {
        'table_name': 'TARGET_metadata_data_HG19',
        'primary_key_name': 'metadata_data_id',
        'columns': [
            ['file_gdc_id', 'VARCHAR(36)', 'NOT NULL'],
            ['case_gdc_id', 'VARCHAR(36)', 'NOT NULL'],
            ['case_barcode', 'VARCHAR(35)', 'NOT NULL'],
            ['sample_gdc_id', 'VARCHAR(45)', 'NULL'],
            ['sample_barcode', 'VARCHAR(45)', 'NULL'],
            ['sample_type', 'VARCHAR(2)', 'NULL'],
            ['aliquot_barcode', 'VARCHAR(45)', 'NULL'],
            ['aliquot_gdc_id', 'VARCHAR(36)', 'NULL'],
            ['project_short_name', 'VARCHAR(40)', 'NOT NULL'],
            ['disease_code', 'VARCHAR(30)', 'NOT NULL'],
            ['program_name', 'VARCHAR(40)', 'NOT NULL'],
            ['data_type', 'VARCHAR(35)', 'NOT NULL'],
            ['data_category', 'VARCHAR(30)', 'NOT NULL'],
            ['experimental_strategy', 'VARCHAR(50)', 'NULL'],
            ['type', 'VARCHAR(40)', 'NULL'],
            ['file_name', 'VARCHAR(120)', 'NOT NULL'],
            ['file_size', 'BIGINT', 'NOT NULL'],
            ['file_state', 'VARCHAR(30)', 'NULL'],
            ['data_format', 'VARCHAR(10)', 'NOT NULL'],
            ['md5sum', 'VARCHAR(33)', 'NULL'],
            ['access', 'VARCHAR(10)', 'NOT NULL'],
            ['acl', 'VARCHAR(25)', 'NULL'],
            ['platform', 'VARCHAR(50)', 'NULL'],
            ['file_name_key', 'VARCHAR(300)', 'NULL'],
            ['file_uploaded', 'VARCHAR(5)', 'NOT NULL'],
            ['endpoint_type', 'VARCHAR(8)', 'NULL'],
            ['analysis_gdc_id', 'VARCHAR(36)', 'NULL'],
            ['analysis_workflow_link', 'VARCHAR(60)', 'NULL'],
            ['analysis_workflow_type', 'VARCHAR(60)', 'NULL'],
            ['center_code', 'VARCHAR(8)', 'NULL'],
            ['center_name', 'VARCHAR(50)', 'NULL'],
            ['center_type', 'VARCHAR(8)', 'NULL'],
            ['species', 'VARCHAR(30)', 'NULL'],
            # this should probably be a foreign key back to the index file record
            ['index_file_id', 'VARCHAR(36)', 'NULL'],
            ['index_file_name', 'VARCHAR(200)', 'NULL'],
            ['index_file_size', 'BIGINT', 'NULL'],
        ],
        # 'natural_key_cols': [
        #     'aliquot_barcode',
        #     'DatafileName'
        # ],
        'indices_defs': [
            ['file_gdc_id'],
            ['case_gdc_id'],
            ['case_barcode'],
            ['sample_gdc_id'],
            ['sample_barcode'],
            ['sample_type'],
            ['aliquot_barcode'],
            ['aliquot_gdc_id'],
            ['project_short_name'],
            ['disease_code'],
            ['program_name'],
            ['data_type'],
            ['data_category'],
            ['experimental_strategy'],
            ['type'],
            ['data_format'],
            ['platform'],
            ['file_uploaded'],
            ['endpoint_type'],
            ['analysis_workflow_link'],
            ['analysis_workflow_type']
        ],
        # 'foreign_key': [
        #     'sample_barcode',
        #     'metadata_biospecimen',
        #     'sample_barcode'
        # ]
    }
    # Per-file metadata for the HG38 (current) genomic build.  Same column
    # set as HG19; the index list additionally covers file_name_key.
    TARGET_metadata_data_HG38 = {
        'table_name': 'TARGET_metadata_data_HG38',
        'primary_key_name': 'metadata_data_id',
        'columns': [
            ['file_gdc_id', 'VARCHAR(36)', 'NOT NULL'],
            ['case_gdc_id', 'VARCHAR(36)', 'NOT NULL'],
            ['case_barcode', 'VARCHAR(35)', 'NOT NULL'],
            ['sample_gdc_id', 'VARCHAR(45)', 'NULL'],
            ['sample_barcode', 'VARCHAR(45)', 'NULL'],
            ['sample_type', 'VARCHAR(2)', 'NULL'],
            ['aliquot_barcode', 'VARCHAR(45)', 'NULL'],
            ['aliquot_gdc_id', 'VARCHAR(36)', 'NULL'],
            ['project_short_name', 'VARCHAR(40)', 'NOT NULL'],
            ['disease_code', 'VARCHAR(30)', 'NOT NULL'],
            ['program_name', 'VARCHAR(40)', 'NOT NULL'],
            ['data_type', 'VARCHAR(35)', 'NOT NULL'],
            ['data_category', 'VARCHAR(30)', 'NOT NULL'],
            ['experimental_strategy', 'VARCHAR(50)', 'NULL'],
            ['type', 'VARCHAR(40)', 'NULL'],
            ['file_name', 'VARCHAR(120)', 'NOT NULL'],
            ['file_size', 'BIGINT', 'NOT NULL'],
            ['file_state', 'VARCHAR(30)', 'NULL'],
            ['data_format', 'VARCHAR(10)', 'NOT NULL'],
            ['md5sum', 'VARCHAR(33)', 'NULL'],
            ['access', 'VARCHAR(10)', 'NOT NULL'],
            ['acl', 'VARCHAR(25)', 'NULL'],
            ['platform', 'VARCHAR(50)', 'NULL'],
            ['file_name_key', 'VARCHAR(300)', 'NULL'],
            ['file_uploaded', 'VARCHAR(5)', 'NOT NULL'],
            ['endpoint_type', 'VARCHAR(8)', 'NULL'],
            ['analysis_gdc_id', 'VARCHAR(36)', 'NULL'],
            ['analysis_workflow_link', 'VARCHAR(60)', 'NULL'],
            ['analysis_workflow_type', 'VARCHAR(60)', 'NULL'],
            ['center_code', 'VARCHAR(8)', 'NULL'],
            ['center_name', 'VARCHAR(50)', 'NULL'],
            ['center_type', 'VARCHAR(8)', 'NULL'],
            ['species', 'VARCHAR(30)', 'NULL'],
            # this should probably be a foreign key back to the index file record
            ['index_file_id', 'VARCHAR(36)', 'NULL'],
            ['index_file_name', 'VARCHAR(200)', 'NULL'],
            ['index_file_size', 'BIGINT', 'NULL'],
        ],
        # 'natural_key_cols': [
        #     'aliquot_barcode',
        #     'DatafileName'
        # ],
        'indices_defs': [
            ['file_gdc_id'],
            ['case_gdc_id'],
            ['case_barcode'],
            ['sample_gdc_id'],
            ['sample_barcode'],
            ['sample_type'],
            ['aliquot_barcode'],
            ['aliquot_gdc_id'],
            ['project_short_name'],
            ['disease_code'],
            ['program_name'],
            ['data_type'],
            ['data_category'],
            ['experimental_strategy'],
            ['type'],
            ['file_name_key'],
            ['file_uploaded'],
            ['data_format'],
            ['platform'],
            ['endpoint_type'],
            ['analysis_workflow_link'],
            ['analysis_workflow_type']
        ],
        # 'foreign_key': [
        #     'sample_barcode',
        #     'metadata_biospecimen',
        #     'sample_barcode'
        # ]
    }
    # Lookup of (genomic build, ISB label) pairs referenced by the
    # sample-data-availability junction table below.
    TARGET_metadata_data_type_availability = {
        'table_name': 'TARGET_metadata_data_type_availability',
        'primary_key_name': 'metadata_data_type_availability_id',
        'columns': [
            ['genomic_build', 'VARCHAR(4)', 'NOT NULL'],
            ['isb_label', 'VARCHAR(40)', 'NOT NULL']
        ],
        'natural_key_cols': [
            'genomic_build',
            'isb_label'
        ],
        'indices_defs': [
            ['genomic_build'],
            ['isb_label']
        ]
    }
    # Junction table: per-sample counts for each data-type-availability row.
    # NOTE(review): no 'primary_key_name' here, unlike the other tables --
    # presumably intentional for a pure junction table; verify against the
    # base class's table-creation logic.
    TARGET_metadata_sample_data_availability = {
        'table_name': 'TARGET_metadata_sample_data_availability',
        'columns': [
            ['metadata_data_type_availability_id', 'INTEGER', 'NOT NULL'],
            ['sample_barcode', 'VARCHAR(40)', 'NOT NULL'],
            ['count', 'INTEGER', 'NOT NULL']
        ],
        'indices_defs': [
            ['metadata_data_type_availability_id', 'sample_barcode'],
            ['sample_barcode'],
            ['count']
        ],
        'foreign_keys': [
            [
                'metadata_data_type_availability_id',
                'TARGET_metadata_data_type_availability',
                'metadata_data_type_availability_id'
            ]
        ]
    }
    # Ordered registry of all table specs; OrderedDict preserves the upload
    # order (referenced tables appear before tables that reference them).
    metadata_tables = OrderedDict(
        [
            ('TARGET_metadata_project', TARGET_metadata_project),
            ('TARGET_metadata_clinical', TARGET_metadata_clinical),
            ('TARGET_metadata_biospecimen', TARGET_metadata_biospecimen),
            ('TARGET_metadata_samples', TARGET_metadata_samples),
            ('TARGET_metadata_data_HG19', TARGET_metadata_data_HG19),
            ('TARGET_metadata_data_HG38', TARGET_metadata_data_HG38),
            ('TARGET_metadata_attrs', TARGET_metadata_attrs),
            ('TARGET_metadata_data_type_availability', TARGET_metadata_data_type_availability),
            ('TARGET_metadata_sample_data_availability', TARGET_metadata_sample_data_availability)
        ]
    )
|
|
#!/usr/bin/python
# Python 2/3 compatibility shim: ``long`` exists only under Python 2, so
# probing it via NameError selects the right tuple of real-number types.
try:
    real_types = (int, long, float)
except NameError:  # Python 3: no ``long``; alias xrange to range as well
    real_types = (int, float)
    xrange = range
import math
import numbers
class Quat:
    """The class defining a quaternion.

    The quaternion x + y*i + z*j + t*k is stored as a list of four floats
    ``self.q == [x, y, z, t]``; ``q[0]`` is the scalar (real) part and
    ``q[1:]`` the vector part.  Real and complex numbers are accepted by the
    arithmetic operators and promoted via :meth:`_normalize`.
    """

    def __init__(self, x=0, y=0, z=0, t=0):
        """Create a Quat instance from up to four real components."""
        self.q = [float(x), float(y), float(z), float(t)]

    def __repr__(self):
        """Compute the string (formal) representation of the quaternion."""
        return "Quat({}, {}, {}, {})".format(*self.q)

    def __getitem__(self, key):
        """Implement quat[key]; index 0 is the scalar part."""
        return self.q[key]

    def _normalize(self, other):
        """Promote a real or complex number to a quaternion.

        Unsupported operand types are returned unchanged so that callers
        fail with AttributeError, matching the original behavior.
        """
        if isinstance(other, numbers.Real):
            return Quat(other)
        if isinstance(other, complex):
            return Quat(other.real, other.imag)
        return other

    def __eq__(self, other):
        """Test if the quaternions are equal (component-wise)."""
        return self.q == self._normalize(other).q

    def __ne__(self, other):
        """Test if the quaternions are not equal."""
        return not self == other

    def __nonzero__(self):
        """Test if the quaternion is not equal to zero (Python 2)."""
        return self.q != [0.0, 0.0, 0.0, 0.0]

    __bool__ = __nonzero__  # Python 3

    def __pos__(self):
        """Implementation of +q."""
        return self

    def __neg__(self):
        """Implementation of -q."""
        return Quat(-self.q[0], -self.q[1], -self.q[2], -self.q[3])

    def __add__(self, other):
        """Addition of quaternions."""
        other = self._normalize(other)
        return Quat(*[self.q[i] + other.q[i] for i in range(4)])

    __radd__ = __add__  # addition is commutative

    def __sub__(self, other):
        """Subtraction of quaternions (self - other)."""
        other = self._normalize(other)
        return Quat(*[self.q[i] - other.q[i] for i in range(4)])

    def __rsub__(self, other):
        """Subtraction of quaternions (other - self)."""
        other = self._normalize(other)
        return Quat(*[other.q[i] - self.q[i] for i in range(4)])

    @staticmethod
    def _hamilton(p, q):
        """Return the Hamilton product of two 4-component sequences.

        Factored out of __mul__/__rmul__, which previously duplicated the
        same sixteen-term expansion with operands swapped.
        """
        return Quat(
            p[0] * q[0] - p[1] * q[1] - p[2] * q[2] - p[3] * q[3],
            p[0] * q[1] + p[1] * q[0] + p[2] * q[3] - p[3] * q[2],
            p[0] * q[2] - p[1] * q[3] + p[2] * q[0] + p[3] * q[1],
            p[0] * q[3] + p[1] * q[2] - p[2] * q[1] + p[3] * q[0])

    def __mul__(self, other):
        """Quaternion product self * other (non-commutative)."""
        other = self._normalize(other)
        return Quat._hamilton(self.q, other.q)

    def __rmul__(self, other):
        """Quaternion product other * self (non-commutative)."""
        other = self._normalize(other)
        return Quat._hamilton(other.q, self.q)

    def norm_squared(self):
        """Return the squared norm of a quaternion (a scalar)."""
        return sum(item * item for item in self.q)

    def __abs__(self):
        """Return the norm of a quaternion (a scalar)."""
        return math.sqrt(self.norm_squared())

    norm = __abs__

    def normalized(self):
        """Return a normalized (unit) version of this quaternion.

        Raises ZeroDivisionError for the zero quaternion.
        """
        a = abs(self)
        return Quat(*[item / a for item in self.q])

    def is_unit(self):
        """Test for a unit quaternion, within floating-point tolerance.

        Bug fix: the original compared ``sum(x*x) == 1.0`` exactly, so
        quaternions produced by normalized() could spuriously fail the test
        due to rounding error.
        """
        return abs(self.norm_squared() - 1.0) < 1e-12

    def conjugate(self):
        """Conjugate the quaternion (negate the vector part)."""
        return Quat(self.q[0], -self.q[1], -self.q[2], -self.q[3])

    def __invert__(self):  # ~p, return p^{-1}
        """Reciprocal of the quaternion: conjugate / |q|^2."""
        return (1.0 / self.norm_squared()) * self.conjugate()

    def _pow1(self, n):
        """Find powers of the quaternion by repeated multiplication, O(n)."""
        if n < 0:
            return pow(~self, -n)
        quat = Quat(1)
        while n > 0:
            quat = quat * self
            n = n - 1
        return quat

    def _pow2(self, n):
        """Find powers of the quaternion (binary exponentiation, O(log n))."""
        if n == 0:
            return Quat(1)
        if n < 0:
            return pow(~self, -n)
        if n == 1:
            return self
        if n == 2:
            return self * self
        quat = self
        result = Quat(1)
        while True:
            if n % 2 == 1:
                result = result * quat
                n = n - 1
                if n == 0:
                    break
            # n is now even: square the base and halve the exponent.
            quat = quat * quat
            n = n // 2
        return result

    __pow__ = _pow2

    def __hash__(self):
        """Hashable quaternions (hash of the component tuple)."""
        return hash(tuple(self.q))

    def __int__(self):
        """Conversion to int is not possible."""
        raise TypeError("can't convert quat to int")

    def __long__(self):
        """Conversion to long is not possible (Python 2)."""
        raise TypeError("can't convert quat to long")

    def __float__(self):
        """Conversion to float is not possible."""
        raise TypeError("can't convert quat to float")

    def __complex__(self):
        """Conversion to complex is not possible."""
        raise TypeError("can't convert quat to complex")

    def apply_to_vector(self, vector):
        """Return the 3D vector rotated by this (unit) quaternion.

        A non-unit quaternion is normalized first (with a printed warning,
        preserving the original behavior).  Raises ValueError if *vector*
        does not have exactly three components.
        """
        if self.is_unit():
            unit_quat = self
        else:
            print("not a unit quaternion")
            unit_quat = self.normalized()
        if len(vector) != 3:
            raise ValueError("not a 3D vector")
        vec_quat = Quat(0, vector[0], vector[1], vector[2])
        # Rotation by conjugation: v' = q * v * q^{-1}.
        vec_quat = unit_quat * vec_quat * (~unit_quat)
        # Return only the vector part (a 3-element slice).
        return vec_quat.q[1:]

    def get_rotation_matrix(self):
        """Return the equivalent 4x4 rotation matrix (list of row lists)."""
        w, x, y, z = self.q
        w2 = w * w
        x2 = x * x
        y2 = y * y
        z2 = z * z
        xy = x * y
        wz = w * z
        xz = x * z
        wy = w * y
        yz = y * z
        wx = w * x
        # Dividing by the squared norm keeps the matrix a pure rotation even
        # for non-unit quaternions.
        inverse = 1.0 / (w2 + x2 + y2 + z2)
        inverse2 = 2.0 * inverse
        m00 = (w2 + x2 - y2 - z2) * inverse
        m01 = (xy - wz) * inverse2
        m02 = (xz + wy) * inverse2
        m10 = (xy + wz) * inverse2
        m11 = (w2 - x2 + y2 - z2) * inverse
        m12 = (yz - wx) * inverse2
        m20 = (xz - wy) * inverse2
        m21 = (yz + wx) * inverse2
        m22 = (w2 - x2 - y2 + z2) * inverse
        return [[m00, m01, m02, 0.],
                [m10, m11, m12, 0.],
                [m20, m21, m22, 0.],
                [0., 0., 0., 1.]]

    @classmethod
    def rot_quat(cls, axis, angle):
        """From the axis-angle representation to the quat.

        The angle is in radians.  The axis should be a unit 3D vector; a
        non-unit axis is normalized first, with a printed warning (as in the
        original implementation).  Bug fix: the unit-length check uses a
        tolerance instead of an exact ``!= 1.0`` float comparison.
        """
        if len(axis) != 3:
            raise ValueError("not a 3D vector")
        length = math.sqrt(sum(x * x for x in axis))
        if abs(length - 1.0) >= 1e-12:
            print("not a unit vector")
            axis = [x / length for x in axis]
        sinus = math.sin(angle / 2.0)
        return cls(math.cos(angle / 2.0),
                   axis[0] * sinus,
                   axis[1] * sinus,
                   axis[2] * sinus)

    @classmethod
    def from_x_rotation(cls, angle):
        """Create the unit quat for a rotation around the X axis."""
        return cls.rot_quat([1, 0, 0], angle)

    @classmethod
    def from_y_rotation(cls, angle):
        """Create the unit quat for a rotation around the Y axis."""
        return cls.rot_quat([0, 1, 0], angle)

    @classmethod
    def from_z_rotation(cls, angle):
        """Create the unit quat for a rotation around the Z axis."""
        return cls.rot_quat([0, 0, 1], angle)

    # Backwards-compatible aliases for the constructor classmethods.
    create_from_axis_rotation = rot_quat
    create_from_x_rotation = from_x_rotation
    create_from_y_rotation = from_y_rotation
    create_from_z_rotation = from_z_rotation

    @classmethod
    def from_eulers(cls, phi, theta, psi):
        """Create the unit quat from z-y-z Euler angles (radians)."""
        unit_quat = cls.from_z_rotation(phi)
        unit_quat *= cls.from_y_rotation(theta)
        unit_quat *= cls.from_z_rotation(psi)
        return unit_quat

Quaternion = Quat
# EOF
|
|
'''
Mutation Strategies
@author: Michael Eddington
@version: $Id$
'''
#
# Copyright (c) Michael Eddington
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Michael Eddington (mike@phed.org)
# $Id$
import sys, os, time, random, hashlib
from Peach.Engine.engine import Engine
from Peach.mutatestrategies import *
from Peach.Engine.incoming import DataCracker
class _RandomMutator(object):
name = "Random"
changedName = "N/A"
class RandomMutationStrategy(MutationStrategy):
    '''
    This mutation strategy will randomly select N fields
    from a data model to fuzz on each test case.
    Note: This strategy does not affect the state model
    Note: First test case will not be modified
    '''
    def __init__(self, node, parent):
        MutationStrategy.__init__(self, node, parent)
        # The seed comes from the XML "seed" attribute when present (and is
        # mirrored onto the engine context); otherwise the engine's seed is
        # adopted.  Stored on the class so re-seeding in next() sees it.
        if node != None and node.hasAttributeNS(None, "seed"):
            Engine.context.SEED = RandomMutationStrategy.SEED = node.getAttributeNS(None, "seed")
        else:
            RandomMutationStrategy.SEED = Engine.context.SEED
        #: Number of iterations before we switch files
        self.switchCount = 200
        if node != None and node.hasAttributeNS(None, "switchCount"):
            self.switchCount = int(node.getAttributeNS(None, "switchCount"))
        #: Number of iterations
        self.iterationCount = 0
        #: Are we using multiple data sets?
        self.multipleFiles = False
        #: Will this mutation strategy ever end?
        self.isFinite = False
        #: Maximum number of fields to change per test case
        self._n = 7
        if node != None and node.hasAttributeNS(None, "maxFieldsToMutate"):
            self._n = int(node.getAttributeNS(None, "maxFieldsToMutate"))
        #: Data models (fullname as key, value is node count)
        self._dataModels = {}
        #: Mutators for each field (field fullname -> list of mutator instances)
        self._fieldMutators = {}
        #: Is initial test case?
        self._isFirstTestCase = True
        #: Data model selected for change
        self._dataModelToChange = None
        #: Random number generator for our instance
        self._random = random.Random()
        # Deterministic seed derived from (SEED, iteration #) so a test case
        # can be reproduced in isolation.  NOTE(review): Python 2 code --
        # hashlib.sha512() is fed a str here; Python 3 would need .encode().
        self._random.seed(hashlib.sha512(str(RandomMutationStrategy.SEED) + str(self.iterationCount)).digest())
        self._mutator = _RandomMutator()
    def next(self):
        # Advance to the next test case and re-seed the RNG from the new
        # iteration count (same scheme as in __init__).
        self.iterationCount += 1
        self._random.seed(hashlib.sha512(str(RandomMutationStrategy.SEED) + str(self.iterationCount)).digest())
    def getCount(self):
        '''
        Return the number of test cases
        (None: a random strategy has no predetermined count).
        '''
        return None
    def _getNodeCount(self, node):
        '''
        Return the number of DataElements that are children of node
        '''
        return len(node.getAllChildDataElements())
    def currentMutator(self):
        '''
        Return the current Mutator in use
        '''
        return self._mutator
    ## Events
    def onTestCaseStarting(self, test, count, stateEngine):
        '''
        Called as we start a test case
        @type test: Test instance
        @param test: Current test being run
        @type count: int
        @param count: Current test #
        @type stateEngine: StateEngine instance
        @param stateEngine: StateEngine instance in use
        '''
        if not self._isFirstTestCase:
            ## Select the data model to change
            # NOTE(review): Python 2 -- dict.keys() returns a list, which
            # random.choice() can index; Python 3 would need list(...).
            self._dataModelToChange = self._random.choice(self._dataModels.keys())
    def onTestCaseFinished(self, test, count, stateEngine):
        '''
        Called as we exit a test case
        @type test: Test instance
        @param test: Current test being run
        @type count: int
        @param count: Current test #
        @type stateEngine: StateEngine instance
        @param stateEngine: StateEngine instance in use
        '''
        self._isFirstTestCase = False
        self._dataModelToChange = None
    def GetRef(self, str, parent = None, childAttr = 'templates'):
        '''
        Get the object indicated by ref. Currently the object must have
        been defined prior to this point in the XML

        NOTE(review): the parameter deliberately shadows the builtin ``str``;
        renaming it would change the keyword-argument API.  ``PeachException``
        is not imported in this module -- presumably it arrives via one of the
        ``import *`` lines at the top of the file; verify.
        '''
        #print "GetRef(%s) -- Starting" % str
        origStr = str
        baseObj = self.context
        hasNamespace = False
        isTopName = True
        found = False
        # Parse out a namespace
        if str.find(":") > -1:
            ns, tmp = str.split(':')
            str = tmp
            #print "GetRef(%s): Found namepsace: %s" % (str, ns)
            # Check for namespace
            if hasattr(self.context.namespaces, ns):
                baseObj = getattr(self.context.namespaces, ns)
            else:
                #print self
                raise PeachException("Unable to locate namespace: " + origStr)
            hasNamespace = True
        # Walk each dotted component, narrowing baseObj at every step.
        for name in str.split('.'):
            #print "GetRef(%s): Looking for part %s" % (str, name)
            found = False
            if not hasNamespace and isTopName and parent != None:
                # check parent, walk up from current parent to top
                # level parent checking at each level.
                while parent != None and not found:
                    #print "GetRef(%s): Parent.name: %s" % (name, parent.name)
                    if hasattr(parent, 'name') and parent.name == name:
                        baseObj = parent
                        found = True
                    elif hasattr(parent, name):
                        baseObj = getattr(parent, name)
                        found = True
                    elif hasattr(parent.children, name):
                        baseObj = getattr(parent.children, name)
                        found = True
                    elif hasattr(parent, childAttr) and hasattr( getattr(parent, childAttr), name):
                        baseObj = getattr( getattr(parent, childAttr), name)
                        found = True
                    else:
                        parent = parent.parent
            # check base obj
            elif hasattr(baseObj, name):
                baseObj = getattr(baseObj, name)
                found = True
            # check childAttr
            elif hasattr(baseObj, childAttr):
                obj = getattr(baseObj, childAttr)
                if hasattr(obj, name):
                    baseObj = getattr(obj, name)
                    found = True
            else:
                raise PeachException("Could not resolve ref %s" % origStr)
            # check childAttr
            if found == False and hasattr(baseObj, childAttr):
                obj = getattr(baseObj, childAttr)
                if hasattr(obj, name):
                    baseObj = getattr(obj, name)
                    found = True
            # check across namespaces if we can't find it in ours
            if isTopName and found == False:
                for child in baseObj:
                    if child.elementType != 'namespace':
                        continue
                    #print "GetRef(%s): CHecking namepsace: %s" % (str, child.name)
                    ret = self._SearchNamespaces(child, name, childAttr)
                    if ret:
                        #print "GetRef(%s) Found part %s in namespace" % (str, name)
                        baseObj = ret
                        found = True
            isTopName = False
        if found == False:
            raise PeachException("Unable to resolve reference: %s" % origStr)
        return baseObj
    def _SearchNamespaces(self, obj, name, attr):
        '''
        Used by GetRef to search across namespaces
        (recursive; returns the resolved object or None).
        '''
        #print "_SearchNamespaces(%s, %s)" % (obj.name, name)
        #print "dir(obj): ", dir(obj)
        # Namespaces are stuffed under this variable
        # if we have it we should be it :)
        if hasattr(obj, 'ns'):
            obj = obj.ns
        if hasattr(obj, name):
            return getattr(obj, name)
        elif hasattr(obj, attr) and hasattr(getattr(obj, attr), name):
            return getattr(getattr(obj, attr), name)
        for child in obj:
            if child.elementType != 'namespace':
                continue
            ret = self._SearchNamespaces(child, name, attr)
            if ret != None:
                return ret
        return None
    def onDataModelGetValue(self, action, dataModel):
        '''
        Called before getting a value from a data model
        @type action: Action
        @param action: Action we are starting
        @type dataModel: Template
        @param dataModel: Data model we are using
        '''
        # Honor a per-<Data> switchCount override from the pit file.
        if action.data != None and action.data.multipleFiles and action.data.switchCount != None:
            self.switchCount = action.data.switchCount
        if action.data != None and action.data.multipleFiles and \
                self.iterationCount % self.switchCount == 0:
            # GetRef() resolves relative to self.context, so point it at the
            # root of this action's model tree.
            self.context = action.getRoot()
            # If a file fails to parse, don't exit the
            # run, instead re-crack until we find a working
            # file.
            while True:
                # Time to switch to another file!
                action.data.gotoRandomFile()
                # Locate fresh copy of template with no data
                obj = self.GetRef(action.template.ref)
                cracker = DataCracker(obj.getRoot())
                cracker.optmizeModelForCracking(obj)
                template = obj.copy(action)
                template.ref = action.template.ref
                template.parent = action
                template.name = action.template.name
                # Switch any references to old name
                oldName = template.ref
                for relation in template._genRelationsInDataModelFromHere():
                    if relation.of == oldName:
                        relation.of = template.name
                    elif relation.From == oldName:
                        relation.From = template.name
                # Crack file
                try:
                    template.setDefaults(action.data, False, True)
                    print ""
                    break
                except:
                    pass
            # Cache default values
            action.template = template
            template.getValue()
            # Re-create state engine copy. We do this to
            # avoid have optmizeModelForCracking called over
            # and over...
            if hasattr(action, "origionalTemplate"):
                #delattr(action, "origionalTemplate")
                action.origionalTemplate = action.template
                action.origionalTemplate.BuildRelationCache()
                action.origionalTemplate.resetDataModel()
                action.origionalTemplate.getValue()
                action.template = action.template.copy(action)
            # Regenerate mutator state
            self._isFirstTestCase = True
            self._dataModels = {}
            self._fieldMutators = {}
        if self._isFirstTestCase:
            # First sight of this data model: record it and build the table
            # of candidate mutators for every field; no mutation is applied.
            fullName = dataModel.getFullname()
            if fullName not in self._dataModels:
                self._dataModels[fullName] = self._getNodeCount(dataModel)
                nodes = dataModel.getAllChildDataElements()
                nodes.append(dataModel)
                nonMutableNodes = []
                for node in nodes:
                    if not node.isMutable:
                        nonMutableNodes.append(node)
                    mutators = []
                    self._fieldMutators[node.getFullname()] = mutators
                    for m in Engine.context.mutators:
                        if m.supportedDataElement(node):
                            # Need to create new instance from class.
                            # weight**4 copies biases random selection toward
                            # heavier mutators.
                            for i in range(m.weight**4):
                                mutators.append( m(Engine.context,node) )
                for node in nonMutableNodes:
                    nodes.remove(node)
                nonMutableNodes = None
            return
        else:
            ## Is this data model we are changing?
            if dataModel.getFullname() != self._dataModelToChange:
                return
            ## Select fields to modify
            nodes = dataModel.getAllChildDataElements()
            nodes.append(dataModel)
            nodesToRemove = []
            # Remove non-mutable fields
            for node in nodes:
                if not node.isMutable:
                    nodesToRemove.append(node)
            for node in nodesToRemove:
                nodes.remove(node)
            ## Quick to check to see if we failed.
            #for node in nodes:
            #    if not self._fieldMutators.has_key(node.getFullname()) or len(self._fieldMutators[node.getFullname()]) == 0:
            #        raise Exception("Found element with no mutations!")
            print "The following elements are mutating: "
            # Select nodes we will modify
            if len(nodes) <= self._n:
                # Few fields: mutate random subsets of all of them, possibly
                # several times over.
                fields = nodes
                maxN = self._n - len(fields)
                if maxN <= 0:
                    # NOTE(review): Python 2 integer division.
                    maxN = self._n/2
                for i in range(self._random.randint(1, maxN)):
                    # Now perform mutations on fields
                    if len(fields) < 3:
                        sampleset = fields
                    else:
                        sampleset = self._random.sample(fields, self._random.randint(1, len(fields)))
                    for node in sampleset:
                        try:
                            mutator = self._random.choice(self._fieldMutators[node.getFullname()])
                            fullName = node.getFullnameInDataModel()[len(dataModel.name)+1:]
                            print " %s" % (fullName)
                            print " %s" % (mutator.name)
                            # Note: Since we are applying multiple mutations
                            # sometimes a mutation will fail. We should
                            # ignore those failures.
                            try:
                                mutator.randomMutation(node, self._random)
                            except:
                                pass
                        except:
                            pass
            else:
                # Many fields: mutate one random sample of up to _n of them.
                fields = self._random.sample(nodes, self._random.randint(1, self._n))
                # Now perform mutations on fields
                for node in fields:
                    try:
                        mutator = self._random.choice(self._fieldMutators[node.getFullname()])
                        #print " > %s: %s" % (node.getFullnameInDataModel(), mutator.name)
                        fullName = node.getFullnameInDataModel()[len(dataModel.name)+1:]
                        print " %s" % (fullName)
                        print " %s" % (mutator.name)
                        # Note: Since we are applying multiple mutations
                        # sometimes a mutation will fail. We should
                        # ignore those failures.
                        try:
                            mutator.randomMutation(node, self._random)
                        except:
                            pass
                    except:
                        pass
            print ""
            # all done!
#MutationStrategy.DefaultStrategy = RandomMutationStrategy
class SingleRandomMutationStrategy(RandomMutationStrategy):
    '''
    Variant of RandomMutationStrategy limited to mutating a single field
    of the selected data model per test case.
    Note: This strategy does not affect the state model
    Note: First test case will not be modified
    '''
    def __init__(self, node, parent):
        RandomMutationStrategy.__init__(self, node, parent)
        # Override the parent's default field budget: exactly one field.
        self._n = 1
class DoubleRandomMutationStrategy(RandomMutationStrategy):
    '''
    Variant of RandomMutationStrategy limited to mutating at most two
    fields of the selected data model per test case.
    Note: This strategy does not affect the state model
    Note: First test case will not be modified
    '''
    def __init__(self, node, parent):
        RandomMutationStrategy.__init__(self, node, parent)
        # Override the parent's default field budget: at most two fields.
        self._n = 2
# end
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Enable 'with' statements in Python 2.5
from __future__ import with_statement
import optparse
import os.path
import shutil
import subprocess
import sys
import time
import traceback
# Maps the buildbot architecture argument ('32'/'64'/'arm') to the matching
# gyp target arch value and SCons platform name.
ARCH_MAP = {
    '32': {
        'gyp_arch': 'ia32',
        'scons_platform': 'x86-32',
    },
    '64': {
        'gyp_arch': 'x64',
        'scons_platform': 'x86-64',
    },
    'arm': {
        'gyp_arch': 'arm',
        'scons_platform': 'arm',
    },
}
def GetHostPlatform():
    """
    Return the canonical host platform name: 'linux', 'win' or 'mac'.

    Raises:
        Exception: if sys.platform is not recognized.
    """
    host = sys.platform.lower()
    if host.startswith('linux'):
        return 'linux'
    if host in ('win', 'win32', 'windows', 'cygwin'):
        return 'win'
    if host in ('darwin', 'mac'):
        return 'mac'
    raise Exception('Can not determine the platform!')
def SetDefaultContextAttributes(context):
    """
    Populate *context* with the default values needed by the SCons function,
    so that SCons can be run without going through ParseStandardCommandLine.
    """
    host = GetHostPlatform()
    defaults = {
        'platform': host,
        'mode': 'opt',
        'default_scons_mode': ['opt-host', 'nacl'],
        # Windows bots default to 64-bit; everything else to 32-bit.
        'default_scons_platform': 'x86-64' if host == 'win' else 'x86-32',
        'clang': False,
        'asan': False,
        'pnacl': False,
        'use_glibc': False,
        'use_breakpad_tools': False,
        'max_jobs': 8,
        'scons_args': [],
    }
    for key, value in defaults.items():
        context[key] = value
def ParseStandardCommandLine(context):
    """
    The standard buildbot scripts require 3 arguments to run.  The first
    argument (dbg/opt) controls if the build is a debug or a release build.
    The second argument (32/64) controls the machine architecture being
    targeted.  The third argument (newlib/glibc) controls which c library
    we're using for the nexes.  Different buildbots may have different sets
    of arguments.

    Parses sys.argv and stores the resulting settings (plus values derived
    from them) into *context*; calls parser.error() -- which exits -- on
    invalid input.
    """
    parser = optparse.OptionParser()
    parser.add_option('-n', '--dry-run', dest='dry_run', default=False,
                      action='store_true', help='Do not execute any commands.')
    parser.add_option('--inside-toolchain', dest='inside_toolchain',
                      default=bool(os.environ.get('INSIDE_TOOLCHAIN')),
                      action='store_true', help='Inside toolchain build.')
    parser.add_option('--clang', dest='clang', default=False,
                      action='store_true', help='Build trusted code with Clang.')
    parser.add_option('--coverage', dest='coverage', default=False,
                      action='store_true',
                      help='Build and test for code coverage.')
    parser.add_option('--validator', dest='validator', default=False,
                      action='store_true',
                      help='Only run validator regression test')
    parser.add_option('--asan', dest='asan', default=False,
                      action='store_true', help='Build trusted code with ASan.')
    parser.add_option('--scons-args', dest='scons_args', default=[],
                      action='append', help='Extra scons arguments.')
    parser.add_option('--step-suffix', metavar='SUFFIX', default='',
                      help='Append SUFFIX to buildbot step names.')
    parser.add_option('--no-gyp', dest='no_gyp', default=False,
                      action='store_true', help='Do not run the gyp build')
    parser.add_option('--use-breakpad-tools', dest='use_breakpad_tools',
                      default=False, action='store_true',
                      help='Use breakpad tools for testing')
    options, args = parser.parse_args()
    if len(args) != 3:
        parser.error('Expected 3 arguments: mode arch clib')

    # script + 3 args == 4
    mode, arch, clib = args
    if mode not in ('dbg', 'opt', 'coverage'):
        parser.error('Invalid mode %r' % mode)
    if arch not in ARCH_MAP:
        parser.error('Invalid arch %r' % arch)
    if clib not in ('newlib', 'glibc', 'pnacl'):
        parser.error('Invalid clib %r' % clib)

    # TODO(ncbray) allow a command-line override
    platform = GetHostPlatform()

    context['platform'] = platform
    context['mode'] = mode
    context['arch'] = arch
    # ASan is Clang, so set the flag to simplify other checks.
    context['clang'] = options.clang or options.asan
    context['validator'] = options.validator
    context['asan'] = options.asan

    # TODO(ncbray) turn derived values into methods.
    context['gyp_mode'] = {
        'opt': 'Release',
        'dbg': 'Debug',
        'coverage': 'Debug'}[mode]
    context['gyp_arch'] = ARCH_MAP[arch]['gyp_arch']
    context['gyp_vars'] = []
    if context['clang']:
        context['gyp_vars'].append('clang=1')
    if context['asan']:
        context['gyp_vars'].append('asan=1')
    context['default_scons_platform'] = ARCH_MAP[arch]['scons_platform']
    context['default_scons_mode'] = ['nacl']
    # Only Linux can build trusted code on ARM.
    # TODO(mcgrathr): clean this up somehow
    if arch != 'arm' or platform == 'linux':
        context['default_scons_mode'] += [mode + '-host']
    context['use_glibc'] = clib == 'glibc'
    context['pnacl'] = clib == 'pnacl'
    context['max_jobs'] = 8
    context['dry_run'] = options.dry_run
    context['inside_toolchain'] = options.inside_toolchain
    context['step_suffix'] = options.step_suffix
    context['no_gyp'] = options.no_gyp
    context['coverage'] = options.coverage
    context['use_breakpad_tools'] = options.use_breakpad_tools
    context['scons_args'] = options.scons_args
    # Don't run gyp on coverage builds.
    if context['coverage']:
        context['no_gyp'] = True

    # Echo the final configuration into the build log.
    for key, value in sorted(context.config.items()):
        print '%s=%s' % (key, value)
def EnsureDirectoryExists(path):
    """
    Create *path* (including missing parents) unless it already exists.
    Does not mask failures, but there really shouldn't be any.
    """
    if os.path.exists(path):
        return
    os.makedirs(path)
def TryToCleanContents(path, file_name_filter=lambda fn: True):
    """
    Best-effort removal of everything inside *path*, leaving the directory
    itself in place.  All failures are ignored.

    Args:
        path: directory whose entries should be deleted.
        file_name_filter: predicate deciding which entries may be removed.
    """
    if not os.path.exists(path):
        return
    for entry in os.listdir(path):
        TryToCleanPath(os.path.join(path, entry), file_name_filter)
def TryToCleanPath(path, file_name_filter=lambda fn: True):
    """
    Removes a file or directory.
    Ignores all failures.

    Args:
        path: file or directory to remove.
        file_name_filter: predicate; paths for which it returns False
            are skipped.
    """
    if os.path.exists(path):
        if file_name_filter(path):
            print 'Trying to remove %s' % path
            if os.path.isdir(path):
                # ignore_errors makes directory removal best-effort.
                shutil.rmtree(path, ignore_errors=True)
            else:
                try:
                    os.remove(path)
                except Exception:
                    # Deliberately best-effort: failures are ignored.
                    pass
        else:
            print 'Skipping %s' % path
def Retry(op, *args):
    """
    Invoke op(*args).  On Windows, retry up to 5 times with exponential
    backoff before letting the exception propagate; elsewhere run it once.
    """
    # Windows seems to be prone to having commands that delete files or
    # directories fail.  We currently do not have a complete understanding why,
    # and as a workaround we simply retry the command a few times.
    # It appears that file locks are hanging around longer than they should.
    # This may be a secondary effect of processes hanging around longer than
    # they should.  This may be because when we kill a browser sel_ldr does not
    # exit immediately, etc.
    # Virus checkers can also accidently prevent files from being deleted, but
    # that shouldn't be a problem on the bots.
    if GetHostPlatform() == 'win':
        count = 0
        while True:
            try:
                op(*args)
                break
            except Exception:
                print "FAILED: %s %s" % (op.__name__, repr(args))
                count += 1
                if count < 5:
                    print "RETRY: %s %s" % (op.__name__, repr(args))
                    # Exponential backoff: 2, 4, 8, 16 seconds.
                    time.sleep(pow(2, count))
                else:
                    # Don't mask the exception.
                    raise
    else:
        op(*args)
def _RemoveDirectory(path):
    # Single (non-retrying) removal attempt; RemoveDirectory wraps this
    # with Retry for Windows flakiness.
    print 'Removing %s' % path
    if os.path.exists(path):
        shutil.rmtree(path)
        print ' Succeeded.'
    else:
        print ' Path does not exist, nothing to do.'
def RemoveDirectory(path):
    """
    Delete *path* if it exists.

    Failures are not masked, although on Windows the removal is retried a
    few times before giving up (see Retry).
    """
    Retry(_RemoveDirectory, path)
# This is a sanity check so Command can print out better error information.
def FileCanBeFound(name, paths):
    """
    Return True if *name* resolves either directly (relative to the CWD or
    as an absolute path) or via the PATH-style string *paths*.
    """
    # CWD / absolute path.
    if os.path.exists(name):
        return True
    # Paths with directory components are never resolved via PATH.
    if os.path.dirname(name):
        return False
    # Search each PATH entry.
    return any(os.path.exists(os.path.join(entry, name))
               for entry in paths.split(os.pathsep))
def RemoveGypBuildDirectories():
    """Delete every gyp output directory, on every platform."""
    # Remove all directories on all platforms.  Overkill, but it allows for
    # straight-line code.
    gyp_output_dirs = (
        # Windows
        'build/Debug',
        'build/Release',
        'build/Debug-Win32',
        'build/Release-Win32',
        'build/Debug-x64',
        'build/Release-x64',
        # Linux and Mac
        '../xcodebuild',
        '../out',
        'src/third_party/nacl_sdk/arm-newlib',
    )
    for directory in gyp_output_dirs:
        RemoveDirectory(directory)
def RemoveSconsBuildDirectories():
    """Delete the SCons and breakpad output directories."""
    for directory in ('scons-out', 'breakpad-out'):
        RemoveDirectory(directory)
# Execute a command using Python's subprocess module.
def Command(context, cmd, cwd=None):
    """
    Run *cmd* (an argv list) via subprocess using the environment overrides
    from *context*, honoring context['dry_run'].

    Returns:
        the subprocess return code (always 0 when it returns).

    Raises:
        StepFailed: if the command exits with a non-zero status.
    """
    print 'Running command: %s' % ' '.join(cmd)

    # Python's subprocess has a quirk.  A subprocess can execute with an
    # arbitrary, user-defined environment.  The first argument of the command,
    # however, is located using the PATH variable of the Python script that is
    # launching the subprocess.  Modifying the PATH in the environment passed
    # to the subprocess does not affect Python's search for the first argument
    # of the command (the executable file.)  This is a little counter
    # intuitive, so we're forcing the search to use the same PATH variable as
    # is seen by the subprocess.
    env = context.MakeCommandEnv()
    script_path = os.environ['PATH']
    os.environ['PATH'] = env['PATH']

    try:
        if FileCanBeFound(cmd[0], env['PATH']) or context['dry_run']:
            # Make sure that print statements before the subprocess call have
            # been flushed, otherwise the output of the subprocess call may
            # appear before the print statements.
            sys.stdout.flush()
            if context['dry_run']:
                retcode = 0
            else:
                retcode = subprocess.call(cmd, cwd=cwd, env=env)
        else:
            # Provide a nicer failure message.
            # If subprocess cannot find the executable, it will throw a
            # cryptic exception.
            print 'Executable %r cannot be found.' % cmd[0]
            retcode = 1
    finally:
        # Always restore this script's original PATH.
        os.environ['PATH'] = script_path

    print 'Command return code: %d' % retcode
    if retcode != 0:
        raise StepFailed()
    return retcode
# A specialized version of CommandStep.
def SCons(context, mode=None, platform=None, parallel=False, browser_test=False,
          args=(), cwd=None):
    """
    Invoke an SCons build through Command(), deriving defaults and extra
    flags from *context*.
    """
    if mode is None:
        mode = context['default_scons_mode']
    if platform is None:
        platform = context['default_scons_platform']
    jobs = context['max_jobs'] if parallel else 1

    cmd = []
    if browser_test and context.Linux():
        # Although we could use the "browser_headless=1" Scons option, it runs
        # xvfb-run once per Chromium invocation.  This is good for isolating
        # the tests, but xvfb-run has a stupid fixed-period sleep, which would
        # slow down the tests unnecessarily.
        cmd += ['xvfb-run', '--auto-servernum']
    cmd += [
        sys.executable, 'scons.py',
        '--verbose',
        '-k',
        '-j%d' % jobs,
        '--mode=' + ','.join(mode),
        'platform=' + platform,
    ]
    cmd += context['scons_args']
    # Optional flags, in the same order the original emitted them.
    for key, flag in (('clang', '--clang'),
                      ('asan', '--asan'),
                      ('use_glibc', '--nacl_glibc'),
                      ('pnacl', 'bitcode=1')):
        if context[key]:
            cmd.append(flag)
    if context['use_breakpad_tools']:
        cmd.append('breakpad_tools_dir=breakpad-out')
    # Append used-specified arguments.
    cmd.extend(args)
    Command(context, cmd, cwd)
class StepFailed(Exception):
    """
    Thrown when a build step has failed (e.g. a command exited non-zero).
    """
class StopBuild(Exception):
    """
    Thrown when the entire build should stop.  This does not indicate a
    failure, in of itself.
    """
class Step(object):
    """
    This class is used in conjunction with a Python "with" statement to ensure
    that the preamble and postamble of each build step gets printed and
    failures get logged.  This class also ensures that exceptions thrown
    inside a "with" statement don't take down the entire build.
    """

    def __init__(self, name, status, halt_on_fail=True):
        self.status = status

        # Optional suffix, set via --step-suffix, appended to every step name.
        if 'step_suffix' in status.context:
            suffix = status.context['step_suffix']
        else:
            suffix = ''
        self.name = name + suffix
        self.halt_on_fail = halt_on_fail
        self.step_failed = False

    # Called on entry to a 'with' block.
    def __enter__(self):
        # '@@@BUILD_STEP ...@@@' is a buildbot annotator directive.
        print
        print '@@@BUILD_STEP %s@@@' % self.name
        self.status.ReportBegin(self.name)

    # The method is called on exit from a 'with' block - even for non-local
    # control flow, i.e. exceptions, breaks, continues, returns, etc.
    # If an exception is thrown inside a block wrapped with a 'with' statement,
    # the __exit__ handler can suppress the exception by returning True.  This
    # is used to isolate each step in the build - if an exception occurs in a
    # given step, the step is treated as a failure.  This allows the postamble
    # for each step to be printed and also allows the build to continue of the
    # failure of a given step doesn't halt the build.
    def __exit__(self, type, exception, trace):
        if exception is None:
            # If exception is None, no exception occurred.
            step_failed = False
        elif isinstance(exception, StepFailed):
            # Expected failure: the step already reported its own error.
            step_failed = True
            print
            print 'Halting build step because of failure.'
            print
        else:
            # Unexpected exception: dump the traceback for diagnosis.
            step_failed = True
            print
            print 'The build step threw an exception...'
            print
            traceback.print_exception(type, exception, trace, file=sys.stdout)
            print

        if step_failed:
            self.status.ReportFail(self.name)
            print '@@@STEP_FAILURE@@@'
            if self.halt_on_fail:
                print
                print 'Entire build halted because %s failed.' % self.name
                raise StopBuild()
        else:
            self.status.ReportPass(self.name)

        # Suppress any exception that occurred.
        return True
# Adds an arbitrary link inside the build stage on the waterfall.
def StepLink(text, link):
    # '@@@STEP_LINK@...@@@' is a buildbot annotator directive.
    print '@@@STEP_LINK@%s@%s@@@' % (text, link)
# Adds arbitrary text inside the build stage on the waterfall.
def StepText(text):
    # '@@@STEP_TEXT@...@@@' is a buildbot annotator directive.
    print '@@@STEP_TEXT@%s@@@' % (text)
class BuildStatus(object):
    """
    Keeps track of the overall status of the build.
    """

    def __init__(self, context):
        self.context = context
        # Becomes True once any step reports failure.
        self.ever_failed = False
        # List of (step_name, 'passed'|'failed') in execution order.
        self.steps = []

    def ReportBegin(self, name):
        pass

    def ReportPass(self, name):
        self.steps.append((name, 'passed'))

    def ReportFail(self, name):
        self.steps.append((name, 'failed'))
        self.ever_failed = True

    # Handy info when this script is run outside of the buildbot.
    def DisplayBuildStatus(self):
        print
        for step, status in self.steps:
            print '%-40s[%s]' % (step, status)
        print

        if self.ever_failed:
            print 'Build failed.'
        else:
            print 'Build succeeded.'

    def ReturnValue(self):
        # Process exit code: 0 on success, 1 if anything failed.
        return int(self.ever_failed)
class BuildContext(object):
    """
    Encapsulates the information needed for running a build command: the
    environment-variable overrides and the default arguments for SCons
    invocations.
    """
    # Only allow these attributes on objects of this type.
    __slots__ = ['status', 'global_env', 'config']

    def __init__(self):
        # The contents of global_env override os.environ for any commands run
        # via self.Command(...).  PATH is a special case -- see Command.
        self.global_env = {'PATH': os.environ.get('PATH', '')}
        self.config = {}
        self['dry_run'] = False

    # --- dictionary-style access to the configuration ----------------------
    def __getitem__(self, key):
        return self.config[key]

    def __setitem__(self, key, value):
        self.config[key] = value

    def __contains__(self, key):
        return key in self.config

    # --- host-platform predicates -------------------------------------------
    def Windows(self):
        return self.config['platform'] == 'win'

    def Linux(self):
        return self.config['platform'] == 'linux'

    def Mac(self):
        return self.config['platform'] == 'mac'

    # --- environment overrides ----------------------------------------------
    def GetEnv(self, name):
        return self.global_env[name]

    def SetEnv(self, name, value):
        # Environment values are always stored as strings.
        self.global_env[name] = str(value)

    def MakeCommandEnv(self):
        # The external environment is not sanitized; arbitrary variables can
        # be overridden by entries registered through SetEnv.
        merged = dict(os.environ)
        merged.update(self.global_env)
        return merged
def RunBuild(script, status):
    """
    Run *script*(status, context), emit a final 'summary' step, print the
    build summary and exit the process with the build's return value.
    """
    try:
        script(status, status.context)
    except StopBuild:
        # A halting step failure; already reported by Step.__exit__.
        pass

    # Emit a summary step for three reasons:
    # - The annotator will attribute non-zero exit status to the last build
    #   step.  This can misattribute failures to the last build step.
    # - runtest.py wraps the builds to scrape perf data.  It emits an
    #   annotator tag on exit which misattributes perf results to the last
    #   build step.
    # - Provide a label step in which to show summary result.
    #   Otherwise these go back to the preamble.
    with Step('summary', status):
        if status.ever_failed:
            print 'There were failed stages.'
        else:
            print 'Success.'

    # Display a summary of the build.
    status.DisplayBuildStatus()
    sys.exit(status.ReturnValue())
|
|
from JumpScale import j
import ujson
class system_contentmanager(j.code.classGetBase()):
    """
    this actor manages all content on the wiki
    can e.g. notify wiki/appserver of updates of content
    """

    def __init__(self):
        # Template-engine cache, populated elsewhere by the portal framework.
        self._te = {}
        self.actorname = "contentmanager"
        self.appname = "system"
        # In-memory key/value store scoped to "system.contentmanager".
        self.dbmem = j.db.keyvaluestore.getMemoryStore('%s.%s' % (self.appname, self.actorname))

    def getActors(self, **args):
        """
        result list(str)
        """
        return j.core.portal.active.actorsloader.actors.keys()

    def getActorsWithPaths(self, **args):
        """
        result list([name,path])
        """
        actors = []
        for actor in j.core.portal.active.actorsloader.id2object.keys():
            actor = j.core.portal.active.actorsloader.id2object[actor]
            actors.append([actor.model.id, actor.model.path])
        return actors

    def getBuckets(self, **args):
        """
        result list(str)
        """
        return j.core.portal.active.bucketsloader.buckets.keys()

    def getBucketsWithPaths(self, **args):
        """
        result list([name,path])
        """
        buckets = []
        for bucket in j.core.portal.active.bucketsloader.id2object.keys():
            bucket = j.core.portal.active.bucketsloader.id2object[bucket]
            buckets.append([bucket.model.id, bucket.model.path])
        return buckets

    def getContentDirsWithPaths(self, **args):
        """
        return root dirs of content (actors,buckets,spaces)
        result list([name,path])
        """
        objects = []
        for objectname in j.core.portal.active.contentdirs.keys():
            objectpath = j.core.portal.active.contentdirs[objectname]
            objects.append([objectname, objectpath])
        return objects

    def getSpaces(self, **args):
        """
        result list(str)
        """
        return j.core.portal.active.spacesloader.spaces.keys()

    def getSpacesWithPaths(self, **args):
        """
        result list([name,path])
        """
        spaces = []
        for space in j.core.portal.active.spacesloader.spaces.keys():
            space = j.core.portal.active.spacesloader.spaces[space]
            spaces.append([space.model.id, space.model.path])
        return spaces

    def modelobjectlist(self, namespace, category, key, **args):
        """
        @todo describe what the goal is of this method
        param:appname
        param:actorname
        param:modelname
        param:key
        """
        # Delegates rendering of the model objects to the datatables extension.
        dtext = j.apps.system.contentmanager.extensions.datatables
        data = dtext.getData(namespace, category, key, **args)
        return data

    def modelobjectupdate(self, appname, actorname, key, **args):
        """
        post args with ref_$id which refer to the key which is stored per actor in the cache
        param:appname
        param:actorname
        param:key
        result html
        """
        actor = j.apps.__dict__[appname].__dict__[actorname]
        ctx = args["ctx"]
        # Cached form data: data[0] maps key -> model object,
        # data[1] maps ref index -> (key, attribute path).
        data = actor.dbmem.cacheGet("form_%s" % key)
        for ref in [item for item in ctx.params.keys() if item.find("ref") == 0]:
            ref0 = int(ref.replace("ref_", ""))
            key, refS = data[1][ref0]  # @ref is how to retrieve info from the object
            model = data[0][key]
            # NOTE(review): dynamic exec on cached attribute paths; assumes
            # refS comes only from trusted, server-side cached form data.
            exec("model.%s=args[\"%s\"]" % (refS, ref))
        for modelkey in data[0].keys():
            model = data[0][modelkey]
            exec("actor.model_%s_set(model)" % model._meta[2])
        if 'HTTP_REFERER' in ctx.env:
            # Redirect back to the page the form was submitted from.
            headers = [('Location', ctx.env['HTTP_REFERER'])]
            ctx.start_response('302', headers)

    def notifyActorDelete(self, id, **args):
        """
        param:id id of space which changed
        result bool
        """
        self.reloadAll(id)

    def bitbucketreload(self, spacename, **args):
        # Pull/update the mercurial checkout this portal runs from, then
        # reload the given space (or this actor's own app space).
        import os
        s = os.getcwd()
        path = s.split('/apps/')[0]
        mc = j.clients.mercurial.getClient(path)
        mc.pullupdate()
        if spacename != 'None':
            j.core.portal.active.loadSpace(spacename)
        else:
            j.core.portal.active.loadSpace(self.appname)
        return []

    def reloadAll(self, id):
        def reloadApp():
            print "RELOAD APP FOR ACTORS Delete"
            j.core.portal.active.reset()
            j.core.portal.active.actorsloader.id2object.pop(id)
        # Schedule two delayed reloads so in-flight requests can complete.
        j.core.portal.active.scheduler.scheduleFromNow(2, 9, reloadApp)
        j.core.portal.active.scheduler.scheduleFromNow(10, 9, reloadApp)

    def notifyActorModification(self, id, **args):
        """
        param:id id of actor which changed
        result bool
        """
        loaders = j.core.portal.active.actorsloader
        loader = loaders.getLoaderFromId(id)
        loader.reset()

    def notifyActorNew(self, path, name, **args):
        """
        param:path path of content which got changed
        param:name name
        result bool
        """
        result = False
        key = name.strip().lower()
        # print "name:%s"%name
        if name.find("__") == -1:
            raise RuntimeError("Cannot create actor with name which is not constructed as $appname__$actorname, here %s" % name)
        appname, actorname = name.split("__")
        path = path
        if key not in j.core.portal.active.actorsloader.actors:
            # actor does not exist yet, create required dirs in basedir
            if path == "":
                path = j.system.fs.joinPaths(j.core.portal.active.basepath, "actors", key)
                j.system.fs.createDir(path)
                j.system.fs.createDir(j.system.fs.joinPaths(path, ".actor"))
            else:
                j.system.fs.createDir(path)
                j.system.fs.createDir(j.system.fs.joinPaths(path, ".actor"))
            print "scan path:%s" % path
            j.core.portal.active.actorsloader.scan(path)
            result = True
        else:
            result = False
        return result

    def notifyActorNewDir(self, actorname, actorpath, path, **args):
        """
        param:actorname
        param:actorpath
        param:path
        """
        # put your code here to implement this method
        raise NotImplementedError("not implemented method notifyActorNewDir")

    def notifyBucketDelete(self, id, **args):
        """
        param:id id of bucket which changed
        result bool
        """
        result = None
        # immediate remove
        loaders = j.core.portal.active.bucketsloader
        loaders.removeLoader(id)

        def reloadApp(id=None):
            j.core.portal.active.loadSpaces(reset=True)
            # loader.pop(id)
        # j.core.portal.active.scheduler.scheduleFromNow(1,9,reloadApp,id=id)
        j.core.portal.active.scheduler.scheduleFromNow(10, 9, reloadApp, id=id)
        return result

    def notifyBucketModification(self, id, **args):
        """
        param:id id of bucket which changed
        result bool
        """
        loaders = j.core.portal.active.bucketsloader
        loader = loaders.getLoaderFromId(id)
        loader.reset()

    def notifyBucketNew(self, path, name, **args):
        """
        param:path path of content which got changed
        param:name name
        result bool
        """
        result = False
        key = name.strip().lower()
        path = path
        loader = j.core.portal.active.bucketsloader
        if key not in loader.id2object:
            # does not exist yet, create required dirs in basedir
            if path == "":
                path = j.system.fs.joinPaths(j.core.portal.active.basepath, "buckets", key)
                j.system.fs.createDir(path)
                j.system.fs.createDir(j.system.fs.joinPaths(path, ".bucket"))
            else:
                j.system.fs.createDir(path)
                j.system.fs.createDir(j.system.fs.joinPaths(path, ".bucket"))
            loader.scan(path)
            result = True
        else:
            result = False
        return result

    def notifyFiledir(self, path, **args):
        """
        param:path path of content which got changed
        result bool
        """
        # put your code here to implement this method
        raise NotImplementedError("not implemented method notifyFiledir")

    def notifySpaceDelete(self, id, **args):
        """
        param:id id of space which changed
        result bool
        """
        # immediate remove
        loaders = j.core.portal.active.spacesloader
        loaders.removeLoader(id)

        def reloadApp():
            print "RELOAD APP SPACE DELETE"
            j.core.portal.active.loadSpaces(reset=True)
            # loader=j.core.portal.active.spacesloader.id2object
            # loader.pop(id)
        j.core.portal.active.scheduler.scheduleFromNow(10, 9, reloadApp)

    def notifySpaceModification(self, id, **args):
        """
        param:id id of space which changed
        result bool
        """
        id = id.lower()
        loaders = j.core.portal.active.spacesloader
        loader = loaders.getLoaderFromId(id)
        loader.reset()
        ctx = args["ctx"]
        if ctx.params.has_key("payload"):
            # Bitbucket-style post-commit hook: pull/update the repository
            # named in the JSON payload.
            payload = ujson.loads(ctx.params["payload"])
            owner = payload["repository"]["owner"]
            name = payload["repository"]["name"]
            cmd = "cd /opt/code/%s/%s;hg pull;hg update -C" % (owner, name)
            print "execute %s" % cmd
            j.system.process.execute(cmd)

    def notifySpaceNew(self, path, name, **args):
        """
        param:path path of content which got changed
        param:name name
        result bool
        """
        result = False
        key = name.strip().lower()
        path = path
        loader = j.core.portal.active.spacesloader
        if key not in loader.id2object:
            # does not exist yet, create required dirs in basedir
            if path == "":
                path = j.system.fs.joinPaths(j.core.portal.active.basepath, "spaces", name)
            else:
                j.system.fs.createDir(path)
            # create default content
            mddir = j.system.fs.joinPaths(path, ".space")
            dest = j.system.fs.joinPaths(path, "%s.wiki" % name)
            j.system.fs.createDir(mddir)
            loader.scan(path)
            source = j.system.fs.joinPaths(mddir, "template.wiki")
            j.system.fs.copyFile(source, dest)
            result = True
        else:
            result = False
        return result

    def notifySpaceNewDir(self, spacename, spacepath, path, **args):
        """
        param:spacename
        param:spacepath
        param:path
        """
        args = {}
        args["spacename"] = spacename
        args["spacepath"] = spacepath
        args["path"] = path
        return self._te["notifySpaceNewDir"].execute4method(args, params={}, actor=self)

    def prepareActorSpecs(self, app, actor, **args):
        """
        compress specs for specific actor and targz in appropriate download location
        param:app name of app
        param:actor name of actor
        result bool
        """
        result = None
        actorname = actor
        appname = app
        filesroot = j.core.portal.active.filesroot
        actorloader = j.core.portal.active.actorsloader.id2object["%s__%s" % (appname, actorname)]
        path = j.system.fs.joinPaths(actorloader.model.path, "specs")
        pathdest = j.system.fs.joinPaths(filesroot, "specs", "%s_%s.tgz" % (appname, actorname))
        j.system.fs.remove(pathdest)
        # j.system.fs.createDir(j.system.fs.joinPaths("files","specs"))
        if not j.system.fs.exists(path):
            return {"error": "could not find spec path for app %s actor %s" % (appname, actorname)}
        else:
            j.system.fs.targzCompress(path, pathdest)
        return result

    def wikisave(self, cachekey, text, **args):
        """
        param:cachekey key to the doc
        param:text content of file to edit
        result bool
        """
        contents = j.apps.system.contentmanager.dbmem.cacheGet(cachekey)
        j.system.fs.writeFile(contents['path'], text)
        returnpath = "/%s/%s" % (contents['space'], contents['page'])
        if contents['querystr']:
            returnpath += "?%s" % contents['querystr']
        # Emit a tiny HTML snippet that redirects the browser back to the page.
        returncontent = "<script>window.open('%s', '_self', '');</script>" % returnpath
        return returncontent
|
|
""" Models for representing top-level plot objects.
"""
from __future__ import absolute_import
from six import string_types
import warnings
from ..core.query import find
from ..core import validation
from ..core.validation.warnings import (MISSING_RENDERERS, NO_DATA_RENDERERS,
EMPTY_LAYOUT, MALFORMED_CATEGORY_LABEL)
from ..core.enums import Location
from ..core.property_mixins import LineProps, TextProps, FillProps
from ..model import Model
from ..core.properties import (Bool, Int, String, Enum, Auto, Instance, Either,
List, Dict, Include, Override)
from ..util.string import nice_join
from ..core.validation.errors import REQUIRED_RANGE
from .glyphs import Glyph
from .ranges import Range, Range1d, FactorRange
from .renderers import Renderer, GlyphRenderer, DataRenderer, TileRenderer, DynamicImageRenderer
from .sources import DataSource, ColumnDataSource
from .tools import Tool, ToolEvents
from .component import Component
def _select_helper(args, kwargs):
    """
    Normalize the flexible ``select`` call signatures into a selector dict.

    Accepts exactly one of:
      * a single positional dict (used as the selector verbatim),
      * a single positional string (shorthand for ``{'name': arg}``),
      * a single positional Model subclass (shorthand for ``{'type': arg}``),
      * keyword arguments forming the selector.

    Returns:
        a dict

    Raises:
        TypeError: if more than one positional argument is given, or both
            (or neither) positional and keyword forms are used.
        RuntimeError: if the single positional argument has an unsupported
            type.
    """
    if len(args) > 1:
        raise TypeError("select accepts at most ONE positional argument.")

    if len(args) > 0 and len(kwargs) > 0:
        raise TypeError("select accepts EITHER a positional argument, OR keyword arguments (not both).")

    if len(args) == 0 and len(kwargs) == 0:
        raise TypeError("select requires EITHER a positional argument, OR keyword arguments.")

    if args:
        arg = args[0]
        if isinstance(arg, dict):
            selector = arg
        elif isinstance(arg, string_types):
            selector = dict(name=arg)
        # BUG FIX: issubclass() raises TypeError when its first argument is
        # not a class (e.g. a stray integer), which would mask the intended
        # RuntimeError below -- so check isinstance(arg, type) first.
        elif isinstance(arg, type) and issubclass(arg, Model):
            selector = {"type": arg}
        else:
            raise RuntimeError("Selector must be a dictionary, string or plot object.")
    else:
        selector = kwargs

    return selector
class LayoutBox(Model):
    ''' Represents an **on-canvas** layout.

    Marker model with no properties of its own beyond those of Model.
    '''
class Plot(Component):
""" Model representing a plot, containing glyphs, guides, annotations.
"""
def __init__(self, **kwargs):
if "tool_events" not in kwargs:
kwargs["tool_events"] = ToolEvents()
if "border_fill" in kwargs and "border_fill_color" in kwargs:
raise ValueError("Conflicting properties set on plot: border_fill, border_fill_color.")
if "background_fill" in kwargs and "background_fill_color" in kwargs:
raise ValueError("Conflicting properties set on plot: background_fill, background_fill_color.")
super(Plot, self).__init__(**kwargs)
    def select(self, *args, **kwargs):
        ''' Query this object and all of its references for objects that
        match the given selector.

        There are a few different ways to call the ``select`` method.
        The most general is to supply a JSON-like query dictionary as the
        single argument or as keyword arguments:

        Args:
            selector (JSON-like) : some sample text

        Keyword Arguments:
            kwargs : query dict key/values as keyword arguments

        For convenience, queries on just names can be made by supplying
        the ``name`` string as the single parameter:

        Args:
            name (str) : the name to query on

        Also queries on just type can be made simply by supplying the
        ``Model`` subclass as the single parameter:

        Args:
            type (Model) : the type to query on

        Returns:
            seq[Model]

        Examples:

            .. code-block:: python

                # These two are equivalent
                p.select({"type": HoverTool})
                p.select(HoverTool)

                # These two are also equivalent
                p.select({"name": "mycircle"})
                p.select("mycircle")

                # Keyword arguments can be supplied in place of selector dict
                p.select({"name": "foo", "type": HoverTool})
                p.select(name="foo", type=HoverTool)
        '''
        selector = _select_helper(args, kwargs)

        # Want to pass selector that is a dictionary
        # Imported here to avoid a circular import at module load time.
        from ..plotting.helpers import _list_attr_splat
        return _list_attr_splat(find(self.references(), selector, {'plot': self}))
def row(self, row, gridplot):
''' Return whether this plot is in a given row of a GridPlot.
Args:
row (int) : index of the row to test
gridplot (GridPlot) : the GridPlot to check
Returns:
bool
'''
return self in gridplot.row(row)
def column(self, col, gridplot):
''' Return whether this plot is in a given column of a GridPlot.
Args:
col (int) : index of the column to test
gridplot (GridPlot) : the GridPlot to check
Returns:
bool
'''
return self in gridplot.column(col)
def add_layout(self, obj, place='center'):
''' Adds an object to the plot in a specified place.
Args:
obj (Renderer) : the object to add to the Plot
place (str, optional) : where to add the object (default: 'center')
Valid places are: 'left', 'right', 'above', 'below', 'center'.
Returns:
None
'''
valid_places = ['left', 'right', 'above', 'below', 'center']
if place not in valid_places:
raise ValueError(
"Invalid place '%s' specified. Valid place values are: %s" % (place, nice_join(valid_places))
)
if hasattr(obj, 'plot'):
if obj.plot is not None:
raise ValueError("object to be added already has 'plot' attribute set")
obj.plot = self
self.renderers.append(obj)
if place is not 'center':
getattr(self, place).append(obj)
def add_tools(self, *tools):
''' Adds an tools to the plot.
Args:
*tools (Tool) : the tools to add to the Plot
Returns:
None
'''
if not all(isinstance(tool, Tool) for tool in tools):
raise ValueError("All arguments to add_tool must be Tool subclasses.")
for tool in tools:
if tool.plot is not None:
raise ValueError("tool %s to be added already has 'plot' attribute set" % tool)
tool.plot = self
if hasattr(tool, 'overlay'):
self.renderers.append(tool.overlay)
self.tools.append(tool)
def add_glyph(self, source_or_glyph, glyph=None, **kw):
''' Adds a glyph to the plot with associated data sources and ranges.
This function will take care of creating and configuring a Glyph object,
and then add it to the plot's list of renderers.
Args:
source (DataSource) : a data source for the glyphs to all use
glyph (Glyph) : the glyph to add to the Plot
Keyword Arguments:
Any additional keyword arguments are passed on as-is to the
Glyph initializer.
Returns:
Glyph
'''
if glyph is not None:
source = source_or_glyph
else:
source, glyph = ColumnDataSource(), source_or_glyph
if not isinstance(source, DataSource):
raise ValueError("'source' argument to add_glyph() must be DataSource subclass")
if not isinstance(glyph, Glyph):
raise ValueError("'glyph' argument to add_glyph() must be Glyph subclass")
g = GlyphRenderer(data_source=source, glyph=glyph, **kw)
self.renderers.append(g)
return g
def add_tile(self, tile_source, **kw):
'''Adds new TileRenderer into the Plot.renderers
Args:
tile_source (TileSource) : a tile source instance which contain tileset configuration
Keyword Arguments:
Additional keyword arguments are passed on as-is to the tile renderer
Returns:
TileRenderer : TileRenderer
'''
tile_renderer = TileRenderer(tile_source=tile_source, **kw)
self.renderers.append(tile_renderer)
return tile_renderer
def add_dynamic_image(self, image_source, **kw):
'''Adds new DynamicImageRenderer into the Plot.renderers
Args:
image_source (ImageSource) : a image source instance which contain image configuration
Keyword Arguments:
Additional keyword arguments are passed on as-is to the dynamic image renderer
Returns:
DynamicImageRenderer : DynamicImageRenderer
'''
image_renderer = DynamicImageRenderer(image_source=image_source, **kw)
self.renderers.append(image_renderer)
return image_renderer
@validation.error(REQUIRED_RANGE)
def _check_required_range(self):
missing = []
if not self.x_range: missing.append('x_range')
if not self.y_range: missing.append('y_range')
if missing:
return ", ".join(missing) + " [%s]" % self
@validation.warning(MISSING_RENDERERS)
def _check_missing_renderers(self):
if len(self.renderers) == 0:
return str(self)
@validation.warning(NO_DATA_RENDERERS)
def _check_no_data_renderers(self):
if len(self.select(DataRenderer)) == 0:
return str(self)
    @validation.warning(MALFORMED_CATEGORY_LABEL)
    def _check_colon_in_category_label(self):
        # Categorical factor labels containing ':' break BokehJS parsing of
        # fractional-offset coordinates like "foo:0.5", so flag them early.
        if not self.x_range: return
        if not self.y_range: return
        broken = []
        for range_name in ['x_range', 'y_range']:
            category_range = getattr(self, range_name)
            # Only categorical (FactorRange) axes can have string labels.
            if not isinstance(category_range, FactorRange): continue
            for value in category_range.factors:
                # Non-string factors mean this is not a label-based range;
                # stop scanning this range entirely.
                if not isinstance(value, string_types): break
                if ':' in value:
                    # Record only the FIRST offending label per range.
                    broken.append((range_name, value))
                    break
        if broken:
            field_msg = ' '.join('[range:%s] [first_value: %s]' % (field, value)
                                 for field, value in broken)
            return '%s [renderer: %s]' % (field_msg, self)
__deprecated_attributes__ = ('background_fill', 'border_fill')
x_range = Instance(Range, help="""
The (default) data range of the horizontal dimension of the plot.
""")
y_range = Instance(Range, help="""
The (default) data range of the vertical dimension of the plot.
""")
x_mapper_type = Either(Auto, String, help="""
What kind of mapper to use to convert x-coordinates in data space
into x-coordinates in screen space.
Typically this can be determined automatically, but this property
can be useful to, e.g., show datetime values as floating point
"seconds since epoch" instead of formatted dates.
""")
y_mapper_type = Either(Auto, String, help="""
What kind of mapper to use to convert y-coordinates in data space
into y-coordinates in screen space.
Typically this can be determined automatically, but this property
can be useful to, e.g., show datetime values as floating point
"seconds since epoch" instead of formatted dates
""")
extra_x_ranges = Dict(String, Instance(Range1d), help="""
Additional named ranges to make available for mapping x-coordinates.
This is useful for adding additional axes.
""")
extra_y_ranges = Dict(String, Instance(Range), help="""
Additional named ranges to make available for mapping y-coordinates.
This is useful for adding additional axes.
""")
hidpi = Bool(default=True, help="""
Whether to use HiDPI mode when available.
""")
title_standoff = Int(default=8, help="""
How far (in screen units) to place a title away from the central
plot region.
""")
title = String('', help="""
A title for the plot.
""")
title_props = Include(TextProps, help="""
The %s for the plot title.
""")
title_text_align = Override(default='center')
title_text_baseline = Override(default='alphabetic')
title_text_font_size = Override(default={ 'value' : '20pt' })
outline_props = Include(LineProps, help="""
The %s for the plot border outline.
""")
outline_line_color = Override(default="#aaaaaa")
renderers = List(Instance(Renderer), help="""
A list of all renderers for this plot, including guides and annotations
in addition to glyphs and markers.
This property can be manipulated by hand, but the ``add_glyph`` and
``add_layout`` methods are recommended to help make sure all necessary
setup is performed.
""")
tools = List(Instance(Tool), help="""
A list of tools to add to the plot.
""")
tool_events = Instance(ToolEvents, help="""
A ToolEvents object to share and report tool events.
""")
left = List(Instance(Renderer), help="""
A list of renderers to occupy the area to the left of the plot.
""")
right = List(Instance(Renderer), help="""
A list of renderers to occupy the area to the right of the plot.
""")
# TODO (bev) LayoutBox here is a temporary workaround to the fact that
# plot titles are not proper renderers
above = List(Either(Instance(Renderer), Instance(LayoutBox)), help="""
A list of renderers to occupy the area above of the plot.
""")
below = List(Instance(Renderer), help="""
A list of renderers to occupy the area below of the plot.
""")
toolbar_location = Enum(Location, help="""
Where the toolbar will be located. If set to None, no toolbar
will be attached to the plot.
""")
logo = Enum("normal", "grey", help="""
What version of the Bokeh logo to display on the toolbar. If
set to None, no logo will be displayed.
""")
plot_height = Int(600, help="""
Total height of the entire plot (including any axes, titles,
border padding, etc.)
.. note::
This corresponds directly to the height of the HTML
canvas that will be used.
""")
plot_width = Int(600, help="""
Total width of the entire plot (including any axes, titles,
border padding, etc.)
.. note::
This corresponds directly to the width of the HTML
canvas that will be used.
""")
    # Deprecated read/write alias for ``background_fill_color``; each access
    # emits a runtime warning.  Kept only for pre-0.11 API compatibility.
    @property
    def background_fill(self):
        warnings.warn(
            """
            Plot property 'background_fill' was deprecated in Bokeh
            0.11.0 and will be removed. Use 'background_fill_color' instead.
            """)
        return self.background_fill_color
    @background_fill.setter
    def background_fill(self, color):
        warnings.warn(
            """
            Plot property 'background_fill' was deprecated in Bokeh
            0.11.0 and will be removed. Use 'background_fill_color' instead.
            """)
        self.background_fill_color = color
    # Deprecated read/write alias for ``border_fill_color``; each access
    # emits a runtime warning.  Kept only for pre-0.11 API compatibility.
    @property
    def border_fill(self):
        warnings.warn(
            """
            Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and
            will be removed. Use 'border_fill_color' instead.
            """)
        return self.border_fill_color
    @border_fill.setter
    def border_fill(self, color):
        warnings.warn(
            """
            Plot property 'border_fill' was deprecated in Bokeh 0.11.0 and
            will be removed. Use 'border_fill_color' instead.
            """)
        self.border_fill_color = color
background_props = Include(FillProps, help="""
The %s for the plot background style.
""")
background_fill_color = Override(default='#ffffff')
border_props = Include(FillProps, help="""
The %s for the plot border style.
""")
border_fill_color = Override(default='#ffffff')
min_border_top = Int(50, help="""
Minimum size in pixels of the padding region above the top of the
central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border_bottom = Int(50, help="""
Minimum size in pixels of the padding region below the bottom of
the central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border_left = Int(50, help="""
Minimum size in pixels of the padding region to the left of
the central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border_right = Int(50, help="""
Minimum size in pixels of the padding region to the right of
the central plot region.
.. note::
This is a *minimum*. The padding region may expand as needed to
accommodate titles or axes, etc.
""")
min_border = Int(50, help="""
A convenience property to set all all the ``min_border_X`` properties
to the same value. If an individual border property is explicitly set,
it will override ``min_border``.
""")
h_symmetry = Bool(True, help="""
Whether the total horizontal padding on both sides of the plot will
be made equal (the left or right padding amount, whichever is larger).
""")
v_symmetry = Bool(False, help="""
Whether the total vertical padding on both sides of the plot will
be made equal (the top or bottom padding amount, whichever is larger).
""")
lod_factor = Int(10, help="""
Decimation factor to use when applying level-of-detail decimation.
""")
lod_threshold = Int(2000, help="""
A number of data points, above which level-of-detail downsampling may
be performed by glyph renderers. Set to ``None`` to disable any
level-of-detail downsampling.
""")
lod_interval = Int(300, help="""
Interval (in ms) during which an interactive tool event will enable
level-of-detail downsampling.
""")
lod_timeout = Int(500, help="""
Timeout (in ms) for checking whether interactive tool events are still
occurring. Once level-of-detail mode is enabled, a check is made every
``lod_timeout`` ms. If no interactive tool events have happened,
level-of-detail mode is disabled.
""")
webgl = Bool(False, help="""
Whether WebGL is enabled for this plot. If True, the glyphs that
support this will render via WebGL instead of the 2D canvas.
""")
responsive = Bool(False, help="""
If True, the plot will automatically resize based on the size of its container. The
aspect ratio of the plot will be preserved, but ``plot_width`` and ``plot_height`` will
act only to set the initial aspect ratio.
.. warning::
The responsive setting is known not to work with HBox layout and may not work
in combination with other widgets or layouts.
""")
class GridPlot(Component):
    """ A 2D grid of plots rendered on separate canvases in an HTML table.
    """
    # TODO (bev) really, GridPlot should be a layout, not a Plot subclass
    @validation.error(REQUIRED_RANGE)
    def _check_required_range(self):
        # Grid plots have no ranges of their own; suppress the inherited check.
        pass
    @validation.warning(MISSING_RENDERERS)
    def _check_missing_renderers(self):
        pass
    @validation.warning(NO_DATA_RENDERERS)
    def _check_no_data_renderers(self):
        pass
    @validation.warning(EMPTY_LAYOUT)
    def _check_empty_layout(self):
        from itertools import chain
        # BUGFIX: chain(self.children) iterated the rows themselves, so the
        # default empty layout [[]] was truthy and the warning never fired.
        # chain.from_iterable flattens the rows into actual child plots.
        if not list(chain.from_iterable(self.children)):
            return str(self)
    children = List(List(Instance(Plot)), default=[[]], help="""
    An array of plots to display in a grid, given as a list of lists of
    Plot objects. To leave a position in the grid empty, pass None for
    that position in the ``children`` list.
    """)
    border_space = Int(0, help="""
    Distance (in pixels) between adjacent plots.
    """)
    toolbar_location = Enum(Location, default="left", help="""
    Where the toolbar will be located. If set to None, no toolbar
    will be attached to the plot.
    """)
    def select(self, *args, **kwargs):
        ''' Query this object and all of its references for objects that
        match the given selector. See Plot.select for detailed usage information.

        Returns:
            seq[Model]

        '''
        selector = _select_helper(args, kwargs)
        # Want to pass selector that is a dictionary
        from ..plotting.helpers import _list_attr_splat
        return _list_attr_splat(find(self.references(), selector, {'gridplot': self}))
    def column(self, col):
        ''' Return a given column of plots from this GridPlot.

        Args:
            col (int) : index of the column to return

        Returns:
            seq[Plot] : column of plots (empty if the index is invalid)

        '''
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; an invalid index raises only these.
        try:
            return [row[col] for row in self.children]
        except (IndexError, TypeError):
            return []
    def row(self, row):
        ''' Return a given row of plots from this GridPlot.

        Args:
            row (int) : index of the row to return

        Returns:
            seq[Plot] : row of plots (empty if the index is invalid)

        '''
        try:
            return self.children[row]
        except (IndexError, TypeError):
            return []
# ---------------------------------------------------------------------------
# (file concatenation boundary — extraction artifact; stray '|' lines removed)
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from parameterized import parameterized
from airflow import AirflowException
from airflow.gcp.operators.spanner import (
CloudSpannerInstanceDatabaseDeleteOperator, CloudSpannerInstanceDatabaseDeployOperator,
CloudSpannerInstanceDatabaseQueryOperator, CloudSpannerInstanceDatabaseUpdateOperator,
CloudSpannerInstanceDeleteOperator, CloudSpannerInstanceDeployOperator,
)
from tests.compat import mock
# Shared fixture values used by every Spanner operator test below.
PROJECT_ID = 'project-id'
INSTANCE_ID = 'instance-id'
DB_ID = 'db1'
CONFIG_NAME = 'projects/project-id/instanceConfigs/eur3'
# Kept as a string and converted with int() at each use site.
NODE_COUNT = '1'
DISPLAY_NAME = 'Test Instance'
INSERT_QUERY = "INSERT my_table1 (id, name) VALUES (1, 'One')"
INSERT_QUERY_2 = "INSERT my_table2 (id, name) VALUES (1, 'One')"
CREATE_QUERY = "CREATE TABLE my_table1 (id INT64, name STRING(100))"
CREATE_QUERY_2 = "CREATE TABLE my_table2 (id INT64, name STRING(100))"
DDL_STATEMENTS = [CREATE_QUERY, CREATE_QUERY_2]
class TestCloudSpanner(unittest.TestCase):
    """Unit tests for the GCP Cloud Spanner operators.

    Every test patches ``SpannerHook`` so no real GCP calls are made; the
    assertions verify which hook methods each operator invokes, and with
    which arguments, for both the explicit-project-id and inherited
    (``project_id=None``) configurations.
    """
    # ------------------------------------------------------------------
    # CloudSpannerInstanceDeployOperator: creates when the instance is
    # missing, updates when it already exists.
    # ------------------------------------------------------------------
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_create(self, mock_hook):
        # get_instance -> None means the instance does not exist yet.
        mock_hook.return_value.get_instance.return_value = None
        op = CloudSpannerInstanceDeployOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
            task_id="id"
        )
        result = op.execute(None)  # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.create_instance.assert_called_once_with(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME
        )
        mock_hook.return_value.update_instance.assert_not_called()
        self.assertIsNone(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_create_missing_project_id(self, mock_hook):
        # Omitting project_id must propagate project_id=None to the hook,
        # which then falls back to the connection's default project.
        mock_hook.return_value.get_instance.return_value = None
        op = CloudSpannerInstanceDeployOperator(
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
            task_id="id"
        )
        result = op.execute(None)  # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.create_instance.assert_called_once_with(
            project_id=None,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME
        )
        mock_hook.return_value.update_instance.assert_not_called()
        self.assertIsNone(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_update(self, mock_hook):
        # An existing instance must be updated, never re-created.
        mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
        op = CloudSpannerInstanceDeployOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
            task_id="id"
        )
        result = op.execute(None)  # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.update_instance.assert_called_once_with(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME
        )
        mock_hook.return_value.create_instance.assert_not_called()
        self.assertIsNone(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_update_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
        op = CloudSpannerInstanceDeployOperator(
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
            task_id="id"
        )
        result = op.execute(None)  # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.update_instance.assert_called_once_with(
            project_id=None,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME
        )
        mock_hook.return_value.create_instance.assert_not_called()
        self.assertIsNone(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_create_aborts_and_succeeds_if_instance_exists(self, mock_hook):
        # NOTE(review): this scenario asserts only that create is skipped;
        # update_instance IS expected to run here (deploy == create-or-update).
        mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
        op = CloudSpannerInstanceDeployOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            configuration_name=CONFIG_NAME,
            node_count=int(NODE_COUNT),
            display_name=DISPLAY_NAME,
            task_id="id"
        )
        result = op.execute(None)  # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.create_instance.assert_not_called()
        self.assertIsNone(result)
    # Parameter validation: empty required args raise at construction time,
    # before any hook is instantiated.
    @parameterized.expand([
        ("", INSTANCE_ID, "project_id"),
        (PROJECT_ID, "", "instance_id"),
    ])
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_create_ex_if_param_missing(self, project_id, instance_id,
                                                 exp_msg, mock_hook):
        with self.assertRaises(AirflowException) as cm:
            CloudSpannerInstanceDeployOperator(
                project_id=project_id,
                instance_id=instance_id,
                configuration_name=CONFIG_NAME,
                node_count=int(NODE_COUNT),
                display_name=DISPLAY_NAME,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
    # ------------------------------------------------------------------
    # CloudSpannerInstanceDeleteOperator
    # ------------------------------------------------------------------
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_delete(self, mock_hook):
        mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
        op = CloudSpannerInstanceDeleteOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.delete_instance.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID
        )
        self.assertTrue(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_delete_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_instance.return_value = {"name": INSTANCE_ID}
        op = CloudSpannerInstanceDeleteOperator(
            instance_id=INSTANCE_ID,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.delete_instance.assert_called_once_with(
            project_id=None,
            instance_id=INSTANCE_ID
        )
        self.assertTrue(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_delete_aborts_and_succeeds_if_instance_does_not_exist(self,
                                                                            mock_hook):
        # Deleting a non-existent instance is idempotent: no delete call,
        # but the operator still reports success.
        mock_hook.return_value.get_instance.return_value = None
        op = CloudSpannerInstanceDeleteOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.delete_instance.assert_not_called()
        self.assertTrue(result)
    @parameterized.expand([
        ("", INSTANCE_ID, "project_id"),
        (PROJECT_ID, "", "instance_id"),
    ])
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_delete_ex_if_param_missing(self, project_id, instance_id, exp_msg,
                                                 mock_hook):
        with self.assertRaises(AirflowException) as cm:
            CloudSpannerInstanceDeleteOperator(
                project_id=project_id,
                instance_id=instance_id,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
    # ------------------------------------------------------------------
    # CloudSpannerInstanceDatabaseQueryOperator: queries are routed to the
    # hook's execute_dml, with single queries normalized to a list.
    # ------------------------------------------------------------------
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_query(self, mock_hook):
        mock_hook.return_value.execute_sql.return_value = None
        op = CloudSpannerInstanceDatabaseQueryOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            query=INSERT_QUERY,
            task_id="id"
        )
        result = op.execute(None)  # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.execute_dml.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID,
            database_id=DB_ID,
            queries=[INSERT_QUERY]
        )
        self.assertIsNone(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_query_missing_project_id(self, mock_hook):
        mock_hook.return_value.execute_sql.return_value = None
        op = CloudSpannerInstanceDatabaseQueryOperator(
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            query=INSERT_QUERY,
            task_id="id"
        )
        result = op.execute(None)  # pylint: disable=assignment-from-no-return
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.execute_dml.assert_called_once_with(
            project_id=None, instance_id=INSTANCE_ID,
            database_id=DB_ID, queries=[INSERT_QUERY]
        )
        self.assertIsNone(result)
    @parameterized.expand([
        ("", INSTANCE_ID, DB_ID, INSERT_QUERY, "project_id"),
        (PROJECT_ID, "", DB_ID, INSERT_QUERY, "instance_id"),
        (PROJECT_ID, INSTANCE_ID, "", INSERT_QUERY, "database_id"),
        (PROJECT_ID, INSTANCE_ID, DB_ID, "", "query"),
    ])
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_query_ex_if_param_missing(self, project_id, instance_id,
                                                database_id, query, exp_msg, mock_hook):
        with self.assertRaises(AirflowException) as cm:
            CloudSpannerInstanceDatabaseQueryOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                query=query,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_query_dml(self, mock_hook):
        mock_hook.return_value.execute_dml.return_value = None
        op = CloudSpannerInstanceDatabaseQueryOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            query=INSERT_QUERY,
            task_id="id"
        )
        op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.execute_dml.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID, queries=[INSERT_QUERY]
        )
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_instance_query_dml_list(self, mock_hook):
        # A list of queries must be passed through unchanged (not wrapped).
        mock_hook.return_value.execute_dml.return_value = None
        op = CloudSpannerInstanceDatabaseQueryOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            query=[INSERT_QUERY, INSERT_QUERY_2],
            task_id="id"
        )
        op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.execute_dml.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID,
            database_id=DB_ID, queries=[INSERT_QUERY, INSERT_QUERY_2]
        )
    # ------------------------------------------------------------------
    # CloudSpannerInstanceDatabaseDeployOperator: create-if-missing,
    # no-op if the database already exists.
    # ------------------------------------------------------------------
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_create(self, mock_hook):
        mock_hook.return_value.get_database.return_value = None
        op = CloudSpannerInstanceDatabaseDeployOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.create_database.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS
        )
        mock_hook.return_value.update_database.assert_not_called()
        self.assertTrue(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_create_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_database.return_value = None
        op = CloudSpannerInstanceDatabaseDeployOperator(
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.create_database.assert_called_once_with(
            project_id=None, instance_id=INSTANCE_ID, database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS
        )
        mock_hook.return_value.update_database.assert_not_called()
        self.assertTrue(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_create_with_pre_existing_db(self, mock_hook):
        # Unlike instance deploy, database deploy does NOT update an
        # existing database — it skips entirely and succeeds.
        mock_hook.return_value.get_database.return_value = {"name": DB_ID}
        op = CloudSpannerInstanceDatabaseDeployOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.create_database.assert_not_called()
        mock_hook.return_value.update_database.assert_not_called()
        self.assertTrue(result)
    @parameterized.expand([
        ("", INSTANCE_ID, DB_ID, DDL_STATEMENTS, 'project_id'),
        (PROJECT_ID, "", DB_ID, DDL_STATEMENTS, 'instance_id'),
        (PROJECT_ID, INSTANCE_ID, "", DDL_STATEMENTS, 'database_id'),
    ])
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_create_ex_if_param_missing(self,
                                                 project_id, instance_id,
                                                 database_id, ddl_statements,
                                                 exp_msg, mock_hook):
        with self.assertRaises(AirflowException) as cm:
            CloudSpannerInstanceDatabaseDeployOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                ddl_statements=ddl_statements,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
    # ------------------------------------------------------------------
    # CloudSpannerInstanceDatabaseUpdateOperator
    # ------------------------------------------------------------------
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_update(self, mock_hook):
        mock_hook.return_value.get_database.return_value = {"name": DB_ID}
        op = CloudSpannerInstanceDatabaseUpdateOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.update_database.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS, operation_id=None
        )
        self.assertTrue(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_update_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_database.return_value = {"name": DB_ID}
        op = CloudSpannerInstanceDatabaseUpdateOperator(
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.update_database.assert_called_once_with(
            project_id=None, instance_id=INSTANCE_ID, database_id=DB_ID,
            ddl_statements=DDL_STATEMENTS, operation_id=None
        )
        self.assertTrue(result)
    @parameterized.expand([
        ("", INSTANCE_ID, DB_ID, DDL_STATEMENTS, 'project_id'),
        (PROJECT_ID, "", DB_ID, DDL_STATEMENTS, 'instance_id'),
        (PROJECT_ID, INSTANCE_ID, "", DDL_STATEMENTS, 'database_id'),
    ])
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_update_ex_if_param_missing(self, project_id, instance_id,
                                                 database_id, ddl_statements,
                                                 exp_msg, mock_hook):
        with self.assertRaises(AirflowException) as cm:
            CloudSpannerInstanceDatabaseUpdateOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                ddl_statements=ddl_statements,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_update_ex_if_database_not_exist(self, mock_hook):
        # Updating a missing database is an error (unlike delete, which is
        # idempotent) — see the exception message asserted below.
        mock_hook.return_value.get_database.return_value = None
        with self.assertRaises(AirflowException) as cm:
            op = CloudSpannerInstanceDatabaseUpdateOperator(
                project_id=PROJECT_ID,
                instance_id=INSTANCE_ID,
                database_id=DB_ID,
                ddl_statements=DDL_STATEMENTS,
                task_id="id"
            )
            op.execute(None)
        err = cm.exception
        self.assertIn("The Cloud Spanner database 'db1' in project 'project-id' and "
                      "instance 'instance-id' is missing", str(err))
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
    # ------------------------------------------------------------------
    # CloudSpannerInstanceDatabaseDeleteOperator
    # ------------------------------------------------------------------
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_delete(self, mock_hook):
        mock_hook.return_value.get_database.return_value = {"name": DB_ID}
        op = CloudSpannerInstanceDatabaseDeleteOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.delete_database.assert_called_once_with(
            project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID
        )
        self.assertTrue(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_delete_missing_project_id(self, mock_hook):
        mock_hook.return_value.get_database.return_value = {"name": DB_ID}
        op = CloudSpannerInstanceDatabaseDeleteOperator(
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.delete_database.assert_called_once_with(
            project_id=None, instance_id=INSTANCE_ID, database_id=DB_ID
        )
        self.assertTrue(result)
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_delete_exits_and_succeeds_if_database_does_not_exist(self,
                                                                           mock_hook):
        mock_hook.return_value.get_database.return_value = None
        op = CloudSpannerInstanceDatabaseDeleteOperator(
            project_id=PROJECT_ID,
            instance_id=INSTANCE_ID,
            database_id=DB_ID,
            task_id="id"
        )
        result = op.execute(None)
        mock_hook.assert_called_once_with(gcp_conn_id="google_cloud_default")
        mock_hook.return_value.delete_database.assert_not_called()
        self.assertTrue(result)
    @parameterized.expand([
        ("", INSTANCE_ID, DB_ID, DDL_STATEMENTS, 'project_id'),
        (PROJECT_ID, "", DB_ID, DDL_STATEMENTS, 'instance_id'),
        (PROJECT_ID, INSTANCE_ID, "", DDL_STATEMENTS, 'database_id'),
    ])
    @mock.patch("airflow.gcp.operators.spanner.SpannerHook")
    def test_database_delete_ex_if_param_missing(self, project_id, instance_id,
                                                 database_id, ddl_statements,
                                                 exp_msg, mock_hook):
        # NOTE(review): ddl_statements is passed to the *delete* operator
        # here; it appears to be accepted/ignored — confirm against the
        # operator's constructor signature.
        with self.assertRaises(AirflowException) as cm:
            CloudSpannerInstanceDatabaseDeleteOperator(
                project_id=project_id,
                instance_id=instance_id,
                database_id=database_id,
                ddl_statements=ddl_statements,
                task_id="id"
            )
        err = cm.exception
        self.assertIn("The required parameter '{}' is empty".format(exp_msg), str(err))
        mock_hook.assert_not_called()
# (trailing extraction artifact removed: stray '|' line and dataset-viewer
# widget text "Subsets and Splits / No community queries yet / ...")