input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
for n in boundPropNames:
opts.addListener(n, self.name, self.__boundsOptsChanged)
for n in infoPropNames:
opts.addListener(n, self.name, self.__infoOptsChanged)
# Enable the volume widget if the
# overlay is a NIFTI image with more
# than three dimensions, and bind
# the widget to the volume property
# of the associated NiftiOpts instance
if isinstance(overlay, fslimage.Nifti) and overlay.ndim > 3:
props.bindWidget(self.__volume,
opts,
'volume',
floatspin.EVT_FLOATSPIN)
opts.addListener('volumeDim', self.name, self.__volumeDimChanged)
self.__volume .Enable()
self.__volumeLabel.Enable()
self.__volumeDimChanged()
# Or, if the overlay is a mesh which
# has some time series data associated
# with it
elif isinstance(overlay, fslmesh.Mesh):
props.bindWidget(self.__volume,
opts,
'vertexDataIndex',
floatspin.EVT_FLOATSPIN)
opts.addListener('vertexData', self.name, self.__vertexDataChanged)
self.__vertexDataChanged()
else:
self.__volume.SetRange(0, 0)
self.__volume.SetValue(0)
self.__volume.Disable()
def __deregisterOverlay(self):
    """De-registers property listeners with the :class:`.Display` and
    :class:`.DisplayOpts` instances associated with the previously
    registered overlay.

    This is the mirror of the registration logic, and must stay in sync
    with the listener/widget bindings made there.
    """

    opts    = self.__registeredOpts
    display = self.__registeredDisplay
    overlay = self.__registeredOverlay

    # Nothing was registered - nothing to undo.
    if overlay is None:
        return

    # Clear the references up front so that any callback which
    # fires during de-registration sees a consistent
    # "nothing registered" state.
    self.__registeredOpts    = None
    self.__registeredDisplay = None
    self.__registeredOverlay = None

    # DISPLAYOPTS_BOUNDS / DISPLAYOPTS_INFO map DisplayOpts
    # types to property-name lists; allhits=True returns one
    # list per matching type, which is then flattened.
    boundPropNames = DISPLAYOPTS_BOUNDS.get(opts, [], allhits=True)
    infoPropNames  = DISPLAYOPTS_INFO  .get(opts, [], allhits=True)
    boundPropNames = it.chain(*boundPropNames)
    infoPropNames  = it.chain(*infoPropNames)

    if display is not None:
        display.removeListener('overlayType', self.name)

    for p in boundPropNames: opts.removeListener(p, self.name)
    for p in infoPropNames:  opts.removeListener(p, self.name)

    # NIFTI images with more than three dimensions had the
    # volume widget bound to the 'volume' property ...
    if isinstance(overlay, fslimage.Nifti) and overlay.ndim > 3:
        props.unbindWidget(self.__volume,
                           opts,
                           'volume',
                           floatspin.EVT_FLOATSPIN)
        opts.removeListener('volumeDim', self.name)

    # ... and meshes had it bound to 'vertexDataIndex'.
    elif isinstance(overlay, fslmesh.Mesh):
        props.unbindWidget(self.__volume,
                           opts,
                           'vertexDataIndex',
                           floatspin.EVT_FLOATSPIN)
        opts.removeListener('vertexData', self.name)
def __volumeDimChanged(self, *a):
    """Called when the selected overlay is a :class:`.Nifti`, and its
    :attr:`.NiftiOpts.volumeDim` property changes. Updates the volume
    widget.
    """
    overlay = self.__registeredOverlay
    opts    = self.__registeredOpts
    volume  = opts.volume

    # volumeDim selects which non-spatial dimension the volume
    # widget browses (0 -> axis 3, 1 -> axis 4, ...).
    vdim = opts.volumeDim + 3

    self.__volume.SetRange(0, overlay.shape[vdim] - 1)
    self.__volume.SetValue(volume)

    # The selected volume affects the displayed intensity
    # values, so refresh the information panel too.
    self.__infoOptsChanged()
def __vertexDataChanged(self, *a):
    """Called when the selected overlay is a :class:`.Mesh`, and
    its :attr:`.MeshOpts.vertexData` property changes. Updates the volume
    widget.
    """
    opts = self.__registeredOpts
    vd   = opts.getVertexData()
    vdi  = opts.vertexDataIndex

    # Only enable the widget when there are multiple data points
    # per vertex to browse through. Assumes vertex data is 2D,
    # (nvertices, ndatapoints) - the same indexing convention is
    # used in __genMeshInfo.
    enabled = vd is not None and vd.shape[1] > 1

    self.__volume     .Enable(enabled)
    self.__volumeLabel.Enable(enabled)

    if enabled:
        self.__volume.SetRange(0, vd.shape[1] - 1)
        self.__volume.SetValue(vdi)

    self.__infoOptsChanged()
def __boundsOptsChanged(self, *a):
    """Called when a :class:`.DisplayOpts` property associated
    with the currently selected overlay, and listed in the
    :data:`DISPLAYOPTS_BOUNDS` dictionary, changes. Refreshes the
    ``LocationInfoPanel`` interface accordingly.
    """
    # Bounds-related properties can invalidate the location
    # widget limits as well as the displayed information, so
    # both are refreshed.
    self.__updateWidgets()
    self.__displayLocationChanged()
def __infoOptsChanged(self, *a):
    """Called when a :class:`.DisplayOpts` property associated
    with the currently selected overlay, and listed in the
    :data:`DISPLAYOPTS_INFO` dictionary, changes. Refreshes the
    ``LocationInfoPanel`` interface accordingly.
    """
    # Info-only properties do not affect the widget limits
    # (contrast with __boundsOptsChanged), so only the
    # location information needs refreshing.
    self.__displayLocationChanged()
def __overlayOrderChanged(self, *a):
    """Called when the :attr:`.DisplayContext.overlayOrder` changes.
    Refreshes the information panel, which lists overlays in
    display order.
    """
    self.__displayLocationChanged()
def __updateWidgets(self):
    """Called by the :meth:`__selectedOverlayChanged` and
    :meth:`__displayOptsChanged` methods. Enables/disables the
    voxel/world location and volume controls depending on the currently
    selected overlay (or reference image).
    """

    overlay = self.__registeredOverlay
    opts    = self.__registeredOpts

    # Voxel coordinates only make sense when the overlay's
    # DisplayOpts provides a reference image defining a
    # voxel grid.
    if overlay is not None: refImage = opts.referenceImage
    else:                   refImage = None

    haveRef = refImage is not None

    self.__voxelX    .Enable(haveRef)
    self.__voxelY    .Enable(haveRef)
    self.__voxelZ    .Enable(haveRef)
    self.__voxelLabel.Enable(haveRef)

    ######################
    # World location label
    ######################

    # Append the name of the reference image's anatomical
    # space, or an 'unknown' label when there is none.
    label = strings.labels[self, 'worldLocation']

    if haveRef: label += strings.anatomy[refImage,
                                         'space',
                                         refImage.getXFormCode()]
    else:       label += strings.labels[ self,
                                         'worldLocation',
                                         'unknown']

    self.__worldLabel.SetLabel(label)

    ####################################
    # Voxel/world location widget limits
    ####################################

    # Figure out the limits for the
    # voxel/world location widgets
    if haveRef:
        opts  = self.displayCtx.getOpts(refImage)
        v2w   = opts.getTransform('voxel', 'world')
        shape = refImage.shape[:3]
        vlo   = [0, 0, 0]
        vhi   = np.array(shape) - 1
        # World limits come from the voxel->world transform
        # applied to the image extent.
        wlo, whi = affine.axisBounds(shape, v2w)
        wstep    = refImage.pixdim[:3]
    else:
        vlo     = [0, 0, 0]
        vhi     = [0, 0, 0]
        # No reference image - fall back to the overall
        # display bounds (interleaved lo/hi pairs).
        wbounds = self.displayCtx.bounds[:]
        wlo     = wbounds[0::2]
        whi     = wbounds[1::2]
        wstep   = [1, 1, 1]

    log.debug('Setting voxelLocation limits: {} - {}'.format(vlo, vhi))
    log.debug('Setting worldLocation limits: {} - {}'.format(wlo, whi))

    # Update the voxel and world location limits,
    # but don't trigger a listener callback, as
    # this would change the display location.
    widgets = [self.__worldX, self.__worldY, self.__worldZ]

    with props.suppress(self, 'worldLocation'), \
         props.suppress(self, 'voxelLocation'):

        for i in range(3):
            self.voxelLocation.setLimits(i, vlo[i], vhi[i])
            self.worldLocation.setLimits(i, wlo[i], whi[i])
            # World spin buttons step by one voxel size.
            widgets[i].SetIncrement(wstep[i])
def __displayLocationChanged(self, *a):
    """Called when the :attr:`.DisplayContext.location` changes.
    Propagates the change on to the :attr:`voxelLocation`
    and :attr:`worldLocation` properties.

    .. note:: Because the :attr:`.DisplayContext.location`,
              :attr:`voxelLocation` and :attr:`worldLocation` properties
              are all linked through property listeners (see
              :meth:`props.HasProperties.addListener`), we need to be a
              bit careful to avoid circular updates. Therefore, each of
              the :meth:`__displayLocationChanged`,
              :meth:`__worldLocationChanged` and
              :meth:`__voxelLocationChanged` methods use the
              :meth:`__prePropagate`, :meth:`__propagate`, and
              :meth:`__postPropagate` methods to propagate changes
              between the three location properties.
    """

    # This callback can fire after the panel has been (or is
    # being) destroyed, so bail out early in that case.
    if not self or self.destroyed:
        return

    # Nothing to propagate to if there are no overlays,
    # or no overlay is currently registered.
    if len(self.overlayList) == 0:       return
    if self.__registeredOverlay is None: return

    self.__prePropagate()
    self.__propagate('display', 'voxel')
    self.__propagate('display', 'world')
    self.__postPropagate()
    self.__updateLocationInfo()
def __worldLocationChanged(self, *a):
    """Called when the :attr:`worldLocation` changes. Propagates the
    change on to the :attr:`voxelLocation` and
    :attr:`.DisplayContext.location` properties.
    """

    # Nothing to do when no overlays are loaded, or no
    # overlay is currently registered.
    if len(self.overlayList) == 0 or self.__registeredOverlay is None:
        return

    self.__prePropagate()
    for destination in ('voxel', 'display'):
        self.__propagate('world', destination)
    self.__postPropagate()
    self.__updateLocationInfo()
def __voxelLocationChanged(self, *a):
    """Called when the :attr:`voxelLocation` changes. Propagates the
    change on to the :attr:`worldLocation` and
    :attr:`.DisplayContext.location` properties.
    """

    # Nothing to do when no overlays are loaded, or no
    # overlay is currently registered.
    if len(self.overlayList) == 0 or self.__registeredOverlay is None:
        return

    self.__prePropagate()
    for destination in ('world', 'display'):
        self.__propagate('voxel', destination)
    self.__postPropagate()
    self.__updateLocationInfo()
def __prePropagate(self):
    """Called by the :meth:`__displayLocationChanged`,
    :meth:`__worldLocationChanged` and :meth:`__voxelLocationChanged`
    methods.

    Disables notification of all location property listeners, so
    circular updates do not occur.
    """

    self           .disableNotification('voxelLocation')
    self           .disableNotification('worldLocation')
    self.displayCtx.disableListener(    'location', self.name)

    # Freeze the panel so the widget updates performed during
    # propagation are painted in one go (matched by Thaw in
    # __postPropagate).
    self.Freeze()
def __propagate(self, source, target):
    """Called by the :meth:`__displayLocationChanged`,
    :meth:`__worldLocationChanged` and :meth:`__voxelLocationChanged`
    methods. Copies the coordinates from the ``source`` location to the
    ``target`` location. Valid values for the ``source`` and ``target``
    are:

    =========== ==============================================
    ``display`` The :attr:`.DisplayContext.location` property.
    ``voxel``   The :attr:`voxelLocation` property.
    ``world``   The :attr:`worldLocation` property.
    =========== ==============================================
    """

    if   source == 'display': coords = self.displayCtx.location.xyz
    elif source == 'voxel':   coords = self.voxelLocation.xyz
    elif source == 'world':   coords = self.worldLocation.xyz

    refImage = self.__registeredOpts.referenceImage

    if refImage is not None:
        opts = self.displayCtx.getOpts(refImage)
        # Voxel targets are rounded to whole voxels (vround);
        # display/world targets keep full precision.
        xformed = opts.transformCoords([coords],
                                       source,
                                       target,
                                       vround=target == 'voxel')[0]
    else:
        # Without a reference image, all three coordinate
        # systems coincide - copy the coordinates unchanged.
        xformed = coords

    log.debug('Updating location ({} {} -> {} {})'.format(
        source, coords, target, xformed))

    if   target == 'display': self.displayCtx.location.xyz = xformed
    elif target == 'voxel':   self.voxelLocation .xyz      = xformed
    elif target == 'world':   self.worldLocation .xyz      = xformed
def __postPropagate(self):
    """Called by the :meth:`__displayLocationChanged`,
    :meth:`__worldLocationChanged` and :meth:`__voxelLocationChanged`
    methods.

    Re-enables the property listeners that were disabled by the
    :meth:`__prePropagate` method.
    """
    self           .enableNotification('voxelLocation')
    self           .enableNotification('worldLocation')
    self.displayCtx.enableListener(    'location', self.name)

    # Matches the Freeze in __prePropagate; repaint the panel
    # now that all location updates have been applied.
    self.Thaw()
    self.Refresh()
    self.Update()
def __updateLocationInfo(self):
    """Called whenever the :attr:`.DisplayContext.location` changes.
    Updates the HTML panel which displays information about all overlays
    in the :class:`.OverlayList`.
    """

    if len(self.overlayList) == 0 or self.__registeredOverlay is None:
        self.__info.SetPage('')
        return

    # Reverse the overlay order so they
    # are ordered the same on the info
    # page as in the overlay list panel
    displayCtx = self.displayCtx
    overlays   = reversed(displayCtx.getOrderedOverlays())
    selOvl     = displayCtx.getSelectedOverlay()
    lines      = []

    # Show a prominent (red, bold) warning when the loaded
    # images do not all share the same space.
    dswarn = self.__genDisplaySpaceWarning()
    if dswarn is not None:
        fmt = '<span style="color: #ff0000"><b>{}</b></span>'
        lines.append(fmt.format(dswarn))

    for overlay in overlays:

        display = displayCtx.getDisplay(overlay)
        opts    = display.opts

        # Hidden overlays are not listed.
        if not display.enabled:
            continue

        info  = None
        title = '<b>{}</b>'.format(display.name)

        # For mesh overlays, if the current location
        # corresponds to a vertex, show some info
        # about that vertex
        if isinstance(overlay, fslmesh.Mesh):
            info = self.__genMeshInfo(overlay, opts)

        elif isinstance(overlay, fslimage.Image):
            info = self.__genImageInfo(overlay, opts)

        else:
            info = '{}'.format(strings.labels[self, 'noData'])

        # Highlight the selected overlay in a different
        # colour, to make its info a bit more obvious.
        colourFmt = '<span style="color: #6060ff">{}</span>'
        if overlay is selOvl:
            title = colourFmt.format(title)
            if info is not None:
                info = colourFmt.format(info)

        lines.append(title)

        if info is not None:
            lines.append(info)

    self.__info.SetPage('<br>'.join(lines))
    self.__info.Refresh()
def __genDisplaySpaceWarning(self):
    """Generate a warning if images with different orientations and/or
    fields-of-view are loaded.

    :returns: A warning message, or ``None`` if all images share the
              same space.
    """
    images = [ovl for ovl in self.overlayList
              if isinstance(ovl, fslimage.Image)]

    # Compare every image against the first one - a single
    # mismatch is enough to warrant the warning.
    if any(not img.sameSpace(images[0]) for img in images[1:]):
        return strings.messages[self, 'displaySpaceWarning']

    return None
def __genMeshInfo(self, ovl, opts):
"""Generate an info line for the given :class:`.Mesh` overlay. """
vidx = opts.getVertex()
vd = opts.getVertexData()
vdidx = opts.vertexDataIndex
if vidx is None:
info = '[no vertex]'
# some vertex data has been
# loaded for this mesh.
elif vd is not None:
value = vd[vidx, vdidx]
# time series/multiple data points per
# vertex - display the time/data index
# as well
if vd.shape[1] > 1:
info = '[{}, {}]: {}'.format(vidx, vdidx, value)
# Only one scalar value per vertex -
# don't bother showing the vertex data
# index. If LUT is enabled, show the
# label for the value at the current
# vertex
elif opts.useLut:
lut = opts.lut
label = lut.get(value)
if label | |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Widget providing a set of tools to draw masks on a PlotWidget.
This widget is meant to work with a modified :class:`silx.gui.plot.PlotWidget`
- :class:`ScatterMask`: Handle scatter mask update and history
- :class:`ScatterMaskToolsWidget`: GUI for :class:`ScatterMask`
- :class:`ScatterMaskToolsDockWidget`: DockWidget to integrate in :class:`PlotWindow`
"""
from __future__ import division
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "15/02/2019"
import math
import logging
import os
import numpy
import sys
from .. import qt
from ...math.combo import min_max
from ...image import shapes
from ._BaseMaskToolsWidget import BaseMask, BaseMaskToolsWidget, BaseMaskToolsDockWidget
from ..colors import cursorColorForColormap, rgba
_logger = logging.getLogger(__name__)
class ScatterMask(BaseMask):
    """A 1D mask for scatter data."""

    def __init__(self, scatter=None):
        """
        :param scatter: :class:`silx.gui.plot.items.Scatter` instance
        """
        BaseMask.__init__(self, scatter)

    def _getXY(self):
        """Return the scatter x and y coordinate arrays (no copy)."""
        x = self._dataItem.getXData(copy=False)
        y = self._dataItem.getYData(copy=False)
        return x, y

    def getDataValues(self):
        """Return scatter data values as a 1D array.

        :rtype: 1D numpy.ndarray
        """
        return self._dataItem.getValueData(copy=False)

    def save(self, filename, kind):
        """Save the mask to a file.

        :param str filename: Output file path.
        :param str kind: Output format: 'npy', 'csv' or 'txt'.
            Other values are silently ignored.
        :raise RuntimeError: If the file cannot be written.
        """
        if kind == 'npy':
            try:
                numpy.save(filename, self.getMask(copy=False))
            except IOError:
                raise RuntimeError("Mask file can't be written")
        elif kind in ["csv", "txt"]:
            try:
                numpy.savetxt(filename, self.getMask(copy=False))
            except IOError:
                raise RuntimeError("Mask file can't be written")

    def updatePoints(self, level, indices, mask=True):
        """Mask/Unmask points with given indices.

        :param int level: Mask level to update.
        :param indices: Sequence or 1D array of indices of points to be
            updated
        :param bool mask: True to mask (default), False to unmask.
        """
        if mask:
            self._mask[indices] = level
        else:
            # unmask only where mask level is the specified value.
            # NOTE: the deprecated numpy.bool alias was removed in
            # NumPy 1.24; the builtin bool is the correct dtype.
            indices_stencil = numpy.zeros_like(self._mask, dtype=bool)
            indices_stencil[indices] = True
            self._mask[numpy.logical_and(self._mask == level, indices_stencil)] = 0
        self._notify()

    # update shapes

    def updatePolygon(self, level, vertices, mask=True):
        """Mask/Unmask a polygon of the given mask level.

        :param int level: Mask level to update.
        :param vertices: Nx2 array of polygon corners as (y, x) or (row, col)
        :param bool mask: True to mask (default), False to unmask.
        """
        polygon = shapes.Polygon(vertices)
        x, y = self._getXY()

        # TODO: this could be optimized if necessary
        indices_in_polygon = [idx for idx in range(len(x)) if
                              polygon.is_inside(y[idx], x[idx])]

        self.updatePoints(level, indices_in_polygon, mask)

    def updateRectangle(self, level, y, x, height, width, mask=True):
        """Mask/Unmask data inside a rectangle

        :param int level: Mask level to update.
        :param float y: Y coordinate of bottom left corner of the rectangle
        :param float x: X coordinate of bottom left corner of the rectangle
        :param float height:
        :param float width:
        :param bool mask: True to mask (default), False to unmask.
        """
        vertices = [(y, x),
                    (y + height, x),
                    (y + height, x + width),
                    (y, x + width)]
        self.updatePolygon(level, vertices, mask)

    def updateDisk(self, level, cy, cx, radius, mask=True):
        """Mask/Unmask a disk of the given mask level.

        :param int level: Mask level to update.
        :param float cy: Disk center (y).
        :param float cx: Disk center (x).
        :param float radius: Radius of the disk in mask array unit
        :param bool mask: True to mask (default), False to unmask.
        """
        x, y = self._getXY()
        stencil = (y - cy)**2 + (x - cx)**2 < radius**2
        self.updateStencil(level, stencil, mask)

    def updateEllipse(self, level, crow, ccol, radius_r, radius_c, mask=True):
        """Mask/Unmask an ellipse of the given mask level.

        :param int level: Row of the center of the ellipse
        :param int crow: Row of the center of the ellipse
        :param int ccol: Column of the center of the ellipse
        :param float radius_r: Radius of the ellipse in the row
        :param float radius_c: Radius of the ellipse in the column
        :param bool mask: True to mask (default), False to unmask.
        """
        def is_inside(px, py):
            # Standard ellipse equation, <= 1 means inside or on edge.
            return (px - ccol)**2 / radius_c**2 + (py - crow)**2 / radius_r**2 <= 1.0

        x, y = self._getXY()

        indices_inside = [idx for idx in range(len(x)) if is_inside(x[idx], y[idx])]
        self.updatePoints(level, indices_inside, mask)

    def updateLine(self, level, y0, x0, y1, x1, width, mask=True):
        """Mask/Unmask points inside a rectangle defined by a line (two
        end points) and a width.

        :param int level: Mask level to update.
        :param float y0: Row of the starting point.
        :param float x0: Column of the starting point.
        :param float y1: Row of the end point.
        :param float x1: Column of the end point.
        :param float width: Width of the line.
        :param bool mask: True to mask (default), False to unmask.
        """
        # theta is the angle between the horizontal and the line.
        # atan2 handles vertical lines (x1 == x0) correctly, yielding
        # +/-pi/2; the previous atan-with-fallback-to-0 produced a
        # degenerate zero-width rectangle for vertical lines.
        theta = math.atan2(y1 - y0, x1 - x0)
        w_over_2_sin_theta = width / 2. * math.sin(theta)
        w_over_2_cos_theta = width / 2. * math.cos(theta)

        vertices = [(y0 - w_over_2_cos_theta, x0 + w_over_2_sin_theta),
                    (y0 + w_over_2_cos_theta, x0 - w_over_2_sin_theta),
                    (y1 + w_over_2_cos_theta, x1 - w_over_2_sin_theta),
                    (y1 - w_over_2_cos_theta, x1 + w_over_2_sin_theta)]

        self.updatePolygon(level, vertices, mask)
class ScatterMaskToolsWidget(BaseMaskToolsWidget):
"""Widget with tools for masking data points on a scatter in a
:class:`PlotWidget`."""
def __init__(self, parent=None, plot=None):
    """
    :param parent: Parent QWidget.
    :param plot: The PlotWidget this mask tools widget operates on.
    """
    super(ScatterMaskToolsWidget, self).__init__(parent, plot,
                                                 mask=ScatterMask())
    self._z = 2  # Mask layer in plot
    self._data_scatter = None
    """plot Scatter item for data"""
    self._data_extent = None
    """Maximum extent of the data i.e., max(xMax-xMin, yMax-yMin)"""
    self._mask_scatter = None
    """plot Scatter item for representing the mask"""
def setSelectionMask(self, mask, copy=True):
    """Set the mask to a new array.

    :param numpy.ndarray mask:
        The array to use for the mask or None to reset the mask.
    :type mask: numpy.ndarray of uint8, C-contiguous.
        Array of other types are converted.
    :param bool copy: True (the default) to copy the array,
        False to use it as is if possible.
    :return: None if failed, shape of mask as 1-tuple if successful.
        The mask can be cropped or padded to fit active scatter,
        the returned shape is that of the scatter data.
    :raise ValueError: If the mask shape does not match the data shape.
    """
    if self._data_scatter is None:
        # This can happen if the mask tools widget has never been
        # shown: fall back to the plot's current active scatter.
        self._data_scatter = self.plot._getActiveItem(kind="scatter")
        if self._data_scatter is None:
            return None
        self._adjustColorAndBrushSize(self._data_scatter)

    if mask is None:
        self.resetSelectionMask()
        return self._data_scatter.getXData(copy=False).shape

    # asarray copies only when a dtype conversion is required.
    # (numpy.array(..., copy=False) raises under NumPy >= 2.0
    # whenever a copy cannot be avoided, so asarray is used.)
    mask = numpy.asarray(mask, dtype=numpy.uint8)

    if self._data_scatter.getXData(copy=False).shape == (0,) \
            or mask.shape == self._data_scatter.getXData(copy=False).shape:
        self._mask.setMask(mask, copy=copy)
        self._mask.commit()
        return mask.shape
    else:
        raise ValueError("Mask does not have the same shape as the data")
# Handle mask refresh on the plot

def _updatePlotMask(self):
    """Update mask image in plot.

    The mask is drawn as a second scatter item on top of the data
    scatter (see ``self._z``); when there is no mask, any previously
    added mask item is removed from the plot.
    """
    mask = self.getSelectionMask(copy=False)
    if mask is not None:
        self.plot.addScatter(self._data_scatter.getXData(),
                             self._data_scatter.getYData(),
                             mask,
                             legend=self._maskName,
                             colormap=self._colormap,
                             z=self._z)
        self._mask_scatter = self.plot._getItem(kind="scatter",
                                                legend=self._maskName)
        # Draw mask markers slightly larger than the data markers,
        # so the mask remains visible on top of the data.
        self._mask_scatter.setSymbolSize(
            self._data_scatter.getSymbolSize() + 2.0)
    elif self.plot._getItem(kind="scatter",
                            legend=self._maskName) is not None:
        self.plot.remove(self._maskName, kind='scatter')
# track widget visibility and plot active image changes

def showEvent(self, event):
    """Qt show event: start tracking active scatter changes.

    Any "after-care" connection left over from a previous hide is
    dropped first, then the mask state is (re-)initialised from the
    current active scatter.
    """
    try:
        self.plot.sigActiveScatterChanged.disconnect(
            self._activeScatterChangedAfterCare)
    except (RuntimeError, TypeError):
        # Signal was not connected - nothing to disconnect.
        pass
    self._activeScatterChanged(None, None)  # Init mask + enable/disable widget
    self.plot.sigActiveScatterChanged.connect(self._activeScatterChanged)
def hideEvent(self, event):
    """Qt hide event: stop tracking active scatter changes.

    If a mask currently exists, a lighter "after-care" handler keeps
    watching active-scatter changes while the widget is hidden, so the
    mask can be kept consistent with the plot.
    """
    self.plot.sigActiveScatterChanged.disconnect(self._activeScatterChanged)
    if not self.browseAction.isChecked():
        self.browseAction.trigger()  # Disable drawing tool
    if self.getSelectionMask(copy=False) is not None:
        self.plot.sigActiveScatterChanged.connect(
            self._activeScatterChangedAfterCare)
def _adjustColorAndBrushSize(self, activeScatter):
    """Adjust the mask colors and cached data extent for a scatter item.

    :param activeScatter: The scatter item the mask is attached to.
    """
    # Pick an overlay color that contrasts with the
    # scatter's colormap.
    colormap = activeScatter.getColormap()
    self._defaultOverlayColor = rgba(cursorColorForColormap(colormap['name']))
    self._setMaskColors(self.levelSpinBox.value(),
                        self.transparencySlider.value() /
                        self.transparencySlider.maximum())

    # Draw the mask just above the data scatter.
    self._z = activeScatter.getZValue() + 1
    self._data_scatter = activeScatter

    xData = self._data_scatter.getXData(copy=False)
    yData = self._data_scatter.getYData(copy=False)

    # Adjust brush size to data range
    if xData.size > 0 and yData.size > 0:
        xMin, xMax = min_max(xData)
        yMin, yMax = min_max(yData)
        self._data_extent = max(xMax - xMin, yMax - yMin)
    else:
        # No data: extent is undefined.
        self._data_extent = None
def _activeScatterChangedAfterCare(self, previous, next):
"""Check synchro of active scatter and mask when mask widget is hidden.
If active image has no | |
Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.documentation = None
"""
A
d
d
i
t
i
o
n
a
l
d
e
s
c
r
i
p
t
i
o
n
/
e
x
p
l
a
n
a
t
i
o
n
f
o
r
g
r
o
u
p
.
Type `str`. """
self.extends = None
"""
A
n
o
t
h
e
r
g
r
o
u
p
t
h
a
t
t
h
i
s
g
r
o
u
p
a
d
d
s
r
u
l
e
s
t
o
.
Type `str`. """
self.input = None
"""
N
a
m
e
d
i
n
s
t
a
n
c
e
p
r
o
v
i
d
e
d
w
h
e
n
i
n
v
o
k
i
n
g
t
h
e
m
a
p
.
List of `StructureMapGroupInput` items (represented as `dict` in JSON). """
self.name = None
"""
H
u
m
a
n
-
r
e
a
d
a
b
l
e
l
a
b
e
l
.
Type `str`. """
self.rule = None
"""
T
r
a
n
s
f
o
r
m
R
u
l
e
f
r
o
m
s
o
u
r
c
e
t
o
t
a
r
g
e
t
.
List of `StructureMapGroupRule` items (represented as `dict` in JSON). """
self.typeMode = None
"""
n
o
n
e
|
t
y
p
e
s
|
t
y
p
e
-
a
n
d
-
t
y
p
e
s
.
Type `str`. """
super(StructureMapGroup, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
    """Return the FHIR property definitions for StructureMapGroup.

    Each entry is a (name, json_name, type, is_list, of_many, required)
    tuple, as expected by the FHIR base-element machinery.
    """
    properties = super(StructureMapGroup, self).elementProperties()
    properties.extend([
        ("documentation", "documentation", str, False, None, False),
        ("extends", "extends", str, False, None, False),
        ("input", "input", StructureMapGroupInput, True, None, True),
        ("name", "name", str, False, None, True),
        ("rule", "rule", StructureMapGroupRule, True, None, True),
        ("typeMode", "typeMode", str, False, None, True),
    ])
    return properties
class StructureMapGroupInput(backboneelement.BackboneElement):
    """Named instance provided when invoking the map.

    A name assigned to an instance of data. The instance must be provided
    when the mapping is invoked.
    """

    resource_type = "StructureMapGroupInput"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.documentation = None
        """ Documentation for this instance of data.
        Type `str`. """

        self.mode = None
        """ source | target.
        Type `str`. """

        self.name = None
        """ Name for this instance of data.
        Type `str`. """

        self.type = None
        """ Type for this instance of data.
        Type `str`. """

        super(StructureMapGroupInput, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(StructureMapGroupInput, self).elementProperties()
        js.extend([
            ("documentation", "documentation", str, False, None, False),
            ("mode", "mode", str, False, None, True),
            ("name", "name", str, False, None, True),
            ("type", "type", str, False, None, False),
        ])
        return js
class StructureMapGroupRule(backboneelement.BackboneElement):
    """Transform Rule from source to target."""

    resource_type = "StructureMapGroupRule"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.dependent = None
        """ Which other rules to apply in the context of this rule.
        List of `StructureMapGroupRuleDependent` items (represented as `dict` in JSON). """

        self.documentation = None
        """ Documentation for this instance of data.
        Type `str`. """

        self.name = None
        """ Name of the rule for internal references.
        Type `str`. """

        self.rule = None
        """ Rules contained in this rule.
        List of `StructureMapGroupRule` items (represented as `dict` in JSON). """

        self.source = None
        """ Source inputs to the mapping.
        List of `StructureMapGroupRuleSource` items (represented as `dict` in JSON). """

        self.target = None
        """ Content to create because of this mapping rule.
        List of `StructureMapGroupRuleTarget` items (represented as `dict` in JSON). """

        super(StructureMapGroupRule, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(StructureMapGroupRule, self).elementProperties()
        js.extend([
            ("dependent", "dependent", StructureMapGroupRuleDependent, True, None, False),
            ("documentation", "documentation", str, False, None, False),
            ("name", "name", str, False, None, True),
            ("rule", "rule", StructureMapGroupRule, True, None, False),
            ("source", "source", StructureMapGroupRuleSource, True, None, True),
            ("target", "target", StructureMapGroupRuleTarget, True, None, False),
        ])
        return js
class StructureMapGroupRuleDependent(backboneelement.BackboneElement):
"""
W
h
i
c
h
o
t
h
e
r
r
u
l
e
s
t
o
a
p
p
l
y
i
n
t
h
e
c
o
n
t
e
x
t
o
f
t
h
i
s
r
u
l
e
.
"""
resource_type = "StructureMapGroupRuleDependent"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
"""
N
a
m
e
o
f
a
r
u
l
e
o
r
g
r
o
u
p
t
o
a
p
p
l
y
.
Type `str`. """
self.variable = None
"""
V
a
r
i
a
b
l
e
t
o
p
a
s
s
| |
<gh_stars>1-10
#!/usr/bin/env python3
"""FabTools: Tools for Fab..
This is a package provides classes used to define the tooling that is available in a shop.
They basically define some classes that interface with the FreeCAD Path Tools infrastructure.
The "new" FreeCAD Path Tools infrastructure organizes everything into a top level `Tools/`
directory and associated sub-directories as follows:
* `Tools/`: The top level directory that contains a `Shape/`, `Bit/`, and `Library/` sub-directory.
* `Tools/Shape/`: This sub-directory contains tool template files in FreeCAD `.fcstd` format:
* `ballend.fcstd`: The ball end tool template.
* ...
* `v-bit.fcstd`: The V-bit groove tool template.
* `Tools/Bit/`: This sub-directory contains FreeCAD Path Tool bit JSON files (`.fctb`):
The JSON in each tool bit file (`.fctb`) references one shape `.fcstd` file from `Tools/Shape/`.
* `6mm_Ball_End.fctb`: A 6mm Ball end end tool bit that uses `ballend.fcstd`.
* ...
* `60degree_VBit.fctb`: A 60-degree VBit tool bit that uses `v-bit.fcstd`.
* `Tools/Library/`: This sub-directory contains FreeCAD Path library JSON files (`.fctl`)
These files define a tool number to tool bit binding. In general, each Shop machine
will tend to have a dedicated library associated with it. However, some machine tools can
share the same library. Each `.fctl` JSON library references Tool Bit files from `Tools/Bin/`.
* `Default.fctl`: The default tools that comes with FreeCAD.
* `Machine1.fctl`: The tools library for Machine1.
* ...
* `MachineN.fctl`: The tools library for MachineN.
The top-down class hierarchy for the FabTools package is:
* FabToolsDirectory: This corresponds to a `Tools/` directory: (TBD).
* FabShapes: This corresponds to a `Tools/Shape/` directory:
* FabShape: This corresponds to a `.fcstd` tool shape template in the `Tools/Shape/` directory.
* FabAttributes: This corresponds to bit attributes that do not specify bit shape dimensions.
* FabBitTemplates: This contains all of the known FabBitTemplate's.
* FabBitTemplate: This corresponds to a template is used to construct FabBit.
* FabBits: This corresponds to a `Tools/Bit/` sub-Directory:
* FabBit: This corresponds to a `.fctb` file in the `Tools/Bit/` directory. For each different
Shape, there is a dedicated class that represents that shape:
* FabBallEndBit: This corresponds to `Tools/Shape/ballend.fcstd`.
* FabBullNoseBit: This corresponds to `Tools/Shape/bullnose.fcstd`.
* FabChamferBit: This corresponds to `Tools/Shape/chamfer.fcstd`.
* FabDrillBit: This corresponds to `Tools/Shape/drill.fcstd`.
* FabEndMillBit: This corresponds to `Tools/Shape/endmill.fcstd`.
* FabProbeBit: This corresponds to `Tools/Shape/probe.fcstd`.
* FabSlittingSawBit: This corresponds to `Tools/Shape/slittingsaw.fcstd`.
* FabThreadMillBit: This corresponds to `Tools/Shape/thread-mill.fcstd`.
* FabVBit: This corresponds to `Tools/Shape/v-bit.fcstd`.
* FabLibraries: This corresponds to a `Tool/Library` directory:
* FabLibrary: This corresponds to an individual `.fctl` file in the `Tools/Library` directory.
* FabTooling: THis corresponds to the `Tools/` driectory.
"""
# <--------------------------------------- 100 characters ---------------------------------------> #
from dataclasses import dataclass, field
from pathlib import Path as PathFile
import json
import tempfile
from typeguard import check_type, check_argument_types
from typing import Any, Dict, IO, List, Sequence, Tuple, Union
from FabToolTemplates import FabAttributes, FabBit, FabBitTemplate, FabBitTemplates
from FabToolTemplates import FabBitTemplatesFactory, FabShapes
from FabToolBits import FabBallEndBit, FabBullNoseBit, FabChamferBit, FabDoveTailBit, FabDrillBit
from FabToolBits import FabEndMillBit, FabProbeBit, FabSlittingSawBit, FabThreadMillBit, FabVBit
# FabBits:
@dataclass(frozen=True)
class FabBits(object):
"""FabBits: A collection FabBit's that corresponds to a `Tools/Bit/` sub-directory..
Attributes:
* *Bits* (Tuple[FabBit, ...]): The associated FabBit's in name sorted order.
* *Names* (Tuple[str, ...]): The sorted FabBit names.
* *Stems* (Tuple[str, ...]): Stem names in the same order as the Bits.
Constructor:
* FabBits(Bits, Names, Stems)
"""
Bits: Tuple[FabBit, ...]
Names: Tuple[str, ...]
Stems: Tuple[str, ...]
# FabBits.__post_init__():
def __post_init__(self) -> None:
"""Initialize a FabBits."""
check_type("FabBits.Bits", self.Bits, Tuple[FabBit, ...])
check_type("FabBits.Names", self.Names, Tuple[str, ...])
check_type("FabBits.Stems", self.Stems, Tuple[str, ...])
# FabBits.shapeNameToTemplateAndBit():
@staticmethod
def shapeNameToTemplateAndBit(shape_name: str) -> Tuple[FabBitTemplate, FabBit]:
"""Return the FabTempate and FabBit associated with a shape name."""
bit_templates: FabBitTemplates = FabBitTemplatesFactory.getTemplates() # type: ignore
template: FabBitTemplate
constructor: Any
if shape_name == "ballend":
template = bit_templates.BallEnd
constructor = FabBallEndBit
elif shape_name == "bullnose":
template = bit_templates.BullNose
constructor = FabBullNoseBit
elif shape_name == "chamfer":
template = bit_templates.Chamfer
constructor = FabChamferBit
elif shape_name == "dovetail": # pragma: no unit covert
template = bit_templates.DoveTail
constructor = FabDoveTailBit
elif shape_name == "drill":
template = bit_templates.Drill
constructor = FabDrillBit
elif shape_name == "endmill":
template = bit_templates.EndMill
constructor = FabEndMillBit
elif shape_name == "probe":
template = bit_templates.Probe
constructor = FabProbeBit
elif shape_name == "slittingsaw":
template = bit_templates.SlittingSaw
constructor = FabSlittingSawBit
elif shape_name == "thread-mill":
template = bit_templates.ThreadMill
constructor = FabThreadMillBit
elif shape_name == "v-bit":
template = bit_templates.V
constructor = FabVBit
else:
assert False, f"Unrecogniezed {shape_name=}"
return template, constructor
# FabBits.toJSON():
@staticmethod
def toJSON(bit_json: Dict[str, Any],
tools_directory: PathFile, bit_stem_name: str, tracing: str = "") -> FabBit:
"""Convert JSON dictionary to a FabBit.
Arguments:
* *bit_json* (Dict[str, Any]): The JSON dictionary that defines the FabBit.
* *tools_directory* (FabPath): The tools directory under with the bit will be stored.
* *bit_stem_name* (str):
The stem name of the (`.fctb`) file. (For example: "probe.fctb" => "probe")
Returns:
* (FabBit): The resulting FabBit.
"""
if tracing:
print(f"{tracing}=>FabBits.toJSON(*, '{str(tools_directory)}', '{bit_stem_name}')")
assert check_argument_types()
assert "name" in bit_json, "FabBits.toJSON(): No name found"
assert "version" in bit_json, "FabBits.toJSON(): No version found"
parameters: Dict[str, Any] = bit_json["parameter"] if "parameter" in bit_json else {}
attributes: Dict[str, Any] = bit_json["attribute"] if "attribute" in bit_json else {}
_ = attributes # TODO: Is *attributes* actually needed?
# Extract *version* and *shape_name* from *bit_json*:
version: Any = bit_json["version"]
assert isinstance(version, int) and version == 2, "FabBits.toJSON(): version is not 2"
assert "shape" in bit_json, "FabBits.toJSON(): No shape found"
shape: Any = bit_json["shape"]
assert isinstance(shape, str) and shape.endswith(".fcstd"), (
f"FabBits.toJSON(): {shape=} does not end in '.fcstd'")
shape_name: str = shape[:-6]
# Convert the *shape*name* into a *template* and *constructor*:
template: FabBitTemplate
constructor: Any
template, constructor = FabBits.shapeNameToTemplateAndBit(shape_name)
# Do a fixup for a thread mill.:
if shape_name == "thread-mill":
# The `Tools/Bit/5mm-thread-cutter.fctb` file doe not have a CuttingAngle parameter.
# So we make one up here:
if "CuttingAngle" not in parameters:
parameters["CuttingAngle"] = "60.000 °"
# Construct the *bit_path_file*:
bit_path_file: PathFile = tools_directory / "Bit" / f"{bit_stem_name}.fctb"
kwargs: Dict[str, Any] = template.kwargsFromJSON(bit_json, bit_path_file)
if tracing:
print(f"{tracing}{shape_name=} {constructor=}")
# print(f"{tracing}{kwargs=}")
bit: FabBit = constructor(**kwargs)
if False and tracing: # pragma: no unit cover
# print(f"{tracing}bit_json=")
# print(json.dumps(bit_json, indent=4))
print(f"{tracing}{bit=}")
if tracing:
print(f"{tracing}<=FabBits.toJSON(*, '{str(tools_directory)}', '{bit_stem_name}')=>*")
return bit
# FabBits.read():
@staticmethod
def read(tools_directory: PathFile, shapes: FabShapes, tracing: str = "") -> "FabBits":
"""Read in a `Tools/Bit/` sub-directory.
Arguments:
* *tools_directory* (PathFile):
The `.../Tools` directory containing the `Bit/` subdirectory of `.fctb` Bit definitions.
* *shapes: (FabShapes): The FabShape objects to use.
Returns:
* (FabBits): The resulting FabBits that corresponds to the `Tools/Bit` sub-directory.
"""
next_tracing: str = tracing + " " if tracing else ""
if tracing:
print(f"{tracing}=>FabBits.read({str(tools_directory)}, *)")
bits_directory: PathFile = tools_directory / "Bit"
assert bits_directory.is_dir(), f"FabBits.read(): {str(bits_directory)} is not a directory"
bits_table: Dict[str, FabBit] = {}
assert check_argument_types()
bit_paths_table: Dict[str, PathFile] = {}
bit_file_path: PathFile
for bit_file_path in bits_directory.glob("*.fctb"):
bit_paths_table[str(bit_file_path)] = bit_file_path
sorted_bit_path_file_names: Tuple[str, ...] = tuple(sorted(bit_paths_table.keys()))
bit_path_file_name: str
index: int
for index, bit_path_file_name in enumerate(sorted_bit_path_file_names):
bit_path_file: PathFile = bit_paths_table[bit_path_file_name]
# Read in the *bit_json* dictionary from *bit_file_path*:
bit_stem_name: str = bit_path_file.stem
if tracing:
print(f"{tracing}BitFile[{index}]: Processing {str(bit_path_file)}")
bit_file: IO[str]
bit_json_text: str
with open(bit_path_file) as bit_file:
bit_json_text = bit_file.read()
try:
bit_json: Any = json.loads(bit_json_text)
except json.decoder.JSONDecodeError as json_error: # pragma: no unit cover
assert f"FabBits.read(): JSON read error {str(bit_path_file)}: {str(json_error)}"
bit: FabBit = FabBits.toJSON(bit_json, tools_directory,
bit_stem_name, tracing=next_tracing)
bits_table[bit.Name] = bit
if tracing:
print(f"{tracing}BitTable['{bit_stem_name}']: {type(bit)=}")
# Return the final FabBits object:
sorted_names: Tuple[str, ...] = tuple(sorted(bits_table.keys()))
sorted_bits: List[FabBit] = [bits_table[bit_name] for bit_name in sorted_names]
ordered_stems: List[str] = [bit.BitStem for bit in sorted_bits]
for index, bit in enumerate(sorted_bits):
assert sorted_names[index] == bit.Name
assert sorted_bits[index] is bit, (
f"sorted_names[{index}]: {sorted_names[index]=} != {bit}")
assert ordered_stems[index] == bit.BitStem
# if tracing:
# print(f"{tracing}{sorted_names=}")
# print(f"{tracing}{sorted_bits=}")
bits: FabBits = FabBits(tuple(sorted_bits), sorted_names, tuple(ordered_stems))
if tracing:
print(f"{tracing}<=FabBits.read({str(tools_directory)}, *)=>|{len(sorted_names)}|")
return bits
# FabBits.write()
def write(self, tools_directory: PathFile, tracing: str = "") -> None:
"""Write FabBits out to disk."""
next_tracing: str = tracing + " " if tracing else ""
if tracing:
print(f"{tracing}=>FabBits.write(*, {tools_directory})")
assert tools_directory.name == "Tools", str(tools_directory)
shapes_directory: PathFile = tools_directory / "Shape"
bits_directory: PathFile = tools_directory / "Bit"
bit: FabBit
if shapes_directory.is_dir() and bits_directory.is_dir():
shapes: FabShapes | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author:
<NAME>, PhD, MSB, BCBA-D
https://www.researchgate.net/profile/David_Cox26
twitter: @davidjcox_
LinkedIn: https://www.linkedin.com/in/coxdavidj/
Website: https://davidjcox.xyz
"""
#Set current working directory to the folder that contains your data.
import os
import pandas as pd
import numpy as np
import sys
import re, string, unicodedata
import matplotlib.pyplot as plt
# NOTE(review): machine-specific absolute paths -- this script only runs as-is
# on the original author's machine; parameterize before reuse.
sys.path.append('/Users/davidjcox/Dropbox/Coding/Local Python Modules/')
# Set path to data
os.chdir('/Users/davidjcox/Dropbox/Projects/CurrentProjectManuscripts/Empirical/PersonalFun/Org_Statements_Diversity/Org-Statements-Diversity-Inclusion/Data')
# Change settings to view all columns of data
pd.set_option('display.max_columns', None)
#%% Import data.
# 'Unnamed: 0' is the stale index column written by a previous to_csv() call.
raw_data = pd.read_csv('all_data.csv').drop(['Unnamed: 0'], axis=1)
data = raw_data.copy()  # Work on a copy so raw_data stays pristine.
data  # Bare expression: displays the frame when run as a notebook-style cell.
#%% DATA PRE-PROCESSING
# Pre-process our data. Goal is to have:
# (1) Single list where each item in the list is the raw string of the narrative for that participant.
# (2) List of lists with one list per subject, and each item in list is a sentence from their narrative.
# (3) List of lists with one list per subject, and each item in the list is a clean* word from their narrative.
# (4) Single list with all of the cleaned vocab for the entire group.
# (5) Single list of the vocabulary used throughout all narratives (i.e., omitting all redundancies from (4)).
# (6) Single list where each item in the list is a string of the cleaned narrative for that participant.
# (7) Single list where each item in the list is a string of the participant narratives with only clean words.
# ----------------------
# List names for above:
# (1) narratives
# (2) narratives_sent_tokenized
# (3) clean_words_tokenized
# (4) narratives_word_list
# (5) narrative_vocab
# (6) narr_as_string
# (7) clean_ind_narr
# --------------------------------------------------
# * Clean = punctuation and stop words removed.
#%% Start with (1) narratives:
# Single list where each item in the list is the raw string of the narrative for that participant.
narratives = data['body_text']  # One raw narrative string per participant.
#%% Next we'll get (2), narratives_sent_tokenized:
# List of lists with one list per subject, and each item in list is a sentence from their narrative.
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
#nltk.download() # Need to download the 'punkt' model from nltk if not already on computer.
count = 0
# Lower-case every narrative and drop slashes/backslashes; underscores become spaces.
lower_narratives = [
    narr.lower().replace("/", '').replace("\\", '').replace("_", ' ')
    for narr in narratives
]
narratives_sent_tokenized = [nltk.sent_tokenize(narr) for narr in lower_narratives]
len(lower_narratives)  # Should still have 1141 narratives.
lower_narratives[:2]  # Check out first few to make sure everything looks okay.
print(len(narratives_sent_tokenized))  # Should still have 1141 narratives.
narratives_sent_tokenized[::500]  # Spot-check every 500th narrative.
#%% Next, we'll get (3), clean_words_tokenized:
# List of lists with one list per subject, and each item in the list is a clean* word from their narrative.
# Containers for the processed narratives.
stem_narratives = []
clean_words_tokenized = []
narratives_word_tokenized = []
# Word tokenize the narratives. Store in narratives_word_tokenized list.
# (Loop variable renamed: the original shadowed the builtin `list`.)
for narr in lower_narratives:
    narratives_word_tokenized.append(nltk.word_tokenize(narr))
len(narratives_word_tokenized)  # Should still have 1141 items.
narratives_word_tokenized[:1]  # Check to make sure the first one looks okay.
# Convert each word to its root (i.e., lemmatize).
from nltk.stem import WordNetLemmatizer
wnl = nltk.WordNetLemmatizer()
for word_tokens in narratives_word_tokenized:
    temp_list = []
    for word in word_tokens:
        # BUG FIX: the original lemmatized *word* as a noun and then immediately
        # overwrote the result by lemmatizing the raw *word* as a verb, making
        # the noun pass dead code.  Chain the two passes so both apply.
        lemma = wnl.lemmatize(word)            # Noun lemmatization first.
        lemma = wnl.lemmatize(lemma, pos='v')  # Then verb lemmatization.
        temp_list.append(lemma)
    stem_narratives.append(temp_list)
len(stem_narratives)  # Should still have 1141 items.
stem_narratives[:1]  # Compare against narratives_word_tokenized.
# Some additional punctuation characters.
punctuation = [",", ".", "''", "' '", "\"", "!", "?", '-', '``', ':', ';',
               "'s", "...", "'d", '(', ')', '=', "'", "#", "$", "%", "&", '_',
               "<", "=v=", ">", "@", "[", "]", "^_^", '{', '}', "\"", '/', "\\\\",
               "n't", "'ll", "'m", '*', '..', "\"links:\"", "[001]", "[002]",
               "[003]", "<b>", "\"buttons\"", "\\r", "\\n", "\\\"", "\""]  # Punctuation to remove.
# Remove all punctuation, any sticky contraction elements, and stopwords from stem_narratives.
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
# BUG FIX: the original called list.remove() *while iterating the same list*,
# which skips the element that follows every removal; it papered over this by
# re-running the whole loop six times (a_range = 0..5) and could still leave
# unwanted tokens behind.  Filtering into new lists removes every unwanted
# token in a single deterministic pass.
drop_words = set(punctuation) | stop_words | {"'ve"}
stem_narratives = [
    [word for word in word_list if word not in drop_words]
    for word_list in stem_narratives
]
# Put this cleaned list into its own name so we don't mess it up.
clean_words_tokenized = stem_narratives
len(clean_words_tokenized)  # Should still have 1141 items (one per narrative).
clean_words_tokenized[::200]  # Spot-check.
#%% Next, we'll get (4) narratives_word_list:
# (4) Single list with all of the cleaned vocab for the entire group.
# Flatten the per-subject word lists into one group-level list of words.
narratives_word_list = [word for word_list in stem_narratives for word in word_list]
narr_all_words = ' '.join(narratives_word_list)
len(narratives_word_list)  # Total word count across all narratives.
#%% Next we'll get (5) narrative_vocab:
# Single list of the vocabulary used throughout all narratives (i.e., omitting all redundancies from (4)).
# PERF: dict.fromkeys() de-duplicates in O(n) while preserving first-seen
# order, replacing the original O(n^2) `word not in list` membership scan.
narrative_vocab = list(dict.fromkeys(narratives_word_list))
print("Number of words in vocab:", len(narrative_vocab))
sorted_vocab = sorted(narrative_vocab)
unique_words = np.unique(sorted_vocab)  # Alphabetized unique vocabulary (numpy array).
#%% Next, we'll get (6) narr_as_string:
# Single item of all narratives as a single string of the cleaned narratives.
narr_as_string = ' '.join(narratives_word_list)  # All cleaned words as one big string.
print("Number of characters total:", len(narr_as_string))
narr_as_string[:198]  # Peek at the start of the string.
#%% Finally, we'll get (7) clean_ind_narr:
# Single list where each item in the list is a string of the participant narratives with only clean words.
clean_ind_narr = [' '.join(word_list) for word_list in clean_words_tokenized]
data['cleaned_sentences'] = clean_ind_narr
print("Number of total statements", len(clean_ind_narr))
print(clean_ind_narr[::500])
#%% ===========================================================================
############################## LIST CREATION COMPLETE #########################
# =============================================================================
narratives                 # (1) raw narrative string per participant.
narratives_sent_tokenized  # (2) sentence-tokenized narratives, one list per subject.
clean_words_tokenized      # (3) clean word tokens, one list per subject.
narratives_word_list       # (4) all cleaned words for the entire group.
narr_all_words             # all cleaned words for the group joined into one string.
narrative_vocab            # (5) vocabulary with redundancies omitted.
narr_as_string             # (6) all cleaned narratives as one string.
clean_ind_narr             # (7) cleaned narrative string per participant.
#%%
from wordcloud import WordCloud
import matplotlib.pyplot as plt

def _render_cloud(cloud):
    """Display an already-generated WordCloud in a fresh borderless figure."""
    plt.figure()
    plt.imshow(cloud, interpolation='bilinear')
    plt.axis("off")
    plt.margins(x=0, y=0)
    plt.show()

#%% All words.
wordcloud = WordCloud(width=500, height=500, background_color='white').generate(narr_all_words)
_render_cloud(wordcloud)
#%% Just top 50 words.
wordcloud = WordCloud(width=500, height=500, background_color='white', max_words=50).generate(narr_all_words)
_render_cloud(wordcloud)
#%% Sentiment Analysis on the raw input
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# PERF: build the analyzer once; the original constructed a brand-new
# SentimentIntensityAnalyzer (re-loading the VADER lexicon) for every sentence.
_sia = SentimentIntensityAnalyzer()
raw_sentiment_score = [_sia.polarity_scores(sentence) for sentence in data['body_text']]
raw_sent_df = pd.DataFrame(raw_sentiment_score)
raw_sent_df = raw_sent_df.rename(columns={'neg': 'raw_neg', 'neu': 'raw_neu',
                                          'pos': 'raw_pos', 'compound': 'raw_compound'})
raw_sent_df
#%% Sentiment Analysis on the lemmatized and cleaned sentences
lemmed_sentiment_score = [_sia.polarity_scores(sentence) for sentence in clean_ind_narr]
lemmed_sent_df = pd.DataFrame(lemmed_sentiment_score)
lemmed_sent_df = lemmed_sent_df.rename(columns={'neg': 'cleaned_neg', 'neu': 'cleaned_neu',
                                                'pos': 'cleaned_pos', 'compound': 'cleaned_compound'})
lemmed_sent_df
#%% Add the above to the data df
data = pd.concat([data, raw_sent_df, lemmed_sent_df], axis=1)
data.to_csv("all_data.csv")  # NOTE(review): overwrites the input file in place.
data[:]
#%% Doughnut plot of raw sentiment
# Data: percentage of total sentiment mass in each raw category.
neg_sum = data['raw_neg'].sum()
neu_sum = data['raw_neu'].sum()
pos_sum = data['raw_pos'].sum()
tot = neg_sum + neu_sum + pos_sum
sent_prop = [round((part / tot) * 100, 2) for part in (neg_sum, neu_sum, pos_sum)]
#%% Doughnut plot
fig, ax = plt.subplots(figsize=(6, 3), subplot_kw=dict(aspect="equal"))
labels = ["Negative", "Neutral", "Positive"]
wedges, texts = ax.pie(sent_prop, wedgeprops=dict(width=0.5), startangle=-40)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
kw = dict(arrowprops=dict(arrowstyle="-"),
          bbox=bbox_props, zorder=0, va="center")
for i, p in enumerate(wedges):
ang = (p.theta2 - p.theta1)/2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
connectionstyle = "angle,angleA=0,angleB={}".format(ang)
kw["arrowprops"].update({"connectionstyle": connectionstyle})
if labels[i] == 'Negative':
ax.annotate(labels[i]+' '+str(sent_prop[i])+'%', xy=(x, y), xytext=(1.35*np.sign(x), y),\
horizontalalignment=horizontalalignment, **kw)
else:
| |
self.firstSigmaSliderYX.setSingleStep(0.5)
self.firstSigmaSliderYX.setTickInterval(10)
self.firstSigmaSliderZ = widgets.sliderWithSpinBox(
isFloat=True, title='Sigma Z-direction: ',
title_loc='in_line'
)
self.firstSigmaSliderZ.setTickPosition(QSlider.TicksBelow)
self.firstSigmaSliderZ.setSingleStep(0.5)
self.firstSigmaSliderZ.setTickInterval(10)
firstLayout.addWidget(self.firstSigmaSliderYX)
firstLayout.addWidget(self.firstSigmaSliderZ)
firstGroupbox.setLayout(firstLayout)
secondGroupbox = QGroupBox('Second gaussian filter')
secondLayout = QVBoxLayout()
self.secondSigmaSliderYX = widgets.sliderWithSpinBox(
isFloat=True, title='Sigma YX-direction:',
title_loc='in_line'
)
self.secondSigmaSliderYX.setTickPosition(QSlider.TicksBelow)
self.secondSigmaSliderYX.setSingleStep(0.5)
self.secondSigmaSliderYX.setTickInterval(10)
self.secondSigmaSliderZ = widgets.sliderWithSpinBox(
isFloat=True, title='Sigma Z-direction: ',
title_loc='in_line'
)
self.secondSigmaSliderZ.setTickPosition(QSlider.TicksBelow)
self.secondSigmaSliderZ.setSingleStep(0.5)
self.secondSigmaSliderZ.setTickInterval(10)
secondLayout.addWidget(self.secondSigmaSliderYX)
secondLayout.addWidget(self.secondSigmaSliderZ)
secondGroupbox.setLayout(secondLayout)
if not is3D:
self.firstSigmaSliderZ.hide()
self.secondSigmaSliderZ.hide()
self.previewCheckBox = QCheckBox('Preview filter')
self.previewCheckBox.setChecked(True)
cancelButton = widgets.cancelPushButton('Cancel')
buttonsLayout.addStretch(1)
buttonsLayout.addWidget(cancelButton)
mainLayout.addWidget(firstGroupbox)
mainLayout.addSpacing(20)
mainLayout.addWidget(secondGroupbox)
mainLayout.addSpacing(20)
mainLayout.addWidget(self.previewCheckBox)
mainLayout.addSpacing(10)
mainLayout.addLayout(buttonsLayout)
mainLayout.addStretch(1)
self.setLayout(mainLayout)
self.setFont(font)
self.firstSigmaSliderYX.sigValueChange.connect(self.valueChanged)
self.secondSigmaSliderYX.sigValueChange.connect(self.valueChanged)
if not is3D:
self.firstSigmaSliderZ.sigValueChange.connect(self.valueChanged)
self.secondSigmaSliderZ.sigValueChange.connect(self.valueChanged)
cancelButton.clicked.connect(self.close)
self.previewCheckBox.toggled.connect(self.previewToggled)
    def keyPressEvent(self, event):
        """No-op override so Enter/Return key presses do not close the dialog."""
        # Avoid closing on enter or return
        pass
def previewToggled(self, checked):
if checked:
self.valueChanged()
else:
self.sigRemoveFilterClicked.emit()
def initSpotmaxValues(self, posData):
self.firstSigmaSliderYX.setValue(0)
self.firstSigmaSliderZ.setValue(0)
PhysicalSizeY = posData.PhysicalSizeY
PhysicalSizeX = posData.PhysicalSizeX
PhysicalSizeZ = posData.PhysicalSizeZ
zyx_vox_dim = [PhysicalSizeZ, PhysicalSizeY, PhysicalSizeZ]
wavelen = 510
NA = 1.4
yx_resolution_multi = 1
z_resolution_limit = 1
_, zyx_resolution_pxl, _ = core.calc_resolution_limited_vol(
wavelen, NA, yx_resolution_multi, zyx_vox_dim, z_resolution_limit
)
self.secondSigmaSliderYX.setValue(zyx_resolution_pxl[1])
self.secondSigmaSliderZ.setValue(zyx_resolution_pxl[0])
def valueChanged(self, value=None):
sigmas = self.getSigmas()
if hasattr(self, 'channelsComboBox'):
channel = self.channelsComboBox.currentText()
else:
channel = ''
self.filterApplied = True
self.sigValueChanged.emit(sigmas, channel)
def getSigmas(self):
sigma1_yx = self.firstSigmaSliderYX.value()
sigma1_z = self.firstSigmaSliderZ.value()
sigma2_yx = self.secondSigmaSliderYX.value()
sigma2_z = self.secondSigmaSliderZ.value()
sigmas1 = (sigma1_z, sigma1_yx, sigma1_yx) if sigma1_z>0 else sigma1_yx
sigmas2 = (sigma2_z, sigma2_yx, sigma2_yx) if sigma2_z>0 else sigma2_yx
return sigmas1, sigmas2
    def showEvent(self, event):
        """Widen the dialog by 50% on show and focus the first YX slider."""
        self.resize(int(self.width()*1.5), self.height())
        # NOTE(review): setFocus() normally takes no argument or a Qt focus
        # reason; passing True here looks accidental -- confirm it behaves.
        self.firstSigmaSliderYX.setFocus(True)
    def closeEvent(self, event):
        """Emit sigClose so the owner can tear down the live preview."""
        self.sigClose.emit()
class edgeDetectionDialog(QDialog):
    """Live Sobel edge-detection preview dialog.

    Lets the user pick a channel plus blur (gaussian sigma) and sharpen
    (unsharp radius) amounts, previewing the filtered image in the main
    window's primary image item.
    """
    def __init__(self, mainWindow):
        # *mainWindow* may be None (standalone/test mode); a dummy channel
        # list is then used instead of the loaded position data.
        super().__init__(mainWindow)
        self.cancel = True
        self.mainWindow = mainWindow
        if mainWindow is not None:
            posData = self.mainWindow.data[self.mainWindow.pos_i]
            items = [posData.filename]
        else:
            items = ['test']
        try:
            # Best-effort: also list overlay channels when overlay data exists.
            posData = self.mainWindow.data[self.mainWindow.pos_i]
            items.extend(list(posData.ol_data_dict.keys()))
        except Exception as e:
            pass
        self.keys = items
        self.setWindowTitle('Edge detection')
        self.setWindowFlags(Qt.Tool | Qt.WindowStaysOnTopHint)
        mainLayout = QVBoxLayout()
        paramsLayout = QGridLayout()
        buttonsLayout = QHBoxLayout()
        channelCBLabel = QLabel('Channel:')
        mainLayout.addWidget(channelCBLabel)
        self.channelsComboBox = QComboBox()
        self.channelsComboBox.addItems(items)
        if mainWindow is not None:
            self.channelsComboBox.setCurrentText(posData.manualContrastKey)
            # Fall back to the main channel when no overlay is active.
            if not self.mainWindow.overlayButton.isChecked():
                self.channelsComboBox.setCurrentIndex(0)
        mainLayout.addWidget(self.channelsComboBox)
        row = 0
        sigmaQSLabel = QLabel('Blur:')
        paramsLayout.addWidget(sigmaQSLabel, row, 0)
        row += 1
        self.sigmaValLabel = QLabel('1.00')
        paramsLayout.addWidget(self.sigmaValLabel, row, 1)
        # Slider position 1..100 maps to sigma = position/20 (see sigmaSliderMoved);
        # initial value 20 therefore corresponds to sigma 1.0.
        self.sigmaSlider = QSlider(Qt.Horizontal)
        self.sigmaSlider.setMinimum(1)
        self.sigmaSlider.setMaximum(100)
        self.sigmaSlider.setValue(20)
        self.sigma = 1.0
        self.sigmaSlider.setTickPosition(QSlider.TicksBelow)
        self.sigmaSlider.setTickInterval(10)
        paramsLayout.addWidget(self.sigmaSlider, row, 0)
        row += 1
        sharpQSLabel = QLabel('Sharpen:')
        # padding: top, left, bottom, right
        sharpQSLabel.setStyleSheet("font-size:12px; padding:5px 0px 0px 0px;")
        paramsLayout.addWidget(sharpQSLabel, row, 0)
        row += 1
        self.sharpValLabel = QLabel('5.00')
        paramsLayout.addWidget(self.sharpValLabel, row, 1)
        # Slider position 1..100 maps to radius = 10 - position/10 (see
        # sharpSliderMoved); initial value 50 corresponds to radius 5.0.
        self.sharpSlider = QSlider(Qt.Horizontal)
        self.sharpSlider.setMinimum(1)
        self.sharpSlider.setMaximum(100)
        self.sharpSlider.setValue(50)
        self.radius = 5.0
        self.sharpSlider.setTickPosition(QSlider.TicksBelow)
        self.sharpSlider.setTickInterval(10)
        paramsLayout.addWidget(self.sharpSlider, row, 0)
        row += 1
        self.PreviewCheckBox = QCheckBox("Preview")
        self.PreviewCheckBox.setChecked(True)
        paramsLayout.addWidget(self.PreviewCheckBox, row, 0, 1, 2,
                               alignment=Qt.AlignCenter)
        closeButton = QPushButton('Close')
        buttonsLayout.addWidget(closeButton, alignment=Qt.AlignCenter)
        paramsLayout.setContentsMargins(0, 10, 0, 0)
        buttonsLayout.setContentsMargins(0, 10, 0, 0)
        mainLayout.addLayout(paramsLayout)
        mainLayout.addLayout(buttonsLayout)
        self.PreviewCheckBox.clicked.connect(self.preview_cb)
        self.sigmaSlider.sliderMoved.connect(self.sigmaSliderMoved)
        self.sharpSlider.sliderMoved.connect(self.sharpSliderMoved)
        self.channelsComboBox.currentTextChanged.connect(self.apply)
        closeButton.clicked.connect(self.close)
        self.setLayout(mainLayout)
        # Apply once immediately so the preview is live from the start.
        self.apply()
    def setSize(self):
        """Force the dialog width to 300 px while keeping position and height."""
        x = self.pos().x()
        y = self.pos().y()
        h = self.size().height()
        self.setGeometry(x, y, 300, h)
    def preview_cb(self, checked):
        """Toggle the live preview: restore the unfiltered image or re-apply."""
        if not checked:
            # NOTE(review): restoreNonFiltered is not defined in this class;
            # presumably inherited or monkey-patched elsewhere -- confirm.
            self.restoreNonFiltered()
            self.mainWindow.updateALLimg(only_ax1=True, updateSharp=False)
        else:
            self.getData()
            self.apply()
    def getData(self):
        """Fetch the selected channel's image/data and recompute the edge map."""
        key = self.channelsComboBox.currentText()
        posData = self.mainWindow.data[self.mainWindow.pos_i]
        if key.find(self.mainWindow.user_ch_name) != -1:
            img = self.mainWindow.getImage(normalizeIntens=False)
            data = posData.img_data
        else:
            img = self.mainWindow.getOlImg(key, normalizeIntens=False)
            data = posData.ol_data[key]
        if self.PreviewCheckBox.isChecked():
            # Contrast-equalize before edge detection so edges are comparable
            # across channels with different intensity ranges.
            self.img = skimage.exposure.equalize_adapthist(img)
            self.detectEdges()
        self.frame_i = posData.frame_i
        self.imgData = data
    def detectEdges(self):
        """Compute the Sobel edge magnitude of the current image."""
        self.edge = skimage.filters.sobel(self.img)
    def getFilteredImg(self):
        """Blur then unsharp-mask the edge map and wrap it for display."""
        img = self.edge.copy()
        # Blur
        img = skimage.filters.gaussian(img, sigma=self.sigma)
        # Sharpen
        img = img - skimage.filters.gaussian(img, sigma=self.radius)
        if self.mainWindow.overlayButton.isChecked():
            key = self.channelsComboBox.currentText()
            img = self.mainWindow.getOverlayImg(
                fluoData=(img, key), setImg=False
            )
        else:
            img = self.mainWindow.getImageWithCmap(img=img)
        return img
    def apply(self):
        """Recompute the filter and push the result to the main image item."""
        self.getData()
        img = self.getFilteredImg()
        if self.PreviewCheckBox.isChecked():
            self.mainWindow.img1.setImage(img)
            # h = self.mainWindow.img1.getHistogram()
            # self.mainWindow.hist.plot.setData(*h)
    def sigmaSliderMoved(self, intVal):
        """Map slider position (1..100) to sigma (0.05..5.0) and re-apply."""
        self.sigma = intVal/20
        self.sigmaValLabel.setText(f'{self.sigma:.2f}')
        self.apply()
    def sharpSliderMoved(self, intVal):
        """Map slider position to an inverse radius (clamped at 0.15) and re-apply."""
        self.radius = 10 - intVal/10
        if self.radius < 0.15:
            self.radius = 0.15
        # The label shows intVal/10 (sharpen amount), not the internal radius.
        self.sharpValLabel.setText(f'{intVal/10:.2f}')
        self.apply()
    def closeEvent(self, event):
        """Untoggle the toolbar action and restore the unfiltered image."""
        self.mainWindow.edgeDetectorAction.setChecked(False)
        self.mainWindow.updateALLimg(only_ax1=True, updateFilters=False)
class entropyFilterDialog(QDialog):
    """Live local-entropy filter preview dialog.

    Lets the user pick a channel and a disk radius, previewing
    skimage.filters.rank.entropy of the selected image in the main window.
    """
    def __init__(self, mainWindow):
        # *mainWindow* may be None (standalone/test mode); a dummy channel
        # list is then used instead of the loaded position data.
        super().__init__(mainWindow)
        self.cancel = True
        self.mainWindow = mainWindow
        if mainWindow is not None:
            posData = self.mainWindow.data[self.mainWindow.pos_i]
            items = [posData.filename]
        else:
            items = ['test']
        try:
            # Best-effort: also list overlay channels when overlay data exists.
            posData = self.mainWindow.data[self.mainWindow.pos_i]
            items.extend(list(posData.ol_data_dict.keys()))
        except Exception:
            pass
        self.keys = items
        # BUG FIX: the title was copy-pasted from edgeDetectionDialog and
        # wrongly read 'Edge detection' for this entropy-filter dialog.
        self.setWindowTitle('Entropy filter')
        self.setWindowFlags(Qt.Tool | Qt.WindowStaysOnTopHint)
        mainLayout = QVBoxLayout()
        paramsLayout = QGridLayout()
        buttonsLayout = QHBoxLayout()
        channelCBLabel = QLabel('Channel:')
        mainLayout.addWidget(channelCBLabel)
        self.channelsComboBox = QComboBox()
        self.channelsComboBox.addItems(items)
        if mainWindow is not None:
            self.channelsComboBox.setCurrentText(posData.manualContrastKey)
        mainLayout.addWidget(self.channelsComboBox)
        row = 0
        sigmaQSLabel = QLabel('Radius: ')
        paramsLayout.addWidget(sigmaQSLabel, row, 0)
        row += 1
        self.radiusValLabel = QLabel('10')
        paramsLayout.addWidget(self.radiusValLabel, row, 1)
        # Disk radius (pixels) for the local entropy neighborhood.
        self.radiusSlider = QSlider(Qt.Horizontal)
        self.radiusSlider.setMinimum(1)
        self.radiusSlider.setMaximum(100)
        self.radiusSlider.setValue(10)
        self.radiusSlider.setTickPosition(QSlider.TicksBelow)
        self.radiusSlider.setTickInterval(10)
        paramsLayout.addWidget(self.radiusSlider, row, 0)
        row += 1
        self.PreviewCheckBox = QCheckBox("Preview")
        self.PreviewCheckBox.setChecked(True)
        paramsLayout.addWidget(self.PreviewCheckBox, row, 0, 1, 2,
                               alignment=Qt.AlignCenter)
        closeButton = QPushButton('Close')
        buttonsLayout.addWidget(closeButton, alignment=Qt.AlignCenter)
        paramsLayout.setContentsMargins(0, 10, 0, 0)
        buttonsLayout.setContentsMargins(0, 10, 0, 0)
        mainLayout.addLayout(paramsLayout)
        mainLayout.addLayout(buttonsLayout)
        self.PreviewCheckBox.clicked.connect(self.preview_cb)
        self.radiusSlider.sliderMoved.connect(self.radiusSliderMoved)
        self.channelsComboBox.currentTextChanged.connect(self.apply)
        closeButton.clicked.connect(self.close)
        self.setLayout(mainLayout)
        # Apply once immediately so the preview is live from the start.
        self.apply()
    def setSize(self):
        """Force the dialog width to 300 px while keeping position and height."""
        x = self.pos().x()
        y = self.pos().y()
        h = self.size().height()
        self.setGeometry(x, y, 300, h)
    def preview_cb(self, checked):
        """Toggle the live preview: restore the unfiltered image or re-apply."""
        if not checked:
            # NOTE(review): restoreNonFiltered is not defined in this class;
            # presumably inherited or monkey-patched elsewhere -- confirm.
            self.restoreNonFiltered()
            self.mainWindow.updateALLimg(only_ax1=True, updateSharp=False)
        else:
            self.getData()
            self.apply()
    def getData(self):
        """Fetch the selected channel's image (as ubyte, required by rank filters)."""
        key = self.channelsComboBox.currentText()
        posData = self.mainWindow.data[self.mainWindow.pos_i]
        if key.find(self.mainWindow.user_ch_name) != -1:
            img = self.mainWindow.getImage()
            data = posData.img_data
        else:
            img = self.mainWindow.getOlImg(key)
            data = posData.ol_data[key]
        self.img = skimage.img_as_ubyte(img)
        self.frame_i = posData.frame_i
        self.imgData = data
    def getFilteredImg(self):
        """Compute the local entropy image and wrap it for display."""
        radius = self.radiusSlider.sliderPosition()
        selem = skimage.morphology.disk(radius)
        entropyImg = skimage.filters.rank.entropy(self.img, selem)
        if self.mainWindow.overlayButton.isChecked():
            key = self.channelsComboBox.currentText()
            img = self.mainWindow.getOverlayImg(
                fluoData=(entropyImg, key), setImg=False
            )
        else:
            img = self.mainWindow.getImageWithCmap(img=entropyImg)
        return img
    def apply(self):
        """Recompute the filter and push the result to the main image item."""
        self.getData()
        img = self.getFilteredImg()
        if self.PreviewCheckBox.isChecked():
            self.mainWindow.img1.setImage(img)
            # h = self.mainWindow.img1.getHistogram()
            # self.mainWindow.hist.plot.setData(*h)
    def radiusSliderMoved(self, intVal):
        """Update the radius readout and re-apply the filter."""
        self.radiusValLabel.setText(f'{intVal}')
        self.apply()
    def closeEvent(self, event):
        """Untoggle the toolbar action and restore the unfiltered image."""
        self.mainWindow.entropyFilterAction.setChecked(False)
        self.mainWindow.updateALLimg(only_ax1=True, updateFilters=False)
class randomWalkerDialog(QDialog):
    """Dialog to interactively seed and run scikit-image's random walker
    segmentation on the image displayed in the main window.

    Two sliders define the background/foreground intensity thresholds used
    to place the walker's seed markers; the marker overlay is previewed live
    and the segmentation is computed on demand.
    """
    def __init__(self, mainWindow):
        super().__init__(mainWindow)
        self.cancel = True
        self.mainWindow = mainWindow

        # Channels selectable for segmentation: the loaded file plus any
        # overlaid channels (if present).
        if mainWindow is not None:
            posData = self.mainWindow.data[self.mainWindow.pos_i]
            items = [posData.filename]
        else:
            items = ['test']
        try:
            posData = self.mainWindow.data[self.mainWindow.pos_i]
            items.extend(list(posData.ol_data_dict.keys()))
        except Exception as e:
            # No overlay data loaded --> only the main channel is offered.
            pass
        self.keys = items

        self.setWindowTitle('Random walker segmentation')

        # Marker colours: [background, foreground]
        self.colors = [self.mainWindow.RWbkgrColor,
                       self.mainWindow.RWforegrColor]

        mainLayout = QVBoxLayout()
        paramsLayout = QGridLayout()
        buttonsLayout = QHBoxLayout()

        self.mainWindow.clearAllItems()

        row = 0
        paramsLayout.addWidget(QLabel('Background threshold:'), row, 0)
        row += 1
        self.bkgrThreshValLabel = QLabel('0.05')
        paramsLayout.addWidget(self.bkgrThreshValLabel, row, 1)
        # Slider positions are integers 1..100 interpreted as percentages.
        self.bkgrThreshSlider = QSlider(Qt.Horizontal)
        self.bkgrThreshSlider.setMinimum(1)
        self.bkgrThreshSlider.setMaximum(100)
        self.bkgrThreshSlider.setValue(5)
        self.bkgrThreshSlider.setTickPosition(QSlider.TicksBelow)
        self.bkgrThreshSlider.setTickInterval(10)
        paramsLayout.addWidget(self.bkgrThreshSlider, row, 0)

        row += 1
        foregrQSLabel = QLabel('Foreground threshold:')
        # padding: top, left, bottom, right
        foregrQSLabel.setStyleSheet("font-size:12px; padding:5px 0px 0px 0px;")
        paramsLayout.addWidget(foregrQSLabel, row, 0)
        row += 1
        self.foregrThreshValLabel = QLabel('0.95')
        paramsLayout.addWidget(self.foregrThreshValLabel, row, 1)
        self.foregrThreshSlider = QSlider(Qt.Horizontal)
        self.foregrThreshSlider.setMinimum(1)
        self.foregrThreshSlider.setMaximum(100)
        self.foregrThreshSlider.setValue(95)
        self.foregrThreshSlider.setTickPosition(QSlider.TicksBelow)
        self.foregrThreshSlider.setTickInterval(10)
        paramsLayout.addWidget(self.foregrThreshSlider, row, 0)

        # Parameters link label
        row += 1
        url1 = 'https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_random_walker_segmentation.html'
        url2 = 'https://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.random_walker'
        htmlTxt1 = f'<a href=\"{url1}">here</a>'
        htmlTxt2 = f'<a href=\"{url2}">here</a>'
        seeHereLabel = QLabel()
        seeHereLabel.setText(f'See {htmlTxt1} and {htmlTxt2} for details '
                             'about Random walker segmentation.')
        seeHereLabel.setTextFormat(Qt.RichText)
        seeHereLabel.setTextInteractionFlags(Qt.TextBrowserInteraction)
        seeHereLabel.setOpenExternalLinks(True)
        font = QtGui.QFont()
        font.setPixelSize(13)
        seeHereLabel.setFont(font)
        seeHereLabel.setStyleSheet("padding:12px 0px 0px 0px;")
        paramsLayout.addWidget(seeHereLabel, row, 0, 1, 2)

        computeButton = QPushButton('Compute segmentation')
        closeButton = QPushButton('Close')
        buttonsLayout.addWidget(computeButton, alignment=Qt.AlignRight)
        buttonsLayout.addWidget(closeButton, alignment=Qt.AlignLeft)
        paramsLayout.setContentsMargins(0, 10, 0, 0)
        buttonsLayout.setContentsMargins(0, 10, 0, 0)
        mainLayout.addLayout(paramsLayout)
        mainLayout.addLayout(buttonsLayout)

        self.bkgrThreshSlider.sliderMoved.connect(self.bkgrSliderMoved)
        self.foregrThreshSlider.sliderMoved.connect(self.foregrSliderMoved)
        computeButton.clicked.connect(self.computeSegmAndPlot)
        closeButton.clicked.connect(self.close)
        self.setLayout(mainLayout)

        self.getImage()
        self.plotMarkers()

    def getImage(self):
        """Grab the displayed image, normalise it to [0, 1] and keep a uint8
        RGB copy for painting the marker overlay."""
        img = self.mainWindow.getDisplayedCellsImg()
        self.img = img/img.max()
        self.imgRGB = (skimage.color.gray2rgb(self.img)*255).astype(np.uint8)

    def setSize(self):
        """Enforce a minimum dialog width of 400 px."""
        x = self.pos().x()
        y = self.pos().y()
        h = self.size().height()
        w = self.size().width()
        if w < 400:
            w = 400
        self.setGeometry(x, y, w, h)

    def plotMarkers(self):
        """Preview the seed markers: paint background-seeded pixels with the
        first colour and foreground-seeded pixels with the second."""
        imgMin, imgMax = self.computeMarkers()
        img = self.img
        imgRGB = self.imgRGB.copy()
        R, G, B = self.colors[0]
        imgRGB[:, :, 0][img < imgMin] = R
        imgRGB[:, :, 1][img < imgMin] = G
        imgRGB[:, :, 2][img < imgMin] = B
        R, G, B = self.colors[1]
        imgRGB[:, :, 0][img > imgMax] = R
        imgRGB[:, :, 1][img > imgMax] = G
        imgRGB[:, :, 2][img > imgMax] = B
        self.mainWindow.img1.setImage(imgRGB)

    def computeMarkers(self):
        """Build the random-walker seed array from the slider thresholds.

        Pixels darker than ``imgMin`` are seeded as background (label 1),
        brighter than ``imgMax`` as foreground (label 2); everything in
        between is left for the walker to decide.

        Returns:
            tuple: the ``(imgMin, imgMax)`` intensity cut-offs.
        """
        bkgrThresh = self.bkgrThreshSlider.sliderPosition()/100
        foregrThresh = self.foregrThreshSlider.sliderPosition()/100
        img = self.img
        self.markers = np.zeros(img.shape, np.uint8)
        imgRange = img.max() - img.min()
        imgMin = img.min() + imgRange*bkgrThresh
        imgMax = img.min() + imgRange*foregrThresh
        self.markers[img < imgMin] = 1
        self.markers[img > imgMax] = 2
        return imgMin, imgMax

    def computeSegm(self, checked=True):
        """Run the random walker and store the labelled result on posData.

        Returns:
            float: elapsed wall-clock time (s) of the segmentation step.
        """
        self.mainWindow.storeUndoRedoStates(False)
        self.mainWindow.titleLabel.setText(
            'Randomly walking around... ', color='w')
        img = self.img
        img = skimage.exposure.rescale_intensity(img)
        t0 = time.time()
        lab = skimage.segmentation.random_walker(img, self.markers, mode='bf')
        # Connected components of the foreground (walker label 2)
        lab = skimage.measure.label(lab>1)
        t1 = time.time()
        if len(np.unique(lab)) > 2:
            # FIX: the ``in_place`` keyword was deprecated and later removed
            # from skimage.morphology.remove_small_objects; assigning the
            # returned array is the supported equivalent.
            lab = skimage.morphology.remove_small_objects(lab, min_size=5)
        posData = self.mainWindow.data[self.mainWindow.pos_i]
        posData.lab = lab
        return t1-t0

    def computeSegmAndPlot(self):
        """Segment, re-track and refresh the main window display."""
        deltaT = self.computeSegm()

        self.mainWindow.update_rp()
        self.mainWindow.tracking(enforce=True)
        self.mainWindow.updateALLimg()
        self.mainWindow.warnEditingWithCca_df('Random Walker segmentation')
        txt = f'Random Walker segmentation computed in {deltaT:.3f} s'
        print('-----------------')
        print(txt)
        print('=================')
        # self.mainWindow.titleLabel.setText(txt, color='g')

    def bkgrSliderMoved(self, intVal):
        """Update the background-threshold readout and refresh the preview."""
        self.bkgrThreshValLabel.setText(f'{intVal/100:.2f}')
        self.plotMarkers()

    def foregrSliderMoved(self, intVal):
        """Update the foreground-threshold readout and refresh the preview."""
        self.foregrThreshValLabel.setText(f'{intVal/100:.2f}')
        self.plotMarkers()

    def closeEvent(self, event):
        """Reset the active segmentation model and refresh the display."""
        self.mainWindow.segmModel = ''
        self.mainWindow.updateALLimg()
class FutureFramesAction_QDialog(QDialog):
def __init__(self, frame_i, last_tracked_i, change_txt,
applyTrackingB=False, parent=None):
self.decision = None
self.last_tracked_i = last_tracked_i
super().__init__(parent)
self.setWindowTitle('Future frames action?')
mainLayout = QVBoxLayout()
txtLayout = QVBoxLayout()
doNotShowLayout = QVBoxLayout()
buttonsLayout = QVBoxLayout()
txt = (
'You already visited/checked future frames '
f'{frame_i+1}-{last_tracked_i}.\n\n'
f'The requested "{change_txt}" | |
# <gh_stars>0  (repository-metadata artifact; kept as a comment so the module parses)
'''
tags
====
The following methods allow for interaction into the Tenable.io
:devportal:`tagging <tags>` API endpoints.
Methods available on ``tio.tags``:
.. rst-class:: hide-signature
.. autoclass:: TagsAPI
.. automethod:: create
.. automethod:: create_category
.. automethod:: delete
.. automethod:: delete_category
.. automethod:: details
.. automethod:: details_category
.. automethod:: edit
.. automethod:: edit_category
.. automethod:: list
.. automethod:: list_categories
'''
from .base import TIOEndpoint, TIOIterator
from tenable.errors import UnexpectedValueError
class TagsIterator(TIOIterator):
    '''
    The tags iterator provides a scalable way to work through tag list result
    sets of any size. The iterator will walk through each page of data,
    returning one record at a time. If it reaches the end of a page of
    records, then it will request the next page of information and then continue
    to return records from the next page (and the next, and the next) until the
    counter reaches the total number of records that the API has reported.

    Attributes:
        count (int): The current number of records that have been returned
        page (list):
            The current page of data being walked through. pages will be
            cycled through as the iterator requests more information from the
            API.
        page_count (int): The number of records returned from the current page.
        total (int):
            The total number of records that exist for the current request.
    '''
    # All paging behaviour is inherited unchanged from TIOIterator; this
    # subclass exists only to give tag listings their own iterator type.
    pass
class TagsAPI(TIOEndpoint):
_filterset_tags = {
'value': {'operators': ['eq', 'match'], 'pattern': None, 'choices': None},
'category_name': {'operators': ['eq', 'match'], 'pattern': None, 'choices': None},
'description': {'operators': ['eq', 'match'], 'pattern': None, 'choices': None},
'updated_at': {'operators': ['date-eq', 'date-gt', 'date-lt'], 'pattern': '\\d+', 'choices': None},
'updated_by': {'operators': ['eq'], 'pattern': None} # Add UUID regex here
}
_filterset_categories = {
'name': {'operators': ['eq', 'match'], 'pattern': None, 'choices': None},
'description': {'operators': ['eq', 'match'], 'pattern': None, 'choices': None},
'created_at': {'operators': ['date-eq', 'date-gt', 'date-lt'], 'pattern': '\\d+', 'choices': None},
'updated_at': {'operators': ['date-eq', 'date-gt', 'date-lt'], 'pattern': '\\d+', 'choices': None},
'updated_by': {'operators': ['eq'], 'pattern': None, 'choices': None} # Add UUID regex here
}
def create(self, category, value, description=None, filters=None,
category_description=None):
'''
Create a tag category/value pair
:devportal:`tags: create-tag-value <tags-create-tag-value-1>`
Args:
category (str):
The category name, or the category UUID. If the category does
not exist, then it will be created along with the new value.
value (str):
The value for the tag.
category_description (str, optional):
If the category is to be created, a description can be
optionally provided.
description (str, optional):
A description for the Category/Value pair.
filters (dict, optional):
The filter dictionary as specified within the API documents.
Returns:
:obj:`dict`:
Tag value resource record
Examples:
Creating a new tag & Category:
>>> tio.tags.create('Location', 'Chicago')
Creating a new Tag value in the existing Location Category:
>>> tio.tags.create('Location', 'New York')
Creating a new Tag Value within a Category by UUID:
>>> tio.tags.create('00000000-0000-0000-0000-000000000000', 'Madison')
'''
payload = dict()
# First lets see if the category is a UUID or a general string. If its
# a UUID, then we will pass the value of category into the category_uuid
# parameter, if not (but is still a string), then we will pass into
# category_name
try:
payload['category_uuid'] = self._check('category', category, 'uuid')
except UnexpectedValueError:
payload['category_name'] = self._check('category', category, str)
payload['value'] = self._check('value', value, str)
if description:
payload['description'] = self._check('description', description, str)
if category_description:
payload['category_description'] = self._check(
'category_description', category_description, str)
if filters:
payload['filters'] = self._check('filters', filters, dict)
return self._api.post('tags/values', json=payload).json()
def create_category(self, name, description=None):
'''
Creates a new category
:devportal:`tags: create-category <tags-create-tag-category>`
Args:
name (str): The name of the category to create
description (str, optional): Description for the category to create.
Returns:
:obj:`dict`:
Tag category resource record
Examples:
>>> tio.tags.create_category('Location')
'''
payload = dict()
payload['name'] = self._check('name', name, str)
if description:
payload['description'] = self._check('description', description, str)
return self._api.post('tags/categories', json=payload).json()
def delete(self, tag_value_uuid):
'''
Deletes a tag category/value pair.
:devportal:`tag: delete tag value <tags-delete-tag-value>`
Args:
tag_value_uuid (str):
The unique identifier for the c/v pair to be deleted.
Returns:
:obj:`None`
Examples:
>>> tio.tags.delete('00000000-0000-0000-0000-000000000000')
'''
self._api.delete('tags/values/{}'.format(
self._check('tag_value_uuid', tag_value_uuid, 'uuid')))
    def delete_category(self, tag_category_uuid):
        '''
        Deletes a tag category.

        :devportal:`tag: delete tag category <tags-delete-tag-category>`

        Args:
            tag_category_uuid (str):
                The unique identifier for the tag category to be deleted.

        Returns:
            :obj:`None`

        Examples:
            >>> tio.tags.delete_category('00000000-0000-0000-0000-000000000000')
        '''
        self._api.delete('tags/categories/{}'.format(
            self._check('tag_category_uuid', tag_category_uuid, 'uuid')))
def details(self, tag_value_uuid):
'''
Retrieves the details for a specific tag category/value pair.
:devportal:`tag: tag details <tags-tag-value-details>`
Args:
tag_value_uuid (str):
The unique identifier for the c/v pair
Returns:
:obj:`dict`:
Tag value resource record
Examples:
>>> tio.tags.details('00000000-0000-0000-0000-000000000000')
'''
return self._api.get('tags/values/{}'.format(self._check(
'tag_value_uuid', tag_value_uuid, 'uuid'))).json()
def details_category(self, tag_category_uuid):
'''
Retrieves the details for a specific tag category.
:devportal:`tag: tag category details <tags-tag-category-details>`
Args:
tag_category_uuid (str):
The unique identifier for the category
Returns:
:obj:`dict`:
Tag category resource record
Examples:
>>> tio.tags.details_category('00000000-0000-0000-0000-000000000000')
'''
return self._api.get('tags/categories/{}'.format(self._check(
'tag_category_uuid', tag_category_uuid, 'uuid'))).json()
def edit(self, tag_value_uuid, value=None, description=None, filters=None):
'''
Updates Tag category/value pair information.
:devportal:`tag: edit tag value <tags-update-tag-value>`
Args:
tag_value_uuid (str):
The unique identifier for the c/v pair to be edited.
value (str, optional):
The new name for the category value.
description (str, optional):
New description for the category value.
filters (dict, optional):
The filter dictionary as specified within the API documents.
Returns:
:obj:`dict`:
Tag value resource record.
Examples:
>>> tio.tags.edit('00000000-0000-0000-0000-000000000000',
... name='NewValueName')
'''
payload = dict()
payload['value'] = self._check('value', value, str)
if description:
payload['description'] = self._check('description', description, str)
if filters:
payload['filters'] = self._check('filters', filters, dict)
return self._api.put('tags/values/{}'.format(self._check(
'tag_value_uuid', tag_value_uuid, 'uuid')), json=payload).json()
def edit_category(self, tag_category_uuid, name=None, description=None):
'''
Updates Tag category information.
:devportal:`tag: edit tag category <tags-edit-tag-category>`
Args:
tag_category_uuid (str):
The unique identifier for the category to be edited.
name (str, optional):
The new name for the category.
description (str, optional):
New description for the category.
Returns:
:obj:`dict`:
Tag category resource record.
Examples:
>>> tio.tags.edit_category('00000000-0000-0000-0000-000000000000',
... name='NewValueName')
'''
payload = dict()
payload['name'] = self._check('name', name, str)
if description:
payload['description'] = self._check('description', description, str)
return self._api.put('tags/categories/{}'.format(self._check(
'tag_category_uuid', tag_category_uuid, 'uuid')), json=payload).json()
def _tag_list_constructor(self, filters, filterdefs, filter_type, sort):
'''
A simple constructor to handle constructing the query parameters for the
list and list_category methods.
'''
query = self._parse_filters(filters, filterdefs, rtype='colon')
if filter_type:
query['ft'] = self._check('filter_type', filter_type, str,
choices=['AND', 'OR'], case='upper')
if sort:
query['sort'] = self._check('sort', sort, str,
choices=[k for k in filterdefs.keys()])
return query
def list(self, *filters, **kw):
'''
Retrieves a list of tag category/value pairs based off of the filters
defined within the query.
:devportal:`tags: list tags <tags-list-tag-values>`
Args:
*filters (tuple, optional):
A defined filter tuple consisting of the name, operator, and
value. Example: ``('category_name', 'eq', 'Location')``.
filter_type (str, optional):
If multiple filters are defined, the filter_type toggles the
behavior as to how these filters are used. Either all of the
filters have to match (``AND``) or any of the filters have to
match (``OR``). If not specified, the default behavior is to
assume filter_type is ``AND``.
limit (int, optional):
How many records should be returned in a given page. If nothing
is set, it will default to 1000 records.
pages (int, optional):
How many pages of data would you like to request?
offset (int, optional):
How many records to skip before returning results. If nothing
is set, it will default to 0.
sort (str, optional):
What field to sort the results on.
Returns:
:obj:`TagIterator`:
An iterator that handles the pagination of the results
Examples:
Return all of the Tag Values:
>>> for tag in tio.tags.list():
... pprint(tag)
Return all of the Tags of the Location category:
>>> for tag in tio.tags.list(('category_name', 'eq', 'Location')):
... pprint(tag)
'''
query = self._tag_list_constructor(filters, self._filterset_tags,
kw['filter_type'] if 'filter_type' in kw else None,
kw['sort'] if 'sort' in kw else None)
return TagsIterator(self._api,
_limit=self._check('limit', kw.get('limit', 1000), int),
_offset=self._check('offset', kw.get('offset', 0), int),
_pages_total=self._check('pages', kw.get('pages'), int),
_query=query,
_path='tags/values',
_resource='values'
)
def list_categories(self, *filters, **kw):
'''
Retrieves a list of tag categories based off of the filters defined
within the query.
:devportal:`tags: list categories <tags-list-tag-categories>`
Args:
*filters (tuple, optional):
A defined filter tuple consisting of the name, operator, and
value. Example: ``('name', 'eq', 'Location')``.
filter_type (str, optional):
If multiple filters are defined, the filter_type toggles the
behavior as to how these filters are used. Either all of the
filters have to match (``AND``) or any of | |
"""
Calculations iodine emissions with updated iodide field
"""
import numpy as np
import pandas as pd
import xarray as xr
import sparse2spatial.utils as utils
# import AC_tools (https://github.com/tsherwen/AC_tools.git)
import AC_tools as AC
def compare_emissions(wd_dict=None, inorg_emiss=None, specs=None):
    """
    Compare emissions between runs with different parameterisations

    Parameters
    -------
    wd_dict (dict): dictionary of names (keys) and locations of model runs
    inorg_emiss (dict): dictionary of inorganic iodine emissions for runs
    specs (list): species names matching the arrays in inorg_emiss

    Returns
    -------
    (pd.DataFrame)
    """
    # Which runs are being compared?
    if wd_dict is None:
        wd_dict = get_emissions_testing_runs()
    params = sorted(wd_dict)
    # Ozone burdens per run (converted from Gg to Tg)
    burdens = [AC.get_O3_burden(wd_dict[param]).sum() / 1E3
               for param in params]
    df = pd.DataFrame(burdens, index=params, columns=['O3 bud.'])
    # Inorganic emissions per run
    if inorg_emiss is None:
        inorg_emiss, specs = get_inorg_emissions_for_params(wd_dict=wd_dict)
    # Reduce each per-species array to its total
    for param in params:
        inorg_emiss[param] = [arr.sum() for arr in inorg_emiss[param]]
    # Combine the burden and emission summaries
    emiss_names = [f'{spec} emiss.' for spec in specs]
    emiss_df = pd.DataFrame(inorg_emiss, index=emiss_names)
    df = pd.concat([df, emiss_df.T], axis=1)
    # Add total inorganic flux? (Hasghed out for now )
    # df['Inorg emiss'] = df[emiss_names].sum(axis=1)
    # Percentage change of the RFR(offline) run vs. the two references
    df = df.T
    param = 'RFR(offline)'
    for ref in ('Chance2014', 'MacDonald2014'):
        df[f'({param}% vs. {ref})'] = (df[param] - df[ref]) / df[ref] * 100
    return df.T
def get_emissions_testing_runs():
    """
    Get dictionary of emission model run locations

    Returns
    -------
    (dict): run label (key) => model-run output folder (value)
    """
    # folder = get_file_locations('earth0_home_dir')
    folder = ''
    folder += '/data/all_model_simulations/iodine_runs/iGEOSChem_4.0_v10/'
    # Locations of model runs with different iodide fields
    RFR_dir = 'run.XS.UPa.FP.EU.BC.II.FP.2014.NEW_OFFLINE_IODIDE.several_months/'
    Chance_dir = '/run.XS.UPa.FP.EU.BC.II.FP.2014.re_run4HEMCO_diag/'
    MacDonald_dir = 'run.XS.UPa.FP.EU.BC.II.FP.2014.Chance_iodide/'
    extr_dir = '/'
    # extr_dir = '/spin_up/'
    # extr_dir = '/test_dates/'
    # NOTE(review): 'Chance2014' maps to MacDonald_dir (whose folder name says
    # 'Chance_iodide') and 'MacDonald2014' maps to Chance_dir -- either the
    # variable names or the labels look swapped; confirm which folder holds
    # which parameterisation before trusting the labels.  Also note extr_dir
    # is not appended to the 'MacDonald2014' entry.
    wd_dict = {
        'Chance2014': folder + MacDonald_dir + extr_dir,
        'MacDonald2014': folder + Chance_dir,
        'RFR(offline)': folder + RFR_dir + extr_dir,
    }
    return wd_dict
def get_inorg_emissions_for_params(wd_dict=None, res='4x5'):
    """
    Get inorganic emissions for the difference parameterisations

    Parameters
    -------
    wd_dict (dict): run label (key) => model-run output folder (value)
    res (str): horizontal resolution of the model output

    Returns
    -------
    (dict, list): per-run emission arrays and the species names they map to
    """
    from A_PD_hal_paper_analysis_figures.halogen_family_emission_printer import get_species_emiss_Tg_per_yr
    specs = ['HOI', 'I2']
    # Surface area for the requested model resolution
    s_area = AC.get_surface_area(res=res)
    # Compute the per-run emissions
    inorg_emiss = {}
    for param, wd in wd_dict.items():
        print(param)
        months = AC.get_gc_months(wd=wd)
        years = AC.get_gc_years(wd=wd)
        # Per-species emission arrays (Tg I / yr)
        ars = get_species_emiss_Tg_per_yr(wd=wd, specs=specs, ref_spec='I',
                                          s_area=s_area, years=years,
                                          months=months)
        # Append the HOI + I2 total as a pseudo-species
        ars.append(ars[0] + ars[1])
        inorg_emiss[param] = ars
    return inorg_emiss, specs + ['Inorg']
def add_Inorg_and_Org_totals2array(ds, InOrgVar='Inorg_Total', OrgVar='Org_Total'):
    """
    Add inorganic and organic sub totals to dataset

    Parameters
    -------
    ds (xr.Dataset): HEMCO diagnostics dataset with per-species emissions
    InOrgVar (str): name for the new inorganic-total variable
    OrgVar (str): name for the new organic-total variable

    Returns
    -------
    (xr.Dataset): the same dataset with the two subtotal variables added
    """
    def _add_total(total_var, component_vars):
        # Template the new variable off the first component, then sum the
        # remaining components into it (previously duplicated inline for the
        # inorganic and organic cases).
        ds[total_var] = ds[component_vars[0]].copy()
        arr = ds[total_var].values
        for var_ in component_vars[1:]:
            print(var_)
            arr = arr + ds[var_].values
        ds[total_var].values = arr
        attrs = ds[total_var].attrs
        attrs['long_name'] = total_var
        ds[total_var].attrs = attrs

    # Species considered organic vs. inorganic iodine emissions
    OrgVars = [
        'EmisCH2IBr_Ocean', 'EmisCH2ICl_Ocean', 'EmisCH2I2_Ocean', 'EmisCH3I_Ocean',
    ]
    InOrgVars = ['EmisI2_Ocean', 'EmisHOI_Ocean', ]
    # Inorganic first, then organic (same order as before)
    _add_total(InOrgVar, InOrgVars)
    _add_total(OrgVar, OrgVars)
    return ds
def plot_up_surface_emissions(dsDH=None, runs=None, show_plot=False,
wds=None, dpi=320):
"""
Plot up emissions using HEMCO NetCDF files
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# names of runs to plot up?
if isinstance(wds, type(None)):
wds = get_run_dict4EGU_runs()
if isinstance(runs, type(None)):
runs = list(wds.keys())
# - Add aggregated values to ds
OrgVars = [
'EmisCH2IBr_Ocean', 'EmisCH2ICl_Ocean', 'EmisCH2I2_Ocean', 'EmisCH3I_Ocean',
]
InOrgVars = ['EmisI2_Ocean', 'EmisHOI_Ocean', ]
vars2use = OrgVars + InOrgVars
# Aggregate variables to use?
TotalVar = 'I_Total'
InOrgVar = 'Inorg_Total'
OrgVar = 'Org_Total'
# Setup the colourbar to use
Divergent_cmap = plt.get_cmap('RdBu_r')
cmap = AC.get_colormap(np.arange(10))
# loop my run and add values
for run in runs:
# which dataset to use?
print(run)
ds = dsDH[run]
# Add Inorg and org subtotals to array
ds = add_Inorg_and_Org_totals2array(ds=ds)
# Calculate totals
# template off the first species
ds[TotalVar] = dsDH[run][vars2use[0]].copy()
# Sum values to this
arr = ds[TotalVar].values
for var_ in vars2use[1:]:
print(var_)
arr = arr + dsDH[run][var_].values
ds[TotalVar].values = arr
attrs = ds[TotalVar].attrs
attrs['long_name'] = TotalVar
ds[TotalVar].attrs = attrs
# Setup PDF to save plot to
savetitle = 'Oi_prj_emissions_diff_plots_EGU_runs'
dpi = 320
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# - Plot up emissions spatial distribution of total emissions
for run in runs:
print(run)
# dataset to plot
ds = dsDH[run][[TotalVar]]
# use annual sum of emissions
ds = ds.sum(dim='time')
# - Loop and plot species
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
ds[TotalVar].plot.imshow(x='lon', y='lat',
ax=ax,
cmap=cmap,
transform=ccrs.PlateCarree())
# Add a title to the plot to the plot
PtrStr = "Total iodine emissions (Gg I) in '{}'"
PtrStr += "\n(max={:.1f}, min={:.1f}, sum={:.1f})"
sum_ = float(ds[TotalVar].sum().values)
max_ = float(ds[TotalVar].max().values)
min_ = float(ds[TotalVar].min().values)
plt.title(PtrStr.format(run, max_, min_, sum_))
# Beautify the plot
ax.coastlines()
ax.set_global()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# - Plot up emissions spatial distribution of inorg emissions
runs2plot = [i for i in runs if (i != 'No_HOI_I2')]
for run in runs2plot:
print(run)
# dataset to plot
ds = dsDH[run][[InOrgVar]]
# use annual sum of emissions
ds = ds.sum(dim='time')
# - Loop and plot species
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
ds[InOrgVar].plot.imshow(x='lon', y='lat',
ax=ax,
cmap=cmap,
transform=ccrs.PlateCarree())
# Add a title to the plot
PtrStr = "Total Inorganic iodine emissions (Gg I) in '{}'"
PtrStr += "\n(max={:.1f}, min={:.1f}, sum={:.1f})"
sum_ = float(ds[InOrgVar].sum().values)
max_ = float(ds[InOrgVar].max().values)
min_ = float(ds[InOrgVar].min().values)
plt.title(PtrStr.format(run, max_, min_, sum_))
# Beautify the plot
ax.coastlines()
ax.set_global()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# - Plot up emissions spatial distribution inorg emissions (% of total)
runs2plot = [i for i in runs if (i != 'No_HOI_I2')]
for run in runs2plot:
print(run)
# dataset to plot
ds = dsDH[run][[InOrgVar, TotalVar]]
# use annual sum of emissions
ds = ds.sum(dim='time')
# Calculate the difference (perecent)
DIFFvar = 'Inorg/Total'
ds[DIFFvar] = ds[InOrgVar].copy()
ds[DIFFvar].values = ds[InOrgVar].values/ds[TotalVar].values*100
# Loop and plot species
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
ds[DIFFvar].plot.imshow(x='lon', y='lat',
ax=ax,
cmap=cmap,
transform=ccrs.PlateCarree())
# Add a title to the plot
PtrStr = "Total Inorganic iodine emissions (% of total) in '{}' \n"
PtrStr += '(max={:.1f}, min={:.1f})'
max_ = float(ds[DIFFvar].max().values)
min_ = float(ds[DIFFvar].min().values)
plt.title(PtrStr.format(run, max_, min_))
# Beautify the plot
ax.coastlines()
ax.set_global()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# - plot up emissions as a % of REF (Chance2014)
REF = 'Chance2014'
# runs2plot = [i for i in runs if (i != REF)]
# runs2plot = [i for i in runs if (i != 'No_HOI_I2')]
runs2plot = ['ML_Iodide']
for run in runs2plot:
print(run)
# dataset to plot (use annual sum of emissions)
ds = dsDH[run][[InOrgVar]].sum(dim='time')
dsREF = dsDH[REF][[InOrgVar]].sum(dim='time')
#
DIFFvar = 'Inorg/Inorg({})'.format(REF)
ds[DIFFvar] = ds[InOrgVar].copy()
ds[DIFFvar].values = ds[InOrgVar].values/dsREF[InOrgVar].values*100
# - Loop and plot species
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
ds[DIFFvar].plot.imshow(x='lon', y='lat',
# vmin=1, vmax=5,
vmin=0, vmax=200,
ax=ax,
cmap=cmap,
transform=ccrs.PlateCarree())
# Add a title to the plot
PtrStr = "Total Inorganic iodine emissions in '{}'\n as % of {}"
PtrStr += '(max={:.1f}, min={:.1f})'
max_ = float(ds[DIFFvar].max().values)
min_ = float(ds[DIFFvar].min().values)
plt.title(PtrStr.format(run, REF, max_, min_))
# Beautify the plot
ax.coastlines()
ax.set_global()
# Save to PDF and close plot
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
if show_plot:
plt.show()
plt.close()
# - plot up emissions as a % of REF (Macdonald2014)
REF = 'Macdonald2014'
# runs2plot = [i for i in runs if (i != REF)]
# runs2plot = [i for i in runs if (i != 'No_HOI_I2')]
runs2plot = ['ML_Iodide']
for run in runs2plot:
print(run)
# dataset to plot (use annual sum of emissions)
ds = dsDH[run][[InOrgVar]].sum(dim='time')
dsREF = dsDH[REF][[InOrgVar]].sum(dim='time')
#
DIFFvar = 'Inorg/Inorg({})'.format(REF)
ds[DIFFvar] = ds[InOrgVar].copy()
ds[DIFFvar].values = ds[InOrgVar].values/dsREF[InOrgVar].values*100
# - Loop and plot species
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
ds[DIFFvar].plot.imshow(x='lon', y='lat',
vmin=0, vmax=200,
ax=ax,
cmap=cmap,
transform=ccrs.PlateCarree())
# Add a title to the plot
PtrStr = "Total Inorganic iodine emissions in '{}'\n as % of {}"
PtrStr += | |
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import test_rnn_realtime as net
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import test_rnn_realtime as net
import traning as tr
import time
# Definition of the toy one-hot data used for the match/non-match task
# (original comment: 近似データの定義, "definition of the approximated data")
high_value = 1.0  # value of the "hot" element of a one-hot row
low_value = 0.0  # value of every other element
category_num = 2  # number of categories (one-hot width)
steps_num = 3  # number of timesteps per input sequence
def make_input_4data(high_value, low_value, category_num, steps_num):
    """Build every length-4 one-hot sequence with its match/non-match target.

    For each (q, i, j, k) in ``range(category_num)**4`` (in nested-loop
    order, i.e. index = q*c**3 + i*c**2 + j*c + k) a sequence of four one-hot
    rows is produced: the hot element holds ``high_value`` and the rest hold
    ``low_value``.  The target is [1, 0] ("match") when all four indices are
    equal and [0, 1] ("non-match") otherwise.

    Args:
        high_value (float): value of the active element of a one-hot row.
        low_value (float): value of the inactive elements.
        category_num (int): one-hot width / number of categories.
        steps_num (int): sequence length; must be 4 for this builder.

    Returns:
        (np.ndarray, np.ndarray): input_data of shape
        (category_num**4, 4, category_num) and target_data of shape
        (category_num**4, 2), both float.
    """
    if steps_num != 4:
        # The original implementation failed obscurely inside np.vstack for a
        # mismatched steps_num; fail fast with a clear message instead.
        raise ValueError('make_input_4data builds 4-step sequences, '
                         'got steps_num={}'.format(steps_num))
    much = np.array([1, 0])
    non_much = np.array([0, 1])

    def one_hot_row(hot_index):
        # One timestep: high_value at hot_index, low_value elsewhere.
        # (Replaces four copy-pasted hstack loops in the original.)
        return [high_value if col == hot_index else low_value
                for col in range(category_num)]

    sequences = []
    targets = []
    for q in range(category_num):
        for i in range(category_num):
            for j in range(category_num):
                for k in range(category_num):
                    sequences.append([one_hot_row(idx)
                                      for idx in (q, i, j, k)])
                    targets.append(much if q == i == j == k else non_much)
    if not sequences:  # category_num == 0 --> keep the original empty shapes
        return np.empty((0, steps_num, category_num)), np.empty((0, 2))
    # Single array build instead of repeated O(n**2) np.vstack calls.
    input_data = np.array(sequences, dtype=float)
    target_data = np.array(targets, dtype=float)
    return input_data, target_data
def make_input_3data(high_value, low_value, category_num, steps_num):
    """Build every length-3 one-hot sequence with its match/non-match target.

    For each (q, i, j) in ``range(category_num)**3`` (nested-loop order) a
    sequence of three one-hot rows is produced: the hot element holds
    ``high_value`` and the rest hold ``low_value``.  The target is [1, 0]
    ("match") when all three indices are equal and [0, 1] otherwise.

    Args:
        high_value (float): value of the active element of a one-hot row.
        low_value (float): value of the inactive elements.
        category_num (int): one-hot width / number of categories.
        steps_num (int): sequence length; must be 3 for this builder.

    Returns:
        (np.ndarray, np.ndarray): input_data of shape
        (category_num**3, 3, category_num) and target_data of shape
        (category_num**3, 2), both float.
    """
    if steps_num != 3:
        # The original implementation failed obscurely inside np.vstack for a
        # mismatched steps_num; fail fast with a clear message instead.
        raise ValueError('make_input_3data builds 3-step sequences, '
                         'got steps_num={}'.format(steps_num))
    much = np.array([1, 0])
    non_much = np.array([0, 1])

    def one_hot_row(hot_index):
        # One timestep: high_value at hot_index, low_value elsewhere.
        return [high_value if col == hot_index else low_value
                for col in range(category_num)]

    sequences = []
    targets = []
    for q in range(category_num):
        for i in range(category_num):
            for j in range(category_num):
                sequences.append([one_hot_row(idx) for idx in (q, i, j)])
                targets.append(much if q == i == j else non_much)
    if not sequences:  # category_num == 0 --> keep the original empty shapes
        return np.empty((0, steps_num, category_num)), np.empty((0, 2))
    # Single array build instead of repeated O(n**2) np.vstack calls.
    input_data = np.array(sequences, dtype=float)
    target_data = np.array(targets, dtype=float)
    return input_data, target_data
def make_input_2data(high_value, low_value, category_num, steps_num):
    """Build every length-2 one-hot sequence with its match/non-match target.

    For each (q, i) in ``range(category_num)**2`` (nested-loop order) a
    sequence of two one-hot rows is produced: the hot element holds
    ``high_value`` and the rest hold ``low_value``.  The target is [1, 0]
    ("match") when both indices are equal and [0, 1] otherwise.

    Args:
        high_value (float): value of the active element of a one-hot row.
        low_value (float): value of the inactive elements.
        category_num (int): one-hot width / number of categories.
        steps_num (int): sequence length; must be 2 for this builder.

    Returns:
        (np.ndarray, np.ndarray): input_data of shape
        (category_num**2, 2, category_num) and target_data of shape
        (category_num**2, 2), both float.
    """
    if steps_num != 2:
        # The original implementation failed obscurely inside np.vstack for a
        # mismatched steps_num; fail fast with a clear message instead.
        raise ValueError('make_input_2data builds 2-step sequences, '
                         'got steps_num={}'.format(steps_num))
    much = np.array([1, 0])
    non_much = np.array([0, 1])

    def one_hot_row(hot_index):
        # One timestep: high_value at hot_index, low_value elsewhere.
        return [high_value if col == hot_index else low_value
                for col in range(category_num)]

    sequences = []
    targets = []
    for q in range(category_num):
        for i in range(category_num):
            sequences.append([one_hot_row(idx) for idx in (q, i)])
            targets.append(much if q == i else non_much)
    if not sequences:  # category_num == 0 --> keep the original empty shapes
        return np.empty((0, steps_num, category_num)), np.empty((0, 2))
    # Single array build instead of repeated O(n**2) np.vstack calls.
    input_data = np.array(sequences, dtype=float)
    target_data = np.array(targets, dtype=float)
    return input_data, target_data
def add_traning_data(input_data, target_data, category_num, chunk):
    """Oversample the training set by re-appending selected samples in place.

    Runs ``category_num ** (chunk - 1) - 2`` passes; on each pass, for every
    category ``i`` it appends one copy of each "mismatch" sample (first step a
    different category, remaining steps category ``i``) followed by one copy of
    the all-``i`` "match" sample. Indexing assumes the 4-step layout with base
    ``category_num ** 3``.

    :param input_data: 3-D sample array; grown copies are appended at the end
    :param target_data: matching 2-D target array, grown in lockstep
    :param category_num: number of categories
    :param chunk: controls the number of oversampling passes
    :return: the grown (input_data, target_data) pair
    """
    # Fixes vs. previous revision: removed debug prints, the unused
    # much/non_much locals, and the no-op `for ex in range(1)` wrapper.
    for _ in range(category_num ** (chunk - 1) - 2):
        for i in range(category_num):
            for j in range(1, category_num):
                # Index of the sequence whose first step is category
                # (i + j) % category_num while the rest are category i.
                non_much_num = ((i + j) % category_num) * category_num ** 3 + \
                    i * category_num ** 2 + i * category_num + i
                input_data = np.vstack(
                    (input_data, np.array([input_data[non_much_num]])))
                target_data = np.vstack(
                    (target_data, np.array([target_data[non_much_num]])))
            # Index of the all-i "match" sequence (i, i, i, i).
            test_num = i * category_num ** 3 + i * category_num ** 2 + i * category_num + i
            input_data = np.vstack(
                (input_data, np.array([input_data[test_num]])))
            target_data = np.vstack(
                (target_data, np.array([target_data[test_num]])))
    return input_data, target_data
def iadd_traning_data(input_data, target_data, category_num, steps_num):
    """Extract a balanced evaluation subset from 4-step sequence data.

    For each category ``i`` this copies every "mismatch" sample (first step a
    different category, remaining steps ``i``) and the all-``i`` "match"
    sample. Indexing assumes the 4-step layout with base ``category_num ** 3``.

    :param input_data: full 3-D sample array
    :param target_data: matching 2-D target array
    :param category_num: number of categories
    :param steps_num: steps per sequence (second axis of the result)
    :return: (iinput_data, itarget_data) containing only the selected samples
    """
    # Fixes vs. previous revision: removed debug prints, the unused
    # much/non_much locals, and the no-op range(1) wrapper loops.
    iinput_data = np.empty((0, steps_num, category_num))
    itarget_data = np.empty((0, 2))
    for i in range(category_num):
        for j in range(1, category_num):
            # Sequence whose first step is category (i + j) % category_num.
            non_much_num = ((i + j) % category_num) * category_num ** 3 + \
                i * category_num ** 2 + i * category_num + i
            iinput_data = np.vstack(
                (iinput_data, np.array([input_data[non_much_num]])))
            itarget_data = np.vstack(
                (itarget_data, np.array([target_data[non_much_num]])))
        # The all-i "match" sequence.
        test_num = i * category_num ** 3 + i * category_num ** 2 + i * category_num + i
        iinput_data = np.vstack(
            (iinput_data, np.array([input_data[test_num]])))
        itarget_data = np.vstack(
            (itarget_data, np.array([target_data[test_num]])))
    return iinput_data, itarget_data
def iadd_traning_3data(input_data, target_data, category_num, steps_num):
    """Extract a balanced evaluation subset from 3-step sequence data.

    Same selection as :func:`iadd_traning_data` but for the 3-step layout,
    whose index base is ``category_num ** 2``.

    :param input_data: full 3-D sample array
    :param target_data: matching 2-D target array
    :param category_num: number of categories
    :param steps_num: steps per sequence (second axis of the result)
    :return: (iinput_data, itarget_data) containing only the selected samples
    """
    # Fixes vs. previous revision: removed debug prints, the unused
    # much/non_much locals, and the no-op range(1) wrapper loops.
    iinput_data = np.empty((0, steps_num, category_num))
    itarget_data = np.empty((0, 2))
    for i in range(category_num):
        for j in range(1, category_num):
            # Sequence whose first step is category (i + j) % category_num.
            non_much_num = ((i + j) % category_num) * category_num ** 2 + \
                i * category_num + i
            iinput_data = np.vstack(
                (iinput_data, np.array([input_data[non_much_num]])))
            itarget_data = np.vstack(
                (itarget_data, np.array([target_data[non_much_num]])))
        # The all-i "match" sequence.
        test_num = i * category_num ** 2 + i * category_num + i
        iinput_data = np.vstack(
            (iinput_data, np.array([input_data[test_num]])))
        itarget_data = np.vstack(
            (itarget_data, np.array([target_data[test_num]])))
    return iinput_data, itarget_data
def iadd_traning_2data(input_data, target_data, category_num, steps_num):
    """Extract a balanced evaluation subset from 2-step sequence data.

    Same selection as :func:`iadd_traning_data` but for the 2-step layout,
    whose index base is ``category_num``.

    :param input_data: full 3-D sample array
    :param target_data: matching 2-D target array
    :param category_num: number of categories
    :param steps_num: steps per sequence (second axis of the result)
    :return: (iinput_data, itarget_data) containing only the selected samples
    """
    # Fixes vs. previous revision: removed debug prints, the unused
    # much/non_much locals, the no-op range(1) wrappers, and the
    # redundant `** 1` exponent.
    iinput_data = np.empty((0, steps_num, category_num))
    itarget_data = np.empty((0, 2))
    for i in range(category_num):
        for j in range(1, category_num):
            # Sequence whose first step is category (i + j) % category_num.
            non_much_num = ((i + j) % category_num) * category_num + i
            iinput_data = np.vstack(
                (iinput_data, np.array([input_data[non_much_num]])))
            itarget_data = np.vstack(
                (itarget_data, np.array([target_data[non_much_num]])))
        # The all-i "match" sequence.
        test_num = i * category_num + i
        iinput_data = np.vstack(
            (iinput_data, np.array([input_data[test_num]])))
        itarget_data = np.vstack(
            (itarget_data, np.array([target_data[test_num]])))
    return iinput_data, itarget_data
def make_test_data(input_data, target_data, Tau, Wih_size, Whh_size, Who_size, I, H, O, category_num, flug_test=False):
    """Run a randomly initialised test network and collect hidden states.

    Initialises weights uniformly in [-size/2, +size/2), probes samples
    0, 2, 4 and 8 while plotting heatmaps of the hidden activations and
    their pairwise differences, then (unless ``flug_test`` is True, in
    which case it returns None early) records the hidden state for every
    (i, j, k) category triple.

    :param Tau: time constant forwarded to ``test_net.def_parameter``
    :param Wih_size: range of the random input->hidden weights
    :param Whh_size: range of the random hidden->hidden weights
    :param Who_size: range of the random hidden->output weights
    :param I: input layer size
    :param H: hidden layer size (must be divisible by 10 for the
        heatmap reshape below — TODO confirm)
    :param O: output layer size
    :param category_num: number of categories in the sample layout
    :param flug_test: when True, stop after the plotting stage
    :return: (test_data, itarget_data), or None when flug_test is True
    """
    # Random weight matrices centred on zero.
    Wih = Wih_size * np.random.rand(I, H) - Wih_size/2
    Whh = Whh_size * np.random.rand(H, H) - Whh_size/2
    Who = Who_size * np.random.rand(H, O) - Who_size/2
    bh = np.zeros((1, H))
    bo = np.zeros((1, O))
    test_net = net.test_network(I, H, O, 1, 1, 1, 3)
    test_net.set_Wandb(Wih, Whh, Who, bh, bo)
    # Probe a few fixed sample indices (0, 2, 4, 8), resetting the
    # network state before each forward pass.
    step_length = 100
    test_net.def_parameter(Tau, 0.01, 1, step_length)
    test_net.network_reset()
    memo = test_net.forward(input_data, target_data, 0)
    test0 = np.round(test_net.output_H(), decimals=3)
    print_memo(memo)
    plt.figure()
    step_length = 100
    test_net.def_parameter(Tau, 0.01, 1, step_length)
    test_net.network_reset()
    memo = test_net.forward(input_data, target_data, 2)
    test2 = np.round(test_net.output_H(), decimals=3)
    print_memo(memo)
    plt.figure()
    step_length = 100
    test_net.def_parameter(Tau, 0.01, 1, step_length)
    test_net.network_reset()
    memo = test_net.forward(input_data, target_data, 4)
    test1 = np.round(test_net.output_H(), decimals=3)
    # no-op bare expression — leftover from notebook-style debugging
    test1
    print_memo(memo)
    plt.figure()
    step_length = 100
    test_net.def_parameter(Tau, 0.01, 1, step_length)
    test_net.network_reset()
    memo = test_net.forward(input_data, target_data, 8)
    test3 = np.round(test_net.output_H(), decimals=3)
    print_memo(memo)
    plt.figure()
    # Reshape the H-element hidden vector into a 10 x (H/10) grid for plotting.
    ss = 10
    sr = int(H/ss)
    print("********")
    ptest0 = test0.reshape(ss, sr)
    sns.heatmap(ptest0)
    plt.figure()
    print("********")
    ptest1 = test1.reshape(ss, sr)
    sns.heatmap(ptest1)
    plt.figure()
    print("********")
    ptest2 = test2.reshape(ss, sr)
    sns.heatmap(ptest2)
    plt.figure()
    print("********")
    ptest3 = test3.reshape(ss, sr)
    sns.heatmap(ptest3)
    plt.figure()
    print("********")
    # Absolute differences between the reference state (sample 0) and the others.
    del_03 = abs(ptest0-ptest1)
    sns.heatmap(del_03, vmax=0.4)
    print("********")
    plt.figure()
    del_03 = abs(ptest0-ptest2)
    sns.heatmap(del_03, vmax=0.4)
    print("********")
    plt.figure()
    del_03 = abs(ptest0-ptest3)
    sns.heatmap(del_03, vmax=0.4)
    print("********")
    plt.figure()
    test_data = np.empty((0, 1, H))
    itarget_data = np.empty((0, 2))
    if flug_test == True:
        return
    # Record the hidden state for every (i, j, k) category triple.
    for i in range(category_num):
        for j in range(category_num):
            for k in range(category_num):
                step_length = 100
                test_net.def_parameter(Tau, 0.01, 1, step_length)
                test_net.network_reset()
                test_num = i * category_num * category_num + j * category_num + k
                memo = test_net.forward(input_data, target_data, test_num)
                test = np.round(test_net.output_H(), decimals=3)
                test = np.array([test])
                test_data = np.vstack((test_data, test))
                addtarget = target_data[test_num]
                itarget_data = np.vstack((itarget_data, addtarget))
    # NOTE(review): `ccategory_num` and `ategory_num` are undefined names, so
    # this loop raises NameError when reached; both were presumably meant to
    # be `category_num`. Left as-is pending confirmation of the intended
    # repetition count. `i` here also leaks from the loop above.
    for r in range(ccategory_num ** 3 +
                   i * ategory_num * (category_num)):
        for i in range(category_num):
            j, k = i, i
            test_num = i * category_num * category_num + j * category_num + k
            step_length = 100
            test_net.def_parameter(Tau, 0.01, 1, step_length)
            test_net.network_reset()
            memo = test_net.forward(input_data, target_data, test_num)
            test = np.round(test_net.output_H(), decimals=3)
            test = np.array([test])
            test_data = np.vstack((test_data, test))
            addtarget = target_data[test_num]
            itarget_data = np.vstack((itarget_data, addtarget))
    return test_data, itarget_data
def traning(test_data, itarget_data, H, O, del_W_h, chunk):
test_network = tr.test_network(H, O, 1)
test_network.setlr(0.05)
itarget_data.shape
memo = test_network.traning(test_data, itarget_data, 300000)
plt.plot(memo)
name = "data/del_W_h:" | |
import copy
import functools
import json
import os
import pathlib
import re
import urllib.parse
from collections import deque
from contextlib import contextmanager
from datetime import datetime
from inspect import getframeinfo, stack
from itertools import chain
from os.path import abspath
from pathlib import Path
from sys import getsizeof, stderr
from typing import Dict, Iterable, List, Optional, Tuple, Union
import click
import semver
import yaml
try:
from reprlib import repr
except ImportError:
pass
from doozerlib import constants, exectools
def stringify(val):
    """
    Accepts either str or bytes and returns a str
    """
    try:
        return val.decode('utf-8')
    except (UnicodeDecodeError, AttributeError):
        # Already a str (no .decode) or undecodable bytes: return unchanged.
        return val
def red_prefix(msg, file=None):
    """Write *msg* in bold red with no trailing newline — e.g. an "Error: " prefix."""
    click.secho(stringify(msg), fg='red', bold=True, nl=False, file=file)
def red_print(msg, file=None):
    """Write *msg* in red, followed by a newline."""
    click.secho(stringify(msg), fg='red', bold=False, nl=True, file=file)
def green_prefix(msg, file=None):
    """Write *msg* in bold green with no trailing newline — e.g. a "Success: " prefix."""
    click.secho(stringify(msg), fg='green', bold=True, nl=False, file=file)
def green_print(msg, file=None):
    """Write *msg* in green, followed by a newline."""
    click.secho(stringify(msg), fg='green', bold=False, nl=True, file=file)
def yellow_prefix(msg, file=None):
    """Write *msg* in bold yellow with no trailing newline — e.g. a "Warning: " prefix."""
    click.secho(stringify(msg), fg='yellow', bold=True, nl=False, file=file)
def yellow_print(msg, file=None):
    """Write *msg* in yellow, followed by a newline."""
    click.secho(stringify(msg), fg='yellow', bold=False, nl=True, file=file)
def cprint(msg, file=None):
    """Thin wrapper around click.echo that first normalizes *msg* to str."""
    click.echo(stringify(msg), file=file)
def color_print(msg, color='white', nl=True, file=None):
    """Write *msg* in the given color; the trailing newline is controlled by *nl*."""
    click.secho(stringify(msg), fg=color, bold=False, nl=nl, file=file)
# Sentinel meaning "no default was supplied" (None is a legitimate default).
DICT_EMPTY = object()
def dict_get(dct, path, default=DICT_EMPTY):
    """Follow the dot-separated *path* into the nested dict *dct*.

    Returns *default* when a key along the way is missing, or raises when
    no default was supplied.
    """
    node = copy.deepcopy(dct)  # deep copy so returned values never alias the caller's data
    for key in path.split('.'):
        try:
            node = node[key]
        except KeyError:
            if default is not DICT_EMPTY:
                return default
            raise Exception('Unable to follow key path {}'.format(path))
    return node
def remove_prefix(s: str, prefix: str) -> str:
    """Return *s* with *prefix* stripped from the front (a copy of *s* if absent)."""
    return s[len(prefix):] if s.startswith(prefix) else s[:]
def remove_prefixes(s: str, *args) -> str:
    """Strip each prefix in *args* from *s*, applying them left to right."""
    return functools.reduce(remove_prefix, args, s)
def remove_suffix(s: str, suffix: str) -> str:
    """Return *s* with a trailing *suffix* removed; an empty suffix is a no-op copy."""
    # Guard against suffix='' — s[:-0] would be the empty string.
    if not suffix or not s.endswith(suffix):
        return s[:]
    return s[:-len(suffix)]
def convert_remote_git_to_https(source_url: str):
    """
    Accepts a source git URL in ssh or https format and return it in a normalized
    https format (:port on servers is not supported):
    - https protocol
    - no trailing /
    :param source_url: Git remote
    :return: Normalized https git URL
    """
    url = remove_suffix(
        remove_prefixes(source_url.strip().rstrip('/'),
                        'http://', 'https://', 'git://', 'git@', 'ssh://'),
        '.git')
    url = url.split('@', 1)[-1]  # Strip username@
    if ':' in url:
        server, org_repo = url.rsplit(':', 1)
    elif '/' in url:
        server, org_repo = url.rsplit('/', 1)
    else:
        return f'https://{url}'  # weird..
    return f'https://{server}/{org_repo}'
def split_git_url(url) -> (str, str, str):
    """
    :param url: A remote ssh or https github url
    :return: Splits a github url into the server name, org, and repo name
    """
    # Normalize to https://server/org/repo first, then drop the scheme.
    bare = convert_remote_git_to_https(url)[len('https://'):]
    server, org, repo_name = bare.split('/', 2)
    return server, org, repo_name
def convert_remote_git_to_ssh(url):
    """
    Accepts a remote git URL and turns it into a git@
    ssh form.
    :param url: The initial URL
    :return: A url in git@server:repo.git
    """
    server, org, repo_name = split_git_url(url)
    return 'git@{}:{}/{}.git'.format(server, org, repo_name)
def setup_and_fetch_public_upstream_source(public_source_url: str, public_upstream_branch: str, source_dir: str):
    """
    Fetch public upstream source for specified Git repository. Set up public_upstream remote if needed.
    :param public_source_url: HTTPS Git URL of the public upstream source
    :param public_upstream_branch: Git branch of the public upstream source
    :param source_dir: Path to the local Git repository
    """
    out, err = exectools.cmd_assert(["git", "-C", source_dir, "remote"])
    # Create the remote on first use; afterwards just refresh its URL.
    subcmd = "add" if 'public_upstream' not in out.strip().split() else "set-url"
    exectools.cmd_assert(["git", "-C", source_dir, "remote", subcmd, "--", "public_upstream", public_source_url])
    exectools.cmd_assert(["git", "-C", source_dir, "fetch", "--", "public_upstream", public_upstream_branch], retries=3,
                         set_env=constants.GIT_NO_PROMPTS)
def is_commit_in_public_upstream(revision: str, public_upstream_branch: str, source_dir: str):
    """
    Determine if the public upstream branch includes the specified commit.
    :param revision: Git commit hash or reference
    :param public_upstream_branch: Git branch of the public upstream source
    :param source_dir: Path to the local Git repository
    """
    cmd = ["git", "merge-base", "--is-ancestor", "--", revision, "public_upstream/" + public_upstream_branch]
    # `git merge-base --is-ancestor` exits 0 for "yes" and 1 for "no"; any
    # other status signals a real error.
    # https://git-scm.com/docs/git-merge-base#Documentation/git-merge-base.txt---is-ancestor
    rc, out, err = exectools.cmd_gather(cmd)
    if rc in (0, 1):
        return rc == 0
    raise IOError(
        f"Couldn't determine if the commit {revision} is in the public upstream source repo. `git merge-base` exited with {rc}, stdout={out}, stderr={err}")
def is_in_directory(path: os.PathLike, directory: os.PathLike):
    """Return True when *path*'s parent directory lies under *directory*."""
    child = Path(path).parent.resolve()
    parent = Path(directory).resolve()
    try:
        child.relative_to(parent)
    except ValueError:
        return False
    return True
def mkdirs(path, mode=0o755):
    """
    Make sure a directory exists. Similar to shell command `mkdir -p`.
    :param path: Str path
    :param mode: create directories with mode
    """
    target = pathlib.Path(str(path))
    target.mkdir(mode=mode, parents=True, exist_ok=True)
@contextmanager
def timer(out_method, msg):
    """Context manager that reports wall-clock time spent in its body.

    On exit, *out_method* is called with one line naming the elapsed time,
    the call site, the call site's caller, and *msg*.

    NOTE(review): the stack()[2]/stack()[3] indexing assumes this generator
    body runs beneath @contextmanager's machinery plus one caller frame; at a
    shallower call depth it would raise IndexError — confirm intended usage.
    """
    caller = getframeinfo(stack()[2][0])  # Line that called this method
    caller_caller = getframeinfo(stack()[3][0])  # Line that called the method calling this method
    start_time = datetime.now()
    try:
        yield
    finally:
        # Report even when the body raises.
        time_elapsed = datetime.now() - start_time
        entry = f'Time elapsed (hh:mm:ss.ms) {time_elapsed} in {os.path.basename(caller.filename)}:{caller.lineno} from {os.path.basename(caller_caller.filename)}:{caller_caller.lineno}:{caller_caller.code_context[0].strip() if caller_caller.code_context else ""} : {msg}'
        out_method(entry)
def analyze_debug_timing(file):
    """Parse a debug log and print a per-thread timeline of events.

    Expects lines of the form
    ``YYYY-MM-DD HH:MM:SS,mmm LEVEL (THREAD_ID) message``. Events are
    bucketed into ten-second intervals relative to the first event seen,
    and each thread id is aliased to a compact name (T0, T1, ...).
    """
    peal = re.compile(r'^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d),\d\d\d \w+ [(](\d+)[)] (.*)')
    thread_names = {}
    event_timings = {}  # maps interval to { <thread_name> => [event list] }
    first_interval = None
    def get_thread_name(thread):
        # Assign sequential aliases (T0, T1, ...) to raw thread ids.
        if thread in thread_names:
            return thread_names[thread]
        c = f'T{len(thread_names)}'
        thread_names[thread] = c
        return c
    def get_interval_map(interval):
        # Rebase intervals so the first observed event lands in bucket 0.
        nonlocal first_interval
        if first_interval is None:
            first_interval = interval
        interval = interval - first_interval
        if interval in event_timings:
            return event_timings[interval]
        mapping = {}
        event_timings[interval] = mapping
        return mapping
    def get_thread_event_list(interval, thread):
        # Get (or create) the event list for a thread within an interval.
        thread_name = get_thread_name(thread)
        interval_map = get_interval_map(interval)
        if thread_name in interval_map:
            return interval_map[thread_name]
        event_list = []
        interval_map[thread_name] = event_list
        return event_list
    def add_thread_event(interval, thread, event):
        get_thread_event_list(int(interval), thread).append(event)
    with open(file, 'r') as f:
        for line in f:
            m = peal.match(line.strip())
            if m:
                thread = m.group(2)  # thread id (e.g. 139770552305472)
                datestr = m.group(1)  # 2020-04-09 10:17:03,092
                event = m.group(3)
                date_time_obj = datetime.strptime(datestr, '%Y-%m-%d %H:%M:%S')
                # NOTE(review): strftime("%s") (epoch seconds) is a platform
                # extension — works on Linux glibc, not portable; confirm.
                minute_mark = int(int(date_time_obj.strftime("%s")) / 10)  # ten second intervals
                add_thread_event(minute_mark, thread, event)
    def print_em(*args):
        # Print each argument left-justified in a 5-character column.
        for a in args:
            print(str(a).ljust(5), end="")
        print('')
    print('Thread timelines')
    names = sorted(list(thread_names.values()), key=lambda e: int(e[1:]))  # sorts as T1, T2, T3, .... by removing 'T'
    print_em('*', *names)
    sorted_intervals = sorted(list(event_timings.keys()))
    for interval in range(0, sorted_intervals[-1] + 1):
        print_em(interval, *names)
        if interval in event_timings:
            interval_map = event_timings[interval]
            for i, thread_name in enumerate(names):
                events = interval_map.get(thread_name, [])
                for event in events:
                    # Show the event in its owning thread's column.
                    with_event = list(names)
                    with_event[i] = thread_name + ': ' + event
                    print_em(f' {interval}', *with_event[:i + 1])
def what_is_in_master() -> str:
    """
    :return: Returns a string like "4.6" to identify which release currently resides in master branch.
    """
    # The promotion target of the openshift/images master branch defines the
    # release that master is associated with.
    ci_config_url = 'https://raw.githubusercontent.com/openshift/release/master/ci-operator/config/openshift/images/openshift-images-master.yaml'
    content = exectools.urlopen_assert(ci_config_url).read()
    # Look for something like: https://github.com/openshift/release/blob/251cb12e913dcde7be7a2b36a211650ed91c45c4/ci-operator/config/openshift/images/openshift-images-master.yaml#L64
    target_release = yaml.safe_load(content).get('promotion', {}).get('name', None)
    if not target_release:
        red_print(content)
        raise IOError('Unable to find which openshift release resides in master')
    return target_release
def extract_version_fields(version, at_least=0):
    """
    For a specified version, return a list with major, minor, patch.. isolated
    as integers.
    :param version: A version to parse
    :param at_least: The minimum number of fields to find (else raise an error)
    """
    core = version.strip().split('-')[0].lstrip('v')  # e.g. v1.17.1-rc -> 1.17.1
    fields = [int(part) for part in core.split('.')]
    if len(fields) < at_least:
        raise IOError(f'Unable to find required {at_least} fields in {version}')
    return fields
def get_cincinnati_channels(major, minor):
    """
    :param major: Major for release
    :param minor: Minor version for release.
    :return: Returns the Cincinnati graph channels associated with a release
    in promotion order (e.g. candidate -> stable)
    """
    major, minor = int(major), int(minor)
    if major != 4:
        raise IOError('Unable to derive previous for non v4 major')
    # 4.1 predates the candidate/fast channels.
    if minor == 1:
        prefixes = ['prerelease', 'stable']
    else:
        prefixes = ['candidate', 'fast', 'stable']
    return ['{}-{}.{}'.format(prefix, major, minor) for prefix in prefixes]
def get_docker_config_json(config_dir):
    """Return the absolute path of ``config.json`` inside *config_dir*.

    :raises FileNotFoundError: when the directory has no config.json
    """
    if 'config.json' in os.listdir(abspath(config_dir)):
        return abspath(os.path.join(config_dir, 'config.json'))
    raise FileNotFoundError("Can not find the registry config file in {}".format(config_dir))
def isolate_git_commit_in_release(release: str) -> Optional[str]:
"""
Given a release field, determines whether is contains
.git.<commit> information or .g<commit> (new style). If it does, it returns the value
of <commit>. If it is not found, None is returned.
| |
buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('_derived', node)
if value is not None and '_derived' not in already_processed:
already_processed.add('_derived')
self._derived = value
value = find_attr_value_('_real_archetype', node)
if value is not None and '_real_archetype' not in already_processed:
already_processed.add('_real_archetype')
if value in ('true', '1'):
self._real_archetype = True
elif value in ('false', '0'):
self._real_archetype = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('_archetype', node)
if value is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
self._archetype = value
value = find_attr_value_('_subtype', node)
if value is not None and '_subtype' not in already_processed:
already_processed.add('_subtype')
if value in ('true', '1'):
self._subtype = True
elif value in ('false', '0'):
self._subtype = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('_instances', node)
if value is not None and '_instances' not in already_processed:
already_processed.add('_instances')
self._instances = value
value = find_attr_value_('_desynched_atts', node)
if value is not None and '_desynched_atts' not in already_processed:
already_processed.add('_desynched_atts')
self._desynched_atts = value
value = find_attr_value_('_id', node)
if value is not None and '_id' not in already_processed:
already_processed.add('_id')
self._id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Scalar':
obj_ = ScalarType.factory()
obj_.build(child_)
self.Scalar.append(obj_)
obj_.original_tagname_ = 'Scalar'
# end class ScalarsType
class AnomalyType(GeneratedsSuper):
    """Generated XML binding for the <Anomaly> element.

    Auto-generated (generateDS-style) data class: all state is carried in XML
    attributes (this element has no child elements), with the generator's
    fixed accessor/export/build method layout. Edit the schema and regenerate
    rather than hand-modifying the logic.
    """
    subclass = None
    superclass = None
    def __init__(self, _derived=None, _real_archetype=None, _desynched_atts=None, MetricID=None, _subtype=None, _instances=None, _archetype=None, Error=None, _id=None):
        self.original_tagname_ = None
        self._derived = _cast(None, _derived)
        self._real_archetype = _cast(bool, _real_archetype)
        self._desynched_atts = _cast(None, _desynched_atts)
        self.MetricID = _cast(None, MetricID)
        self._subtype = _cast(bool, _subtype)
        self._instances = _cast(None, _instances)
        self._archetype = _cast(None, _archetype)
        self.Error = _cast(None, Error)
        self._id = _cast(None, _id)
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if AnomalyType.subclass:
            return AnomalyType.subclass(*args_, **kwargs_)
        else:
            return AnomalyType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessor boilerplate: one get_/set_ pair per attribute.
    def get__derived(self): return self._derived
    def set__derived(self, _derived): self._derived = _derived
    def get__real_archetype(self): return self._real_archetype
    def set__real_archetype(self, _real_archetype): self._real_archetype = _real_archetype
    def get__desynched_atts(self): return self._desynched_atts
    def set__desynched_atts(self, _desynched_atts): self._desynched_atts = _desynched_atts
    def get_MetricID(self): return self.MetricID
    def set_MetricID(self, MetricID): self.MetricID = MetricID
    def get__subtype(self): return self._subtype
    def set__subtype(self, _subtype): self._subtype = _subtype
    def get__instances(self): return self._instances
    def set__instances(self, _instances): self._instances = _instances
    def get__archetype(self): return self._archetype
    def set__archetype(self, _archetype): self._archetype = _archetype
    def get_Error(self): return self.Error
    def set_Error(self, Error): self.Error = Error
    def get__id(self): return self._id
    def set__id(self, _id): self._id = _id
    def hasContent_(self):
        # Generated with no child elements: the empty parenthesized condition
        # below is an empty tuple (always falsy), so this always returns False.
        if (
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='AnomalyType', namespacedef_='', pretty_print=True):
        # Serialize this element (attributes only) as XML to outfile.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name captured during parsing, if any.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='AnomalyType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='AnomalyType', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AnomalyType'):
        # Write each non-None attribute exactly once.
        if self._derived is not None and '_derived' not in already_processed:
            already_processed.add('_derived')
            outfile.write(' _derived=%s' % (self.gds_format_string(quote_attrib(self._derived).encode(ExternalEncoding), input_name='_derived'), ))
        if self._real_archetype is not None and '_real_archetype' not in already_processed:
            already_processed.add('_real_archetype')
            outfile.write(' _real_archetype="%s"' % self.gds_format_boolean(self._real_archetype, input_name='_real_archetype'))
        if self._desynched_atts is not None and '_desynched_atts' not in already_processed:
            already_processed.add('_desynched_atts')
            outfile.write(' _desynched_atts=%s' % (self.gds_format_string(quote_attrib(self._desynched_atts).encode(ExternalEncoding), input_name='_desynched_atts'), ))
        if self.MetricID is not None and 'MetricID' not in already_processed:
            already_processed.add('MetricID')
            outfile.write(' MetricID=%s' % (self.gds_format_string(quote_attrib(self.MetricID).encode(ExternalEncoding), input_name='MetricID'), ))
        if self._subtype is not None and '_subtype' not in already_processed:
            already_processed.add('_subtype')
            outfile.write(' _subtype="%s"' % self.gds_format_boolean(self._subtype, input_name='_subtype'))
        if self._instances is not None and '_instances' not in already_processed:
            already_processed.add('_instances')
            outfile.write(' _instances=%s' % (self.gds_format_string(quote_attrib(self._instances).encode(ExternalEncoding), input_name='_instances'), ))
        if self._archetype is not None and '_archetype' not in already_processed:
            already_processed.add('_archetype')
            outfile.write(' _archetype=%s' % (self.gds_format_string(quote_attrib(self._archetype).encode(ExternalEncoding), input_name='_archetype'), ))
        if self.Error is not None and 'Error' not in already_processed:
            already_processed.add('Error')
            outfile.write(' Error=%s' % (self.gds_format_string(quote_attrib(self.Error).encode(ExternalEncoding), input_name='Error'), ))
        if self._id is not None and '_id' not in already_processed:
            already_processed.add('_id')
            outfile.write(' _id=%s' % (self.gds_format_string(quote_attrib(self._id).encode(ExternalEncoding), input_name='_id'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='AnomalyType', fromsubclass_=False, pretty_print=True):
        # No child elements defined for this type.
        pass
    def exportLiteral(self, outfile, level, name_='AnomalyType'):
        # Emit the instance as Python-literal construction code.
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self._derived is not None and '_derived' not in already_processed:
            already_processed.add('_derived')
            showIndent(outfile, level)
            outfile.write('_derived="%s",\n' % (self._derived,))
        if self._real_archetype is not None and '_real_archetype' not in already_processed:
            already_processed.add('_real_archetype')
            showIndent(outfile, level)
            outfile.write('_real_archetype=%s,\n' % (self._real_archetype,))
        if self._desynched_atts is not None and '_desynched_atts' not in already_processed:
            already_processed.add('_desynched_atts')
            showIndent(outfile, level)
            outfile.write('_desynched_atts="%s",\n' % (self._desynched_atts,))
        if self.MetricID is not None and 'MetricID' not in already_processed:
            already_processed.add('MetricID')
            showIndent(outfile, level)
            outfile.write('MetricID="%s",\n' % (self.MetricID,))
        if self._subtype is not None and '_subtype' not in already_processed:
            already_processed.add('_subtype')
            showIndent(outfile, level)
            outfile.write('_subtype=%s,\n' % (self._subtype,))
        if self._instances is not None and '_instances' not in already_processed:
            already_processed.add('_instances')
            showIndent(outfile, level)
            outfile.write('_instances="%s",\n' % (self._instances,))
        if self._archetype is not None and '_archetype' not in already_processed:
            already_processed.add('_archetype')
            showIndent(outfile, level)
            outfile.write('_archetype="%s",\n' % (self._archetype,))
        if self.Error is not None and 'Error' not in already_processed:
            already_processed.add('Error')
            showIndent(outfile, level)
            outfile.write('Error="%s",\n' % (self.Error,))
        if self._id is not None and '_id' not in already_processed:
            already_processed.add('_id')
            showIndent(outfile, level)
            outfile.write('_id="%s",\n' % (self._id,))
    def exportLiteralChildren(self, outfile, level, name_):
        # No child elements defined for this type.
        pass
    def build(self, node):
        # Populate this instance from an lxml/ElementTree node.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Read each attribute at most once; boolean attributes accept
        # 'true'/'1'/'false'/'0' only.
        value = find_attr_value_('_derived', node)
        if value is not None and '_derived' not in already_processed:
            already_processed.add('_derived')
            self._derived = value
        value = find_attr_value_('_real_archetype', node)
        if value is not None and '_real_archetype' not in already_processed:
            already_processed.add('_real_archetype')
            if value in ('true', '1'):
                self._real_archetype = True
            elif value in ('false', '0'):
                self._real_archetype = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('_desynched_atts', node)
        if value is not None and '_desynched_atts' not in already_processed:
            already_processed.add('_desynched_atts')
            self._desynched_atts = value
        value = find_attr_value_('MetricID', node)
        if value is not None and 'MetricID' not in already_processed:
            already_processed.add('MetricID')
            self.MetricID = value
        value = find_attr_value_('_subtype', node)
        if value is not None and '_subtype' not in already_processed:
            already_processed.add('_subtype')
            if value in ('true', '1'):
                self._subtype = True
            elif value in ('false', '0'):
                self._subtype = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('_instances', node)
        if value is not None and '_instances' not in already_processed:
            already_processed.add('_instances')
            self._instances = value
        value = find_attr_value_('_archetype', node)
        if value is not None and '_archetype' not in already_processed:
            already_processed.add('_archetype')
            self._archetype = value
        value = find_attr_value_('Error', node)
        if value is not None and 'Error' not in already_processed:
            already_processed.add('Error')
            self.Error = value
        value = find_attr_value_('_id', node)
        if value is not None and '_id' not in already_processed:
            already_processed.add('_id')
            self._id = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No child elements defined for this type.
        pass
# end class AnomalyType
class AnomaliesType(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, _derived=None, _real_archetype=None, _archetype=None, _subtype=None, _instances=None, _desynched_atts=None, _id=None, Anomaly=None):
        # Generated constructor: scalar XML attributes plus the list of
        # <Anomaly> child elements.
        self.original_tagname_ = None
        self._derived = _cast(None, _derived)
        self._real_archetype = _cast(bool, _real_archetype)
        self._archetype = _cast(None, _archetype)
        self._subtype = _cast(bool, _subtype)
        self._instances = _cast(None, _instances)
        self._desynched_atts = _cast(None, _desynched_atts)
        self._id = _cast(None, _id)
        # Default to a fresh list so instances never share a mutable default.
        if Anomaly is None:
            self.Anomaly = []
        else:
            self.Anomaly = Anomaly
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if AnomaliesType.subclass:
            return AnomaliesType.subclass(*args_, **kwargs_)
        else:
            return AnomaliesType(*args_, **kwargs_)
    factory = staticmethod(factory)
def get_Anomaly(self): return self.Anomaly
def set_Anomaly(self, Anomaly): self.Anomaly = Anomaly
def add_Anomaly(self, value): self.Anomaly.append(value)
def insert_Anomaly(self, index, value): self.Anomaly[index] = value
def get__derived(self): return self._derived
def set__derived(self, _derived): self._derived = _derived
def get__real_archetype(self): return self._real_archetype
def set__real_archetype(self, _real_archetype): self._real_archetype = _real_archetype
def get__archetype(self): return self._archetype
def set__archetype(self, _archetype): self._archetype = _archetype
def get__subtype(self): return self._subtype
def set__subtype(self, _subtype): self._subtype = _subtype
def get__instances(self): return self._instances
def set__instances(self, _instances): self._instances = _instances
def get__desynched_atts(self): return self._desynched_atts
def set__desynched_atts(self, _desynched_atts): self._desynched_atts = _desynched_atts
def get__id(self): return self._id
def set__id(self, _id): self._id = _id
def hasContent_(self):
if (
self.Anomaly
):
return True
else:
return False
    def export(self, outfile, level, namespace_='', name_='AnomaliesType', namespacedef_='', pretty_print=True):
        # Serialize this element (attributes and Anomaly children) as XML.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Prefer the tag name captured during parsing, if any.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='AnomaliesType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='AnomaliesType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AnomaliesType'):
if self._derived is not None and '_derived' not in already_processed:
already_processed.add('_derived')
outfile.write(' _derived=%s' % | |
<filename>models/hierarchical_bias/hf_translation.py
from dataclasses import dataclass, field
import itertools
import json
import logging
import os
from typing import Optional
from argparse import Namespace
from omegaconf import II
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from .hf_dataset import LanguagePairDataset
import datasets
from transformers import AutoTokenizer
from datasets import load_dataset
# Highest n-gram order tracked for BLEU validation (fairseq translation-task
# convention); not referenced in this chunk of the file.
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
# Tokenizer ids marking a section's depth when a document tree is flattened;
# the id mapped to -1 (48900) is also used literally as a paragraph separator
# in load_langpair_dataset.
# NOTE(review): these ids appear tied to the default
# 'allenai/led-large-16384' vocabulary — confirm before changing tokenizer_name.
DEPTH_SPECIAL_TOKENS = {
    -1: 48900,
    0: 48613,
    1: 48983,
    2: 48936,
    3: 48712,
    4: 49130,
    5: 49216
}
# Tokenizer ids encoding depth transitions between summary paragraphs in the
# query-focused preprocessing variants.
ACTION_SPECIAL_TOKENS = {
    "UP": 49908,
    "HOLD": 49859,
    "DOWN": 49452
}
def load_langpair_dataset(
    data_path,
    data_loading_file,
    data_name,
    split,
    tokenizer,
    max_source_positions,
    max_target_positions,
    shuffle=True,
    pad_to_multiple=1,
    overwrite_cache=False,
):
    """Load one split of a HuggingFace dataset and tokenize it for fairseq.

    Each document's sections are flattened into a single token sequence:
    every section is prefixed with a DEPTH_SPECIAL_TOKENS id and its
    paragraphs are separated by token 48900, truncated to fit within
    max_source_positions (minus 2 for BOS/EOS, ids 0 and 2).  Per-section
    chunk sizes and parent ids (plus depths for the plain variant) are
    recorded alongside the token ids.  `data_name` selects the preprocessing
    variant; the tokenized split is wrapped in a LanguagePairDataset.
    """
    # Translate fairseq split names into datasets.Split members.
    if split == 'train':
        split = datasets.Split.TRAIN
    elif split == 'valid':
        split = datasets.Split.VALIDATION
    else:
        split = datasets.Split.TEST
    raw_dataset = load_dataset(data_loading_file, name=data_name, data_dir=data_path, split=split)
    column_names = raw_dataset.column_names
    def preprocess_function(examples):
        # Plain hierarchical variant ('gov_report' / 'wiki_bio_sum'):
        # source = sectioned document, target = flat summary.
        document_paragraphs = examples["document_paragraphs"]
        document_section_ends = examples["section_paragraph_ends"]
        document_section_depths = examples["section_depths"]
        document_section_titles = examples["section_titles"]
        document_section_parent_ids = examples["section_parent_ids"]
        summary = examples["summary"]
        tokenized_document_paragraphs = [tokenizer(document_paragraph, add_special_tokens=False)["input_ids"] for document_paragraph in document_paragraphs]
        tokenized_section_titles = [tokenizer(document_section_title, add_special_tokens=False)["input_ids"] for document_section_title in document_section_titles]
        tokenized_document = []
        section_chunk_sizes = []
        section_parent_ids = []
        section_depths = []
        for tokenized_document_paragraph, document_section_end, document_section_depth, tokenized_section_title, document_section_parent_id in zip(tokenized_document_paragraphs, document_section_ends, document_section_depths, tokenized_section_titles, document_section_parent_ids):
            assert len(document_section_end) == len(document_section_depth) == len(tokenized_section_title) == len(document_section_parent_id)
            previous_end = 0
            accumulate_length = 0
            accumulate_section = []
            # Accumulate whole sections until the source token budget runs out.
            for section_end, section_depth, section_parent_id, section_title in zip(document_section_end, document_section_depth, document_section_parent_id, tokenized_section_title):
                accumulate_length += 1 # for section special token
                accumulate_paragraph = [section_title]
                accumulate_length += len(section_title)
                if accumulate_length > max_source_positions - 2:
                    break
                for section_paragraph in tokenized_document_paragraph[previous_end:section_end]:
                    accumulate_paragraph.append(section_paragraph[:max_source_positions - 2 - accumulate_length])
                    accumulate_length += len(section_paragraph) + 1
                    if accumulate_length > max_source_positions - 2:
                        break
                if accumulate_length > max_source_positions - 2:
                    break
                accumulate_section.append((accumulate_paragraph[0] + [tok for paragraph in accumulate_paragraph[1:] for tok in [48900] + paragraph], section_depth, section_parent_id))
                previous_end = section_end
            document = []
            accumulate_parent_ids = []
            chunk_size = []
            previous_token_end = 0
            depths = []
            for section, depth, parent_id in accumulate_section:
                accumulate_parent_ids.append(parent_id)
                depths.append(depth)
                document.extend([DEPTH_SPECIAL_TOKENS.get(depth, 49216)] + section)
                chunk_size.append(len(document) - previous_token_end)
                previous_token_end = len(document)
            # BOS/EOS each form a 1-token chunk; parent ids are shifted by one
            # to account for the BOS chunk.
            document = [0] + document + [2]
            chunk_size = [1] + chunk_size + [1]
            depths = [0] + depths + [0]
            accumulate_parent_ids = [id if id == -1 else id + 1 for id in accumulate_parent_ids]
            accumulate_parent_ids = [-1] + accumulate_parent_ids + [-1]
            assert len(chunk_size) == len(accumulate_parent_ids)
            tokenized_document.append(document)
            section_chunk_sizes.append(chunk_size)
            section_parent_ids.append(accumulate_parent_ids)
            section_depths.append(depths)
        # Summaries are tokenized with the target-side tokenizer settings.
        with tokenizer.as_target_tokenizer():
            tokenized_summary = tokenizer(summary, truncation=True, max_length=max_target_positions)["input_ids"]
        examples["tokenized_inputs"] = tokenized_document
        examples["labels"] = tokenized_summary
        examples["chunk_sizes"] = section_chunk_sizes
        examples["parent_ids"] = section_parent_ids
        examples["depths"] = section_depths
        return examples
    def preprocess_function_qs_fq(examples):
        # Query-focused variant ('qs_hierarchy_fq'): the first question is
        # prepended to the source; the target interleaves UP/HOLD/DOWN action
        # tokens between summary paragraphs according to their depths.
        document_paragraphs = examples["document_paragraphs"]
        document_section_ends = examples["section_paragraph_ends"]
        document_section_depths = examples["section_depths"]
        document_section_titles = examples["section_titles"]
        document_section_parent_ids = examples["section_parent_ids"]
        first_questions = examples["first_question"]
        first_summaries = examples["first_summary"]
        document_summary_paragraphs = examples["summary_paragraphs"]
        document_summary_depths = examples["summary_depths"]
        tokenized_document_paragraphs = [tokenizer(document_paragraph, add_special_tokens=False)["input_ids"] for
                                         document_paragraph in document_paragraphs]
        tokenized_section_titles = [tokenizer(document_section_title, add_special_tokens=False)["input_ids"] for
                                    document_section_title in document_section_titles]
        tokenized_first_questions = tokenizer(first_questions, add_special_tokens=False)["input_ids"]
        tokenized_document = []
        section_chunk_sizes = []
        section_parent_ids = []
        for tokenized_document_paragraph, document_section_end, document_section_depth, tokenized_section_title, document_section_parent_id, \
                tokenized_first_question in zip(
            tokenized_document_paragraphs, document_section_ends, document_section_depths, tokenized_section_titles,
            document_section_parent_ids, tokenized_first_questions):
            assert len(document_section_end) == len(document_section_depth) == len(tokenized_section_title) == len(
                document_section_parent_id)
            previous_end = 0
            # Budget starts after BOS plus the prepended question.
            accumulate_length = 1 + len(tokenized_first_question)
            accumulate_section = []
            for section_end, section_depth, section_parent_id, section_title in zip(document_section_end,
                                                                                    document_section_depth,
                                                                                    document_section_parent_id,
                                                                                    tokenized_section_title):
                accumulate_length += 1  # for section special token
                accumulate_paragraph = [section_title]
                accumulate_length += len(section_title)
                if accumulate_length > max_source_positions - 2:
                    break
                for section_paragraph in tokenized_document_paragraph[previous_end:section_end]:
                    accumulate_paragraph.append(section_paragraph[:max_source_positions - 2 - accumulate_length])
                    accumulate_length += len(section_paragraph) + 1
                    if accumulate_length > max_source_positions - 2:
                        break
                if accumulate_length > max_source_positions - 2:
                    break
                accumulate_section.append((accumulate_paragraph[0] + [tok for paragraph in accumulate_paragraph[1:] for
                                                                      tok in [48900] + paragraph], section_depth,
                                           section_parent_id))
                previous_end = section_end
            document = []
            accumulate_parent_ids = []
            chunk_size = []
            previous_token_end = 0
            for section, depth, parent_id in accumulate_section:
                accumulate_parent_ids.append(parent_id)
                document.extend([DEPTH_SPECIAL_TOKENS.get(depth, 49216)] + section)
                chunk_size.append(len(document) - previous_token_end)
                previous_token_end = len(document)
            # The question chunk precedes the document; parent ids shift by 2
            # (question chunk + BOS chunk).
            document = [0] + tokenized_first_question + [0] + document + [2]
            chunk_size = [1 + len(tokenized_first_question)] + [1] + chunk_size + [1]
            accumulate_parent_ids = [id if id == -1 else id + 2 for id in accumulate_parent_ids]
            accumulate_parent_ids = [-1] + [-1] + accumulate_parent_ids + [-1]
            assert len(chunk_size) == len(accumulate_parent_ids)
            tokenized_document.append(document)
            section_chunk_sizes.append(chunk_size)
            section_parent_ids.append(accumulate_parent_ids)
        with tokenizer.as_target_tokenizer():
            tokenized_document_summary_paragraphs = [
                tokenizer(document_paragraph, add_special_tokens=False)["input_ids"] if document_paragraph else [] for document_paragraph in
                document_summary_paragraphs]
            tokenized_first_summaries = tokenizer(first_summaries, add_special_tokens=False)["input_ids"]
            tokenized_summary = []
            for tokenized_document_summary_paragraph, document_summary_depth, first_summary in zip(
                    tokenized_document_summary_paragraphs, document_summary_depths, tokenized_first_summaries):
                current_depth = 0
                summary = []
                # NOTE(review): current_depth is never updated inside this
                # loop, so action tokens are always emitted relative to depth
                # 0 — confirm this is the intended behavior.
                for summary_paragraph, summary_depth in zip(tokenized_document_summary_paragraph,
                                                            document_summary_depth):
                    if summary_depth > current_depth:
                        summary = summary + [ACTION_SPECIAL_TOKENS["DOWN"]] * (summary_depth - current_depth)
                        summary = summary + summary_paragraph
                    elif summary_depth == current_depth:
                        summary = summary + [ACTION_SPECIAL_TOKENS["HOLD"]]
                        summary = summary + summary_paragraph
                    else:
                        summary = summary + [ACTION_SPECIAL_TOKENS["UP"]] * (current_depth - summary_depth)
                        summary = summary + summary_paragraph
                summary = [0] + first_summary + summary[:max_target_positions - len(first_summary) - 2] + [2]
                tokenized_summary.append(summary)
        examples["tokenized_inputs"] = tokenized_document
        examples["labels"] = tokenized_summary
        examples["chunk_sizes"] = section_chunk_sizes
        examples["parent_ids"] = section_parent_ids
        return examples
    def preprocess_function_qs_qg(examples):
        # Variant 'qs_hierarchy_qg': both the first question and the first
        # summary are prepended to the source; target paragraphs are joined
        # with HOLD action tokens only.
        document_paragraphs = examples["document_paragraphs"]
        document_section_ends = examples["section_paragraph_ends"]
        document_section_depths = examples["section_depths"]
        document_section_titles = examples["section_titles"]
        document_section_parent_ids = examples["section_parent_ids"]
        first_questions = examples["first_question"]
        first_summaries = examples["first_summary"]
        document_summary_paragraphs = examples["summary_paragraphs"]
        tokenized_document_paragraphs = [tokenizer(document_paragraph, add_special_tokens=False)["input_ids"] for
                                         document_paragraph in document_paragraphs]
        tokenized_section_titles = [tokenizer(document_section_title, add_special_tokens=False)["input_ids"] for
                                    document_section_title in document_section_titles]
        tokenized_first_questions = tokenizer(first_questions, add_special_tokens=False)["input_ids"]
        tokenized_first_summaries = tokenizer(first_summaries, add_special_tokens=False)["input_ids"]
        tokenized_document = []
        section_chunk_sizes = []
        section_parent_ids = []
        for tokenized_document_paragraph, document_section_end, document_section_depth, tokenized_section_title, document_section_parent_id, \
                tokenized_first_question, tokenized_first_summary in zip(
            tokenized_document_paragraphs, document_section_ends, document_section_depths, tokenized_section_titles,
            document_section_parent_ids, tokenized_first_questions, tokenized_first_summaries):
            assert len(document_section_end) == len(document_section_depth) == len(tokenized_section_title) == len(
                document_section_parent_id)
            previous_end = 0
            # Budget starts after BOS plus the prepended question and summary.
            accumulate_length = 1 + len(tokenized_first_question) + len(tokenized_first_summary)
            accumulate_section = []
            for section_end, section_depth, section_parent_id, section_title in zip(document_section_end,
                                                                                    document_section_depth,
                                                                                    document_section_parent_id,
                                                                                    tokenized_section_title):
                accumulate_length += 1  # for section special token
                accumulate_paragraph = [section_title]
                accumulate_length += len(section_title)
                if accumulate_length > max_source_positions - 2:
                    break
                for section_paragraph in tokenized_document_paragraph[previous_end:section_end]:
                    accumulate_paragraph.append(section_paragraph[:max_source_positions - 2 - accumulate_length])
                    accumulate_length += len(section_paragraph) + 1
                    if accumulate_length > max_source_positions - 2:
                        break
                if accumulate_length > max_source_positions - 2:
                    break
                accumulate_section.append((accumulate_paragraph[0] + [tok for paragraph in accumulate_paragraph[1:] for
                                                                      tok in [48900] + paragraph], section_depth,
                                           section_parent_id))
                previous_end = section_end
            document = []
            accumulate_parent_ids = []
            chunk_size = []
            previous_token_end = 0
            for section, depth, parent_id in accumulate_section:
                accumulate_parent_ids.append(parent_id)
                document.extend([DEPTH_SPECIAL_TOKENS.get(depth, 49216)] + section)
                chunk_size.append(len(document) - previous_token_end)
                previous_token_end = len(document)
            document = [0] + tokenized_first_question + tokenized_first_summary + [0] + document + [2]
            chunk_size = [1 + len(tokenized_first_question) + len(tokenized_first_summary)] + [1] + chunk_size + [1]
            accumulate_parent_ids = [id if id == -1 else id + 2 for id in accumulate_parent_ids]
            accumulate_parent_ids = [-1] + [-1] + accumulate_parent_ids + [-1]
            assert len(chunk_size) == len(accumulate_parent_ids)
            tokenized_document.append(document)
            section_chunk_sizes.append(chunk_size)
            section_parent_ids.append(accumulate_parent_ids)
        with tokenizer.as_target_tokenizer():
            tokenized_document_summary_paragraphs = [
                tokenizer(document_paragraph, add_special_tokens=False)["input_ids"] if document_paragraph else [] for document_paragraph in
                document_summary_paragraphs]
            tokenized_summary = []
            for tokenized_document_summary_paragraph in tokenized_document_summary_paragraphs:
                summary = []
                for summary_paragraph in tokenized_document_summary_paragraph:
                    if summary:
                        summary = summary + [ACTION_SPECIAL_TOKENS["HOLD"]]
                    summary = summary + summary_paragraph
                summary = summary[:max_target_positions - 2] + [2]
                tokenized_summary.append(summary)
        examples["tokenized_inputs"] = tokenized_document
        examples["labels"] = tokenized_summary
        examples["chunk_sizes"] = section_chunk_sizes
        examples["parent_ids"] = section_parent_ids
        return examples
    # Pick the preprocessing variant matching the dataset flavour.
    if data_name == 'qs_hierarchy_fq':
        raw_dataset = raw_dataset.map(
            preprocess_function_qs_fq,
            batched=True,
            num_proc=1,
            remove_columns=column_names,
            load_from_cache_file=not overwrite_cache,
            desc=f"Running tokenizer on {split} dataset",
        )
    elif data_name == 'qs_hierarchy_qg':
        raw_dataset = raw_dataset.map(
            preprocess_function_qs_qg,
            batched=True,
            num_proc=1,
            remove_columns=column_names,
            load_from_cache_file=not overwrite_cache,
            desc=f"Running tokenizer on {split} dataset",
        )
    elif data_name == 'gov_report' or data_name == 'wiki_bio_sum':
        raw_dataset = raw_dataset.map(
            preprocess_function,
            batched=True,
            num_proc=1,
            remove_columns=column_names,
            load_from_cache_file=not overwrite_cache,
            desc=f"Running tokenizer on {split} dataset",
        )
    else:
        raise ValueError(f"Unknown data name: {data_name}")
    # Per-example lengths are required by fairseq for length-based batching.
    src_sizes = [len(example["tokenized_inputs"]) for example in raw_dataset]
    tgt_sizes = [len(example["labels"]) for example in raw_dataset]
    return LanguagePairDataset(
        raw_dataset,
        src_sizes,
        tgt_sizes,
        shuffle=shuffle,
        pad_to_multiple=pad_to_multiple,
    )
@dataclass
class HuggingFaceTranslationConfig(FairseqDataclass):
    """Options for the hf_translation task (HuggingFace-backed data loading)."""
    data: Optional[str] = field(
        default=None,
        metadata={
            "help": "colon separated path to data directories list, will be iterated upon during epochs "
            "in round-robin manner; however, valid and test data are always in the first directory "
            "to avoid the need for repeating them in all directories"
        },
    )
    # Path of the HuggingFace dataset loading script passed to load_dataset().
    data_loading_file: Optional[str] = field(
        default=None
    )
    # Selects the preprocessing variant in load_langpair_dataset
    # ('gov_report', 'wiki_bio_sum', 'qs_hierarchy_fq', 'qs_hierarchy_qg').
    data_name: str = field(
        default='gov_report'
    )
    max_source_positions: int = field(
        default=16384, metadata={"help": "max number of tokens in the source sequence"}
    )
    max_target_positions: int = field(
        default=1024, metadata={"help": "max number of tokens in the target sequence"}
    )
    # Mirrors the global dataset option via omegaconf interpolation.
    required_seq_len_multiple: int = II("dataset.required_seq_len_multiple")
    # HuggingFace tokenizer checkpoint name.
    tokenizer_name: str = field(
        default='allenai/led-large-16384'
    )
@register_task("hf_translation", dataclass=HuggingFaceTranslationConfig)
class HuggingfaceTranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The | |
'R') : 'DR2',
(6, 27, 'F', 'D') : 'DF0',
(6, 27, 'F', 'L') : 'LF0',
(6, 27, 'F', 'R') : 'RF0',
(6, 27, 'F', 'U') : 'UF0',
(6, 27, 'L', 'B') : 'LB2',
(6, 27, 'L', 'D') : 'DL0',
(6, 27, 'L', 'F') : 'LF2',
(6, 27, 'L', 'U') : 'UL0',
(6, 27, 'R', 'B') : 'RB2',
(6, 27, 'R', 'D') : 'DR0',
(6, 27, 'R', 'F') : 'RF2',
(6, 27, 'R', 'U') : 'UR0',
(6, 27, 'U', 'B') : 'UB2',
(6, 27, 'U', 'F') : 'UF2',
(6, 27, 'U', 'L') : 'UL2',
(6, 27, 'U', 'R') : 'UR2',
(65, 86, 'B', 'D') : 'DB1',
(65, 86, 'B', 'L') : 'LB1',
(65, 86, 'B', 'R') : 'RB1',
(65, 86, 'B', 'U') : 'UB1',
(65, 86, 'D', 'B') : 'DB1',
(65, 86, 'D', 'F') : 'DF1',
(65, 86, 'D', 'L') : 'DL1',
(65, 86, 'D', 'R') : 'DR1',
(65, 86, 'F', 'D') : 'DF1',
(65, 86, 'F', 'L') : 'LF1',
(65, 86, 'F', 'R') : 'RF1',
(65, 86, 'F', 'U') : 'UF1',
(65, 86, 'L', 'B') : 'LB1',
(65, 86, 'L', 'D') : 'DL1',
(65, 86, 'L', 'F') : 'LF1',
(65, 86, 'L', 'U') : 'UL1',
(65, 86, 'R', 'B') : 'RB1',
(65, 86, 'R', 'D') : 'DR1',
(65, 86, 'R', 'F') : 'RF1',
(65, 86, 'R', 'U') : 'UR1',
(65, 86, 'U', 'B') : 'UB1',
(65, 86, 'U', 'F') : 'UF1',
(65, 86, 'U', 'L') : 'UL1',
(65, 86, 'U', 'R') : 'UR1',
(66, 45, 'B', 'D') : 'DB2',
(66, 45, 'B', 'L') : 'LB2',
(66, 45, 'B', 'R') : 'RB2',
(66, 45, 'B', 'U') : 'UB2',
(66, 45, 'D', 'B') : 'DB0',
(66, 45, 'D', 'F') : 'DF0',
(66, 45, 'D', 'L') : 'DL0',
(66, 45, 'D', 'R') : 'DR0',
(66, 45, 'F', 'D') : 'DF2',
(66, 45, 'F', 'L') : 'LF2',
(66, 45, 'F', 'R') : 'RF2',
(66, 45, 'F', 'U') : 'UF2',
(66, 45, 'L', 'B') : 'LB0',
(66, 45, 'L', 'D') : 'DL2',
(66, 45, 'L', 'F') : 'LF0',
(66, 45, 'L', 'U') : 'UL2',
(66, 45, 'R', 'B') : 'RB0',
(66, 45, 'R', 'D') : 'DR2',
(66, 45, 'R', 'F') : 'RF0',
(66, 45, 'R', 'U') : 'UR2',
(66, 45, 'U', 'B') : 'UB0',
(66, 45, 'U', 'F') : 'UF0',
(66, 45, 'U', 'L') : 'UL0',
(66, 45, 'U', 'R') : 'UR0',
(70, 91, 'B', 'D') : 'DB0',
(70, 91, 'B', 'L') : 'LB0',
(70, 91, 'B', 'R') : 'RB0',
(70, 91, 'B', 'U') : 'UB0',
(70, 91, 'D', 'B') : 'DB2',
(70, 91, 'D', 'F') : 'DF2',
(70, 91, 'D', 'L') : 'DL2',
(70, 91, 'D', 'R') : 'DR2',
(70, 91, 'F', 'D') : 'DF0',
(70, 91, 'F', 'L') : 'LF0',
(70, 91, 'F', 'R') : 'RF0',
(70, 91, 'F', 'U') : 'UF0',
(70, 91, 'L', 'B') : 'LB2',
(70, 91, 'L', 'D') : 'DL0',
(70, 91, 'L', 'F') : 'LF2',
(70, 91, 'L', 'U') : 'UL0',
(70, 91, 'R', 'B') : 'RB2',
(70, 91, 'R', 'D') : 'DR0',
(70, 91, 'R', 'F') : 'RF2',
(70, 91, 'R', 'U') : 'UR0',
(70, 91, 'U', 'B') : 'UB2',
(70, 91, 'U', 'F') : 'UF2',
(70, 91, 'U', 'L') : 'UL2',
(70, 91, 'U', 'R') : 'UR2',
(72, 127, 'B', 'D') : 'DB0',
(72, 127, 'B', 'L') : 'LB0',
(72, 127, 'B', 'R') : 'RB0',
(72, 127, 'B', 'U') : 'UB0',
(72, 127, 'D', 'B') : 'DB2',
(72, 127, 'D', 'F') : 'DF2',
(72, 127, 'D', 'L') : 'DL2',
(72, 127, 'D', 'R') : 'DR2',
(72, 127, 'F', 'D') : 'DF0',
(72, 127, 'F', 'L') : 'LF0',
(72, 127, 'F', 'R') : 'RF0',
(72, 127, 'F', 'U') : 'UF0',
(72, 127, 'L', 'B') : 'LB2',
(72, 127, 'L', 'D') : 'DL0',
(72, 127, 'L', 'F') : 'LF2',
(72, 127, 'L', 'U') : 'UL0',
(72, 127, 'R', 'B') : 'RB2',
(72, 127, 'R', 'D') : 'DR0',
(72, 127, 'R', 'F') : 'RF2',
(72, 127, 'R', 'U') : 'UR0',
(72, 127, 'U', 'B') : 'UB2',
(72, 127, 'U', 'F') : 'UF2',
(72, 127, 'U', 'L') : 'UL2',
(72, 127, 'U', 'R') : 'UR2',
(73, 128, 'B', 'D') : 'DB1',
(73, 128, 'B', 'L') : 'LB1',
(73, 128, 'B', 'R') : 'RB1',
(73, 128, 'B', 'U') : 'UB1',
(73, 128, 'D', 'B') : 'DB1',
(73, 128, 'D', 'F') : 'DF1',
(73, 128, 'D', 'L') : 'DL1',
(73, 128, 'D', 'R') : 'DR1',
(73, 128, 'F', 'D') : 'DF1',
(73, 128, 'F', 'L') : 'LF1',
(73, 128, 'F', 'R') : 'RF1',
(73, 128, 'F', 'U') : 'UF1',
(73, 128, 'L', 'B') : 'LB1',
(73, 128, 'L', 'D') : 'DL1',
(73, 128, 'L', 'F') : 'LF1',
(73, 128, 'L', 'U') : 'UL1',
(73, 128, 'R', 'B') : 'RB1',
(73, 128, 'R', 'D') : 'DR1',
(73, 128, 'R', 'F') : 'RF1',
(73, 128, 'R', 'U') : 'UR1',
(73, 128, 'U', 'B') : 'UB1',
(73, 128, 'U', 'F') : 'UF1',
(73, 128, 'U', 'L') : 'UL1',
(73, 128, 'U', 'R') : 'UR1',
(74, 129, 'B', 'D') : 'DB2',
(74, 129, 'B', 'L') : 'LB2',
(74, 129, 'B', 'R') : 'RB2',
(74, 129, 'B', 'U') : 'UB2',
(74, 129, 'D', 'B') : 'DB0',
(74, 129, 'D', 'F') : 'DF0',
(74, 129, 'D', 'L') : 'DL0',
(74, 129, 'D', 'R') : 'DR0',
(74, 129, 'F', 'D') : 'DF2',
(74, 129, 'F', 'L') : 'LF2',
(74, 129, 'F', 'R') : 'RF2',
(74, 129, 'F', 'U') : 'UF2',
(74, 129, 'L', 'B') : 'LB0',
(74, 129, 'L', 'D') : 'DL2',
(74, 129, 'L', 'F') : 'LF0',
(74, 129, 'L', 'U') : 'UL2',
(74, 129, 'R', 'B') : 'RB0',
(74, 129, 'R', 'D') : 'DR2',
(74, 129, 'R', 'F') : 'RF0',
(74, 129, 'R', 'U') : 'UR2',
(74, 129, 'U', 'B') : 'UB0',
(74, 129, 'U', 'F') : 'UF0',
(74, 129, 'U', 'L') : 'UL0',
(74, 129, 'U', 'R') : 'UR0',
(77, 20, 'B', 'D') : 'DB2',
(77, 20, 'B', 'L') : 'LB2',
(77, 20, 'B', 'R') : 'RB2',
(77, 20, 'B', 'U') : 'UB2',
(77, 20, 'D', 'B') : 'DB0',
(77, 20, 'D', 'F') : 'DF0',
(77, 20, 'D', 'L') : 'DL0',
(77, 20, 'D', 'R') : 'DR0',
(77, 20, 'F', 'D') : 'DF2',
(77, 20, 'F', 'L') : 'LF2',
(77, 20, 'F', 'R') : 'RF2',
(77, 20, 'F', 'U') : 'UF2',
(77, 20, 'L', 'B') : 'LB0',
(77, 20, 'L', 'D') : 'DL2',
(77, 20, 'L', 'F') : 'LF0',
(77, 20, 'L', 'U') : 'UL2',
(77, 20, 'R', 'B') : 'RB0',
(77, 20, 'R', 'D') : 'DR2',
(77, 20, 'R', 'F') : 'RF0',
(77, 20, 'R', 'U') : 'UR2',
(77, 20, 'U', 'B') : 'UB0',
(77, 20, 'U', 'F') : 'UF0',
(77, 20, 'U', 'L') : 'UL0',
(77, 20, 'U', 'R') : 'UR0',
(78, 15, 'B', 'D') : 'DB1',
(78, 15, 'B', 'L') : 'LB1',
(78, 15, 'B', 'R') : 'RB1',
(78, 15, 'B', 'U') : 'UB1',
(78, 15, 'D', 'B') : 'DB1',
(78, 15, 'D', 'F') : 'DF1',
(78, 15, 'D', 'L') : 'DL1',
(78, 15, 'D', 'R') : 'DR1',
(78, 15, 'F', 'D') : 'DF1',
(78, 15, 'F', 'L') : 'LF1',
(78, 15, 'F', 'R') : 'RF1',
(78, 15, 'F', 'U') : 'UF1',
(78, 15, 'L', 'B') : 'LB1',
(78, 15, 'L', 'D') : 'DL1',
(78, 15, 'L', 'F') : 'LF1',
(78, 15, 'L', 'U') : 'UL1',
(78, 15, 'R', 'B') : 'RB1',
(78, 15, 'R', 'D') : 'DR1',
(78, 15, 'R', 'F') : 'RF1',
(78, 15, 'R', 'U') : 'UR1',
(78, 15, 'U', 'B') : 'UB1',
(78, 15, 'U', 'F') : 'UF1',
(78, 15, 'U', 'L') : 'UL1',
(78, 15, 'U', 'R') : 'UR1',
(79, 10, 'B', 'D') : 'DB0',
(79, 10, 'B', 'L') : 'LB0',
(79, 10, 'B', 'R') : 'RB0',
(79, 10, 'B', 'U') : 'UB0',
(79, 10, 'D', 'B') : | |
with open(html_name, 'rt', encoding='utf-8') as f:
html0 = f.read()
if html == html0:
return None
with open(html_name, 'wt', encoding='utf-8') as f:
f.write(html)
return None
else:
return html
# JavaScript template initialising one photobox gallery; all %s placeholders
# except the element id receive lowercase JavaScript booleans.
GALLERYCALL = """
$('#%s').photobox('a', {
loop:%s,
thumbs:%s,
autoplay:%s,
time:%d,
zoomable:%s ,
rotatable:%s,
wheelNextPrev:%s
});
"""
def gallery_call(args, gallery_id):
    """Return the one-line photobox initialisation snippet for gallery_id."""
    def js_bool(value):
        # Python True/False -> JavaScript true/false
        return str(value).lower()
    photobox = args.photobox
    template = GALLERYCALL.replace('\n', '')
    return template % (
        gallery_id,
        js_bool(photobox.loop),
        js_bool(photobox.thumbs),
        js_bool(photobox.autoplay),
        photobox.time,
        js_bool(photobox.zoomable),
        js_bool(photobox.rotatable),
        js_bool(photobox.wheelNextPrev),
    )
# -- Media description --------------------------------------------------------
def is_image_file(name):
    """True when name carries a known image extension (case-insensitive)."""
    extension = os.path.splitext(name)[1].lower()
    return extension in (
        '.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.tif'
    )
def is_video_file(name):
    """True when name carries a known video extension (case-insensitive)."""
    extension = os.path.splitext(name)[1].lower()
    return extension in (
        '.mp4', '.webm', '.mkv', '.flv', '.m4v', '.avi', '.wmv', '.mts', '.vob', '.divx'
    )
def is_media(name):
    """True when name is either an image or a video file."""
    return is_image_file(name) or is_video_file(name)
def validate_date(datestr):
    """True when datestr is a valid calendar date in yyyymmdd form."""
    try:
        datetime.datetime.strptime(datestr, '%Y%m%d')
    except ValueError:
        return False
    return True
def date_from_name(name):
    """Heuristic: first standalone 8-digit run in name that is a valid date."""
    match = re.search(r'(?:\D|^)(\d{8})(?:\D|$)', name, re.ASCII)
    if match is None:
        return None
    candidate = match.group(1)
    return candidate if validate_date(candidate) else None
def date_from_item(filename):
    """Date (yyyymmdd) from the file name if present, else from the mtime."""
    named = date_from_name(filename)
    if named:
        return named
    mtime = os.path.getmtime(filename)
    return datetime.datetime.fromtimestamp(mtime).strftime('%Y%m%d')
def time_from_name(name):
    """Heuristic: hhmmss digits directly following a yyyymmdd run in name."""
    match = re.search(r'(?:\D|^)(\d{8})\D(\d{6})(?:\D|$)', name, re.ASCII)
    if match is None:
        return None
    digits = match.group(2)
    hour, minute, second = int(digits[0:2]), int(digits[2:4]), int(digits[4:6])
    if hour < 24 and minute < 60 and second < 60:
        return digits
    return None
def time_from_item(filename):
    """Time (hhmmss) from the file name if present, else from the mtime."""
    named = time_from_name(filename)
    if named:
        return named
    mtime = os.path.getmtime(filename)
    return datetime.datetime.fromtimestamp(mtime).strftime('%H%M%S')
# ffprobe command template probing the first video stream; the target file
# name is appended as the final argument (see make_video_info).
FFPROBE_CMD = '''\
ffprobe -v error
-select_streams v:0
-show_entries stream=width,height,avg_frame_rate,r_frame_rate:format=duration
-of csv=p=0
'''
def get_image_info(filename):
    """Return ((date, time, width, height, size), formatted text) for an image.

    size is the file size in megabytes rounded to one decimal.
    """
    date = date_from_item(filename)
    time = time_from_item(filename)
    img = Image.open(filename)
    width, height = img.size
    size = round(os.path.getsize(filename) / 1e6, 1)
    return (date, time, width, height, size), f'{date} {time}, dim={width}x{height}, {size} MB'
def get_video_info(filename, info_fullname):
    """Return ((date, time, w, h, size, duration, fps), formatted text) for a video.

    Results are cached in info_fullname as a single space-separated line; the
    cache is read back on subsequent calls instead of re-probing with ffprobe.
    """
    if os.path.exists(info_fullname):
        # cache hit: parse the stored "date time width height size duration fps"
        with open(info_fullname) as f:
            info = f.readline().split()
        date, time, width, height, size, duration, fps = info[0], info[1], int(info[2]), int(info[3]), float(info[4]), int(info[5]), float(info[6])
        formatted_info = format_video_info(date, time, width, height, size, duration, fps)
        return (date, time, width, height, size, duration, fps), formatted_info
    else:
        # cache miss: probe the file and persist the result
        info, formatted_info = make_video_info(filename, info_fullname)
        with open(info_fullname, 'wt') as f:
            print(' '.join([str(_) for _ in info]), file=f)
        return info, formatted_info
def make_video_info(filename, info_fullname):
    """Probe filename with ffprobe; return ((date, time, w, h, size, duration, fps), text).

    Raises CalledProcessError (after logging ffprobe's output) when probing
    fails.  info_fullname is unused here; the caller persists the cache file.
    """
    # ffmpeg must be in path
    date = date_from_item(filename)
    time = time_from_item(filename)
    command = [*FFPROBE_CMD.split(), filename]
    try:
        output = check_output(command, stderr=STDOUT).decode()
        width, height, fps, duration = parse_ffprobe_output(output)
        size = round(os.path.getsize(filename) / 1e6, 1)
        output = format_video_info(date, time, width, height, size, duration, fps)
    except CalledProcessError as e:
        output = e.output.decode()
        warning(output)
        raise
    return (date, time, width, height, size, duration, fps), output
def parse_ffprobe_output(ffprobe_output):
    """Extract (width, height, fps, duration) from ffprobe's CSV output.

    fps is rounded to one decimal, duration to the nearest second.
    """
    # first line: width,height,avg_frame_rate,r_frame_rate; last value: duration
    match = re.match(r'(\d+),(\d+),(\d+)/(\d+),(\d+/\d+).*\s(\d+\.\d+)', ffprobe_output, re.DOTALL)
    width, height, fps_num, fps_den = (int(match.group(i)) for i in range(1, 5))
    fps = round(fps_num / fps_den, 1)
    duration = round(float(match.group(6)))
    return width, height, fps, duration
def format_video_info(date, time, width, height, size, duration, fps):
    """One-line human readable description of a video's properties."""
    return f'{date} {time}, dim={width}x{height}, {format_duration(duration)}, fps={fps}, {size} MB'
def format_duration(duration):
    """Format a duration in seconds as m:s=MM:SS, or h:m:s=HH:MM:SS past an hour."""
    mn, sec = divmod(duration, 60)
    if mn <= 59:
        return f'm:s={mn:02}:{sec:02}'
    hour, mn = divmod(mn, 60)
    return f'h:m:s={hour:02}:{mn:02}:{sec:02}'
# -- Thumbnails (image and video) ---------------------------------------------
def thumbname(name, key):
    """Thumbnail file name for a media name under the given key prefix."""
    return f'{key}-{name}.jpg'
def size_thumbnail(width, height, maxdim):
    """Scale (width, height) so the larger dimension becomes maxdim."""
    if width >= height:
        scaled = int(round(maxdim * height / width))
        return maxdim, scaled
    scaled = int(round(maxdim * width / height))
    return scaled, maxdim
def make_thumbnail_image(args, image_name, thumb_name, size):
    """Generate the image thumbnail unless a cached one can be reused."""
    reusable = os.path.exists(thumb_name) and args.forcethumb is False
    if not reusable:
        print('Making thumbnail:', thumb_name)
        create_thumbnail_image(image_name, thumb_name, size)
def create_thumbnail_image(image_name, thumb_name, size):
    """Write a JPEG thumbnail of image_name to thumb_name, bounded by size."""
    imgobj = Image.open(image_name)
    # NOTE(review): this converts non-RGBA *.jpg images to RGBA before
    # thumbnailing, and the trailing .gif clause can never be true when the
    # name already ends with .jpg — confirm the intended file-type logic.
    if (imgobj.mode != 'RGBA'
        and image_name.endswith('.jpg')
        and not (image_name.endswith('.gif') and imgobj.info.get('transparency'))
    ):
        imgobj = imgobj.convert('RGBA')
    imgobj.thumbnail(size, Image.LANCZOS)
    # JPEG cannot store alpha, so always save as RGB
    imgobj = imgobj.convert('RGB')
    imgobj.save(thumb_name)
def make_thumbnail_video(args, video_name, thumb_name, size, duration):
    """Generate the video thumbnail unless a cached one can be reused."""
    reusable = os.path.exists(thumb_name) and args.forcethumb is False
    if not reusable:
        print('Making thumbnail:', thumb_name)
        create_thumbnail_video(args, video_name, thumb_name, size, duration)
# Movie icon pasted onto video thumbnails (base64-encoded PNG, video.png).
VIDEO_ICON = '''\
iVBORw0KGgoAAAANSUhEUgAAABgAAAAUCAAAAACy3qJfAAAA4UlEQVR4
2m1QoRbCMAy88SaK69xscfuEWiS4SZBIcCCRfAL8An8AcnJzTOJSWdxwzJXSPUoHRPQlueYuucigxm
9kDGaMf8AjopGcYn8LmmyLoihBWBiThb+5MTuUsc3aL56upneZ9sByAIg8Z8BEn96EeZ65iU7DvmbP
PxqDcH6p1swXBC4l6yZskACkTN1WrQr2SlIFhTtgqeZa+zsOogLXegvEocZ5c/W5BcoVNNCg3hSudV
/hEh4ofw6cEb00Km8i0dpRDUXfKiaQOEAdrUDo4dFp9C33jjaRac9/gDF/AlplVYtfWGCjAAAAAElF
TkSuQmCC'''
def create_thumbnail_video(args, filename, thumbname, size, duration):
    """Grab a frame with ffmpeg as the video thumbnail, then overlay a movie icon.

    The grab offset is bounded by duration so short clips still yield a frame.
    """
    # ffmpeg must be in path
    delay = min(duration - 1, args.thumbnails.thumbdelay)
    sizearg = '%dx%d' % size
    # NOTE(review): the command is passed to the shell as a string; file names
    # containing double quotes would break it — confirm inputs are controlled.
    command = 'ffmpeg -y -v error -itsoffset -%d -i "%s" -vcodec mjpeg -vframes 1 -an -f rawvideo -s %s "%s"'
    command = command % (delay, filename, sizearg, thumbname)
    # exit status is ignored; failure is detected by the Image.open below
    result = os.system(command)
    # add a movie icon to the thumbnail to identify videos
    try:
        img1 = Image.open(thumbname)
    except:
        # ffmpeg was unable to save thumbnail
        warning('Unable to save thumbnail for', filename)
        return
    img2 = Image.open(io.BytesIO(base64.b64decode(VIDEO_ICON)))
    width, height = img1.size
    img1.paste(img2, (6, height - 20 - 6), None)
    img1.save(thumbname)
def make_thumbnail_subdir(args, subdir_name, thumb_name, size, items, thumbdir):
    """Build the mosaic thumbnail representing a whole sub-directory.

    args is unused here but keeps the signature parallel with the other
    make_thumbnail_* helpers.
    """
    # subdir thumbnails are always created as they depend on the content of the
    # directory
    print('Making thumbnail:', thumb_name)
    create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir)
def create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir):
    """Compose a mosaic of the items' thumbnails and save it as thumb_name.

    The file is rewritten only when the freshly generated mosaic differs from
    the one already on disk, so unchanged thumbnails keep their timestamps.
    """
    def size_thumbnail(width, height, xmax, ymax):
        # scale so the result covers the (xmax, ymax) cell (may overflow one axis)
        width2 = xmax
        height2 = int(round(xmax * height / width))
        if height2 < ymax:
            width2 = int(round(ymax * width / height))
            height2 = ymax
        return width2, height2
    thumblist = [os.path.basename(item.thumb) for item in items]
    widthnum, heightnum, width, height, offsetx, offsety = mosaic_geometry(size, thumblist)
    thumbnum = widthnum * heightnum
    img = Image.new('RGB', size, SUBDIR_BACKCOL)
    for ind, thumb in enumerate(thumblist[:min(thumbnum, len(thumblist))]):
        row = ind // widthnum
        col = ind % widthnum
        img2 = Image.open(os.path.join(thumbdir, thumb))
        w, h = size_thumbnail(*img2.size, width[col], height[row])
        # center-crop the scaled thumbnail to exactly fill its mosaic cell
        cropdim = ((w - width[col]) // 2, (h - height[row]) // 2,
                   (w - width[col]) // 2 + width[col], (h - height[row]) // 2 + height[row])
        img2 = img2.resize((w, h), Image.LANCZOS)
        img2 = img2.crop(cropdim)
        img.paste(img2, (offsetx[col], offsety[row]))
    if os.path.exists(thumb_name):
        # test if the generated thumbnail is identical to the one already on disk
        imgref = Image.open(thumb_name)
        # must save and reload before comparing
        byteio = io.BytesIO()
        img.save(byteio, "JPEG")
        byteio.seek(0)
        imgnew = Image.open(byteio)
        diff = ImageChops.difference(imgnew, imgref)
        if diff.getbbox() is None:
            return
    img.save(thumb_name)
def mosaic_geometry(size, thumblist):
    """Grid geometry for a sub-directory mosaic.

    Returns (widthnum, heightnum, width, height, offsetx, offsety): the cell
    counts, per-column/per-row cell sizes, and pixel offsets, leaving a
    1-pixel border and 2-pixel gutters inside the (width, height) canvas.
    """
    count = len(thumblist)
    if count == 1:
        widthnum, heightnum = 1, 1
    elif count <= 3:
        widthnum, heightnum = 1, 2
    elif count <= 8:
        widthnum, heightnum = 2, 2
    else:
        widthnum, heightnum = 3, 3
    def cell_sizes(total, num):
        # equal cells, with the last one absorbing the rounding remainder
        if num == 1:
            return [total - 2]
        sizes = [total // num - 2] * (num - 1)
        sizes.append(total - (1 + sum(sizes) + 2 * len(sizes) + 1))
        return sizes
    def offsets(sizes):
        result = [1]
        for cell in sizes[:-1]:
            result.append(result[-1] + cell + 2)
        return result
    width = cell_sizes(size[0], widthnum)
    height = cell_sizes(size[1], heightnum)
    return widthnum, heightnum, width, height, offsets(width), offsets(height)
def list_of_htmlfiles(args, posts):
    """All html files produced by the generator: root page plus subdir pages."""
    filenames = [os.path.join(args.dest, args.rootname)]
    for post in posts:
        filenames.extend(list_of_htmlfiles_in_items(post.dcim))
    return filenames
def list_of_htmlfiles_in_items(itemlist):
    """Html page names contributed by PostSubdir items, recursively."""
    filenames = []
    for item in itemlist:
        if type(item) == PostSubdir:
            filenames.append(item.htmname)
            filenames.extend(list_of_htmlfiles_in_items(item.sublist))
    return filenames
def list_of_thumbnails(posts, diary=False):
    """Thumbnail basenames referenced by posts (dcim included unless diary)."""
    names = []
    for post in posts:
        names.extend(list_of_thumbnails_in_items(post.medias))
        if diary is False:
            names.extend(list_of_thumbnails_in_items(post.dcim))
    return names
def list_of_thumbnails_in_items(itemlist):
    """Thumbnail basenames of every item, recursing into PostSubdir sublists."""
    names = []
    for item in itemlist:
        names.append(os.path.basename(item.thumb))
        if type(item) == PostSubdir:
            names.extend(list_of_thumbnails_in_items(item.sublist))
    return names
def purge_htmlfiles(args, posts):
    """
    Purge root dir from irrelevant html files
    """
    wanted = list_of_htmlfiles(args, posts)
    stale = [fullname
             for fullname in glob.glob(os.path.join(args.root, '*.htm*'))
             if fullname not in wanted]
    if len(stale) > args.thumbnails.threshold_htmlfiles:
        # large deletion: ask for confirmation first
        answer = 'x'
        while answer not in 'yn':
            answer = input(f'{len(stale)} html files to remove. Continue [y|n]? ').lower()
        if answer == 'n':
            return
    for name in stale:
        print('Removing html files', name)
        os.remove(name)
def purge_thumbnails(args, thumbdir, posts, diary=False):
    """
    Purge thumbnail dir from irrelevant thumbnails
    """
    wanted = list_of_thumbnails(posts, diary)
    stale = [fullname
             for fullname in glob.glob(os.path.join(thumbdir, '*.jpg'))
             if os.path.basename(fullname) not in wanted]
    if len(stale) > args.thumbnails.threshold_thumbs:
        # large deletion: ask for confirmation first
        answer = 'x'
        while answer not in 'yn':
            answer = input(f'{len(stale)} thumbnails to remove. Continue [y|n]? ').lower()
        if answer == 'n':
            return
    for name in stale:
        print('Removing thumbnail', name)
        os.remove(name)
        # drop the companion .info file, if any
        info_fullname = os.path.splitext(name)[0] + '.info'
        if os.path.exists(info_fullname):
            os.remove(info_fullname)
# -- List of medias helpers ---------------------------------------------------
def is_media_within_dates(fullname, dates):
    """True if fullname is a media file and, when dates is a (start, end)
    tuple, its date lies in that inclusive range."""
    if not is_media(fullname):
        return False
    if type(dates) == tuple:
        return dates[0] <= date_from_item(fullname) <= dates[1]
    # any non-tuple dates value means "no date filtering"
    return True
def sorted_listdir(filelist):
    """Sort file names the way Windows Explorer does.

    Case-insensitive; the basename is right-padded with spaces to the
    longest basename length so that shorter roots sort before longer
    roots sharing a prefix (e.g. 'ab.txt' before 'abc.txt').
    """
    if not filelist:
        return filelist
    # longest basename (computed on the original, non-lowered names)
    maxlen = max(len(os.path.splitext(name)[0]) for name in filelist)

    def explorer_key(name):
        root, ext = os.path.splitext(name.lower())
        # pad the root so the extension only breaks ties between equal roots
        return root.ljust(maxlen, ' ') + ext

    return sorted(filelist, key=explorer_key)
def list_of_files(sourcedir, recursive):
"""
Return the list of full paths for files in source directory
"""
result = list()
if recursive is False:
listdir = sorted_listdir(os.listdir(sourcedir))
if '.nomedia' not in listdir:
for basename in listdir:
result.append(os.path.join(sourcedir, basename))
else:
for root, | |
use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UsageLogsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['start_hr'] = \
start_hr
return self.call_with_http_info(**kwargs)
# Public endpoint object for GET /api/v1/usage/logs, backed by the
# private __get_usage_logs callable. Generated client code: keep these
# maps in sync with the OpenAPI spec rather than editing by hand.
self.get_usage_logs = Endpoint(
    settings={
        'response_type': (UsageLogsResponse,),
        'auth': [
            'apiKeyAuth',
            'appKeyAuth'
        ],
        'endpoint_path': '/api/v1/usage/logs',
        'operation_id': 'get_usage_logs',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        # 'start_hr' is the only required query parameter
        'all': [
            'start_hr',
            'end_hr',
        ],
        'required': [
            'start_hr',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'start_hr':
                (datetime,),
            'end_hr':
                (datetime,),
        },
        'attribute_map': {
            'start_hr': 'start_hr',
            'end_hr': 'end_hr',
        },
        'location_map': {
            'start_hr': 'query',
            'end_hr': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json;datetime-format=rfc3339'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_usage_logs
)
def __get_usage_logs_by_index(self, start_hr, **kwargs):  # noqa: E501
    """Get hourly usage for Logs by Index.

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case the request thread is returned instead.

    Args:
        start_hr (datetime): ISO-8601 UTC hour at which usage begins.

    Keyword Args:
        end_hr (datetime): usage ending **before** this hour. [optional]
        index_name ([str]): log index names to query. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default True.
        _request_timeout (float/tuple): total timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _host_index (int/None): server index; None means use the
            configuration. Default None.
        async_req (bool): execute request asynchronously.

    Returns:
        UsageLogsByIndexResponse (or the request thread when async).
    """
    # Fill in the standard request options only where the caller did
    # not supply them explicitly.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['start_hr'] = start_hr
    return self.call_with_http_info(**kwargs)
# Public endpoint object for GET /api/v1/usage/logs_by_index, backed by
# the private __get_usage_logs_by_index callable. Generated client code:
# keep these maps in sync with the OpenAPI spec rather than editing by hand.
self.get_usage_logs_by_index = Endpoint(
    settings={
        'response_type': (UsageLogsByIndexResponse,),
        'auth': [
            'apiKeyAuth',
            'appKeyAuth'
        ],
        'endpoint_path': '/api/v1/usage/logs_by_index',
        'operation_id': 'get_usage_logs_by_index',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        # 'start_hr' is the only required query parameter
        'all': [
            'start_hr',
            'end_hr',
            'index_name',
        ],
        'required': [
            'start_hr',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'start_hr':
                (datetime,),
            'end_hr':
                (datetime,),
            'index_name':
                ([str],),
        },
        'attribute_map': {
            'start_hr': 'start_hr',
            'end_hr': 'end_hr',
            'index_name': 'index_name',
        },
        'location_map': {
            'start_hr': 'query',
            'end_hr': 'query',
            'index_name': 'query',
        },
        'collection_format_map': {
            # index_name is serialized as repeated query parameters
            'index_name': 'multi',
        }
    },
    headers_map={
        'accept': [
            'application/json;datetime-format=rfc3339'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_usage_logs_by_index
)
def __get_usage_network_flows(self, start_hr, **kwargs):  # noqa: E501
    """Get hourly usage for Network Flows.

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case the request thread is returned instead.

    Args:
        start_hr (datetime): ISO-8601 UTC hour at which usage begins.

    Keyword Args:
        end_hr (datetime): usage ending **before** this hour. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default True.
        _request_timeout (float/tuple): total timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _host_index (int/None): server index; None means use the
            configuration. Default None.
        async_req (bool): execute request asynchronously.

    Returns:
        UsageNetworkFlowsResponse (or the request thread when async).
    """
    # Fill in the standard request options only where the caller did
    # not supply them explicitly.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['start_hr'] = start_hr
    return self.call_with_http_info(**kwargs)
# Public endpoint object for GET /api/v1/usage/network_flows, backed by
# the private __get_usage_network_flows callable. Generated client code:
# keep these maps in sync with the OpenAPI spec rather than editing by hand.
self.get_usage_network_flows = Endpoint(
    settings={
        'response_type': (UsageNetworkFlowsResponse,),
        'auth': [
            'apiKeyAuth',
            'appKeyAuth'
        ],
        'endpoint_path': '/api/v1/usage/network_flows',
        'operation_id': 'get_usage_network_flows',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        # 'start_hr' is the only required query parameter
        'all': [
            'start_hr',
            'end_hr',
        ],
        'required': [
            'start_hr',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'start_hr':
                (datetime,),
            'end_hr':
                (datetime,),
        },
        'attribute_map': {
            'start_hr': 'start_hr',
            'end_hr': 'end_hr',
        },
        'location_map': {
            'start_hr': 'query',
            'end_hr': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json;datetime-format=rfc3339'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_usage_network_flows
)
def __get_usage_network_hosts(self, start_hr, **kwargs):  # noqa: E501
    """Get hourly usage for Network Hosts.

    Synchronous by default; pass async_req=True for an asynchronous
    request, in which case the request thread is returned instead.

    Args:
        start_hr (datetime): ISO-8601 UTC hour at which usage begins.

    Keyword Args:
        end_hr (datetime): usage ending **before** this hour. [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without decoding. Default True.
        _request_timeout (float/tuple): total timeout, or a
            (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server.
            Default True.
        _check_return_type (bool): type-check data received from the
            server. Default True.
        _host_index (int/None): server index; None means use the
            configuration. Default None.
        async_req (bool): execute request asynchronously.

    Returns:
        UsageNetworkHostsResponse (or the request thread when async).
    """
    # Fill in the standard request options only where the caller did
    # not supply them explicitly.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_host_index', None),
    ):
        kwargs.setdefault(option, default)
    kwargs['start_hr'] = start_hr
    return self.call_with_http_info(**kwargs)
# Public endpoint object for GET /api/v1/usage/network_hosts, backed by
# the private __get_usage_network_hosts callable. Generated client code:
# keep these maps in sync with the OpenAPI spec rather than editing by hand.
self.get_usage_network_hosts = Endpoint(
    settings={
        'response_type': (UsageNetworkHostsResponse,),
        'auth': [
            'apiKeyAuth',
            'appKeyAuth'
        ],
        'endpoint_path': '/api/v1/usage/network_hosts',
        'operation_id': 'get_usage_network_hosts',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        # 'start_hr' is the only required query parameter
        'all': [
            'start_hr',
            'end_hr',
        ],
        'required': [
            'start_hr',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'start_hr':
                (datetime,),
            'end_hr':
                (datetime,),
        },
        'attribute_map': {
            'start_hr': 'start_hr',
            'end_hr': 'end_hr',
        },
        'location_map': {
            'start_hr': 'query',
            'end_hr': 'query',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json;datetime-format=rfc3339'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_usage_network_hosts
)
def __get_usage_profiling(
self,
start_hr,
**kwargs
):
"""Get hourly usage for profiled hosts # noqa: E501
Get hourly usage for profiled hosts. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_usage_profiling(start_hr, async_req=True)
>>> | |
str.split("."))
return str
## Add a parent version
# Registers `parent` as a version that `version` is compatible with;
# parentMap maps a version name to the set of its parent versions.
def AddVersionParent(version, parent):
    parentMap[version].add(parent)
def GetVersionProps(version):
    """Return (namespace, version id, is-legacy flag, service namespace)
    for the given version.

    This function is a fixed version of GetVersion().
    """
    namespace = nsMap[version]
    versionId = versionIdMap[version]
    # a version is "legacy" when it is the one registered for its namespace
    legacy = versionMap.get(namespace) == version
    return namespace, versionId, legacy, serviceNsMap[version]
## Get version namespace from version
def GetVersionNamespace(version):
    """Return the wire namespace for a version, e.g. 'ns/versionId'.

    Falls back to the service namespace when the version has no XML
    namespace; the version-id suffix is omitted when empty.
    """
    ns = nsMap[version] or serviceNsMap[version]
    versionId = versionIdMap[version]
    return '%s/%s' % (ns, versionId) if versionId else ns
## Get version from the version uri
def GetVersionFromVersionUri(version):
    # The uri looks like "...:<key>"; keep only the part after the last
    # colon and map it back through versionMap.
    return versionMap[version.rsplit(":", 1)[-1]]
## Get wsdl namespace from version
def GetWsdlNamespace(version):
    """ Get wsdl namespace from version """
    # prefix the service namespace with the "urn:" scheme
    return "urn:" + serviceNsMap[version]
## Get an iterable with all version parents
def GetVersionParents(version):
    # Set of versions this version is compatible with (see AddVersionParent).
    return parentMap[version]
## Get all the versions for the service with specified namespace (partially) ordered
## by compatibility (i.e. any version in the list that is compatible with some version
## v in the list will preceed v)
# @param namespace XML namespace identifying a service
# @return returns all the versions for the service with specified namespace (partially)
#         ordered by compatibility
#
# NOTE: For this function, we use 'namespace' as a representation of 'service'. While
#       this works for most services, for compatibility reasons, the core and query
#       services share the 'vim25' namespace with the vim service. Fortunately, this
#       shouldn't be an issue in practice, as the implementations of the vim
#       service (vpxd and hostd) don't currently advertise that they support any
#       versions of the core or query services, and we don't expect that they ever will.
#       This function assumes that all other namespaces identify a unique service.
def GetServiceVersions(namespace):
    """
    Get all the versions for the service with specified namespace (partially) ordered
    by compatibility (i.e. any version in the list that is compatible with some version
    v in the list will preceed v)
    """
    matching = [v for (v, n) in iteritems(serviceNsMap) if n == namespace]

    def compare(a, b):
        # a sorts before b when b is among a's parents (i.e. a is
        # compatible with b); unrelated versions fall back to name order
        if a == b:
            return 0
        if b in parentMap[a]:
            return -1
        if a in parentMap[b]:
            return 1
        return (a > b) - (a < b)

    if PY3:
        return sorted(matching, key=cmp_to_key(compare))
    else:
        # python2 sorted() takes the comparison function positionally
        return sorted(matching, compare)
## Set a WSDL method with wsdl namespace and wsdl name
# Internal to VmomiSupport
# Note: Must be holding the _lazyLock
#
# @param ns XML namespace
# @param wsdlName wsdl name
# @param inputMM managed method object or info to load it (it points to
#        list object that points to the type info which holds
#        this managed method's information)
# @return returns added method or exising method if (ns, wsdlName)
#         is already in the map. It throws a runtime error if
#         trying to set two type info list's to the same (ns, wsdlName)
def _SetWsdlMethod(ns, wsdlName, inputMM):
    """
    Set a WSDL method with wsdl namespace and wsdl name
    Returns added method / existing method if (ns, wsdlName) already in the map
    Note: Must be holding the _lazyLock
    """
    _wsdlMethodNSs.add(ns)
    curMM = _wsdlMethodMap.get( (ns, wsdlName) )
    # if inputMM is a list (lazy load info: the type is not loaded yet)
    if isinstance(inputMM, list):
        if curMM is None:
            # first registration for this key wins
            _wsdlMethodMap[(ns, wsdlName)] = inputMM
            return inputMM
        elif isinstance(curMM, list):
            # two distinct lazy registrations for the same key is a bug
            raise RuntimeError(
                "Duplicate wsdl method %s %s (new class %s vs existing %s)" % \
                (ns, wsdlName, inputMM[0], curMM[0]))
        else:
            # an already-loaded ManagedMethod takes precedence; keep it
            return curMM
    # if inputMM is a ManagedMethod
    else:
        if curMM is None or isinstance(curMM, list):
            # a loaded method replaces an absent or lazy registration
            _wsdlMethodMap[(ns, wsdlName)] = inputMM
            return inputMM
        else:
            return curMM
## Get wsdl method from ns, wsdlName
# @param ns XML namespace
# @param wsdlName wsdl name
# @return managed method object or throws a KeyError
def GetWsdlMethod(ns, wsdlName):
    """ Get wsdl method from ns, wsdlName """
    with _lazyLock:
        method = _wsdlMethodMap[(ns, wsdlName)]
        if isinstance(method, ManagedMethod):
            # The type corresponding to the method is loaded,
            # just return the method object
            return method
        elif method:
            # The type is not loaded; the map holds the info needed to
            # load it. Load the actual type, then return the (now
            # resolved) method object
            LoadManagedType(*method)
            return _wsdlMethodMap[(ns, wsdlName)]
        else:
            # Bug fix: previously raised with undefined `name`, which
            # turned a missing method into a NameError. Use wsdlName.
            raise KeyError("{0} {1}".format(ns, wsdlName))
## Guess the method from wsdlname with no ns
# WARNING! This should not be used in general, as there is no guarantee for
# the correctness of the guessing method
# @param name wsdl name
# @return managed method object if found in any namespace else throws
#         KeyError
def GuessWsdlMethod(name):
    with _lazyLock:
        # Some methods exist in multiple namespaces and returning the
        # wrong one causes a deserialization error. Since python3 set
        # iteration order is not deterministic, always probe the base
        # namespace before scanning the remaining ones.
        for ns in [XMLNS_VMODL_BASE] + list(_wsdlMethodNSs):
            try:
                return GetWsdlMethod(ns, name)
            except KeyError:
                pass
        raise KeyError(name)
## Widen a type to one supported in a given version
def GetCompatibleType(type, version):
    """Walk up the base-class chain until the type's declared _version
    is compatible with `version`; types without a _version attribute
    are returned unchanged."""
    # Only types carrying "_version" (DataObject / ManagedObject) can
    # be widened
    current = type
    if hasattr(current, "_version"):
        while not IsChildVersion(version, current._version):
            current = current.__bases__[0]
    return current
## Invert an injective mapping
def InverseMap(map):
    """Return a new dict mapping each value of `map` back to its key
    (assumes the mapping is injective)."""
    return {value: key for (key, value) in iteritems(map)}
def GetVmodlNs(version):
    """Return the VMODL namespace of a version name, e.g.
    'vim.version.version12' -> 'vim'."""
    parts = version.split('.version.')
    # exactly one '.version.' separator is expected
    assert len(parts) == 2, 'Unsupported version format: %s' % version
    return parts[0]
# Module-level registries shared by the lookup helpers above.
types = Object()      # container object for generated types
nsMap = {}            # version name -> XML namespace (see GetVersionProps)
versionIdMap = {}     # version name -> version id suffix (may be empty)
versionMap = {}       # namespace key -> version name (see GetVersionFromVersionUri)
serviceNsMap = { BASE_VERSION : XMLNS_VMODL_BASE.split(":")[-1] }  # version -> service namespace
parentMap = {}        # version -> set of compatible parent versions
class _MaturitySet:
    """
    Registry for versions from all namespaces defining a given maturity.
    The registration is automatic (relevant code is generated by emitters),
    while for the query one may use either the VMODL namespace id (e.g. 'vim'),
    or the wire namespace id (e.g. 'vim25').
    """
    def __init__(self):
        # Four parallel maps: keyed by VMODL namespace and by wire namespace.
        self._verNameMap = {}   # e.g. 'vim' -> 'vim.version.version12'
        self._verNameMapW = {}  # e.g. 'vim25' -> 'vim.version.version12'
        self._wireIdMap = {}    # e.g. 'vim' -> 'vim25/6.7'
        self._wireIdMapW = {}   # e.g. 'vim25' -> 'vim25/6.7'
    def Add(self, version):
        """
        Register the version at corresponding maturity for a given VMODL
        namespace. The 'version' parameter is in the VMODL name format
        e.g. 'vim.version.version12'. This method is typically used by
        auto-generated code.
        """
        vmodlNs = GetVmodlNs(version)
        # TODO fix the VSAN-related part of vcenter-all to enable the assert
        # assert not (vmodlNs in self._verNameMap), 'Re-definition: %s' % vmodlNs
        wireId = GetVersionNamespace(version)  # e.g. 'vim25/6.7'
        wireNs = wireId.split('/')[0]          # e.g. 'vim25'
        # keep all four maps in sync for this version
        self._verNameMap[vmodlNs] = version
        self._verNameMapW[wireNs] = version
        self._wireIdMap[vmodlNs] = wireId
        self._wireIdMapW[wireNs] = wireId
        return wireId, wireNs
    def GetName(self, vmodlNs):
        """
        VMODL namespace to registered version name mapping, e.g.
        'vim' -> 'vim.version.version12'
        """
        return self._verNameMap[vmodlNs]
    def GetNameW(self, wireNs):
        """
        Wire namespace to registered version name mapping, e.g.
        'vim25' -> 'vim.version.version12'
        """
        return self._verNameMapW[wireNs]
    def GetWireId(self, vmodlNs):
        """
        VMODL namespace to registered version wire-id mapping, e.g.
        'vim' -> 'vim25/6.7'
        """
        return self._wireIdMap[vmodlNs]
    def GetWireIdW(self, wireNs):
        """
        Wire namespace to registered version wire-id mapping, e.g.
        'vim25' -> 'vim25/6.7'
        """
        return self._wireIdMapW[wireNs]
    def EnumerateVmodlNs(self):
        """
        Returns an iterable with registered VMODL namespace, e.g.
        ['vim', 'vpx', ... ]
        """
        return self._verNameMap.keys()
    def EnumerateWireNs(self):
        """
        Returns an iterable with registered wire namespace, e.g.
        ['vim25', 'vpxd3', ... ]
        """
        return self._verNameMapW.keys()
    def EnumerateVersions(self):
        """
        Returns an iterable with registered VMODL versions, e.g.
        ['vim.version.version12', 'vpx.version.version12', ... ]
        """
        return self._verNameMap.values()
    def EnumerateWireIds(self):
        """
        Returns an iterable with registered versions wire-ids, e.g.
        e.g. ['vim25/6.7', 'vpxd3/6.7', ... ]
        """
        return self._wireIdMap.values()
# Backward compatibility aliases for the renamed accessor methods
_MaturitySet.Get = _MaturitySet.GetName
_MaturitySet.GetNamespace = _MaturitySet.GetWireId
# Module-level registries, one per maturity level; populated through
# _MaturitySet.Add by generated code.
newestVersions = _MaturitySet()
ltsVersions = _MaturitySet()
dottedVersions = _MaturitySet()
oldestVersions = _MaturitySet()
# Alias for backward compatibility.
publicVersions = ltsVersions
from .Version import AddVersion, IsChildVersion
if not isinstance(bool, type): # bool not a type in python <= 2.2
    bool = type("bool", (int,),
                {"__new__": lambda cls, val=0: int.__new__(cls, val and 1 or 0)})
# Distinct named subclasses of the builtin types, one per VMODL scalar type.
byte = type("byte", (int,), {})
short = type("short", (int,), {})
double = type("double", (float,), {})
if PY3:
    # python3 has no builtin `long`; define it as an int subclass
    long = type("long", (int,), {})
URI = type("URI", (str,), {})
if not PY3:
    # six defines binary_type in python2 as a string; this means the
    # JSON encoder sees checksum properties as strings and attempts
    # to perform utf-8 decoding on them because they contain high-bit
    # characters.
    binary = type("binary", (bytearray,), {})
else:
    binary = type("binary", (binary_type,), {})
PropertyPath = type("PropertyPath", (text_type,), {})
# _wsdlTypeMapNSs store namespaces added to _wsdlTypeMap in _SetWsdlType
_wsdlTypeMapNSs | |
<gh_stars>1-10
import copy
import math
from typing import *
import yodel.globaldat as globaldat
def typeManagment(data: Any) -> bytearray:
    '''
    Convert a supported value into a byte sequence suitable for packing.

    str values are UTF-8 encoded into a bytearray; bytes and bytearray
    values are returned unchanged.

    Raises:
        TypeError: if data is of an unsupported type (previously this
            fell through and silently returned None).
    '''
    if isinstance(data, str):
        return bytearray(data.encode(encoding='UTF-8', errors='strict'))
    if isinstance(data, (bytes, bytearray)):
        # already a byte sequence; returned as-is for backward
        # compatibility (bytes are not copied into a bytearray)
        return data
    raise TypeError(f'typeManagment: unsupported type {type(data).__name__}')
class Flags:
    '''
    Class meant to be used in fields; an array of 8 bools used to store
    flags about the packet. Bits can be addressed by integer index or,
    when a lookup table is supplied, by name.

    Args:
        lookup_table: list of strings used to map keys to bits; a None
            entry leaves the corresponding bit unnamed.
    '''

    def __init__(self, lookup_table: list):
        self.data = [0, 0, 0, 0, 0, 0, 0, 0]
        # lookup table, maps names provided by the field onto indexes in data
        self.lookup = {}
        self.a = 2  # NOTE(review): appears to be leftover debug state -- confirm before removing
        if lookup_table:  # check if lookup table is provided
            # Index the lookup table; a None name is skipped so that
            # e.g. ["a", None, "b"] only creates keys for index 0 and 2.
            for index, key in enumerate(lookup_table):
                if key is not None:
                    self.lookup[key] = index

    def __setitem__(self, key, value):
        # String keys go through the name lookup; integer keys index the
        # bit array directly (and coerce the value to int).
        if type(key) == str:
            self.data[self.lookup[key]] = value
        else:
            self.data[key] = int(value)

    def __getitem__(self, key):
        if type(key) == str:
            return self.data[self.lookup[key]]
        return self.data[key]

    def __bytes__(self):
        # Pack the 8 bits into one byte, data[0] being the most
        # significant bit.
        out = 0
        for i in range(8):
            out += self.data[7 - i] * 2 ** i
        return out.to_bytes(1, 'little')

    def __repr__(self):
        # bit string, same order as self.data
        return ''.join(str(int(bit)) for bit in self.data)
class Format:
    """
    Formats store the information needed to encode or decode data,
    e.g. first 3 bytes are a string, next 5 are for an int, etc.

    Args:
        fields: list of Field objects that define the format
        mtype: short for message type; a unique identifier sent with the
            message so the receiver knows which format decodes it
            (0 means unregistered)
    """

    def __init__(self, fields: List, mtype: int = 0):
        self.mtype: int = mtype
        # dictionary that holds field data formated as field name: field object
        self.fields_dict = {}
        # the raw field list still holds lots of useful metadata, so it
        # is kept around
        self.fields: list = fields
        # field name -> placeholder value; Sections copy this dict on
        # init rather than regenerating it
        self.output: dict = {}
        if self.mtype != 0:
            # register non-zero message types so autoDecode can find the
            # format later
            globaldat.messages_types[self.mtype] = self
        for field in fields:
            self.output[field.name] = 0
            self.fields_dict[field.name] = field
class Section:
    """
    sections are used to store data and the meta-data needed to encode that data.
    to get extract all of the data in a section use:
    section.fields
    sections can be encoded by using bytes(section), also,
    if a section is used in yodel.send it will automatically handle it.
    Args:
        format: format object to be used when encoding this section
    """
    def __init__(self, format: Format):
        # Assign through __dict__ directly: __setattr__ below redirects
        # ordinary attribute writes into self.fields, which would
        # recurse before "fields" itself exists.
        self.__dict__["format"] = format
        # copy empty dict from format which has names already set
        self.__dict__["fields"] = copy.copy(format.output)
        # holds anything that comes after all fields have been filled
        self.__dict__["payload"] = b''
    def print(self): # fancy aligned dump of all fields plus the payload
        # human-readable labels for each supported field type
        type_lookup = {
            bytearray: "Bytearray",
            int: "Int",
            Flags: "Flags",
            bytes: "bytes",
            str: "String"
        }
        for i in list(self.fields.keys()):
            name_len = len(str(i))
            space = 20   # column width reserved for the field name
            dat_len = len(str(self.fields[i]))
            space2 = 20  # column width reserved for the value
            field_type = self.__dict__["format"].fields_dict[i].type
            print_type = type_lookup[field_type]
            # shrink the padding by what the value/name already occupy
            if dat_len < space2:
                space2 = space2 - dat_len
            if name_len < space:
                space = space - name_len
            if field_type == str:
                # print rules for strings
                print(
                    f"{i}:{' '*space}\"{self.fields[i]}\"{' '*(space2 - 2)}{print_type}")
            elif field_type == int:
                # print rules for ints
                print(
                    f"{i}:{' '*space}{self.fields[i]}{' '*(space2)}{print_type}")
            elif field_type == Flags:
                # print rules for flags (includes the bit names)
                print(
                    f"{i}:{' '*space}{self.fields[i]}{' '*(space2)}{print_type} {list(self.fields[i].lookup.keys())}")
            elif field_type == bytearray:
                print(
                    f"{i}:{' '*space}{self.fields[i]}{' '*(space2)}{print_type}")
        print(f"payload:{' '*space}{self.payload}")
    def __bytes__(self):
        # delegate the actual encoding to the module-level evalBytes helper
        return(evalBytes(self.__dict__["fields"], self.__dict__["format"], self.__dict__["payload"]))
    def __setattr__(self, name, value):
        # every attribute except "payload" is stored as a field value
        if name != "payload":
            self.fields[name] = value
        else:
            self.__dict__["payload"] = value
    def __getattr__(self, name):
        # mirror of __setattr__: resolve unknown attributes as fields
        if name != "payload":
            return(self.fields[name])
        else:
            return(self.__dict__["payload"])
    def __setitem__(self, key, value):
        self.fields[key] = value
    def __getitem__(self, key):
        return(self.fields[key])
    def __str__(self):
        return(str(self.fields))
class Field:
    '''
    A field is a section of memory meant to hold one value.
    Args:
        name: name of field
        _type: data type to use in field (one of supported_types)
        bytes: when applicable this can hold the length of the field
        min: minimum value (int) or length (str/bytearray); used to size
            the field when bytes is not given
        max: maximum value or length; used the same way as min
    '''
    # NOTE(review): the `bytes`/`min`/`max` keyword names shadow builtins,
    # but renaming them would break keyword callers.
    supported_types = [int, str, bytearray, Flags]
    def __init__(self, name: str, _type: Type, *args, bytes=0, min=0, max=0):
        bytes_len: int = bytes
        if _type == int:
            self.min = min
            self.max = max
            if bytes_len:
                self.len = bytes_len
                # signed integers are encoded using sign and magnitude,
                # so one bit of the declared width goes to the sign
                self.min = -1 * 2**((bytes_len * 8) - 1)
                self.max = 2**((bytes_len * 8) - 1) - 1
            else:
                # when type is an int, len is the number of bytes needed
                # to represent the min..max range of possible values
                self.len = math.ceil((max - min).bit_length() / 8)
        elif _type == str or _type == bytearray:
            if bytes_len:
                # an explicit byte length acts as the maximum data length
                max = bytes_len
            self.min = min
            self.max = max
            # here len sizes the length *prefix*, not the data itself:
            # the bytes needed to encode a length in the min..max range
            self.len = math.ceil((max - min).bit_length() / 8)
        elif _type == Flags:
            self.min = 0
            self.max = 0
            self.len = 1 # flags type is always one byte long
            if len(args) == 1:
                # take the array that holds the bit names
                self.lookup = args[0]
            else:
                self.lookup = False
        self.name = name # field name
        self.type = _type # field data type
def decode(data: bytearray, encoding: Format) -> Section:
'''
Returns list of all field names
Args:
data: bytearray of data that you want to decode
encoding: format object to be used as the decoding rules
'''
fnames = list(encoding.fields_dict.keys())
output = Section(encoding) # generate new section object to store output
cpos = 0 # current position, sort of a pointer to the bytearray
for field in range(len(fnames)):
fname = fnames[field] # field name
fieldobj = encoding.fields_dict[fname]
ftype = fieldobj.type # data type of the field
flen = fieldobj.len # field length
# take the next n bytes where n is the length of the field
fdata = data[cpos:cpos + flen]
cpos += flen # incriment the current position by the length of the field
fmin = fieldobj.min # min field value
# all data types need their own custom decoding scheme
if ftype == str:
# get the size of the string by taking the first flen bytes and
# converting them to an int
strlen = globaldat.getInt(fdata)
strlen += fmin
# return the next n bytes where n is the length of the string
# defined by strlen
strdat = data[cpos:cpos + strlen]
cpos += strlen # move current position forward by the length of the string
output[fname] = strdat.decode("utf-8") # decode bytes as utf-8
elif ftype == bytearray:
# get the size of the bytearray by | |
incep2_4_0 = self.conv2dplusrelu(concat2_3, filter2_4_0, "NCHW", "SAME", 1, 1)
# branch 1
incep2_4_1a = self.conv2dplusrelu(concat2_3, filter2_4_1a, "NCHW", "SAME", 1, 1)
incep2_4_1b = self.conv2dplusrelu(incep2_4_1a, filter2_4_1b, "NCHW", "SAME", 1, 1)
incep2_4_1 = self.conv2dplusrelu(incep2_4_1b, filter2_4_1c, "NCHW", "SAME", 1, 1)
# branch 2
incep2_4_2a = self.conv2dplusrelu(concat2_3, filter2_4_2a, "NCHW", "SAME", 1, 1)
incep2_4_2b = self.conv2dplusrelu(incep2_4_2a, filter2_4_2b, "NCHW", "SAME", 1, 1)
incep2_4_2c = self.conv2dplusrelu(incep2_4_2b, filter2_4_2c, "NCHW", "SAME", 1, 1)
incep2_4_2d = self.conv2dplusrelu(incep2_4_2c, filter2_4_2d, "NCHW", "SAME", 1, 1)
incep2_4_2 = self.conv2dplusrelu(incep2_4_2d, filter2_4_2e, "NCHW", "SAME", 1, 1)
# branch 3
incep2_4_3a = self.ad.pooling_2d_forward_op(concat2_3, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep2_4_3 = self.conv2dplusrelu(incep2_4_3a, filter2_4_3, "NCHW", "SAME", 1, 1)
concat2_4a = self.ad.concat_forward_op(incep2_4_0, incep2_4_1)
concat2_4b = self.ad.concat_forward_op(concat2_4a, incep2_4_2)
concat2_4 = self.ad.concat_forward_op(concat2_4b, incep2_4_3)
# inception_moudle2_5
filter2_5_0 = self.ad.Variable("filter2_5_0")
filter2_5_1a = self.ad.Variable("filter2_5_1a")
filter2_5_1b = self.ad.Variable("filter2_5_1b")
filter2_5_1c = self.ad.Variable("filter2_5_1c")
filter2_5_2a = self.ad.Variable("filter2_5_2a")
filter2_5_2b = self.ad.Variable("filter2_5_2b")
filter2_5_2c = self.ad.Variable("filter2_5_2c")
filter2_5_2d = self.ad.Variable("filter2_5_2d")
filter2_5_2e = self.ad.Variable("filter2_5_2e")
filter2_5_3 = self.ad.Variable("filter2_5_3a")
filter2_5_0_val = (192, 768, 1, 1)
filter2_5_1_vala = (160, 768, 1, 1)
filter2_5_1_valb = (160, 160, 1, 7)
filter2_5_1_valc = (192, 160, 7, 1)
filter2_5_2_vala = (160, 768, 1, 1)
filter2_5_2_valb = (160, 160, 7, 1)
filter2_5_2_valc = (160, 160, 1, 7)
filter2_5_2_vald = (160, 160, 7, 1)
filter2_5_2_vale = (192, 160, 1, 7)
filter2_5_3_val = (192, 768, 1, 1)
# branch_0
incep2_5_0 = self.conv2dplusrelu(concat2_4, filter2_5_0, "NCHW", "SAME", 1, 1)
# branch 1
incep2_5_1a = self.conv2dplusrelu(concat2_4, filter2_5_1a, "NCHW", "SAME", 1, 1)
incep2_5_1b = self.conv2dplusrelu(incep2_5_1a, filter2_5_1b, "NCHW", "SAME", 1, 1)
incep2_5_1 = self.conv2dplusrelu(incep2_5_1b, filter2_5_1c, "NCHW", "SAME", 1, 1)
# branch 2
incep2_5_2a = self.conv2dplusrelu(concat2_4, filter2_5_2a, "NCHW", "SAME", 1, 1)
incep2_5_2b = self.conv2dplusrelu(incep2_5_2a, filter2_5_2b, "NCHW", "SAME", 1, 1)
incep2_5_2c = self.conv2dplusrelu(incep2_5_2b, filter2_5_2c, "NCHW", "SAME", 1, 1)
incep2_5_2d = self.conv2dplusrelu(incep2_5_2c, filter2_5_2d, "NCHW", "SAME", 1, 1)
incep2_5_2 = self.conv2dplusrelu(incep2_5_2d, filter2_5_2e, "NCHW", "SAME", 1, 1)
# branch 3
incep2_5_3a = self.ad.pooling_2d_forward_op(concat2_4, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep2_5_3 = self.conv2dplusrelu(incep2_5_3a, filter2_5_3, "NCHW", "SAME", 1, 1)
concat2_5a = self.ad.concat_forward_op(incep2_5_0, incep2_5_1)
concat2_5b = self.ad.concat_forward_op(concat2_5a, incep2_5_2)
concat2_5 = self.ad.concat_forward_op(concat2_5b, incep2_5_3)
# # inception_moudle3
# inception_moudle3_1
filter3_1_0a = self.ad.Variable("filter3_1_0a")
filter3_1_0b = self.ad.Variable("filter3_1_0b")
filter3_1_1a = self.ad.Variable("filter3_1_1a")
filter3_1_1b = self.ad.Variable("filter3_1_1b")
filter3_1_1c = self.ad.Variable("filter3_1_1c")
filter3_1_1d = self.ad.Variable("filter3_1_1d")
filter3_1_0_vala = (192, 768, 1, 1)
filter3_1_0_valb = (320, 192, 3, 3)
filter3_1_1_vala = (192, 768, 1, 1)
filter3_1_1_valb = (192, 192, 1, 7)
filter3_1_1_valc = (192, 192, 7, 1)
filter3_1_1_vald = (192, 192, 3, 3)
# branch_0
incep3_1_0a = self.conv2dplusrelu(concat2_5, filter3_1_0a, "NCHW", "SAME", 1, 1)
incep3_1_0 = self.conv2dplusrelu(incep3_1_0a, filter3_1_0b, "NCHW", "VALID", 2, 2)
# branch 1
incep3_1_1a = self.conv2dplusrelu(concat2_2, filter3_1_1a, "NCHW", "SAME", 1, 1)
incep3_1_1b = self.conv2dplusrelu(incep3_1_1a, filter3_1_1b, "NCHW", "SAME", 1, 1)
incep3_1_1c = self.conv2dplusrelu(incep3_1_1b, filter3_1_1c, "NCHW", "SAME", 1, 1)
incep3_1_1 = self.conv2dplusrelu(incep3_1_1c, filter3_1_1d, "NCHW", "VALID", 2, 2)
# branch 2
incep3_1_2 = self.ad.pooling_2d_forward_op(concat2_2, "NCHW", "mean", 0, 0, 2, 2, 3, 3)
concat3_1a = self.ad.concat_forward_op(incep3_1_0, incep3_1_1)
concat3_1 = self.ad.concat_forward_op(concat3_1a, incep3_1_2)
# inception_moudle3_2
filter3_2_0 = self.ad.Variable("filter3_2_0")
filter3_2_1a = self.ad.Variable("filter3_2_1a")
filter3_2_1b = self.ad.Variable("filter3_2_1b")
filter3_2_1c = self.ad.Variable("filter3_2_1c")
filter3_2_2a = self.ad.Variable("filter3_2_2a")
filter3_2_2b = self.ad.Variable("filter3_2_2b")
filter3_2_2c = self.ad.Variable("filter3_2_2c")
filter3_2_2d = self.ad.Variable("filter3_2_2d")
filter3_2_3 = self.ad.Variable("filter3_2_3a")
filter3_2_0_val = (320, 1280, 1, 1)
filter3_2_1_vala = (384, 1280, 1, 1)
filter3_2_1_valb = (384, 384, 1, 3)
filter3_2_1_valc = (384, 384, 3, 1)
filter3_2_2_vala = (448, 1280, 1, 1)
filter3_2_2_valb = (384, 448, 3, 3)
filter3_2_2_valc = (384, 384, 1, 3)
filter3_2_2_vald = (384, 384, 3, 1)
filter3_2_3_val = (192, 1280, 1, 1)
# branch_0
incep3_2_0 = self.conv2dplusrelu(concat3_1, filter3_2_0, "NCHW", "SAME", 1, 1)
# branch 1
incep3_2_1a = self.conv2dplusrelu(concat3_1, filter3_2_1a, "NCHW", "SAME", 1, 1)
incep3_2_1b = self.conv2dplusrelu(incep3_2_1a, filter3_2_1b, "NCHW", "SAME", 1, 1)
incep3_2_1c = self.conv2dplusrelu(incep3_2_1a, filter3_2_1c, "NCHW", "SAME", 1, 1)
incep3_2_1 = self.ad.concat_forward_op(incep3_2_1b, incep3_2_1c)
# branch 2
incep3_2_2a = self.conv2dplusrelu(concat3_1, filter3_2_2a, "NCHW", "SAME", 1, 1)
incep3_2_2b = self.conv2dplusrelu(incep3_2_2a, filter3_2_2b, "NCHW", "SAME", 1, 1)
incep3_2_2c = self.conv2dplusrelu(incep3_2_2b, filter3_2_2c, "NCHW", "SAME", 1, 1)
incep3_2_2d = self.conv2dplusrelu(incep3_2_2b, filter3_2_2d, "NCHW", "SAME", 1, 1)
incep3_2_2 = self.ad.concat_forward_op(incep3_2_2c, incep3_2_2d)
# branch 3
incep3_2_3a = self.ad.pooling_2d_forward_op(concat3_1, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep3_2_3 = self.conv2dplusrelu(incep3_2_3a, filter3_2_3, "NCHW", "SAME", 1, 1)
concat3_2a = self.ad.concat_forward_op(incep3_2_0, incep3_2_1)
concat3_2b = self.ad.concat_forward_op(concat3_2a, incep3_2_2)
concat3_2 = self.ad.concat_forward_op(concat3_2b, incep3_2_3)
# # inception_moudle3_3
filter3_3_0 = self.ad.Variable("filter3_3_0")
filter3_3_1a = self.ad.Variable("filter3_3_1a")
filter3_3_1b = self.ad.Variable("filter3_3_1b")
filter3_3_1c = self.ad.Variable("filter3_3_1c")
filter3_3_2a = self.ad.Variable("filter3_3_2a")
filter3_3_2b = self.ad.Variable("filter3_3_2b")
filter3_3_2c = self.ad.Variable("filter3_3_2c")
filter3_3_2d = self.ad.Variable("filter3_3_2d")
filter3_3_3 = self.ad.Variable("filter3_3_3a")
filter3_3_0_val = (320, 2048, 1, 1)
filter3_3_1_vala = (384, 2048, 1, 1)
filter3_3_1_valb = (384, 384, 1, 3)
filter3_3_1_valc = (384, 384, 3, 1)
filter3_3_2_vala = (448, 2048, 1, 1)
filter3_3_2_valb = (384, 448, 3, 3)
filter3_3_2_valc = (384, 384, 1, 3)
filter3_3_2_vald = (384, 384, 3, 1)
filter3_3_3_val = (192, 2048, 1, 1)
# branch_0
incep3_3_0 = self.conv2dplusrelu(concat3_2, filter3_3_0, "NCHW", "SAME", 1, 1)
# branch 1
incep3_3_1a = self.conv2dplusrelu(concat3_2, filter3_3_1a, "NCHW", "SAME", 1, 1)
incep3_3_1b = self.conv2dplusrelu(incep3_3_1a, filter3_3_1b, "NCHW", "SAME", 1, 1)
incep3_3_1c = self.conv2dplusrelu(incep3_3_1a, filter3_3_1c, "NCHW", "SAME", 1, 1)
incep3_3_1 = self.ad.concat_forward_op(incep3_3_1b, incep3_3_1c)
# branch 2
incep3_3_2a = self.conv2dplusrelu(concat3_2, filter3_3_2a, "NCHW", "SAME", 1, 1)
incep3_3_2b = self.conv2dplusrelu(incep3_3_2a, filter3_3_2b, "NCHW", "SAME", 1, 1)
incep3_3_2c = self.conv2dplusrelu(incep3_3_2b, filter3_3_2c, "NCHW", "SAME", 1, 1)
incep3_3_2d = self.conv2dplusrelu(incep3_3_2b, filter3_3_2d, "NCHW", "SAME", 1, 1)
incep3_3_2 = self.ad.concat_forward_op(incep3_3_2c, incep3_3_2d)
# branch 3
incep3_3_3a = self.ad.pooling_2d_forward_op(concat3_2, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep3_3_3 = self.conv2dplusrelu(incep3_3_3a, filter3_3_3, "NCHW", "SAME", 1, 1)
concat3_3a = self.ad.concat_forward_op(incep3_3_0, incep3_3_1)
concat3_3b = self.ad.concat_forward_op(concat3_3a, incep3_3_2)
concat3_3 = self.ad.concat_forward_op(concat3_3b, incep3_3_3)
filtera1 = self.ad.Variable("filtera1")
filtera1val = (1000, 2048, 1, 1)
W = self.ad.Variable("filtersmul")
W_val = (1000, 1000)
b = self.ad.Variable("biases")
b_val = (1000, )
poollast = self.ad.pooling_2d_forward_op(concat3_3, "NCHW", "mean", 0, 0, 1, 1, 8, 8)
dropout = self.ad.dropout_forward_op(poollast, "NCHW", 0.8)
convlast = self.conv2dplusrelu(dropout, filtera1, "NCHW", "SAME", 1, 1)
squeeze = self.ad.squeeze_op(convlast)
dense = self.ad.dense(squeeze, W, b)
y = self.ad.fullyactivation_forward_op(dense, "NCHW", "softmax")
loss = self.ad.crossEntropy_loss(y, y_)
# fc8
executor = self.ad.Executor(loss, y, 0.001, None, None, log_path=self.log_path, **kwargs)
feed_dict = {filterb_1: filtersb_val1, filterb_2: filtersb_val2, filterb_3: filtersb_val3
, filterb_4: filtersb_val4, filterb_5: filtersb_val5,
filter1_1_0: filter1_1_0_val, filter1_1_1a: filter1_1_1_vala, filter1_1_1b: filter1_1_1_valb, filter1_1_2a: filter1_1_2_vala, filter1_1_2b: filter1_1_2_valb
, filter1_1_2c: filter1_1_2_valc, filter1_1_3: filter1_1_3_val
, filter1_2_0: filter1_2_0_val, filter1_2_1a: filter1_2_1_vala,
filter1_2_1b: filter1_2_1_valb, filter1_2_2a: filter1_2_2_vala,
filter1_2_2b: filter1_2_2_valb, filter1_2_2c: filter1_2_2_valc, filter1_2_3: filter1_2_3_val
, filter1_3_0: filter1_3_0_val, filter1_3_1a: filter1_3_1_vala,
filter1_3_1b: filter1_3_1_valb, filter1_3_2a: filter1_3_2_vala,
filter1_3_2b: filter1_3_2_valb, filter1_3_2c: filter1_3_2_valc,
filter1_3_3: filter1_3_3_val
, filter2_1_0: filter2_1_0_val, filter2_1_1a: filter2_1_1_vala, filter2_1_1b: filter2_1_1_valb, filter2_1_1c: filter2_1_1_valc
, filter2_2_0: filter2_2_0_val, filter2_2_1a: filter2_2_1_vala, filter2_2_1b: filter2_2_1_valb, filter2_2_1c: filter2_2_1_valc,
filter2_2_2a: filter2_2_2_vala, filter2_2_2b: filter2_2_2_valb, filter2_2_2c: filter2_2_2_valc, filter2_2_2d: filter2_2_2_vald, filter2_2_2e: filter2_2_2_vale,
filter2_2_3: filter2_2_3_val
, filter2_3_0: filter2_3_0_val, filter2_3_1a: filter2_3_1_vala, filter2_3_1b: filter2_3_1_valb,
filter2_3_1c: filter2_3_1_valc,
filter2_3_2a: filter2_3_2_vala, filter2_3_2b: filter2_3_2_valb,
filter2_3_2c: filter2_3_2_valc, filter2_3_2d: filter2_3_2_vald,
filter2_3_2e: filter2_3_2_vale, filter2_3_3: filter2_3_3_val
, filter2_4_0: filter2_4_0_val, filter2_4_1a: filter2_4_1_vala, filter2_4_1b: filter2_4_1_valb,
filter2_4_1c: filter2_4_1_valc,
filter2_4_2a: filter2_4_2_vala, filter2_4_2b: filter2_4_2_valb,
filter2_4_2c: filter2_4_2_valc, filter2_4_2d: filter2_4_2_vald,
filter2_4_2e: filter2_4_2_vale, filter2_4_3: filter2_4_3_val
, filter2_5_0: filter2_5_0_val, filter2_5_1a: filter2_5_1_vala, filter2_5_1b: filter2_5_1_valb,
filter2_5_1c: filter2_5_1_valc,
filter2_5_2a: filter2_5_2_vala, filter2_5_2b: filter2_5_2_valb,
filter2_5_2c: filter2_5_2_valc, filter2_5_2d: filter2_5_2_vald,
filter2_5_2e: filter2_5_2_vale, filter2_5_3: filter2_5_3_val
, filter3_1_0a: filter3_1_0_vala, filter3_1_0b: filter3_1_0_valb, filter3_1_1a: filter3_1_1_vala, filter3_1_1b: filter3_1_1_valb,
filter3_1_1c: filter3_1_1_valc, filter3_1_1d: filter3_1_1_vald
, filter3_2_0: filter3_2_0_val, filter3_2_1a: filter3_2_1_vala,
filter3_2_1b: filter3_2_1_valb,
filter3_2_1c: filter3_2_1_valc, filter3_2_2a: filter3_2_2_vala, filter3_2_2b: filter3_2_2_valb,
filter3_2_2c: filter3_2_2_valc, filter3_2_2d: filter3_2_2_vald, filter3_2_3: filter3_2_3_val
, filter3_3_0: filter3_3_0_val, filter3_3_1a: filter3_3_1_vala,
filter3_3_1b: filter3_3_1_valb,
filter3_3_1c: filter3_3_1_valc, filter3_3_2a: filter3_3_2_vala,
filter3_3_2b: filter3_3_2_valb,
filter3_3_2c: filter3_3_2_valc, filter3_3_2d: filter3_3_2_vald,
filter3_3_3: filter3_3_3_val
, filtera1: filtera1val, W: W_val, b: b_val}
feed_dict_mv = {}
for key, value in feed_dict.items():
print(key)
m_key = executor.Variable_node_to_mv[key][0]
m_val = value
v_key = executor.Variable_node_to_mv[key][1]
v_val = value
feed_dict_mv.update({m_key: m_val, v_key: v_val})
X_val = (self.batch_size, self.image_channel, self.image_size, self.image_size) # number = batch_size channel = 3 image_size = 224*224
y_val = (self.batch_size, 1000)
feed_dict.update(feed_dict_mv)
feed_dict[X] = X_val
feed_dict[y_] = y_val
executor.init_operator_latency(feed_dict_sample=feed_dict, **kwargs)
return executor.predict_results
def run(self, executor_ctx, top_control_queue, top_message_queue, n_class, X_val, y_val, **kwargs):
self.n_class = n_class
self.top_control_queue = top_control_queue
self.top_message_queue = top_message_queue
self.executor_ctx = executor_ctx
X = self.ad.Placeholder("X")
y_ = self.ad.Placeholder("y_")
filterb_1 = self.ad.Variable("filterb_1")
filterb_2 = self.ad.Variable("filterb_2")
filterb_3 = self.ad.Variable("filterb_3")
filterb_4 = self.ad.Variable("filterb_4")
filterb_5 = self.ad.Variable("filterb_5")
filtersb_val1 = ndarray.array(np.random.normal(0, 0.5, (32, 3, 3, 3)), executor_ctx)
filtersb_val2 = ndarray.array(np.random.normal(0, 0.5, (32, 32, 3, 3)), executor_ctx)
filtersb_val3 = ndarray.array(np.random.normal(0, 0.5, (64, 32, 3, 3)), executor_ctx)
filtersb_val4 = ndarray.array(np.random.normal(0, 0.5, (80, 64, 1, 1)), executor_ctx)
filtersb_val5 = ndarray.array(np.random.normal(0, 0.5, (192, 80, 3, 3)), executor_ctx)
# inception前
covb_1 = self.conv2dplusrelu(X, filterb_1, | |
test_warn_if_multiple_augmenters_with_same_name(self):
source = iaa.Sequential([iaa.Fliplr(0.5, name="hflip"),
iaa.Fliplr(0.5, name="hflip")])
target = iaa.Sequential([iaa.Fliplr(0.5, name="hflip")])
source.localize_random_state_()
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
_ = target.copy_random_state(source, matching="name")
assert len(caught_warnings) == 1
assert (
"contains multiple augmenters with the same name"
in str(caught_warnings[-1].message)
)
# TODO: these tests change the input type from list to array. It might be
# reasonable to change that and to test the list scenario separately.
class TestAugmenterHooks(unittest.TestCase):
    """Tests for the ``HooksImages``/``HooksKeypoints`` callback machinery."""

    def setUp(self):
        reseed()

    @property
    def image(self):
        base = np.uint8([[0, 0, 1],
                         [0, 0, 1],
                         [0, 1, 1]])
        return np.atleast_3d(base)

    @property
    def image_lr(self):
        # self.image flipped left-right
        base = np.uint8([[1, 0, 0],
                         [1, 0, 0],
                         [1, 1, 0]])
        return np.atleast_3d(base)

    @property
    def image_lrud(self):
        # self.image flipped left-right, then up-down
        base = np.uint8([[1, 1, 0],
                         [1, 0, 0],
                         [1, 0, 0]])
        return np.atleast_3d(base)

    def test_preprocessor(self):
        def increment_center(images, augmenter, parents):
            modified = np.copy(images)
            modified[0][1, 1, 0] += 1
            return modified

        hooks = ia.HooksImages(preprocessor=increment_center)
        seq = iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])
        observed = seq.augment_images([self.image], hooks=hooks)

        # the preprocessor fires once per augmenter (Sequential + two
        # children), so the center pixel ends up incremented three times
        expected = np.copy(self.image_lrud)
        expected[1, 1, 0] = 3
        assert np.array_equal(observed[0], expected)

    def test_postprocessor(self):
        def increment_center(images, augmenter, parents):
            modified = np.copy(images)
            modified[0][1, 1, 0] += 1
            return modified

        hooks = ia.HooksImages(postprocessor=increment_center)
        seq = iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])
        observed = seq.augment_images([self.image], hooks=hooks)

        expected = np.copy(self.image_lrud)
        expected[1, 1, 0] = 3
        assert np.array_equal(observed[0], expected)

    def test_propagator(self):
        def stop_at_sequentials(images, augmenter, parents, default):
            return False if "Seq" in augmenter.name else default

        hooks = ia.HooksImages(propagator=stop_at_sequentials)
        seq = iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])
        observed = seq.augment_images([self.image], hooks=hooks)

        # propagation is cut at the Sequential itself, so no child runs
        assert np.array_equal(observed[0], self.image)

    def test_activator(self):
        def skip_flipud(images, augmenter, parents, default):
            return False if "Flipud" in augmenter.name else default

        hooks = ia.HooksImages(activator=skip_flipud)
        seq = iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])
        observed = seq.augment_images([self.image], hooks=hooks)

        # only the horizontal flip was allowed to run
        assert np.array_equal(observed[0], self.image_lr)

    def test_activator_keypoints(self):
        def deactivate_all(keypoints_on_images, augmenter, parents, default):
            return False

        hooks = ia.HooksKeypoints(activator=deactivate_all)
        kpsoi = ia.KeypointsOnImage(
            [ia.Keypoint(x=1, y=0),
             ia.Keypoint(x=2, y=0),
             ia.Keypoint(x=2, y=1)],
            shape=(5, 10, 3))
        aug = iaa.Affine(translate_px=1)
        observed = aug.augment_keypoints(kpsoi, hooks=hooks)

        # the augmenter was deactivated, so keypoints are unchanged
        assert keypoints_equal([observed], [kpsoi])
class TestAugmenterWithLoadedImages(unittest.TestCase):
    """Verify augmenters do not modify images loaded from disk in place.

    The same write/reload/augment roundtrip is exercised for images loaded
    via cv2, imageio and PIL. The previously triplicated assertion logic is
    factored into ``_create_image()`` and ``_assert_inplace_safe()``.
    """

    def setUp(self):
        reseed()

    @classmethod
    def _create_image(cls):
        """Return a deterministic (10, 20, 3) uint8 image.

        Each channel carries a distinct offset so channel-order mixups
        (e.g. BGR vs RGB) would be caught by the equality checks.
        """
        image = np.arange(10*20).astype(np.uint8).reshape((10, 20, 1))
        image = np.tile(image, (1, 1, 3))
        image[:, :, 0] += 0
        image[:, :, 1] += 1
        image[:, :, 2] += 2
        return image

    def _assert_inplace_safe(self, image_reloaded, image_cp, images_cp):
        """Assert the in-place dummy augmenters copy rather than mutate.

        Checks, for both the list-based and array-based augmenter and for
        both the ``__call__`` and explicit ``augment_*`` interfaces, that
        the reloaded input is left untouched and the output is input + 1.
        """
        images_reloaded = image_reloaded[np.newaxis, :, :, :]
        aug_arrs = _InplaceDummyAugmenterImgsArray(1)
        aug_lists = _InplaceDummyAugmenterImgsList(1)

        # single image, __call__ interface
        image_aug = aug_lists(image=image_reloaded)
        assert image_aug is not image_reloaded
        assert np.array_equal(image_reloaded, image_cp)
        assert np.array_equal(image_aug, image_cp + 1)

        # single image, explicit augment_image()
        image_aug = aug_lists.augment_image(image=image_reloaded)
        assert image_aug is not image_reloaded
        assert np.array_equal(image_reloaded, image_cp)
        assert np.array_equal(image_aug, image_cp + 1)

        # image batch, __call__ interface
        images_aug = aug_arrs(images=images_reloaded)
        assert images_aug is not images_reloaded
        assert np.array_equal(images_reloaded, images_cp)
        assert np.array_equal(images_aug, images_cp + 1)

        # image batch, explicit augment_images()
        images_aug = aug_arrs.augment_images(images=images_reloaded)
        assert images_aug is not images_reloaded
        assert np.array_equal(images_reloaded, images_cp)
        assert np.array_equal(images_aug, images_cp + 1)

    def test_with_cv2(self):
        image = self._create_image()
        image_cp = np.copy(image)
        images_cp = np.copy(image[np.newaxis, :, :, :])
        with TemporaryDirectory() as dirpath:
            imgpath = os.path.join(dirpath, "temp_cv2.png")
            imageio.imwrite(imgpath, image)
            # cv2 loads BGR; reverse the channel axis back to RGB
            image_reloaded = cv2.imread(imgpath)[:, :, ::-1]
            self._assert_inplace_safe(image_reloaded, image_cp, images_cp)

    def test_with_imageio(self):
        image = self._create_image()
        image_cp = np.copy(image)
        images_cp = np.copy(image[np.newaxis, :, :, :])
        with TemporaryDirectory() as dirpath:
            imgpath = os.path.join(dirpath, "temp_imageio.png")
            imageio.imwrite(imgpath, image)
            image_reloaded = imageio.imread(imgpath)
            self._assert_inplace_safe(image_reloaded, image_cp, images_cp)

    def test_with_pil(self):
        # PIL images may be converted with np.asarray or np.array; test both
        for fname in ["asarray", "array"]:
            with self.subTest(fname=fname):
                image = self._create_image()
                image_cp = np.copy(image)
                images_cp = np.copy(image[np.newaxis, :, :, :])
                with TemporaryDirectory() as dirpath:
                    imgpath = os.path.join(dirpath,
                                           "temp_pil_%s.png" % (fname,))
                    imageio.imwrite(imgpath, image)
                    image_reloaded = getattr(np, fname)(
                        PIL.Image.open(imgpath))
                    self._assert_inplace_safe(
                        image_reloaded, image_cp, images_cp)
class TestSequential(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
image = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8) * 255
return np.atleast_3d(image)
@property
def images(self):
return np.array([self.image], dtype=np.uint8)
@property
def image_lr(self):
image_lr = np.array([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]], dtype=np.uint8) * 255
return np.atleast_3d(image_lr)
@property
def images_lr(self):
return np.array([self.image_lr], dtype=np.uint8)
@property
def image_ud(self):
image_ud = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8) * 255
return np.atleast_3d(image_ud)
@property
def images_ud(self):
return np.array([self.image_ud], dtype=np.uint8)
@property
def image_lr_ud(self):
image_lr_ud = np.array([[1, 0, 0],
[1, 0, 0],
[1, 1, 0]], dtype=np.uint8) * 255
return np.atleast_3d(image_lr_ud)
@property
def images_lr_ud(self):
return np.array([self.image_lr_ud])
@property
def keypoints(self):
kps = [ia.Keypoint(x=1, y=0),
ia.Keypoint(x=2, y=0),
ia.Keypoint(x=2, y=1)]
return ia.KeypointsOnImage(kps, shape=self.image.shape)
@property
def keypoints_aug(self):
kps = [ia.Keypoint(x=3-1, y=3-0),
ia.Keypoint(x=3-2, y=3-0),
ia.Keypoint(x=3-2, y=3-1)]
return ia.KeypointsOnImage(kps, shape=self.image.shape)
@property
def polygons(self):
polygon = ia.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
return ia.PolygonsOnImage([polygon], shape=self.image.shape)
@property
def polygons_aug(self):
polygon = ia.Polygon([(3-0, 3-0), (3-2, 3-0), (3-2, 3-2), (3-0, 3-2)])
return ia.PolygonsOnImage([polygon], shape=self.image.shape)
@property
def lsoi(self):
ls = ia.LineString([(0, 0), (2, 0), (2, 2), (0, 2)])
return ia.LineStringsOnImage([ls], shape=self.image.shape)
@property
def lsoi_aug(self):
ls = ia.LineString([(3-0, 3-0), (3-2, 3-0), (3-2, 3-2), (3-0, 3-2)])
return ia.LineStringsOnImage([ls], shape=self.image.shape)
@property
def bbsoi(self):
bb = ia.BoundingBox(x1=0, y1=0, x2=2, y2=2)
return ia.BoundingBoxesOnImage([bb], shape=self.image.shape)
@property
def bbsoi_aug(self):
x1 = 3-0
x2 = 3-2
y1 = 3-0
y2 = 3-2
bb = ia.BoundingBox(x1=min(x1, x2), y1=min(y1, y2),
x2=max(x1, x2), y2=max(y1, y2))
return ia.BoundingBoxesOnImage([bb], shape=self.image.shape)
@property
def heatmaps(self):
heatmaps_arr = np.float32([[0, 0, 1.0],
[0, 0, 1.0],
[0, 1.0, 1.0]])
return ia.HeatmapsOnImage(heatmaps_arr, shape=self.image.shape)
@property
def heatmaps_aug(self):
heatmaps_arr_expected = np.float32([[1.0, 1.0, 0.0],
[1.0, 0, 0],
[1.0, 0, 0]])
return ia.HeatmapsOnImage(heatmaps_arr_expected, shape=self.image.shape)
@property
def segmaps(self):
segmaps_arr = np.int32([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]])
return ia.SegmentationMapsOnImage(segmaps_arr, shape=self.image.shape)
@property
def segmaps_aug(self):
segmaps_arr_expected = np.int32([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]])
return ia.SegmentationMapsOnImage(segmaps_arr_expected,
shape=self.image.shape)
@property
def seq_two_flips(self):
return iaa.Sequential([
iaa.Fliplr(1.0),
iaa.Flipud(1.0)
])
def test_images__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_lr_ud)
def test_images__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_lr_ud)
def test_images_as_list__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [self.image_lr_ud])
def test_images_as_list__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert array_equal_lists(observed, [self.image_lr_ud])
def test_keypoints__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_keypoints([self.keypoints])
assert_cbaois_equal(observed, [self.keypoints_aug])
def test_keypoints__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_keypoints([self.keypoints])
assert_cbaois_equal(observed, [self.keypoints_aug])
def test_polygons__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_polygons(self.polygons)
assert_cbaois_equal(observed, self.polygons_aug)
def test_polygons__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_polygons(self.polygons)
assert_cbaois_equal(observed, self.polygons_aug)
def test_line_strings__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi_aug)
def test_line_strings__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi_aug)
def test_bounding_boxes__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi_aug)
def test_bounding_boxes__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi_aug)
def test_heatmaps__two_flips(self):
aug = self.seq_two_flips
heatmaps = self.heatmaps
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 3, 1)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1.0 - 1e-6 < observed.max_value < 1.0 + 1e-6
assert np.allclose(observed.get_arr(),
self.heatmaps_aug.get_arr())
def test_segmentation_maps__two_flips(self):
aug = self.seq_two_flips
segmaps = self.segmaps
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == (3, 3, 1)
assert np.array_equal(observed.get_arr(),
self.segmaps_aug.get_arr())
def test_children_not_provided(self):
aug = iaa.Sequential()
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, image)
def test_children_are_none(self):
aug = iaa.Sequential(children=None)
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, image)
def test_children_is_single_augmenter_without_list(self):
aug = iaa.Sequential(iaa.Fliplr(1.0))
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(image))
def test_children_is_a_sequential(self):
aug = iaa.Sequential(iaa.Sequential(iaa.Fliplr(1.0)))
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(image))
def test_children_is_list_of_sequentials(self):
aug = iaa.Sequential([
iaa.Sequential(iaa.Flipud(1.0)),
iaa.Sequential(iaa.Fliplr(1.0))
])
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert | |
kwargs.get('outfunc', self.outfunc)
nkwargs['dbname'] = kwargs.get('dbname', self.dbname)
nkwargs['dbkey'] = kwargs.get('dbkey', self.dbkey)
nkwargs['datatype'] = kwargs.get('datatype', self.datatype)
nkwargs['dtype'] = kwargs.get('dtype', drsfile.dtype)
nkwargs['shape'] = kwargs.get('shape', drsfile.shape)
nkwargs['numfiles'] = kwargs.get('numfiles', drsfile.numfiles)
nkwargs['s1d'] = kwargs.get('s1d', drsfile.s1d)
# return new instance of DrsFitsFile
return DrsFitsFile(**nkwargs)
# -------------------------------------------------------------------------
# file checking
# -------------------------------------------------------------------------
def check_file(self):
    """
    Checks that this file is correct

    Runs the extension check, the header-key existence check and the
    header-key value check in order, stopping at the first failure, then
    resolves the fiber for this file.

    :returns: True or False and the reason why (if False)
    """
    # set function name
    _ = display_func(None, 'check_file', __NAME__, 'DrsFitsFile')
    # run each validation in order; bail out on the first failure with
    # the message it produced
    for check in (self.has_correct_extension,
                  self.hkeys_exist,
                  self.has_correct_hkeys):
        passed, reason = check()
        if not passed:
            return False, reason
    # check if we have a fiber defined
    self.has_fiber()
    # all checks passed
    return True, None
def has_correct_extension(self, filename=None, filetype=None, argname=None):
    """Check that the filename ends with the expected extension.

    :param filename: file to check (defaults to this file's filename)
    :param filetype: expected extension (defaults to this file's filetype)
    :param argname: name used for the argument in log/error messages
    :returns: tuple of (True, success message) or (False, error message)
    """
    # set function name
    _ = display_func(None, 'has_correct_extension', __NAME__, 'DrsFitsFile')
    # fall back to this file's own extension when none given
    if filetype is None:
        filetype = self.filetype
    # fall back to this file's own filename when none given
    if filename is None:
        filename = self.filename
        basename = self.basename
    else:
        basename = os.path.basename(filename)
    # -----------------------------------------------------------------
    # default argument name used in messages
    if argname is None:
        argname = TextEntry('40-001-00018')
    # -----------------------------------------------------------------
    # a recipe must be attached before parameters can be read
    self.check_recipe()
    params = self.recipe.drs_params
    # -----------------------------------------------------------------
    # decide whether the extension is acceptable
    if filetype is None:
        # no expected extension --> nothing to check against
        cond, msg = True, TextEntry('09-000-00003', args=[basename])
    elif filename.endswith(filetype):
        cond, msg = True, TextEntry('09-000-00004', args=[basename, filetype])
    else:
        cond, msg = False, TextEntry('09-000-00005', args=[basename, filetype])
    # failure: build and return the user-facing error
    if not cond:
        emsg = TextEntry('09-001-00006', args=[argname, filetype])
        return False, emsg
    # success: emit a debug message and return the success entry
    dargs = [argname, os.path.basename(filename)]
    WLOG(params, 'debug', TextEntry('90-001-00009', args=dargs),
         wrap=False)
    return True, msg
def hkeys_exist(self, header=None, filename=None, argname=None):
# set function name
func_name = display_func(None, 'hkeys_exist', __NAME__, 'DrsFitsFile')
# deal with no input header
if header is None:
# check file has been read
self.check_read(header_only=True, load=True)
# get header
header = self.header
# deal with no input filename
if filename is None:
basename = self.basename
else:
basename = os.path.basename(filename)
# -----------------------------------------------------------------
# check recipe has been set
self.check_recipe()
# get recipe and parameters
params = self.recipe.drs_params
rkeys = self.required_header_keys
# -----------------------------------------------------------------
# deal with no argument name
if argname is None:
argname = TextEntry('40-001-00018')
# -----------------------------------------------------------------
# Check that required keys are in header
for drskey in rkeys:
# check whether header key is in param dict (i.e. from a
# keywordstore) or whether we have to use the key as is
if drskey in params:
key = params[drskey][0]
source = params.sources[drskey]
else:
key = drskey
source = func_name
# deal with empty key
if (key is None) or key == '':
eargs = [key, drskey, source]
WLOG(params, 'error', TextEntry('00-006-00011', args=eargs))
# check if key is in header
if key not in header:
eargs = [argname, key]
emsg = TextEntry('09-001-00007', args=eargs)
WLOG(params, 'debug', emsg)
return False, emsg
else:
dargs = [argname, key, basename]
WLOG(params, 'debug', TextEntry('90-001-00010', args=dargs),
wrap=False)
# if we have got to this point return True (success) and no error
# messages
return True, None
    def has_correct_hkeys(self, header=None, argname=None, log=True,
                          filename=None):
        """
        Check that each required header key has its required value.

        :param header: header instance or None (defaults to self.header,
                       read from disk if required)
        :param argname: argument name used in debug reporting (defaults to
                        a generic TextEntry)
        :param log: bool, if True log the result of each comparison at
                    debug level
        :param filename: str or None, used in the missing-key exception
                         (overwritten with self.filename when header is
                         None)

        :returns: tuple (found, errors) - found is True only if every key
                  matched; errors maps each header key to a tuple
                  (found, argname, required value, actual value)
        :raises: lang.drs_exceptions.DrsHeaderError if a required key is
                 missing from the header
        """
        # set function name
        _ = display_func(None, 'has_correct_hkeys', __NAME__, 'DrsFitsFile')
        # -----------------------------------------------------------------
        # check recipe has been set
        self.check_recipe()
        # get recipe and parameters
        params = self.recipe.drs_params
        # -----------------------------------------------------------------
        # set function name
        _ = display_func(params, 'has_correct_hkeys', __NAME__, 'DrsFitsFile')
        # deal with no input header
        if header is None:
            # check file has been read
            self.check_read(header_only=True, load=True)
            # get header
            header = self.header
            # get file
            # NOTE(review): this overwrites any 'filename' argument that
            # was passed in when header is None
            filename = self.filename
        # get short hand to required header keys
        rkeys = self.required_header_keys
        # -----------------------------------------------------------------
        # deal with no argument name
        if argname is None:
            argname = TextEntry('40-001-00018')
        # -----------------------------------------------------------------
        # search for correct value for each header key
        found = True
        # storage
        errors = dict()
        # -----------------------------------------------------------------
        # loop around required keys
        for drskey in rkeys:
            # check whether header key is in param dict (i.e. from a
            # keywordstore) or whether we have to use the key as is
            if drskey in params:
                key = params[drskey][0]
            else:
                key = drskey
            # check that key is in header
            if key not in header:
                ekwargs = dict(level='error', key=key, filename=filename)
                raise lang.drs_exceptions.DrsHeaderError('Key not found',
                                                         **ekwargs)
            # get value and required value
            # NOTE(review): assumes the header value is a string (has
            # .strip()) - confirm for non-string header cards
            value = header[key].strip()
            rvalue = rkeys[drskey].strip()
            # check if key is valid
            if rvalue != value:
                dargs = [argname, key, rvalue]
                if log:
                    WLOG(params, 'debug', TextEntry('90-001-00011', args=dargs),
                         wrap=False)
                found = False
            else:
                dargs = [argname, key, rvalue]
                if log:
                    WLOG(params, 'debug', TextEntry('90-001-00012', args=dargs),
                         wrap=False)
            # store info
            # NOTE(review): 'found' here is the running flag, not the result
            # for this particular key - every entry stored after the first
            # mismatch records False; confirm consumers expect this
            errors[key] = (found, argname, rvalue, value)
        # return found (bool) and errors
        return found, errors
def has_fiber(self, header=None):
# set function name
_ = display_func(None, 'has_fiber', __NAME__, 'DrsFitsFile')
# -----------------------------------------------------------------
# check whether fiber already set (in which case ignore)
if self.fiber is not None:
return
# -----------------------------------------------------------------
# check recipe has been set
self.check_recipe()
# deal with no input header
if header is None:
# check file has been read
self.check_read(header_only=True, load=True)
# get header
header = self.header
# get recipe and parameters
params = self.recipe.drs_params
# -----------------------------------------------------------------
kw_fiber = params['KW_FIBER'][0]
# -----------------------------------------------------------------
# deal with fiber
if kw_fiber in self.header:
fiber = header[kw_fiber]
# TODO: remove elif when fiber is always in header if file
# TODO: has a fiber
# TODO: START OF REMOVE ------------------------------------------------
elif 'AB' in self.basename.split('_')[-1]:
fiber = 'AB'
elif 'A' in self.basename.split('_')[-1]:
fiber = 'A'
elif 'B' in self.basename.split('_')[-1]:
fiber = 'B'
elif 'C' in self.basename.split('_')[-1]:
fiber = 'C'
# TODO: END OF REMOVE --------------------------------------------------
else:
fiber = None
# update fiber value
if fiber is not None:
self.fiber = fiber
# -------------------------------------------------------------------------
# table checking
# -------------------------------------------------------------------------
    def get_infile_outfilename(self, params, recipe, infilename,
                               allowedfibers=None, ext='.fits'):
        """
        Walk back through this file's input-type chain (down to the raw
        file) and project the given raw filename forward to construct the
        input file instance and its resolved output filename.

        :param params: parameter dictionary of constants
        :param recipe: the recipe instance this file is used in
        :param infilename: str, the (raw) input filename to start from
        :param allowedfibers: str, list of str or None, fibers allowed for
                              this file (forwarded to the table filename
                              checks)
        :param ext: str, the file extension assigned to the input file

        :returns: tuple (infile, valid, outfilename) - the constructed
                  input DrsFitsFile, whether the final table filename check
                  passed, and the resolved output filename
        """
        # set function name
        _ = display_func(None, 'get_infile_outfilename', __NAME__,
                         'DrsFitsFile')
        # ------------------------------------------------------------------
        # 1. need to assign an input type for our raw file
        if self.intype is not None:
            # deal with in type being list
            if isinstance(self.intype, list):
                intype = self.intype[0]
            else:
                intype = self.intype
            # get new copy
            infile = intype.newcopy(recipe=recipe)
        else:
            infile = DrsFitsFile('DRS_RAW_TEMP')
        # ------------------------------------------------------------------
        # storage of files
        chain_files = []
        # need to go back through the file history and update filename
        cintype = self.completecopy(infile)
        # loop until we have no intype (raw file)
        while cintype is not None:
            # add to chain
            chain_files.append(self.completecopy(cintype))
            if hasattr(cintype, 'intype'):
                # deal with in type being list
                if isinstance(cintype.intype, list):
                    cintype = cintype.intype[0]
                else:
                    cintype = cintype.intype
            else:
                break
        # ------------------------------------------------------------------
        # set the file name to the infilename
        filename = str(infilename)
        # the last element of the chain is taken to be the raw file
        bottomfile = chain_files[-1]
        # now we have chain we can project file (assuming last element in the
        # chain is the raw file)
        # walk the chain from the raw file towards the target type,
        # skipping the raw file itself; each step resolves the next
        # filename in the chain
        for cintype in chain_files[::-1][1:]:
            bottomfile.filename = filename
            bottomfile.basename = os.path.basename(filename)
            # check whether we need fiber
            if bottomfile.fibers is not None:
                fiber = allowedfibers
            else:
                fiber = None
            # get out file name
            out = cintype.check_table_filename(params, recipe, bottomfile,
                                               fullpath=True,
                                               allowedfibers=fiber)
            valid, outfilename = out
            # set the filename to the outfilename
            filename = outfilename
            bottomfile = cintype
        # ------------------------------------------------------------------
        # add values to infile
        infile.filename = filename
        infile.basename = os.path.basename(filename)
        infile.filetype = ext
        # ------------------------------------------------------------------
        # get outfilename (final)
        # NOTE(review): 'valid' from the loop above is discarded here - only
        # the final check's validity is returned
        valid, outfilename = self.check_table_filename(params, recipe, infile,
                                                       allowedfibers)
        # ------------------------------------------------------------------
        # return infile
        return infile, valid, outfilename
def check_table_filename(self, params, recipe, infile, allowedfibers=None,
fullpath=False):
"""
Checks whether raw "filename" belongs to this DrsFile
:param params:
:param recipe:
:param infile:
:param allowedfibers:
:param fullpath:
:return:
"""
# set function name
func_name = display_func(None, 'check_table_filename', __NAME__,
'DrsFitsFile')
# ------------------------------------------------------------------
# deal with fibers
if allowedfibers is not None:
if isinstance(allowedfibers, str):
fibers = [allowedfibers]
else:
fibers = list(allowedfibers)
elif self.fibers is None:
fibers = [None]
else:
fibers = self.fibers
| |
= request.GET or request.POST
params = {param.upper(): value for param, value in query.items()}
# 900913 is deprecated
if params.get('SRS') == 'EPSG:900913':
params['SRS'] = 'EPSG:3857'
if params.get('CRS') == 'EPSG:900913':
params['CRS'] = 'EPSG:3857'
map_param = params.get('MAP')
# As we have one QGIS project per layer, we don't support GetCapabilities
# for now without any layer. We know, it's not OGC compliant.
if params.get('REQUEST') == 'GetCapabilities':
if (not map_param and
not (params.get('LAYERS') or params.get('TYPENAME'))):
return HttpResponse('GetCapabilities is not supported yet.')
# As we have one project per layer, we add the MAP path if the request is
# specific for one layer.
if not map_param and (params.get('LAYERS') or params.get('TYPENAME')):
# LAYERS is for WMS, TYPENAME for WFS
layer_name = params.get('LAYERS') or params.get('TYPENAME')
if len(layer_name.split(',')) > 1:
return HttpResponse(
'We do not support many layers in the request')
layer = get_object_or_404(Layer, name=layer_name)
qgis_layer = get_object_or_404(QGISServerLayer, layer=layer)
params['MAP'] = qgis_layer.qgis_project_path
# We have some shortcuts here instead of asking QGIS-Server.
if params.get('SERVICE') == 'WMS':
if params.get('REQUEST') == 'GetLegendGraphic':
layer_name = params.get('LAYER')
if not layer_name:
raise Http404('LAYER is not found for a GetLegendGraphic')
layer = get_object_or_404(Layer, name=layer_name)
return legend(request, layername=layer.name)
# Validation for STYLEMANAGER service
if params.get('SERVICE') == 'STYLEMANAGER':
project_param = params.get('PROJECT')
layer_name = params.get('LAYER')
if not project_param and layer_name:
layer = get_object_or_404(Layer, name=layer_name)
qgis_layer = get_object_or_404(QGISServerLayer, layer=layer)
params['PROJECT'] = qgis_layer.qgis_project_path
# if not shortcut, we forward any request to internal QGIS Server
qgis_server_url = qgis_server_endpoint(internal=True)
response = requests.get(qgis_server_url, params)
content = ensure_string(response.content)
# if it is GetCapabilities request, we need to replace all reference to
# our proxy
if params.get('REQUEST') == 'GetCapabilities':
qgis_server_base_url = qgis_server_endpoint(internal=True)
pattern = f'{qgis_server_base_url}'
content = re.sub(
pattern, qgis_server_endpoint(internal=False), content)
return HttpResponse(
content, content_type=response.headers.get('content-type'))
def qgis_server_pdf(request):
    """Serve the print capabilities used by the PDF/print client.

    Returns a JSON document describing the available print scales, DPIs,
    output formats and page layouts, together with the print endpoint
    URLs.
    """
    print_url = reverse('qgis_server:map-print')
    # available print scales, from 1:25,000 up to 1:4,000,000
    scales = [
        {"name": f"1:{s:,}", "value": f"{float(s)}"}
        for s in (25000, 50000, 100000, 200000,
                  500000, 1000000, 2000000, 4000000)
    ]
    # supported print resolutions (dots per inch)
    dpis = [{"name": str(d), "value": str(d)} for d in (75, 150, 300)]
    # page layouts offered to the client
    layouts = [
        {
            "name": "A4 portrait",
            "map": {"width": 440, "height": 483},
            "rotation": True
        },
        {
            "name": "Legal",
            "map": {"width": 440, "height": 483},
            "rotation": False
        }
    ]
    response_data = {
        "scales": scales,
        "dpis": dpis,
        "outputFormats": [{"name": "pdf"}],
        "layouts": layouts,
        "printURL": f"{print_url}",
        "createURL": f"{print_url}"
    }
    return HttpResponse(
        json.dumps(response_data), content_type="application/json")
def qgis_server_map_print(request):
    """Echo back the POSTed print parameters as JSON.

    Note: placeholder implementation - it does not produce a print/PDF
    output, it only returns the request parameters to the client.
    """
    logger.debug('qgis_server_map_print')
    # BUG FIX: this was `temp = []`, so `temp[key] = value` raised
    # TypeError on any POST with parameters; collect them in a dict.
    params = {}
    for key, value in request.POST.items():
        params[key] = value
        print(f"{key}\n{value}\n--------")
    return HttpResponse(
        json.dumps(params), content_type="application/json")
def qml_style(request, layername, style_name=None):
    """Update/Retrieve QML style of a given QGIS Layer.

    GET without style_name lists the layer's styles as JSON; GET with
    style_name returns the QML file itself. POST uploads a new QML style;
    DELETE removes an existing style. Any other method gets a 400.

    :param layername: The layer name in Geonode.
    :type layername: basestring

    :param style_name: The style name recognized by QGIS Server
    :type style_name: str
    """
    layer = get_object_or_404(Layer, name=layername)
    if request.method == 'GET':
        # Request QML from QGIS server
        if not style_name:
            # If no style name provided, then it is a List request
            styles_obj = None
            try:
                styles_obj = style_list(layer, internal=False)
            except Exception:
                print("Failed to fetch styles")
            styles_dict = []
            if styles_obj:
                styles_dict = [model_to_dict(s) for s in styles_obj]
            # If no style returned by GetCapabilities, this is a bug in QGIS
            # Attempt to generate default style name
            if not styles_dict:
                # fetch the 'default' style QML, save it locally, then
                # register it with QGIS Server and re-list
                style_url = style_get_url(layer, 'default')
                response = requests.get(style_url)
                if response.status_code == 200:
                    style_url = style_add_url(layer, 'default')
                    with open(layer.qgis_layer.qml_path, 'w') as f:
                        f.write(ensure_string(response.content))
                    response = requests.get(style_url)
                    if response.status_code == 200:
                        styles_obj = style_list(layer, internal=False)
                        styles_dict = [model_to_dict(s) for s in styles_obj]
            response = HttpResponse(
                json.dumps(styles_dict), content_type='application/json')
            return response
        # Return XML file of the style
        style_url = style_get_url(layer, style_name, internal=False)
        response = requests.get(style_url)
        if response.status_code == 200:
            # serve the QML as a downloadable attachment
            response = HttpResponse(
                ensure_string(response.content), content_type='text/xml')
            response[
                'Content-Disposition'] = f'attachment; filename={style_name}.qml'
        else:
            # forward QGIS Server's error body and status code
            response = HttpResponse(
                ensure_string(response.content), status=response.status_code)
        return response
    elif request.method == 'POST':
        # For people who uses API request
        if not request.user.has_perm(
                'change_resourcebase', layer.get_self_resource()):
            return HttpResponse(
                'User does not have permission to change QML style.',
                status=403)
        # Request about adding new QML style
        form = QGISLayerStyleUploadForm(request.POST, request.FILES)
        if not form.is_valid():
            return TemplateResponse(
                request,
                'qgis_server/forms/qml_style.html',
                {
                    'resource': layer,
                    'style_upload_form': form
                },
                status=400).render()
        try:
            uploaded_qml = request.FILES['qml']
            # update qml in uploaded media folder
            # check upload session, is qml file exists?
            layerfile_set = layer.upload_session.layerfile_set
            try:
                qml_layer_file = layerfile_set.get(name='qml')
                # if it is exists, we need to delete it, because it won't be
                # managed by geonode
                qml_layer_file.delete()
            except LayerFile.DoesNotExist:
                pass
            # update qml in QGIS Layer folder
            # NOTE(review): uploaded_qml.read() returns bytes for an
            # uploaded file, but the target is opened in text mode ('w') -
            # confirm this write works / whether a .decode() is needed
            content = uploaded_qml.read()
            qgis_layer = get_object_or_404(QGISServerLayer, layer=layer)
            with open(qgis_layer.qml_path, mode='w') as f:
                f.write(content)
            # construct URL to post new QML
            style_name = request.POST['name']
            style_title = request.POST['title']
            if not style_name:
                # Assign default name
                name_format = 'style_%Y%m%d%H%M%S'
                current_time = datetime.datetime.utcnow()
                style_name = current_time.strftime(name_format)
            # Add new style
            style_url = style_add_url(layer, style_name)
            response = requests.get(style_url)
            # QGIS Server signals success with a literal 'OK' body
            if not (response.status_code == 200 and ensure_string(response.content) == 'OK'):
                try:
                    style_list(layer, internal=False)
                except Exception:
                    print("Failed to fetch styles")
                return TemplateResponse(
                    request,
                    'qgis_server/forms/qml_style.html',
                    {
                        'resource': layer,
                        'style_upload_form': QGISLayerStyleUploadForm(),
                        'alert': True,
                        'alert_message': ensure_string(response.content),
                        'alert_class': 'alert-danger'
                    },
                    status=response.status_code).render()
            # We succeeded on adding new style
            # Refresh style models
            try:
                style_list(layer, internal=False)
                qgis_style = layer.qgis_layer.styles.get(name=style_name)
                qgis_style.title = style_title
                qgis_style.save()
                alert_message = f'Successfully add style {style_name}'
            except Exception:
                alert_message = 'Failed to fetch styles'
            return TemplateResponse(
                request,
                'qgis_server/forms/qml_style.html',
                {
                    'resource': layer,
                    'style_upload_form': form,
                    'alert': True,
                    'alert_class': 'alert-success',
                    'alert_message': alert_message
                },
                status=201).render()
        except Exception as e:
            # unexpected failure anywhere in the upload flow -> 500
            logger.exception(e)
            return HttpResponseServerError()
    elif request.method == 'DELETE':
        # Request to delete particular QML Style
        if not style_name:
            # Style name should exists
            return HttpResponseBadRequest('Style name not provided.')
        # Handle removing tile-style cache
        # (best effort - missing style/cache is ignored)
        try:
            style = layer.qgis_layer.styles.get(name=style_name)
            shutil.rmtree(style.style_tile_cache_path)
        except Exception:
            pass
        style_url = style_remove_url(layer, style_name)
        response = requests.get(style_url)
        # QGIS Server signals success with a literal 'OK' body
        if not (response.status_code == 200 and ensure_string(response.content) == 'OK'):
            alert_message = ensure_string(response.content)
            if 'NAME is NOT an existing style.' in ensure_string(response.content):
                alert_message = f'{style_name} is not an existing style'
            try:
                style_list(layer, internal=False)
            except Exception:
                print("Failed to fetch styles")
            return TemplateResponse(
                request,
                'qgis_server/forms/qml_style.html',
                {
                    'resource': layer,
                    'style_upload_form': QGISLayerStyleUploadForm(),
                    'alert': True,
                    'alert_message': alert_message,
                    'alert_class': 'alert-danger'
                },
                status=response.status_code).render()
        # Successfully removed styles
        # Handle when default style is deleted.
        # Will be handled by style_list method
        try:
            style_list(layer, internal=False)
            alert_message = f'Successfully deleted style {style_name}'
        except Exception:
            alert_message = 'Failed to fetch styles'
        return TemplateResponse(
            request,
            'qgis_server/forms/qml_style.html',
            {
                'resource': layer,
                'style_upload_form': QGISLayerStyleUploadForm(),
                'alert': True,
                'alert_message': alert_message,
                'alert_class': 'alert-success'
            },
            status=200).render()
    # any other HTTP method is rejected
    return HttpResponseBadRequest()
def default_qml_style(request, layername, style_name=None):
    """Set or query the default style used by a layer.

    GET returns the current default style as JSON; POST switches the
    default style to *style_name*.

    :param layername: The layer name in Geonode.
    :type layername: basestring

    :param style_name: The style name recognized by QGIS Server
    :type style_name: str
    """
    layer = get_object_or_404(Layer, name=layername)
    if request.method == 'GET':
        # report the current default style as JSON
        current = layer.qgis_layer.default_style
        payload = {
            'name': current.name,
            'title': current.title,
            'style_url': current.style_url
        }
        return HttpResponse(
            json.dumps(payload), content_type='application/json')
    elif request.method == 'POST':
        # For people who uses API request
        if not request.user.has_perm(
                'change_resourcebase', layer.get_self_resource()):
            return HttpResponse(
                'User does not have permission to change QML style.',
                status=403)
        # a style name is mandatory for a change request
        if not style_name:
            return HttpResponseBadRequest()
        # ask QGIS Server to switch the default style; it replies with a
        # literal 'OK' body on success
        response = requests.get(style_set_default_url(layer, style_name))
        ok = (response.status_code == 200
              and ensure_string(response.content) == 'OK')
        if not ok:
            return HttpResponseServerError(
                'Failed to change default Style.'
                'Error: {0}'.format(ensure_string(response.content)))
        # Succesfully change default style
        # Synchronize models
        qgis_layer = layer.qgis_layer
        qgis_layer.default_style = qgis_layer.styles.get(name=style_name)
        qgis_layer.save()
        alert_message = f'Successfully changed default style {style_name}'
        return TemplateResponse(
            request,
            'qgis_server/forms/qml_style.html',
            {
                'resource': layer,
                'style_upload_form': QGISLayerStyleUploadForm(),
                'alert': True,
                'alert_message': alert_message,
                'alert_class': 'alert-success'
            },
            status=200).render()
def set_thumbnail(request, layername):
    """Update a layer's thumbnail from a map extent posted by the client.

    :param layername: The layer name in Geonode.
    :type layername: basestring

    :return: JSON {'success': True} once the task has been queued
    """
    # only POST is accepted
    if request.method != 'POST':
        return HttpResponseBadRequest()
    layer = get_object_or_404(Layer, name=layername)
    # For people who uses API request
    if not request.user.has_perm(
            'change_resourcebase', layer.get_self_resource()):
        return HttpResponse(
            'User does not have permission to change thumbnail.',
            status=403)
    # BBox comes from leafletjs as 'xmin,ymin,xmax,ymax' in EPSG:4326
    bbox_string = request.POST['bbox']
    bbox = [float(coord) for coord in bbox_string.split(',')]
    # Hand thumbnail creation off to a celery task and return immediately.
    create_qgis_server_thumbnail.apply_async(
        ('layers.layer', layer.id, True, bbox))
    return HttpResponse(
        json.dumps({'success': True}), content_type="application/json")
def download_qlr(request, layername):
"""Download QLR file for a layer.
:param layername: The layer name in Geonode.
| |
in admin:
if wait["lang"] == "JP":
kr.sendText(msg.to,helpMessage)
random.choice(AST).sendImageWithURL(msg.to, url123)
random.choice(AST).sendText(msg.to,"↥↥↥↥↥↪ Owner Bots ↩↥↥↥↥↥")
else:
random.choice(AST).sendText(msg.to,helpMessage)
#==========================[Kris]===========================
elif msg.text in ["Key1","key1"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
kr.sendText(msg.to,key1Message)
else:
random.choice(AST).sendText(msg.to,key1Message)
#==========================[Kris]===========================
elif ("Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = kr.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
kr.updateGroup(X)
else:
kr.sendText(msg.to,"It can't be used besides the group.")
elif ("Kr1 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = kr1.getGroup(msg.to)
X.name = msg.text.replace("Kr1 gn ","")
kr1.updateGroup(X)
else:
kr.sendText(msg.to,"It can't be used besides the group.")
elif ("Kr2 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = kr2.getGroup(msg.to)
X.name = msg.text.replace("Kr2 gn ","")
kr2.updateGroup(X)
else:
kr2.sendText(msg.to,"It can't be used besides the group.")
elif ("Kr3 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = kr3.getGroup(msg.to)
X.name = msg.text.replace("Kr3 gn ","")
kr3.updateGroup(X)
else:
kr3.sendText(msg.to,"It can't be used besides the group.")
#==========================[Kris]===========================
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
kr.kickoutFromGroup(msg.to,[midd])
elif "Kr1 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kr1 kick ","")
kr1.kickoutFromGroup(msg.to,[midd])
elif "Kr2 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kr2 kick ","")
kr2.kickoutFromGroup(msg.to,[midd])
elif "Kr3 kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kr3 kick ","")
kr3.kickoutFromGroup(msg.to,[midd])
#==========================[Kris]===========================
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
kr.findAndAddContactsByMid(midd)
kr.inviteIntoGroup(msg.to,[midd])
elif "Kr1 invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kr1 invite ","")
kr1.findAndAddContactsByMid(midd)
kr1.inviteIntoGroup(msg.to,[midd])
elif "Kr2 invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kr2 invite ","")
kr2.findAndAddContactsByMid(midd)
kr2.inviteIntoGroup(msg.to,[midd])
elif "Kr3 invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kr3 invite ","")
kr3.findAndAddContactsByMid(midd)
kr3.inviteIntoGroup(msg.to,[midd])
#==========================[Kris]===========================
elif msg.text in ["Me","me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
kr.sendMessage(msg)
elif msg.text in ["Kr1","kr1"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
kr1.sendMessage(msg)
elif msg.text in ["Kr2","kr2"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kr2.sendMessage(msg)
elif msg.text in ["Kr3","kr3"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kr3.sendMessage(msg)
elif msg.text in ["Kr4","kr4"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
kr4.sendMessage(msg)
elif msg.text in ["Kr5","kr5"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
kr5.sendMessage(msg)
elif msg.text.lower() == 'Bot':
kr.sendImageWithURL(msg.to, url123)
kr.sendText(msg.to,"↥↥↥↥↥↪ Pembuat Bots ↩↥↥↥↥↥")
#==========================[Kris]===========================
elif msg.text in ["cancel","Cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = kr.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
kr.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
kr.sendText(msg.to,"No one is inviting")
else:
kr.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Can not be used outside the group")
else:
kr.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Kr cancel","kr cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = kr1.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
kr1.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"No one is inviting")
else:
kr1.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Can not be used outside the group")
else:
kr1.sendText(msg.to,"Not for use less than group")
#==========================[Kris]===========================
elif msg.text in ["Ourl","Link on","ourl","link on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kr.getGroup(msg.to)
X.preventJoinByTicket = False
kr.updateGroup(X)
if wait["lang"] == "JP":
kr.sendText(msg.to,"Done")
else:
kr.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Can not be used outside the group")
else:
kr.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Kr1 ourl","Kr1 link on","kr1 ourl","kr1 link on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kr1.getGroup(msg.to)
X.preventJoinByTicket = False
kr1.updateGroup(X)
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Done")
else:
kr1.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Can not be used outside the group")
else:
kr1.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Kr2 ourl","Kr2 link on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kr2.getGroup(msg.to)
X.preventJoinByTicket = False
kr2.updateGroup(X)
if wait["lang"] == "JP":
kr2.sendText(msg.to,"Done")
else:
kr2.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kr2.sendText(msg.to,"Can not be used outside the group")
else:
kr2.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Kr3 ourl","Kr3 link on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kr3.getGroup(msg.to)
X.preventJoinByTicket = False
kr3.updateGroup(X)
if wait["lang"] == "JP":
kr3.sendText(msg.to,"Done")
else:
kr3.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kr3.sendText(msg.to,"Can not be used outside the group")
else:
kr3.sendText(msg.to,"Not for use less than group")
#==========================[Kris]===========================
elif msg.text in ["Curl","Link off","curl","link off"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kr.getGroup(msg.to)
X.preventJoinByTicket = True
kr.updateGroup(X)
if wait["lang"] == "JP":
kr.sendText(msg.to,"Done")
else:
kr.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Can not be used outside the group")
else:
kr.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Kr1 curl","Kr1 link off"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kr1.getGroup(msg.to)
X.preventJoinByTicket = True
kr1.updateGroup(X)
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Done")
else:
kr1.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Can not be used outside the group")
else:
kr1.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Kr2 curl","Kr2 link off"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kr2.getGroup(msg.to)
X.preventJoinByTicket = True
kr2.updateGroup(X)
if wait["lang"] == "JP":
kr2.sendText(msg.to,"Done")
else:
kr2.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kr2.sendText(msg.to,"Can not be used outside the group")
else:
kr2.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Kr3 curl","Kr3 link off"]:
if msg.from_ in admin:
if msg.toType == 2:
X = kr3.getGroup(msg.to)
X.preventJoinByTicket = True
kr3.updateGroup(X)
if wait["lang"] == "JP":
kr3.sendText(msg.to,"Done")
else:
kr3.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kr3.sendText(msg.to,"Can not be used outside the group")
else:
kr3.sendText(msg.to,"Not for use less than group")
#==========================[Kris]===========================
elif msg.text == "Ginfo":
if msg.from_ in admin:
if msg.toType == 2:
ginfo = kr.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
kr.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
kr.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Can not be used outside the group")
else:
kr.sendText(msg.to,"Not for use less than group")
elif msg.text == "Cctv ginfo":
if msg.from_ in admin:
if msg.toType == 2:
ginfo = kr1.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
kr1.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
kr1.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Can not be used outside the group")
else:
kr1.sendText(msg.to,"Not for use less than group")
#==========================[Kris]===========================
elif "All mid" == msg.text:
if msg.from_ in admin:
kr.sendText(msg.to,mid1)
kr1.sendText(msg.to,Amid)
kr2.sendText(msg.to,Bmid)
kr3.sendText(msg.to,Cmid)
kr4.sendText(msg.to,Dmid)
kr5.sendText(msg.to,Emid)
elif "Mid" == msg.text:
if msg.from_ in admin:
kr.sendText(msg.to,mid1)
elif "Kr1 mid" == msg.text:
if msg.from_ in admin:
kr1.sendText(msg.to,Amid)
elif "Kr2 mid" == msg.text:
if msg.from_ in admin:
kr2.sendText(msg.to,Bmid)
elif "Kr3 mid" == msg.text:
if msg.from_ in admin:
kr3.sendText(msg.to,Cmid)
elif "Kr4 mid" == msg.text:
if msg.from_ in admin:
kr4.sendText(msg.to,Cmid)
elif "Kr5 mid" == msg.text:
if msg.from_ in admin:
kr5.sendText(msg.to,Cmid)
#==========================[Kris]===========================
elif msg.text in ["Undang","undang"]:
if msg.from_ in admin:
wait["winvite"] = True
kr.sendText(msg.to,"send contact")
elif msg.text in ["Jepit","jepit"]:
if msg.from_ in admin:
wait["winvite"] = True
kr1.sendText(msg.to,"send contact")
elif msg.text in ["Tarik","tarik"]:
if msg.from_ in admin:
wait['invite'] = True
kr2.sendText(msg.to,"send contact")
#==========================[Kris]===========================
elif "Rename " in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Rename ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr.getProfile()
profile.displayName = string
kr.updateProfile(profile)
kr.sendText(msg.to,"Changed " + string + "")
#==========================[Kris]===========================
elif "Rename1 " in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Rename1 ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr1.getProfile()
profile.displayName = string
kr1.updateProfile(profile)
kr1.sendText(msg.to,"Changed " + string + "")
elif "Rename2 " in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Rename2 ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr2.getProfile()
profile.displayName = string
kr2.updateProfile(profile)
kr2.sendText(msg.to,"Changed " + string + "")
elif "Rename3 " in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Rename3 ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr3.getProfile()
profile.displayName = string
kr3.updateProfile(profile)
kr3.sendText(msg.to,"Changed " + string + "")
elif "Rename4 " in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Rename4 ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr4.getProfile()
profile.displayName = string
kr4.updateProfile(profile)
kr4.sendText(msg.to,"Changed " + string + "")
elif "Rename5 " in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Rename5 ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr5.getProfile()
profile.displayName = string
kr5.updateProfile(profile)
kr5.sendText(msg.to,"Changed " + string + "")
#==========================[Kris]===========================
elif "Allrename " in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Allrename ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr.getProfile()
profile.displayName = string
kr.updateProfile(profile)
kr.sendText(msg.to,"Changed " + string + "")
profile = kr1.getProfile()
profile.displayName = string
kr1.updateProfile(profile)
kr1.sendText(msg.to,"Changed " + string + "")
profile = kr2.getProfile()
profile.displayName = string
kr2.updateProfile(profile)
kr2.sendText(msg.to,"Changed " + string + "")
profile = kr3.getProfile()
| |
#!/usr/bin/env python
'''
Test the xml input
'''
import os
from siconos.tests_setup import working_dir
try:
    import pytest
    xfail = pytest.mark.xfail
except ImportError:
    # A bare ``except:`` also swallowed SystemExit / KeyboardInterrupt;
    # only a missing pytest should trigger the legacy py.test fallback.
    import py.test
    xfail = py.test.mark.xfail
from siconos.fromXml import buildModelXML
import siconos.kernel as SK
import numpy as np
def test_xml1():
    ''' the BouncingBall '''
    bouncingBall = buildModelXML(os.path.join(working_dir, 'data/BBallTS.xml'))
    # --- Get the simulation ---
    s = bouncingBall.simulation()
    # First (and only) dynamical system registered in the topology graph.
    dsN = SK.dynamicalSystems(bouncingBall.nonSmoothDynamicalSystem().topology().dSG(0))[0].number()
    ball = bouncingBall.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
    N = 2000    # Number of time steps
    # saved in a matrix dataPlot
    outputSize = 4
    dataPlot = np.zeros((N + 1, outputSize))
    # q, v and p appear to be live views into the solver state, updated in
    # place by computeOneStep(), since they are fetched once but read every
    # iteration -- TODO confirm against the siconos bindings.
    q = ball.q()
    v = ball.velocity()
    p = ball.p(1)
    dataPlot[0, 0] = bouncingBall.t0()
    dataPlot[0, 1] = q[0]
    dataPlot[0, 2] = v[0]
    dataPlot[0, 3] = p[0]
    print("====> Start computation ...")
    # --- Time loop ---
    k = 1
    while s.hasNextEvent():
        s.computeOneStep()
        # --- Get values to be plotted ---
        dataPlot[k, 0] = s.nextTime()
        dataPlot[k, 1] = q[0]
        dataPlot[k, 2] = v[0]
        dataPlot[k, 3] = p[0]
        s.nextStep()
        k += 1
    print("End of computation - Number of iterations done: {:}".format(k))
    print("====> Output file writing ...")
    # Trim unused rows before writing the result file.
    dataPlot.resize(k, outputSize)
    np.savetxt("BBallTS.dat", dataPlot)
    # Comparison with a reference file
    dataPlotRef = SK.getMatrix(SK.SimpleMatrix(os.path.join(working_dir, 'data/BBallTSXML.ref')))
    # NOTE(review): a mismatch is only printed, it does not fail the test.
    if np.linalg.norm(dataPlot - dataPlotRef, ord=np.inf) > 1e-12:
        print(dataPlot - dataPlotRef)
        print("ERROR: The result is rather different from the reference file.")
def test_xml2():
    ''' BallInBowl '''
    # --- buildModelXML loading from xml file ---
    bouncingBall = buildModelXML(os.path.join(working_dir,
                                              'data/BallInBowl.xml'))
    # --- Get the simulation ---
    s = bouncingBall.simulation()
    k = 0
    T = bouncingBall.finalT()
    t0 = bouncingBall.t0()
    h = s.timeStep()
    # np.ceil returns a float; cast to int so it can be used as an array
    # dimension below (np.zeros raises TypeError on float shapes).
    N = int(np.ceil((T - t0) / h))
    # --- Get the values to be plotted ---
    # . saved in a matrix dataPlot
    dataPlot = np.zeros((N + 1, 6))
    print("Prepare data for plotting ... ")
    # For the initial time step:
    # time
    dataPlot[k, 0] = bouncingBall.t0()
    # state q for the first dynamical system (ball)
    dsN = SK.dynamicalSystems(bouncingBall.nonSmoothDynamicalSystem().topology().dSG(0))[0].number()
    ball = bouncingBall.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
    # q, v and p are fetched once and read every step (live solver views).
    q = ball.q()
    v = ball.velocity()
    p = ball.p(1)
    dataPlot[k, 1] = q[0]
    dataPlot[k, 2] = v[0]
    dataPlot[k, 3] = q[1]
    dataPlot[k, 4] = v[1]
    dataPlot[k, 5] = p[0]
    # --- Compute elapsed time ---
    print("Computation ... ")
    # --- Time loop ---
    while s.hasNextEvent():
        # solve ...
        s.computeOneStep()
        # Advance the row index *before* writing: the original wrote at the
        # current k first, so the first step overwrote the initial-state
        # row 0 (test_xml3/test_xml4 increment first).
        k += 1
        # --- Get values to be plotted ---
        # time
        dataPlot[k, 0] = s.nextTime()
        # Ball: state q
        dataPlot[k, 1] = q[0]
        # Ball: velocity
        dataPlot[k, 2] = v[0]
        # Ground: q
        dataPlot[k, 3] = q[1]
        # Ground: velocity
        dataPlot[k, 4] = v[1]
        # Reaction
        dataPlot[k, 5] = p[0]
        # transfer of state i+1 into state i and time incrementation
        s.nextStep()
    # Number of time iterations (the original print dropped the value of k)
    print("Number of iterations done: {:}".format(k))
    # dataPlot (ascii) output
    # ioMatrix::write(dataPlot,"noDim")
    np.savetxt("BallInBowl.dat", dataPlot)
def test_xml3():
    ''' DryFriction '''
    # --- buildModelXML loading from xml file ---
    oscillator = buildModelXML(os.path.join(working_dir,
                                            'data/DryFriction.xml'))
    # --- Get the simulation ---
    s = oscillator.simulation()
    k = 0
    T = oscillator.finalT()
    t0 = oscillator.t0()
    h = s.timeStep()
    # np.ceil returns a float; cast to int so it can be used both as an
    # array dimension and as the loop bound (np.zeros rejects float shapes).
    N = int(np.ceil((T - t0) / h))
    # --- Get the values to be plotted ---
    # . saved in a matrix dataPlot
    dataPlot = np.zeros((N + 1, 5))
    print("Prepare data for plotting ... ")
    # For the initial time step:
    # time
    dataPlot[k, 0] = t0
    # state q for the first dynamical system (ball)
    dsN = SK.dynamicalSystems(oscillator.nonSmoothDynamicalSystem().topology().dSG(0))[0].number()
    oscillo = oscillator.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
    inter = SK.interactions(oscillator.nonSmoothDynamicalSystem().topology().indexSet(0))[0]
    dataPlot[k, 1] = oscillo.q()[0]
    # velocity for the oscillo
    dataPlot[k, 2] = oscillo.velocity()[0]
    dataPlot[k, 3] = inter.lambda_(1)[0]
    dataPlot[k, 4] = inter.lambda_(1)[1]
    # --- Compute elapsed time ---
    print("Computation ... ")
    # --- Time loop ---
    while k < N:
        # get current time step
        k += 1
        # solve ...
        s.computeOneStep()
        # --- Get values to be plotted ---
        # time
        dataPlot[k, 0] = s.nextTime()
        # Oscillo: state q
        dataPlot[k, 1] = oscillo.q()[0]
        # Oscillo: velocity
        dataPlot[k, 2] = oscillo.velocity()[0]
        dataPlot[k, 3] = inter.lambda_(1)[0]
        dataPlot[k, 4] = inter.lambda_(1)[1]
        # transfer of state i+1 into state i and time incrementation
        s.nextStep()
    # Number of time iterations
    print("Number of iterations done: {:}".format(k))
    # dataPlot (ascii) output
    np.savetxt("DryFriction.dat", dataPlot)
@xfail
def test_xml4():
    ''' CamFollower '''
    # NOTE(review): this test is marked xfail because it references
    # CamState, CamPosition, CamVelocity and CamAcceleration, which are not
    # defined anywhere in this module -- it raises NameError when run.
    # --- buildModelXML loading from xml file ---
    CamFollower = buildModelXML(os.path.join(working_dir,
                                             'data/CamFollower_TIDS.xml'))
    # --- Get and initialize the simulation ---
    S = CamFollower.simulation()
    k = 0
    T = CamFollower.finalT()
    t0 = CamFollower.t0()
    h = S.timeStep()
    # np.ceil returns a float; cast to int so it can be used as an array
    # dimension and loop bound (np.zeros rejects float shapes).
    N = int(np.ceil((T - t0) / h))
    # --- Get the values to be plotted ---
    # . saved in a matrix dataPlot
    dataPlot = np.zeros((N + 1, 8))
    print("Prepare data for plotting ... ")
    # For the initial time step:
    # time
    dataPlot[k, 0] = t0
    # state q for the Follower
    dsN = CamFollower.nonSmoothDynamicalSystem().topology().dSG(0).dynamicalSystems()[0].number()
    Follower = CamFollower.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
    inter = CamFollower.nonSmoothDynamicalSystem().topology().dSG(0).interactions()[0]
    # Position of the Follower
    dataPlot[k, 1] = Follower.q()[0]
    # Velocity for the Follower
    dataPlot[k, 2] = Follower.velocity()[0]
    # Reaction
    dataPlot[k, 3] = inter.lambda_(1)[0]
    # External Forcing
    dataPlot[k, 4] = Follower.fExt()[0]
    # State of the Cam
    rpm = 358
    CamEqForce = CamState(t0, rpm, CamPosition, CamVelocity, CamAcceleration)
    # Position of the Cam
    dataPlot[k, 5] = CamPosition
    # Velocity of the Cam
    dataPlot[k, 6] = CamVelocity
    # Acceleration of the Cam
    dataPlot[k, 7] = CamPosition + Follower.q()[0]
    print("Computation ... ")
    # --- Time loop ---
    while k < N:
        # get current time step
        k += 1
        S.computeOneStep()
        # --- Get values to be plotted ---
        dataPlot[k, 0] = S.nextTime()
        dataPlot[k, 1] = Follower.q()[0]
        dataPlot[k, 2] = Follower.velocity()[0]
        dataPlot[k, 3] = inter.lambda_(1)[0]
        dataPlot[k, 4] = Follower.fExt()[0]
        CamEqForce = CamState(S.nextTime(), rpm, CamPosition, CamVelocity, CamAcceleration)
        dataPlot[k, 5] = CamPosition
        dataPlot[k, 6] = CamVelocity
        dataPlot[k, 7] = CamPosition + Follower.q()[0]
        # transfer of state i+1 into state i and time incrementation
        S.nextStep()
    # Number of time iterations
    print("Number of iterations done: {:}".format(k))
    # dataPlot (ascii) output
    np.savetxt("CamFollower.dat", dataPlot)
def test_xml5():
    ''' Bouncing Ball ED '''
    # --- buildModelXML loading from xml file ---
    bouncingBall = buildModelXML(os.path.join(working_dir, 'data/BBallED.xml'))
    # --- Get and initialize the simulation ---
    s = bouncingBall.simulation()
    dsN = SK.dynamicalSystems(bouncingBall.nonSmoothDynamicalSystem().topology().dSG(0))[0].number()
    ball = bouncingBall.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
    # --- Get the values to be plotted ---
    # . saved in a matrix dataPlot
    N = 12368    # Number of saved points: depends on the number of events ...
    outputSize = 5
    dataPlot = np.zeros((N + 1, outputSize))
    # Live views into the solver state, re-read each event -- TODO confirm
    # against the siconos bindings.
    q = ball.q()
    v = ball.velocity()
    p = ball.p(1)
    f = ball.p(2)
    dataPlot[0, 0] = bouncingBall.t0()
    dataPlot[0, 1] = q[0]
    dataPlot[0, 2] = v[0]
    dataPlot[0, 3] = p[0]
    dataPlot[0, 4] = f[0]
    print("====> Start computation ... ")
    # --- Time loop (event driven: advance from event to event) ---
    eventsManager = s.eventsManager()
    numberOfEvent = 0
    k = 0
    nonSmooth = False
    while s.hasNextEvent():
        k += 1
        s.advanceToEvent()
        # Event type 2 appears to denote a non-smooth (impact) event --
        # TODO confirm against the siconos event-type constants.
        if eventsManager.nextEvent().getType() == 2:
            nonSmooth = True
        s.processEvents()
        # If the treated event is non smooth, the pre-impact state has been solved in memory vectors during process.
        if nonSmooth:
            # Record the pre-impact state (one memory step back), then the
            # post-impact state in the next row.
            dataPlot[k, 0] = s.startingTime()
            dataPlot[k, 1] = ball.qMemory().getSiconosVector(1)[0]
            dataPlot[k, 2] = ball.velocityMemory().getSiconosVector(1)[0]
            k += 1
            nonSmooth = False
        dataPlot[k, 0] = s.startingTime()
        dataPlot[k, 1] = q[0]
        dataPlot[k, 2] = v[0]
        dataPlot[k, 3] = p[0]
        dataPlot[k, 4] = f[0]
        numberOfEvent += 1
    # --- Output files ---
    dataPlot.resize(k, outputSize)
    np.savetxt("BBallED.dat", dataPlot)
    # Comparison with a reference file
    dataPlotRef = SK.getMatrix(SK.SimpleMatrix(os.path.join(working_dir, 'data/BouncingBallEDXml.ref')))
    if np.linalg.norm(dataPlot - dataPlotRef, ord=np.inf) > 1e-11:
        print("Warning. The results is rather different from the reference file.")
        print(np.linalg.norm(dataPlot - dataPlotRef, ord=np.inf))
        # NOTE(review): exit(1) raises SystemExit; pytest reports that as a
        # failure, but a plain assert would be the idiomatic way.
        exit(1)
def test_xml6():
''' BeadPlan '''
# --- buildModelXML loading from xml file ---
oscillator = buildModelXML(os.path.join(working_dir, 'data/BeadPlan.xml'))
# --- Get and initialize the simulation ---
s = oscillator.simulation()
k = 0
t0 = oscillator.t0()
T = oscillator.finalT()
h = s.timeStep()
N = np.ceil((T - t0) / h) # Number of time steps
# --- Get the values to be plotted ---
# . saved in a matrix dataPlot
dataPlot = np.zeros((N, 3))
print("Prepare data for plotting ... ")
# For the initi)al time step:
# XXX fix this crap
dsN = SK.dynamicalSystems(oscillator.nonSmoothDynamicalSystem().topology().dSG(0))[0].number()
oscillo = oscillator.nonSmoothDynamicalSystem().dynamicalSystem(dsN)
q = oscillo.q()
v = | |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.static}.
"""
import inspect
import mimetypes
import os
import re
import StringIO
from zope.interface.verify import verifyObject
from twisted.internet import abstract, interfaces
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath
from twisted.python import log
from twisted.trial.unittest import TestCase
from twisted.web import static, http, script, resource
from twisted.web.server import UnsupportedMethod
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
class StaticDataTests(TestCase):
    """
    Tests for L{Data}.
    """
    def test_headRequest(self):
        """
        L{Data.render} returns an empty response body for a I{HEAD} request.
        """
        dataResource = static.Data("foo", "bar")
        request = DummyRequest([''])
        request.method = 'HEAD'
        rendered = _render(dataResource, request)
        def checkEmptyBody(ignored):
            self.assertEqual(''.join(request.written), "")
        rendered.addCallback(checkEmptyBody)
        return rendered

    def test_invalidMethod(self):
        """
        L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},
        non-I{HEAD} request.
        """
        dataResource = static.Data("foo", "bar")
        request = DummyRequest([''])
        request.method = 'POST'
        self.assertRaises(UnsupportedMethod, dataResource.render, request)
class StaticFileTests(TestCase):
    """
    Tests for the basic behavior of L{File}.
    """
    def _render(self, resource, request):
        # Thin indirection over the module-level _render helper, so the
        # rendering strategy can be overridden by subclasses.
        return _render(resource, request)

    def test_invalidMethod(self):
        """
        L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},
        non-I{HEAD} request.
        """
        request = DummyRequest([''])
        request.method = 'POST'
        path = FilePath(self.mktemp())
        path.setContent("foo")
        file = static.File(path.path)
        self.assertRaises(UnsupportedMethod, file.render, request)

    def test_notFound(self):
        """
        If a request is made which encounters a L{File} before a final segment
        which does not correspond to any file in the path the L{File} was
        created with, a not found response is sent.
        """
        base = FilePath(self.mktemp())
        base.makedirs()
        file = static.File(base.path)
        request = DummyRequest(['foobar'])
        child = resource.getChildForRequest(file, request)
        d = self._render(child, request)
        def cbRendered(ignored):
            self.assertEqual(request.responseCode, 404)
        d.addCallback(cbRendered)
        return d

    def test_emptyChild(self):
        """
        The C{''} child of a L{File} which corresponds to a directory in the
        filesystem is a L{DirectoryLister}.
        """
        base = FilePath(self.mktemp())
        base.makedirs()
        file = static.File(base.path)
        request = DummyRequest([''])
        child = resource.getChildForRequest(file, request)
        self.assertIsInstance(child, static.DirectoryLister)
        self.assertEqual(child.path, base.path)

    def test_securityViolationNotFound(self):
        """
        If a request is made which encounters a L{File} before a final segment
        which cannot be looked up in the filesystem due to security
        considerations, a not found response is sent.
        """
        base = FilePath(self.mktemp())
        base.makedirs()
        file = static.File(base.path)
        # '..' would escape the configured root, so lookup must be refused.
        request = DummyRequest(['..'])
        child = resource.getChildForRequest(file, request)
        d = self._render(child, request)
        def cbRendered(ignored):
            self.assertEqual(request.responseCode, 404)
        d.addCallback(cbRendered)
        return d

    def test_forbiddenResource(self):
        """
        If the file in the filesystem which would satisfy a request cannot be
        read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
        """
        base = FilePath(self.mktemp())
        base.setContent('')
        # Make sure we can delete the file later.  (0700 is a Python 2
        # octal literal: rwx for the owner.)
        self.addCleanup(base.chmod, 0700)
        # Get rid of our own read permission.
        base.chmod(0)
        file = static.File(base.path)
        request = DummyRequest([''])
        d = self._render(file, request)
        def cbRendered(ignored):
            self.assertEqual(request.responseCode, 403)
        d.addCallback(cbRendered)
        return d
    if platform.isWindows():
        test_forbiddenResource.skip = "Cannot remove read permission on Windows"

    def test_indexNames(self):
        """
        If a request is made which encounters a L{File} before a final empty
        segment, a file in the L{File} instance's C{indexNames} list which
        exists in the path the L{File} was created with is served as the
        response to the request.
        """
        base = FilePath(self.mktemp())
        base.makedirs()
        base.child("foo.bar").setContent("baz")
        file = static.File(base.path)
        file.indexNames = ['foo.bar']
        request = DummyRequest([''])
        child = resource.getChildForRequest(file, request)
        d = self._render(child, request)
        def cbRendered(ignored):
            self.assertEqual(''.join(request.written), 'baz')
            self.assertEqual(request.outgoingHeaders['content-length'], '3')
        d.addCallback(cbRendered)
        return d

    def test_staticFile(self):
        """
        If a request is made which encounters a L{File} before a final segment
        which names a file in the path the L{File} was created with, that file
        is served as the response to the request.
        """
        base = FilePath(self.mktemp())
        base.makedirs()
        base.child("foo.bar").setContent("baz")
        file = static.File(base.path)
        request = DummyRequest(['foo.bar'])
        child = resource.getChildForRequest(file, request)
        d = self._render(child, request)
        def cbRendered(ignored):
            self.assertEqual(''.join(request.written), 'baz')
            self.assertEqual(request.outgoingHeaders['content-length'], '3')
        d.addCallback(cbRendered)
        return d

    def test_staticFileDeletedGetChild(self):
        """
        A L{static.File} created for a directory which does not exist should
        return childNotFound from L{static.File.getChild}.
        """
        staticFile = static.File(self.mktemp())
        request = DummyRequest(['foo.bar'])
        child = staticFile.getChild("foo.bar", request)
        self.assertEqual(child, staticFile.childNotFound)

    def test_staticFileDeletedRender(self):
        """
        A L{static.File} created for a file which does not exist should render
        its C{childNotFound} page.
        """
        staticFile = static.File(self.mktemp())
        request = DummyRequest(['foo.bar'])
        request2 = DummyRequest(['foo.bar'])
        d = self._render(staticFile, request)
        d2 = self._render(staticFile.childNotFound, request2)
        # Compare the missing-file rendering against an explicit rendering
        # of the childNotFound resource: both must produce the same body.
        def cbRendered2(ignored):
            def cbRendered(ignored):
                self.assertEqual(''.join(request.written),
                                 ''.join(request2.written))
            d.addCallback(cbRendered)
            return d
        d2.addCallback(cbRendered2)
        return d2

    def test_headRequest(self):
        """
        L{static.File.render} returns an empty response body for I{HEAD}
        requests.
        """
        path = FilePath(self.mktemp())
        path.setContent("foo")
        file = static.File(path.path)
        request = DummyRequest([''])
        request.method = 'HEAD'
        d = _render(file, request)
        def cbRendered(ignored):
            self.assertEqual("".join(request.written), "")
        d.addCallback(cbRendered)
        return d

    def test_processors(self):
        """
        If a request is made which encounters a L{File} before a final segment
        which names a file with an extension which is in the L{File}'s
        C{processors} mapping, the processor associated with that extension is
        used to serve the response to the request.
        """
        base = FilePath(self.mktemp())
        base.makedirs()
        base.child("foo.bar").setContent(
            "from twisted.web.static import Data\n"
            "resource = Data('dynamic world','text/plain')\n")
        file = static.File(base.path)
        file.processors = {'.bar': script.ResourceScript}
        request = DummyRequest(["foo.bar"])
        child = resource.getChildForRequest(file, request)
        d = self._render(child, request)
        def cbRendered(ignored):
            self.assertEqual(''.join(request.written), 'dynamic world')
            self.assertEqual(request.outgoingHeaders['content-length'], '13')
        d.addCallback(cbRendered)
        return d

    def test_ignoreExt(self):
        """
        The list of ignored extensions can be set by passing a value to
        L{File.__init__} or by calling L{File.ignoreExt} later.
        """
        file = static.File(".")
        self.assertEqual(file.ignoredExts, [])
        file.ignoreExt(".foo")
        file.ignoreExt(".bar")
        self.assertEqual(file.ignoredExts, [".foo", ".bar"])
        file = static.File(".", ignoredExts=(".bar", ".baz"))
        self.assertEqual(file.ignoredExts, [".bar", ".baz"])

    def test_ignoredExtensionsIgnored(self):
        """
        A request for the I{base} child of a L{File} succeeds with a resource
        for the I{base<extension>} file in the path the L{File} was created
        with if such a file exists and the L{File} has been configured to
        ignore the I{<extension>} extension.
        """
        base = FilePath(self.mktemp())
        base.makedirs()
        base.child('foo.bar').setContent('baz')
        base.child('foo.quux').setContent('foobar')
        file = static.File(base.path, ignoredExts=(".bar",))
        request = DummyRequest(["foo"])
        child = resource.getChildForRequest(file, request)
        d = self._render(child, request)
        def cbRendered(ignored):
            self.assertEqual(''.join(request.written), 'baz')
        d.addCallback(cbRendered)
        return d
class StaticMakeProducerTests(TestCase):
"""
Tests for L{File.makeProducer}.
"""
    def makeResourceWithContent(self, content, type=None, encoding=None):
        """
        Make a L{static.File} resource that has C{content} for its content.
        @param content: The bytes to use as the contents of the resource.
        @param type: Optional value for the content type of the resource.
        @param encoding: Optional value for the content encoding of the
            resource.
        """
        fileName = self.mktemp()
        fileObject = open(fileName, 'w')
        fileObject.write(content)
        fileObject.close()
        resource = static.File(fileName)
        resource.encoding = encoding
        resource.type = type
        return resource
def contentHeaders(self, request):
"""
Extract the content-* headers from the L{DummyRequest} C{request}.
This returns the subset of C{request.outgoingHeaders} of headers that
start with 'content-'.
"""
contentHeaders = {}
for k, v in request.outgoingHeaders.iteritems():
if k.startswith('content-'):
contentHeaders[k] = v
return contentHeaders
    def test_noRangeHeaderGivesNoRangeStaticProducer(self):
        """
        makeProducer when no Range header is set returns an instance of
        NoRangeStaticProducer.
        """
        resource = self.makeResourceWithContent('')
        # DummyRequest with no Range header at all.
        request = DummyRequest([])
        producer = resource.makeProducer(request, resource.openForReading())
        self.assertIsInstance(producer, static.NoRangeStaticProducer)
    def test_noRangeHeaderSets200OK(self):
        """
        makeProducer when no Range header is set sets the responseCode on the
        request to 'OK'.
        """
        resource = self.makeResourceWithContent('')
        request = DummyRequest([])
        # Only the side effect on the request is asserted; the producer
        # itself is covered by the sibling tests.
        resource.makeProducer(request, resource.openForReading())
        self.assertEqual(http.OK, request.responseCode)
    def test_noRangeHeaderSetsContentHeaders(self):
        """
        makeProducer when no Range header is set sets the Content-* headers
        for the response.
        """
        length = 123
        contentType = "text/plain"
        contentEncoding = 'gzip'
        resource = self.makeResourceWithContent(
            'a'*length, type=contentType, encoding=contentEncoding)
        request = DummyRequest([])
        resource.makeProducer(request, resource.openForReading())
        # Exactly these three content-* headers, and no content-range.
        self.assertEqual(
            {'content-type': contentType, 'content-length': str(length),
             'content-encoding': contentEncoding},
            self.contentHeaders(request))
    def test_singleRangeGivesSingleRangeStaticProducer(self):
        """
        makeProducer when the Range header requests a single byte range
        returns an instance of SingleRangeStaticProducer.
        """
        request = DummyRequest([])
        # A satisfiable 3-byte range inside the 6-byte content.
        request.headers['range'] = 'bytes=1-3'
        resource = self.makeResourceWithContent('abcdef')
        producer = resource.makeProducer(request, resource.openForReading())
        self.assertIsInstance(producer, static.SingleRangeStaticProducer)
    def test_singleRangeSets206PartialContent(self):
        """
        makeProducer when the Range header requests a single, satisfiable byte
        range sets the response code on the request to 'Partial Content'.
        """
        request = DummyRequest([])
        request.headers['range'] = 'bytes=1-3'
        resource = self.makeResourceWithContent('abcdef')
        resource.makeProducer(request, resource.openForReading())
        self.assertEqual(
            http.PARTIAL_CONTENT, request.responseCode)
    def test_singleRangeSetsContentHeaders(self):
        """
        makeProducer when the Range header requests a single, satisfiable byte
        range sets the Content-* headers appropriately.
        """
        request = DummyRequest([])
        request.headers['range'] = 'bytes=1-3'
        contentType = "text/plain"
        contentEncoding = 'gzip'
        resource = self.makeResourceWithContent('abcdef', type=contentType, encoding=contentEncoding)
        resource.makeProducer(request, resource.openForReading())
        # content-range describes the slice; content-length is the slice
        # length (3), not the full resource length (6).
        self.assertEqual(
            {'content-type': contentType, 'content-encoding': contentEncoding,
             'content-range': 'bytes 1-3/6', 'content-length': '3'},
            self.contentHeaders(request))
    def test_singleUnsatisfiableRangeReturnsSingleRangeStaticProducer(self):
        """
        makeProducer still returns an instance of L{SingleRangeStaticProducer}
        when the Range header requests a single unsatisfiable byte range.
        """
        request = DummyRequest([])
        # Range starts past the end of the 3-byte content.
        request.headers['range'] = 'bytes=4-10'
        resource = self.makeResourceWithContent('abc')
        producer = resource.makeProducer(request, resource.openForReading())
        self.assertIsInstance(producer, static.SingleRangeStaticProducer)
    def test_singleUnsatisfiableRangeSets416ReqestedRangeNotSatisfiable(self):
        """
        makeProducer sets the response code of the request to of 'Requested
        Range Not Satisfiable' when the Range header requests a single
        unsatisfiable byte range.
        """
        request = DummyRequest([])
        request.headers['range'] = 'bytes=4-10'
        resource = self.makeResourceWithContent('abc')
        resource.makeProducer(request, resource.openForReading())
        self.assertEqual(
            http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_singleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the | |
# Domain parameters of the NIST P-256 (secp256r1) curve -- TODO confirm
# against SEC 2 / FIPS 186: prime modulus p, coefficients a and b of
# y^2 = x^3 + a*x + b over F_p, base point (xG, yG), and base-point order q.
p = 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff
a = 0xffffffff00000001000000000000000000000000fffffffffffffffffffffffc
b = 0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b
xG = 0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296
yG = 0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5
q = 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551
_p_ = p          # module-internal alias for the field modulus
_FpTAG_ = 'Fp'   # tag marking tuples that represent F_p elements
class fp_Error(BaseException):
    """Raised for invalid F_p octet strings, inversion of zero, or square
    roots of non-residues."""
    pass
def _is_an_fp_representation_(obj):
    """Return True iff obj is the pair (_FpTAG_, n) with an int 0 <= n < p."""
    if type(obj) is not tuple or len(obj) != 2:
        return False
    tag, value = obj
    return tag is _FpTAG_ and type(value) is int and 0 <= value < _p_
def fp(integer):
    """Shorthand constructor: build an F_p element from an int (reduced mod p)."""
    return fp_from_integer(integer)
def fp_from_integer(integer):
    """Return the tagged representation (_FpTAG_, integer mod p)."""
    assert type(integer) is int
    return _FpTAG_, integer % _p_
def fp_to_integer(elm):
    """Return the canonical integer in [0, p-1] representing elm."""
    assert _is_an_fp_representation_(elm)
    return elm[1]
def fp_from_octetstring(octetstring):
    """Decode a fixed-length 32-octet big-endian string into an F_p element.

    Raises fp_Error for a wrong length or a value outside [0, p-1].
    """
    assert type(octetstring) is bytes
    if len(octetstring) != 32:
        raise fp_Error
    value = int.from_bytes(octetstring, byteorder='big', signed=False)
    # from_bytes with signed=False is never negative, so only the upper
    # bound needs checking.
    if value > _p_ - 1:
        raise fp_Error
    return fp(value)
def fp_to_octetstring(elm):
    """Encode elm as a fixed-length 32-octet big-endian string."""
    assert _is_an_fp_representation_(elm)
    return elm[1].to_bytes(length=32, byteorder='big', signed=False)
def fp_eq(elm1, elm2):
    """Field equality; representations are canonical, so compare the ints."""
    assert _is_an_fp_representation_(elm1)
    assert _is_an_fp_representation_(elm2)
    return elm1[1] == elm2[1]
def fp_neq(elm1, elm2):
    """Field inequality."""
    return not fp_eq(elm1, elm2)
def fp_neg(elm):
    """Additive inverse: -elm mod p."""
    assert _is_an_fp_representation_(elm)
    return _FpTAG_, -elm[1] % _p_
def fp_add(elm1, elm2):
    """Field addition mod p."""
    assert _is_an_fp_representation_(elm1)
    assert _is_an_fp_representation_(elm2)
    return _FpTAG_, (elm1[1] + elm2[1]) % _p_
def fp_sub(elm1, elm2):
    """Field subtraction: elm1 + (-elm2)."""
    return fp_add(elm1, fp_neg(elm2))
def fp_inv(elm):
    """Multiplicative inverse via Fermat's little theorem; fp_Error on zero."""
    # n^( -1 ) === n^( (p - 1) -1 ) (mod p)
    # n^( -1 ) === n^( p - 2 ) (mod p)
    assert _is_an_fp_representation_(elm)
    if elm[1] == 0:
        raise fp_Error
    return _FpTAG_, pow(elm[1], _p_ - 2, _p_)
def fp_mul(elm1, elm2):
    """Field multiplication mod p."""
    assert _is_an_fp_representation_(elm1)
    assert _is_an_fp_representation_(elm2)
    return _FpTAG_, (elm1[1] * elm2[1]) % _p_
def fp_div(elm1, elm2):
    """Field division: elm1 * elm2^-1 (fp_Error when elm2 is zero)."""
    return fp_mul(elm1, fp_inv(elm2))
def fp_square(elm):
    """elm^2 in F_p."""
    return fp_mul(elm, elm)
def fp_cube(elm):
    """elm^3 in F_p, computed as elm^2 * elm."""
    return fp_mul(fp_square(elm), elm)
def fp_sqrt(elm, parity=None):
    """Return a square root of elm in F_p, optionally choosing its parity.

    Relies on p === 3 (mod 4), which makes elm^((p+1)/4) a root whenever
    elm is a quadratic residue:
        (elm^((p+1)/4))^2 = elm^((p+1)/2) = elm^((p-1)/2) * elm = elm.
    Raises fp_Error when elm is a non-residue (the candidate fails the
    squaring check).  When ``parity`` is given, the root whose canonical
    integer has that low bit is returned; the other root is its negation.
    """
    assert _is_an_fp_representation_(elm)
    assert parity is None or type(parity) is int
    candidate = _FpTAG_, pow(elm[1], (_p_ + 1) // 4, _p_)
    if fp_neq(fp_square(candidate), elm):
        raise fp_Error
    if parity is None or fp_parity_of(candidate) == parity & 1:
        return candidate
    else:
        return fp_neg(candidate)
def fp_parity_of(elm):
    """Low bit (0 or 1) of the canonical integer representing elm."""
    assert _is_an_fp_representation_(elm)
    return elm[1] & 1
_q_ = q          # module-internal alias for the base-point order
_FqTAG_ = 'Fq'   # tag marking tuples that represent F_q elements
class fq_Error(BaseException):
    """Raised for invalid F_q operations (currently only inversion of zero)."""
    pass
def _is_an_fq_representation_(obj):
    """Return True iff obj is the pair (_FqTAG_, n) with an int 0 <= n <= q-1."""
    return (type(obj) is tuple and len(obj) == 2 and obj[0] is _FqTAG_
            and type(obj[1]) is int and 0 <= obj[1] <= _q_ - 1)
def fq(integer):
    """Shorthand constructor: build an F_q element from an int (reduced mod q)."""
    return fq_from_integer(integer)
def fq_from_integer(integer):
    """Return the tagged representation (_FqTAG_, integer mod q)."""
    assert type(integer) is int
    return _FqTAG_, integer % _q_
def fq_to_integer(elm):
    """Return the canonical integer in [0, q-1] representing elm."""
    assert _is_an_fq_representation_(elm)
    return elm[1]
def fq_inv(elm):
    """Multiplicative inverse mod q (Fermat); fq_Error on zero."""
    assert _is_an_fq_representation_(elm)
    if elm[1] == 0:
        raise fq_Error
    return _FqTAG_, pow(elm[1], _q_ - 2, _q_)
def fq_mul(elm1, elm2):
    """Multiplication mod q."""
    assert _is_an_fq_representation_(elm1)
    assert _is_an_fq_representation_(elm2)
    return _FqTAG_, (elm1[1] * elm2[1]) % _q_
def fq_div(elm1, elm2):
    """Division mod q: elm1 * elm2^-1 (fq_Error when elm2 is zero)."""
    return fq_mul(elm1, fq_inv(elm2))
def fq_to_lsb_first_bit_sequence_generator(elm):
    """Yield the bits of elm least-significant first (no leading zero bits)."""
    assert _is_an_fq_representation_(elm)
    _, value = elm
    vlen = value.bit_length()
    for i in range(vlen):
        yield (value >> i) & 1
def fq_to_msb_first_bit_sequence(elm):
    """Bits of elm most-significant first (used by left-to-right e_mul)."""
    return reversed(tuple(fq_to_lsb_first_bit_sequence_generator(elm)))
# Curve coefficients and distinguished points as F_p representations.
# The point at infinity Z is encoded by the off-curve pair (0, 0) -- the
# assert after these definitions guarantees (0, 0) is not on the curve,
# so the encoding is unambiguous.
_a_ = fp(a)
_b_ = fp(b)
_xG_ = fp(xG)
_yG_ = fp(yG)
_xZ_ = fp(0)
_yZ_ = fp(0)
_ETAG_ = 'E'   # tag marking tuples that represent points of E
_G_ = _ETAG_, _xG_, _yG_
_Z_ = _ETAG_, _xZ_, _yZ_
class e_Error(BaseException):
    """Raised for invalid point encodings."""
    pass
def _is_on_e_curve_(x, y):
    """Check the short Weierstrass equation y^2 == x^3 + a*x + b in F_p."""
    lhs = fp_square(y)
    rhs = fp_add(fp_add(fp_cube(x), fp_mul(_a_, x)), _b_)
    return fp_eq(lhs, rhs)
def _is_a_2d_fp_space_point_for_e_(obj):
    """Structural check only: (_ETAG_, x, y) with x and y valid F_p
    representations; curve membership is NOT checked here."""
    return (type(obj) is tuple and len(obj) == 3 and obj[0] is _ETAG_
            and _is_an_fp_representation_(obj[1])
            and _is_an_fp_representation_(obj[2]))
def _is_an_e_representation_(obj):
    """True iff obj encodes a curve point: either the distinguished zero
    point Z, or a structurally valid pair that satisfies the curve equation."""
    if not _is_a_2d_fp_space_point_for_e_(obj):
        return False
    if obj == _Z_:
        return True
    _, x, y = obj
    return _is_on_e_curve_(x, y)
# Sanity checks at import time: (0, 0) is not on the curve (so the Z
# encoding is unambiguous) and both distinguished points are valid.
assert not _is_on_e_curve_(_xZ_, _yZ_)
assert _is_an_e_representation_(_G_)
assert _is_an_e_representation_(_Z_)
def e(spec):
    """Return the zero point for spec == 0, or the base point G for spec == 1.

    The original guard ``type(spec) is int and spec == 0 or spec == 1``
    parsed as ``(type(spec) is int and spec == 0) or spec == 1`` because
    ``and`` binds tighter than ``or``, so non-int values equal to 1
    (e.g. 1.0 or True) slipped past the type check.
    """
    assert type(spec) is int and (spec == 0 or spec == 1)
    if spec == 0:
        return _Z_
    elif spec == 1:
        return _G_
    else:
        assert False
def e_from_octetstring(octetstring):
    """Decode a SEC1 point octet string: 0x00 for the zero point,
    0x04||X||Y uncompressed, or 0x02/0x03||X compressed.

    Raises e_Error for any invalid encoding, including off-curve points.
    (The original validated decoded points with ``assert``, which raises
    AssertionError instead of e_Error and disappears entirely under
    ``python -O`` -- off-curve inputs would then be accepted.)
    """
    assert type(octetstring) is bytes
    try:
        if len(octetstring) == 1 and octetstring[0] == 0x00:
            return _Z_
        elif len(octetstring) == 65 and octetstring[0] == 0x04:
            x = fp_from_octetstring(octetstring[1:33])
            y = fp_from_octetstring(octetstring[33:65])
            if not _is_an_e_representation_((_ETAG_, x, y)):
                raise e_Error
            return _ETAG_, x, y
        elif len(octetstring) == 33 and octetstring[0] in {0x02, 0x03}:
            # Low bit of the leading octet selects the parity of y.
            y_parity = octetstring[0] & 1
            x = fp_from_octetstring(octetstring[1:33])
            w = fp_add(fp_add(fp_cube(x), fp_mul(_a_, x)), _b_)
            y = fp_sqrt(w, parity=y_parity)
            if not _is_an_e_representation_((_ETAG_, x, y)):
                raise e_Error
            return _ETAG_, x, y
    except fp_Error:
        # Bad field-element encodings and non-residues fall through to the
        # uniform e_Error below.
        pass
    raise e_Error
def e_nonzero_from_octetstring(octetstring):
    """Like e_from_octetstring, but rejects the encoded zero point."""
    if len(octetstring) == 1 and octetstring[0] == 0x00:
        raise e_Error
    return e_from_octetstring(octetstring)
def e_to_octetstring(P, compressed=False):
    """Encode point P per SEC1: 0x04||X||Y uncompressed, or
    (0x02 | parity_of_y)||X compressed."""
    assert _is_an_e_representation_(P)
    _, x, y = P
    y_parity = fp_parity_of(y)
    assert y_parity in {0, 1}
    xx = fp_to_octetstring(x)
    if compressed:
        return bytes([0x02 ^ y_parity]) + xx
    return b'\x04' + xx + fp_to_octetstring(y)
def e_to_integer(P):
    """Return the integer value of P's x-coordinate (the y-coordinate is
    discarded)."""
    assert _is_an_e_representation_(P)
    _, x, y = P
    return fp_to_integer(x)
def e_eq(P, Q):
    """Point equality: both coordinates equal."""
    assert _is_an_e_representation_(P)
    assert _is_an_e_representation_(Q)
    _, xP, yP = P
    _, xQ, yQ = Q
    return fp_eq(xP, xQ) and fp_eq(yP, yQ)
def e_neg(P):
    """Point negation: (x, -y).  Note -0 == 0, so Z maps to itself."""
    assert _is_an_e_representation_(P)
    _, x, y = P
    return _ETAG_, x, fp_neg(y)
def e_dbl(P):
    """Point doubling with the affine tangent formula.

    Returns Z when P is Z, or when yP == 0 (then P == -P, so 2P = Z).
    """
    assert _is_an_e_representation_(P)
    _, xP, yP = P
    if e_eq(P, _Z_):
        return _Z_
    if fp_eq(yP, fp(0)):
        return _Z_
    # slope = (3 * xP**2 + _a_) / (2 * yP)
    # xR = slope**2 - 2 * xP
    # yR = slope * (xP - xR) - yP
    slope = fp_div(fp_add(fp_mul(fp(3), fp_square(xP)), _a_),
                   fp_mul(fp(2), yP))
    xR = fp_sub(fp_square(slope), fp_mul(fp(2), xP))
    yR = fp_sub(fp_mul(slope, fp_sub(xP, xR)), yP)
    return _ETAG_, xR, yR
def e_add(P, Q):
    """Point addition with the affine chord formula.

    Handles the special cases first: Z is the identity, P + (-P) = Z,
    and P + P is delegated to e_dbl (the chord slope would be 0/0).
    """
    assert _is_an_e_representation_(P)
    assert _is_an_e_representation_(Q)
    _, xP, yP = P
    _, xQ, yQ = Q
    if e_eq(P, _Z_):
        return Q
    if e_eq(Q, _Z_):
        return P
    if e_eq(P, e_neg(Q)):
        return _Z_
    if e_eq(P, Q):
        return e_dbl(P)
    # slope = (yP - yQ) / (xP - xQ)
    # xR = slope**2 - xP - xQ
    # yR = slope * (xP - xR) - yP
    slope = fp_div(fp_sub(yP, yQ), fp_sub(xP, xQ))
    xR = fp_sub(fp_sub(fp_square(slope), xP), xQ)
    yR = fp_sub(fp_mul(slope, fp_sub(xP, xR)), yP)
    return _ETAG_, xR, yR
def e_mul(P, k):
    """Scalar multiplication k*P by left-to-right double-and-add.

    NOTE(review): not constant-time -- the conditional add depends on each
    scalar bit, so this is fine for verification but leaks timing
    information when k is secret.
    """
    assert _is_an_e_representation_(P)
    assert _is_an_fq_representation_(k)
    R = _Z_
    for bit in fq_to_msb_first_bit_sequence(k):
        R = e_dbl(R)
        if bit == 1:
            R = e_add(R, P)
    return R
class asn1_Error(BaseException):
    """Raised for malformed or non-minimal (non-DER) ASN.1 encodings."""
    pass
def asn1_parse_integer(octetstring):
    """
    return a signed integer encoded in this ASN.1 INTEGER

    Raises asn1_Error unless the content is minimal DER: non-empty, no
    redundant 0x00 leading octet on a positive value, and no redundant
    0xff leading octet on a negative value.  (The original only rejected
    the padded-positive case and accepted empty content as 0.)
    """
    assert type(octetstring) is bytes
    T, L, V, X = _asn1_extract_T_L_V_X_from_(octetstring)
    assert _asn1_L_value_(L) == len(V)
    if len(X) != 0:
        raise asn1_Error
    if T != b'\x02':
        raise asn1_Error
    # DER requires at least one content octet, even for the value 0.
    if len(V) == 0:
        raise asn1_Error
    # Reject non-minimal encodings, positive and negative.
    if len(V) >= 2 and V[0] == 0x00 and V[1] <= 0x7f:
        raise asn1_Error
    if len(V) >= 2 and V[0] == 0xff and V[1] >= 0x80:
        raise asn1_Error
    return int.from_bytes(V, byteorder='big', signed=True)
def asn1_parse_bitstring_as_octet_string(octetstring):
    """
    return an octet string encoded in this ASN.1 BIT STRING

    Only BIT STRINGs with zero unused bits (leading content octet 0x00)
    are accepted.  Raises asn1_Error on any malformed input.
    """
    assert type(octetstring) is bytes
    T, L, V, X = _asn1_extract_T_L_V_X_from_(octetstring)
    assert _asn1_L_value_(L) == len(V)
    if len(X) != 0:
        raise asn1_Error
    if T != b'\x03':
        raise asn1_Error
    # A BIT STRING needs at least the unused-bits octet; indexing empty
    # content used to raise IndexError instead of the module's asn1_Error.
    if len(V) == 0 or V[0] != 0x00:
        raise asn1_Error
    return V[1:]
def asn1_parse_sequence(octetstring):
    """
    return a sequence of octet strings encoded in this ASN.1 SEQUENCE

    Each returned item is the full TLV encoding of one sequence member.
    """
    assert type(octetstring) is bytes
    T, L, V, X = _asn1_extract_T_L_V_X_from_(octetstring)
    assert _asn1_L_value_(L) == len(V)
    if len(X) != 0:
        raise asn1_Error
    if T != b'\x30':
        raise asn1_Error
    items = []
    rest = V
    while rest:
        T, L, V, rest = _asn1_extract_T_L_V_X_from_(rest)
        items.append(T + L + V)
    return tuple(items)
def _asn1_extract_T_L_V_X_from_(stream):
    """Split stream into Tag octets, Length octets, Value octets and the
    remaining (unparsed) suffix X."""
    X = stream
    T, X = _asn1_extract_T_from_(X)
    L, X = _asn1_extract_L_from_(X)
    V, X = _asn1_extract_V_from_(X, length=_asn1_L_value_(L))
    return T, L, V, X
def _asn1_L_value_(L):
    """Return the content length encoded by the DER length octets L.

    Accepts only minimal definite-length forms: a single octet <= 0x7f
    (short form), 0x81 followed by a value >= 0x80, or 0x82.. followed by
    big-endian length octets with no leading zero.  Raises asn1_Error
    otherwise.
    """
    if len(L) == 0:
        raise asn1_Error
    elif len(L) == 1 and L[0] <= 0x7f:
        return L[0]
    elif len(L) == 2 and L[0] == 0x81 and L[1] >= 0x80:
        return L[1]
    elif len(L) == L[0] - 0x7f and L[0] >= 0x82 and L[1] != 0x00:
        return int.from_bytes(L[1:], byteorder='big', signed=False)
    else:
        raise asn1_Error
def _asn1_extract_T_from_(stream):
    """Split off the (single-octet) tag; raises asn1_Error on empty input."""
    if len(stream) == 0:
        raise asn1_Error
    return stream[:1], stream[1:]
def _asn1_extract_L_from_(stream):
    """Split off the length octets (short or long definite form).

    0x80 (indefinite length) is rejected -- DER forbids it.
    """
    if len(stream) == 0:
        raise asn1_Error
    if stream[0] == 0x80:
        raise asn1_Error
    elif stream[0] <= 0x7f:
        return stream[:1], stream[1:]
    else:
        return _asn1_extract_long_L_from_(stream)
def _asn1_extract_long_L_from_(stream):
    """Take long-form DER length octets off the front of *stream*.

    On entry stream[0] is 0x81..0xff, so the whole length field spans
    stream[0] - 0x7f octets (the prefix octet plus its value octets).
    """
    length = stream[0] - 0x7f
    if len(stream) < length:
        raise asn1_Error
    L, rest = stream[:length], stream[length:]
    # Minimal-encoding check. The original condition
    # `(length == 2 and L[1] >= 0x80) or L[1] != 0x00` let a 0x81-prefixed
    # length with value octet 0x01..0x7f through (non-minimal: the short
    # form should have been used); such inputs were only rejected later by
    # _asn1_L_value_. Enforce DER minimality consistently here:
    #  - 0x81 form: the single value octet must be >= 0x80
    #  - longer forms: the first value octet must not be zero
    if length == 2:
        if L[1] < 0x80:
            raise asn1_Error
    elif L[1] == 0x00:
        raise asn1_Error
    return L, rest
def _asn1_extract_V_from_(stream, length):
    """Take the first *length* value octets off the front of *stream*."""
    if length > len(stream):
        raise asn1_Error
    return stream[:length], stream[length:]
def _asn1_parse_a_sequence_of_two_signed_integers_(octetstring):
    """Parse a SEQUENCE of exactly two INTEGERs (e.g. an ECDSA (r, s) pair)."""
    elements = asn1_parse_sequence(octetstring)
    if len(elements) != 2:
        raise asn1_Error
    return (asn1_parse_integer(elements[0]),
            asn1_parse_integer(elements[1]))
# Private snapshot of q (defined earlier in this file) taken at this point;
# used below as the reduction modulus -- presumably the ECDSA group order.
__q__ = q
class ecdsa_Error(BaseException):
    """Error raised by the ECDSA helpers below."""
    # NOTE(review): this derives from BaseException rather than Exception,
    # so a broad `except Exception:` will NOT catch it -- presumably
    # deliberate (mirrors the asn1_Error style above); confirm before
    # changing the base class.
    pass
def _ecdsa_signature_base_octetstring_to_integer_mod_q_(octetstring):
    """Hash the message octets with SHA-256 and reduce the digest mod q.

    Implements: h <- mod_q(bitstring_to_integer(truncate_to_q_length(hash(...))))
    NOTE(review): the digest is reduced without explicit truncation, exactly
    as in the original -- presumably q is at least 256 bits wide; confirm.
    """
    assert type(octetstring) is bytes
    import hashlib
    digest = hashlib.sha256(octetstring).digest()
    return int.from_bytes(digest, byteorder='big', signed=False) % __q__
def _ecdsa_is_valid_Qhrs_quadruple_(Q, h, r, s):
assert _is_an_e_representation_(Q) and not e_eq(Q, e(0))
assert type(h) is int and (0 | |
and ('recomputed_with' not in result or \
result['recompute_width'] in ['always','first_time']):
CMS_test_threshold = 2e-2*(1.0e-4/min(result['lambdaCMS']))
else:
# If the widths were not computed numerically, then the accuracy of
# the cancellation should be better.
CMS_test_threshold = 2e-2*(1.0e-5/min(result['lambdaCMS']))
# This threshold sets how flat the diff line must be when approaching it from
# the right to start considering its value. Notice that it cannot be larger
# than the CMS_test_threshold
consideration_threshold = min(CMS_test_threshold/10.0, 0.05)
# Number of values groupes with the median technique to avoid being
# sensitive to unstabilities
group_val = 3
# Starting from which value, relative to the averaged diff, should one consider
# the asymptotic diff median to be exactly 0.0 in which case one would use this
# average instead of this asymptotic median. u d~ > e+ ve LO exhibit a \
# difference at zero for example.
diff_zero_threshold = 1e-3
# Plotting parameters. Specify the lambda range to plot.
# lambda_range = [-1,-1] returns the default automatic setup
lambda_range = options['lambda_plot_range']
##################################
# One can print out the raw results by uncommenting the line below
# misc.sprint(result)
# for i, res in enumerate(result['a e- > e- ve ve~ [ virt = QCD QED ]']['CMS']):
# for i, res in enumerate(result['u d~ > e+ ve a [ virt = QCD QED ]']['CMS']):
# if res['resonance']['FSMothersNumbers'] == set([3, 4]):
# misc.sprint(res['resonance']['PS_point_used'])
# stop
res_str = ''
# Variables for the concise report
concise_str = ''
concise_data = '%%(process)-%ds%%(asymptot)-15s%%(cms_check)-25s%%(status)-25s\n'
concise_repl_dict = {'Header':{'process':'Process',
'asymptot':'Asymptot',
'cms_check':'Deviation to asymptot',
'status':'Result'}}
####### BEGIN helper functions
# Chose here whether to use Latex particle names or not
# Possible values are 'none', 'model' or 'built-in'
useLatexParticleName = 'built-in'
name2tex = {'e+':r'e^+','w+':r'W^+','a':r'\gamma','g':'g',
'e-':r'e^-','w-':r'W^-','z':'Z','h':'H',
'mu+':r'\mu^+',
'mu-':r'\mu^-',
'ta+':r'\tau^+',
'ta-':r'\tau^-'}
for p in ['e','m','t']:
d = {'e':'e','m':r'\mu','t':r'\tau'}
name2tex['v%s'%p]=r'\nu_{%s}'%d[p]
name2tex['v%s~'%p]=r'\bar{\nu_{%s}}'%d[p]
for p in ['u','d','c','s','b','t']:
name2tex[p]=p
name2tex['%s~'%p]=r'\bar{%s}'%p
def format_particle_name(particle, latex=useLatexParticleName):
p_name = particle
if latex=='model':
try:
texname = model.get_particle(particle).get('texname')
if texname and texname!='none':
p_name = r'$\displaystyle %s$'%texname
except:
pass
elif latex=='built-in':
try:
p_name = r'$\displaystyle %s$'%name2tex[particle]
except:
pass
return p_name
def resonance_str(resonance, latex=useLatexParticleName):
""" Provides a concise string to characterize the resonance """
particle_name = model.get_particle(resonance['ParticlePDG']).get_name()
mothersID=['%d'%n for n in sorted(resonance['FSMothersNumbers'])]
return r"%s [%s]"%(format_particle_name(particle_name,latex=latex),
','.join(mothersID))
def format_title(process, resonance):
""" Format the plot title given the process and resonance """
process_string = []
for particle in process.split():
if '<=' in particle:
particle = particle.replace('<=',r'$\displaystyle <=$')
if '^2' in particle:
particle = particle.replace('^2',r'$\displaystyle ^2$')
if particle=='$$':
process_string.append(r'\$\$')
continue
if particle=='>':
process_string.append(r'$\displaystyle \rightarrow$')
continue
if particle=='/':
process_string.append(r'$\displaystyle /$')
continue
process_string.append(format_particle_name(particle))
if resonance=='':
return r'CMS check for %s' %(' '.join(process_string))
else:
return r'CMS check for %s ( resonance %s )'\
%(' '.join(process_string),resonance)
def guess_lambdaorder(ME_values_list, lambda_values, expected=None,
proc=None, res=None):
""" Guess the lambda scaling from a list of ME values and return it.
Also compare with the expected result if specified and trigger a
warning if not in agreement."""
# guess the lambdaCMS power in the amplitude squared
bpowers = []
for i, lambdaCMS in enumerate(lambda_values[1:]):
bpowers.append(round(math.log(ME_values_list[0]/ME_values_list[i+1],\
lambda_values[0]/lambdaCMS)))
# Pick the most representative power
bpower = sorted([(el, bpowers.count(el)) for el in set(bpowers)],
key = lambda elem: elem[1], reverse=True)[0][0]
if not expected:
return bpower
if bpower != expected:
logger.warning('The apparent scaling of the squared amplitude'+
'seems inconsistent w.r.t to detected value '+
'(%i vs %i). %i will be used.'%(expected,bpower,bpower)+
' This happend for process %s and resonance %s'%(proc, res))
return bpower
def check_stability(ME_values, lambda_values, lambda_scaling, values_name):
""" Checks if the values passed in argument are stable and return the
stability check outcome warning if it is not precise enough. """
values = sorted([
abs(val*(lambda_values[0]/lambda_values[i])**lambda_scaling) for \
i, val in enumerate(ME_values)])
median = values[len(values)//2]
max_diff = max(abs(values[0]-median),abs(values[-1]-median))
stability = max_diff/median
stab_threshold = 1e-2
if stability >= stab_threshold:
return "== WARNING: Stability check failed for '%s' with stability %.2e.\n"\
%(values_name, stability)
else:
return None
####### END helper functions
if options['analyze']=='None':
if options['reuse']:
save_path = CMS_save_path('pkl', result, model, options,
output_path=output_path)
buff = "\nThe results of this check have been stored on disk and its "+\
"analysis can be rerun at anytime with the MG5aMC command:\n "+\
" check cms --analyze=%s\n"%save_path
res_str += buff
concise_str += buff
save_load_object.save_to_file(save_path, result)
elif len(result['ordered_processes'])>0:
buff = "\nUse the following synthax if you want to store "+\
"the raw results on disk.\n"+\
" check cms -reuse <proc_def> <options>\n"
res_str += buff
concise_str += buff
############################
# Numerical check first #
############################
checks = []
for process in result['ordered_processes']:
checks.extend([(process,resID) for resID in \
range(len(result[process]['CMS']))])
if options['reuse']:
logFile = open(CMS_save_path(
'log', result, model, options, output_path=output_path),'w')
lambdaCMS_list=result['lambdaCMS']
# List of failed processes
failed_procs = []
# A bar printing function helper. Change the length here for esthetics
bar = lambda char: char*47
# Write out the widths used if information is present:
if 'widths_computed' in result:
res_str += '\n%s%s%s\n'%(bar('='),' Widths ',bar('='))
if result['recompute_width'] == 'never':
res_str += '| Widths extracted from the param_card.dat'
else:
res_str += '| Widths computed %s'%('analytically' if has_FRdecay
else 'numerically')
if result['recompute_width'] == 'first_time':
res_str += ' for \lambda = 1'
elif result['recompute_width'] == 'always':
res_str += ' for all \lambda values'
res_str += " using mode '--recompute_width=%s'.\n"%result['recompute_width']
for particle_name, width in result['widths_computed']:
res_str += '| %-10s = %-11.6gGeV\n'%('Width(%s)'%particle_name,width)
res_str += '%s%s%s\n'%(bar('='),'='*8,bar('='))
# Doing the analysis to printout to the MG5 interface and determine whether
# the test is passed or not
# Number of last points to consider for the stability test
nstab_points=group_val
# Store here the asymptot detected for each difference curve
differences_target = {}
for process, resID in checks:
# Reinitialize the concise result replacement dictionary
# (only one resonance is indicated in this one, no matter what.)
concise_repl_dict[process] = {'process':process,
'asymptot':'N/A',
'cms_check':'N/A',
'status':'N/A'}
proc_res = result[process]
cms_res = proc_res['CMS'][resID]
nwa_res = proc_res['NWA'][resID]
resonance = resonance_str(cms_res['resonance'], latex='none')
cms_born=cms_res['born']
nwa_born=nwa_res['born']
# Starting top thick bar
res_str += '\n%s%s%s\n'%(bar('='),'='*8,bar('='))
# Centered process and resonance title
proc_title = "%s (resonance %s)"%(process,resonance)
centering = (bar(2)+8-len(proc_title))//2
res_str += "%s%s\n"%(' '*centering,proc_title)
# Starting bottom thin bar
res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
# Reminder if diff_lambda_power is not 1
if diff_lambda_power!=1:
res_str += "== WARNING diff_lambda_power is not 1 but = %g\n"%diff_lambda_power
res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
born_power = guess_lambdaorder(nwa_born,lambdaCMS_list,
expected=proc_res['born_order'], proc=process, res=resonance)
stab_cms_born = check_stability(cms_born[-nstab_points:],
lambdaCMS_list[-nstab_points:], born_power, 'CMS Born')
if stab_cms_born:
res_str += stab_cms_born
stab_nwa_born = check_stability(nwa_born[-nstab_points:],
lambdaCMS_list[-nstab_points:], born_power, 'NWA Born')
if stab_nwa_born:
res_str += stab_nwa_born
# Write out the phase-space point
res_str += "== Kinematic configuration in GeV (E,px,pypz)\n"
for i, p in enumerate(cms_res['resonance']['PS_point_used']):
res_str += " | p%-2.d = "%(i+1)
for pi in p:
res_str += '%-24.17g'%pi if pi<0.0 else ' %-23.17g'%pi
res_str += "\n"
# Write out the offshellnesses specification
res_str += "== Offshellnesses of all detected resonances\n"
for res_name, offshellness in cms_res['resonance']['offshellnesses']:
res_str += " | %-15s = %f\n"%(res_name, offshellness)
res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
if not pert_orders:
res_str += "== Born scaling lambda^n_born. nborn = %d\n"%born_power
else:
cms_finite=cms_res['finite']
nwa_finite=nwa_res['finite']
loop_power = guess_lambdaorder(nwa_finite,lambdaCMS_list,
expected=proc_res['loop_order'], proc=process, res=resonance)
res_str += "== Scaling lambda^n. nborn, nloop = %d, %d\n"\
%(born_power,loop_power)
stab_cms_finite = check_stability(cms_finite[-nstab_points:],
lambdaCMS_list[-nstab_points:], loop_power, 'CMS finite')
if stab_cms_finite:
res_str += stab_cms_finite
stab_nwa_finite = check_stability(nwa_finite[-nstab_points:],
lambdaCMS_list[-nstab_points:], loop_power, 'NWA finite')
if stab_nwa_finite:
res_str += stab_nwa_finite
# Now organize data
CMSData = []
NWAData = []
DiffData = []
for idata, lam in enumerate(lambdaCMS_list):
if not pert_orders:
new_cms=cms_born[idata]/(lam**born_power)
new_nwa=nwa_born[idata]/(lam**born_power)
else:
new_cms=(cms_finite[idata]+cms_born[idata]-nwa_born[idata])/(lam*nwa_born[idata])
new_nwa=nwa_finite[idata]/(lam*nwa_born[idata])
new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
CMSData.append(new_cms)
NWAData.append(new_nwa)
DiffData.append(new_diff)
# NWA Born median
# Find which values to start the test at by looking at the CMSdata scaling
# First compute the median of the middle 60% of entries in the plot
trim_range=int(((1.0-0.6)/2.0)*len(DiffData))
low_diff_median = sorted(DiffData[trim_range:-trim_range])\
[(len(DiffData)-2*trim_range)//2]
# Now walk the values from the right of the diff plot until we reaches
# values stable with respect to the CMS_tale_median. This value will
# be limit of the range considered for the CMS test. Do it in a way which
# | |
maxweight=float(external.stdout.readline())
output = maxweight
elif mode == 'full_me':
me_value=float(external.stdout.readline())
output = me_value
elif mode == 'unweighting':
firstline=external.stdout.readline().split()
try:
nexternal=int(firstline[0])
trials= int(firstline[1])
BWvalue= float(firstline[2])
weight= float(firstline[3])
failed= float(firstline[4])
use_mc_masses=int(firstline[5])
except ValueError:
logger.debug(firstline)
return
momenta=[external.stdout.readline() for i in range(nexternal)]
lastline=external.stdout.readline().split()
helicities=[lastline[i] for i in range(len(lastline))]
output = trials, BWvalue, weight, momenta, failed, use_mc_masses, helicities
if len(self.calculator) > self.options['max_running_process']:
logger.debug('more than %s calculators. Perform cleaning' % self.options['max_running_process'])
nb_calls = list(self.calculator_nbcall.values())
nb_calls.sort()
cut = max([nb_calls[len(nb_calls)//2], 0.001 * nb_calls[-1]])
for key, external in list(self.calculator.items()):
nb = self.calculator_nbcall[key]
if nb < cut:
if key[0]=='full':
path=key[1]
end_signal="5 0 0 0 0\n" # before closing, write down the seed
external.stdin.write(end_signal)
ranmar_state=external.stdout.readline()
ranmar_file=pjoin(path,'ranmar_state.dat')
ranmar=open(ranmar_file, 'w')
ranmar.write(ranmar_state)
ranmar.close()
external.stdin.close()
external.stdout.close()
external.terminate()
del self.calculator[key]
del self.calculator_nbcall[key]
else:
self.calculator_nbcall[key] = self.calculator_nbcall[key] //10
return output
def calculate_matrix_element(self, mode, production, stdin_text):
"""routine to return the matrix element"""
if mode != "decay":
raise Exception("This function is only secure in mode decay.")
tmpdir = ''
if (mode, production) in self.calculator:
external = self.calculator[(mode, production)]
self.calculator_nbcall[(mode, production)] += 1
else:
logger.debug('we have %s calculator ready' % len(self.calculator))
if mode == 'prod':
tmpdir = pjoin(self.path_me,'production_me', 'SubProcesses',
production)
elif mode in ['full','decay']:
tmpdir = pjoin(self.path_me,'%s_me' % mode, 'SubProcesses',
production)
executable_prod="./check"
my_env = os.environ.copy()
my_env["GFORTRAN_UNBUFFERED_ALL"] = "y"
external = Popen(executable_prod, stdout=PIPE, stdin=PIPE,
stderr=STDOUT, cwd=tmpdir,
env=my_env,
bufsize=0)
assert (mode, production) not in self.calculator
self.calculator[(mode, production)] = external
self.calculator_nbcall[(mode, production)] = 1
external.stdin.write(stdin_text.encode())
if mode == 'prod':
info = int(external.stdout.readline().decode())
nb_output = abs(info)+1
else:
info = 1
nb_output = 1
std = []
for i in range(nb_output):
external.stdout.flush()
line = external.stdout.readline().decode()
std.append(line)
prod_values = ' '.join(std)
#prod_values = ' '.join([external.stdout.readline().decode() for i in range(nb_output)])
if info < 0:
print('ZERO DETECTED')
print(prod_values)
print(stdin_text)
os.system('lsof -p %s' % external.pid)
return ' '.join(prod_values.split()[-1*(nb_output-1):])
if len(self.calculator) > self.options['max_running_process']:
logger.debug('more than 100 calculator. Perform cleaning')
nb_calls = list(self.calculator_nbcall.values())
nb_calls.sort()
cut = max([nb_calls[len(nb_calls)//2], 0.001 * nb_calls[-1]])
for key, external in list(self.calculator.items()):
nb = self.calculator_nbcall[key]
if nb < cut:
external.stdin.close()
external.stdout.close()
external.terminate()
del self.calculator[key]
del self.calculator_nbcall[key]
else:
self.calculator_nbcall[key] = self.calculator_nbcall[key] //10
if mode == 'prod':
return prod_values
else:
return float(prod_values)
    def generate_configs_file(self,nfinal,decay, path):
        """ write the file configs_decay.inc
        also record the itree information in a python variable,
        this will be needed to write down the event
        decay_struct['mg_tree'] = [(d1,d2, mother), (d1,d2,mother), ...]
        with - BACKWARD ORDER,
        - me indices
        """
        decay_struct=decay['decay_struct']
        me_index=2 # should match the particle index in the full matrix element
        count_res=0 # count number of resonances
        iforest=[]     # fortran DATA lines, one per decay branching
        pmasswidth=[]  # fortran lines assigning resonance masses/widths
        # data (map_external2res(i), i=1,4)/1,2,-2,-4/
        # prod2full maps each production-event particle onto its index in the
        # full ME: positive = external particle, negative = resonance label
        decay['prod2full']=[1,2]
        map_external=' data (map_external2res(i), i=1,%s)/1,2,' %(nfinal+2)
        for part in range(3,nfinal+3):
            if part in decay_struct: # particle in the prod. event to be decayed
                #print part
                decay_struct[part]['mg_tree']=[]
                nb_res=len(list(decay_struct[part]["tree"].keys()))
                # resonances of this chain are keyed -1, -2, ..., -nb_res
                for res in range(-1,-nb_res-1,-1):
                    label=abs(decay_struct[part]["tree"][res]['label'])
                    mass=self.pid2massvar[label]
                    width=self.pid2widthvar[label]
                    # full-ME label of this resonance (negative, backward order)
                    me_res=-nb_res-res-count_res-1
                    indexd1=decay_struct[part]["tree"][res]["d1"]["index"]
                    if indexd1>0:
                        # external daughter: take the next free external index
                        me_index+=1
                        me_d1=me_index
                    else:
                        # need to label resonances backward
                        me_d1 = -nb_res-indexd1-count_res-1
                    indexd2=decay_struct[part]["tree"][res]["d2"]["index"]
                    if indexd2>0:
                        me_index+=1
                        me_d2=me_index
                    else:
                        # need to label resonances backward
                        me_d2 = -nb_res-indexd2-count_res-1
                    iforest.append(" DATA (IDECAY(I, %s ),I=1,2)/ %s , %s / \n" % (me_res, me_d1, me_d2))
                    decay_struct[part]['mg_tree'].append((me_res,me_d1,me_d2))
                    pmasswidth.append(" PRMASS(%s)=%s \n" %(me_res,mass) )
                    pmasswidth.append(" PRWIDTH(%s)=%s \n" %(me_res,width) )
                count_res=count_res+nb_res
                # the decayed particle is represented by resonance -count_res
                # (the label assigned to its res=-1 entry above)
                map_external+='%s ,' % (-count_res)
                decay['prod2full'].append(-count_res)
            else:
                # stable particle: it stays an external leg of the full ME
                me_index+=1
                map_external+='%s ,' % me_index
                decay['prod2full'].append(me_index)
        # drop the trailing comma and close the fortran DATA statement
        map_external=map_external[:-1]+'/ \n'
        trappe=open(pjoin(path,'configs_decay.inc'),'w')
        trappe.write(map_external)
        for item in iforest:
            trappe.write(item)
        trappe.write(' ns_channel_decay= %s \n' % count_res)
        for item in pmasswidth:
            trappe.write(item)
        trappe.close()
def get_montecarlo_masses_from_event(self,decay_struct, event_map, map_prod2full):
"""
from the production event curr_event and from the decay channel 'decay_struct'
(which has just been selected randomly), get the MonteCarlo masses
"""
# in order to preserve the natural order in lhe file,
# we need the inverse of the dico event_map
inv_event_map={}
for i in event_map.keys():
inv_event_map[event_map[i]]=i
indices_for_mc_masses=[]
values_for_mc_masses=[]
for index in self.curr_event.event2mg.keys():
if self.curr_event.event2mg[index]>0: # no need to consider resonances in the production event file
part=inv_event_map[self.curr_event.event2mg[index]-1]+1 # index for prod. matrix element
part_for_curr_evt=self.curr_event.event2mg[index] # index for event file
if part not in decay_struct:
# get the pid
curr_pid=abs(self.curr_event.particle[part_for_curr_evt]['pid'])
if curr_pid in self.MC_masses:
#print part
#print map_prod2full
indices_for_mc_masses.append(map_prod2full[part-1])
values_for_mc_masses.append(self.MC_masses[curr_pid])
else:
# now we need to write the decay products in the event
# follow the decay chain order, so that we can easily keep track of the mother index
for res in range(-1,-len(list(decay_struct[part]["tree"].keys()))-1,-1):
index_d1=decay_struct[part]['mg_tree'][-res-1][1]
index_d2=decay_struct[part]['mg_tree'][-res-1][2]
pid_d1=abs(decay_struct[part]\
["tree"][res]["d1"]["label"])
pid_d2=abs(decay_struct[part]\
["tree"][res]["d2"]["label"])
if index_d1 >0 and pid_d1 in self.MC_masses:
indices_for_mc_masses.append(index_d1)
values_for_mc_masses.append(self.MC_masses[pid_d1])
if index_d2 >0 and pid_d2 in self.MC_masses:
indices_for_mc_masses.append(index_d2)
values_for_mc_masses.append(self.MC_masses[pid_d2])
return indices_for_mc_masses,values_for_mc_masses
def decay_one_event_new(self,curr_event,decay_struct, event_map, momenta_in_decay, use_mc_masses, helicities):
"""Write down the event
momenta is the list of momenta ordered according to the productin ME
"""
pid2color = self.pid2color
decayed_event=Event()
decayed_event.event2mg={}
decayed_event.ievent=curr_event.ievent
decayed_event.wgt=curr_event.wgt
decayed_event.scale=curr_event.scale
decayed_event.aqed=curr_event.aqed
decayed_event.aqcd=curr_event.aqcd
decayed_event.diese=curr_event.diese
decayed_event.rwgt=curr_event.rwgt
decayed_event.event_init_line=curr_event.event_init_line
part_number=0
external=0
maxcol=curr_event.max_col
# in order to preserve the natural order in lhe file,
# we need the inverse of the dico event_map
inv_event_map={}
for i in event_map.keys():
inv_event_map[event_map[i]]=i
sol_nb = None
for index in curr_event.event2mg.keys():
if curr_event.event2mg[index]>0:
part=inv_event_map[curr_event.event2mg[index]-1]+1 # index for prod. matrix element
part_for_curr_evt=curr_event.event2mg[index] # index for event file
if part not in decay_struct:
external+=1
part_number+=1
decayed_event.particle[part_number]=curr_event.particle[part_for_curr_evt]
decayed_event.event2mg[part_number]=part_number
else:
# now we need to write the decay products in the event
# follow the decay chain order, so that we can easily keep track of the mother index
map_to_part_number={}
for res in range(-1,-len(list(decay_struct[part]["tree"].keys()))-1,-1):
index_res_for_mom=decay_struct[part]['mg_tree'][-res-1][0]
if (res==-1):
part_number+=1
mom=momenta_in_decay[index_res_for_mom].copy()
pid=decay_struct[part]["tree"][res]['label']
istup=2
mothup1=curr_event.particle[part_for_curr_evt]["mothup1"]
mothup2=curr_event.particle[part_for_curr_evt]["mothup2"]
colup1=curr_event.particle[part_for_curr_evt]["colup1"]
colup2=curr_event.particle[part_for_curr_evt]["colup2"]
decay_struct[part]["tree"][res]["colup1"]=colup1
decay_struct[part]["tree"][res]["colup2"]=colup2
mass=mom.m
helicity=0.
decayed_event.particle[part_number]={"pid":pid,\
"istup":istup,"mothup1":mothup1,"mothup2":mothup2,\
"colup1":colup1,"colup2":colup2,"momentum":mom,\
"mass":mass,"helicity":helicity}
decayed_event.event2mg[part_number]=part_number
map_to_part_number[res]=part_number
#
# Extract color information so that we can write the color flow
#
colormother=pid2color[decay_struct[part]["tree"][res]["label"]]
colord1=pid2color[decay_struct[part]\
["tree"][res]["d1"]["label"]]
colord2=pid2color[decay_struct[part]\
["tree"][res]["d2"]["label"]]
colup1=decay_struct[part]["tree"][res]["colup1"]
colup2=decay_struct[part]["tree"][res]["colup2"]
# now figure out what is the correct color flow informatio
# Only consider 1,3, 3-bar and 8 color rep.
# Normally, the color flow needs to be determined only
# during the reshuffling phase, but it is currenlty assigned
# for each "trial event"
if abs(colord1)==1:
d2colup1=colup1
d2colup2=colup2
d1colup1=0
d1colup2=0
elif abs(colord2)==1:
d1colup1=colup1
d1colup2=colup2
d2colup1=0
d2colup2=0
elif colord1==3 and colord2==-3 and colormother ==1:
maxcol+=1
d1colup1=maxcol
d1colup2=0
d2colup1=0
d2colup2=maxcol
elif colord1==3 and colord2==-3 and colormother ==8:
d1colup1=colup1
d1colup2=0
d2colup1=0
d2colup2=colup2
elif colord1==-3 and colord2==3 and colormother ==8:
d1colup1=0
d1colup2=colup2
d2colup1=colup1
d2colup2=0
elif colord1==-3 and colord2==3 and colormother ==1:
maxcol+=1
d1colup1=0
d1colup2=maxcol
d2colup1=maxcol
d2colup2=0
elif colord1==3 and colord2==8 and colormother ==3:
maxcol+=1
d2colup1=colup1
d2colup2=maxcol
d1colup1=maxcol
d1colup2=0
elif colord2==3 and colord1==8 and colormother ==3:
maxcol+=1
d1colup1=colup1
d1colup2=maxcol
d2colup1=maxcol
d2colup2=0
elif colord1==-3 and colord2==8 and colormother ==-3:
maxcol+=1
d2colup2=colup2
d2colup1=maxcol
d1colup2=maxcol
d1colup1=0
elif colord2==-3 and colord1==8 and colormother ==-3:
maxcol+=1
d1colup2=colup2
d1colup1=maxcol
d2colup2=maxcol
d2colup1=0
elif colord1==-3 and colord2==-3 and colormother == 3:
maxcol+=2
d1colup1=0
d1colup2=maxcol
d2colup1=0
d2colup2=maxcol-1
elif (colord1==-3 and colord2==3 and colormother == 3) or\
(colord1==-3 and colord2==3 and colormother == -3):
maxcol+=2
d1colup1 = 0
d1colup2 = maxcol
d2colup1 = maxcol-1
d2colup2 = 0
elif (colord1==3 and colord2==-3 and colormother == 3) or\
(colord1==3 and colord2==-3 and colormother == -3):
maxcol+=2
d1colup1=maxcol
d1colup2=0
d2colup1=0
d2colup2=maxcol-1
elif colord1==3 and colord2==3 and colormother == -3:
maxcol+=2
d1colup1=maxcol
d1colup2=0
d2colup1=maxcol-1
d2colup2=0
elif colord2==8 and colord1==8 and colormother ==8:
maxcol+=1
ran = random.random()
if ran> 0.5:
d1colup2=colup2
d1colup1=maxcol
d2colup2=maxcol
d2colup1=colup1
else:
d1colup2=maxcol
d1colup1=colup1
d2colup2=colup2
d2colup1=maxcol
else:
raise Exception('color combination not treated by MadSpin (yet). (%s,%s,%s)' \
% (colord1,colord2,colormother))
part_number+=1
index_d1_for_mom=decay_struct[part]['mg_tree'][-res-1][1]
mom=momenta_in_decay[index_d1_for_mom].copy()
#mom=decay_products[decay_struct[part]\
# ["tree"][res]["d1"]["index"]]["momentum"]
pid=decay_struct[part]\
["tree"][res]["d1"]["label"]
indexd1=decay_struct[part]["tree"][res]["d1"]["index"]
if ( indexd1>0):
hel=helicities[index_d1_for_mom-1]
istup=1
external+=1
if not use_mc_masses or abs(pid) not in self.MC_masses:
mass=self.banner.get('param_card','mass', abs(pid)).value
else:
mass=self.MC_masses[abs(pid)]
else:
hel=0.
decay_struct[part]["tree"][indexd1]["colup1"]=d1colup1
decay_struct[part]["tree"][indexd1]["colup2"]=d1colup2
istup=2
mass=mom.m
map_to_part_number[indexd1]=part_number
mothup1=map_to_part_number[res]
mothup2=map_to_part_number[res]
decayed_event.particle[part_number]={"pid":pid,\
"istup":istup,"mothup1":mothup1,"mothup2":mothup2,\
"colup1":d1colup1,"colup2":d1colup2,"momentum":mom,\
"mass":mass,"helicity":hel}
decayed_event.event2mg[part_number]=part_number
part_number+=1
index_d2_for_mom=decay_struct[part]['mg_tree'][-res-1][2]
mom=momenta_in_decay[index_d2_for_mom].copy()
#mom=decay_products[decay_struct[part]["tree"][res]["d2"]\
# ["index"]]["momentum"]
pid=decay_struct[part]["tree"][res]["d2"]\
["label"]
indexd2=decay_struct[part]["tree"][res]["d2"]["index"]
if ( indexd2>0):
hel=helicities[index_d2_for_mom-1]
istup=1
external+=1
if not use_mc_masses or abs(pid) not in self.MC_masses:
mass=self.banner.get('param_card','mass', abs(pid)).value
else:
mass=self.MC_masses[abs(pid)]
else:
hel=0.
istup=2
decay_struct[part]["tree"][indexd2]["colup1"]=d2colup1
decay_struct[part]["tree"][indexd2]["colup2"]=d2colup2
mass=mom.m
map_to_part_number[indexd2]=part_number
mothup1=map_to_part_number[res]
mothup2=map_to_part_number[res]
decayed_event.particle[part_number]={"pid":pid,"istup":istup,\
"mothup1":mothup1,"mothup2":mothup2,"colup1":d2colup1,\
"colup2":d2colup2,\
"momentum":mom,"mass":mass,"helicity":hel}
decayed_event.event2mg[part_number]=part_number
else: # resonance in the production event
part=curr_event.event2mg[index]
part_number+=1
decayed_event.particle[part_number]=curr_event.resonance[part]
decayed_event.event2mg[part_number]=part_number
# Here I need to check that the daughters still have the correct mothup1 and mothup2
for part in curr_event.resonance.keys():
mothup1=curr_event.resonance[part]["mothup1"]
mothup2=curr_event.resonance[part]["mothup2"]
if mothup1==index:
if mothup2!=index: print("Warning: | |
import collections.abc
from contextlib import suppress
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
Hashable,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
import numpy as np
import pandas as pd
from . import formatting, utils
from .indexing import ExplicitlyIndexedNDArrayMixin, NumpyIndexingAdapter
from .npcompat import DTypeLike
from .utils import is_dict_like, is_scalar
if TYPE_CHECKING:
from .variable import Variable
class Index:
    """Base class inherited by all xarray-compatible indexes.

    The methods below are stubs: subclasses are expected to override them,
    and the base implementations raise NotImplementedError (or TypeError
    for :meth:`to_pandas_index`).
    """
    __slots__ = ("coord_names",)
    def __init__(self, coord_names: Union[Hashable, Iterable[Hashable]]):
        # Normalize a single hashable (e.g. a plain string) to a 1-tuple.
        # NOTE(review): tuples are themselves Hashable, so a tuple of names
        # passed here is wrapped whole into a nested 1-tuple -- confirm that
        # callers only ever pass a single hashable or a non-hashable
        # iterable (e.g. a list) of names.
        if isinstance(coord_names, Hashable):
            coord_names = (coord_names,)
        self.coord_names = tuple(coord_names)
    @classmethod
    def from_variables(
        cls, variables: Dict[Hashable, "Variable"], **kwargs
    ): # pragma: no cover
        # alternate constructor: build an index from coordinate variables
        raise NotImplementedError()
    def to_pandas_index(self) -> pd.Index:
        """Cast this xarray index to a pandas.Index object or raise a TypeError
        if this is not supported.
        This method is used by all xarray operations that expect/require a
        pandas.Index object.
        """
        raise TypeError(f"{type(self)} cannot be cast to a pandas.Index object.")
    def query(self, labels: Dict[Hashable, Any]): # pragma: no cover
        # map coordinate labels to positional indexers (see PandasIndex.query)
        raise NotImplementedError
    def equals(self, other): # pragma: no cover
        raise NotImplementedError()
    def union(self, other): # pragma: no cover
        raise NotImplementedError()
    def intersection(self, other): # pragma: no cover
        raise NotImplementedError()
def _sanitize_slice_element(x):
    """Coerce one slice component (start/stop/step) to a plain scalar."""
    from .dataarray import DataArray
    from .variable import Variable

    # anything array-like (non-zero-dimensional) is rejected, except tuples
    scalar_like = isinstance(x, tuple) or len(np.shape(x)) == 0
    if not scalar_like:
        raise ValueError(
            f"cannot use non-scalar arrays in a slice for xarray indexing: {x}"
        )

    # unwrap xarray containers first, then pull the scalar out of any
    # remaining 0-d numpy array
    if isinstance(x, (Variable, DataArray)):
        x = x.values
    if isinstance(x, np.ndarray):
        x = x[()]

    return x
def _query_slice(index, label, coord_name="", method=None, tolerance=None):
    """Convert a label-based slice into a positional slice on *index*."""
    if method is not None or tolerance is not None:
        raise NotImplementedError(
            "cannot use ``method`` argument if any indexers are slice objects"
        )
    start = _sanitize_slice_element(label.start)
    stop = _sanitize_slice_element(label.stop)
    step = _sanitize_slice_element(label.step)
    indexer = index.slice_indexer(start, stop, step)
    if not isinstance(indexer, slice):
        # unlike pandas, in xarray we never want to silently convert a
        # slice indexer into an array indexer
        raise KeyError(
            "cannot represent labeled-based slice indexer for coordinate "
            f"{coord_name!r} with a slice over integer positions; the index is "
            "unsorted or non-unique"
        )
    return indexer
def _asarray_tuplesafe(values):
    """
    Convert values into a numpy array of at most 1-dimension, while preserving
    tuples.
    Adapted from pandas.core.common._asarray_tuplesafe
    """
    if isinstance(values, tuple):
        return utils.to_0d_object_array(values)

    result = np.asarray(values)
    if result.ndim == 2:
        # a list of equal-length sequences would otherwise become 2-d;
        # rebuild as a 1-d object array holding the original items
        result = np.empty(len(values), dtype=object)
        result[:] = values
    return result
def _is_nested_tuple(possible_tuple):
    """Return True if *possible_tuple* is a tuple holding a tuple/list/slice."""
    if not isinstance(possible_tuple, tuple):
        return False
    return any(
        isinstance(item, (tuple, list, slice)) for item in possible_tuple
    )
def get_indexer_nd(index, labels, method=None, tolerance=None):
    """Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional
    labels
    """
    # pandas only accepts 1-d label arrays: flatten, look up, reshape back
    flat_labels = np.ravel(labels)
    flat_positions = index.get_indexer(flat_labels, method=method, tolerance=tolerance)
    return flat_positions.reshape(labels.shape)
class PandasIndex(Index, ExplicitlyIndexedNDArrayMixin):
"""Wrap a pandas.Index to preserve dtypes and handle explicit indexing."""
__slots__ = ("array", "_dtype")
def __init__(
self, array: Any, dtype: DTypeLike = None, coord_name: Optional[Hashable] = None
):
if coord_name is None:
coord_name = tuple()
super().__init__(coord_name)
self.array = utils.safe_cast_to_index(array)
if dtype is None:
if isinstance(array, pd.PeriodIndex):
dtype_ = np.dtype("O")
elif hasattr(array, "categories"):
# category isn't a real numpy dtype
dtype_ = array.categories.dtype
elif not utils.is_valid_numpy_dtype(array.dtype):
dtype_ = np.dtype("O")
else:
dtype_ = array.dtype
else:
dtype_ = np.dtype(dtype) # type: ignore[assignment]
self._dtype = dtype_
    def to_pandas_index(self) -> pd.Index:
        """Return the wrapped pandas.Index itself (no copy is made)."""
        return self.array
    @property
    def dtype(self) -> np.dtype:
        """The numpy dtype this index exposes (chosen in ``__init__``)."""
        return self._dtype
def __array__(self, dtype: DTypeLike = None) -> np.ndarray:
if dtype is None:
dtype = self.dtype
array = self.array
if isinstance(array, pd.PeriodIndex):
with suppress(AttributeError):
# this might not be public API
array = array.astype("object")
return np.asarray(array.values, dtype=dtype)
    @property
    def shape(self) -> Tuple[int]:
        """Shape tuple; a wrapped pandas.Index is always 1-dimensional."""
        return (len(self.array),)
def query(
self, labels, method=None, tolerance=None
) -> Tuple[Any, Union["PandasIndex", None]]:
assert len(labels) == 1
coord_name, label = next(iter(labels.items()))
index = self.array
if isinstance(label, slice):
indexer = _query_slice(index, label, coord_name, method, tolerance)
elif is_dict_like(label):
raise ValueError(
"cannot use a dict-like object for selection on "
"a dimension that does not have a MultiIndex"
)
else:
label = (
label
if getattr(label, "ndim", 1) > 1 # vectorized-indexing
else _asarray_tuplesafe(label)
)
if label.ndim == 0:
# see https://github.com/pydata/xarray/pull/4292 for details
label_value = label[()] if label.dtype.kind in "mM" else label.item()
if isinstance(index, pd.CategoricalIndex):
if method is not None:
raise ValueError(
"'method' is not a valid kwarg when indexing using a CategoricalIndex."
)
if tolerance is not None:
raise ValueError(
"'tolerance' is not a valid kwarg when indexing using a CategoricalIndex."
)
indexer = index.get_loc(label_value)
else:
indexer = index.get_loc(
label_value, method=method, tolerance=tolerance
)
elif label.dtype.kind == "b":
indexer = label
else:
indexer = get_indexer_nd(index, label, method, tolerance)
if np.any(indexer < 0):
raise KeyError(f"not all values found in index {coord_name!r}")
return indexer, None
def equals(self, other):
if isinstance(other, pd.Index):
other = type(self)(other)
return self.array.equals(other.array)
def union(self, other):
if isinstance(other, pd.Index):
other = type(self)(other)
return type(self)(self.array.union(other.array))
def intersection(self, other):
if isinstance(other, pd.Index):
other = PandasIndex(other)
return type(self)(self.array.intersection(other.array))
def __getitem__(
self, indexer
) -> Union[
"PandasIndex",
NumpyIndexingAdapter,
np.ndarray,
np.datetime64,
np.timedelta64,
]:
key = indexer.tuple
if isinstance(key, tuple) and len(key) == 1:
# unpack key so it can index a pandas.Index object (pandas.Index
# objects don't like tuples)
(key,) = key
if getattr(key, "ndim", 0) > 1: # Return np-array if multidimensional
return NumpyIndexingAdapter(self.array.values)[indexer]
result = self.array[key]
if isinstance(result, pd.Index):
result = type(self)(result, dtype=self.dtype)
else:
# result is a scalar
if result is pd.NaT:
# work around the impossibility of casting NaT with asarray
# note: it probably would be better in general to return
# pd.Timestamp rather np.than datetime64 but this is easier
# (for now)
result = np.datetime64("NaT", "ns")
elif isinstance(result, timedelta):
result = np.timedelta64(getattr(result, "value", result), "ns")
elif isinstance(result, pd.Timestamp):
# Work around for GH: pydata/xarray#1932 and numpy/numpy#10668
# numpy fails to convert pd.Timestamp to np.datetime64[ns]
result = np.asarray(result.to_datetime64())
elif self.dtype != object:
result = np.asarray(result, dtype=self.dtype)
# as for numpy.ndarray indexing, we always want the result to be
# a NumPy array.
result = utils.to_0d_array(result)
return result
def transpose(self, order) -> pd.Index:
return self.array # self.array should be always one-dimensional
def __repr__(self) -> str:
return f"{type(self).__name__}(array={self.array!r}, dtype={self.dtype!r})"
def copy(self, deep: bool = True) -> "PandasIndex":
# Not the same as just writing `self.array.copy(deep=deep)`, as
# shallow copies of the underlying numpy.ndarrays become deep ones
# upon pickling
# >>> len(pickle.dumps((self.array, self.array)))
# 4000281
# >>> len(pickle.dumps((self.array, self.array.copy(deep=False))))
# 8000341
array = self.array.copy(deep=True) if deep else self.array
return type(self)(array, self._dtype)
class PandasMultiIndex(PandasIndex):
def query(
self, labels, method=None, tolerance=None
) -> Tuple[Any, Union["PandasIndex", None]]:
if method is not None or tolerance is not None:
raise ValueError(
"multi-index does not support ``method`` and ``tolerance``"
)
index = self.array
new_index = None
# label(s) given for multi-index level(s)
if all([lbl in index.names for lbl in labels]):
is_nested_vals = _is_nested_tuple(tuple(labels.values()))
if len(labels) == index.nlevels and not is_nested_vals:
indexer = index.get_loc(tuple(labels[k] for k in index.names))
else:
for k, v in labels.items():
# index should be an item (i.e. Hashable) not an array-like
if isinstance(v, Sequence) and not isinstance(v, str):
raise ValueError(
"Vectorized selection is not "
f"available along coordinate {k!r} (multi-index level)"
)
indexer, new_index = index.get_loc_level(
tuple(labels.values()), level=tuple(labels.keys())
)
# GH2619. Raise a KeyError if nothing is chosen
if indexer.dtype.kind == "b" and indexer.sum() == 0:
raise KeyError(f"{labels} not found")
# assume one label value given for the multi-index "array" (dimension)
else:
if len(labels) > 1:
coord_name = next(iter(set(labels) - set(index.names)))
raise ValueError(
f"cannot provide labels for both coordinate {coord_name!r} (multi-index array) "
f"and one or more coordinates among {index.names!r} (multi-index levels)"
)
coord_name, label = next(iter(labels.items()))
if is_dict_like(label):
invalid_levels = [name for name in label if name not in index.names]
if invalid_levels:
raise ValueError(
f"invalid multi-index level names {invalid_levels}"
)
return self.query(label)
elif isinstance(label, slice):
indexer = _query_slice(index, label, coord_name)
elif isinstance(label, tuple):
if _is_nested_tuple(label):
indexer = index.get_locs(label)
elif len(label) == index.nlevels:
indexer = index.get_loc(label)
else:
indexer, new_index = index.get_loc_level(
label, level=list(range(len(label)))
)
else:
label = (
label
if getattr(label, "ndim", 1) > 1 # vectorized-indexing
else _asarray_tuplesafe(label)
)
if label.ndim == 0:
indexer, new_index = index.get_loc_level(label.item(), level=0)
elif label.dtype.kind == "b":
indexer = label
else:
if label.ndim > 1:
raise ValueError(
"Vectorized selection is not available along "
f"coordinate {coord_name!r} with a multi-index"
)
indexer = get_indexer_nd(index, label)
if np.any(indexer < 0):
raise KeyError(f"not all values found in index {coord_name!r}")
if new_index is not None:
new_index | |
# Why: #6423 in Alexa global
'http://www.v2cigar.net/',
# Why: #6424 in Alexa global
'http://www.peopleclick.com/',
# Why: #6425 in Alexa global
'http://www.moudamepo.com/',
# Why: #6426 in Alexa global
'http://www.baijob.com/',
# Why: #6427 in Alexa global
'http://www.geni.com/',
# Why: #6428 in Alexa global
'http://huangye88.com/',
# Why: #6429 in Alexa global
'http://www.phun.org/',
# Why: #6430 in Alexa global
'http://www.kasikornbankgroup.com/',
# Why: #6431 in Alexa global
'http://www.angrymovs.com/',
# Why: #6432 in Alexa global
'http://www.bibliocommons.com/',
# Why: #6433 in Alexa global
'http://www.melateiran.com/',
# Why: #6434 in Alexa global
'http://www.gigya.com/',
# Why: #6435 in Alexa global
'http://17ok.com/',
# Why: #6436 in Alexa global
'http://www.ename.cn/',
# Why: #6437 in Alexa global
'http://www.xdowns.com/',
# Why: #6438 in Alexa global
'http://www.tportal.hr/',
# Why: #6439 in Alexa global
'http://www.dreamteammoney.com/',
# Why: #6440 in Alexa global
'http://www.prevention.com/',
# Why: #6441 in Alexa global
'http://www.terra.cl/',
# Why: #6442 in Alexa global
'http://www.blinklist.com/',
# Why: #6443 in Alexa global
'http://www.51seer.com/',
# Why: #6444 in Alexa global
'http://www.ruelsoft.com/',
# Why: #6445 in Alexa global
'http://www.kulichki.net/',
# Why: #6446 in Alexa global
'http://vippers.jp/',
# Why: #6447 in Alexa global
'http://www.tatatele.in/',
# Why: #6448 in Alexa global
'http://www.mybloggertricks.com/',
# Why: #6449 in Alexa global
'http://www.ma-bimbo.com/',
# Why: #6450 in Alexa global
'http://www.ftchinese.com/',
# Why: #6451 in Alexa global
'http://www.sergey-mavrodi-mmm.net/',
# Why: #6452 in Alexa global
'http://www.wp.tv/',
# Why: #6453 in Alexa global
'http://www.chevrolet.com/',
# Why: #6454 in Alexa global
'http://www.razerzone.com/',
# Why: #6455 in Alexa global
'http://www.submanga.com/',
# Why: #6456 in Alexa global
'http://www.thomson.co.uk/',
# Why: #6457 in Alexa global
'http://www.syosetu.org/',
# Why: #6458 in Alexa global
'http://www.olx.com/',
# Why: #6459 in Alexa global
'http://www.vplay.ro/',
# Why: #6460 in Alexa global
'http://www.rtnn.net/',
# Why: #6461 in Alexa global
'http://www.55.la/',
# Why: #6462 in Alexa global
'http://www.instructure.com/',
# Why: #6463 in Alexa global
'http://lvse.com/',
# Why: #6464 in Alexa global
'http://www.hvg.hu/',
# Why: #6465 in Alexa global
'http://www.androidpolice.com/',
# Why: #6466 in Alexa global
'http://www.cookinglight.com/',
# Why: #6467 in Alexa global
'http://www.madadsmedia.com/',
# Why: #6468 in Alexa global
'http://www.inews.gr/',
# Why: #6469 in Alexa global
'http://www.ktxp.com/',
# Why: #6470 in Alexa global
'http://www.socialsecurity.gov/',
# Why: #6471 in Alexa global
'http://www.equifax.com/',
# Why: #6472 in Alexa global
'http://www.ceskatelevize.cz/',
# Why: #6473 in Alexa global
'http://www.gaaks.com/',
# Why: #6474 in Alexa global
'http://www.chillingeffects.org/',
# Why: #6476 in Alexa global
'http://www.komando.com/',
# Why: #6477 in Alexa global
'http://www.nowpublic.com/',
# Why: #6478 in Alexa global
'http://www.khanwars.ae/',
# Why: #6479 in Alexa global
'http://www.berlin.de/',
# Why: #6480 in Alexa global
'http://www.bleepingcomputer.com/',
# Why: #6481 in Alexa global
'http://www.military.com/',
# Why: #6482 in Alexa global
'http://www.zero10.net/',
# Why: #6483 in Alexa global
'http://www.onekingslane.com/',
# Why: #6484 in Alexa global
'http://www.beget.ru/',
# Why: #6486 in Alexa global
'http://www.get-tune.net/',
# Why: #6487 in Alexa global
'http://www.freewebs.com/',
# Why: #6489 in Alexa global
'http://www.591.com.tw/',
# Why: #6490 in Alexa global
'http://www.pcfinancial.ca/',
# Why: #6491 in Alexa global
'http://www.sparknotes.com/',
# Why: #6492 in Alexa global
'http://www.tinychat.com/',
# Why: #6493 in Alexa global
'http://luxup.ru/',
# Why: #6494 in Alexa global
'http://www.geforce.com/',
# Why: #6495 in Alexa global
'http://www.tatts.com.au/',
# Why: #6496 in Alexa global
'http://www.alweeam.com.sa/',
# Why: #6497 in Alexa global
'http://www.123-reg.co.uk/',
# Why: #6498 in Alexa global
'http://www.sexyswingertube.com/',
# Why: #6499 in Alexa global
'http://www.groupon.es/',
# Why: #6500 in Alexa global
'http://www.guardianlv.com/',
# Why: #6501 in Alexa global
'http://www.hypovereinsbank.de/',
# Why: #6502 in Alexa global
'http://www.game2.com.cn/',
# Why: #6503 in Alexa global
'http://www.mofcom.gov.cn/',
# Why: #6504 in Alexa global
'http://www.usc.edu/',
# Why: #6505 in Alexa global
'http://www.ard.de/',
# Why: #6506 in Alexa global
'http://www.hoovers.com/',
# Why: #6507 in Alexa global
'http://www.tdameritrade.com/',
# Why: #6508 in Alexa global
'http://www.userscripts.org/',
# Why: #6509 in Alexa global
'http://app111.com/',
# Why: #6510 in Alexa global
'http://www.al.com/',
# Why: #6511 in Alexa global
'http://www.op.fi/',
# Why: #6512 in Alexa global
'http://www.adbkm.com/',
# Why: #6513 in Alexa global
'http://www.i-part.com.tw/',
# Why: #6514 in Alexa global
'http://www.pivithurutv.info/',
# Why: #6515 in Alexa global
'http://www.haber3.com/',
# Why: #6516 in Alexa global
'http://www.shatel.ir/',
# Why: #6517 in Alexa global
'http://www.camonster.com/',
# Why: #6518 in Alexa global
'http://www.weltbild.de/',
# Why: #6519 in Alexa global
'http://www.pingan.com.cn/',
# Why: #6520 in Alexa global
'http://www.advanceautoparts.com/',
# Why: #6521 in Alexa global
'http://www.mplssaturn.com/',
# Why: #6522 in Alexa global
'http://www.weeklystandard.com/',
# Why: #6523 in Alexa global
'http://www.cna.com.tw/',
# Why: #6524 in Alexa global
'http://www.popscreen.com/',
# Why: #6525 in Alexa global
'http://www.freelifetimefuckbook.com/',
# Why: #6526 in Alexa global
'http://www.peixeurbano.com.br/',
# Why: #6527 in Alexa global
'http://www.2258.com/',
# Why: #6528 in Alexa global
'http://www.proxfree.com/',
# Why: #6529 in Alexa global
'http://www.zend.com/',
# Why: #6530 in Alexa global
'http://www.garena.tw/',
# Why: #6531 in Alexa global
'http://www.citehr.com/',
# Why: #6532 in Alexa global
'http://www.gadyd.com/',
# Why: #6533 in Alexa global
'http://www.tvspielfilm.de/',
# Why: #6534 in Alexa global
'http://www.skapiec.pl/',
# Why: #6535 in Alexa global
'http://www.9see.com/',
# Why: #6536 in Alexa global
'http://cndns.com/',
# Why: #6537 in Alexa global
'http://www.hurriyetemlak.com/',
# Why: #6538 in Alexa global
'http://www.census.gov/',
# Why: #6539 in Alexa global
'http://www.collider.com/',
# Why: #6540 in Alexa global
'http://www.cinaplay.com/',
# Why: #6542 in Alexa global
'http://www.aq.com/',
# Why: #6543 in Alexa global
'http://www.aolsearch.com/',
# Why: #6544 in Alexa global
'http://www.ce4arab.com/',
# Why: #6546 in Alexa global
'http://www.cbi.ir/',
# Why: #6547 in Alexa global
'http://cjol.com/',
# Why: #6548 in Alexa global
'http://www.brandporno.com/',
# Why: #6549 in Alexa global
'http://www.yicheshi.com/',
# Why: #6550 in Alexa global
'http://www.mydealz.de/',
# Why: #6551 in Alexa global
'http://www.xiachufang.com/',
# Why: #6552 in Alexa global
'http://www.sun-sentinel.com/',
# Why: #6553 in Alexa global
'http://www.flashkhor.com/',
# Why: #6554 in Alexa global
'http://www.join.me/',
# Why: #6555 in Alexa global
'http://www.hankyung.com/',
# Why: #6556 in Alexa global
'http://www.oneandone.co.uk/',
# Why: #6557 in Alexa global
'http://www.derwesten.de/',
# Why: #6558 in Alexa global
'http://www.gammae.com/',
# Why: #6559 in Alexa global
'http://www.webadultdating.biz/',
# Why: #6560 in Alexa global
'http://www.pokerstars.com/',
# Why: #6561 in Alexa global
'http://www.fucked-sex.com/',
# Why: #6562 in Alexa global
'http://www.antaranews.com/',
# Why: #6563 in Alexa global
'http://www.banorte.com/',
# Why: #6564 in Alexa global
'http://www.travian.it/',
# Why: #6565 in Alexa global
'http://www.msu.edu/',
# Why: #6566 in Alexa global
'http://www.ozbargain.com.au/',
# Why: #6567 in Alexa global
'http://www.77vcd.com/',
# Why: #6568 in Alexa global
'http://www.bestooxx.com/',
# Why: #6569 in Alexa global
'http://www.siemens.com/',
# Why: #6570 in Alexa global
'http://www.en-japan.com/',
# Why: #6571 in Alexa global
'http://www.akbank.com/',
# Why: #6572 in Alexa global
'http://www.srf.ch/',
# Why: #6573 in Alexa global
'http://www.meijer.com/',
# Why: #6574 in Alexa global
'http://www.htmldrive.net/',
# Why: #6575 in Alexa global
'http://www.peoplestylewatch.com/',
# Why: #6576 in Alexa global
'http://www.4008823823.com.cn/',
# Why: #6577 in Alexa global
'http://www.boards.ie/',
# Why: #6578 in Alexa global
'http://www.zhulong.com/',
# Why: #6579 in Alexa global
'http://www.svyaznoybank.ru/',
# Why: #6580 in Alexa global
'http://www.myfilestore.com/',
# Why: #6581 in Alexa global
'http://www.sucuri.net/',
# Why: #6582 in Alexa global
'http://www.redflagdeals.com/',
# Why: #6583 in Alexa global
'http://www.gxnews.com.cn/',
# Why: #6584 in Alexa global
'http://www.javascriptkit.com/',
# Why: #6585 in Alexa global
'http://www.edreams.fr/',
# Why: #6586 in Alexa global
'http://www.wral.com/',
# Why: #6587 in Alexa global
'http://www.togetter.com/',
# Why: #6588 in Alexa global
'http://www.dmi.dk/',
# Why: #6589 in Alexa global
'http://www.thinkdigit.com/',
# Why: #6590 in Alexa global
'http://www.barclaycard.co.uk/',
# Why: #6591 in Alexa global
'http://www.comm100.com/',
# Why: #6592 in Alexa global
'http://www.christianbook.com/',
# Why: #6593 in Alexa global
'http://www.popularmechanics.com/',
# Why: #6594 in Alexa global
'http://www.taste.com.au/',
# Why: #6595 in Alexa global
'http://www.tripadvisor.ru/',
# Why: #6596 in Alexa global
'http://www.colissimo.fr/',
# Why: #6597 in Alexa global
'http://www.gdposir.info/',
# Why: #6598 in Alexa global
'http://www.rarlab.com/',
# Why: #6599 in Alexa global
'http://www.dcnepalevent.com/',
# Why: #6600 in Alexa global
'http://www.sagepub.com/',
# Why: #6601 in Alexa global
'http://www.markosweb.com/',
# Why: #6602 in Alexa global
'http://www.france3.fr/',
# Why: #6603 in Alexa global
'http://www.mindbodyonline.com/',
# Why: #6604 in Alexa global
'http://www.yapo.cl/',
# Why: #6605 in Alexa global
'http://www.0-6.com/',
# Why: #6606 in Alexa global
'http://www.dilbert.com/',
# Why: #6607 in Alexa global
'http://www.searchqu.com/',
# Why: #6608 in Alexa global
'http://www.usa.gov/',
# Why: #6609 in Alexa global
'http://www.vatandownload.com/',
# Why: #6610 in Alexa global
| |
# Repository: LukasMosser/SNIST
from collections import OrderedDict, namedtuple
from itertools import product
import sympy
import numpy as np
from psutil import virtual_memory
from cached_property import cached_property
from devito.builtins import assign
from devito.cgen_utils import INT, cast_mapper
from devito.data import Data, default_allocator
from devito.dimension import Dimension, ConditionalDimension, DefaultDimension
from devito.equation import Eq, Inc
from devito.exceptions import InvalidArgument
from devito.logger import debug, warning
from devito.mpi import MPI, SparseDistributor
from devito.parameters import configuration
from devito.symbolics import Add, indexify, retrieve_function_carriers
from devito.finite_differences import Differentiable, generate_fd_shortcuts
from devito.types import (AbstractCachedFunction, AbstractCachedSymbol, Symbol, Scalar,
OWNED, HALO, LEFT, RIGHT)
from devito.tools import (EnrichedTuple, Tag, ReducerMap, ArgProvider, as_tuple,
flatten, is_integer, prod, powerset, filter_ordered,
memoized_meth)
__all__ = ['Constant', 'Function', 'TimeFunction', 'SparseFunction',
'SparseTimeFunction', 'PrecomputedSparseFunction',
'PrecomputedSparseTimeFunction', 'Buffer', 'NODE', 'CELL']
class Constant(AbstractCachedSymbol, ArgProvider):
    """
    Symbol representing constant values in symbolic equations.
    .. note::
        The parameters must always be given as keyword arguments, since
        SymPy uses ``*args`` to (re-)create the dimension arguments of the
        symbolic function.
    """
    # Classification flags used for dispatch elsewhere in the codebase
    is_Input = True
    is_Constant = True
    is_Scalar = True
    def __init__(self, *args, **kwargs):
        # Skip re-initialization if this symbol came from the symbol cache
        if not self._cached():
            self._value = kwargs.get('value', 0)
    @classmethod
    def __dtype_setup__(cls, **kwargs):
        # Default dtype is single-precision float
        return kwargs.get('dtype', np.float32)
    @property
    def data(self):
        """The value of the data object, as a scalar (int, float, ...)."""
        # Cast through self.dtype so callers always see a consistent type
        return self.dtype(self._value)
    @data.setter
    def data(self, val):
        self._value = val
    @property
    def _arg_names(self):
        """Return a tuple of argument names introduced by this symbol."""
        return (self.name,)
    @memoized_meth
    def _arg_defaults(self, alias=None):
        """
        Returns a map of default argument values defined by this symbol.

        :param alias: Optional symbol whose name keys this symbol's value
                      (used when substituting one Constant for another).
        """
        key = alias or self
        return {key.name: self.data}
    def _arg_values(self, **kwargs):
        """
        Returns a map of argument values after evaluating user input. If no
        user input is provided, return a default value.
        :param kwargs: Dictionary of user-provided argument overrides.
        """
        if self.name in kwargs:
            new = kwargs.pop(self.name)
            if isinstance(new, Constant):
                # Another Constant was supplied: adopt its value, keyed by
                # this symbol's name
                return new._arg_defaults(alias=self)
            else:
                return {self.name: new}
        else:
            return self._arg_defaults()
    def _arg_check(self, args, intervals):
        """
        Check that ``args`` contains legal runtime values bound to ``self``.

        Raises InvalidArgument if no value is present; a dtype mismatch only
        emits a warning.
        """
        if self.name not in args:
            raise InvalidArgument("No runtime value for %s" % self.name)
        key = args[self.name]
        try:
            # Might be a plain number, w/o a dtype field
            if key.dtype != self.dtype:
                warning("Data type %s of runtime value `%s` does not match the "
                        "Constant data type %s" % (key.dtype, self.name, self.dtype))
        except AttributeError:
            pass
    # Ensure the runtime value survives pickling/unpickling
    _pickle_kwargs = AbstractCachedSymbol._pickle_kwargs + ['_value']
class TensorFunction(AbstractCachedFunction, ArgProvider):
"""
Utility class to encapsulate all symbolic types that represent
tensor (array) data.
.. note::
Users should not instantiate this class. Use :class:`Function` or
:class:`SparseFunction` (or their subclasses) instead.
"""
# Required by SymPy, otherwise the presence of __getitem__ will make SymPy
# think that a TensorFunction is actually iterable, thus breaking many of
# its key routines (e.g., solve)
_iterable = False
is_Input = True
is_TensorFunction = True
is_Tensor = True
    def __init__(self, *args, **kwargs):
        """Set up grid, distributor, staggering metadata and the data
        allocation policy. Construction is skipped entirely when the symbol
        was retrieved from the symbol cache (``self._cached()``)."""
        if not self._cached():
            super(TensorFunction, self).__init__(*args, **kwargs)
            # There may or may not be a `Grid` attached to the TensorFunction
            self._grid = kwargs.get('grid')
            # A `Distributor` to handle domain decomposition (only relevant for MPI)
            self._distributor = self.__distributor_setup__(**kwargs)
            # Staggering metadata
            self._staggered = self.__staggered_setup__(**kwargs)
            # Data-related properties and data initialization
            self._data = None
            self._first_touch = kwargs.get('first_touch', configuration['first-touch'])
            self._allocator = kwargs.get('allocator', default_allocator())
            initializer = kwargs.get('initializer')
            if initializer is None or callable(initializer):
                # Initialization postponed until the first access to .data
                self._initializer = initializer
            elif isinstance(initializer, (np.ndarray, list, tuple)):
                # Allocate memory and initialize it. Note that we do *not* hold
                # a reference to the user-provided buffer
                self._initializer = None
                if len(initializer) > 0:
                    self.data_with_halo[:] = initializer
                else:
                    # This is a corner case -- we might get here, for example, when
                    # running with MPI and some processes get 0-size arrays after
                    # domain decomposition. We touch the data anyway to avoid the
                    # case ``self._data is None``
                    self.data
            else:
                raise ValueError("`initializer` must be callable or buffer, not %s"
                                 % type(initializer))
    def _allocate_memory(func):
        """Decorator: lazily allocate the backing :class:`Data` on the first
        call of the decorated accessor.

        NOTE(review): a plain function used as a decorator inside the class
        body; it does not apply ``functools.wraps``, so the wrapper does not
        carry the wrapped function's name/docstring.
        """
        def wrapper(self):
            # Allocate only once, on first access
            if self._data is None:
                debug("Allocating memory for %s%s" % (self.name, self.shape_allocated))
                self._data = Data(self.shape_allocated, self.dtype,
                                  modulo=self._mask_modulo, allocator=self._allocator)
                if self._first_touch:
                    assign(self, 0)
                if callable(self._initializer):
                    if self._first_touch:
                        warning("`first touch` together with `initializer` causing "
                                "redundant data initialization")
                    try:
                        self._initializer(self.data_with_halo)
                    except ValueError:
                        # Perhaps user only wants to initialise the physical domain
                        self._initializer(self.data)
                else:
                    # No initializer: zero-fill the whole halo-inclusive view
                    self.data_with_halo.fill(0)
            return func(self)
        return wrapper
@classmethod
def __dtype_setup__(cls, **kwargs):
grid = kwargs.get('grid')
dtype = kwargs.get('dtype')
if dtype is not None:
return dtype
elif grid is not None:
return grid.dtype
else:
return np.float32
    def __staggered_setup__(self, **kwargs):
        """
        Setup staggering-related metadata. This method assigns: ::
            * 0 to non-staggered dimensions;
            * 1 to staggered dimensions.

        Also sets ``self.is_Staggered`` as a side effect and returns the
        per-dimension staggering mask (entries -1, 0 or 1).
        """
        staggered = kwargs.get('staggered')
        if staggered is None:
            self.is_Staggered = False
            return tuple(0 for _ in self.indices)
        else:
            self.is_Staggered = True
            # NODE/CELL are sentinels: stagger on no dimension / on all of them
            if staggered is NODE:
                staggered = ()
            elif staggered is CELL:
                staggered = self.indices
            else:
                staggered = as_tuple(staggered)
            mask = []
            for d in self.indices:
                if d in staggered:
                    mask.append(1)
                elif -d in staggered:
                    # Negated dimension: stagger in the opposite direction
                    mask.append(-1)
                else:
                    mask.append(0)
            return tuple(mask)
def __distributor_setup__(self, **kwargs):
grid = kwargs.get('grid')
# There may or may not be a `Distributor`. In the latter case, the
# TensorFunction is to be considered "local" to each MPI rank
return kwargs.get('distributor') if grid is None else grid.distributor
    @property
    def _data_buffer(self):
        """Reference to the data. Unlike :attr:`data` and :attr:`data_with_halo`,
        this *never* returns a view of the data. This method is for internal use only."""
        # Delegates to `_data_allocated` (defined in a base class); presumably
        # the full allocated buffer including halo/padding -- TODO confirm
        return self._data_allocated
    @property
    def _mem_external(self):
        # NOTE(review): flags this symbol's memory as externally provided;
        # exact semantics are defined in the base class hierarchy -- confirm
        return True
    @property
    def grid(self):
        """The grid this function is defined on, or None (see __init__)."""
        return self._grid
    @property
    def staggered(self):
        """Per-dimension staggering mask (entries -1, 0 or 1), as computed
        by ``__staggered_setup__``."""
        return self._staggered
    @cached_property
    def shape(self):
        """
        Shape of the domain region. The domain constitutes the area of the
        data written to by an :class:`Operator`.
        Notes
        -----
        In an MPI context, this is the *local* domain region shape.
        """
        # Plain alias of `shape_domain`
        return self.shape_domain
    @cached_property
    def shape_domain(self):
        """
        Shape of the domain region. The domain constitutes the area of the
        data written to by an :class:`Operator`.
        Notes
        -----
        In an MPI context, this is the *local* domain region shape.
        Alias to ``self.shape``.
        """
        # Subtract each dimension's staggering mask entry from its extent
        return tuple(i - j for i, j in zip(self._shape, self.staggered))
    @cached_property
    def shape_with_halo(self):
        """
        Shape of the domain+outhalo region. The outhalo is the region
        surrounding the domain that may be read by an :class:`Operator`.
        Notes
        -----
        In an MPI context, this is the *local* with_halo region shape.
        Further, note that the outhalo of inner ranks is typically empty, while
        the outhalo of boundary ranks contains a number of elements depending
        on the rank position in the decomposed grid (corner, side, ...).
        """
        # Per dimension: left outhalo + domain extent + right outhalo
        return tuple(j + i + k for i, (j, k) in zip(self.shape_domain,
                                                    self._extent_outhalo))
_shape_with_outhalo = shape_with_halo
    @cached_property
    def _shape_with_inhalo(self):
        """
        Shape of the domain+inhalo region. The inhalo region comprises the
        outhalo as well as any additional "ghost" layers for MPI halo
        exchanges. Data in the inhalo region are exchanged when running
        :class:`Operator`s to maintain consistent values as in sequential runs.
        Notes
        -----
        Typically, this property won't be used in user code, but it may come
        in handy for testing or debugging
        """
        # Per dimension: left halo + domain extent + right halo
        return tuple(j + i + k for i, (j, k) in zip(self.shape_domain, self._halo))
    @cached_property
    def shape_allocated(self):
        """
        Shape of the allocated data. It includes the domain and inhalo regions,
        as well as any additional padding surrounding the halo.
        Notes
        -----
        In an MPI context, this is the *local* with_halo region shape.
        """
        # Per dimension: left padding + inhalo-inclusive extent + right padding
        return tuple(j + i + k for i, (j, k) in zip(self._shape_with_inhalo,
                                                    self._padding))
@cached_property
def shape_global(self):
"""
Global shape of the domain region. The domain constitutes the area of
the data written to by an :class:`Operator`.
Notes
-----
In an MPI context, this is the *global* domain region shape, which is
therefore identical on all MPI ranks.
"""
if self.grid is None:
return self.shape
retval = []
for d, s in zip(self.dimensions, self.shape):
size = self.grid.dimension_map.get(d)
retval.append(size.glb if size is not None else s)
return tuple(retval)
_offset_inhalo = AbstractCachedFunction._offset_halo
_extent_inhalo = AbstractCachedFunction._extent_halo
@cached_property
def _extent_outhalo(self):
"""
The number of points in the outer halo region.
"""
if self._distributor is None:
return self._extent_inhalo
left = [self._distributor.glb_to_loc(d, i, LEFT, strict=False)
for d, i in zip(self.dimensions, self._extent_inhalo.left)]
right = [self._distributor.glb_to_loc(d, i, RIGHT, strict=False)
for d, i in zip(self.dimensions, self._extent_inhalo.right)]
Extent = | |
# Repository: nidhimittalhada/access_group_repo
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2015 Wipro Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib2 as urlreq
import json
import jsonrpc
import urlparse
import base64
from xml.etree import ElementTree as ET
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import units
from manila import exception
from manila.i18n import _, _LE, _LW
from manila.share.drivers.Nexenta_Wipro import constants
import utils
LOG = log.getLogger(__name__)
class RestHelper():
    def __init__(self, configuration):
        """Store driver configuration and prepare REST connection state.

        SECURITY NOTE(review): the credentials 'admin:nexenta' are hard-coded
        into the Authorization header; they should come from configuration.
        """
        self.configuration = configuration
        self.url = None
        self.headers = {
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            # Python 2 base64 codec appends a trailing '\n'; [:-1] strips it
            'Authorization': 'Basic %s' % 'admin:nexenta'.encode('base64')[:-1]
        }
        self.shares = {}
        self.share2nms = ''
        self.shareinfo = {}
    def _read_xml(self):
        """Open xml file and parse the content.

        Returns the root Element of the driver config file; re-raises any
        parse/IO error after logging it.
        """
        # default='/etc/manila/manila_nexenta_conf.xml',
        filename = self.configuration.manila_nexenta_conf_file
        print "NEXENTA_WIPRO:- conf filename", filename
        try:
            tree = ET.parse(filename)
            root = tree.getroot()
        except Exception as err:
            # NOTE(review): eager %-formatting inside LOG.error; oslo.log
            # convention would pass the mapping as a lazy argument instead
            LOG.error(_LE('Read Nexenta config file(%(filename)s)'
                          ' for Manila error: %(err)s') %
                      {'filename': filename,
                       'err': err})
            raise err
        return root
    def _check_conf_file(self):
        """Check the config file, make sure the essential items are set

        Validates Storage/RestURL, Storage/Product (must be NEXENTASTOR) and
        Filesystem/StoragePool; raises InvalidInput on any missing/bad value.
        """
        root = self._read_xml()
        # <RestURL>http://10.141.67.41:8457/rest/nms</RestURL>
        resturl = root.findtext('Storage/RestURL')
        print"NEXENTA_WIPRO: resturl", resturl
        # <Product>NEXENTASTOR</Product>
        product = root.findtext('Storage/Product')
        print"NEXENTA_WIPRO: product", product
        # VOL_52 - Volume Name
        # pool_node = root.findall('Filesystem/StoragePool')
        pool_node = root.findtext('Filesystem/StoragePool')
        print"NEXENTA_WIPRO: pool_node", pool_node
        if product != "NEXENTASTOR":
            err_msg = (_(
                '_check_conf_file: Config file invalid. '
                'Product must be set to NEXENTASTOR.'))
            LOG.error(err_msg)
            raise exception.InvalidInput(err_msg)
        if (not resturl):
            err_msg = (_(
                '_check_conf_file: Config file invalid. RestURL '
                'must be set'))
            LOG.error(err_msg)
            raise exception.InvalidInput(err_msg)
        if (not pool_node):
            err_msg = (_(
                '_check_conf_file: Config file invalid. '
                'StoragePool must be set.'))
            LOG.error(err_msg)
            raise exception.InvalidInput(err_msg)
    def _check_service(self):
        """Stub: service status checks are not implemented; the CIFS/NFS
        services are assumed to be running (see commented-out plan below)."""
        # NEXENTA_WIPRO: Assume that Service is running for now.
        print "NOT IMPLEMENETED"
        # NEXENTA_WIPRO: TODO : To be completed.
        # running_status = self._get_cifs_service_status()
        # if running_status != constants.STATUS_SERVICE_RUNNING:
        # self._start_cifs_service_status()
        # service = self._get_nfs_service_status()
        # if ((service['RUNNINGSTATUS'] != constants.STATUS_SERVICE_RUNNING) or
        # (service['SUPPORTV3'] == 'false') or
        # (service['SUPPORTV4'] == 'false')):
        # self._start_nfs_service_status()
    def _get_login_info(self):
        """NEXENTA_WIPRO: Get login IP from config file.

        Returns a dict with the stripped 'RestURL' value.
        """
        logininfo = {}
        filename = self.configuration.manila_nexenta_conf_file
        print"NEXENTA_WIPRO: conf filename ", filename
        # NOTE(review): re-parses the XML here instead of reusing _read_xml()
        tree = ET.parse(filename)
        root = tree.getroot()
        RestURL = root.findtext('Storage/RestURL')
        logininfo['RestURL'] = RestURL.strip()
        print"NEXENTA_WIPRO: rest url obtained is %s ", RestURL
        return logininfo
    def login(self):
        """Log in Nexenta array.

        Builds the NMS JSON-RPC proxy from the config's connection_url,
        verifies connectivity by fetching 'nms_version', and returns it.
        Raises InvalidInput when the appliance does not answer.
        """
        login_info = self._get_login_info()
        # url should be 'http://192.168.199.128:8457/rest/nms'
        # NOTE(review): local `url` is assigned but only printed below
        url = login_info['RestURL']
        print"NEXENTA_WIPRO:- url is ", url
        # NEXENTA_WIPRO: First Command Get the NexentaStor Version,
        # just for testing the communication
        self.url = login_info['RestURL']
        root = self._read_xml()
        # <connection_url>http://admin:nexenta@10.141.67.41:8457</connection_url>
        nms_url = root.findtext('Filesystem/connection_url').strip()
        print"NEXENTA_WIPRO: : nms_url : ", nms_url
        # nms_url should be http://10.141.67.41:8457/rest/nms
        self.share2nms = self._get_nms_for_url(nms_url)
        nms = self.share2nms
        res = (nms.appliance.get_prop('nms_version'))
        if not (res):
            err_msg = (
                _("NEXENTA_WIPRO:_ERR: Could not login in"
                  "Nexenta Store appliance"))
            LOG.error(err_msg)
            raise exception.InvalidInput(err_msg)
        print "NEXENTA_WIPRO: res is ", res
        nms_version = res
        # <share_name>manila_folder</share_name>
        # share_name = root.findtext('Filesystem/share_name').
        # strip().decode('unicode_escape')
        # print"NEXENTA_WIPRO:200 : share_address : ", share_address
        return nms_version
def _get_nms_for_url(self, nms_url):
# o = urlparse('http://www.cwi.nl:80/%7Eguido/Python.html') ->
# NEXENTA_WIPRO: BREAK INTO 6 parts
# ParseResult(scheme='http', netloc='www.cwi.nl:80',
# path='/%7Eguido/Python.html',
# params='', query='', fragment='')
parts = urlparse.urlparse(nms_url)
scheme = parts.scheme
print"NEXENTA_WIPRO: scheme is ", scheme
user = 'admin'
password = '<PASSWORD>'
# NEXENTA_WIPRO: if username and password not given in url
# http://admin:nexenta@10.141.67.41:8457
if '@' not in parts.netloc:
host_and_port = parts.netloc # 10.141.67.41:8457
else:
user_and_password, host_and_port = parts.netloc.split(
'@', 1) # admin:nexenta@10.141.67.41:8457
if ':' in user_and_password:
user, password = user_and_password.split(':') # admin:nexenta
else:
user = user_and_password
if ':' in host_and_port:
host, port = host_and_port.split(':', 1) # 10.141.67.41:8457
else:
host, port = host_and_port, '2000'
# http,10.141.67.41,8457
url = '%s://%s:%s/rest/nms/' % (scheme, host, port)
print"NEXENTA_WIPRO: url is ", url
# url should be http://10.141.67.41:8457/rest/nms
return jsonrpc.NexentaJSONProxy(url, user, password)
def _get_cifs_service_status(self):
LOG.debug("Check CIFS Service status- NOT YET IMPLEMENED.")
def _start_cifs_service_status(self):
LOG.debug("Start CIFS Service - NOT YET IMPLEMENED.")
def _find_pool_info(self):
root = self._read_xml()
pool_name = root.findtext('Filesystem/StoragePool').strip()
print"NEXENTA_WIPRO: pool_name : ", pool_name
if not pool_name:
err_msg = (_("Invalid resource pool: %s.") % pool_name)
LOG.error(err_msg)
raise exception.InvalidInput(err_msg)
nms = self.share2nms
if not nms.volume.object_exists(pool_name):
err_msg = (
_("NEXENTA_WIPRO:_ERR: Volume %s does not"
"exist in Nexenta Store appliance") %
pool_name)
LOG.error(err_msg)
raise exception.InvalidInput(err_msg)
# Check the Available storage space on the volume.
# TODO confirm why the free and available have "ulta pulta" values
volume_props = nms.volume.get_child_props(pool_name, '')
print"NEXENTA_WIPRO: volume props ", volume_props
# Used storage space, the size of storage within the volume occupied by
# data
print volume_props['allocated']
# free : Available storage space within the volume. Or, same: amount of
# storage that can be used
print volume_props['free']
allocated = utils.str2size(volume_props['allocated'])
free = utils.str2size(volume_props['free'])
poolinfo = {}
poolinfo['NAME'] = pool_name
poolinfo['ALLOCATED'] = allocated
poolinfo['FREE_CAPACITY'] = free
print"NEXENTA_WIPRO: poolinfo is ", poolinfo
return poolinfo
def _get_share_type(self, share_proto):
share_type = None
if share_proto == 'NFS':
share_type = "NFSHARE"
elif share_proto == 'CIFS':
share_type = "CIFSHARE"
else:
raise exception.InvalidShare(
reason=(_('Invalid NAS protocol supplied: %s.')
% share_proto))
return share_type
def _init_filesys_para(self, share_name, size, share_proto):
"""Init basic filesystem parameters."""
poolinfo = self._find_pool_info()
print("NEXENTA_WIPRO: pool free capacity in bytes is",
poolinfo['FREE_CAPACITY'])
print("NEXENTA_WIPRO: required size in bytes is ", size)
if poolinfo['FREE_CAPACITY'] < size:
err_msg = (
_("NEXENTA_WIPRO:_ERR: Volume %s doesnt"
"have enough free space") %
poolinfo['NAME'])
LOG.error(err_msg)
raise exception.InvalidInput(err_msg)
# convert size to Gigabytes - a string is to be sent to nexenta.
size = '%sG' % (size / units.Gi)
# TODO: confirm size unit
nexenta_folderparam = {
"NAME": share_name.replace("-", "_"),
"PARENT_NAME": poolinfo['NAME'], # Volume Name
"POOLINFO": poolinfo,
"DESCRIPTION": "Manilla Nexenta Folder smb shared",
"QUOTA": size, # size in bytes
"RECORDSIZE": '4k',
"COMPRESSION": 'on',
"SHARESMB": 'off',
"SHARENFS": 'off',
"SIZE": size,
"SHARE_PROTO": share_proto.strip(),
}
if share_proto == "CIFS":
nexenta_folderparam['SHARESMB'] = 'on'
nexenta_folderparam['SHARENFS'] = 'off'
elif share_proto == "NFS":
nexenta_folderparam['SHARESMB'] = 'off'
nexenta_folderparam['SHARENFS'] = 'on'
else:
print"Wrong share protocol"
err_msg = (
_("NEXENTA_WIPRO:_ERR: Wrong Value of share protocol"))
LOG.error(err_msg)
raise exception.InvalidInput(err_msg)
print("NEXENTA_WIPRO: Check sharesmb and sharenfs option",
nexenta_folderparam)
return nexenta_folderparam
def allocate_container(self, share_name, size, share_proto):
"""Creates filesystem associated to share by name."""
print"NEXENTA_WIPRO: share_name in creation is", share_name
# share_name in creation is share-15dee8f7-49bc-4833-8499-4116418b740d
nexenta_folderparam = self._init_filesys_para(
share_name, size, share_proto)
complete_share_name = self._create_filesystem(nexenta_folderparam)
return complete_share_name
def _create_filesystem(self, folder_param):
"""Create file system."""
# TODO: size and quota has to be related
nms = self.share2nms
if folder_param['SHARESMB'] == "on":
create_folder_props = {'quota': folder_param['QUOTA'],
'recordsize': folder_param['RECORDSIZE'],
'compression': folder_param['COMPRESSION'],
'sharesmb': folder_param['SHARESMB'],
}
elif (folder_param['SHARESMB'] == "off" and
folder_param['SHARENFS'] == "on"):
create_folder_props = {'quota': folder_param['QUOTA'],
'recordsize': folder_param['RECORDSIZE'],
'compression': folder_param['COMPRESSION'],
'sharenfs': folder_param['SHARENFS'],
}
print "NEXENTA_WIPRO: create-folder_props are ", create_folder_props
if not nms.folder.create_with_props(
folder_param['PARENT_NAME'],
folder_param['NAME'],
create_folder_props):
err_msg = (
_('NEXENTA_WIPRO:_ERR: Folder %(folder) could'
'not be created on Volume %(volume)') % {
'folder': folder_param['NAME'],
'volume': folder_param['PARENT_NAME']})
LOG.error(err_msg)
raise exception.InvalidShare(reason=err_msg)
# FOLDER IS CREATED
if folder_param['SHARESMB'] == "on":
fmri = 'svc:/network/smb/server:default'
elif (folder_param['SHARESMB'] == "off" and
folder_param['SHARENFS'] == "on"):
fmri = 'svc:/network/nfs/server:default'
else:
print"Could Not set fmri"
err_msg = (_('NEXENTA_WIPRO:_ERR: Could Not set FMRI'))
LOG.error(err_msg)
raise exception.InvalidInput(reason=err_msg)
print "NEXENTA_WIPRO: fmri used is", fmri
path = '%s/%s' % (folder_param['PARENT_NAME'].strip(),
folder_param['NAME'].strip())
print "folder path is ", path
# share_opts = {
# 'read_write': '*',
# 'read_only': '',
# 'root': 'nobody',
# 'extra_options': 'anon=0',
# 'recursive': 'true',
# 'anonymous_rw': 'true',
# }
share_opts = {
'Auth_Type': 'Auth_sys',
'read_write': '*',
'recursive': 'true',
'anonymous_rw': 'true',
'anonymous': 'true',
'extra_options': 'anon=0',
}
LOG.debug('Sharing folder on Nexenta Store')
print"NEXENTA_WIPRO: share opts are ", share_opts
try:
result = nms.netstorsvc.share_folder(fmri, path, share_opts)
except Exception as err:
LOG.error(
_LE('NEXENTA_WIPRO:_ERR: Folder %(share_name)'
'could not be shared, error:') % {
'share_name': folder_param['NAME']})
raise err
# Get all folders that are shared using the specified storage
# access protocol and match the specified pattern
# Pattern to select a subset of folders.
# An empty string matches all folders
# Returns : | |
for ProjectDocument and ProjectService to call
#----------------------------------------------------------------------------
    def SetProject(self, projectPath):
        """Switch the project view to the document whose file is *projectPath*.

        Does nothing while projects are still being loaded at startup.
        """
        if self._prject_browser.IsLoading:
            utils.get_logger().info("app is loading projects at startup ,do not load project document %s at this time",projectPath)
            return
        # Force the project view window to show when opening a project file,
        # without generating an event.
        GetApp().MainFrame.GetProjectView(show=True,generate_event=False)
        curSel = self._prject_browser.project_combox.current()
        for i in range(len(self._prject_browser.project_combox['values'])):
            document = self._documents[i]
            if document.GetFilename() == projectPath:
                if curSel != i: # don't reload if already loaded
                    utils.get_logger().info("switch to and load project document %s",projectPath)
                    self._prject_browser.project_combox.current(i)
                    self.SetDocument(document)
                    self.LoadProject(document)
                    #self._projectChoice.SetToolTipString(document.GetFilename())
                break
def GetSelectedFile(self):
for item in self._treeCtrl.selection():
filePath = self._GetItemFilePath(item)
if filePath:
return filePath
return None
def GetSelectedFiles(self):
filePaths = []
for item in self._treeCtrl.GetSelections():
filePath = self._GetItemFilePath(item)
if filePath and filePath not in filePaths:
filePaths.append(filePath)
return filePaths
def GetSelectedPhysicalFolder(self):
if self.GetMode() == ProjectView.PROJECT_VIEW:
return None
else:
for item in self._treeCtrl.GetSelections():
if not self._IsItemFile(item):
filePath = self._GetItemFolderPath(item)
if filePath:
return filePath
return None
def GetSelectedProject(self):
document = self.GetDocument()
if document:
return document.GetFilename()
else:
return None
def GetProjectSelection(self,document):
for i in range(len(self._prject_browser.project_combox['values'])):
project = self._documents[i]
if document == project:
return i
return -1
    def AddProjectToView(self, document):
        """Add *document* to the project combobox (when not already present)
        and make it the active selection."""
        # Check whether the project already exists in the view.
        index = self.GetProjectSelection(document)
        # If the project does not exist yet, add the new document.
        if index == -1:
            index = self._prject_browser.AddProject(self._MakeProjectName(document))
            self._documents.append(document)
        self._prject_browser.project_combox.current(index)
        self.ProjectSelect()
    def LoadDocuments(self):
        """Repopulate the project choice control from the open documents.

        NOTE(review): this uses ``self._projectChoice``, a wx-era widget,
        while the rest of the class uses ``self._prject_browser`` -- looks
        like legacy/dead code; confirm it is still reachable.
        """
        self._projectChoice.Clear()
        for document in self._documents:
            i = self._projectChoice.Append(self._MakeProjectName(document),getProjectBitmap(), document)
            if document == self.GetDocument():
                self._projectChoice.SetSelection(i)
    def AddProjectRoot(self,document_or_name):
        """Clear the tree and insert the root node for a project.

        *document_or_name* may be either a project document or a plain
        string used directly as the root label.
        """
        self._prject_browser.clear()
        # Distinguish the string types of Python 2 and Python 3: Python 2
        # uses basestring, Python 3 uses str (six.string_types covers both).
        if isinstance(document_or_name,six.string_types[0]):
            name = document_or_name
            text = name
        else:
            document = document_or_name
            text = document.GetModel().Name
        root_item = self._treeCtrl.insert("", "end", text=text,image=self._treeCtrl.GetProjectIcon())
        return root_item
    def AddFolderItem(self,document,folderPath):
        """Create tree item(s) for *folderPath* and return them.

        *document* is accepted for interface symmetry but unused here.
        """
        return self._treeCtrl.AddFolder(folderPath)
    def LoadProject(self, document):
        """Populate the project tree from *document*, restoring folder
        expand state and highlighting the configured startup file."""
        GetApp().configure(cursor="circle")
        try:
            # When switching projects reset the bold item; if the project
            # has a startup file, that file's node is made bold below.
            self._bold_item = None
            rootItem = self.AddProjectRoot(document)
            if document:
                docFilePath = document.GetFilename()
                folders = document.GetModel().logicalFolders
                folders.sort()
                folderItems = []
                for folderPath in folders:
                    folderItems = folderItems + self.AddFolderItem(document,folderPath)
                for file in document.GetModel()._files:
                    folder = file.logicalFolder
                    if folder:
                        # Walk down the tree following the folder path parts.
                        folderTree = folder.split('/')
                        item = rootItem
                        for folderName in folderTree:
                            found = False
                            for child in self._treeCtrl.get_children(item):
                                if self._treeCtrl.item(child, "text") == folderName:
                                    item = child
                                    found = True
                                    break
                            if not found:
                                #print "error folder '%s' not found for %s" % (folder, file.filePath)
                                break
                    else:
                        item = rootItem
                    fileItem = self._treeCtrl.AppendItem(item, os.path.basename(file.filePath), file)
                    startupFile = document.GetModel().RunInfo.StartupFile
                    # Mark the project's configured startup file in bold.
                    if startupFile and document.GetModel().fullPath(startupFile) == file.filePath:
                        self._bold_item = fileItem
                        self._treeCtrl.SetItemBold(fileItem)
                        document.GetModel().StartupFile = file
                self._treeCtrl.SortChildren(rootItem)
                for item in folderItems:
                    self._treeCtrl.SortChildren(item)
                if utils.profile_get_int("LoadFolderState", True):
                    self.LoadFolderState()
                self._treeCtrl.focus_set()
                child = self._treeCtrl.GetFirstChild(self._treeCtrl.GetRootItem())
                if child:
                    self._treeCtrl.see(child)
        finally:
            GetApp().configure(cursor="")
    def ProjectHasFocus(self):
        """Return True when the project choice control has keyboard focus.

        NOTE(review): uses wx focus lookup and ``self._projectChoice``,
        which look like leftovers from the wx port -- confirm reachable.
        """
        return (wx.Window.FindFocus() == self._projectChoice)
def FilesHasFocus(self):
""" Does Project Tree have focus """
winWithFocus = wx.Window.FindFocus()
if not winWithFocus:
return False
while winWithFocus:
if winWithFocus == self._treeCtrl:
return True
winWithFocus = winWithFocus.GetParent()
return False
    def ClearFolderState(self):
        """Remove the persisted expand/collapse state for this project."""
        config = GetApp().GetConfig()
        config.DeleteGroup(getProjectKeyName(self.GetDocument()))
    def SaveFolderState(self, event=None):
        """Persist which project folders are currently expanded."""
        if self._loading:
            return
        folderList = []
        folderItemList = self._GetFolderItems(self._treeCtrl.GetRootItem())
        for item in folderItemList:
            # If the node is currently expanded, record its folder path.
            if self._treeCtrl.item(item, "open"):
                folderList.append(self._GetItemFolderPath(item))
        utils.profile_set(getProjectKeyName(self.GetDocument()), repr(folderList))
def LoadFolderState(self):
""" 加载项目文件夹打开或关闭状态"""
self._loading = True
config = GetApp().GetConfig()
openFolderData = config.Read(getProjectKeyName(self.GetDocument()), "")
if openFolderData:
folderList = eval(openFolderData)
folderItemList = self._GetFolderItems(self._treeCtrl.GetRootItem())
for item in folderItemList:
folderPath = self._GetItemFolderPath(item)
if folderPath in folderList:
#展开节点
self._treeCtrl.item(item, open=True)
else:
#关闭节点
self._treeCtrl.item(item, open=False)
self._loading = False
#----------------------------------------------------------------------------
# Control events
#----------------------------------------------------------------------------
    def OnAddNewFile(self):
        """Prompt for a new file and add it to the selected folder, then
        open it and start an inline rename."""
        items = self._treeCtrl.selection()
        if items:
            item = items[0]
            folderPath = self._GetItemFolderPath(item)
        else:
            folderPath = ""
        dlg = newfile.NewFileDialog(self.GetFrame(),_("New FileType"),folderPath)
        if dlg.ShowModal() == constants.ID_OK:
            if self.GetDocument().GetCommandProcessor().Submit(projectcommand.ProjectAddFilesCommand(self.GetDocument(), [dlg.file_path], folderPath=folderPath)):
                self._prject_browser.OpenSelection()
                self.OnRename()
    def OnAddFolder(self):
        """Create a new 'Untitled' folder under the current selection and
        start an inline rename of the new node."""
        if self.GetDocument():
            items = self._treeCtrl.selection()
            if items:
                item = items[0]
                if self._IsItemFile(item):
                    item = self._treeCtrl.parent(item)
                folderDir = self._GetItemFolderPath(item)
            else:
                folderDir = ""
            if folderDir:
                folderDir += "/"
            # Find a unique 'Untitled', 'Untitled2', ... folder name.
            folderPath = "%sUntitled" % folderDir
            i = 1
            while self._treeCtrl.FindFolder(folderPath):
                i += 1
                folderPath = "%sUntitled%s" % (folderDir, i)
            projectdir = self.GetDocument().GetModel().homeDir
            destfolderPath = os.path.join(projectdir,folderPath)
            try:
                os.mkdir(destfolderPath)
            except Exception as e:
                messagebox.showerror(GetApp().GetAppName(),str(e),parent= self.GetFrame())
                return
            self.GetDocument().GetCommandProcessor().Submit(projectcommand.ProjectAddFolderCommand(self, self.GetDocument(), folderPath))
            # Create a placeholder file inside the empty folder so the empty
            # folder node is not pruned from the tree.
            dummy_file = os.path.join(destfolderPath,consts.DUMMY_NODE_TEXT)
            self.GetDocument().GetCommandProcessor().Submit(projectcommand.ProjectAddFilesCommand(self.GetDocument(),[dummy_file],folderPath))
           # self._treeCtrl.UnselectAll()
            item = self._treeCtrl.FindFolder(folderPath)
            self._treeCtrl.selection_set(item)
            self._treeCtrl.focus(item)
            self._treeCtrl.see(item)
            self.OnRename()
    def AddFolder(self, folderPath):
        """Add a folder node to the tree; always returns True."""
        self._treeCtrl.AddFolder(folderPath)
        return True
    def DeleteFolder(self, folderPath,delete_folder_files=True):
        """Delete a project folder (optionally with its on-disk contents)
        together with its tree items."""
        projectdir = self.GetDocument().GetModel().homeDir
        folder_local_path = os.path.join(projectdir,folderPath)
        if delete_folder_files:
            if os.path.exists(folder_local_path):
                try:
                    fileutils.RemoveDir(folder_local_path)
                except Exception as e:
                    messagebox.showerror( _("Delete Folder"),"Could not delete '%s'. %s" % (os.path.basename(folder_local_path), e),
                              parent= self.GetFrame())
                    return
        item = self._treeCtrl.FindFolder(folderPath)
        self.DeleteFolderItems(item)
        dummy_file = os.path.join(folder_local_path,consts.DUMMY_NODE_TEXT)
        # If the placeholder file exists under the folder, it must also be
        # removed from the project for the folder to be fully deleted.
        if self.GetDocument().GetModel().FindFile(dummy_file):
            self.GetDocument().GetCommandProcessor().Submit(projectcommand.ProjectRemoveFilesCommand(self.GetDocument(), [dummy_file]))
        self._treeCtrl.delete(item)
        return True
    def DeleteFolderItems(self,folder_item):
        """Recursively remove the files under *folder_item* from the project.

        Child folders are handled by recursion; plain file items are
        collected and removed in a single command so the removal is one
        undoable step per folder level.
        """
        files = []
        items = self._treeCtrl.get_children(folder_item)
        for item in items:
            if self._treeCtrl.GetChildrenCount(item):
                self.DeleteFolderItems(item)
            else:
                file = self._GetItemFile(item)
                files.append(file)
        if files:
            self.GetDocument().GetCommandProcessor().Submit(projectcommand.ProjectRemoveFilesCommand(self.GetDocument(), files))
    def OnAddFileToProject(self):
        """Ask the user for files and add them to the selected folder."""
        project_template = self.GetDocumentManager().FindTemplateForTestPath(consts.PROJECT_EXTENSION)
        # Deliberately no initialdir here: the dialog then reopens in the
        # directory that was used last time.
        descrs = strutils.gen_file_filters(project_template.GetDocumentType())
        paths = filedialog.askopenfilename(
            master=self._prject_browser,
            filetypes=descrs,
            multiple=True
        )
        if not paths:
            return
        newPaths = []
        # All paths must be normalised first.
        for path in paths:
            newPaths.append(fileutils.opj(path))
        folderPath = None
        item = self._treeCtrl.GetSingleSelectItem()
        if item:
            if not self._IsItemFile(item):
                folderPath = self._GetItemFolderPath(item)
        self.GetDocument().GetCommandProcessor().Submit(projectcommand.ProjectAddFilesCommand(self.GetDocument(), newPaths, folderPath=folderPath))
        self.Activate()  # after add, should put focus on project editor
    def OnAddDirToProject(self):
        """Ask for a directory and add its (optionally filtered) files to
        the project."""
        class AddDirProjectDialog(ui_base.CommonModaldialog):
            """Modal dialog collecting directory, file-type filter and a
            recurse-into-subdirectories flag."""
            def __init__(self, parent,view):
                self._view = view
                ui_base.CommonModaldialog.__init__(self, parent)
                self.title(_("Add Directory Files to Project"))
                row = ttk.Frame(self.main_frame)
                ttk.Label(row, text=_("Directory:")).pack(side=tk.LEFT)
                self.dir_var = tk.StringVar(value=os.path.dirname(self._view.GetDocument().GetFilename()))
                dirCtrl = ttk.Entry(row, textvariable=self.dir_var)
                dirCtrl.pack(side=tk.LEFT,fill="x",expand=1)
               # dirCtrl.SetToolTipString(dirCtrl.GetValue())
                findDirButton = ttk.Button(row,text=_("Browse..."),command=self.OnBrowseButton)
                findDirButton.pack(side=tk.LEFT,padx=(consts.DEFAUT_CONTRL_PAD_X,0))
                row.pack(fill="x",padx=consts.DEFAUT_CONTRL_PAD_X,pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
                self.visibleTemplates = []
                for template in self._view.GetDocumentManager()._templates:
                    if template.IsVisible() and not isinstance(template,ProjectTemplate):
                        self.visibleTemplates.append(template)
                choices = []
                descr = ''
                for template in self.visibleTemplates:
                    if len(descr) > 0:
                        # NOTE(review): this concatenation is overwritten by
                        # the next assignment -- looks like a latent bug.
                        descr = descr + _('|')
                    descr = _(template.GetDescription()) + " (" + template.GetFileFilter() + ")"
                    choices.append(descr)
                choices.insert(0, _("All Files") + "(*.*)")  # first item
                row = ttk.Frame(self.main_frame)
                ttk.Label(row,text=_("Files of type:")).pack(side=tk.LEFT)
                self.filter_var = tk.StringVar()
                self.filterChoice = ttk.Combobox(row, values=choices,textvariable=self.filter_var)
                self.filterChoice.current(0)
                self.filterChoice['state'] = 'readonly'
                self.filterChoice.pack(side=tk.LEFT,fill="x",expand=1)
                row.pack(fill="x",padx=consts.DEFAUT_CONTRL_PAD_X,pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
                misc.create_tooltip(self.filterChoice,_("Select file type filter."))
                self.subfolderChkVar = tk.IntVar(value=True)
                # NOTE(review): pack() returns None, so subfolderCtrl is
                # always None here.
                subfolderCtrl = ttk.Checkbutton(self.main_frame, text=_("Add files from subdirectories"),variable=self.subfolderChkVar).pack(fill="x",padx=consts.DEFAUT_CONTRL_PAD_X,pady=(consts.DEFAUT_CONTRL_PAD_Y,0))
               # subfolderCtrl.SetValue(True)
                self.AddokcancelButton()
            def OnBrowseButton(self):
                """Let the user pick the directory to scan."""
                path = filedialog.askdirectory(title=_("Choose a directory:"))
                if not path:
                    return
                self.dir_var.set(fileutils.opj(path))
            def _ok(self):
                # Resolve the selected filter into a template (None = all).
                index = self.filterChoice.current()
                self.template = None
                lastIndex = len(self.filterChoice['values']) -1
                if index and index != lastIndex: # if not All or Any
                    self.template = self.visibleTemplates[index-1]
                ui_base.CommonModaldialog._ok(self)
        dlg = AddDirProjectDialog(GetApp().GetTopWindow(),self)
        status = dlg.ShowModal()
        if status == constants.ID_OK:
            if not os.path.exists(dlg.dir_var.get()):
                messagebox.showinfo(GetApp().GetAppName(),
                              _("directory '%s' does not exist.") % dlg.dir_var.get(),
                              parent=self.GetFrame(),
                              )
                return
        if status == constants.ID_OK:
            GetApp().configure(cursor="circle")
            try:
                doc = self.GetDocument()
                searchSubfolders = dlg.subfolderChkVar.get()
                dirString = dlg.dir_var.get()
                if os.path.isfile(dirString):
                    # If they pick a file explicitly, we won't prevent them from adding it even if it doesn't match the filter.
                    # We'll assume they know what they're doing.
                    paths = [dirString]
                else:
                    paths = []
                    template = dlg.template
                    # do search in files on disk
                    for root, dirs, files in os.walk(dirString):
                        if not searchSubfolders and root != dirString:
                            break
                        for name in files:
                            if template is None: # All
                                filename = os.path.join(root, name)
                                # if already in project, don't add it, otherwise undo will remove it from project even though it was already in it.
                                if not doc.IsFileInProject(filename):
                                    paths.append(filename)
                            else: # use selected filter
                                if template.FileMatchesTemplate(name):
                                    filename = os.path.join(root, name)
                                    # if already in project, don't add it, otherwise undo will remove it from project even though it was already in it.
                                    if not doc.IsFileInProject(filename):
                                        paths.append(filename)
                folderPath = None
                selections = self._treeCtrl.selection()
                if selections:
                    item = selections[0]
                    if not self._IsItemFile(item):
                        folderPath = self._GetItemFolderPath(item)
                doc.GetCommandProcessor().Submit(projectcommand.ProjectAddFilesCommand(doc, paths, folderPath=folderPath))
                self.Activate()  # after add, should put focus on project editor
            finally:
                GetApp().configure(cursor="")
    def DoAddFilesToProject(self, filePaths, folderPath):
        """Add *filePaths* to the project under *folderPath*.

        Used by drag-and-drop to add files to the current project.
        """
        self.GetDocument().GetCommandProcessor().Submit(projectcommand.ProjectAddFilesCommand(self.GetDocument(), filePaths, folderPath))
    def OnRename(self):
        """Start renaming the selected tree item.

        On Linux a simple input dialog is used; elsewhere the tree's
        inline label editor is invoked.
        """
        items = self._treeCtrl.selection()
        if not items:
            return
        item = items[0]
        if utils.is_linux():
            text = tkSimpleDialog.askstring(
                _("Enter New Name"),
                _("Enter New Name"),
                initialvalue=self._treeCtrl.item(item,"text"),
                parent=self.GetFrame()
            )
            if not text:
                return
            self.ChangeLabel(item, text)
        else:
            if items:
                self._treeCtrl.EditLabel(item)
    def OnEndLabelEdit(self, item,newName):
        """Commit an inline label edit on *item* to *newName*.

        Editing the root node renames the project itself; any other node is
        delegated to ChangeLabel.
        """
        if item == self._treeCtrl.GetRootItem():
            if not newName:
                #wx.MessageBox(_("project name could not be empty"),style=wx.OK|wx.ICON_ERROR)
                return
            else:
                # Check whether the project name actually changed.
                if self.GetDocument().GetModel().Name != newName:
                    self.GetDocument().GetModel().Name = newName
                    self.GetDocument().Modify(True)
                    # Update the node text.
                    self._treeCtrl.item(item,text=newName)
            return
        if not self.ChangeLabel(item, newName):
            return
def ChangeLabel(self, item, newName):
if not newName:
return False
if self._IsItemFile(item):
oldFilePath = self._GetItemFilePath(item)
newFilePath = os.path.join(os.path.dirname(oldFilePath), newName)
doc = self.GetDocument()
parent_item = self._treeCtrl.parent(item)
if not doc.GetCommandProcessor().Submit(projectcommand.ProjectRenameFileCommand(doc, oldFilePath, newFilePath)):
return False
self._treeCtrl.SortChildren(self._treeCtrl.parent(parent_item))
else:
oldFolderPath = self._GetItemFolderPath(item)
newFolderPath = os.path.dirname(oldFolderPath)
if newFolderPath:
newFolderPath += "/"
newFolderPath += newName
if newFolderPath == oldFolderPath:
return True
if self._treeCtrl.FindFolder(newFolderPath):
messagebox.showwarning(_("Rename Folder"),_("Folder '%s' already | |
from typing import Union, List, Dict, Set
import copy
import random
# Class representing one of the semaphore operations, wait or signal.
# semaphore_id : id of the semaphore on which the operation is performed
# modification : when positive, the operation is a signal incrementing the semaphore by modification;
# when negative, the operation is a wait decrementing the semaphore by -modification.
# (The class has no `process` attribute; which operations may run at which
# time is tracked by the Node graph and the Execution class below.)
class Operation:
    """A single semaphore operation (wait or signal).

    semaphore_id -- id of the semaphore the operation acts on (None for a
        no-op placeholder).
    modification -- positive: signal incrementing the semaphore by that
        amount; negative: wait decrementing it by the absolute value;
        zero: the operation does nothing.
    """
    semaphore_id: Union[int, None]
    modification: int

    def __init__(self, semaphore_id: Union[int, None], modification: int):
        self.semaphore_id = semaphore_id
        self.modification = modification

    def __str__(self):
        delta = self.modification
        if delta == 0:
            return "Empty operation\n"
        if delta > 0:
            return "Signal with " + str(delta) + " on semaphore with id " + str(self.semaphore_id) + "\n"
        return "Wait for " + str(-delta) + " on semaphore with id " + str(self.semaphore_id) + "\n"

    def is_none(self):
        """Return True when this operation is a no-op (modification == 0)."""
        return not self.modification
# Class representing a counting semaphore on which wait and signal operate.
# id : semaphore's id
# original_id : the id the semaphore had in the source program
# count : semaphore count (currently available units)
# queue : FIFO queue of pairs of:
# 1) operation waiting on the semaphore
# 2) index of the operation in the execution chain
# (The `restricted_processes` structure once described here is not part of
# this class.)
class Semaphore:
    """Counting semaphore used by the simulated execution.

    id -- internal semaphore id.
    original_id -- the id the semaphore had in the source program.
    count -- number of currently available units.
    queue -- FIFO list of (blocked operation node, index in the execution
        chain) pairs.
    """
    id: int
    original_id: int
    count: int
    queue: list[tuple['Node', int]]

    def __init__(self, id: int, count: int, original_id: int):
        self.id = id
        self.count = count
        self.queue = []
        self.original_id = original_id

    def __str__(self):
        header = "Semaphore with id " + str(self.id) + " has " + str(self.count) + " units\n"
        if not self.queue:
            return header + "\n"
        queue_text = "Semaphore's queue:\n"
        for blocked, chain_index in self.queue:
            queue_text += "Index in execution chain: " + str(chain_index) + "\n"
            queue_text += str(blocked)
        return header + queue_text + "\n"

    def get_debug_information(self) -> dict[str, any]:
        """Return a JSON-friendly snapshot of the semaphore state."""
        snapshot = {"sem_id": self.original_id, "unit_count": self.count, "waiting": len(self.queue)}
        snapshot["queue"] = [node.get_debug_info() for node, _ in self.queue]
        return snapshot

    def do_operation(self, operation: 'Node', index: int) -> bool:
        """Apply *operation* (found at *index* in the execution chain).

        When applying it would drive the count negative the operation is
        queued and False is returned; otherwise the count is updated and
        True is returned.
        """
        if self.count + operation.operation.modification < 0:
            self.queue.append((operation, index))
            return False
        self.count += operation.operation.modification
        return True

    def try_get_operation(self) -> Union[tuple['Node', int], tuple[None, int]]:
        """Pop and return the queue head (with its chain index) if it can
        now proceed; return (None, -1) otherwise."""
        if self.queue and self.count + self.queue[0][0].operation.modification >= 0:
            return self.queue.pop(0)
        return None, -1
# Class representing one program execution
# semaphores : map from semaphore id to semaphore, containing all the semaphores
# created during the program's execution
# execution_chain : the list of the operations to be executed
# executed : the list such that executed[i] == 1 iff execution_chain[i] was executed
# first_possible_index : equals n iff executed[i] == 1 for each 0 <= i < n
# executed_count : number of executed operations from the execution_chain
class Execution:
    """One simulated program execution over a chain of semaphore operations.

    semaphores -- map from semaphore id to Semaphore (deep-copied so each
        execution owns its own state).
    execution_chain -- the list of operation nodes to execute.
    executed -- executed[i] == 1 iff execution_chain[i] has been executed.
    first_possible_index -- smallest n with executed[i] == 1 for all i < n.
    executed_count -- number of executed operations.
    previous_node_finished_count -- per node, how many predecessors finished.
    waiting_on_semaphore -- nodes currently blocked in a semaphore queue.
    """
    semaphores: Dict[int, Semaphore]
    execution_chain: List['Node']
    executed: List[int]
    first_possible_index: int
    executed_count: int
    previous_node_finished_count: Dict['Node', int]
    waiting_on_semaphore: Set['Node']

    def __init__(self, semaphores: Dict[int, Semaphore], execution_chain: List['Node']):
        # Deep-copy so this execution cannot mutate the caller's semaphores.
        self.semaphores = copy.deepcopy(semaphores)
        self.execution_chain = execution_chain
        self.executed = [0] * (len(execution_chain))
        self.first_possible_index = 0
        self.executed_count = 0
        self.previous_node_finished_count = dict()
        self.waiting_on_semaphore = set()

    def __str__(self):
        result = ""
        result += "Semaphores' state:\n"
        for _, semaphore in self.semaphores.items():
            result += str(semaphore)
        result += "\n"
        # Short chains are printed in full (with ANSI colors); long chains
        # show a 20-entry window starting at first_possible_index.
        if len(self.execution_chain) <= 20:
            for index, node in enumerate(self.execution_chain):
                if self.executed[index] == 1:
                    result += "\u001b[32m" + "Executed: \u001b[0m\n"
                else:
                    result += "\u001b[31m" + "Not executed: \u001b[0m\n"
                result += str(node)
            result += "\n"
        else:
            for index in range(self.first_possible_index,
                               min(len(self.execution_chain), self.first_possible_index + 20)):
                if self.executed[index] == 1:
                    result += "Operation with index " + str(index) + " was executed: \n"
                else:
                    result += "Operation with index " + str(index) + " was not executed: \n"
                result += str(self.execution_chain[index])
        return result

    def get_debug_info(self) -> list[dict[str, any]]:
        """Return debug snapshots of all semaphores."""
        result = []
        for semaphore in self.semaphores.values():
            result.append(semaphore.get_debug_information())
        return result

    # Function that checks if the given operation can be executed:
    # all predecessors finished and it is not blocked on a semaphore.
    def is_executable(self, operation: 'Node') -> bool:
        return self.previous_node_finished_count.get(operation,
                                                     0) == operation.prev_count and operation not in self.waiting_on_semaphore

    def mark_executed(self, operation: 'Node') -> None:
        # Tell every successor that one more of its predecessors finished.
        for child in operation.next:
            self.previous_node_finished_count[child] = self.previous_node_finished_count.get(child, 0) + 1

    def _record_completion(self, operation: 'Node', index: int) -> None:
        """Shared bookkeeping for an operation that just executed.

        Extracted from do_operation, where it was duplicated verbatim for
        the no-op and semaphore branches.
        """
        self.mark_executed(operation)
        self.executed_count += 1
        self.executed[index] = 1
        # Advance past the contiguous prefix of executed operations.
        if index == self.first_possible_index:
            self.first_possible_index += 1
            while self.first_possible_index < len(self.executed) and self.executed[self.first_possible_index]:
                self.first_possible_index += 1

    # Function that tries to execute the given operation with the given index.
    # If the operation needs to wait on its semaphore it returns False,
    # otherwise it returns True.
    def do_operation(self, operation: 'Node', index: int) -> bool:
        assert(self.executed[index] == 0)
        if operation.operation.is_none():
            self._record_completion(operation, index)
            return True
        operation_semaphore: Semaphore = self.semaphores[operation.operation.semaphore_id]
        if not operation_semaphore.do_operation(operation, index):
            self.waiting_on_semaphore.add(operation)
            return False
        self._record_completion(operation, index)
        return True

    # A function that looks for a next operation that can be executed.
    # Returns that operation together with its index if one is found,
    # returns None, -1 otherwise.
    def next_possible_operation(self) -> tuple[Union['Node', None], int]:
        # If all the operations were executed there is no next operation.
        if self.first_possible_index >= len(self.execution_chain):
            return None, -1
        # If the first possible operation can be executed it is returned.
        if self.is_executable(self.execution_chain[self.first_possible_index]):
            assert(self.executed[self.first_possible_index] == 0)
            return_index = self.first_possible_index
            return self.execution_chain[return_index], return_index
        # The semaphores are checked to find a next operation.
        for _, semaphore in self.semaphores.items():
            operation, index = semaphore.try_get_operation()
            if operation is not None:
                self.waiting_on_semaphore.remove(operation)
                return operation, index
        # The execution chain is checked for executable signal/no-op operations.
        for i in range(self.first_possible_index + 1, len(self.execution_chain)):
            if not self.executed[i] and self.is_executable(self.execution_chain[i]) and self.execution_chain[
                    i].operation.modification >= 0:
                return self.execution_chain[i], i
        return None, -1

    # A function that attempts to execute all the operations from the
    # execution chain; returns False if it fails and True otherwise.
    def try_executing(self) -> bool:
        assert(len(self.previous_node_finished_count) == 0)
        while True:
            next_operation, index = self.next_possible_operation()
            if next_operation is None:
                if self.executed_count >= len(self.execution_chain):
                    return True
                return False
            self.do_operation(next_operation, index)
# A class representing the execution graph's nodes. There exists an
# edge from Node A to Node B iff the operation in Node A must be
# executed before the operation in Node B.
# operation : the operation to be executed
# next : set of successor Nodes; prev_count : number of predecessor Nodes
class Node:
    """Node of the execution graph.

    An edge from node A to node B means A's operation must execute before
    B's operation.

    operation -- the operation held by this node.
    next -- set of successor nodes.
    prev_count -- number of predecessor nodes.
    original_post -- index of the source post this operation came from
        (exposed via get_debug_info).
    """
    operation: Operation
    next: Set['Node']
    prev_count: int
    original_post: int

    def __init__(self, original_post: int, operation: Operation = Operation(None, 0)):
        # NOTE(review): the default Operation instance is shared between
        # calls; safe only while Operation objects are never mutated.
        self.original_post = original_post
        self.operation = operation
        self.next = set()
        self.prev_count = 0

    def __str__(self):
        return str(self.operation)

    def get_debug_info(self) -> dict[str, Union[str, int]]:
        """Return a JSON-friendly description of this node's operation."""
        operation = self.operation
        post = self.original_post
        # Renamed from 'type' so the builtin is no longer shadowed.
        op_type = ""
        count = operation.modification
        if count > 0:
            op_type = "signal"
        if count == 0:
            op_type = "none"
        if count < 0:
            op_type = "wait"
            count = -count
        return {"original_post": post, "type": op_type, "count": count}

    def add_parent(self):
        """Record one more incoming edge."""
        self.prev_count += 1

    def add_child(self, child: 'Node'):
        """Add an edge self -> child and update the child's predecessor count."""
        self.next.add(child)
        child.add_parent()

    def is_none(self):
        """Return True when this node holds a no-op operation."""
        return self.operation.is_none()

    # Erases Nodes with a no-op operation everywhere except for this one,
    # splicing their children into this node's successor set.
    def erase_none(self):
        ind = 0
        next_list = list(self.next)
        while ind < len(next_list):
            if (next_list[ind]).is_none():
                next_list += list(next_list[ind].next)
                next_list.pop(ind)
            else:
                ind += 1
        self.next = set(next_list)
        for child in self.next:
            child.erase_none()
# Class Graph stores the information about semaphore operation tree and process dependencies.
# It implements a method for detecting occurred and potential deadlocks - is_deadlock_free.
class Graph:
root: Node
semaphores: Dict[int, Semaphore]
def __init__(self, graph: Node, semaphores: Dict[int, Semaphore]):
self.root = copy.deepcopy(graph)
self.semaphores = semaphores
# Simplifies graph by deleting all the operations that are done on the semaphores
# which are not | |
str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "root".',
"Max Length": None,
"Required Character": None,
},
{"Name": "esxi_hypervisor_admin_password",
"Value": esxi_hypervisor_admin_password,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Local Credential Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "C1sco12345!".',
"Max Length": None,
"Required Character": None,
},
{"Name": "storage_controller_vm_root_user_password",
"Value": storage_controller_vm_root_user_password,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Local Credential Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "C1sco12345!".',
"Max Length": None,
"Required Character": None,
},
{"Name": "timezone",
"Value": timezone,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex System Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "Etc/GMT" or "America/New_York". A full list of supported values can be found on the Intersight GUI or the Olson tz database.',
"Max Length": None,
"Required Character": "/",
},
{"Name": "dns_suffix",
"Value": dns_suffix,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex System Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "dcloud.cisco.com".',
"Max Length": None,
"Required Character": None,
},
{"Name": "dns_servers_list",
"Value": dns_servers_list,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex System Configuration Policy Settings",
"Expected Type": list,
"Supplemental Type": "ip_list",
"Restricted Value": None,
"Example Value": 'a list such as ["198.18.133.1"].',
"Max Length": None,
"Required Character": None,
},
{"Name": "ntp_servers_list",
"Value": ntp_servers_list,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex System Configuration Policy Settings",
"Expected Type": list,
"Supplemental Type": "ip_list",
"Restricted Value": None,
"Example Value": 'a list such as ["198.18.128.1"].',
"Max Length": None,
"Required Character": None,
},
{"Name": "vcenter_fqdn_or_ip",
"Value": vcenter_fqdn_or_ip,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex VMware vCenter Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": "ip_string",
"Restricted Value": None,
"Example Value": 'a string such as "198.18.133.30".',
"Max Length": 15,
"Required Character": None,
},
{"Name": "vcenter_admin_username",
"Value": vcenter_admin_username,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex VMware vCenter Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "<EMAIL>".',
"Max Length": None,
"Required Character": None,
},
{"Name": "vcenter_admin_password",
"Value": vcenter_admin_password,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex VMware vCenter Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "C1sco12345!".',
"Max Length": None,
"Required Character": None,
},
{"Name": "vcenter_hosts_and_clusters_datacenter_name",
"Value": vcenter_hosts_and_clusters_datacenter_name,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex VMware vCenter Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "dCloud-DC".',
"Max Length": None,
"Required Character": None,
},
{"Name": "enable_vdi_optimization",
"Value": enable_vdi_optimization,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Cluster Storage Configuration Policy Settings",
"Expected Type": bool,
"Supplemental Type": None,
"Restricted Value": (True, False),
"Example Value": 'a Boolean of True or False.',
"Max Length": None,
"Required Character": None,
},
{"Name": "cleanup_disk_partitions",
"Value": cleanup_disk_partitions,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Cluster Storage Configuration Policy Settings",
"Expected Type": bool,
"Supplemental Type": None,
"Restricted Value": (True, False),
"Example Value": 'a Boolean of True or False.',
"Max Length": None,
"Required Character": None,
},
{"Name": "esxi_hostname_prefix",
"Value": esxi_hostname_prefix,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Node Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "hx-edge-esxi". The string value can be no longer than 60 characters.',
"Max Length": 60,
"Required Character": None,
},
{"Name": "esxi_mgmt_ip_range_start_address",
"Value": esxi_mgmt_ip_range_start_address,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Node Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": "ip_string",
"Restricted Value": None,
"Example Value": 'a string such as "198.18.135.101".',
"Max Length": 15,
"Required Character": ".",
},
{"Name": "esxi_mgmt_ip_range_end_address",
"Value": esxi_mgmt_ip_range_end_address,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Node Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": "ip_string",
"Restricted Value": None,
"Example Value": 'a string such as "198.18.135.103".',
"Max Length": 15,
"Required Character": ".",
},
{"Name": "esxi_mgmt_ip_range_subnet_mask",
"Value": esxi_mgmt_ip_range_subnet_mask,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Node Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": "ip_string",
"Restricted Value": None,
"Example Value": 'a string such as "255.255.192.0".',
"Max Length": 15,
"Required Character": ".",
},
{"Name": "esxi_mgmt_ip_range_gateway",
"Value": esxi_mgmt_ip_range_gateway,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Node Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": "ip_string",
"Restricted Value": None,
"Example Value": 'a string such as "198.18.128.1".',
"Max Length": 15,
"Required Character": ".",
},
{"Name": "storage_controller_vm_ip_range_start_address",
"Value": storage_controller_vm_ip_range_start_address,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Node Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": "ip_string",
"Restricted Value": None,
"Example Value": 'a string such as "198.18.135.104".',
"Max Length": 15,
"Required Character": ".",
},
{"Name": "storage_controller_vm_ip_range_end_address",
"Value": storage_controller_vm_ip_range_end_address,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Node Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": "ip_string",
"Restricted Value": None,
"Example Value": 'a string such as "198.18.135.106".',
"Max Length": 15,
"Required Character": ".",
},
{"Name": "storage_controller_vm_ip_range_subnet_mask",
"Value": storage_controller_vm_ip_range_subnet_mask,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Node Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": "ip_string",
"Restricted Value": None,
"Example Value": 'a string such as "255.255.192.0".',
"Max Length": 15,
"Required Character": ".",
},
{"Name": "storage_controller_vm_ip_range_gateway",
"Value": storage_controller_vm_ip_range_gateway,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Node Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": "ip_string",
"Restricted Value": None,
"Example Value": 'a string such as "198.18.128.1".',
"Max Length": 15,
"Required Character": ".",
},
{"Name": "hx_node_uplink_speed",
"Value": hx_node_uplink_speed,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Cluster Network Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": ("1G", "10G"),
"Example Value": 'either the string "1G" or "10G" only. The option "10G" will also support and enable higher speeds.',
"Max Length": 3,
"Required Character": None,
},
{"Name": "hx_mac_prefix_start_address",
"Value": hx_mac_prefix_start_address,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Cluster Network Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "00". This value is only used with 10G+ uplink speeds. The MAC Address OUI (Organizationally Unique Identifier) of 00:25:B5 is hard-coded.',
"Max Length": 2,
"Required Character": None,
},
{"Name": "hx_mac_prefix_end_address",
"Value": hx_mac_prefix_end_address,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Cluster Network Configuration Policy Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "00". This value is only used with 10G+ uplink speeds. The MAC Address OUI (Organizationally Unique Identifier) of 00:25:B5 is hard-coded.',
"Max Length": 2,
"Required Character": None,
},
{"Name": "hx_mgmt_vlan_id",
"Value": hx_mgmt_vlan_id,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Cluster Network Configuration Policy Settings",
"Expected Type": int,
"Supplemental Type": None,
"Restricted Value": range(0,4096),
"Example Value": 'an integer from 0 - 4095',
"Max Length": None,
"Required Character": None,
},
{"Name": "enable_hx_node_uplink_jumbo_frames",
"Value": enable_hx_node_uplink_jumbo_frames,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Cluster Network Configuration Policy Settings",
"Expected Type": bool,
"Supplemental Type": None,
"Restricted Value": (True, False),
"Example Value": 'a Boolean of True or False.',
"Max Length": None,
"Required Character": None,
},
{"Name": "hx_storage_vlan_id",
"Value": hx_storage_vlan_id,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Storage Network Setting",
"Expected Type": int,
"Supplemental Type": None,
"Restricted Value": range(1,4096),
"Example Value": 'an integer from 1 - 4095',
"Max Length": None,
"Required Character": None,
},
{"Name": "hx_connect_mgmt_ip_address",
"Value": hx_connect_mgmt_ip_address,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Cluster Management IP Address and MAC Prefix Address Settings",
"Expected Type": str,
"Supplemental Type": "ip_string",
"Restricted Value": None,
"Example Value": 'a string such as "198.18.135.100".',
"Max Length": 15,
"Required Character": ".",
},
{"Name": "hx_mac_prefix_address",
"Value": hx_mac_prefix_address,
"Configuration Section": "MODULE REQUIREMENT 3 --> HyperFlex Cluster Management IP Address and MAC Prefix Address Settings",
"Expected Type": str,
"Supplemental Type": None,
"Restricted Value": None,
"Example Value": 'a string such as "00". This value is only used with 10G+ uplink speeds. The MAC Address OUI (Organizationally Unique Identifier) of 00:25:B5 is hard-coded.',
"Max Length": 2,
"Required Character": None,
},
)
# Begin performing a preliminary check of the provided variable values
print("Performing a preliminary check of the provided variable values...\n")
for variable in variable_dictionary:
# Verify the provided | |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .dsnt import spatial_softmax2d, spatial_expectation2d
from cirtorch.utils.grid import create_meshgrid, create_meshgrid3d
from ..conversions import normalize_pixel_coordinates, normalize_pixel_coordinates3d
from cirtorch.features.nms import nms3d
from cirtorch.filters.sobel import spatial_gradient3d
def _get_window_grid_kernel2d(h, w, device=torch.device('cpu')):
    """Build a conv kernel holding per-pixel window coordinates.

    The coordinates are normalized to [-1, 1] and are residual to the
    window center, laid out so the kernel can be applied with F.conv2d.
    """
    grid = normalize_pixel_coordinates(
        create_meshgrid(h, w, False, device=device), h, w)
    # (1, h, w, 2) -> (2, 1, h, w): one output channel per coordinate.
    return grid.permute(3, 0, 1, 2)
def _get_center_kernel2d(h, w, device=torch.device('cpu')):
"""
Helper function, which generates a kernel to return center coordinates,
when applied with F.conv2d to 2d coordinates grid.
"""
center_kernel = torch.zeros(2, 2, h, w, device=device)
# If the size is odd, we have one pixel for center, if even - 2
if h % 2 != 0:
h_i1 = h // 2
h_i2 = (h // 2) + 1
else:
h_i1 = (h // 2) - 1
h_i2 = (h // 2) + 1
if w % 2 != 0:
w_i1 = w // 2
w_i2 = (w // 2) + 1
else:
w_i1 = (w // 2) - 1
w_i2 = (w // 2) + 1
center_kernel[(0, 1), (0, 1), h_i1: h_i2, w_i1: w_i2] = 1.0 / float(((h_i2 - h_i1) * (w_i2 - w_i1)))
return center_kernel
def _get_center_kernel3d(d, h, w, device=torch.device('cpu')):
"""
Helper function, which generates a kernel to return center coordinates,
when applied with F.conv2d to 3d coordinates grid.
"""
center_kernel = torch.zeros(3, 3, d, h, w, device=device)
# If the size is odd, we have one pixel for center, if even - 2
if h % 2 != 0:
h_i1 = h // 2
h_i2 = (h // 2) + 1
else:
h_i1 = (h // 2) - 1
h_i2 = (h // 2) + 1
if w % 2 != 0:
w_i1 = w // 2
w_i2 = (w // 2) + 1
else:
w_i1 = (w // 2) - 1
w_i2 = (w // 2) + 1
if d % 2 != 0:
d_i1 = d // 2
d_i2 = (d // 2) + 1
else:
d_i1 = (d // 2) - 1
d_i2 = (d // 2) + 1
center_num = float((h_i2 - h_i1) * (w_i2 - w_i1) * (d_i2 - d_i1))
center_kernel[(0, 1, 2), (0, 1, 2), d_i1: d_i2, h_i1: h_i2, w_i1: w_i2] = 1.0 / center_num
return center_kernel
def _get_window_grid_kernel3d(d, h, w, device=torch.device('cpu')):
    """Build a conv kernel of 3d window coordinates, residual to the
    window center (normalized to [-1, 1])."""
    xy_grid = create_meshgrid(h, w, True, device=device)
    if d > 1:
        depth = torch.linspace(-1, 1, d, device=device).view(d, 1, 1, 1)
    else:
        # A single depth slice sits exactly at the center.
        depth = torch.zeros(1, 1, 1, 1, device=device)
    full_grid = torch.cat(
        [depth.repeat(1, h, w, 1).contiguous(), xy_grid.repeat(d, 1, 1, 1)],
        dim=3)
    # (d, h, w, 3) -> (3, 1, d, h, w) for use as an F.conv3d weight.
    return full_grid.permute(3, 0, 1, 2).unsqueeze(1)
class ConvSoftArgmax2d(nn.Module):
    """Per-window soft argmax 2d, as a module.

    Stores the call parameters and delegates to
    `geometry.subpix.conv_soft_argmax2d` in :meth:`forward`.
    """

    def __init__(self, kernel_size = (3, 3), stride = (1, 1), padding = (1, 1),
                 temperature = torch.tensor(1.0), normalized_coordinates = True,
                 eps = 1e-8, output_value = False):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.temperature = temperature
        self.normalized_coordinates = normalized_coordinates
        self.eps = eps
        self.output_value = output_value

    def forward(self, x):
        # Forward all stored settings to the functional implementation.
        return conv_soft_argmax2d(x,
                                  kernel_size=self.kernel_size,
                                  stride=self.stride,
                                  padding=self.padding,
                                  temperature=self.temperature,
                                  normalized_coordinates=self.normalized_coordinates,
                                  eps=self.eps,
                                  output_value=self.output_value)
class ConvSoftArgmax3d(nn.Module):
    """Per-window soft argmax 3d, as a module.

    Stores the call parameters and delegates to
    `geometry.subpix.conv_soft_argmax3d` in :meth:`forward`.
    """

    def __init__(self, kernel_size = (3, 3, 3), stride = (1, 1, 1), padding = (1, 1, 1),
                 temperature = torch.tensor(1.0), normalized_coordinates = False,
                 eps = 1e-8, output_value = True, strict_maxima_bonus = 0.0):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.temperature = temperature
        self.normalized_coordinates = normalized_coordinates
        self.eps = eps
        self.output_value = output_value
        self.strict_maxima_bonus = strict_maxima_bonus

    def forward(self, x):
        # Forward all stored settings to the functional implementation.
        return conv_soft_argmax3d(x,
                                  kernel_size=self.kernel_size,
                                  stride=self.stride,
                                  padding=self.padding,
                                  temperature=self.temperature,
                                  normalized_coordinates=self.normalized_coordinates,
                                  eps=self.eps,
                                  output_value=self.output_value,
                                  strict_maxima_bonus=self.strict_maxima_bonus)
def conv_soft_argmax2d(input, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), temperature=torch.tensor(1.0),
                       normalized_coordinates=True, eps=1e-8, output_value=False):
    """Compute the convolutional spatial Soft-Argmax 2D over windows of a heatmap.

    For every pooling window, a temperature-scaled softmax over the window
    is used to compute the expected (sub-pixel) coordinates of the maximum,
    and optionally the softmax-pooled heatmap values themselves.

    Args:
        input: heatmap tensor of shape (B, C, H, W).
        kernel_size: pooling window size (h, w).
        stride: pooling stride.
        padding: pooling padding.
        temperature: softmax temperature; must be positive.
        normalized_coordinates: if True, coordinates are remapped to [-1, 1].
        eps: small constant guarding the softmax denominator.
        output_value: if True, also return the softmax-pooled values.

    Returns:
        Coordinates tensor of shape (B, C, 2, H', W'); when ``output_value``
        is True, also the pooled values of shape (B, C, H', W').

    Raises:
        ValueError: if the input is not 4D or temperature is not positive.
    """
    if not len(input.shape) == 4:
        raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
                         .format(input.shape))
    if temperature <= 0:
        raise ValueError("Temperature should be positive float or tensor. Got: {}"
                         .format(temperature))
    b, c, h, w = input.shape
    kx, ky = kernel_size
    device = input.device
    dtype = input.dtype
    # Fold channels into the batch so every map is pooled independently.
    input = input.view(b * c, 1, h, w)
    center_kernel = _get_center_kernel2d(kx, ky, device).to(dtype)
    window_kernel = _get_window_grid_kernel2d(kx, ky, device).to(dtype)
    # applies exponential normalization trick
    x_max = F.adaptive_max_pool2d(input, (1, 1))
    # max is detached to prevent undesired backprop loops in the graph
    x_exp = ((input - x_max.detach()) / temperature).exp()
    # Not available yet in version 1.0, so let's do manually
    pool_coef = float(kx * ky)
    # softmax denominator (avg_pool * window_area == windowed sum)
    den = pool_coef * F.avg_pool2d(x_exp, kernel_size, stride=stride, padding=padding) + eps
    x_softmaxpool = pool_coef * F.avg_pool2d(x_exp * input, kernel_size, stride=stride, padding=padding) / den
    x_softmaxpool = x_softmaxpool.view(b, c, x_softmaxpool.size(2), x_softmaxpool.size(3))
    # We need to output also coordinates
    # Pooled window center coordinates
    grid_global = create_meshgrid(h, w, False, device).to(dtype).permute(0, 3, 1, 2)
    grid_global_pooled = F.conv2d(grid_global, center_kernel, stride=stride, padding=padding)
    # Coordinates of maxima residual to window center
    # prepare kernel
    coords_max = F.conv2d(x_exp, window_kernel, stride=stride, padding=padding)
    coords_max = coords_max / den.expand_as(coords_max)
    # Absolute coordinates = window-center + softmax-weighted residual.
    coords_max = coords_max + grid_global_pooled.expand_as(coords_max)
    if normalized_coordinates:
        coords_max = normalize_pixel_coordinates(coords_max.permute(0, 2, 3, 1), h, w)
        coords_max = coords_max.permute(0, 3, 1, 2)
    # Back B*C -> (b, c)
    coords_max = coords_max.view(b, c, 2, coords_max.size(2), coords_max.size(3))
    if output_value:
        return coords_max, x_softmaxpool
    return coords_max
def conv_soft_argmax3d(input, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1),
                       temperature=torch.tensor(1.0), normalized_coordinates=False, eps=1e-8,
                       output_value=True, strict_maxima_bonus=0.0):
    """Compute the convolutional spatial Soft-Argmax 3D over windows of a heatmap.

    For every pooling window, a temperature-scaled softmax over the window
    is used to compute the expected (sub-voxel) coordinates of the maximum,
    and optionally the softmax-pooled heatmap values. With a positive
    ``strict_maxima_bonus``, values at strict local maxima (per ``nms3d``)
    are boosted by that factor.

    Args:
        input: heatmap tensor of shape (B, C, D, H, W).
        kernel_size: pooling window size. NOTE(review): the components are
            named (kx, ky, kz) below but are passed to the (d, h, w)
            parameters of the kernel helpers, so the effective order is
            (depth, height, width) — confirm against callers.
        stride: pooling stride.
        padding: pooling padding.
        temperature: softmax temperature; must be positive.
        normalized_coordinates: if True, coordinates are remapped to [-1, 1].
        eps: small constant guarding the softmax denominator.
        output_value: if True, also return the softmax-pooled values.
        strict_maxima_bonus: multiplicative bonus applied at strict maxima.

    Returns:
        Coordinates tensor of shape (B, C, 3, D', H', W'); when
        ``output_value`` is True, also the pooled values (B, C, D', H', W').

    Raises:
        ValueError: if the input is not 5D or temperature is not positive.
    """
    if not len(input.shape) == 5:
        raise ValueError("Invalid input shape, we expect BxCxDxHxW. Got: {}"
                         .format(input.shape))
    if temperature <= 0:
        raise ValueError("Temperature should be positive float or tensor. Got: {}"
                         .format(temperature))
    b, c, d, h, w = input.shape
    kx, ky, kz = kernel_size
    device = input.device
    dtype = input.dtype
    # Fold channels into the batch so every map is pooled independently.
    input = input.view(b * c, 1, d, h, w)
    center_kernel = _get_center_kernel3d(kx, ky, kz, device).to(dtype)
    window_kernel = _get_window_grid_kernel3d(kx, ky, kz, device).to(dtype)
    # applies exponential normalization trick
    x_max = F.adaptive_max_pool3d(input, (1, 1, 1))
    # max is detached to prevent undesired backprop loops in the graph
    x_exp = ((input - x_max.detach()) / temperature).exp()
    pool_coef = float(kx * ky * kz)
    # softmax denominator (avg_pool * window_volume == windowed sum)
    den = pool_coef * F.avg_pool3d(x_exp.view_as(input), kernel_size, stride=stride, padding=padding) + eps
    # We need to output also coordinates
    # Pooled window center coordinates
    grid_global = create_meshgrid3d(d, h, w, False, device=device).to(dtype).permute(0, 4, 1, 2, 3)
    grid_global_pooled = F.conv3d(grid_global, center_kernel, stride=stride, padding=padding)
    # Coordinates of maxima residual to window center
    # prepare kernel
    coords_max = F.conv3d(x_exp, window_kernel, stride=stride, padding=padding)
    coords_max = coords_max / den.expand_as(coords_max)
    # Absolute coordinates = window-center + softmax-weighted residual.
    coords_max = coords_max + grid_global_pooled.expand_as(coords_max)
    if normalized_coordinates:
        coords_max = normalize_pixel_coordinates3d(coords_max.permute(0, 2, 3, 4, 1), d, h, w)
        coords_max = coords_max.permute(0, 4, 1, 2, 3)
    # Back B*C -> (b, c)
    coords_max = coords_max.view(b, c, 3, coords_max.size(2), coords_max.size(3), coords_max.size(4))
    if not output_value:
        return coords_max
    x_softmaxpool = pool_coef * F.avg_pool3d(x_exp.view(input.size()) * input, kernel_size,
                                             stride=stride, padding=padding) / den
    if strict_maxima_bonus > 0:
        # Crop the NMS result to the pooled depth range.
        in_levels = input.size(2)
        out_levels = x_softmaxpool.size(2)
        skip_levels = (in_levels - out_levels) // 2
        strict_maxima= F.avg_pool3d(nms3d(input, kernel_size), 1, stride, 0)
        # NOTE(review): upper bound uses out_levels - skip_levels; verify
        # this matches the intended in_levels - skip_levels when they differ.
        strict_maxima = strict_maxima[:, :, skip_levels:out_levels - skip_levels]
        x_softmaxpool *= 1.0 + strict_maxima_bonus * strict_maxima
    x_softmaxpool = x_softmaxpool.view(b, c, x_softmaxpool.size(2), x_softmaxpool.size(3), x_softmaxpool.size(4))
    return coords_max, x_softmaxpool
def spatial_soft_argmax2d(input, temperature=torch.tensor(1.0), normalized_coordinates=True, eps=1e-8):
    """Compute the Spatial Soft-Argmax 2D of a given input heatmap.

    Returns the expected 2d coordinates of the maximum of the map;
    the output order is x-coord and y-coord.

    NOTE(review): ``eps`` is accepted for API compatibility but is not
    used by this implementation.
    """
    softened = spatial_softmax2d(input, temperature)
    return spatial_expectation2d(softened, normalized_coordinates)
class SpatialSoftArgmax2d(nn.Module):
"""
Module that computes the Spatial Soft-Argmax 2D of a given heatmap.
See :func:`geometry.subpix.spatial_soft_argmax2d` for details.
"""
def __init__(self, temperature=torch.tensor(1.0), normalized_coordinates=True, eps=1e-8):
super(SpatialSoftArgmax2d, self).__init__()
self.temperature = temperature
self.normalized_coordinates = normalized_coordinates
| |
# SPDX-FileCopyrightText: 2006-2010 <NAME> for Adafruit Industries
# SPDX-FileCopyrightText: 2019 LadyAda for Adafruit Industries
# SPDX-FileCopyrightText: 2021 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_turtle`
================================================================================
* Original Author(s): LadyAda and <NAME>
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library:
https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
# pylint:disable=too-many-public-methods, too-many-instance-attributes, invalid-name
# pylint:disable=too-few-public-methods, too-many-lines, too-many-arguments
import gc
import math
import time
import board
import displayio
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_turtle.git"
class Color:
    """Standard colors.

    The ``colors`` tuple doubles as the drawing palette: a pixel's value
    in the foreground bitmap is an index into this tuple, so the entry
    order is significant — do not reorder it.
    """

    WHITE = 0xFFFFFF
    BLACK = 0x000000
    RED = 0xFF0000
    ORANGE = 0xFFA500
    YELLOW = 0xFFEE00
    GREEN = 0x00C000
    BLUE = 0x0000FF
    PURPLE = 0x8040C0
    PINK = 0xFF40C0
    LIGHT_GRAY = 0xAAAAAA
    GRAY = 0x444444
    BROWN = 0xCA801D
    DARK_GREEN = 0x008700
    TURQUOISE = 0x00C0C0
    DARK_BLUE = 0x0000AA
    DARK_RED = 0x800000

    # Palette order: index 0 is the (transparent) background color.
    colors = (
        BLACK,
        WHITE,
        RED,
        YELLOW,
        GREEN,
        ORANGE,
        BLUE,
        PURPLE,
        PINK,
        GRAY,
        LIGHT_GRAY,
        BROWN,
        DARK_GREEN,
        TURQUOISE,
        DARK_BLUE,
        DARK_RED,
    )
    # The former no-op __init__ was removed; instances remain constructible
    # via object's default constructor.
class Vec2D(tuple):
    """A 2 dimensional vector class, used as a helper class
    for implementing turtle graphics.
    May be useful for turtle graphics programs also.
    Derived from tuple, so a vector is a tuple!

    Provides (for a, b vectors, k number):
        a+b          vector addition
        a-b          vector subtraction
        a*b          inner product
        k*a and a*k  multiplication with scalar
        |a|          absolute value of a
        a.rotate(angle)  counterclockwise rotation by *angle* degrees
    """

    def __new__(cls, x, y):
        # tuple is immutable, so the components must be supplied to
        # __new__; an __init__ delegating to tuple.__init__ cannot set
        # them and fails on CPython ("tuple expected at most 1 argument").
        return tuple.__new__(cls, (x, y))

    def __add__(self, other):
        return Vec2D(self[0] + other[0], self[1] + other[1])

    def __mul__(self, other):
        if isinstance(other, Vec2D):
            # Vector * vector is the inner (dot) product.
            return self[0] * other[0] + self[1] * other[1]
        return Vec2D(self[0] * other, self[1] * other)

    def __rmul__(self, other):
        if isinstance(other, (float, int)):
            return Vec2D(self[0] * other, self[1] * other)
        # Returning NotImplemented lets Python raise TypeError instead of
        # silently producing None for unsupported operand types.
        return NotImplemented

    def __sub__(self, other):
        return Vec2D(self[0] - other[0], self[1] - other[1])

    def __neg__(self):
        return Vec2D(-self[0], -self[1])

    def __abs__(self):
        return (self[0] ** 2 + self[1] ** 2) ** 0.5

    def rotate(self, angle):
        """Rotate self counterclockwise by angle.

        :param angle: how much to rotate, in degrees
        """
        perp = Vec2D(-self[1], self[0])
        angle = angle * math.pi / 180.0
        c, s = math.cos(angle), math.sin(angle)
        return Vec2D(self[0] * c + perp[0] * s, self[1] * c + perp[1] * s)

    def __getnewargs__(self):
        # Enables copy/pickle round-trips through __new__.
        return (self[0], self[1])

    def __repr__(self):
        return "({:.2f},{:.2f})".format(self[0], self[1])
class turtle:
"""A Turtle that can be given commands to draw."""
# pylint:disable=too-many-statements
    def __init__(self, display=None, scale=1):
        """Create the turtle and build its displayio scene graph.

        :param display: displayio display to draw on; defaults to
            ``board.DISPLAY`` when available.
        :param scale: integer up-scaling factor for the drawing layer.
        :raises RuntimeError: if no display was given and the board has none.
        """
        if display:
            self._display = display
        else:
            try:
                self._display = board.DISPLAY
            except AttributeError as err:
                raise RuntimeError(
                    "No display available. One must be provided."
                ) from err
        self._w = self._display.width
        self._h = self._display.height
        # Start at the logical center of the (scaled) drawing surface.
        self._x = self._w // (2 * scale)
        self._y = self._h // (2 * scale)
        self._speed = 6
        self._heading = 0
        self._logomode = True
        self._fullcircle = 360.0
        self._degreesPerAU = 1.0
        self._angleOrient = 1
        self._angleOffset = 0
        self._bg_color = 0
        self._splash = displayio.Group()
        self._bgscale = 1
        # Pick the largest integer scale that keeps the background bitmap
        # small (side < 128 px for square displays; GCD-based otherwise).
        if self._w == self._h:
            i = 1
            while self._bgscale == 1:
                if self._w / i < 128:
                    self._bg_bitmap = displayio.Bitmap(i, i, 1)
                    self._bgscale = self._w // i
                i += 1
        else:
            self._bgscale = self._GCD(self._w, self._h)
            self._bg_bitmap = displayio.Bitmap(
                self._w // self._bgscale, self._h // self._bgscale, 1
            )
        self._bg_palette = displayio.Palette(1)
        self._bg_palette[0] = Color.colors[self._bg_color]
        self._bg_sprite = displayio.TileGrid(
            self._bg_bitmap, pixel_shader=self._bg_palette, x=0, y=0
        )
        self._bg_group = displayio.Group(scale=self._bgscale)
        self._bg_group.append(self._bg_sprite)
        self._splash.append(self._bg_group)
        # group to add background pictures (and/or user-defined stuff)
        self._bg_addon_group = displayio.Group()
        self._splash.append(self._bg_addon_group)
        self._fg_scale = scale
        # From here on, _w/_h are in scaled (drawing-layer) pixels.
        self._w = self._w // self._fg_scale
        self._h = self._h // self._fg_scale
        # Foreground bitmap: pixel values index into Color.colors.
        self._fg_bitmap = displayio.Bitmap(self._w, self._h, len(Color.colors))
        self._fg_palette = displayio.Palette(len(Color.colors))
        self._fg_palette.make_transparent(self._bg_color)
        for i, c in enumerate(Color.colors):
            self._fg_palette[i] = c
        self._fg_sprite = displayio.TileGrid(
            self._fg_bitmap, pixel_shader=self._fg_palette, x=0, y=0
        )
        self._fg_group = displayio.Group(scale=self._fg_scale)
        self._fg_group.append(self._fg_sprite)
        self._splash.append(self._fg_group)
        # group to add text and/or user defined stuff
        self._fg_addon_group = displayio.Group()
        self._splash.append(self._fg_addon_group)
        # Default turtle cursor: a 9x9 two-color arrow bitmap.
        self._turtle_bitmap = displayio.Bitmap(9, 9, 2)
        self._turtle_palette = displayio.Palette(2)
        self._turtle_palette.make_transparent(0)
        self._turtle_palette[1] = Color.WHITE
        for i in range(4):
            self._turtle_bitmap[4 - i, i] = 1
            self._turtle_bitmap[i, 4 + i] = 1
            self._turtle_bitmap[4 + i, 7 - i] = 1
            self._turtle_bitmap[4 + i, i] = 1
        # Start the cursor off-screen until the first _drawturtle() call.
        self._turtle_sprite = displayio.TileGrid(
            self._turtle_bitmap, pixel_shader=self._turtle_palette, x=-100, y=-100
        )
        self._turtle_group = displayio.Group(scale=self._fg_scale)
        self._turtle_group.append(self._turtle_sprite)
        self._splash.append(self._turtle_group)
        self._penstate = False
        self._pensize = 1
        self._pencolor = 1
        self.pencolor(Color.WHITE)
        self._bg_pic = None
        self._bg_pic_filename = ""
        self._turtle_pic = None
        self._turtle_odb = None
        self._turtle_alt_sprite = None
        self._drawturtle()
        self._stamps = {}
        self._turtle_odb_use = 0
        self._turtle_odb_file = None
        self._odb_tilegrid = None
        gc.collect()
        # NOTE(review): Display.show() is the pre-CircuitPython-8 API —
        # confirm against the targeted displayio version.
        self._display.show(self._splash)
# pylint:enable=too-many-statements
def _drawturtle(self):
if self._turtle_pic is None:
self._turtle_sprite.x = int(self._x - 4)
self._turtle_sprite.y = int(self._y - 4)
else:
if self._turtle_odb is not None:
self._turtle_alt_sprite.x = int(self._x - self._turtle_odb.width // 2)
self._turtle_alt_sprite.y = int(self._y - self._turtle_odb.height // 2)
else:
self._turtle_alt_sprite.x = int(self._x - self._turtle_pic[0] // 2)
self._turtle_alt_sprite.y = int(self._y - self._turtle_pic[1] // 2)
###########################################################################
# Move and draw
def forward(self, distance):
"""Move the turtle forward by the specified distance, in the direction the turtle is headed.
:param distance: how far to move (integer or float)
"""
p = self.pos()
angle = (
self._angleOffset + self._angleOrient * self._heading
) % self._fullcircle
x1 = p[0] + math.sin(math.radians(angle)) * distance
y1 = p[1] + math.cos(math.radians(angle)) * distance
self.goto(x1, y1)
fd = forward
def backward(self, distance):
"""Move the turtle backward by distance, opposite to the direction the turtle is headed.
Does not change the turtle's heading.
:param distance: how far to move (integer or float)
"""
self.forward(-distance)
bk = backward
back = backward
def right(self, angle):
"""Turn turtle right by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on the turtle mode, see mode().
:param angle: how much to rotate to the right (integer or float)
"""
if self._logomode:
self._turn(angle)
else:
self._turn(-angle)
rt = right
def left(self, angle):
"""Turn turtle left by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on the turtle mode, see mode().
:param angle: how much to rotate to the left (integer or float)
"""
if self._logomode:
self._turn(-angle)
else:
self._turn(angle)
lt = left
# pylint:disable=too-many-branches,too-many-statements
    def goto(self, x1, y1=None):
        """If y1 is None, x1 must be a pair of coordinates or an (x, y) tuple
        Move turtle to an absolute position. If the pen is down, draw line.
        Does not change the turtle's orientation.

        The line is rasterized with Bresenham's algorithm; the sprite is
        redrawn (and the loop throttled) every ``_speed`` plotted pixels.

        :param x1: a number or a pair of numbers
        :param y1: a number or None
        """
        if y1 is None:
            y1 = x1[1]
            x1 = x1[0]
        # Convert from turtle coordinates (origin at center, y up) to
        # bitmap coordinates (origin top-left, y down).
        x1 += self._w // 2
        y1 = self._h // 2 - y1
        x0 = self._x
        y0 = self._y
        if not self.isdown():
            self._x = x1  # woot, we just skip ahead
            self._y = y1
            self._drawturtle()
            return
        # Bresenham setup: iterate along the dominant axis. For steep
        # lines the axes are swapped; 'rev' walks x downward.
        steep = abs(y1 - y0) > abs(x1 - x0)
        rev = False
        dx = x1 - x0
        if steep:
            x0, y0 = y0, x0
            x1, y1 = y1, x1
            dx = x1 - x0
        if x0 > x1:
            rev = True
            dx = x0 - x1
        dy = abs(y1 - y0)
        err = dx / 2
        ystep = -1
        if y0 < y1:
            ystep = 1
        step = 1
        # Per-pixel delay derived from _speed (0 means no delay at all).
        if self._speed > 0:
            ts = ((11 - self._speed) * 0.00020) * (self._speed + 0.5)
        else:
            ts = 0
        while (not rev and x0 <= x1) or (rev and x1 <= x0):
            if steep:
                # Axes were swapped above, so plot with (y0, x0).
                try:
                    self._plot(int(y0), int(x0), self._pencolor)
                except IndexError:
                    # Off-screen pixels are silently skipped.
                    pass
                self._x = y0
                self._y = x0
            else:
                try:
                    self._plot(int(x0), int(y0), self._pencolor)
                except IndexError:
                    pass
                self._x = x0
                self._y = y0
            if self._speed > 0:
                if step >= self._speed:
                    # mark the step
                    step = 1
                    self._drawturtle()
                    time.sleep(ts)
                else:
                    step += 1
            err -= dy
            if err < 0:
                y0 += ystep
                err += dx
            if rev:
                x0 -= 1
            else:
                x0 += 1
        self._drawturtle()

    setpos = goto
    setposition = goto
# pylint:enable=too-many-branches,too-many-statements
def setx(self, x):
"""Set the turtle's first coordinate to x, leave second coordinate
unchanged.
:param x: new value of the turtle's x coordinate (a number)
"""
self.goto(x, self.pos()[1])
def sety(self, y):
"""Set the turtle's second coordinate to y, leave first coordinate
unchanged.
:param y: new value of the turtle's y coordinate (a number)
"""
self.goto(self.pos()[0], y)
def setheading(self, to_angle):
"""Set the orientation of the turtle to to_angle. Here are some common
directions | |
import time
import os
from threading import Lock, Thread
import copy as cp
import json
from config.config import config
from bots.botIRC import BotIRC
from bots.botChess import BotChess
from lib.misc import print_debug
class BotHandler:
    """ Class to handle Bots

    Wires the Lichess chess bot (BotChess) to the Twitch chat bot (BotIRC)
    and keeps the OBS overlay json file (scores and stream URL) up to date.
    """

    # json file used by OBS to update its scenes
    PATH_OBS_JSON = "./obs/info.json"

    # Interval (seconds) to change URL in OBS json and then change back to the
    # one before. This is needed because OBS does not refresh
    # page after some time
    REFRESH_URL_INTERVAL = 1800  # 30 minutes

    # Chat commands dispatched by treat_command().
    # COMMANDS MUST START WITH '!'
    MSG_COMMANDS = ["!resign", "!challenge"]
def __init__(self):
""" BotHandler constructor """
# Bots configurations
self.config = config
# Create BotChess object
self.bot_chess = BotChess(config["lichess"], self)
# Create BotIRC object
self.bot_irc = BotIRC(config["twitch"])
# Current game ids
self.game_ids = []
self.lock_game_ids = Lock()
# Users that already voted in certain games
self.users_already_voted = {}
self.lock_users_already_voted = Lock()
def run(self):
""" Run BotHandler (start program) """
# Start game_id checking thread
self.thread_games = Thread(target=self.thread_update_game_ids, daemon=True)
self.thread_games.start()
# Start OBS thread to update wins, draws and losses
self.thread_obs_wdl = Thread(target=self.thread_obs_update_WDL, daemon=True)
self.thread_obs_wdl.start()
# Start OBS thread to update URL
self.thread_obs_url = Thread(target=self.thread_obs_update_URL, daemon=True)
self.thread_obs_url.start()
# Start Twitch thread
self.thread_twitch = Thread(target=self.thread_twitch_chat, daemon=True)
self.thread_twitch.start()
# Keeps running, because all threads are daemon
while True:
time.sleep(10)
def thread_update_game_ids(self):
""" Thread to update current games IDs """
while True:
time.sleep(0.2)
with self.lock_game_ids:
self.game_ids = self.bot_chess.get_ongoing_game_ids()
def thread_twitch_chat(self):
""" Thread to listen messages in Twitch chat and treat them """
while True:
time.sleep(0.2)
# Check for new messages
new_messages = self.bot_irc.recv_messages(1024)
# If there's no messages, continues
if new_messages is None:
continue
for message in new_messages:
print_debug(f"Message: {message}", "DEBUG")
# Tries to get command from message
command = self.get_command_from_msg(message["message"])
if command is not None:
self.treat_command(command, message)
continue
# Tries to get move from the message
move = self.bot_chess.get_move_from_msg(message["message"])
if move is not None:
self.treat_move_msg(move, message)
def thread_obs_update_WDL(self):
""" Thread to update wins, draws and losses in OBS json """
last_json = self.get_obs_info_json()
while True:
time.sleep(5)
# Updates wins, draws and losses at the beginning
acc_info = self.bot_chess.get_account_info()
if acc_info is not None:
# Gets wins, draws and losses
wins, draws, losses = (
acc_info["count"]["win"],
acc_info["count"]["draw"],
acc_info["count"]["loss"],
)
if (
wins != last_json["wins"]
or draws != last_json["draws"]
or losses != last_json["losses"]
):
# Updates local json
self.update_obs_json_WDL(wins, draws, losses)
last_json = self.get_obs_info_json()
    def thread_obs_update_URL(self):
        """ Thread to update OBS json file

        Keeps the json "url" pointing at the current game (viewed from the
        bot's color) and periodically bounces the URL away and back so OBS
        reloads the page (OBS stops refreshing after a while).
        """
        last_game_id = self.get_game_id_from_url(self.get_obs_info_json()["url"])
        refresh_time = time.time()
        # Board orientation for the stream; updated per game below
        color = "white"
        while True:
            time.sleep(0.5)
            # If refresh time has passed, updated URL, wait some time and then
            # go back to the page
            if time.time() - refresh_time >= BotHandler.REFRESH_URL_INTERVAL:
                # Point OBS at the bare game URL (no color suffix) ...
                self.update_obs_json_url(last_game_id)
                time.sleep(3)
                # ... then back to the game viewed from the bot's color,
                # forcing OBS to reload the page
                self.update_obs_json_url(last_game_id + "/" + color)
                refresh_time = time.time()
            # Get current ongoing games
            games_ids = self.get_game_ids()
            # Update URL that OBS is reading from
            if len(games_ids) > 0:
                # Gets current game ID (first ongoing game)
                game_id = games_ids[0]
                # If the game_id has changed, updates OBS json
                if game_id != last_game_id:
                    # Tries to get our color in the ongoing game; keeps the
                    # previous color when the lookup fails
                    color_aux = self.bot_chess.get_color_in_ongoing_game(game_id)
                    if color_aux is not None:
                        color = color_aux
                    # Updated URL
                    self.update_obs_json_url(game_id + "/" + color)
                    # Updates last game ID
                    last_game_id = game_id
def treat_move_msg(self, move, msg_dict):
""" Treats message with a move
Arguments:
move {str} -- Move string
msg_dict {dict} -- Dictionary with message info
"""
# Get copy of current game ids
cp_game_ids = self.get_game_ids()
if len(cp_game_ids) == 0:
return
# Select game_id
# TODO: more robust way to define game_id
# (needed if there's more than one game)
game_id = cp_game_ids[0]
# If the user has already voted in that game, it does not
# let him vote again
if self.get_has_user_already_voted(game_id, msg_dict["username"]):
print_debug(f"{msg_dict['username']} trying to vote again", "DEBUG")
return
# Votes for move in the game
ret = self.bot_chess.vote_for_move(game_id, move)
if ret:
# Set user as already voted in the game
self.set_user_as_already_voted(game_id, msg_dict["username"])
def treat_command(self, command, msg_dict):
""" Treats command from message
Arguments:
command {dict} -- Dictionary as {"!command_name": command_msg}
msg_dict {dict} -- Dictionary with message info
"""
# Treats !resign command
if "!resign" in command.keys():
# Gets copy of game ids
cp_game_ids = self.get_game_ids()
# If there's no game, don't do nothing
if len(cp_game_ids) == 0:
print_debug("There is no game, unable to resign", "DEBUG")
return
# Select game_id
# TODO: more robust way to define game_id
# (needed if there's more than one game)
game_id = cp_game_ids[0]
ret = self.bot_chess.vote_for_resign(game_id)
if ret:
self.set_user_as_already_voted(game_id, msg_dict["username"])
# TODO: Treatment of !challenge command
if "!challenge" in command.keys():
pass
def reset_users_voted_moves(self, game_id):
""" Reset users that voted in given game
Arguments:
game_id {str} -- Game ID in Lichess
"""
with self.lock_users_already_voted:
if game_id not in self.users_already_voted.keys():
return
self.users_already_voted[game_id] = []
def set_user_as_already_voted(self, game_id, user):
""" Set given user as already voted in given game
Arguments:
game_id {str} -- Game ID in Lichess
user {str} -- User in Twitch
"""
with self.lock_users_already_voted:
# Adds list of users that already voted in game_id
# if it has not been created yet
if game_id not in self.users_already_voted.keys():
self.users_already_voted[game_id] = []
# Appends user to the list of users that already voted in
# game_id, if he is not already in it
if user not in self.users_already_voted[game_id]:
self.users_already_voted[game_id].append(user)
def get_has_user_already_voted(self, game_id, user):
""" Get if given user has already voted in given game
Arguments:
game_id {str} -- Game ID in Lichess
user {str} -- User in Twitch
Returns:
bool -- True if user has already voted, False otherwise
"""
with self.lock_users_already_voted:
# If there's no list of users yet
if game_id not in self.users_already_voted.keys():
return False
# If the user is not in the list of user that
# already voted in game_id
if user not in self.users_already_voted[game_id]:
return False
return True
def update_obs_json_url(self, lichess_route):
""" Upate URL in OBS json to stream given Lichess route
Arguments:
lichess_route {str} -- Route in lichess.org
"""
try:
# Gets OBS json as dictionary
json_info = self.get_obs_info_json()
# Updates URL
url = f"http://www.lichess.org/{lichess_route}"
json_info["url"] = url
# Updates OBS json
with open(BotHandler.PATH_OBS_JSON, "w") as f:
json.dump(json_info, f)
print_debug(f"Wrote {url} to {BotHandler.PATH_OBS_JSON}", "DEBUG")
except Exception as e:
print_debug(
f"Unable to update url in {BotHandler.PATH_OBS_JSON}."
+ f" Exception: {e}"
)
def update_obs_json_WDL(self, wins, draws, losses):
""" Upate wins, draws and losses in OBS json
Arguments:
wins {int} -- Number of wins
draws {int} -- Number of draws
losses {int} -- Number of losses
"""
try:
# Gets OBS json as dictionary
json_info = self.get_obs_info_json()
# Updates wins, draws and losses
json_info["wins"] = wins
json_info["draws"] = draws
json_info["losses"] = losses
# Updates OBS json
with open(BotHandler.PATH_OBS_JSON, "w") as f:
json.dump(json_info, f)
print_debug(f"Updated W-D-L of {BotHandler.PATH_OBS_JSON}", "DEBUG")
except Exception as e:
print_debug(
f"Unable to update WDL in {BotHandler.PATH_OBS_JSON}."
+ f" Exception: {e}"
)
def create_obs_info_json(self):
""" Creates OBS json file """
with open(BotHandler.PATH_OBS_JSON, "w") as f:
# Get last played game ID
last_id = self.bot_chess.get_id_last_game_played()
# Creates OBS json file with URL from last game played
json.dump(
{
"wins": 0,
"losses": 0,
"draws": 0,
"url": "http://www.lichess.org/"
+ (last_id if last_id is not None else ""),
},
f,
)
print_debug(f"Create {BotHandler.PATH_OBS_JSON} as OBS json", "DEBUG")
def get_obs_info_json(self):
""" Gets OBS json as dictionary
Returns:
dict or None -- OBS json information or None in case of error
"""
if not os.path.exists(BotHandler.PATH_OBS_JSON):
print_debug(f"File {BotHandler.PATH_OBS_JSON} does not exists", "DEBUG")
self.create_obs_info_json()
with open(BotHandler.PATH_OBS_JSON, "r") as f:
try:
json_info = json.load(f)
except Exception as e:
print_debug(f"Unable to read OBS json. Excepction: {e}", "DEBUG")
self.create_obs_info_json()
try:
json_info = json.load(f)
except Exception as e2:
print_debug(
f"I give up on reading OBS json. Exception {e2}", "ERROR"
)
return None
return json_info
def get_game_id_from_url(self, url):
""" Get Lichess game ID from given URL
Arguments:
url {str} -- Lichess game URL
Returns:
str -- Game ID
"""
return url.split("/")[-1]
def get_game_ids(self):
""" Get current Lichess games IDs
| |
from pox.core import core
from pox.lib.revent import revent
import pox.openflow.libopenflow_01 as of
import pox.openflow.nicira as nx
from pox.openflow.discovery import Discovery
from pox.lib.util import dpid_to_str
from pox.lib.util import str_to_bool
from pox.lib.addresses import IPAddr
from pox.lib.addresses import EthAddr
import pox.lib.packet as pkt
import pox.openflow.spanning_tree
import asyncore
import mysql.connector
import struct
import asynchat
import socket
import thread
import os
import RouteApp
import threading
import time
import pyinotify
import random
# Component-wide POX logger
log = core.getLogger()

# Snort sensor host address.  NOTE(review): not referenced anywhere in this
# module chunk -- presumably used by deployment/config; confirm before removal.
SNORT_ADDR = "10.0.1.2"

# Service topology: which service each server IP provides, and the list of
# IPs backing each service.  redirect()/unredirect() treat the FIRST entry
# of each list as the master replica.
ip2serv_name = {"10.0.0.252" : "http", "10.0.0.1" : "http"}
serv_name2ip = {"http" : ["10.0.0.252", "10.0.0.1"]}

# MAC address of the network gateway; drop/redirect flows for external
# addresses are anchored on the gateway's switch.
gateway_mac=EthAddr("08:00:27:47:7b:44")

# readlines() size hint used when reading strategy files (gen_cmd/alys_file)
MAXCMD = 100

# OpenFlow flow-mod priority levels used by the strategy actions
# (HIGH: redirect rewrites, MID: disconnect drops, LOWMID: monitor mirror)
HIGH = 4
MID = 3
LOWMID = 2
LOW = 1
def start_server(socket_map):
    """Run the asyncore polling loop over `socket_map` (blocks forever)."""
    asyncore.loop(map=socket_map)
def start_watch(wm, eh):
    """Run a pyinotify notifier for watch manager `wm` with handler `eh`
    (blocks forever)."""
    pyinotify.Notifier(wm, eh).loop()
class MyEventHandler(pyinotify.ProcessEvent):
    """inotify handler keeping the generated strategy handlers in sync with
    the watched strategy directory.

    File names encode the dispatch key, "<priority>_<signature>" optionally
    followed by "_<interval>_<times>"; create/modify/delete events call
    func_gen/func_del to update core.secure's handlers and func_table.
    """

    # NOTE: runs once at class-definition time, not per instance.
    log.info("Starting monitor...")

    def gen_cmd(self, pathname):
        """Read command lines from a strategy file (readlines hint MAXCMD).

        Returns the list of lines, or -1 on I/O error.
        """
        try:
            fd = open(pathname, 'r')
            commands = fd.readlines(MAXCMD)
            fd.close()
            return commands
        except IOError as e:
            log.error("I/O error ({0}): {1}".format(e.errno, e.strerror))
            return -1

    def func_gen(self, event):
        """Compile the file's commands into a handler and index it in
        core.secure.func_table."""
        commands = self.gen_cmd(event.pathname)
        if not commands == -1:
            # Build and register the handler on the running component
            core.secure.func_gen(event.name, commands)
            func_name = event.name
            value = func_name.split('_')
            # Ensure the nested dicts func_table[priority][signature] exist
            if not core.secure.func_table.has_key(value[0]):
                core.secure.func_table[value[0]]={}
            if not core.secure.func_table[value[0]].has_key(value[1]):
                core.secure.func_table[value[0]][value[1]] = {}
            if (len(value) == 4):
                # Keyed by the (interval, times) pair from the file name
                core.secure.func_table[value[0]][value[1]][(value[2],value[3])] = func_name
            else:
                # No interval/times in the name: catch-all entry
                core.secure.func_table[value[0]][value[1]]["any"] = func_name

    def func_del(self, event):
        """Unregister the handler generated for this file and drop its
        dispatch-table entry."""
        func_name = "func_" + event.name
        try:
            funcname = func_name.replace(" ", "_")
            core.secure.funclist.remove(func_name)
            delattr(core.secure.handlers, funcname)
            # Strip the "func" prefix to recover the dispatch key parts
            value = func_name.split('_')
            del value[0]
            if (len(value) == 4):
                del core.secure.func_table[value[0]][value[1]][(value[2],value[3])]
            else:
                del core.secure.func_table[value[0]][value[1]]["any"]
            log.info("handler %s removed, rules updated."%func_name)
        except ValueError as e:
            # funclist.remove raised: the handler was never registered
            log.error('%s is not in the funclist'%func_name)

    def process_IN_MOVED_TO(self, event):
        # File moved into the directory: treat like a new strategy file
        log.debug('MOVED_TO event: %s'%event.name)
        self.func_gen(event)

    def process_IN_MODIFY(self, event):
        # File edited in place: rebuild its handler from scratch
        log.debug('MODIFY event: %s'%event.name)
        self.func_del(event)
        self.func_gen(event)

    def process_IN_DELETE(self, event):
        log.debug('DELETE event: %s'%event.name)
        self.func_del(event)

    def process_IN_MOVED_FROM(self, event):
        # File moved out of the directory: same as a deletion
        log.debug('MOVED_FROM event: %s', event.name)
        self.func_del(event)
class AlertIn(revent.Event):
    """Event carrying one parsed alert record received from the sensor.

    The record is a 5-field list: [name, priority, src, dst, occation].
    """

    def __init__(self, alertmsg):
        revent.Event.__init__(self)
        # Unpack the record fields onto the event
        self.occation = alertmsg[4]
        self.dst = alertmsg[3]
        self.src = alertmsg[2]
        self.priority = alertmsg[1]
        self.name = alertmsg[0]
class Reminder(revent.EventMixin):
    """Bridge object: the alert socket stores a parsed record here, then
    asks it to broadcast the record as an AlertIn event."""

    _eventMixin_events = set([AlertIn])

    def __init__(self):
        self.msg = None

    def set_msg(self, msg):
        # Remember the most recent alert record
        self.msg = msg

    def alert(self):
        # Broadcast the stored record to all listeners
        self.raiseEvent(AlertIn, self.msg)
class secure_connect(asynchat.async_chat):
    """Async channel reading '@'-terminated alert records from a sensor
    connection and forwarding them to core.Reminder."""

    def __init__(self, connection, socket_map):
        asynchat.async_chat.__init__(self, connection, map=socket_map)
        self.buf = []
        self.ac_in_buffer_size = 1024
        self.set_terminator("@")

    def collect_incoming_data(self, data):
        # Accumulate raw chunks until the terminator arrives
        self.buf.append(data)

    def found_terminator(self):
        # Full record received: split into newline-separated fields,
        # publish via the Reminder, then reset for the next record
        fields = ("".join(self.buf)).split("\n")
        core.Reminder.set_msg(fields)
        core.Reminder.alert()
        self.buf = []
        self.set_terminator("@")
class secure_server(asyncore.dispatcher):
    """TCP server on 0.0.0.0:20000 accepting alert-feed connections."""

    def __init__(self, socket_map):
        self.socket_map = socket_map
        asyncore.dispatcher.__init__(self, map=self.socket_map)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.bind(("0.0.0.0", 20000))
        self.listen(5)

    def handle_accept(self):
        # Wrap each accepted connection in a secure_connect channel; the
        # channel registers itself in socket_map, which keeps it alive
        connection, addr = self.accept()
        secure_connect(connection, self.socket_map)
class handlers(object):
    """Empty namespace object; dynamically generated strategy handlers are
    attached to an instance of this class via setattr()."""

    def __init__(self):
        pass
class secure(object):
    """POX component that reacts to sensor alerts by applying per-signature
    strategies (disconnect/redirect/monitor/...) compiled from files."""

    def start(self):
        """Subscribe to OpenFlow and topology-discovery events."""
        for event_source in (core.openflow, core.openflow_discovery):
            event_source.addListeners(self)
    def __init__(self, path):
        """Compile the strategy files in `path`, open the alert server and
        register the component with POX core.

        :param path: directory containing the strategy/handler files
        """
        # Strategy-file bookkeeping (filled in by alys_cmd)
        self.path = path
        self.filelist=None
        self.counter=0
        self.filenum=0
        # Action verbs a strategy file may use (see func_gen)
        self.cmdlist = ["disconnect", "wait", "reconnect", "pass", "monitor", "reset", "redirect", "unredirect", "passit"]
        # Namespace object the generated handler functions are attached to
        self.handlers = handlers()
        self.funclist = None
        # Known alert signature -> numeric id.  NOTE(review): the ids are
        # not referenced in this chunk -- confirm how they are consumed.
        self.sig_table= {"BAD-TRAFFIC same SRC/DST":"1",
                "ICMP Time-To-Live Exceeded in Transit":"2",
                "ICMP Echo Reply":"3",
                "ICMP PING BSDtype":"4",
                "ICMP PING *NIX":"5",
                "ICMP PING":"6",
                "SNMP AgentX/tcp request":"7",
                "SNMP request tcp":"8"}
        # Dispatch table: {priority: {signature: {(interval, times)|"any": funcname}}}
        self.func_table={}
        # Compile every strategy file, then index the handlers
        self.alys_cmd()
        self.action_triggered = False
        self.name_process()
        # Learned topology/state tables (MAC -> (switch, port), IP -> MAC)
        self.mactable = {}
        self.iptable = {}
        # Reference-counted action state per address
        self.droplist = {}
        self.monitorlist = {}
        self.redirectlist = {}
        self.ignorelist = []
        # asyncore socket map + TCP server receiving alerts
        self.socket_map = {}
        self.server = secure_server(self.socket_map)
        # Wire up POX: alert events, server thread on GoingUp, OpenFlow
        # listeners once discovery/NX are ready, inotify watcher shortly after
        core.Reminder.addListeners(self)
        core.addListener(pox.core.GoingUpEvent, self.start_server)
        core.call_when_ready(self.start, ["openflow_discovery", "NX"])
        core.callDelayed(1, self.start_watch)
def start_server(self, event):
thread.start_new_thread(start_server, (self.socket_map,))
def start_watch(self):
wm = pyinotify.WatchManager()
wm.add_watch(self.path, pyinotify.ALL_EVENTS, rec = True)
eh = MyEventHandler()
thread.start_new_thread(start_watch, (wm, eh))
    def func_gen(self, File, cmds):
        """Compile a strategy file into a handler method on self.handlers.

        Each line of `cmds` is "<verb>" or "<verb>,<arg>" where the verb is
        one of self.cmdlist.  The generated function
        ``func_<name>(self, src, dst)`` executes the actions in file order.

        NOTE: relies on the Python 2 ``exec`` statement; this module is
        Python 2 only (see also `thread` and dict.has_key usage).
        """
        func_name = "func_" + File
        self.funclist.append(func_name)
        # Spaces are legal in signature names but not in identifiers
        func_name = func_name.replace(" ", "_")
        cmdgenlist = []
        for each in cmds:
            item = each.split('\n')
            action=item[0].split(',')
            if action[0]=="time":
                # NOTE(review): float(action[1]) makes the string "+" below
                # a str+float TypeError, and "time" is not in cmdlist -- this
                # branch looks dead/broken; confirm before relying on it.
                action[1]=float(action[1])
                func_action = "self."+action[0]+"("+action[1]+")"
            elif action[0] in self.cmdlist:
                if(len(action) == 1):
                    func_action = "self." + action[0] + "()"
                else:
                    func_action = "self."+action[0]+"("+action[1]+")"
            cmdgenlist.append(func_action)
            func_action = ''
        # Assemble the handler source and exec it, then attach it by name
        function = "def "+func_name+"(self, src, dst):\n"
        for command in cmdgenlist:
            function = function+"    "+command+"\n"
        exec function
        setattr(self.handlers, func_name, eval(func_name))
        log.info("handler %s registered, rules updated."%func_name)
def alys_file(self):
for File in self.filelist:
fd = open(self.path + File,'r')
commands = fd.readlines(MAXCMD)
fd.close()
yield File, commands
    def alys_cmd(self):
        """Generate one handler per file currently in self.path.

        Consumes self.filenum - self.counter items from the alys_file()
        generator; called from __init__ with self.counter == 0 so every
        file is processed once.
        """
        self.filelist = os.listdir(self.path)
        self.funclist = []
        self.filenum = len(self.filelist)
        filegen = self.alys_file()
        while self.counter < self.filenum:
            # Python 2 generator protocol (.next())
            File,commands = filegen.next()
            self.func_gen(File, commands)
            self.counter += 1
def passit(self):
self.action_triggered = True
    def disconnect(self,addr):
        """Block all IPv4 traffic from `addr` (reference counted).

        Installs a drop-equivalent flow (output to OFPP_NONE) on the host's
        own edge switch, or on the gateway's switch when the address is not
        a known inside host.

        :param addr: dotted-quad IP address string
        """
        self.action_triggered = False
        # Reference count: only the first disconnect installs the flow
        if self.droplist.has_key(addr):
            self.droplist[addr] += 1
        else:
            self.droplist[addr] = 1
        if self.droplist[addr] != 1:
            return
        ipaddr = IPAddr(addr)
        msg = of.ofp_flow_mod()
        msg.priority = MID
        if self.iptable.has_key(ipaddr) and self.iptable[ipaddr] != gateway_mac:
            #Forbid inside machine from sending packets
            host_mac = self.iptable[ipaddr]
            switchid = self.mactable[host_mac][0]
            msg.match.dl_type = 0x0800
            msg.match.dl_src = host_mac
            msg.actions.append(of.ofp_action_output(port = of.OFPP_NONE))
        else:
            # External source: drop by source IP at the gateway's switch
            switchid = self.mactable[gateway_mac][0]
            msg.match.dl_type = 0x0800
            msg.match.nw_src = ipaddr
            msg.actions.append(of.ofp_action_output(port = of.OFPP_NONE))
        switch = core.openflow.getConnection(switchid)
        switch.send(msg)
        self.action_triggered = True
        log.info("%s being disconncted"%addr)
    def redirect(self,addr):
        """Transparently redirect traffic for a dropped server `addr` to a
        live replica of the same service (reference counted).

        Requires addr to already be in droplist.  Installs two rewrite
        flows on the gateway's switch: one rewriting the destination
        (master -> replica) and one rewriting the source on the way back
        (replica -> master), so clients keep seeing the master's address.

        :param addr: dotted-quad IP of the service master being replaced
        """
        self.action_triggered = False
        ipaddr = IPAddr(addr)
        if not ip2serv_name.has_key(addr):
            return
        # Reference count: only the first redirect installs the flows
        if self.redirectlist.has_key(addr):
            self.redirectlist[addr] += 1
        else:
            self.redirectlist[addr] = 1
        if self.redirectlist[addr] == 1:
            if self.droplist.has_key(addr):
                if ip2serv_name.has_key(addr):
                    serv_name = ip2serv_name[addr]
                    if serv_name2ip.has_key(serv_name):
                        # First entry of the service list is the master
                        Masterip = serv_name2ip[serv_name][0]
                        Masteraddr = IPAddr(Masterip)
                        # Replicas that are not themselves disconnected
                        livelist = [ item for item in serv_name2ip[serv_name] if item not in self.droplist ]
                        if len(livelist) > 0:
                            new_ip = random.choice(livelist)
                            log.info("redirectint for %s to %s \nin the service of %s"%(addr, str(new_ip), serv_name))
                            new_mac = self.iptable[IPAddr(new_ip)]
                            # Forward direction: rewrite dst MAC/IP from the
                            # master to the chosen replica and forward along
                            # the shortest path from the gateway's switch
                            msg = of.ofp_flow_mod()
                            msg.match.dl_dst = self.iptable[Masteraddr]
                            msg.actions.append(of.ofp_action_dl_addr.set_dst(new_mac))
                            msg.actions.append(of.ofp_action_nw_addr.set_dst(IPAddr(new_ip)))
                            msg.priority = HIGH
                            routelist = RouteApp.get_shortest_route(pox.openflow.spanning_tree._calc_spanning_tree(), self.mactable[gateway_mac][0], self.mactable[new_mac][0])
                            routelist[-1] = self.mactable[new_mac]
                            msg.actions.append(of.ofp_action_output(port = routelist[0][1]))
                            switchid = self.mactable[gateway_mac][0]
                            switch = core.openflow.getConnection(switchid)
                            switch.send(msg)
                            # Return direction: rewrite src MAC/IP from the
                            # replica back to the master before the gateway
                            msg = of.ofp_flow_mod()
                            msg.match.dl_src = self.iptable[IPAddr(new_ip)]
                            msg.match.dl_dst = gateway_mac
                            msg.priority = HIGH
                            #msg.match.nw_proto = pkt.ipv4.TCP_PROTOCO
                            msg.actions.append(of.ofp_action_dl_addr.set_src(self.iptable[ipaddr]))
                            msg.actions.append(of.ofp_action_nw_addr.set_src(ipaddr))
                            msg.actions.append(of.ofp_action_output(port = self.mactable[gateway_mac][1]))
                            switchid = self.mactable[gateway_mac][0]
                            switch = core.openflow.getConnection(switchid)
                            switch.send(msg)
                            self.action_triggered = True
                        else:
                            log.error("no more same service ip to redirect")
                    else:
                        log.error("check the service to ip dictionary %s"%serv_name)
                else:
                    log.error("check the ip to service dictionary %s"%addr)
            else:
                log.error("%s is not in droplist"%addr)
def wait(self,arg):
#if self.action_triggered:
log.info("waiting for %d seconds"%arg)
time.sleep(arg)
    def reconnect(self,addr):
        """Undo disconnect() for `addr` (reference counted).

        Only when the drop count reaches zero does it delete the matching
        drop flow (strict delete with the same match/priority/action that
        disconnect() installed).

        :param addr: dotted-quad IP address string
        """
        self.action_triggered = False
        self.droplist[addr] -= 1
        if self.droplist[addr] <= 0:
            ipaddr = IPAddr(addr)
            self.droplist[addr] = 0
            log.info("%s being reconnected"%addr)
            msg = of.ofp_flow_mod()
            msg.command = of.OFPFC_DELETE_STRICT
            msg.priority = MID
            msg.actions.append(of.ofp_action_output(port = of.OFPP_NONE))
            # Mirror disconnect()'s switch/match selection so the strict
            # delete targets the exact installed entry
            if self.iptable.has_key(ipaddr) and self.iptable[ipaddr] != gateway_mac:
                host_mac = self.iptable[ipaddr]
                switchid = self.mactable[host_mac][0]
                msg.match.dl_type = 0x0800
                msg.match.dl_src = host_mac
            else:
                switchid = self.mactable[gateway_mac][0]
                msg.match.dl_type = 0x0800
                msg.match.nw_src = ipaddr
            switch = core.openflow.getConnection(switchid)
            switch.send(msg)
            self.action_triggered = True
    def monitor(self, addr):
        """Mirror traffic from inside host `addr` to the gateway for
        inspection (reference counted).

        Rewrites the destination MAC to the gateway and forwards along the
        shortest path from the host's edge switch.  External addresses and
        the gateway itself are ignored.

        :param addr: dotted-quad IP address string
        """
        self.action_triggered = False
        ipaddr = IPAddr(addr)
        # Only known inside hosts (not the gateway) can be monitored
        if not self.iptable.has_key(ipaddr):
            return
        if self.iptable[ipaddr] == gateway_mac:
            return
        # Reference count: only the first monitor installs the flow
        if self.monitorlist.has_key(addr):
            self.monitorlist[addr] += 1
        else:
            self.monitorlist[addr] = 1
        if self.monitorlist[addr] == 1:
            log.info("packet from/to %s mirrored for monitoring"%addr)
            #msg = nx.nx_flow_mod()
            #msg.table_id = 1
            msg = of.ofp_flow_mod()
            msg.priority = LOWMID
            #msg.match.eth_src = self.iptable[ipaddr]
            msg.match.dl_src = self.iptable[ipaddr]
            msg.match.dl_type = 0x0800
            # Steer the host's traffic toward the gateway for inspection
            msg.actions.append(of.ofp_action_dl_addr.set_dst(gateway_mac))
            routelist = RouteApp.get_shortest_route(pox.openflow.spanning_tree._calc_spanning_tree(), self.mactable[self.iptable[ipaddr]][0], self.mactable[gateway_mac][0])
            routelist[-1] = self.mactable[gateway_mac]
            msg.actions.append(of.ofp_action_output(port = routelist[0][1]))
            switchid = self.mactable[self.iptable[ipaddr]][0]
            switch = core.openflow.getConnection(switchid)
            switch.send(msg)
            self.action_triggered = True
    # Delete the monitoring flow entries (flow table 1) for a host
    def reset(self, addr):
        """Undo monitor() for `addr` (reference counted).

        When the monitor count reaches zero, strict-deletes the flow keyed
        on the host's source MAC from table 1.

        NOTE(review): monitor() installed a plain of.ofp_flow_mod (its
        nx/table-1 variant is commented out there), while this delete
        targets nx table 1 -- confirm the two stay in sync.

        :param addr: dotted-quad IP address string
        """
        self.action_triggered = False
        self.monitorlist[addr] -= 1
        if self.monitorlist[addr] > 0:
            return
        self.monitorlist[addr] = 0
        log.info("resetting %s"%addr)
        msg = nx.nx_flow_mod()
        msg.command = of.OFPFC_DELETE_STRICT
        msg.table_id = 1
        ipaddr = IPAddr(addr)
        host_mac = self.iptable[ipaddr]
        msg.match.eth_src = host_mac
        switchid = self.mactable[host_mac][0]
        switch = core.openflow.getConnection(switchid)
        switch.send(msg)
        self.action_triggered = True
def unredirect(self, addr):
self.action_triggered = False
self.redirectlist[addr] -= 1
if self.redirectlist[addr] > 0:
return
self.redirectlist[addr] = 0
log.info("unredirecting %s"%addr)
msg = nx.nx_flow_mod()
msg.command = of.OFPFC_DELETE_STRICT
msg.table_id = 1
serv_name = ip2serv_name[addr]
Masterip = serv_name2ip[serv_name]
Masteraddr = IPAddr(Masterip)
host_mac = self.iptable[Masteraddr]
msg.match.eth_dst = host_mac
msg.match.of_ip_src = Masterip
switchid = self.mactable[gateway_mac][0]
switch = core.openflow.getConnection(switchid)
switch.send(msg)
self.action_triggered = True
def name_process(self):
for func_name in self.funclist:
value = func_name.split('_')
del value[0]
if not self.func_table.has_key(value[0]):
self.func_table[value[0]]={}
if not self.func_table[value[0]].has_key(value[1]):
self.func_table[value[0]][value[1]] = {}
if (len(value) == 4):
self.func_table[value[0]][value[1]][(value[2],value[3])] = func_name
else:
self.func_table[value[0]][value[1]]["any"] = func_name
#{priority:{signatrue:{(interval, times):funcname}}}
def occa_process(self, occation, during):
timeArray = time.strptime(occation, "%Y-%m-%d %H:%M:%S")
timeStamp = time.mktime(timeArray)
timeStamp -= float(during)
timeArray = time.localtime(timeStamp)
before = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
return before
def _handle_AlertIn(self, event):
log.info("Alert In.")
sig = event.name
occation = event.occation
priority = event.priority
sip = event.src
dip = event.dst
if self.monitorlist.has_key(sip) and self.monitorlist[sip] > 0 and not sig in self.ignorelist:
log.info("%s is under attack and may have been captured, so disconncet it."%sip)
self.disconnect(sip)
func_name = "func_"
if self.func_table.has_key(priority):
func_name += priority
if self.func_table[priority].has_key(sig):
func_name += "_" + sig
if (len(self.func_table[priority][sig]) == 1) and (self.func_table[priority][sig].keys()[0] == "any"):
func_name += "_any"
else:
timelist = [item for item in self.func_table[priority][sig].keys()]
flag = False
for time in timelist:
before = self.occa_process(occation, time[0])
times = self.sql(before, occation, sip, dip)
log.info("this has happened:%d times"%times)
if times >= int(time[1]):
func_name += "_" + time[0] + "_" + time[1]
flag = True
break
if not flag:
if (self.func_table[priority][sig].has_key("any")):
func_name += "_any"
else:
log.error("No Strategy")
return
elif | |
")").set_color_by_tex_to_color_map(t2c),
TexMobject("\\text{DFT}", "_{\\frac{n}{2}}", "(", "\\boldsymbol{a}", "^{[1]}", ")").set_color_by_tex_to_color_map(t2c),
).arrange_submobjects(RIGHT, buff=2).next_to(old, DOWN)
br = Brace(pre, UP)
old.shift(UP*0.5)
self.play(Write(old))
self.wait()
self.play(ShowCreation(br), FadeInFrom(pre, UP))
self.wait(3)
O = TexMobject("T(n)=2T(\\frac{n}{2})+O(n)", "=", "O(n\\log n)", "<O(n^2)").next_to(pre, DOWN)
self.play(Write(O[0]))
self.wait()
self.play(FadeInFrom(O[1:3], RIGHT))
self.play(
ShowCreationThenDestructionAround(O[2]),
O[2].set_color, YELLOW
)
self.wait()
self.play(FadeInFrom(O[3], RIGHT))
self.wait(3)
class FFT_Code(Scene):
    """Manim scene: presents recursive FFT pseudocode line by line, flashes
    a cover rectangle, then highlights each logical section in turn."""
    def construct(self):
        # Tex substring -> color map.  set_color_by_tex_to_color_map applies
        # entries in order, so the ordering of overlapping keys matters.
        t2c = {
            "\\boldsymbol{a}": GREEN,
            "lim": GOLD,
            "_0": BLUE,
            "_1": BLUE,
            "_2": BLUE,
            "_3": BLUE,
            "_{n-1}": BLUE,
            "_{n-2}": BLUE,
            "a": GREEN,
            "_{k}": BLUE,
            "k": BLUE,
            "_{k+\\frac{n}{2}}": BLUE,
            "^{[0]}": GOLD,
            "^{[1]}": GOLD,
            "\\omega": RED,
            "_n": GOLD,
            "^{\\frac{2\\pi}{n}}": BLUE_A,
            "2\\pi/n": BLUE_A,
            "{i}": BLUE_E,
            "n/2-1": GOLD,
        }
        # Gray line numbers 1..11 stacked on the left
        nums = VGroup(
            *[
                Text(str(i), font="Consolas").scale(0.45).set_color(GRAY)
                for i in range(1, 12)
            ]
        ).arrange_submobjects(DOWN, aligned_edge=RIGHT, buff=0.3).shift(LEFT*5)
        # Signature line: FFT(a, lim)
        title = TexMobject("\\text{FFT}", "(", "\\boldsymbol{a}", ",\\ ", "lim", ")")
        title.set_color_by_tex_to_color_map(t2c).scale(0.8)
        title.next_to(nums, UP, aligned_edge=LEFT)
        # The eleven pseudocode lines, one TexMobject each, placed next to
        # their line numbers
        code = VGroup()
        line1 = TexMobject("\\textbf{if}\\ \\ ", "lim", "==", "1", "\\ \\ \\ ", "\\textbf{return}")
        line1.scale(0.75).next_to(nums[0], RIGHT).set_color_by_tex_to_color_map(t2c)
        code.add(line1)
        line2 = TexMobject("\\boldsymbol{a}", "^{[0]}", "=", "(", "a", "_0", ", ", "a", "_2", ", ", "\\cdots", ", ", "a", "_{n-2}", ")")
        line2.scale(0.75).next_to(nums[1], RIGHT).set_color_by_tex_to_color_map(t2c)
        code.add(line2)
        line3 = TexMobject("\\boldsymbol{a}", "^{[1]}", "=", "(", "a", "_1", ", ", "a", "_3", ", ", "\\cdots", ", ", "a", "_{n-1}", ")")
        line3.scale(0.75).next_to(nums[2], RIGHT).set_color_by_tex_to_color_map(t2c)
        code.add(line3)
        line4 = TexMobject("\\text{FFT}", "(", "\\boldsymbol{a}", "^{[0]}", ",\\ ", "lim", ">>", "1", ")")
        line4.scale(0.75).next_to(nums[3], RIGHT).set_color_by_tex_to_color_map(t2c)
        code.add(line4)
        line5 = TexMobject("\\text{FFT}", "(", "\\boldsymbol{a}", "^{[1]}", ",\\ ", "lim", ">>", "1", ")")
        line5.scale(0.75).next_to(nums[4], RIGHT).set_color_by_tex_to_color_map(t2c)
        code.add(line5)
        line6 = TexMobject("\\omega", "_n", "=", "e", "^{\\frac{2\\pi}{n}", "i}", "=", "\\cos", "(", "2\\pi/n", ")", "+",\
            "{i}", "\\sin", "(", "2\\pi/n", ")")
        line6.scale(0.75).next_to(nums[5], RIGHT).set_color_by_tex_to_color_map(t2c)
        # Exponent pieces colored by index (split across two tex tokens)
        line6[4].set_color(BLUE_A); line6[5].set_color(BLUE_E)
        code.add(line6)
        line7 = TexMobject("\\omega", "=", "1")
        line7.scale(0.75).next_to(nums[6], RIGHT).set_color_by_tex_to_color_map(t2c)
        code.add(line7)
        line8 = TexMobject("\\textbf{for}\\ \\ ", "k", "=", "0", "\\ .\\ .\\ ", "n/2-1")
        line8.scale(0.75).next_to(nums[7], RIGHT).set_color_by_tex_to_color_map(t2c)
        code.add(line8)
        # Loop body lines are indented (buff=1) relative to their numbers
        line9 = TexMobject("a", "_k", "=", "a", "^{[0]}", "_k", "+", "\\omega", "a", "^{[1]}", "_k")
        line9.scale(0.75).next_to(nums[8], RIGHT, buff=1).set_color_by_tex_to_color_map(t2c)
        code.add(line9)
        line10 = TexMobject("a", "_{k+\\frac{n}{2}}", "=", "a", "^{[0]}", "_k", "-", "\\omega", "a", "^{[1]}", "_k")
        line10.scale(0.75).next_to(nums[9], RIGHT, buff=1).set_color_by_tex_to_color_map(t2c)
        code.add(line10)
        line11 = TexMobject("\\omega", "=", "\\omega", "\\omega", "_n")
        line11.scale(0.75).next_to(nums[10], RIGHT, buff=1).set_color_by_tex_to_color_map(t2c)
        code.add(line11)
        # Reveal the code section by section (note: line 11 is written
        # before lines 9-10)
        self.play(FadeIn(title))
        self.wait(2)
        self.play(FadeInFrom(nums[0], LEFT),)
        self.play(Write(code[0]))
        self.wait(2)
        self.play(FadeInFrom(nums[1:3], LEFT))
        self.play(Write(code[1:3]))
        self.wait(2)
        self.play(FadeInFrom(nums[3:5], LEFT))
        self.play(Write(code[3:5]))
        self.wait(2)
        self.play(FadeInFrom(nums[5], LEFT))
        self.play(Write(code[5]))
        self.wait(2)
        self.play(FadeInFrom(nums[6], LEFT))
        self.play(Write(code[6]))
        self.wait(2)
        self.play(FadeInFrom(nums[7:], LEFT))
        self.play(Write(code[7]))
        self.wait()
        self.play(Write(code[10]))
        self.wait(2)
        self.play(Write(code[8:10]))
        self.wait(3)
        # Briefly cover the frame with a black rectangle
        sq = Rectangle(height=10, width=20).set_fill(BLACK, 1)
        self.add(sq)
        self.wait(3)
        self.remove(sq)
        self.wait(2)
        # Highlight each logical section with a wiggling rectangle
        q = VGroup()
        q.add(SurroundingRectangle(VGroup(nums[0], code[0])))
        q.add(SurroundingRectangle(VGroup(nums[1:3], code[1:3])))
        q.add(SurroundingRectangle(VGroup(nums[3:5], code[3:5])))
        q.add(SurroundingRectangle(VGroup(nums[5:7], code[5:7])))
        q.add(SurroundingRectangle(VGroup(nums[7:], code[7:])))
        for m in q:
            self.play(ShowCreation(m))
            self.play(ScaleInPlace(m, 1.2, rate_func=wiggle))
            self.wait(2)
            self.play(FadeOut(m))
        self.wait(2)
        self.wait()
class FFT_improve_part1(Scene):
    """Manim scene: "butterfly operation" -- factor the common subexpression
    omega * a^[1]_k out of the two FFT update lines."""
    def construct(self):
        # Tex substring -> color map; applied in insertion order
        t2c = {
            "\\boldsymbol{a}": GREEN,
            "lim": GOLD,
            "_0": BLUE,
            "_1": BLUE,
            "_2": BLUE,
            "_3": BLUE,
            "_{n-1}": BLUE,
            "_{n-2}": BLUE,
            "a": GREEN,
            "_{k}": BLUE,
            "k": BLUE,
            "_{k+\\frac{n}{2}}": BLUE,
            "^{[0]}": GOLD,
            "^{[1]}": GOLD,
            "\\omega": RED,
            "_n": GOLD,
            "^{\\frac{2\\pi}{n}}": BLUE_A,
            "2\\pi/n": BLUE_A,
            "{i}": BLUE_E,
            "n/2-1": GOLD,
        }
        # Title (top-left) and subtopic label beneath it
        title = Text("高效实现FFT", font="Source Han Sans CN", t2c={"高效": YELLOW, "实现FFT": BLUE})
        title.scale(0.6).move_to([-4.5, 3.3, 0])
        sub = SubTopic("蝴蝶操作").scale(0.8).next_to(title, DOWN, aligned_edge=LEFT)
        # Caption + the shared subexpression omega * a^[1]_k
        text = VGroup(
            Text("公用子表达式", font="Source Han Serif CN").set_color(ORANGE).scale(0.5),
            TexMobject("\\omega", "a", "^{[1]}", "_k").set_color_by_tex_to_color_map(t2c)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([3.5, 1, 0])
        # t = omega * a^[1]_k, then both updates rewritten in terms of t
        change = TexMobject("t", "=", "\\omega", "a", "^{[1]}", "_k").set_color_by_tex_to_color_map(t2c)
        change[0].set_color(BLUE)
        change.next_to(text, DOWN)
        change1 = TexMobject("a", "_k", "=", "a", "^{[0]}", "_k", "+", "t")
        change1.set_color_by_tex_to_color_map(t2c).next_to(change, DOWN)
        change1[-1].set_color(BLUE)
        change2 = TexMobject("a", "_{k+\\frac{n}{2}}", "=", "a", "^{[0]}", "_k", "-", "t")
        change2.set_color_by_tex_to_color_map(t2c).next_to(change1, DOWN)
        change2[-1].set_color(BLUE)
        # Animation sequence: title, subtopic, then the derivation
        self.play(Write(title))
        self.wait()
        self.play(Write(sub))
        self.wait(3)
        self.play(FadeInFrom(text, DOWN))
        self.wait()
        self.play(Write(change))
        self.wait()
        self.play(FadeIn(change1), FadeIn(change2))
        self.wait(2)
class FFT_improve_part2(Scene):
    """Animates the recursive even/odd splitting of (a_0..a_7) down to single
    leaves — the ordering that motivates the bit-reversal permutation used by
    the iterative FFT."""

    def construct(self):
        # Coefficient symbol 'a' in green, numeric subscripts in blue,
        # roots of unity in red (unused here, kept for consistency).
        t2c = {
            "_0": BLUE,
            "_1": BLUE,
            "_2": BLUE,
            "_3": BLUE,
            "_4": BLUE,
            "_5": BLUE,
            "_6": BLUE,
            "_7": BLUE,
            "a": GREEN,
            "\\omega": RED,
        }
        title = Text("高效实现FFT", font="Source Han Sans CN", t2c={"高效": YELLOW, "实现FFT": BLUE})
        title.scale(0.6).move_to([-4.5, 3.3, 0])
        self.add(title)
        sub = SubTopic("迭代实现").scale(0.8).next_to(title, DOWN, aligned_edge=LEFT)
        # Helpers: a(i) builds the colored symbol a_i; com() builds a comma.
        a = lambda i: TexMobject("a", "_"+str(i)).set_color_by_tex_to_color_map(t2c)
        com = lambda: TexMobject(",")
        # Root of the recursion tree: a_0, a_1, ..., a_7 (last element has no comma).
        tree0 = VGroup(
            *[VGroup(a(i), com()).arrange_submobjects(RIGHT, aligned_edge=DOWN) for i in range(7)],
            a(7)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).shift(UP*2)
        # Depth 1: even-index half (left) and odd-index half (right).
        tree1 = VGroup(
            *[VGroup(a(i), com()).arrange_submobjects(RIGHT, aligned_edge=DOWN) for i in [0, 2, 4]],
            a(6)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([-3.5, tree0.get_center()[1]-1.2, 0])
        tree2 = VGroup(
            *[VGroup(a(i), com()).arrange_submobjects(RIGHT, aligned_edge=DOWN) for i in [1, 3, 5]],
            a(7)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([3.5, tree0.get_center()[1]-1.2, 0])
        # Depth 2: the four pairs after splitting each half again by parity.
        tree3 = VGroup(
            a(0), com(), a(4)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([-5.25, tree1.get_center()[1]-1.2, 0])
        tree4 = VGroup(
            a(2), com(), a(6)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([-1.75, tree1.get_center()[1]-1.2, 0])
        tree5 = VGroup(
            a(1), com(), a(5)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([1.75, tree1.get_center()[1]-1.2, 0])
        tree6 = VGroup(
            a(3), com(), a(7)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([5.25, tree1.get_center()[1]-1.2, 0])
        # Depth 3 leaves in bit-reversed order: 0,4,2,6,1,5,3,7.
        leaf = VGroup(
            *[
                a(i).move_to([j, tree3.get_center()[1]-1.2, 0])
                for i, j in zip([0, 4, 2, 6, 1, 5, 3, 7], [-6.125, -4.375, -2.625, -0.875, 0.875, 2.625, 4.375, 6.125])
            ]
        )
        self.wait()
        self.play(Write(sub))
        self.wait(2)
        self.play(Write(tree0))
        self.wait()
        # Split the root: copy the even-index symbols down into tree1.
        self.play(
            TransformFromCopy(tree0[0][0], tree1[0][0]),
            TransformFromCopy(tree0[2][0], tree1[1][0]),
            TransformFromCopy(tree0[4][0], tree1[2][0]),
            TransformFromCopy(tree0[6][0], tree1[3]),
            run_time=3
        )
        self.play(FadeIn(VGroup(*[tree1[i][1] for i in range(3)])))
        self.wait()
        # ... and the odd-index symbols into tree2.
        self.play(
            TransformFromCopy(tree0[1][0], tree2[0][0]),
            TransformFromCopy(tree0[3][0], tree2[1][0]),
            TransformFromCopy(tree0[5][0], tree2[2][0]),
            TransformFromCopy(tree0[7], tree2[3]),
            run_time=3
        )
        self.play(FadeIn(VGroup(*[tree2[i][1] for i in range(3)])))
        self.wait(2)
        # Second level of splitting: each half again by even/odd position.
        self.play(
            TransformFromCopy(tree1[0][0], tree3[0]),
            TransformFromCopy(tree1[2][0], tree3[2]),
            run_time=2
        )
        self.play(FadeIn(tree3[1]))
        self.wait()
        self.play(
            TransformFromCopy(tree1[1][0], tree4[0]),
            TransformFromCopy(tree1[3], tree4[2]),
            run_time=2
        )
        self.play(FadeIn(tree4[1]))
        self.wait()
        self.play(
            TransformFromCopy(tree2[0][0], tree5[0]),
            TransformFromCopy(tree2[2][0], tree5[2]),
            TransformFromCopy(tree2[1][0], tree6[0]),
            TransformFromCopy(tree2[3], tree6[2]),
            run_time=2
        )
        self.play(FadeIn(tree5[1]), FadeIn(tree6[1]))
        self.wait(2)
        # Final split: every pair becomes two single-element leaves.
        self.play(
            TransformFromCopy(tree3[0], leaf[0]),
            TransformFromCopy(tree3[2], leaf[1]),
            TransformFromCopy(tree4[0], leaf[2]),
            TransformFromCopy(tree4[2], leaf[3]),
            TransformFromCopy(tree5[0], leaf[4]),
            TransformFromCopy(tree5[2], leaf[5]),
            TransformFromCopy(tree6[0], leaf[6]),
            TransformFromCopy(tree6[2], leaf[7]),
            run_time=3
        )
        self.wait(3)
class FFT_improve_part3(Scene):
    """Rebuilds the FFT bottom-up from the bit-reversed leaves, annotating each
    merge level with its root of unity (w_2, w_4, w_8) and showing that a
    group at depth d has size 2^d."""

    def construct(self):
        # Symbol coloring: coefficients green, numeric subscripts blue,
        # roots of unity red with gold order subscripts (_{2}, _{4}, _{8}).
        t2c = {
            "_0": BLUE,
            "_1": BLUE,
            "_2": BLUE,
            "_3": BLUE,
            "_4": BLUE,
            "_5": BLUE,
            "_6": BLUE,
            "_7": BLUE,
            "_{2}": GOLD,
            "_{4}": GOLD,
            "_{8}": GOLD,
            "a": GREEN,
            "\\omega": RED,
        }
        title = Text("高效实现FFT", font="Source Han Sans CN", t2c={"高效": YELLOW, "实现FFT": BLUE})
        title.scale(0.6).move_to([-4.5, 3.3, 0])
        sub = SubTopic("迭代实现").scale(0.8).next_to(title, DOWN, aligned_edge=LEFT)
        self.add(title, sub)
        # Helpers: a(i) -> colored a_i; com() -> comma; w(i) -> small omega_i.
        a = lambda i: TexMobject("a", "_"+str(i)).set_color_by_tex_to_color_map(t2c)
        com = lambda: TexMobject(",")
        w = lambda i: TexMobject("\\omega", "_{"+str(i)+"}").set_color_by_tex_to_color_map(t2c).scale(0.8)
        # Leaves in bit-reversed order, matching the final frame of part2.
        leaf = VGroup(
            *[
                a(i).move_to([j, -1.59999, 0])
                for i, j in zip([0, 4, 2, 6, 1, 5, 3, 7], [-6.125, -4.375, -2.625, -0.875, 0.875, 2.625, 4.375, 6.125])
            ]
        )
        self.add(leaf)
        # Move the leaf row to the top so the merge tree can grow downward.
        self.play(leaf.shift, UP*3.6)
        # Size-2 groups (merge level 1).
        tree3 = VGroup(
            a(0), com(), a(4)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([-5.25, leaf.get_center()[1]-1.2, 0])
        tree4 = VGroup(
            a(2), com(), a(6)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([-1.75, leaf.get_center()[1]-1.2, 0])
        tree5 = VGroup(
            a(1), com(), a(5)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([1.75, leaf.get_center()[1]-1.2, 0])
        tree6 = VGroup(
            a(3), com(), a(7)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([5.25, leaf.get_center()[1]-1.2, 0])
        # Size-4 groups (merge level 2).
        tree1 = VGroup(
            *[VGroup(a(i), com()).arrange_submobjects(RIGHT, aligned_edge=DOWN) for i in [0, 2, 4]],
            a(6)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([-3.5, tree3.get_center()[1]-1.2, 0])
        tree2 = VGroup(
            *[VGroup(a(i), com()).arrange_submobjects(RIGHT, aligned_edge=DOWN) for i in [1, 3, 5]],
            a(7)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([3.5, tree3.get_center()[1]-1.2, 0])
        # Full size-8 result (merge level 3).
        tree0 = VGroup(
            *[VGroup(a(i), com()).arrange_submobjects(RIGHT, aligned_edge=DOWN) for i in range(7)],
            a(7)
        ).arrange_submobjects(RIGHT, aligned_edge=DOWN).move_to([0, tree1.get_center()[1]-1.2, 0])
        # Root-of-unity labels placed midway between the two merged rows.
        w2_0 = w(2).move_to([-5.25, (leaf.get_center()[1]+tree3.get_center()[1]) / 2, 0])
        w2_1 = w(2).move_to([-1.75, (leaf.get_center()[1]+tree3.get_center()[1]) / 2, 0])
        w2_2 = w(2).move_to([ 1.75, (leaf.get_center()[1]+tree3.get_center()[1]) / 2, 0])
        w2_3 = w(2).move_to([ 5.25, (leaf.get_center()[1]+tree3.get_center()[1]) / 2, 0])
        w4_0 = w(4).move_to([-3.5, (tree1.get_center()[1]+tree3.get_center()[1]) / 2, 0])
        w4_1 = w(4).move_to([ 3.5, (tree1.get_center()[1]+tree3.get_center()[1]) / 2, 0])
        w8_0 = w(8).move_to([ 0 , (tree1.get_center()[1]+tree0.get_center()[1]) / 2, 0])
        # Level 1: merge adjacent leaves into pairs using w_2.
        self.play(
            TransformFromCopy(leaf[0], tree3[0]),
            TransformFromCopy(leaf[1], tree3[2]),
            TransformFromCopy(leaf[2], tree4[0]),
            TransformFromCopy(leaf[3], tree4[2]),
            TransformFromCopy(leaf[4], tree5[0]),
            TransformFromCopy(leaf[5], tree5[2]),
            TransformFromCopy(leaf[6], tree6[0]),
            TransformFromCopy(leaf[7], tree6[2]),
            FadeInFrom(VGroup(w2_0, w2_1, w2_2, w2_3), UP),
            run_time=3
        )
        self.play(
            FadeIn(VGroup(tree3[1], tree4[1], tree5[1], tree6[1]))
        )
        self.wait(2)
        # Level 2: merge pairs into quadruples using w_4.
        self.play(
            TransformFromCopy(tree3[0], tree1[0][0]),
            TransformFromCopy(tree3[2], tree1[2][0]),
            TransformFromCopy(tree4[0], tree1[1][0]),
            TransformFromCopy(tree4[2], tree1[3]),
            TransformFromCopy(tree5[0], tree2[0][0]),
            TransformFromCopy(tree5[2], tree2[2][0]),
            TransformFromCopy(tree6[0], tree2[1][0]),
            TransformFromCopy(tree6[2], tree2[3]),
            FadeInFrom(VGroup(w4_0, w4_1), UP),
            run_time=3
        )
        self.play(
            FadeIn(VGroup(tree1[0][1], tree1[1][1], tree1[2][1], tree2[0][1], tree2[1][1], tree2[2][1]))
        )
        self.wait(2)
        # Level 3: merge the two halves into the final sequence using w_8.
        self.play(
            TransformFromCopy(tree1[0][0], tree0[0][0]),
            TransformFromCopy(tree1[2][0], tree0[4][0]),
            TransformFromCopy(tree1[1][0], tree0[2][0]),
            TransformFromCopy(tree1[3], tree0[6][0]),
            TransformFromCopy(tree2[0][0], tree0[1][0]),
            TransformFromCopy(tree2[2][0], tree0[5][0]),
            TransformFromCopy(tree2[1][0], tree0[3][0]),
            TransformFromCopy(tree2[3], tree0[7]),
            FadeInFrom(w8_0, UP),
            run_time=3
        )
        self.play(
            FadeIn(VGroup(*[tree0[i][1] for i in range(7)]))
        )
        self.wait(3)
        # Depth indices 0..3 down the left edge.
        nums = VGroup(
            *[Text(str(i), font="Consolas").scale(0.5).set_color(GRAY) for i in range(4)]
        ).arrange_submobjects(DOWN, buff=0.9).next_to(leaf, LEFT, aligned_edge=UP)
        self.play(Write(nums))
        # Highlight that a group merged with w_m has size m = 2^depth.
        rec1 = SurroundingRectangle(w2_1[1])
        rec2 = SurroundingRectangle(tree4)
        rec3 = SurroundingRectangle(w4_0[1])
        rec4 = SurroundingRectangle(tree1)
        text1 = TexMobject("2", "^1").next_to(tree4, RIGHT, buff=0.8)
        text1[1].set_color(BLUE_A)
        text2 = TexMobject("2", "^2").next_to(tree1, RIGHT, buff=0.8)
        text2[1].set_color(BLUE_A)
        self.wait(2)
        self.play(
            ShowCreation(rec1)
        )
        self.play(
            ShowCreation(rec2),
            TransformFromCopy(tree4, w2_1[1].copy())
        )
        self.wait(2)
        self.play(FadeInFrom(text1[0], RIGHT))
        self.play(TransformFromCopy(nums[1], text1[1]))
        self.wait()
        self.play(FadeOut(VGroup(rec1, rec2)))
        self.wait(2)
        self.play(
            ShowCreation(rec3)
        )
        self.play(
            ShowCreation(rec4),
            TransformFromCopy(tree1, w4_0[1].copy())
        )
        self.wait(2)
        self.play(FadeInFrom(text2[0], RIGHT))
        self.play(TransformFromCopy(nums[2], text2[1]))
        self.wait()
        self.play(FadeOut(VGroup(rec3, rec4)))
        self.wait(3)
class FFT_improve_Code(Scene):
def construct(self):
t2c = {
"a": GREEN,
"n/2-1": GOLD,
"dep": BLUE_A,
"^{dep}": BLUE_A,
"n": GOLD,
"m": GOLD,
"_m": GOLD,
"2\\pi/m": BLUE_A,
"^{\\frac{2\\pi}{m}}": BLUE_A,
"{i}": BLUE_E,
"\\omega": RED,
"\\boldsymbol{a}": GREEN,
"k": BLUE,
"m/2-1": GOLD,
"j": BLUE,
"t": WHITE,
"u": WHITE,
"_{k+j+m/2}": BLUE,
"_{k+j}": BLUE,
}
nums = VGroup(
*[
Text(str(i), font="Consolas").scale(0.4).set_color(GRAY)
for i in range(1, 13)
]
).arrange_submobjects(DOWN, aligned_edge=RIGHT, buff=0.25).shift(LEFT*4)
title = TexMobject("\\text{FFT}", "(", "\\boldsymbol{a}", ")")
title.set_color_by_tex_to_color_map(t2c).scale(0.8)
title.next_to(nums, UP, aligned_edge=LEFT)
code = VGroup()
line1 = TexMobject("\\textbf{BitReverse}\\ \\ ", "\\boldsymbol{a}")
line1.scale(0.75).next_to(nums[0], RIGHT).set_color_by_tex_to_color_map(t2c)
line1[0].set_color(WHITE)
code.add(line1)
line2 = TexMobject("\\textbf{for}\\ \\ ", "dep", "=", "1", "\\ .\\ .\\ ", "\\log_2", "n")
line2.scale(0.75).next_to(nums[1], RIGHT).set_color_by_tex_to_color_map(t2c)
code.add(line2)
| |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests exercising the analytics internals (not individual analytics)."""
__author__ = '<NAME> (<EMAIL>)'
import time
from webtest import app
from common import catch_and_log
from common import crypto
from common import utils as common_utils
from models import data_sources
from models import entities
from models import transforms
from models.data_sources import utils as data_sources_utils
from google.appengine.ext import db
# Data source must be registered before we import actions; actions imports
# 'main', which does all setup and registration in package scope.
class Character(entities.BaseEntity):
    """Test entity mixing indexed, unindexed and PII (name, user_id) fields."""

    user_id = db.StringProperty(indexed=True)
    goal = db.StringProperty(indexed=True)
    name = db.StringProperty(indexed=False)
    age = db.IntegerProperty(indexed=False)
    rank = db.IntegerProperty(indexed=True)

    # Suppress 'name' (PII) from exports entirely.  Note the list holds the
    # property object itself, not the string 'name'.
    _PROPERTY_EXPORT_DENYLIST = [name]

    def for_export(self, transform_fn):
        """Return an export-safe copy with user_id obfuscated via transform_fn."""
        model = super(Character, self).for_export(transform_fn)
        model.user_id = transform_fn(self.user_id)
        return model

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Rebuild the datastore key with its name/id obfuscated via transform_fn."""
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class CharacterDataSource(data_sources.AbstractDbTableRestDataSource):
    """Exposes the Character table through the REST data-source framework."""

    @classmethod
    def get_entity_class(cls):
        """The DB model class this source reads from."""
        return Character

    @classmethod
    def get_name(cls):
        """URL-visible name of this data source."""
        return 'character'
# Register before importing actions: importing actions pulls in 'main', which
# performs all setup and registration at package scope, so the data source
# must already be known by then.
data_sources.Registry.register(CharacterDataSource)
from tests.functional import actions
class DataSourceTest(actions.TestBase):
    """Base fixture: populates the namespace with ten Character rows."""

    # (user_id, goal, rank, age, name) for each fixture character.
    _FIXTURES = [
        ('001', 'L', 4, 8, 'Charlie'),
        ('002', 'L', 6, 6, 'Sally'),
        ('003', 'L', 0, 8, 'Lucy'),
        ('004', 'G', 2, 7, 'Linus'),
        ('005', 'G', 8, 8, 'Max'),
        ('006', 'G', 1, 8, 'Patty'),
        ('007', 'R', 9, 35, 'Othmar'),
        ('008', 'R', 5, 2, 'Snoopy'),
        ('009', 'R', 7, 8, 'Pigpen'),
        ('010', 'R', 3, 8, 'Violet'),
    ]

    def setUp(self):
        super(DataSourceTest, self).setUp()
        with common_utils.Namespace(self.NAMESPACE):
            self.characters = [
                Character(user_id=uid, goal=goal, rank=rank, age=age,
                          name=name)
                for uid, goal, rank, age, name in self._FIXTURES]
            for character in self.characters:
                character.put()

    def tearDown(self):
        with common_utils.Namespace(self.NAMESPACE):
            db.delete(Character.all(keys_only=True).run())
        super(DataSourceTest, self).tearDown()
class PiiExportTest(DataSourceTest):
    """Verifies PII suppression/inclusion in both data rows and the schema.

    Fix: replaced the deprecated unittest alias assertEquals (removed in
    Python 3.12) with assertEqual, which exists on all supported versions.
    """

    COURSE_NAME = 'test_course'
    ADMIN_EMAIL = '<EMAIL>'
    NAMESPACE = 'ns_' + COURSE_NAME

    def setUp(self):
        super(PiiExportTest, self).setUp()
        self.app_context = actions.simple_add_course(
            self.COURSE_NAME, self.ADMIN_EMAIL, 'The Course')
        self.data_source_context = (
            CharacterDataSource.get_context_class().build_blank_default({}, 20))

    def test_get_non_pii_data(self):
        """By default, the denylisted 'name' field is absent from rows."""
        data = self._get_page_data(0)
        self.assertEqual(10, len(data))
        for item in data:
            self.assertNotIn('name', item)

    def test_get_non_pii_schema(self):
        """By default, 'name' is absent from the schema."""
        schema = self._get_schema()
        self.assertNotIn('name', schema)

    def test_get_pii_data(self):
        """With send_uncensored_pii_data, 'name' appears in every row."""
        self.data_source_context.send_uncensored_pii_data = True
        data = self._get_page_data(0)
        self.assertEqual(10, len(data))
        for item in data:
            self.assertIn('name', item)

    def test_get_pii_schema(self):
        """With send_uncensored_pii_data, 'name' appears in the schema."""
        self.data_source_context.send_uncensored_pii_data = True
        schema = self._get_schema()
        self.assertIn('name', schema)

    def _get_schema(self):
        """Fetch the data source schema under the current context."""
        log = catch_and_log.CatchAndLog()
        return CharacterDataSource.get_schema(
            self.app_context, log, self.data_source_context)

    def _get_page_data(self, page_number):
        """Fetch one page of rows under the current context."""
        log = catch_and_log.CatchAndLog()
        schema = self._get_schema()
        data, _ = CharacterDataSource.fetch_values(
            self.app_context, self.data_source_context, schema, log,
            page_number)
        return data
class PaginatedTableTest(DataSourceTest):
"""Verify operation of paginated access to AppEngine DB tables."""
NAMESPACE = ''
def test_simple_read(self):
email = '<EMAIL>'
actions.login(email, is_admin=True)
response = transforms.loads(self.get('/rest/data/character/items').body)
self.assertIn('data', response)
self._verify_data(self.characters, response['data'])
self.assertIn('schema', response)
self.assertIn('user_id', response['schema'])
self.assertIn('age', response['schema'])
self.assertIn('rank', response['schema'])
self.assertNotIn('name', response['schema']) # denylisted
self.assertIn('log', response)
self.assertIn('source_context', response)
self.assertIn('params', response)
self.assertEquals([], response['params']['filters'])
self.assertEquals([], response['params']['orderings'])
    def test_admin_required(self):
        """Unauthenticated (non-admin) requests are rejected with HTTP 403.

        assertRaisesRegexp is kept deliberately: this file targets the
        Python 2 App Engine runtime, where assertRaisesRegex does not exist.
        """
        with self.assertRaisesRegexp(app.AppError, 'Bad response: 403'):
            self.get('/rest/data/character/items')
def test_filtered_read(self):
email = '<EMAIL>'
actions.login(email, is_admin=True)
# Single greater-equal filter
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank>=7').body)
self.assertEquals(3, len(response['data']))
for character in response['data']:
self.assertTrue(character['rank'] >= 7)
# Single less-than filter
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<7').body)
self.assertEquals(7, len(response['data']))
for character in response['data']:
self.assertTrue(character['rank'] < 7)
# Multiple filters finding some rows
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<5&filter=goal=L').body)
self.assertEquals(2, len(response['data']))
for character in response['data']:
self.assertTrue(character['rank'] < 5)
self.assertTrue(character['goal'] == 'L')
def test_ordered_read(self):
email = '<EMAIL>'
actions.login(email, is_admin=True)
# Single ordering by rank
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=rank').body)
self.assertEquals(10, len(response['data']))
prev_rank = -1
for character in response['data']:
self.assertTrue(character['rank'] > prev_rank)
prev_rank = character['rank']
# Single ordering by rank, descending
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=-rank').body)
self.assertEquals(10, len(response['data']))
prev_rank = 10
for character in response['data']:
self.assertTrue(character['rank'] < prev_rank)
prev_rank = character['rank']
# Order by goal then rank
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=goal&ordering=rank').body)
self.assertEquals(10, len(response['data']))
prev_goal = 'A'
prev_rank = -1
for character in response['data']:
self.assertTrue(character['goal'] >= prev_goal)
if character['goal'] != prev_goal:
prev_rank = -1
prev_goal = character['goal']
else:
self.assertTrue(character['rank'] > prev_rank)
prev_rank = character['rank']
def test_filtered_and_ordered(self):
email = '<EMAIL>'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<7&ordering=rank').body)
self.assertEquals(7, len(response['data']))
prev_rank = -1
for character in response['data']:
self.assertTrue(character['rank'] > prev_rank)
self.assertTrue(character['rank'] < 7)
def test_illegal_filters_and_orderings(self):
email = '<EMAIL>'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?filter=foo').body)
self._assert_have_critical_error(
response,
'Filter specification "foo" is not of the form: <name><op><value>')
response = transforms.loads(self.get(
'/rest/data/character/items?filter=foo=9').body)
self._assert_have_critical_error(
response,
'field "foo" which is not in the schema for type "Character"')
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank=kitten').body)
self._assert_have_critical_error(
response,
'invalid literal for int() with base 10: \'kitten\'')
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<<7').body)
self._assert_have_critical_error(
response,
'"rank<<7" uses an unsupported comparison operation "<<"')
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=foo').body)
self._assert_have_critical_error(
response,
'Invalid property name \'foo\'')
response = transforms.loads(self.get(
'/rest/data/character/items?ordering=age').body)
self._assert_have_critical_error(
response,
'Property \'age\' is not indexed')
response = transforms.loads(self.get(
'/rest/data/character/items?filter=age>5').body)
self._assert_have_critical_error(
response,
'Property \'age\' is not indexed')
response = transforms.loads(self.get(
'/rest/data/character/items?filter=rank<7&ordering=goal').body)
self._assert_have_critical_error(
response,
'First ordering property must be the same as inequality filter')
def _assert_have_critical_error(self, response, expected_message):
email = '<EMAIL>'
actions.login(email, is_admin=True)
for log in response['log']:
if (log['level'] == 'critical' and
expected_message in log['message']):
return
self.fail('Expected a critical error containing "%s"' %
expected_message)
def test_pii_encoding(self):
email = '<EMAIL>'
actions.login(email, is_admin=True)
token = data_sources_utils.generate_data_source_token(
crypto.XsrfTokenManager)
response = transforms.loads(self.get('/rest/data/character/items').body)
for d in response['data']:
# Ensure that field marked as needing transformation is cleared
# when we don't pass in an XSRF token used for generating a secret
# for encrypting.
self.assertEquals('None', d['user_id'])
self.assertEquals(str(db.Key.from_path(Character.kind(), 'None')),
d['key'])
# Ensure that field marked for denylist is suppressed.
self.assertFalse('name' in d)
response = transforms.loads(self.get(
'/rest/data/character/items?data_source_token=' + token).body)
for d in response['data']:
# Ensure that field marked as needing transformation is cleared
# when we don't pass in an XSRF token used for generating a secret
# for encrypting.
self.assertIsNotNone(d['user_id'])
self.assertNotEquals('None', d['key'])
# Ensure that field marked for denylist is still suppressed.
self.assertFalse('name' in d)
def test_pii_encoding_changes(self):
email = '<EMAIL>'
actions.login(email, is_admin=True)
token1 = data_sources_utils.generate_data_source_token(
crypto.XsrfTokenManager)
time.sleep(1) # Legit: XSRF token is time-based, so will change.
token2 = data_sources_utils.generate_data_source_token(
crypto.XsrfTokenManager)
self.assertNotEqual(token1, token2)
response1 = transforms.loads(self.get(
'/rest/data/character/items?data_source_token=' + token1).body)
response2 = transforms.loads(self.get(
'/rest/data/character/items?data_source_token=' + token2).body)
for c1, c2 in zip(response1['data'], response2['data']):
self.assertNotEquals(c1['user_id'], c2['user_id'])
self.assertNotEquals(c1['key'], c2['key'])
def test_sequential_pagination(self):
email = '<EMAIL>'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=0').body)
source_context = response['source_context']
self.assertEquals(0, response['page_number'])
self._verify_data(self.characters[:3], response['data'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 3',
'fetch page 0 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=1'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(1, response['page_number'])
self._verify_data(self.characters[3:6], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 3',
'fetch page 1 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=2'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(2, response['page_number'])
self._verify_data(self.characters[6:9], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 2 start cursor present; end cursor missing',
'fetch page 2 using limit 3',
'fetch page 2 saving end cursor',
])
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=3&page_number=3'
'&source_context=%s' % source_context).body)
source_context = response['source_context']
self.assertEquals(3, response['page_number'])
self._verify_data(self.characters[9:], response['data'])
self._assert_have_only_logs(response, [
'Existing context matches parameters; using existing context',
'fetch page 3 start cursor present; end cursor missing',
'fetch page 3 using limit 3',
'fetch page 3 is partial; not saving end cursor',
])
def test_non_present_page_request(self):
email = '<EMAIL>'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=9&page_number=5').body)
self._verify_data(self.characters[9:], response['data'])
self.assertEquals(1, response['page_number'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 9',
'fetch page 0 saving end cursor',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 9',
'fetch page 1 is partial; not saving end cursor',
'Fewer pages available than requested. Stopping at last page 1',
])
def test_empty_last_page_request(self):
email = '<EMAIL>'
actions.login(email, is_admin=True)
response = transforms.loads(self.get(
'/rest/data/character/items?chunk_size=10&page_number=3').body)
self._verify_data([], response['data'])
self.assertEquals(1, response['page_number'])
self._assert_have_only_logs(response, [
'Creating new context for given parameters',
'fetch page 0 start cursor missing; end cursor missing',
'fetch page 0 using limit 10',
'fetch page 0 saving end cursor',
'fetch page 1 start cursor present; end cursor missing',
'fetch page 1 using limit 10',
'fetch page 1 is partial; not saving end cursor',
'Fewer pages available than | |
from typing import Tuple
import numpy as np
import gym.spaces
from . import transform_utils as T
from .furniture_sawyer import FurnitureSawyerEnv
from .models import furniture_name2id
from ..util.logger import logger
class FurnitureSawyerDenseRewardEnv(FurnitureSawyerEnv):
"""
Sawyer environment.
Here, we call a moving object as 'leg' and a target object as 'table'.
"""
    def __init__(self, config):
        """
        Args:
            config: configurations for the environment.  The reward-shaping
                coefficients and thresholds below are copied onto the
                instance so the phase reward functions can read them.
        """
        config.furniture_id = furniture_name2id[config.furniture_name]
        super().__init__(config)
        # common rewards: coefficients/thresholds shared by all phases
        self._diff_rew = config.diff_rew
        self._phase_bonus = config.phase_bonus
        self._ctrl_penalty_coef = config.ctrl_penalty_coef
        self._eef_rot_threshold = config.eef_rot_threshold
        self._eef_forward_dist_coef = config.eef_forward_dist_coef
        self._eef_up_dist_coef = config.eef_up_dist_coef
        self._gripper_penalty_coef = config.gripper_penalty_coef
        self._move_other_part_penalty_coef = config.move_other_part_penalty_coef
        self._early_termination = config.early_termination
        # init_eef: move gripper to its initial pose
        self._init_eef_pos_dist_coef = config.init_eef_pos_dist_coef
        # move_eef_above_leg: hover the eef over the leg
        self._move_eef_pos_dist_coef = config.move_eef_pos_dist_coef
        # lower_eef: descend onto the leg
        self._lower_eef_pos_dist_coef = config.lower_eef_pos_dist_coef
        # grasp_leg: close the gripper on the leg
        self._grasp_dist_coef = config.grasp_dist_coef
        # lift_leg: raise the leg to the target height
        self._lift_xy_dist_coef = config.lift_xy_dist_coef
        self._lift_z_dist_coef = config.lift_z_dist_coef
        self._lift_xy_pos_threshold = config.lift_xy_pos_threshold
        self._lift_z_pos_threshold = config.lift_z_pos_threshold
        # align_leg: match the leg's rotation to the connector site
        self._align_pos_dist_coef = config.align_pos_dist_coef
        self._align_rot_dist_coef = config.align_rot_dist_coef
        self._align_pos_threshold = config.align_pos_threshold
        self._align_rot_threshold = config.align_rot_threshold
        # move_leg: coarse positioning at the connector
        self._move_pos_dist_coef = config.move_pos_dist_coef
        self._move_rot_dist_coef = config.move_rot_dist_coef
        self._move_pos_threshold = config.move_pos_threshold
        self._move_rot_threshold = config.move_rot_threshold
        # move_leg_fine: fine alignment of up/forward vectors
        self._move_fine_pos_exp_coef = config.move_fine_pos_exp_coef
        self._move_fine_rot_dist_coef = config.move_fine_rot_dist_coef
        self._move_fine_pos_dist_coef = config.move_fine_pos_dist_coef
        self._aligned_bonus_coef = config.aligned_bonus_coef
        self._num_connect_steps = 0
        # Phase sets controlling gripper orientation/opening rewards:
        # which phases require the grip's up vector / forward vector to be
        # held, and which expect an open gripper.
        self._grip_up_phases = set(
            [
                "init_eef",
                "move_eef_above_leg",
                "lower_eef",
                "grasp_leg",
                "lift_leg",
            ]
        )
        self._grip_forward_phases = set(
            ["move_eef_above_leg", "lower_eef", "grasp_leg", "lift_leg"]
        )
        self._grip_open_phases = set(["init_eef", "move_eef_above_leg", "lower_eef"])
        # Ordered phase names; index into this list is the phase id used by
        # the one-hot "phase_ob" observation.
        self._phases = [
            "init_eef",
            "move_eef_above_leg",
            "lower_eef",
            "grasp_leg",
            "lift_leg",
            "align_leg",
            "move_leg",
            "move_leg_fine",
        ]
@property
def observation_space(self):
"""
Returns the observation space.
"""
ob_space = super().observation_space
if self._config.phase_ob:
ob_space.spaces["phase_ob"] = gym.spaces.Box(low=0, high=1, shape=(8,))
return ob_space
def _get_obs(self, include_qpos=False):
state = super()._get_obs(include_qpos)
if self._config.phase_ob:
state["phase_ob"] = np.eye(8)[self._phase_i]
return state
def _step(self, a):
ob, reward, done, info = super()._step(a)
# update phase_ob with updated phase_i
if self._config.phase_ob:
ob["phase_ob"] = np.eye(8)[self._phase_i]
return ob, reward, done, info
def _reset_reward_variables(self):
self._subtask_step = len(self._preassembled)
self._used_sites = set()
for part_idx in range(len(self._preassembled)):
leg = self._recipe["recipe"][part_idx][0]
for i in range(len(self._recipe["recipe"])):
g_l, g_r = f"{leg}_ltgt_site{i}", f"{leg}_rtgt_site{i}"
if not (g_l in self._used_sites or g_r in self._used_sites):
self._used_sites.add(g_l)
self._used_sites.add(g_r)
break
self._update_reward_variables()
def _set_next_subtask(self) -> bool:
""" Returns True if we are done with all attaching steps. """
self._subtask_step += 1
if self._subtask_step == self._success_num_conn:
return True
self._update_reward_variables()
return False
    def _update_reward_variables(self):
        """ Updates the reward variables wrt subtask step. """
        subtask_step = self._subtask_step
        # Current subtask: attach self._leg (moving part) to self._table
        # (target part) at the given connector sites.
        self._leg, self._table = self._recipe["recipe"][subtask_step]
        self._leg_site, self._table_site = self._site_recipe[subtask_step][:2]
        # Optional third element of the site recipe is the connector angle.
        if len(self._site_recipe[subtask_step]) == 3:
            self._leg_table_angle = self._site_recipe[subtask_step][2]
        else:
            self._leg_table_angle = None
        # update the observation to the current objects of interest
        self._subtask_part1 = self._object_name2id[self._leg]
        self._subtask_part2 = self._object_name2id[self._table]
        # Per-subtask event flags consumed by the reward phases.
        self._leg_touched = False
        self._leg_dropped = False
        self._table_moved = False
        self._leg_lift = False
        self._init_table_site_pos = self._get_pos(self._table_site)
        # Target lift height: initial leg position offset by the z of the
        # first waypoint for this subtask.
        self._init_lift_leg_pos = leg_pos = self._get_pos(self._leg)
        self._lift_leg_pos = leg_pos + [
            0,
            0,
            self._recipe["waypoints"][subtask_step][0][2],
        ]
        self._leg_fine_aligned = 0
        # NOTE(review): allowed connector angles appear to be encoded in the
        # site name as comma-separated tokens — confirm the naming scheme.
        self._leg_allowed_angles = [x for x in self._leg_site.split(",")[1:-1] if x]
        eef_pos = self._get_pos("griptip_site")
        # Skip phase 0 (init_eef) when the robot is reset after each attach.
        if self._config.reset_robot_after_attach:
            self._phase_i = 1
        else:
            self._phase_i = 0
        if (
            "grip_init_pos" in self._recipe
            and self._recipe["grip_init_pos"][subtask_step] is not None
        ):
            # Recipe supplies an initial gripper offset; a 4th component, if
            # present, overrides the absolute z of the grip target.
            init_eef_offset = self._recipe["grip_init_pos"][subtask_step][0]
            self._init_eef_pos = eef_pos.copy() + init_eef_offset[:3]
            if len(init_eef_offset) == 4:
                self._init_eef_pos[2] = init_eef_offset[3] - 0.085
                # deduct distance between grip_base and griptip
        else:
            self._phase_i = 1
        # Claim the first unused left/right grasp-site pair for this leg.
        for i in range(len(self._recipe["recipe"])):
            g_l, g_r = f"{self._leg}_ltgt_site{i}", f"{self._leg}_rtgt_site{i}"
            if g_l not in self._used_sites and g_r not in self._used_sites:
                self._used_sites.add(g_l)
                self._used_sites.add(g_r)
                break
        # NOTE(review): these lambdas close over g_l/g_r late-bound from the
        # loop above; if no pair were free they would silently reference the
        # last (already used) pair — assumed unreachable in practice.
        self._get_leg_grasp_pos = lambda: (self._get_pos(g_l) + self._get_pos(g_r)) / 2
        self._get_leg_grasp_vector = lambda: self._get_pos(g_r) - self._get_pos(g_l)
        if self._diff_rew:
            # Seed the "previous distance" baselines used by difference rewards.
            if self._phase_i == 1:
                leg_pos = self._get_leg_grasp_pos() + [0, 0, 0.05]
                dist = np.linalg.norm(eef_pos - leg_pos)
                self._prev_eef_above_leg_dist = dist
            else:
                dist = np.linalg.norm(eef_pos - self._init_eef_pos)
                self._prev_init_eef_dist = dist
        self._prev_grasp_dist = -1
        self._prev_lift_leg_z_dist = self._recipe["waypoints"][subtask_step][0][2]
        self._prev_lift_leg_xy_dist = 0.0
def _reset(self, furniture_id=None, background=None):
super()._reset(furniture_id, background)
self._reset_reward_variables()
    def _collect_values(self):
        """ Collects all sensor values required for reward. """
        # Both fingers must contact the leg to count as touching.
        left, right = self._finger_contact(self._leg)
        leg_touched = int(left and right)
        # Orientation vectors of the two connector sites.
        leg_up = self._get_up_vector(self._leg_site)
        table_up = self._get_up_vector(self._table_site)
        leg_forward = self._get_forward_vector(self._leg_site)
        table_forward = self._get_forward_vector(self._table_site)
        if len(self._leg_allowed_angles):
            # Project the leg's forward vector to the configured connector
            # angle before comparing against the table's forward vector.
            leg_forward_rotated = self._project_connector_forward(
                self._leg_site, self._table_site, self._leg_table_angle
            )
        else:
            leg_forward_rotated = leg_forward
        leg_site_pos = self._get_pos(self._leg_site)
        leg_pos = self._get_pos(self._leg)
        table_site_pos = self._get_pos(self._table_site)
        # Hover target z_finedist above the table connector (used by move_leg).
        above_table_site_pos = table_site_pos + [
            0,
            0,
            self._recipe["z_finedist"],
        ]
        eef_pos = self._get_pos("griptip_site")
        leg_grasp_pos = self._get_leg_grasp_pos()
        self._current_values = {
            "eef_pos": eef_pos,
            "leg_touched": leg_touched,
            # Grasp is "safe" only if the grip tip sits below the grasp point.
            "leg_safe_grasp": leg_touched and (eef_pos[2] < leg_grasp_pos[2] - 0.000),
            "leg_grasp_pos": leg_grasp_pos,
            "leg_pos": leg_pos,
            # Leg counts as lifted above the per-subtask target height.
            "lift": leg_pos[2] > self._lift_leg_pos[2],
            "leg_site_pos": leg_site_pos,
            "table_site_pos": table_site_pos,
            "above_table_site_pos": above_table_site_pos,
            "move_pos_dist": np.linalg.norm(table_site_pos - leg_site_pos),
            "move_above_pos_dist": np.linalg.norm(above_table_site_pos - leg_site_pos),
            "leg_up": leg_up,
            "table_up": table_up,
            "table_forward": table_forward,
            # Cosine similarities: 1.0 means perfectly aligned.
            "move_up_ang_dist": T.cos_siml(leg_up, table_up),
            "leg_forward": leg_forward,
            "leg_forward_rotated": leg_forward_rotated,
            "move_forward_ang_dist": T.cos_siml(leg_forward_rotated, table_forward),
            # Projections of the leg-to-table offset onto each site's axis.
            "proj_table": T.cos_siml(-table_up, leg_site_pos - table_site_pos),
            "proj_leg": T.cos_siml(leg_up, table_site_pos - leg_site_pos),
            # How far the target part drifted since the subtask started.
            "table_displacement": np.linalg.norm(
                table_site_pos - self._init_table_site_pos
            ),
        }
def _compute_reward(self, ac) -> Tuple[float, bool, dict]:
"""
Computes multi-phase reward.
While moving the leg, we need to make sure the grip is stable by measuring
angular movements.
At any point, the robot should minimize pose displacement in non-relevant parts.
Phases:
0. init_eef: move gripper to initial position
1. move_eef_above_leg: move eef over table leg
2. lower_eef: lower eef onto the leg
3. grasp_leg: grip the leg
4. lift_leg: lift the leg to specified height
5. align_leg: align the rotation of the leg with the target conn site
6. move_leg: coarsely align the leg with the conn site
7. move_leg_fine: fine grain alignment of the up and forward vectors
"""
phase_bonus = reward = 0
info = {}
# clear the original success and done
done = False
self._success = False
self._collect_values()
v = self._current_values
ctrl_penalty, ctrl_info = self._ctrl_penalty(ac)
stable_grip_reward, sg_info = self._stable_grip_reward()
move_other_part_penalty, move_info = self._move_other_part_penalty()
leg_touched = v["leg_touched"]
table_moved = move_info["table_displacement"] > 0.1
if not self._config.phase_ob:
info["skip_to_lift_leg"] = 0
info["skip_to_move_leg_fine"] = 0
# detect early picking
if (
v["leg_safe_grasp"]
and sg_info["stable_grip_succ"]
and self._phase_i < 3
):
logger.info("Skipped to lift_leg")
info["skip_to_lift_leg"] = 1
# phase_bonus += self._phase_bonus * (3 - self._phase_i)
# phase_bonus += self._phase_bonus
self._phase_i = self._phases.index("lift_leg") # lift_leg
# detect early fine alignment without lifting or coarse alignment
if leg_touched and self._phase_i in [4, 5]: # lift_leg or align_leg
move_above_pos_dist = v["move_above_pos_dist"]
move_pos_dist = v["move_pos_dist"]
move_up_ang_dist = v["move_up_ang_dist"]
move_forward_ang_dist = v["move_forward_ang_dist"]
if (
(
move_pos_dist < self._move_pos_threshold
or move_above_pos_dist < self._move_pos_threshold
)
and move_up_ang_dist > self._move_rot_threshold
and move_forward_ang_dist > self._move_rot_threshold
):
logger.info("Skipped to move_leg_fine")
info["skip_to_move_leg_fine"] = 1
# phase_bonus += self._phase_bonus * (7 - self._phase_i)
self._phase_i = self._phases.index("move_leg_fine") # move_leg_fine
self._prev_move_pos_dist = move_pos_dist
self._prev_move_up_ang_dist = move_up_ang_dist
self._prev_move_forward_ang_dist = move_forward_ang_dist
self._prev_proj_t = v["proj_table"]
self._prev_proj_l = v["proj_leg"]
# compute phase-based reward
phase = self._phases[self._phase_i]
info["phase_i"] = self._phase_i + len(self._phases) * self._subtask_step
info["touch"] = leg_touched
info["drop_leg"] = (
self._phase_i > 3
and not leg_touched
and not self._leg_dropped
and not self._connected
)
info["table_moved"] = table_moved and not self._table_moved
stable_grip_reward, sg_info = self._stable_grip_reward()
grip_penalty, grip_info = self._gripper_penalty(ac)
if phase != "move_leg_fine" and self._connected:
correct_connect = self._is_aligned(self._leg_site, self._table_site)
phase_reward = 0
phase_info = {
"connect_succ": self._connected and correct_connect,
"connect_action": ac[-1],
}
if table_moved:
if not self._table_moved:
logger.info("Moved table too much during move_leg_fine")
self._table_moved = True
done = self._early_termination
if self._early_termination:
phase_bonus -= self._phase_bonus
elif correct_connect:
phase_bonus += self._phase_bonus * 2
                # discourage staying in aligned mode
phase_bonus -= self._leg_fine_aligned * self._aligned_bonus_coef
self._phase_i = 0
logger.info("*** CONNECTED w/o move_leg_fine!")
# update reward variables for next attachment
done = self._success = self._set_next_subtask()
else:
phase_info["wrong_connect"] = 1
self._success = False
done = True
elif phase == "init_eef":
phase_reward, phase_info = self._init_eef_reward()
# if table_moved:
# logger.info("Moved table too much during init_eef")
# done = True
# phase_bonus -= self._phase_bonus / 2
if (
phase_info[f"{phase}_succ"]
and sg_info["stable_grip_succ"]
and grip_info["gripper_open_succ"]
):
self._phase_i += 1
phase_bonus += self._phase_bonus
eef_pos = v["eef_pos"]
leg_pos = v["leg_grasp_pos"] + [0, 0, 0.05]
dist = np.linalg.norm(eef_pos - leg_pos)
self._prev_eef_above_leg_dist = dist
elif phase == "move_eef_above_leg":
phase_reward, phase_info = self._move_eef_above_leg_reward()
if (
phase_info[f"{phase}_succ"]
and sg_info["stable_grip_succ"]
and grip_info["gripper_open_succ"]
):
self._phase_i += 1
phase_bonus += self._phase_bonus
eef_pos = v["eef_pos"]
leg_pos = v["leg_grasp_pos"] + [0, 0, -0.015]
self._prev_eef_leg_dist = np.linalg.norm(eef_pos - leg_pos)
elif phase == "lower_eef":
phase_reward, phase_info = self._lower_eef_reward()
if (
phase_info[f"{phase}_succ"]
and sg_info["stable_grip_succ"]
and grip_info["gripper_open_succ"]
):
phase_bonus += self._phase_bonus
self._phase_i += 1
elif phase == "grasp_leg":
phase_reward, phase_info = self._grasp_leg_reward(ac)
if phase_info["grasp_leg_succ"] and sg_info["stable_grip_succ"]:
self._phase_i += 1
phase_bonus += self._phase_bonus
elif phase == "lift_leg":
phase_reward, phase_info = self._lift_leg_reward()
if not leg_touched:
if not self._leg_dropped:
logger.info("Dropped leg during lifting")
self._leg_dropped = True
done = self._early_termination
if self._early_termination:
phase_bonus += -self._phase_bonus | |
# Source repository: cog-isa/htm-rl
import numpy as np
from htm.bindings.sdr import SDR
from htm_rl.modules.htm.temporal_memory import ApicalBasalFeedbackTM
from htm.bindings.algorithms import SpatialPooler
from htm_rl.modules.basal_ganglia import BasalGanglia, DualBasalGanglia
from htm_rl.modules.htm.pattern_memory import SpatialMemory
import os
import pickle
from typing import Union
EPS = 1e-12
class Block:
"""
Processing unit of Hierarchy.
:param tm: ApicalBasalFeedbackTM
Temporal memory
    :param sp: SpatialPooler or None
        Spatial pooler
:param bg: BasalGanglia or None
Basal ganglia
"""
tm: ApicalBasalFeedbackTM
sp: SpatialPooler
bg: Union[BasalGanglia, DualBasalGanglia]
sm: SpatialMemory
    def __init__(self,
                 tm: ApicalBasalFeedbackTM,
                 sm: SpatialMemory = None,
                 sp: SpatialPooler = None,
                 bg: Union[BasalGanglia, DualBasalGanglia] = None,
                 id_: int = None,
                 level: int = None,
                 predicted_boost: float = 0.2,
                 feedback_boost_range: Union[list[float], None] = None,
                 gamma: float = 0.9,
                 sm_da: float = 0,
                 sm_dda: float = 0,
                 d_an_th: float = 0,
                 d_cn_th: float = 0,
                 sm_reward_inc: float = 0.9,
                 sm_reward_dec: float = 0.999,
                 min_reward_decay: float = 0.99,
                 max_reward_decay: float = 0.99,
                 sm_max_reward: float = 0.9,
                 sm_min_reward: float = 0.9,
                 modulate_tm_lr: bool = False,
                 sparsity: float = 0,
                 continuous_output: bool = False):
        """Wire the sub-modules together and zero all per-episode state.

        :param tm: temporal memory (required); its ``basal_columns`` defines
            this block's output width.
        :param sm: optional spatial (pattern) memory used for option detection.
        :param sp: optional spatial pooler placed in front of the TM.
        :param bg: optional basal ganglia used to filter feedback predictions.
        :param feedback_boost_range: ``[low, high]`` range the feedback boost
            is interpolated over; defaults to ``[0, 1]``.
        The remaining scalars are smoothing coefficients, thresholds and
        decay rates used by ``compute``/``get_output``.
        """
        self.tm = tm
        self.sp = sp
        self.bg = bg
        self.sm = sm
        # SP input/output SDR buffers; without an SP the TM is fed directly.
        if self.sp is not None:
            self.sp_output = SDR(self.sp.getColumnDimensions())
            self.sp_input = SDR(self.sp.getInputDimensions())
        else:
            self.sp_output = None
            self.sp_input = SDR(self.tm.basal_columns)
        if feedback_boost_range is None:
            self.feedback_boost_range = [0, 1]
        else:
            self.feedback_boost_range = feedback_boost_range
        self.predicted_columns = SDR(self.tm.basal_columns)
        self.basal_columns = tm.basal_columns
        # Connections to other blocks of the hierarchy (filled in externally).
        self.basal_in = list()
        self.apical_in = list()
        self.feedback_in = list()
        self.basal_out = list()
        self.apical_out = list()
        self.feedback_out = list()
        # Anomaly / confidence tracking state.
        self.d_an_th = d_an_th
        self.d_cn_th = d_cn_th
        self.anomaly = -1
        self.confidence = 1
        self.anomaly_threshold = 0
        self.confidence_threshold = 0
        self.d_an = 0
        self.d_cn = 0
        # Smoothed TD-error (da) and its smoothed derivative (dda).
        self.da = 0
        self.dda = 0
        self.sm_da = sm_da
        self.sm_dda = sm_dda
        # Reward-modulation state for the TM learning rate.
        self.modulate_tm_lr = modulate_tm_lr
        self.reward_modulation_signal = 1
        self.sm_reward_inc = sm_reward_inc
        self.sm_reward_dec = sm_reward_dec
        self.mean_reward = 0
        self.max_reward = 0
        self.min_reward = 0
        self.max_reward_decay = max_reward_decay
        self.min_reward_decay = min_reward_decay
        self.sm_max_reward = sm_max_reward
        self.sm_min_reward = sm_min_reward
        self.should_return_exec_predictions = False
        self.should_return_apical_predictions = False
        self.id = id_
        self.level = level
        # Last seen input patterns and their total widths.
        self.feedback_in_pattern = np.empty(0)
        self.apical_in_pattern = np.empty(0)
        self.basal_in_pattern = np.empty(0)
        self.feedback_in_size = 0
        self.apical_in_size = 0
        self.basal_in_size = 0
        self.sparsity = sparsity
        # Discounted reward accumulators consumed by the BG update in compute().
        self.reward_ext = 0
        self.reward_int = 0
        self.k = 0
        self.gamma = gamma
        # Option (high-level action) bookkeeping.
        self.made_decision = False
        self.current_option = None
        self.failed_option = None
        self.completed_option = None
        self.predicted_options = None
        self.predicted_boost = predicted_boost
        self.feedback_boost = 0
        # Learning switches (see freeze()/unfreeze()).
        self.learn_tm = True
        self.learn_sp = True
        self.learn_sm = True
        self.continuous_output = continuous_output
def __str__(self):
return f"Block_{self.id}"
    def compute(self, add_exec=False, learn_exec=False, narrow_prediction=False):
        """Run one processing step of the block in one of four modes.

        :param add_exec: activate executive (feedback) dendrites so that
            subsequent ``get_output('feedback')`` also returns exec predictions;
            also evaluates ``self.feedback_boost`` from feedback values.
        :param learn_exec: only learn exec feedback segments from the current
            feedback input; no basal/apical processing is done.
        :param narrow_prediction: re-run cell prediction with fresh feedback to
            narrow an existing prediction.
        With all flags False, performs the full step: gather inputs, run the
        spatial pooler, reinforce the basal ganglia, then activate/predict in
        the temporal memory.
        """
        self.should_return_exec_predictions = False
        self.should_return_apical_predictions = False
        # gather all inputs
        # form basal input sdr(columns)
        if learn_exec:
            if self.learn_tm:
                # Concatenate basal outputs of all feedback sources into one
                # column index space (each source shifted by the widths before it).
                feedback_active_columns = list()
                shift = 0
                for block in self.feedback_in:
                    feedback_active_columns.append(block.get_output('basal') + shift)
                    shift += block.basal_columns
                if len(feedback_active_columns) > 0:
                    feedback_active_columns = np.concatenate(feedback_active_columns)
                else:
                    feedback_active_columns = np.empty(0)
                self.tm.set_active_feedback_columns(feedback_active_columns)
                self.tm.activate_exec_dendrites()
                self.tm.learn_exec_feedback_segments()
                self.feedback_in_pattern = feedback_active_columns
                self.apical_in_pattern = np.empty(0)
                self.basal_in_pattern = np.empty(0)
            # NOTE(review): indentation reconstructed from a whitespace-mangled
            # source — confirm these resets run regardless of self.learn_tm.
            self.tm.inactivate_exec_dendrites()
            self.d_an = 0
            self.d_cn = 0
        elif add_exec:
            self.should_return_exec_predictions = True
            feedback_active_columns = list()
            shift = 0
            total_value = 0
            for block in self.feedback_in:
                # Feedback predictions come with an option value used for boosting.
                pattern, value = block.get_output('feedback', return_value=True)
                feedback_active_columns.append(pattern + shift)
                shift += block.basal_columns
                if value is not None:
                    total_value += value
            if len(self.feedback_in) > 1:
                total_value /= len(self.feedback_in)
            if len(feedback_active_columns) > 0:
                feedback_active_columns = np.concatenate(feedback_active_columns)
            else:
                feedback_active_columns = np.empty(0)
            self.tm.set_active_feedback_columns(feedback_active_columns)
            self.tm.activate_exec_dendrites()
            self.feedback_in_pattern = feedback_active_columns
            self.d_an = 0
            self.d_cn = 0
            # Evaluate feedback boost: interpolate the configured range by the
            # (averaged) option value received from above.
            self.feedback_boost = self.feedback_boost_range[0] + total_value * (self.feedback_boost_range[1] - self.feedback_boost_range[0])
        elif narrow_prediction:
            # Narrow prediction by a feedback
            # Form feedback input sdr(columns)
            feedback_active_columns = list()
            shift = 0
            for block in self.feedback_in:
                feedback_active_columns.append(block.get_output('basal') + shift)
                shift += block.basal_columns
            if len(feedback_active_columns) > 0:
                feedback_active_columns = np.concatenate(feedback_active_columns)
            else:
                feedback_active_columns = np.empty(0)
            # TM: re-predict with inhibitory dendrites driven by the feedback.
            self.tm.set_active_feedback_columns(feedback_active_columns)
            self.tm.activate_inhib_dendrites()
            self.tm.predict_cells()
            self.confidence = self.tm.confidence[-1]
            self.confidence_threshold = self.tm.confidence_threshold
            # Relative deviation of confidence from its threshold.
            self.d_cn = (self.confidence - self.confidence_threshold) / (self.confidence_threshold + EPS)
            self.feedback_in_pattern = feedback_active_columns
        else:
            # Full step. Form basal input sdr(columns).
            basal_active_columns = list()
            shift = 0
            for block in self.basal_in:
                basal_active_columns.append(block.get_output('basal') + shift)
                shift += block.basal_columns
            if len(basal_active_columns) > 0:
                basal_active_columns = np.concatenate(basal_active_columns)
            else:
                basal_active_columns = np.empty(0)
            # Form apical input sdr(cells)
            apical_active_cells = list()
            apical_winner_cells = list()
            shift = 0
            for block in self.apical_in:
                active, winner = block.get_output('apical')
                apical_active_cells.append(active + shift)
                apical_winner_cells.append(winner + shift)
                shift += block.tm.basal_total_cells
            if len(apical_active_cells) > 0:
                apical_active_cells = np.concatenate(apical_active_cells)
            else:
                apical_active_cells = np.empty(0)
            if len(apical_winner_cells) > 0:
                apical_winner_cells = np.concatenate(apical_winner_cells)
            else:
                apical_winner_cells = np.empty(0)
            # Form feedback input sdr(columns)
            feedback_active_columns = list()
            shift = 0
            for block in self.feedback_in:
                feedback_active_columns.append(block.get_output('basal') + shift)
                shift += block.basal_columns
            if len(feedback_active_columns) > 0:
                feedback_active_columns = np.concatenate(feedback_active_columns)
            else:
                feedback_active_columns = np.empty(0)
            # SP: pool the basal columns if a spatial pooler is configured.
            self.sp_input.sparse = basal_active_columns
            if self.sp is not None:
                self.sp.compute(self.sp_input, self.learn_sp, self.sp_output)
                basal_active_columns = self.sp_output.sparse
                # Refresh patterns
                if (self.sm is not None) and self.learn_sm:
                    self.sm.add(self.sp_output.dense.copy())
            else:
                if (self.sm is not None) and self.learn_sm:
                    self.sm.add(self.sp_input.dense.copy())
            # Reinforce: flush the reward accumulated since the last response.
            if (self.bg is not None) and (self.k != 0):
                self.bg.update_response(basal_active_columns)
                self.bg.force_dopamine(self.reward_ext, k=self.k, reward_int=self.reward_int)
                self.update_reward_modulation_signal(self.reward_ext)
                self.reward_ext = 0
                self.reward_int = 0
                self.k = 0
                # Exponentially smoothed squared TD error and its derivative.
                prev_da = self.da
                self.da = self.da * self.sm_da + np.power(self.bg.td_error, 2).flatten().sum() * (1 - self.sm_da)
                self.dda = self.dda * self.sm_dda + (self.da - prev_da) * (1 - self.sm_dda)
            # Forgetting
            if (self.sm is not None) and self.learn_sm:
                self.sm.forget()
            # Modulation of the TM learning rate by recent reward.
            if self.modulate_tm_lr:
                self.tm.set_learning_rate(self.reward_modulation_signal)
            # TM: activate on the (pooled) basal columns, then predict.
            self.tm.set_active_columns(basal_active_columns)
            self.tm.activate_cells(self.learn_tm)
            self.anomaly = self.tm.anomaly[-1]
            self.anomaly_threshold = self.tm.anomaly_threshold
            # Relative deviation of anomaly from its threshold.
            self.d_an = (self.anomaly - self.anomaly_threshold)/(self.anomaly_threshold + EPS)
            self.tm.set_active_apical_cells(apical_active_cells)
            self.tm.set_winner_apical_cells(apical_winner_cells)
            self.tm.set_active_feedback_columns(feedback_active_columns)
            self.tm.activate_basal_dendrites()
            self.tm.activate_apical_dendrites()
            self.tm.activate_inhib_dendrites()
            self.tm.predict_cells()
            self.confidence = self.tm.confidence[-1]
            self.confidence_threshold = self.tm.confidence_threshold
            self.d_cn = (self.confidence - self.confidence_threshold)/(self.confidence_threshold + EPS)
            self.feedback_in_pattern = feedback_active_columns
            self.apical_in_pattern = apical_active_cells
            self.basal_in_pattern = basal_active_columns
    def get_output(self, mode, return_value=False):
        """
        Get block output.

        :param mode: str: type of output, modes: {'basal', 'apical', 'feedback'}
            'basal'   -> active columns (no filtration)
            'apical'  -> (active cells, winner cells)
            'feedback'-> predicted columns, optionally filtered by BG/SM
        :param return_value: bool: in 'feedback' mode, also return the
            normalized value of the chosen option (None when no filtering
            happened)
        :return: depends on mode
        """
        if mode == 'basal':
            # active columns without filtration
            return self.tm.get_active_columns()
        elif mode == 'apical':
            # apical active cells and winners
            return self.tm.get_active_cells(), self.tm.get_winner_cells()
        elif mode == 'feedback':
            # basal predicted columns with filtration
            predicted_columns = self.tm.get_predicted_columns(add_exec=self.should_return_exec_predictions,
                                                              add_apical=self.should_return_apical_predictions)
            self.predicted_columns.sparse = predicted_columns
            # filter columns by Basal Ganglia conditioned on apical input
            if (self.bg is not None) and (self.sm is not None):
                # form apical input (concatenated, index-shifted per source)
                apical_active_columns = list()
                shift = 0
                for block in self.apical_in:
                    columns = block.get_output('basal')
                    apical_active_columns.append(columns + shift)
                    shift += block.basal_columns
                if len(apical_active_columns) > 0:
                    apical_active_columns = np.concatenate(apical_active_columns)
                else:
                    apical_active_columns = np.empty(0)
                condition = SDR(shift)
                condition.sparse = apical_active_columns
                # detect options among predictions
                predicted_options, indices = self.sm.get_options(self.predicted_columns.dense, return_indices=True)
                # all options
                options = self.sm.get_sparse_patterns()
                if len(options) > 0 or self.continuous_output:
                    boost_predicted_options = np.zeros(len(self.sm))
                    if len(indices) > 0:
                        # boost predicted options
                        boost_predicted_options[indices] += self.predicted_boost
                        # feedback boost
                        boost_predicted_options[indices] += self.feedback_boost
                    # BG chooses one option among all, conditioned on the apical state.
                    option_index, option, option_values = self.bg.compute(condition.sparse, options, responses_boost=boost_predicted_options)
                    self.bg.update_stimulus(condition.sparse)
                    # normalize option values to [0, 1]
                    norm_option_values = option_values - option_values.min()
                    norm_option_values /= (norm_option_values.max() + EPS)
                    self.made_decision = True
                    self.failed_option = None
                    self.completed_option = None
                    if len(self.sm.unique_id) > 0:
                        self.current_option = self.sm.unique_id[option_index]
                        self.predicted_options = self.sm.unique_id[indices]
                    else:
                        self.current_option = None
                        self.predicted_options = np.empty(0)
                    # jumped off a high level option: the chosen option was not
                    # among the predicted ones, so the options above us failed
                    if not np.isin(option_index, indices):
                        self.feedback_boost = 0
                        for block in self.feedback_in:
                            block.finish_current_option('failed')
                    if return_value:
                        return option, norm_option_values[option_index]
                    else:
                        return option
                else:
                    if return_value:
                        return np.empty(0), None
                    else:
                        return np.empty(0)
            else:
                # No BG/SM: return the raw predicted columns.
                if return_value:
                    return predicted_columns, None
                else:
                    return predicted_columns
        else:
            raise ValueError(f'There is no such mode {mode}!')
def get_in_sizes(self):
self.feedback_in_size = sum([block.basal_columns for block in self.feedback_in])
self.apical_in_size = sum([block.tm.basal_total_cells for block in self.apical_in])
self.basal_in_size = sum([block.basal_columns for block in self.basal_in])
return self.feedback_in_size, self.apical_in_size, self.basal_in_size
def add_reward(self, reward_ext: float, reward_int: float = 0):
if self.bg is not None:
self.reward_ext += (self.gamma ** self.k) * reward_ext
self.reward_int += (self.gamma ** self.k) * reward_int
self.k += 1
def finish_current_option(self, flag):
if flag == 'failed':
self.failed_option = self.current_option
self.completed_option = None
elif flag == 'completed':
self.completed_option = self.current_option
self.failed_option = None
else:
raise ValueError
self.made_decision = False
self.current_option = None
def reset(self):
self.tm.reset()
if self.bg is not None:
self.bg.reset()
self.reward_ext = 0
self.reward_int = 0
self.k = 0
self.made_decision = False
self.current_option = None
self.failed_option = None
self.completed_option = None
self.should_return_exec_predictions = False
self.should_return_apical_predictions = False
self.feedback_in_pattern = np.empty(0)
self.apical_in_pattern = np.empty(0)
self.basal_in_pattern = np.empty(0)
def freeze(self):
self.learn_sp = False
self.learn_tm = False
self.learn_sm = False
def unfreeze(self):
self.learn_sp = True
self.learn_tm = True
self.learn_sm = True
def update_reward_modulation_signal(self, reward):
if reward > self.mean_reward:
self.mean_reward = self.mean_reward * self.sm_reward_inc + reward * | |
self.assertEqual(v.is_symbol(), True)
self.assertEqual(v.value(), 'empty-list')
v = mlisp.prim_type('type', [mlisp.VCons(mlisp.VNumber(42), mlisp.VEmpty())])
self.assertEqual(v.is_symbol(), True)
self.assertEqual(v.value(), 'cons-list')
def prim(name, args):
return (args[0], args[1])
v = mlisp.prim_type('type', [mlisp.VPrimitive('prim', prim, 2)])
self.assertEqual(v.is_symbol(), True)
self.assertEqual(v.value(), 'primitive')
v = mlisp.prim_type('type', [mlisp.VSymbol('Alice')])
self.assertEqual(v.is_symbol(), True)
self.assertEqual(v.value(), 'symbol')
v = mlisp.prim_type('type', [mlisp.VFunction(['a', 'b'], mlisp.Symbol('a'), mlisp.Environment())])
self.assertEqual(v.is_symbol(), True)
self.assertEqual(v.value(), 'function')
def test_prim_plus(self):
v = mlisp.prim_plus('+', [])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 0)
v = mlisp.prim_plus('+', [mlisp.VNumber(42)])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42)
v = mlisp.prim_plus('+', [mlisp.VNumber(42), mlisp.VNumber(84)])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42 + 84)
v = mlisp.prim_plus('+', [mlisp.VNumber(42), mlisp.VNumber(84), mlisp.VNumber(168)])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42 + 84 + 168)
def test_prim_times(self):
v = mlisp.prim_times('*', [])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 1)
v = mlisp.prim_times('*', [mlisp.VNumber(42)])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42)
v = mlisp.prim_times('*', [mlisp.VNumber(42), mlisp.VNumber(84)])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42 * 84)
v = mlisp.prim_times('*', [mlisp.VNumber(42), mlisp.VNumber(84), mlisp.VNumber(168)])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42 * 84 * 168)
def test_prim_minus(self):
v = mlisp.prim_minus('-', [mlisp.VNumber(42)])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), -42)
v = mlisp.prim_minus('-', [mlisp.VNumber(42), mlisp.VNumber(84)])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42 - 84)
v = mlisp.prim_minus('-', [mlisp.VNumber(42), mlisp.VNumber(84), mlisp.VNumber(168)])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42 - 84 - 168)
def test_prim_numless(self):
v = mlisp.prim_numless('<', [mlisp.VNumber(0), mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_numless('<', [mlisp.VNumber(42), mlisp.VNumber(0)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
v = mlisp.prim_numless('<', [mlisp.VNumber(0), mlisp.VNumber(0)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
v = mlisp.prim_numless('<', [mlisp.VNumber(42), mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
def test_prim_numlesseq(self):
v = mlisp.prim_numlesseq('<=', [mlisp.VNumber(0), mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_numlesseq('<=', [mlisp.VNumber(42), mlisp.VNumber(0)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
v = mlisp.prim_numlesseq('<=', [mlisp.VNumber(0), mlisp.VNumber(0)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_numlesseq('<=', [mlisp.VNumber(42), mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
def test_prim_numgreater(self):
v = mlisp.prim_numgreater('>', [mlisp.VNumber(0), mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
v = mlisp.prim_numgreater('>', [mlisp.VNumber(42), mlisp.VNumber(0)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_numgreater('>', [mlisp.VNumber(0), mlisp.VNumber(0)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
v = mlisp.prim_numgreater('>', [mlisp.VNumber(42), mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
def test_prim_numgreatereq(self):
v = mlisp.prim_numgreatereq('>=', [mlisp.VNumber(0), mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
v = mlisp.prim_numgreatereq('>=', [mlisp.VNumber(42), mlisp.VNumber(0)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_numgreatereq('>=', [mlisp.VNumber(0), mlisp.VNumber(0)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_numgreatereq('>=', [mlisp.VNumber(42), mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
def test_prim_not(self):
v = mlisp.prim_not('not', [mlisp.VBoolean(True)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
v = mlisp.prim_not('not', [mlisp.VBoolean(False)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_not('not', [mlisp.VNumber(0)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_not('not', [mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
v = mlisp.prim_not('not', [mlisp.VString('')])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_not('not', [mlisp.VString('Alice')])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
v = mlisp.prim_not('not', [mlisp.VEmpty()])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_not('not', [mlisp.VCons(mlisp.VNumber(42), mlisp.VEmpty())])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
def test_prim_string_append(self):
v = mlisp.prim_string_append('string-append', [])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), '')
v = mlisp.prim_string_append('string-append', [mlisp.VString('Alice')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'Alice')
v = mlisp.prim_string_append('string-append', [mlisp.VString('Alice'), mlisp.VString('Bob')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'AliceBob')
v = mlisp.prim_string_append('string-append', [mlisp.VString('Alice'), mlisp.VString('Bob'), mlisp.VString('Charlie')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'AliceBobCharlie')
def test_prim_string_length(self):
v = mlisp.prim_string_length('string-length', [mlisp.VString('')])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 0)
v = mlisp.prim_string_length('string-length', [mlisp.VString('Alice')])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 5)
v = mlisp.prim_string_length('string-length', [mlisp.VString('Alice Bob')])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 9)
def test_prim_string_lower(self):
v = mlisp.prim_string_lower('string-lower', [mlisp.VString('')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), '')
v = mlisp.prim_string_lower('string-lower', [mlisp.VString('Alice')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'alice')
v = mlisp.prim_string_lower('string-lower', [mlisp.VString('<NAME>')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), '<NAME>')
def test_prim_string_upper(self):
v = mlisp.prim_string_upper('string-upper', [mlisp.VString('')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), '')
v = mlisp.prim_string_upper('string-upper', [mlisp.VString('Alice')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'ALICE')
v = mlisp.prim_string_upper('string-upper', [mlisp.VString('<NAME>')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), '<NAME>')
def test_prim_string_substring(self):
v = mlisp.prim_string_substring('string-substring', [mlisp.VString('')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), '')
v = mlisp.prim_string_substring('string-substring', [mlisp.VString('Alice')])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'Alice')
v = mlisp.prim_string_substring('string-substring', [mlisp.VString('Alice'), mlisp.VNumber(0)])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'Alice')
v = mlisp.prim_string_substring('string-substring', [mlisp.VString('Alice'), mlisp.VNumber(1)])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'lice')
v = mlisp.prim_string_substring('string-substring', [mlisp.VString('Alice'), mlisp.VNumber(2)])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'ice')
v = mlisp.prim_string_substring('string-substring', [mlisp.VString('Alice'), mlisp.VNumber(0), mlisp.VNumber(5)])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'Alice')
v = mlisp.prim_string_substring('string-substring', [mlisp.VString('Alice'), mlisp.VNumber(0), mlisp.VNumber(3)])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'Ali')
v = mlisp.prim_string_substring('string-substring', [mlisp.VString('Alice'), mlisp.VNumber(2), mlisp.VNumber(3)])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), 'i')
v = mlisp.prim_string_substring('string-substring', [mlisp.VString('Alice'), mlisp.VNumber(0), mlisp.VNumber(0)])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), '')
v = mlisp.prim_string_substring('string-substring', [mlisp.VString('Alice'), mlisp.VNumber(3), mlisp.VNumber(3)])
self.assertEqual(v.is_string(), True)
self.assertEqual(v.value(), '')
def test_prim_apply(self):
def prim(name, args):
return (args[0], args[1])
v = mlisp.prim_apply('apply', [mlisp.VPrimitive('test', prim, 2, 2),
_make_list([mlisp.VNumber(42), mlisp.VString('Alice')])])
self.assertEqual(v[0].is_number(), True)
self.assertEqual(v[0].value(), 42)
self.assertEqual(v[1].is_string(), True)
self.assertEqual(v[1].value(), 'Alice')
v = mlisp.prim_apply('apply', [mlisp.VFunction(['a', 'b'], mlisp.Symbol('a'), mlisp.Environment()),
_make_list([mlisp.VNumber(42), mlisp.VString('Alice')])])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42)
def test_prim_cons(self):
v = mlisp.prim_cons('cons', [mlisp.VNumber(42), mlisp.VEmpty()])
l = _unmake_list(v)
self.assertEqual(len(l), 1)
self.assertEqual(l[0].is_number(), True)
self.assertEqual(l[0].value(), 42)
v = mlisp.prim_cons('cons', [mlisp.VNumber(42), _make_list([mlisp.VString('Alice'), mlisp.VString('Bob')])])
l = _unmake_list(v)
self.assertEqual(len(l), 3)
self.assertEqual(l[0].is_number(), True)
self.assertEqual(l[0].value(), 42)
self.assertEqual(l[1].is_string(), True)
self.assertEqual(l[1].value(), 'Alice')
self.assertEqual(l[2].is_string(), True)
self.assertEqual(l[2].value(), 'Bob')
def test_prim_append(self):
v = mlisp.prim_append('append', [])
l = _unmake_list(v)
self.assertEqual(len(l), 0)
v = mlisp.prim_append('append', [_make_list([mlisp.VNumber(1), mlisp.VNumber(2)])])
l = _unmake_list(v)
self.assertEqual(len(l), 2)
self.assertEqual(l[0].is_number(), True)
self.assertEqual(l[0].value(), 1)
self.assertEqual(l[1].is_number(), True)
self.assertEqual(l[1].value(), 2)
v = mlisp.prim_append('append', [_make_list([mlisp.VNumber(1), mlisp.VNumber(2)]),
_make_list([mlisp.VNumber(3), mlisp.VNumber(4)])])
l = _unmake_list(v)
self.assertEqual(len(l), 4)
self.assertEqual(l[0].is_number(), True)
self.assertEqual(l[0].value(), 1)
self.assertEqual(l[1].is_number(), True)
self.assertEqual(l[1].value(), 2)
self.assertEqual(l[2].is_number(), True)
self.assertEqual(l[2].value(), 3)
self.assertEqual(l[3].is_number(), True)
self.assertEqual(l[3].value(), 4)
v = mlisp.prim_append('append', [_make_list([mlisp.VNumber(1), mlisp.VNumber(2)]),
_make_list([mlisp.VNumber(3), mlisp.VNumber(4)]),
_make_list([mlisp.VNumber(5), mlisp.VNumber(6)])])
l = _unmake_list(v)
self.assertEqual(len(l), 6)
self.assertEqual(l[0].is_number(), True)
self.assertEqual(l[0].value(), 1)
self.assertEqual(l[1].is_number(), True)
self.assertEqual(l[1].value(), 2)
self.assertEqual(l[2].is_number(), True)
self.assertEqual(l[2].value(), 3)
self.assertEqual(l[3].is_number(), True)
self.assertEqual(l[3].value(), 4)
self.assertEqual(l[4].is_number(), True)
self.assertEqual(l[4].value(), 5)
self.assertEqual(l[5].is_number(), True)
self.assertEqual(l[5].value(), 6)
def test_prim_reverse(self):
v = mlisp.prim_reverse('reverse', [_make_list([mlisp.VNumber(1),
mlisp.VNumber(2),
mlisp.VNumber(3),
mlisp.VNumber(4)])])
l = _unmake_list(v)
self.assertEqual(len(l), 4)
self.assertEqual(l[0].is_number(), True)
self.assertEqual(l[0].value(), 4)
self.assertEqual(l[1].is_number(), True)
self.assertEqual(l[1].value(), 3)
self.assertEqual(l[2].is_number(), True)
self.assertEqual(l[2].value(), 2)
self.assertEqual(l[3].is_number(), True)
self.assertEqual(l[3].value(), 1)
def test_prim_first(self):
v = mlisp.prim_first('first', [_make_list([mlisp.VNumber(42)])])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42)
v = mlisp.prim_first('first', [_make_list([mlisp.VNumber(42),
mlisp.VString('Alice'),
mlisp.VString('Bob')])])
self.assertEqual(v.is_number(), True)
self.assertEqual(v.value(), 42)
def test_prim_rest(self):
v = mlisp.prim_rest('rest', [_make_list([mlisp.VNumber(42)])])
l = _unmake_list(v)
self.assertEqual(len(l), 0)
v = mlisp.prim_rest('rest', [_make_list([mlisp.VNumber(42),
mlisp.VString('Alice'),
mlisp.VString('Bob')])])
l = _unmake_list(v)
self.assertEqual(len(l), 2)
self.assertEqual(l[0].is_string(), True)
self.assertEqual(l[0].value(), 'Alice')
self.assertEqual(l[1].is_string(), True)
self.assertEqual(l[1].value(), 'Bob')
def test_prim_list(self):
    """list must pack its arguments, in order, into a fresh list."""
    # The k-argument case yields the first k of these (type-check, value)
    # expectations.
    expectations = [(True, 42), (False, 'Alice'), (False, 'Bob')]
    for size in range(len(expectations) + 1):
        args = [mlisp.VNumber(42), mlisp.VString('Alice'),
                mlisp.VString('Bob')][:size]
        result = _unmake_list(mlisp.prim_list('list', args))
        self.assertEqual(len(result), size)
        for item, (is_num, value) in zip(result, expectations):
            self.assertEqual(
                item.is_number() if is_num else item.is_string(), True)
            self.assertEqual(item.value(), value)
def test_prim_length(self):
    """length must report the number of elements in the list."""
    contents = [mlisp.VNumber(42), mlisp.VString('Alice'),
                mlisp.VString('Bob')]
    # Check every prefix from the empty list up to all three elements.
    for size in range(len(contents) + 1):
        result = mlisp.prim_length('length', [_make_list(contents[:size])])
        self.assertEqual(result.is_number(), True)
        self.assertEqual(result.value(), size)
def test_prim_nth(self):
    """nth must index into the list, zero-based."""
    items = [mlisp.VNumber(42), mlisp.VString('Alice'), mlisp.VString('Bob')]
    expectations = [(True, 42), (False, 'Alice'), (False, 'Bob')]
    for position, (is_num, value) in enumerate(expectations):
        result = mlisp.prim_nth('nth', [_make_list(items),
                                        mlisp.VNumber(position)])
        self.assertEqual(
            result.is_number() if is_num else result.is_string(), True)
        self.assertEqual(result.value(), value)
def test_prim_map(self):
    """map must apply a primitive across one or more argument lists."""
    def prim1(name, args):
        return args[0]
    def prim2(name, args):
        return args[1]
    pick_first = mlisp.VPrimitive('test', prim1, 1)
    pick_second = mlisp.VPrimitive('test', prim2, 2)
    first_items = [mlisp.VNumber(42), mlisp.VString('Alice'),
                   mlisp.VString('Bob')]
    second_items = [mlisp.VNumber(84), mlisp.VString('Charlie'),
                    mlisp.VString('Darlene')]
    # Unary map over an empty list is empty.
    result = _unmake_list(mlisp.prim_map('map', [pick_first,
                                                 _make_list([])]))
    self.assertEqual(len(result), 0)
    # Unary map with a first-argument primitive echoes the input list.
    result = _unmake_list(mlisp.prim_map('map', [pick_first,
                                                 _make_list(first_items)]))
    self.assertEqual(len(result), 3)
    for item, (is_num, value) in zip(result, [(True, 42), (False, 'Alice'),
                                              (False, 'Bob')]):
        self.assertEqual(
            item.is_number() if is_num else item.is_string(), True)
        self.assertEqual(item.value(), value)
    # Binary map produces nothing when either input list is empty.
    result = _unmake_list(mlisp.prim_map('map', [pick_second,
                                                 _make_list([]),
                                                 _make_list([])]))
    self.assertEqual(len(result), 0)
    result = _unmake_list(
        mlisp.prim_map('map', [pick_second,
                               _make_list([]),
                               _make_list([mlisp.VNumber(42)])]))
    self.assertEqual(len(result), 0)
    # Binary map with a second-argument primitive echoes the second list.
    result = _unmake_list(mlisp.prim_map('map', [pick_second,
                                                 _make_list(first_items),
                                                 _make_list(second_items)]))
    self.assertEqual(len(result), 3)
    for item, (is_num, value) in zip(result, [(True, 84), (False, 'Charlie'),
                                              (False, 'Darlene')]):
        self.assertEqual(
            item.is_number() if is_num else item.is_string(), True)
        self.assertEqual(item.value(), value)
def test_prim_filter(self):
    """filter must keep only elements accepted by the predicate."""
    def prim_none(name, args):
        return mlisp.VBoolean(False)
    def prim_int(name, args):
        return mlisp.VBoolean(args[0].is_number())
    reject_all = mlisp.VPrimitive('test', prim_none, 1)
    keep_numbers = mlisp.VPrimitive('test', prim_int, 1)
    sample = [mlisp.VNumber(42), mlisp.VString('Alice'),
              mlisp.VString('Bob')]
    # A predicate that rejects everything yields an empty result.
    result = _unmake_list(
        mlisp.prim_filter('filter', [reject_all, _make_list([])]))
    self.assertEqual(len(result), 0)
    result = _unmake_list(
        mlisp.prim_filter('filter', [reject_all, _make_list(sample)]))
    self.assertEqual(len(result), 0)
    # Keeping only numbers leaves just the single VNumber.
    result = _unmake_list(
        mlisp.prim_filter('filter', [keep_numbers, _make_list(sample)]))
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0].is_number(), True)
    self.assertEqual(result[0].value(), 42)
def test_prim_foldr(self):
    """foldr must combine right-to-left, ending with the base value."""
    def prim(name, args):
        return mlisp.VString(args[0].value() + '(' + args[1].value() + ')')
    combiner = mlisp.VPrimitive('test', prim, 2)
    result = mlisp.prim_foldr('foldr', [combiner, _make_list([]),
                                        mlisp.VString('base')])
    self.assertEqual(result.is_string(), True)
    self.assertEqual(result.value(), 'base')
    names = _make_list([mlisp.VString(s)
                        for s in ('Alice', 'Bob', 'Charlie')])
    result = mlisp.prim_foldr('foldr', [combiner, names,
                                        mlisp.VString('base')])
    self.assertEqual(result.is_string(), True)
    self.assertEqual(result.value(), 'Alice(Bob(Charlie(base)))')
def test_prim_foldl(self):
    """foldl must combine left-to-right, starting from the base value."""
    def prim(name, args):
        return mlisp.VString('(' + args[0].value() + ')' + args[1].value())
    combiner = mlisp.VPrimitive('test', prim, 2)
    result = mlisp.prim_foldl('foldl', [combiner, mlisp.VString('base'),
                                        _make_list([])])
    self.assertEqual(result.is_string(), True)
    self.assertEqual(result.value(), 'base')
    names = _make_list([mlisp.VString(s)
                        for s in ('Alice', 'Bob', 'Charlie')])
    result = mlisp.prim_foldl('foldl', [combiner, mlisp.VString('base'),
                                        names])
    self.assertEqual(result.is_string(), True)
    self.assertEqual(result.value(), '(((base)Alice)Bob)Charlie')
def test_prim_eqlp(self):
v = mlisp.prim_equalp('=', [mlisp.VNumber(42),
mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_equalp('=', [mlisp.VNumber(42),
mlisp.VNumber(0)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
lst = _make_list([mlisp.VNumber(42)])
v = mlisp.prim_equalp('=', [lst, lst])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_equalp('=', [lst, _make_list([mlisp.VNumber(42)])])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), True)
v = mlisp.prim_equalp('=', [lst, mlisp.VNumber(42)])
self.assertEqual(v.is_boolean(), True)
self.assertEqual(v.value(), False)
v = mlisp.prim_equalp('=', | |
_cl_map_1d(self, function, mat1, out=None):
"""Call the map_1d kernel."""
if out is None:
out = mat1
out, mat1 = self._consistify_args(1, out, mat1)
kvw = self._optimal_vector_width(
out.size, self.safe_cast_non_logical(out.dtype, mat1.dtype))
k_getter = lambda e: self.get_program(
'map_1d', MAP_FUNCTION=function,
DTYPE_OUT=out.dtype, DTYPE_IN=mat1.dtype, DTYPE_M1=mat1.dtype,
OFFSET_M1=bool(mat1.begin != 0), OFFSET_OUT=bool(out.begin != 0),
EXACT=bool(e), VECTOR_WIDTH=kvw).map_1d
kernel = k_getter(True)
call_size = _size_t(out.size/kvw)
lws = self._cl_elementwise_local_size(kernel, 1)
gws, lws = self._cl_elementwise_global_size([call_size], lws)
if gws[0] != call_size:
kernel = k_getter(False)
kernel(self.queue, gws, lws, call_size,
out.buffer, out.begin, mat1.buffer, mat1.begin)
self.queue.finish()
def _cl_map(self, function, mat1, out=None):
    """Call the 2-D ``map`` kernel: apply ``function`` elementwise.

    Parameters
    ----------
    function :
        Identifier of the map function, baked into the kernel as the
        MAP_FUNCTION macro.
    mat1 : Mat
        Input matrix.
    out : Mat, optional
        Destination matrix; defaults to ``mat1`` (in-place).
    """
    if out is None:
        out = mat1
    out, mat1 = self._consistify_args(2, out, mat1)
    # A C-ordered output swaps the two work-size axes (REVERSE_WS); the
    # lws/gws pairs are swapped to match just before launch.
    reverse_ws = out.order == 'C'
    # Kernel factory. EXACT=True is the variant used when the global
    # size exactly matches the data shape; EXACT=False is the fallback
    # after rounding up (presumably bounds-checked -- confirm against
    # the kernel source).
    k_getter = lambda e: self.get_program(
        'map', MAP_FUNCTION=function, REVERSE_WS=reverse_ws,
        DTYPE_OUT=out.dtype, DTYPE_IN=mat1.dtype, DTYPE_M1=mat1.dtype,
        EXACT=bool(e)).map
    kernel = k_getter(True)
    lws = self._cl_elementwise_local_size(kernel, 2)
    gws, lws = self._cl_elementwise_global_size(out.shape, lws)
    # Global size was rounded past the matrix shape: switch to the
    # non-EXACT kernel variant.
    if tuple(gws) != out.shape:
        kernel = k_getter(False)
    if reverse_ws:
        lws = [lws[1], lws[0]]
        gws = [gws[1], gws[0]]
    kernel(self.queue, gws, lws, out.shape0, out.shape1,
           out.buffer, out.ptr_stride0, out.ptr_stride1, out.begin,
           mat1.buffer, mat1.ptr_stride0, mat1.ptr_stride1, mat1.begin)
    # Synchronous semantics: wait for the kernel to finish.
    self.queue.finish()
def _cl_op_logical_1d(self, function, out, mat1, mat2):
    """Call the op_1d kernel with LOGICAL=True: elementwise logical /
    comparison operator over flattened 1-D operands.

    Either input may be a plain scalar instead of a :class:`Mat`.
    """
    # Record scalar-vs-matrix before argument normalisation.
    m1_is_matrix = isinstance(mat1, Mat)
    m2_is_matrix = isinstance(mat2, Mat)
    out, mat1, mat2 = self._consistify_args(1, out, mat1, mat2)
    # Logical kernels always run with vector width 1.
    kvw = 1
    k_getter = lambda e: self.get_program(
        'op_1d', OPERATOR=function, LOGICAL=True, VECTOR_WIDTH=kvw,
        DTYPE_OUT=out.dtype,
        DTYPE_M1=mat1.dtype, DTYPE_M2=mat2.dtype,
        SCALAR_M1=not m1_is_matrix, SCALAR_M2=not m2_is_matrix,
        OFFSET_OUT=bool(out.begin != 0),
        OFFSET_M1=bool(m1_is_matrix and mat1.begin != 0),
        OFFSET_M2=bool(m2_is_matrix and mat2.begin != 0),
        EXACT=bool(e)).op_1d
    kernel = k_getter(True)
    call_size = _size_t(out.size/kvw)
    lws = self._cl_elementwise_local_size(kernel, 1)
    gws, lws = self._cl_elementwise_global_size([call_size], lws)
    # Global size was rounded up: fall back to the non-EXACT variant.
    if gws[0] != call_size:
        kernel = k_getter(False)
    # Scalar operands are passed by value with a ZERO offset placeholder
    # in place of (buffer, begin).
    args = [out.buffer, out.begin]
    if m1_is_matrix:
        args += [mat1.buffer, mat1.begin]
    else:
        args += [mat1, ZERO]
    if m2_is_matrix:
        args += [mat2.buffer, mat2.begin]
    else:
        args += [mat2, ZERO]
    kernel(self.queue, gws, lws, call_size, *args)
    self.queue.finish()
def _cl_op_logical(self, function, out, mat1, mat2):
    """Call the 2-D op kernel with LOGICAL=True: elementwise logical /
    comparison operator over strided matrices.

    Either input may be a plain scalar instead of a :class:`Mat`.
    """
    out, mat1, mat2 = self._consistify_args(2, out, mat1, mat2)
    m1_is_matrix = isinstance(mat1, Mat)
    m2_is_matrix = isinstance(mat2, Mat)
    # C-ordered output swaps the work-size axes; see the swap below.
    reverse_ws = out.order == 'C'
    k_getter = lambda e: self.get_program(
        'op', OPERATOR=function, LOGICAL=True,
        DTYPE_OUT=out.dtype,
        DTYPE_M1=mat1.dtype, DTYPE_M2=mat2.dtype,
        SCALAR_M1=not m1_is_matrix, SCALAR_M2=not m2_is_matrix,
        REVERSE_WS=reverse_ws, EXACT=bool(e)).op
    kernel = k_getter(True)
    lws = self._cl_elementwise_local_size(kernel, 2)
    gws, lws = self._cl_elementwise_global_size(out.shape, lws)
    # Rounded-up global size: use the non-EXACT kernel variant.
    if tuple(gws) != out.shape:
        kernel = k_getter(False)
    if reverse_ws:
        lws = [lws[1], lws[0]]
        gws = [gws[1], gws[0]]
    # Scalars take ZERO placeholders for the stride/offset arguments.
    args = [out.buffer, out.ptr_stride0, out.ptr_stride1, out.begin]
    if m1_is_matrix:
        args += [mat1.buffer, mat1.ptr_stride0, mat1.ptr_stride1,
                 mat1.begin]
    else:
        args += [mat1, ZERO, ZERO, ZERO]
    if m2_is_matrix:
        args += [mat2.buffer, mat2.ptr_stride0, mat2.ptr_stride1,
                 mat2.begin]
    else:
        args += [mat2, ZERO, ZERO, ZERO]
    kernel(self.queue, gws, lws, out.shape0, out.shape1, *args)
    self.queue.finish()
def _cl_op_1d(self, function, out, mat1, mat2):
    """Call the op_1d kernel: elementwise binary ``function`` over
    flattened 1-D operands.

    Either input may be a plain scalar instead of a :class:`Mat`;
    scalars are passed by value with a ZERO offset placeholder.
    """
    m1_is_matrix = isinstance(mat1, Mat)
    m2_is_matrix = isinstance(mat2, Mat)
    out, mat1, mat2 = self._consistify_args(1, out, mat1, mat2)
    # Common computation dtype of the two operands. Computed once and
    # reused both for the vector-width choice and as the kernel's
    # DTYPE_IN macro (the original resolved the identical cast twice).
    dtype_in = self.safe_cast_non_logical(mat1.dtype, mat2.dtype,
                                          operator=function)
    kvw = self._optimal_vector_width(out.size, dtype_in)
    k_getter = lambda e: self.get_program(
        'op_1d', OPERATOR=function, EXACT=bool(e), VECTOR_WIDTH=kvw,
        DTYPE_OUT=out.dtype, DTYPE_IN=dtype_in,
        DTYPE_M1=mat1.dtype, DTYPE_M2=mat2.dtype,
        SCALAR_M1=not m1_is_matrix, SCALAR_M2=not m2_is_matrix,
        OFFSET_OUT=bool(out.begin != 0),
        OFFSET_M1=bool(m1_is_matrix and mat1.begin != 0),
        OFFSET_M2=bool(m2_is_matrix and mat2.begin != 0)).op_1d
    kernel = k_getter(True)
    # Each work item handles kvw elements.
    call_size = _size_t(out.size/kvw)
    lws = self._cl_elementwise_local_size(kernel, 1)
    gws, lws = self._cl_elementwise_global_size([call_size], lws)
    # Global size was rounded up: fall back to the non-EXACT variant.
    if gws[0] != call_size:
        kernel = k_getter(False)
    args = [out.buffer, out.begin]
    if m1_is_matrix:
        args += [mat1.buffer, mat1.begin]
    else:
        args += [mat1, ZERO]
    if m2_is_matrix:
        args += [mat2.buffer, mat2.begin]
    else:
        args += [mat2, ZERO]
    kernel(self.queue, gws, lws, call_size, *args)
    self.queue.finish()
def _cl_op(self, function, out, mat1, mat2):
    """Call the 2-D op kernel: elementwise binary ``function`` over
    strided matrices.

    Either input may be a plain scalar instead of a :class:`Mat`.
    """
    out, mat1, mat2 = self._consistify_args(2, out, mat1, mat2)
    m1_is_matrix = isinstance(mat1, Mat)
    m2_is_matrix = isinstance(mat2, Mat)
    # DTYPE_IN is the common cast of the two operand dtypes.
    dtype_in = self.safe_cast_non_logical(mat1.dtype, mat2.dtype,
                                          operator=function)
    # C-ordered output swaps the work-size axes; see the swap below.
    reverse_ws = out.order == 'C'
    k_getter = lambda e: self.get_program(
        'op', OPERATOR=function,
        DTYPE_OUT=out.dtype, DTYPE_IN=dtype_in,
        DTYPE_M1=mat1.dtype, DTYPE_M2=mat2.dtype,
        SCALAR_M1=not m1_is_matrix, SCALAR_M2=not m2_is_matrix,
        REVERSE_WS=reverse_ws, EXACT=bool(e)).op
    kernel = k_getter(True)
    lws = self._cl_elementwise_local_size(kernel, 2)
    gws, lws = self._cl_elementwise_global_size(out.shape, lws)
    # Rounded-up global size: use the non-EXACT kernel variant.
    if tuple(gws) != out.shape:
        kernel = k_getter(False)
    if reverse_ws:
        lws = [lws[1], lws[0]]
        gws = [gws[1], gws[0]]
    # Scalars take ZERO placeholders for the stride/offset arguments.
    args = [out.buffer, out.ptr_stride0, out.ptr_stride1, out.begin]
    if m1_is_matrix:
        args += [mat1.buffer, mat1.ptr_stride0, mat1.ptr_stride1,
                 mat1.begin]
    else:
        args += [mat1, ZERO, ZERO, ZERO]
    if m2_is_matrix:
        args += [mat2.buffer, mat2.ptr_stride0, mat2.ptr_stride1,
                 mat2.begin]
    else:
        args += [mat2, ZERO, ZERO, ZERO]
    kernel(self.queue, gws, lws, out.shape0, out.shape1, *args)
    self.queue.finish()
def _cl_iop_1d(self, function, out, mat1):
    """Call the iop_1d kernel: in-place elementwise ``out <op>= mat1``
    over flattened 1-D data.

    ``mat1`` may be a plain scalar instead of a :class:`Mat`.

    Raises
    ------
    ValueError
        If ``out`` is owned by a different computer instance.
    """
    # In-place operations must target a Mat owned by this computer.
    if out.computer is not self:
        raise ValueError('out is not using this computer.')
    out, mat1 = self._consistify_args(1, out, mat1)
    m1_is_matrix = isinstance(mat1, Mat)
    kvw = self._optimal_vector_width(
        out.size, self.safe_cast_non_logical(out.dtype, mat1.dtype))
    k_getter = lambda e: self.get_program(
        'iop_1d', OPERATOR=function, EXACT=bool(e), VECTOR_WIDTH=kvw,
        DTYPE_OUT=out.dtype, DTYPE_M1=mat1.dtype,
        OFFSET_OUT=bool(out.begin != 0),
        OFFSET_M1=bool(m1_is_matrix and mat1.begin != 0),
        SCALAR_M1=not m1_is_matrix).iop_1d
    kernel = k_getter(True)
    # Each work item handles kvw elements.
    call_size = _size_t(out.size/kvw)
    lws = self._cl_elementwise_local_size(kernel, 1)
    gws, lws = self._cl_elementwise_global_size([call_size], lws)
    # Global size was rounded up: fall back to the non-EXACT variant.
    if gws[0] != call_size:
        kernel = k_getter(False)
    # Scalars are passed by value with a ZERO offset placeholder.
    args = [out.buffer, out.begin]
    if m1_is_matrix:
        args += [mat1.buffer, mat1.begin]
    else:
        args += [mat1, ZERO]
    kernel(self.queue, gws, lws, call_size, *args)
    self.queue.finish()
def _cl_iop(self, function, out, mat1):
    """Call the 2-D iop kernel: in-place elementwise ``out <op>= mat1``.

    ``mat1`` may be a plain scalar instead of a :class:`Mat`.
    """
    out, mat1 = self._consistify_args(2, out, mat1)
    m1_is_matrix = isinstance(mat1, Mat)
    # C-ordered output swaps the work-size axes; see the swap below.
    reverse_ws = out.order == 'C'
    k_getter = lambda e: self.get_program(
        'iop', OPERATOR=function, DTYPE_OUT=out.dtype, DTYPE_M1=mat1.dtype,
        SCALAR_M1=not m1_is_matrix, REVERSE_WS=reverse_ws,
        EXACT=bool(e)).iop
    kernel = k_getter(True)
    lws = self._cl_elementwise_local_size(kernel, 2)
    gws, lws = self._cl_elementwise_global_size(out.shape, lws)
    # Rounded-up global size: use the non-EXACT kernel variant.
    if tuple(gws) != out.shape:
        kernel = k_getter(False)
    if reverse_ws:
        lws = [lws[1], lws[0]]
        gws = [gws[1], gws[0]]
    args = [out.buffer, out.ptr_stride0, out.ptr_stride1, out.begin]
    # Reuse the m1_is_matrix flag computed above: the original repeated
    # the isinstance() check here, unlike the sibling _cl_op method.
    if m1_is_matrix:
        args += [mat1.buffer, mat1.ptr_stride0, mat1.ptr_stride1,
                 mat1.begin]
    else:
        args += [mat1, ZERO, ZERO, ZERO]
    kernel(self.queue, gws, lws, out.shape0, out.shape1, *args)
    self.queue.finish()
def _cl_reduce_1d(self, function, mat1):
    """Call the reduce_1d kernel and return the scalar result.

    ``function`` names the reduction (looked up in REDUCTION_ENUM);
    'any'/'all' reduce to BOOL_TYPE, all others keep mat1's dtype.
    """
    mat1, = self._consistify_args(1, mat1)
    out_dtype = BOOL_TYPE if function in ['any', 'all'] else mat1.dtype
    kernel = self.get_program(
        'reduce_1d', REDUCTION=REDUCTION_ENUM[function],
        DTYPE_OUT=out_dtype, DTYPE_M1=mat1.dtype).reduce_1d
    # Disabled multi-block path (see TODO): when enabled it would first
    # reduce the input into per-block partials in parallel, then the
    # final call below would combine the partials.
    if False: # TODO function in ['min', 'max', 'sum', 'prod']:
        num_blocks = _size_t(4*self.device.max_compute_units)
        gws = [int(num_blocks)]
        lws = [4]
        partial_buffer = cl.Buffer(self.context, cl.mem_flags.READ_WRITE,
                                   size=num_blocks*out_dtype.itemsize)
        block_size = mat1.size//num_blocks
        kernel(self.queue, gws, lws, ONE, partial_buffer, ZERO,
               block_size, mat1.size, mat1.buffer, mat1.begin)
    else:
        # Active path: a single work item reduces the whole input.
        num_blocks = mat1.size
        partial_buffer = mat1.buffer
        gws = [1]
        lws = [1]
    out_buffer = cl.Buffer(self.context, cl.mem_flags.READ_WRITE,
                           size=out_dtype.itemsize)
    # NOTE(review): the input offset passed here is ZERO, not mat1.begin;
    # verify that mat1.begin is always 0 after _consistify_args(1, ...).
    kernel(self.queue, gws, lws, ONE, out_buffer, ZERO,
           num_blocks, num_blocks, partial_buffer, ZERO)
    # Copy the single-element result back to the host.
    result = np.empty((1, ), dtype=out_dtype)
    self._cl_buffer_copy(result, out_buffer)
    self.queue.finish()
    return result[0]
def _cl_reduce_2d(self, function, out, mat1, axis):
    """Call the reduce_2d kernel: reduce ``mat1`` along ``axis`` into
    ``out``, whose extent along ``axis`` must be 1.

    Raises
    ------
    ValueError
        If out's axis dimension is not 1, or the non-axis dimensions of
        out and mat1 do not match.
    """
    out, mat1 = self._consistify_args(-1, out, mat1)
    if out.shape[axis] != 1:
        raise ValueError('The output axis dimension must be 1.')
    if out.shape[1-axis] != mat1.shape[1-axis]:
        raise ValueError('The output-input non-axis dimensions mismatch.')
    kernel = self.get_program(
        'reduce_2d', REDUCTION=REDUCTION_ENUM[function], AXIS=axis,
        DTYPE_OUT=out.dtype, DTYPE_M1=mat1.dtype).reduce_2d
    gws = list(map(int, out.shape))
    # Local-size heuristic: trivial 1x1 work groups on CPU devices;
    # otherwise size one axis from the device's preferred work-group
    # multiple.
    lws = [1, 1]
    if self.device.type == CPU:
        pass
    elif axis == 0 and mat1.c_contiguous:
        lws[1] = self.preferred_work_group_size_multiple(kernel)
    else:
        lws[0] = self.device.max_work_group_size // \
            self.preferred_work_group_size_multiple(kernel)
    # Round the global size up to a multiple of the local size.
    gws[0] = lws[0] * int(np.ceil(gws[0]/lws[0]))
    gws[1] = lws[1] * int(np.ceil(gws[1]/lws[1]))
    kernel(self.queue, gws, lws,
           out.buffer, out.ptr_stride0, out.ptr_stride1, out.begin,
           mat1.shape0, mat1.shape1,
           mat1.buffer, mat1.ptr_stride0, mat1.ptr_stride1, mat1.begin)
    self.queue.finish()
def _cl_f(self, function, out, *args):
    """Call the f kernel: apply ``function`` elementwise over up to
    three input operands, writing the result into ``out``.

    Raises
    ------
    TypeError
        If more than three inputs are supplied.
    """
    if len(args) > 3:
        raise TypeError('f only support up to 3 input arguments'
                        ' (was given %d).' % len(args))
    cons_args = self._consistify_args(2, out, *args)
    # Build per-operand kernel macros (dtype, presence, scalar flag);
    # cons_args[0] is out, the inputs start at index 1.
    gkargs = {}
    dtype_in = cons_args[1].dtype
    for args_i in range(1, len(cons_args)):
        m_arg = cons_args[args_i]
        gkargs['DTYPE_M%d' % args_i] = m_arg.dtype
        if args_i != 1:
            gkargs['M%d' % args_i] = True
        gkargs['SCALAR_M%d' % args_i] = bool(not isinstance(m_arg, Mat))
        if args_i > 1:
            # DTYPE_IN is the common cast of all input dtypes.
            dtype_in = self.safe_cast_non_logical(dtype_in, m_arg.dtype)
    # C-ordered output swaps the work-size axes; see the swap below.
    reverse_ws = out.order == 'C'
    k_getter = lambda e: self.get_program(
        'f', EXACT=bool(e), MAP_FUNCTION=function, REVERSE_WS=reverse_ws,
        DTYPE_OUT=out.dtype, DTYPE_IN=dtype_in,
        **gkargs).f
    kernel = k_getter(True)
    lws = self._cl_elementwise_local_size(kernel, 2)
    gws, lws = self._cl_elementwise_global_size(out.shape, lws)
    # Rounded-up global size: use the non-EXACT kernel variant.
    if tuple(gws) != out.shape:
        kernel = k_getter(False)
    if reverse_ws:
        lws = [lws[1], lws[0]]
        gws = [gws[1], gws[0]]
    # Always pass four operand slots (out + 3 inputs); unused slots are
    # padded with ZERO placeholders, as are scalar operands' strides.
    kargs = []
    for args_i in range(4): # Includes out
        if args_i < len(cons_args):
            m_arg = cons_args[args_i]
        else:
            m_arg = ZERO
        if isinstance(m_arg, Mat):
            kargs += [m_arg.buffer,
                      m_arg.ptr_stride0, m_arg.ptr_stride1, m_arg.begin]
        else:
            kargs += [m_arg, ZERO, ZERO, ZERO]
    kernel(self.queue, gws, lws, out.shape0, out.shape1, *kargs)
    self.queue.finish()
def _cl_mmult(self, out, mat1, mat2):
"""Call the mmult kernel"""
out, mat1, mat2 = self._consistify_args(-1, out, mat1, mat2)
if mat1.shape1 != mat2.shape0:
raise ValueError('m1 and m2 have inconsistent dimensions: %s %s' %
(mat1.shape, mat2.shape))
if out.shape0 != mat1.shape0:
raise ValueError('out and m1 have inconsistent dimensions: %s %s' %
(out.shape, mat1.shape))
if out.shape1 != mat2.shape1:
raise ValueError('out and m2 have inconsistent dimensions: %s %s' %
(out.shape, mat2.shape))
block_size = self._mmult_preferred_block_size
m1_mod = mat1.shape0 % block_size
m2_mod = mat2.shape1 % | |
# Copyright (c) 2016, Science and Technology Facilities Council
# This software is distributed under a BSD licence. See LICENSE.txt.
"""
mrcobject
---------
Module which exports the :class:`MrcObject` class.
Classes:
:class:`MrcObject`: An object representing image or volume data in the MRC
format.
"""
# Import Python 3 features for future-proofing
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import datetime
import numpy as np
from . import utils
from .dtypes import HEADER_DTYPE, VOXEL_SIZE_DTYPE
from .constants import (MAP_ID, MRC_FORMAT_VERSION, IMAGE_STACK_SPACEGROUP,
VOLUME_SPACEGROUP, VOLUME_STACK_SPACEGROUP)
class MrcObject(object):
"""An object representing image or volume data in the MRC format.
The header, extended header and data are stored as numpy arrays and
exposed as read-only attributes. To replace the data or extended header,
call :meth:`set_data` or :meth:`set_extended_header`. The header cannot be
replaced but can be modified in place.
Voxel size is exposed as a writeable attribute, but is calculated
on-the-fly from the header's ``cella`` and ``mx``/``my``/``mz`` fields.
Three-dimensional data can represent either a stack of 2D images, or a 3D
volume. This is indicated by the header's ``ispg`` (space group) field,
which is set to 0 for image data and >= 1 for volume data. The
:meth:`is_single_image`, :meth:`is_image_stack`, :meth:`is_volume` and
:meth:`is_volume_stack` methods can be used to identify the type of
information stored in the data array. For 3D data, the
:meth:`set_image_stack` and :meth:`set_volume` methods can be used to
switch between image stack and volume interpretations of the data.
If the data contents have been changed, you can use the
:meth:`update_header_from_data` and :meth:`update_header_stats` methods to
make the header consistent with the data. These methods are called
automatically if the data array is replaced by calling :meth:`set_data`.
:meth:`update_header_from_data` is fast, even with very large data arrays,
because it only examines the shape and type of the data array.
:meth:`update_header_stats` calculates statistics from all items in the
data array and so can be slow for very large arrays. If necessary, the
:meth:`reset_header_stats` method can be called to set the header fields to
indicate that the statistics are undetermined.
Attributes:
* :attr:`header`
* :attr:`extended_header`
* :attr:`data`
* :attr:`voxel_size`
Methods:
* :meth:`set_extended_header`
* :meth:`set_data`
* :meth:`is_single_image`
* :meth:`is_image_stack`
* :meth:`is_volume`
* :meth:`is_volume_stack`
* :meth:`set_image_stack`
* :meth:`set_volume`
* :meth:`update_header_from_data`
* :meth:`update_header_stats`
* :meth:`reset_header_stats`
* :meth:`print_header`
Attributes and methods relevant to subclasses:
* ``_read_only``
* :meth:`_check_writeable`
* :meth:`_create_default_attributes`
* :meth:`_close_data`
* :meth:`_set_new_data`
"""
def __init__(self, **kwargs):
    """Initialise a new, empty :class:`MrcObject`.

    Deliberately creates no arrays: the header, extended header and data
    attributes all start as :data:`None`, so that subclasses can call
    this at the start of their own initialisers and then populate the
    attributes themselves -- typically by reading from a file, or via
    :meth:`_create_default_attributes` for a new empty object.

    Note that this behaviour might change in future: this initialiser
    could take optional arguments for the header and data, or create
    standard empty defaults instead of :data:`None`.
    """
    super(MrcObject, self).__init__(**kwargs)
    # Empty defaults; subclasses are expected to fill these in.
    for attribute in ('_header', '_extended_header', '_data'):
        setattr(self, attribute, None)
    self._read_only = False
def _check_writeable(self):
    """Raise :exc:`ValueError` if this MRC object is read-only."""
    if not self._read_only:
        return
    raise ValueError('MRC object is read-only')
def _create_default_attributes(self):
    """Populate the header, extended header and data with valid defaults."""
    self._create_default_header()
    # Zero-length extended header (raw void bytes) and zero-length
    # int8 data array.
    self._extended_header = np.empty(0, dtype='V1')
    self._set_new_data(np.empty(0, dtype=np.int8))
def _create_default_header(self):
    """Create a default MRC file header.

    The header is initialised with standard file type and version
    information, default values for some essential fields, and zeros
    elsewhere. The first text label is also set to indicate the file was
    created by this module, and the statistics fields are reset to the
    "undetermined" state.
    """
    # Zero-filled record with the standard MRC header layout, exposed as
    # a recarray so fields can be accessed as attributes.
    self._header = np.zeros(shape=(), dtype=HEADER_DTYPE).view(np.recarray)
    header = self._header
    header.map = MAP_ID
    header.nversion = MRC_FORMAT_VERSION
    # Machine stamp derived from the header's own byte order.
    header.machst = utils.machine_stamp_from_byte_order(header.mode.dtype.byteorder)
    # Default space group is P1
    header.ispg = VOLUME_SPACEGROUP
    # Standard cell angles all 90.0 degrees
    default_cell_angle = 90.0
    header.cellb.alpha = default_cell_angle
    header.cellb.beta = default_cell_angle
    header.cellb.gamma = default_cell_angle
    # (this can also be achieved by assigning a 3-tuple to header.cellb
    # directly but using the sub-fields individually is easier to read and
    # understand)
    # Standard axes: columns = X, rows = Y, sections = Z
    header.mapc = 1
    header.mapr = 2
    header.maps = 3
    # First label records the creator and a creation timestamp.
    time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    header.label[0] = '{0:40s}{1:>39s} '.format('Created by mrcfile.py',
                                                time)
    header.nlabl = 1
    self.reset_header_stats()
@property
def header(self):
    """The MRC header, as a :class:`numpy record array <numpy.recarray>`."""
    return self._header
@property
def extended_header(self):
    """The extended header, as a :class:`numpy array <numpy.ndarray>`.

    When this :class:`MrcObject` was read from a file and the extended
    header type was recognised (currently only ``'FEI1'``), the array's
    dtype is set accordingly; otherwise it is raw void data (dtype
    ``'V'``), and may be re-interpreted by assigning a more specific
    dtype if the actual layout is known.

    The array can be modified in place; to replace it completely, call
    :meth:`set_extended_header`.
    """
    return self._extended_header
def set_extended_header(self, extended_header):
    """Replace the extended header and update ``header.nsymbt``.

    If you set the extended header you should also set the
    ``header.exttyp`` field to indicate the type of extended header.
    """
    self._check_writeable()
    self._extended_header = extended_header
    # Keep the main header's extended-header byte count in sync.
    self.header.nsymbt = extended_header.nbytes
@property
def data(self):
    """The data block, as a :class:`numpy array <numpy.ndarray>`."""
    return self._data
def set_data(self, data):
    """Replace the data array.

    Installs the given array (or a copy of it) as the data, then updates
    the header dimensions and the data statistics (min, max, mean and
    rms) to match.
    """
    self._check_writeable()
    # Validate the dtype by mapping it to an MRC mode and back, keeping
    # the incoming byte order.
    dtype = (utils.dtype_from_mode(utils.mode_from_dtype(data.dtype))
             .newbyteorder(data.dtype.byteorder))
    # asanyarray only copies when the dtype or C ordering requires it.
    replacement = np.asanyarray(data, dtype, order='C')
    # Swap in the new array, then bring the header back in sync.
    self._close_data()
    self._set_new_data(replacement)
    self.update_header_from_data()
    self.update_header_stats()
def _close_data(self):
    """Release the current data array (subclass hook for cleanup)."""
    self._data = None
def _set_new_data(self, data):
    """Install ``data`` as the data array, without validation.

    The caller must ensure the array is already valid for an MRC file.
    """
    self._data = data
@property
def voxel_size(self):
    """Get or set the voxel size in angstroms.

    The voxel size is returned as a structured :class:`numpy record array
    <numpy.recarray>` with three fields (x, y and z). Note that changing
    the voxel_size array in-place will *not* change the voxel size in the
    file -- to prevent this being overlooked accidentally, the writeable
    flag is set to False on the voxel_size array.

    To set the voxel size, assign a new value to the voxel_size attribute.
    You may give a single number, a 3-tuple ``(x, y ,z)`` or a modified
    version of the voxel_size array. The following examples are all
    equivalent:

    >>> mrc.voxel_size = 1.0

    >>> mrc.voxel_size = (1.0, 1.0, 1.0)

    >>> vox_sizes = mrc.voxel_size
    >>> vox_sizes.flags.writeable = True
    >>> vox_sizes.x = 1.0
    >>> vox_sizes.y = 1.0
    >>> vox_sizes.z = 1.0
    >>> mrc.voxel_size = vox_sizes
    """
    # Voxel size is derived on the fly: cell dimension divided by the
    # number of intervals along each axis.
    x = self.header.cella.x / self.header.mx
    y = self.header.cella.y / self.header.my
    z = self.header.cella.z / self.header.mz
    sizes = np.rec.array((x, y, z), VOXEL_SIZE_DTYPE)
    # Lock the array so in-place edits (which would be silently ignored
    # by the file) fail loudly.
    sizes.flags.writeable = False
    return sizes
@voxel_size.setter
def voxel_size(self, voxel_size):
    # Accepts a scalar, a record array (as produced by the getter), or a
    # plain 3-sequence; see the property docstring for examples.
    self._check_writeable()
    try:
        # First, assume we have a single numeric value
        sizes = (float(voxel_size),) * 3
    except TypeError:
        try:
            # Not a single value. Next, if voxel_size is an array (as
            # produced by the voxel_size getter), item() gives a 3-tuple
            sizes = voxel_size.item()
        except AttributeError:
            # If the item() method doesn't exist, assume we have a 3-tuple
            sizes = voxel_size
    self._set_voxel_size(*sizes)
def _set_voxel_size(self, x_size, y_size, z_size):
"""Set the voxel size.
Args:
x_size: The voxel size in the X direction, in angstroms
y_size: The voxel size in the Y direction, in angstroms
z_size: | |
None:
self.xy = np.delete(self.xy, nodes, axis=0)
if return_indices is True:
return np.array(nodes)
def to_df(
    self,
    **kwargs
) -> pd.DataFrame:
    """Return the nodes as a pandas DataFrame.

    Parameters
    ----------
    **kwargs : keyword arguments, optional
        Forwarded to the pd.DataFrame constructor.

    Returns
    -------
    pandas.DataFrame
        One row per node with its label (str), zone flag (bool) and,
        when coordinates exist, x and y columns.
    """
    frame = pd.DataFrame({"label": self.labels, "zone": self.zone}, **kwargs)
    frame["label"] = frame["label"].astype(str)
    frame["zone"] = frame["zone"].astype(bool)
    # Coordinates are optional; only add x/y columns when present.
    if self.xy is not None:
        frame[["x", "y"]] = self.xy.astype(self.dtype_float)
    return frame
def add_to_etree(
    self,
    root: et.Element,
    overwrite: bool = True
):
    """Add node data to an xml.etree.ElementTree.Element.

    Parameters
    ----------
    root : xml.etree.ElementTree.Element
        Element to which 'nodes' will be appended to.
    overwrite : bool, default=True
        If True, an existing 'nodes' Element in root will be deleted.
        If False, node data will be appended to the existing data.

    Returns
    -------
    xml.etree.ElementTree.Element
        ``root``, with the node data attached.
    """
    nodes = root.find("nodes")
    if overwrite is True and nodes is not None:
        root.remove(nodes)
        nodes = root.find("nodes")
    if nodes is None:
        nodes = et.SubElement(root, "nodes")
    # Fix: resolve the 'nodes' element once up front instead of calling
    # root.find("nodes") again on every loop iteration (O(n) per node).
    for node in self.to_df().T.to_dict().values():
        n_node = et.SubElement(nodes, 'node')
        n_node.attrib['node'] = node['label']
        # Coordinates are optional columns in the DataFrame.
        if 'x' in node:
            n_node.attrib['x'] = str(node['x'])
            n_node.attrib['y'] = str(node['y'])
        # Only zone nodes carry the attribute; absence means "false".
        if node['zone'] is True:
            n_node.attrib['zone'] = "true"
    return root
@classmethod
def from_edges(
    cls,
    edges: Edges,
    **kw):
    # Deduplicate label/index pairs, keeping the last index seen for
    # each label, then order the labels by their node index.
    mapping = dict(zip(edges.labels.ravel(), edges.indices.ravel()))
    labels = np.array(list(mapping.keys()))
    indices = np.array(list(mapping.values()))
    # The indices must form a complete 0..m-1 range.
    if not np.array_equal(np.sort(indices), np.arange(len(indices))):
        raise ValueError("Invalid edge indices.")
    return cls(labels[np.argsort(indices)], **kw)
@classmethod
def from_xml(
    cls,
    data,
    return_data: bool = False,
    **kwargs
):
    # Normalise the input (path / file / element) to the XML root.
    data = xml_find_root(data)
    nodes = data.find("nodes")
    # No 'nodes' element at all: nothing to build.
    if nodes is None:
        return None
    lbl = []
    xy = []
    zone = []
    for n in nodes:
        lbl.append(n.get('node'))
        # Missing coordinates fall back to 0.0.
        # NOTE(review): the default is passed to parse_number as a float,
        # not a string -- confirm parse_number accepts both.
        x = parse_number(n.get('x', 0.0))
        y = parse_number(n.get('y', 0.0))
        xy.append([x, y])
        # Any casing/whitespace variant of "true" marks a zone node.
        zone.append(n.get('zone', 'false').strip().lower() == 'true')
    if return_data is True:
        # Return the raw parsed lists instead of constructing an object.
        return (lbl, xy, zone)
    return cls((lbl, xy, zone), **kwargs)
from_xml.__func__.__doc__ = _doc.from_xml.__doc__
def make_save_dict(self, prefix: str = "", save_dict=None) -> dict:
    # Collect the node arrays under (optionally prefixed) keys, merging
    # into the supplied dict when one is given.
    if save_dict is None:
        save_dict = {}
    save_dict.update(
        (prefix + key, getattr(self, key)) for key in ("labels", "zone", "xy"))
    return save_dict
make_save_dict.__doc__ = _doc.make_save_dict.__doc__
def save_to_numpy(
    self,
    file: str,
    **kwargs
) -> None:
    # Gather the node arrays, let caller-supplied entries override or
    # extend them, and write everything into one .npz archive.
    archive = self.make_save_dict()
    archive.update(kwargs)
    np.savez(file, **archive)
save_to_numpy.__doc__ = _doc.save_to_numpy.__doc__
@classmethod
def from_npz(
    cls,
    data,
    prefix: str = "",
    **kwargs,
):
    # Accept either a file path (loaded here) or an already-open
    # npz-style mapping.
    if isinstance(data, str):
        data = np.load(data)
    # Assemble the (labels, xy, zone) triple expected by the constructor.
    node_data = tuple(data[prefix + key] for key in ("labels", "xy", "zone"))
    return cls(node_data, **kwargs)
from_npz.__func__.__doc__ = _doc.from_npz.__doc__
def _get_node(self, idx):
    # Bundle one node's fields into a dict keyed by attribute name.
    # NOTE(review): this attribute list looks inconsistent with the rest
    # of the class -- "index" and "node" are not obviously defined (the
    # class exposes "indices" and "labels"), and "z" may be a typo for
    # "y". Confirm against callers before relying on this helper.
    return {att: getattr(self, att)[idx]
            for att in ["index", "node", "x", "z", "zone"]}
@property
def indices(self) -> np.ndarray:
    """ndarray (m, ) of int: positional index of every node."""
    count = len(self.labels)
    return np.arange(count, dtype=self.dtype_int)
@property
def has_zones(self) -> bool:
    """bool: Whether at least one node is flagged as a zone."""
    return self.zone.any()
@property
def x(self) -> np.ndarray:
    """ndarray (m, ) of floats: X coordinate of every node."""
    coords = self.xy
    if coords is None:
        raise AttributeError("Nodes has no coordinates.")
    return coords[:, 0]
@property
def y(self) -> np.ndarray:
    """ndarray (m, ) of floats: Y coordinate of every node."""
    coords = self.xy
    if coords is None:
        raise AttributeError("Nodes has no coordinates.")
    return coords[:, 1]
class Shared:
"""Class that acts as a shared object for a Network.
Consists mainly of nodes and edges object, handles label and index
mappings.
Parameters
----------
edge_data : tuple of ndarray
Data to construct Edges object.
node_data : array_like or tuple of ndarray, optional
Data to construct Nodes object.
dtype_float : dtype, default=numpy.float_
Datatype for all float ndarray.
dtype_int : dtype, default=int
Datatype for all int ndarray.
See Also
--------
Edges
Nodes
"""
def __init__(
self,
edge_data,
node_data=None,
dtype_float=np.float_,
dtype_int=int,
**kwargs
) -> None:
if node_data is None:
self.edges = Edges(edge_data,
dtype_float=dtype_float,
dtype_int=dtype_int,
**kwargs)
self.nodes = Nodes.from_edges(self.edges,
dtype_float=dtype_float,
dtype_int=dtype_int)
else:
self.nodes = Nodes(node_data,
dtype_float=dtype_float,
dtype_int=dtype_int)
self.edges = Edges(edge_data,
map_labels_to_indices=self.nodes.lbl2id,
map_indices_to_labels=self.nodes.id2lbl,
dtype_float=dtype_float,
dtype_int=dtype_int,
**kwargs)
self._update_edges()
self.cache = Cache()
def __eq__(self, other) -> bool:
for att in ["edges", "nodes"]:
if getattr(self, att) != getattr(other, att):
return False
return True
    def update(self):
        """Update internal node and edge mappings.

        Rebuilds the node label <-> index mappings first, then re-derives
        the edge index mappings from them.
        """
        self._update_nodes()
        self._update_edges()
    def reset_cache(self, hard: bool = False) -> None:
        """Reset the internal cache (e.g. cached incidence matrices).

        Parameters
        ----------
        hard : bool, default=False
            Currently ignored; the cache is always fully reset.
        """
        # NOTE(review): ``hard`` is accepted but never used -- confirm intent.
        self.cache.reset()
    def _update_nodes(self):
        """Rebuild the node label <-> index mappings."""
        self.nodes.set_mappings()
    def _update_edges(self):
        """Re-derive edge index data from the current node mappings."""
        self._set_edge_indices()
    def _set_edge_indices(self) -> None:
        """Map edge node labels to node ids, then rebuild the
        (source_id, target_id) -> edge_id mapping."""
        self.edges.map_labels(self.node2id)
        self._set_edge_id_mapping()
def _set_edge_id_mapping(self) -> None:
# create mapping (nodeid, nodeid) -> edgeid
k_id = [tuple(row) for row in self.edges.indices]
self._nodes2edge = dict(zip(k_id, range(len(k_id))))
def _get_dtypes(
self,
dtype_int=None,
dtype_float=None
) -> tuple:
if dtype_int is None:
dtype_int = self.dtype_int
if dtype_float is None:
dtype_float = self.dtype_float
return (dtype_int, dtype_float)
def delete_edges(
self,
edges,
update: bool = True,
**kwargs
):
"""Delete edge(s) from Edges object.
Parameters
----------
edges : int or ndarray of ints
Indices to delete.
update : bool, default=True
Whether to reset mapping of node labels to node ids.
return_indices, default=False
If True, edge indices of deleted edges are returned.
Returns
-------
ndarray, optional
If return_indices is True, edge indices of deleted nodes are
returned.
See Also
--------
numpy.delete
"""
self.cache.set_invalid("gamma", "gamma_T")
ret = self.edges._delete_edges(edges, **kwargs)
if update is True:
self._update_edges()
return ret
def delete_nodes(
self,
nodes,
update: bool = True,
is_label: bool = True,
**kwargs
):
"""Delete node(s) from Nodes object.
Parameters
----------
nodes : int, array of ints, str, or array of str
Indices or labels of nodes to delete.
update : bool, default=True
Whether to reset mapping of node labels to node ids.
is_label : bool, default=True
Whether to delete by label or internal node index.
return_indices : bool, default=False
If True, node indices of deleted nodes are returned.
Returns
-------
ndarray, optional
If return_indices is True, node indices of deleted nodes are
returned.
See Also
--------
numpy.delete
"""
if is_label is True:
nodes = self.get_node_id(nodes, vectorize=True)
ret = self.nodes.delete_nodes(nodes, **kwargs)
if update is True:
self._update_nodes()
return ret
def delete_nodes_in_edges(
self,
nodes,
update: bool = True,
is_label: bool = False,
**kwargs
):
"""Delete node(s) from Edges object.
An edge is deleted if either source or target id equals that of
a node in nodes.
Parameters
----------
nodes : int, array of ints, str, or array of str
Nodes to delete.
return_indices : bool, default=False
If True, edge indices of deleted edges are returned.
Returns
-------
ndarray, optional
If return_indices is True, edge indices of deleted edges
are returned.
"""
self.cache.set_invalid("gamma", "gamma_T")
if is_label is True:
nodes = self.get_node_id(nodes, vectorize=True)
ret = self.edges._delete_nodes(nodes, **kwargs)
if update is True:
self._update_edges()
return ret
def incidence_matrix(self, *args, **kwargs) -> sps.spmatrix:
"""Alias for :func:`Shared.Gamma`."""
return self.Gamma(*args, **kwargs)
def Gamma(
self,
return_as: str = 'csr',
transpose: bool = False,
) -> sps.spmatrix:
"""Return the incidence matrix Gamma of the network.
Gamma is of shape (m, n) and is defined as::
Gamma[v, e] = 1 if edge e enters vertex v,
Gamma[v, e] = -1 if edge e leaves vertex v,
Gamma[v, e] = 0 otherwise.
Parameters
----------
return_as : str, default='csr'
Sparse matrix type to be returned.
transpose : bool, default=True
Whether to transpose Gamma matrix.
Returns
-------
Gamma : spmatrix
Incidence matrix of the network.
See Also
--------
scipy.sparse
References
----------
https://en.wikipedia.org/wiki/Incidence_matrix
Examples
--------
Sioux-Falls:
>>> import paminco
>>> net = paminco.net.load_sioux()
>>> net.Gamma().toarray()[:5, :5]
array([[-1, -1, 1, 0, 1],
[ 1, 0, -1, -1, 0],
[ 0, 1, 0, 0, -1],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0]])
"""
# Rebuild gamma if neccessary
if self.cache.is_valid("gamma") is False:
i = self.edges.indices.T.ravel()
j = np.hstack([np.array(range(self.m))] * 2)
vals = np.hstack(([-1] * self.m, [1] * self.m))
coo = sps.coo_matrix((vals, (i, j)), shape=(self.n, self.m))
# Cache gamma and transpose
gamma = sparse_format(coo, return_as)
self.cache["gamma"] = gamma
self.cache["gamma_T"] = gamma.T.tocsr()
if transpose:
return sparse_format(self.cache["gamma_T"], return_as)
return sparse_format(self.cache["gamma"], return_as)
def adjacency_matrix(self, *args, **kw) -> sps.csr_matrix:
"""Alias for :func:`~Shared.csgraph`."""
return self.csgraph(*args, **kw)
def csgraph(
self,
weight=None,
respect_bounds: bool = True,
backward_positive: bool = False,
dtype=None,
) -> sps.csr_matrix:
"""Get the compressed sparse graph, shape (n, n).
A network/graph with n nodes can be represented by an node to
node adjacency matrix H. If there | |
# Repository: tahmadvand/recipe_app_api (dataset artifact "<reponame>" marker replaced)
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
# use that for making our API requests
from core.models import Recipe, Tag, Ingredient
from ..serializers import RecipeSerializer, RecipeDetailSerializer
import tempfile
# allows you to call a function which will then create a temp file
# somewhere in the system and then you can remove that file after
# you've used it
import os
# this allows us to perform things like
# creating path names and also checking if files exist on the system
from PIL import Image
# pillow, this will import our image class which will let us then
# create test images which we can then upload to our API
RECIPES_URL = reverse('recipe:recipe-list')
# since we're going to need to access the URL in more
# or less all the tests let's assign that as a variable
# at top of the class in all capitals.
# app : identifier of the URL in the app
# /api/recipe/recipes
# /api/recipe/recipes/1/ (id) --> detail url
def image_upload_url(recipe_id):
    """Return the image-upload URL for an existing recipe.

    An image can only be uploaded to a recipe that already exists, so
    the recipe id is required to build the URL.
    """
    return reverse('recipe:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
    """Return the detail URL for a single recipe.

    The default router names the detail action 'recipe-detail'; the
    recipe pk is passed through reverse()'s ``args`` list.
    """
    return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main course'):
    """Create and return a sample Tag owned by *user*."""
    return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample Ingredient owned by *user*."""
    return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
    """Create and return a sample recipe.

    Any keyword in *params* overrides the corresponding default field.
    """
    fields = {
        'title': 'Sample recipe',
        'time_minutes': 10,
        'price': 5.00,
    }
    fields.update(params)
    # ** unpacks the merged dict back into keyword arguments.
    return Recipe.objects.create(user=user, **fields)
class PublicRecipeApiTests(TestCase):
    """Tests for unauthenticated access to the recipe API."""

    def setUp(self):
        self.client = APIClient()

    def test_required_auth(self):
        """Requests without authentication must be rejected with 401."""
        response = self.client.get(RECIPES_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
    """Test authenticated recipe API access."""

    def setUp(self):
        # Authenticate every request in this class as self.user.
        # Fixed: the credentials were redaction placeholders
        # ('<EMAIL>'/'<PASSWORD>'); use real-looking test values.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '[email protected]',
            'testpass'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_recipes(self):
        """Listing recipes returns all recipes, newest id first."""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)

        res = self.client.get(RECIPES_URL)

        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_recipes_limited_to_user(self):
        """Listing returns only the authenticated user's recipes."""
        # Fixed: the second user must have a *distinct* email -- the
        # original placeholder data reused the same email for both
        # users, violating the user model's unique constraint.
        other_user = get_user_model().objects.create_user(
            '[email protected]',
            'pass'
        )
        sample_recipe(user=other_user)
        sample_recipe(user=self.user)

        res = self.client.get(RECIPES_URL)

        # Only recipes filtered to the authenticated user are expected.
        recipes = Recipe.objects.filter(user=self.user)
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)

    def test_view_recipe_detail(self):
        """The detail endpoint serializes one recipe with tags/ingredients."""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))

        res = self.client.get(detail_url(recipe.id))

        serializer = RecipeDetailSerializer(recipe)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_create_basic_recipe(self):
        """POSTing a minimal payload creates a recipe with those fields."""
        payload = {
            'title': 'Test recipe',
            'time_minutes': 30,
            'price': 10.00,
        }
        res = self.client.post(RECIPES_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # DRF returns the created object; fetch it by id and verify
        # every posted field landed on the model.
        recipe = Recipe.objects.get(id=res.data['id'])
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))

    def test_create_recipe_with_tags(self):
        """POSTing tag ids assigns those tags to the new recipe."""
        tag1 = sample_tag(user=self.user, name='Tag 1')
        tag2 = sample_tag(user=self.user, name='Tag 2')
        payload = {
            'title': 'Test recipe with two tags',
            'tags': [tag1.id, tag2.id],
            'time_minutes': 30,
            'price': 10.00
        }
        res = self.client.post(RECIPES_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)

    def test_create_recipe_with_ingredients(self):
        """POSTing ingredient ids assigns those ingredients."""
        ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
        ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
        payload = {
            'title': 'Test recipe with ingredients',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 45,
            'price': 15.00
        }
        res = self.client.post(RECIPES_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)

    def test_partial_update_recipe(self):
        """PATCH updates only the provided fields (tags are replaced)."""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Curry')

        payload = {'title': 'Partially Updated sample recipe',
                   'tags': [new_tag.id]}
        res = self.client.patch(detail_url(recipe.id), payload)

        # Added: the original test never checked the response status,
        # so a silently failing PATCH would still pass.
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        # refresh_from_db pulls the updated values back from the DB.
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        tags = recipe.tags.all()
        self.assertEqual(len(tags), 1)
        self.assertIn(new_tag, tags)

    def test_full_update_recipe(self):
        """PUT replaces the object; omitted m2m fields are cleared."""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Fully Updated sample recipe',
            'time_minutes': 25,
            'price': 5.00
        }
        res = self.client.put(detail_url(recipe.id), payload)

        # Added: assert the PUT succeeded.
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        # Added: 'tags' was omitted from the payload, so a full update
        # must clear it; the original test set a tag but never checked.
        self.assertEqual(recipe.tags.count(), 0)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
import copy
from ytypes import *
from color_print import fatal_print
class NilObj(object):
    """Boxed nil value for the VM."""

    def __init__(self):
        self.obj_header = ObjHeader(OT_NIL, nil_cls, self)
        self.nil = None

    def __hash__(self):
        return hash(self.nil)

    def __eq__(self, other):
        # Fixed: the old code compared hash(self.nil) == hash(other.nil),
        # which equates values that merely collide and raised
        # AttributeError for any non-NilObj operand.
        if not isinstance(other, NilObj):
            return NotImplemented
        return self.nil == other.nil
class BoolObj(object):
    """Boxed boolean value for the VM."""

    def __init__(self, boolean):
        self.obj_header = ObjHeader(OT_BOOL, bool_cls, self)
        self.bool = boolean

    def __hash__(self):
        return hash(self.bool)

    def __eq__(self, other):
        # Fixed: compare the wrapped values, not their hashes, and return
        # NotImplemented for foreign types instead of raising.
        if not isinstance(other, BoolObj):
            return NotImplemented
        return self.bool == other.bool
class StrObj(object):
    """Boxed string value for the VM."""

    def __init__(self, string):
        self.obj_header = ObjHeader(OT_STR, str_cls, self)
        self.str = str(string)

    def __hash__(self):
        return hash(self.str)

    def __eq__(self, other):
        # Fixed: hash-based equality wrongly equated distinct strings
        # whose hashes collide; compare the actual strings instead, and
        # return NotImplemented rather than raising for foreign types.
        if not isinstance(other, StrObj):
            return NotImplemented
        return self.str == other.str
class IntObj(object):
    """Boxed integer value for the VM."""

    def __init__(self, integer):
        self.obj_header = ObjHeader(OT_INT, int_cls, self)
        self.int = int(integer)

    def __hash__(self):
        return hash(self.int)

    def __eq__(self, other):
        # Fixed: compare values, not hashes; foreign operand types used
        # to raise AttributeError -- return NotImplemented instead.
        if not isinstance(other, IntObj):
            return NotImplemented
        return self.int == other.int
class FloatObj(object):
    """Boxed float value for the VM."""

    def __init__(self, float_):
        self.obj_header = ObjHeader(OT_FLOAT, float_cls, self)
        self.float = float(float_)

    def __hash__(self):
        return hash(self.float)

    def __eq__(self, other):
        # Fixed: compare values, not hashes; foreign operand types used
        # to raise AttributeError -- return NotImplemented instead.
        if not isinstance(other, FloatObj):
            return NotImplemented
        return self.float == other.float
class ListObj(object):
    """Boxed list value for the VM."""

    def __init__(self, list_=None):
        # Fixed: the default was a mutable `[]` shared across calls;
        # None keeps the same behavior (any falsy argument yields a
        # fresh empty list) without the shared-default anti-pattern.
        self.obj_header = ObjHeader(OT_LIST, list_cls, self)
        if not list_:
            list_ = []
        # Shallow-copy so the caller's list is never aliased.
        self.list = list(list_)
class MapObj(object):
    """Boxed map (dict) value for the VM."""

    def __init__(self, map_=None):
        self.obj_header = ObjHeader(OT_MAP, map_cls, self)
        # Any falsy argument (None or {}) yields a fresh empty dict; a
        # provided mapping is shallow-copied.
        self.map = dict(map_) if map_ else {}
class ModuleObj(object):
    """Boxed module: tracks module-level variable names and values."""

    def __init__(self, name):
        self.obj_header = ObjHeader(OT_MODULE, module_cls, self)
        self.name = name
        self.module_var_names = []     # declared variable names, in order
        self.module_var_name_len = 0   # mirrors len(module_var_names)
        self.module_var_values = []    # values are filled in elsewhere

    def add_module_var(self, name):
        """Return the slot index of *name*, registering it when new."""
        try:
            # Already declared: reuse the existing slot.
            return self.module_var_names.index(name)
        except ValueError:
            self.module_var_names.append(name)
            self.module_var_name_len += 1
            return self.module_var_name_len - 1
class FunObj(object):
    """Boxed function: bytecode stream, constant pool and frame info."""

    def __init__(self, name, scope=1, arg_num=0):
        self.obj_header = ObjHeader(OT_FUN, fun_cls, self)
        self.name = name
        self.stream = []         # bytecode instruction stream
        self.stream_num = 0
        # Constants are stored as Python-level strings, covering both
        # numeric and string literals.
        self.constants = []
        self.constant_num = 0
        self.max_used_slots = 0
        self.cur_idx = 0
        self.scope = scope
        self.arg_num = arg_num

    def add_constant(self, value):
        """Append *value* to the constant pool and return its index."""
        self.constants.append(value)
        self.constant_num += 1
        return self.constant_num - 1
def call(obj, method_name):
    """Look up the native method *method_name* on *obj*'s class object."""
    cls_obj = obj.obj_header.cls_obj
    return cls_obj.methods[method_name]
def call_by_value(value, method_name):
    """Like :func:`call`, but unwraps a VM value via ``value.obj()`` first."""
    unwrapped = value.obj()
    return call(unwrapped, method_name)
def exit_if_false(cond):
    """Terminate the interpreter when *cond* is falsy; else return True."""
    if cond:
        return True
    sys.exit(1)
def _type_to_pystr(obj):
    """Convert a boxed value to a Python string via its internal
    (Python-level) ``_*_to_str`` helper.

    Returns None implicitly for an unknown object type (no final else).
    """
    if obj.obj_header.obj_type == OT_INT:
        return _int_to_str(obj).str
    elif obj.obj_header.obj_type == OT_FLOAT:
        return _float_to_str(obj).str
    elif obj.obj_header.obj_type == OT_STR:
        return _str_to_str(obj).str
    elif obj.obj_header.obj_type == OT_LIST:
        return _list_to_str(obj).str
    elif obj.obj_header.obj_type == OT_MAP:
        return _map_to_str(obj).str
    elif obj.obj_header.obj_type == OT_NIL:
        return _nil_to_str(obj).str
    elif obj.obj_header.obj_type == OT_BOOL:
        return _bool_to_str(obj).str
    elif obj.obj_header.obj_type == OT_FUN:
        return _fun_to_str(obj).str
    elif obj.obj_header.obj_type == OT_MODULE:
        return _module_to_str(obj).str
def type_to_pystr(start, args):
    """Convert the boxed value in slot ``args[start]`` to a Python string
    via its native ``*_to_str`` method.

    Returns None implicitly for an unknown object type (no final else).
    """
    obj = args[start].obj()
    if obj.obj_header.obj_type == OT_INT:
        return int_to_str(start, args).str
    elif obj.obj_header.obj_type == OT_FLOAT:
        return float_to_str(start, args).str
    elif obj.obj_header.obj_type == OT_STR:
        return str_to_str(start, args).str
    elif obj.obj_header.obj_type == OT_LIST:
        return list_to_str(start, args).str
    elif obj.obj_header.obj_type == OT_MAP:
        return map_to_str(start, args).str
    elif obj.obj_header.obj_type == OT_NIL:
        return nil_to_str(start, args).str
    elif obj.obj_header.obj_type == OT_BOOL:
        return bool_to_str(start, args).str
    elif obj.obj_header.obj_type == OT_FUN:
        return fun_to_str(start, args).str
    elif obj.obj_header.obj_type == OT_MODULE:
        return module_to_str(start, args).str
def is_type(obj, obj_type):
    """Return True when *obj*'s header carries the given type tag."""
    return obj_type == obj.obj_header.obj_type
def args_num(pystr):
    """Return the number of argument slots in a signature string.

    Examples: 'tostr()' -> 0, '==(_)' -> 1, 'insert(_,_)' -> 2.
    """
    left = pystr.find('(')
    right = pystr.rfind(')')
    args_str = pystr[left + 1: right]
    # Fixed: ''.split(',') yields [''], so a zero-argument signature
    # such as 'tostr()' was wrongly reported as having 1 argument.
    if not args_str.strip():
        return 0
    return len(args_str.split(','))
class ObjHeader(object):
    """Header attached to every boxed value: the type tag, the class
    object owning the method table, and a back-reference to the value."""

    def __init__(self, obj_type, cls_obj, obj):
        self.obj_type = obj_type
        self.cls_obj = cls_obj
        self.obj = obj
class ClsObj(object):
    """Class object: owns the method table for one kind of boxed value."""

    def __init__(self, name):
        self.name = name
        self.methods = {}       # signature string -> Python callable
        self.method_names = []  # registered public signature strings
# One shared class object (method table) per built-in value kind.
module_cls = ClsObj('module_cls')
fun_cls = ClsObj('fun_cls')
nil_cls = ClsObj('nil_cls')
bool_cls = ClsObj('bool_cls')
str_cls = ClsObj('str_cls')
int_cls = ClsObj('int_cls')
float_cls = ClsObj('float_cls')
list_cls = ClsObj('list_cls')
# The map object is special: in yank it doubles as a general object.
# Its remove/put/get are exposed internally as @remove/@put/@get,
# because yank implements objects through maps, mimicking JavaScript.
map_cls = ClsObj('map_cls')
def return_true(start, args, obj):
    """Store *obj* into the return slot ``args[start]`` and report success."""
    args[start].to_value(obj)
    return True
def return_false():
    """Report failure of a native method call."""
    return False
# Arguments are wrapped into a yank list.
def fun_call(obj, args):
    """Call a function object. Placeholder -- not implemented yet."""
    pass
def nil_to_str(start, args):
    """Native 'tostr()' for nil: box the string form of the nil value."""
    target = args[start].obj()
    return return_true(start, args, StrObj(str(target.nil)))
def nil_equ(start, args):
    """Native '==' for nil: true iff the right operand is also nil."""
    lhs = args[start].obj()  # left operand kept for symmetry with peers
    rhs = args[start + 1].obj()
    result = rhs.obj_header.obj_type == OT_NIL
    return return_true(start, args, BoolObj(result))
def nil_hash(start, args):
    """Native 'hash()' for nil: hashing nil is a runtime error."""
    fatal_print('Runtime error, nil cannot be hashed!')
    return return_false()
def nil_bind_methods():
    """Register nil's native methods on nil_cls."""
    nil_cls.methods['tostr()'] = nil_to_str
    nil_cls.methods['==(_)'] = nil_equ
    # Fixed key: was 'hash(_)', inconsistent with this type's own
    # method_names list and with every other type, which all register
    # the zero-argument signature 'hash()'.
    nil_cls.methods['hash()'] = nil_hash
    nil_cls.method_names = ['tostr()', '==(_)', 'hash()']
    # Internal (Python-level) fast-path variants.
    nil_cls.methods['_tostr()'] = _nil_to_str
    nil_cls.methods['_==(_)'] = _nil_equ
    nil_cls.methods['_hash()'] = _nil_hash
def bool_to_str(start, args):
    """Native 'tostr()' for bool: box 'True' or 'False'."""
    target = args[start].obj()
    text = str(target.bool)
    return return_true(start, args, StrObj(text))
def bool_equ(start, args):
    """Native '==' for bool.

    Assumes the right operand also carries a .bool slot; other operand
    types raise AttributeError (pre-existing behavior, preserved).
    """
    lhs = args[start].obj()
    rhs = args[start + 1].obj()
    return return_true(start, args, BoolObj(lhs.bool == rhs.bool))
def bool_hash(start, args):
    """Native 'hash()' for bool: box the Python hash of the value."""
    target = args[start].obj()
    return return_true(start, args, IntObj(hash(target.bool)))
def bool_bind_methods():
    """Register bool's native methods on bool_cls."""
    bool_cls.methods['tostr()'] = bool_to_str
    bool_cls.methods['==(_)'] = bool_equ
    bool_cls.methods['hash()'] = bool_hash
    bool_cls.method_names = ['tostr()', '==(_)', 'hash()']
    # Internal (Python-level) fast-path variants.
    bool_cls.methods['_tostr()'] = _bool_to_str
    bool_cls.methods['_==(_)'] = _bool_equ
    bool_cls.methods['_hash()'] = _bool_hash
def str_to_str(start, args):
    """Native 'tostr()' for strings: box a fresh copy of the string."""
    target = args[start].obj()
    return return_true(start, args, StrObj(str(target.str)))
def str_equ(start, args):
    """Native '==' for strings (rhs must also carry a .str slot)."""
    lhs = args[start].obj()
    rhs = args[start + 1].obj()
    return return_true(start, args, BoolObj(lhs.str == rhs.str))
def str_hash(start, args):
    """Native 'hash()' for strings: box the Python hash of the string."""
    target = args[start].obj()
    return return_true(start, args, IntObj(hash(target.str)))
def str_add(start, args):
    """Native '+(_)' for strings: concatenation; rhs must be a string."""
    lhs = args[start].obj()
    rhs = args[start + 1].obj()
    if rhs.obj_header.obj_type != OT_STR:
        fatal_print('Runtime error, arg2 must be string')
        return return_false()
    return return_true(start, args, StrObj(lhs.str + rhs.str))
def str_numbers(start, args):
    """Native 'numbers()' for strings: parse the string into a number.

    Digit-only strings become IntObj; anything else is tried as float.
    """
    obj = args[start].obj()
    # NOTE(review): isdigit() is False for signed strings like '-5', so
    # they fall through to the float branch -- confirm that is intended.
    if obj.str.isdigit():
        ret = IntObj(int(obj.str))
    else:
        try:
            ret = FloatObj(float(obj.str))
        except:
            # NOTE(review): bare except also catches SystemExit etc.
            fatal_print('Runtime error, cannot convert %s to numbers' % obj.str)
            return return_false()
    return return_true(start, args, ret)
def str_at(start, args):
    """Native 'at(_)' for strings: box the character at the given index."""
    target = args[start].obj()
    index = args[start + 1].obj()
    # Fixed: the guard compared against OT_STR, so a genuine int index
    # was rejected ("index must be int") while a string index slipped
    # through and crashed on `.int` below. The index must be an int.
    if index.obj_header.obj_type != OT_INT:
        fatal_print('Runtime error, index must be int')
        return return_false()
    return return_true(start, args, StrObj(target.str[index.int]))
def str_len(start, args):
    """Native 'len()' for strings: box the string length."""
    target = args[start].obj()
    return return_true(start, args, IntObj(len(target.str)))
def str_emtpy(start, args):
    """Native 'empty()' for strings.

    (Function-name typo kept: str_bind_methods registers this exact
    function object under the 'empty()' signature.)
    """
    target = args[start].obj()
    return return_true(start, args, BoolObj(len(target.str) == 0))
def _str_numbers(obj):
    """Python-level variant of str_numbers: parse a boxed string into a
    boxed number, exiting the interpreter on failure."""
    # NOTE(review): isdigit() is False for signed strings like '-5', so
    # they fall through to the float branch -- confirm that is intended.
    if obj.str.isdigit():
        ret = IntObj(int(obj.str))
    else:
        try:
            ret = FloatObj(float(obj.str))
        except:
            # NOTE(review): bare except also catches SystemExit etc.
            fatal_print('Runtime error, cannot convert %s to numbers' % obj.str)
            sys.exit(1)
    return ret
def str_bind_methods():
    """Register the string type's native methods on str_cls."""
    str_cls.methods['tostr()'] = str_to_str
    str_cls.methods['==(_)'] = str_equ
    str_cls.methods['hash()'] = str_hash
    str_cls.methods['+(_)'] = str_add
    str_cls.methods['at(_)'] = str_at
    str_cls.methods['len()'] = str_len
    str_cls.methods['empty()'] = str_emtpy
    str_cls.methods['numbers()'] = str_numbers
    str_cls.method_names = ['tostr()', '==(_)', 'hash()', '+(_)', 'at(_)', 'len()', 'empty()', 'numbers()']
    # Internal (Python-level) fast-path variants.
    str_cls.methods['_tostr()'] = _str_to_str
    str_cls.methods['_==(_)'] = _str_equ
    str_cls.methods['_hash()'] = _str_hash
    str_cls.methods['_+(_)'] = _str_add
    str_cls.methods['_at(_)'] = _str_at
    str_cls.methods['_len()'] = _str_len
    str_cls.methods['_empty()'] = _str_emtpy
    str_cls.methods['_numbers()'] = _str_numbers
def int_to_str(start, args):
    """Native 'tostr()' for ints: box the decimal string form."""
    target = args[start].obj()
    return return_true(start, args, StrObj(str(target.int)))
def int_equ(start, args):
    """Native '==' for ints (rhs must also carry an .int slot)."""
    lhs = args[start].obj()
    rhs = args[start + 1].obj()
    return return_true(start, args, BoolObj(lhs.int == rhs.int))
def int_hash(start, args):
    """Native 'hash()' for ints: box the Python hash of the value."""
    target = args[start].obj()
    return return_true(start, args, IntObj(hash(target.int)))
def int_to_float(start, args):
    """Native 'tofloat()' for ints: box the value as a FloatObj."""
    target = args[start].obj()
    return return_true(start, args, FloatObj(float(target.int)))
def int_add(start, args):
    """Native '+(_)' for ints: int+int -> int, int+float -> float."""
    obj1 = args[start].obj()
    obj2 = args[start + 1].obj()
    if obj2.obj_header.obj_type == OT_FLOAT:
        # Promote the int operand so both sides are floats.
        obj1 = _int_to_float(obj1)
    if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
        fatal_print('Runtime error, arg2 is not a number')
        return return_false()
    if obj1.obj_header.obj_type == OT_FLOAT:
        return return_true(start, args, FloatObj(obj1.float + obj2.float))
    if obj1.obj_header.obj_type == OT_INT:
        return return_true(start, args, IntObj(obj1.int + obj2.int))
def int_sub(start, args):
    """Native '-(_)' for ints: int-int -> int, int-float -> float."""
    obj1 = args[start].obj()
    obj2 = args[start + 1].obj()
    if obj2.obj_header.obj_type == OT_FLOAT:
        # Fixed: previously called int_to_float(obj1), which is the
        # (start, args) native wrapper and raised TypeError here; the
        # single-object promotion helper is _int_to_float (as used by
        # int_add/int_mul/int_div).
        obj1 = _int_to_float(obj1)
    if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
        fatal_print('Runtime error, arg2 is not a number')
        return return_false()
    if obj1.obj_header.obj_type == OT_FLOAT:
        return return_true(start, args, FloatObj(obj1.float - obj2.float))
    if obj1.obj_header.obj_type == OT_INT:
        return return_true(start, args, IntObj(obj1.int - obj2.int))
def int_mul(start, args):
    """Native '*(_)' for ints: int*int -> int, int*float -> float."""
    obj1 = args[start].obj()
    obj2 = args[start + 1].obj()
    if obj2.obj_header.obj_type == OT_FLOAT:
        # Promote the int operand so both sides are floats.
        obj1 = _int_to_float(obj1)
    if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
        fatal_print('Runtime error, arg2 is not a number')
        return return_false()
    if obj1.obj_header.obj_type == OT_FLOAT:
        return return_true(start, args, FloatObj(obj1.float * obj2.float))
    if obj1.obj_header.obj_type == OT_INT:
        return return_true(start, args, IntObj(obj1.int * obj2.int))
def int_div(start, args):
    """Native '/(_)' for ints, with a zero-divisor guard."""
    obj1 = args[start].obj()
    obj2 = args[start + 1].obj()
    if obj2.obj_header.obj_type == OT_FLOAT:
        obj1 = _int_to_float(obj1)
    if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
        fatal_print('Runtime error, arg2 is not a number')
        return return_false()
    if obj1.obj_header.obj_type == OT_FLOAT:
        if obj2.float == 0.0:
            fatal_print('Runtime error, arg2 cannot be 0')
            return return_false()
        # Fixed: return_true requires (start, args, obj); the float
        # branch previously called return_true(FloatObj(...)) with a
        # single argument and raised TypeError on every float division.
        return return_true(start, args, FloatObj(obj1.float / obj2.float))
    if obj1.obj_header.obj_type == OT_INT:
        if obj2.int == 0:
            fatal_print('Runtime error, arg2 cannot be 0')
            return return_false()
        # NOTE(review): int/int uses true division; IntObj then truncates
        # via int() -- confirm floor ('//') semantics are not wanted.
        return return_true(start, args, IntObj(obj1.int / obj2.int))
def int_mod(start, args):
    """Native '%(_)' for ints; the rhs must be a nonzero int."""
    lhs = args[start].obj()
    rhs = args[start + 1].obj()
    if rhs.obj_header.obj_type != OT_INT:
        fatal_print('Runtime error, arg2 must be int')
        return return_false()
    if rhs.int == 0:
        fatal_print('Runtime error, arg2 cannot be 0')
        return return_false()
    return return_true(start, args, IntObj(lhs.int % rhs.int))
def int_gt(start, args):
    """Native '>(_)' for ints: compare as floats; rhs must be a number."""
    obj1 = args[start].obj()
    obj2 = args[start + 1].obj()
    if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
        fatal_print('Runtime error, arg2 is not a number')
        return return_false()
    # Both operands are promoted to float before comparison.
    obj1 = _int_to_float(obj1)
    if obj2.obj_header.obj_type == OT_INT:
        obj2 = _int_to_float(obj2)
    return return_true(start, args, BoolObj(obj1.float > obj2.float))
def int_ge(start, args):
    """Native '>=(_)' for ints: compare as floats; rhs must be a number."""
    obj1 = args[start].obj()
    obj2 = args[start + 1].obj()
    if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
        # NOTE(review): message says 'args' where peers say 'arg2'; this
        # is runtime output, so it is left unchanged here.
        fatal_print('Runtime error, args is not a number')
        return return_false()
    # Both operands are promoted to float before comparison.
    obj1 = _int_to_float(obj1)
    if obj2.obj_header.obj_type == OT_INT:
        obj2 = _int_to_float(obj2)
    return return_true(start, args, BoolObj(obj1.float >= obj2.float))
def int_lt(start, args):
obj1 = args[start].obj()
obj2 = args[start + 1].obj()
if obj2.obj_header.obj_type not in [OT_FLOAT, OT_INT]:
fatal_print('Runtime error, args is not a number')
return return_false()
| |
Tip: If KPOINTS file is generated by this module, ticks on kpath are auto-picked.
[See Docs](https://massgh.github.io/pivotpy/)
"""
def __init__(self,path=None,skipk=None,elim=[],shift_kpath=0,try_pwsh=True):
try:
from IPython import get_ipython
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell' or shell =='Shell':
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg')
except: pass
self.data = vp.export_vasprun(path=path,skipk=skipk,elim=elim,shift_kpath=shift_kpath,try_pwsh=try_pwsh)
self.elim = elim
if path == None:
kfile = 'KPOINTS'
else:
kfile = os.path.join(os.path.dirname(path),'KPOINTS')
self.kticks = sio.read_ticks(kfile)
def __handle_kwargs(self,kwargs,dos=False):
kwargs = {'elim': self.elim, **kwargs}
if dos:
return kwargs
ticks = {k:self.kticks[k] for k in ['ktick_inds','ktick_vals','kseg_inds']}
kwargs = {**ticks,**kwargs} #Prefer provided ones
return kwargs
@_g2f
def sbands(self,ax = None,**kwargs):
kwargs = self.__handle_kwargs(kwargs)
return sp.splot_bands(self.data,ax = ax, **kwargs)
@_g2f
def sdos(self,elements = [[0],], orbs = [[0],], labels = ['s',], ax = None,**kwargs):
kwargs = self.__handle_kwargs(kwargs,dos=True)
return sp.splot_dos_lines(self.data,elements = elements, orbs = orbs, labels = labels, ax = ax, **kwargs)
@_g2f
def srgb(self,elements = [[],[],[]], orbs = [[],[],[]], labels = ['','',''], ax = None, **kwargs):
kwargs = self.__handle_kwargs(kwargs)
return sp.splot_rgb_lines(self.data,elements = elements, orbs = orbs, labels = labels, ax = ax, **kwargs)
@_g2f
def scolor(self,elements = [[0],], orbs = [[0],], labels = ['s',],axes = None,**kwargs):
kwargs = self.__handle_kwargs(kwargs)
return sp.splot_color_lines(self.data,elements = elements, orbs = orbs, labels = labels, axes = axes, **kwargs)
@_g2f
def idos(self,elements = [[0],], orbs = [[0],], labels = ['s',],**kwargs):
kwargs = self.__handle_kwargs(kwargs, dos=True)
return ip.iplot_dos_lines(self.data,elements = elements, orbs = orbs, labels = labels, **kwargs)
@_g2f
def irgb(self,elements = [[],[],[]], orbs = [[],[],[]], labels = ['','',''],**kwargs):
kwargs = self.__handle_kwargs(kwargs)
return ip.iplot_rgb_lines(self.data,elements = elements, orbs = orbs, labels = labels, **kwargs)
# Cell
def nav_links(current_index=0,
              doc_url=r"https://massgh.github.io/pivotpy/",
              items=["Index",
                     "XmlElementTree",
                     "StaticPlots",
                     "InteractivePlots",
                     "Utilities",
                     "StructureIO",
                     "Widgets"
                     ],
              horizontal=False,
              out_string=False):
    """Build a Markdown navigation block linking the documentation pages.

    Parameters
    ----------
    current_index : int
        Index into *items* that is marked as the current page (gets a
        trailing ● marker).
    doc_url : str
        Base documentation URL; any item containing 'Index' links to the
        base URL itself.
    items : list of str
        Page names to link.
    horizontal : bool
        Layout hint; the vertical form appends a trailing space before
        the newline.
    out_string : bool
        If True, return the raw Markdown string instead of a Markdown
        display object.
    """
    from IPython.display import Markdown
    links = [doc_url + item if 'Index' not in item else doc_url for item in items]
    # Fixed typo: 'lightkblue' is not a valid CSS color, so the links
    # rendered with the browser default color; 'lightblue' was intended.
    style = """<style>a{text-decoration: none !important;color:lightblue;font-weight:bold;}
               a:focus,a:active,a:hover{color:hotpink !important;}</style>\n"""
    md_str = style
    for i, (link, item) in enumerate(zip(links, items)):
        if current_index == i:
            item = "{}●".format(item)
        if not horizontal:
            md_str += "> [ `▶` {} ]({}) \n".format(item, link)
        else:
            md_str += "> [ `▶` {} ]({})\n".format(item, link)
    if out_string:
        return md_str
    return Markdown(md_str)
# Cell
def export_outcar(path=None):
    """
    - Read potential at ionic sites from OUTCAR.
    - Returns a Dict2Data with ion potentials ('ion_pot'), fractional
      positions ('positions'), positions+potential ('site_pot'), the
      real and reciprocal basis vectors, and (NKPTS, NBANDS, NIONS).
    - Returns None (after printing) if the file does not exist.
    """
    if path is None:
        path = './OUTCAR'
    if not os.path.isfile(path):
        # print(...) returns None -> caller gets None for a missing file.
        return print("{} does not exist!".format(path))
    # Reading it
    with open(r'{}'.format(path),'r') as f:
        lines = f.readlines()
    # Processing: single pass to locate the marker lines of each section.
    # NOTE(review): N, nlines, start_index, first, b_first, ion_line and
    # kpt_line are only bound if their markers occur in the file; a
    # malformed OUTCAR raises NameError below -- confirm inputs are trusted.
    for i,l in enumerate(lines):
        if 'NIONS' in l:
            N = int(l.split()[-1])
            # Potential table prints 5 values per line.
            nlines = np.ceil(N/5).astype(int)
        if 'electrostatic' in l:
            start_index = i+3
            stop_index = start_index+nlines
        if 'fractional' in l:
            first = i+1
        if 'vectors are now' in l:
            b_first = i+5
        if 'NION' in l:
            ion_line = l
        if 'NKPTS' in l:
            kpt_line =l
    NKPTS,NKDIMS,NBANDS = [int(v) for v in re.findall(r"\d+",kpt_line)]
    NEDOS,NIONS = [int(v) for v in re.findall(r"\d+",ion_line)]
    n_kbi = (NKPTS,NBANDS,NIONS)
    # Data manipulation
    # Potential: parse the full rows first, then the possibly-short last row.
    data = lines[start_index:stop_index]
    initial = np.loadtxt(StringIO(''.join(data[:-1]))).reshape((-1))
    last = np.loadtxt(StringIO(data[-1]))
    pot_arr = np.hstack([initial,last]).reshape((-1,2))
    pot_arr[:,0] = pot_arr[:,0]-1 # Ion index fixing (1-based -> 0-based)
    # Nearest neighbors
    pos = lines[first:first+N]
    pos_arr = np.loadtxt(StringIO('\n'.join(pos)))
    pos_arr[pos_arr>0.98] = pos_arr[pos_arr>0.98]-1 # Fixing outer layers
    # positions and potential stacked side by side
    pos_pot = np.hstack([pos_arr,pot_arr[:,1:]])
    basis = np.loadtxt(StringIO(''.join(lines[b_first:b_first+3])))
    final_dict = {'ion_pot':pot_arr,'positions':pos_arr,'site_pot':pos_pot,'basis':basis[:,:3],'rec_basis':basis[:,3:],'n_kbi':n_kbi}
    return vp.Dict2Data(final_dict)
# Cell
def export_potential(locpot=None, e=True, m=False):
    """Read volumetric data from LOCPOT or a similarly structured file (e.g. CHG).

    Loads only a single magnetization set (out of the 2/4 stored ones) to limit
    the memory/performance cost, while still allowing the electrostatic set and
    one magnetization set to be loaded together.

    - **Parameters**
        - locpot: path/to/LOCPOT or similarly structured file like CHG.
                  If None, './LOCPOT' is auto-picked from the CWD.
        - e : Load the electric potential / charge density set. Default True.
        - m : Magnetization density. Default False. If True, picks `m` for the
              spin-polarized case (which corresponds to `m_x` for non-colinear
              calculations). May also be 'x', 'y' or 'z' for non-colinear
              calculations.
    - **Exceptions**
        - Raises an index error if the requested magnetization set is not
          present in the file while `m` is not False.
    """
    if locpot is None:
        if os.path.isfile('LOCPOT'):
            locpot = 'LOCPOT'
        else:
            return print('./LOCPOT not found.')
    else:
        if not os.path.isfile(locpot):
            return print("File {!r} does not exist!".format(locpot))
    if m not in [True, False, 'x', 'y', 'z']:
        # BUG FIX: this message previously formatted `e` instead of the
        # offending value `m`.
        return print("m expects one of [True,False,'x','y','z'], got {}".format(m))
    # Helper: flatten a text slice into floats and restore the grid order.
    def fix_data(islice_gen, shape):
        new_gen = (float(l) for line in islice_gen for l in line.split())
        COUNT = np.prod(shape).astype(int)
        data = np.fromiter(new_gen, dtype=float, count=COUNT)  # count is a must for performance
        # Data on file is written in (NGz,NGy,NGx) order; transpose back to (NGx,NGy,NGz).
        N_reshape = [shape[2], shape[1], shape[0]]
        data = data.reshape(N_reshape).transpose([2, 1, 0])
        return data
    # Reading file: POSCAR-like header, then grid dimensions, then data sets.
    with open(locpot, 'r') as f:
        lines = []
        f.seek(0)
        for i in range(8):
            lines.append(f.readline())
        N = sum([int(v) for v in lines[6].split()])  # total number of atoms
        f.seek(0)
        poscar = []
        for i in range(N + 8):
            poscar.append(f.readline())
        f.readline()  # blank separator line
        Nxyz = [int(v) for v in f.readline().split()]  # FFT grid dimensions
        nlines = np.ceil(np.prod(Nxyz) / 5).astype(int)  # 5 values per data line
        # islice is a fast generator for reading/skipping the bulk data.
        pot_dict = {}
        if e == True:
            pot_dict.update({'e': fix_data(islice(f, nlines), Nxyz)})
            ignore_set = 0  # pointer is already past the first set
        else:
            ignore_set = nlines  # must skip the first set to reach magnetization
        # ignore_n lines of extra data (original note: "some kind of useless
        # data") separate the successive sets.
        ignore_n = np.ceil(N / 5).astype(int) + 1
        if m == True:
            print("m = True would pick m_x for non-colinear case, and m for ISPIN=2.\nUse m='x' for non-colinear or keep in mind that m will refer to m_x.")
            start = ignore_n + ignore_set
            pot_dict.update({'m': fix_data(islice(f, start, start + nlines), Nxyz)})
        elif m == 'x':
            start = ignore_n + ignore_set
            pot_dict.update({'m_x': fix_data(islice(f, start, start + nlines), Nxyz)})
        elif m == 'y':
            start = 2 * ignore_n + nlines + ignore_set
            pot_dict.update({'m_y': fix_data(islice(f, start, start + nlines), Nxyz)})
        elif m == 'z':
            start = 3 * ignore_n + 2 * nlines + ignore_set
            pot_dict.update({'m_z': fix_data(islice(f, start, start + nlines), Nxyz)})
    # Parse structural info from the POSCAR-like header.
    basis = np.loadtxt(StringIO(''.join(poscar[2:5]))) * float(poscar[1].strip())
    system = poscar[0].strip()
    ElemName = poscar[5].split()
    ElemIndex = [int(v) for v in poscar[6].split()]
    ElemIndex.insert(0, 0)
    ElemIndex = list(np.cumsum(ElemIndex))
    positions = np.loadtxt(StringIO(''.join(poscar[8:N + 9])))
    final_dict = dict(SYSTEM=system, ElemName=ElemName, ElemIndex=ElemIndex, basis=basis, positions=positions)
    final_dict = {**final_dict, **pot_dict}
    return vp.Dict2Data(final_dict)
# Cell
class LOCPOT_CHG:
"""
- Returns Data from LOCPOT and similar structure files like CHG. Loads only single set out of 2/4 magnetization data to avoid performance/memory cost while can load electrostatic and one set of magnetization together.
- **Parameters**
- path: path/to/LOCPOT or similar stuructured file like CHG. LOCPOT is auto picked in CWD.
- e : Electric potential/charge density. Default is True.
- m : Magnetization density m. Default is False. If True, picks `m` for spin polarized case, and `m_x` for non-colinear case. Additionally it can take 'x','y' and 'z' in case of non-colinear calculations.
- **Exceptions**
- Would raise index error if magnetization density set is not present in LOCPOT/CHG in case `m` is not False.
"""
    def __init__(self, path=None, e=True, m=False):
        # When running inside Jupyter/IPython, switch matplotlib output to
        # SVG for crisper plots; silently skipped outside IPython.
        # NOTE(review): bare except also swallows KeyboardInterrupt here.
        try:
            from IPython import get_ipython
            shell = get_ipython().__class__.__name__
            if shell == 'ZMQInteractiveShell' or shell == 'Shell':
                from IPython.display import set_matplotlib_formats
                set_matplotlib_formats('svg')
        except: pass
        self.path = path  # kept: used in plot_m()'s "does not exist" message
        self.m = m        # required later to pick the right magnetization set in plots
        self.data = export_potential(locpot=path, e=e, m=m)
        # DOCS: borrow sp.plot_potential's docstring for plot_e/plot_m,
        # dropping the lines about parameters (basis, e_or_m) that are filled
        # in automatically by those wrappers.
        # NOTE(review): this re-patches class-level __doc__ on every
        # instantiation; placement inside __init__ is assumed from source
        # ordering -- confirm against the original file.
        lines = sp.plot_potential.__doc__.split('\n')
        lines = [l for l in [l for l in lines if 'basis' not in l] if 'e_or_m' not in l]
        LOCPOT_CHG.plot_e.__doc__ = '\n'.join(lines)
        LOCPOT_CHG.plot_m.__doc__ = '\n'.join(lines)
def plot_e(self,operation='mean_z',ax=None,period=None,
lr_pos=(0.25,0.75),lr_widths = [0.5,0.5],
labels=(r'$V(z)$',r'$\langle V \rangle _{roll}(z)$',r'$\langle V \rangle $'),
colors = ((0,0.2,0.7),'b','r'),annotate=True):
return sp.plot_potential(basis=self.data.basis,e_or_m=self.data.e,operation=operation,
ax=ax,period=period,lr_pos=lr_pos,lr_widths=lr_widths,
labels=labels,colors=colors,annotate=annotate)
def plot_m(self,operation='mean_z',ax=None,period=None,
lr_pos = (0.25,0.75),lr_widths = [0.5,0.5],
labels = (r'$M(z)$',r'$\langle M \rangle _{roll}(z)$',r'$\langle M \rangle $'),
colors = ((0,0.2,0.7),'b','r'),annotate=True):
if self.m:
try:
e_or_m = self.data.m
except:
e_or_m = self.data.to_dict()[f'm_{self.m}']
else:
return print("Magnetization data set does not exist in {}".format(self.path))
return sp.plot_potential(basis=self.data.basis,e_or_m=e_or_m,operation=operation,
ax=ax,period=period,lr_pos=lr_pos,lr_widths=lr_widths,
labels=labels,colors=colors,annotate=annotate)
def view_period(self,period_guess=0.25,operation='mean_z',nslice=10,e_or_m=None,):
"""
- Periodicity check by plotly's interactive plot.
- **Parameters**
- period_guess: Initial guess of period. Default is 0.25. Should be in [0,1].
- operation : Any of ['mean_x','min_x','max_x','mean_y','min_y','max_y','mean_z','min_z','max_z'].
- nslice : Default is 10. Number of periods around and including period_guess. e.g. If you give 0.25 as period_guess and nslice is 10, you will get 10 lines of rolling average over given data from where you can choose best fit or try another guess and so on.
- e_or_m : None by default. Not required in most cases as `view_period()` will try to get data itself from top class in order of `self.data.[e,m,m_x,m_y,m_z]` and if `self.data.e` exists it never goes to others, so you can overwrite this by setting | |
pia, pibc, pii = 0,0,0
if Nia>0 or Nibc>0 or Nii>0 :
if Nia>0 : pia = float(Nia)/(Nia+Nibc+Nii)
if Nibc>0 : pibc = float(Nibc)/(Nia+Nibc+Nii)
if Nii>0 : pii = float(Nii)/(Nia+Nibc+Nii)
if verbose : print( " P(Ia) = %.4f"%pia )
#self._colorClassificationMag = np.array([pia, pibc, pii])
self._colorClassification = np.array([pia, pibc, pii])
# # 2013.06.03 : flux-based classifications are inaccurate
# # disabling for now.
# pia, pibc, pii = 0,0,0
# likeia, likeibc, likeii = likelist
# if likeia>0 or likeibc>0 or likeii>0 :
# if likeia>0 : pia = float(likeia)/(likeia+likeibc+likeii)
# if likeibc>0 : pibc = float(likeibc)/(likeia+likeibc+likeii)
# if likeii>0 : pii = float(likeii)/(likeia+likeibc+likeii)
# if verbose : print( " P(Ia)_flux = %.4f"%pia )
# self._colorClassificationFlux = np.array([pia, pibc, pii])
return( self._colorClassification )
def printColorClassification( self ):
from classify import printColorClassification
printColorClassification( self )
def doClassify(self, bands='all', Nsim=2000, trestrange=[-15,30],
modelerror=[0.05,0.07,0.07], errfloor=0.001,
useLuminosityPrior=False, plot=False,
x1prior=lambda x1: bifgauss( x1, 0, 1.5, 0.9),
cprior=lambda c: bifgauss( c, 0, 0.08, 0.14),
avprior=lambda Av: avpriorexp( Av, 0.7),
zprior= lambda z : np.ones(len(z)), npkmjd = 30,
clobber=False, verbose=True, debug=False,
kcorfile='HST/kcor_HST_AB.fits',
pdzfile='', nlogz=0):
""" redirects to doGridClassify. See the doGridClassify doc string for help.
See doClassifyMC for classifications using MC sims """
self.doGridClassify( bands=bands, Nsim=Nsim, trestrange=trestrange,
modelerror=modelerror, errfloor=errfloor, useLuminosityPrior=useLuminosityPrior, plot=plot,
x1prior=x1prior, cprior=cprior, avprior=avprior, zprior=zprior, npkmjd =npkmjd,
clobber=clobber, verbose=verbose, debug=debug,
kcorfile=kcorfile, pdzfile=pdzfile, nlogz=nlogz)
def doClassifyMC(self, bands='all', Nsim=2000, trestrange=[-20,50],
modelerror=[0.08,0.1,0.1], errfloor='auto',
useLuminosityPrior=True,
x1prior=lambda x1: bifgauss( x1, 0, 1.5, 0.9),
cprior= extinction.midIa_c,
avprior= extinction.midCC,
zprior= lambda z : np.ones(len(z)),
kcorfile='HST/kcor_HST_AB.fits', pdzfile='',
clobber=False, verbose=True, debug=False):
""" Compute classification probabilities from comparison of this observed SN light curve
against synthetic light curves from SNANA Monte Carlo simulations.
OPTIONS
bands : a string listing the bands to use. e.g. 'HJW'. Use 'all' for all bands.
trestrange : use only observations within this rest-frame time window (rel. to peak)
modelerror : fractional flux error to apply to each SN model for chi2 calculation
The first value applies to the Ia model and the second to CC templates.
Nsim : the total number of SNe to simulate in each of the 3 SN classes
plot : make plots showing histograms of chi2 and posterior probabilities
useLuminosityPrior : if True, compare each simulated SN to the observations as-is
(i.e. including the luminosity assumptions that are baked in to the
SNANA simulations)
if False, allow a free parameter for scaling the flux of each simulated
SN so that it most closely matches the observed fluxes (i.e. remove all
"baked-in" priors on luminosity from cosmology or luminosity functions)
errfloor : minimum flux error for the model (e.g. for zero-flux extrapolations)
With the default 'auto' setting, the errfloor is automatically set on a
filter-by-filter basis, using the getErrFloor function.
NOTE: clobber decrements by 1 in CC sims to prevent re-running all sims via getClassSim each time
so use clobber=3 to re-make the sims once, but higher than that will result in redundant calls to
the external SNANA snlc_sim executable.
STORED RESULTS :
self.ClassMC : a classify.ClassSim object (a SimTable sub-class) holding the classification results
self.ClassMC.P[Ia,Ibc,II] : final (scalar) classification probabilities for each class
self.ClassMC.[Ia,Ibc,II] : the simulation and classification results from comparison to each SN sub-class
self.ClassMC.[Ia,Ibc,II].CHI2 : len==Nsim vectors of chi2 values from comparison to each simulated SN, by class
self.ClassMC.[Ia,Ibc,II].LIKE : len==Nsim vectors of likelihood values (i.e. exp(-chi2/2) )
self.ClassMC.[Ia,Ibc,II].PRIOR[z,c,x1,Av,Rv] : the prior probability functions
self.ClassMC.[Ia,Ibc,II].PROB : len==Nsim vectors of bayesian posterior probability values
"""
if debug : import pdb; pdb.set_trace()
# check for existing probabilities
if( not clobber and 'ClassMC' in self.__dict__ ) :
if verbose : print( "Monte Carlo Classification already exists. Not clobbering.")
return(None)
# set the modelerror (for backwards compatibility)
if modelerror in [None,0] : modelerror = [0,0]
elif not np.iterable( modelerror ) : modelerror = [ modelerror, modelerror, modelerror ]
elif len(modelerror)==2 : modelerror = [modelerror[0], modelerror[1], modelerror[2] ]
# compute chi2 and likelihood vectors (if needed)
self.getChi2LikelihoodMC( 'Ia', Nsim=Nsim, bands=bands,trestrange=trestrange,
modelerror=modelerror[0], errfloor=errfloor,
useLuminosityPrior=useLuminosityPrior, verbose=verbose, clobber=clobber )
self.getChi2LikelihoodMC( 'Ibc', Nsim=Nsim, bands=bands,trestrange=trestrange,
modelerror=modelerror[1], errfloor=errfloor,
useLuminosityPrior=useLuminosityPrior, verbose=verbose, clobber=(clobber>1) )
self.getChi2LikelihoodMC( 'II', Nsim=Nsim, bands=bands,trestrange=trestrange,
modelerror=modelerror[2], errfloor=errfloor,
useLuminosityPrior=useLuminosityPrior, verbose=verbose, clobber=(clobber>1))
# ----- COMPUTE THE POSTERIOR PROBABILITY ARRAYS ------
# NOTE on the absence of a factor for the parameter sampling interval:
# To define 'proper' priors (i.e. require that they integrate to unity)
# we could define the sampling interval for each parameter e.g:
# x1 = self.ClassMC.Ia.SIM_SALT2x1 # simulated x1 values
# dx1 = (x1.max() - x1.min())/len(x1) # mean x1 step size
# px1 = x1prior( self.ClassMC.Ia.SIM_SALT2x1 ) # values of the (unnormalized) prior dist'n
# px1proper = px1 / (px1.sum()*dx1) # normalized (proper) prior
# Then when we integrate the posterior probabilities to get the final classification
# probability, we would have:
# self.postProbIa = self.likeIa * px1proper * dx1
# which is equivalent to
# self.postProbIa = self.likeIa * px1 / px1.sum()
# ---- TYPE IA POSTERIOR PROBABILITY -------
# define the priors
px1 = x1prior( self.ClassMC.Ia.SIM_SALT2x1 )
pc = cprior( self.ClassMC.Ia.SIM_SALT2c )
if self.zerr>0.01: pz = zprior( self.ClassMC.Ia.SIM_REDSHIFT )
else : pz = 1
self.ClassMC.Ia.PRIORx1 = x1prior
self.ClassMC.Ia.PRIORc = cprior
self.ClassMC.Ia.PRIORz = zprior
# convert the likelihood dist'n into posterior probability dist'n
self.ClassMC.Ia.PROB = px1 * pc * pz * self.ClassMC.Ia.LIKE
# ---- TYPE IB/C POSTERIOR PROBABILITY -------
# Define the priors
if self.zerr>0.01: pz = zprior( self.ClassMC.Ibc.SIM_REDSHIFT )
else : pz = 1
pAv = avprior( self.ClassMC.Ibc.DUMP['AV'] )
self.ClassMC.Ibc.PRIORz = zprior
self.ClassMC.Ibc.PRIORAv = avprior
# convert the likelihood dist'n into posterior probability dist'n
self.ClassMC.Ibc.PROB = pz * pAv * self.ClassMC.Ibc.LIKE
# ---- TYPE II POSTERIOR PROBABILITY -------
# Define the priors
if self.zerr>0.01: pz = zprior( self.ClassMC.II.SIM_REDSHIFT )
else : pz = 1
pAv = avprior( self.ClassMC.II.DUMP['AV'] )
self.ClassMC.II.PRIORz = zprior
self.ClassMC.II.PRIORAv = avprior
# convert the likelihood dist'n into posterior probability dist'n
self.ClassMC.II.PROB = pz * pAv * self.ClassMC.II.LIKE
# Finally, marginalize over nuisance parameters and normalize
# to get the (scalar) classification probabilities:
# the probability that this object belongs to each SN class
pIa, pIbc, pII = self.ClassMC.Ia.PROB.sum(), self.ClassMC.Ibc.PROB.sum(), self.ClassMC.II.PROB.sum()
self.ClassMC.PIa = pIa / ( pIa + pIbc + pII )
self.ClassMC.PIbc = pIbc / ( pIa + pIbc + pII )
self.ClassMC.PII = pII / ( pIa + pIbc + pII )
if verbose>1 :
print("P(Ia) = %.3f\nP(Ib/c) = %.3f\nP(II) = %.3f\n"%(self.ClassMC.PIa,self.ClassMC.PIbc,self.ClassMC.PII) )
def doGridClassify(self, bands='all', Nsim=0, trestrange=[-20,50],
modelerror=[0.05,0.07,0.07], errfloor=0.001, inflateUVerr=True,
useLuminosityPrior=True, magnification=1,
x1prior=lambda x1: bifgauss( x1, 0, 1.5, 0.9), cprior= extinction.midIa_c,
avprior= extinction.midCC, zprior='host', #lambda z : np.ones(len(z)),
classfractions='mid', clobber=False, verbose=True, debug=False,
kcorfile='HST/kcor_HST_AB.fits', pdzfile='',
nlogz=0, ncolorpar=0, ncolorlaw=0, nlumipar=0, npkmjd = 0,
omitTemplateIbc='', omitTemplateII='', getSystematicError=False,
getSNphotz=True ):
""" Bayesian photometric SN classification using SNANA grid simulations.
The user must set the grid size. You can set Nsim for automatic definition of
the grid shape, or you can explicitly provide all of the grid dimensions using
the five parameters: [ nlogz, ncolorpar, ncolorlaw, nlumipar, npkmjd ].
OPTIONS
bands : a string listing the bands to use. e.g. 'HJW'. Use 'all' for all bands.
trestrange : use only observations within this rest-frame time window (rel. to peak)
modelerror : fractional flux error to apply to each SN model for chi2 calculation
The first value applies to the Ia model and the second to CC templates.
useLuminosityPrior :
if False: allow a free parameter for scaling the flux of each simulated
SN so that it most closely matches the observed fluxes
if == True or == 1 : allow the free parameter for flux scaling, but
also apply a prior based on | |
longDesc =
u"""
""",
)
entry(
index = 1899,
label = "N3s-(CdCd)CsH",
group =
"""
1 * N3s u0 {2,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D} {4,S}
3 Cd u0 {2,D}
4 R u0 {2,S}
5 Cs u0 {1,S}
6 H u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([5.8,6.1,6.4,6.7,7.5,8.1,9.1],'cal/(mol*K)','+|-',[1.3,1.3,1.3,1.3,1.3,1.3,1.3]),
H298 = (15.3,'kcal/mol','+|-',1.9),
S298 = (8.7,'cal/(mol*K)','+|-',1.7),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1939,
label = "N3s-CCC",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 C u0 {1,S}
3 C u0 {1,S}
4 C u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([0,0,0,0,0,0,0],'cal/(mol*K)'),
H298 = (0,'kcal/mol'),
S298 = (0,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1810,
label = "N3s-CsCsCs",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 Cs u0 {1,S}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([3.48,4.56,5.43,5.97,6.56,6.67,6.5],'cal/(mol*K)'),
H298 = (24.4,'kcal/mol'),
S298 = (-13.46,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1819,
label = "N3s-CbCsCs",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 Cb u0 {1,S}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([0,0,0,0,0,0,0],'cal/(mol*K)'),
H298 = (26.2,'kcal/mol'),
S298 = (0,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1827,
label = "N3s-(CO)CsCs",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 CO u0 {1,S} {5,D}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 O2d u0 {2,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([0,0,0,0,0,0,0],'cal/(mol*K)'),
H298 = (0,'kcal/mol'),
S298 = (0,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1830,
label = "N3s-(CO)(CO)Cs",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 CO u0 {1,S} {5,D}
3 CO u0 {1,S} {6,D}
4 Cs u0 {1,S}
5 O2d u0 {2,D}
6 O2d u0 {3,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([0,0,0,0,0,0,0],'cal/(mol*K)'),
H298 = (-5.9,'kcal/mol'),
S298 = (0,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1831,
label = "N3s-(CO)(CO)Cb",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 CO u0 {1,S} {5,D}
3 CO u0 {1,S} {6,D}
4 Cb u0 {1,S}
5 O2d u0 {2,D}
6 O2d u0 {3,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([0,0,0,0,0,0,0],'cal/(mol*K)'),
H298 = (-0.5,'kcal/mol'),
S298 = (0,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1895,
label = "N3s-(CtN3t)CsCs",
group =
"""
1 * N3s u0 {2,S} {4,S} {5,S}
2 Ct u0 {1,S} {3,T}
3 N3t u0 {2,T}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([8.6,9.6,10.5,11.4,12.9,13.8,14.8],'cal/(mol*K)','+|-',[1,1,1,1,1,1,1]),
H298 = (53.3,'kcal/mol','+|-',1.3),
S298 = (21,'cal/(mol*K)','+|-',1.2),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1898,
label = "N3s-(CdCd)CsCs",
group =
"""
1 * N3s u0 {2,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D} {4,S}
3 Cd u0 {2,D}
4 R u0 {2,S}
5 Cs u0 {1,S}
6 Cs u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([2.8,2.9,3.3,3.7,4.6,5,5.5],'cal/(mol*K)','+|-',[1.3,1.3,1.3,1.3,1.3,1.3,1.3]),
H298 = (25.9,'kcal/mol','+|-',1.9),
S298 = (-11,'cal/(mol*K)','+|-',1.7),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1811,
label = "N3s-N3sHH",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 H u0 {1,S}
3 H u0 {1,S}
4 N3s u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([6.1,7.38,8.43,9.27,10.54,11.52,13.19],'cal/(mol*K)'),
H298 = (11.4,'kcal/mol'),
S298 = (29.13,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
# NOTE(review): index 1940 is also assigned to the "N3s-NCC" entry further
# down in this file -- duplicate database index; confirm which of the two
# entries should be renumbered.
entry(
    index = 1940,
    label = "N3s-NCH",
    group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 N u0 {1,S}
3 C u0 {1,S}
4 H u0 {1,S}
""",
    # All-zero thermo values appear to be placeholders (no data fitted yet).
    thermo = ThermoData(
        Tdata = ([300,400,500,600,800,1000,1500],'K'),
        Cpdata = ([0,0,0,0,0,0,0],'cal/(mol*K)'),
        H298 = (0,'kcal/mol'),
        S298 = (0,'cal/(mol*K)'),
    ),
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
index = 1812,
label = "N3s-N3sCsH",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 Cs u0 {1,S}
3 H u0 {1,S}
4 N3s u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.82,5.8,6.5,7,7.8,8.3,9],'cal/(mol*K)'),
H298 = (20.9,'kcal/mol'),
S298 = (9.61,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1814,
label = "N3s-N3sCbH",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 N3s u0 {1,S}
3 Cb u0 {1,S}
4 H u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([0,0,0,0,0,0,0],'cal/(mol*K)'),
H298 = (22.1,'kcal/mol'),
S298 = (0,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1897,
label = "N3s-CsH(N3dOd)",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 Cs u0 {1,S}
3 H u0 {1,S}
4 N3d u0 {1,S} {5,D}
5 O2d u0 {4,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([10.4,11.9,13.4,14.7,16.6,17.9,19.2],'cal/(mol*K)','+|-',[1,1,1,1,1,1,1]),
H298 = (25.2,'kcal/mol','+|-',1.3),
S298 = (41.7,'cal/(mol*K)','+|-',1.2),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1902,
label = "N3s-CsH(N5dOdOs)",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 Cs u0 {1,S}
3 H u0 {1,S}
4 N5dc u0 {1,S} {5,D} {6,S}
5 O2d u0 {4,D}
6 O2s u0 {4,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([13.1,15.5,17.6,19.2,21.4,22.8,24.4],'cal/(mol*K)','+|-',[1,1,1,1,1,1,1]),
H298 = (8.4,'kcal/mol','+|-',1.3),
S298 = (45.3,'cal/(mol*K)','+|-',1.2),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1901,
label = "N3s-(CdCd)HN3s",
group =
"""
1 * N3s u0 {2,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D} {4,S}
3 Cd u0 {2,D}
4 R u0 {2,S}
5 H u0 {1,S}
6 N3s u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.5,5.4,6.5,7.3,8.5,9.1,9.9],'cal/(mol*K)','+|-',[1.1,1.1,1.1,1.1,1.1,1.1,1.1]),
H298 = (20.5,'kcal/mol','+|-',1.5),
S298 = (6.6,'cal/(mol*K)','+|-',1.4),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
# NOTE(review): index 1940 is also assigned to the "N3s-NCH" entry earlier
# in this file -- duplicate database index; confirm which of the two entries
# should be renumbered.
entry(
    index = 1940,
    label = "N3s-NCC",
    group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 N u0 {1,S}
3 C u0 {1,S}
4 C u0 {1,S}
""",
    # All-zero thermo values appear to be placeholders (no data fitted yet).
    thermo = ThermoData(
        Tdata = ([300,400,500,600,800,1000,1500],'K'),
        Cpdata = ([0,0,0,0,0,0,0],'cal/(mol*K)'),
        H298 = (0,'kcal/mol'),
        S298 = (0,'cal/(mol*K)'),
    ),
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
index = 1893,
label = "N3s-NCsCs",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 N u0 {1,S}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([0,0,0,0,0,0,0],'cal/(mol*K)'),
H298 = (29.2,'kcal/mol'),
S298 = (-13.8,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1813,
label = "N3s-CsCsN3s",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 Cs u0 {1,S}
3 Cs u0 {1,S}
4 N3s u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([3.7,4.9,5.8,6.3,6.8,6.8,6.7],'cal/(mol*K)','+|-',[1,1,1,1,1,1,1]),
H298 = (26.8,'kcal/mol','+|-',1.3),
S298 = (-14.5,'cal/(mol*K)','+|-',1.2),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1896,
label = "N3s-CsCs(N3dOd)",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 Cs u0 {1,S}
3 Cs u0 {1,S}
4 N3d u0 {1,S} {5,D}
5 O2d u0 {4,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([9.4,10.5,11.5,12.4,13.8,14.6,15.3],'cal/(mol*K)','+|-',[1,1,1,1,1,1,1]),
H298 = (32.6,'kcal/mol','+|-',1.3),
S298 = (19.3,'cal/(mol*K)','+|-',1.2),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1903,
label = "N3s-CsCs(N5dOdOs)",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 Cs u0 {1,S}
3 Cs u0 {1,S}
4 N5dc u0 {1,S} {5,D} {6,S}
5 O2d u0 {4,D}
6 O2s u0 {4,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([11.5,13.4,15.2,16.7,18.8,20,21.1],'cal/(mol*K)','+|-',[1,1,1,1,1,1,1]),
H298 = (16.7,'kcal/mol','+|-',1.3),
S298 = (25.8,'cal/(mol*K)','+|-',1.2),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1941,
label = "N3s-NCdCs",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 N u0 {1,S}
3 Cd u0 {1,S}
4 Cs u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([0,0,0,0,0,0,0],'cal/(mol*K)'),
H298 = (0,'kcal/mol'),
S298 = (0,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1900,
label = "N3s-(CdCd)CsN3s",
group =
"""
1 * N3s u0 {2,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D} {4,S}
3 Cd u0 {2,D}
4 R u0 {2,S}
5 Cs u0 {1,S}
6 N3s u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.2,4.2,4.4,4.8,5.4,5.7,6],'cal/(mol*K)','+|-',[1.1,1.1,1.1,1.1,1.1,1.1,1.1]),
H298 = (30.3,'kcal/mol','+|-',1.5),
S298 = (-13.2,'cal/(mol*K)','+|-',1.4),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1891,
label = "N3s-CsHOs",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 Cs u0 {1,S}
3 H u0 {1,S}
4 O2s u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([5.2,6.2,7,7.7,8.7,9.4,10.5],'cal/(mol*K)','+|-',[1,1,1,1,1,1,1]),
H298 = (20.4,'kcal/mol','+|-',1.4),
S298 = (8.1,'cal/(mol*K)','+|-',1.3),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1892,
label = "N3s-CsCsOs",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 Cs u0 {1,S}
3 Cs u0 {1,S}
4 O2s u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.3,5.1,5.7,6.2,7,7.3,7.5],'cal/(mol*K)','+|-',[0.8,0.8,0.8,0.8,0.8,0.8,0.8]),
H298 = (26.6,'kcal/mol','+|-',1.2),
S298 = (-12.7,'cal/(mol*K)','+|-',1.1),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1890,
label = "N3s-OsHH",
group =
"""
1 * N3s u0 {2,S} {3,S} {4,S}
2 O2s u0 {1,S}
3 H u0 {1,S}
4 H u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([6.1,7.4,8.4,9.3,10.5,11.5,13.2],'cal/(mol*K)'),
H298 = (11.4,'kcal/mol'),
S298 = (29.1,'cal/(mol*K)'),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index | |
from collections import namedtuple
import logging
from operator import itemgetter
import re
import string
import unicodedata
from django.db import transaction
from django.db.utils import IntegrityError
from django.utils import timezone
from spotify.spotify import spotify
from trackmap.models import Album, Track, TrackAvailability
# Lightweight records used to pass Spotify search results around; each carries
# a match_score computed when comparing a result against the RP song metadata.
AlbumInfo = namedtuple('AlbumInfo', 'id title year img_small img_medium img_large match_score')
# NOTE(review): 'multiple' flag semantics not visible in this chunk --
# presumably marks a multi-artist match; confirm against the consumers.
ArtistInfo = namedtuple('ArtistInfo', 'id name multiple match_score')
TrackInfo = namedtuple('TrackInfo', 'id title match_score')
TrackArtistAlbum = namedtuple('TrackArtistAlbum', 'track_info artist_info album_info')
log = logging.getLogger(__name__)
def utc_now():
    """Return the current time as a UTC-aware datetime.

    NOTE(review): ``.replace(tzinfo=timezone.utc)`` relabels the tzinfo
    without converting the wall-clock value. That is only correct because
    ``django.utils.timezone.now()`` returns UTC (with USE_TZ=True) or a naive
    UTC time -- if it ever returned an aware non-UTC datetime this would
    silently shift the instant. Confirm the project's timezone settings.
    """
    return timezone.now().replace(tzinfo=timezone.utc)
def remove_accents(input_str):
    """Strip diacritical marks: NFKD-decompose, then drop combining code points."""
    decomposed = unicodedata.normalize('NFKD', input_str)
    kept = (ch for ch in decomposed if not unicodedata.combining(ch))
    return "".join(kept)
def chunks(l, n):
    """
    Breaks a single list into several lists of size n.
    The chunk size is clamped to at least 1; the final chunk may be shorter.
    :param list l: list to break into chunks
    :param int n: number of elements that each chunk should have
    :return: list of lists
    """
    size = max(1, n)
    pieces = []
    start = 0
    while start < len(l):
        pieces.append(l[start:start + size])
        start += size
    return pieces
# TODO: map Radio Paradise entries that bundle several songs into one "song"
# onto multiple Spotify tracks.
# Examples: rp_song_ids: 33058 (Yes), 38699 (Led Zep)
class TrackSearch(object):
# There are three cases a radioparadise artist name to be mapped to multiple spotify artist names:
# 1. For example "<NAME> & <NAME>": These are two artists playing together on one song
# 2. For example '<NAME>': ['<NAME>', '<NAME>']: This is one artist who has works attributed
# to them on Spotify under more than one name.
# 3. Radio Paradise uses one name, Spotify uses another (e.g. '<NAME>': '<NAME> & The Wailers')
# These cases should be handled differently:
# For 1), the artist part of the query should include all the artists
# For 2), the artist names should be queried using OR. If there are more than two mappings,
# this would mean making more than one query if the first query didn't find a good match.
# For 3), the Spotify version of the artist's name should be used in the query.
MAPPING_TYPE_REPLACE = 'replace_single'
MAPPING_TYPE_ONE_TO_MANY = 'one_to_many'
MAPPING_TYPE_ANY_OF = 'any_of'
REPLACE_FOR_SEARCH_ONLY = 'replace_single_search_only'
ISRC_TRACK_MATCH_SCORE = 1.0
ISRC_ARTIST_MATCH_SCORE = 0.9
rp_to_spotify_artist_map = {
MAPPING_TYPE_REPLACE: {
'!Deladap': '!Dela Dap', # Spotify seems wrong on this one
'10 CC': '10cc',
'AfroCelts': 'Afro Celt Sound System',
'Apollo Four Forty': 'Apollo 440',
'Allman Brothers': 'The Allman Brothers Band',
'Angé<NAME>jo': 'An<NAME>',
'<NAME>': '<NAME> & The Wailers',
'Cheik<NAME> Lo': 'Che<NAME>',
'<NAME>': '<NAME> & New Bohemians',
'Emmerhoff': 'Emmerhoff & the Melancholy Babies',
'English Beat': 'The Beat',
'Frederico Aubele': 'Federico Aubele',
'Ihtiyac Molasi': 'İhtiyaç Molası',
'Iron & Wine and Calexico': 'Calexico / Iron and Wine',
'Fläskkvartetten': 'Fleshquartet',
'<NAME> & Canned Heat': '<NAME>',
'Khachaturian': 'Aram Khachaturian',
'Matchbox 20': 'Matchbox Twenty',
'Nikkfurie': 'La Caution',
'Ozzy Osborne': 'Ozzy Osbourne',
'Santana Brothers': 'Santana',
'Sl<NAME>': 'Slaint<NAME>hath',
'Sixteen Horsepower': '16 Horsepower',
'<NAME>': '<NAME> II',
'The Nightwatchman (Tom Morello)': 'Tom Morello: The Nightwatchman',
'The English Beat': 'The Beat',
'Trail of Dead': '...And You Will Know Us By The Trail Of Dead',
'The Trash Can Sinatras': 'Trashcan Sinatras',
'Woven Hand': 'Wovenhand',
},
MAPPING_TYPE_ONE_TO_MANY: {
'Al Di Meola & Paco DeLucia': ['Al Di Meola', 'Paco de Lucía'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'<NAME> & <NAME>': ['<NAME>é', 'Ry Cooder'],
'Ali <NAME>é & Toumani Diabeté': ['Ali Farka Touré', 'Toumani Diabeté'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'Anoushka Shankar & Kar<NAME>ale': ['An<NAME>', 'Kar<NAME>ale'],
'B.B. King & Dr. John': ['B.B. King', 'Dr. John'],
'B.B. King & <NAME>': ['B.B. King', '<NAME>'],
'B.B. King & Tracy Chapman': ['B.B. King', 'Tracy Chapman'],
'<NAME> & the Blind Boys of Alabama': ['<NAME>', 'The Blind Boys of Alabama'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'<NAME> and <NAME>': ['<NAME>', '<NAME>'],
'<NAME> & Wilco': ['<NAME>', 'Wilco'],
'Blanquito Man, Control Machete & Celso Piña': ['Blanquito Man', 'Control Machete', 'Celso Piña'],
'Bloomfield, <NAME>': ['<NAME>', '<NAME>'], # Spotify seems wrong to exclude Bloomfield.
'Buddy & <NAME>': ['<NAME>', '<NAME>'],
'Casualties of Cool': ['<NAME>', '<NAME>'],
'<NAME> & Friends': ['<NAME>', 'Malian Musicians'],
'Danger Mouse & <NAME>pi': ['Danger Mouse', '<NAME>'],
'Danger Mouse & Sparklehorse': ['Danger Mouse', 'Sparklehorse'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'<NAME> and <NAME>': ['<NAME>', '<NAME>'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'Habib Koité & Bamada': ['<NAME>', 'Bamada'],
'<NAME>': ['<NAME>', '<NAME>'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'], # with two spaces!
'<NAME> & the Watson Twins': ['<NAME>', 'The Watson Twins'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'<NAME> & Fisk Jubilee Singers': ['<NAME>', 'The Fisk Jubilee Singers'],
'Imogen Heap & Vishal-Shekhar': ['Imogen Heap', 'Vishal Shekhar'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'Leftover Salmon & Cracker': ['Le<NAME>', 'Cracker'],
'Los Lobos & Antonio Banderas': ['Los Lobos', '<NAME>'],
'<NAME> & Wings': ['<NAME>', 'Wings'],
'Paul & <NAME>': ['<NAME>', '<NAME>'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'Ungar, Mason & friends': ['<NAME>', '<NAME>'],
'<NAME> & <NAME>': ['<NAME>', '<NAME>'],
'Willie and Lobo': ['Willie', 'Lobo'],
},
MAPPING_TYPE_ANY_OF: {
# These will be used when comparing the results returned from Spotify:
'compare': {
'1 Giant Leap': ['<NAME>', '<NAME>'],
'<NAME>': ['<NAME>', '<NAME> And Relentless7'],
'Bé<NAME>': ['<NAME>', '<NAME> and the Flecktones'],
'<NAME>': ['<NAME>', '<NAME> and the Flecktones'],
'Easy Star All-Stars': ['Toots & The Maytals', 'Citizen Cope', 'The Meditations'], # see album: Radiodread
'Elephant Revival': ['Elephant Revival', 'Elephant Revivial'],
'<NAME>': ['<NAME>ello', 'Elvis Costello & The Attractions', 'Elvis Costello And The Roots'],
'Habib Koité & Bamada': ['Habib Koité', 'Bamada'],
'Josh Joplin Group': ['<NAME>', 'Josh Joplin Group'],
'<NAME>': ['<NAME>', '<NAME> and The Black Spirits'],
'<NAME>': ['<NAME>', '<NAME>'],
'<NAME>': ['<NAME>', '<NAME> & The Egyptians'],
'<NAME>': ['<NAME>', '<NAME> & The Cardinals'],
},
# These are used when searching Spotify; a separate search will be made for each separate name:
'search': {
# Note: no need to add alternates if the alternates also start with the same name, because
# Spotify will also match 'foo bar' if searching for an artist named 'foo'.
'1 Giant Leap': ['<NAME>', '<NAME>'],
'Easy Star All-Stars': ['Toots & The Maytals', 'Citizen Cope', 'The Meditations'], # see album: Radiodread
'Elephant Revival': ['Elephant Revival', 'Elephant Revivial'],
'<NAME>': ['<NAME>', '<NAME>'],
}
},
REPLACE_FOR_SEARCH_ONLY: {
'10,000 Maniacs': 'Maniacs', # Strange, but Spotify doesn't find it with 10000 Maniacs.
'<NAME>': '<NAME>',
'<NAME> & Bamada': '<NAME>',
'Josh Joplin Group': '<NAME>',
},
}
# Characters that can be stripped when comparing possible matches:
strip_chars_pattern = re.compile('[{}]'.format(re.escape(string.punctuation + ' ')))
# Characters that can be stripped when querying Spotify for a match:
search_strip_chars_pattern = re.compile('[{}]'.format(
re.escape(string.punctuation.replace('-', '') \
.replace('&', '').replace('$', '').replace('#', '').replace('+', '').replace('/', '')
)))
# Match things like ", part 2":
part_x_pattern = re.compile(',? (\((pt\.|part) \d+\)|(pt\.|part \d+))')
# Match things like ", pt. 2", with a named group for the part number:
part_x_type1 = re.compile(',? ?(pt\.|part) (?P<part_number>\d+)')
# Match things like " (part 2)", with a named group for the part number:
part_x_type2 = re.compile(' ?\((pt\.|part) (?P<part_number>\d+)\)')
# Match things like "(w/ <NAME>)" and "feat. <NAME>":
featuring_pattern = re.compile('\((w/|feat\.?|featuring) (?P<featuring>[^\)]+)\)')
contains_featuring_pattern = re.compile('^.*' + featuring_pattern.pattern + '.*$')
# Match all non-alphanumeric characters (unicode aware):
strip_non_words_pattern = re.compile('[\W_]+', re.UNICODE)
# Match text like .*(live|acoustic)
live_pattern = re.compile(r'^(.*?)(live|acoustic)$')
# Match text like .*(\(live|acoustic\))
live_raw_pattern = re.compile(r'^(.*?)\s*\(\s*(live|acoustic)\s*\)\s*$', re.IGNORECASE)
# Match text like .*(live|acoustic)
unplugged_pattern = re.compile(r'^(.*?)((mtvunplugged(version)?)|unpluggedversion)$')
def __init__(self, query_limit=40, max_items_to_process=200):
    """Create a matcher backed by a fresh Spotify client.

    :param query_limit: result-count limit per Spotify query
        (presumably passed through to the search calls -- confirm at call sites)
    :param max_items_to_process: cap on how many returned items are examined
        -- confirm at call sites
    """
    # Build the client first so a connection failure leaves no partial state.
    self.spotify = spotify()
    self.max_items_to_process = max_items_to_process
    self.query_limit = query_limit
def spotify_query(self, song):
    """Build the list of Spotify queries to run for *song*.

    :param song: rphistory.Song object
    :return: list of 5-tuples
        (query, title, and_artist_names_for_compare, or_artist_names_for_compare, isrc)
    """
    and_search, or_search = self.map_artist_names(song.artists.all(), 'search')
    and_compare, or_compare = self.map_artist_names(song.artists.all(), 'compare')
    title = song.corrected_title or song.title
    isrc = song.isrc
    queries = []

    def add_queries_for(artist_names):
        # One title-based query, plus an ISRC-based one when an ISRC is known.
        queries.append((self.build_query(title, artist_names=artist_names),
                        title, and_compare, or_compare, None))
        if isrc:
            queries.append((self.build_query(None, artist_names=artist_names, isrc=isrc),
                            title, and_compare, or_compare, isrc))

    # Query each possible or_artist_name separately, because an OR clause does not
    # apply to only the artist names, but instead seems to make all elements,
    # including the song, be considered as OR-ed elements.
    for artist_name in or_search:
        add_queries_for([artist_name])
    if and_search:
        add_queries_for(and_search)
    return queries
def find_matching_tracks(self, song):
"""
Gets the matching tracks that can be played per country.
:param song: rphistory.Song object
:return: tuple: (dict: best_matches, dict: matches_score)
"""
#TODO: a fair number of the songs that fail to match fail because the song title is slightly different
# between | |
# Repository: tlambert03/llspy2
# Form implementation generated from reading ui file '/Users/talley/Dropbox (HMS)/Python/LLSpy/llspy/gui/main_gui.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from qtpy import QtCore, QtGui, QtWidgets
class Ui_Main_GUI:
def setupUi(self, Main_GUI):
Main_GUI.setObjectName("Main_GUI")
Main_GUI.resize(617, 861)
Main_GUI.setMinimumSize(QtCore.QSize(0, 861))
Main_GUI.setFocusPolicy(QtCore.Qt.ClickFocus)
self.centralWidget = QtWidgets.QWidget(Main_GUI)
self.centralWidget.setObjectName("centralWidget")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.centralWidget)
self.verticalLayout_4.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_4.setSpacing(6)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.tabWidget = QtWidgets.QTabWidget(self.centralWidget)
self.tabWidget.setMinimumSize(QtCore.QSize(593, 500))
self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget.setElideMode(QtCore.Qt.ElideRight)
self.tabWidget.setObjectName("tabWidget")
self.tab_process = QtWidgets.QWidget()
self.tab_process.setObjectName("tab_process")
self.process_tab_layout = QtWidgets.QVBoxLayout(self.tab_process)
self.process_tab_layout.setContentsMargins(11, 11, 11, 11)
self.process_tab_layout.setSpacing(6)
self.process_tab_layout.setObjectName("process_tab_layout")
self.listbox = QtWidgets.QTableWidget(self.tab_process)
self.listbox.setMinimumSize(QtCore.QSize(0, 200))
self.listbox.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.listbox.setDragEnabled(True)
self.listbox.setDragDropOverwriteMode(False)
self.listbox.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.listbox.setAlternatingRowColors(True)
self.listbox.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.listbox.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.listbox.setShowGrid(True)
self.listbox.setGridStyle(QtCore.Qt.DashLine)
self.listbox.setWordWrap(False)
self.listbox.setCornerButtonEnabled(True)
self.listbox.setColumnCount(7)
self.listbox.setObjectName("listbox")
self.listbox.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.listbox.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.listbox.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.listbox.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.listbox.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.listbox.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.listbox.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.listbox.setHorizontalHeaderItem(6, item)
self.process_tab_layout.addWidget(self.listbox)
self.processingToolBox = QtWidgets.QToolBox(self.tab_process)
font = QtGui.QFont()
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
font.setStrikeOut(False)
self.processingToolBox.setFont(font)
self.processingToolBox.setObjectName("processingToolBox")
self.tool_preprocess = QtWidgets.QWidget()
self.tool_preprocess.setGeometry(QtCore.QRect(0, 0, 547, 292))
self.tool_preprocess.setObjectName("tool_preprocess")
self.verticalLayout_16 = QtWidgets.QVBoxLayout(self.tool_preprocess)
self.verticalLayout_16.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_16.setSpacing(6)
self.verticalLayout_16.setObjectName("verticalLayout_16")
self.camcorGroupBox = QtWidgets.QGroupBox(self.tool_preprocess)
self.camcorGroupBox.setStyleSheet(
"QGroupBox{font-size: 14px} QGroupBox::title{subcontrol-position: top left}"
)
self.camcorGroupBox.setObjectName("camcorGroupBox")
self.gridLayout_2 = QtWidgets.QGridLayout(self.camcorGroupBox)
self.gridLayout_2.setContentsMargins(11, 11, 11, 11)
self.gridLayout_2.setSpacing(6)
self.gridLayout_2.setObjectName("gridLayout_2")
self.camcorCheckBox = QtWidgets.QCheckBox(self.camcorGroupBox)
self.camcorCheckBox.setObjectName("camcorCheckBox")
self.gridLayout_2.addWidget(self.camcorCheckBox, 0, 0, 1, 2)
self.camcorTargetCombo = QtWidgets.QComboBox(self.camcorGroupBox)
self.camcorTargetCombo.setEnabled(False)
self.camcorTargetCombo.setObjectName("camcorTargetCombo")
self.camcorTargetCombo.addItem("")
self.camcorTargetCombo.addItem("")
self.camcorTargetCombo.addItem("")
self.gridLayout_2.addWidget(self.camcorTargetCombo, 0, 2, 1, 1)
self.medianFilterCheckBox = QtWidgets.QCheckBox(self.camcorGroupBox)
self.medianFilterCheckBox.setMouseTracking(True)
self.medianFilterCheckBox.setObjectName("medianFilterCheckBox")
self.gridLayout_2.addWidget(self.medianFilterCheckBox, 0, 4, 1, 1)
self.saveCamCorrectedCheckBox = QtWidgets.QCheckBox(self.camcorGroupBox)
self.saveCamCorrectedCheckBox.setEnabled(True)
self.saveCamCorrectedCheckBox.setChecked(True)
self.saveCamCorrectedCheckBox.setObjectName("saveCamCorrectedCheckBox")
self.gridLayout_2.addWidget(self.saveCamCorrectedCheckBox, 0, 5, 1, 2)
self.camParamTiffToolButton = QtWidgets.QToolButton(self.camcorGroupBox)
self.camParamTiffToolButton.setObjectName("camParamTiffToolButton")
self.gridLayout_2.addWidget(self.camParamTiffToolButton, 1, 6, 1, 1)
self.camParamTiffLabel = QtWidgets.QLabel(self.camcorGroupBox)
self.camParamTiffLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.camParamTiffLabel.setObjectName("camParamTiffLabel")
self.gridLayout_2.addWidget(self.camParamTiffLabel, 1, 0, 1, 1)
self.camParamTiffLineEdit = QtWidgets.QLineEdit(self.camcorGroupBox)
self.camParamTiffLineEdit.setText("")
self.camParamTiffLineEdit.setAlignment(
QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
)
self.camParamTiffLineEdit.setReadOnly(True)
self.camParamTiffLineEdit.setObjectName("camParamTiffLineEdit")
self.gridLayout_2.addWidget(self.camParamTiffLineEdit, 1, 1, 1, 5)
spacerItem = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout_2.addItem(spacerItem, 0, 3, 1, 1)
self.verticalLayout_16.addWidget(self.camcorGroupBox)
self.trimEdgesGroupBox = QtWidgets.QGroupBox(self.tool_preprocess)
self.trimEdgesGroupBox.setStyleSheet(
"QGroupBox{font-size: 14px} QGroupBox::title{subcontrol-position: top left}"
)
self.trimEdgesGroupBox.setObjectName("trimEdgesGroupBox")
self.gridLayout_23 = QtWidgets.QGridLayout(self.trimEdgesGroupBox)
self.gridLayout_23.setContentsMargins(8, 8, 11, 8)
self.gridLayout_23.setSpacing(6)
self.gridLayout_23.setObjectName("gridLayout_23")
self.trimZ1SpinBox = QtWidgets.QSpinBox(self.trimEdgesGroupBox)
self.trimZ1SpinBox.setMaximum(999)
self.trimZ1SpinBox.setObjectName("trimZ1SpinBox")
self.gridLayout_23.addWidget(self.trimZ1SpinBox, 1, 7, 1, 1)
self.trimX1SpinBox = QtWidgets.QSpinBox(self.trimEdgesGroupBox)
self.trimX1SpinBox.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.trimX1SpinBox.setMaximum(999)
self.trimX1SpinBox.setProperty("value", 1)
self.trimX1SpinBox.setObjectName("trimX1SpinBox")
self.gridLayout_23.addWidget(self.trimX1SpinBox, 1, 1, 1, 1)
self.trimY1SpinBox = QtWidgets.QSpinBox(self.trimEdgesGroupBox)
self.trimY1SpinBox.setMaximum(999)
self.trimY1SpinBox.setObjectName("trimY1SpinBox")
self.gridLayout_23.addWidget(self.trimY1SpinBox, 1, 4, 1, 1)
self.trimZ0Label = QtWidgets.QLabel(self.trimEdgesGroupBox)
self.trimZ0Label.setObjectName("trimZ0Label")
self.gridLayout_23.addWidget(self.trimZ0Label, 0, 6, 1, 1, QtCore.Qt.AlignRight)
self.trimX0SpinBox = QtWidgets.QSpinBox(self.trimEdgesGroupBox)
self.trimX0SpinBox.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.trimX0SpinBox.setAutoFillBackground(False)
self.trimX0SpinBox.setMaximum(999)
self.trimX0SpinBox.setProperty("value", 1)
self.trimX0SpinBox.setObjectName("trimX0SpinBox")
self.gridLayout_23.addWidget(self.trimX0SpinBox, 0, 1, 1, 1)
self.trimZ0SpinBox = QtWidgets.QSpinBox(self.trimEdgesGroupBox)
self.trimZ0SpinBox.setAutoFillBackground(False)
self.trimZ0SpinBox.setMaximum(999)
self.trimZ0SpinBox.setProperty("value", 1)
self.trimZ0SpinBox.setObjectName("trimZ0SpinBox")
self.gridLayout_23.addWidget(self.trimZ0SpinBox, 0, 7, 1, 1)
self.trimY0Label = QtWidgets.QLabel(self.trimEdgesGroupBox)
self.trimY0Label.setObjectName("trimY0Label")
self.gridLayout_23.addWidget(self.trimY0Label, 0, 3, 1, 1, QtCore.Qt.AlignRight)
self.trimZ1Label = QtWidgets.QLabel(self.trimEdgesGroupBox)
self.trimZ1Label.setObjectName("trimZ1Label")
self.gridLayout_23.addWidget(self.trimZ1Label, 1, 6, 1, 1, QtCore.Qt.AlignRight)
self.trimX0Label = QtWidgets.QLabel(self.trimEdgesGroupBox)
self.trimX0Label.setObjectName("trimX0Label")
self.gridLayout_23.addWidget(self.trimX0Label, 0, 0, 1, 1, QtCore.Qt.AlignRight)
self.trimY1Label = QtWidgets.QLabel(self.trimEdgesGroupBox)
self.trimY1Label.setObjectName("trimY1Label")
self.gridLayout_23.addWidget(self.trimY1Label, 1, 3, 1, 1, QtCore.Qt.AlignRight)
spacerItem1 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout_23.addItem(spacerItem1, 1, 2, 1, 1)
self.trimY0SpinBox = QtWidgets.QSpinBox(self.trimEdgesGroupBox)
self.trimY0SpinBox.setMaximum(999)
self.trimY0SpinBox.setObjectName("trimY0SpinBox")
self.gridLayout_23.addWidget(self.trimY0SpinBox, 0, 4, 1, 1)
self.trimX1Label = QtWidgets.QLabel(self.trimEdgesGroupBox)
self.trimX1Label.setObjectName("trimX1Label")
self.gridLayout_23.addWidget(self.trimX1Label, 1, 0, 1, 1, QtCore.Qt.AlignRight)
spacerItem2 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout_23.addItem(spacerItem2, 0, 2, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout_23.addItem(spacerItem3, 1, 5, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout_23.addItem(spacerItem4, 0, 5, 1, 1)
self.trimX0Label.raise_()
self.trimX0SpinBox.raise_()
self.trimY0Label.raise_()
self.trimY0SpinBox.raise_()
self.trimZ0Label.raise_()
self.trimZ0SpinBox.raise_()
self.trimX1Label.raise_()
self.trimX1SpinBox.raise_()
self.trimY1SpinBox.raise_()
self.trimY1Label.raise_()
self.trimZ1SpinBox.raise_()
self.trimZ1Label.raise_()
self.verticalLayout_16.addWidget(self.trimEdgesGroupBox)
self.backgroundGroupBox = QtWidgets.QGroupBox(self.tool_preprocess)
self.backgroundGroupBox.setStyleSheet(
"QGroupBox{font-size: 14px} QGroupBox::title{subcontrol-position: top left}"
)
self.backgroundGroupBox.setObjectName("backgroundGroupBox")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.backgroundGroupBox)
self.horizontalLayout_6.setContentsMargins(8, 8, 11, 8)
self.horizontalLayout_6.setSpacing(6)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.backgroundAutoRadio = QtWidgets.QRadioButton(self.backgroundGroupBox)
self.backgroundAutoRadio.setChecked(True)
self.backgroundAutoRadio.setObjectName("backgroundAutoRadio")
self.horizontalLayout_6.addWidget(self.backgroundAutoRadio)
spacerItem5 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_6.addItem(spacerItem5)
self.backgroundFixedRadio = QtWidgets.QRadioButton(self.backgroundGroupBox)
self.backgroundFixedRadio.setObjectName("backgroundFixedRadio")
self.horizontalLayout_6.addWidget(self.backgroundFixedRadio)
self.backgroundFixedSpinBox = QtWidgets.QSpinBox(self.backgroundGroupBox)
self.backgroundFixedSpinBox.setEnabled(False)
self.backgroundFixedSpinBox.setAutoFillBackground(False)
self.backgroundFixedSpinBox.setMaximum(1000)
self.backgroundFixedSpinBox.setProperty("value", 90)
self.backgroundFixedSpinBox.setObjectName("backgroundFixedSpinBox")
self.horizontalLayout_6.addWidget(self.backgroundFixedSpinBox)
spacerItem6 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_6.addItem(spacerItem6)
self.backgroundRollingRadio = QtWidgets.QRadioButton(self.backgroundGroupBox)
self.backgroundRollingRadio.setEnabled(False)
self.backgroundRollingRadio.setObjectName("backgroundRollingRadio")
self.horizontalLayout_6.addWidget(self.backgroundRollingRadio)
self.backgroundRollingSpinBox = QtWidgets.QSpinBox(self.backgroundGroupBox)
self.backgroundRollingSpinBox.setEnabled(False)
self.backgroundRollingSpinBox.setProperty("value", 10)
self.backgroundRollingSpinBox.setObjectName("backgroundRollingSpinBox")
self.horizontalLayout_6.addWidget(self.backgroundRollingSpinBox)
self.verticalLayout_16.addWidget(self.backgroundGroupBox)
spacerItem7 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
)
self.verticalLayout_16.addItem(spacerItem7)
self.processingToolBox.addItem(self.tool_preprocess, "")
self.tool_deconvolution = QtWidgets.QWidget()
self.tool_deconvolution.setGeometry(QtCore.QRect(0, 0, 477, 282))
self.tool_deconvolution.setObjectName("tool_deconvolution")
self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.tool_deconvolution)
self.verticalLayout_14.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_14.setSpacing(6)
self.verticalLayout_14.setObjectName("verticalLayout_14")
self.doDeconGroupBox = QtWidgets.QGroupBox(self.tool_deconvolution)
self.doDeconGroupBox.setToolTip("")
self.doDeconGroupBox.setStyleSheet(
"QGroupBox{font-size: 14px} QGroupBox::title{subcontrol-position: top left}"
)
self.doDeconGroupBox.setFlat(False)
self.doDeconGroupBox.setCheckable(True)
self.doDeconGroupBox.setChecked(True)
self.doDeconGroupBox.setObjectName("doDeconGroupBox")
self.verticalLayout = QtWidgets.QVBoxLayout(self.doDeconGroupBox)
self.verticalLayout.setContentsMargins(6, 6, 6, 6)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
self.deconParamsLayout = QtWidgets.QHBoxLayout()
self.deconParamsLayout.setContentsMargins(-1, 0, -1, -1)
self.deconParamsLayout.setSpacing(6)
self.deconParamsLayout.setObjectName("deconParamsLayout")
self.iterationsLabel = QtWidgets.QLabel(self.doDeconGroupBox)
self.iterationsLabel.setEnabled(True)
self.iterationsLabel.setObjectName("iterationsLabel")
self.deconParamsLayout.addWidget(self.iterationsLabel)
self.iterationsSpinBox = QtWidgets.QSpinBox(self.doDeconGroupBox)
self.iterationsSpinBox.setEnabled(True)
self.iterationsSpinBox.setMaximum(20)
self.iterationsSpinBox.setProperty("value", 10)
self.iterationsSpinBox.setObjectName("iterationsSpinBox")
self.deconParamsLayout.addWidget(self.iterationsSpinBox)
spacerItem8 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.deconParamsLayout.addItem(spacerItem8)
self.apodizeLabel = QtWidgets.QLabel(self.doDeconGroupBox)
self.apodizeLabel.setEnabled(True)
self.apodizeLabel.setObjectName("apodizeLabel")
self.deconParamsLayout.addWidget(self.apodizeLabel)
self.apodizeSpinBox = QtWidgets.QSpinBox(self.doDeconGroupBox)
self.apodizeSpinBox.setEnabled(True)
self.apodizeSpinBox.setMaximum(40)
self.apodizeSpinBox.setProperty("value", 15)
self.apodizeSpinBox.setObjectName("apodizeSpinBox")
self.deconParamsLayout.addWidget(self.apodizeSpinBox)
spacerItem9 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.deconParamsLayout.addItem(spacerItem9)
self.zblendLabel = QtWidgets.QLabel(self.doDeconGroupBox)
self.zblendLabel.setEnabled(True)
self.zblendLabel.setObjectName("zblendLabel")
self.deconParamsLayout.addWidget(self.zblendLabel)
self.zblendSpinBox = QtWidgets.QSpinBox(self.doDeconGroupBox)
self.zblendSpinBox.setEnabled(True)
self.zblendSpinBox.setMaximum(20)
self.zblendSpinBox.setObjectName("zblendSpinBox")
self.deconParamsLayout.addWidget(self.zblendSpinBox)
self.verticalLayout.addLayout(self.deconParamsLayout)
self.deconvSaveLayout = QtWidgets.QHBoxLayout()
self.deconvSaveLayout.setContentsMargins(-1, 0, -1, -1)
self.deconvSaveLayout.setSpacing(6)
self.deconvSaveLayout.setObjectName("deconvSaveLayout")
self.saveDeconvolvedCheckBox = QtWidgets.QCheckBox(self.doDeconGroupBox)
self.saveDeconvolvedCheckBox.setEnabled(False)
self.saveDeconvolvedCheckBox.setChecked(True)
self.saveDeconvolvedCheckBox.setObjectName("saveDeconvolvedCheckBox")
self.deconvSaveLayout.addWidget(self.saveDeconvolvedCheckBox)
spacerItem10 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.deconvSaveLayout.addItem(spacerItem10)
self.deconvolvedMIPFrame = QtWidgets.QFrame(self.doDeconGroupBox)
self.deconvolvedMIPFrame.setEnabled(True)
self.deconvolvedMIPFrame.setFrameShape(QtWidgets.QFrame.Panel)
self.deconvolvedMIPFrame.setFrameShadow(QtWidgets.QFrame.Raised)
self.deconvolvedMIPFrame.setObjectName("deconvolvedMIPFrame")
self.horizontalLayout_25 = QtWidgets.QHBoxLayout(self.deconvolvedMIPFrame)
self.horizontalLayout_25.setContentsMargins(5, 5, 5, 5)
self.horizontalLayout_25.setSpacing(6)
self.horizontalLayout_25.setObjectName("horizontalLayout_25")
self.deconSaveMIPSLabel = QtWidgets.QLabel(self.deconvolvedMIPFrame)
self.deconSaveMIPSLabel.setEnabled(True)
self.deconSaveMIPSLabel.setObjectName("deconSaveMIPSLabel")
self.horizontalLayout_25.addWidget(self.deconSaveMIPSLabel)
self.deconXMIPCheckBox = QtWidgets.QCheckBox(self.deconvolvedMIPFrame)
self.deconXMIPCheckBox.setEnabled(True)
self.deconXMIPCheckBox.setObjectName("deconXMIPCheckBox")
self.horizontalLayout_25.addWidget(self.deconXMIPCheckBox)
self.deconYMIPCheckBox = QtWidgets.QCheckBox(self.deconvolvedMIPFrame)
self.deconYMIPCheckBox.setEnabled(True)
self.deconYMIPCheckBox.setObjectName("deconYMIPCheckBox")
self.horizontalLayout_25.addWidget(self.deconYMIPCheckBox)
self.deconZMIPCheckBox = QtWidgets.QCheckBox(self.deconvolvedMIPFrame)
self.deconZMIPCheckBox.setEnabled(True)
self.deconZMIPCheckBox.setChecked(True)
self.deconZMIPCheckBox.setObjectName("deconZMIPCheckBox")
self.horizontalLayout_25.addWidget(self.deconZMIPCheckBox)
self.deconvSaveLayout.addWidget(self.deconvolvedMIPFrame)
spacerItem11 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.deconvSaveLayout.addItem(spacerItem11)
self.deconvolvedBitDepthCombo = QtWidgets.QComboBox(self.doDeconGroupBox)
self.deconvolvedBitDepthCombo.setEnabled(True)
self.deconvolvedBitDepthCombo.setMaximumSize(QtCore.QSize(100, 16777215))
self.deconvolvedBitDepthCombo.setObjectName("deconvolvedBitDepthCombo")
self.deconvolvedBitDepthCombo.addItem("")
self.deconvolvedBitDepthCombo.addItem("")
self.deconvSaveLayout.addWidget(self.deconvolvedBitDepthCombo)
self.verticalLayout.addLayout(self.deconvSaveLayout)
self.verticalLayout_14.addWidget(self.doDeconGroupBox)
self.deskewedGroupBox = QtWidgets.QGroupBox(self.tool_deconvolution)
self.deskewedGroupBox.setStyleSheet(
"QGroupBox{font-size: 14px} QGroupBox::title{subcontrol-position: top left}"
)
self.deskewedGroupBox.setObjectName("deskewedGroupBox")
self.gridLayout_7 = QtWidgets.QGridLayout(self.deskewedGroupBox)
self.gridLayout_7.setContentsMargins(4, 4, 8, 8)
self.gridLayout_7.setSpacing(6)
self.gridLayout_7.setObjectName("gridLayout_7")
spacerItem12 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout_7.addItem(spacerItem12, 0, 1, 1, 1)
self.deskewedMIPFrame = QtWidgets.QFrame(self.deskewedGroupBox)
self.deskewedMIPFrame.setFrameShape(QtWidgets.QFrame.Panel)
self.deskewedMIPFrame.setFrameShadow(QtWidgets.QFrame.Raised)
self.deskewedMIPFrame.setObjectName("deskewedMIPFrame")
self.horizontalLayout_21 = QtWidgets.QHBoxLayout(self.deskewedMIPFrame)
self.horizontalLayout_21.setContentsMargins(5, 5, 5, 5)
self.horizontalLayout_21.setSpacing(6)
self.horizontalLayout_21.setObjectName("horizontalLayout_21")
self.deconSaveMIPSLabel_2 = QtWidgets.QLabel(self.deskewedMIPFrame)
self.deconSaveMIPSLabel_2.setEnabled(True)
self.deconSaveMIPSLabel_2.setObjectName("deconSaveMIPSLabel_2")
self.horizontalLayout_21.addWidget(self.deconSaveMIPSLabel_2)
self.deskewedXMIPCheckBox = QtWidgets.QCheckBox(self.deskewedMIPFrame)
self.deskewedXMIPCheckBox.setObjectName("deskewedXMIPCheckBox")
self.horizontalLayout_21.addWidget(self.deskewedXMIPCheckBox)
self.deskewedYMIPCheckBox = QtWidgets.QCheckBox(self.deskewedMIPFrame)
self.deskewedYMIPCheckBox.setObjectName("deskewedYMIPCheckBox")
self.horizontalLayout_21.addWidget(self.deskewedYMIPCheckBox)
self.deskewedZMIPCheckBox = QtWidgets.QCheckBox(self.deskewedMIPFrame)
self.deskewedZMIPCheckBox.setObjectName("deskewedZMIPCheckBox")
self.horizontalLayout_21.addWidget(self.deskewedZMIPCheckBox)
self.gridLayout_7.addWidget(self.deskewedMIPFrame, 0, 2, 1, 1)
self.saveDeskewedCheckBox = QtWidgets.QCheckBox(self.deskewedGroupBox)
self.saveDeskewedCheckBox.setChecked(True)
self.saveDeskewedCheckBox.setObjectName("saveDeskewedCheckBox")
self.gridLayout_7.addWidget(self.saveDeskewedCheckBox, 0, 0, 1, 1)
self.deskewedBitDepthCombo = QtWidgets.QComboBox(self.deskewedGroupBox)
self.deskewedBitDepthCombo.setEnabled(False)
self.deskewedBitDepthCombo.setMaximumSize(QtCore.QSize(100, 16777215))
self.deskewedBitDepthCombo.setObjectName("deskewedBitDepthCombo")
self.deskewedBitDepthCombo.addItem("")
self.deskewedBitDepthCombo.addItem("")
self.gridLayout_7.addWidget(self.deskewedBitDepthCombo, 0, 4, 1, 1)
spacerItem13 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout_7.addItem(spacerItem13, 0, 3, 1, 1)
self.verticalLayout_14.addWidget(self.deskewedGroupBox)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_2.setSpacing(6)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.deconJoinMIPCheckBox = QtWidgets.QCheckBox(self.tool_deconvolution)
self.deconJoinMIPCheckBox.setEnabled(True)
self.deconJoinMIPCheckBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.deconJoinMIPCheckBox.setChecked(True)
self.deconJoinMIPCheckBox.setObjectName("deconJoinMIPCheckBox")
self.horizontalLayout_2.addWidget(self.deconJoinMIPCheckBox)
spacerItem14 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_2.addItem(spacerItem14)
self.useLZWCheckBox = QtWidgets.QCheckBox(self.tool_deconvolution)
self.useLZWCheckBox.setEnabled(True)
self.useLZWCheckBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.useLZWCheckBox.setChecked(True)
self.useLZWCheckBox.setObjectName("useLZWCheckBox")
self.horizontalLayout_2.addWidget(self.useLZWCheckBox)
self.verticalLayout_14.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1)
self.horizontalLayout_3.setSpacing(6)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.dupRevStackCheckBox = QtWidgets.QCheckBox(self.tool_deconvolution)
self.dupRevStackCheckBox.setEnabled(True)
self.dupRevStackCheckBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.dupRevStackCheckBox.setChecked(True)
self.dupRevStackCheckBox.setObjectName("dupRevStackCheckBox")
self.horizontalLayout_3.addWidget(self.dupRevStackCheckBox)
spacerItem15 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_3.addItem(spacerItem15)
self.padValLabel = QtWidgets.QLabel(self.tool_deconvolution)
self.padValLabel.setEnabled(True)
self.padValLabel.setObjectName("padValLabel")
self.horizontalLayout_3.addWidget(self.padValLabel)
self.padValSpinBox = QtWidgets.QSpinBox(self.tool_deconvolution)
self.padValSpinBox.setEnabled(True)
self.padValSpinBox.setMinimum(0)
self.padValSpinBox.setMaximum(9999)
self.padValSpinBox.setProperty("value", 0)
self.padValSpinBox.setObjectName("padValSpinBox")
self.horizontalLayout_3.addWidget(self.padValSpinBox)
self.verticalLayout_14.addLayout(self.horizontalLayout_3)
spacerItem16 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
)
self.verticalLayout_14.addItem(spacerItem16)
self.processingToolBox.addItem(self.tool_deconvolution, "")
self.tool_postprocess = QtWidgets.QWidget()
self.tool_postprocess.setGeometry(QtCore.QRect(0, 0, 547, 337))
self.tool_postprocess.setObjectName("tool_postprocess")
self.verticalLayout_12 = QtWidgets.QVBoxLayout(self.tool_postprocess)
self.verticalLayout_12.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_12.setSpacing(6)
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.croppingGroupBox = QtWidgets.QGroupBox(self.tool_postprocess)
self.croppingGroupBox.setStyleSheet(
"QGroupBox{font-size: 14px} QGroupBox::title{subcontrol-position: top left}"
)
self.croppingGroupBox.setCheckable(True)
self.croppingGroupBox.setChecked(True)
self.croppingGroupBox.setObjectName("croppingGroupBox")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.croppingGroupBox)
self.horizontalLayout_7.setContentsMargins(8, 5, 11, 5)
self.horizontalLayout_7.setSpacing(6)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.cropAutoRadio = QtWidgets.QRadioButton(self.croppingGroupBox)
self.cropAutoRadio.setChecked(True)
self.cropAutoRadio.setObjectName("cropAutoRadio")
self.horizontalLayout_7.addWidget(self.cropAutoRadio)
self.autocropPadLabel = QtWidgets.QLabel(self.croppingGroupBox)
self.autocropPadLabel.setEnabled(True)
self.autocropPadLabel.setScaledContents(False)
self.autocropPadLabel.setObjectName("autocropPadLabel")
self.horizontalLayout_7.addWidget(self.autocropPadLabel)
self.autocropPadSpinBox = QtWidgets.QSpinBox(self.croppingGroupBox)
self.autocropPadSpinBox.setEnabled(True)
self.autocropPadSpinBox.setMinimumSize(QtCore.QSize(48, 0))
self.autocropPadSpinBox.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.autocropPadSpinBox.setMaximum(500)
self.autocropPadSpinBox.setProperty("value", 50)
self.autocropPadSpinBox.setObjectName("autocropPadSpinBox")
self.horizontalLayout_7.addWidget(self.autocropPadSpinBox)
spacerItem17 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_7.addItem(spacerItem17)
self.cropManualRadio = QtWidgets.QRadioButton(self.croppingGroupBox)
self.cropManualRadio.setObjectName("cropManualRadio")
self.horizontalLayout_7.addWidget(self.cropManualRadio)
self.cropManualGroupBox = QtWidgets.QGroupBox(self.croppingGroupBox)
self.cropManualGroupBox.setEnabled(True)
self.cropManualGroupBox.setTitle("")
self.cropManualGroupBox.setFlat(False)
self.cropManualGroupBox.setObjectName("cropManualGroupBox")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.cropManualGroupBox)
self.horizontalLayout.setContentsMargins(4, 2, 2, 0)
self.horizontalLayout.setSpacing(6)
self.horizontalLayout.setObjectName("horizontalLayout")
self.cropWidthLabel = QtWidgets.QLabel(self.cropManualGroupBox)
self.cropWidthLabel.setEnabled(False)
self.cropWidthLabel.setScaledContents(False)
self.cropWidthLabel.setObjectName("cropWidthLabel")
self.horizontalLayout.addWidget(self.cropWidthLabel)
self.cropWidthSpinBox = QtWidgets.QSpinBox(self.cropManualGroupBox)
self.cropWidthSpinBox.setEnabled(False)
self.cropWidthSpinBox.setMinimumSize(QtCore.QSize(50, 0))
self.cropWidthSpinBox.setMaximum(2000)
self.cropWidthSpinBox.setObjectName("cropWidthSpinBox")
self.horizontalLayout.addWidget(self.cropWidthSpinBox)
self.cropShiftLabel = QtWidgets.QLabel(self.cropManualGroupBox)
self.cropShiftLabel.setEnabled(False)
self.cropShiftLabel.setObjectName("cropShiftLabel")
self.horizontalLayout.addWidget(self.cropShiftLabel)
self.cropShiftSpinBox = QtWidgets.QSpinBox(self.cropManualGroupBox)
self.cropShiftSpinBox.setEnabled(False)
self.cropShiftSpinBox.setMinimumSize(QtCore.QSize(48, 0))
self.cropShiftSpinBox.setMinimum(-1000)
self.cropShiftSpinBox.setMaximum(1000)
self.cropShiftSpinBox.setObjectName("cropShiftSpinBox")
self.horizontalLayout.addWidget(self.cropShiftSpinBox)
self.horizontalLayout_7.addWidget(self.cropManualGroupBox)
self.verticalLayout_12.addWidget(self.croppingGroupBox)
self.rotateGroupBox = QtWidgets.QGroupBox(self.tool_postprocess)
self.rotateGroupBox.setEnabled(True)
self.rotateGroupBox.setStyleSheet(
"QGroupBox{font-size: 14px} QGroupBox::title{subcontrol-position: top left}"
)
self.rotateGroupBox.setCheckable(True)
self.rotateGroupBox.setChecked(True)
self.rotateGroupBox.setObjectName("rotateGroupBox")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.rotateGroupBox)
self.horizontalLayout_8.setContentsMargins(8, 4, 8, 4)
self.horizontalLayout_8.setSpacing(6)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.rotateReverseCheckBox = QtWidgets.QCheckBox(self.rotateGroupBox)
self.rotateReverseCheckBox.setEnabled(True)
self.rotateReverseCheckBox.setObjectName("rotateReverseCheckBox")
self.horizontalLayout_8.addWidget(self.rotateReverseCheckBox)
spacerItem18 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_8.addItem(spacerItem18)
self.rotateOverrideCheckBox = QtWidgets.QCheckBox(self.rotateGroupBox)
self.rotateOverrideCheckBox.setEnabled(True)
self.rotateOverrideCheckBox.setObjectName("rotateOverrideCheckBox")
self.horizontalLayout_8.addWidget(
self.rotateOverrideCheckBox, 0, QtCore.Qt.AlignRight
)
self.rotateOverrideSpinBox = QtWidgets.QDoubleSpinBox(self.rotateGroupBox)
self.rotateOverrideSpinBox.setEnabled(False)
self.rotateOverrideSpinBox.setMaximumSize(QtCore.QSize(70, 16777215))
self.rotateOverrideSpinBox.setMinimum(-180.0)
self.rotateOverrideSpinBox.setMaximum(180.0)
self.rotateOverrideSpinBox.setSingleStep(0.5)
self.rotateOverrideSpinBox.setProperty("value", 31.5)
self.rotateOverrideSpinBox.setObjectName("rotateOverrideSpinBox")
self.horizontalLayout_8.addWidget(self.rotateOverrideSpinBox)
self.verticalLayout_12.addWidget(self.rotateGroupBox)
self.doRegistrationGroupBox = QtWidgets.QGroupBox(self.tool_postprocess)
self.doRegistrationGroupBox.setStyleSheet(
"QGroupBox{font-size: 14px} QGroupBox::title{subcontrol-position: top left}"
)
self.doRegistrationGroupBox.setCheckable(True)
self.doRegistrationGroupBox.setChecked(True)
self.doRegistrationGroupBox.setObjectName("doRegistrationGroupBox")
self.gridLayout = QtWidgets.QGridLayout(self.doRegistrationGroupBox)
self.gridLayout.setContentsMargins(11, 11, 11, 11)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
self.channelRefLabel = QtWidgets.QLabel(self.doRegistrationGroupBox)
self.channelRefLabel.setEnabled(True)
self.channelRefLabel.setObjectName("channelRefLabel")
self.gridLayout.addWidget(self.channelRefLabel, 0, 0, 1, 1)
self.RegProcessChannelRefCombo = QtWidgets.QComboBox(
self.doRegistrationGroupBox
)
self.RegProcessChannelRefCombo.setEnabled(True)
self.RegProcessChannelRefCombo.setMaximumSize(QtCore.QSize(65, 16777215))
self.RegProcessChannelRefCombo.setCurrentText("")
self.RegProcessChannelRefCombo.setObjectName("RegProcessChannelRefCombo")
self.gridLayout.addWidget(self.RegProcessChannelRefCombo, 0, 1, 1, 1)
self.channelRefModeLabel = QtWidgets.QLabel(self.doRegistrationGroupBox)
self.channelRefModeLabel.setObjectName("channelRefModeLabel")
self.gridLayout.addWidget(self.channelRefModeLabel, 0, 2, 1, 1)
self.RegProcessChannelRefModeCombo = QtWidgets.QComboBox(
self.doRegistrationGroupBox
)
self.RegProcessChannelRefModeCombo.setEnabled(True)
self.RegProcessChannelRefModeCombo.setMinimumSize(QtCore.QSize(130, 0))
self.RegProcessChannelRefModeCombo.setMaximumSize(QtCore.QSize(50, 16777215))
self.RegProcessChannelRefModeCombo.setCurrentText("")
self.RegProcessChannelRefModeCombo.setObjectName(
"RegProcessChannelRefModeCombo"
)
self.gridLayout.addWidget(self.RegProcessChannelRefModeCombo, 0, 3, 1, 1)
spacerItem19 = QtWidgets.QSpacerItem(
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout.addItem(spacerItem19, 0, 4, 1, 1)
self.RegProcessLoadRegFile = QtWidgets.QPushButton(self.doRegistrationGroupBox)
self.RegProcessLoadRegFile.setObjectName("RegProcessLoadRegFile")
self.gridLayout.addWidget(self.RegProcessLoadRegFile, 0, 5, 1, 1)
self.RegProcessPathLabel = QtWidgets.QLabel(self.doRegistrationGroupBox)
self.RegProcessPathLabel.setObjectName("RegProcessPathLabel")
self.gridLayout.addWidget(self.RegProcessPathLabel, 1, 0, 1, | |
# -*- coding: utf-8 -*-
"""
Tests for falcon_marshmallow.middleware
"""
# Std lib
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
try:
from unittest import mock
except ImportError:
import mock # type: ignore
from typing import Optional
# Third party
import pytest
from falcon import errors
from marshmallow import fields, Schema
# Local
from falcon_marshmallow import middleware as mid
from falcon_marshmallow.middleware import MARSHMALLOW_2
class TestMarshmallow:
    """Test Marshmallow middleware"""
    # Shared default-configuration middleware instance; tests that need
    # non-default settings construct their own Marshmallow() locally.
    mw = mid.Marshmallow()
    class FooSchema(Schema):
        """Convert foo to bar for testing purposes"""
        # marshmallow 2 used load_from/dump_to; marshmallow 3 unified both
        # into data_key, so pick the keyword matching the installed version.
        if MARSHMALLOW_2:
            bar = fields.String(load_from="foo", dump_to="foo")
        else:
            bar = fields.String(data_key="foo")
        # Deliberately strict integer field: feeding it a non-numeric string
        # is how the tests trigger schema validation errors.
        int = fields.Integer()
    def test_instantiation(self):
        """Test instantiating the class"""
        mid.Marshmallow()
    @pytest.mark.parametrize(
        "method, attr_name, has_sch",
        [
            ("GET", "get_schema", True),
            ("get", "get_schema", True),
            ("get", "get_response_schema", False),
            ("get", "get_request_schema", True),
            ("get", "schema", False),
            ("get", "put_schema", False),
            ("POST", "post_schema", True),
            ("post", "post_schema", True),
            ("POST", "schema", False),
            ("POST", "get_schema", False),
            ("post", "post_response_schema", False),
            ("post", "post_request_schema", True),
            ("PATCH", "patch_schema", True),
            ("patch", "patch_schema", True),
            ("PUT", "put_schema", True),
            ("put", "put_schema", True),
            ("DELETE", "delete_schema", True),
            ("delete", "delete_schema", True),
        ],
    )
    def test_get_specific_schema(self, method, attr_name, has_sch):
        # type: (str, str, bool) -> None
        """Test getting a specific schema from a resource

        A "specific" schema is one named after the HTTP method
        (``<method>_schema``) or after method + message type
        (``<method>_request_schema``); the generic ``schema`` attribute is
        intentionally NOT picked up here.
        """
        class TestResource:
            """Quick test object"""
            def __init__(self):
                setattr(self, attr_name, "foo")
        tr = TestResource()
        sch = mid.Marshmallow._get_specific_schema(tr, method, "request")
        if has_sch:
            assert sch == "foo"
        else:
            assert sch is None
    @pytest.mark.parametrize(
        "method, attrs, exp_value",
        [
            ("GET", (("get_schema", "foo"), ("schema", "bar")), "foo"),
            (
                "GET",
                (
                    ("get_schema", "foo"),
                    ("schema", "bar"),
                    ("post_schema", "baz"),
                ),
                "foo",
            ),
            (
                "GET",
                (
                    ("get_schema", "foo"),
                    ("get_response_schema", "bar"),
                    ("get_request_schema", "boo"),
                    ("post_response_schema", "baz"),
                ),
                "bar",
            ),
            ("GET", (("post_schema", "foo"), ("schema", "bar")), "bar"),
            ("GET", (("schema", "bar"),), "bar"),
            ("GET", (), None),
        ],
    )
    def test_get_schema(self, method, attrs, exp_value):
        # type: (str, tuple, str) -> None
        """Test getting a general or specific schema

        Precedence (most to least specific): ``get_response_schema`` >
        ``get_schema`` > ``schema`` > nothing.
        """
        class TestResource:
            """Quick test object"""
            def __init__(self):
                for attr_name, value in attrs:
                    setattr(self, attr_name, value)
        tr = TestResource()
        val = mid.Marshmallow()._get_schema(tr, method, "response")
        if exp_value is None:
            assert val is None
        else:
            assert val == exp_value
    @pytest.mark.parametrize(
        "stream, content_type, schema, schema_err, bad_sch, force_json, "
        "json_err, exp_ret",
        [
            ( # 0: Good schema
                '{"foo": "test"}',
                "application/json",
                True,
                False,
                False,
                False,
                False,
                {"bar": "test"},
            ),
            ( # 1: Schema errors on load
                '{"foo": "test", "int": "test"}',
                "application/json",
                True,
                True,
                False,
                False,
                False,
                {"bar": "test"},
            ),
            ( # 2: Good schema, bad unicode in body
                '{"foo": "testé"}',
                "application/json",
                True,
                False,
                False,
                False,
                False,
                {"bar": "testé"},
            ),
            ( # 3: Bad schema
                '{"foo": "test"}',
                "application/json",
                True,
                False,
                True,
                False,
                False,
                {"bar": "test"},
            ),
            ( # 4: No schema, no force json (no change to req.context)
                '{"foo": "test"}',
                "application/json",
                False,
                False,
                False,
                False,
                False,
                {"bar": "test"},
            ),
            ( # 5: No schema, force json
                '{"foo": "test"}',
                "application/json",
                False,
                False,
                False,
                True,
                False,
                {"foo": "test"},
            ),
            ( # 6: No schema, force json, bad json
                '{"foo": }',
                "application/json",
                False,
                False,
                False,
                True,
                True,
                {"foo": "test"},
            ),
            ( # 7: No schema, force json, good json, bad unicode
                '{"foo": "testé"}',
                "application/json",
                False,
                False,
                False,
                True,
                False,
                {"foo": "testé"},
            ),
            ( # 8: Good schema, extra info on content type
                '{"foo": "test"}',
                "application/json;encoding=latin1",
                True,
                False,
                False,
                False,
                False,
                {"bar": "test"},
            ),
            ( # 9: No content type (assume json)
                '{"foo": "test"}',
                None,
                True,
                False,
                False,
                False,
                False,
                {"bar": "test"},
            ),
            ( # 10: Non-json (no change to req.context)
                "1,2,'string'",
                "text/csv",
                False,
                False,
                False,
                False,
                False,
                {"bar": "test"},
            ),
        ],
    )
    def test_process_resource(
        self,
        stream,
        content_type,
        schema,
        schema_err,
        bad_sch,
        force_json,
        json_err,
        exp_ret,
    ):
        # type: (str, str, bool, bool, bool, bool, bool, dict) -> None
        """Test processing a resource

        :param stream: the return of req.bounded_stream.read()
        :param content_type: the value of the request's Content-Type header (req.content_type)
        :param schema: whether a schema should be returned (TestSchema)
        :param schema_err: whether a schema error is expected
        :param bad_sch: pass an uninstantiated or non-schema object
        :param force_json: whether to try json if no schema is found
        :param json_err: whether a JSON error is expected
        :param exp_ret: expected return if no errors are raised
        """
        mw = mid.Marshmallow()
        mw._force_json = force_json
        # Monkeypatch schema lookup so no real falcon resource is needed.
        if schema:
            if bad_sch:
                setattr(mw, "_get_schema", lambda *_, **__: self.FooSchema)
            else:
                setattr(mw, "_get_schema", lambda *_, **__: self.FooSchema())
        else:
            setattr(mw, "_get_schema", lambda *_, **__: None)
        req = mock.Mock(method="GET", content_type=content_type)
        req.bounded_stream.read.return_value = stream
        req.context = {}
        if schema_err:
            with pytest.raises(errors.HTTPUnprocessableEntity):
                mw.process_resource(req, "foo", "foo", "foo")  # type: ignore
            return
        if bad_sch:
            with pytest.raises(TypeError):
                mw.process_resource(req, "foo", "foo", "foo")  # type: ignore
            return
        if json_err:
            with pytest.raises(errors.HTTPBadRequest):
                mw.process_resource(req, "foo", "foo", "foo")  # type: ignore
            return
        mw.process_resource(req, "foo", "foo", "foo")  # type: ignore
        # Parsed body lands in req.context only when a schema was applied or
        # raw-JSON parsing was forced.
        if schema or force_json:
            assert req.context[mw._req_key] == exp_ret
        else:
            assert mw._req_key not in req.context
    def test_process_resource_ignores_unexpected_content_by_default(self):
        """By default, we ignore unexpected content types."""
        mw = mid.Marshmallow()
        setattr(mw, "_get_schema", lambda *_, **__: self.FooSchema())
        req = mock.Mock(method="GET", content_type="application/pdf")
        req.bounded_stream.read.return_value = ""
        req.context = {}
        mw.process_resource(req, "foo", "foo", "foo")  # type: ignore
        assert mw._req_key not in req.context
    def test_nondefault_unexpected_content_type(self):
        """We can specify any content type to handle."""
        mw = mid.Marshmallow(expected_content_type="application/pdf")
        setattr(mw, "_get_schema", lambda *_, **__: self.FooSchema())
        req = mock.Mock(method="GET", content_type="application/pdf")
        req.bounded_stream.read.return_value = '{"foo": "test"}'
        req.context = {}
        mw.process_resource(req, "foo", "foo", "foo")  # type: ignore
        assert req.context[mw._req_key] == {"bar": "test"}
    def test_handle_unexpected_content_type(self):
        """We can specify to handle unexpected types."""
        mw = mid.Marshmallow(
            expected_content_type="application/pdf",
            handle_unexpected_content_types=True,
        )
        setattr(mw, "_get_schema", lambda *_, **__: self.FooSchema())
        req = mock.Mock(method="GET", content_type="text/plain")
        req.bounded_stream.read.return_value = '{"foo": "test"}'
        req.context = {}
        mw.process_resource(req, "foo", "foo", "foo")  # type: ignore
        assert req.context[mw._req_key] == {"bar": "test"}
    @pytest.mark.parametrize(
        "res, schema, sch_err, bad_sch, force_json, json_err, exp_ret",
        [
            ( # 0: Good result, good schema
                {"bar": "test"},
                True,
                False,
                False,
                False,
                False,
                '{"foo": "test"}',
            ),
            ( # 1: Schema error on loading result
                {"bar": "test", "int": "foo"},
                True,
                True,
                False,
                False,
                False,
                '{"foo": "test"}',
            ),
            ( # 2: Bad schema instance (not an instance)
                {"bar": "test"},
                True,
                False,
                True,
                False,
                False,
                '{"foo": "test"}',
            ),
            ( # 3: Good result, no schema, force json
                {"bar": "test"},
                False,
                False,
                False,
                True,
                False,
                '{"bar": "test"}',
            ),
            ( # 4: Unserializable response, no schema, force json
                set("foo"),
                False,
                False,
                False,
                True,
                True,
                '{"bar": "test"}',
            ),
            ( # 5: No schema, no force json
                {"bar": "test"},
                False,
                False,
                False,
                False,
                False,
                '{"foo": "test"}',
            ),
            ( # 6: No result
                None,
                True,
                False,
                False,
                False,
                False,
                '{"foo": "test"}',
            ),
        ],
    )
    def test_process_response(
        self, res, schema, sch_err, bad_sch, force_json, json_err, exp_ret
    ):
        """Test process_response

        :param res: the value to put in the result key on req.context
        :param schema: whether a schema should be available
        :param sch_err: whether an error is expected dumping the data
        :param bad_sch: pass an uninstantiated or non-schema object
        :param force_json: whether force_json is true
        :param json_err: whether an error is expected dumping the data
        :param exp_ret: expected return if no errors are raised
        """
        mw = mid.Marshmallow()
        mw._force_json = force_json
        if schema:
            if bad_sch:
                setattr(mw, "_get_schema", lambda *_, **__: self.FooSchema)
            else:
                setattr(mw, "_get_schema", lambda *_, **__: self.FooSchema())
        else:
            setattr(mw, "_get_schema", lambda *_, **__: None)
        req = mock.Mock(method="GET")
        if res is None:
            req.context = {}
        else:
            req.context = {mw._resp_key: res}
        resp = mock.Mock()
        if bad_sch:
            with pytest.raises(TypeError):
                mw.process_response(req, resp, "foo", "foo")  # type: ignore
            return
        if sch_err:
            with pytest.raises(errors.HTTPInternalServerError):
                mw.process_response(req, resp, "foo", "foo")  # type: ignore
            return
        if json_err:
            with pytest.raises(errors.HTTPInternalServerError):
                mw.process_response(req, resp, "foo", "foo")  # type: ignore
            return
        mw.process_response(req, resp, "foo", "foo")  # type: ignore
        if res is None or (not schema and not force_json):
            # "body" has not been written, and is thus a mock object still
            assert isinstance(resp.body, mock.Mock)
        else:
            assert resp.body == exp_ret
class TestJSONEnforcer:
"""Test enforcement of JSON requests"""
enforcer = mid.JSONEnforcer()
@pytest.mark.parametrize("accepts", [True, False])
def test_client_accept(self, accepts):
# type: (bool) -> None
"""Test asserting that the client accepts JSON"""
req = mock.Mock()
req.client_accepts_json = accepts
req.method = "GET"
if not accepts:
with pytest.raises(errors.HTTPNotAcceptable):
# noinspection PyTypeChecker
self.enforcer.process_request(req, "foo")
else:
# noinspection PyTypeChecker
self.enforcer.process_request(req, "foo")
@pytest.mark.parametrize(
"method, content_type, raises",
[
("GET", None, False),
("GET", "application/json", False),
("GET", "mimetype/xml", False),
("POST", None, True),
("POST", "application/json", False),
("POST", "mimetype/xml", True),
("PATCH", None, True),
("PATCH", "application/json", False),
("PATCH", "mimetype/xml", True),
("PUT", None, True),
("PUT", "application/json", False),
("PUT", "mimetype/xml", True),
("DELETE", None, False),
("DELETE", | |
# (removed stray "<gh_stars>1-10" scraping artifact -- not valid Python)
# Copyright (C) 2020-2022 <NAME>, <NAME>, and others
# SPDX-License-Identifier: MIT
import numpy as np
from warnings import warn
import numpy.polynomial.legendre as np_legendre
import scipy.special as sp_special
import scipy.integrate as sp_integrate
from . import _roots
from . import _gauss
try:
from xprec import ddouble as _ddouble
except ImportError:
_ddouble = None
_xwork_dtype = float
else:
_xwork_dtype = _ddouble
class PiecewiseLegendrePoly:
    """Piecewise Legendre polynomial.
    Models a function on the interval ``[-1, 1]`` as a set of segments on the
    intervals ``S[i] = [a[i], a[i+1]]``, where on each interval the function
    is expanded in scaled Legendre polynomials.
    """
    def __init__(self, data, knots, dx=None, symm=None):
        """Piecewise Legendre polynomial.

        ``data`` is indexed as ``data[l, i, ...]`` with ``l`` the Legendre
        order and ``i`` the segment; trailing axes (if any) enumerate a set
        of functions.  If ``knots`` is itself a ``PiecewiseLegendrePoly``,
        this acts as a copy-constructor: the segment layout is taken over
        from ``knots`` and only ``data``/``symm`` are replaced.
        """
        if np.isnan(data).any():
            raise ValueError("PiecewiseLegendrePoly: data contains NaN!")
        if isinstance(knots, self.__class__):
            # Copy-constructor path: dx must not be passed and symm must be.
            if dx is not None or symm is None:
                raise RuntimeError("wrong arguments")
            self.__dict__.update(knots.__dict__)
            self.data = data
            self.symm = symm
            return
        data = np.array(data)
        knots = np.array(knots)
        polyorder, nsegments = data.shape[:2]
        if knots.shape != (nsegments+1,):
            raise ValueError("Invalid knots array")
        if not (knots[1:] >= knots[:-1]).all():
            raise ValueError("Knots must be monotonically increasing")
        if symm is None:
            # TODO: infer symmetry from data
            symm = np.zeros(data.shape[2:])
        else:
            symm = np.array(symm)
            if symm.shape != data.shape[2:]:
                raise ValueError("shape mismatch")
        if dx is None:
            dx = knots[1:] - knots[:-1]
        else:
            dx = np.array(dx)
            if not np.allclose(dx, knots[1:] - knots[:-1]):
                raise ValueError("dx must work with knots")
        self.nsegments = nsegments
        self.polyorder = polyorder
        self.xmin = knots[0]
        self.xmax = knots[-1]
        self.knots = knots
        self.dx = dx
        self.data = data
        self.symm = symm
        # Per-segment midpoints and scaling used to map x onto [-1, 1]
        # within each segment; _norm keeps the scaled polynomials normalized.
        self._xm = .5 * (knots[1:] + knots[:-1])
        self._inv_xs = 2/dx
        self._norm = np.sqrt(self._inv_xs)
    def __getitem__(self, l):
        """Return part of a set of piecewise polynomials"""
        new_symm = self.symm[l]
        if isinstance(l, tuple):
            new_data = self.data[(slice(None), slice(None), *l)]
        else:
            new_data = self.data[:,:,l]
        # Reuses the copy-constructor (`self` supplies the segment layout).
        return self.__class__(new_data, self, symm=new_symm)
    def __call__(self, x):
        """Evaluate polynomial at position x"""
        i, xtilde = self._split(np.asarray(x))
        data = self.data[:, i]
        # Evaluate for all values of l. x and data array must be
        # broadcast'able against each other, so we append dimensions here
        func_dims = self.data.ndim - 2
        datashape = i.shape + (1,) * func_dims
        res = np_legendre.legval(xtilde.reshape(datashape), data, tensor=False)
        res *= self._norm[i.reshape(datashape)]
        # Finally, exchange the x and vector dimensions
        order = tuple(range(i.ndim, i.ndim + func_dims)) + tuple(range(i.ndim))
        return res.transpose(*order)
    def value(self, l, x):
        """Return value for l and x."""
        if self.data.ndim != 3:
            raise ValueError("Only allowed for vector of data")
        l, x = np.broadcast_arrays(l, x)
        i, xtilde = self._split(x)
        data = self.data[:, i, l]
        # This should now neatly broadcast against each other
        res = np_legendre.legval(xtilde, data, tensor=False)
        res *= self._norm[i]
        return res
    def overlap(self, f, *, rtol=2.3e-16, return_error=False):
        r"""Evaluate overlap integral of this polynomial with function ``f``.
        Given the function ``f``, evaluate the integral::
            ∫ dx * f(x) * self(x)
        using piecewise Gauss-Legendre quadrature, where ``self`` are the
        polynomials.
        Arguments:
            f (callable):
                function that is called with a point ``x`` and returns ``f(x)``
                at that position.
        Return:
            array-like object with shape (poly_dims, f_dims)
            poly_dims are the shape of the polynomial and f_dims are those
            of the function f(x).
        """
        int_result, int_error = _compute_overlap(self, f, rtol=rtol)
        if return_error:
            return int_result, int_error
        else:
            return int_result
    def deriv(self, n=1):
        """Get polynomial for the n'th derivative"""
        ddata = np_legendre.legder(self.data, n)
        # Chain rule: each derivative picks up a factor of d(xtilde)/dx
        # per segment; differentiation also flips parity n times.
        _scale_shape = (1, -1) + (1,) * (self.data.ndim - 2)
        scale = self._inv_xs ** n
        ddata *= scale.reshape(_scale_shape)
        return self.__class__(ddata, self, symm=(-1)**n * self.symm)
    def hat(self, freq, n_asymp=None):
        """Get Fourier transformed object"""
        return PiecewiseLegendreFT(self, freq, n_asymp)
    def roots(self, alpha=2):
        """Find all roots of the piecewise polynomial
        Assume that between each two knots (pieces) there are at most ``alpha``
        roots.
        """
        if self.data.ndim > 2:
            raise ValueError("select single polynomial before calling roots()")
        grid = self.knots
        xmid = (self.xmax + self.xmin) / 2
        # For a (anti)symmetric polynomial, search only the right half and
        # mirror the roots afterwards.
        if self.symm:
            if grid[self.nsegments // 2] == xmid:
                grid = grid[self.nsegments//2:]
            else:
                grid = np.hstack((xmid, grid[grid > xmid]))
        grid = _refine_grid(grid, alpha)
        roots = _roots.find_all(self, grid)
        if self.symm == 1:
            revroots = (self.xmax + self.xmin) - roots[::-1]
            roots = np.hstack((revroots, roots))
        elif self.symm == -1:
            # There must be a zero at exactly the midpoint, but we may either
            # slightly miss it or have a spurious zero
            if roots.size:
                if roots[0] == xmid or self(xmid) * self.deriv()(xmid) < 0:
                    roots = roots[1:]
            revroots = (self.xmax + self.xmin) - roots[::-1]
            roots = np.hstack((revroots, xmid, roots))
        return roots
    # Shape of the function dimensions (axes beyond Legendre order/segment).
    @property
    def shape(self): return self.data.shape[2:]
    # Number of functions in the set (product of the function dimensions).
    @property
    def size(self): return self.data[:1,:1].size
    @property
    def ndim(self): return self.data.ndim - 2
    def _in_domain(self, x):
        return (x >= self.xmin).all() and (x <= self.xmax).all()
    def _split(self, x):
        """Split segment"""
        # Map each x to its segment index i and the scaled local coordinate
        # xtilde in [-1, 1]; x == xmax is clipped into the last segment.
        if not self._in_domain(x):
            raise ValueError("x must be in [%g, %g]" % (self.xmin, self.xmax))
        i = self.knots.searchsorted(x, 'right').clip(None, self.nsegments)
        i -= 1
        xtilde = x - self._xm[i]
        xtilde *= self._inv_xs[i]
        return i, xtilde
class PiecewiseLegendreFT:
    """Fourier transform of a piecewise Legendre polynomial.
    For a given frequency index ``n``, the Fourier transform of the Legendre
    function is defined as::
        phat(n) == ∫ dx exp(1j * pi * n * x / (xmax - xmin)) p(x)
    The polynomial is continued either periodically (``freq='even'``), in which
    case ``n`` must be even, or antiperiodically (``freq='odd'``), in which case
    ``n`` must be odd.
    """
    # Default search grid for extrema(): dense integers below 2**6, then
    # logarithmically spaced values up to 2**25.
    _DEFAULT_GRID = np.hstack([np.arange(2**6),
                               (2**np.linspace(6, 25, 16*(25-6)+1)).astype(int)])
    def __init__(self, poly, freq='even', n_asymp=None, power_model=None):
        if poly.xmin != -1 or poly.xmax != 1:
            raise NotImplementedError("Only interval [-1, 1] supported")
        self.poly = poly
        self.freq = freq
        # zeta is the parity of admissible reduced frequencies
        # (0: bosonic/even, 1: fermionic/odd, None: any).
        self.zeta = {'any': None, 'even': 0, 'odd': 1}[freq]
        if n_asymp is None:
            self.n_asymp = np.inf
            self._model = None
        else:
            self.n_asymp = n_asymp
            if power_model is None:
                self._model = _power_model(freq, poly)
            else:
                self._model = power_model
    @property
    def shape(self): return self.poly.shape
    @property
    def size(self): return self.poly.size
    @property
    def ndim(self): return self.poly.ndim
    def __getitem__(self, l):
        # Slice the asymptotic model alongside the polynomial (if present).
        model = self._model if self._model is None else self._model[l]
        return self.__class__(self.poly[l], self.freq, self.n_asymp, model)
    def __call__(self, n):
        """Obtain Fourier transform of polynomial for given frequencies"""
        # Evaluate only on unique frequencies
        n_unique, n_where = np.unique(n.ravel(), return_inverse=True)
        result_flat = self._call_impl(n_unique)[..., n_where]
        return result_flat.reshape(self.poly.shape + n.shape)
    def _call_impl(self, n):
        """Obtain Fourier transform of polynomial for given frequencies"""
        n = check_reduced_matsubara(n, self.zeta)
        n_flat = n.ravel()
        result_flat = _compute_unl_inner(self.poly, n_flat)
        # We use the asymptotics at frequencies larger than conv_radius
        # since it has lower relative error.
        cond_outer = np.abs(n_flat) >= self.n_asymp
        if cond_outer.any():
            n_outer = n_flat[cond_outer]
            result_flat[..., cond_outer] = self._model.giw(n_outer).T
        return result_flat.reshape(self.poly.shape + n.shape)
    def extrema(self, part=None, grid=None):
        """Obtain extrema of fourier-transformed polynomial."""
        if self.poly.shape:
            raise ValueError("select single polynomial")
        if grid is None:
            grid = self._DEFAULT_GRID
        f = self._func_for_part(part)
        x0 = _roots.discrete_extrema(f, grid)
        # Map grid indices back to reduced Matsubara frequencies and
        # mirror them to negative frequencies.
        x0 = 2 * x0 + self.zeta
        return _symmetrize_matsubara(x0)
    def _func_for_part(self, part=None):
        # Choose the real or imaginary part of the transform; for a
        # (anti)symmetric polynomial only one of them is non-trivial.
        if part is None:
            parity = self.poly.symm
            if np.allclose(parity, 1):
                part = 'real' if self.zeta == 0 else 'imag'
            elif np.allclose(parity, -1):
                part = 'imag' if self.zeta == 0 else 'real'
            else:
                raise ValueError("cannot detect parity.")
        if part == 'real':
            return lambda n: self(2*n + self.zeta).real
        elif part == 'imag':
            return lambda n: self(2*n + self.zeta).imag
        else:
            raise ValueError("part must be either 'real' or 'imag'")
def check_reduced_matsubara(n, zeta=None):
    """Validate ``n`` as reduced Matsubara frequencies and return it as ints.

    A reduced Matsubara frequency is the integer obtained by scaling the
    frequency ``w[n]`` as ``beta / np.pi * w[n] == 2 * n + zeta``.  Fermionic
    frequencies (``zeta == 1``) thus correspond to odd integers, bosonic ones
    (``zeta == 0``) to even integers.  With ``zeta=None`` either parity is
    accepted.
    """
    values = np.asarray(n)
    if not np.issubdtype(values.dtype, np.integer):
        truncated = values.astype(int)
        # Conversion must be lossless, otherwise n was not integral.
        if not np.array_equal(truncated, values):
            raise ValueError("reduced frequency n must be integer")
        values = truncated
    if zeta is not None and not ((values & 1) == zeta).all():
        raise ValueError("n have wrong parity")
    return values
def _imag_power(n):
"""Imaginary unit raised to an integer power without numerical error"""
n = np.asarray(n)
if not np.issubdtype(n.dtype, np.integer):
raise ValueError("expecting set of integers here")
cycle = np.array([1, 0+1j, -1, 0-1j], complex)
return cycle[n % 4]
def _get_tnl(l, w):
r"""Fourier integral of the l-th Legendre polynomial::
T_l(w) == \int_{-1}^1 dx \exp(iwx) P_l(x)
"""
# | |
# (removed stray "<gh_stars>1-10" scraping artifact -- not valid Python)
import abc
import io
import json
from collections import namedtuple
from typing import Any, Optional, Tuple
try:
from avro import io as avro_io
from avro import schema
have_avro = True
except ImportError:
have_avro = False
try:
import msgpack
have_msgpack = True
except ImportError:
have_msgpack = False
try:
import yaml
have_yaml = True
except ImportError:
have_yaml = False
try:
from google.protobuf.message import Message
from google.protobuf import symbol_database
have_protobuf = True
except ImportError:
have_protobuf = False
CONTENT_TYPE_AVRO = "application/x-avro"
CONTENT_TYPE_DATA = "application/data"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_MSGPACK = "application/msgpack"
CONTENT_TYPE_PROTOBUF = "application/vnd.google.protobuf"
CONTENT_TYPE_TEXT = "text/plain"
CONTENT_TYPE_YAML = "application/yaml"
codec = namedtuple("codec", ("content_type", "content_encoding", "serializer"))
class ISerializer(abc.ABC):
    """
    This class represents the base interface for a serializer.

    Concrete serializers implement the ``encode``/``decode`` pair and are
    registered with a :class:`SerializerRegistry`.
    """
    @abc.abstractmethod # pragma: no branch
    def encode(self, data, **kwargs):
        """ Returns serialized data as a bytes object. """
    @abc.abstractmethod # pragma: no branch
    def decode(self, data, **kwargs):
        """ Returns deserialized data """
class SerializerRegistry:
    """ This registry keeps track of serialization strategies.
    A convenience name or the specific content-type string can be used to
    reference a specific serializer that is capable of encoding and decoding.
    """
    def __init__(self):
        # name -> codec(content_type, content_encoding, serializer)
        self._serializers = {}
        # Resolved name of the default serializer (set via set_default).
        self._default_codec = None
        # Bidirectional maps between mime-types and convenience names.
        self.type_to_name = {}
        self.name_to_type = {}
    def register(
        self,
        name: Optional[str],
        serializer: ISerializer,
        content_type: str,
        content_encoding: str = "utf-8",
    ):
        """ Register a new serializer.
        :param name: A convenience name for the serialization method.
        :param serializer: An object that implements the ISerializer interface
        that can encode objects and decode data back into the original object.
        :param content_type: The mime-type describing the serialized structure.
        :param content_encoding: The content encoding (character set) that
        the decoder method will be returning. Will usually be `utf-8` or `binary`.
        """
        if not isinstance(serializer, ISerializer):
            raise Exception(
                f"Invalid serializer '{name}'. Expected an instance of ISerializer"
            )
        self._serializers[name] = codec(content_type, content_encoding, serializer)
        # map convenience name to mime-type and back again.
        self.type_to_name[content_type] = name
        self.name_to_type[name] = content_type
    def set_default(self, name_or_type: str):
        """ Set the default serialization method used by this library.
        :param name_or_type: a string specifying the serialization strategy.
        The string may be the alias name (e.g. json) or the mime-type
        (e.g. application/json).
        Raises:
            Exception: If the serialization method requested is not available.
        """
        self._default_codec = self._resolve(name_or_type)
    @property
    def serializers(self):
        return self._serializers
    def get_serializer(self, name_or_type: str):
        """ Return a specific serializer.
        :param name_or_type: a string specifying the serialization strategy
        to apply to the data (e.g. ``json``). The string may be the
        convenience name (e.g. json) or the mime-type (e.g. application/json).
        :returns: A serializer object that can encode and decode bytes
        using the named strategy.
        """
        name = self._resolve(name_or_type)
        return self._serializers[name].serializer
    def get_codec(self, name_or_type: str):
        """ Return codec attributes for a specific serializer.
        :param name_or_type: a string specifying the serialization strategy
        to apply to the data (e.g. ``json``). The string may be the
        convenience name (e.g. json) or the mime-type (e.g. application/json).
        :returns: A codec named tuple.
        """
        name = self._resolve(name_or_type)
        return self._serializers[name]
    def dumps(
        self, data: Any, name_or_type: str = None, **kwargs
    ) -> Tuple[Optional[str], str, bytes]:
        """ Encode data.
        Serialize a data structure into a bytes object suitable for sending
        as a message body.
        :param data: The message data to send.
        :param name_or_type: A string representing the serialization strategy
        to apply to the data (e.g. ``json``, etc). The string may be the
        convenience name (e.g. json) or the mime-type (e.g. application/json).
        If not specified then a best effort guess will be made. If data is a
        string then text will be used, if the data is bytes then data will be
        used, otherwise the default serializer will be used (JSON).
        Keywords:
        :param type_identifier: An integer that uniquely identifies a
        registered message.
        :returns: A three-item tuple: a string specifying the content type
        (e.g. `application/json`), a string specifying the content encoding
        (e.g. `utf-8`) and the serialized data as bytes.
        Raises:
            Exception: If the serialization method requested is not available.
        """
        if name_or_type:
            # Accept either the convenience name or the mime-type, as
            # documented (and consistently with get_serializer/loads).
            try:
                name = self._resolve(name_or_type)
            except Exception:
                raise Exception(
                    f"Invalid serializer {name_or_type}, can't encode."
                ) from None
            content_type, content_encoding, serializer = self._serializers[name]
            payload = serializer.encode(data, **kwargs)
        else:
            # Make a best guess based on data type
            if isinstance(data, bytes):
                content_type = CONTENT_TYPE_DATA
                content_encoding = "binary"
                payload = data
            elif isinstance(data, str):
                content_type = CONTENT_TYPE_TEXT
                content_encoding = "utf-8"
                payload = data.encode("utf-8")
            else:
                # Use the default encoder, forwarding any keyword arguments
                # (e.g. type_identifier) just as the explicit branch does.
                content_type, content_encoding, serializer = self._serializers[
                    self._default_codec
                ]
                payload = serializer.encode(data, **kwargs)
        return content_type, content_encoding, payload
    def loads(
        self,
        data: bytes,
        content_type: Optional[str],
        content_encoding: Optional[str],
        **kwargs,
    ) -> Any:
        """ Decode serialized data.
        Deserialize a data blob that was serialized using `dumps` based on `content_type`.
        :param data: The message data to deserialize.
        :param content_type: The content-type of the data (e.g., application/json).
        :param content_encoding: The content-encoding of the data. (e.g., utf-8,
        binary). NOTE: This parameter is not currently used.
        Keywords:
        :param type_identifier: An integer that uniquely identifies a
        registered message.
        Raises:
            Exception: If the serialization method requested is not available.
        Returns:
            The deserialized data.
        """
        content_type = content_type if content_type else CONTENT_TYPE_DATA
        # Currently the implementation only supports text data (text, json,
        # yaml) as utf-8. If/when more is needed then the content_encoding
        # parameter will need to be fed down into the serializers.
        # content_encoding = (content_encoding or "utf-8").lower()
        if data:
            name = self._resolve(content_type)
            _ct, _ce, serializer = self._serializers[name]
            return serializer.decode(data, **kwargs)
        return data
    def _resolve(self, x: str) -> str:
        """ Return a serializer alias string.
        :param x: a string specifying the serialization strategy.
        The string may be the alias name (e.g. json) or the mime-type
        (e.g. application/json).
        """
        if x in self.name_to_type:  # pylint: disable=no-else-return
            return x
        elif x in self.type_to_name:
            return self.type_to_name[x]
        else:
            raise Exception(f"Invalid serializer '{x}'")
def register_none(reg: SerializerRegistry):
    """ The serialization you have when you don't want serialization. """
    class NoneSerializer(ISerializer):
        """Pass bytes through untouched in both directions."""
        def encode(self, data, **kwargs):
            """ Returns serialized data as a bytes object. """
            if isinstance(data, bytes):
                return data
            raise Exception(f"Can only serialize bytes, got {type(data)}")
        def decode(self, data, **kwargs):
            """ Returns deserialized data """
            return data
    reg.register(
        None,
        NoneSerializer(),
        content_type=CONTENT_TYPE_DATA,
        content_encoding="binary",
    )
def register_text(reg: SerializerRegistry) -> None:
    """ Register an encoder/decoder for TEXT serialization. """
    class TextSerializer(ISerializer):
        """UTF-8 encoder/decoder for plain strings."""
        def encode(self, data, **kwargs) -> bytes:
            """ Serialize a string to UTF-8 bytes. """
            return data.encode("utf-8")
        def decode(self, data: bytes, **kwargs) -> str:
            """ Decode UTF-8 *data* back into a str. """
            return data.decode("utf-8")
    reg.register(
        "text",
        TextSerializer(),
        content_type=CONTENT_TYPE_TEXT,
        content_encoding="utf-8",
    )
def register_json(reg: SerializerRegistry) -> None:
    """ Register an encoder/decoder for JSON serialization. """
    class JsonSerializer(ISerializer):
        """JSON serialization via the stdlib ``json`` module."""
        def encode(self, data: Any, **kwargs) -> bytes:
            """ Serialize *data* to JSON and return UTF-8 bytes. """
            return json.dumps(data).encode("utf-8")
        def decode(self, data: bytes, **kwargs) -> str:
            """ Parse JSON *data* (bytes or str) back into a Python object. """
            if isinstance(data, str):
                text = data
            else:
                text = data.decode("utf-8")
            return json.loads(text)
    reg.register(
        "json",
        JsonSerializer(),
        content_type=CONTENT_TYPE_JSON,
        content_encoding="utf-8",
    )
def register_msgpack(reg: SerializerRegistry) -> None:
    """ Register an encoder/decoder for MsgPack serialization.
    This is a no-op when the ``msgpack`` package is not installed.
    """
    if not have_msgpack:
        return
    class MsgpackSerializer(ISerializer):
        """
        Must use the use_bin_type flag to ensure that str objects
        are returned back as str objects. This avoids the well
        known problem of msgpack 'raw' which returns str and bytes
        objects as bytes.
        """
        def encode(self, data: Any, **kwargs) -> bytes:
            """ Serialize *data* to MsgPack and return a bytes object. """
            return msgpack.packb(data, use_bin_type=True)
        def decode(self, data: bytes, **kwargs) -> str:
            """ Deserialize MsgPack *data* back into a Python object. """
            return msgpack.unpackb(data, raw=False)
    reg.register(
        "msgpack",
        MsgpackSerializer(),
        content_type=CONTENT_TYPE_MSGPACK,
        content_encoding="binary",
    )
def register_yaml(reg: SerializerRegistry) -> None:
""" Register an encoder/decoder for YAML serialization.
It is slower than JSON, but allows for more data types to be serialized.
Useful if you need to send data such as dates.
"""
if have_yaml:
class YamlSerializer(ISerializer):
def encode(self, data: Any, **kwargs) -> bytes:
""" Encode an | |
out2), dim=1)
    def _forward_first_plus_last(self, embedded, batch):
        """Pool each target token as a weighted sum of its first and last
        subword embeddings: ``w * first + (1 - w) * last``, then concatenate
        the two pooled token vectors along the feature axis.
        """
        # NOTE(review): assumes batch.token_starts[b, i+1] is the subword
        # index where token i starts (the +1 skipping a leading special
        # token), so token i spans [token_starts[i+1], token_starts[i+2]) --
        # confirm against the batch builder.
        batch_size = embedded.size(0)
        batch_ids = np.arange(batch_size)
        w = self.subword_w
        first1 = batch.token_starts[batch_ids, batch.idx1+1]
        first2 = batch.token_starts[batch_ids, batch.idx2+1]
        # Last subword of each token: one before the next token's start.
        last1 = batch.token_starts[batch_ids, batch.idx1+2] - 1
        last2 = batch.token_starts[batch_ids, batch.idx2+2] - 1
        first1 = embedded[batch_ids, first1]
        last1 = embedded[batch_ids, last1]
        first2 = embedded[batch_ids, first2]
        last2 = embedded[batch_ids, last2]
        tgt1 = w * first1 + (1-w) * last1
        tgt2 = w * first2 + (1-w) * last2
        return torch.cat((tgt1, tgt2), dim=1)
    def _forward_lstm(self, embedded, batch):
        """Pool the subword embeddings of each of the two target tokens with
        ``self.pool_lstm`` and concatenate the resulting hidden states.
        """
        batch_size = embedded.size(0)
        batch_ids = np.arange(batch_size)
        target_vecs = []
        # Subword span [first, last) of each target token.
        first1 = batch.token_starts[batch_ids, batch.idx1+1]
        first2 = batch.token_starts[batch_ids, batch.idx2+1]
        last1 = batch.token_starts[batch_ids, batch.idx1+2]
        last2 = batch.token_starts[batch_ids, batch.idx2+2]
        for bi in range(batch_size):
            # NOTE(review): h[0]/h[1] concatenation assumes pool_lstm is a
            # single-layer bidirectional LSTM (one final state per
            # direction) -- confirm its construction.
            lstm_in1 = embedded[bi, first1[bi]:last1[bi]].unsqueeze(0)
            _, (h, c) = self.pool_lstm(lstm_in1)
            h1 = torch.cat((h[0], h[1]), dim=-1)
            lstm_in2 = embedded[bi, first2[bi]:last2[bi]].unsqueeze(0)
            _, (h, c) = self.pool_lstm(lstm_in2)
            h2 = torch.cat((h[0], h[1]), dim=-1)
            target_vecs.append(torch.cat((h1[0], h2[0])))
        return torch.stack(target_vecs)
def _forward_mlp(self, embedded, batch):
batch_size = embedded.size(0)
batch_ids = np.arange(batch_size)
target_vecs = []
first1 = batch.token_starts[batch_ids, batch.idx1+1]
first2 = batch.token_starts[batch_ids, batch.idx2+1]
last1 = batch.token_starts[batch_ids, batch.idx1+2]
last2 = batch.token_starts[batch_ids, batch.idx2+2]
for bi in range(batch_size):
mlp_in1 = embedded[bi, first1[bi]:last1[bi]]
w1 = self.subword_mlp(mlp_in1)
w1 = self.softmax(w1).transpose(0, 1)
t1 = w1.mm(mlp_in1).squeeze(0)
mlp_in2 = embedded[bi, first2[bi]:last2[bi]]
w2 = self.subword_mlp(mlp_in2)
w2 = self.softmax(w2).transpose(0, 1)
t2 = w2.mm(mlp_in2).squeeze(0)
target_vecs.append(torch.cat((t1, t2)))
return torch.stack(target_vecs)
def _forward_last2(self, embedded, batch):
    """Pool each target word as the concat of its last two subword vectors.

    A word consisting of a single subword is padded with a zero vector in
    place of the missing second-to-last subword. The pooled vectors of the
    two target words are concatenated per batch row.
    """
    hidden = embedded.size(2)
    starts = batch.token_starts
    zero = to_cuda(torch.zeros(hidden))

    def last_two(row, word_idx):
        end = starts[row, word_idx + 2] - 1
        # Single-subword word: the next word starts right after this one.
        single = starts[row, word_idx + 1] + 1 == starts[row, word_idx + 2]
        prev = zero if single else embedded[row, end - 1]
        return torch.cat((embedded[row, end], prev), 0)

    pooled = [
        torch.cat((last_two(bi, batch.idx1[bi]),
                   last_two(bi, batch.idx2[bi])))
        for bi in range(embedded.size(0))
    ]
    return torch.stack(pooled)
class TransformerForSequenceTagging(BaseModel):
    """Token-level tagger on top of transformer subword embeddings.

    Subword vectors produced by ``Embedder`` are pooled into one vector per
    token according to ``config.subword_pooling`` and classified by an MLP.
    Supported pooling modes: 'first', 'last', 'max', 'sum', 'avg' (cached),
    'last2', 'f+l' (weighted first+last), 'lstm' and 'mlp'.
    """

    def __init__(self, config, dataset):
        super().__init__(config)
        self.dataset = dataset
        self.embedder = Embedder(
            self.config.model_name, use_cache=False,
            layer_pooling=self.config.layer_pooling)
        self.output_size = len(dataset.vocabs.labels)
        self.dropout = nn.Dropout(self.config.dropout)
        # Size of the per-token vector fed to the classifier MLP; some
        # pooling modes change it below.
        mlp_input_size = self.embedder.hidden_size
        if self.config.subword_pooling == 'lstm':
            sw_lstm_size = self.config.subword_lstm_size
            mlp_input_size = sw_lstm_size
            # Bidirectional LSTM over the subwords of each token; each
            # direction contributes half of sw_lstm_size.
            self.subword_lstm = nn.LSTM(
                self.embedder.hidden_size,
                sw_lstm_size // 2,
                num_layers=1,
                batch_first=True,
                bidirectional=True,
            )
        elif self.config.subword_pooling == 'mlp':
            # Scores each subword vector with a scalar used as a softmax
            # attention weight.
            self.subword_mlp = MLP(
                input_size=self.embedder.hidden_size,
                layers=[self.config.subword_mlp_size],
                nonlinearity='ReLU',
                output_size=1
            )
            self.softmax = nn.Softmax(dim=0)
        elif self.config.subword_pooling == 'last2':
            # Last two subword vectors are concatenated.
            mlp_input_size *= 2
        self.mlp = MLP(
            input_size=mlp_input_size,
            layers=self.config.mlp_layers,
            nonlinearity=self.config.mlp_nonlinearity,
            output_size=self.output_size,
        )
        if self.config.subword_pooling == 'f+l':
            # Learned interpolation weight between first and last subword,
            # initialised to 0.5.
            self.subword_w = nn.Parameter(torch.ones(1, dtype=torch.float) / 2)
        # Cache for pooled embeddings, keyed by the flattened input ids
        # (only used by _forward_with_cache). NOTE(review): unbounded --
        # assumes a fixed dataset that is iterated repeatedly.
        self._cache = {}
        self.criterion = nn.CrossEntropyLoss()
        # Dispatch table from pooling mode to implementation.
        self.pooling_func = {
            'first': self._forward_with_cache,
            'last': self._forward_with_cache,
            'max': self._forward_with_cache,
            'sum': self._forward_with_cache,
            'avg': self._forward_with_cache,
            'last2': self._forward_last2,
            'f+l': self._forward_first_plus_last,
            'lstm': self._forward_lstm,
            'mlp': self._forward_mlp,
        }

    def forward(self, batch):
        """Pool subwords per token and return per-token label scores."""
        subword_pooling = self.config.subword_pooling
        out = self.pooling_func[subword_pooling](batch)
        out = self.dropout(out)
        pred = self.mlp(out)
        return pred

    def _forward_lstm(self, batch):
        """Pool each token's subword span with the bidirectional LSTM.

        All token spans of the batch are zero-padded to the longest span
        and run through the LSTM as one packed batch; the final forward
        and backward hidden states are concatenated per token.
        """
        X = torch.LongTensor(batch.input)
        embedded = self.embedder.embed(X, batch.sentence_subword_len)
        batch_size, seqlen, hidden_size = embedded.size()
        # Number of subwords per token (from consecutive token starts).
        token_lens = batch.token_starts[:, 1:] - batch.token_starts[:, :-1]
        token_maxlen = token_lens.max()
        pad = to_cuda(torch.zeros((1, hidden_size)))
        all_token_vectors = []
        all_token_lens = []
        for bi in range(batch_size):
            for ti in range(batch.sentence_len[bi]):
                first = batch.token_starts[bi][ti+1]
                last = batch.token_starts[bi][ti+2]
                tok_vecs = embedded[bi, first:last]
                this_size = tok_vecs.size(0)
                if this_size < token_maxlen:
                    this_pad = pad.repeat((token_maxlen - this_size, 1))
                    tok_vecs = torch.cat((tok_vecs, this_pad))
                all_token_vectors.append(tok_vecs)
                all_token_lens.append(this_size)
                # _, (h, c) = self.subword_lstm(tok_vecs)
                # h = torch.cat((h[0], h[1]), dim=-1)
                # out.append(h[0])
        lstm_in = torch.stack(all_token_vectors)
        # Pack so the LSTM ignores the zero padding of short spans.
        seq = torch.nn.utils.rnn.pack_padded_sequence(
            lstm_in, all_token_lens, enforce_sorted=False, batch_first=True)
        _, (h, c) = self.subword_lstm(seq)
        h = torch.cat((h[0], h[1]), dim=-1)
        return h

    def _forward_mlp(self, batch):
        """Pool each token as a softmax-weighted mix of its subword vectors.

        Weights come from ``self.subword_mlp``; single-subword tokens are
        passed through unchanged (no softmax needed).
        """
        X = torch.LongTensor(batch.input)
        embedded = self.embedder.embed(X, batch.sentence_subword_len)
        batch_size, seqlen, hidden = embedded.size()
        mlp_weights = self.subword_mlp(embedded).view(batch_size, seqlen)
        outputs = []
        for bi in range(batch_size):
            for ti in range(batch.sentence_len[bi]):
                first = batch.token_starts[bi][ti+1]
                last = batch.token_starts[bi][ti+2]
                if last - 1 == first:
                    outputs.append(embedded[bi, first])
                else:
                    weights = mlp_weights[bi][first:last]
                    weights = self.softmax(weights).unsqueeze(1)
                    v = weights * embedded[bi, first:last]
                    v = v.sum(axis=0)
                    outputs.append(v)
        return torch.stack(outputs)

    def _forward_first_plus_last(self, batch):
        """Pool each token as w * first_subword + (1-w) * last_subword."""
        X = torch.LongTensor(batch.input)
        embedded = self.embedder.embed(X, batch.sentence_subword_len)
        batch_size, seqlen, hidden = embedded.size()
        w = self.subword_w
        outputs = []
        for bi in range(batch_size):
            for ti in range(batch.sentence_len[bi]):
                first = batch.token_starts[bi][ti+1]
                last = batch.token_starts[bi][ti+2] - 1
                f = embedded[bi, first]
                la = embedded[bi, last]
                outputs.append(w * f + (1-w) * la)
        return torch.stack(outputs)

    def _forward_last2(self, batch):
        """Pool each token as the concat of its last two subword vectors.

        Single-subword tokens are padded with a zero vector in place of the
        missing second-to-last subword.
        """
        X = torch.LongTensor(batch.input)
        embedded = self.embedder.embed(X, batch.sentence_subword_len)
        batch_size, seqlen, hidden_size = embedded.size()
        outputs = []
        pad = to_cuda(torch.zeros(hidden_size))
        for bi in range(batch_size):
            for ti in range(batch.sentence_len[bi]):
                first = batch.token_starts[bi][ti+1]
                last = batch.token_starts[bi][ti+2] - 1
                if first == last:
                    vec = torch.cat((embedded[bi, last], pad), 0)
                else:
                    vec = torch.cat(
                        (embedded[bi, last], embedded[bi, last-1]), 0)
                outputs.append(vec)
        return torch.stack(outputs)

    def _forward_with_cache(self, batch):
        """Simple pooling ('first'/'last'/'max'/'avg'/'sum') with caching.

        These modes have no trainable parameters, so the pooled result is
        cached keyed by the flattened input ids and reused across epochs.
        """
        subword_pooling = self.config.subword_pooling
        X = torch.LongTensor(batch.input)
        cache_key = tuple(np.array(batch.input).flat)
        if cache_key not in self._cache:
            batch_size = X.size(0)
            batch_ids = []
            token_ids = []
            embedded = self.embedder.embed(X, batch.sentence_subword_len)
            if subword_pooling in ('first', 'last'):
                # Gather one subword index per token, then index in one shot.
                for bi in range(batch_size):
                    sentence_len = batch.sentence_len[bi]
                    batch_ids.append(np.repeat(bi, sentence_len))
                    if subword_pooling == 'first':
                        token_ids.append(batch.token_starts[bi][1:sentence_len + 1])
                    elif subword_pooling == 'last':
                        token_ids.append(
                            np.array(batch.token_starts[bi][2:sentence_len + 2]) - 1)
                batch_ids = np.concatenate(batch_ids)
                token_ids = np.concatenate(token_ids)
                out = embedded[batch_ids, token_ids]
            elif subword_pooling in ('max', 'avg', 'sum'):
                # Reduce each token's subword span elementwise.
                outs = []
                for bi in range(batch_size):
                    for ti in range(batch.sentence_len[bi]):
                        first = batch.token_starts[bi][ti+1]
                        last = batch.token_starts[bi][ti+2]
                        if subword_pooling == 'sum':
                            vec = embedded[bi, first:last].sum(axis=0)
                        elif subword_pooling == 'avg':
                            vec = embedded[bi, first:last].mean(axis=0)
                        elif subword_pooling == 'max':
                            vec = embedded[bi, first:last].max(axis=0).values
                        outs.append(vec)
                out = torch.stack(outs)
            self._cache[cache_key] = out
        return self._cache[cache_key]

    def compute_loss(self, target, output):
        """Cross-entropy loss over the flattened per-token label targets."""
        target = to_cuda(torch.LongTensor(target.labels)).view(-1)
        loss = self.criterion(output, target)
        return loss
# TODO Remove this alias, it's kept for backward compatibility.
# Old code/configs referenced ``TransformerForSequenceClassification``; keep
# the name pointing at the tagging implementation until they are migrated.
TransformerForSequenceClassification = TransformerForSequenceTagging
class BERTEmbedder(nn.Module):
    """Frozen BERT feature extractor with optional layer mixing and caching.

    The underlying ``BertModel`` is shared process-wide through ``globals()``
    so several embedder instances reuse one copy of the weights.  All BERT
    parameters are frozen; only the layer-mixing ``weights`` (when
    ``layer == 'weighted_sum'``) are trainable.
    """

    def __init__(self, model_name, layer, use_cache=False):
        super().__init__()
        # Reuse an already-loaded BERT model stashed in globals(); otherwise
        # load it once and stash it for later instances.
        if 'bert' in globals():
            self.bert = globals()['bert']
        else:
            self.bert = BertModel.from_pretrained(model_name)
            globals()['bert'] = self.bert
        # Freeze BERT: it is used as a feature extractor only.
        for p in self.bert.parameters():
            p.requires_grad = False
        self.layer = layer
        # Number of transformer layers, inferred from the model name.
        if 'large' in model_name:
            n_layer = 24
        else:
            n_layer = 12
        if self.layer == 'weighted_sum':
            # One learnable mixing weight per layer, softmax-normalised
            # at every embed() call.
            self.weights = nn.Parameter(torch.ones(n_layer, dtype=torch.float))
            self.softmax = nn.Softmax(0)
        if use_cache:
            self._cache = {}
        else:
            self._cache = None

    def forward(self, sentences, sentence_lens):
        """Run BERT in eval mode and return its raw output.

        ``sentences`` is a (batch, seq) LongTensor of subword ids;
        ``sentence_lens`` gives the true length of each row, from which the
        attention mask is built.
        """
        self.bert.train(False)
        mask = torch.arange(sentences.size(1)) < \
            torch.LongTensor(sentence_lens).unsqueeze(1)
        mask = to_cuda(mask.long())
        # NOTE(review): the two-tuple unpacking matches the old
        # pytorch-pretrained-bert API where the first element is the list of
        # per-layer hidden states -- confirm against the installed version.
        bert_out, _ = self.bert(sentences, attention_mask=mask)
        return bert_out

    def embed(self, sentences, sentence_lens, cache_key=None):
        """Return embeddings selected/mixed according to ``self.layer``.

        With caching enabled and a ``cache_key`` given, the raw (or
        layer-reduced) BERT output is computed only once per key.  For
        'weighted_sum' the softmaxed layer weights are applied at every
        call so that gradient updates to them take effect.
        """
        if cache_key is not None and self._cache is not None:
            if cache_key not in self._cache:
                if self.layer == 'weighted_sum':
                    # Cache the raw per-layer outputs; mixing happens below.
                    self._cache[cache_key] = self.forward(
                        sentences, sentence_lens)
                elif self.layer == 'mean':
                    self._cache[cache_key] = torch.stack(self.forward(
                        sentences, sentence_lens)).mean(0)
                else:
                    # An integer layer index selects a single hidden layer.
                    self._cache[cache_key] = self.forward(
                        sentences, sentence_lens)[self.layer]
            if self.layer == 'weighted_sum':
                weights = self.softmax(self.weights)
                return (weights[:, None, None, None] *
                        torch.stack(self._cache[cache_key])).sum(0)
            else:
                return self._cache[cache_key]
        else:
            bert_out = self.forward(sentences, sentence_lens)
            if self.layer == 'weighted_sum':
                weights = self.softmax(self.weights)
                return (weights[:, None, None, None] *
                        torch.stack(bert_out)).sum(0)
            elif self.layer == 'mean':
                return torch.stack(bert_out).mean(0)
            else:
                return bert_out[self.layer]

    def state_dict(self, *args, **kwargs):
        """Export only the layer-mixing weights.

        Non-standard override of ``nn.Module.state_dict``: it relies on
        being called by PyTorch's recursive state_dict machinery with
        positional ``(destination, prefix, keep_vars)`` arguments, and it
        deliberately omits the frozen BERT parameters from checkpoints.
        NOTE(review): calling it directly without arguments would raise
        IndexError -- confirm it is only invoked via a parent module.
        """
        if self.layer == 'weighted_sum':
            args[0]['{}weights'.format(args[1])] = self.weights
        return args[0]
class ELMOEmbedder(nn.Module):
    """Pretrained ELMo wrapper with configurable layer selection/mixing.

    ``layer`` may be an integer index, 'mean', 'weighted_sum' (learned mix
    of all three layers) or 'weight_contextual_layers' (learned mix of the
    two contextual LSTM layers only).
    """

    def __init__(self, model_file, layer, batch_size=128, use_cache=False):
        super().__init__()
        self.elmo = Embedder(model_file, batch_size=batch_size)
        self.layer = layer
        if self.layer == 'weighted_sum':
            # One learnable weight per ELMo layer (3 layers in total).
            self.weights = nn.Parameter(torch.ones(3, dtype=torch.float))
            self.softmax = nn.Softmax(0)
        elif self.layer == 'weight_contextual_layers':
            # Weights over the two contextual layers only (layer 0 skipped).
            self.weights = nn.Parameter(torch.ones(2, dtype=torch.float))
            self.softmax = nn.Softmax(0)
        if use_cache:
            self._cache = {}
        else:
            self._cache = None

    def forward(self, sentence):
        # sents2elmo(..., -2) returns all layers per sentence; np.stack
        # yields (batch, n_layers, seq, dim). NOTE(review): assumes the
        # sentences in one call have equal length -- confirm with callers.
        return to_cuda(torch.from_numpy(
            np.stack(self.elmo.sents2elmo(sentence, -2))))

    def embed(self, sentence, cache_key=None):
        """Return ELMo embeddings reduced according to ``self.layer``.

        The raw layer stack is cached per ``cache_key`` when caching is
        enabled; the (trainable) mixing weights are applied on every call.
        """
        if cache_key is not None and self._cache is not None:
            if cache_key not in self._cache:
                self._cache[cache_key] = self.forward(sentence)
            elmo_out = self._cache[cache_key]
        else:
            elmo_out = self.forward(sentence)
        if self.layer == 'weighted_sum':
            weights = self.softmax(self.weights)
            return (weights[None, :, None, None] * elmo_out).sum(1)
        elif self.layer == 'weight_contextual_layers':
            weights = self.softmax(self.weights)
            # Skip layer 0 (the non-contextual layer).
            return (weights[None, :, None, None] * elmo_out[:, 1:]).sum(1)
        elif self.layer == 'mean':
            return elmo_out.mean(1)
        return elmo_out[:, self.layer]
class BERTPairClassifier(BaseModel):
def __init__(self, config, dataset):
super().__init__(config)
self.dataset = dataset
model_name = getattr(self.config, 'bert_model',
'bert-base-multilingual-cased')
self.dropout = nn.Dropout(self.config.dropout)
self.bert = BERTEmbedder(model_name, self.config.layer,
use_cache=self.config.use_cache)
if 'large' in model_name:
hidden = 1024
else:
hidden = 768
self.mlp = MLP(
input_size=2 * hidden,
layers=self.config.mlp_layers,
nonlinearity=self.config.mlp_nonlinearity,
output_size=2,
)
self.criterion = nn.CrossEntropyLoss()
def forward(self, batch, dataset):
key = ('left', id(dataset), dataset._start)
left = self.forward_sentence(
batch.left_tokens, batch.left_sentence_len,
batch.left_target_first, batch.left_target_last, key=key)
key = ('right', id(dataset), dataset._start)
right = self.forward_sentence(
batch.right_tokens, batch.right_sentence_len,
batch.right_target_first, batch.right_target_last, key=key)
mlp_input = torch.cat((left, right), 1)
mlp_out = self.mlp(mlp_input)
return mlp_out
def forward_sentence(self, X, X_len, idx_first, idx_last, key=None):
X = to_cuda(torch.LongTensor(X))
batch_size = X.size(0)
Y = self.bert.embed(X, X_len, cache_key=key)
Y = self.dropout(Y)
helper = to_cuda(torch.arange(batch_size))
if self.config.wp_pool == 'first':
idx = to_cuda(torch.LongTensor(idx_first))
return Y[helper, idx]
elif self.config.wp_pool == 'last':
idx = | |
<filename>qopen/imaging.py
# Copyright 2015-2019 <NAME>, MIT license
"""
Plotting functions
Arguments supported by all plotting functions via its \*\*kwargs are:
:fname: file name for the plot output
(if not provided the figure will be left open)
:title: title of the plot
:figsize: figure size (tuple of inches)
:dpi: resolution of image file
|
"""
from collections import OrderedDict
from copy import copy
import os
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from qopen.core import get_pair, collect_results
from qopen.source import source_model
from qopen.util import gerr, smooth_func, linear_fit
# Marker size for data points, derived from matplotlib's configured default.
MS = mpl.rcParams['lines.markersize'] // 2
# Quantities that can be plotted per station/frequency band.
QUANTITIES = ('g0', 'lsc', 'Qsc', 'b', 'li', 'Qi', 'error', 'nobs')
# Quantities that can be plotted per event.
QUANTITIES_EVENT = ('g0', 'lsc', 'Qsc', 'b', 'li', 'Qi', 'error', 'W', 'sds')
# Axis labels for each quantity (Qsc/Qi labels denote the inverse values,
# see calc_dependent).
QLABELS = {'g0': r'g0 (m$^{-1}$)',
           'lsc': r'l$_{\mathrm{sc}}$ (km)',
           'Qsc': r'Q${_{\mathrm{sc}}}^{-1}$',
           'b': 'b (s$^{-1}$)',
           'li': r'l$_{\mathrm{i}}$ (km)',
           'Qi': r'Q${_{\mathrm{i}}}^{-1}$',
           'W': 'W (J/Hz)',
           'omM': r'$\omega$M (Nm)',
           'sds': r'$\omega$M (Nm)',
           'error': 'error',
           'nobs': 'nobs'}
# Maps each displayed quantity to the independent quantity it is derived
# from (used together with calc_dependent).
DEPMAP = {'g0': 'g0', 'lsc': 'g0', 'Qsc': 'g0',
          'b': 'b', 'li': 'b', 'Qi': 'b',
          'W': 'W', 'omM': 'sds', 'sds': 'sds', 'error': 'error'}
def calc_dependent(quantity, value, freq=None, v0=None):
    """Calculate dependent value (Qsc, Qi, lsc, li) from g0 and b.

    :param str quantity: one of g0, b, W, error (passed through) or
        lsc, Qsc, li, Qi (derived)
    :param value: value of g0 or b depending on requested quantity
    :param freq: frequency in Hz (needed for Qsc and Qi)
    :param v0: velocity in m/s (needed for Qsc and li)
    :return: value of quantity
    :raises ValueError: for an unknown quantity
    """
    q = quantity
    val = np.array(value, dtype=float)
    # Bug fix: np.isscalar(None) is False, so the previous bare
    # `not np.isscalar(v0)` check tried `None[:, np.newaxis]` and crashed
    # whenever v0 was left at its default -- even for quantities that do
    # not use v0 at all. Only reshape a真 per-event velocity array.
    if v0 is not None and not np.isscalar(v0):
        # Broadcast a per-event velocity column against frequency rows.
        v0 = v0[:, np.newaxis]
    if q in ('g0', 'b', 'W', '', 'error'):
        return val
    elif q == 'lsc':
        # scattering mean free path in km
        return 1 / val / 1000
    elif q == 'Qsc':  # actually inverse of Qsc
        return val * v0 / (2 * np.pi * freq)
    elif q == 'li':
        # intrinsic absorption length in km
        return v0 / val / 1000
    elif q == 'Qi':  # actually inverse of Qi
        return val / (2 * np.pi * freq)
    # Previously an unknown quantity silently returned None, which surfaced
    # later as a confusing TypeError; fail fast instead.
    raise ValueError('unknown quantity: %r' % quantity)
def freqlim(freq):
    """Return (x1, x2) axis limits extrapolated half a log step beyond *freq*.

    *freq* is a sequence of band center frequencies; ``None`` is returned
    when fewer than two frequencies are available.
    """
    if len(freq) < 2:
        return
    lower = freq[0] ** 1.5 / freq[1] ** 0.5
    upper = freq[-1] ** 1.5 / freq[-2] ** 0.5
    return lower, upper
def _savefig(fig, title=None, fname=None, dpi=None, figsize=None):
if figsize is not None:
fig.set_size_inches(*figsize)
if title:
extra = (fig.suptitle(title),)
else:
extra = ()
if fname:
path = os.path.dirname(fname)
if path != '' and not os.path.isdir(path):
os.makedirs(path)
fig.savefig(fname, bbox_inches='tight', bbox_extra_artists=extra,
dpi=dpi)
plt.close(fig)
def _set_gridlabels(ax, i, nx, ny, N, xlabel='frequency (Hz)', ylabel=None):
if i % nx != 0 and ylabel:
plt.setp(ax.get_yticklabels(), visible=False)
elif i // nx == (ny - 1) // 2 and ylabel:
ax.set_ylabel(ylabel)
if i < N - nx and xlabel:
plt.setp(ax.get_xticklabels(), visible=False)
elif i % nx == (nx - 1) // 2 and N - i <= nx and xlabel:
ax.set_xlabel(xlabel)
def plot_energies(energies,
                  bulk_window=None, coda_window=None, downsample_to=None,
                  xlim_lin=None, xlim_log=None, **kwargs):
    """
    Plot observed spectral energy densities on different scales (linear, log)

    :param energies: list of energy traces (with ``origintime`` and
        optionally ``ponset``/``sonset`` in their stats)
    :param bulk_window,coda_window: mappings from (event, station) pair to
        (start, end) time windows, drawn as colored spans
    :param downsample_to: target sampling rate for plotting only
    :param xlim_lin,xlim_log: x limits for the linear and log-log panels
    """
    gs = gridspec.GridSpec(2 * len(energies), 2)
    gs.update(wspace=0.05)
    fig = plt.figure()
    sax1 = sax3 = None
    for i, tr in enumerate(energies):
        pair = get_pair(tr)
        otime = tr.stats.origintime
        if downsample_to is None:
            d = 1
        else:
            # Fix: sampling_rate is typically a float, and a float slice
            # step is rejected by numpy -- cast to int and clamp to >= 1
            # so a too-large downsample_to cannot produce a zero step.
            d = max(int(tr.stats.sampling_rate // downsample_to), 1)
        # Sample times relative to the event origin time.
        ts = np.arange(len(tr)) * tr.stats.delta
        ts = ts - (otime - tr.stats.starttime)
        c = 'k'
        ax2 = plt.subplot(gs[2 * i + 1, 0], sharex=sax1, sharey=sax1)
        ax1 = plt.subplot(gs[2 * i, 0], sharex=ax2)
        ax3 = plt.subplot(gs[2 * i:2 * i + 2, 1], sharex=sax3, sharey=sax3)
        ax1.annotate('%s' % pair[1], (1, 0.5), (-10, 0), 'axes fraction',
                     'offset points', size='small', ha='right', va='center')
        ax3.annotate('%s' % pair[0], (0, 1), (10, -5), 'axes fraction',
                     'offset points', size='small', ha='left', va='top')
        # Same data on linear, semilog and loglog axes.
        ax1.plot(ts[::d], tr.data[::d], color=c)
        ax2.semilogy(ts[::d], tr.data[::d], color=c)
        ax3.loglog(ts[::d], tr.data[::d], color=c)
        for ax in (ax1, ax2, ax3):
            plt.setp(ax.get_xticklabels(), visible=False)
            ax.set_yticklabels([])
            if 'ponset' in tr.stats:
                tponset = tr.stats.ponset - otime
                ax.axvline(tponset, color='green', alpha=0.5)
            if 'sonset' in tr.stats:
                tsonset = tr.stats.sonset - otime
                ax.axvline(tsonset, color='b', alpha=0.5)
        for ax in (ax2, ax3):
            if bulk_window and coda_window:
                # Fix: the inner loop previously reused the outer loop
                # variable ``i`` and the color variable ``c`` -- renamed to
                # avoid shadowing (behavior itself was unchanged because the
                # outer ``enumerate`` reassigns ``i`` each iteration).
                win_colors = ('b', 'k')
                wins = (bulk_window[pair], coda_window[pair])
                for wi, win in enumerate(wins):
                    ax.axvspan(win[0] - otime, win[1] - otime,
                               0.05, 0.08, color=win_colors[wi], alpha=0.5)
        if sax1 is None:
            # Remember the first axes so later rows share their scales.
            sax1 = ax2
            sax3 = ax3
    if xlim_lin:
        ax1.set_xlim(xlim_lin)
    if xlim_log:
        ax3.set_xlim(xlim_log)
    loglocator = mpl.ticker.LogLocator(base=100)
    ax2.yaxis.set_major_locator(loglocator)
    ax3.yaxis.set_major_locator(loglocator)
    ax2.yaxis.set_minor_locator(mpl.ticker.NullLocator())
    ax3.yaxis.set_minor_locator(mpl.ticker.NullLocator())
    # Only the bottom row shows time tick labels.
    plt.setp(ax2.get_xticklabels(), visible=True)
    plt.setp(ax3.get_xticklabels(), visible=True)
    _savefig(fig, **kwargs)
def plot_lstsq(rec, event_station_pairs, ax=None, base=np.e, **kwargs):
    """Plot solution of weighted least squares inversion.

    :param rec: inversion record ``(err, g0, b, W, R, info)`` where ``info``
        holds the coda/bulk times, energies and Green's function values
    :param event_station_pairs: (event id, station) pair per observation
    :param ax: existing axes to draw into; a new figure is created otherwise
    :param base: logarithm base for the y axis (natural log by default)
    """
    eventids, stations = zip(*event_station_pairs)
    # Deduplicate while preserving order, to index into W and R.
    eventids = list(OrderedDict.fromkeys(eventids))
    stations = list(OrderedDict.fromkeys(stations))
    err, g0, b, W, R, info = rec
    tcoda, tbulk, Ecoda, Ebulk, Gcoda, Gbulk = info
    fig = None
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    tmin = min(tcoda[i][0] for i in range(len(tcoda)))
    tmax = max(tcoda[i][-1] for i in range(len(tcoda)))
    # Coda observations: corrected log energy as grey curves.
    for i in range(len(tcoda)):
        ev, sta = event_station_pairs[i]
        # Site amplification times relative event strength (W scaled to
        # the first event).
        offset = R[stations.index(sta)] * W[eventids.index(ev)] / W[0]
        # offset = R[i] if len(W) == 1 else C[i]
        # Bci = np.log(Ecoda[i]) - np.log(FS * Gcoda[i]) - np.log(offset)
        Bci = np.log(Ecoda[i]) - np.log(Gcoda[i]) - np.log(offset)
        ax.plot(tcoda[i], Bci / np.log(base), color='0.7')
    # Bulk (direct wave) observations: single grey markers.
    for i in range(len(tbulk)):
        ev, sta = event_station_pairs[i]
        offset = R[stations.index(sta)] * W[eventids.index(ev)] / W[0]
        # offset = R[i] if len(W) == 1 else C[i]
        # Bbi = np.log(Ebulk[i]) - np.log(FS * Gbulk[i]) - np.log(offset)
        Bbi = np.log(Ebulk[i]) - np.log(Gbulk[i]) - np.log(offset)
        ax.plot(tbulk[i], Bbi / np.log(base), 'o', color='0.4', mec='0.4',
                ms=MS)
        tmin = min(tmin, tbulk[i])
    # Fitted line: intercept log(W[0]), slope -b.
    t = np.linspace(tmin, tmax, 100)
    ax.plot(t, (np.log(W[0]) - b * t) / np.log(base), color='k')
    ax.set_xlim(right=tmax)
    if fig:
        _savefig(fig, **kwargs)
def plot_optimization(record, record_g0, event_station_pairs, num=7, **kwargs):
    """Plot some steps of optimization.

    :param record: list of inversion records ``(err, g0, b, W, ...)``, one
        per optimization step. NOTE(review): entries with infinite error are
        replaced in place, i.e. the caller's list is mutated.
    :param record_g0: list of (error, g0) tuples tried by the optimizer
    :param event_station_pairs: (event id, station) pair per observation
    :param num: number of individual fit panels to show around the central
        misfit-vs-g0 plot (1 shows the misfit plot only)

    NOTE(review): ``kwargs.pop('title')`` has no default, so a ``title``
    keyword argument appears to be required -- confirm with callers.
    """
    fig = plt.figure()
    if num > 1:
        # Central misfit plot surrounded by an n x n grid of fit panels.
        n = (num + 1) // 2
        gs = gridspec.GridSpec(n, n)
        ax = plt.subplot(gs[1:, 0:-1])
        share = None
    else:
        ax = fig.add_subplot(111)
    title = kwargs.pop('title')
    if title:
        ax.annotate(title, (0, 1), (5, -5), 'axes fraction', 'offset points',
                    ha='left', va='top')
    err, g0 = zip(*record_g0)
    if not np.all(np.isinf(err)):
        ax.loglog(g0, err, 'xk')
        # best value is plotted blue
        ax.loglog(g0[-1], err[-1], 'xb', mew=2)
    # infinite values are plotted with red crosses
    if np.inf in err:
        g0_inf = [g0_ for (err_, g0_) in record_g0 if err_ == np.inf]
        # Place the red crosses at the vertical center of the plot.
        err_inf = np.mean(ax.get_ylim())
        ax.loglog(g0_inf, err_inf * np.ones(len(g0_inf)), 'xr')
        for i in range(len(record)):
            if record[i][0] == np.inf:
                # Substitute the placeholder error so annotation below works.
                record[i] = (err_inf,) + record[i][1:]
    if num > 1:
        for i, rec in enumerate(record):
            err, g0, b, W, _, _ = rec
            # Lay panels out along the top row, then down the right column;
            # the last panel shows the best solution.
            if i < n:
                gsp = gs[0, i]
                l = str(i + 1)
            elif i < num - 1:
                gsp = gs[i - n + 1, -1]
                l = str(i + 1)
            else:
                gsp = gs[n - 1, -1]
                l = 'best'
            ax2 = plt.subplot(gsp, sharex=share, sharey=share)
            plot_lstsq(rec, event_station_pairs, ax=ax2)
            ax2.annotate(l, (0, 1), (5, -5), 'axes fraction',
                         'offset points', ha='left', va='top')
            l2 = 'g$_0$=%.1e\nb=%.1e' % (g0, b)
            l2 = l2 + '\nW%s=%.1e' % ('$_1$' * (len(W) > 1), W[0])
            ax2.annotate(l2, (1, 1), (-5, -5), 'axes fraction',
                         'offset points', ha='right', va='top',
                         size='xx-small')
            if l != 'best':
                # Tag the corresponding point in the misfit plot.
                ax.annotate(l, (g0, err), (5, 5), 'data', 'offset points',
                            ha='left', va='bottom')
            if i == 0:
                share = ax2
                yl = (r'$\ln \frac{E_{\mathrm{obs}\,ij}}{G_{ij}B_jR_i}$')
                if len(W) == 1:
                    yl = (r'$\ln \frac{E_{\mathrm{obs}\,i}}{G_iR_i}$')
                ax2.set_ylabel(yl)
                plt.setp(ax2.get_xticklabels(), visible=False)
            elif l == 'best':
                ax2.set_xlabel(r'time ($\mathrm{s}$)')
                plt.setp(ax2.get_yticklabels(), visible=False)
            else:
                plt.setp(ax2.get_xticklabels(), visible=False)
                plt.setp(ax2.get_yticklabels(), visible=False)
            ax2.locator_params(axis='y', nbins=4)
            ax2.locator_params(axis='x', nbins=3)
    ax.set_xlabel(r'g$_0$ ($\mathrm{m}^{-1}$)')
    # yl = (r'error $\mathrm{rms}\left(\ln\frac{E_{\mathrm{obs}, ij}}'
    #      r'{E_{\mathrm{mod}, ij}}\right)$')
    ax.set_ylabel(r'misfit $\epsilon$')
    _savefig(fig, **kwargs)
def _get_times(tr):
t0 = tr.stats.starttime - tr.stats.origintime
return np.arange(len(tr)) * tr.stats.delta + t0
def plot_fits(energies, g0, b, W, R, v0, info, G_func,
smooth=None, smooth_window='bartlett',
xlim=None, ylim=None, **kwargs):
"""Plot fits of spectral energy densities"""
tcoda, tbulk, Ecoda, Ebulk, Gcoda, Gbulk = info
N = len(energies)
n = int(np.ceil(np.sqrt(N)))
fig = plt.figure()
gs = gridspec.GridSpec(n, n)
share = None
if b is None:
b = 0
tmaxs = []
ymaxs = []
ymins = []
c1 = 'mediumblue'
c2 = 'darkred'
c1l = '#8181CD' # 37.25% #'#8686CD'
c2l = '#8B6969' # 25% #'#8B6161'
for i, energy in enumerate(energies):
evid, station = get_pair(energy)
ax = plt.subplot(gs[i // n, i % n], sharex=share, sharey=share)
plot = ax.semilogy
def get_Emod(G, t):
return R[station] * W[evid] * G * np.exp(-b * t)
# return FS * R[station] * W[evid] * G * np.exp(-b * t)
st = energy.stats
r = st.distance
# ax.axvline(st.starttime - st.origintime + r / v0, ls = '--', c='gray')
# ax.axvline(r / v0, ls='--', c='gray')
t = | |
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from math import inf, nan
from pathlib import Path
from unittest.mock import ANY
import pytest
from accuracy_checker.config.config_validator import (
ConfigError,
ConfigValidator,
DictField,
ListField,
NumberField,
PathField,
StringField,
BaseField,
BoolField
)
from accuracy_checker.evaluators import ModelEvaluator
from accuracy_checker.launcher import Launcher
from accuracy_checker.dataset import Dataset
from accuracy_checker.metrics import Metric
from accuracy_checker.postprocessor import Postprocessor
from accuracy_checker.preprocessor import Preprocessor
from accuracy_checker.data_readers import BaseReader
from accuracy_checker.annotation_converters import BaseFormatConverter
from accuracy_checker.adapters import Adapter
from accuracy_checker.utils import contains_all
from tests.common import mock_filesystem
class TestStringField:
    """Validation behaviour of :class:`StringField`."""

    def test_expects_string(self):
        field = StringField()
        for bad in (b"foo", {}, 42):
            with pytest.raises(ConfigError):
                field.validate(bad)
        field.validate("foo")

    def test_choices(self):
        field = StringField(choices=['foo', 'bar'])
        with pytest.raises(ConfigError):
            field.validate('baz')
        field.validate('bar')

    def test_case_sensitive(self):
        insensitive = StringField(choices=['foo', 'bar'], case_sensitive=False)
        insensitive.validate('foo')
        insensitive.validate('FOO')
        sensitive = StringField(choices=['foo', 'bar'], case_sensitive=True)
        sensitive.validate('foo')
        with pytest.raises(ConfigError):
            sensitive.validate('FOO')

    def test_regex(self):
        field = StringField(regex=r'foo\d*')
        field.validate('foo')
        field.validate('foo42')
        with pytest.raises(ConfigError):
            field.validate('baz')

    def test_custom_exception(self, mocker):
        on_error = mocker.stub(name='custom_on_error')
        field = StringField(choices=['foo'], on_error=on_error)
        with pytest.raises(ConfigError):
            field.validate('bar', 'foo')
        on_error.assert_called_once_with('bar', 'foo', ANY)

    def test_custom_validator(self, mocker):
        extra_check = mocker.stub(name='custom_validator')
        field = StringField(choices=['foo'], additional_validator=extra_check)
        field.validate('foo', 'baz')
        extra_check.assert_called_once_with('foo', 'baz')
class TestNumberField:
    """Validation behaviour of :class:`NumberField`."""

    def test_expects_number(self):
        float_field = NumberField(value_type=float)
        float_field.validate(1.0)
        for bad in ("foo", {}, []):
            with pytest.raises(ConfigError):
                float_field.validate(bad)
        int_field = NumberField(value_type=int)
        int_field.validate(1)
        # A float is not accepted where an int is required.
        with pytest.raises(ConfigError):
            int_field.validate(1.0)

    def test_nans(self):
        NumberField(allow_nan=True).validate(nan)
        with pytest.raises(ConfigError):
            NumberField(allow_nan=False).validate(nan)

    def test_infinity(self):
        NumberField(allow_inf=True).validate(inf)
        with pytest.raises(ConfigError):
            NumberField(allow_inf=False).validate(inf)

    def test_ranges(self):
        field = NumberField(min_value=0, max_value=5)
        for ok in (0, 1, 2):
            field.validate(ok)
        for bad in (-1, 7):
            with pytest.raises(ConfigError):
                field.validate(bad)
class TestDictField:
    """Validation behaviour of :class:`DictField`."""

    def test_expects_dict(self):
        # Only mappings are accepted; scalars and sequences are rejected.
        dict_field = DictField()
        dict_field.validate({})
        with pytest.raises(ConfigError):
            dict_field.validate("foo")
        with pytest.raises(ConfigError):
            dict_field.validate(42)
        with pytest.raises(ConfigError):
            dict_field.validate([])

    def test_validates_keys(self):
        # By default keys of any type are allowed.
        dict_field = DictField()
        dict_field.validate({'foo': 42, 1: 'bar'})
        # A plain type restricts the key type.
        dict_field = DictField(key_type=str)
        dict_field.validate({'foo': 42, 'bar': 'bar'})
        with pytest.raises(ConfigError):
            dict_field.validate({'foo': 42, 1: 'bar'})
        # A full field may carry extra constraints (here: choices).
        dict_field = DictField(key_type=StringField(choices=['foo', 'bar']))
        dict_field.validate({'foo': 42, 'bar': 42})
        with pytest.raises(ConfigError):
            dict_field.validate({'foo': 42, 1: 'bar'})
        with pytest.raises(ConfigError):
            dict_field.validate({'foo': 42, 'baz': 42})

    def test_validates_values(self):
        # By default values of any type are allowed.
        dict_field = DictField()
        dict_field.validate({'foo': 42, 1: 'bar'})
        # A plain type restricts the value type.
        dict_field = DictField(value_type=str)
        dict_field.validate({'foo': 'foo', 1: 'bar'})
        with pytest.raises(ConfigError):
            dict_field.validate({'foo': 42, 1: 2})
        # A full field may carry extra constraints (here: choices).
        dict_field = DictField(value_type=StringField(choices=['foo', 'bar']))
        dict_field.validate({1: 'foo', 'bar': 'bar'})
        with pytest.raises(ConfigError):
            dict_field.validate({1: 'foo', 2: 3})
        with pytest.raises(ConfigError):
            dict_field.validate({1: 'foo', 2: 'baz'})

    def test_converts_basic_types(self):
        # Plain Python types passed as value_type are converted to the
        # corresponding field classes.
        dict_field = DictField(value_type=str)
        assert isinstance(dict_field.value_type, StringField)
        dict_field = DictField(value_type=int)
        assert isinstance(dict_field.value_type, NumberField)
        assert dict_field.value_type.type is not float
        dict_field = DictField(value_type=float)
        assert isinstance(dict_field.value_type, NumberField)
        assert dict_field.value_type.type is float
        dict_field = DictField(value_type=list)
        assert isinstance(dict_field.value_type, ListField)
        dict_field = DictField(value_type=dict)
        assert isinstance(dict_field.value_type, DictField)
        dict_field = DictField(value_type=Path)
        assert isinstance(dict_field.value_type, PathField)

    def test_empty(self):
        dict_field = DictField()
        dict_field.validate({})
        # allow_empty=False turns an empty dict into an error.
        dict_field = DictField(allow_empty=False)
        with pytest.raises(ConfigError):
            dict_field.validate({})
class TestListField:
    """Validation behaviour of :class:`ListField`."""

    def test_expects_list(self):
        field = ListField()
        field.validate([])
        for bad in ("foo", 42, {}):
            with pytest.raises(ConfigError):
                field.validate(bad)

    def test_validates_values(self):
        # Without a value_type anything goes.
        ListField().validate(['foo', 42])
        typed = ListField(value_type=str)
        typed.validate(['foo', 'bar'])
        with pytest.raises(ConfigError):
            typed.validate(['foo', 42])
        constrained = ListField(value_type=StringField(choices=['foo', 'bar']))
        constrained.validate(['foo', 'bar'])
        for bad in (['foo', 42], ['foo', 'bar', 'baz']):
            with pytest.raises(ConfigError):
                constrained.validate(bad)

    def test_empty(self):
        ListField().validate([])
        with pytest.raises(ConfigError):
            ListField(allow_empty=False).validate([])
class TestPathField:
    """Validation behaviour of :class:`PathField`."""

    @pytest.mark.usefixtures('mock_path_exists')
    def test_expects_path_like(self):
        field = PathField()
        field.validate('foo/bar')
        field.validate('/home/user')
        field.validate(Path('foo/bar'))
        for bad in (42, {}, []):
            with pytest.raises(ConfigError):
                field.validate(bad)

    def test_path_is_checked(self):
        with mock_filesystem(['foo/bar']) as prefix:
            root = Path(prefix)
            file_field = PathField(is_directory=False)
            with pytest.raises(ConfigError):
                file_field.validate(root / 'foo')
            file_field.validate(root / 'foo' / 'bar')
            dir_field = PathField(is_directory=True)
            dir_field.validate(root / 'foo')
            with pytest.raises(ConfigError):
                dir_field.validate(root / 'foo' / 'bar')

    def test_path_not_checked(self):
        with mock_filesystem(['foo/bar']) as prefix:
            field = PathField(is_directory=False, check_exists=False)
            field.validate(Path(prefix) / 'foo' / 'bar')
class TestConfigValidator:
def test_compound(self):
class SampleValidator(ConfigValidator):
foo = StringField(choices=['foo'])
bar = NumberField()
sample_validator = SampleValidator('Sample')
sample_validator.validate({'foo': 'foo', 'bar': 1})
with pytest.raises(ConfigError):
sample_validator.validate({'foo': 'foo'})
with pytest.raises(ConfigError):
sample_validator.validate({'foo': 'bar', 'bar': 1})
def test_optional_fields(self):
class SampleValidatorNoOptionals(ConfigValidator):
foo = StringField(choices=['foo'])
bar = NumberField(optional=False)
sample_validator = SampleValidatorNoOptionals('Sample')
sample_validator.validate({'foo': 'foo', 'bar': 1})
with pytest.raises(ConfigError):
sample_validator.validate({'foo': 'bar'})
class SampleValidatorWithOptionals(ConfigValidator):
foo = StringField(choices=['foo'])
bar = NumberField(optional=True)
sample_validator = SampleValidatorWithOptionals('Sample')
sample_validator.validate({'foo': 'foo', 'bar': 1})
sample_validator.validate({'foo': 'foo'})
def test_extra_fields__warn_on_extra(self):
class SampleValidatorWarnOnExtra(ConfigValidator):
foo = StringField(choices=['foo'])
sample_validator = SampleValidatorWarnOnExtra(
'Sample', on_extra_argument=ConfigValidator.WARN_ON_EXTRA_ARGUMENT
)
with pytest.warns(UserWarning):
sample_validator.validate({'foo': 'foo', 'bar': 'bar'})
def test_extra_fields__error_on_extra(self):
class SampleValidatorErrorOnExtra(ConfigValidator):
foo = StringField(choices=['foo'])
sample_validator = SampleValidatorErrorOnExtra(
'Sample', on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT)
with pytest.raises(ConfigError):
sample_validator.validate({'foo': 'bar', 'bar': 'bar'})
def test_extra_fields__ignore_extra(self):
class SampleValidatorIgnoresExtra(ConfigValidator):
foo = StringField(choices=['foo'])
sample_validator = SampleValidatorIgnoresExtra(
'Sample', on_extra_argument=ConfigValidator.IGNORE_ON_EXTRA_ARGUMENT)
sample_validator.validate({'foo': 'foo', 'bar': 'bar'})
def test_custom_exception(self, mocker):
    """A custom on_error callback is invoked exactly once when validation fails."""
    class SampleValidator(ConfigValidator):
        foo = StringField(choices=['foo'])

    on_error = mocker.stub(name='custom_on_error')
    validator = SampleValidator('Sample', on_error=on_error)
    with pytest.raises(ConfigError):
        validator.validate({})
    on_error.assert_called_once_with(ANY, 'Sample', ANY)
def test_custom_validator(self, mocker):
    """An additional_validator callback receives the entry and the field uri."""
    class SampleValidator(ConfigValidator):
        foo = StringField(choices=['foo'])

    extra_check = mocker.stub(name='custom_validator')
    validator = SampleValidator('Sample', additional_validator=extra_check)
    entry = {'foo': 'foo'}
    validator.validate(entry)
    extra_check.assert_called_once_with(entry, 'Sample')
def test_nested(self):
    """A ListField of nested validators validates every list element."""
    class InnerValidator(ConfigValidator):
        foo = StringField(choices=['foo'])

    class OuterValidator(ConfigValidator):
        bar = ListField(InnerValidator('Inner'))

    validator = OuterValidator(
        'Outer', on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT)
    validator.validate({'bar': [{'foo': 'foo'}, {'foo': 'foo'}]})
def test_inheritance(self):
    """A derived validator keeps the fields declared on its parent."""
    class ParentValidator(ConfigValidator):
        foo = StringField(choices=['foo'])

    class DerivedValidator(ParentValidator):
        bar = StringField(choices=['bar'])

    validator = DerivedValidator(
        'Derived', on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT)
    # Both the inherited and the newly declared field are accepted.
    validator.validate({'foo': 'foo', 'bar': 'bar'})
class TestConfigValidationAPI:
    """End-to-end checks of the error reporting of ModelEvaluator.validate_config."""
def test_empty_config(self):
    """An empty model entry reports missing launchers and datasets sections."""
    errors = ModelEvaluator.validate_config({'models': [{}]})
    assert len(errors) == 2
    launchers_error, datasets_error = errors
    assert launchers_error.message == 'launchers section is not provided'
    assert not launchers_error.entry
    assert launchers_error.field_uri == 'models.launchers'
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
def test_empty_launchers_and_datasets_config(self):
    """Empty launchers/datasets lists are treated the same as missing sections."""
    errors = ModelEvaluator.validate_config({'models': [{'launchers': [], 'datasets': []}]})
    assert len(errors) == 2
    launchers_error, datasets_error = errors
    assert launchers_error.message == 'launchers section is not provided'
    assert not launchers_error.entry
    assert launchers_error.field_uri == 'models.launchers'
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
def test_launcher_config_without_framework(self):
    """A launcher entry without 'framework' yields a framework error plus the datasets error."""
    launcher_config = {'model': 'foo'}
    config = {'models': [{'launchers': [launcher_config], 'datasets': []}]}
    errors = ModelEvaluator.validate_config(config)
    assert len(errors) == 2
    framework_error, datasets_error = errors
    assert framework_error.message == 'framework is not provided'
    assert framework_error.entry == launcher_config
    assert framework_error.field_uri == 'models.launchers.0'
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
def test_unregistered_launcher_config(self):
    """An unknown framework name is reported for the launcher entry."""
    launcher_config = {'framework': 'foo'}
    config = {'models': [{'launchers': [launcher_config], 'datasets': []}]}
    errors = ModelEvaluator.validate_config(config)
    assert len(errors) == 2
    launcher_error, datasets_error = errors
    assert launcher_error.message == 'launcher foo is not unregistered'
    assert launcher_error.entry == launcher_config
    assert launcher_error.field_uri == 'models.launchers.0'
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
@pytest.mark.usefixtures('mock_file_exists')
def test_valid_launcher_config(self):
    """A complete dlsdk launcher entry produces only the missing-datasets error."""
    launcher_config = {'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu'}
    config = {'models': [{'launchers': [launcher_config], 'datasets': []}]}
    errors = ModelEvaluator.validate_config(config)
    assert len(errors) == 1
    (datasets_error,) = errors
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
@pytest.mark.usefixtures('mock_file_exists')
def test_input_without_type(self):
    """An input description lacking 'type' is rejected with a per-input error."""
    launcher_config = {'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu', 'inputs': [{"name": 'input'}]}
    config = {'models': [{'launchers': [launcher_config], 'datasets': []}]}
    errors = ModelEvaluator.validate_config(config)
    assert len(errors) == 2
    input_error, datasets_error = errors
    assert input_error.message.endswith('input type is not provided')
    assert input_error.field_uri == 'models.launchers.0.inputs.0'
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
@pytest.mark.usefixtures('mock_file_exists')
def test_input_with_invalid_type(self):
    """An input description with an unknown 'type' is rejected with a per-input error."""
    launcher_config = {'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu', 'inputs': [{"name": 'input', 'type': 'FOO'}]}
    config = {'models': [{'launchers': [launcher_config], 'datasets': []}]}
    errors = ModelEvaluator.validate_config(config)
    assert len(errors) == 2
    input_error, datasets_error = errors
    assert input_error.message.endswith('undefined input type FOO')
    assert input_error.field_uri == 'models.launchers.0.inputs.0'
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
@pytest.mark.usefixtures('mock_file_exists')
def test_input_without_name(self):
    """An input description lacking 'name' is rejected with a per-input error."""
    launcher_config = {'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu', 'inputs': [{"type": 'INPUT'}]}
    config = {'models': [{'launchers': [launcher_config], 'datasets': []}]}
    errors = ModelEvaluator.validate_config(config)
    assert len(errors) == 2
    input_error, datasets_error = errors
    assert input_error.message.endswith('input name is not provided')
    assert input_error.field_uri == 'models.launchers.0.inputs.0'
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
@pytest.mark.usefixtures('mock_file_exists')
def test_adapter_str_config(self):
    """A registered adapter given as a plain string is accepted."""
    launcher_config = {'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu', 'adapter': 'classification'}
    config = {'models': [{'launchers': [launcher_config], 'datasets': []}]}
    errors = ModelEvaluator.validate_config(config)
    assert len(errors) == 1
    (datasets_error,) = errors
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
@pytest.mark.usefixtures('mock_file_exists')
def test_adapter_dict_config(self):
    """A registered adapter given as a dict with a 'type' key is accepted."""
    launcher_config = {'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu', 'adapter': {'type': 'classification'}}
    config = {'models': [{'launchers': [launcher_config], 'datasets': []}]}
    errors = ModelEvaluator.validate_config(config)
    assert len(errors) == 1
    (datasets_error,) = errors
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
@pytest.mark.usefixtures('mock_file_exists')
def test_unregistered_adapter_config(self):
    """An unknown adapter name is rejected alongside the missing-datasets error."""
    launcher_config = {'model': 'foo', 'framework': 'dlsdk', 'device': 'cpu', 'adapter': 'not_classification'}
    config = {'models': [{'launchers': [launcher_config], 'datasets': []}]}
    errors = ModelEvaluator.validate_config(config)
    assert len(errors) == 2
    adapter_error, datasets_error = errors
    assert adapter_error.message.startswith('Invalid value "not_classification"')
    assert adapter_error.entry == 'not_classification'
    assert adapter_error.field_uri.startswith('models.launchers.0')
    assert adapter_error.field_uri.endswith('adapter')
    assert datasets_error.message == 'datasets section is not provided'
    assert not datasets_error.entry
    assert datasets_error.field_uri == 'models.datasets'
@pytest.mark.usefixtures('mock_path_exists')
def test_dataset_config_without_metrics(self):
dataset_config = {'name': 'dataset', 'data_source': 'data', 'annotation': 'annotation'}
config_errors | |
in these columns.
# #### Step 3.2.2 Data Processing
# In[22]:
# group df_sea_list_price by property_type and room_type with a median price and count calculated for listings in each group
df_sea_list_property_price = df_sea_list_price.groupby(
    ['property_type','room_type']).price.agg(np.median).reset_index()
# group sizes, taken from the same groupby so row order matches the median frame
df_sea_list_property_price['count'] = df_sea_list_price.groupby(
    ['property_type','room_type']).price.agg(lambda x: len(x)).reset_index()['price']
# only keep the topk most listed groups and rank it in descending order by price
df_sea_list_property_price = df_sea_list_property_price.sort_values(by='count', ascending=False)[:TopK]
df_sea_list_property_price = df_sea_list_property_price.sort_values(by='price', ascending=False)
# bare expression: displays the table in a notebook cell
df_sea_list_property_price
# #### Step 3.2.3 Analysis
# In the above chart, we can find out that listings of "House and Entire home/apt" will be priced highest among major categories, with "Apartment and Entire home/apt" ranked second. "Apartment and Shared room" and "House and Shared room" will be priced significantly lower than general median price, which is $100
# ### Step 3.3 Size
#
# Three features that could be related with the size of listing:
# 1. number of bedrooms
# 2. number of bathrooms
# 3. square feet
# #### Step 3.3.1 Data Understanding
# In[23]:
print("Number of missing values in column bedrooms:", df_sea_list_price.bedrooms.isnull().sum())
print("Number of missing values in column bathrooms:", df_sea_list_price.bathrooms.isnull().sum())
print("Number of missing values in column square_feet: {} out of {}".format(
    df_sea_list_price.square_feet.isnull().sum(), len(df_sea_list_price)))
# The number of missing values in bedrooms and bathrooms is very small and we could simply drop those rows. On the other hand, the number of missing values in square_feet is very high, more than 90% of total rows, thus we should not use this feature for analysis at all.
# #### Step 3.3.2 Data Processing
# In[24]:
# group df_sea_list_price by columns ['bedrooms', 'bathrooms'] with median price and count calculated in each group
df_sea_list_size_price = df_sea_list_price.groupby(
    ['bedrooms', 'bathrooms']).price.agg(np.median).reset_index()
# group sizes, aligned with the median frame through an identical groupby
df_sea_list_size_price['count'] = df_sea_list_price.groupby(['bedrooms', 'bathrooms']).price.agg(
    lambda x: len(x)).reset_index()['price']
# only keep the topk most listed groups and rank them in descending order by price
df_sea_list_size_price = df_sea_list_size_price.sort_values(by='count', ascending=False)[:TopK]
df_sea_list_size_price = df_sea_list_size_price.sort_values(by='price', ascending=False)
# bare expression: displays the table in a notebook cell
df_sea_list_size_price
# #### Step 3.3.3 Analysis
# The general trend is that the more bedrooms and the more bathrooms we have, the more expensive we could expect from a listing.
# ### Step 3.4 User reviews
#
# Explore features related with review scores and their relations to pricing. We will group the data by each feature and compare the median price in each group. If price is correlated with this feature, we will observe the price difference among groups.
# #### Step 3.4.1 Data Preparation
# There is a significant amount of missing values in these features, where there is no review scores attached to the listing. We will replace these NaN values with -1, as a way to indicate NaN values as pandas.groupby will automatically drop NaN.
# In[25]:
review_features = ["review_scores_accuracy", "review_scores_checkin", "review_scores_cleanliness",
                   "review_scores_communication", "review_scores_location", "review_scores_rating",
                   "review_scores_value"]
# In[26]:
# report how sparse each review-score column is
for feature in review_features:
    print("Number of missing values in column{}: {} out of {}".format(
        feature, df_sea_list_price[feature].isnull().sum(), len(df_sea_list_price)))
# In[27]:
# sentinel -1 marks "no review score" so groupby keeps those rows
review_na_replace_dict = {k: -1 for k in review_features}
df_sea_list_review_price = df_sea_list_price.fillna(review_na_replace_dict)
# #### Step 3.4.2 Data Processing and Visualization
# In[28]:
def display_review_groups(df_review):
    """Plot median price and listing count grouped by each review-score feature.

    For every column in the module-level ``review_features`` list, groups
    ``df_review`` by that column and draws a dual-axis bar chart:
    median price (left axis, red) and group size (right axis, blue).
    Produces one figure per feature via plt.show(); returns None.
    """
    for feature in review_features:
        df_review_group = df_review.groupby(
            feature).price.agg(np.median).reset_index()
        # BUG FIX: the count was computed from the global
        # df_sea_list_review_price instead of the df_review argument, which
        # made the parameter misleading and broke the function for any other
        # input frame. Identical groupby keys keep the rows aligned.
        df_review_group['count'] = df_review.groupby(
            feature).price.agg(lambda x: len(x)).reset_index()['price']
        df_review_group = df_review_group.sort_values(by=feature, ascending=True)
        fig = plt.figure()         # Create matplotlib figure
        ax = fig.add_subplot(111)  # Create matplotlib axes
        ax2 = ax.twinx()           # Second axes sharing the same x-axis as ax
        df_review_group.plot(x=feature, y='price', legend=False, width=0.3, figsize=(9,6),
                             kind='bar', color='red', ax=ax, position=1)
        df_review_group.plot(x=feature, y='count', legend=False, width=0.3, figsize=(9,6),
                             kind='bar', color='blue', ax=ax2, position=0)
        ax.set_ylabel('median price', color='red')
        ax.set_title('Median price grouped by {}'.format(feature))
        ax2.set_ylabel('count', color='blue')
        plt.show()
# In[29]:
# render one dual-axis chart per review-score feature
display_review_groups(df_sea_list_review_price)
# #### Step 3.4.3 Analysis
# From the charts above, we don't see a clear connection of review scores to pricing. Whether the score is 10, 9, or -1(NaN), the median price in each group does not distinguish itself from the rest.
# #### Step 3.4.4 Extension
#
# Look at things from a different angle. Group the data by "number of reviews" and explore if it's related with pricing.
# In[30]:
def partition_by_number_of_reviews(df, idx, interval=50):
    """Map the row at ``idx`` to the midpoint of its number_of_reviews bucket.

    Buckets are half-open ranges of width ``interval`` starting at 0; the
    returned label is the bucket's midpoint (e.g. 120 -> 125.0 for width 50).
    """
    reviews = df['number_of_reviews'].loc[idx]
    bucket_start = int(reviews / interval) * interval
    return bucket_start + interval / 2
# In[31]:
def display_grouping_by_number_of_reviews(df_review, interval=50):
    """Plot median price and listing count with rows bucketed by number_of_reviews.

    Rows of ``df_review`` are grouped into buckets of width ``interval`` on
    number_of_reviews (labelled by bucket midpoint), then a dual-axis bar
    chart is drawn: median price on the left (red), group size on the right
    (blue). Shows the figure via plt.show(); returns None.
    """
    feature = "number_of_reviews"
    # BUG FIX: both groupbys and the partition helper operated on the global
    # df_sea_list_review_price instead of the df_review argument, so passing
    # any other frame silently used the wrong data.
    df_review_group = df_review.groupby(
        lambda x: partition_by_number_of_reviews(
            df_review, x, interval)).price.agg(
        np.median).reset_index()
    df_review_group['count'] = df_review.groupby(
        lambda x: partition_by_number_of_reviews(
            df_review, x, interval)).price.agg(
        lambda x: len(x)).reset_index()['price']
    df_review_group.columns = [feature, 'price', 'count']
    df_review_group = df_review_group.sort_values(by=feature, ascending=True)
    fig = plt.figure()         # Create matplotlib figure
    ax = fig.add_subplot(111)  # Create matplotlib axes
    ax2 = ax.twinx()           # Second axes sharing the same x-axis as ax
    df_review_group.plot(x=feature, y='price', legend=False, width=0.3, figsize=(9,6),
                         kind='bar', color='red', ax=ax, position=1)
    df_review_group.plot(x=feature, y='count', legend=False, width=0.3, figsize=(9,6),
                         kind='bar', color='blue', ax=ax2, position=0)
    ax.set_ylabel('median price', color='red')
    ax.set_title('Median price grouped by {} with interval {}'.format(feature, interval))
    ax2.set_ylabel('count', color='blue')
    plt.show()
# In[32]:
display_grouping_by_number_of_reviews(df_sea_list_review_price, 10)
# Surprisingly, the general trend is that the more reviews a listing has, the less the median price is. This could make sense since customers are more likely to post a review on an AirBnB/hotle stay if they have a negative experience of it. More reviews a listing has, more negative impressions it might present to the public, thus lower it is priced later.
# ### Step 3.5 Random Forest
#
# There are around 3,800 number of data points and we might easily get **overfitting** problems with linear regression. Choose **Random Forest Classifier** instead and it performs well with limited training data.
# #### Step 3.5.1 Data Preparation
# In[33]:
print("Number of missing values in column price:", df_sea_list.price.isnull().sum())
# In[34]:
# Split into explanatory and response variables
y = df_sea_list['price']
X = df_sea_list.drop('price', axis=1)
# ##### Step 3.5.1.1 Convert Numeric Strings to Numbers
#
# There are some features ought to be converted into numbers from string. And before that, we need to strip the dollar signs and percentage signs from these strings.
# In[35]:
# convert numeric strings to number type
# we will drop 'weekly_price', 'monthly_price' as they are redundant compared to 'price'
features_to_converted_to_number = ['cleaning_fee', 'security_deposit',
'host_acceptance_rate', 'host_response_rate', 'extra_people']
y = y.apply(lambda i: float(sub(r'[^\d.]', '', i)) if i is not np.nan else i)
X[features_to_converted_to_number] = X[features_to_converted_to_number].apply(
lambda array: [float(sub(r'[^\d.]', '', i)) if i is not np.nan else i for i in array])
X[features_to_converted_to_number].head()
# ##### Step 3.5.1.2 Parse Phrases in Selected Features
#
# Some features contains string that we could parse into phrases and obtain some useful information out of them. For example, in "amenities" we could get whether a TV or wireless internet is provided by the host. They could have a impact on pricing as well.
# In[36]:
# parse keywords in features in features_to_parse
features_to_parse = ['host_verifications', 'amenities']
def getPhrases(text):
    """Split free text into lower-cased phrases.

    A phrase starts with a word character and may continue with word
    characters, whitespace, and the literal characters / ( ) - _ .
    Returns the list of matches, in order of appearance.
    """
    # BUG FIX: the pattern was a non-raw string containing invalid escape
    # sequences (\w, \s, ...), which only worked because Python passes
    # unknown escapes through (with a DeprecationWarning on modern Pythons).
    # The re module caches compiled patterns, so compiling per call is cheap.
    return re.compile(r'\w[\w\s/()\-_]*').findall(text.lower())
# In[37]:
# example of phrases parsed from host_verifications
# (bare expression: displays the union of all phrase sets in a notebook cell)
set.union(*X['host_verifications'].apply(
    lambda x: set(getPhrases(x)) if x is not np.nan else x).tolist())
# In[38]:
# function to parse phrases in selected features
def parseFeaturesByPhrases(df, feature_list):
    """One-hot encode the phrases found in each listed text feature, in place.

    For every feature in feature_list:
      * parse each cell into a set of phrases via getPhrases (NaN cells stay NaN),
      * add one indicator column '<feature>-<phrase>' per distinct phrase,
    then drop the original text columns.

    Returns df (which is modified in place).
    """
    for feature in feature_list:
        # set of phrases per cell; NaN passes through untouched
        df[feature] = df[feature].apply(lambda x: set(
            getPhrases(x)) if x is not np.nan else x)
        set_phrases = set.union(*df[feature].tolist())
        # add one indicator column per phrase, initialised to 0
        list_phrases = list(set_phrases)
        list_addon_features = []
        for phrase in list_phrases:
            addon_feature = feature+'-'+phrase
            list_addon_features.append(addon_feature)
            df[addon_feature] = [0]*len(df)
        # assign 1 or 0 to each column based on availability of phrases
        df[[feature]+list_addon_features] = df[[feature]+list_addon_features].apply(
            lambda array: pd.Series([array[0]]+
                [1 if array[0] is not np.nan and x in array[0] else 0 for x in list_phrases]
            ), axis=1)
    # drop original columns
    # BUG FIX: the original returned df.drop(..., inplace=True), which is
    # always None; return the mutated frame instead so the call can be chained.
    df.drop(feature_list, axis=1, inplace=True)
    return df
# In[39]:
# perform parsing
parseFeaturesByPhrases(X, feature_list=features_to_parse)
X.head()
# ##### Step 3.5.1.3 Drop Redundant Features
#
# Some features are redundant in predicting price. For example, weekly_price and monthly_price are already good indicators of price. For another example, listing id is directly related to the price of existing data, use it in the model will result in overfitting.
# In[40]:
# drop columns that are subjective descriptions, or redundant information
cols_to_drop = ['weekly_price', 'monthly_price', 'first_review', 'last_review',
                'calendar_last_scraped', 'calendar_updated',
                'country', 'country_code', 'smart_location', 'market', 'state', 'city',
                'neighbourhood_group_cleansed', 'neighbourhood', 'street', 'host_neighbourhood',
                'host_picture_url', 'host_thumbnail_url', 'host_about', 'host_location',
                'host_since', 'host_name', 'host_url', 'xl_picture_url', 'picture_url', 'medium_url',
                'thumbnail_url', 'transit', 'notes', 'neighborhood_overview', 'description',
                'space', 'summary', 'name', 'last_scraped', 'listing_url', 'zipcode',
                'calculated_host_listings_count', 'host_total_listings_count', 'host_listings_count',
                'id', 'scrape_id', 'host_id']
print("Number of redundant features to drop: {}".format(len(cols_to_drop)))
X = X.drop(cols_to_drop, axis=1)
print("Number of remaining features: {}".format(X.shape[1]))
X.head()
# ##### Step 3.5.1.4 One-Hot-Encode Categorical Features
#
# Perform one-hot-encoding on categorical features. Before that, drop features with more than half of their values missing.
# In[41]:
# perform one-hot-encoding
cat_cols = X.select_dtypes(include=[object]).columns
X = pd.get_dummies(X, dummy_na=True,
                   columns=cat_cols, drop_first=True)
X.head()
# ##### Step 3.5.1.5 Imputation of Missing Values
#
# Impute missing values by average values in each column.
# In[42]:
# drop columns with more than half of its value missing
X_imputed | |
import json
import os
import tempfile
import math
import traceback
from shutil import copyfile
from collections import OrderedDict
from datetime import datetime
from shapely.geometry import shape,MultiPoint,Point
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.linestring import LineString
from shapely.geometry.multilinestring import MultiLineString
from shapely.geometry import LinearRing,mapping,LineString
from shapely.geometry.collection import GeometryCollection
from gokart.spatial import calculateFeatureArea,calculateGeometryArea,extractPolygons,transform,exportGeojson
def mergeGeometry(geom1, geom2):
    """Merge two shapely geometries into a single geometry.

    Geometries of the same family (point, line, polygon — single or multi)
    are merged into the corresponding Multi* type; mixed families are
    flattened into a GeometryCollection of their single components.
    If either argument is falsy, the other is returned unchanged.

    Raises Exception for unsupported geometry types (e.g. a
    GeometryCollection input), reporting geom1's type first, then geom2's —
    matching the original dispatch order.
    """
    if not geom2:
        return geom1
    if not geom1:
        return geom2

    # (single type, multi type) per supported geometry family.
    families = (
        (Point, MultiPoint),
        (LineString, MultiLineString),
        (Polygon, MultiPolygon),
    )

    def _family(geom):
        # Return the (single, multi) pair geom belongs to, or None.
        for pair in families:
            if isinstance(geom, pair):
                return pair
        return None

    def _parts(geom):
        # Flatten a single or multi geometry into its single components.
        if isinstance(geom, (MultiPoint, MultiLineString, MultiPolygon)):
            return list(geom.geoms)
        return [geom]

    fam1 = _family(geom1)
    if fam1 is None:
        raise Exception("Unsupported geometry type({}.{})".format(
            geom1.__class__.__module__, geom1.__class__.__name__))
    fam2 = _family(geom2)
    if fam2 is None:
        raise Exception("Unsupported geometry type({}.{})".format(
            geom2.__class__.__module__, geom2.__class__.__name__))

    merged = _parts(geom1) + _parts(geom2)
    if fam1 is fam2:
        # Same family: keep the strongest type information (Multi* type).
        return fam1[1](merged)
    return GeometryCollection(merged)
def symmetric_difference(geom1, geom2, split=False):
    """Polygon-only symmetric difference of two geometries.

    First tries shapely's native symmetric_difference; on failure (e.g.
    topology exceptions) retries with split=True, applying the operation
    piecewise over the component polygons. Returns polygons only (via
    extractPolygons), or None when nothing remains.
    """
    if not split:
        try:
            return extractPolygons(geom1.symmetric_difference(geom2))
        except Exception:  # narrowed from a bare except: keep SystemExit/KeyboardInterrupt fatal
            return symmetric_difference(geom1, geom2, split=True)
    if isinstance(geom1, Polygon):
        if isinstance(geom2, Polygon):
            return extractPolygons(geom1.symmetric_difference(geom2))
        # Apply each component of geom2 in turn; stop early once empty.
        for g in geom2.geoms:
            geom1 = extractPolygons(symmetric_difference(geom1, g))
            if not geom1:
                return None
        return geom1
    # geom1 is a multi geometry: merge the piecewise results.
    diff_geom = None
    for g in geom1.geoms:
        diff_g = symmetric_difference(g, geom2)
        if diff_g:
            diff_geom = mergeGeometry(diff_geom, diff_g)
    return diff_geom
def difference(geom1, geom2, split=False):
    """Polygon-only difference geom1 - geom2.

    First tries shapely's native difference; on failure (e.g. topology
    exceptions) retries with split=True, applying the operation piecewise
    over the component polygons. Returns polygons only (via
    extractPolygons), or None when nothing remains.
    """
    if not split:
        try:
            return extractPolygons(geom1.difference(geom2))
        except Exception:  # narrowed from a bare except: keep SystemExit/KeyboardInterrupt fatal
            return difference(geom1, geom2, split=True)
    if isinstance(geom1, Polygon):
        if isinstance(geom2, Polygon):
            return extractPolygons(geom1.difference(geom2))
        # Subtract each component of geom2 in turn; stop early once empty.
        for g in geom2.geoms:
            geom1 = extractPolygons(difference(geom1, g))
            if not geom1:
                return None
        return geom1
    # geom1 is a multi geometry: merge the piecewise results.
    diff_geom = None
    for g in geom1.geoms:
        diff_g = difference(g, geom2)
        if diff_g:
            diff_geom = mergeGeometry(diff_geom, diff_g)
    return diff_geom
def intersects(geom1, geom2, split=False):
    """Intersection test that tolerates topology errors.

    Tries shapely's native intersects first; on failure retries with
    split=True, testing pairwise over the component polygons of both
    geometries. Returns True as soon as any pair intersects.
    """
    if not split:
        try:
            return geom1.intersects(geom2)
        except Exception:  # narrowed from a bare except: keep SystemExit/KeyboardInterrupt fatal
            return intersects(geom1, geom2, split=True)
    if isinstance(geom1, Polygon):
        if isinstance(geom2, Polygon):
            return geom1.intersects(geom2)
        return any(intersects(geom1, g) for g in geom2.geoms)
    return any(intersects(g, geom2) for g in geom1.geoms)
def default_print_progress_status(print_timestamp=True):
    """Build the default progress-status printer.

    Returns a callable that prints its message, prefixed with a
    "YYYY-mm-dd HH:MM:SS.ffffff : " timestamp when print_timestamp is true
    and the message is non-empty; otherwise the message is printed as-is.
    """
    # Fixes the original's redundant `print_timestamp = print_timestamp`
    # self-assignment and collapses the duplicated plain-print branches.
    def _print_progress_status(msg):
        if print_timestamp and msg:
            print("{} : {}".format(
                datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"), msg))
        else:
            print("{}".format(msg))
    return _print_progress_status
def getShapelyGeometry(feature):
    """Convert a GeoJSON-like feature dict to a shapely geometry.

    Returns None when the feature carries no geometry. GeometryCollection
    members are converted one by one and rewrapped.
    """
    geometry = feature["geometry"]
    if not geometry:
        return None
    if geometry["type"] == "GeometryCollection":
        members = [shape(g) for g in geometry["geometries"]]
        return GeometryCollection(members)
    return shape(geometry)
# Returns a polygon or multipolygon when one is present, otherwise None.
class PolygonUtil(object):
    """Utility for inspecting and repairing polygon/multipolygon geometries.

    The FIX_* constants are bit flags; repair methods return a bitmask of
    the fixes they applied (decode with fix_type_names).
    """
    # Individual fix flags, one bit each.
    FIX_RING_ORIENT = int(math.pow(2,0))
    FIX_SELFINTERSECT_LINES = int(math.pow(2,1))
    FIX_SELFINTERSECT_POINTS = int(math.pow(2,2))
    FIX_ORPHAN_RINGS = int(math.pow(2,3))
    FIX_ORPHAN_RING_AS_INTERIOR_RING = int(math.pow(2,4))
    FIX_ORPHAN_RING_AS_ISLAND = int(math.pow(2,5))
    REMOVE_DUPLICATE_POINT = int(math.pow(2,6))
    SPLIT_EXTERIOR_RING_2_EXTERIOR_HOLE = int(math.pow(2,7))
    SPLIT_INTERIOR_HOLE_2_INTERIOR_RING = int(math.pow(2,8))
    SPLIT_EXTERIOR_HOLE_2_EXTERIOR_RING = int(math.pow(2,9))
    SPLIT_INTERIOR_RING_2_INTERIOR_HOLE = int(math.pow(2,10))
    SPLIT_EXTERIOR_RING = int(math.pow(2,11))
    SPLIT_INTERIOR_HOLE = int(math.pow(2,12))
    SPLIT_EXTERIOR_HOLE = int(math.pow(2,13))
    SPLIT_INTERIOR_RING = int(math.pow(2,14))
    SPLIT_ORPHAN_RING = int(math.pow(2,15))
    # Human-readable name for each flag, preserved in declaration order.
    FIX_TYPES = OrderedDict([
        (FIX_RING_ORIENT,"Fix ring orient"),
        (FIX_SELFINTERSECT_LINES , "Fix selfintersected lines"),
        (FIX_SELFINTERSECT_POINTS, "Fix selfintersected points"),
        (FIX_ORPHAN_RINGS,"Fix orphan rings"),
        (FIX_ORPHAN_RING_AS_INTERIOR_RING,"Fix orphan ring as interior ring"),
        (FIX_ORPHAN_RING_AS_ISLAND ,"Fix orphan ring as island"),
        (REMOVE_DUPLICATE_POINT ,"Remove duplicate point"),
        (SPLIT_EXTERIOR_RING_2_EXTERIOR_HOLE,"Split exterior ring to exterior ring and exterior hole"),
        (SPLIT_INTERIOR_HOLE_2_INTERIOR_RING,"Split interior hole to interior hole and interior ring"),
        (SPLIT_EXTERIOR_HOLE_2_EXTERIOR_RING,"Split exterior hole to exterior hole and exterior ring"),
        (SPLIT_INTERIOR_RING_2_INTERIOR_HOLE,"Split interior ring to interior ring and interior hole"),
        (SPLIT_EXTERIOR_RING,"Split exterior rint to 2 exterior rings"),
        (SPLIT_INTERIOR_HOLE,"Split interior hole to 2 interior holes"),
        (SPLIT_EXTERIOR_HOLE,"Split exterior hole to 2 exterior holes"),
        (SPLIT_INTERIOR_RING,"Split interior ring to 2 interior rings"),
        (SPLIT_ORPHAN_RING,"Split orphan ring to 2 orphan rings")
    ])
def __init__(self,name,geom,parentPath=None,print_progress_status=None,properties=None):
self.geom = geom
self.parentPath = parentPath
self.pos = 0
self.name = name
self.properties = properties
self.print_progress_status = print_progress_status or default_print_progress_status()
@classmethod
def fix_type_names(cls,fix_type):
return [name for t,name in cls.FIX_TYPES.items() if t & fix_type == t]
def polygons(self, refresh=False):
    """
    Return all polygons included in the geometry as a list of (path, polygon).

    Paths are dotted position strings rooted at parentPath. The result is
    cached on the instance; pass refresh=True to rebuild. Returns None when
    the geometry contains no polygons.
    """
    def getGeomPath():
        # Allocate the next child path under this util's parent path.
        curPos = self.pos
        self.pos += 1
        if self.parentPath:
            return "{}.{}".format(self.parentPath, curPos)
        else:
            return str(curPos)

    if refresh or (not hasattr(self, "_polygons")):
        self.pos = 0
        if isinstance(self.geom, Polygon):
            self._polygons = [(getGeomPath(), self.geom)]
        elif isinstance(self.geom, MultiPolygon):
            self._polygons = [(getGeomPath(), geom1) for geom1 in self.geom.geoms]
        elif isinstance(self.geom, GeometryCollection):
            polygonList = None
            for g in self.geom:
                # BUG FIX: the original assigned the bound method (missing
                # call parentheses) and then tested an undefined name `p`,
                # raising NameError on any GeometryCollection input.
                sublist = PolygonUtil(
                    self.name, g,
                    parentPath=getGeomPath(),
                    print_progress_status=self.print_progress_status,
                    properties=self.properties).polygons()
                if not sublist:
                    continue
                elif not polygonList:
                    polygonList = sublist
                else:
                    polygonList += sublist
            self._polygons = polygonList
        else:
            self._polygons = None
    return self._polygons
def expandGeom(self, geom=None):
    """
    Expand a geometry into a nested ['type', payload] tree structure.

    Defaults to this util's own geometry when geom is None. Polygons become
    ['polygon', geom]; multipolygons and collections wrap lists of expanded
    children; anything else becomes ['other', geom].
    """
    geom = geom or self.geom
    # BUG FIX: the original returned/inspected self.geom instead of the geom
    # parameter in several branches, so recursive expansion of collection
    # members produced nodes pointing at the wrong geometry.
    if isinstance(geom, Polygon):
        return ['polygon', geom]
    elif isinstance(geom, MultiPolygon):
        return ["multipolygon", [['polygon', g] for g in geom.geoms]]
    elif isinstance(geom, GeometryCollection):
        return ['geometrycollection', [self.expandGeom(g) for g in geom]]
    else:
        return ['other', geom]
def addOrphanRing(self, expandedGeom, orphanRings):
    """
    Try to add each orphan interior ring into one of the polygons.

    A ring contained by some polygon is added to that polygon as an interior
    ring (and removed from orphanRings); remaining rings are appended as
    independent polygons (islands). expandedGeom is modified in place.
    Returns a bitmask of the FIX_* types applied.
    """
    fix_types = 0

    def _addAsInteriorRing(node, orphanPoly):
        # Depth-first search for a polygon node that contains the orphan.
        if node[0] == 'polygon':
            if node[1].contains(orphanPoly):
                # Add the orphan as an interior ring, reversed so the hole
                # gets the opposite orientation to the exterior ring.
                node[1] = Polygon(
                    node[1].exterior,
                    [r for r in node[1].interiors]
                    + [LinearRing([c for c in reversed(orphanPoly.exterior.coords)])])
                return True
            return False
        if node[0] in ('multipolygon', 'geometrycollection'):
            for child in node[1]:
                if _addAsInteriorRing(child, orphanPoly):
                    return True
            return False
        # 'other' (or anything unexpected) cannot absorb a ring.
        return False

    def _asPolygon(ring_coords):
        # Build a polygon with a clockwise exterior from raw ring coords.
        ring = LinearRing(ring_coords)
        if ring.is_ccw:
            return Polygon(LinearRing(reversed(ring.coords)))
        return Polygon(ring)

    # Walk backwards so contained rings can be deleted while iterating.
    index = len(orphanRings) - 1
    while index >= 0:
        orphanPoly = _asPolygon(orphanRings[index])
        if _addAsInteriorRing(expandedGeom, orphanPoly):
            fix_types |= self.FIX_ORPHAN_RING_AS_INTERIOR_RING
            del orphanRings[index]
        index -= 1

    if orphanRings:
        fix_types |= self.FIX_ORPHAN_RING_AS_ISLAND
        self.print_progress_status("Still have some orphan rings are not contained by polygon, added them as independent polygons")
        if expandedGeom[0] == 'polygon':
            # BUG FIX: mutate the node in place. The original rebound the
            # local name (expandedGeom = [...]), so the promotion to
            # 'multipolygon' and the appended islands were invisible to the
            # caller.
            expandedGeom[:] = ["multipolygon", [['polygon', expandedGeom[1]]]]
        if expandedGeom[0] in ("multipolygon", "geometrycollection"):
            for orphanRing in orphanRings:
                expandedGeom[1].append(['polygon', _asPolygon(orphanRing)])
        else:
            # BUG FIX: the original formatted the literal string
            # "expandedGeom[0]" instead of the node's actual type value.
            raise Exception("Doesn't support expanded geometry type ({})".format(expandedGeom[0]))
    return fix_types
def collapseGeom(self,expandedGeom):
"""
Collapse a expanded tree structure as a single geometry
"""
if expandedGeom[0] == 'polygon':
return expandedGeom[1]
elif expandedGeom[0] == 'other':
return expandedGeom[1]
elif expandedGeom[0] == 'multipolygon':
return MultiPolygon([self.collapseGeom(g) for g in expandedGeom[1]])
elif expandedGeom[0] == 'geometrycollection':
return GeometryCollection([self.collapseGeom(g) for g in expandedGeom[1]])
else:
raise Exception("Unsupported geometry type {}".format(expandedGeom[0]))
def first_selfintersect_point(self,ring_coords):
"""
return the first selfintersect point
"""
index = 0
coord_map = {}
for coord in ring_coords:
if coord in coord_map:
if index not in [0,len(ring_coords) - 1]:
return (coord,coord_map[coord],index)
else:
coord_map[coord] = index
index += 1
return None
def selfintersect_points(self,ring):
index = 0
coord_map = {}
duplicate_coords = []
for coord in ring.coords:
if coord in coord_map:
coord_map[coord].append(index)
if coord not in duplicate_coords:
duplicate_coords.append(coord)
else:
coord_map[coord] = [index]
index += 1
result = []
duplicate_coords.sort(key=lambda coord:coord_map[coord][0])
for coord in duplicate_coords:
if any([i for i in coord_map[coord] if i != 0 and i != len(ring.coords) - 1]):
result.append((coord,coord_map[coord]))
return result
def split_intersectlines(self,ring):
"""
find intersectlines
if find, split the line into two lines, and return new ring;otherwise return False
"""
index = 0
coords_len = len(ring.coords)
lines = []
#genarte all lines
while index < coords_len - 1:
lines.append(LineString(ring.coords[index:index+2]))
index += 1
self.print_progress_status("have {} lines for processing ".format(len(lines)))
index = 0
lines_len = len(lines)
changed = False
intersected_lines = []
while index < lines_len - 2:
#self.print_progress_status("check line {}/{}".format(index + 1,len(lines)))
subindex = index + 2
while subindex < lines_len:
intersected_point = lines[index].intersection(lines[subindex])
if intersected_point:
intersected_coord = intersected_point.coords[0]
if intersected_coord | |
# Controller-shape creation helpers (from the renaudll/omtk rigging library).
import pymel.core as pymel
from maya import OpenMaya
from maya import cmds
from omtk.libs import libRigging
def create_shape_circle(size=1.0, normal=(1, 0, 0), *args, **kwargs):
    """
    Create a linear nurbs-circle controller shape.

    :param float size: Radius of the circle.
    :param tuple normal: Normal axis of the circle plane.
    :return: The created transform and its makeNurbCircle construction node.
    Extra positional/keyword arguments are forwarded to ``pymel.circle``.
    """
    transform, make = pymel.circle(*args, **kwargs)
    # The original set the radius twice; once is enough.
    make.radius.set(size)
    make.normal.set(normal)
    make.degree.set(1)  # Linear degree keeps the control shape light.
    make.sections.set(8)

    # Expose the rotateOrder for animators.
    transform.rotateOrder.setKeyable(True)

    return transform, make
def create_shape_needle(size=1, length=None, radius=None, name=None, normal=(0, 1, 0), *args, **kwargs):
    """
    Create a "needle" controller: a vertical stem topped by a small sphere-like
    head (a vertical loop plus a horizontal loop), merged under one transform.

    :param size: Base scale used to derive default length and radius.
    :param length: Stem length; defaults to ``size``.
    :param radius: Head radius; defaults to ``size * 0.25``.
    :param name: Optional name to give the resulting transform.
    :param normal: Axis the needle points along; one of the signed unit axes.
    :return: The transform holding all needle shapes.
    """
    # Resolve length
    # Default length equals the provided size.
    # NOTE(review): an earlier comment said "4x the provided size" but the
    # code multiplies by 1.0 — confirm which was intended.
    if length is None:
        length = size * 1.0
    # Resolve radius (head size) relative to the overall size.
    if radius is None:
        radius = size * 0.25
    # Pre-compute the head-circle extremes: "mid" values are the 45-degree
    # points of the loops (0.75 factor approximates a circle with a
    # linear 8-point curve).
    radius_mid = radius * 0.75
    y_circle_mid_max = length + radius_mid
    y_circle_mid_min = length - radius_mid
    y_circle_min = length - radius
    y_circle_max = length + radius
    xz_circle_rad = radius
    xz_circle_mid_rad = xz_circle_rad * 0.75
    # Stem: straight line from the origin up to the bottom of the head.
    shape1 = pymel.curve(d=1, p=[
        (0.0, 0.0, 0.0),
        (0.0, y_circle_min, 0.0)
    ])
    # Vertical loop of the head (in the XY plane).
    shape2 = pymel.curve(d=1, p=[
        (0.0, y_circle_max, -0.0),
        (0.0, y_circle_max, 0.0),
        (xz_circle_mid_rad, y_circle_mid_max, 0.0),
        (xz_circle_rad, length, 0.0),
        (xz_circle_mid_rad, y_circle_mid_min, 0.0),
        (0.0, y_circle_min, 0),
        (-xz_circle_mid_rad, y_circle_mid_min, -0.0),
        (-xz_circle_rad, length, 0.0),
        (-xz_circle_mid_rad, y_circle_mid_max, 0.0),
        (0.0, y_circle_max, 0.0),
        (xz_circle_mid_rad, y_circle_mid_max, 0.0)
    ])
    # Horizontal loop of the head (in the XZ plane at stem height).
    shape3 = pymel.curve(d=1, p=[
        (-xz_circle_mid_rad, length, -xz_circle_mid_rad),
        (-xz_circle_rad, length, 0.0),
        (-xz_circle_mid_rad, length, xz_circle_mid_rad),
        (0.0, length, xz_circle_rad),
        (xz_circle_mid_rad, length, xz_circle_mid_rad),
        (xz_circle_rad, length, 0.0),
        (xz_circle_mid_rad, length, -xz_circle_mid_rad),
        (0.0, length, -xz_circle_rad),
        (-xz_circle_mid_rad, length, -xz_circle_mid_rad),
        (-xz_circle_rad, length, 0.0),
        (-xz_circle_rad, length, 0.0)
    ])
    # Merge the loop shapes under the stem transform and discard the
    # now-empty transforms.
    shape2.getShape().setParent(shape1, shape=True, relative=True)
    shape3.getShape().setParent(shape1, shape=True, relative=True)
    pymel.delete(shape2)
    pymel.delete(shape3)
    # Apply normal parameter by rotating the transform, then freezing the
    # rotation so the controller stays "clean".
    # TODO: Find a better way
    need_identity = True
    normal_x, normal_y, normal_z = normal
    if normal_x:
        if normal_x < 0:
            shape1.rotateZ.set(90)
        else:
            shape1.rotateZ.set(-90)
    elif normal_y:
        if normal_y < 0:
            shape1.rotateX.set(180)
        else:
            # +Y is the build orientation: nothing to freeze.
            need_identity = False
    elif normal_z:
        if normal_z < 0:
            shape1.rotateX.set(-90)
        else:
            shape1.rotateX.set(90)
    if need_identity:
        pymel.makeIdentity(shape1, apply=True, rotate=True)
    if name:
        shape1.rename(name)
    # Expose the rotateOrder for animators.
    shape1.rotateOrder.setKeyable(True)
    return shape1
def create_shape_double_needle(normal=(0, 1, 0), *args, **kwargs):
    """
    Create a two-headed needle controller: two needle shapes pointing in
    opposite directions merged under a single transform.
    """
    inverted_normal = tuple(-axis for axis in normal)
    result = create_shape_needle(normal=normal, *args, **kwargs)
    mirror = create_shape_needle(normal=inverted_normal, *args, **kwargs)

    # Re-parent the mirrored shapes under the first transform, then discard
    # the now-empty mirror transform.
    for shape_node in mirror.getShapes():
        shape_node.setParent(result, shape=True, relative=True)
    pymel.delete(mirror)

    # Expose the rotateOrder for animators.
    result.rotateOrder.setKeyable(True)
    return result
def create_shape_cross(size=1.0, **kwargs):
    """
    Create a flat plus/cross-shaped controller curve lying in the YZ plane.
    """
    inner = size * 0.5
    outer = size
    points = [
        (0, -inner, inner),
        (0, -inner, outer),
        (0, inner, outer),
        (0, inner, inner),
        (0, outer, inner),
        (0, outer, -inner),
        (0, inner, -inner),
        (0, inner, -outer),
        (0, -inner, -outer),
        (0, -inner, -inner),
        (0, -outer, -inner),
        (0, -outer, inner),
        (0, -inner, inner),
    ]
    node = pymel.curve(d=1, p=points, **kwargs)

    # Expose the rotateOrder for animators.
    node.rotateOrder.setKeyable(True)
    return node
def create_shape_attrholder(size=1.0, **kwargs):
    """
    Create a wireframe-sphere controller, commonly used as a holder for
    custom attributes.

    :param float size: Radius of the sphere.
    Extra keyword arguments are forwarded to ``pymel.curve``.
    """
    s1 = size
    s2 = s1 * 0.7
    # Three orthogonal rings drawn as one continuous linear curve.
    points = [
        (0, 0, s1), (0, s2, s2), (0, s1, 0), (0, s2, -s2), (0, 0, -s1),
        (0, -s2, -s2), (0, -s1, 0), (0, -s2, s2), (0, 0, s1), (-s2, 0, s2),
        (-s1, 0, 0), (-s2, s2, 0), (0, s1, 0), (s2, s2, 0), (s1, 0, 0),
        (s2, 0, -s2), (0, 0, -s1), (-s2, 0, -s2), (-s1, 0, 0), (-s2, -s2, 0),
        (0, -s1, 0), (s2, -s2, 0), (s1, 0, 0), (s2, 0, s2), (0, 0, s1),
        (-s2, 0, s2),
    ]
    # BUGFIX: the original passed ``*kwargs``, which unpacks the dict KEYS as
    # positional arguments; ``**kwargs`` forwards them as keywords.
    node = pymel.curve(d=1, p=points, k=range(26), **kwargs)

    # Expose the rotateOrder for animators.
    node.rotateOrder.setKeyable(True)
    return node
def create_shape_box(size=1.0, r=None, h=None):
    """
    Create a flat rectangular box controller curve.

    :param float size: Default scale used when ``r`` or ``h`` are omitted.
    :param r: Half-width of the box; defaults to ``size``.
    :param h: Half-height of the box; defaults to ``size / 5.0``.
    """
    r = size if r is None else r
    h = size / 5.0 if h is None else h

    # Trace every edge of the box with a single linear curve.
    corners = [
        (-r, -h, r), (-r, h, r), (r, h, r), (r, -h, r),
        (-r, -h, r), (-r, -h, -r), (-r, h, -r), (-r, h, r),
        (r, h, r), (r, h, -r), (r, -h, -r), (r, -h, r),
        (r, -h, -r), (-r, -h, -r), (-r, h, -r), (r, h, -r),
    ]
    node = pymel.curve(d=1, p=corners)

    # Expose the rotateOrder for animators.
    node.rotateOrder.setKeyable(True)
    return node
def _get_bounds_using_raycast(positions, dirs, geometries, parent_tm=None, filter=None):
min_x = max_x = min_y = max_y = min_z = max_z = None
parent_tm_inv = parent_tm.inverse()
# Ray-cast
for pos in positions:
#x = pos.x
#y = pos.y
#z = pos.z
# Expand bounds using starting positions.
pos_local = pymel.datatypes.Point(pos) * parent_tm_inv if parent_tm is not None else pos
x_local = pos_local.x
y_local = pos_local.y
z_local = pos_local.z
if min_x is None or x_local < min_x:
min_x = x_local
if max_x is None or x_local > max_x:
max_x = x_local
if min_y is None or y_local < min_y:
min_y = y_local
if max_y is None or y_local > max_y:
max_y = y_local
if min_z is None or z_local < min_z:
min_z = z_local
if max_z is None or z_local > max_z:
max_z = z_local
for dir in dirs:
ray_cast_pos = libRigging.ray_cast_nearest(pos, dir, geometries, debug=False)
if ray_cast_pos is None:
continue
if parent_tm is not None:
ray_cast_pos = ray_cast_pos * parent_tm_inv
x = ray_cast_pos.x
y = ray_cast_pos.y
z = ray_cast_pos.z
if min_x is None or x < min_x:
min_x = x
if max_x is None or x > max_x:
max_x = x
if min_y is None or y < min_y:
min_y = y
if max_y is None or y > max_y:
max_y = y
if min_z is None or z < min_z:
min_z = z
if max_z is None or z > max_z:
max_z = z
return min_x, max_x, min_y, max_y, min_z, max_z
def create_shape_box_arm(refs, geometries, epsilon=0.01, default_size=1.0):
    """
    Create a box controller fitted around an arm by ray-casting from the
    provided reference transforms onto the surrounding geometries.

    :param refs: Reference transforms (e.g. joints); the first defines the
        local space of the box.
    :param geometries: Meshes used as ray-cast targets.
    :param epsilon: Minimum per-axis extent before default sizing kicks in.
    :param default_size: Fallback extent for degenerate axes.
    :return: The created curve transform.
    """
    # TODO: Prevent crashes when there's no geometries
    ref = next(iter(refs))
    ref_tm = ref.getMatrix(worldSpace=True)
    positions = [r.getTranslation(space='world') for r in refs]
    # Resolve raycast directions
    dir_offset_tm = pymel.datatypes.Matrix(  # Remove translation from ref_tm to keep direction normalized.
        ref_tm.a00, ref_tm.a01, ref_tm.a02, ref_tm.a03,
        ref_tm.a10, ref_tm.a11, ref_tm.a12, ref_tm.a13,
        ref_tm.a20, ref_tm.a21, ref_tm.a22, ref_tm.a23,
        0, 0, 0, 1
    )
    x_pos = ref.getTranslation(space='world').x
    # Cast along the reference's local +/-Y and +/-Z axes.
    dirs = [
        OpenMaya.MPoint(0,-1,0) * dir_offset_tm,
        OpenMaya.MPoint(0,1,0) * dir_offset_tm,
        OpenMaya.MPoint(0,0,-1) * dir_offset_tm,
        OpenMaya.MPoint(0,0,1) * dir_offset_tm
    ]
    # HACK : Check the x_position to know in which direction we need to do the raycast
    # (presumably left/right side of a mirrored rig — confirm with callers).
    if x_pos >= 0.0:
        dirs.append(
            OpenMaya.MPoint(1,0,0) * dir_offset_tm,
        )
    else:
        dirs.append(
            OpenMaya.MPoint(-1,0,0) * dir_offset_tm,
        )
    # Bounds are computed in the reference's local space.
    min_x, max_x, min_y, max_y, min_z, max_z = _get_bounds_using_raycast(positions, dirs, geometries, parent_tm=ref_tm)
    # Ensure a minimum size for the ctrl on each degenerate axis.
    if (max_x - min_x) < epsilon:
        max_x = default_size
    if (max_y - min_y) < epsilon:
        min_y = -default_size * 0.5
        max_y = default_size * 0.5
    if (max_z - min_z) < epsilon:
        min_z = -default_size * 0.5
        max_z = default_size * 0.5
    # Convert our bouding box into the eight box corners.
    #min_x = 0
    pos1 = pymel.datatypes.Point(min_x, min_y, min_z)
    pos2 = pymel.datatypes.Point(min_x, min_y, max_z)
    pos3 = pymel.datatypes.Point(min_x, max_y, min_z)
    pos4 = pymel.datatypes.Point(min_x, max_y, max_z)
    pos5 = pymel.datatypes.Point(max_x, min_y, min_z)
    pos6 = pymel.datatypes.Point(max_x, min_y, max_z)
    pos7 = pymel.datatypes.Point(max_x, max_y, min_z)
    pos8 = pymel.datatypes.Point(max_x, max_y, max_z)
    # Single linear curve visiting every edge of the box.
    node = pymel.curve(d=1, p=[pos2, pos4, pos8, pos6, pos2, pos1, pos3, pos4, pos8, pos7, pos5, pos6, pos5, pos1, pos3, pos7] )
    # Expose the rotateOrder for animators.
    node.rotateOrder.setKeyable(True)
    return node
def create_shape_box_feet(refs, geometries, *args, **kwargs):
    """
    Create a box controller fitted around a foot by ray-casting horizontally
    from the reference positions onto the surrounding geometries.

    :param refs: Reference transforms; the first defines the box pivot.
    :param geometries: Meshes used as ray-cast targets.
    :return: The created curve transform.
    """
    ref = next(iter(refs))
    ref_pos = ref.getTranslation(space='world')
    # Translation-only local space: the box stays axis-aligned at the foot.
    ref_tm = pymel.datatypes.Matrix(
        1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 1, 0,
        ref_pos.x, ref_pos.y, ref_pos.z, 1
    )
    positions = [ref.getTranslation(space='world') for ref in refs]
    # Cast only sideways/forward/backward; the floor fixes the bottom.
    dirs = [
        OpenMaya.MVector(-1,0,0),
        OpenMaya.MVector(1,0,0),
        OpenMaya.MVector(0,0,-1),
        OpenMaya.MVector(0,0,1)
    ]
    # Sanity check, ensure that at least one point is in the bounds of geometries.
    # This can prevent rays from being fired from outside a geometry.
    # TODO: Make it more robust.
    filtered_geometries = []
    for geometry in geometries:
        xmin, ymin, zmin, xmax, ymax, zmax = cmds.exactWorldBoundingBox(geometry.__melobject__())
        bound = pymel.datatypes.BoundingBox((xmin, ymin, zmin), (xmax, ymax, zmax))
        if any(True for pos in positions if bound.contains(pos)):
            filtered_geometries.append(geometry)
    # Using all provided objects
    min_x, max_x, min_y, max_y, min_z, max_z = _get_bounds_using_raycast(positions, dirs, filtered_geometries, parent_tm=ref_tm)
    # Snap the bottom of the box to the ground plane (y=0 in world space).
    min_y = min(min_y, - ref_pos.y)
    # If no geometry was provided, there won't be any width in the returned values.
    if not geometries:
        length = max_z - min_z
        desired_width = length * 0.25
        min_x = -desired_width
        max_x = desired_width
    # The eight corners of the bounding box.
    pos1 = pymel.datatypes.Point(min_x, min_y, min_z)
    pos2 = pymel.datatypes.Point(min_x, min_y, max_z)
    pos3 = pymel.datatypes.Point(min_x, max_y, min_z)
    pos4 = pymel.datatypes.Point(min_x, max_y, max_z)
    pos5 = pymel.datatypes.Point(max_x, min_y, min_z)
    pos6 = pymel.datatypes.Point(max_x, min_y, max_z)
    pos7 = pymel.datatypes.Point(max_x, max_y, min_z)
    pos8 = pymel.datatypes.Point(max_x, max_y, max_z)
    # HACK: Convert to local space... (disabled experiment kept for reference)
    '''
    ref = next(iter(refs))
    pos = ref.getTranslation(space='world')
    pos1 -= pos
    pos2 -= pos
    pos3 -= pos
    pos4 -= pos
    pos5 -= pos
    pos6 -= pos
    pos7 -= pos
    pos8 -= pos
    '''
    # Single linear curve visiting every edge of the box.
    node = pymel.curve(d=1, p=[pos2, pos4, pos8, pos6, pos2, pos1, pos3, pos4, pos8, pos7, pos5, pos6, pos5, pos1, pos3, pos7] )
    # Expose the rotateOrder for animators.
    node.rotateOrder.setKeyable(True)
    return node
def create_square(size=1.0, width=None, height=None, **kwargs):
    """
    Create a flat rectangular controller curve in the XY plane.

    :param float size: Scale applied to both width and height.
    :param width: Half-width factor; defaults to 1.0.
    :param height: Half-height factor; defaults to 1.0.

    NOTE: extra keyword arguments are accepted but not forwarded, matching
    the historical behavior of this helper.
    """
    width = (1.0 if width is None else width) * size
    height = (1.0 if height is None else height) * size

    corners = [
        pymel.datatypes.Point(-height, -width, 0),
        pymel.datatypes.Point(-height, width, 0),
        pymel.datatypes.Point(height, width, 0),
        pymel.datatypes.Point(height, -width, 0),
        pymel.datatypes.Point(-height, -width, 0),
    ]
    node = pymel.curve(d=1, p=corners)

    # Expose the rotateOrder for animators.
    node.rotateOrder.setKeyable(True)
    return node
def create_triangle_upp():
p1 = [0, 0.577, 0]
p2 = [-0.5, -0.288, | |
import os
import torch
import torchvision
import torch.nn as nn
from SCNN import SCNN
from PIL import Image
from scipy import stats
import random
import torch.nn.functional as F
import numpy as np
import time
import scipy.io
import itertools
from torch.optim import lr_scheduler
def pil_loader(path):
    """Load the image at ``path`` with PIL and force it to RGB.

    The file is opened explicitly (rather than passing the path to
    ``Image.open``) to avoid a ResourceWarning, see
    https://github.com/python-pillow/Pillow/issues/835.
    """
    with open(path, 'rb') as stream:
        image = Image.open(stream)
        return image.convert('RGB')
def accimage_loader(path):
    """Load the image at ``path`` with accimage, falling back to PIL when
    accimage cannot decode the file."""
    import accimage

    try:
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem — fall back to PIL.Image.
        return pil_loader(path)
def default_loader(path):
    """Dispatch to the image loader matching torchvision's configured
    backend ('accimage' or PIL)."""
    from torchvision import get_image_backend

    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    return pil_loader(path)
def weight_init(net):
    """Initialize ``net`` in place.

    Conv2d and Linear weights get Kaiming-normal initialization with ReLU
    gain; Linear biases are zeroed (Conv2d biases are left untouched);
    BatchNorm2d layers get unit weight and zero bias.
    """
    for module in net.modules():
        if isinstance(module, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_normal_(module.weight.data, nonlinearity='relu')
            # Only Linear biases are zeroed, matching the original scheme.
            if isinstance(module, nn.Linear):
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
class DBCNN(torch.nn.Module):
    """ResNet-50 backbone followed by global average pooling and a single
    linear head producing one quality score per image."""

    def __init__(self, options):
        """Declare all needed layers.

        :param options: Hyperparameter dict; when ``options['fc']`` is True
            the backbone is frozen and only the linear head is trained.
        """
        nn.Module.__init__(self)
        self.features1 = torchvision.models.resnet50(pretrained=True)
        # Global average pooling collapses the spatial map to one vector.
        self.pooling = nn.AdaptiveAvgPool2d(1)
        # Regression head: 2048 backbone channels -> 1 score.
        self.fc = torch.nn.Linear(2048, 1)

        if options['fc'] == True:
            # Freeze the backbone so only the head learns.
            for param in self.features1.parameters():
                param.requires_grad = False
            # Initialize the head.
            nn.init.kaiming_normal_(self.fc.weight.data)
            if self.fc.bias is not None:
                nn.init.constant_(self.fc.bias.data, val=0)

    def forward(self, X):
        """Forward pass: backbone features -> pooled vector -> score of
        shape ``(N, 1)``."""
        batch = X.size()[0]
        features = self.features1.conv1(X)
        features = self.features1.bn1(features)
        features = self.features1.relu(features)
        features = self.features1.maxpool(features)
        features = self.features1.layer1(features)
        features = self.features1.layer2(features)
        features = self.features1.layer3(features)
        features = self.features1.layer4(features)
        assert features.size()[1] == 2048
        pooled = self.pooling(features)
        assert pooled.size() == (batch, 2048, 1, 1)
        flat = pooled.view(batch, 2048)
        score = self.fc(flat)
        assert score.size() == (batch, 1)
        return score
class DBCNNManager(object):
    """Training/evaluation harness for the DBCNN quality model.

    Builds the network, loss, optimizer, per-dataset augmentation and data
    loaders from the ``options`` and ``path`` dicts, then exposes
    :meth:`train`.  NOTE(review): CUDA is required — models and tensors are
    moved with ``.cuda()`` unconditionally.
    """

    def __init__(self, options, path):
        """Prepare the network, criterion, solver, and data.
        Args:
            options, dict: Hyperparameters (lr, batch size, dataset name,
                train/test indexes, and the 'fc' head-only-training flag).
            path, dict: Dataset root directories and checkpoint locations.
        """
        print('Prepare the network and data.')
        self._options = options
        self._path = path

        # Network.
        self._net = torch.nn.DataParallel(DBCNN(self._options), device_ids=[0]).cuda()
        # When fine-tuning the whole net, start from the head-only checkpoint.
        if self._options['fc'] == False:
            self._net.load_state_dict(torch.load(path['fc_root']))

        print(self._net)
        # Criterion.
        self._criterion = torch.nn.MSELoss().cuda()

        # Solver: SGD on the head alone, Adam for full fine-tuning.
        if self._options['fc'] == True:
            self._solver = torch.optim.SGD(
                self._net.module.fc.parameters(), lr=self._options['base_lr'],
                momentum=0.9, weight_decay=self._options['weight_decay'])
        else:
            self._solver = torch.optim.Adam(
                self._net.module.parameters(), lr=self._options['base_lr'],
                weight_decay=self._options['weight_decay'])

        # Per-dataset training augmentation.  ``|`` acts as logical-or here
        # because both operands are bools.
        if (self._options['dataset'] == 'live') | (self._options['dataset'] == 'livec'):
            if self._options['dataset'] == 'live':
                crop_size = 432
            else:
                crop_size = 448
            train_transforms = torchvision.transforms.Compose([
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.RandomCrop(size=crop_size),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                 std=(0.229, 0.224, 0.225))
            ])
        # if (self._options['dataset'] == 'live'):
        #     if self._options['dataset'] == 'live':
        #         crop_size = 432
        #     else:
        #         crop_size = 448
        #     train_transforms = torchvision.transforms.Compose([
        #         torchvision.transforms.RandomHorizontalFlip(),
        #         torchvision.transforms.RandomCrop(size=crop_size),
        #         torchvision.transforms.ToTensor(),
        #         torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
        #                                          std=(0.229, 0.224, 0.225))
        #     ])
        # elif (self._options['dataset'] == 'livec'):
        #     train_transforms = torchvision.transforms.Compose([
        #         torchvision.transforms.RandomHorizontalFlip(),
        #         torchvision.transforms.ToTensor(),
        #         torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
        #                                          std=(0.229, 0.224, 0.225))
        #     ])
        elif (self._options['dataset'] == 'csiq') | (self._options['dataset'] == 'tid2013'):
            train_transforms = torchvision.transforms.Compose([
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                 std=(0.229, 0.224, 0.225))
            ])
        elif self._options['dataset'] == 'mlive':
            train_transforms = torchvision.transforms.Compose([
                torchvision.transforms.Resize((570, 960)),
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                 std=(0.229, 0.224, 0.225))
            ])
        elif self._options['dataset'] == 'livecp':
            train_transforms = torchvision.transforms.Compose([
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                 std=(0.229, 0.224, 0.225))
            ])
        elif self._options['dataset'] == 'koniq10k':
            train_transforms = torchvision.transforms.Compose([
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                 std=(0.229, 0.224, 0.225))
            ])
        elif self._options['dataset'] == 'kadid10k':
            train_transforms = torchvision.transforms.Compose([
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                 std=(0.229, 0.224, 0.225))
            ])
        elif self._options['dataset'] == 'kadis700k':
            train_transforms = torchvision.transforms.Compose([
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                 std=(0.229, 0.224, 0.225))
            ])
        elif self._options['dataset'] == 'selftrain':
            train_transforms = torchvision.transforms.Compose([
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.RandomCrop(size=500),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                 std=(0.229, 0.224, 0.225))
                # torchvision.transforms.Resize((384, 512)),
                # torchvision.transforms.RandomHorizontalFlip(),
                # torchvision.transforms.ToTensor(),
                # torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
                #                                  std=(0.229, 0.224, 0.225))
            ])
        # Evaluation uses only normalization — no augmentation.
        test_transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                             std=(0.229, 0.224, 0.225))
        ])

        # Dataset folders are project-local modules imported lazily so only
        # the selected dataset's loader code is required.
        if self._options['dataset'] == 'live':
            import LIVEFolder
            train_data = LIVEFolder.LIVEFolder(
                root=self._path['live'], loader=default_loader, index=self._options['train_index'],
                transform=train_transforms)
            test_data = LIVEFolder.LIVEFolder(
                root=self._path['live'], loader=default_loader, index=self._options['test_index'],
                transform=test_transforms)
        elif self._options['dataset'] == 'csiq':
            import CSIQFolder
            train_data = CSIQFolder.CSIQFolder(
                root=self._path['csiq'], loader=default_loader, index=self._options['train_index'],
                transform=train_transforms)
            test_data = CSIQFolder.CSIQFolder(
                root=self._path['csiq'], loader=default_loader, index=self._options['test_index'],
                transform=test_transforms)
        # elif self._options['dataset'] == 'livec':
        #     import LIVEChallengeFolder2
        #     train_data = LIVEChallengeFolder2.Koniq10kFolder(
        #         root=self._path['koniq10k'], loader=default_loader, index=self._options['train_index'],
        #         transform=train_transforms)
        #     test_data = LIVEChallengeFolder2.LIVECompressedFolder2(
        #         root=self._path['livec'], loader=default_loader, index=self._options['test_index'],
        #         transform=test_transforms)
        elif self._options['dataset'] == 'livec':
            import LIVEChallengeFolder
            train_data = LIVEChallengeFolder.LIVEChallengeFolder(
                root=self._path['livec'], loader=default_loader, index=self._options['train_index'],
                transform=train_transforms)
            test_data = LIVEChallengeFolder.LIVEChallengeFolder(
                root=self._path['livec'], loader=default_loader, index=self._options['test_index'],
                transform=test_transforms)
        elif self._options['dataset'] == 'livecp':
            import LIVECompressedFolder
            train_data = LIVECompressedFolder.LIVECompressedFolder(
                root=self._path['livecp'], loader=default_loader, index=self._options['train_index'],
                transform=train_transforms)
            test_data = LIVECompressedFolder.LIVECompressedFolder(
                root=self._path['livecp'], loader=default_loader, index=self._options['test_index'],
                transform=test_transforms)
        elif self._options['dataset'] == 'koniq10k':
            import Koniq10kFolder
            train_data = Koniq10kFolder.Koniq10kFolder(
                root=self._path['koniq10k'], loader=default_loader, index=self._options['train_index'],
                transform=train_transforms)
            test_data = Koniq10kFolder.Koniq10kFolder(
                root=self._path['koniq10k'], loader=default_loader, index=self._options['test_index'],
                transform=test_transforms)
        elif self._options['dataset'] == 'kadid10k':
            import Kadid10kFolder
            train_data = Kadid10kFolder.Kadid10kFolder(
                root=self._path['kadid10k'], loader=default_loader, index=self._options['train_index'],
                transform=train_transforms)
            test_data = Kadid10kFolder.Kadid10kFolder(
                root=self._path['kadid10k'], loader=default_loader, index=self._options['test_index'],
                transform=test_transforms)
        elif self._options['dataset'] == 'kadis700k':
            import Kadis700kFolder
            train_data = Kadis700kFolder.Kadis700kFolder(
                root=self._path['kadis700k'], loader=default_loader, index=self._options['train_index'],
                transform=train_transforms)
            test_data = Kadis700kFolder.Kadis700kFolder(
                root=self._path['kadis700k'], loader=default_loader, index=self._options['test_index'],
                transform=test_transforms)
        elif self._options['dataset'] == 'selftrain':
            import SelfTrainFolder
            # train_data = SelfTrainFolder.Kadid10kFolder(
            #     root=self._path['kadid10k'], loader = default_loader, index = self._options['train_index'],
            #     transform=train_transforms)
            train_data = SelfTrainFolder.LIVEChallengeFolder(
                root=self._path['livecp'], loader=default_loader, index=self._options['train_index'],
                transform=train_transforms)
            # root = self._path['koniq10k'], loader = default_loader, index = self._options['train_index2'],
            # transform = train_transforms)
            # test_data = SelfTrainFolder.Kadid10kFolder(
            #     root=self._path['kadid10k'], loader = default_loader, index = self._options['test_index'],
            #     transform=test_transforms)
            test_data = SelfTrainFolder.LIVECompressed2Folder(
                root=self._path['livecp'], loader=default_loader, index=self._options['test_index'],
                transform=test_transforms)
            # test_data2 = SelfTrainFolder.Koniq10kFolder(
            #     root=self._path['koniq10k'], loader = default_loader, index = self._options['test_index2'],
            #     transform=test_transforms)
        else:
            raise AttributeError('Only support LIVE and LIVEC right now!')
        self._train_loader = torch.utils.data.DataLoader(
            train_data, batch_size=self._options['batch_size'],
            shuffle=True, num_workers=0, pin_memory=True)
        # Test loader evaluates one image at a time (variable image sizes).
        self._test_loader = torch.utils.data.DataLoader(
            test_data, batch_size=1,
            shuffle=False, num_workers=0, pin_memory=True)
        # Decay lr by 10x every 2 epochs (only used when fine-tuning; see train()).
        self.scheduler = lr_scheduler.StepLR(self._solver,
                                             last_epoch=-1,
                                             step_size=2,
                                             gamma=0.1)

    def train(self, iteration):
        """Train the network and return the best (SRCC, PLCC) pair.

        Runs ``options['epochs']`` epochs, evaluates after each one and
        checkpoints the model whenever the test SRCC improves.
        NOTE(review): the ``iteration`` parameter is unused — kept for
        interface compatibility with callers.
        """
        print('Training.')
        best_srcc = 0.0
        best_plcc = 0.0
        best_epoch = None
        print('Epoch\tTrain loss\tTrain_SRCC\tTest_SRCC\tTest_PLCC')
        for t in range(self._options['epochs']):
            time_start = time.time()
            epoch_loss = []
            pscores = []
            tscores = []
            num_total = 0
            for X, y in self._train_loader:
                # Data.
                X = torch.tensor(X.cuda())
                y = torch.tensor(y.cuda())
                # Clear the existing gradients.
                self._solver.zero_grad()
                # Forward pass.
                score = self._net(X)
                loss = self._criterion(score, y.view(len(score), 1).detach())
                epoch_loss.append(loss.item())
                # Prediction: accumulate predicted and ground-truth scores.
                num_total += y.size(0)
                pscores = pscores + score.cpu().tolist()
                tscores = tscores + y.cpu().tolist()
                # Backward pass.
                loss.backward()
                self._solver.step()
            train_srcc, _ = stats.spearmanr(pscores, tscores)
            test_srcc, test_plcc = self._consitency(self._test_loader)
            time_end = time.time()
            print('%d epoch done; total time = %f sec' % ((t + 1), (time_end - time_start)))
            # Checkpoint on improvement of the test SRCC.
            if test_srcc > best_srcc:
                best_srcc = test_srcc
                best_plcc = test_plcc
                best_epoch = t + 1
                print('*', end='')
                pwd = os.getcwd()
                if self._options['fc'] == True:
                    modelpath = os.path.join(pwd, 'fc_models', ('net_params' + '_best' + '.pkl'))
                else:
                    modelpath = os.path.join(pwd, 'db_models', ('net_params' + '_best' + '.pkl'))
                torch.save(self._net.state_dict(), modelpath)
            print('%d\t%4.10f\t%4.3f\t\t%4.4f\t\t%4.4f\t%4.4f' %
                  (t + 1, self._solver.param_groups[0]['lr'], sum(epoch_loss) / len(epoch_loss), train_srcc, test_srcc, test_plcc))
            # The lr schedule only applies when fine-tuning the full network.
            if self._options['fc'] != True:
                self.scheduler.step()
        print('Best at epoch %d, test srcc %f' % (best_epoch, best_srcc))
        return best_srcc, best_plcc

    def _consitency(self, data_loader):
        """Evaluate on ``data_loader`` and return (SRCC, PLCC).

        NOTE(review): the method name is a historical typo for
        "consistency"; renaming it would break existing callers.
        """
        self._net.train(False)
        num_total = 0
        pscores = []
        tscores = []
        for X, y in data_loader:
            # Data.
            X = torch.tensor(X.cuda())
            y = torch.tensor(y.cuda())
            # Prediction.
            score = self._net(X)
            pscores = pscores + score[0].cpu().tolist()
            tscores = tscores + y.cpu().tolist()
            num_total += y.size(0)
        test_srcc, _ = stats.spearmanr(pscores, tscores)
        tscores = torch.Tensor(tscores).reshape(-1).tolist()  # live compressed
        test_plcc, _ = stats.pearsonr(pscores, tscores)
        self._net.train(True)  # Set the model to training phase
        return test_srcc, test_plcc
def main():
"""The main function."""
import argparse
parser = argparse.ArgumentParser(
description='Train DB-CNN for BIQA.')
parser.add_argument('--base_lr', dest='base_lr', type=float, default=1e-5,
help='Base learning rate for training.')
parser.add_argument('--batch_size', dest='batch_size', type=int,
default=32, help='Batch size.')
parser.add_argument('--epochs', dest='epochs', type=int,
default=30, help='Epochs for training.')
parser.add_argument('--weight_decay', dest='weight_decay', type=float,
default=5e-4, help='Weight decay.')
parser.add_argument('--dataset', dest='dataset', type=str, default='kadis700k',
help='dataset: live|csiq|tid2013|livec|mlive|livecp|koniq10k|kadid10k|selftrain|kadis700k')
args = parser.parse_args()
if args.base_lr <= 0:
raise AttributeError('--base_lr parameter must >0.')
if args.batch_size <= 0:
raise AttributeError('--batch_size parameter must >0.')
if args.epochs < 0:
raise AttributeError('--epochs parameter must >=0.')
if args.weight_decay <= 0:
raise AttributeError('--weight_decay parameter must >0.')
options = {
'base_lr': args.base_lr,
'batch_size': args.batch_size,
'epochs': args.epochs,
'weight_decay': args.weight_decay,
'dataset': args.dataset,
'fc': [],
'train_index': [],
'test_index': []
}
path = {
'live': os.path.join('dataset', 'F:\dataset\databaserelease2'),
'csiq': os.path.join('dataset', 'S:\dataset\CSIQ'),
'tid2013': os.path.join('dataset', 'TID2013'),
# 'livec': os.path.join('dataset', 'F:\\dataset\\ChallengeDB_release\\Images'),
'livec': os.path.join('dataset', 'F:\\dataset\\ChallengeDB_release'),
'mlive': os.path.join('dataset', 'LIVEmultidistortiondatabase'),
'livecp': os.path.join('dataset', 'F:\\dataset\\LIVECompressed2'), # F:\\dataset\\LIVECompressed2\\level63
'koniq10k': os.path.join('dataset', 'F:\\dataset\\koniq_10k\\author\\koniq10k_512x384'), # 'F:\\dataset\\LIVECompressed2\\Koniq10k_JPEG_distorted_images'),
'kadid10k': os.path.join('dataset', 'F:\\dataset\\KADID-10k\\kadid10k'),
'selftrain': os.path.join('dataset', 'F:\\dataset\\KADID-10k\\kadid10k'),
'kadis700k': os.path.join('dataset', '/mnt/sda2/New/kadis700k/kadis700k'),
'fc_model': os.path.join('fc_models'),
'scnn_root': os.path.join('pretrained_scnn', 'net_params18.pkl'),
'fc_root': os.path.join('fc_models', 'net_params_best.pkl'),
'db_model': os.path.join('db_models'),
'db_root': os.path.join('db_models', 'net_params_best.pkl')
}
if options['dataset'] == 'live':
index = list(range(0, 29))
elif options['dataset'] == 'csiq':
index = list(range(0, 30))
elif options['dataset'] == 'tid2013':
index = list(range(0, 25))
elif options['dataset'] == 'mlive':
index = list(range(0, 15))
# elif options['dataset'] == 'livec':
# index = list(range(0, 10073))
# index_test = | |
: '2017-01-30T22:01:21Z',
## 'state' : 'active'
## }
api.update_conference_member(my_conf_id, my_member_id, mute=True, hold=True)
my_conf = api.get_conference_member(my_member_id)
## {
## 'addedTime': '2017-01-30T22:01:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : True,
## 'id' : 'member-memberId',
## 'joinTone' : True,
## 'leavingTone' : False,
## 'mute' : True,
## 'removedTime' : '2017-01-30T22:01:21Z',
## 'state' : 'active'
## }
"""
kwargs['joinTone'] = join_tone
kwargs['leavingTone'] = leaving_tone
kwargs['mute'] = mute
kwargs['hold'] = hold
path = '/users/%s/conferences/%s/members/%s' % (
self.user_id, conference_id, member_id)
self._make_request('post', path, json=kwargs)
def play_audio_to_conference_member(self,
conference_id,
member_id,
file_url=None,
sentence=None,
gender=None,
locale=None,
voice=None,
loop_enabled=None,
**kwargs):
"""
Play audio to a conference member
:param str conference_id: id of a conference
:param str member_id: id of a conference member
:param str file_url: The location of an audio file to play (WAV and MP3 supported).
:param str sentence: The sentence to speak.
:param str gender: The gender of the voice used to synthesize the sentence.
:param str locale: The locale used to get the accent of the voice used to synthesize the sentence.
:param str voice: The voice to speak the sentence.
:param str loop_enabled: When value is true, the audio will keep playing in a loop.
Example: Play audio to specific conference member::
api.play_audio_to_conference_member('conferenceId', 'memberId', fileUrl=http://host/path/file.mp3)
api.play_audio_to_conference_member('conferenceId', 'memberId',
sentence='Press 0 to complete call', gender='female')
# or with extension methods
api.play_audio_file_to_conference_member('conferenceId', 'memberId', 'http://host/path/file.mp3')
api.speak_sentence_to_conference_member('conferenceId', 'memberId', 'Hello')
"""
kwargs['fileUrl'] = file_url
kwargs['sentence'] = sentence
kwargs['gender'] = gender
kwargs['locale'] = locale
kwargs['voice'] = voice
kwargs['loopEnabled'] = loop_enabled
path = '/users/%s/conferences/%s/members/%s/audio' % (
self.user_id, conference_id, member_id)
self._make_request('post', path, json=kwargs)
# extensions
def speak_sentence_to_conference_member(self,
conference_id,
member_id,
sentence,
gender='female',
voice='susan',
locale='en_US',
tag=None):
"""
Speak sentence to a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a conference member
:type sentence: str
:param sentence: sentence to say
:type gender: str
:param gender: gender of voice
:type voice: str
:param voice: voice name
:type locale: str
:param locale: locale name
:type tag: str
:param tag: A string that will be included in the callback events of the call.
Example: Speak sentence to specific conference member::
api.speak_sentence_to_conference_member('conferenceId', 'memberId', 'Hello')
"""
self.play_audio_to_conference_member(conference_id, member_id,
sentence=sentence,
gender=gender,
voice=voice,
locale=locale,
tag=tag
)
def play_audio_file_to_conference_member(self, conference_id, member_id, file_url, tag=None):
"""
Play audio file to a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a conference member
:type file_url: str
:param file_url: URL to remote file to play
:type tag: str
:param tag: A string that will be included in the callback events of the call.
Example: Play an audio file to specific member::
api.play_audio_file_to_conference_member('conferenceId', 'memberId', 'http://host/path/file.mp3')
"""
self.play_audio_to_conference_member(conference_id, member_id,
file_url=file_url,
tag=tag
)
def remove_conference_member(self, conference_id, member_id):
"""
Remove a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a conference member
Example: Remove Member from Conference::
my_conf = api.get_conference('conferenceId')
my_conf_members = list(api.list_conference_members(my_conf['id']))
print(my_conf_members)
## [{ 'addedTime' : '2017-01-30T23:17:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : False,
## 'id' : 'member-memberId',
## 'joinTone' : False,
## 'leavingTone': False,
## 'mute' : False,
## 'state' : 'active'},
## { 'addedTime' : '2017-01-30T23:17:14Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId2',
## 'hold' : False,
## 'id' : 'member-memberId2',
## 'joinTone' : False,
## 'leavingTone': False,
## 'mute' : False,
## 'state' : 'active'}]
api.remove_conference_member(my_conf['id'], my_conf_members[1]['id'])
my_conf_members = list(api.list_conference_members(my_conf['id']))
print(my_conf_members)
## [{ 'addedTime' : '2017-01-30T23:17:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : False,
## 'id' : 'member-memberId',
## 'joinTone' : False,
## 'leavingTone': False,
## 'mute' : False,
## 'state' : 'active'},
## { 'addedTime' : '2017-01-30T23:17:14Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId2',
## 'hold' : False,
## 'id' : 'member-memberId2',
## 'joinTone' : False,
## 'leavingTone': False,
## 'mute' : False,
## 'state' : 'completed'}]
"""
self.update_conference_member(
conference_id, member_id, state='completed')
def hold_conference_member(self, conference_id, member_id, hold):
"""
Hold or unhold a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a conference member
:type hold: bool
:param hold: hold (if true) or unhold (if false) a member
Example: Put specific conference member on hold::
api.hold_conference_member('conferenceId', 'memberId', True)
"""
self.update_conference_member(conference_id, member_id, hold=hold)
def mute_conference_member(self, conference_id, member_id, mute):
"""
Mute or unmute a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a conference member
:type mute: bool
:param mute: mute (if true) or unmute (if false) a member
Example: Mute specific conference member::
api.mute_conference_member('conferenceId', 'memberId', True)
"""
self.update_conference_member(conference_id, member_id, mute=mute)
def terminate_conference(self, conference_id):
"""
Terminate of current conference
:type conference_id: str
:param conference_id: id of a conference
Example: End the Conference::
api.terminate_conference('conferenceId')
"""
self.update_conference(conference_id, state='completed')
def hold_conference(self, conference_id, hold):
"""
Hold or unhold a conference
:type conference_id: str
:param conference_id: id of a conference
:type hold: bool
:param hold: hold (if true) or unhold (if false) a conference
Example: Put entire confernce on hold, where no one can hear::
api.hold_conference('conferenceId', True)
"""
self.update_conference(conference_id, hold=hold)
def mute_conference(self, conference_id, mute):
"""
Mute or unmute a conference
:type conference_id: str
:param conference_id: id of a conference
:type mute: bool
:param mute: mute (if true) or unmute (if false) a conference
Example: Mute the entire Conference, where no one can speak::
api.mute_conference('conferenceId', True)
"""
self.update_conference(conference_id, mute=mute)
def list_domains(self, size=None, **kwargs):
"""
Get a list of domains
:param int size: Used for pagination to indicate the size of each page requested for querying a list of items. \
If no value is specified the default value is 25. (Maximum value 100)
:rtype: types.GeneratorType
:returns: list of domains
Example: Fetch domains and print::
domain_list = api.list_domains(size=10)
print(list(domain_list))
## [{ 'endpointsUrl': 'https://api.catapult.inetwork.com/v1/users/u-abc123/domains/endpoints',
## 'id' : 'rd-domainId',
## 'name' : 'siplearn1'},
## { 'endpointsUrl' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/domains/endpoints',
## 'id' : 'rd-domainId2',
## 'name' : 'siplearn2'}]
Example: Search for domain based on name::
domain_list = api.list_domains(size=100)
domain_name = ''
while domain_name != 'My Prod Site':
my_domain = next(domain_list)
domain_name = my_domain['name']
print(my_domain)
## { 'description' : 'Python Docs Example',
## 'endpointsUrl': 'https://api.catapult.inetwork.com/v1/users/u-abc123/domains/rd-domainId/endpoints',
## 'id' : 'rd-domainId',
## 'name' : 'My Prod Site'}
"""
kwargs['size'] = size
path = '/users/%s/domains' % self.user_id
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
def create_domain(self, name, description=None, **kwargs):
"""
Create a domain
:param str name: The name is a unique URI to be used in DNS lookups
:param str description: String to describe the domain
:rtype: str
:returns: id of created domain
Example: Create Domain::
domain_id = api.create_domain(name='qwerty', description='Python Docs Example')
print(domain_id)
# rd-domainId
"""
kwargs['name'] = name
kwargs['description'] = description
return self._make_request('post', '/users/%s/domains' % self.user_id, json=kwargs)[2]
def get_domain(self, domain_id):
"""
Get information about a domain
:type domain_id: str
:param domain_id: id of the domain
:rtype: dict
:returns: domain information
Example: Create then fetch domain::
domain_id = api.create_domain(name='qwerty', description='Python Docs Example')
print(domain_id)
# rd-domainId
my_domain = api.get_domain(domain_id)
print(my_domain)
## { 'description' : 'Python Docs Example',
## 'endpointsUrl': 'https://api.catapult.inetwork.com/v1/users/u-abc123/domains/rd-domainId/endpoints',
## 'id' : 'rd-domainId',
## 'name' : 'qwerty'}
"""
return self._make_request('get', '/users/%s/domains/%s' % (self.user_id, domain_id))[0]
def delete_domain(self, domain_id):
"""
Delete a domain
:type domain_id: str
:param domain_id: id of a domain
Example: Delete domain 'domainId'::
api.delete_domain('domainId')
"""
self._make_request('delete', '/users/%s/domains/%s' %
(self.user_id, domain_id))
def list_domain_endpoints(self, domain_id, size=None, **kwargs):
"""
Get a list of domain's endpoints
:type domain_id: str
:param domain_id: id of a domain
:param int size: Used for pagination to indicate the size of each page requested for querying a list of items.\
If no value is specified the default value is 25. (Maximum value 1000)
:rtype: types.GeneratorType
:returns: list of endpoints
Example: List and iterate over::
endpoint_list = api.list_domain_endpoints('rd-domainId', size=1000)
for endpoint in endpoint_list:
print(endpoint['id'])
##re-endpointId1
##re-endpointId2
Example: List and print all::
endpoint_list = api.list_domain_endpoints('rd-domainId', size=1000)
print(list(endpoint_list))
## [
## {
## 'applicationId':'a-appId',
## 'credentials' :{
## 'realm' :'creds.bwapp.bwsip.io',
## 'username' :'user1'
## },
## 'description' :"Your SIP Account",
## 'domainId' :'rd-domainId',
## 'enabled' :True,
## 'id' :'re-endpointId1',
## 'name' :'User1_endpoint',
## 'sipUri' :'sip:<EMAIL>.bwsip.io'
## },
## {
## 'applicationId':'a-appId',
## 'credentials' | |
80.754),
))
self.assertEqual(
testee,
AIAircraftShotdownByObjectEvent.from_primitive(testee.to_primitive()),
)
class AIAircraftShotdownByStationaryUnitEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for AIAircraftShotdownByStationaryUnitEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return AIAircraftShotdownByStationaryUnitEvent(AIAircraftShotdownByStationaryUnitInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=AIAircraftActor(id="r0100", flight_index=0),
            attacker=StationaryUnitActor(id='8165_Static'),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(AIAircraftShotdownByStationaryUnitEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("AIAircraftShotdownByStationaryUnitEvent")
        self.assertEqual(resolved, AIAircraftShotdownByStationaryUnitEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'AIAircraftShotdownByStationaryUnitEvent',
            'verbose_name': 'AI aircraft shot down by stationary unit',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'id': 'r0100', 'flight_index': 0},
                'attacker': {'id': '8165_Static'},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = AIAircraftShotdownByStationaryUnitEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class AIAircraftShotdownByTreeEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for AIAircraftShotdownByTreeEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return AIAircraftShotdownByTreeEvent(AIAircraftShotdownByTreeInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=AIAircraftActor(id="r0100", flight_index=0),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(AIAircraftShotdownByTreeEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("AIAircraftShotdownByTreeEvent")
        self.assertEqual(resolved, AIAircraftShotdownByTreeEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'AIAircraftShotdownByTreeEvent',
            'verbose_name': 'AI aircraft shot down by tree',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'id': 'r0100', 'flight_index': 0},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = AIAircraftShotdownByTreeEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class AIAircraftShotdownByParatrooperEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for AIAircraftShotdownByParatrooperEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return AIAircraftShotdownByParatrooperEvent(AIAircraftShotdownByParatrooperInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=AIAircraftActor(id="r0100", flight_index=0),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(AIAircraftShotdownByParatrooperEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("AIAircraftShotdownByParatrooperEvent")
        self.assertEqual(resolved, AIAircraftShotdownByParatrooperEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'AIAircraftShotdownByParatrooperEvent',
            'verbose_name': 'AI aircraft shot down by paratrooper',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'id': 'r0100', 'flight_index': 0},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = AIAircraftShotdownByParatrooperEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class HumanAircraftShotdownByAIAircraftEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for HumanAircraftShotdownByAIAircraftEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return HumanAircraftShotdownByAIAircraftEvent(HumanAircraftShotdownByAIAircraftInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=HumanAircraftActor(callsign="TheUser", aircraft="P-39D2"),
            attacker=AIAircraftActor(id="r0100", flight_index=0),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(HumanAircraftShotdownByAIAircraftEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("HumanAircraftShotdownByAIAircraftEvent")
        self.assertEqual(resolved, HumanAircraftShotdownByAIAircraftEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'HumanAircraftShotdownByAIAircraftEvent',
            'verbose_name': 'Human aircraft shot down by AI aircraft',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'callsign': 'TheUser', 'aircraft': 'P-39D2'},
                'attacker': {'id': 'r0100', 'flight_index': 0},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = HumanAircraftShotdownByAIAircraftEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class HumanAircraftShotdownByBridgeEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for HumanAircraftShotdownByBridgeEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return HumanAircraftShotdownByBridgeEvent(HumanAircraftShotdownByBridgeInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=HumanAircraftActor(callsign="TheUser", aircraft="P-39D2"),
            attacker=BridgeActor(id="Bridge159"),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(HumanAircraftShotdownByBridgeEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("HumanAircraftShotdownByBridgeEvent")
        self.assertEqual(resolved, HumanAircraftShotdownByBridgeEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'HumanAircraftShotdownByBridgeEvent',
            'verbose_name': 'Human aircraft shot down by bridge',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'callsign': 'TheUser', 'aircraft': 'P-39D2'},
                'attacker': {'id': 'Bridge159'},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = HumanAircraftShotdownByBridgeEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class HumanAircraftShotdownByBuildingEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for HumanAircraftShotdownByBuildingEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return HumanAircraftShotdownByBuildingEvent(HumanAircraftShotdownByBuildingInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=HumanAircraftActor(callsign="TheUser", aircraft="P-39D2"),
            attacker=BuildingActor(id='194_bld'),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(HumanAircraftShotdownByBuildingEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("HumanAircraftShotdownByBuildingEvent")
        self.assertEqual(resolved, HumanAircraftShotdownByBuildingEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'HumanAircraftShotdownByBuildingEvent',
            'verbose_name': 'Human aircraft shot down by building',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'callsign': 'TheUser', 'aircraft': 'P-39D2'},
                'attacker': {'id': '194_bld'},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = HumanAircraftShotdownByBuildingEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class HumanAircraftShotdownByHumanAircraftEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for HumanAircraftShotdownByHumanAircraftEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return HumanAircraftShotdownByHumanAircraftEvent(HumanAircraftShotdownByHumanAircraftInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=HumanAircraftActor(callsign="TheUser", aircraft="P-39D2"),
            attacker=HumanAircraftActor(callsign="TheUser2", aircraft="Bf-109F-4"),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(HumanAircraftShotdownByHumanAircraftEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("HumanAircraftShotdownByHumanAircraftEvent")
        self.assertEqual(resolved, HumanAircraftShotdownByHumanAircraftEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'HumanAircraftShotdownByHumanAircraftEvent',
            'verbose_name': 'Human aircraft shot down by human aircraft',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'callsign': 'TheUser', 'aircraft': 'P-39D2'},
                'attacker': {'callsign': 'TheUser2', 'aircraft': 'Bf-109F-4'},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = HumanAircraftShotdownByHumanAircraftEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class HumanAircraftShotdownByMovingUnitEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for HumanAircraftShotdownByMovingUnitEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return HumanAircraftShotdownByMovingUnitEvent(HumanAircraftShotdownByMovingUnitInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=HumanAircraftActor(callsign="TheUser", aircraft="P-39D2"),
            attacker=MovingUnitActor(id="0_Chief"),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(HumanAircraftShotdownByMovingUnitEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("HumanAircraftShotdownByMovingUnitEvent")
        self.assertEqual(resolved, HumanAircraftShotdownByMovingUnitEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'HumanAircraftShotdownByMovingUnitEvent',
            'verbose_name': 'Human aircraft shot down by moving unit',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'callsign': 'TheUser', 'aircraft': 'P-39D2'},
                'attacker': {'id': '0_Chief'},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = HumanAircraftShotdownByMovingUnitEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class HumanAircraftShotdownByMovingUnitMemberEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for HumanAircraftShotdownByMovingUnitMemberEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return HumanAircraftShotdownByMovingUnitMemberEvent(HumanAircraftShotdownByMovingUnitMemberInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=HumanAircraftActor(callsign="TheUser", aircraft="P-39D2"),
            attacker=MovingUnitMemberActor(id="0_Chief", member_index=0),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(HumanAircraftShotdownByMovingUnitMemberEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("HumanAircraftShotdownByMovingUnitMemberEvent")
        self.assertEqual(resolved, HumanAircraftShotdownByMovingUnitMemberEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'HumanAircraftShotdownByMovingUnitMemberEvent',
            'verbose_name': 'Human aircraft shot down by moving unit member',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'callsign': 'TheUser', 'aircraft': 'P-39D2'},
                'attacker': {'id': '0_Chief', 'member_index': 0},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = HumanAircraftShotdownByMovingUnitMemberEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class HumanAircraftShotdownByObjectEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for HumanAircraftShotdownByObjectEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return HumanAircraftShotdownByObjectEvent(HumanAircraftShotdownByObjectInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=HumanAircraftActor(callsign="TheUser", aircraft="P-39D2"),
            attacker=ObjectActor(id='3do/Buildings/Airdrome/BarrelBlock1/mono.sim'),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(HumanAircraftShotdownByObjectEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("HumanAircraftShotdownByObjectEvent")
        self.assertEqual(resolved, HumanAircraftShotdownByObjectEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'HumanAircraftShotdownByObjectEvent',
            'verbose_name': 'Human aircraft shot down by object',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'callsign': 'TheUser', 'aircraft': 'P-39D2'},
                'attacker': {'id': '3do/Buildings/Airdrome/BarrelBlock1/mono.sim'},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = HumanAircraftShotdownByObjectEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class HumanAircraftShotdownByStationaryUnitEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for HumanAircraftShotdownByStationaryUnitEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return HumanAircraftShotdownByStationaryUnitEvent(HumanAircraftShotdownByStationaryUnitInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=HumanAircraftActor(callsign="TheUser", aircraft="P-39D2"),
            attacker=StationaryUnitActor(id='8165_Static'),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(HumanAircraftShotdownByStationaryUnitEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("HumanAircraftShotdownByStationaryUnitEvent")
        self.assertEqual(resolved, HumanAircraftShotdownByStationaryUnitEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'HumanAircraftShotdownByStationaryUnitEvent',
            'verbose_name': 'Human aircraft shot down by stationary unit',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'callsign': 'TheUser', 'aircraft': 'P-39D2'},
                'attacker': {'id': '8165_Static'},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = HumanAircraftShotdownByStationaryUnitEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class HumanAircraftShotdownByTreeEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for HumanAircraftShotdownByTreeEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return HumanAircraftShotdownByTreeEvent(HumanAircraftShotdownByTreeInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=HumanAircraftActor(callsign="TheUser", aircraft="P-39D2"),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(HumanAircraftShotdownByTreeEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("HumanAircraftShotdownByTreeEvent")
        self.assertEqual(resolved, HumanAircraftShotdownByTreeEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'HumanAircraftShotdownByTreeEvent',
            'verbose_name': 'Human aircraft shot down by tree',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'callsign': 'TheUser', 'aircraft': 'P-39D2'},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = HumanAircraftShotdownByTreeEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class HumanAircraftShotdownByParatrooperEventTestCase(unittest.TestCase):
    """Serialization round-trip tests for HumanAircraftShotdownByParatrooperEvent."""

    @staticmethod
    def _make_testee():
        # Representative event instance shared by the serialization tests.
        return HumanAircraftShotdownByParatrooperEvent(HumanAircraftShotdownByParatrooperInfo(
            timestamp=datetime.datetime(2020, 12, 31, 23, 45, 59),
            target=HumanAircraftActor(callsign="TheUser", aircraft="P-39D2"),
            pos=Point3D(71903.14, 41619.023, 80.754),
        ))

    def test_derives_from_ShootdownEvent(self):
        self.assertTrue(issubclass(HumanAircraftShotdownByParatrooperEvent, ShotdownEvent))

    def test_is_registered(self):
        resolved = registry.get_class_by_name("HumanAircraftShotdownByParatrooperEvent")
        self.assertEqual(resolved, HumanAircraftShotdownByParatrooperEvent)

    def test_to_primitive(self):
        expected = {
            'category': 'shotdown',
            'name': 'HumanAircraftShotdownByParatrooperEvent',
            'verbose_name': 'Human aircraft shot down by paratrooper',
            'help_text': None,
            'data': {
                'timestamp': '2020-12-31T23:45:59',
                'target': {'callsign': 'TheUser', 'aircraft': 'P-39D2'},
                'pos': {'x': 71903.14, 'y': 41619.023, 'z': 80.754},
            },
        }
        self.assertEqual(self._make_testee().to_primitive(), expected)

    def test_from_primitive(self):
        testee = self._make_testee()
        restored = HumanAircraftShotdownByParatrooperEvent.from_primitive(testee.to_primitive())
        self.assertEqual(testee, restored)
class AIAircraftShotdownByAIAircraftAndAIAircraftEventTestCase(unittest.TestCase):
def test_derives_from_ShootdownEvent(self):
self.assertTrue(issubclass(AIAircraftShotdownByAIAircraftAndAIAircraftEvent, ShotdownEvent))
def test_is_registered(self):
self.assertEqual(
registry.get_class_by_name("AIAircraftShotdownByAIAircraftAndAIAircraftEvent"),
AIAircraftShotdownByAIAircraftAndAIAircraftEvent,
)
def | |
import sys
import time
from math import *
from random import *

# NOTE(review): `global` statements at module scope are no-ops; the names
# below are module globals anyway. Kept as-is.
global valm1
global stratDict

# Deep recursion is needed by the memoised partition/strategy functions.
# NOTE(review): 2**31-1 may exceed the platform limit on some systems.
sys.setrecursionlimit(2**31-1)

# Cache mapping a game-state string -> its computed value, seeded with the
# trivial empty and single-counter states.
stratDict=dict()
partitions=dict()
stratDict[""]=0
stratDict["1"]=1
partitions[1]=[[1]]
partitions[0]=[]

# Memo tables used by partition() and goodpartition() respectively.
q = { 1: [[1]] }
g={ 1: [[1]] }

valm1=dict()
amountsasdf=[]
placing=dict()

# Create the on-disk cache file if it does not exist yet; mode 'x' raises
# when the file is already there, which is silently ignored.
# NOTE(review): the file handle returned by open() is never closed here.
try:
    open("stratC.rtf",'x')
except:
    pass
def parses(document, maxs=-1, begin=0):
    """Load ``key=value`` lines from *document* into the global ``stratDict``.

    Lines that do not start with a decimal digit are skipped.  The key is
    everything before the first ``=``; the value is built from the decimal
    digits that appear after it (all other characters are ignored).

    :param document: iterable of text lines (e.g. an open file object)
    :param maxs: stop once this many entries are known; <= 0 means no limit
    :param begin: starting value of the known-entry counter
    :returns: "" when the ``maxs`` limit is reached, otherwise None
    """
    global stratDict
    amount = begin
    for line in document:
        if maxs > 0 and amount >= maxs:
            return ""
        # Only data lines start with a decimal digit (the original compared
        # against each of '0'..'9' explicitly; this is equivalent).
        if line[0] not in "0123456789":
            continue
        # Split at the first '=': key on the left, digits of the value on
        # the right.  With no '=', the whole line is the key and value is 0.
        whatis, _, rest = line.partition("=")
        num = 0
        for ch in rest:
            if ch in "0123456789":
                num = num * 10 + int(ch)
        # Only count and store keys we have not seen before (the original
        # used a "HI" sentinel with .get(), doing the lookup twice).
        if whatis not in stratDict:
            amount += 1
            stratDict[whatis] = num
            if amount % 1000000 == 0:
                print(amount//1000000,"million have already been added to the dictionary")
def parse(begin=2):
    """Interactively load cached entries from stratC.rtf into stratDict."""
    print("What is your prefered cache size?")
    print("Every million enteries is about 10 seconds to load.")
    # Open first, then prompt — matches the original evaluation order.
    document = open("stratC.rtf",'r')
    parses(document, int(input("Enter: ")), begin)
def stringtodict(string):
    """Parse a ``"key:digits"`` encoded string into a dict.

    A ``:`` marks the start of a value; the run of decimal digits that
    follows it becomes the value (stored as a string) for the key
    accumulated so far.  All other characters are appended to the running
    key, matching the original behaviour.

    BUG FIX: the original read value digits with an unbounded
    ``while True: number += string[j]`` and raised IndexError whenever the
    value ran to the end of the string; the scan is now bounds-checked.

    :param string: encoded text
    :returns: dict mapping key string -> value string
    """
    dicts = dict()
    # Index of the first character after a ':' (None until one is seen;
    # the original used a huge integer sentinel for the same purpose).
    secondbeginning = None
    lists = ''
    for i in range(len(string)):
        if string[i] == ':':
            secondbeginning = i + 1
        elif secondbeginning == i:
            # Greedily collect decimal digits, stopping at the first
            # non-digit or at the end of the string.
            number = ''
            j = i
            while j < len(string) and string[j].isdigit():
                number += string[j]
                j += 1
            dicts[lists] = number
        else:
            lists += string[i]
    return dicts
def Clean(list):
    """Remove every item smaller than 1, in place, and return the list.

    BUG FIX: the original removed items while iterating over the same list,
    which skips the element that follows each removal (e.g. consecutive
    zeros were only half-removed).  Rebuilding via a slice assignment keeps
    the in-place mutation contract while removing every match.
    """
    list[:] = [item for item in list if item >= 1]
    return list
def partition(n):
    """Return all partitions of *n* as non-increasing lists, memoised in ``q``.

    FIX: the original guarded the memo lookup with a bare ``except:`` which
    silently swallowed every error (including NameError); a plain membership
    test expresses the intent without hiding failures.
    """
    if n in q:
        return q[n]
    result = [[n]]
    for i in range(1, n):
        a = n - i
        # Prepend `a` to each partition of i whose largest part fits under it,
        # keeping the parts in non-increasing order.
        for r in partition(i):
            if r[0] <= a:
                result.append([a] + r)
    q[n] = result
    return result
def goodpartition(n):
    """Return the partitions of *n* whose leading part is built from
    ``i >= n // 4``, memoised in ``g``.

    BUG FIX: the original iterated ``range((n/4)//1, n)`` — ``(n/4)//1`` is a
    float in Python 3 and ``range()`` raises TypeError on float arguments.
    Integer floor division produces the same value as an int.
    Also replaced the bare ``except:`` memo guard with a membership test.
    """
    if n in g:
        return g[n]
    result = []
    for i in range(n // 4, n):
        a = n - i
        for r in partition(i):
            if r[0] <= a:
                result.append([a] + r)
    g[n] = result
    return result
def unit(list):
    """Remove duplicate sub-lists in place, keeping the first occurrence of
    each (identity is the comma-joined string form from :func:`stringy`).

    BUG FIX: the original removed duplicates while iterating over the same
    list, which skips the element after each removal, so consecutive
    duplicates survived.  Building the kept list separately and assigning
    it back preserves the in-place contract while catching every duplicate.
    """
    seen = dict()
    kept = []
    for item in list:
        key = stringy(item)
        if key not in seen:
            seen[key] = 'Here'
            kept.append(item)
    list[:] = kept
    return list
def addlist(originallist, addinglist):
    """Append every element of *addinglist* to *originallist* and return it."""
    originallist.extend(addinglist)
    return originallist
def addlisttolist(originallist, addinglist):
    """Append every element of *originallist* onto each sub-list of
    *addinglist*, then return *addinglist*."""
    for sublist in addinglist:
        sublist.extend(originallist)
    return addinglist
def nimsums(piles):
    """Return the nim-sum (bitwise XOR) of all piles; 0 for an empty list."""
    total = 0
    for pile in piles:
        try:
            total ^= pile
        except:
            # Non-integer pile: report the offending list and carry on,
            # matching the original best-effort behaviour.
            print(piles)
    return total
def opt(i):
    """Return the powers of two below 2**i, followed by 2**i - 1."""
    values = [1 << j for j in range(i)]
    values.append((1 << i) - 1)
    return values
def dup(lists):
    """Cancel items out pairwise.

    Each time a value is seen an even-numbered time it is removed again and
    added to a running total.  Returns ``[kept, removed_total]``.
    """
    parity = dict()
    kept = []
    removed = 0
    for value in lists:
        if parity.get(value, "Not Here") == 1:
            # Second sighting of the pair: cancel it out.
            kept.remove(value)
            parity[value] = 0
            removed += value
        else:
            parity[value] = 1
            kept.append(value)
    return [kept, removed]
def stringy(lists):
    """Join the items of *lists* into a comma-separated string."""
    return ",".join(str(item) for item in lists)
def stringtolist(strs):
    """Parse a comma-separated string of integers back into a list of ints."""
    if strs == '':
        return []
    result = []
    current = ''
    for ch in strs:
        if ch == ',':
            result.append(current)
            current = ''
        else:
            # Grow the current number one digit at a time, as the original did.
            current = int(str(current) + ch)
    result.append(current)
    return result
def xth(loc, list):
    """Locate the *loc*-th counter across the piles in *list*.

    Returns ``(pile_index, offset_within_pile)``; ``(0, 0)`` when no
    positive piles remain.

    BUG FIXES:
    - the original removed non-positive piles while iterating the same
      list, skipping the element after each removal; the slice assignment
      removes them all while keeping the in-place mutation.
    - the recursive case called ``xth`` twice with identical arguments
      (exponential blowup on long pile lists); the result is now computed
      once and unpacked.
    """
    list[:] = [item for item in list if item >= 1]
    if len(list) == 0:
        return (0, 0)
    if loc <= list[0]:
        return (0, loc)
    pile, offset = xth(loc - list[0], list[1:])
    return (pile + 1, offset)
def N(piles,main=False,returns=False,move=True):
    """Evaluate a multi-pile position whose nim-sum may be non-zero.

    Companion to OptCandy(): for each pile it tries the reduction that
    cancels the nim-sum, scores the resulting position with OptCandy(), and
    — depending on the flags — returns the best move (``returns=True``),
    prints/auto-plays it (``main=True``, with ``main=="a"`` adding progress
    output), or asks the user to pick interactively (``move=True``).
    """
    # Equal piles cancel pairwise; bank one of each pair and recurse.
    if dup(piles)[1]!=0 and main==False and returns==False and move==True:
        return(dup(piles)[1]+N(dup(piles)[0]))
    total=nimsums(piles)
    # NOTE(review): removing from a list while iterating it skips the
    # element after each removal; this cleanup idiom recurs throughout the
    # file — confirm before changing, callers may depend on the partial clean.
    for item in piles:
        if item<1:
            piles.remove(item)
    remainders=[]
    newremainders=[]
    recieve=[]
    moves=[]
    locations=[]
    # Trivial positions: empty, one pile, or two piles.
    if len(piles)==0:
        return(0)
    if len(piles)==1:
        return(piles[0])
    if len(piles)==2:
        return(max(piles))
    # remainders[i] = how much pile i must shrink to zero the nim-sum.
    for i in range(len(piles)):
        remainders.append(piles[i]-nimsums([total,piles[i]]))
    for i in range(len(remainders)):
        if remainders[i]>=0:
            newremainders.append(remainders[i])
            locations.append(i)
            for item in piles:
                if item<1:
                    piles.remove(item)
            if i==0:
                # Build candidate position k: pile 0 reduced, rest unchanged.
                k=[piles[i]-remainders[i]]
                z=piles[i+1:]
                # NOTE(review): this inner loop reuses the outer variable i,
                # so the progress percentage below uses the clobbered value.
                for i in range(len(z)):
                    k.append(z[i])
                for item in k:
                    if item<1:
                        k.remove(item)
                k.sort()
                amount=OptCandy(k)
                if main=="a":
                    print("Roughly " + str(i/len(remainders)*100) +" percent through")
            else:
                k=piles[:i]
                k.append(piles[i]-remainders[i])
                z=piles[i+1:]
                for j in range(len(z)):
                    k.append(z[j])
                for item in k:
                    if item<1:
                        k.remove(item)
                k.sort()
                amount=OptCandy(k)
                if main=="a":
                    print("Roughly " + str(i/len(remainders)*100) +" percent through")
            # Track the best achievable take: total minus opponent's optimum.
            if len(recieve)==0 or max(recieve)<sum(piles)-amount:
                moves=[]
            recieve.append(sum(piles)-amount)
            if max(recieve)==sum(piles)-amount:
                moves.append(k)
    j=recieve.index(max(recieve))
    if returns==True:
        print(moves)
        print(j)
        return(moves[0])
    elif main==True:
        # Deduplicate the candidate moves before showing them.
        newmoves=[]
        for item in moves:
            item.sort()
            for thing in item:
                if thing<1:
                    item.remove(thing)
            try:
                newmoves.remove(item)
            except:
                pass
            finally:
                newmoves.append(item)
        moves=newmoves
        moves=unit(moves)
        if len(moves)==1:
            moves=moves[0]
        print(moves)
    elif move==True:
        print("Your options are: ",moves)
        print("1 is first, 2 is second, ...")
        moves=moves[int(input())-1]
        print("You chose: ",moves)
    else:
        moves=moves[0]
    # Hand the chosen move back to the optimal-play driver.
    time.sleep(.001)
    OptCandy(moves,True,returns,move)
    return(max(recieve))
def oneup():
    """Grow every cached position from the global ``stratDict`` by one candy
    and re-solve each variant with OptCandy() so its value gets cached too.
    """
    for item in stratDict.keys():
        item=stringtolist(item)
        for j in range(1):
            for i in range(len(item)+1):
                # NOTE(review): newitem aliases item (no copy), so every
                # iteration mutates the same list; and when i==len(item) the
                # loop *index* i is appended rather than a pile size.
                # Confirm this is intended before relying on the output.
                newitem=item
                if i==len(item):
                    newitem.append(i)
                else:
                    print(newitem)
                    newitem[i]=item[i]+1
                OptCandy(newitem)
def OptCandy(position,main=False,returns=False,play=True):
    """Return the most candy the player to move can collect from *position*.

    *position* is a list of pile sizes. Values are memoized in the global
    ``stratDict`` (keyed by stringy(position)) and appended to the file
    "stratC.rtf". Flags: ``main`` plays the whole game out loud
    (``main=="a"`` adds progress output), ``returns`` returns the chosen
    move instead of a value, and ``play`` enables interactive selection.
    Non-zero nim-sum positions are delegated to N().
    """
    # Equal piles cancel pairwise: bank one of each pair, solve the rest.
    if dup(position)[1]!=0 and main==False and returns==False and play==True:
        return(dup(position)[1]+OptCandy(dup(position)[0]))
    for item in position:
        if item<1:
            position.remove(item)
    position.sort()
    # Reject non-numeric input outright.
    isNum=True
    for item in position:
        if str(item).isnumeric()==False:
            isNum=False
    if isNum==False:
        print("YOU WANTED TO BREAK MY PROGRAM YOU MONSTER")
        return("Nope!")
    # Base cases and memo lookup (skipped when a move must be returned).
    if returns==True:
        pass
    elif len(position)==0:
        return(0)
    elif len(position)==1:
        stratDict[stringy(position)]=position[0]
        return(position[0])
    elif len(position)==2:
        stratDict[stringy(position)]=max(position)
        return(max(position))
    elif stratDict.get(stringy(position),"RED ALERT")!="RED ALERT":
        for item in position:
            if item<1:
                position.remove(item)
        if main==True or returns==True:
            pass
        else:
            return(stratDict[stringy(position)])
    for item in position:
        if item<1:
            position.remove(item)
    if nimsums(position)==0:
        # P-position: brute-force every single move, indexed by flat candy.
        currentmaX=0
        currentIndex=0
        move=[]
        for i in range(sum(position)):
            # xth() maps the flat candy index to (pile, amount taken).
            thing=xth(i+1,position)
            wtf=position[:thing[0]]
            wtf.append(position[thing[0]]-thing[1])
            asdf=position[thing[0]+1:]
            wtf=addlist(wtf,asdf)
            wtf.sort()
            for item in position:
                if item<1:
                    position.remove(item)
            item=OptCandy(wtf)
            if main=="a":
                print("Roughly " + str(i/sum(position)*100) +" percent through")
            # Our take = everything minus what the opponent then extracts.
            item=sum(position)-item
            if item>currentmaX:
                currentmaX=item
                currentIndex=i
                move=[wtf]
            if item==currentmaX:
                move.append(wtf)
        # Persist the computed value to the memo dict and the strategy file.
        stratDict[stringy(position)]=currentmaX
        f=open("stratC.rtf","a")
        f.write("\n"+stringy(position)+"="+str(currentmaX))
        f.close()
        if returns==True:
            move=move[0]
            move=Clean(move)
            return(move)
        elif main==True:
            move=unit(move)
            if len(move)==1:
                move=move[0]
            move=Clean(move)
            print(move)
            OptCandy(move,True)
            return(currentmaX)
        elif play==True:
            print("Your options are: ",move)
            print("1 for first, 2 for second, so on")
            move=move[int(input())-1]
            print("You chose: ",move)
            OptCandy(move,True)
            return(currentmaX)
        else:
            move=move[0]
            OptCandy(move,True,returns,play)
        # NOTE(review): the following else cannot bind to the if/elif chain
        # above (which already has an else) — as transcribed this does not
        # parse cleanly; the original file may mix tabs and spaces here.
        else:
            return(currentmaX)
    else:
        # N-position: delegate to N(), cache and log its verdict.
        stratDict[stringy(position)]=N(position,main,returns,play)
        f=open("stratC.rtf",'a')
        f.write("\n"+stringy(position)+"="+str(N(position)))
        f.close()
        if returns==True:
            return(N(position,main,returns,play))
        return(stratDict[stringy(position)])
def VerboseOutput(position,loud=True):
    """Print a full analysis of *position*: its N/P status, the optimal
    move(s), each player's haul under optimal play, and the final margin.

    Note: OptCandy() may mutate *position* (cleaning and sorting it), so
    the nim-sum and total are deliberately re-evaluated at each print
    rather than hoisted into locals.
    """
    if nimsums(position)==0:
        print("P")
        print("The moves are:")
    else:
        print("N")
        print("The moves are:")
    x=OptCandy(position,True,False,loud)
    if nimsums(position)==0:
        print("Loser (first person):")
    else:
        print("Winner (first person):")
    print(x)
    if nimsums(position)==0:
        print("Winner (second person):")
    else:
        print("Loser (second person):")
    print(sum(position)-x)
    print("The difference is:")
    print(abs(2*x-sum(position)))
    if 2*x-sum(position)<0:
        print("However, the V(game) we defined would tell you")
        print(2*x-sum(position))
def OneHeuristicOptimalPlace():
    """Fill the global ``placing`` table with one known-good placement
    (a nim-sum-zero pile split) for each even candy count up to 80.

    Invariant: every value sums to its key and XORs to zero. Two data typos
    in the original are fixed below, both confirmed against the sibling
    HeuristicOptimalPlacings() table:
      * placing[30] was [2,4,8,14] (sums to 28 and duplicated placing[28]);
      * placing[68] contained 15 where 16 was intended (summed to 67).
    """
    placing[2]=[1,1]
    placing[4]=[2,2]
    placing[6]=[1,2,3]
    placing[8]=[1,1,1,2,3]
    placing[10]=[1,4,5]
    placing[12]=[2,4,6]
    placing[14]=[1,2,4,7]
    placing[16]=[1,1,1,2,4,7]
    placing[18]=[1,8,9]
    placing[20]=[2,8,10]
    placing[22]=[1,2,8,11]
    placing[24]=[4,8,12]
    placing[26]=[1,4,8,13]
    placing[28]=[2,4,8,14]
    placing[30]=[1,2,4,8,15]
    placing[32]=[1,1,1,2,4,8,15]
    placing[34]=[1,2,2,2,4,8,15]
    placing[36]=[1,2,3,3,4,8,15]
    placing[38]=[1,2,16,19]
    placing[40]=[4,16,20]
    placing[42]=[1,4,16,21]
    placing[44]=[2,4,16,22]
    placing[46]=[1,2,4,16,23]
    placing[48]=[1,1,1,2,4,16,23]
    placing[50]=[1,8,16,25]
    placing[52]=[2,8,16,26]
    placing[54]=[1,2,8,16,27]
    placing[56]=[4,8,16,28]
    placing[58]=[1,4,8,16,29]
    placing[60]=[2,4,8,16,30]
    placing[62]=[1,2,4,8,16,31]
    placing[64]=[1,1,1,2,4,8,16,31]
    placing[66]=[1,2,2,2,4,8,16,31]
    placing[68]=[1,2,3,3,4,8,16,31]
    placing[70]=[1,2,4,4,4,8,16,31]
    placing[72]=[1,2,4,5,5,8,16,31]
    placing[74]=[1,5,32,36]
    placing[76]=[2, 4, 32, 38]
    placing[78]=[1, 2, 4, 32, 39]
    placing[80]=[1,1,1,2,4,32,39]
def HeuristicOptimalPlacings():
    """Fill the global ``placing`` table with one or more known-good
    placements (nim-sum-zero pile splits) for each even candy count up
    to 68; each key maps to a list of candidate positions.

    Invariant: every candidate sums to its key and XORs to zero. Two data
    typos in the original are fixed below:
      * placing[32] was [[1,1,1,1,4,8,15]] (sums to 31) — the sibling
        tables use [1,1,1,2,4,8,15];
      * placing[40]'s first entry was [4,16,29] (sums to 49) —
        AllPlacements() has [4,16,20].
    """
    placing[2]=[[1,1]]
    placing[4]=[[1,1,1,1],[2,2]]
    placing[6]=[[1,2,3]]
    placing[8]=[[1,1,1,2,3]]
    placing[10]=[[1,4,5]]
    placing[12]=[[2,4,6]]
    placing[14]=[[1,2,4,7]]
    placing[16]=[[1,1,1,2,4,7]]
    placing[18]=[[1,8,9],[1,2,2,2,4,7]]
    placing[20]=[[2,8,10]]
    placing[22]=[[1,2,8,11]]
    placing[24]=[[4,8,12],[1,1,1,2,8,11]]
    placing[26]=[[1,4,8,13]]
    placing[28]=[[2,4,8,14]]
    placing[30]=[[1,2,4,8,15]]
    placing[32]=[[1,1,1,2,4,8,15]]
    placing[34]=[[1,2,2,2,4,8,15]]
    placing[36]=[[1,2,3,3,4,8,15]]
    placing[38]=[[1,2,16,19]]
    placing[40]=[[4,16,20],[1,1,1,2,16,19]]
    placing[42]=[[1,4,16,21],[1,5,16,20]]
    placing[44]=[[2,4,16,22]]
    placing[46]=[[1,2,4,16,23]]
    placing[48]=[[23,16,4,2,1,1,1]]
    placing[50]=[[1,8,16,25]]
    placing[52]=[[2,8,16,26]]
    placing[54]=[[1,2,8,16,27]]
    placing[56]=[[4,8,16,28],[1,1,1,2,8,16,27]]
    placing[58]=[[1,4,8,16,29]]
    placing[60]=[[2,4,8,16,30]]
    placing[62]=[[1,2,4,8,16,31]]
    placing[64]=[[1,1,1,2,4,8,16,31]]
    placing[66]=[[1,2,2,2,4,8,16,31]]
    placing[68]=[[1,2,3,3,4,8,16,31]]
def AllPlacements():
    """Fill the global ``placing`` table with *every* known placement
    (nim-sum-zero pile split) for each even candy count up to 84; each
    key maps to a list of candidate positions, all summing to the key.
    """
    placing[2]=[[1,1]]
    placing[4]=[[1,1,1,1],[2,2]]
    placing[6]=[[1,2,3]]
    placing[8]=[[1,1,1,2,3]]
    placing[10]=[[1,4,5]]
    placing[12]=[[2,4,6]]
    placing[14]=[[1,2,4,7]]
    placing[16]=[[1,1,1,2,4,7]]
    placing[18]=[[1,1,1,1,1,2,4,7],[1,8,9],[2,2,1,2,4,7]]
    placing[20]=[[2,8,10]]
    placing[22]=[[1,2,8,11]]
    placing[24]=[[1,1,1,2,8,11],[4,8,12]]
    placing[26]=[[1,4,8,13]]
    placing[28]=[[2,4,8,14]]
    placing[30]=[[1,2,4,8,15]]
    placing[32]=[[1,1,1,2,4,8,15]]
    placing[34]=[[1,1,1,1,1,2,4,8,15],[2,2,1,2,4,8,15]]
    placing[36]=[[1,1,1,1,1,1,1,2,4,8,15],[2,2,1,1,1,2,4,8,15],[3,3,1,2,4,8,15]]
    placing[38]=[[1,2,16,19]]
    placing[40]=[[1,1,1,2,16,19],[4,16,20]]
    placing[42]=[[1,4,16,21],[1,5,16,20]]
    placing[44]=[[2,4,16,22]]
    placing[46]=[[1,2,4,16,23]]
    placing[48]=[[1,1,1,2,4,16,23]]
    placing[50]=[[1,8,16,25]]
    placing[52]=[[2,8,16,26]]
    placing[54]=[[1,2,8,16,27]]
    placing[56]=[[1,1,1,2,8,16,27],[4,8,16,28]]
    placing[58]=[[1,4,8,16,29]]
    placing[60]=[[2,4,8,16,30]]
    placing[62]=[[1,2,4,8,16,31]]
    placing[64]=[[1,1,1,2,4,8,16,31]]
    placing[66]=[[1,1,1,1,1,2,4,8,16,31],[2,2,1,2,4,8,16,31]]
    placing[68]=[[1,2,3,3,4,8,16,31]]
    placing[70]=[[1, 2, 5, 9, 16, 31, 3, 3], [1, 2, 4, 24, 31, 4, 4], [1, 3, 5, 8, 16, 31, 3, 3], [1, 4, 7, 12, 18, 28], [1, 2, 4, 12, 23, 28], [1, 2, 3, 4, 11, 16, 31, 1, 1], [2, 5, 8, 16, 31, 4, 4], [1, 2, 4, 15, 16, 24, 4, 4], [4, 5, 6, 8, 16, 31], [1, 2, 32, 35], [2, 4, 7, 12, 17, 28], [1, 2, 4, 7, 12, 16, 28], [1, 3, 4, 9, 16, 31, 3, 3], [1, 2, 3, 7, 8, 16, 31, 1, 1], [3, 4, 8, 16, 31, 4, 4], [1, 2, 7, 11, 16, 31, 1, 1], [2, 5, 12, 22, 29], [2, 4, 8, 17, 31, 4, 4], [2, 4, 9, 16, 31, 4, 4], [1, 2, 4, 8, 17, 30, 4, 4], [2, 3, 6, 8, 16, 31, 2, 2], [1, 2, 3, 6, 9, 16, 31, 1, 1], [3, 5, 9, 16, 31, 3, 3], [2, 5, 6, 12, 16, 29], [1, 2, 12, 16, 31, 4, 4], [2, 3, 4, 10, 16, 31, 2, 2], [1, 6, 8, 16, 31, 4, 4]]
    placing[72]=[[1,2,4,5,5,8,16,31]]
    placing[74]=[[1,5,32,36],[1,2,4,6,6,8,16,31]]
    placing[76]=[[2,4,32,38]]
    placing[78]=[[1,2,4,32,39]]
    placing[80]=[[1,1,1,2,4,32,39]]
    placing[82]=[[1,8,32,41],[1,9,32,40],[1,1,1,1,1,2,4,32,39],[1,2,2,2,4,32,39]]
    placing[84]=[[2,10,32,40]]
def place(candies,doinfo=False,restrict=True):
    """Find the best nim-sum-zero way to split *candies* candies into piles.

    Cached results come from the global ``placing``; otherwise every
    partition (from goodpartition(), or partition() when
    ``restrict=False``) is scored with OptCandy() and the highest-value
    P-positions are kept. ``doinfo`` prints a celebratory report via
    VerboseOutput(). Odd totals are rejected (no nim-sum-zero split exists).
    """
    if placing.get(candies,"Not existing")!="Not existing":
        if doinfo==True:
            print(" ")
            print(" ")
            print(" ")
            print(" ")
            print(" ")
            print("WE HAVE A WINNAH!!!")
            print("THE LUCKY POSITION IS ...")
            print(placing[candies])
            print("THIS POSITION HAD THE HONOR OF HAVING ",candies," CANDIES!!!")
            # placing may hold one position or a list of them; fall back to
            # describing just the first candidate.
            try:
                VerboseOutput(placing[candies])
            except:
                print("For example, look at ",placing[candies][0])
                VerboseOutput(placing[candies][0])
        return(placing[candies])
    elif candies%2==1:
        print("Come on. ")
    else:
        amount=-1
        position=[]
        tick=0
        if restrict==False:
            gp=partition(candies)
        else:
            gp=goodpartition(candies)
        for item in gp:
            tick+=1
            # Keep only P-positions with a plausible number of piles
            # (at most log2(candies) + 2 when restricted).
            if nimsums(item)==0 and len(item)<=(log(candies,2))//1+2:
                if OptCandy(item)>amount:
                    amount=OptCandy(item)
                    item.sort()
                    position=[item]
                elif OptCandy(item)==amount:
                    item.sort()
                    position.append(item)
            elif nimsums(item)==0 and restrict==False:
                if OptCandy(item)>amount:
                    amount=OptCandy(item)
                    item.sort()
                    position=[item]
                elif OptCandy(item)==amount:
                    item.sort()
                    position.append(item)
        position=unit(position)
        if doinfo==True:
            print(" ")
            print(" ")
            print(" ")
            print(" ")
            print(" ")
            print("WE HAVE A WINNAH!!!")
            print("THE LUCKY POSITION IS ...")
            print(position)
            print("THIS POSITION HAD THE HONOR OF HAVING ",candies," CANDIES!!!")
            try:
                VerboseOutput(position[0])
            except:
                try:
                    VerboseOutput(position)
                except:
                    pass
        placing[candies]=position
        return(position)
def countingcounting(begin, restrict=True):
    """Search placements for begin, begin+2, begin+4, ... indefinitely.

    The original recursed from a ``finally`` block with a bare ``except:``,
    which both swallowed KeyboardInterrupt and crashed with RecursionError
    after roughly a thousand steps. This iterative version runs until
    interrupted and lets Ctrl-C through.
    """
    candies = begin
    while True:
        try:
            place(candies, True, restrict)
        except Exception:
            pass  # best-effort: skip counts that blow up, like the original
        candies += 2
def flipflop(game):
    """Score a 3-pile position via the flip-flop recurrence.

    Sorts *game* in place; anything other than exactly three piles scores 0.
    With k = log2(game[0]+1) and m = game[1]/(game[0]+1): m == 1 scores
    game[2]-1 directly, otherwise the position is reduced self-similarly
    and 2**(k+1)-2 is banked per step.
    """
    game.sort()
    if len(game) != 3:
        return 0
    k = log(game[0] + 1, 2)
    m = game[1] / (game[0] + 1)
    if m == 1:
        return game[2] - 1
    return 2 ** (k + 1) - 2 + flipflop([2 ** k - 1, 2 ** k * (m - 1), 2 ** k * m - 1])
def fractal(game,main=False):
    """Score a 3-pile game using the self-similar ("fractal") structure of
    its P-positions; m == 1 results are memoized in the global ``valm1``.

    NOTE(review): ``k`` is read below before any visible assignment — the
    log(...) on the first conditional is computed but never bound to k, so
    this appears to rely on a leftover global (or is simply broken).
    Confirm before trusting any result from this function.
    """
    global valm1
    game.sort()
    # Print the multiplier whenever game[1]+1 is an exact power of two.
    if log(game[1]+1,2)==float((log(game[1]+1,2))//1):
        print(game[1]//(game[0]+1))
    if k==1 or k==2:
        if k==1:
            return(game[1])
        return(6*(game[1]//(game[0]+1)))
    m=game[1]//(game[0]+1)
    if m==1:
        try:
            return(valm1[k])
        except:
            pass
        # Try every power-of-two reduction of the largest pile and keep
        # the best outcome.
        z=[]
        for i in range(int(k)):
            z.append(game[2]-2**(i+1)+1+fractal([game[0],game[1]-2**i,2**i-1]))
        i=0
        for i in range(len(z)):
            try:
                if z[i]==max(z) and main==True:
                    print("When the smallest pile is",min(game),"candies, an fractal optimal move is",2**i-1)
            except:
                break
        valm1[k]=max(z)
        return(max(z))
    else:
        return((2**(k+1)-2)*(m-1)+fractal([2**k-1,2**k,2**(k+1)-1]))
def main():
a=input("Do you | |
"pt_BR": "Faro",
"ru_RU": "Фаро"
},
"BLACKDEATH_SCENARIO_FRANCE": {
"de_DE": "Frankreich",
"es_ES": "Francia",
"fr_FR": "France",
"it_IT": "Francia",
"ja_JP": "フランス",
"ko_KR": "프랑스",
"pl_PL": "Francja",
"pt_BR": "França",
"ru_RU": "Франция"
},
"BLACKDEATH_SCENARIO_FRANCE_ABILITY": {
"de_DE": "Päpstliche Führung",
"es_ES": "Liderazgo papal",
"fr_FR": "Direction papale",
"it_IT": "Primato del papa",
"ja_JP": "教皇の指導力",
"ko_KR": "교황 지도력",
"pl_PL": "Zwierzchnictwo papieskie",
"pt_BR": "Liderança Papal",
"ru_RU": "Руководящая роль папы"
},
"BLACKDEATH_SCENARIO_GDANSK": {
"de_DE": "Danzig",
"es_ES": "Gdansk",
"fr_FR": "Gdańsk",
"it_IT": "Danzica",
"ja_JP": "グダニスク",
"ko_KR": "그단스크",
"pl_PL": "Gdańsk",
"pt_BR": "Gdansk",
"ru_RU": "Гданьск"
},
"BLACKDEATH_SCENARIO_GENEVA": {
"de_DE": "Genf",
"es_ES": "Ginebra",
"fr_FR": "Genève",
"it_IT": "Ginevra",
"ja_JP": "ジュネーヴ",
"ko_KR": "제네바",
"pl_PL": "Genewa",
"pt_BR": "Genebra",
"ru_RU": "Женева"
},
"BLACKDEATH_SCENARIO_GENOA": {
"de_DE": "Genua",
"es_ES": "Génova",
"fr_FR": "Gênes",
"it_IT": "Genova",
"ja_JP": "ジェノヴァ",
"ko_KR": "제노바",
"pl_PL": "Genua",
"pt_BR": "Gênova",
"ru_RU": "Генуя"
},
"BLACKDEATH_SCENARIO_GRENADA": {
"de_DE": "Grenada",
"es_ES": "Granada",
"fr_FR": "Grenade",
"it_IT": "Granada",
"ja_JP": "グレナダ",
"ko_KR": "그라나다",
"pl_PL": "Granada",
"pt_BR": "Granada",
"ru_RU": "Гранада"
},
"BLACKDEATH_SCENARIO_HOLY_ROMAN_EMPIRE": {
"de_DE": "Heiliges Römisches Reich",
"es_ES": "Sacro Imperio Romano",
"fr_FR": "Saint Empire Romain",
"it_IT": "Sacro Romano Impero",
"ja_JP": "神聖ローマ帝国",
"ko_KR": "신성 로마 제국",
"pl_PL": "Święte Cesarstwo Rzymskie",
"pt_BR": "Sacro Império Romano-Germânico",
"ru_RU": "Священная Римская империя"
},
"BLACKDEATH_SCENARIO_HOLY_ROMAN_EMPIRE_ABILITY": {
"de_DE": "Deutschritterorden",
"es_ES": "Orden Teutona",
"fr_FR": "Ordre teutonique",
"it_IT": "Ordine Teutonico",
"ja_JP": "チュートン騎士団",
"ko_KR": "튜턴 기사단",
"pl_PL": "Zakon krzyżacki",
"pt_BR": "Ordem Teutônica",
"ru_RU": "Тевтонский орден"
},
"BLACKDEATH_SCENARIO_KALMAR": {
"de_DE": "Kalmar",
"es_ES": "Kalmar",
"fr_FR": "Kalmar",
"it_IT": "Kalmar",
"ja_JP": "カルマル",
"ko_KR": "칼마르",
"pl_PL": "Kalmar",
"pt_BR": "Kalmar",
"ru_RU": "Кальмар"
},
"BLACKDEATH_SCENARIO_LISBON": {
"de_DE": "Lissabon",
"es_ES": "Lisboa",
"fr_FR": "Lisbonne",
"it_IT": "Lisbona",
"ja_JP": "リスボン",
"ko_KR": "리스본",
"pl_PL": "Lizbona",
"pt_BR": "Lisboa",
"ru_RU": "Лиссабон"
},
"BLACKDEATH_SCENARIO_MARSEILLES": {
"de_DE": "Marseille",
"es_ES": "Marsella",
"fr_FR": "Marseille",
"it_IT": "Marsiglia",
"ja_JP": "マルセイユ",
"ko_KR": "마르세유",
"pl_PL": "Marsylia",
"pt_BR": "Marselha",
"ru_RU": "Марсель"
},
"BLACKDEATH_SCENARIO_MILAN": {
"de_DE": "Mailand",
"es_ES": "Milán",
"fr_FR": "Milan",
"it_IT": "Milano",
"ja_JP": "ミラノ",
"ko_KR": "밀란",
"pl_PL": "Mediolan",
"pt_BR": "Milão",
"ru_RU": "Милан"
},
"BLACKDEATH_SCENARIO_NAPLES": {
"de_DE": "Neapel",
"es_ES": "Nápoles",
"fr_FR": "Naples",
"it_IT": "Napoli",
"ja_JP": "ナポリ",
"ko_KR": "나폴리",
"pl_PL": "Neapol",
"pt_BR": "Nápoles",
"ru_RU": "Неаполь"
},
"BLACKDEATH_SCENARIO_OSLO": {
"de_DE": "Oslo",
"es_ES": "Oslo",
"fr_FR": "Oslo",
"it_IT": "Oslo",
"ja_JP": "オスロ",
"ko_KR": "오슬로",
"pl_PL": "Oslo",
"pt_BR": "Oslo",
"ru_RU": "Осло"
},
"BLACKDEATH_SCENARIO_PALMA": {
"de_DE": "Palma",
"es_ES": "Palma",
"fr_FR": "Palma",
"it_IT": "Palma",
"ja_JP": "パルマ",
"ko_KR": "팔마",
"pl_PL": "Palma",
"pt_BR": "Palma",
"ru_RU": "Пальма"
},
"BLACKDEATH_SCENARIO_PAMPLONA": {
"de_DE": "Pamplona",
"es_ES": "Pamplona",
"fr_FR": "Pampelune",
"it_IT": "Pamplona",
"ja_JP": "パンプローナ",
"ko_KR": "팜플로나",
"pl_PL": "Pampeluna",
"pt_BR": "Pamplona",
"ru_RU": "Памплона"
},
"BLACKDEATH_SCENARIO_PISA": {
"de_DE": "Pisa",
"es_ES": "Pisa",
"fr_FR": "Pise",
"it_IT": "Pisa",
"ja_JP": "ピサ",
"ko_KR": "피사",
"pl_PL": "Piza",
"pt_BR": "Pisa",
"ru_RU": "Пиза"
},
"BLACKDEATH_SCENARIO_PRAGUE": {
"de_DE": "Prag",
"es_ES": "Praga",
"fr_FR": "Prague",
"it_IT": "Praga",
"ja_JP": "プラハ",
"ko_KR": "프라하",
"pl_PL": "Praga",
"pt_BR": "Praga",
"ru_RU": "Прага"
},
"BLACKDEATH_SCENARIO_ROME": {
"de_DE": "Rom",
"es_ES": "Roma",
"fr_FR": "Rome",
"it_IT": "Roma",
"ja_JP": "ローマ",
"ko_KR": "로마",
"pl_PL": "Rzym",
"pt_BR": "Roma",
"ru_RU": "Рим"
},
"BLACKDEATH_SCENARIO_SPLIT": {
"de_DE": "Split",
"es_ES": "Split",
"fr_FR": "Split",
"it_IT": "Spalato",
"ja_JP": "スプリト",
"ko_KR": "스플리트",
"pl_PL": "Split",
"pt_BR": "Split",
"ru_RU": "Сплит"
},
"BLACKDEATH_SCENARIO_STAVANGER": {
"de_DE": "Stavanger",
"es_ES": "Stavanger",
"fr_FR": "Stavanger",
"it_IT": "Stavanger",
"ja_JP": "スタヴァンゲル",
"ko_KR": "스타방에르",
"pl_PL": "Stavanger",
"pt_BR": "Stavanger",
"ru_RU": "Ставангер"
},
"BLACKDEATH_SCENARIO_STOCKHOLM": {
"de_DE": "Stockholm",
"es_ES": "Estocolmo",
"fr_FR": "Stockholm",
"it_IT": "Stoccolma",
"ja_JP": "ストックホルム",
"ko_KR": "스톡홀름",
"pl_PL": "Sztokholm",
"pt_BR": "Estocolmo",
"ru_RU": "Стокгольм"
},
"BLACKDEATH_SCENARIO_SZCZECIN": {
"de_DE": "Stettin",
"es_ES": "Szczecin",
"fr_FR": "Szczecin",
"it_IT": "Stettino",
"ja_JP": "シュチェチン",
"ko_KR": "슈체친",
"pl_PL": "Szczecin",
"pt_BR": "Szczecin",
"ru_RU": "Щецин"
},
"BLACKDEATH_SCENARIO_VALENCIA": {
"de_DE": "Valencia",
"es_ES": "Valencia",
"fr_FR": "Valence",
"it_IT": "Valencia",
"ja_JP": "バレンシア",
"ko_KR": "발렌시아",
"pl_PL": "Walencja",
"pt_BR": "Valência",
"ru_RU": "Валенсия"
},
"BLACKDEATH_SCENARIO_VENICE": {
"de_DE": "Venedig",
"es_ES": "Venecia",
"fr_FR": "Venise",
"it_IT": "Venezia",
"ja_JP": "ヴェネツィア",
"ko_KR": "베네치아",
"pl_PL": "Wenecja",
"pt_BR": "Veneza",
"ru_RU": "Венеция"
},
"BLACKDEATH_SCENARIO_VIENNA": {
"de_DE": "Wien",
"es_ES": "Viena",
"fr_FR": "Vienne",
"it_IT": "Vienna",
"ja_JP": "ウィーン",
"ko_KR": "빈",
"pl_PL": "Wiedeń",
"pt_BR": "Viena",
"ru_RU": "Вена"
},
"BLACKDEATH_SCENARIO_ZAGREB": {
"de_DE": "Zagreb",
"es_ES": "Zagreb",
"fr_FR": "Zagreb",
"it_IT": "Zagabria",
"ja_JP": "ザグレブ",
"ko_KR": "자그레브",
"pl_PL": "Zagrzeb",
"pt_BR": "Zagreb",
"ru_RU": "Загреб"
},
"BOLOGNA": {
"de_DE": "Bologna",
"es_ES": "Bolonia",
"fr_FR": "Bologne",
"it_IT": "Bologna",
"ja_JP": "ボローニャ",
"ko_KR": "볼로냐",
"pl_PL": "Bolonia",
"pt_BR": "Bolonha",
"ru_RU": "Болонья"
},
"BRAZIL": {
"de_DE": "Brasilien",
"es_ES": "Brasil",
"fr_FR": "Brésil",
"it_IT": "Brasile",
"ja_JP": "ブラジル",
"ko_KR": "브라질",
"pl_PL": "Brazylia",
"pt_BR": "Brasil",
"ru_RU": "Бразилия"
},
"BRUSSELS": {
"de_DE": "Brüssel",
"es_ES": "Bruselas",
"fr_FR": "Bruxelles",
"it_IT": "Bruxelles",
"ja_JP": "ブリュッセル",
"ko_KR": "브뤼셀",
"pl_PL": "Bruksela",
"pt_BR": "Bruxelas",
"ru_RU": "Брюссель"
},
"BUENOS_AIRES": {
"de_DE": "Buenos Aires",
"es_ES": "Buenos Aires",
"fr_FR": "Buenos Aires",
"it_IT": "Buenos Aires",
"ja_JP": "ブエノス・アイレス",
"ko_KR": "부에노스아이레스",
"pl_PL": "Buenos Aires",
"pt_BR": "Buenos Aires",
"ru_RU": "Буэнос-Айрес"
},
"BYZANTIUM": {
"de_DE": "Byzanz",
"es_ES": "Bizancio",
"fr_FR": "Byzance",
"it_IT": "Bisanzio",
"ja_JP": "ビザンティン",
"ko_KR": "비잔틴",
"pl_PL": "Bizancjum",
"pt_BR": "Bizâncio",
"ru_RU": "Византия"
},
"CAGUANA": {
"de_DE": "Caguana",
"es_ES": "Caguana",
"fr_FR": "Caguana",
"it_IT": "Caguana",
"ja_JP": "カグアナ",
"ko_KR": "카구아나",
"pl_PL": "Caguana",
"pt_BR": "Caguana",
"ru_RU": "Кагуана"
},
"CAHOKIA": {
"de_DE": "Cahokia",
"es_ES": "Cahokia",
"fr_FR": "Cahokia",
"it_IT": "Cahokia",
"ja_JP": "カホキア",
"ko_KR": "카호키아",
"pl_PL": "Cahokia",
"pt_BR": "Cahokia",
"ru_RU": "Кахокия"
},
"CANADA": {
"de_DE": "Kanada",
"es_ES": "Canadá",
"fr_FR": "Canada",
"it_IT": "Canada",
"ja_JP": "カナダ",
"ko_KR": "캐나다",
"pl_PL": "Kanada",
"pt_BR": "Canadá",
"ru_RU": "Канада"
},
"CARDIFF": {
"de_DE": "Cardiff",
"es_ES": "Cardiff",
"fr_FR": "Cardiff",
"it_IT": "Cardiff",
"ja_JP": "カーディフ",
"ko_KR": "카디프",
"pl_PL": "Cardiff",
"pt_BR": "Cardiff",
"ru_RU": "Кардифф"
},
"CARTHAGE": {
"de_DE": "Karthago",
"es_ES": "Cartago",
"fr_FR": "Carthage",
"it_IT": "Cartagine",
"ja_JP": "カルタゴ",
"ko_KR": "카르타고",
"pl_PL": "Kartagina",
"pt_BR": "Cártago",
"ru_RU": "Карфаген"
},
"CHINA": {
"de_DE": "China",
"es_ES": "China",
"fr_FR": "Chine",
"it_IT": "Cina",
"ja_JP": "中国",
"ko_KR": "중국",
"pl_PL": "Chiny",
"pt_BR": "China",
"ru_RU": "Китай"
},
"CHINGUETTI": {
"de_DE": "Chinguetti",
"es_ES": "Chinguetti",
"fr_FR": "Chinguetti",
"it_IT": "Chinguetti",
"ja_JP": "シンゲッティ",
"ko_KR": "싱게티",
"pl_PL": "Szinkit",
"pt_BR": "Chinguetti",
"ru_RU": "Шингетти"
},
"CIVROYALE_SCENARIO_ALIENS": {
"de_DE": "Aliens",
"es_ES": "Alienígenas",
"fr_FR": "Extraterrestre",
"it_IT": "Alieni",
"ja_JP": "エイリアン",
"ko_KR": "외계인",
"pl_PL": "Kosmici",
"pt_BR": "Alienígena",
"ru_RU": "Инопланетяне"
},
"CIVROYALE_SCENARIO_CULTISTS": {
"de_DE": "Kultisten",
"es_ES": "Sectarios",
"fr_FR": "Fanatiques",
"it_IT": "Seguaci",
"ja_JP": "カルト教団員",
"ko_KR": "숭배자",
"pl_PL": "Fanatycy",
"pt_BR": "Fanáticos",
"ru_RU": "Сектанты"
},
"CIVROYALE_SCENARIO_EDGELORDS": {
"de_DE": "Grenzkämpfer",
"es_ES": "Señores del límite",
"fr_FR": "Extrémistes",
"it_IT": "Confinanti",
"ja_JP": "エッジロード",
"ko_KR": "중2병자",
"pl_PL": "Władcy Krawędzi",
"pt_BR": "Senhores do Limite",
"ru_RU": "Антигерои"
},
"CIVROYALE_SCENARIO_JOCKS": {
"de_DE": "Enthusiasten",
"es_ES": "Deportistas",
"fr_FR": "Sportifs",
"it_IT": "Bulli",
"ja_JP": "ジョック",
"ko_KR": "체육계",
"pl_PL": "Karki",
"pt_BR": "Atletas",
"ru_RU": "Фанаты"
},
"CIVROYALE_SCENARIO_MUTANTS": {
"de_DE": "Mutanten",
"es_ES": "Mutantes",
"fr_FR": "Mutants",
"it_IT": "Mutanti",
"ja_JP": "ミュータント",
"ko_KR": "돌연변이",
"pl_PL": "Mutanci",
"pt_BR": "Mutantes",
"ru_RU": "Мутанты"
},
"CIVROYALE_SCENARIO_MUTANTS_FALLOUT_MOVE_ABILITY": {
"de_DE": "Radioaktive Bewegung",
"es_ES": "Movimiento radioactivo",
"fr_FR": "Déplacement radioactif",
"it_IT": "Movimento radioattivo",
"ja_JP": "放射能移動",
"ko_KR": "방사성 이동력",
"pl_PL": "Radioaktywna ruchliwość",
"pt_BR": "Movimento radioativo",
"ru_RU": "Радиоактивное перемещение"
},
"CIVROYALE_SCENARIO_MUTANTS_FALLOUT_SPREAD_ABILITY": {
"de_DE": "Strahlende Persönlichkeiten",
"es_ES": "Personalidad radiante",
"fr_FR": "Personnalités irradiantes",
"it_IT": "Personalità radiose",
"ja_JP": "ラディアント・パーソナリティ",
"ko_KR": "방사적 성격",
"pl_PL": "Promienne osobowości",
"pt_BR": "Personalidades radiantes",
"ru_RU": "Светящиеся личности"
},
"CIVROYALE_SCENARIO_MUTANTS_WMD_RESIST_ABILITY": {
"de_DE": "Strahlende Persönlichkeiten",
"es_ES": "Personalidad radiante",
"fr_FR": "Personnalités irradiantes",
"it_IT": "Personalità radiose",
"ja_JP": "ラディアント・パーソナリティ",
"ko_KR": "방사적 성격",
"pl_PL": "Promienne osobowości",
"pt_BR": "Personalidades radiantes",
"ru_RU": "Светящиеся личности"
},
"CIVROYALE_SCENARIO_PIRATES": {
"de_DE": "Piraten",
"es_ES": "Piratas",
"fr_FR": "Pirates",
"it_IT": "Pirati",
"ja_JP": "海賊",
"ko_KR": "해적",
"pl_PL": "Piraci",
"pt_BR": "Piratas",
"ru_RU": "Пираты"
},
"CIVROYALE_SCENARIO_PREPPERS": {
"de_DE": "Prepper",
"es_ES": "Apocalípticos",
"fr_FR": "Survivalistes",
"it_IT": "Survivalisti",
"ja_JP": "終末論者",
"ko_KR": "생존주의자",
"pl_PL": "Surwiwaliści",
"pt_BR": "Sobrevivencialistas",
"ru_RU": "Выживальщики"
},
"CIVROYALE_SCENARIO_SCIENTISTS": {
"de_DE": "Verrückte Wissenschaftler",
"es_ES": "Científicos locos",
"fr_FR": "Scientifiques fous",
"it_IT": "Scienziati pazzi",
"ja_JP": "マッドサイエンティスト",
"ko_KR": "미친 과학자",
"pl_PL": "Szaleni Naukowcy",
"pt_BR": "Cientistas malucos",
"ru_RU": "Сумасшедшие ученые"
},
"CIVROYALE_SCENARIO_WANDERERS": {
"de_DE": "Wanderer",
"es_ES": "Errantes",
"fr_FR": "Vagabonds",
"it_IT": "Viandanti",
"ja_JP": "放浪者",
"ko_KR": "방랑자",
"pl_PL": "Wędrowcy",
"pt_BR": "Andarilhos",
"ru_RU": "Скитальцы"
},
"CIVROYALE_SCENARIO_ZOMBIES": {
"de_DE": "Zombies",
      "es_ES": "Zombis",
"fr_FR": "Zombies",
"it_IT": "Zombi",
"ja_JP": "ゾンビ",
"ko_KR": "좀비",
"pl_PL": "Zombie",
"pt_BR": "Zumbis",
"ru_RU": "Зомби"
},
"CIVROYALE_SCENARIO_ZOMBIES_BRAINS_ABILITY": {
"de_DE": "Hirn!",
"es_ES": "¡Cerebros!",
"fr_FR": "Cerveaux !",
"it_IT": "Cervelli!",
"ja_JP": "脳みそ!",
"ko_KR": "뇌!",
"pl_PL": "Mózgi!",
"pt_BR": "Cérebro!",
"ru_RU": "Мозги!"
},
"CIVROYALE_SCENARIO_ZOMBIES_WEAPONIZED_ZOMBIES_ABILITY": {
"de_DE": "Kaum waffenfähige Zombiehorde",
"es_ES": "Horda zombi apenas belicosa",
"fr_FR": "Zombies presque inoffensifs",
"it_IT": "Zombie controllati a malapena",
"ja_JP": "ゾンビ軍団",
"ko_KR": "비무장 좀비",
"pl_PL": "Skąpo uzbrojone zombie",
"pt_BR": "Zumbis Pouco Armados",
"ru_RU": "Плохо вооруженные зомби"
},
"CREE": {
"de_DE": "Cree",
"es_ES": "Tierra Cri",
"fr_FR": "Cris",
"it_IT": "Cree",
"ja_JP": "クリー",
"ko_KR": "크리",
"pl_PL": "Kri",
"pt_BR": "Nação Cree",
"ru_RU": "Кри"
},
"EGYPT": {
"de_DE": "Ägypten",
"es_ES": "Egipto",
"fr_FR": "Égypte",
"it_IT": "Egitto",
"ja_JP": "エジプト",
"ko_KR": "이집트",
"pl_PL": "Egipt",
"pt_BR": "Egito",
"ru_RU": "Египет"
},
"ENGLAND": {
"de_DE": "England",
| |
<reponame>SkyLeach/BlenderPanda
from __future__ import print_function
import math
import base64
import struct
from panda3d.core import * # pylint: disable=wildcard-import
try:
from panda3d import bullet
HAVE_BULLET = True
except ImportError:
HAVE_BULLET = False
class Converter():
def __init__(self):
self.cameras = {}
self.lights = {}
self.textures = {}
self.mat_states = {}
self.mat_mesh_map = {}
self.meshes = {}
self.nodes = {}
self.node_paths = {}
self.scenes = {}
self.characters = {}
# Scene props
self.active_scene = NodePath(ModelRoot('default'))
self.background_color = (0, 0, 0)
self.active_camera = None
    def update(self, gltf_data, writing_bam=False):
        """Convert a parsed glTF dict into Panda3D scene graphs.

        Loads cameras, KHR_materials_common lights, textures, materials and
        meshes into the per-id caches, then assembles one NodePath tree per
        glTF scene — wiring up skinned meshes, lights, Bullet collision
        shapes (when available), extras tags, visibility flags and negative
        scales. Finally applies node transforms and records the active
        scene, background color and active camera.

        ``writing_bam`` controls whether lights are instanced or copied,
        depending on the BamWriter feature set (see copy_lights below).
        """
        #import pprint
        #pprint.pprint(gltf_data)
        # Convert data
        for camid, gltf_cam in gltf_data.get('cameras', {}).items():
            self.load_camera(camid, gltf_cam)
        if 'extensions' in gltf_data and 'KHR_materials_common' in gltf_data['extensions']:
            for lightid, gltf_light in gltf_data['extensions']['KHR_materials_common'].get('lights', {}).items():
                self.load_light(lightid, gltf_light)
        for texid, gltf_tex in gltf_data.get('textures', {}).items():
            self.load_texture(texid, gltf_tex, gltf_data)
        for matid, gltf_mat in gltf_data.get('materials', {}).items():
            self.load_material(matid, gltf_mat)
        for meshid, gltf_mesh in gltf_data.get('meshes', {}).items():
            self.load_mesh(meshid, gltf_mesh, gltf_data)
        for nodeid, gltf_node in gltf_data.get('nodes', {}).items():
            node = self.nodes.get(nodeid, PandaNode(gltf_node['name']))
            self.nodes[nodeid] = node
        # If we support writing bam 6.40, we can safely write out
        # instanced lights. If not, we have to copy it.
        copy_lights = writing_bam and not hasattr(BamWriter, 'root_node')
        # Build scenegraphs
        def add_node(root, gltf_scene, nodeid):
            # Recursively attach glTF node *nodeid* (and its children) under *root*.
            if nodeid not in gltf_data['nodes']:
                print("Could not find node with id: {}".format(nodeid))
                return
            gltf_node = gltf_data['nodes'][nodeid]
            if 'jointName' in gltf_node:
                # don't handle joints here
                return
            panda_node = self.nodes[nodeid]
            if 'extras' in gltf_scene and 'hidden_nodes' in gltf_scene['extras']:
                if nodeid in gltf_scene['extras']['hidden_nodes']:
                    panda_node = panda_node.make_copy()
            np = self.node_paths.get(nodeid, root.attach_new_node(panda_node))
            self.node_paths[nodeid] = np
            if 'meshes' in gltf_node:
                np_tmp = np
                # Skinned meshes hang off their Character node instead.
                if 'skeletons' in gltf_node:
                    char = self.characters[gltf_node['name']]
                    np_tmp = np.attach_new_node(char)
                for meshid in gltf_node['meshes']:
                    mesh = self.meshes[meshid]
                    np_tmp.attach_new_node(mesh)
            if 'camera' in gltf_node:
                camid = gltf_node['camera']
                cam = self.cameras[camid]
                np.attach_new_node(cam)
            if 'extensions' in gltf_node:
                if 'KHR_materials_common' in gltf_node['extensions']:
                    lightid = gltf_node['extensions']['KHR_materials_common']['light']
                    light = self.lights[lightid]
                    if copy_lights:
                        light = light.make_copy()
                    lnp = np.attach_new_node(light)
                    if isinstance(light, Light):
                        root.set_light(lnp)
                if HAVE_BULLET and 'BLENDER_physics' in gltf_node['extensions']:
                    # Translate the Blender physics extension into a Bullet body.
                    phy = gltf_node['extensions']['BLENDER_physics']
                    shape = None
                    collision_shape = phy['collisionShapes'][0]
                    bounding_box = collision_shape['boundingBox']
                    radius = max(bounding_box[0], bounding_box[1]) / 2.0
                    height = bounding_box[2]
                    geomnode = None
                    static = 'static' in phy and phy['static']
                    if 'mesh' in collision_shape:
                        try:
                            geomnode = self.meshes[collision_shape['mesh']]
                        except KeyError:
                            print("Could not find physics mesh ({}) for object ({})".format(collision_shape['mesh'], nodeid))
                    shape_type = collision_shape['shapeType']
                    if shape_type == 'BOX':
                        shape = bullet.BulletBoxShape(LVector3(*bounding_box) / 2.0)
                    elif shape_type == 'SPHERE':
                        shape = bullet.BulletSphereShape(max(bounding_box) / 2.0)
                    elif shape_type == 'CAPSULE':
                        shape = bullet.BulletCapsuleShape(radius, height - 2.0 * radius, bullet.ZUp)
                    elif shape_type == 'CYLINDER':
                        shape = bullet.BulletCylinderShape(radius, height, bullet.ZUp)
                    elif shape_type == 'CONE':
                        shape = bullet.BulletConeShape(radius, height, bullet.ZUp)
                    elif shape_type == 'CONVEX_HULL':
                        if geomnode:
                            shape = bullet.BulletConvexHullShape()
                            for geom in geomnode.get_geoms():
                                shape.add_geom(geom)
                    elif shape_type == 'MESH':
                        if geomnode:
                            mesh = bullet.BulletTriangleMesh()
                            for geom in geomnode.get_geoms():
                                mesh.add_geom(geom)
                            shape = bullet.BulletTriangleMeshShape(mesh, dynamic=not static)
                    else:
                        print("Unknown collision shape ({}) for object ({})".format(shape_type, nodeid))
                    if shape is not None:
                        phynode = bullet.BulletRigidBodyNode(gltf_node['name'])
                        phynode.add_shape(shape)
                        np.attach_new_node(phynode)
                        if not static:
                            phynode.set_mass(phy['mass'])
                    else:
                        print("Could not create collision shape for object ({})".format(nodeid))
                elif not HAVE_BULLET:
                    print("Bullet is unavailable, not converting collision shape for object ({})".format(nodeid))
            if 'extras' in gltf_node:
                for key, value in gltf_node['extras'].items():
                    np.set_tag(key, str(value))
            for child_nodeid in gltf_node.get('children', []):
                add_node(np, gltf_scene, child_nodeid)
            # Handle visibility after children are loaded
            def visible_recursive(node, visible):
                if visible:
                    node.show()
                else:
                    node.hide()
                for child in node.get_children():
                    visible_recursive(child, visible)
            if 'extras' in gltf_scene and 'hidden_nodes' in gltf_scene['extras']:
                if nodeid in gltf_scene['extras']['hidden_nodes']:
                    #print('Hiding', np)
                    visible_recursive(np, False)
                else:
                    #print('Showing', np)
                    visible_recursive(np, True)
            # Check if we need to deal with negative scale values
            scale = panda_node.get_transform().get_scale()
            negscale = scale.x * scale.y * scale.z < 0
            if negscale:
                # Mirrored geometry flips winding; reverse culling under an
                # interposed node so the geoms render correctly.
                for geomnode in np.find_all_matches('**/+GeomNode'):
                    tmp = geomnode.get_parent().attach_new_node(PandaNode('ReverseCulling'))
                    tmp.set_attrib(CullFaceAttrib.make_reverse())
                    geomnode.reparent_to(tmp)
        for sceneid, gltf_scene in gltf_data.get('scenes', {}).items():
            scene_root = NodePath(ModelRoot(gltf_scene['name']))
            node_list = gltf_scene['nodes']
            if 'extras' in gltf_scene and 'hidden_nodes' in gltf_scene['extras']:
                node_list += gltf_scene['extras']['hidden_nodes']
            for nodeid in node_list:
                add_node(scene_root, gltf_scene, nodeid)
            self.scenes[sceneid] = scene_root
        # Update node transforms for glTF nodes that have a NodePath
        for nodeid, gltf_node in gltf_data.get('nodes', {}).items():
            if nodeid not in self.node_paths:
                continue
            np = self.node_paths[nodeid]
            np.set_pos(*gltf_node.get('translation', [0, 0, 0]))
            np.set_hpr(self.load_quaternion_as_hpr(gltf_node.get('rotation', [0, 0, 0, 1])))
            np.set_scale(*gltf_node.get('scale', [1, 1, 1]))
        # Set the active scene
        sceneid = gltf_data.get('scene', None)
        if sceneid in self.scenes:
            self.active_scene = self.scenes[sceneid]
        if 'scenes' in gltf_data:
            gltf_scene = gltf_data['scenes'][sceneid]
            if 'extras' in gltf_scene:
                if 'background_color' in gltf_scene['extras']:
                    self.background_color = gltf_scene['extras']['background_color']
                if 'active_camera' in gltf_scene['extras']:
                    self.active_camera = gltf_scene['extras']['active_camera']
def load_matrix(self, mat):
lmat = LMatrix4()
for i in range(4):
lmat.set_row(i, LVecBase4(*mat[i * 4: i * 4 + 4]))
return lmat
def load_quaternion_as_hpr(self, quaternion):
quat = LQuaternion(quaternion[3], quaternion[0], quaternion[1], quaternion[2])
return quat.get_hpr()
def load_texture(self, texid, gltf_tex, gltf_data):
if 'source' not in gltf_tex:
print("Texture '{}' has no source, skipping".format(gltf_tex['name']))
return
source = gltf_data['images'][gltf_tex['source']]
uri = Filename.fromOsSpecific(source['uri'])
texture = TexturePool.load_texture(uri, 0, False, LoaderOptions())
use_srgb = False
if 'format' in gltf_tex and gltf_tex['format'] in (0x8C40, 0x8C42):
use_srgb = True
elif 'internalFormat' in gltf_tex and gltf_tex['internalFormat'] in (0x8C40, 0x8C42):
use_srgb = True
if use_srgb:
if texture.get_num_components() == 3:
texture.set_format(Texture.F_srgb)
elif texture.get_num_components() == 4:
texture.set_format(Texture.F_srgb_alpha)
self.textures[texid] = texture
    def load_material(self, matid, gltf_mat):
        """Build (or rebuild) the RenderState for glTF material *matid* and
        re-apply it to every mesh geom that uses this material.

        Only the KHR_materials_common extension is handled here; each of
        diffuse/specular/emission may be either an RGBA list (a plain color)
        or a texture id string.
        """
        # Start from the previously-built state (if any) so repeated loads
        # update rather than discard earlier attribs.
        state = self.mat_states.get(matid, RenderState.make_empty())

        if matid not in self.mat_mesh_map:
            self.mat_mesh_map[matid] = []

        pmat = Material(gltf_mat['name'])
        textures = []

        if 'extensions' in gltf_mat and 'KHR_materials_common' in gltf_mat['extensions']:
            matext = gltf_mat['extensions']['KHR_materials_common']['values']
            pmat.set_shininess(matext['shininess'])

            # Each slot is either an RGBA list (solid color) or a texture id.
            if isinstance(matext['diffuse'], list):
                diffuse = LColor(*matext['diffuse'])
                pmat.set_diffuse(diffuse)
            else:
                textures.append(matext['diffuse'])

            if isinstance(matext['specular'], list):
                specular = LColor(*matext['specular'])
                pmat.set_specular(specular)
            else:
                textures.append(matext['specular'])

            if isinstance(matext['emission'], list):
                emission = LColor(*matext['emission'])
                pmat.set_emission(emission)
            else:
                textures.append(matext['emission'])

            ambient = LColor(*matext['ambient'])
            pmat.set_ambient(ambient)

        state = state.set_attrib(MaterialAttrib.make(pmat))

        for i, tex in enumerate(textures):
            texdata = self.textures.get(tex, None)
            if texdata is None:
                print("Could not find texture for key: {}".format(tex))
                continue

            # NOTE(review): a fresh TextureAttrib is created on every loop
            # iteration and set_attrib replaces the previous one, so only the
            # last texture appears to survive when a material has several --
            # verify whether stages should instead accumulate on one attrib.
            tex_attrib = TextureAttrib.make()
            texstage = TextureStage(str(i))
            texstage.set_texcoord_name(InternalName.get_texcoord_name('0'))

            # 4-component (RGBA) textures imply alpha blending.
            if texdata.get_num_components() == 4:
                state = state.set_attrib(TransparencyAttrib.make(TransparencyAttrib.M_alpha))

            tex_attrib = tex_attrib.add_on_stage(texstage, texdata)
            state = state.set_attrib(tex_attrib)

        # Remove stale meshes
        self.mat_mesh_map[matid] = [
            pair for pair in self.mat_mesh_map[matid] if pair[0] in self.meshes
        ]

        # Reload the material
        for meshid, geom_idx in self.mat_mesh_map[matid]:
            self.meshes[meshid].set_geom_state(geom_idx, state)

        self.mat_states[matid] = state
def create_anim(self, character, skel_name, gltf_anim, gltf_data):
root_bone = gltf_data['nodes'][skel_name]
if 'extras' in gltf_data['scenes'][gltf_data['scene']]:
fps = gltf_data['scenes'][gltf_data['scene']].get('frames_per_second', 30)
else:
fps = 30
# Blender exports the same number of elements in each time parameter, so find
# one and assume that the number of elements is the number of frames
num_frames = [
gltf_data['accessors'][accid]['count']
for param, accid in gltf_anim['parameters'].items()
if 'time_parameter' in param
][0]
# Create a simpler samplers dict so we don't have to keep looking
# up parameters
samplers = {
samplerid: gltf_anim['parameters'][sampler['output']]
for samplerid, sampler in gltf_anim['samplers'].items()
}
bundle = AnimBundle(character.get_name(), fps, num_frames)
skeleton = AnimGroup(bundle, '<skeleton>')
def create_anim_channel(parent, boneid):
bone = gltf_data['nodes'][boneid]
channels = [chan for chan in gltf_anim['channels'] if chan['target']['id'] == boneid]
group = AnimChannelMatrixXfmTable(parent, bone['name'])
def extract_chan_data(path):
vals = []
accessors = [
gltf_data['accessors'][samplers[chan['sampler']]]
for chan in channels
if chan['target']['path'] == path
]
if not accessors:
return vals
acc = accessors[0]
buff_view = gltf_data['bufferViews'][acc['bufferView']]
buff = gltf_data['buffers'][buff_view['buffer']]
buff_data = base64.b64decode(buff['uri'].split(',')[1])
start = buff_view['byteOffset']
end = buff_view['byteOffset'] + buff_view['byteLength']
if path == 'rotation':
data = [struct.unpack_from('<ffff', buff_data, idx) for idx in range(start, end, 4 * 4)]
vals += [
[i[0] for i in data],
[i[1] for i in data],
[i[2] for i in data],
[i[3] for i in data]
]
else:
data = [struct.unpack_from('<fff', buff_data, idx) for idx in range(start, end, 3 * 4)]
vals += [
[i[0] for i in data],
[i[1] for i in data],
[i[2] for i in data]
]
return vals
loc_vals = extract_chan_data('translation')
rot_vals = extract_chan_data('rotation')
scale_vals = extract_chan_data('scale')
if loc_vals:
group.set_table(b'x', CPTAFloat(PTAFloat(loc_vals[0])))
group.set_table(b'y', CPTAFloat(PTAFloat(loc_vals[1])))
group.set_table(b'z', CPTAFloat(PTAFloat(loc_vals[2])))
if rot_vals:
tableh = PTAFloat.empty_array(num_frames)
tablep = PTAFloat.empty_array(num_frames)
tabler = PTAFloat.empty_array(num_frames)
for i in range(num_frames):
quat = LQuaternion(rot_vals[3][i], rot_vals[0][i], rot_vals[1][i], rot_vals[2][i])
hpr = quat.get_hpr()
tableh.set_element(i, hpr.get_x())
tablep.set_element(i, hpr.get_y())
tabler.set_element(i, hpr.get_z())
group.set_table(b'h', CPTAFloat(tableh))
group.set_table(b'p', CPTAFloat(tablep))
group.set_table(b'r', CPTAFloat(tabler))
if scale_vals:
group.set_table(b'i', CPTAFloat(PTAFloat(scale_vals[0])))
group.set_table(b'j', CPTAFloat(PTAFloat(scale_vals[1])))
group.set_table(b'k', CPTAFloat(PTAFloat(scale_vals[2])))
for childid in bone.get('children', []):
create_anim_channel(group, childid)
create_anim_channel(skeleton, skel_name)
character.add_child(AnimBundleNode(root_bone['name'], bundle))
def create_character(self, gltf_node, gltf_skin, gltf_mesh, gltf_data):
#print("Creating skinned mesh for", gltf_mesh['name'])
skel_name = gltf_node['skeletons'][0]
root = gltf_data['nodes'][skel_name]
character = Character(gltf_mesh['name'])
bundle = character.get_bundle(0)
skeleton = PartGroup(bundle, "<skeleton>")
jvtmap = {}
bind_mats = []
ibmacc = gltf_data['accessors'][gltf_skin['inverseBindMatrices']]
ibmbv | |
# Source repository: SiliconLabs/mltk
from typing import List
import sys
import os
import re
import shutil
import logging
import time
import copy
import typer
from mltk import cli
from mltk import __version__ as mltk_version
from mltk import MLTK_ROOT_DIR
from mltk.utils.shell_cmd import run_shell_cmd
from mltk.utils.path import (create_tempdir, remove_directory, recursive_listdir, get_user_setting, fullpath)
from mltk.utils.python import install_pip_package
from mltk.utils.system import is_linux
@cli.build_cli.command('python_package')
def build_python_package_command(
    python_exe: str = typer.Option(None, '--python', '-p',
        help='Path to Python executable or Python command found on PATH. If omitted, use current Python'
    ),
    install: bool = typer.Option(True,
        help='Install the local repo into the venv, e.g.: pip install -e .'
    ),
    build: bool = typer.Option(True,
        help='Build the MLTK wheel'
    ),
    utests: str = typer.Option('api',
        help='''\b
Run the MLTK unit tests against the built Python wheel in a new venv, BEFORE releasing to pypi.org.
This should be a comma-separated list of unit tests to run. See "mltk utest --help" for more details.
Set as "none" to skip tests'''
    ),
    release_test: bool = typer.Option(False,
        help='Release the built wheel to test.pypi.org'
    ),
    release_public: bool = typer.Option(False,
        help='Release the built wheel to pypi.org'
    ),
    release_utests: str = typer.Option('api',
        help='''\b
Run the MLTK unit tests against the released package on pypi.org.
This should be a comma-separated list of unit tests to run. See "mltk utest --help" for more details.
Set as "none" to skip tests'''
    ),
    release_all: bool = typer.Option(False, '--all',
        help='''\b
Release for all supported Python versions.
~/.mltk/user_settings.yaml must have a field python_paths: which contains list of Python executable paths, e.g.:
python_paths:
- C:/Python37/python.exe
- C:/Python38/python.exe
- C:/Python39/python.exe
'''
    ),
    pip_packages: str = typer.Option(None,
        help="""Force specific PIP packages during the unit tests. Each package should be delimited by a pipe character |
\b
e.g.: --pip-packages "tensorflow==2.4.*|tensorflow-probability==0.12.0"
"""
    ),
):
    """Build a MLTK wheel for a specific Python version and optionally release to pypi.org

    \b
    To release the built wheel to https://test.pypi.org, add the --release-test option.
    To use this option, first update/create the file ~/.mltk/user_settings.yaml,
    and add:
    test_pypi_token: <token>
    where <token> is your test.pypi.org "API Token"
    \b
    Once released, the wheel may be installed via:
    pip install --extra-index-url https://test.pypi.org/simple silabs-mltk
    \b
    To release the built wheel to https://pypi.org, add the --release-public option.
    To use this option, first update/create the file ~/.mltk/user_settings.yaml,
    and add:
    pypi_token: <token>
    where <token> is your pypi.org "API Token"
    \b
    Once released, the wheel may be installed via:
    pip install silabs-mltk

    NOTE: Before releasing, the __version__ in <mltk repo>/mltk/__init__.py must be incremented.

    This effectively runs the commands:
    \b
    if --install:
      export MLTK_NO_BUILD_WRAPPERS=1
      git clone MLTK_ROOT_DIR temp/mltk/release/mltk/python_<version>
      python temp/mltk/release/mltk/python_<version>/install_mltk.py
    \b
    if --build:
      <venv python> setup.py bdist_wheel
    \b
    if --utests:
      rm -rf temp/mltk/python_venvs/tests/<python version>
      python -m venv temp/mltk/python_venvs/tests/<python version>
      <venv python> install <built wheel>
      mltk utest api
    \b
    if --release-test:
      twine upload --repository testpypi dist/*
      if --release-utests:
        <venv pip> install --extra-index-url https://test.pypi.org/simple silabs-mltk
        mltk utest api
    \b
    if --release-public:
      twine upload dist/*
      if --release-utests:
        <venv pip> install silabs-mltk
        mltk utest api

    HINT: Add the --all option to release for all Python versions at once
    """
    logger = cli.get_logger(verbose=True)

    if release_all or release_test or release_public:
        install_pip_package('twine', logger=logger)

        # Releases may only be made from the public github clone of the repo,
        # so verify the current repo dir matches the configured public clone.
        retcode, dst_mltk_origin_url = run_shell_cmd(['git', 'config', '--get', 'remote.origin.url'], cwd=MLTK_ROOT_DIR)
        if retcode != 0:
            cli.abort(msg=f'Failed to get remote.origin.url from {MLTK_ROOT_DIR}, err: {dst_mltk_origin_url}')

        public_mltk_dir = get_user_setting('public_mltk_dir')
        if public_mltk_dir is None:
            cli.abort(msg='Must specify "public_mltk_dir: <github mltk repo dir>" in ~/.mltk/user_settings.yaml which points to directory of the cloned mltk repo on github')

        public_mltk_dir = fullpath(public_mltk_dir)
        current_mltk_dir = fullpath(MLTK_ROOT_DIR)
        if public_mltk_dir != current_mltk_dir:
            cli.abort(
                msg=f'~/.mltk/user_settings.yaml:public_mltk_dir={public_mltk_dir}\n' \
                f'does not match the current MLTK_ROOT_DIR={current_mltk_dir}\n' \
                'You must only release the silabs-mltk package from the public github repo!'
            )

    if release_all:
        python_paths:List[str] = get_user_setting('python_paths')
        if not python_paths:
            cli.abort(msg='~/.mltk/user_settings.yaml must have a field python_paths: which contains list of Python executable paths, e.g.:\n' + \
                'python_paths:\n' + \
                '  - C:/Python37/python.exe\n' + \
                '  - C:/Python38/python.exe\n' + \
                '  - C:/Python39/python.exe\n'
            )
        # Re-invoke this same command once per configured interpreter, replacing
        # --all with an explicit --python argument.
        # NOTE(review): execution falls through after this loop, so the current
        # interpreter is also built/released below -- confirm this is intended.
        for python_path in python_paths: # pylint: disable=not-an-iterable
            cmd = copy.deepcopy(sys.argv)
            cmd.remove('--all')
            cmd.extend(['--python', python_path])
            retcode, _ = run_shell_cmd(cmd, outfile=logger, logger=logger)
            if retcode != 0:
                cli.abort(code=retcode, msg=f'Failed to release wheel for {python_path}')

    #######################################
    # Determine the Python version
    if not python_exe:
        python_exe = sys.executable

    logger.info(f'Build MLTK wheel using {python_exe} ...')

    retcode, retmsg = run_shell_cmd([python_exe, '--version'], outfile=logger, logger=logger)
    if retcode != 0:
        cli.abort(msg=f'Failed to get Python version, err: {retmsg}\nEnsure the given Python executable is valid')

    # BUGFIX: escape the dots in the version pattern; bare '.' matched any
    # character, so e.g. "3x9y7" would also have matched "Python 3.9.7".
    match = re.match(r'.*\s(\d+)\.(\d+)\.(\d+)', retmsg.strip())
    if not match:
        cli.abort(msg=f'Failed to get Python version from {retmsg}')

    python_version_major = match.group(1)
    python_version_minor = match.group(2)
    python_version = f'{python_version_major}.{python_version_minor}'

    mltk_release_dir = create_tempdir(f'release/mltk/python_{python_version}')
    python_venv_dir = f'{mltk_release_dir}/.venv'

    ##########################################
    # Clone MLTK to tempdir and run install_mltk.py
    if install:
        logger.info('#' * 100)
        logger.info(f'Cleaning {mltk_release_dir} ...')
        remove_directory(mltk_release_dir)

        logger.info(f'Cloning {MLTK_ROOT_DIR} to {mltk_release_dir}')
        retcode, retmsg = run_shell_cmd(['git', 'clone', MLTK_ROOT_DIR, mltk_release_dir] , outfile=logger, logger=logger)
        if retcode != 0:
            cli.abort(msg=f'Failed to clone {MLTK_ROOT_DIR} to {mltk_release_dir}, err: {retmsg}')

        logger.info('#' * 100)
        logger.info(f'Running {mltk_release_dir}/install_mltk.py')
        # Run the installer with the selected interpreter first on PATH and
        # without any inherited PYTHONHOME that could redirect it.
        env = os.environ.copy()
        if 'PYTHONHOME' in env:
            del env['PYTHONHOME']
        env['PATH'] = os.path.dirname(python_exe) + os.pathsep + env['PATH']
        retcode, retmsg = run_shell_cmd(
            [python_exe, './install_mltk.py'],
            env=env,
            cwd=mltk_release_dir,
            outfile=logger,
            logger=logger,
        )
        if retcode != 0:
            cli.abort(msg=f'Failed to install the MLTK, err: {retmsg}')

    if os.name == 'nt':
        python_venv_exe = f'{python_venv_dir}/Scripts/python.exe'
    else:
        python_venv_exe = f'{python_venv_dir}/bin/python3'

    if release_test:
        test_pypi_token = get_user_setting('test_pypi_token')
        if test_pypi_token is None:
            cli.abort(
                msg='When using the --release-test option, the file ~/.mltk/user_settings.yaml must have the line: "test_pypi_token: <token>"'
                'which points the the test.pypi.org API token'
            )
        _check_pip_version(python_venv_exe, python_version, use_test_pypi=True, logger=logger)

    if release_public:
        pypi_token = get_user_setting('pypi_token')
        if pypi_token is None:
            cli.abort(
                msg='When using the --release-public option, the file ~/.mltk/user_settings.yaml must have the line: "pypi_token: <token>"'
                'which points the the pypi.org API token'
            )
        _check_pip_version(python_venv_exe, python_version, use_test_pypi=False, logger=logger)

    #################################
    # Build the MLTK wheel
    if build:
        logger.info('#' * 100)
        logger.info(f'Building the MLTK Python wheel for Python {python_version} ...')
        remove_directory(f'{mltk_release_dir}/dist')
        remove_directory(f'{mltk_release_dir}/build')

        # The wrappers were already built in the install step above
        env = os.environ.copy()
        env['MLTK_NO_BUILD_WRAPPERS'] = '1'
        retcode, retmsg = run_shell_cmd(
            [python_venv_exe, f'{mltk_release_dir}/setup.py', 'bdist_wheel'],
            outfile=logger,
            cwd=mltk_release_dir,
            logger=logger,
            env=env
        )
        if retcode != 0:
            cli.abort(msg=f'Failed to build MLTK Python wheel, err: {retmsg}')

    #################################
    # Get the path to the built wheel
    mltk_version_regex = mltk_version.replace('.', '\\.')
    wheel_paths = recursive_listdir(
        base_dir=f'{mltk_release_dir}/dist',
        regex=f'.*/silabs_mltk-{mltk_version_regex}-\\d+-cp{python_version_major}{python_version_minor}-.*' + '\\.whl'
    )
    if not wheel_paths:
        cli.abort(msg=f'Failed to find built .whl file in {mltk_release_dir}/dist')
    wheel_path = wheel_paths[0].replace('\\', '/')

    if is_linux():
        # FIXME: This is a hack to enable the built wheel to
        #        be uploaded to pypi.org.
        #        Technically, the wheel should be built in Docker container
        #        that allows for building actual "manylinux" wheels
        #        More details here:
        #        https://github.com/pypa/manylinux
        #
        # NOTE: The build scripts statically link most C libs
        #       and force GCC 2.17, see:
        #       <mltk repo>/cpp/shared/platforms/linux/CMakeLists.txt
        #
        #       The built wheel has been verified to work on Google Colab
        #       and AWS lambda Docker
        new_path = wheel_path.replace('linux_x86_64', 'manylinux2014_x86_64')
        shutil.copy(wheel_path, new_path)
        wheel_path = new_path

    logger.info('\n\n\n***')
    logger.info(f'Built wheel path: {wheel_path}' + '\n\n\n')

    ##########################################
    # Run the MLTK unit tests
    _run_unit_tests(
        utests=utests,
        pip_args=[wheel_path],
        logger=logger,
        python_exe=python_exe,
        python_version=python_version,
        pip_packages=pip_packages
    )

    ################################
    # Upload wheel to https://test.pypi.org
    if release_test:
        logger.info('#' * 100)
        logger.info(f'Uploading {wheel_path} to https://test.pypi.org ...')
        retcode, retmsg = run_shell_cmd(
            [sys.executable, '-m', 'twine', 'upload', '--repository', 'testpypi', '-u', '__token__', '-p', test_pypi_token, wheel_path],
            outfile=logger,
            logger=logger
        )
        if retcode != 0:
            cli.abort(msg=f'Failed to run upload to https://test.pypi.org, err: {retmsg}')

        # Verify the released package actually installs and passes the tests.
        _run_unit_tests(
            pip_args=['--extra-index-url', 'https://test.pypi.org/simple/', f'silabs-mltk=={mltk_version}'],
            logger=logger,
            python_exe=python_exe,
            python_version=python_version,
            pip_packages=pip_packages,
            retry=True,
            utests=release_utests
        )

    ################################
    # Upload wheel to https://pypi.org
    if release_public:
        logger.info('#' * 100)
        logger.info(f'Uploading {wheel_path} to https://pypi.org ...')
        retcode, retmsg = run_shell_cmd(
            [sys.executable, '-m', 'twine', 'upload', '-u', '__token__', '-p', pypi_token, wheel_path],
            outfile=logger,
            logger=logger
        )
        if retcode != 0:
            cli.abort(msg=f'Failed to run upload to https://pypi.org, err: {retmsg}')

        # Verify the released package actually installs and passes the tests.
        _run_unit_tests(
            pip_args=[f'silabs-mltk=={mltk_version}'],
            logger=logger,
            python_exe=python_exe,
            python_version=python_version,
            pip_packages=pip_packages,
            retry=True,
            utests=release_utests
        )

    logger.info('Done')
def _run_unit_tests(
utests:str,
logger:logging.Logger,
python_version:str,
python_exe:str,
pip_args:List[str],
pip_packages:str,
retry:bool = False,
):
logger.info('#' * 100)
if not utests:
logger.info('NOT running unit tests, but still checking that the package was properly built ...')
logger.info('Installing built wheel in virtual environment ...')
python_test_venv_dir = create_tempdir(f'release/python_venvs/tests/{python_version}')
logger.info(f'Cleaning {python_test_venv_dir} ...')
remove_directory(python_test_venv_dir)
os.makedirs(python_test_venv_dir, exist_ok=True)
logger.info(f'Creating Python v{python_version} virtual environment at {python_test_venv_dir}')
retcode, retmsg = run_shell_cmd([python_exe, '-m', 'venv', python_test_venv_dir], outfile=logger, logger=logger)
if retcode != 0:
cli.abort(msg=f'Failed to create Python venv, err: {retmsg}')
if os.name | |
# Tkinter
import tkinter as tk
from tkinter import ttk
# Matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.dates as mdates
# Serial
from pySerialTransfer import pySerialTransfer as txfer
# Other
import time
from datetime import datetime, timedelta
import random
import numpy as np
import os
def getNextRow(frame):
    """Return the next free grid row for *frame*, tracking a counter on it.

    The counter is stored as a ``row`` attribute on the frame itself and
    starts at 0 the first time the frame is seen.
    """
    current = getattr(frame, 'row', 0)
    frame.row = current + 1
    return current
# TODO: Refactor
class PressurePlot(tk.Frame):
    """Embedded matplotlib plot showing the four pressure channels over time."""

    def __init__(self, parent, x_data):
        super().__init__(parent)
        # matplotlib figure + axes
        self.figure = plt.Figure(figsize=(8.5, 3.5), dpi=100)
        self.ax = self.figure.add_subplot(111)
        self.ax.set_xlabel('Time (hh:mm:ss)')
        self.ax.set_ylabel('Pressure (PSI)')
        # Show timestamps as hh:mm:ss along the x-axis
        self.ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))
        # Start empty: 4 channels x 0 samples
        self.x_data = x_data
        self.y_data = np.zeros((4, 0))
        # One line per pressure channel
        channel_styles = [('Pressure1', 'red'), ('Pressure2', 'yellow'),
                          ('Pressure3', 'purple'), ('Pressure4', 'green')]
        lines = []
        for row, (label, color) in enumerate(channel_styles):
            line, = self.ax.plot(self.x_data, self.y_data[row], label=label, color=color)
            lines.append(line)
        self.pressure1Plot, self.pressure2Plot, self.pressure3Plot, self.pressure4Plot = lines
        self.ax.legend()
        # Auto format date labels
        self.figure.autofmt_xdate()
        self.canvas = FigureCanvasTkAgg(self.figure, self)
        self.canvas.get_tk_widget().pack()

    def update(self, x_data, P1, P2, P3, P4):
        """Append one sample per channel and schedule a redraw."""
        self.x_data = x_data
        self.y_data = np.append(self.y_data, [[P1], [P2], [P3], [P4]], axis=1)
        plots = (self.pressure1Plot, self.pressure2Plot,
                 self.pressure3Plot, self.pressure4Plot)
        for row, line in enumerate(plots):
            line.set_xdata(self.x_data)
            line.set_ydata(self.y_data[row])
        self.canvas.draw_idle()
class TemperaturePlot(tk.Frame):
    """Embedded matplotlib plot of the single temperature channel over time."""

    def __init__(self, parent, x_data):
        super().__init__(parent)
        # matplotlib figure + axes
        self.figure = plt.Figure(figsize=(8.5, 3.5), dpi=100)
        self.ax = self.figure.add_subplot(111)
        # Show timestamps as hh:mm:ss along the x-axis
        self.ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))
        # Start empty
        self.x_data = x_data
        self.y_data = np.zeros(0)
        self.plot = self.ax.plot(self.x_data, self.y_data, label='Temperature', color="orange")[0]
        self.ax.set_xlabel('Time (hh:mm:ss)')
        self.ax.set_ylabel('Temperature (Degrees C)')
        # Auto format date labels
        self.figure.autofmt_xdate()
        self.canvas = FigureCanvasTkAgg(self.figure, self)
        self.canvas.get_tk_widget().pack()

    def update(self, x_data, temperature):
        """Append one temperature sample and schedule a redraw."""
        self.x_data = x_data
        self.y_data = np.append(self.y_data, temperature)
        line = self.plot
        line.set_xdata(self.x_data)
        line.set_ydata(self.y_data)
        self.canvas.draw_idle()
class ScaleSlider(tk.Frame):
    """Two horizontal sliders selecting a [min, max] index window into the
    time axis; every change is reported via ``callback(minIndex, maxIndex)``.

    ``sliderAtMax`` / ``sliderAtMin`` record whether each handle is pinned to
    its end of the range, so ``update`` can keep the window following newly
    arriving samples.
    """

    def __init__(self, parent, callback, x_data):
        super().__init__(parent)
        self.callback = callback
        # The array of datetime values the slider indices point into.
        self.values = x_data
        self.sliderAtMax = True
        self.sliderAtMin = True
        self.minValue = 0
        self.maxValue = 0
        #Labels
        self.minLabel = tk.Label(self, text="Min:")
        self.minLabel.grid(column=0, row=0)
        self.maxLabel = tk.Label(self, text="Max:")
        self.maxLabel.grid(column=0, row=1)
        # Sliders
        self.sliderMin = tk.Scale(self, from_=0, to=100, length=600, orient='horizontal', command=self.minSliderChanged, showvalue=0, sliderlength=10, relief=tk.GROOVE)
        self.sliderMin.grid(column=1, row=0)
        self.sliderMax = tk.Scale(self, from_=0, to=100, length=600, orient='horizontal', command=self.maxSliderChanged, showvalue=0, sliderlength=10, relief=tk.GROOVE)
        self.sliderMax.grid(column=1, row=1)
        self.sliderMax.set(100)
        #Values
        self.minValueLabel = tk.Label(self, text="...")
        self.minValueLabel.grid(column=2, row=0)
        self.maxValueLabel = tk.Label(self, text="...")
        self.maxValueLabel.grid(column=2, row=1)

    def minSliderChanged(self, minValue):
        """Tk callback for the min slider; clamps so min stays below max."""
        self.minValue = int(minValue)
        # Keep at least one sample between the two handles.
        if (self.minValue >= self.maxValue - 1):
            self.minValue = self.maxValue - 1
            self.sliderMin.set(self.minValue)
        if (self.minValue == 0):
            self.sliderAtMin = True
        else:
            self.sliderAtMin = False
        # The slice trims the date prefix and trailing fractional-second
        # digits from the datetime's string form -- assumes str(datetime)
        # layout ("YYYY-MM-DD HH:MM:SS.ffffff"); TODO confirm.
        self.minValueLabel.configure(text=f"{self.values[self.minValue]}"[10:-4])
        self.callback(self.minValue, self.maxValue)

    def maxSliderChanged(self, maxValue):
        """Tk callback for the max slider; clamps so max stays above min."""
        self.maxValue = int(maxValue)
        # Keep at least one sample between the two handles.
        if (self.maxValue <= self.minValue + 1):
            self.maxValue = self.minValue + 1
            self.sliderMax.set(self.maxValue)
        if (self.maxValue == self.values.size - 1):
            self.sliderAtMax = True
        else:
            self.sliderAtMax = False
        # Same datetime-string slicing as in minSliderChanged.
        self.maxValueLabel.configure(text=f"{self.values[self.maxValue]}"[10:-4])
        self.callback(self.minValue, self.maxValue)

    def update(self, x_data):
        """Grow the slider range to cover the newly extended data array."""
        self.values = x_data
        maxIndex = self.values.size - 1
        self.sliderMin.configure(to=maxIndex)
        self.sliderMax.configure(to=maxIndex)
        if self.sliderAtMax:
            # Max handle is pinned to the end: follow the newest sample.
            self.maxValue = self.values.size - 1
            self.sliderMax.set(self.maxValue)
            if not self.sliderAtMin:
                # NOTE(review): advancing minValue keeps the window width
                # constant as data grows -- presumably intentional; verify.
                self.minValue = self.minValue + 1
                self.sliderMin.set(self.minValue)
            else:
                self.minValueLabel.configure(text=f"{self.values[self.minValue]}"[10:-4])
class PlotSet(tk.Frame):
    """Pressure and temperature plots stacked with a shared time-window slider."""

    def __init__(self, parent):
        super().__init__(parent)
        # Shared time axis for both plots and the slider.
        self.x_data = np.zeros(0)
        self.pressurePlot = PressurePlot(self, self.x_data)
        self.pressurePlot.grid(row=getNextRow(self), column=0)
        self.slider = ScaleSlider(self, self.updatePlotLimits, self.x_data)
        self.slider.grid(row=getNextRow(self), column=0)
        self.temperaturePlot = TemperaturePlot(self, self.x_data)
        self.temperaturePlot.grid(row=getNextRow(self), column=0)

    def update(self, time, data):
        """Append one sample to both plots and refresh the slider range."""
        self.x_data = np.append(self.x_data, time)
        self.slider.update(self.x_data)
        self.pressurePlot.update(self.x_data, data.P1, data.P2, data.P3, data.P4)
        self.temperaturePlot.update(self.x_data, data.T1)
        # Keep following the newest data while the max handle is pinned.
        if self.slider.sliderAtMax:
            self.updatePlotLimits(self.slider.minValue, self.slider.maxValue)

    def updatePlotLimits(self, minIndex, maxIndex):
        """Clip both plots' axes to the [minIndex, maxIndex] sample window."""
        stop = maxIndex + 1
        pressures = np.array(self.pressurePlot.y_data)[:, minIndex:stop]
        highest = np.max(pressures)
        lowest = np.min(pressures)
        self.pressurePlot.ax.set_xlim(self.pressurePlot.x_data[minIndex], self.pressurePlot.x_data[maxIndex])
        # 10% of the peak value as head/foot room.
        self.pressurePlot.ax.set_ylim(lowest - highest * 0.1, highest * 1.10)
        temps = self.temperaturePlot.y_data[minIndex:stop]
        self.temperaturePlot.ax.set_xlim(self.temperaturePlot.x_data[minIndex], self.temperaturePlot.x_data[maxIndex])
        self.temperaturePlot.ax.set_ylim(np.min(temps) - 5, np.max(temps) + 5)
# Good Code
class LabeledToggle(tk.Frame):
    """An ON/OFF button pair that sends ``command + "1"`` / ``command + "0"``
    through *callback* when toggled.

    Toggling is ignored unless the shared 'Armed' BooleanVar is set, so no
    solenoid can be actuated accidentally.
    """

    def __init__(self, parent, text, callback, command, armed_state_var):
        super().__init__(parent)
        self.on = False
        self.armed_state_var = armed_state_var
        self.callback = callback
        self.command = command
        self.label = tk.Label(self, text=text)
        self.label.pack()
        self.toggleFrame = tk.Frame(self)
        self.toggleFrame.pack()
        # OFF starts raised/active; ON starts sunken/greyed-out.
        self.offButton = tk.Button(self.toggleFrame, text="OFF", command=self.toggle)
        self.offButton.pack(side="left")
        self.onButton = tk.Button(self.toggleFrame, text="ON", command=self.toggle, relief=tk.SUNKEN, bg='#808080', fg="#808080", activeforeground="#808080", activebackground="#808080")
        self.onButton.pack(side="right")
        # Remember the theme's default button colour so it can be restored.
        self.originalColor = self.offButton.cget("background")

    def toggle(self):
        """Flip the ON/OFF state and notify *callback* -- only when armed."""
        if not self.armed_state_var.get():
            return
        if self.on:
            self.on = False
            self.offButton.config(relief=tk.RAISED, bg=self.originalColor, fg="#000000", activeforeground="#000000")
            self.onButton.config(relief=tk.SUNKEN, bg='#808080', fg="#808080", activeforeground="#808080", activebackground="#808080")
            self.callback(self.command+"0")
        else:
            self.on = True
            self.onButton.config(relief=tk.RAISED, bg=self.originalColor, fg="#000000", activeforeground="#000000")
            self.offButton.config(relief=tk.SUNKEN, bg="#00aa00", fg="#00aa00", activeforeground="#00aa00", activebackground="#00aa00")
            self.callback(self.command+"1")
class App(tk.Tk):
    """Main window: live plots on the left, numeric readouts and solenoid
    toggles on the right.  Serial polling is driven by ``loop`` which
    reschedules itself with ``after``.
    """

    def __init__(self, arduino):
        super().__init__()
        self.arduino = arduino
        # Release the serial link and log file when the window is closed.
        self.protocol("WM_DELETE_WINDOW", self.close)
        # Logger
        self.logger = Logger()
        self.logger.open()
        # Root window configuration
        self.title('Table Layout GUI')
        self.geometry('1200x750+200+10')
        self.row = 0
        self.plotsFrame = tk.Frame(self)
        self.plotsFrame.grid(row=0, column=0)
        # Plots and Sliders
        self.plotSet = PlotSet(self.plotsFrame)
        self.plotSet.grid(row=getNextRow(self.plotsFrame), column=0)
        # Right Hand Frame
        self.rightFrame = tk.Frame(self)
        self.rightFrame.grid(row=0, column=1)
        self.grid_columnconfigure(1, weight=1)
        # Readouts Frame
        self.readoutsFrame = tk.Frame(self.rightFrame)
        self.readoutsFrame.grid(row=0, column=0, columnspan=2)
        # Temperature Readout
        self.temperatureLabel = tk.Label(self.readoutsFrame, text='Temperature', font=("Arial", 10))
        self.temperatureLabel.grid(row=0, column=0, columnspan=2)
        self.temperatureReadout = tk.Label(self.readoutsFrame, text='Initalizing...', font=("Arial", 25), width=6)
        self.temperatureReadout.grid(row=1, column=0, columnspan=2)
        # Pressure Readouts (2x2 grid of label + value pairs)
        self.pressureLabel1 = tk.Label(self.readoutsFrame, text='Pressure 1', font=("Arial", 10))
        self.pressureLabel1.grid(row=2, column=0)
        self.pressureReadout1 = tk.Label(self.readoutsFrame, text='Initalizing...', font=("Arial", 25), width=6)
        self.pressureReadout1.grid(row=3, column=0, padx=20)
        self.pressureLabel2 = tk.Label(self.readoutsFrame, text='Pressure 2', font=("Arial", 10))
        self.pressureLabel2.grid(row=2, column=1)
        self.pressureReadout2 = tk.Label(self.readoutsFrame, text='Initalizing...', font=("Arial", 25), width=6)
        self.pressureReadout2.grid(row=3, column=1, padx=20)
        self.pressureLabel3 = tk.Label(self.readoutsFrame, text='Pressure 3', font=("Arial", 10))
        self.pressureLabel3.grid(row=4, column=0)
        self.pressureReadout3 = tk.Label(self.readoutsFrame, text='Initalizing...', font=("Arial", 25), width=6)
        self.pressureReadout3.grid(row=5, column=0, padx=20)
        self.pressureLabel4 = tk.Label(self.readoutsFrame, text='Pressure 4', font=("Arial", 10))
        self.pressureLabel4.grid(row=4, column=1)
        self.pressureReadout4 = tk.Label(self.readoutsFrame, text='Initalizing...', font=("Arial", 25), width=6)
        self.pressureReadout4.grid(row=5, column=1, padx=20)
        self.buttonsFrame = tk.Frame(self.rightFrame)
        self.buttonsFrame.grid(row=1, column=0)
        self.rightFrame.grid_columnconfigure(0, weight=1)
        # Armed Check -- every LabeledToggle below is inert unless this is ticked
        self.armed_state_var = tk.BooleanVar()
        self.armed_state_var.set(False) #set check state
        self.armed_checkbutton = tk.Checkbutton(self.buttonsFrame, text='Armed', var=self.armed_state_var)
        self.armed_checkbutton.grid(row=0, column=0, pady=(20,0), columnspan=2)
        # Solenoid Toggles -- command codes "00".."11" are sent over the serial link
        self.solenoidFire_toggle = LabeledToggle(self.buttonsFrame, text="Fire", callback=self.arduino.sendCommand, command="00", armed_state_var=self.armed_state_var)
        self.solenoidFire_toggle.grid(row=1, column=0, pady=5, padx=20)
        self.solenoidFill_toggle = LabeledToggle(self.buttonsFrame, text="Fill", callback=self.arduino.sendCommand, command="01", armed_state_var=self.armed_state_var)
        self.solenoidFill_toggle.grid(row=2, column=0, pady=5, padx=20)
        self.solenoidVent_toggle = LabeledToggle(self.buttonsFrame, text="Vent", callback=self.arduino.sendCommand, command="02", armed_state_var=self.armed_state_var)
        self.solenoidVent_toggle.grid(row=3, column=0, pady=5, padx=20)
        self.solenoidPower_toggle = LabeledToggle(self.buttonsFrame, text="Power", callback=self.arduino.sendCommand, command="03", armed_state_var=self.armed_state_var)
        self.solenoidPower_toggle.grid(row=4, column=0, pady=5, padx=20)
        self.solenoid5_toggle = LabeledToggle(self.buttonsFrame, text="Solenoid 5", callback=self.arduino.sendCommand, command="04", armed_state_var=self.armed_state_var)
        self.solenoid5_toggle.grid(row=5, column=0, pady=5, padx=20)
        self.solenoid6_toggle = LabeledToggle(self.buttonsFrame, text="Solenoid 6", callback=self.arduino.sendCommand, command="05", armed_state_var=self.armed_state_var)
        self.solenoid6_toggle.grid(row=6, column=0, pady=0, padx=20)
        self.solenoid7_toggle = LabeledToggle(self.buttonsFrame, text="Solenoid 7", callback=self.arduino.sendCommand, command="06", armed_state_var=self.armed_state_var)
        self.solenoid7_toggle.grid(row=1, column=1, pady=5, padx=20)
        self.solenoid8_toggle = LabeledToggle(self.buttonsFrame, text="Solenoid 8", callback=self.arduino.sendCommand, command="07", armed_state_var=self.armed_state_var)
        self.solenoid8_toggle.grid(row=2, column=1, pady=5, padx=20)
        self.solenoid9_toggle = LabeledToggle(self.buttonsFrame, text="Solenoid 9", callback=self.arduino.sendCommand, command="08", armed_state_var=self.armed_state_var)
        self.solenoid9_toggle.grid(row=3, column=1, pady=5, padx=20)
        self.solenoid10_toggle = LabeledToggle(self.buttonsFrame, text="Solenoid 10", callback=self.arduino.sendCommand, command="09", armed_state_var=self.armed_state_var)
        self.solenoid10_toggle.grid(row=4, column=1, pady=5, padx=20)
        self.solenoid11_toggle = LabeledToggle(self.buttonsFrame, text="Solenoid 11", callback=self.arduino.sendCommand, command="10", armed_state_var=self.armed_state_var)
        self.solenoid11_toggle.grid(row=5, column=1, pady=5, padx=20)
        self.solenoid12_toggle = LabeledToggle(self.buttonsFrame, text="Solenoid 12", callback=self.arduino.sendCommand, command="11", armed_state_var=self.armed_state_var)
        self.solenoid12_toggle.grid(row=6, column=1, pady=5, padx=20)

    def close(self):
        """Window-close handler: shut down the serial link, the log file and
        finally the Tk window itself.
        """
        print("Closing Application")
        if (hasattr(self, 'arduino')):
            self.arduino.close()
        self.logger.close()
        self.destroy()

    def slider(self, name):
        # Debug hook: just echoes the slider name.
        print(name)

    # The main code loop that runs in the background of the window (Every "frequency" milliseconds)
    def loop(self, frequency):
        """Poll the serial link, refresh plots/readouts, append one CSV log
        row, then reschedule itself after *frequency* milliseconds.
        """
        try:
            #Check for received data
            self.arduino.recvData()
            # NOTE: this local 'time' shadows the module-level 'time' import
            # inside this method.
            time = datetime.now()
            # Update Plots
            self.plotSet.update(time, self.arduino.data)
            # Update Readouts
            self.temperatureReadout.config(text=f'{round(self.arduino.data.T1, 2)}')
            self.pressureReadout1.config(text=f'{round(self.arduino.data.P1, 2)}')
            self.pressureReadout2.config(text=f'{round(self.arduino.data.P2, 2)}')
            self.pressureReadout3.config(text=f'{round(self.arduino.data.P3, 2)}')
            self.pressureReadout4.config(text=f'{round(self.arduino.data.P4,2)}')
            self.logger.write(f"{time},{self.arduino.data.L1},{self.arduino.data.P1},{self.arduino.data.P2},{self.arduino.data.P3},{self.arduino.data.P4},{self.arduino.data.T1},{self.arduino.data.Safe}\n")
        except Exception as e:
            # Best effort: a bad or partial packet must not kill the GUI loop.
            print(f"Error Parsing Arduino data: '{e}'")
            import traceback
            traceback.print_exc()
        # Run Loop again after "frequency" milliseconds
        self.after(frequency, self.loop, frequency)
class Logger():
    """Minimal CSV logger that writes telemetry rows to a timestamped file.

    Call :meth:`open` before :meth:`write`; the file handle is created
    lazily so a Logger can be constructed before the destination exists.
    """

    def __init__(self):
        # The file handle is created by open(); nothing to do here.
        pass

    def open(self):
        """Create the dataFiles directory (if needed), open a fresh log file
        named after the current timestamp, and write the CSV header."""
        dataFilesPath = "dataFiles"
        # exist_ok avoids the check-then-create race of the old isdir() test.
        os.makedirs(dataFilesPath, exist_ok=True)
        date = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.file = open(f"{dataFilesPath}/data_{date}.txt", "w")
        # Header row (was a needless f-string with no placeholders).
        self.write("Time,L1,P1,P2,P3,P4,T1,Safe\n")

    def write(self, message):
        """Append *message* verbatim to the log file."""
        self.file.write(message)

    def close(self):
        """Close the underlying log file."""
        self.file.close()
class Data():
    """Latest telemetry sample received from the Arduino.

    Plain value-holder; recvData() overwrites these attributes on each
    received frame and the GUI/logger read them back.
    """
    millisSince = 0    # milliseconds since the sketch started
    L1 = 0.0           # Loadcell
    P1 = 0.0           # Pressure sensor 1 (placement unknown)
    P2 = 0.0           # Tank Pressure Bottom
    P3 = 0.0           # Tank Pressure Top
    P4 = 0.0           # Pressure sensor 4 (placement unknown)
    T1 = 0.0           # Tank Temperature
    # The rest of the code reads/writes `.Safe` (see recvData and the CSV
    # logger), so provide that default; `Safety` is kept for backward
    # compatibility with any external callers.
    Safe = False
    Safety = False
class Arduino():
def __init__(self, serialPort):
    """Open the SerialTransfer link on *serialPort* and reset command state.

    Connection errors are printed rather than raised so the GUI can still
    come up without the hardware attached.
    """
    self.control_int = 0
    self.control_list = [0] * 13
    self.data = Data()
    try:
        self.link = txfer.SerialTransfer(serialPort)
        self.link.open()
        time.sleep(2)  # give the board time to reset after the port opens
    except Exception:
        import traceback
        traceback.print_exc()
        # Best-effort cleanup: the link may not exist or may already be closed.
        try:
            self.link.close()
        except Exception:
            pass
def close(self):
    """Best-effort shutdown of the serial link; never raises."""
    link = getattr(self, 'link', None)
    if link is None:
        return
    try:
        link.close()
    except Exception:
        pass
def recvData(self):
if self.link.available():
recSize = 0
self.data.millisSince = self.link.rx_obj(obj_type='i', start_pos=recSize)
recSize += txfer.STRUCT_FORMAT_LENGTHS['i']
self.data.L1 = self.link.rx_obj(obj_type='f', start_pos=recSize)
recSize += txfer.STRUCT_FORMAT_LENGTHS['f']
self.data.P1 = self.link.rx_obj(obj_type='f', start_pos=recSize)
recSize += txfer.STRUCT_FORMAT_LENGTHS['f']
self.data.P2 = self.link.rx_obj(obj_type='f', start_pos=recSize)
recSize += txfer.STRUCT_FORMAT_LENGTHS['f']
self.data.P3 = self.link.rx_obj(obj_type='f', start_pos=recSize)
recSize += txfer.STRUCT_FORMAT_LENGTHS['f']
self.data.P4 = self.link.rx_obj(obj_type='f', start_pos=recSize)
recSize += txfer.STRUCT_FORMAT_LENGTHS['f']
self.data.T1 = self.link.rx_obj(obj_type='f', start_pos=recSize)
recSize += txfer.STRUCT_FORMAT_LENGTHS['f']
self.data.Safe = self.link.rx_obj(obj_type='b', start_pos=recSize)
recSize += txfer.STRUCT_FORMAT_LENGTHS['b']
elif self.link.status < 0:
if self.link.status == txfer.CRC_ERROR:
print('ERROR: CRC_ERROR')
elif self.link.status == txfer.PAYLOAD_ERROR:
print('ERROR: PAYLOAD_ERROR')
elif self.link.status == txfer.STOP_BYTE_ERROR:
print('ERROR: STOP_BYTE_ERROR')
else:
print('ERROR: | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Auto-generated by Stone, do not modify.
# @generated
# flake8: noqa
# pylint: skip-file
"""
This namespace contains endpoints and data types for basic file operations.
"""
try:
from . import stone_validators as bv
from . import stone_base as bb
except (ImportError, SystemError, ValueError):
# Catch errors raised when importing a relative module when not in a package.
# This makes testing this file directly (outside of a package) easier.
import stone_validators as bv
import stone_base as bb
try:
from . import (
async_,
common,
file_properties,
users_common,
)
except (ImportError, SystemError, ValueError):
import async_
import common
import file_properties
import users_common
class GetMetadataArg(bb.Struct):
    """
    :ivar files.GetMetadataArg.path: The path of a file or folder on Dropbox.
    :ivar files.GetMetadataArg.include_media_info: If true,
        ``FileMetadata.media_info`` is set for photo and video.
    :ivar files.GetMetadataArg.include_deleted: If true,
        :class:`DeletedMetadata` will be returned for deleted file or folder,
        otherwise ``LookupError.not_found`` will be returned.
    :ivar files.GetMetadataArg.include_has_explicit_shared_members: If true, the
        results will include a flag for each file indicating whether or not
        that file has any explicit members.
    :ivar files.GetMetadataArg.include_property_groups: If set to a valid list
        of template IDs, ``FileMetadata.property_groups`` is set if there exists
        property data associated with the file and each of the listed templates.
    """

    # NOTE(review): auto-generated by Stone — do not hand-edit the logic.
    # Each field is stored as a (_<name>_value, _<name>_present) pair so the
    # struct can distinguish "never set" from "set to a default-looking value".
    __slots__ = [
        '_path_value',
        '_path_present',
        '_include_media_info_value',
        '_include_media_info_present',
        '_include_deleted_value',
        '_include_deleted_present',
        '_include_has_explicit_shared_members_value',
        '_include_has_explicit_shared_members_present',
        '_include_property_groups_value',
        '_include_property_groups_present',
    ]

    # 'path' has no default, so the serializer requires it to be present.
    _has_required_fields = True

    def __init__(self,
                 path=None,
                 include_media_info=None,
                 include_deleted=None,
                 include_has_explicit_shared_members=None,
                 include_property_groups=None):
        self._path_value = None
        self._path_present = False
        self._include_media_info_value = None
        self._include_media_info_present = False
        self._include_deleted_value = None
        self._include_deleted_present = False
        self._include_has_explicit_shared_members_value = None
        self._include_has_explicit_shared_members_present = False
        self._include_property_groups_value = None
        self._include_property_groups_present = False
        if path is not None:
            self.path = path
        if include_media_info is not None:
            self.include_media_info = include_media_info
        if include_deleted is not None:
            self.include_deleted = include_deleted
        if include_has_explicit_shared_members is not None:
            self.include_has_explicit_shared_members = include_has_explicit_shared_members
        if include_property_groups is not None:
            self.include_property_groups = include_property_groups

    @property
    def path(self):
        """
        The path of a file or folder on Dropbox.

        :rtype: str
        """
        if self._path_present:
            return self._path_value
        else:
            raise AttributeError("missing required field 'path'")

    @path.setter
    def path(self, val):
        val = self._path_validator.validate(val)
        self._path_value = val
        self._path_present = True

    @path.deleter
    def path(self):
        self._path_value = None
        self._path_present = False

    @property
    def include_media_info(self):
        """
        If true, ``FileMetadata.media_info`` is set for photo and video.

        :rtype: bool
        """
        if self._include_media_info_present:
            return self._include_media_info_value
        else:
            return False

    @include_media_info.setter
    def include_media_info(self, val):
        val = self._include_media_info_validator.validate(val)
        self._include_media_info_value = val
        self._include_media_info_present = True

    @include_media_info.deleter
    def include_media_info(self):
        self._include_media_info_value = None
        self._include_media_info_present = False

    @property
    def include_deleted(self):
        """
        If true, :class:`DeletedMetadata` will be returned for deleted file or
        folder, otherwise ``LookupError.not_found`` will be returned.

        :rtype: bool
        """
        if self._include_deleted_present:
            return self._include_deleted_value
        else:
            return False

    @include_deleted.setter
    def include_deleted(self, val):
        val = self._include_deleted_validator.validate(val)
        self._include_deleted_value = val
        self._include_deleted_present = True

    @include_deleted.deleter
    def include_deleted(self):
        self._include_deleted_value = None
        self._include_deleted_present = False

    @property
    def include_has_explicit_shared_members(self):
        """
        If true, the results will include a flag for each file indicating
        whether or not that file has any explicit members.

        :rtype: bool
        """
        if self._include_has_explicit_shared_members_present:
            return self._include_has_explicit_shared_members_value
        else:
            return False

    @include_has_explicit_shared_members.setter
    def include_has_explicit_shared_members(self, val):
        val = self._include_has_explicit_shared_members_validator.validate(val)
        self._include_has_explicit_shared_members_value = val
        self._include_has_explicit_shared_members_present = True

    @include_has_explicit_shared_members.deleter
    def include_has_explicit_shared_members(self):
        self._include_has_explicit_shared_members_value = None
        self._include_has_explicit_shared_members_present = False

    @property
    def include_property_groups(self):
        """
        If set to a valid list of template IDs, ``FileMetadata.property_groups``
        is set if there exists property data associated with the file and each
        of the listed templates.

        :rtype: file_properties.TemplateFilterBase
        """
        if self._include_property_groups_present:
            return self._include_property_groups_value
        else:
            return None

    @include_property_groups.setter
    def include_property_groups(self, val):
        if val is None:
            del self.include_property_groups
            return
        self._include_property_groups_validator.validate_type_only(val)
        self._include_property_groups_value = val
        self._include_property_groups_present = True

    @include_property_groups.deleter
    def include_property_groups(self):
        self._include_property_groups_value = None
        self._include_property_groups_present = False

    def _process_custom_annotations(self, annotation_type, field_path, processor):
        super(GetMetadataArg, self)._process_custom_annotations(annotation_type, field_path, processor)

    def __repr__(self):
        return 'GetMetadataArg(path={!r}, include_media_info={!r}, include_deleted={!r}, include_has_explicit_shared_members={!r}, include_property_groups={!r})'.format(
            self._path_value,
            self._include_media_info_value,
            self._include_deleted_value,
            self._include_has_explicit_shared_members_value,
            self._include_property_groups_value,
        )

# Validator instance used by the Stone (de)serialization machinery.
GetMetadataArg_validator = bv.Struct(GetMetadataArg)
class AlphaGetMetadataArg(GetMetadataArg):
    """
    :ivar files.AlphaGetMetadataArg.include_property_templates: If set to a
        valid list of template IDs, ``FileMetadata.property_groups`` is set for
        files with custom properties.
    """

    # NOTE(review): auto-generated by Stone — do not hand-edit the logic.
    # Extends GetMetadataArg with one alpha-endpoint-only optional field.
    __slots__ = [
        '_include_property_templates_value',
        '_include_property_templates_present',
    ]

    _has_required_fields = True

    def __init__(self,
                 path=None,
                 include_media_info=None,
                 include_deleted=None,
                 include_has_explicit_shared_members=None,
                 include_property_groups=None,
                 include_property_templates=None):
        super(AlphaGetMetadataArg, self).__init__(path,
                                                  include_media_info,
                                                  include_deleted,
                                                  include_has_explicit_shared_members,
                                                  include_property_groups)
        self._include_property_templates_value = None
        self._include_property_templates_present = False
        if include_property_templates is not None:
            self.include_property_templates = include_property_templates

    @property
    def include_property_templates(self):
        """
        If set to a valid list of template IDs, ``FileMetadata.property_groups``
        is set for files with custom properties.

        :rtype: list of [str]
        """
        if self._include_property_templates_present:
            return self._include_property_templates_value
        else:
            return None

    @include_property_templates.setter
    def include_property_templates(self, val):
        if val is None:
            del self.include_property_templates
            return
        val = self._include_property_templates_validator.validate(val)
        self._include_property_templates_value = val
        self._include_property_templates_present = True

    @include_property_templates.deleter
    def include_property_templates(self):
        self._include_property_templates_value = None
        self._include_property_templates_present = False

    def _process_custom_annotations(self, annotation_type, field_path, processor):
        super(AlphaGetMetadataArg, self)._process_custom_annotations(annotation_type, field_path, processor)

    def __repr__(self):
        return 'AlphaGetMetadataArg(path={!r}, include_media_info={!r}, include_deleted={!r}, include_has_explicit_shared_members={!r}, include_property_groups={!r}, include_property_templates={!r})'.format(
            self._path_value,
            self._include_media_info_value,
            self._include_deleted_value,
            self._include_has_explicit_shared_members_value,
            self._include_property_groups_value,
            self._include_property_templates_value,
        )

# Validator instance used by the Stone (de)serialization machinery.
AlphaGetMetadataArg_validator = bv.Struct(AlphaGetMetadataArg)
class GetMetadataError(bb.Union):
    """
    This class acts as a tagged union. Only one of the ``is_*`` methods will
    return true. To get the associated value of a tag (if one exists), use the
    corresponding ``get_*`` method.
    """

    # NOTE(review): auto-generated by Stone — do not hand-edit the logic.
    # No catch-all tag: unknown tags from the server cannot be represented.
    _catch_all = None

    @classmethod
    def path(cls, val):
        """
        Create an instance of this class set to the ``path`` tag with value
        ``val``.

        :param LookupError val:
        :rtype: GetMetadataError
        """
        return cls('path', val)

    def is_path(self):
        """
        Check if the union tag is ``path``.

        :rtype: bool
        """
        return self._tag == 'path'

    def get_path(self):
        """
        Only call this if :meth:`is_path` is true.

        :rtype: LookupError
        """
        if not self.is_path():
            raise AttributeError("tag 'path' not set")
        return self._value

    def _process_custom_annotations(self, annotation_type, field_path, processor):
        super(GetMetadataError, self)._process_custom_annotations(annotation_type, field_path, processor)

    def __repr__(self):
        return 'GetMetadataError(%r, %r)' % (self._tag, self._value)

# Validator instance used by the Stone (de)serialization machinery.
GetMetadataError_validator = bv.Union(GetMetadataError)
class AlphaGetMetadataError(GetMetadataError):
    """
    This class acts as a tagged union. Only one of the ``is_*`` methods will
    return true. To get the associated value of a tag (if one exists), use the
    corresponding ``get_*`` method.
    """

    # NOTE(review): auto-generated by Stone — do not hand-edit the logic.
    # Adds the alpha-endpoint-only 'properties_error' tag to GetMetadataError.
    @classmethod
    def properties_error(cls, val):
        """
        Create an instance of this class set to the ``properties_error`` tag
        with value ``val``.

        :param file_properties.LookUpPropertiesError val:
        :rtype: AlphaGetMetadataError
        """
        return cls('properties_error', val)

    def is_properties_error(self):
        """
        Check if the union tag is ``properties_error``.

        :rtype: bool
        """
        return self._tag == 'properties_error'

    def get_properties_error(self):
        """
        Only call this if :meth:`is_properties_error` is true.

        :rtype: file_properties.LookUpPropertiesError
        """
        if not self.is_properties_error():
            raise AttributeError("tag 'properties_error' not set")
        return self._value

    def _process_custom_annotations(self, annotation_type, field_path, processor):
        super(AlphaGetMetadataError, self)._process_custom_annotations(annotation_type, field_path, processor)

    def __repr__(self):
        return 'AlphaGetMetadataError(%r, %r)' % (self._tag, self._value)

# Validator instance used by the Stone (de)serialization machinery.
AlphaGetMetadataError_validator = bv.Union(AlphaGetMetadataError)
class CommitInfo(bb.Struct):
"""
:ivar files.CommitInfo.path: Path in the user's Dropbox to save the file.
:ivar files.CommitInfo.mode: Selects what to do if the file already exists.
:ivar files.CommitInfo.autorename: If there's a conflict, as determined by
``mode``, have the Dropbox server try to autorename the file to avoid
conflict.
:ivar files.CommitInfo.client_modified: The value to store as the
``client_modified`` timestamp. Dropbox automatically records the time at
which the file was written to the Dropbox servers. It can also record an
additional timestamp, provided by Dropbox desktop clients, mobile
clients, and API apps of when the file was actually created or modified.
:ivar files.CommitInfo.mute: Normally, users are made aware of any file
modifications in their Dropbox account via notifications in the client
software. If ``True``, this tells the clients that this modification
shouldn't result in a user notification.
:ivar files.CommitInfo.property_groups: List of custom properties to add to
file.
:ivar files.CommitInfo.strict_conflict: Be more strict about how each
:class:`WriteMode` detects conflict. For example, always return a
conflict error when ``mode`` = ``WriteMode.update`` and the given "rev"
doesn't match the existing file's "rev", even if the existing file has
been deleted.
"""
__slots__ = [
'_path_value',
'_path_present',
'_mode_value',
'_mode_present',
'_autorename_value',
'_autorename_present',
'_client_modified_value',
'_client_modified_present',
'_mute_value',
'_mute_present',
'_property_groups_value',
'_property_groups_present',
'_strict_conflict_value',
'_strict_conflict_present',
]
_has_required_fields = True
def __init__(self,
path=None,
mode=None,
autorename=None,
client_modified=None,
mute=None,
property_groups=None,
strict_conflict=None):
self._path_value = None
self._path_present = False
self._mode_value = None
self._mode_present = False
self._autorename_value = None
self._autorename_present = False
self._client_modified_value = None
self._client_modified_present = False
self._mute_value = None
self._mute_present = False
self._property_groups_value = None
self._property_groups_present = False
self._strict_conflict_value = None
self._strict_conflict_present = False
if path is not None:
self.path = path
if mode is not None:
self.mode = mode
if autorename is not None:
self.autorename = autorename
if client_modified is not None:
self.client_modified = client_modified
if mute is not None:
self.mute = mute
if property_groups is not None:
self.property_groups = property_groups
if strict_conflict | |
<filename>tests/helpers/base_query.py<gh_stars>0
# Copyright (c) 2019 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the FletcherFiltering project
import mysql.connector
import mysql.connector.errorcode as errorcode
from fletcherfiltering.codegen.compiler import Compiler
from fletcherfiltering.common.data_generation import generate_random_data
from .xsim_output_reader import XSIMOutputReader
from fletcherfiltering import settings
from pathlib import Path
from .mysql_type_mapper import MySQLTypeMapper
from . import python_class_generator
from fletcherfiltering.common.helpers.process_runner import ProcessRunner, VivadoHLSProcessRunner
import pyarrow as pa
import numpy as np
import shutil
import os
import ctypes
import copy
import pytest
import platform
import struct
import string
import math
class BaseQuery:
def __init__(self, printer, cnx, working_dir_base: Path, name='query', has_data_file=False, separate_work_dir=False,
             clean_workdir=False):
    """Set up one SQL-vs-hardware comparison test.

    :param printer: callable used for progress output (e.g. ``print``).
    :param cnx: open MySQL connection, or a falsy value to skip SQL parts.
    :param working_dir_base: existing directory under which the per-test
        workspace is created (asserted to exist).
    :param name: test name; also used as the MySQL table name and for
        generated artifact filenames.
    :param has_data_file: when True, the subclass supplies its own data and
        setup() skips random data generation.
    :param separate_work_dir: give this test its own workspace subdirectory.
    :param clean_workdir: wipe and re-create an existing workspace.
    """
    self.printer = printer
    self.cnx = cnx
    assert working_dir_base.is_dir()
    self.data = []
    if self.cnx:
        # Buffered dict cursor so result rows come back as name->value maps.
        self.cursor = cnx.cursor(dictionary=True, buffered=True)
    else:
        self.cursor = None
    self.name = name
    self.has_data_file = has_data_file
    # Schemas and query are filled in by the subclass before setup() runs.
    self.in_schema = None
    self.out_schema = None
    self.in_schema_pk = None
    self.query = None
    self.clean_workdir = clean_workdir
    self.swallow_build_output = settings.SWALLOW_OUTPUT
    if separate_work_dir:
        self.working_dir = working_dir_base / settings.WORKSPACE_NAME / self.name
    else:
        self.working_dir = working_dir_base / settings.WORKSPACE_NAME
def setup(self):
    """Prepare the MySQL table, the workspace directory and the test data.

    Returns True on success; unrecoverable failures end the test via
    ``pytest.fail``.
    """
    if 'sql' in settings.TEST_PARTS:
        # Retry table creation once after dropping a stale table left over
        # from a previous run.
        if not self.create_table():
            if not self.drop_table():
                pytest.fail("Could not drop table successfully.")
            if not self.create_table():
                pytest.fail("Could not create table successfully on second try.")
    #if 'fletcherfiltering' in settings.TEST_PARTS or 'vivado' in settings.TEST_PARTS:
    if not self.working_dir.exists():
        self.printer("Creating workspace directory '{}'".format(self.working_dir))
        self.working_dir.mkdir(parents=True, exist_ok=True)
    else:
        if self.clean_workdir:
            self.printer("Re-creating workspace directory '{}'".format(self.working_dir))
            shutil.rmtree(self.working_dir)
            self.working_dir.mkdir(parents=True, exist_ok=True)
        else:
            self.printer("Using workspace directory '{}'".format(self.working_dir))
    if not self.has_data_file:
        # NOTE(review): the .format() argument below is unused by the literal.
        self.printer("Generating random data...".format(self.working_dir))
        self.data = generate_random_data(self.in_schema, self.in_schema_pk)
        if 'sql' in settings.TEST_PARTS:
            self.insert_data()
    return True
def create_table(self):
    """Create the MySQL table for this test from the input schema.

    Returns False when the CREATE TABLE query fails (typically because the
    table already exists); setup() handles the drop-and-retry.
    """
    assert self.in_schema_pk is not None
    query = """CREATE TABLE `{0}` (
    {1}
    PRIMARY KEY (`{2}`)
    );""".format(self.name, self.get_create_columns(self.in_schema), self.in_schema_pk)
    self.printer("Creating table for test {}".format(self.name))
    if not self.execute_query(query):
        return False
    return True
def insert_data(self):
    """Bulk-insert the generated test data into the MySQL table.

    Returns True on success, False if the INSERT failed; an empty data set
    fails the test outright.
    """
    if not self.data:
        pytest.fail("There is no data.")
    query = """INSERT INTO `{0}` ({1}) VALUES ({2});""".format(
        self.name,
        self.get_insert_columns(self.in_schema),
        self.get_data_columns(self.in_schema))
    self.printer("Inserting {} records into database...".format(len(self.data)))
    if not self.execute_query(query, self.data):
        return False
    self.cnx.commit()
    return True
def save_data(self):
    """Persist self.data twice: as C literals substituted into the generated
    test header (for the HLS testbench) and as an Arrow record batch file
    (for the Fletcher runtime)."""
    self.printer("Saving {} data records to header and record batch...".format(len(self.data)))
    data_placeholder = []
    for data_item in self.data:
        data_item_lst = []
        for col in self.in_schema:
            # Render each value as a C literal with the proper type suffix.
            if col.type == pa.string():
                data_item_text = "\"{}\"".format(data_item[col.name])
            elif col.type == pa.bool_():
                data_item_text = "{}".format('true' if data_item[col.name] else 'false')
            elif col.type == pa.float32():
                data_item_text = "{}f".format(data_item[col.name])
            elif col.type == pa.uint8() or col.type == pa.uint16() or col.type == pa.uint32():
                data_item_text = "{}u".format(data_item[col.name])
            elif col.type == pa.int64():
                data_item_text = "{}ll".format(data_item[col.name])
            elif col.type == pa.uint64():
                data_item_text = "{}ull".format(data_item[col.name])
            elif pa.types.is_timestamp(col.type):
                data_item_text = "{}ull".format(data_item[col.name])
            else:
                data_item_text = "{}".format(data_item[col.name])
            if col.nullable:
                # Nullable columns become {data, valid} aggregate initializers.
                if data_item[col.name] is not None:
                    data_item_lst.append("{{ .data = {0}, .valid = true}}".format(data_item_text))
                else:
                    if col.type == pa.string():
                        default_value = "\"\""
                    else:
                        default_value = "0"
                    data_item_lst.append("{{ .data = {0}, .valid = false}}".format(default_value))
            else:
                data_item_lst.append("{0}".format(data_item_text))
        data_placeholder.append(", ".join(data_item_lst))
    template_data = {
        'data_N_placeholder': len(self.data),
        'data_placeholder': ",\n\t".join(data_placeholder),
    }
    # Substitute the generated literals into the pre-generated header in place.
    with open(self.working_dir / Path('{0}{1}.h'.format(self.name, settings.DATA_SUFFIX)), 'r+') as data_file:
        data_cpp = string.Template(data_file.read())
        data_file.seek(0)
        data_file.write(data_cpp.safe_substitute(template_data))
        data_file.truncate()
    rb_data = []
    for col in self.in_schema:
        # Pick a numpy conversion matching the Arrow column type.
        type_func = (lambda x: x)
        if col.type == pa.float16():
            type_func = np.float16
        elif col.type == pa.float32():
            type_func = np.float32
        elif col.type == pa.float64():
            type_func = np.float64
        elif col.type == pa.int8():
            type_func = np.int8
        elif col.type == pa.uint8():
            type_func = np.uint8
        elif col.type == pa.int16():
            type_func = np.int16
        elif col.type == pa.uint16():
            type_func = np.uint16
        elif col.type == pa.int32():
            type_func = np.int32
        elif col.type == pa.uint32():
            type_func = np.uint32
        elif col.type == pa.int64():
            type_func = np.int64
        elif col.type == pa.uint64():
            type_func = np.uint64
        elif pa.types.is_timestamp(col.type):
            type_func = (lambda x: np.datetime64(x, col.type.unit))
        rb_data.append(pa.array([(type_func(d[col.name]) if d[col.name] is not None else None) for d in self.data], col.type))
    # Create a RecordBatch from the Arrays.
    recordbatch = pa.RecordBatch.from_arrays(rb_data, self.in_schema)
    # Create an Arrow RecordBatchFileWriter.
    writer = pa.RecordBatchFileWriter(self.working_dir / Path('{0}{1}.rb'.format(self.name, settings.DATA_SUFFIX)),
                                      self.in_schema)
    # Write the RecordBatch.
    writer.write(recordbatch)
    writer.close()
def compile(self):
    """Compile the SQL query into HLS C++ plus platform-dependent wrappers."""
    self.printer("Compiling SQL to HLS C++...")
    compiler = Compiler(self.in_schema, self.out_schema)
    # Fletchgen/Vivado/SNAP steps run natively only on Linux; elsewhere
    # fletchgen runs inside Docker and the wrapper/SNAP steps are skipped.
    compiler(query_str=self.query, query_name=self.name, output_dir=self.working_dir,
             extra_include_dirs=settings.HLS_INCLUDE_PATH, hls_include_dirs=[settings.FLETCHER_DIR / settings.FLETCHER_HLS_DIR], extra_link_dirs=settings.HLS_LINK_PATH,
             extra_link_libraries=settings.HLS_LIBS,
             include_fletcher_wrapper=platform.system() == 'Linux',
             run_vivado_hls=platform.system() == 'Linux', run_fletchgen_in_docker=platform.system() != 'Linux',
             include_snap_project=platform.system() == 'Linux')
def build_schema_class(self, schema: pa.Schema, suffix: str):
    """Generate and return a ctypes-compatible class mirroring *schema*.

    The class source is produced as an AST by python_class_generator and
    executed in a throwaway local scope.
    """
    cls_name = "Struct{}{}".format(self.name, suffix)
    cls_ast = python_class_generator.get_class_ast(schema, cls_name)
    local_scope = {}
    code_obj = compile(cls_ast, filename='<dynamic_ast>', mode='exec')
    exec(code_obj, None, local_scope)
    return local_scope[cls_name]
def run_fletcherfiltering(self):
    """Build the generated C++ with CMake, load it via ctypes, and run the
    filter kernel over self.data.

    Returns the list of output rows (dicts) that passed the filter.
    """
    if not self.swallow_build_output:
        cmake_printer = self.printer
    else:
        cmake_printer = lambda val: None
    self.printer("Running CMake Generate...")
    result = ProcessRunner(cmake_printer, ['cmake', '-G', settings.CMAKE_GENERATOR,
                                           '-DCMAKE_BUILD_TYPE={}'.format(settings.BUILD_CONFIG), '.'],
                           shell=False, cwd=self.working_dir)
    if result != 0:
        pytest.fail("CMake Generate exited with code {}".format(result))
    self.printer("Running CMake Build...")
    result = ProcessRunner(cmake_printer, ['cmake', '--build', '.', '--config', settings.BUILD_CONFIG],
                           shell=False, cwd=self.working_dir)
    if result != 0:
        pytest.fail("CMake Build exited with code {}".format(result))
    # ctypes struct classes generated to match the Arrow schemas.
    in_schema_type = self.build_schema_class(self.in_schema, 'In')
    out_schema_type = self.build_schema_class(self.out_schema, 'Out')
    # Platform-specific shared-library naming and loader.
    if platform.system() == 'Darwin':
        lib = ctypes.CDLL(str(self.working_dir / 'libcodegen-{}.dylib'.format(self.name)))
    elif platform.system() == 'Windows':
        lib = ctypes.WinDLL(str(self.working_dir / settings.BUILD_CONFIG / 'codegen-{}.dll'.format(self.name)))
    else:
        lib = ctypes.CDLL(str(self.working_dir / 'libcodegen-{}.so'.format(self.name)))
    fletcherfiltering_test = lib.__getattr__(self.name + settings.TEST_SUFFIX)
    fletcherfiltering_test.restype = ctypes.c_bool
    fletcherfiltering_test.argtypes = [ctypes.POINTER(in_schema_type), ctypes.POINTER(out_schema_type)]
    result_data = []
    in_schema = in_schema_type()
    out_schema = out_schema_type()
    # Pre-allocate output string buffers once; the kernel writes into them.
    for col in self.out_schema:
        if col.type == pa.string():
            setattr(out_schema, col.name,
                    ctypes.cast(ctypes.create_string_buffer(settings.VAR_LENGTH), ctypes.c_char_p))
    for data_item in self.data:
        for col in self.in_schema:
            if col.type == pa.string():
                setattr(in_schema, col.name,
                        ctypes.cast(ctypes.create_string_buffer(data_item[col.name].encode('utf-8', 'replace'),
                                                                size=settings.VAR_LENGTH),
                                    ctypes.c_char_p))
            elif col.type == pa.float16():
                # Pack the halffloat and unpack it as a short.
                setattr(in_schema, col.name, struct.unpack('h', struct.pack('e', data_item[col.name]))[0])
            else:
                setattr(in_schema, col.name, data_item[col.name])
        passed = fletcherfiltering_test(ctypes.byref(in_schema), ctypes.byref(out_schema))
        if passed:
            out_data = {}
            for col in self.out_schema:
                if col.type == pa.string():
                    try:
                        out_data[col.name] = copy.copy(getattr(out_schema, col.name)).decode('utf-8')
                    except UnicodeDecodeError:
                        print(getattr(out_schema, col.name))
                elif col.type == pa.float16():
                    # unpack the data as a short and the unpack that as a halffloat
                    out_data[col.name] = \
                        struct.unpack('e', struct.pack('h', copy.copy(getattr(out_schema, col.name))))[0]
                else:
                    out_data[col.name] = copy.copy(getattr(out_schema, col.name))
            result_data.append(out_data)
    return result_data
def run_vivado(self):
    """Run C/RTL co-simulation through Vivado HLS/XSIM and parse its output.

    Returns None on unsupported platforms (macOS); fails the test when
    Vivado is not configured, exits non-zero, or the co-simulation result
    is not 'PASS'.
    """
    if platform.system() == 'Darwin':
        self.printer("Vivado is not supported on macOS.")
        return None
    if settings.VIVADO_BIN_DIR == '':
        pytest.fail("No Vivado install configured.")
    # Vivado needs its bin dir prepended to PATH and XILINX_VIVADO exported.
    vivado_env = os.environ.copy()
    vivado_env["PATH"] = str(settings.VIVADO_BIN_DIR) + os.pathsep + vivado_env["PATH"]
    vivado_env["XILINX_VIVADO"] = str(settings.VIVADO_DIR)
    if not self.swallow_build_output:
        vivado_printer = self.printer
    else:
        vivado_printer = lambda val: None
    result, sim_result = VivadoHLSProcessRunner(vivado_printer,
                                                [str(settings.VIVADO_HLS_EXEC), '-f',
                                                 str((self.working_dir / 'hls_run_complete.tcl').resolve())],
                                                shell=False, cwd=self.working_dir,
                                                env=vivado_env)
    if result != 0:
        pytest.fail("Failed to run Vivado. Exited with code {}.".format(result))
    self.printer("Vivado reported C/RTL co-simulation result: {}".format(sim_result))
    assert sim_result == 'PASS'
    # Parse the XSIM test-vector files back into Python dicts.
    xor = XSIMOutputReader(self.in_schema, self.out_schema)
    return xor.read(self.working_dir / self.name / 'automated_tests' / 'sim' / 'tv', self.name)
def run_sql(self):
    """Execute the test query on MySQL and return all rows as dicts.

    (Removed a dead ``result_data = []`` that was immediately overwritten.)
    """
    self.cursor.execute(self.query)
    return self.cursor.fetchall()
def run(self):
    """Execute the query on every configured backend and cross-check results.

    MySQL output is the reference; FletcherFiltering (ctypes) and Vivado
    XSIM outputs must match it record-for-record.  Missing backends xfail,
    mismatches fail.  Returns True when every comparison passed.
    """
    self.compile()
    self.save_data()
    if 'sql' in settings.TEST_PARTS:
        self.printer("Executing query on MySQL...")
        sql_data = self.run_sql()
    else:
        sql_data = None
    if (
            platform.system() == 'Darwin' or platform.system() == 'Linux') and 'fletcherfiltering' in settings.TEST_PARTS:
        self.printer("Executing query on FletcherFiltering...")
        fletcher_data = self.run_fletcherfiltering()
    else:
        fletcher_data = None
    if (platform.system() == 'Windows' or platform.system() == 'Linux') and 'vivado' in settings.TEST_PARTS:
        self.printer("Executing query on Vivado XSIM...")
        vivado_data = self.run_vivado()
    else:
        vivado_data = None
    if sql_data is None:
        pytest.xfail("No MySQL data was gathered. Can not compare results.")
    if fletcher_data is None and vivado_data is None:
        pytest.xfail("No implementation data was gathered. Platform possibly unsupported.")
    if fletcher_data is not None:
        self.printer("Verifying the returned FletcherFiltering data...")
        # Compare record counts first, then record-by-record contents.
        if len(fletcher_data) > len(sql_data):
            pytest.fail(
                "FlechterFiltering let too many records through {} vs {}".format(len(fletcher_data), len(sql_data)))
        elif len(fletcher_data) < len(sql_data):
            pytest.fail(
                "FlechterFiltering let too few records through {} vs {}".format(len(fletcher_data), len(sql_data)))
        else:
            for record_set in zip(sql_data, fletcher_data):
                if not self.check_record_set(*record_set):
                    pytest.fail("Item from FletcherFiltering is not the same as item from SQL. \n{}\n{}".format(
                        record_set[0], record_set[1]))
    else:
        self.printer("No FletcherFiltering output data, platform not supported.")
    if vivado_data is not None:
        self.printer("Verifying the returned Vivado XSIM data...")
        if len(vivado_data) > len(sql_data):
            pytest.fail(
                "Vivado XSIM let too many records through {} vs {}".format(len(vivado_data), len(sql_data)))
        elif len(vivado_data) < len(sql_data):
            pytest.fail(
                "Vivado XSIM let too few records through {} vs {}".format(len(vivado_data), len(sql_data)))
        else:
            for record_set in zip(sql_data, vivado_data):
                if not self.check_record_set(*record_set):
                    pytest.fail(
                        "Item from Vivado XSIM is not the same as item from SQL. \n{}\n{}".format(record_set[0],
                                                                                                 record_set[1]))
    else:
        self.printer("No Vivado XSIM output data, platform not supported.")
    return True
| |
json.dumps(dic)
return HttpResponse(json_data)
def findContainersJson(self, currentACL, userID, pageNumber):
    """Return one 10-item page of containers as a JSON array string.

    :param currentACL: ACL of the requesting user (forwarded to ACLManager).
    :param userID: primary key of the requesting Administrator; must be a
        full admin (``adminStatus == 1``).
    :param pageNumber: 1-based page index; each page holds 10 containers.
    """
    admin = Administrator.objects.get(pk=userID)
    if admin.acl.adminStatus != 1:
        return ACLManager.loadError()

    finalPageNumber = (pageNumber * 10) - 10
    endPageNumber = finalPageNumber + 10
    containers = ACLManager.findContainersObjects(currentACL, userID)[finalPageNumber:endPageNumber]

    # Serialize the whole page in one json.dumps call.  The previous manual
    # string concatenation ("[" + item + "," + item + "]") could emit
    # malformed JSON and was easy to get wrong around separators.
    payload = [{'name': items.name, 'admin': items.admin.userName, 'tag': items.tag, 'image': items.image}
               for items in containers]
    return json.dumps(payload)
def doContainerAction(self, userID=None, data=None):
    """Start/stop/restart a Docker container owned by *userID*.

    :param userID: id used for the ownership check.
    :param data: dict with 'name' (container name) and 'action'
        ('start' | 'stop' | 'restart').
    :returns: HttpResponse whose JSON body carries 'containerActionStatus'
        (1 on success, 0 otherwise), 'error_message', and on success the
        container 'status' after the action.
    """
    try:
        name = data['name']
        if ACLManager.checkContainerOwnership(name, userID) != 1:
            return ACLManager.loadErrorJson('containerActionStatus', 0)
        client = docker.from_env()
        dockerAPI = docker.APIClient()
        action = data['action']
        try:
            container = client.containers.get(name)
        except docker.errors.NotFound as err:
            data_ret = {'containerActionStatus': 0, 'error_message': 'Container does not exist'}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
        except:
            # NOTE(review): bare except also masks programming errors here.
            data_ret = {'containerActionStatus': 0, 'error_message': 'Unknown'}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
        try:
            if action == 'start':
                container.start()
            elif action == 'stop':
                container.stop()
            elif action == 'restart':
                container.restart()
            else:
                data_ret = {'containerActionStatus': 0, 'error_message': 'Unknown Action'}
                json_data = json.dumps(data_ret)
                return HttpResponse(json_data)
        except docker.errors.APIError as err:
            data_ret = {'containerActionStatus': 0, 'error_message': str(err)}
            json_data = json.dumps(data_ret)
            return HttpResponse(json_data)
        # NOTE(review): this blocks the request thread for 3 seconds.
        time.sleep(3)  # Wait 3 seconds for container to finish starting/stopping/restarting
        status = container.status
        data_ret = {'containerActionStatus': 1, 'error_message': 'None', 'status': status}
        json_data = json.dumps(data_ret)
        return HttpResponse(json_data)
    except BaseException as msg:
        data_ret = {'containerActionStatus': 0, 'error_message': str(msg)}
        json_data = json.dumps(data_ret)
        return HttpResponse(json_data)
def getContainerStatus(self, userID=None, data=None):
try:
name = data['name']
if ACLManager.checkContainerOwnership(name, userID) != 1:
return ACLManager.loadErrorJson('containerStatus', 0)
client = docker.from_env()
dockerAPI = docker.APIClient()
try:
container = client.containers.get(name)
except docker.errors.NotFound as err:
data_ret = {'containerStatus': 0, 'error_message': 'Container does not exist'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except:
data_ret = {'containerStatus': 0, 'error_message': 'Unknown'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
status = container.status
data_ret = {'containerStatus': 1, 'error_message': 'None', 'status': status}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except BaseException as msg:
data_ret = {'containerStatus': 0, 'error_message': str(msg)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
def exportContainer(self, request=None, userID=None, data=None):
try:
name = request.GET.get('name')
if ACLManager.checkContainerOwnership(name, userID) != 1:
return ACLManager.loadErrorJson('containerStatus', 0)
client = docker.from_env()
dockerAPI = docker.APIClient()
try:
container = client.containers.get(name)
except docker.errors.NotFound as err:
data_ret = {'containerStatus': 0, 'error_message': 'Container does not exist'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except:
data_ret = {'containerStatus': 0, 'error_message': 'Unknown'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
eFile = container.export() # Export with default chunk size
response = HttpResponse(eFile, content_type='application/force-download')
response['Content-Disposition'] = 'attachment; filename="' + name + '.tar"'
return response
except BaseException as msg:
data_ret = {'containerStatus': 0, 'error_message': str(msg)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
def getContainerTop(self, userID=None, data=None):
try:
name = data['name']
if ACLManager.checkContainerOwnership(name, userID) != 1:
return ACLManager.loadErrorJson('containerTopStatus', 0)
client = docker.from_env()
dockerAPI = docker.APIClient()
try:
container = client.containers.get(name)
except docker.errors.NotFound as err:
data_ret = {'containerTopStatus': 0, 'error_message': 'Container does not exist'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except:
data_ret = {'containerTopStatus': 0, 'error_message': 'Unknown'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
try:
top = container.top()
except docker.errors.APIError as err:
data_ret = {'containerTopStatus': 0, 'error_message': str(err)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
data_ret = {'containerTopStatus': 1, 'error_message': 'None', 'processes': top}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except BaseException as msg:
data_ret = {'containerTopStatus': 0, 'error_message': str(msg)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
def assignContainer(self, userID=None, data=None):
try:
# Todo: add check only for super user i.e. main admin
admin = Administrator.objects.get(pk=userID)
if admin.acl.adminStatus != 1:
return ACLManager.loadErrorJson('assignContainerStatus', 0)
client = docker.from_env()
dockerAPI = docker.APIClient()
name = data['name']
dockerOwner = data['admin']
admin = Administrator.objects.get(userName=dockerOwner)
try:
container = client.containers.get(name)
except docker.errors.NotFound as err:
data_ret = {'assignContainerStatus': 0, 'error_message': 'Container does not exist'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except:
data_ret = {'assignContainerStatus': 0, 'error_message': 'Unknown'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
con = Containers(admin=admin,
name=name,
cid=container.id)
con.save()
data_ret = {'assignContainerStatus': 1, 'error_message': 'None'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except BaseException as msg:
data_ret = {'assignContainerStatus': 0, 'error_message': str(msg)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
def searchImage(self, userID=None, data=None):
try:
admin = Administrator.objects.get(pk=userID)
if admin.acl.adminStatus != 1:
return ACLManager.loadErrorJson('searchImageStatus', 0)
client = docker.from_env()
dockerAPI = docker.APIClient()
string = data['string']
try:
matches = client.images.search(term=string)
except docker.errors.APIError as err:
data_ret = {'searchImageStatus': 0, 'error_message': str(err)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except:
data_ret = {'searchImageStatus': 0, 'error_message': 'Unknown'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
print(json.dumps(matches))
for image in matches:
if "/" in image['name']:
image['name2'] = image['name'].split("/")[0] + ":" + image['name'].split("/")[1]
else:
image['name2'] = image['name']
data_ret = {'searchImageStatus': 1, 'error_message': 'None', 'matches': matches}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except BaseException as msg:
data_ret = {'searchImageStatus': 0, 'error_message': str(msg)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
def images(self, request=None, userID=None, data=None):
try:
admin = Administrator.objects.get(pk=userID)
if admin.acl.adminStatus != 1:
return ACLManager.loadError()
client = docker.from_env()
dockerAPI = docker.APIClient()
try:
imageList = client.images.list()
except docker.errors.APIError as err:
return HttpResponse(str(err))
images = {}
names = []
for image in imageList:
try:
name = image.attrs['RepoTags'][0].split(":")[0]
if "/" in name:
name2 = ""
for item in name.split("/"):
name2 += ":" + item
else:
name2 = name
tags = []
for tag in image.tags:
getTag = tag.split(":")
if len(getTag) == 2:
tags.append(getTag[1])
print(tags)
if name in names:
images[name]['tags'].extend(tags)
else:
names.append(name)
images[name] = {"name": name,
"name2": name2,
"tags": tags}
except:
continue
return render(request, 'dockerManager/images.html', {"images": images, "test": ''})
except BaseException as msg:
return HttpResponse(str(msg))
def manageImages(self, request=None, userID=None, data=None):
try:
currentACL = ACLManager.loadedACL(userID)
if currentACL['admin'] == 1:
pass
else:
return ACLManager.loadError()
client = docker.from_env()
dockerAPI = docker.APIClient()
imageList = client.images.list()
images = {}
names = []
for image in imageList:
try:
name = image.attrs['RepoTags'][0].split(":")[0]
if name in names:
images[name]['tags'].extend(image.tags)
else:
names.append(name)
images[name] = {"name": name,
"tags": image.tags}
except:
continue
return render(request, 'dockerManager/manageImages.html', {"images": images})
except BaseException as msg:
return HttpResponse(str(msg))
def getImageHistory(self, userID=None, data=None):
try:
name = data['name']
client = docker.from_env()
dockerAPI = docker.APIClient()
try:
image = client.images.get(name)
except docker.errors.APIError as err:
data_ret = {'imageHistoryStatus': 0, 'error_message': str(err)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except:
data_ret = {'imageHistoryStatus': 0, 'error_message': 'Unknown'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
data_ret = {'imageHistoryStatus': 1, 'error_message': 'None', 'history': image.history()}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except BaseException as msg:
data_ret = {'imageHistoryStatus': 0, 'error_message': str(msg)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
def removeImage(self, userID=None, data=None):
try:
admin = Administrator.objects.get(pk=userID)
if admin.acl.adminStatus != 1:
return ACLManager.loadError()
client = docker.from_env()
dockerAPI = docker.APIClient()
name = data['name']
try:
if name == 0:
action = client.images.prune()
else:
action = client.images.remove(name)
print(action)
except docker.errors.APIError as err:
data_ret = {'removeImageStatus': 0, 'error_message': str(err)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except:
data_ret = {'removeImageStatus': 0, 'error_message': 'Unknown'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
data_ret = {'removeImageStatus': 1, 'error_message': 'None'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except BaseException as msg:
data_ret = {'removeImageStatus': 0, 'error_message': str(msg)}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
# Internal function for recreating containers
def doRecreateContainer(self, userID, data, con):
try:
client = docker.from_env()
dockerAPI = docker.APIClient()
name = data['name']
unlisted = data['unlisted'] # Pass this as 1 if image is not known for container
image = data['image']
tag = data['tag']
env = data['env']
volumes = data['volumes']
port = data['ports']
memory = data['memory']
if image == 'unknown':
return "Image name not known"
# Call container delete function
delStatus = self.submitContainerDeletion(userID, data, True)
if delStatus != 0:
return delStatus
containerArgs = {'image': image + ":" + tag,
'detach': True,
'name': name,
'ports': port,
'environment': env,
'volumes': volumes,
'publish_all_ports': True,
'mem_limit': memory * 1048576}
if con.startOnReboot == 1:
containerArgs['restart_policy'] = {"Name": "always"}
container = client.containers.create(**containerArgs)
con.cid = container.id
con.save()
return 0
except BaseException as msg:
return str(msg)
def saveContainerSettings(self, userID=None, data=None):
try:
name = data['name']
if ACLManager.checkContainerOwnership(name, userID) != 1:
return ACLManager.loadErrorJson('saveSettingsStatus', 0)
client = docker.from_env()
dockerAPI = docker.APIClient()
memory = data['memory']
startOnReboot = data['startOnReboot']
envList = data['envList']
volList = data['volList']
if startOnReboot == True:
startOnReboot = 1
rPolicy = {"Name": "always"}
else:
startOnReboot = 0
rPolicy = {}
try:
container = client.containers.get(name)
except docker.errors.NotFound as err:
data_ret = {'saveSettingsStatus': 0, 'error_message': 'Container does not exist'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
except:
data_ret = {'saveSettingsStatus': 0, 'error_message': 'Unknown'}
json_data = json.dumps(data_ret)
return HttpResponse(json_data)
try:
container.update(mem_limit=memory * 1048576,
restart_policy=rPolicy)
except docker.errors.APIError as err:
data_ret | |
import numpy as np
import pickle as pkl
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import logging
import sys, os
sys.path.append(os.getcwd())
try:
from weak_learner import *
from label_encoder import LabelEncoder, OneHotEncoder, AllPairsEncoder
from datasets import MNISTDataset
from callbacks import CallbacksManagerIterator, Step
from callbacks import ModelCheckpoint, CSVLogger, Progression, BestRoundTrackerCallback
from callbacks import (BreakOnMaxStepCallback, BreakOnPerfectTrainAccuracyCallback,
BreakOnPlateauCallback, BreakOnZeroRiskCallback)
from utils import *
except ModuleNotFoundError:
from .weak_learner import *
from .label_encoder import LabelEncoder, OneHotEncoder, AllPairsEncoder
from .datasets import MNISTDataset
from .callbacks import CallbacksManagerIterator, Step
from .callbacks import ModelCheckpoint, CSVLogger, Progression, BestRoundTrackerCallback
from .callbacks import (BreakOnMaxStepCallback, BreakOnPerfectTrainAccuracyCallback,
BreakOnPlateauCallback, BreakOnZeroRiskCallback)
from .utils import *
class _QuadBoost:
"""
QuadBoost is a boosting algorithm based on the squared loss. Provided with a (weak) learner, the model builds upon a collection of them to be able to make strong predictions. The algorithm has strong guarantees and a quadratic convergence.
AdaBoost is another known algorithm which rely on boosting weak learners. As opposed to QuadBoost, it uses the exponential loss. Indeed, using the squared loss provides many advantages, such as having an exact, solvable minimum at each iteration.
"""
def __init__(self, weak_learner, encoder=None):
"""
Args:
weak_learner (Object that defines the 'fit' method and the 'predict' method): Weak learner that generates weak predictors to be boosted on.
encoder (LabelEncoder object, optional): Object that encodes the labels to provide an easier separation problem. If None, a one-hot encoding is used.
"""
self.weak_learner = weak_learner
self.encoder = encoder
self.best_round = None
def algorithm(self, *args, **kwargs):
raise NotImplementedError
def fit(self, X, Y, f0=None,
max_round_number=None, patience=None, break_on_perfect_train_acc=False,
X_val=None, Y_val=None,
callbacks=None,
**weak_learner_fit_kwargs):
"""
Function that fits the model to the data.
The function is split into two parts: the first prepare the data and the callbacks, the second, done in _fit, actually executes the algorithm. The iteration and the callbacks are handled by a CallbacksManagerIterator.
Args:
X (Array of shape (n_examples, ...)): Examples.
Y (Iterable of 'n_examples' elements): Labels for the examples X. Y is encoded with the encode_labels method if one is provided, else it is transformed as one-hot vectors.
f0 (Array of shape (encoding_dim,), optional, default=None): Initial prediction function. If None, f0 is set to 0.
max_round_number (int, optional, default=-1): Maximum number of boosting rounds. If None, the algorithm will boost indefinitely, until reaching a perfect training accuracy (if True), or until the training accuracy does not improve for 'patience' consecutive boosting rounds (if not None).
patience (int, optional, default=None): Number of boosting rounds before terminating the algorithm when the training accuracy shows no improvements. If None, the boosting rounds will continue until max_round_number iterations (if not None).
break_on_perfect_train_acc (Boolean, optional, default=False): If True, it will stop the iterations if a perfect train accuracy of 1.0 is achieved.
X_val (Array of shape (n_val, ...), optional, default=None): Validation examples. If not None, the validation accuracy will be evaluated at each boosting round.
Y_val (Iterable of 'n_val' elements, optional, default=None): Validation labels for the examples X_val. If not None, the validation accuracy will be evaluated at each boosting round.
callbacks (Iterable of Callback objects, optional, default=None): Callbacks objects to be called at some specific step of the training procedure to execute something. Ending conditions of the boosting iteration are handled with BreakCallbacks. If callbacks contains BreakCallbacks and terminating conditions (max_round_number, patience, break_on_perfect_train_acc) are not None, all conditions will be checked at each round and the first that is not verified will stop the iteration.
weak_learner_fit_kwargs: Keyword arguments to pass to the fit method of the weak learner.
Returns self.
"""
# Encodes the labels
if self.encoder == None:
self.encoder = OneHotEncoder(Y)
encoded_Y, weights = self.encoder.encode_labels(Y)
# Initialization
self.weak_predictors = []
self.weak_predictors_weights = []
if f0 == None:
self.f0 = np.zeros(self.encoder.encoding_dim)
else:
self.f0 = f0
residue = encoded_Y - self.f0
# Callbacks
if callbacks is None:
callbacks = [Progression()]
elif not any(isinstance(callback, Progression) for callback in callbacks):
callbacks.append(Progression())
if not any(isinstance(callback, BestRoundTrackerCallback) for callback in callbacks):
if X_val is not None and Y_val is not None:
callbacks.append(BestRoundTrackerCallback(quantity='valid_acc'))
else:
callbacks.append(BestRoundTrackerCallback(quantity='train_acc'))
if break_on_perfect_train_acc:
callbacks.append(BreakOnPerfectTrainAccuracyCallback())
if max_round_number:
callbacks.append(BreakOnMaxStepCallback(max_step_number=max_round_number))
if patience:
callbacks.append(BreakOnPlateauCallback(patience=patience))
self.callbacks = callbacks
self._fit(X, Y, residue, weights, X_val, Y_val, **weak_learner_fit_kwargs)
return self
def _fit(self, X, Y, residue, weights, X_val, Y_val, **weak_learner_fit_kwargs):
encoded_Y_pred = self.predict_encoded(X)
encoded_Y_val_pred = self.predict_encoded(X_val) if X_val is not None else None
starting_round = BoostingRound(len(self.weak_predictors))
boost_manager = CallbacksManagerIterator(self, self.callbacks, starting_round)
qb_algo = self.algorithm(boost_manager, self.encoder, self.weak_learner,
X, Y, residue, weights, encoded_Y_pred,
X_val, Y_val, encoded_Y_val_pred)
qb_algo.fit(self.weak_predictors, self.weak_predictors_weights, **weak_learner_fit_kwargs)
def resume_fit(self, X, Y, X_val=None, Y_val=None, max_round_number=None, **weak_learner_fit_kwargs):
"""
Function to resume a previous fit uncompleted, with the same callbacks and ending conditions. See 'fit' for a description of the arguments.
The condition on the maximum number of round can be modified or added by specifying max_round_number.
Returns self.
"""
if not hasattr(self, 'weak_predictors'):
logging.error("Can't resume fit if no previous fitting made. Use 'fit' instead.")
return self
if max_round_number:
for callback in self.callbacks:
if isinstance(callback, BreakOnMaxStepCallback):
callback.max_step_number = max_round_number
break
else:
self.callbacks.append(BreakOnMaxStepCallback(max_round_number))
encoded_Y, weights = self.encoder.encode_labels(Y)
residue = encoded_Y - self.f0 - self.predict_encoded(X)
self._fit(X, Y, residue, weights, X_val, Y_val, **weak_learner_fit_kwargs)
return self
def predict(self, X, mode='best'):
"""
Returns the predicted labels of the given sample.
Args:
X (Array of shape(n_examples, ...)): Examples to predict.
mode (str, either 'last' or 'best, optional): Mode of prediction. If 'best' (default), will use only the weak_predictors up to the best round (according to the BestRoundTrackerCallback). If 'last', all weak_predictors found during training are used.
Returns Y_pred (Array of shape (n_examples))
"""
return self.encoder.decode_labels(self.predict_encoded(X, mode))
def predict_encoded(self, X, mode='last'):
"""
Returns the predicted encoded labels of the given sample. Can be decoded with the LabelEncoder.decode_labels() method.
Args:
X (Array of shape(n_examples, ...)): Examples to predict.
mode (str, either 'last' or 'best, optional): Mode of prediction. If 'last' (default), all weak_predictors found during training are used. If 'best', will use only the weak_predictors up to the best round (according to the BestRoundTrackerCallback).
Returns encoded_Y_pred (Array of shape (n_examples, encoding_dim))
"""
encoded_Y_pred = np.zeros((X.shape[0], self.encoder.encoding_dim)) + self.f0
wp_weights, wps = self.weak_predictors_weights, self.weak_predictors
if mode == 'best':
best = self.best_round.step_number + 1
wp_weights, wps = wp_weights[:best], wps[:best]
for wp_weight, wp in zip(wp_weights, wps):
encoded_Y_pred += wp_weight * wp.predict(X)
return encoded_Y_pred
def evaluate(self, X, Y, return_risk=False, mode='best'):
"""
Evaluates the accuracy of the classifier given a sample and its labels.
Args:
X (Array of shape(n_examples, ...)): Examples to predict.
Y (Array of shape (n_examples)): True labels.
return_risk (bool, optional): If True, additionally returns the (non normalized) risk of the examples.
mode (str, either 'last' or 'best, optional): Mode of prediction. If 'best' (default), will use only the weak_predictors up to the best round (according to the BestRoundTrackerCallback). If 'last', all weak_predictors found during training are used.
Returns the accuracy (float) or a tuple of (accuracy (float), risk (float))
"""
encoded_Y_pred = self.predict_encoded(X, mode)
Y_pred = self.encoder.decode_labels(encoded_Y_pred)
accuracy = accuracy_score(y_true=Y, y_pred=Y_pred)
if return_risk:
encoded_Y, W = self.encoder.encode_labels(Y)
risk = np.sum(W * (encoded_Y - self.f0 - encoded_Y_pred)**2)
return accuracy if not return_risk else (accuracy, risk)
@staticmethod
def load(filename):
with open(filename, 'rb') as file:
model = pkl.load(file)
return model
class QuadBoostMH(_QuadBoost):
    __doc__ = _QuadBoost.__doc__

    def algorithm(self, *args, **kwargs):
        # Plain (non confidence-rated) multi-label boosting algorithm.
        return QuadBoostMHAlgorithm(*args, **kwargs)
class QuadBoostMHCR(_QuadBoost):
    __doc__ = _QuadBoost.__doc__

    def __init__(self, confidence_rated_weak_learner, encoder=None, dampening=1):
        """
        Args:
            confidence_rated_weak_learner (object defining 'fit' and 'predict' methods): Weak learner producing confidence rated weak predictors to boost on.
            encoder (LabelEncoder object, optional, default=None): Encoder mapping the labels to an easier separation problem; when None, a one-hot encoding is used.
            dampening (float in ]0,1] ): Factor applied to the weak predictors' weights; slows convergence so the algorithm can boost longer.
        """
        super().__init__(confidence_rated_weak_learner, encoder)
        self.dampening = dampening

    def algorithm(self, *args, **kwargs):
        # Confidence-rated variant; forwards the dampening factor.
        return QuadBoostMHCRAlgorithm(*args, dampening=self.dampening, **kwargs)
class _QuadBoostAlgorithm:
"""
This is an implementation of the QuadBoost algorithm. It is intended to be used inside the QuadBoost class API and not as is.
"""
def __init__(self, boost_manager, encoder, weak_learner,
X, Y, residue, weights, | |
#!/usr/bin/env python
"""Stitch together the same HiRISE color-filter CCDs from an observation to
create a single mosaicked image file.
This program:
- Optionally performs cubenorm processing on each CCD using a training area
that has been selected by the user. Cubenorm processing
is required whenever radiometric problems, such as vertical
striping, remain after the radiometric calibration step in the
HiCal Pipeline. The cubenorm processing is described in the
ISIS program cubenorm.
- Balance the CCD products, created by HiStitch, to radiometrically match.
A multiplicative constant is applied to each CCD to force the
overlapping areas of adjacent CCDs to be identical. The resulting
balanced CCD products, have the naming convention ``*.balance.cub``.
These products are used by subsequent pipeline processing for
creating color products, RDR products, and DTM products.
- Join (stitch together) the CCD products to form an image of the entire
observation.
HiccdStitch is the fourth step in the HiRISE processing chain, after
HiStitch. If an observation has RED, IR and BG CCDs, then this program would
need to be run once for each set.
Data Flow
---------
Input Products:
- ``.cub`` files of the same color (RED, IR, or BG) which are the result of
HiStitch.
- ``.json`` files that start with the CCD ID of each source .cub file.
Output Products:
- A stitched ``.cub`` file for that color.
- A ``.balance.cub`` file for each input cube file.
- A ``.json`` file with summary information about the stitched image.
"""
# Copyright 2006-2020, Arizona Board of Regents on behalf of the Lunar and
# Planetary Laboratory at the University of Arizona.
# - Original Perl program.
#
# Copyright 2020, <NAME> (<EMAIL>)
# - Elements of this Python program are based on the original Perl
# but the logic here is rewritten from scratch to emulate functionality.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This program is based on HiccdStitch version 2.4.8 (2020/11/03),
# and on the Perl HiccdStitch program ($Revision: 1.45 $
# $Date: 2020/11/03 23:44:15 $)
# by <NAME> as an employee of the University of Arizona.
import argparse
import csv
import itertools
import json
import logging
import operator
import os
import pkg_resources
import pvl
import statistics
import subprocess
import sys
from pathlib import Path
import kalasiris as isis
import hiproc.hirise as hirise
import hiproc.util as util
logger = logging.getLogger(__name__)
class HiccdStitchCube(hirise.CCDID):
    """A class for HiRISE CCD IDs with additional capabilities for
    HiccdStitch."""
    def __init__(self, pathlike, cubenormstep=False):
        # *path* is the input cube; *nextpath* is advanced as pipeline
        # steps produce intermediate products from this cube.
        self.path = Path(pathlike)
        self.nextpath = self.path
        super().__init__(hirise.get_CCDID_fromfile(self.path))
        # Image geometry read from the ISIS cube label:
        # samples (width), lines (height) and binning (Summing).
        self.ns = int(isis.getkey_k(self.path, "Dimensions", "Samples"))
        self.nl = int(isis.getkey_k(self.path, "Dimensions", "Lines"))
        self.bin = int(isis.getkey_k(self.path, "Instrument", "Summing"))
        # Whether cubenorm processing was requested for this CCD.
        self.cubenormstep = cubenormstep
        self.cubenorm_stddev = None
        # Cubenorm training-area line range (set by set_cubenorm_lines).
        self.sl_cubenorm = None
        self.nl_cubenorm = None
        # Left/right crop windows for the balance step (set_balance).
        self.ss_balance_left = None
        self.ns_balance_left = None
        self.ss_balance_right = None
        self.ns_balance_right = None
        self.sl_balance = None
        self.nl_balance = None
        # Sample/line magnification — presumably set by later pipeline
        # steps outside this chunk; TODO confirm.
        self.smag = None
        self.lmag = None
        # Paths and statistics for the left/right crop products.
        self.ls_path = None
        self.rs_path = None
        self.lm_path = None
        self.rm_path = None
        self.rstats = None
        self.lstats = None
        self.correction = None
        # Populated from the channel .json DB files (gather_from_db).
        self.hical_status = None
        self.snr_list = list()
    def __repr__(self):
        return f"{self.__class__.__name__}('{self.path}')"
    def set_cubenorm_lines(self, skiptop, skipbot, sline, eline, minbin):
        # For the cubenorm process, determine starting line and number of lines
        # Default training area: whole image minus the binned top/bottom
        # skip margins.
        start_line = skiptop / self.bin + 1
        end_line = self.nl - skipbot / self.bin
        # Margins overlap: fall back to the full image.
        if end_line < start_line:
            start_line = 1
            end_line = self.nl
        # User-supplied lines are given at *minbin* scale and are
        # rescaled to this CCD's binning before range-checking.
        if sline is not None and sline > 0:
            start_line = int(sline * (minbin / self.bin))
            if start_line > self.nl:
                raise ValueError(
                    f"The effective starting line ({sline}) of "
                    "the training area is outside the range of "
                    f"the image ({self.path})"
                )
        if eline is not None and eline > 0:
            end_line = int(eline * (minbin / self.bin))
            if end_line > self.nl:
                raise ValueError(
                    f"The effective ending line ({eline}) of "
                    "the training area is outside the range of "
                    f"the image ({self.path})"
                )
        # Both user values survived range checks but are inconsistent
        # with each other.
        if end_line < start_line:
            raise RuntimeError(
                "Something wrong with the calculated training "
                "area: end_line < start_line for cubenorm "
                "trainging area. The calculated start, end "
                f"lines are: {start_line}, {end_line}."
            )
        self.sl_cubenorm = int(start_line)
        self.nl_cubenorm = int(end_line - start_line + 1)
        return (self.sl_cubenorm, self.nl_cubenorm)
    def set_balance(
        self, skiptop: int, skipbot: int, area: dict, minbinlines: int
    ):
        # For balance cube process, determine the crop area for left
        # and right crop areas and the scaling needed for each crop are.
        # *area* maps binning -> (skip, samps): how many samples to skip
        # from each edge and how many samples each balance window spans.
        (skip, samps) = area[self.bin]
        self.ss_balance_left = int(skip + 1)
        self.ns_balance_left = int(samps)
        self.ss_balance_right = int(self.ns + 1 - skip - samps)
        self.ns_balance_right = int(samps)
        # Line range shared by both windows, scaled to this binning.
        sl_bal = skiptop / self.bin + 1
        nl_bal = minbinlines / self.bin - skipbot / self.bin - sl_bal + 1
        # Margins consume everything: use all lines instead.
        if nl_bal < 1:
            sl_bal = 1
            nl_bal = minbinlines / self.bin
        self.sl_balance = int(sl_bal)
        self.nl_balance = int(nl_bal)
        return
    def set_ls_path(self, p: Path):
        # Left-side crop product path (stats and mask share it).
        self.ls_path = p
        self.lm_path = p
    def set_rs_path(self, p: Path):
        # Right-side crop product path (stats and mask share it).
        self.rs_path = p
        self.rm_path = p
    def gather_from_db(self, dbs: list = None):
        """There is some data that we need to pull from the Channel DB files,
        which this method finds and extracts."""
        bad_flag = False
        if dbs is None:
            dbs = list()
            # Scan the parent directory of cube for any .json DB files that
            # match the CCD name
            for p in self.path.parent.glob(str(self) + "*.json"):
                with open(p, "r") as f:
                    dbs.append(json.load(f))
        for db in dbs:
            if "hical_status" in db:
                # A single BadCal channel taints the whole CCD, applied
                # after the loop so it overrides any good status.
                if "BadCal" == db["hical_status"]:
                    bad_flag = True
                else:
                    self.hical_status = db["hical_status"]
            if "IMAGE_SIGNAL_TO_NOISE_RATIO" in db:
                self.snr_list.append(float(db["IMAGE_SIGNAL_TO_NOISE_RATIO"]))
        if bad_flag:
            self.hical_status = "BadCal"
        return
def arg_parser():
    """Build and return the command-line ArgumentParser for HiccdStitch."""
    def _flag(s):
        # Bug fix: the previous 'type=bool' is an argparse trap — bool()
        # on any non-empty string (including '0' and 'False') is True,
        # so the documented '--cubenorm 0 0 1 1' usage could never
        # disable processing. Parse the tokens explicitly instead.
        v = s.strip().lower()
        if v in ("1", "true", "t", "yes", "y"):
            return True
        if v in ("0", "false", "f", "no", "n"):
            return False
        raise argparse.ArgumentTypeError(f"invalid boolean value: {s!r}")

    parser = argparse.ArgumentParser(
        description=__doc__,
        parents=[util.parent_parser()],
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "-o", "--output",
        required=False,
        default=".HiccdStitch.cub",
        help="The name of the output .cub file to write. Optionally, if "
             "it starts with a '.' it is considered a suffix"
             "and will be added to the Observation ID of the "
             "input files. Default: %(default)s",
    )
    parser.add_argument(
        "-c",
        "--conf",
        required=False,
        type=argparse.FileType('r'),
        default=pkg_resources.resource_stream(
            __name__,
            'data/HiccdStitch.conf',
        ),
        help="Path to the HiccdStitch config file. Defaults to "
             "HiccdStitch.conf distributed with the library."
    )
    parser.add_argument(
        "--db",
        required=False,
        default=".HiCat.json",
        help="The .json file to output. Optionally, if it "
             "starts with a '.' it is considered an extension "
             "and will be swapped with the output file's extension "
             "to determine the .json filename to use. Default: %(default)s",
    )
    parser.add_argument(
        "--sline",
        required=False,
        default=None,
        type=int,
        help="If given, will be used as the starting line to crop the image "
             "at in order to create a training area to use for cubenorm "
             "processing."
    )
    parser.add_argument(
        "--eline",
        required=False,
        default=None,
        type=int,
        help="If given, will be used as the ending line for cubenorm cropping, "
             "see --sline for more information."
    )
    parser.add_argument(
        "--cubenorm",
        required=False,
        nargs="+",
        type=_flag,
        help="To engage cubenorm processing a list of true or false values "
             "(could be 0 or 1) that must match the number of input cubes"
             "must be given to indicate which of the input cubes should "
             "have cubenorm processing applied. If you only had four input"
             "cubes, then ``--cubenorm 0 0 1 1`` would not run cubenorm "
             "processing on the first two cubes, but would run it on the last"
             "two, etc. The default is not to run this processing on any."
    )
    parser.add_argument(
        "cubes",
        metavar="cub-file",
        nargs="+",
        help="Cubes to assemble, which are presumably the output of HiStitch. "
             "They must all be from the same detectors, so either all RED, "
             "all IR, or all BG cubes."
    )
    return parser
def main():
# The Original Perl took a .pvl file as input which mostly just had the
# filenames of the ccd files to stitch together. We'll just take those
# on the command line and into args.cubes.
args = arg_parser().parse_args()
util.set_logger(args.verbose, args.logfile, args.log)
# outcub_path = set_outcube(args.output, pid0)
if args.cubenorm is not None:
if len(args.cubenorm) != len(args.cubes):
logger.critical(
f"The number of cubes ({len(args.cubes)}) and "
"the number of cubenorm flags given "
f"({len(args.cubenorm)}) did | |
yang_name="messages-sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
self.__messages_received = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="messages-received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
self.__errors_received = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="errors-received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['system', 'aaa', 'server-groups', 'server-group', 'servers', 'server', 'state']
  def _get_name(self):
    """
    Getter method for name, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/name (string)

    YANG Description: Name assigned to the server
    """
    # Returns the YANGDynClass-wrapped leaf value (name-mangled private attr).
    return self.__name
  def _set_name(self, v, load=False):
    """
    Setter method for name, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_name() directly.

    YANG Description: Name assigned to the server
    """
    # Unwrap values that were already wrapped by a previous YANGDynClass pass.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap so type/restriction checking and path registration happen.
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """name must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='string', is_config=False)""",
        })
    self.__name = t
    # Invoke the change hook when the generated class defines one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_name(self):
    # Reset the leaf back to a fresh (empty) wrapped default.
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='string', is_config=False)
  def _get_address(self):
    """
    Getter method for address, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/address (oc-inet:ip-address)

    YANG Description: Address of the authentication server
    """
    # Returns the YANGDynClass-wrapped leaf value (name-mangled private attr).
    return self.__address
  def _set_address(self, v, load=False):
    """
    Setter method for address, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/address (oc-inet:ip-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_address is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_address() directly.

    YANG Description: Address of the authentication server
    """
    # Unwrap values that were already wrapped by a previous YANGDynClass pass.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # The two RestrictedClassType patterns accept IPv4 then IPv6 textual forms.
      t = YANGDynClass(v,base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))$'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-inet:ip-address', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """address must be of a type compatible with oc-inet:ip-address""",
          'defined-type': "oc-inet:ip-address",
          'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))$'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-inet:ip-address', is_config=False)""",
        })
    self.__address = t
    # Invoke the change hook when the generated class defines one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_address(self):
    # Reset the leaf back to a fresh (empty) wrapped default.
    self.__address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))$'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-inet:ip-address', is_config=False)
  def _get_timeout(self):
    """
    Getter method for timeout, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/timeout (uint16)

    YANG Description: Set the timeout in seconds on responses from the AAA
    server
    """
    # Returns the YANGDynClass-wrapped leaf value (name-mangled private attr).
    return self.__timeout
  def _set_timeout(self, v, load=False):
    """
    Setter method for timeout, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/timeout (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_timeout is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_timeout() directly.

    YANG Description: Set the timeout in seconds on responses from the AAA
    server
    """
    # Unwrap values that were already wrapped by a previous YANGDynClass pass.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap so the uint16 range restriction is enforced.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """timeout must be of a type compatible with uint16""",
          'defined-type': "uint16",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint16', is_config=False)""",
        })
    self.__timeout = t
    # Invoke the change hook when the generated class defines one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_timeout(self):
    # Reset the leaf back to a fresh (empty) wrapped default.
    self.__timeout = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint16', is_config=False)
  def _get_connection_opens(self):
    """
    Getter method for connection_opens, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/connection_opens (oc-yang:counter64)

    YANG Description: Number of new connection requests sent to the server, e.g.
    socket open
    """
    # Returns the YANGDynClass-wrapped leaf value (name-mangled private attr).
    return self.__connection_opens
  def _set_connection_opens(self, v, load=False):
    """
    Setter method for connection_opens, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/connection_opens (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_connection_opens is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_connection_opens() directly.

    YANG Description: Number of new connection requests sent to the server, e.g.
    socket open
    """
    # Unwrap values that were already wrapped by a previous YANGDynClass pass.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap so the counter64 (0..2^64-1) range restriction is enforced.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-opens", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """connection_opens must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-opens", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__connection_opens = t
    # Invoke the change hook when the generated class defines one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_connection_opens(self):
    # Reset the counter back to a fresh (empty) wrapped default.
    self.__connection_opens = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-opens", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
  def _get_connection_closes(self):
    """
    Getter method for connection_closes, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/connection_closes (oc-yang:counter64)

    YANG Description: Number of connection close requests sent to the server, e.g.
    socket close
    """
    # Returns the YANGDynClass-wrapped leaf value (name-mangled private attr).
    return self.__connection_closes
  def _set_connection_closes(self, v, load=False):
    """
    Setter method for connection_closes, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/connection_closes (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_connection_closes is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_connection_closes() directly.

    YANG Description: Number of connection close requests sent to the server, e.g.
    socket close
    """
    # Unwrap values that were already wrapped by a previous YANGDynClass pass.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap so the counter64 (0..2^64-1) range restriction is enforced.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-closes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """connection_closes must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-closes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__connection_closes = t
    # Invoke the change hook when the generated class defines one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_connection_closes(self):
    # Reset the counter back to a fresh (empty) wrapped default.
    self.__connection_closes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-closes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
  def _get_connection_aborts(self):
    """
    Getter method for connection_aborts, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/connection_aborts (oc-yang:counter64)

    YANG Description: Number of aborted connections to the server. These do
    not include connections that are closed gracefully.
    """
    # Returns the YANGDynClass-wrapped leaf value (name-mangled private attr).
    return self.__connection_aborts
  def _set_connection_aborts(self, v, load=False):
    """
    Setter method for connection_aborts, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/connection_aborts (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_connection_aborts is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_connection_aborts() directly.

    YANG Description: Number of aborted connections to the server. These do
    not include connections that are closed gracefully.
    """
    # Unwrap values that were already wrapped by a previous YANGDynClass pass.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap so the counter64 (0..2^64-1) range restriction is enforced.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-aborts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """connection_aborts must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-aborts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__connection_aborts = t
    # Invoke the change hook when the generated class defines one.
    if hasattr(self, '_set'):
      self._set()
  def _unset_connection_aborts(self):
    # Reset the counter back to a fresh (empty) wrapped default.
    self.__connection_aborts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-aborts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
  def _get_connection_failures(self):
    """
    Getter method for connection_failures, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/connection_failures (oc-yang:counter64)

    YANG Description: Number of connection failures to the server
    """
    # Returns the YANGDynClass-wrapped leaf value (name-mangled private attr).
    return self.__connection_failures
def _set_connection_failures(self, v, load=False):
"""
Setter method for connection_failures, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/connection_failures (oc-yang:counter64)
If this variable is read-only (config: false) in the
source YANG file, then _set_connection_failures is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_connection_failures() directly.
YANG Description: Number of connection failures to the server
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-failures", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """connection_failures must be of a type compatible with oc-yang:counter64""",
'defined-type': "oc-yang:counter64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-failures", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', | |
< 0))'.format(self._print(func.args[0]))
def _print_Max(self, expr):
if "Max" in self.known_functions:
return self._print_Function(expr)
def inner_print_max(args): # The more natural abstraction of creating
if len(args) == 1: # and printing smaller Max objects is slow
return self._print(args[0]) # when there are many arguments.
half = len(args) // 2
return "((%(a)s > %(b)s) ? %(a)s : %(b)s)" % {
'a': inner_print_max(args[:half]),
'b': inner_print_max(args[half:])
}
return inner_print_max(expr.args)
def _print_Min(self, expr):
if "Min" in self.known_functions:
return self._print_Function(expr)
def inner_print_min(args): # The more natural abstraction of creating
if len(args) == 1: # and printing smaller Min objects is slow
return self._print(args[0]) # when there are many arguments.
half = len(args) // 2
return "((%(a)s < %(b)s) ? %(a)s : %(b)s)" % {
'a': inner_print_min(args[:half]),
'b': inner_print_min(args[half:])
}
return inner_print_min(expr.args)
    def indent_code(self, code):
        """Accepts a string of code or a list of code lines"""
        # A string is split into lines (keeping the newlines) and re-joined
        # after the list branch has indented each line.
        if isinstance(code, string_types):
            code_lines = self.indent_code(code.splitlines(True))
            return ''.join(code_lines)
        tab = "   "
        inc_token = ('{', '(', '{\n', '(\n')
        dec_token = ('}', ')')
        code = [line.lstrip(' \t') for line in code]
        # Per-line flags: 1 when the line opens (ends with) or closes
        # (starts with) a brace/paren level, 0 otherwise.
        increase = [int(any(map(line.endswith, inc_token))) for line in code]
        decrease = [int(any(map(line.startswith, dec_token))) for line in code]
        pretty = []
        level = 0
        for n, line in enumerate(code):
            if line == '' or line == '\n':
                pretty.append(line)
                continue
            # Dedent before emitting a closing line; indent after an opener.
            level -= decrease[n]
            pretty.append("%s%s" % (tab*level, line))
            level += increase[n]
        return pretty
def _get_func_suffix(self, type_):
return self.type_func_suffixes[self.type_aliases.get(type_, type_)]
def _get_literal_suffix(self, type_):
return self.type_literal_suffixes[self.type_aliases.get(type_, type_)]
def _get_math_macro_suffix(self, type_):
alias = self.type_aliases.get(type_, type_)
dflt = self.type_math_macro_suffixes.get(alias, '')
return self.type_math_macro_suffixes.get(type_, dflt)
def _print_Type(self, type_):
self.headers.update(self.type_headers.get(type_, set()))
self.macros.update(self.type_macros.get(type_, set()))
return self._print(self.type_mappings.get(type_, type_.name))
    def _print_Declaration(self, decl):
        """Print a C variable declaration with const/restrict qualifiers and
        an optional ``= value`` initializer."""
        from sympy.codegen.cnodes import restrict
        var = decl.variable
        val = var.value
        if var.type == untyped:
            raise ValueError("C does not support untyped variables")
        if isinstance(var, Pointer):
            # const placement: 'const T *' for a const pointee,
            # 'T * const' for a const pointer.
            result = '{vc}{t} *{pc} {r}{s}'.format(
                vc='const ' if value_const in var.attrs else '',
                t=self._print(var.type),
                pc=' const' if pointer_const in var.attrs else '',
                r='restrict ' if restrict in var.attrs else '',
                s=self._print(var.symbol)
            )
        elif isinstance(var, Variable):
            result = '{vc}{t} {s}'.format(
                vc='const ' if value_const in var.attrs else '',
                t=self._print(var.type),
                s=self._print(var.symbol)
            )
        else:
            raise NotImplementedError("Unknown type of var: %s" % type(var))
        if val != None: # Must be "!= None", cannot be "is not None"
            result += ' = %s' % self._print(val)
        return result
    def _print_Float(self, flt):
        """Print a float literal with full precision for the target real type
        and the matching literal suffix (e.g. ``F``/``L``)."""
        type_ = self.type_aliases.get(real, real)
        self.macros.update(self.type_macros.get(type_, set()))
        suffix = self._get_literal_suffix(type_)
        num = str(flt.evalf(type_.decimal_dig))
        # Ensure a decimal point so the literal is floating-point, not integer.
        if 'e' not in num and '.' not in num:
            num += '.0'
        num_parts = num.split('e')
        # Strip trailing zeros from the mantissa, but keep one digit after '.'.
        num_parts[0] = num_parts[0].rstrip('0')
        if num_parts[0].endswith('.'):
            num_parts[0] += '0'
        return 'e'.join(num_parts) + suffix
    @requires(headers={'stdbool.h'})
    def _print_BooleanTrue(self, expr):
        """Print C99 ``true`` (requires <stdbool.h>)."""
        return 'true'
    @requires(headers={'stdbool.h'})
    def _print_BooleanFalse(self, expr):
        """Print C99 ``false`` (requires <stdbool.h>)."""
        return 'false'
def _print_Element(self, elem):
if elem.strides == None: # Must be "== None", cannot be "is None"
if elem.offset != None: # Must be "!= None", cannot be "is not None"
raise ValueError("Expected strides when offset is given")
idxs = ']['.join(map(lambda arg: self._print(arg),
elem.indices))
else:
global_idx = sum([i*s for i, s in zip(elem.indices, elem.strides)])
if elem.offset != None: # Must be "!= None", cannot be "is not None"
global_idx += elem.offset
idxs = self._print(global_idx)
return "{symb}[{idxs}]".format(
symb=self._print(elem.symbol),
idxs=idxs
)
def _print_CodeBlock(self, expr):
""" Elements of code blocks printed as statements. """
return '\n'.join([self._get_statement(self._print(i)) for i in expr.args])
def _print_While(self, expr):
return 'while ({condition}) {{\n{body}\n}}'.format(**expr.kwargs(
apply=lambda arg: self._print(arg)))
def _print_Scope(self, expr):
return '{\n%s\n}' % self._print_CodeBlock(expr.body)
@requires(headers={'stdio.h'})
def _print_Print(self, expr):
return 'printf({fmt}, {pargs})'.format(
fmt=self._print(expr.format_string),
pargs=', '.join(map(lambda arg: self._print(arg), expr.print_args))
)
def _print_FunctionPrototype(self, expr):
pars = ', '.join(map(lambda arg: self._print(Declaration(arg)),
expr.parameters))
return "%s %s(%s)" % (
tuple(map(lambda arg: self._print(arg),
(expr.return_type, expr.name))) + (pars,)
)
def _print_FunctionDefinition(self, expr):
return "%s%s" % (self._print_FunctionPrototype(expr),
self._print_Scope(expr))
def _print_Return(self, expr):
arg, = expr.args
return 'return %s' % self._print(arg)
def _print_CommaOperator(self, expr):
return '(%s)' % ', '.join(map(lambda arg: self._print(arg), expr.args))
def _print_Label(self, expr):
return '%s:' % str(expr)
def _print_goto(self, expr):
return 'goto %s' % expr.label
def _print_PreIncrement(self, expr):
arg, = expr.args
return '++(%s)' % self._print(arg)
def _print_PostIncrement(self, expr):
arg, = expr.args
return '(%s)++' % self._print(arg)
def _print_PreDecrement(self, expr):
arg, = expr.args
return '--(%s)' % self._print(arg)
def _print_PostDecrement(self, expr):
arg, = expr.args
return '(%s)--' % self._print(arg)
def _print_struct(self, expr):
return "%(keyword)s %(name)s {\n%(lines)s}" % dict(
keyword=expr.__class__.__name__, name=expr.name, lines=';\n'.join(
[self._print(decl) for decl in expr.declarations] + [''])
)
    def _print_BreakToken(self, _):
        """Print the C ``break`` statement (token carries no data)."""
        return 'break'
    def _print_ContinueToken(self, _):
        """Print the C ``continue`` statement (token carries no data)."""
        return 'continue'
_print_union = _print_struct
class _C9XCodePrinter(object):
# Move these methods to C99CodePrinter when removing CCodePrinter
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
loopstart = "for (int %(var)s=%(start)s; %(var)s<%(end)s; %(var)s++){" # C99
for i in indices:
# C arrays start at 0 and end at dimension-1
open_lines.append(loopstart % {
'var': self._print(i.label),
'start': self._print(i.lower),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
@deprecated(
    last_supported_version='1.0',
    useinstead="C89CodePrinter or C99CodePrinter, e.g. ccode(..., standard='C99')",
    issue=12220,
    deprecated_since_version='1.1')
class CCodePrinter(_C9XCodePrinter, C89CodePrinter):
    """
    Deprecated.

    Alias for C89CodePrinter, for backwards compatibility.
    """
    # Retains the historical C9X function table for old callers.
    _kf = _known_functions_C9X # known_functions-dict to copy
class C99CodePrinter(_C9XCodePrinter, C89CodePrinter):
    """C99 printer: extends the C89 printer with complex types,
    INFINITY/NAN literals and <math.h> functions carrying the
    type-dependent 'f'/'l' suffixes."""
    standard = 'C99'
    reserved_words = set(reserved_words + reserved_words_c99)
    # Complex types exist only from C99 on; they require <complex.h>.
    type_mappings=dict(chain(C89CodePrinter.type_mappings.items(), {
        complex64: 'float complex',
        complex128: 'double complex',
    }.items()))
    type_headers = dict(chain(C89CodePrinter.type_headers.items(), {
        complex64: {'complex.h'},
        complex128: {'complex.h'}
    }.items()))
    _kf = known_functions_C99 # known_functions-dict to copy
    # functions with versions with 'f' and 'l' suffixes:
    _prec_funcs = ('fabs fmod remainder remquo fma fmax fmin fdim nan exp exp2'
                   ' expm1 log log10 log2 log1p pow sqrt cbrt hypot sin cos tan'
                   ' asin acos atan atan2 sinh cosh tanh asinh acosh atanh erf'
                   ' erfc tgamma lgamma ceil floor trunc round nearbyint rint'
                   ' frexp ldexp modf scalbn ilogb logb nextafter copysign').split()
    def _print_Infinity(self, expr):
        """Print the C99 INFINITY macro (from <math.h>)."""
        return 'INFINITY'
    def _print_NegativeInfinity(self, expr):
        return '-INFINITY'
    def _print_NaN(self, expr):
        return 'NAN'
    # tgamma was already covered by 'known_functions' dict
    @requires(headers={'math.h'}, libraries={'m'})
    @_as_macro_if_defined
    def _print_math_func(self, expr, nest=False, known=None):
        """Generic printer for <math.h> functions.

        *known* may be a plain function name, a callable formatter, or a list
        of (predicate, name) pairs; *nest* folds n-ary functions (Max/Min)
        into nested binary calls.
        """
        if known is None:
            known = self.known_functions[expr.__class__.__name__]
        if not isinstance(known, string_types):
            # List of (argument_test, name) pairs: pick the first match.
            for cb, name in known:
                if cb(*expr.args):
                    known = name
                    break
            else:
                raise ValueError("No matching printer")
        try:
            # A callable formatter handles the printing itself.
            return known(self, *expr.args)
        except TypeError:
            # Plain name: append 'f'/'l' suffix for suffixed math functions.
            suffix = self._get_func_suffix(real) if self._ns + known in self._prec_funcs else ''
            if nest:
                # Fold f(a, b, c) into f(a, f(b, c)) by accumulating opened
                # calls and closing them with paren_pile at the end.
                args = self._print(expr.args[0])
                if len(expr.args) > 1:
                    paren_pile = ''
                    for curr_arg in expr.args[1:-1]:
                        paren_pile += ')'
                        args += ', {ns}{name}{suffix}({next}'.format(
                            ns=self._ns,
                            name=known,
                            suffix=suffix,
                            next = self._print(curr_arg)
                        )
                    args += ', %s%s' % (
                        self._print(expr.func(expr.args[-1])),
                        paren_pile
                    )
            else:
                args = ', '.join(map(lambda arg: self._print(arg), expr.args))
            return '{ns}{name}{suffix}({args})'.format(
                ns=self._ns,
                name=known,
                suffix=suffix,
                args=args
            )
    def _print_Max(self, expr):
        # C99 fmax is binary, so nest for more than two arguments.
        return self._print_math_func(expr, nest=True)
    def _print_Min(self, expr):
        return self._print_math_func(expr, nest=True)
# Install the generic suffix-aware math printer for every simple one-to-one
# <math.h> function supported by C99.
for k in ('Abs Sqrt exp exp2 expm1 log log10 log2 log1p Cbrt hypot fma'
          ' loggamma sin cos tan asin acos atan atan2 sinh cosh tanh asinh acosh '
          'atanh erf erfc loggamma gamma ceiling floor').split():
    setattr(C99CodePrinter, '_print_%s' % k, C99CodePrinter._print_math_func)
class C11CodePrinter(C99CodePrinter):
    """C11 printer: extends C99 with the ``alignof`` operator."""
    @requires(headers={'stdalign.h'})
    def _print_alignof(self, expr):
        # alignof takes exactly one operand.
        arg, = expr.args
        return 'alignof(%s)' % self._print(arg)
# Map supported C standard names to their printer classes (used by ccode()).
c_code_printers = {
    'c89': C89CodePrinter,
    'c99': C99CodePrinter,
    'c11': C11CodePrinter
}
def ccode(expr, assign_to=None, standard='c99', **settings):
"""Converts an expr to a string of c code
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
standard : str, optional
String specifying the standard. If your compiler supports a more modern
standard you may set this to 'c99' to allow the printer to use more math
        functions. [default='c99'].
precision : integer, optional
The precision for numbers such as pi [default=17].
user_functions : dict, optional
A dictionary where the keys are string representations of either
``FunctionClass`` or ``UndefinedFunction`` instances and the values
are their desired C string representations. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
cfunction_string)] or [(argument_test, cfunction_formater)]. See below
for examples.
dereference : iterable, optional
An iterable of symbols that should be dereferenced in the printed code
expression. These would be values passed by address to the function.
For example, if ``dereference=[a]``, the resulting code would print
``(*a)`` instead of ``a``.
human : bool, optional
If True, the result is a single | |
{}
try:
appList = self.appLauncher.getAppList()
self.hashCallbackFunction[ 40000 ]( appList )
except socket.error:
self.hashCallbackFunction[ 40000 ]( appList ) #return appList #the server is not running
except:
tb.print_exc()
return False
    def isConnected(self):
        """Return the current connection-state flag."""
        return self.connected
    def disconnectFromSage(self, isSocketError=False):
        """ isSocketError should be True when we didn't close
        the connection intentionally but rather the connection
        broke for some reason. In that case the onDisconnect
        callback is called.

        Returns 1 on success, 0 when already disconnected.
        """
        if self.connected == False: return 0
        # Signal the receiver/overlay-sender threads to exit their loops.
        self.threadkilled = True
        self.overlaySenderKilled = True
        self.connected = False
        #self.t.join()
        # Drop the socket so a later reconnect builds a fresh one.
        self.sock.close()
        del self.sock
        print 'disconnected from SAGE',self.sageHost,self.sagePort
        # Only an unexpected break notifies the registered callback.
        if isSocketError and self.onDisconnect:
            self.onDisconnect()
        return 1
    def sendmsg(self, data, code, sailId=''):
        """Frame *data* with message *code* / *sailId* and send it to SAGE.

        Returns the number of bytes sent; 0 when not connected or on error.
        """
        if not self.connected:
            return 0
        # Serialize senders: the socket is shared between threads.
        self.senderLock.acquire()
        msg = self.makemsg(sailId,code,'',len(data),data)
        totalcount = 0
        try:
            totalcount = self.sock.send(msg)
        except socket.error:
            print 'SageGateBase: socket error on send'
            totalcount = 0
            # The connection broke underneath us; tear down and notify.
            self.disconnectFromSage( isSocketError=True )
        except Exception:
            # Best-effort: log unexpected errors but keep the gate alive.
            tb.print_exc()
            totalcount = 0
        self.senderLock.release()
        return totalcount
##################################################################
# Register
##################################################################
# 1000 none
# 40004 display info format
##################################################################
def registerSage(self):
if not self.connected:
return 0
return self.sendmsg('', 1000)
##################################################################
# Execute
##################################################################
# 1001 app-name 100 100 (app-name(?))
# 40001 app-inst-ID left right top bottom sail-ID
##################################################################
def executeApp(self, appName, configName="default", pos=False, size=False, optionalArgs="", useBridge=False, sageIP=None, sagePort=None):
if self.connected == False: return 0
if not appName: return 0
if not sageIP: sageIP = self.sageHost
if not sagePort: sagePort = self.sagePort+1
# try running the app (return -1 if failed for whatever reason)
try:
res = self.appLauncher.startDefaultApp(appName, sageIP, sagePort, useBridge, configName, pos, size, optionalArgs)
except socket.error:
return -1
else:
return res
    def executeRemoteApp(self, launcherId, appName, configName="default", pos=False, size=False, optionalArgs="", useBridge=False, sageIP=None, sagePort=None):
        """Start *appName* via the app launcher identified by *launcherId*.

        Returns the launcher's result, -1 on socket error or unknown launcher,
        0 when not connected or no app name was given.
        """
        if self.connected == False: return 0
        if not appName: return 0
        # Default to the SAGE host; the app port is by convention sagePort + 1.
        if not sageIP: sageIP = self.sageHost
        if not sagePort: sagePort = self.sagePort+1
        # try running the app (return -1 if failed for whatever reason)
        if launcherId in self.launchers:
            server = self.launchers[launcherId].getServerHandle()
            try:
                res = server.startDefaultApp(appName, sageIP, sagePort, useBridge, configName, pos, size, optionalArgs)
            except socket.error:
                return -1
            else:
                return res
        else:
            print "Launcher not found: ", launcherId
            return -1
##################################################################
# Shutdown
##################################################################
# 1002 app-instance
# 40003 app-Inst-ID (?)
##################################################################
def shutdownApp(self,appId):
if self.connected == False: return 0
data = str(appId)
return self.sendmsg(data, 1002)
##################################################################
# Forceful Shutdown
##################################################################
def forceShutdownApp(self, portNum):
    """Forcefully stop an app via the launcher.

    portNum is effectively the app id in the appLauncher context.
    """
    if self.connected == False:
        return 0
    return self.appLauncher.stopApp(portNum)
##################################################################
# Move
##################################################################
# 1003 app-instance dist-X,dist-Y
##################################################################
def moveWindow(self, appId, distX, distY):
    """Move an app window by a relative (distX, distY) offset (message 1003)."""
    if self.connected == False:
        return 0
    # coordinates are always sent as whole numbers
    payload = BLANK.join([str(appId), str(int(distX)), str(int(distY))])
    return self.sendmsg(payload, 1003)
##############################################################
# Resize
# 1004 app-instance left,right,bottom,top
##################################################################
def resizeWindow(self, appId, left=0, right=0, bottom=0, top=0):
    """Resize an app window by per-edge offsets (message 1004).

    The wire order is left, right, bottom, top; all values are sent as ints.
    """
    if self.connected == False:
        return 0
    fields = [str(appId)]
    for edge in (left, right, bottom, top):
        fields.append(str(int(edge)))
    return self.sendmsg(BLANK.join(fields), 1004)
###########################################################
# Performance Information
###########################################################
# 1005 app-instance sending-rate
# 1006 app-instance
###########################################################
def startPerformance(self, appId, sendingrate=2):
    """Ask SAGE to stream performance info for an app (message 1005).

    sendingrate is how often the info is sent (defaults to 2).
    """
    if self.connected == False:
        return 0
    return self.sendmsg("%d %d" % (appId, sendingrate), 1005)
def stopPerformance(self, appId):
    """Stop performance-info updates for an app (message 1006)."""
    if self.connected == False:
        return 0
    return self.sendmsg(str(appId), 1006)
####################################
# Background Color
# 1007 red,green,blue
##################################################################
def setBgColor(self,(red,green,blue)=(1,1,1)):
    # Set the SAGE wall background color (message 1007).
    # NOTE: uses Python 2-only tuple-parameter unpacking — the caller
    # passes a single (red, green, blue) tuple.
    if self.connected == False: return 0
    data = str(red) + BLANK + str(green) + BLANK + str(blue)
    return self.sendmsg(data, 1007)
####################################
# bring the application window to the top (front)
# 1010 app-inst-ID
##################################################################
def bringToFront(self, appId):
    """Raise an app window to the top/front (message 1010)."""
    if self.connected == False:
        return 0
    return self.sendmsg(str(appId), 1010)
####################################
# Change App Properties
# 1011 appId, newTitle, newTitleColor, newBorderColor
##################################################################
def changeAppProperties(self, appId, newTitle, newTitleColor=(-1,-1,-1), newBorderColor=(-1,-1,-1)):
    """Change an app window's title and title/border colors (message 1011).

    Color components default to -1, which presumably means "keep the
    current color" — confirm against fsManager.
    """
    if self.connected == False:
        return 0
    fields = [str(appId), str(newTitle),
              str(newTitleColor[0]), str(newTitleColor[1]), str(newTitleColor[2]),
              str(newBorderColor[0]), str(newBorderColor[1]), str(newBorderColor[2])]
    return self.sendmsg(BLANK.join(fields), 1011)
####################################
# Change App Frame Rate
# 1012 appId, newFrameRate
##################################################################
def changeAppFrameRate(self, appId, newFrameRate):
    """Change an app's frame rate (message 1012)."""
    if self.connected == False:
        return 0
    return self.sendmsg(BLANK.join([str(appId), str(newFrameRate)]), 1012)
####################################
# Stream Request
# 1014 appId, fsmIP, fsmPort
##################################################################
def streamApp(self, appId, fsmIP, fsmPort):
    """Request streaming of an app to another fsManager (message 1014)."""
    if self.connected == False:
        return 0
    return self.sendmsg(BLANK.join([str(appId), str(fsmIP), str(fsmPort)]), 1014)
####################################
# Rotate Window
# 1018 appId, degree
##################################################################
def rotateWindow(self, appId, degree):
    """Rotate an app window by the given number of degrees (message 1018)."""
    if self.connected == False:
        return 0
    return self.sendmsg(BLANK.join([str(appId), str(degree)]), 1018)
####################################
# Overlay Messages
# 1200 - 1205
##################################################################
def addOverlay(self, overlayType, x, y, w, h, isGlobal, drawOrder, displayId=0):
    """Create a new overlay object on the display (message 1200).

    isGlobal is coerced to 0/1 on the wire. No connected-check is
    performed (matches the other overlay messages).
    """
    fields = (overlayType, x, y, w, h, int(isGlobal), drawOrder, displayId)
    return self.sendmsg(" ".join(str(f) for f in fields), 1200)
def moveOverlay(self, id, dx, dy):
    """Move an overlay by a relative (dx, dy) offset (message 1201)."""
    payload = "%s %s %s" % (id, dx, dy)
    return self.sendmsg(payload, 1201)
def showOverlay(self, id, doShow):
    """Show or hide an overlay (message 1204); doShow is coerced to 0/1."""
    return self.sendmsg("%s %s" % (id, int(doShow)), 1204)
def sendOverlayMessage(self, id, *data):
    """Queue an overlay message for batched sending.

    Messages are not sent immediately: the overlay sender thread drains
    the queue at fixed intervals and sends them combined.
    """
    # assemble "<id> <arg1> <arg2> ..." exactly as the wire format expects
    parts = ['%s' % (id)]
    for item in data:
        parts.append(str(item))
    msg = " ".join(parts)
    self.overlayMsgLock.acquire()
    self.overlayMsgQueue.append(msg)
    self.overlayMsgLock.release()
def __sendMultipleOverlayMessages(self, msg): # a bunch of messages combined into one
    # Send a newline-separated batch of queued overlay messages as a
    # single 1203 message (used by overlaySenderThread).
    return self.sendmsg(msg, 1203)
def removeOverlay(self, id):
    """Delete an overlay object (message 1202)."""
    return self.sendmsg('%s' % (id), 1202)
####################################
# SAGE App events
# 31000 - 31007
##################################################################
def sendAppEvent(self, eventId, sailId, *data):
    """Forward an event to a SAGE app (message 31000 + eventId, via sailId).

    The assembled payload keeps a leading space before the first item,
    matching the original wire format. Returns None.
    """
    msg = "".join(" " + str(item) for item in data)
    self.sendmsg(msg, 31000 + eventId, sailId)
####################################
# SAGE shutdown
# 1100 <none>
##################################################################
def shutdownSAGE(self):
    """Shut down the whole SAGE session (message 1100, empty payload)."""
    if self.connected == False:
        return 0
    return self.sendmsg('', 1100)
##############
# Overlay Sender Thread
# - Sends combined overlay messages at fixed intervals
##################################################################
def overlaySenderThread(self):
    # Background loop: periodically drains overlayMsgQueue and sends the
    # queued overlay messages as one combined, newline-separated batch
    # (via __sendMultipleOverlayMessages / message 1203).
    # Runs until overlaySenderKilled is set or doRun() turns false; the
    # send interval is 1/overlayMsgFreq seconds.
    while not self.overlaySenderKilled and doRun():
        self.overlayMsgLock.acquire()
        # iterate through the msg queue and assemble the messages into a string
        msg = ""
        for m in self.overlayMsgQueue:
            msg += m + "\n" # separate messages with \n
        self.overlayMsgQueue = [] # clear the queue
        self.overlayMsgLock.release()
        # send the message if there is something to send
        msg = msg.strip()
        if msg != "":
            self.__sendMultipleOverlayMessages(msg)
        # sleep for a certain time
        time.sleep(1.0/self.overlayMsgFreq)
    print "Overlay message sender thread closed"
##############
# Receiver Thread
# - receives messages from SAGE in a thread
##################################################################
def receiverThread(self):
while self.threadkilled == False and doRun(): #doesn't work as expected without the sock.settimeout (look below)
#############################
try:
code = ""
incomingMsg = ""
msgSize = ""
# first make sure you read the whole 8 bytes for the size
while len(msgSize) < HEADER_ITEM_LEN:
msgSize = self.sock.recv(HEADER_ITEM_LEN)
if len( msgSize ) == 0:
self.threadkilled = True
self.overlaySenderKilled = True
self.disconnectFromSage( isSocketError=True )
break
if self.threadkilled: break
# this is the number of bytes that the total message contains
msgSize = msgSize.replace('\x00', '')
sizeLeft = int(msgSize) - HEADER_ITEM_LEN # we already read the size so read the rest of the bytes
# read the rest of the message
while len( incomingMsg ) < sizeLeft:
incomingMsg = incomingMsg + self.sock.recv( sizeLeft - len(incomingMsg) )
# extract the tokens from the message
if len( incomingMsg ) > 0:
incomingMsg = incomingMsg.replace('\x00', ' ')
dst = incomingMsg[ 1:9 ].strip()
code = int(incomingMsg[ 10:18 ].strip())
appCode = incomingMsg[ 19:27 ].strip()
data = incomingMsg[ 28: ].strip()
# print the message out (except performance info since there are many of them)
if self.verbose and code in self.hashIncomingMessages and code != 40002:
print "\n\tRECEIVED: " + self.hashIncomingMessages[code]
lines = data.split('\n')
if len(lines) < 2:
print "\t\t [" + lines[0] + "]\n\n"
else:
for i in range(0, len(lines)):
if i == 0:
print "\t\t [" + lines[i]
elif i == len(lines)-1:
print "\t\t " + lines[i] + "]\n\n"
else:
print "\t\t " + lines[i]
except socket.timeout:
continue
except socket.error:
print 'SageGateBase: socket error on receive'
self.disconnectFromSage( isSocketError=True )
continue
#except:
# print 'exception: ', sys.exc_info()[0], sys.exc_info()[1]
# break
############################
if self.threadkilled:
break
# finally, do something with this message (ie call the subclass' message handler)
self.onMessage( code, data )
print "SageGate receiver thread closed"
def cleanBuffer( self, stBuffer ):
""" converts all non-printable characters from the | |
] )
I11 . start ( )
return
if 4 - 4: Oo0Ooo * Oo0Ooo / OoOoOO00
if 4 - 4: I1IiiI * OoOoOO00 % I11i . OoOoOO00
if 11 - 11: OOooOOo - OoOoOO00 - o0oOOo0O0Ooo * OoOoOO00 + ooOoO0o
if 62 - 62: I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
def O0O ( kv_pair ) :
    # NOTE(review): machine-obfuscated source (identifier-mangled); the
    # "if N - N:" statements are dead no-op filler left by the obfuscator.
    # Handles a map-resolver configuration clause: applies it via
    # lispconfig, starts a one-shot map-resolver test timer if none is
    # alive, and re-arms the I11 timer to re-run ii1I (defined elsewhere
    # in this file).
    global II1iII1i
    global iiI1iIiI
    global I11
    if 51 - 51: oO0o + OoO0O00 + iII111i + iII111i % o0oOOo0O0Ooo
    lispconfig . lisp_map_resolver_command ( kv_pair )
    if 29 - 29: ooOoO0o
    # kick off a 2-second one-shot test of the configured map-resolvers
    # unless one is already in flight
    if ( lisp . lisp_test_mr_timer == None or
        lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
        lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
            [ II1iII1i , iiI1iIiI ] )
        lisp . lisp_test_mr_timer . start ( )
    if 41 - 41: O0 % iII111i
    if 10 - 10: iII111i . i1IIi + Ii1I
    if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
    if 21 - 21: OoOoOO00 - OoooooooOO % i11iIiiIii
    if 71 - 71: i1IIi - I11i * I1Ii111 + oO0o - OoO0O00 % I1ii11iIi11i
    # immediately (0 s delay) re-arm the I11 timer
    I11 = threading . Timer ( 0 , ii1I , [ ] )
    I11 . start ( )
    return
if 63 - 63: iIii1I11I1II1 + OOooOOo . OoO0O00 / I1IiiI
if 84 - 84: i1IIi
if 42 - 42: II111iiii - OoO0O00 - OoooooooOO . iII111i / OoOoOO00
if 56 - 56: i11iIiiIii - iIii1I11I1II1 . II111iiii
if 81 - 81: IiII / OoOoOO00 * IiII . O0
if 61 - 61: OoO0O00 * OOooOOo + I1Ii111 . iIii1I11I1II1 % I11i . I1Ii111
if 53 - 53: I1Ii111 * IiII / iIii1I11I1II1 / I1IiiI % I1ii11iIi11i
if 39 - 39: OoO0O00 / OoooooooOO . OoO0O00 * I1ii11iIi11i / OoOoOO00
def II111(kv_pair):
    """Obfuscated wrapper: forward a database-mapping config clause."""
    lispconfig.lisp_database_mapping_command(kv_pair)
if 94 - 94: iII111i % ooOoO0o . oO0o
if 85 - 85: OOooOOo * i1IIi % I1IiiI - ooOoO0o
if 37 - 37: IiII . Oo0Ooo * Oo0Ooo * II111iiii * O0
if 83 - 83: IiII / I1Ii111
if 64 - 64: OoO0O00 % IiII . I1Ii111 % OoO0O00 + I11i * IiII
if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
if 52 - 52: Ii1I % OOooOOo * I1IiiI % I11i + OOooOOo / iII111i
def oo000o ( kv_pair ) :
    # NOTE(review): machine-obfuscated source; "if N - N:" lines are dead
    # filler. Handles an xtr-style configuration clause: snapshots the
    # nat-traversal / rloc-probing flags, applies the clause, and if a
    # flag transitioned False -> True starts the RLOC-probe timer. Also
    # records the ephemeral crypto port and pushes xtr parameters to the
    # data-plane.
    global i111I
    if 95 - 95: oO0o - ooOoO0o * I11i / OoO0O00 / II111iiii + O0
    if 37 - 37: I11i . I1Ii111 + OOooOOo + I11i . IiII / Ii1I
    if 29 - 29: IiII . ooOoO0o - II111iiii
    if 68 - 68: iIii1I11I1II1 + II111iiii / oO0o
    if 91 - 91: OoOoOO00 % iIii1I11I1II1 . I1IiiI
    # snapshot flags so a False -> True transition can be detected below
    O00ooooo00 = lisp . lisp_nat_traversal
    O0ooOoOO0 = lisp . lisp_rloc_probing
    if 56 - 56: o0oOOo0O0Ooo / IiII * I1IiiI . o0oOOo0O0Ooo
    if 15 - 15: i11iIiiIii
    if 13 - 13: I11i * II111iiii * oO0o * II111iiii % IiII / I1IiiI
    if 100 - 100: IiII . Ii1I - iIii1I11I1II1 . i11iIiiIii / II111iiii
    lispconfig . lisp_xtr_command ( kv_pair )
    if 71 - 71: I1Ii111 * Oo0Ooo . I11i
    if 49 - 49: IiII * O0 . IiII
    if 19 - 19: II111iiii - IiII
    if 59 - 59: o0oOOo0O0Ooo * OoO0O00 - Ii1I . OOooOOo
    o0OO00oo0O = ( O00ooooo00 == False and lisp . lisp_nat_traversal and lisp . lisp_rloc_probing )
    if 46 - 46: i11iIiiIii - OOooOOo * I1IiiI * I11i % I1ii11iIi11i * i1IIi
    Iii1I = ( O0ooOoOO0 == False and lisp . lisp_rloc_probing )
    if 40 - 40: IiII * O0
    # delay before the first RLOC probe: 1 s when probing was just
    # enabled, 5 s when nat-traversal (with probing) was just enabled
    oo0OoOO0o0o = 0
    if ( Iii1I ) : oo0OoOO0o0o = 1
    if ( o0OO00oo0O ) : oo0OoOO0o0o = 5
    if 67 - 67: OoOoOO00 - OoOoOO00 * OoO0O00 - iII111i % oO0o
    if ( oo0OoOO0o0o != 0 ) :
        iI = [ i111I , i111I ]
        lisp . lisp_start_rloc_probe_timer ( oo0OoOO0o0o , iI )
    if 47 - 47: OoooooooOO % OoOoOO00
    if 63 - 63: OoO0O00 / OoOoOO00 * iIii1I11I1II1 . I1Ii111
    if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
    if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
    if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
    if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
    if 88 - 88: o0oOOo0O0Ooo
    # record the local ephemeral port used for lisp-crypto packets and
    # tell the data-plane about it (i111I is presumably a bound UDP
    # socket - confirm where it is assigned)
    if ( lisp . lisp_crypto_ephem_port == None and lisp . lisp_data_plane_security ) :
        i1IiII1III = i111I . getsockname ( ) [ 1 ]
        lisp . lisp_crypto_ephem_port = i1IiII1III
        lisp . lprint ( "Use port {} for lisp-crypto packets" . format ( i1IiII1III ) )
        iI11 = { "type" : "itr-crypto-port" , "port" : i1IiII1III }
        lisp . lisp_write_to_dp_socket ( iI11 )
    if 97 - 97: oO0o + Oo0Ooo * OOooOOo % Oo0Ooo
    if 31 - 31: i11iIiiIii
    if 12 - 12: ooOoO0o
    if 86 - 86: oO0o - OoO0O00
    if 63 - 63: I1IiiI / OoOoOO00 + OoooooooOO . I11i . ooOoO0o
    # push current logging settings to the data-plane process
    lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
        lisp . lisp_data_plane_logging )
    return
if 48 - 48: i1IIi - iII111i - i11iIiiIii . I11i - iII111i * I11i
if 60 - 60: OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
if 80 - 80: oO0o * I11i / iIii1I11I1II1 % oO0o / iIii1I11I1II1
if 42 - 42: i1IIi / i11iIiiIii . Oo0Ooo * iII111i . i11iIiiIii * O0
if 44 - 44: i1IIi . I1IiiI / i11iIiiIii + IiII
if 27 - 27: OOooOOo
def O0OO0ooO00 ( ipc ) :
oO0oOO0o , o0O , I1IIiIIi1Ii1III , oO = ipc . split ( "%" )
oO = int ( oO , 16 )
if 86 - 86: i11iIiiIii + i11iIiiIii . I1Ii111 % I1IiiI . ooOoO0o
iII1iI1IIiI = lisp . lisp_get_echo_nonce ( None , I1IIiIIi1Ii1III )
if ( iII1iI1IIiI == None ) : iII1iI1IIiI = lisp . lisp_echo_nonce ( I1IIiIIi1Ii1III )
if 69 - 69: I11i / i11iIiiIii * o0oOOo0O0Ooo / I1Ii111
if 71 - 71: o0oOOo0O0Ooo / OOooOOo % OOooOOo
if 89 - 89: OoooooooOO + i11iIiiIii / I11i + iIii1I11I1II1 % ooOoO0o
if 29 - 29: I1ii11iIi11i
if 53 - 53: i11iIiiIii . I1ii11iIi11i % Ii1I / ooOoO0o % iIii1I11I1II1
if ( o0O == "R" ) :
iII1iI1IIiI . request_nonce_rcvd = oO
iII1iI1IIiI . last_request_nonce_rcvd = lisp . lisp_get_timestamp ( )
iII1iI1IIiI . echo_nonce_sent = oO
iII1iI1IIiI . last_new_echo_nonce_sent = lisp . lisp_get_timestamp ( )
lisp . lprint ( "Start echo-nonce mode for {}, nonce 0x{}" . format ( lisp . red ( iII1iI1IIiI . rloc_str , False ) , lisp . lisp_hex_string ( oO ) ) )
if 6 - 6: Oo0Ooo - OOooOOo . iIii1I11I1II1
if 30 - 30: ooOoO0o + ooOoO0o % IiII - o0oOOo0O0Ooo - I1ii11iIi11i
if 36 - 36: I11i % OOooOOo
if ( o0O == "E" ) :
iII1iI1IIiI . echo_nonce_rcvd = oO
iII1iI1IIiI . last_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
if 72 - 72: I1IiiI / iII111i - O0 + I11i
if ( iII1iI1IIiI . request_nonce_sent == oO ) :
o0 = lisp . bold ( "echoed nonce" , False )
lisp . lprint ( "Received {} {} from {}" . format ( o0 ,
lisp . lisp_hex_string ( oO ) ,
lisp . red ( iII1iI1IIiI . rloc_str , False ) ) )
if 48 - 48: o0oOOo0O0Ooo | |
<filename>cadee/ana/alanize.py
#!/usr/bin/env python
"""
The script reads an sqlite3 database produced by CADEE
and creates an interactive web UI application for
visualizing and analysing the results (index.html).
usage: python alanize.py cadee.db
this will create your index.html
Author: {0} ({1})
This program is part of CADEE, the framework for
Computer-Aided Directed Evolution of Enzymes.
"""
from __future__ import print_function
import sys
import os
import sqlite3
import json
__author__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>, <EMAIL>"
# Static HTML skeleton of the generated index.html: page styles, the sort
# buttons, the plot container (#graph), the CADEE-command output box and
# the hover info box with the mutation-library action checkboxes. The
# string content is emitted verbatim, so it is kept byte-identical here.
html="""
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>No title</title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.12.0/jquery-ui.css">
<link rel="stylesheet" href="http://yui.yahooapis.com/pure/0.6.0/pure-min.css">
<script type="text/javascript" src="https://cdn.plot.ly/plotly-latest.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.0/jquery.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.12.0/jquery-ui.js"></script>
<!--[if lt IE 9]>
<script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->
<style type="text/css">
* {
font-family: "arial";
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
border-width: 0px;
}
.buttons {
margin-left: 20px;
margin-top: 20px;
}
.sort-button {
border-radius: 5px;
background: #88aacc;
color: #fafaff;
}
#info-div {
opacity: 0.9;
background-color: #fafafa;
display: none;
max-width: 500px;
margin: 0 auto;
padding: 0.5%;
border-radius: 5px;
border: 1px solid #c0c0c0;
font-size: 12px;
z-index: 1000;
}
#overlay-div {
opacity: 0.7;
background-color: #000;
width: 100%;
height: 100%;
z-index: 500;
position: absolute;
left: 0;
top: 0;
display: none;
}
#info-header {
padding: 10px;
background-color: #fafafa;
margin-bottom: 10px;
border-radius: 5px;
border: 1px solid #d0d0d0;
}
#info-stats {
padding-left: 10px;
width: 59%;
display: inline-block;
vertical-align: top;
}
#info-actions {
padding: 0 0 20px 20px;
width: 40%;
margin-left: 1%;
display: inline-block;
vertical-align: top;
border-radius: 5px;
border: 1px solid #d0d0d0;
background-color: #f6f6f6;
}
input[type=checkbox]
{
/* 1.5z-sized Checkboxes */
-ms-transform: scale(1.3); /* IE */
-moz-transform: scale(1.3); /* FF */
-webkit-transform: scale(1.3); /* Safari and Chrome */
-o-transform: scale(1.3); /* Opera */
transform: scale(1.3);
margin: 0 10px 10px 0;
}
.actions {
text-align: left;
}
input[type="text"] {
font-family: sans-serif;
font-size: 12px;
width: 90%;
padding: 5px;
}
#output-div {
margin: 20px;
padding: 10px;
border-radius: 5px;
background-color: #f0f0f0;
}
#output-div > p {
margin: 10px 0 0 0;
padding: 10px;
background-color: #fafafa;
border-radius: 5px;
border: 1px solid #c0c0c0;
}
#top_info {
padding: 10px;
background-color: #ffaa88;
opacity: 0.5;
}
</style>
</head>
<body>
<div id="top_info">Info: Use the 'shift' key to toggle the info box, mouse click to select.</div>
<div class="buttons"><button id="sort-barrier" class="sort-button pure-button">Sort by dG#</button>
<button id="sort-name" class="sort-button pure-button">Sort by name</button>
<button id="sort-action" class="sort-button pure-button">Sort by actions</button></div>
<div id="overlay-div"></div>
<div style="overflow: scroll; width:100%;"><div id="graph"></div> </div>
<div id="output-div">CADEE command<p>Nothing yet...</p></div>
<div id="info-div">
<div id="info-header"></div><div id="info-stats"></div><div id="info-actions">
<p><b>Actions</b></p>
<form class="actions pure-form" action="javascript:void(0);">
<label for="inp_SATURATE"><input type="checkbox" name="saturate" id="inp_SATURATE" value="SATURATE"/>Saturate (20AA)</label> <br>
<label for="inp_APOLAR"><input type="checkbox" name="apolar" id="inp_APOLAR" value="APOLAR"/>Apolar (8AA)</label>
<br>
<label for="inp_POLAR"><input type="checkbox" name="polar" id="inp_POLAR" value="POLAR"/>Polar (9AA)</label>
<br>
<label for="inp_CUSTOM"><input type="checkbox" name="custom" id="inp_CUSTOM" value="CUSTOM"/>Custom</label>
<input type="text" name="CUSTOM_text" id="CUSTOM_text" disabled/>
</form>
</div>
</div>
<div id="comptime-div"></div>
"""
# Client-side JavaScript for index.html: draws grouped Plotly box plots of
# dG# / dG0 per mutant (expects a global `data` array emitted elsewhere),
# wires up the sort buttons, the hover info box, the per-mutant library
# mutation actions and the generated "cadee.py --libmut ..." command line.
# The string content is emitted verbatim, so it is kept byte-identical.
vis="""<script>
var plotted = false;
draw_plot();
var curX;
var curY;
var x_hovered; // index of element in focus (hovered over - plotly_hover)
var info_visible = false;
data.forEach(function(dp) {
// used to store actions
dp.cadee_actions = {
libmut: [],
libmut_custom: ""
}
});
function draw_plot() {
var xs1 = [],
xs2 = [],
ys1 = [],
ys2 = [],
names_list = []
data.forEach( function(dp) {
for (i in dp.barrier) { ys1.push(dp.barrier[i]); xs1.push(dp.name) };
for (i in dp.exotherm) { ys2.push(dp.exotherm[i]); xs2.push(dp.name) };
names_list.push(dp.name);
});
// data1 and data2 are traces for dg# and dg0, respectively
// each of them contain number_of_points_per_mutant*number_of_mutant_names points in 'y' and the same amount of names in 'x'
// (x: [name1, name1, name1, name2, name2, name2 ] if 2 names and 3 points per name )
// (y: [pt1_1, pt1_2, pt1_3, pt2_1, pt2_2, pt2_3]
// this kind of "group" plotting is enabled with 'boxmode: group' in the layout
// see: https://plot.ly/javascript/box-plots/
var data1 = {
name: 'dG#',
y: ys1,
x: xs1,
type: 'box',
hoverinfo: "none",
marker: { color: 'gray' },
// used for sorting (see sort-barrier function)
names_list: names_list
};
var data2 = {
name: 'dG0',
y: ys2,
x: xs2,
type: 'box',
hoverinfo: "none",
marker: { color: '#ccddcc' }
};
if (!plotted) {
//Format the layout
var layout = {
xaxis: {
showgrid: false,
tickangle: 60,
showticklabels: true,
},
yaxis: {
title: "Free energy (rel to WT) [kcal/mol]",
zeroline: true,
gridcolor: 'rgb(220,220,220)'
},
autosize: false,
width: 40*data.length,
margin: {
t: 30,
b: 150,
},
paper_bgcolor: 'rgb(255,255,255)',
plot_bgcolor: 'rgb(253,253,253)',
boxmode: "group",
};
Plotly.newPlot('graph', [data1, data2], layout);
plotted = true;
} else {
var graph = document.getElementById("graph");
graph.data = [data1, data2];
Plotly.redraw(graph);
};
};
function update_cadee_cmd() {
var lmuts = [];
data.forEach(function(dp) {
if (dp.name == "wt") { return };
var resid = dp.name.slice(1,-1);
dp.cadee_actions.libmut.forEach(function(lm) {
lmuts.push(resid + ":" + lm);
});
if (dp.cadee_actions.libmut_custom != "") {
lmuts.push(resid + ":'" + dp.cadee_actions.libmut_custom + "'");
};
});
var cmd_str = "Nothing yet..."
if (lmuts.length > 0) {
cmd_str = "cadee.py --libmut " + lmuts.join(" ")
}
$("#output-div > p").text(cmd_str);
};
function dock_info_div() {
var idiv = $("#info-div");
$("#overlay-div").fadeIn("fast");
idiv.fadeIn("fast");
idiv.css("opacity", "1")
};
function undock_info_div() {
$("#overlay-div").hide();
$("#info-div").css("opacity", "0.9");
};
$("#sort-barrier").click( function() {
var calcdata = document.getElementById("graph").calcdata[0]; // just the first set - dG#
//console.log(calcdata);
var cd_indexes = calcdata.map(function(cd,i) { return i; });
cd_indexes.sort( function(a,b) { return calcdata[a].med - calcdata[b].med } );
var graph_data1 = document.getElementById('graph').data[0] // just the first set - dG#
var cd_names = cd_indexes.map( function(cd_i) { return graph_data1.names_list[cd_i]; });
data.sort( function(a,b) { return cd_names.indexOf(a.name) - cd_names.indexOf(b.name) });
draw_plot(data);
});
$("#sort-name").click( function() {
data.sort( function(a,b) { return a.name.localeCompare(b.name) } );
draw_plot(data);
});
$("#sort-action").click( function() {
data.sort( function(a,b) { return (b.cadee_actions.libmut.length*5 + b.cadee_actions.libmut_custom.length) - (a.cadee_actions.libmut.length*5 + a.cadee_actions.libmut_custom.length) } );
draw_plot(data);
});
$("#overlay-div").click(function() {
undock_info_div();
});
// on change for the input textbox - set the cadee action library mut custom value and update the command
$("#CUSTOM_text").on('input propertychange paste', function() {
// console.log($(this).prop("value"));
var sel_data = data[x_hovered];
sel_data.cadee_actions.libmut_custom = $(this).prop("value");
update_cadee_cmd();
});
// on change for the libmut checkboxes - add the cadee action library mut value on the selected mutant
$("#info-actions").find(":checkbox").each(function() {
$(this).change(function(ev) {
var sel_data = data[x_hovered];
if ($(this).prop("id") == "inp_CUSTOM") {
ct = $("#CUSTOM_text");
if ($(this).is(":checked")) {
ct.prop("disabled", false);
ct.focus();
sel_data.cadee_actions.libmut_custom = ct.prop("value");
} else {
ct.prop("disabled", true);
sel_data.cadee_actions.libmut_custom = ""; // clear the value
};
} else {
j = $.inArray($(this).prop("value"), sel_data.cadee_actions.libmut);
if (j > -1) {
sel_data.cadee_actions.libmut.splice(j,1);
} else {
sel_data.cadee_actions.libmut.push($(this).prop("value"));
};
};
console.log(sel_data.cadee_actions);
update_cadee_cmd();
});
});
// fix for dumbass plotly_click (you have to be really precise to fire the event)
$("#graph").click(function(ev) {
dock_info_div();
});
$("#graph").mousemove(function(ev) {
curX = ev.pageX + 20;
curY = ev.pageY + 20;
var idiv = $("#info-div");
if ((curY < 100) || (curY > 450)) {
idiv.hide();
} else {
if (curX > $(window).width()/2) {
curX -= 540;
}
idiv.css({
"position": "absolute",
"top": curY + "px",
"left": curX + "px"
});
if (info_visible) {
if (!idiv.is(":visible")) {
idiv.show();
};
} else {
idiv.hide();
}
};
});
$(document).keydown(function(e) {
if (e.keyCode == 16) {
info_visible = true;
};
});
$(document).keyup(function(e){
if (e.keyCode == 16) {
info_visible = false;
}
});
document.getElementById("graph").on('plotly_hover', function(eventData){
console.log(eventData);
var tmp = x_hovered;
x_hovered = Math.round(eventData.xvals[0]);
if (tmp === x_hovered) { return };
var sel_data = data[x_hovered];
var cd = document.getElementById("graph").calcdata; // all three sets
var idiv = $("#info-div");
$("#info-header").text("System: " + sel_data.name );
var stats = $("#info-stats");
var actions = $("#info-actions");
var html_str = '<p><b>Stats</b></p><table class="pure-table"><thead><tr><th></th><th>Mean</th><th>St.dev.</th><th>Median</th></tr></thead><tbody>';
["dG#", "dG0"].forEach(function(n,i) {
var mean = Number(cd[i][x_hovered].mean).toFixed(2);
var std = Number(cd[i][x_hovered].sd).toFixed(2);
var med = Number(cd[i][x_hovered].med).toFixed(2);
html_str = html_str + "<tr><td>" + n + "</td><td>" + mean + "</td><td>" + std + "</td><td>" + med + "</td></tr>";
});
html_str = html_str + "</tbody></table>";
stats.html(html_str);
actions.find(":checkbox").each(function() {
i = $.inArray( $(this).prop("value"), sel_data.cadee_actions.libmut);
if (i > -1) {
$(this).prop("checked", true)
} else {
$(this).prop("checked", false)
};
if ($(this).prop("id") == "inp_CUSTOM") {
if (sel_data.cadee_actions.libmut_custom == "") {
$("#CUSTOM_text").prop("disabled", true);
} else {
$("#CUSTOM_text").prop("disabled", false);
$(this).prop("checked", true)
};
};
});
$("#CUSTOM_text").prop("value", sel_data.cadee_actions.libmut_custom);
});
document.getElementById("graph").on('plotly_click', function(eventData){
console.log(eventData);
dock_info_div();
});
</script>
"""
def main(cadee_db):
if not os.path.lexists(cadee_db):
print("File %s does not exist!" % cadee_db)
sys.exit(1)
# connect and get values from DB
conn = sqlite3.connect(cadee_db)
cursor = conn.cursor()
try:
cursor.execute("SELECT mutant,barr_forw,exo,barr_back FROM results WHERE feptype='us'")
except sqlite3.DatabaseError as e:
print("Error when accesing the database: '%s' (%s)" % (cadee_db, e))
sys.exit(1)
results = cursor.fetchall()
conn.close()
# get WT averages
b, e = [], []
for res in results:
mutant, barr, exo, rbarr = res
if "wt" in mutant.lower():
b.append(barr)
e.append(exo)
if not b or not e:
print("No reference ('wt') found in the database, using __absolute__ energetics.")
AVG_BARR_WT = 0
AVG_EXO_WT = 0
else:
print("Reference ('wt') found in the database, using __relative__ energetics.")
AVG_BARR_WT = sum(b)*1.0/len(b)
AVG_EXO_WT = sum(e)*1.0/len(e)
# 3-letter or 1-letter codes?
def | |
inverse DFT and quadratic output factor along x. Axes are (xo, yo, kx).
gradxQinvTPx (3D array): Derivative of above w.r.t output x. Note that z is constant - the derivative is along
a transverse plane rather than the surface.
QinvTPy (3D array): Product propagator, inverse DFT and quadratic output factor along x. Axes are (xo, yo, kx).
gradyQinvTPy (3D array): Derivative of above w.r.t output x. As with gradxQinvTPx, z is constant.
Tx (2D array): DFT factor. Axes are (kx, xi).
Ty (2D array): DFT factor. Axes are (ky, yi).
Qix (3D array): Input x quadratic phase factor vs x. Axes are (xo, yo, xi).
Qiy (3D array): Input y quadratic phase factor. Axes are (xo, yo, yi).
"""
assert kz_mode in ('local_xy', 'paraxial')
num_pointss = sa.to_scalar_pair(num_pointss)
ms = sa.to_scalar_pair(ms)
if np.isclose(ms, 1).any():
logger.debug('At least one magnification (%g, %g) close to 1.', ms)
qs_center = sa.to_scalar_pair(qs_center)
# TODO (maybe) factor out adjust_r.
if kz_mode == 'paraxial':
kz_center = k
else:
kz_center = (k**2 - (qs_center**2).sum())**0.5
z_center = np.mean(z)
if ro_centers is None:
ro_centers = ri_centers + qs_center/kz_center*z_center
# If local_xy mode is requested then propagation distances must be scaled. Calculate scaled propagation distances
# and required kz component.
if kz_mode == 'paraxial':
zx = z
zy = z
delta_kz = k
else:
fx, fy, delta_kz, delta_gxkz, delta_gykz = calc_quadratic_kz_correction(k, *qs_center)
zx = fx*z
zy = fy*z
ro_supports = rs_support*ms
xi, yi = sa.calc_xy(rs_support, num_pointss, ri_centers)
kxi, kyi = sa.calc_kxky(rs_support, num_pointss, qs_center) # +q_curvatures)
roc_x = mathx.divide0(zx, ms[0] - 1, np.inf)
roc_y = mathx.divide0(zy, ms[1] - 1, np.inf)
# Quadratic phase is centered at origin. Numerically, this requires evaluation of complex exponentials of size N^3.
# Significant fraction of total cost of this function, but unavoidable.
Qix = calc_quadratic_phase_1d(-k, xi[:, 0] - ri_centers[0], roc_x[:, :, None])
Qiy = calc_quadratic_phase_1d(-k, yi - ri_centers[1], roc_y[:, :, None])
Tx = make_fft_matrix(num_pointss[0])
if num_pointss[1] == num_pointss[0]:
Ty = Tx
else:
Ty = make_fft_matrix(num_pointss[1])
if 0:
# This was an experiment - we calculate where a given pencil beam lands based on initial position and
# momentum. It didn't improve the stability of the algorithm. In a ray picture, clip factor should be 0.5. But
# I found that anything below 3 gave unacceptable artefacts.
clip_factor = 3
mean_rocs = z_centers/(ms - 1)
xo_kx_xi = xi[:, 0]*ms[0] + (kxi - k*ri_centers[0]/mean_rocs[0])*z_centers[0]/k
yo_ky_yi = yi*ms[1] + (kyi[:, None] - k*ri_centers[1]/mean_rocs[1])*z_centers[1]/k
in_xo = abs(xo_kx_xi - ro_centers[0]) < ro_supports[0]*clip_factor
in_yo = abs(yo_ky_yi - ro_centers[1]) < ro_supports[1]*clip_factor
Txp = Tx*in_xo
Typ = Ty*in_yo
xo, yo = sa.calc_xy(ro_supports, num_pointss, ro_centers)
roc_xo = roc_x + zx
roc_yo = roc_y + zy
# If in local_xy mode, then the factors which depend on xo and yo use transformed quantities, which we subsitute here.
if kz_mode == 'local_xy':
xo = xo + delta_gxkz*z
yo = yo + delta_gykz*z
# Effective kx and ky for propagation takes into account center of curvature.
kxip = kxi[:, 0] - k*ri_centers[0]/roc_x[:, :, None]
kyip = kyi - k*ri_centers[1]/roc_y[:, :, None]
# Calculate product of propagator and inverse transform along x axis. The x axis (arbitrarily) includes delta_kz.
# Could combine all exponents before exponentiation? Won't help much because the time will be dominated by the propagator
# which is N^3 - the others are N^2.
phi = delta_kz*z + k*(ri_centers[0]**2/(2*roc_x) - ri_centers[0]*xo/(roc_x*ms[0]))
QinvTPx = (calc_propagator_quadratic_1d(k*ms[0], kxip, zx[:, :, None])* # Propagation phase scaled by magnification.
calc_quadratic_phase_1d(k, xo, roc_xo)[:, :, None]* # Normal Sziklas-Siegman final quadratic phase.
mathx.expj(phi[:, :, None])*
Tx.conj()[:, None, :]/ # Inverse DFT.
abs(ms[0])**0.5) # Magnification correction to amplitude.
# Calculate product of propagator and inverse transform along x axis. Result depends on
phi = k*(ri_centers[1]**2/(2*roc_y) - ri_centers[1]*yo/(roc_y*ms[1]))
QinvTPy = (calc_propagator_quadratic_1d(k*ms[1], kyip, zy[:, :, None])* # Propagation phase scaled by magnification.
calc_quadratic_phase_1d(k, yo, roc_yo)[:, :, None]* # Normal Sziklas-Siegman final quadratic phase.
mathx.expj(phi[:, :, None])*
Ty.conj()/ # Inverse DFT.
abs(ms[1])**0.5) # Magnification correction to amplitude.
# If local_xy mode, need translation correction factor. Could combine this with above to reduce number of N^3 expj
# calls.
if kz_mode == 'local_xy':
QinvTPx *= mathx.expj(delta_gxkz*kxi[:, 0]/ms[0]*z[:, :, None])
QinvTPy *= mathx.expj(delta_gykz*kyi/ms[1]*z[:, :, None])
# Evaluate derivatives of the invTP factors with respect to output x and y (but constant z - not along the curved
# surface).
gradxQinvTPx = 1j*(k*(xo/roc_xo)[:, :, None] - ri_centers[0]*k/(roc_x[:, :, None]*ms[0]) + kxi[:, 0]/ms[0])*QinvTPx
gradyQinvTPy = 1j*(k*(yo/roc_yo)[:, :, None] - ri_centers[1]*k/(roc_y[:, :, None]*ms[1]) + kyi/ms[1])*QinvTPy
# Won't use z gradients for now - will keep for future.
# gradzPx=1j*calc_kz_paraxial_1d(k*ms[0], kxip)*Px
factors = QinvTPx, gradxQinvTPx, QinvTPy, gradyQinvTPy, Tx, Ty, Qix, Qiy
return factors
def calc_plane_to_curved_spherical_arbitrary_factors(k, rs_support, num_pointss, z, xo, yo, roc_x, roc_y, ri_centers=(0, 0),
qs_center=(0, 0), ro_centers=None, kz_mode='local_xy'):
"""Calculate factors for 2D Sziklas-Siegman propagation (paraxial) from flat to curved surface(s).
The zero-oth order phase (k_z) is (arbitrarily) included in QinvTPx.
Compared to regular Sziklas-Siegman procedure, the factors are complicated by a transformation that allows nonzero
real and angular space centers. See derivation page 114 Dane's logbook 2.
Possible optimizations: could probably shave 50% off by more aggressive use of numba to avoid calculation of intermediates.
But typically the time is spent using (tensor contract) rather than calculating the factors, so it won't bring
a great speedup.
Args:
k (scalar): Wavenumber.
rs_support (scalar or pair): Real space support.
num_pointss (scalar int or pair): Number of sampling points in input.
z (M*N array): Propagation for distance.
xo (M*1 array): Output x sample values.
yo (N array): Output y sample values.
roc_x (M*N array): Radius of curvature in x, at the input, but sampled at the output points.
roc_y (M*N array): Radius of curvature in y, at the input, but sampled at the output points.
ri_centers (pair of scalars): Center of initial real-space aperture.
qs_center (pair of scalars): Center of angular space aperture.
kz_mode (str): 'paraxial' or 'local_xy'.
Returns:
QinvTPx (3D array): Product propagator, inverse DFT and quadratic output factor along x. Axes are (xo, yo, kx).
gradxQinvTPx (3D array): Derivative of above w.r.t output x. Note that z is constant - the derivative is along
a transverse plane rather than the surface.
QinvTPy (3D array): Product propagator, inverse DFT and quadratic output factor along x. Axes are (xo, yo, kx).
gradyQinvTPy (3D array): Derivative of above w.r.t output x. As with gradxQinvTPx, z is constant.
Tx (2D array): DFT factor. Axes are (kx, xi).
Ty (2D array): DFT factor. Axes are (ky, yi).
Qix (3D array): Input x quadratic phase factor vs x. Axes are (xo, yo, xi).
Qiy (3D array): Input y quadratic phase factor. Axes are (xo, yo, yi).
"""
assert np.isscalar(k)
rs_support = sa.to_scalar_pair(rs_support)
assert kz_mode in ('local_xy', 'paraxial')
assert np.ndim(z) == 2
assert np.shape(z) == np.shape(roc_x)
assert np.shape(z) == np.shape(roc_y)
assert np.shape(xo) == (np.shape(z)[0], 1)
assert np.shape(yo) == (np.shape(z)[1], )
num_pointss = sa.to_scalar_pair(num_pointss)
qs_center = sa.to_scalar_pair(qs_center)
if ro_centers is None:
z_center = np.mean(z)
ro_centers = adjust_r(k, ri_centers, z_center, qs_center, kz_mode)
# If local_xy mode is requested then propagation distances must be scaled. Calculate scaled propagation distances
# and required kz component.
if kz_mode == 'paraxial':
zx = z
zy = z
delta_kz = k
else:
fx, fy, delta_kz, delta_gxkz, delta_gykz = calc_quadratic_kz_correction(k, *qs_center)
zx = fx*z
zy = fy*z
mx = zx/roc_x + 1
my = zy/roc_y + 1
xi, yi = sa.calc_xy(rs_support, num_pointss, ri_centers)
kxi, kyi = sa.calc_kxky(rs_support, num_pointss, qs_center) # +q_curvatures)
# Quadratic phase is centered at origin. Numerically, this requires evaluation of complex exponentials of size N^3.
# Significant fraction of total cost of this function, but unavoidable.
Qix = calc_quadratic_phase_1d(-k, xi[:, 0] - ri_centers[0], roc_x[:, :, None])
Qiy = calc_quadratic_phase_1d(-k, yi - ri_centers[1], roc_y[:, :, None])
Tx = make_fft_matrix(num_pointss[0])
if num_pointss[1] == num_pointss[0]:
Ty = Tx
else:
Ty = make_fft_matrix(num_pointss[1])
roc_xo = roc_x + zx
roc_yo = roc_y + zy
# If in local_xy mode, then the result of | |
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='LineDrawingRegionType'):
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))
if self.orientation is not None and 'orientation' not in already_processed:
already_processed.add('orientation')
outfile.write(' orientation="%s"' % self.gds_format_float(self.orientation, input_name='orientation'))
if self.penColour is not None and 'penColour' not in already_processed:
already_processed.add('penColour')
outfile.write(' penColour=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.penColour), input_name='penColour')), ))
if self.bgColour is not None and 'bgColour' not in already_processed:
already_processed.add('bgColour')
outfile.write(' bgColour=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.bgColour), input_name='bgColour')), ))
if self.embText is not None and 'embText' not in already_processed:
already_processed.add('embText')
outfile.write(' embText="%s"' % self.gds_format_boolean(self.embText, input_name='embText'))
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='LineDrawingRegionType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Coords is not None:
namespaceprefix_ = self.Coords_nsprefix_ + ':' if (UseCapturedNS_ and self.Coords_nsprefix_) else ''
self.Coords.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Coords', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
value = find_attr_value_('orientation', node)
if value is not None and 'orientation' not in already_processed:
already_processed.add('orientation')
value = self.gds_parse_float(value, node, 'orientation')
self.orientation = value
value = find_attr_value_('penColour', node)
if value is not None and 'penColour' not in already_processed:
already_processed.add('penColour')
self.penColour = value
self.validate_ColourSimpleType(self.penColour) # validate type ColourSimpleType
value = find_attr_value_('bgColour', node)
if value is not None and 'bgColour' not in already_processed:
already_processed.add('bgColour')
self.bgColour = value
self.validate_ColourSimpleType(self.bgColour) # validate type ColourSimpleType
value = find_attr_value_('embText', node)
if value is not None and 'embText' not in already_processed:
already_processed.add('embText')
if value in ('true', '1'):
self.embText = True
elif value in ('false', '0'):
self.embText = False
else:
raise_parse_error(node, 'Bad boolean attribute')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'Coords':
obj_ = CoordsType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Coords = obj_
obj_.original_tagname_ = 'Coords'
# end class LineDrawingRegionType
class GraphicRegionType(GeneratedsSuper):
    """Regions containing simple graphics, such as a company logo, should be
    marked as graphic regions.

    Schema attribute notes:
      * orientation: orientation in degrees of the baseline of the rectangle
        that encapsulates the region (range: -89.999 to 90).
      * type_: the type of graphic in the region.
      * numColours: an approximation of the number of colours used in the
        region.
      * embText: specifies whether the region also contains text.
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, id=None, orientation=None, type_=None, numColours=None, embText=None, Coords=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.id = _cast(None, id)
        self.id_nsprefix_ = None
        self.orientation = _cast(float, orientation)
        self.orientation_nsprefix_ = None
        self.type_ = _cast(None, type_)
        self.type__nsprefix_ = None
        self.numColours = _cast(int, numColours)
        self.numColours_nsprefix_ = None
        self.embText = _cast(bool, embText)
        self.embText_nsprefix_ = None
        self.Coords = Coords
        self.Coords_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Honour an externally registered subclass module before falling back
        # to the class-level subclass hook, then to this class itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, GraphicRegionType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if GraphicRegionType.subclass:
            return GraphicRegionType.subclass(*args_, **kwargs_)
        else:
            return GraphicRegionType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_Coords(self):
        return self.Coords
    def set_Coords(self, Coords):
        self.Coords = Coords
    def get_id(self):
        return self.id
    def set_id(self, id):
        self.id = id
    def get_orientation(self):
        return self.orientation
    def set_orientation(self, orientation):
        self.orientation = orientation
    def get_type(self):
        return self.type_
    def set_type(self, type_):
        self.type_ = type_
    def get_numColours(self):
        return self.numColours
    def set_numColours(self, numColours):
        self.numColours = numColours
    def get_embText(self):
        return self.embText
    def set_embText(self, embText):
        self.embText = embText
    def validate_GraphicsTypeSimpleType(self, value):
        # Validate type pc:GraphicsTypeSimpleType, a restriction on string.
        # Fixed: the original set ``result = False`` on an enumeration failure
        # but never initialised or returned it, so that path returned None
        # while the type-check path returned False. Now both report False.
        result = True
        if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
            if not isinstance(value, str):
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
                return False
            enumerations = ['logo', 'letterhead', 'handwritten-annotation', 'stamp', 'signature', 'paper-grow', 'punch-hole', 'other']
            if value not in enumerations:
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on GraphicsTypeSimpleType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
                result = False
        return result
    def hasContent_(self):
        if (
            self.Coords is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='GraphicRegionType', pretty_print=True):
        """Write this region as an XML element, attributes then children."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('GraphicRegionType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'GraphicRegionType':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GraphicRegionType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='GraphicRegionType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='GraphicRegionType'):
        """Write attributes in schema order, skipping ones already emitted."""
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), ))
        if self.orientation is not None and 'orientation' not in already_processed:
            already_processed.add('orientation')
            outfile.write(' orientation="%s"' % self.gds_format_float(self.orientation, input_name='orientation'))
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), ))
        if self.numColours is not None and 'numColours' not in already_processed:
            already_processed.add('numColours')
            outfile.write(' numColours="%s"' % self.gds_format_integer(self.numColours, input_name='numColours'))
        if self.embText is not None and 'embText' not in already_processed:
            already_processed.add('embText')
            outfile.write(' embText="%s"' % self.gds_format_boolean(self.embText, input_name='embText'))
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='GraphicRegionType', fromsubclass_=False, pretty_print=True):
        # Only a single optional Coords child exists; the pretty_print flag is
        # forwarded to it (the unused eol_ computation was removed).
        if self.Coords is not None:
            namespaceprefix_ = self.Coords_nsprefix_ + ':' if (UseCapturedNS_ and self.Coords_nsprefix_) else ''
            self.Coords.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Coords', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this instance from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Read attributes from *node*, validating the graphics-type enum."""
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        value = find_attr_value_('orientation', node)
        if value is not None and 'orientation' not in already_processed:
            already_processed.add('orientation')
            value = self.gds_parse_float(value, node, 'orientation')
            self.orientation = value
        value = find_attr_value_('type', node)
        if value is not None and 'type' not in already_processed:
            already_processed.add('type')
            self.type_ = value
            self.validate_GraphicsTypeSimpleType(self.type_) # validate type GraphicsTypeSimpleType
        value = find_attr_value_('numColours', node)
        if value is not None and 'numColours' not in already_processed:
            already_processed.add('numColours')
            self.numColours = self.gds_parse_integer(value, node, 'numColours')
        value = find_attr_value_('embText', node)
        if value is not None and 'embText' not in already_processed:
            already_processed.add('embText')
            if value in ('true', '1'):
                self.embText = True
            elif value in ('false', '0'):
                self.embText = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        if nodeName_ == 'Coords':
            obj_ = CoordsType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Coords = obj_
            obj_.original_tagname_ = 'Coords'
# end class GraphicRegionType
class TableRegionType(GeneratedsSuper):
"""Tabular data in any form is represented with a table region. Rows and
columns may or may not have separator lines; these lines are not
separator regions.The orientation in degrees of the baseline of the
region (Range: -89.999,90).The number of rows present in the tableThe
number of columns present in the tableThe colour of the lines used in
the regionThe background colour of the regionSpecifies the presence of
line separatorsSpecifies whether the region also contains text"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, id=None, orientation=None, rows=None, columns=None, lineColour=None, bgColour=None, lineSeparators=None, embText=None, Coords=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.id = _cast(None, id)
self.id_nsprefix_ = None
self.orientation = _cast(float, orientation)
self.orientation_nsprefix_ = None
self.rows = _cast(int, rows)
self.rows_nsprefix_ = None
self.columns = _cast(int, columns)
self.columns_nsprefix_ = None
self.lineColour = _cast(None, lineColour)
self.lineColour_nsprefix_ = None
self.bgColour = _cast(None, bgColour)
self.bgColour_nsprefix_ = None
self.lineSeparators = _cast(bool, lineSeparators)
self.lineSeparators_nsprefix_ = None
self.embText = _cast(bool, embText)
self.embText_nsprefix_ = None
self.Coords = Coords
self.Coords_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, TableRegionType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if TableRegionType.subclass:
return TableRegionType.subclass(*args_, **kwargs_)
else:
return TableRegionType(*args_, | |
#!/usr/bin/env python
# Copyright (c) 2015-2016 University Corporation for Atmospheric Research/Unidata
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
import asyncio
import functools
import glob
import logging
import os
import os.path
import shutil
import struct
import sys
import threading
from collections import namedtuple, defaultdict
from contextlib import contextmanager
from datetime import datetime
#
# Set up logging
#
class ProdInfoAdapter(logging.LoggerAdapter):
    """LoggerAdapter that guarantees every record has ``site``/``volume_id``.

    The caller may pass ``extra`` as a ProdInfo (namedtuple), a plain
    (site, volume_id) sequence, or nothing at all; in every case the record
    ends up with a dict so the log format string resolves.
    """

    def process(self, msg, kwargs):
        if 'extra' not in kwargs:
            # No per-call context: fall back to the adapter's defaults.
            kwargs['extra'] = self.extra
            return msg, kwargs
        supplied = kwargs['extra']
        try:
            kwargs['extra'] = supplied._asdict()
        except AttributeError:
            # Not a namedtuple: treat it as a (site, volume_id) sequence.
            kwargs['extra'] = dict(zip(['site', 'volume_id'], supplied))
        return msg, kwargs
def init_logger(formatter=None):
    """Create the module logger, preferring LDM's syslog over stderr.

    An optional *formatter* is attached to whichever handler is used.
    """
    import logging.handlers
    import socket

    module_logger = logging.getLogger('LDMHandler')
    try:
        # Send logs to LDM's log if possible, otherwise send to stderr.
        log_handler = logging.handlers.SysLogHandler(address='/dev/log', facility='local0')
    except (FileNotFoundError, socket.error):
        log_handler = logging.StreamHandler()
    if formatter:
        log_handler.setFormatter(formatter)
    module_logger.addHandler(log_handler)
    return module_logger
def init_lv2_logger():
    """Set up Level 2 logging and a crash log for faulthandler tracebacks."""
    import faulthandler

    os.makedirs('logs', exist_ok=True)
    # The file object must stay alive: faulthandler writes straight to its
    # descriptor if the interpreter crashes.
    crash_log = open('logs/l2assemble-crash.log', 'a')
    faulthandler.enable(crash_log)
    fmt = '%(filename)s [%(funcName)s]: [%(site)s %(volume_id)03d] %(message)s'
    base_logger = init_logger(logging.Formatter(fmt=fmt))
    return ProdInfoAdapter(base_logger, {'site': '----', 'volume_id': 0})
def log_rmtree_error(operation, target, exc_info):
    """Callback for shutil.rmtree(onerror=...): log the failure, don't raise."""
    logger.error('Error removing (%s %s)', operation, target)


logger = logging.getLogger('LDMHandler')
#
# Keeping stats
#
class ChunkStats(object):
    """Record per-volume chunk statistics in a small sqlite database."""

    def __init__(self, fname):
        import sqlite3
        self._conn = sqlite3.connect(fname)
        try:
            self._conn.execute('CREATE TABLE volumes '
                               '(site text, date timestamp, volume integer, count integer, '
                               'missing_start integer, missing_chunks text)')
        except sqlite3.OperationalError:
            # Table already exists from an earlier run; reuse it as-is.
            pass

    def log_volume(self, prod_info, num_chunks, missing_start, missing):
        """Insert one finished volume's stats and commit immediately."""
        row = (prod_info.site, prod_info.dt, prod_info.volume_id, num_chunks,
               int(missing_start), ','.join(missing))
        self._conn.execute('INSERT INTO volumes VALUES(?,?,?,?,?,?)', row)
        self._conn.commit()

    def __del__(self):
        self._conn.close()
#
# LDM processing stuff
#
# Binary layouts used when parsing the LDM feed and writing volume headers.
hdr_struct = struct.Struct('>12s2L4s')  # Volume header: version, date, time, site
len_struct = struct.Struct('I')  # Length prefix for the LDM byte strings
meta_struct = struct.Struct('6IQiII')  # Fixed-size portion of LDM metadata
ldm_meta = namedtuple('LDMMeta', 'meta_length md5_1 md5_2 md5_3 md5_4 prod_len creation_secs '
                                 'creation_micro feed_type seq_num')
_ProdInfo = namedtuple('ProdInfo',
                       'format site dt volume_id chunk_id chunk_type version unused')


class ProdInfo(_ProdInfo):
    """Product identification parsed from an LDM product name.

    Equality and hashing consider only (site, volume_id), so all chunks of
    one volume compare equal and share a dict slot.
    """
    __slots__ = _ProdInfo.__slots__  # To fix _asdict(). See Python #249358

    def __str__(self):
        # Round-trip back to the underscore-separated string form.
        as_text = self._replace(dt=self.dt.strftime('%Y%m%d%H%M%S'),
                                chunk_id=str(self.chunk_id),
                                volume_id=str(self.volume_id))
        return '_'.join(as_text)

    def __hash__(self):
        return hash(self.to_key())

    def __eq__(self, other):
        return self.to_key() == other.to_key()

    def to_key(self):
        """Return the (site, volume_id) pair used for grouping chunks."""
        return self.site, self.volume_id

    @classmethod
    def fromstring(cls, s):
        parsed = cls(*s.split('_'))
        return parsed._replace(dt=datetime.strptime(parsed.dt, '%Y%m%d%H%M%S'),
                               chunk_id=int(parsed.chunk_id),
                               volume_id=int(parsed.volume_id))

    @classmethod
    def from_ldm_string(cls, s):
        """Parse e.g. 'L2-BZIP2/KFTG/20150908215946/494/43/I/V06/0'."""
        return cls.fromstring(s.replace('/', '_'))

    def as_vol_hdr(self):
        """Build the 24-byte volume header record for this product."""
        version = 'AR2V00' + self.version[1:] + '.' + str(self.volume_id)
        timestamp = (self.dt - datetime(1970, 1, 1)).total_seconds()
        date = int(timestamp // 86400)
        time = int(timestamp - date * 86400)
        return hdr_struct.pack(version.encode('ascii'), date + 1, time * 1000,
                               self.site.encode('ascii'))
# Raises an EOFError if we get a 0 byte read, which is by definition an EOF in Python
async def check_read(fobj, num_bytes):
    """Read exactly *num_bytes* from *fobj*.

    A 0-byte read is by definition an EOF in Python, so it raises EOFError.
    """
    data = await fobj.readexactly(num_bytes)
    if not data:
        raise EOFError('Got 0 byte read.')
    return data
async def read_byte_string(fobj):
    """Read one length-prefixed ASCII string from the stream."""
    raw_len = await check_read(fobj, len_struct.size)
    slen, = len_struct.unpack(raw_len)
    payload = await check_read(fobj, slen)
    return payload.decode('ascii')
# Stuff for parsing LDM metadata
async def read_metadata(fobj):
    """Parse one LDM metadata block from the stream.

    Reads the fixed-size header, then the product identifier and origin
    strings; returns (product identifier, product length in bytes).
    """
    raw = await check_read(fobj, meta_struct.size)
    meta = ldm_meta(*meta_struct.unpack(raw))
    logger.debug('LDM metadata: %s', meta)
    prod_ident = await read_byte_string(fobj)
    logger.debug('Got prod_id: %s', prod_ident)
    prod_origin = await read_byte_string(fobj)
    logger.debug('Got origin: %s', prod_origin)
    return prod_ident, meta.prod_len
#
# Caching and storage of chunks
#
# Overriding defaultdict--essentially (at first) just to pass key to factory
class VolumeStore(defaultdict):
    """Maps (site, volume) product keys to ChunkStore instances.

    Overriding defaultdict--essentially (at first) just to pass key to
    factory. New stores are primed from the on-disk cache or S3, and a
    watcher task assembles arriving chunks until the volume completes or
    times out.

    Fixed: ``_create_store`` referenced an undefined global ``loop`` when
    scheduling cache loads; it now uses the event loop handed to the
    constructor (stored as ``self._loop``, which was otherwise unused).
    """
    RELOAD_FILE = '.vols_restart'
    def __init__(self, loop, cache_dir, gen_header, s3=None, s3_path_format=''):
        super(defaultdict, self).__init__()
        self._loop = loop
        self._cache_dir = cache_dir
        self._gen_header = gen_header
        self._s3_buckets = S3BucketPool(s3) if s3 else None
        self._s3_path = s3_path_format
    def _reload_vols(self):
        # Re-create stores for volumes marked incomplete at last shutdown.
        if os.path.exists(self.RELOAD_FILE):
            with open(self.RELOAD_FILE, 'r') as reload:
                for line in reload:
                    line = line.rstrip()  # Pop off newline
                    if line:
                        logger.info('Reloading: %s', line)
                        pi = ProdInfo.fromstring(line)
                        self.__missing__(pi)  # Trigger creation
            os.remove(self.RELOAD_FILE)
    def __missing__(self, key):
        new = self._create_store(key)
        self[key] = new
        return new
    def _create_store(self, prod_info):
        """Create, prime, and register a ChunkStore for *prod_info*."""
        logger.debug('Creating store.', extra=prod_info)
        store = ChunkStore()
        # Try to load any data for this volume from the cache
        if self._s3_buckets:
            fut = self._loop.run_in_executor(None, store.loadfroms3, self._s3_buckets,
                                             self._s3_path.format(prod_info), prod_info)
            fut.add_done_callback(lambda f: store.ready.set())
        else:
            cache = self.cache_dir(prod_info.to_key())
            if os.path.exists(cache):
                logger.debug('Loading previously stored chunks from: %s', cache,
                             extra=prod_info)
                fut = self._loop.run_in_executor(None, store.loadfromdir, cache)
                fut.add_done_callback(lambda f: store.ready.set())
                fut.add_done_callback(lambda f: shutil.rmtree(cache,
                                                              onerror=log_rmtree_error))
            else:
                store.ready.set()
        # Remove any old cache directories
        self.clear_old_caches(prod_info.to_key())
        # Pass in call-back to call when done. We don't use the standard future callback
        # because it will end up queued--we need to run immediately.
        store.task = asyncio.ensure_future(
            store.wait_for_chunks(self.timeout,
                                  functools.partial(self.chunk_store_done, key=prod_info)))
        store.ensure_header(self._gen_header)
        return store
    def cache_dir(self, key):
        """Return the cache directory path for a (site, volume) key."""
        site, vol_num = key
        return os.path.join(self._cache_dir, '.' + site, '%03d' % vol_num)
    def clear_old_caches(self, key):
        logger.debug('Checking for old caches...', extra=key)
        # List all old cache directories for this site
        site, cur_vol = key
        for fname in glob.glob(os.path.join(self._cache_dir, '.' + site, '[0-9][0-9][0-9]')):
            if os.path.isdir(fname):  # Minor sanity check that this is ours
                logger.debug('Found: %s', fname, extra=key)
                # Use this volume number as a proxy for time
                num = int(os.path.basename(fname))
                # Find the difference, account for the wrap 999->0
                diff = cur_vol - num
                if diff < 0:
                    diff += 1000
                # If the one we found is more than 30 past, delete it
                if diff > 30:
                    logger.info('Deleting old cache: %s', fname, extra=key)
                    shutil.rmtree(fname, onerror=log_rmtree_error)
    def save(self):
        """Persist all pending stores' chunks to the local cache (non-S3)."""
        if not self._s3_buckets:
            for key, chunks in self.items():
                cache = self.cache_dir(key.to_key())
                logger.warning('Caching chunks to: %s', cache, extra=key)
                chunks.savetodir(cache)
    async def finish(self):
        """Flush every store and mark still-incomplete volumes for reload."""
        # Need to iterate over copy of keys because items could be removed during iteration
        for key in list(self.keys()):
            logger.debug('Flushing chunk store queue.', extra=key)
            store = self[key]
            await store.finish()
            store.task.cancel()
        # Mark volumes that do not finish for reload in case we don't get any more chunks
        if self:
            with open(self.RELOAD_FILE, 'w') as reload:
                for key in self:
                    txt = str(key)
                    logger.info('Marking for reload: %s', txt, extra=key)
                    reload.write('%s\n' % txt)
        logger.debug('Flushing volumes queue')
        await self.vol_dest.join()
    async def wait_for_chunks(self, src, vol_dest, timeout):
        """Main pump: route each incoming chunk to its volume's store."""
        self.vol_dest = vol_dest
        self.timeout = timeout
        self._reload_vols()
        while True:
            # Get the next chunk when available
            chunk = await src.get()
            # Find the appropriate store (will be created if necessary)
            await self[chunk.prod_info].enqueue(chunk)
            src.task_done()
    def chunk_store_done(self, key):
        # Hand the completed store to the volume consumer and forget it.
        logger.debug('Chunk store finished.', extra=key)
        store = self.pop(key)
        self.vol_dest.put_nowait(store)
# A single received product chunk: its parsed ProdInfo plus the raw payload bytes.
Chunk = namedtuple('Chunk', 'prod_info data')
class ChunkStore(object):
def __init__(self):
self._store = dict()
self.first = self.last = -1
self._vol_hdr = b''
self._add_header = False
self._queue = asyncio.Queue()
self.ready = asyncio.Event()
def loadfromdir(self, path):
# Go find all the appropriately named files in the directory and load them
for fname in sorted(glob.glob(os.path.join(path, 'L2-BZIP2_*')),
key=lambda f: ProdInfo.fromstring(os.path.basename(f)).chunk_id):
name = os.path.basename(fname)
self.add(Chunk(prod_info=ProdInfo.fromstring(name), data=open(fname, 'rb').read()))
logger.info('Loaded %d chunks from cache %s', len(self), path)
def savetodir(self, path):
# Create the directory if necessary
if not os.path.exists(path):
os.makedirs(path)
# Write the chunks
logger.warning('Saving %d chunks: [%s]', len(self),
' '.join(map(str, self._store.keys())),
extra=self.first_chunk().prod_info)
for chunk in self:
with open(os.path.join(path, str(chunk.prod_info)), 'wb') as outf:
if chunk.prod_info.chunk_id == self.first:
outf.write(self.vol_hdr)
outf.write(chunk.data)
def loadfroms3(self, bucket_pool, key, prod_info):
loaded = False
with bucket_pool.use() as bucket:
prefix = '-'.join(key.split('-')[:-2])
for obj in bucket.objects.filter(Prefix=prefix):
name = os.path.basename(obj.key)
date, time, chunk, chunk_type = name.split('-')
pi = prod_info._replace(chunk_id=int(chunk), chunk_type=chunk_type)
# When loading from cache, make sure we don't already have a chunk before
# adding it
if pi.chunk_id not in self._store:
loaded = True
self.add(Chunk(prod_info=pi, data=obj.get()['Body'].read()))
if loaded:
logger.info('Loaded %d chunks from S3 cache %s', len(self), prefix,
extra=prod_info)
def __len__(self):
return len(self._store)
def need_more(self):
return len(self) != self.last
def min_id(self):
return min(self._store.keys()) if self._store else 0
def max_id(self):
return max(self._store.keys()) if self._store else 0
def first_chunk(self):
return next(iter(self._store.values()))
# Iterate in the order of the keys, but only return the value
def __iter__(self):
return iter(i[1] for i in sorted(self._store.items()))
async def finish(self):
await self._queue.join()
async def enqueue(self, chunk):
await self._queue.put(chunk)
async def wait_for_chunks(self, timeout, when_done):
chunk = None
await self.ready.wait()
while self.need_more():
try:
chunk = await asyncio.wait_for(self._queue.get(), timeout)
self.add(chunk)
self._queue.task_done()
except asyncio.TimeoutError:
kwargs = {'extra': chunk.prod_info} if chunk else {}
logger.warning('Finishing due to timeout.', **kwargs)
break
when_done()
# Add a chunk to our store. If this was the start or end, note that as well.
def add(self, chunk):
max_id = self.max_id()
chunk_id = chunk.prod_info.chunk_id
if chunk_id != max_id + 1:
if chunk_id in self._store:
if chunk_id > 1:
logger.warning('Duplicate chunk: %d', chunk_id, extra=chunk.prod_info)
else:
logger.warning('Chunks out of order--Got: %d Max: %d', chunk_id, max_id,
extra=chunk.prod_info)
logger.debug('Added chunk: %d', chunk_id, extra=chunk.prod_info)
# Not only do we need to note the first block, we need to pop off the header bytes
chunk_type = chunk.prod_info.chunk_type
if chunk_type == 'S':
self.first = chunk_id
self.vol_hdr = chunk.data[:hdr_struct.size]
chunk = chunk._replace(data=chunk.data[hdr_struct.size:])
elif chunk_type == 'E':
self.last = chunk_id
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public input-type classes exported by this generated module. (Some of the
# listed names are defined further down the file, outside this excerpt.)
__all__ = [
    'AutoProvisioningGroupLaunchTemplateConfigArgs',
    'DedicatedHostNetworkAttributeArgs',
    'EcsLaunchTemplateDataDiskArgs',
    'EcsLaunchTemplateNetworkInterfacesArgs',
    'EcsLaunchTemplateSystemDiskArgs',
    'ImageDiskDeviceMappingArgs',
    'ImageImportDiskDeviceMappingArgs',
    'InstanceDataDiskArgs',
    'LaunchTemplateDataDiskArgs',
    'LaunchTemplateNetworkInterfacesArgs',
    'LaunchTemplateSystemDiskArgs',
    'GetDedicatedHostsOperationLockArgs',
    'GetDisksOperationLockArgs',
    'GetEcsDisksOperationLockArgs',
]
@pulumi.input_type
class AutoProvisioningGroupLaunchTemplateConfigArgs:
    # NOTE(review): code is left byte-identical because @pulumi.input_type
    # introspects the class structure; only documentation is added here.
    def __init__(__self__, *,
                 max_price: pulumi.Input[str],
                 vswitch_id: pulumi.Input[str],
                 weighted_capacity: pulumi.Input[str],
                 instance_type: Optional[pulumi.Input[str]] = None,
                 priority: Optional[pulumi.Input[str]] = None):
        """
        Launch-template override entry for an auto-provisioning group.

        :param pulumi.Input[str] max_price: Required; serialized as ``maxPrice``.
               Presumably the maximum price for the instance — confirm against
               the provider documentation.
        :param pulumi.Input[str] vswitch_id: Required; serialized as ``vswitchId``.
        :param pulumi.Input[str] weighted_capacity: Required; serialized as
               ``weightedCapacity``.
        :param pulumi.Input[str] instance_type: Optional; serialized as
               ``instanceType``.
        :param pulumi.Input[str] priority: Optional.
        """
        pulumi.set(__self__, "max_price", max_price)
        pulumi.set(__self__, "vswitch_id", vswitch_id)
        pulumi.set(__self__, "weighted_capacity", weighted_capacity)
        if instance_type is not None:
            pulumi.set(__self__, "instance_type", instance_type)
        if priority is not None:
            pulumi.set(__self__, "priority", priority)
    @property
    @pulumi.getter(name="maxPrice")
    def max_price(self) -> pulumi.Input[str]:
        return pulumi.get(self, "max_price")
    @max_price.setter
    def max_price(self, value: pulumi.Input[str]):
        pulumi.set(self, "max_price", value)
    @property
    @pulumi.getter(name="vswitchId")
    def vswitch_id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "vswitch_id")
    @vswitch_id.setter
    def vswitch_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "vswitch_id", value)
    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> pulumi.Input[str]:
        return pulumi.get(self, "weighted_capacity")
    @weighted_capacity.setter
    def weighted_capacity(self, value: pulumi.Input[str]):
        pulumi.set(self, "weighted_capacity", value)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "instance_type")
    @instance_type.setter
    def instance_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_type", value)
    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "priority")
    @priority.setter
    def priority(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "priority", value)
@pulumi.input_type
class DedicatedHostNetworkAttributeArgs:
    # NOTE(review): code is left byte-identical because @pulumi.input_type
    # introspects the class structure; only documentation is added here.
    def __init__(__self__, *,
                 slb_udp_timeout: Optional[pulumi.Input[int]] = None,
                 udp_timeout: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] slb_udp_timeout: The timeout period for a UDP session between Server Load Balancer (SLB) and the dedicated host. Unit: seconds. Valid values: 15 to 310.
        :param pulumi.Input[int] udp_timeout: The timeout period for a UDP session between a user and an Alibaba Cloud service on the dedicated host. Unit: seconds. Valid values: 15 to 310.
        """
        if slb_udp_timeout is not None:
            pulumi.set(__self__, "slb_udp_timeout", slb_udp_timeout)
        if udp_timeout is not None:
            pulumi.set(__self__, "udp_timeout", udp_timeout)
    @property
    @pulumi.getter(name="slbUdpTimeout")
    def slb_udp_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        The timeout period for a UDP session between Server Load Balancer (SLB) and the dedicated host. Unit: seconds. Valid values: 15 to 310.
        """
        return pulumi.get(self, "slb_udp_timeout")
    @slb_udp_timeout.setter
    def slb_udp_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "slb_udp_timeout", value)
    @property
    @pulumi.getter(name="udpTimeout")
    def udp_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        The timeout period for a UDP session between a user and an Alibaba Cloud service on the dedicated host. Unit: seconds. Valid values: 15 to 310.
        """
        return pulumi.get(self, "udp_timeout")
    @udp_timeout.setter
    def udp_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "udp_timeout", value)
@pulumi.input_type
class EcsLaunchTemplateDataDiskArgs:
    """Data-disk specification block of an ECS launch template.

    Auto-generated Pulumi input type; every field is optional and is
    recorded (via ``pulumi.set``) only when a value is supplied.
    """
    def __init__(__self__, *,
                 category: Optional[pulumi.Input[str]] = None,
                 delete_with_instance: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 encrypted: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 performance_level: Optional[pulumi.Input[str]] = None,
                 size: Optional[pulumi.Input[int]] = None,
                 snapshot_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] category: The category of the disk.
        :param pulumi.Input[bool] delete_with_instance: Indicates whether the data disk is released with the instance.
        :param pulumi.Input[str] description: The description of the data disk.
        :param pulumi.Input[bool] encrypted: Encrypted the data in this disk.
        :param pulumi.Input[str] name: The name of the data disk.
        :param pulumi.Input[str] performance_level: The performance level of the ESSD used as the data disk.
        :param pulumi.Input[int] size: The size of the data disk.
        :param pulumi.Input[str] snapshot_id: The snapshot ID used to initialize the data disk. If the size specified by snapshot is greater that the size of the disk, use the size specified by snapshot as the size of the data disk.
        """
        if category is not None:
            pulumi.set(__self__, "category", category)
        if delete_with_instance is not None:
            pulumi.set(__self__, "delete_with_instance", delete_with_instance)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if encrypted is not None:
            pulumi.set(__self__, "encrypted", encrypted)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if performance_level is not None:
            pulumi.set(__self__, "performance_level", performance_level)
        if size is not None:
            pulumi.set(__self__, "size", size)
        if snapshot_id is not None:
            pulumi.set(__self__, "snapshot_id", snapshot_id)

    @property
    @pulumi.getter
    def category(self) -> Optional[pulumi.Input[str]]:
        """
        The category of the disk.
        """
        return pulumi.get(self, "category")

    @category.setter
    def category(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "category", value)

    @property
    @pulumi.getter(name="deleteWithInstance")
    def delete_with_instance(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether the data disk is released with the instance.
        """
        return pulumi.get(self, "delete_with_instance")

    @delete_with_instance.setter
    def delete_with_instance(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_with_instance", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the data disk.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def encrypted(self) -> Optional[pulumi.Input[bool]]:
        """
        Encrypted the data in this disk.
        """
        return pulumi.get(self, "encrypted")

    @encrypted.setter
    def encrypted(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "encrypted", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the data disk.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="performanceLevel")
    def performance_level(self) -> Optional[pulumi.Input[str]]:
        """
        The performance level of the ESSD used as the data disk.
        """
        return pulumi.get(self, "performance_level")

    @performance_level.setter
    def performance_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "performance_level", value)

    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[int]]:
        """
        The size of the data disk.
        """
        return pulumi.get(self, "size")

    @size.setter
    def size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "size", value)

    @property
    @pulumi.getter(name="snapshotId")
    def snapshot_id(self) -> Optional[pulumi.Input[str]]:
        """
        The snapshot ID used to initialize the data disk. If the size specified by snapshot is greater that the size of the disk, use the size specified by snapshot as the size of the data disk.
        """
        return pulumi.get(self, "snapshot_id")

    @snapshot_id.setter
    def snapshot_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot_id", value)
@pulumi.input_type
class EcsLaunchTemplateNetworkInterfacesArgs:
    """Elastic network interface (ENI) block of an ECS launch template.

    Auto-generated Pulumi input type; every field is optional and is
    recorded (via ``pulumi.set``) only when a value is supplied.

    NOTE(review): the upstream docstrings for ``description`` and ``name``
    said "data disk" (a copy-paste from the disk type); corrected here to
    refer to the network interface.
    """
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 primary_ip: Optional[pulumi.Input[str]] = None,
                 security_group_id: Optional[pulumi.Input[str]] = None,
                 vswitch_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] description: The description of the network interface.
        :param pulumi.Input[str] name: The name of the network interface.
        :param pulumi.Input[str] primary_ip: The primary private IP address of the ENI.
        :param pulumi.Input[str] security_group_id: The security group ID must be one in the same VPC.
        :param pulumi.Input[str] vswitch_id: The VSwitch ID for ENI. The instance must be in the same zone of the same VPC network as the ENI, but they may belong to different VSwitches.
        """
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if primary_ip is not None:
            pulumi.set(__self__, "primary_ip", primary_ip)
        if security_group_id is not None:
            pulumi.set(__self__, "security_group_id", security_group_id)
        if vswitch_id is not None:
            pulumi.set(__self__, "vswitch_id", vswitch_id)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the network interface.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the network interface.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="primaryIp")
    def primary_ip(self) -> Optional[pulumi.Input[str]]:
        """
        The primary private IP address of the ENI.
        """
        return pulumi.get(self, "primary_ip")

    @primary_ip.setter
    def primary_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_ip", value)

    @property
    @pulumi.getter(name="securityGroupId")
    def security_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The security group ID must be one in the same VPC.
        """
        return pulumi.get(self, "security_group_id")

    @security_group_id.setter
    def security_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "security_group_id", value)

    @property
    @pulumi.getter(name="vswitchId")
    def vswitch_id(self) -> Optional[pulumi.Input[str]]:
        """
        The VSwitch ID for ENI. The instance must be in the same zone of the same VPC network as the ENI, but they may belong to different VSwitches.
        """
        return pulumi.get(self, "vswitch_id")

    @vswitch_id.setter
    def vswitch_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vswitch_id", value)
@pulumi.input_type
class EcsLaunchTemplateSystemDiskArgs:
def __init__(__self__, *,
category: Optional[pulumi.Input[str]] = None,
delete_with_instance: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
iops: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
performance_level: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] category: The category of the disk.
:param pulumi.Input[bool] delete_with_instance: Indicates whether the data disk is released with the instance.
:param pulumi.Input[str] description: The description of the data disk.
:param pulumi.Input[str] iops: The Iops.
:param pulumi.Input[str] name: The name of the data disk.
:param pulumi.Input[str] performance_level: The performance level of the ESSD used as the data disk.
:param pulumi.Input[int] size: The size of the data disk.
"""
if category is not None:
pulumi.set(__self__, "category", category)
if delete_with_instance is not None:
pulumi.set(__self__, "delete_with_instance", delete_with_instance)
if description is not None:
pulumi.set(__self__, "description", description)
if iops is not None:
pulumi.set(__self__, "iops", iops)
if name is not None:
pulumi.set(__self__, "name", name)
if performance_level is not None:
pulumi.set(__self__, "performance_level", performance_level)
if size is not None:
pulumi.set(__self__, "size", size)
@property
@pulumi.getter
def category(self) -> Optional[pulumi.Input[str]]:
"""
The category of the disk.
"""
return pulumi.get(self, "category")
@category.setter
| |
self._external_attributes[full_name] = cap
else:
self.debug('%s.getXAttr: using %s proxy to %s' % (self._locals.get('ATTRIBUTE'),'taurus' if self.UseTaurus else 'PyTango',full_name))
if write:
self.info('getXAttr(Write): %s(%s)'%(type(wvalue),wvalue))
self._external_attributes[full_name].write(wvalue)
result = wvalue
else:
attrval = self._external_attributes[full_name].read()
result = attrval.value
self.debug('%s.read() = %s ...'%(full_name,str(result)[:40]))
except Exception,e:
msg = 'Unable to read attribute %s from device %s: \n%s' % (str(aname),str(device),traceback.format_exc())
print(msg)
self.error(msg)
self.last_attr_exception = (time.time(),msg,e)
#Exceptions are not re_thrown to allow other commands to be evaluated if this fails.
finally:
if hasattr(self,'myClass') and self.myClass:
self.myClass.DynDev=self #NOT REDUNDANT: If a call to another device in the same server occurs this pointer could have been modified.
#Check added to prevent exceptions due to empty arrays
if hasattr(result,'__len__') and not len(result):
result = default if hasattr(default,'__len__') else []
elif result is None:
result = default
self.debug('Out of getXAttr(%s)'%shortstr(result,40))
return result
def event_received(self,source,type_,attr_value):
    """
    This method is needed to re-trigger events in attributes that
    receive events from other devices (e.g. use XATTR in formula)

    When an event arrives from an external source, every dynamic
    attribute registered in ``self._external_listeners`` for that source
    is re-evaluated (which in turn pushes its own events).  Exceptions
    are swallowed so the event subsystem is never broken by a formula.
    """
    # Local logger that honours the device log level; printing directly
    # because this callback may run outside the Tango logging thread.
    def _log(prio,s,obj=self): #,level=self.log_obj.level):
        if obj.getLogLevel(prio)>=obj.log_obj.level:
            print('%s(%s) %s %s: %s' % (
                prio.upper(),
                (obj.getLogLevel(prio),obj.log_obj.level),
                time.strftime('%Y-%m-%d %H:%M:%S',time.localtime()),
                obj.get_name(),s))
    # Config events are only logged, not forwarded.
    if type_ == tango.fakeEventType.Config:
        _log('debug','In DynamicDS.event_received(%s(%s),%s,%s): Config Event Not Implemented!'%(
            type(source).__name__,source,tango.fakeEventType[type_],type(attr_value).__name__,#getattr(attr_value,'value',attr_value)
            ))
    else:
        _log('info','In DynamicDS.event_received(%s(%s),%s,%s)'%(
            type(source).__name__,source,tango.fakeEventType[type_],type(attr_value).__name__)
            )
    try:
        if type_ in ('Error',tango.fakeEventType['Error']):
            _log('error','Error received from %s: %s'%(source, attr_value))
        full_name = tango.get_model_name(source) #.get_full_name()
        if full_name not in self._external_listeners:
            self.debug('%s does not trigger any dynamic attribute event'%full_name)
        elif self._external_listeners[full_name]:
            _log('info','\t%s.listeners: %s'%(full_name,self._external_listeners[full_name]))
            for aname in self._external_listeners[full_name]:
                if self._locals['ATTRIBUTE'] == aname:
                    #Variable already being evaluated
                    continue
                else:
                    _log('info','\tforwarding event to %s ...'%aname)
                    self.evalAttr(aname)
    except:
        # Never propagate from an event callback.
        print(traceback.format_exc())
    return
def getXCommand(self,cmd,args=None,feedback=None,expected=None):
    """
    Performs an external Command reading, using a DeviceProxy

    :param cmd: "a/b/c/cmd" for a command of another device, or plain
        "cmd" for a command of this same device.
    :param args: argument passed to the command; None/[]/() means none.
    :param feedback: if set, execution is delegated to tango.TangoCommand
        with this attribute/callable used to confirm completion.
    :param expected: expected feedback value (only used with feedback).
    :returns: the command result, or None if the call failed (errors are
        stored in self.last_attr_exception instead of re-raised so other
        formulas can still be evaluated).
    """
    self.info("DynamicDS(%s)::getXComm(%s,%s,%s,%s): ..."%(self.get_name(),cmd,args,feedback,expected))
    if feedback is None:
        device,cmd = cmd.rsplit('/',1) if '/' in cmd else (self.get_name(),cmd)
        full_name = device+'/'+cmd
        result = None
        try:
            if device == self.get_name():
                self.info('getXCommand accessing to device itself ...')
                result = getattr(self,cmd)(args)
            else:
                devs_in_server = self.myClass.get_devs_in_server()
                if device in devs_in_server:
                    self.debug('getXCommand accessing a device in the same server ...')
                    if cmd.lower()=='state': result = devs_in_server[device].get_state()
                    elif cmd.lower()=='status': result = devs_in_server[device].get_status()
                    # BUGFIX: this branch used an undefined name 'argin';
                    # the method argument is called 'args'.
                    else: result = getattr(devs_in_server[device],cmd)(args)
                else:
                    self.debug('getXCommand calling a proxy to %s' % (device,))
                    # Proxies are cached per full command name.
                    if full_name not in self._external_commands:
                        if self.UseTaurus:
                            self._external_commands[full_name] = tango.TAU.Device(device)
                            if len(self._external_commands)==1: tango.TAU_LOGGER.disableLogOutput()
                        else: self._external_commands[full_name] = PyTango.DeviceProxy(device)
                    self.debug('getXCommand(%s(%s))'%(full_name,args))
                    if args in (None,[],()):
                        result = self._external_commands[full_name].command_inout(cmd)
                    else:
                        result = self._external_commands[full_name].command_inout(cmd,args)
        except Exception as e:
            msg = 'Unable to execute %s(%s): %s' % (full_name,args,traceback.format_exc())
            self.last_attr_exception = (time.time(),msg,e)
            self.error(msg)
            #Exceptions are not re_thrown to allow other commands to be evaluated if this fails.
        finally:
            if hasattr(self,'myClass') and self.myClass:
                self.myClass.DynDev=self #NOT REDUNDANT: If a call to another device in the same server occurs this pointer could have been modified.
        return result
    else:
        # Feedback mode: delegate to TangoCommand, which waits for the
        # feedback attribute to reach the expected value.
        if fun.isString(cmd):
            if '/' not in cmd: device = self.get_name()
            else: device,cmd = cmd.rsplit('/',1)
        else: device = self.get_name()
        if fun.isString(feedback) and '/' not in feedback: feedback = device+'/'+feedback
        return tango.TangoCommand(command=cmd,device=device,feedback=feedback,timeout=10.,wait=10.).execute(args,expected=expected)
@staticmethod
def open_file(filename,device=None):
print('DynamicDS().open_file(%s)'%(filename))
r = []
try:
if device:
if not hasattr(device,'PATH'): device.PATH = device.get_device_property('PATH') or ''
if device.PATH: filename = device.PATH+'/'+filename
f = open(filename)
r = f.readlines()
f.close()
except: print(traceback.format_exc())
return r
@staticmethod
def load_from_file(filename=None,device=None):
    """ This line is put in a separate method to allow subclasses to override this behavior"""
    filename = filename or device.LoadFromFile
    # Loading can be disabled by setting the property to ''/'no'/'false'.
    disabled = filename.lower().strip() in ('no','false','')
    data = [] if disabled else DynamicDS.open_file(filename,device=device)
    if data and device:
        # Declarations loaded from file are prepended to the ones coming
        # from the DynamicAttributes property.
        device.DynamicAttributes = list(data)+list(device.DynamicAttributes)
    return data
###############################################################################
###############################################################################
class DynamicDS(DynamicDSHelpers):
'''
This Class keeps all DynamicDS methods exported to Tango API
Check fandango.dynamic.__doc__ for more information ...
'''
def init_device(self):
    """Tango init hook.

    On the first init of a subclass it loads the Dynamic* properties from
    the database; on later inits it refreshes the dynamic attributes.
    It then registers every dynamic command of this device as a callable
    in the formula namespace and evaluates the InitDevice entries.
    Exceptions are printed but not propagated.
    """
    self.info( 'DynamicDS.init_device(%d)'%(self.get_init_count()))
    try:
        if not type(self) is DynamicDS and not self.get_init_count():
            # First initialization of a subclass: read properties from DB.
            self.get_DynDS_properties()
        else:
            self.updateDynamicAttributes()
        # Expose dynamic commands of this device as callables in the
        # evaluation namespace, e.g. MyCmd(argin).
        for c,f in self.dyn_comms.items():
            k = c.split('/')[-1]
            if self.get_name().lower() in c.lower() \
                and k not in self._locals:
                self._locals.update({k:
                    (lambda argin,cmd=k:self.evalCommand(cmd,argin))})
        # Run the InitDevice entries declared in properties; ''/'false'
        # entries are skipped, unknown ones are only warned about.
        for i in self.InitDevice:
            if str(i).strip().lower() in ('','false'):
                continue
            elif i not in self.dyn_comms:
                self.warning('Unknown %s cmd at init, ignored' % str(i))
            else:
                try:
                    # NOTE(review): entries are looked up in dyn_comms but
                    # executed through evalAttr -- confirm this is intended.
                    self.evalAttr(i)
                except Exception,e:
                    self.error('Unable to execute InitDevice(%s)' % str(i))
                    traceback.print_exc()
                    raise e
    except:
        print(traceback.format_exc()) #self.warning(traceback.format_exc())
    self._init_count +=1
def delete_device(self):
    """Forward device deletion to the proper PyTango base implementation."""
    self.warning( 'DynamicDS.delete_device(): ... ')
    # Prefer Device_4Impl when this PyTango version provides it.
    if 'Device_4Impl' in dir(PyTango):
        base = PyTango.Device_4Impl
    else:
        base = PyTango.Device_3Impl
    base.delete_device(self)
@self_locked
def always_executed_hook(self):
    """Runs before every attribute read / command execution.

    Performs the lazy one-time preparation of the dynamic server, points
    the admin class at this device, and re-evaluates the dynamic state
    and status formulas.  Failures are recorded in
    ``self.last_state_exception`` instead of being raised.
    """
    self.debug("In DynamicDS::always_executed_hook(TAU=%s)"%tango.TAU)
    try:
        self._hook_epoch = time.time() #Internal debugging
        if not self._prepared: self.prepare_DynDS() #This code is placed here because its proper execution cannot be guaranteed during init_device()
        self.myClass.DynDev=self #VITAL: It tells the admin class which device attributes are going to be read
        if self.dyn_states: self.check_state()
        if self.DynamicStatus: self.check_status()
    except:
        self.last_state_exception = 'Exception in DynamicDS::always_executed_hook():\n'+str(traceback.format_exc())
        self.error(self.last_state_exception)
    return
#------------------------------------------------------------------------------------------------------
# State related methods
#------------------------------------------------------------------------------------------------------
def set_state(self,state,push=False):
    """Set the Tango state, cache it in the formula namespace (STATE)
    and push a State event when appropriate.

    :param state: new Tango state value
    :param push: forwarded as ``changed`` to push_dyn_attr
    """
    now = time.time()
    old,self._locals['STATE'] = self._locals.get('STATE',None),state
    last = now - self.last_state_change
    # diff pairs the new state with a staleness flag: a push happens when
    # the state changed OR the last push is older than the default
    # polling period.  NOTE(review): the (state, flag) != (old, False)
    # tuple comparison is subtle -- confirm it is the intended condition.
    diff = (state,(last > self.DEFAULT_POLLING_PERIOD/1e3 and last))
    try:
        if diff != (old,False):
            self.last_state_change = now
            self.push_dyn_attr('State',state,changed=push,queued=True)
    except Exception,e:
        self.warning('DynamicDS.set_state(%s) failed!: %s'%(state,e))
    DynamicDS.get_parent_class(self).set_state(self,state)
def set_status(self,status,save=True):
    """Set the device status; when *save* is True the text is also kept
    in the evaluation namespace as STATUS (so formulas can reuse it)."""
    if save: #not any('STATUS' in s for s in self.DynamicStatus): #adds STATUS to locals only if not used in DynamicStatus?
        self.debug('STATUS: %s'%(status,))
        self._locals['STATUS'] = status
    DynamicDS.get_parent_class(self).set_status(self,status)
def set_full_status(self,status,set=True):
    """Append the last dynamic state/attribute exception reports to
    *status*; apply it with set_status() unless *set* is False."""
    extras = []
    if self.last_state_exception:
        extras.append('\nLast DynamicStateException was:\n\t'+self.last_state_exception)
    if self.last_attr_exception:
        extras.append('\nLast DynamicAttributeException was:\n\t%s:%s'%(
            time.ctime(self.last_attr_exception[0]),str(self.last_attr_exception[1])))
    status = status + ''.join(extras)
    if set:
        self.set_status(status)
    return status
##########################################################################
def read_attr_hardware(self,data):
    """Translate the attribute indexes scheduled for reading into names
    and bump the per-attribute read counter."""
    self.debug("In DynDS::read_attr_hardware()")
    attrs = self.get_device_attr()
    names = []
    for index in data:
        aname = attrs.get_attr_by_ind(index).get_name()
        self._read_count[aname] += 1
        names.append(aname)
    return names
#self.info("read_attr_hardware([%d]=%s)"%(len(data),str(read_attrs)[:80]))
## Edit this code in child classes if needed
#try:
#attrs = self.get_device_attr()
#for d in data:
#a_name = attrs.get_attr_by_ind(d).get_name()
#if a_name in self.dyn_attrs:
#pass
#except Exception,e:
#self.last_state_exception = 'Exception in read_attr_hardware: %s'%str(e)
#self.error('Exception in read_attr_hardware: %s'%str(e))
###########################################################################
# EXTERNAL COMMANDS
###########################################################################
def Help(self,str_format='text'):
    """This command returns help for this device class and its parents"""
    helptext = tango.get_device_help(self,str_format)
    return helptext
def setPauseEvents(self,argin):
    """Pause or resume dynamic attribute event pushing.

    :param argin: 'yes'/'true' pauses events, anything else resumes them.
    :returns: str() of the event configuration for the State attribute.
    """
    ov = self._events_paused
    self._events_paused = bool(fun.clmatch('yes|true',argin))
    if not self._events_paused and ov:
        # Transition paused -> running: re-push every dynamic attribute so
        # clients receive up-to-date values.
        for a in self.dyn_values:  # values not needed, iterate keys only
            events = self.check_attribute_events(a)
            # BUGFIX: the original pushed an undefined name 'aname'
            # (NameError); the loop variable is 'a'.
            self.push_dyn_attr(a,events=events,changed=1,queued=1)
    return str(self.check_attribute_events('state'))
#------------------------------------------------------------------
# GetDynamicConfig command:
#
# Description: Return current property values
#
# argin: DevVoid
# argout: DevString Return current property values
#------------------------------------------------------------------
#Methods started with underscore could be inherited by child device servers for debugging purposes
def getDynamicConfig(self):
    """Return current property values as sorted 'name: value' lines,
    excluding the (potentially huge) Dynamic* declaration lists."""
    exclude = 'DynamicAttributes','DynamicCommands','DynamicStates','DynamicStatus'
    lines = []
    for plist in (DynamicDSClass.class_property_list,DynamicDSClass.device_property_list):
        for key in plist:
            if key not in exclude:
                lines.append('%s: %s'%(key,getattr(self,key,None)))
    return '\n'.join(sorted(lines))
#------------------------------------------------------------------
# getDynamicAttributes command:
#
# Description: Return current dynamic attributes
#
# argin: DevVoid
# argout: DevVarStringArray Return current dynamic attributes
#------------------------------------------------------------------
#Methods started with underscore could be inherited by child device servers for debugging purposes
def getDynamicAttributes(self):
    """Return the list of currently declared dynamic attribute names."""
    attr_list = self.get_dyn_attr_list()
    return attr_list
#------------------------------------------------------------------
# GetMemUsage command:
#
# Description: Returns own process RSS memory usage (Mb).
#
# argin: DevVoid
# argout: DevString Returns own process RSS memory usage (Mb)
#------------------------------------------------------------------
#Methods started with underscore could be inherited by child device servers for debugging purposes
def getMemUsage(self):
    """Returns own process RSS memory usage (Mb).

    Assumes fn.linos.get_memory() reports kB -- TODO confirm units.
    """
    rss = fn.linos.get_memory()
    return rss/1e3
#------------------------------------------------------------------
# Read MemUsage attribute
#------------------------------------------------------------------
def read_MemUsage(self, attr):
    """Read hook for the MemUsage attribute; also pushes a change event."""
    self.debug("In read_MemUsage()")
    usage = self.getMemUsage()
    self.push_change_event('MemUsage',usage)
    attr.set_value(usage)
#------------------------------------------------------------------
# Read EventQueueSize attribute
#------------------------------------------------------------------
def read_EventQueueSize(self, attr):
    """Read hook: number of events currently pending in the internal queue."""
    self.debug("In read_EventQueueSize()")
    pending = self._events_queue.qsize()
    attr.set_value(pending)
#------------------------------------------------------------------
# EvaluateFormula command:
#
# Description: This execute eval(Expression), just to check if its sintax is adequate or not.
#
# argin: DevString PyTango Expression to evaluate
# argout: DevString
#------------------------------------------------------------------
#Methods started with underscore could be inherited by child device servers for debugging purposes
def evaluateFormula(self,argin):
    """Evaluate a formula string through evalState (sanity check for
    dynamic expressions) and return str() of the result."""
    start = time.time()
    self.info('\tevaluateFormula(%s)'%(argin,))
    result = self.evalState(str(argin))
    argout = str(result)
    self.info('\tevaluateFormula took %s seconds'%(time.time()-start))
    return argout
def pushAttribute(self,argin):
    """Force an event push for the given attribute.

    Returns the traceback text instead of raising on failure, so the
    Tango client always gets an answer.
    """
    try:
        return self.push_dyn_attr(argin,changed=True,queued=True)
    except:
        return traceback.format_exc()
#------------------------------------------------------------------
# getAttrFormula command:
#
# Description: Return DynamicAttribute formula
#
# argin: DevString PyTango Expression to evaluate
# argout: DevString
#------------------------------------------------------------------
#Methods started with underscore could be inherited by child device servers for debugging purposes
def getAttrFormula(self,argin):
    """Return the formula string declared for the given dynamic attribute."""
    formula = self.get_attr_formula(argin)
    return formula
#------------------------------------------------------------------------------------------------------
# Lock/Unlock Methods
#------------------------------------------------------------------------------------------------------
def isLocked(self):
    """Return the soft client-lock flag (True when a client holds it)."""
    return self.clientLock

def Lock(self):
    """Set the soft client-lock flag."""
    self.clientLock=True

def Unlock(self):
    """Clear the soft client-lock flag."""
    self.clientLock=False
def attribute_polling_report(self):
self.debug('\n'+'-'*80)
try:
now = time.time()
self._cycle_start = now-self._cycle_start
if 'POLL' in self.dyn_values: self.debug('dyn_values[POLL] = %s ; locals[POLL] = %s' % (self.dyn_values['POLL'].value,self._locals['POLL']))
self.info('Last complete reading cycle took: %f seconds' | |
a job." % submit_response
return lambda: self.wait_for_job(jobs[0]["id"], history_id, maxseconds=maxseconds)
def run_tool(self, testdef, history_id, resource_parameters=None):
    """Submit the tool described by *testdef* to the Galaxy API.

    :param testdef: test definition exposing ``tool_id`` and ``inputs``
    :param history_id: history to run the tool in
    :param resource_parameters: optional mapping of job-resource params
    :returns: Bunch with inputs, outputs, output_collections and jobs
    :raises RunToolException: when the response carries no ``jobs`` key
    """
    # BUGFIX: the default was a shared mutable dict ({}).
    if resource_parameters is None:
        resource_parameters = {}
    # We need to handle the case where we've uploaded a valid compressed file since the upload
    # tool will have uncompressed it on the fly.
    inputs_tree = testdef.inputs.copy()
    for key, value in inputs_tree.items():
        values = [value] if not isinstance(value, list) else value
        new_values = []
        # 'item' (was 'value') avoids shadowing the outer loop variable.
        for item in values:
            if isinstance(item, TestCollectionDef):
                hdca_id = self._create_collection(history_id, item)
                # A collection replaces any previously gathered values.
                new_values = [dict(src="hdca", id=hdca_id)]
            elif item in self.uploads:
                new_values.append(self.uploads[item])
            else:
                new_values.append(item)
        inputs_tree[key] = new_values
    if resource_parameters:
        inputs_tree["__job_resource|__job_resource__select"] = "yes"
        for key, value in resource_parameters.items():
            inputs_tree["__job_resource|%s" % key] = value
    # HACK: Flatten single-value lists. Required when using expand_grouping
    for key, value in inputs_tree.items():
        if isinstance(value, list) and len(value) == 1:
            inputs_tree[key] = value[0]
    submit_response = self.__submit_tool(history_id, tool_id=testdef.tool_id, tool_input=inputs_tree)
    submit_response_object = submit_response.json()
    try:
        return Bunch(
            inputs=inputs_tree,
            outputs=self.__dictify_outputs(submit_response_object),
            output_collections=self.__dictify_output_collections(submit_response_object),
            jobs=submit_response_object['jobs'],
        )
    except KeyError:
        message = "Error creating a job for these tool inputs - %s" % submit_response_object['err_msg']
        raise RunToolException(message, inputs_tree)
def _create_collection(self, history_id, collection_def):
    """Create a dataset collection from *collection_def* and return its id."""
    identifiers = self._element_identifiers(collection_def)
    payload = dict(
        name=collection_def.name,
        element_identifiers=dumps(identifiers),
        collection_type=collection_def.collection_type,
        history_id=history_id,
    )
    response = self._post("dataset_collections", data=payload)
    return response.json()["id"]
def _element_identifiers(self, collection_def):
    """Recursively build the element_identifiers payload for a collection."""
    identifiers = []
    for element_dict in collection_def.elements:
        ident = element_dict["element_identifier"]
        definition = element_dict["element_definition"]
        if isinstance(definition, TestCollectionDef):
            # Nested collection: recurse into its elements.
            element = dict(
                name=ident,
                src="new_collection",
                collection_type=definition.collection_type,
                element_identifiers=self._element_identifiers(definition),
            )
        else:
            # Plain dataset: reuse the upload reference under this name.
            element = self.uploads[definition["value"]].copy()
            element["name"] = ident
            tags = definition.get("attributes").get("tags")
            if tags:
                element["tags"] = tags.split(",")
        identifiers.append(element)
    return identifiers
def __dictify_output_collections(self, submit_response):
    """Map each output collection's output_name to its description,
    preserving the API response order."""
    return OrderedDict(
        (collection.get("output_name"), collection)
        for collection in submit_response['output_collections']
    )
def __dictify_outputs(self, datasets_object):
    # Convert outputs list to a dictionary that can be accessed by
    # output_name so can be more flexible about ordering of outputs
    # but also allows fallback to legacy access as list mode.
    outputs = OutputsDict()
    for dataset in datasets_object['outputs']:
        name = dataset.get("output_name")
        outputs[name] = dataset
    return outputs
def output_hid(self, output_data):
    """Return the identifier used to address *output_data* (its API 'id')."""
    identifier = output_data['id']
    return identifier
def delete_history(self, history):
    """Intentionally a no-op for this interactor; always returns None."""
    return None
def __job_ready(self, job_id, history_id):
    """Poll the job state; True when 'ok', False otherwise.

    On error the history is summarized (when VERBOSE_ERRORS) and the
    exception re-raised.
    """
    if job_id is None:
        raise ValueError("__job_ready passed empty job_id")
    state = self._get("jobs/%s" % job_id).json()['state']
    try:
        return self._state_ready(state, error_msg="Job in error state.")
    except Exception:
        if VERBOSE_ERRORS:
            self._summarize_history(history_id)
        raise
def _summarize_history(self, history_id):
    """Print a verbose dump of a history's datasets and jobs, used to
    diagnose failed tool tests.

    :param history_id: encoded history id; must not be None
    :raises ValueError: if *history_id* is None
    """
    if history_id is None:
        raise ValueError("_summarize_history passed empty history_id")
    print("Problem in history with id %s - summary of history's datasets and jobs below." % history_id)
    # BUGFIX: history_contents was undefined (NameError in the loop
    # below) when fetching the contents failed; default to empty.
    history_contents = []
    try:
        history_contents = self.__contents(history_id)
    except Exception:
        print("*TEST FRAMEWORK FAILED TO FETCH HISTORY DETAILS*")
    for history_content in history_contents:
        dataset = history_content
        print(ERROR_MESSAGE_DATASET_SEP)
        dataset_id = dataset.get('id', None)
        print("| %d - %s (HID - NAME) " % (int(dataset['hid']), dataset['name']))
        if history_content['history_content_type'] == 'dataset_collection':
            history_contents_json = self._get("histories/%s/contents/dataset_collections/%s" % (history_id, history_content["id"])).json()
            print("| Dataset Collection: %s" % history_contents_json)
            print("|")
            continue
        try:
            dataset_info = self._dataset_info(history_id, dataset_id)
            print("| Dataset State:")
            print(self.format_for_summary(dataset_info.get("state"), "Dataset state is unknown."))
            print("| Dataset Blurb:")
            print(self.format_for_summary(dataset_info.get("misc_blurb", ""), "Dataset blurb was empty."))
            print("| Dataset Info:")
            print(self.format_for_summary(dataset_info.get("misc_info", ""), "Dataset info is empty."))
            print("| Peek:")
            print(self.format_for_summary(dataset_info.get("peek", ""), "Peek unavilable."))
        except Exception:
            print("| *TEST FRAMEWORK ERROR FETCHING DATASET DETAILS*")
        try:
            provenance_info = self._dataset_provenance(history_id, dataset_id)
            print("| Dataset Job Standard Output:")
            print(self.format_for_summary(provenance_info.get("stdout", ""), "Standard output was empty."))
            print("| Dataset Job Standard Error:")
            print(self.format_for_summary(provenance_info.get("stderr", ""), "Standard error was empty."))
        except Exception:
            print("| *TEST FRAMEWORK ERROR FETCHING JOB DETAILS*")
        print("|")
    # Job summaries are printed even when the dataset listing failed.
    try:
        jobs_json = self._get("jobs?history_id=%s" % history_id).json()
        for job_json in jobs_json:
            print(ERROR_MESSAGE_DATASET_SEP)
            print("| Job %s" % job_json["id"])
            print("| State: ")
            print(self.format_for_summary(job_json.get("state", ""), "Job state is unknown."))
            print("| Update Time:")
            print(self.format_for_summary(job_json.get("update_time", ""), "Job update time is unknown."))
            print("| Create Time:")
            print(self.format_for_summary(job_json.get("create_time", ""), "Job create time is unknown."))
            print("|")
        print(ERROR_MESSAGE_DATASET_SEP)
    except Exception:
        print(ERROR_MESSAGE_DATASET_SEP)
        print("*TEST FRAMEWORK FAILED TO FETCH HISTORY JOBS*")
        print(ERROR_MESSAGE_DATASET_SEP)
def format_for_summary(self, blob, empty_message, prefix="| "):
    """Prefix every non-blank line of *blob*; return '*empty_message*'
    (prefixed) when nothing remains."""
    pieces = []
    for raw_line in StringIO(blob).readlines():
        # Lines that are only newline characters are dropped entirely.
        if raw_line.rstrip("\n\r"):
            pieces.append("%s%s" % (prefix, raw_line.strip()))
    if pieces:
        return "\n".join(pieces)
    return "%s*%s*" % (prefix, empty_message)
def _dataset_provenance(self, history_id, id):
    """Fetch the provenance record (stdout/stderr etc.) of a dataset."""
    url = "histories/%s/contents/%s/provenance" % (history_id, id)
    return self._get(url).json()
def _dataset_info(self, history_id, id):
    """Fetch the detailed JSON description of one history dataset."""
    url = "histories/%s/contents/%s" % (history_id, id)
    return self._get(url).json()
def __contents(self, history_id):
    """Fetch the full contents listing of a history."""
    return self._get("histories/%s/contents" % history_id).json()
def _state_ready(self, state_str, error_msg):
    """True for 'ok', raise with *error_msg* for 'error', else False."""
    if state_str == 'error':
        raise Exception(error_msg)
    return state_str == 'ok'
def __submit_tool(self, history_id, tool_id, tool_input, extra_data={}, files=None):
data = dict(
history_id=history_id,
tool_id=tool_id,
inputs=dumps(tool_input),
**extra_data
)
return self._post("tools", files=files, data=data)
def ensure_user_with_email(self, email, password=None):
admin_key = self.master_api_key
all_users = self._get('users', key=admin_key).json()
try:
test_user = [user for user in all_users if user["email"] == email][0]
except IndexError:
username = re.sub(r"[^a-z-\d]", '--', email.lower())
password = password or '<PASSWORD>'
# If remote user middleware is enabled - this endpoint consumes
# ``remote_user_email`` otherwise it requires ``email``, ``password``
# and ``username``.
data = dict(
remote_user_email=email,
email=email,
password=password,
username=username,
)
test_user = self._post('users', data, key=admin_key).json()
return test_user
def __test_data_downloader(self, tool_id):
def test_data_download(filename, mode='file'):
return self.test_data_download(tool_id, filename, mode=mode)
return test_data_download
def __dataset_fetcher(self, history_id):
def fetcher(hda_id, base_name=None):
url = "histories/%s/contents/%s/display?raw=true" % (history_id, hda_id)
if base_name:
url += "&filename=%s" % base_name
return self._get(url).content
return fetcher
def __inject_api_key(self, data, key, admin, anon):
if data is None:
data = {}
params = {}
if not anon:
if not key:
key = self.api_key if not admin else self.master_api_key
params['key'] = key
return params, data
def _post(self, path, data=None, files=None, key=None, admin=False, anon=False):
params, data = self.__inject_api_key(data=data, key=key, admin=admin, anon=anon)
# no params for POST
data.update(params)
return requests.post("%s/%s" % (self.api_url, path), data=data, files=files)
def _delete(self, path, data=None, key=None, admin=False, anon=False):
params, data = self.__inject_api_key(data=data, key=key, admin=admin, anon=anon)
# no data for DELETE
params.update(data)
return requests.delete("%s/%s" % (self.api_url, path), params=params)
def _patch(self, path, data=None, key=None, admin=False, anon=False):
params, data = self.__inject_api_key(data=data, key=key, admin=admin, anon=anon)
return requests.patch("%s/%s" % (self.api_url, path), params=params, data=data)
def _put(self, path, data=None, key=None, admin=False, anon=False):
params, data = self.__inject_api_key(data=data, key=key, admin=admin, anon=anon)
return requests.put("%s/%s" % (self.api_url, path), params=params, data=data)
def _get(self, path, data=None, key=None, admin=False, anon=False):
params, data = self.__inject_api_key(data=data, key=key, admin=admin, anon=anon)
# no data for GET
params.update(data)
if path.startswith("/api"):
path = path[len("/api"):]
url = "%s/%s" % (self.api_url, path)
return requests.get(url, params=params)
class RunToolException(Exception):
    """Raised when submitting/running a tool fails.

    The tool ``inputs`` that triggered the failure are kept on the
    exception for diagnostics.
    """

    def __init__(self, message, inputs=None):
        self.inputs = inputs
        super(RunToolException, self).__init__(message)
# Galaxy specific methods - rest of this can be used with arbitrary files and such.
def verify_hid(filename, hda_id, attributes, test_data_downloader, hid="", dataset_fetcher=None, keep_outputs_dir=False):
    """Verify a history dataset against its expected test ``attributes``.

    ``dataset_fetcher`` (required) retrieves the dataset contents;
    ``test_data_downloader`` supplies expected test-data files. Extra-file
    verification is delegated with the same fetcher/downloader bound in.
    """
    assert dataset_fetcher is not None

    def verify_extra_files(extra_files):
        # Closure: re-use this call's fetcher/downloader for extra files.
        _verify_extra_files_content(
            extra_files,
            hda_id,
            dataset_fetcher=dataset_fetcher,
            test_data_downloader=test_data_downloader,
            keep_outputs_dir=keep_outputs_dir,
        )

    item_label = "History item %s" % hid
    data = dataset_fetcher(hda_id)
    verify(
        item_label,
        data,
        attributes=attributes,
        filename=filename,
        get_filecontent=test_data_downloader,
        keep_outputs_dir=keep_outputs_dir,
        verify_extra_files=verify_extra_files,
    )
def verify_collection(output_collection_def, data_collection, verify_dataset):
    """Verify a dataset collection against its test definition.

    Checks the collection type and element count when the definition
    specifies them, then walks ``element_tests`` (recursing into nested
    collections), calling ``verify_dataset(element, attributes, outfile)``
    for each leaf element. Raises AssertionError on any mismatch.
    """
    name = output_collection_def.name

    def find_element(elements, identifier):
        # Linear scan by identifier; a falsy return means "not found".
        for candidate in elements:
            if candidate["element_identifier"] == identifier:
                return candidate
        return False

    expected_collection_type = output_collection_def.collection_type
    if expected_collection_type:
        collection_type = data_collection["collection_type"]
        if expected_collection_type != collection_type:
            raise AssertionError(
                "Expected output collection [%s] to be of type [%s], was of type [%s]."
                % (name, expected_collection_type, collection_type)
            )

    expected_element_count = output_collection_def.count
    if expected_element_count:
        actual_element_count = len(data_collection["elements"])
        if expected_element_count != actual_element_count:
            raise AssertionError(
                "Expected output collection [%s] to have %s elements, but it had %s."
                % (name, expected_element_count, actual_element_count)
            )

    def verify_elements(element_objects, element_tests):
        for element_identifier, element_test in element_tests.items():
            # A bare dict is attributes only; otherwise (outfile, attributes).
            if isinstance(element_test, dict):
                element_outfile, element_attrib = None, element_test
            else:
                element_outfile, element_attrib = element_test
            element = find_element(element_objects, element_identifier)
            if not element:
                raise AssertionError(
                    "Failed to find identifier [%s] for testing, tool generated collection elements [%s]"
                    % (element_identifier, element_objects)
                )
            element_type = element["element_type"]
            if element_type != "dataset_collection":
                verify_dataset(element, element_attrib, element_outfile)
            if element_type == "dataset_collection":
                # Recurse into the nested collection's elements.
                verify_elements(
                    element["object"]["elements"],
                    element_attrib.get("elements", {}),
                )

    verify_elements(data_collection["elements"], output_collection_def.element_tests)
def _verify_composite_datatype_file_content(file_name, hda_id, base_name=None, attributes=None, dataset_fetcher=None, test_data_downloader=None, keep_outputs_dir=False, mode='file'):
    """Verify one file (``base_name``) of a composite dataset ``hda_id``.

    Fetches the named sub-file via ``dataset_fetcher`` and delegates the
    comparison against the expected ``file_name`` to ``verify``. On mismatch
    the AssertionError is re-raised with a message identifying which
    composite file differed.
    """
    assert dataset_fetcher is not None
    data = dataset_fetcher(hda_id, base_name)
    item_label = "History item %s" % hda_id
    try:
        verify(
            item_label,
            data,
            attributes=attributes,
            filename=file_name,
            get_filecontent=test_data_downloader,
            keep_outputs_dir=keep_outputs_dir,
            mode=mode,
        )
    except AssertionError as err:
        # Wrap the original diff with context about which composite file
        # failed; unicodify guards against non-str exception payloads.
        errmsg = 'Composite file (%s) of %s different than expected, difference:\n' % (base_name, item_label)
        errmsg += util.unicodify(err)
        raise AssertionError(errmsg)
def _verify_extra_files_content(extra_files, hda_id, dataset_fetcher, test_data_downloader, keep_outputs_dir):
files_list = []
cleanup_directories = []
for extra_file_dict in extra_files:
extra_file_type = extra_file_dict["type"]
extra_file_name = extra_file_dict["name"]
extra_file_attributes = extra_file_dict["attributes"]
extra_file_value = extra_file_dict["value"]
if extra_file_type == 'file':
files_list.append((extra_file_name, extra_file_value, extra_file_attributes, extra_file_type))
elif extra_file_type == 'directory':
extracted_path = test_data_downloader(extra_file_value, mode='directory')
cleanup_directories.append(extracted_path)
for root, directories, files in util.path.safe_walk(extracted_path):
for filename in files:
filename = os.path.join(root, filename)
filename = os.path.relpath(filename, extracted_path)
files_list.append((filename, os.path.join(extracted_path, filename), extra_file_attributes, extra_file_type))
else:
raise ValueError('unknown extra_files type: %s' % extra_file_type)
| |
from typing import Optional
import torch
from torch import matmul
from torch_extensions.ops import (
batch_cholesky_inverse,
batch_diag,
kron,
symmetrize,
matvec,
cov_from_invcholesky_param,
inv_from_invcholesky_param,
cholesky,
)
from inference.analytical_gausian_linear.inference_step import (
filter_forward_prediction_step,
filter_forward_measurement_step,
smooth_backward_step,
)
from utils.utils import add_sample_dims_to_initial_state, LOG_2PI, TensorDims
def filter_forward(
    dims: TensorDims,
    A: torch.Tensor,
    B: Optional[torch.Tensor],
    C: torch.Tensor,
    D: Optional[torch.Tensor],
    LQinv_tril: torch.Tensor,
    LQinv_logdiag: torch.Tensor,
    LRinv_tril: torch.Tensor,
    LRinv_logdiag: torch.Tensor,
    LV0inv_tril: torch.Tensor,
    LV0inv_logdiag: torch.Tensor,
    m0: torch.Tensor,
    y: torch.Tensor,
    u_state: Optional[torch.Tensor] = None,
    u_obs: Optional[torch.Tensor] = None,
):
    """Forward (Kalman) filtering pass for a linear-Gaussian state space model.

    Covariances are parametrised by inverse Cholesky factors (strictly lower
    triangle + log-diagonal). ``y`` is indexed time-major (y[t]); returns
    filtered means ``m_fw`` (timesteps, batch, state) and covariances
    ``V_fw`` (timesteps, batch, state, state).

    Fix: when ``u_state``/``u_obs`` is None the biases used to be the scalar
    0, so the time-indexing ``b[t - 1]`` / ``d[t]`` below raised TypeError
    ('int' object is not subscriptable). Zero tensors are used instead; the
    filtered results are unchanged when controls are provided.
    """
    device, dtype = A.device, A.dtype

    R = cov_from_invcholesky_param(LRinv_tril, LRinv_logdiag)
    Q = cov_from_invcholesky_param(LQinv_tril, LQinv_logdiag)
    V0 = cov_from_invcholesky_param(LV0inv_tril, LV0inv_logdiag)
    m0, V0 = add_sample_dims_to_initial_state(m0=m0, V0=V0, dims=dims)

    # pre-compute biases (zero tensors so per-time-step indexing works
    # without control inputs).
    if u_state is not None:
        b = matvec(B, u_state)
    else:
        b = torch.zeros(
            (dims.timesteps, dims.batch, dims.state), device=device, dtype=dtype
        )
    if u_obs is not None:
        d = matvec(D, u_obs)
    else:
        d = torch.zeros(
            (dims.timesteps, dims.batch, dims.target), device=device, dtype=dtype
        )

    m_fw = torch.zeros(
        (dims.timesteps, dims.batch, dims.state), device=device, dtype=dtype
    )
    V_fw = torch.zeros(
        (dims.timesteps, dims.batch, dims.state, dims.state),
        device=device,
        dtype=dtype,
    )
    for t in range(0, dims.timesteps):
        # Predict from the previous filtered state; at t == 0 use the prior.
        (mp, Vp) = (
            filter_forward_prediction_step(
                m=m_fw[t - 1], V=V_fw[t - 1], R=R, A=A, b=b[t - 1],
            )
            if t > 0
            else (m0, V0)
        )
        # Condition the prediction on the observation at time t.
        m_fw[t], V_fw[t] = filter_forward_measurement_step(
            y=y[t], m=mp, V=Vp, Q=Q, C=C, d=d[t]
        )
    return m_fw, V_fw
def smooth_forward_backward(
    dims: TensorDims,
    A: torch.Tensor,
    B: Optional[torch.Tensor],
    C: torch.Tensor,
    D: Optional[torch.Tensor],
    LQinv_tril: torch.Tensor,
    LQinv_logdiag: torch.Tensor,
    LRinv_tril: torch.Tensor,
    LRinv_logdiag: torch.Tensor,
    LV0inv_tril: torch.Tensor,
    LV0inv_logdiag: torch.Tensor,
    m0: torch.Tensor,
    y: torch.Tensor,
    u_state: Optional[torch.Tensor] = None,
    u_obs: Optional[torch.Tensor] = None,
):
    """Forward-backward smoothing: Kalman filter, then a backward pass.

    Returns smoothed means ``m_sm``, covariances ``V_sm`` and lag-one
    cross-covariances ``Cov_sm``, each with leading time dimension.

    Fix: when ``u_state`` is None the state bias used to be the scalar 0,
    so ``b[t]`` in the backward loop raised TypeError; a zero tensor is
    used instead (results are unchanged when controls are provided).
    """
    device, dtype = A.device, A.dtype

    R = cov_from_invcholesky_param(LRinv_tril, LRinv_logdiag)

    # pre-compute state biases (zero tensor so b[t] indexing always works).
    if u_state is not None:
        b = matvec(B, u_state)
    else:
        b = torch.zeros(
            (dims.timesteps, dims.batch, dims.state), device=device, dtype=dtype
        )

    m_sm = torch.zeros(
        (dims.timesteps, dims.batch, dims.state), device=device, dtype=dtype
    )
    V_sm = torch.zeros(
        (dims.timesteps, dims.batch, dims.state, dims.state),
        device=device,
        dtype=dtype,
    )
    Cov_sm = torch.zeros(
        (dims.timesteps, dims.batch, dims.state, dims.state),
        device=device,
        dtype=dtype,
    )

    # Forward pass: standard Kalman filter.
    m_fw, V_fw = filter_forward(
        dims=dims,
        A=A,
        B=B,
        C=C,
        D=D,
        LQinv_tril=LQinv_tril,
        LQinv_logdiag=LQinv_logdiag,
        LRinv_tril=LRinv_tril,
        LRinv_logdiag=LRinv_logdiag,
        LV0inv_tril=LV0inv_tril,
        LV0inv_logdiag=LV0inv_logdiag,
        m0=m0,
        y=y,
        u_state=u_state,
        u_obs=u_obs,
    )

    # Backward pass: the last smoothed state equals the last filtered state.
    m_sm[-1], V_sm[-1] = m_fw[-1], V_fw[-1]
    for t in reversed(range(0, dims.timesteps - 1)):
        m_sm[t], V_sm[t], Cov_sm[t] = smooth_backward_step(
            m_sm=m_sm[t + 1],
            V_sm=V_sm[t + 1],
            m_fw=m_fw[t],
            V_fw=V_fw[t],
            A=A,
            R=R,
            b=b[t],
        )
    return m_sm, V_sm, Cov_sm
def smooth_global(
    dims: TensorDims,
    A: torch.Tensor,
    B: Optional[torch.Tensor],
    C: torch.Tensor,
    D: Optional[torch.Tensor],
    LQinv_tril: torch.Tensor,
    LQinv_logdiag: torch.Tensor,
    LRinv_tril: torch.Tensor,
    LRinv_logdiag: torch.Tensor,
    LV0inv_tril: torch.Tensor,
    LV0inv_logdiag: torch.Tensor,
    m0: torch.Tensor,
    y: torch.Tensor,
    u_state: Optional[torch.Tensor] = None,
    u_obs: Optional[torch.Tensor] = None,
):
    """Compute the smoothing posterior by direct inversion of the unrolled model.

    Assembles the natural parameters (block tri-diagonal precision
    ``Q_field`` + ``Q_obs`` and linear term ``h_field`` + ``h_obs``) of the
    joint Gaussian over all T states, inverts once, and slices out the
    per-time-step means, covariances and lag-one cross-covariances.

    Fix: when ``u_state`` is None the state bias used to be the scalar 0,
    so ``b[t]`` in the assembly loop raised TypeError; a zero tensor is used
    instead. ``d`` only appears in the broadcastable ``y - d``, where the
    scalar 0 remains fine.
    """
    device, dtype = A.device, A.dtype

    R = cov_from_invcholesky_param(LRinv_tril, LRinv_logdiag)
    Q = cov_from_invcholesky_param(LQinv_tril, LQinv_logdiag)
    V0 = cov_from_invcholesky_param(LV0inv_tril, LV0inv_logdiag)

    Q_field = torch.zeros(
        (dims.batch, dims.timesteps * dims.state, dims.timesteps * dims.state),
        device=device,
        dtype=dtype,
    )
    h_field = torch.zeros(
        (dims.batch, dims.timesteps * dims.state), device=device, dtype=dtype
    )

    # pre-compute biases (b must be time-indexable; see docstring).
    if u_state is not None:
        b = matvec(B, u_state)
    else:
        b = torch.zeros(
            (dims.timesteps, dims.batch, dims.state), device=device, dtype=dtype
        )
    d = matvec(D, u_obs) if u_obs is not None else 0

    Rinv = symmetrize(torch.cholesky_inverse(cholesky(R)))
    Qinv = symmetrize(torch.cholesky_inverse(cholesky(Q)))
    V0inv = symmetrize(torch.cholesky_inverse(cholesky(V0)))

    # Observation contributions (identical C'QinvC block for every step).
    CtQinvymd = matvec(matmul(C.transpose(-1, -2), Qinv), y - d)
    h_obs = CtQinvymd.transpose(1, 0).reshape(
        (dims.batch, dims.timesteps * dims.state,)
    )
    Q_obs = kron(
        torch.eye(dims.timesteps, dtype=dtype, device=device),
        matmul(C.transpose(-1, -2), matmul(Qinv, C)),
    )

    AtRinvA = matmul(A.transpose(-1, -2), matmul(Rinv, A))
    RinvA = matmul(Rinv, A)

    # Initial-state prior contribution.
    h_field[:, : dims.state] = matmul(V0inv, m0).repeat(
        (dims.batch,) + (1,) * (h_field.ndim - 1)
    )
    Q_field[:, : dims.state, : dims.state] += V0inv.repeat(
        (dims.batch,) + (1,) * (Q_field.ndim - 1)
    )
    # Transition contributions couple the blocks (t, t + 1).
    for t in range(dims.timesteps - 1):
        idx = t * dims.state
        h_field[:, idx : idx + dims.state] += -matvec(
            RinvA.transpose(-1, -2), b[t]
        )
        h_field[:, idx + dims.state : idx + 2 * dims.state] += matvec(
            Rinv, b[t]
        )
        Q_field[:, idx : idx + dims.state, idx : idx + dims.state] += AtRinvA
        Q_field[
            :, idx : idx + dims.state, idx + dims.state : idx + 2 * dims.state
        ] += -RinvA.transpose(-1, -2)
        Q_field[
            :, idx + dims.state : idx + 2 * dims.state, idx : idx + dims.state
        ] += -RinvA
        Q_field[
            :,
            idx + dims.state : idx + 2 * dims.state,
            idx + dims.state : idx + 2 * dims.state,
        ] += Rinv

    # Invert the full precision matrix once via its Cholesky factor.
    L_all_inv = torch.inverse(cholesky(Q_field + Q_obs))
    V_all = matmul(L_all_inv.transpose(-1, -2), L_all_inv)
    m_all = matvec(V_all, h_obs + h_field)

    # Pytorch has no Fortran style reading of indices.
    m = m_all.reshape((dims.batch, dims.timesteps, dims.state)).transpose(0, 1)
    V, Cov = [], []
    for t in range(0, dims.timesteps):
        idx = t * dims.state
        V.append(V_all[:, idx : idx + dims.state, idx : idx + dims.state])
        if t < (dims.timesteps - 1):
            Cov.append(
                V_all[
                    :,
                    idx : idx + dims.state,
                    idx + dims.state : idx + 2 * dims.state,
                ]
            )
        else:
            # No successor state at the final step; pad with zeros.
            Cov.append(
                torch.zeros(
                    (dims.batch, dims.state, dims.state),
                    device=device,
                    dtype=dtype,
                )
            )
    V = torch.stack(V, dim=0)
    Cov = torch.stack(Cov, dim=0)
    return m, V, Cov
def sample(
    dims: TensorDims,
    A: torch.Tensor,
    B: Optional[torch.Tensor],
    C: torch.Tensor,
    D: Optional[torch.Tensor],
    LQinv_tril: torch.Tensor,
    LQinv_logdiag: torch.Tensor,
    LRinv_tril: torch.Tensor,
    LRinv_logdiag: torch.Tensor,
    LV0inv_tril: torch.Tensor,
    LV0inv_logdiag: torch.Tensor,
    m0: torch.Tensor,
    u_state: Optional[torch.Tensor] = None,
    u_obs: Optional[torch.Tensor] = None,
):
    """Draw a joint trajectory sample (x, y) from the generative model.

    Fixes:
    - The noise tensors were created with ``torch.randn`` on the default
      device/dtype, which fails (or silently mismatches) for CUDA or
      non-default-dtype parameters; they now follow ``A``'s device/dtype.
    - When ``u_state``/``u_obs`` is None the biases used to be the scalar 0,
      so ``d[0]`` / ``b[t - 1]`` / ``d[t]`` raised TypeError; zero tensors
      are used instead. Samples are distributionally unchanged when control
      inputs are provided.
    """
    device, dtype = A.device, A.dtype

    # generate noise
    wz = torch.randn(
        (dims.timesteps, dims.batch, dims.state), device=device, dtype=dtype
    )
    wy = torch.randn(
        (dims.timesteps, dims.batch, dims.target), device=device, dtype=dtype
    )

    # pre-compute cholesky factors of the covariances from the inverse
    # Cholesky parametrisation (strict lower triangle + log-diagonal).
    LR = torch.inverse(
        torch.tril(LRinv_tril, -1) + torch.diag(torch.exp(LRinv_logdiag))
    )
    LQ = torch.inverse(
        torch.tril(LQinv_tril, -1) + torch.diag(torch.exp(LQinv_logdiag))
    )
    LV0 = torch.inverse(
        torch.tril(LV0inv_tril, -1) + torch.diag(torch.exp(LV0inv_logdiag))
    )

    # pre-compute biases (zero tensors so time-indexing works without
    # control inputs).
    if u_state is not None:
        b = matvec(B, u_state)
    else:
        b = torch.zeros(
            (dims.timesteps, dims.batch, dims.state), device=device, dtype=dtype
        )
    if u_obs is not None:
        d = matvec(D, u_obs)
    else:
        d = torch.zeros(
            (dims.timesteps, dims.batch, dims.target), device=device, dtype=dtype
        )

    # Initial step.
    # Note: We cannot use in-place operations here because we must backprop through y.
    x = [m0 + matvec(LV0, wz[0])] + [None] * (dims.timesteps - 1)
    y = [matvec(C, x[0]) + d[0] + matvec(LQ, wy[0])] + [None] * (
        dims.timesteps - 1
    )
    for t in range(1, dims.timesteps):
        x[t] = matvec(A, x[t - 1]) + b[t - 1] + matvec(LR, wz[t])
        y[t] = matvec(C, x[t]) + d[t] + matvec(LQ, wy[t])
    x = torch.stack(x, dim=0)
    y = torch.stack(y, dim=0)
    return x, y
def loss_forward(
    dims: TensorDims,
    A: torch.Tensor,
    B: Optional[torch.Tensor],
    C: torch.Tensor,
    D: Optional[torch.Tensor],
    LQinv_tril: torch.Tensor,
    LQinv_logdiag: torch.Tensor,
    LRinv_tril: torch.Tensor,
    LRinv_logdiag: torch.Tensor,
    LV0inv_tril: torch.Tensor,
    LV0inv_logdiag: torch.Tensor,
    m0: torch.Tensor,
    y: torch.Tensor,
    u_state: Optional[torch.Tensor] = None,
    u_obs: Optional[torch.Tensor] = None,
):
    """Accumulate the per-batch negative log-likelihood via forward filtering.

    Runs the Kalman filter and sums, over time, the Gaussian NLL of each
    one-step-ahead prediction error (normalised innovation ``dobs_norm`` and
    Cholesky factor ``LVpyinv`` are returned by the measurement step).

    Fix: when ``u_state``/``u_obs`` is None the biases used to be the scalar
    0, so ``b[t - 1]`` / ``d[t]`` raised TypeError; zero tensors are used
    instead. The loss is unchanged when control inputs are provided.
    """
    device, dtype = A.device, A.dtype

    R = cov_from_invcholesky_param(LRinv_tril, LRinv_logdiag)
    Q = cov_from_invcholesky_param(LQinv_tril, LQinv_logdiag)
    V0 = cov_from_invcholesky_param(LV0inv_tril, LV0inv_logdiag)
    m0, V0 = add_sample_dims_to_initial_state(m0=m0, V0=V0, dims=dims)

    # pre-compute biases (zero tensors so per-time-step indexing works
    # without control inputs).
    if u_state is not None:
        b = matvec(B, u_state)
    else:
        b = torch.zeros(
            (dims.timesteps, dims.batch, dims.state), device=device, dtype=dtype
        )
    if u_obs is not None:
        d = matvec(D, u_obs)
    else:
        d = torch.zeros(
            (dims.timesteps, dims.batch, dims.target), device=device, dtype=dtype
        )

    # Note: We can not use (more readable) in-place operations due to backprop problems.
    m_fw = [None] * dims.timesteps
    V_fw = [None] * dims.timesteps
    loss = torch.zeros((dims.batch,), device=device, dtype=dtype)
    for t in range(0, dims.timesteps):
        (mp, Vp) = (
            filter_forward_prediction_step(
                m=m_fw[t - 1], V=V_fw[t - 1], R=R, A=A, b=b[t - 1],
            )
            if t > 0
            else (m0, V0)
        )
        m_fw[t], V_fw[t], dobs_norm, LVpyinv = filter_forward_measurement_step(
            y=y[t], m=mp, V=Vp, Q=Q, C=C, d=d[t], return_loss_components=True
        )
        # Gaussian NLL of the prediction error:
        # 0.5 * |e|^2 - log det(LVpyinv) + 0.5 * k * log(2*pi).
        loss += (
            0.5 * torch.sum(dobs_norm ** 2, dim=-1)
            - 0.5 * 2 * torch.sum(torch.log(batch_diag(LVpyinv)), dim=(-1,))
            + 0.5 * dims.target * LOG_2PI
        )
    return loss
def loss_em(
dims: TensorDims,
A: torch.Tensor,
B: Optional[torch.Tensor],
C: torch.Tensor,
D: Optional[torch.Tensor],
LQinv_tril: torch.Tensor,
LQinv_logdiag: torch.Tensor,
LRinv_tril: torch.Tensor,
LRinv_logdiag: torch.Tensor,
LV0inv_tril: torch.Tensor,
LV0inv_logdiag: torch.Tensor,
m0: torch.Tensor,
y: torch.Tensor,
u_state: Optional[torch.Tensor] = None,
u_obs: Optional[torch.Tensor] = None,
):
Rinv = inv_from_invcholesky_param(LRinv_tril, LRinv_logdiag)
Qinv = inv_from_invcholesky_param(LQinv_tril, LQinv_logdiag)
with torch.no_grad(): # E-Step is optimal --> analytically zero gradients.
m, V, Cov = smooth_forward_backward(
dims=dims,
A=A,
B=B,
C=C,
D=D,
LQinv_tril=LQinv_tril,
LQinv_logdiag=LQinv_logdiag,
LRinv_tril=LRinv_tril,
LRinv_logdiag=LRinv_logdiag,
LV0inv_tril=LV0inv_tril,
LV0inv_logdiag=LV0inv_logdiag,
m0=m0,
y=y,
u_state=u_state,
u_obs=u_obs,
)
loss_entropy = -compute_entropy(dims=dims, V=V, Cov=Cov)
Cov_sum = torch.sum(Cov[:-1], dim=0) # idx -1 is Cov_{T, T+1}.
V_sum = torch.sum(V, dim=0)
V_sum_head = V_sum - V[-1]
V_sum_tail = V_sum - V[0]
# initial prior loss
V0inv = inv_from_invcholesky_param(LV0inv_tril, LV0inv_logdiag)
delta_init = m[0] - m0
quad_init = matmul(delta_init[..., None], delta_init[..., None, :]) + V[0]
loss_init = 0.5 * (
torch.sum(V0inv * quad_init, dim=(-1, -2))
- 2.0 * torch.sum(LV0inv_logdiag)
+ dims.state * LOG_2PI
)
# transition losses - summed over all time-steps
b = matvec(B, u_state[:-1]) if u_state is not None else 0
delta_trans = m[1:] - matvec(A, m[:-1]) - b
quad_trans = (
matmul(
delta_trans.transpose(0, 1).transpose(-1, -2),
delta_trans.transpose(0, 1),
)
+ V_sum_tail
- matmul(A, Cov_sum)
- matmul(Cov_sum.transpose(-1, -2), A.transpose(-1, -2))
+ | |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
"""
This defines the DataShape type system, with unified
shape and data type.
"""
import sys
import ctypes
import operator
from math import ceil
import datashape
import numpy as np
from .py2help import (
OrderedDict,
_inttypes,
_strtypes,
basestring,
unicode,
with_metaclass,
)
from .internal_utils import IndexCallable, isidentifier
# Classes of unit types: concrete types below tag themselves via a ``cls``
# attribute set to either DIMENSION (an array axis) or MEASURE (the element
# type at the innermost level) -- see e.g. Date/Time, which are measures.
DIMENSION = 1
MEASURE = 2
class Type(type):
    """Metaclass keeping a name -> class registry of datashape types.

    Every class created with this metaclass is recorded in ``_registry``
    under its class name, unless its class dict marks it ``abstract``.
    """

    _registry = {}

    def __new__(meta, name, bases, dct):
        cls = super(Type, meta).__new__(meta, name, bases, dct)
        if dct.get('abstract'):
            # Abstract classes stay out of the registry.
            return cls
        Type._registry[name] = cls
        return cls

    @classmethod
    def register(cls, name, type):
        # Don't clobber existing types.
        if name in cls._registry:
            raise TypeError('There is another type registered with name %s'
                            % name)
        cls._registry[name] = type

    @classmethod
    def lookup_type(cls, name):
        return cls._registry[name]
class Mono(with_metaclass(Type, object)):
    """
    Monotype are unqualified 0 parameters.

    Each type must be reconstructable using its parameters:

        type(datashape_type)(*type.parameters)
    """

    # DataShape overrides this; simple/unit types are not composite.
    composite = False

    def __init__(self, *params):
        # Generic parameter storage for subclasses that don't use __slots__.
        self._parameters = params

    @property
    def _slotted(self):
        # True when the subclass declares __slots__; its parameters then
        # live in the slots rather than in _parameters.
        return hasattr(self, '__slots__')

    @property
    def parameters(self):
        # The constructor arguments needed to rebuild this type, i.e.
        # type(self)(*self.parameters) reconstructs an equal instance.
        if self._slotted:
            return tuple(getattr(self, slot) for slot in self.__slots__)
        else:
            return self._parameters

    def info(self):
        # (class, parameters) pair -- the identity used by __eq__/__hash__.
        return type(self), self.parameters

    def __eq__(self, other):
        # Equal iff shapes match and the measures share class + parameters.
        return (isinstance(other, Mono) and
                self.shape == other.shape and
                self.measure.info() == other.measure.info())

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Computed lazily and cached on the instance as _hash.
        try:
            h = self._hash
        except AttributeError:
            h = self._hash = hash(self.shape) ^ hash(self.measure.info())
        return h

    @property
    def shape(self):
        # A monotype carries no dimensions.
        return ()

    def __len__(self):
        return 1

    def __getitem__(self, key):
        # Behave like a one-element sequence containing only this type.
        return [self][key]

    def __repr__(self):
        # Render as ClassName(slot=value, ...) for slotted types, otherwise
        # ClassName(repr-of-each-parameter, ...).
        return '%s(%s)' % (
            type(self).__name__,
            ', '.join(
                (
                    '%s=%r' % (slot, getattr(self, slot))
                    for slot in self.__slots__
                ) if self._slotted else
                map(repr, self.parameters),
            ),
        )

    # Monotypes are their own measure
    @property
    def measure(self):
        return self

    def subarray(self, leading):
        """Returns a data shape object of the subarray with 'leading'
        dimensions removed. In the case of a measure such as CType,
        'leading' must be 0, and self is returned.
        """
        if leading >= 1:
            raise IndexError(('Not enough dimensions in data shape '
                              'to remove %d leading dimensions.') % leading)
        else:
            return self

    def __mul__(self, other):
        # `unit * other` builds a DataShape: strings are parsed as
        # datashapes, integers become Fixed dimensions.
        if isinstance(other, _strtypes):
            import datashape
            return datashape.dshape(other).__rmul__(self)
        if isinstance(other, _inttypes):
            other = Fixed(other)
        if isinstance(other, DataShape):
            return other.__rmul__(self)
        return DataShape(self, other)

    def __rmul__(self, other):
        if isinstance(other, _strtypes):
            import datashape
            return self * datashape.dshape(other)
        if isinstance(other, _inttypes):
            other = Fixed(other)
        return DataShape(other, self)

    def __getstate__(self):
        # Pickle via the reconstruction parameters (see class docstring).
        return self.parameters

    def __setstate__(self, state):
        # Mirror of __getstate__: restore into slots or _parameters.
        if self._slotted:
            for slot, val in zip(self.__slots__, state):
                setattr(self, slot, val)
        else:
            self._parameters = state

    def to_numpy_dtype(self):
        # Subclasses with a NumPy equivalent override this.
        raise TypeError('DataShape %s is not NumPy-compatible' % self)
class Unit(Mono):
    """
    Unit type that does not need to be reconstructed.
    """

    def __str__(self):
        # A unit type prints as its lowercased class name, e.g. Null -> 'null'.
        return self.__class__.__name__.lower()
class Ellipsis(Mono):
    """Ellipsis (...). Used to indicate a variable number of dimensions.

    E.g.:

        ... * float32    # float32 array w/ any number of dimensions
        A... * float32   # float32 array w/ any number of dimensions,
                         # associated with type variable A
    """
    __slots__ = 'typevar',

    def __init__(self, typevar=None):
        self.typevar = typevar

    def __str__(self):
        if self.typevar:
            return str(self.typevar) + '...'
        return '...'

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, str(self))
class Null(Unit):
    """The null datashape: a unit measure carrying no value."""
    pass
class Date(Unit):
    """ Date type """
    cls = MEASURE
    __slots__ = ()

    def to_numpy_dtype(self):
        # Calendar dates map onto NumPy's day-resolution datetime64.
        return np.dtype('datetime64[D]')
class Time(Unit):
    """ Time type, optionally tagged with a timezone name. """
    cls = MEASURE
    __slots__ = 'tz',

    def __init__(self, tz=None):
        if tz is not None and not isinstance(tz, _strtypes):
            raise TypeError('tz parameter to time datashape must be a string')
        # TODO validate against Olson tz database
        self.tz = tz

    def __str__(self):
        base = super(Time, self).__str__()
        if self.tz is None:
            return base
        return '%s[tz=%r]' % (base, str(self.tz))
class DateTime(Unit):
    """ DateTime type, optionally tagged with a timezone name. """
    cls = MEASURE
    __slots__ = 'tz',

    def __init__(self, tz=None):
        if tz is not None and not isinstance(tz, _strtypes):
            raise TypeError('tz parameter to datetime datashape must be a '
                            'string')
        # TODO validate against Olson tz database
        self.tz = tz

    def __str__(self):
        base = super(DateTime, self).__str__()
        if self.tz is None:
            return base
        return '%s[tz=%r]' % (base, str(self.tz))

    def to_numpy_dtype(self):
        # Maps onto microsecond-resolution datetime64.
        return np.dtype('datetime64[us]')
_units = set(['ns', 'us', 'ms', 's', 'm', 'h', 'D', 'W', 'M', 'Y'])
_unit_aliases = {
'year': 'Y',
'week': 'W',
'day': 'D',
'date': 'D',
'hour': 'h',
'second': 's',
'millisecond': 'ms',
'microsecond': 'us',
'nanosecond': 'ns'
}
def normalize_time_unit(s):
""" Normalize time input to one of 'year', 'second', 'millisecond', etc..
Example
-------
>>> normalize_time_unit('milliseconds')
'ms'
>>> normalize_time_unit('ms')
'ms'
>>> normalize_time_unit('nanoseconds')
'ns'
>>> normalize_time_unit('nanosecond')
'ns'
"""
s = s.strip()
if s in _units:
return s
if s in _unit_aliases:
return _unit_aliases[s]
if s[-1] == 's' and len(s) > 2:
return normalize_time_unit(s.rstrip('s'))
raise ValueError("Do not understand time unit %s" % s)
class TimeDelta(Unit):
    """Time-difference measure with a fixed unit code (e.g. 'us', 's', 'D')."""
    cls = MEASURE
    __slots__ = 'unit',

    def __init__(self, unit='us'):
        # Canonicalize aliases/plurals ('seconds' -> 's'); raises ValueError
        # for unknown units via normalize_time_unit.
        self.unit = normalize_time_unit(str(unit))

    def __str__(self):
        return 'timedelta[unit=%r]' % self.unit

    def to_numpy_dtype(self):
        return np.dtype('timedelta64[%s]' % self.unit)
class Units(Unit):
    """ Units type for values with physical units """
    cls = MEASURE
    __slots__ = 'unit', 'tp'

    def __init__(self, unit, tp=None):
        if not isinstance(unit, _strtypes):
            raise TypeError('unit parameter to units datashape must be a '
                            'string')
        # The underlying measure defaults to a float64 datashape.
        if tp is None:
            tp = DataShape(float64)
        elif not isinstance(tp, DataShape):
            raise TypeError('tp parameter to units datashape must be a '
                            'datashape type')
        self.unit = unit
        self.tp = tp

    def __str__(self):
        # Omit the measure when it is the float64 default.
        if self.tp == DataShape(float64):
            return 'units[%r]' % (self.unit)
        return 'units[%r, %s]' % (self.unit, self.tp)
class Bytes(Unit):
    """ Bytes type """
    # Measure for raw byte-string values.
    cls = MEASURE
    __slots__ = ()
# Accepted spellings of string encodings mapped to their canonical short
# codes ('A' for ascii, 'U8'/'U16'/'U32' for the UTF family), as used by
# String.__init__ to canonicalize its encoding parameter.
_canonical_string_encodings = {
    u'A': u'A',
    u'ascii': u'A',
    u'U8': u'U8',
    u'utf-8': u'U8',
    u'utf_8': u'U8',
    u'utf8': u'U8',
    u'U16': u'U16',
    u'utf-16': u'U16',
    u'utf_16': u'U16',
    u'utf16': u'U16',
    u'U32': u'U32',
    u'utf-32': u'U32',
    u'utf_32': u'U32',
    u'utf32': u'U32',
}
class String(Unit):
    """ String container

    >>> String()
    ctype("string")
    >>> String(10, 'ascii')
    ctype("string[10, 'A']")
    """
    cls = MEASURE
    __slots__ = 'fixlen', 'encoding'

    def __init__(self, *args):
        # Accepted forms: String(), String(fixlen), String(encoding),
        # String(fixlen, encoding).
        if len(args) == 0:
            fixlen, encoding = None, None
        elif len(args) == 1:
            if isinstance(args[0], _strtypes):
                fixlen, encoding = None, args[0]
            elif isinstance(args[0], _inttypes):
                fixlen, encoding = args[0], None
            else:
                # Fix: previously fell through with fixlen/encoding unbound,
                # surfacing as an UnboundLocalError instead of a clear error.
                raise TypeError('Unsupported string parameter %r' % (args[0],))
        elif len(args) == 2:
            fixlen, encoding = args
        else:
            # Fix: same unbound-local problem for more than two arguments.
            raise TypeError('Too many parameters to string datashape: %r'
                            % (args,))

        encoding = encoding or 'U8'
        if isinstance(encoding, str):
            encoding = unicode(encoding)
        # Put the encoding in canonical form ('ascii' -> 'A', 'utf-8' -> 'U8').
        try:
            encoding = _canonical_string_encodings[encoding]
        except KeyError:
            raise ValueError('Unsupported string encoding %s' %
                             repr(encoding))
        self.encoding = encoding
        self.fixlen = fixlen

    def __str__(self):
        # Canonical datashape syntax for each fixlen/encoding combination;
        # the U8 default encoding is omitted.
        if self.fixlen is None and self.encoding == 'U8':
            return 'string'
        elif self.fixlen is not None and self.encoding == 'U8':
            return 'string[%i]' % self.fixlen
        elif self.fixlen is None and self.encoding != 'U8':
            return 'string[%s]' % repr(self.encoding).strip('u')
        else:
            return 'string[%i, %s]' % (self.fixlen,
                                       repr(self.encoding).strip('u'))

    def __repr__(self):
        s = str(self)
        return 'ctype("%s")' % s.encode('unicode_escape').decode('ascii')

    def to_numpy_dtype(self):
        """
        >>> String().to_numpy_dtype()
        dtype('O')
        >>> String(30).to_numpy_dtype()
        dtype('<U30')
        >>> String(30, 'A').to_numpy_dtype()
        dtype('S30')
        """
        # NOTE(review): fixlen == 0 is falsy and falls through to the
        # variable-length case -- confirm that is intended.
        if self.fixlen:
            if self.encoding == 'A':
                return np.dtype('S%d' % self.fixlen)
            else:
                return np.dtype('U%d' % self.fixlen)

        from .py2help import unicode
        # Create a dtype with metadata indicating it's
        # a string in the same style as the h5py special_dtype
        return np.dtype('O', metadata={'vlen': unicode})
class Decimal(Unit):
    """Decimal type corresponding to SQL Decimal/Numeric types.

    The first parameter passed specifies the number of digits of precision that
    the Decimal contains. If an additional parameter is given, it represents
    the scale, or number of digits of precision that are after the decimal
    point.

    The Decimal type makes no requirement of how it is to be stored in memory,
    therefore, the number of bytes needed to store a Decimal for a given
    precision will vary based on the platform where it is used.

    Examples
    --------
    >>> Decimal(18)
    Decimal(precision=18, scale=0)
    >>> Decimal(7, 4)
    Decimal(precision=7, scale=4)
    >>> Decimal(precision=11, scale=2)
    Decimal(precision=11, scale=2)
    """
    cls = MEASURE
    __slots__ = 'precision', 'scale'

    def __init__(self, precision, scale=0):
        self.precision = precision
        self.scale = scale

    def __str__(self):
        return 'decimal[precision={precision}, scale={scale}]'.format(
            precision=self.precision, scale=self.scale
        )

    def to_numpy_dtype(self):
        """Convert a decimal datashape to a NumPy dtype.

        Note that floating-point (scale > 0) precision will be lost converting
        to NumPy floats.

        Examples
        --------
        >>> Decimal(18).to_numpy_dtype()
        dtype('int64')
        >>> Decimal(7,4).to_numpy_dtype()
        dtype('float64')
        """
        # Any fractional digits force a floating-point representation.
        if self.scale != 0:
            return np.dtype(np.float64)
        # Integral decimals: smallest integer dtype holding `precision` digits.
        if self.precision <= 2:
            return np.dtype(np.int8)
        if self.precision <= 4:
            return np.dtype(np.int16)
        if self.precision <= 9:
            return np.dtype(np.int32)
        if self.precision <= 18:
            return np.dtype(np.int64)
        raise TypeError(
            'Integer Decimal precision > 18 is not NumPy-compatible')
class DataShape(Mono):
"""
Composite container for datashape elements.
Elements of a datashape like ``Fixed(3)``, ``Var()`` or | |
1'][0]
form = self._assessment_bank.get_response_form(section_2['section_id'],
section_2['questions'][0].ident)
form.add_choice_id(wrong_answer['id'])
self._assessment_bank.submit_response(section_2['section_id'],
section_2['questions'][0].ident,
form)
sections = self.get_questions_for_taken(taken.ident)
self.validate_number_sections_and_questions(sections, (2, (2, 3)))
self.num_parts(2, sequestered=False)
# magic parts are not saved to disk ...
self.num_parts(6, sequestered=True)
# now let's get the new question wrong
section_2 = sections[1]
new_question = section_2['questions'][1]
choices = new_question.get_choices()
wrong_answer = [c for c in choices if c['name'] == 'Choice 1'][0]
form = self._assessment_bank.get_response_form(section_2['section_id'],
new_question.ident)
form.add_choice_id(wrong_answer['id'])
self._assessment_bank.submit_response(section_2['section_id'],
new_question.ident,
form)
sections = self.get_questions_for_taken(taken.ident)
self.validate_number_sections_and_questions(sections, (2, (2, 4)))
# this should be a bottom-level waypoint with no confused_los
# for its wrong answer. Getting it wrong should generate
# another question with the same confused LO
section_2 = sections[1]
new_question = section_2['questions'][2]
self.assertTrue(any(str(t.ident) == new_question._my_map['itemId']
for t in self._items['waypoint2']))
self.assertEqual(
new_question.object_map['learningObjectiveIds'],
['foo%3A2%40MIT']
)
questions = []
for s in sections:
questions += s['questions']
# let's check the displayLabels
question_names = [q.display_name.text for q in questions]
self.assertEqual(
question_names,
['1', '2', '1', '1.1', '1.1.1', '2']
)
choices = new_question.get_choices()
wrong_answer = [c for c in choices if c['name'] == 'Choice 1'][0]
form = self._assessment_bank.get_response_form(section_2['section_id'],
new_question.ident)
form.add_choice_id(wrong_answer['id'])
self._assessment_bank.submit_response(section_2['section_id'],
new_question.ident,
form)
sections = self.get_questions_for_taken(taken.ident)
self.validate_number_sections_and_questions(sections, (2, (2, 5)))
section_2 = sections[1]
new_question = section_2['questions'][3]
self.assertTrue(any(str(t.ident) == new_question._my_map['itemId']
for t in self._items['waypoint2']))
self.assertEqual(
new_question.object_map['learningObjectiveIds'],
['foo%3A2%40MIT']
)
questions = []
for s in sections:
questions += s['questions']
# let's check the displayLabels
question_names = [q.display_name.text for q in questions]
self.assertEqual(
question_names,
['1', '2', '1', '1.1', '1.1.1', '1.1.2', '2']
)
    def test_waypoint_items_do_not_repeat_across_sections(self):
        """A wrongly-answered question must not insert a waypoint item that
        duplicates the waypoint already served in the other section.

        The scenario is repeated 15 times with a fresh taken each round
        (presumably because the inserted waypoint item is chosen
        nondeterministically -- TODO confirm); a duplicate in any round
        fails the test.
        """
        self.num_parts(0)
        number_identical_waypoints = 0
        taken = self.create_taken(number_waypoints=2)
        for i in range(0, 15):
            if i > 0:
                # reuse the assessment built in round 0; only the taken is new
                taken = self.create_taken_only()
            self.num_parts(2, sequestered=False)
            self.num_parts(6, sequestered=True)
            sections = self.get_questions_for_taken(taken.ident)
            self.validate_number_sections_and_questions(sections, (2, (2, 2)))
            # answer the second question of section 2 incorrectly; the
            # question count of that section goes from 2 to 3 below, i.e.
            # a waypoint question gets inserted
            section_2 = sections[1]
            choices = section_2['questions'][1].get_choices()
            wrong_answer = [c for c in choices if c['name'] == 'Choice 1'][0]
            form = self._assessment_bank.get_response_form(section_2['section_id'],
                                                           section_2['questions'][1].ident)
            form.add_choice_id(wrong_answer['id'])
            self._assessment_bank.submit_response(section_2['section_id'],
                                                  section_2['questions'][1].ident,
                                                  form)
            sections = self.get_questions_for_taken(taken.ident)
            self.validate_number_sections_and_questions(sections, (2, (2, 3)))
            self.num_parts(2, sequestered=False)
            # magic parts are not saved to disk ...
            self.num_parts(6, sequestered=True)
            # now let's get another question wrong
            section_1 = sections[0]
            new_question = section_1['questions'][0]
            choices = new_question.get_choices()
            wrong_answer = [c for c in choices if c['name'] == 'Choice 1'][0]
            form = self._assessment_bank.get_response_form(section_1['section_id'],
                                                           new_question.ident)
            form.add_choice_id(wrong_answer['id'])
            self._assessment_bank.submit_response(section_1['section_id'],
                                                  new_question.ident,
                                                  form)
            sections = self.get_questions_for_taken(taken.ident)
            self.validate_number_sections_and_questions(sections, (2, (3, 3)))
            self.num_parts(2, sequestered=False)
            # magic parts are not saved to disk ...
            self.num_parts(6, sequestered=True)
            questions = []
            for s in sections:
                questions += s['questions']
            question_id_strs = [q._my_map['itemId'] for q in questions]
            first_waypoint_id = ''
            second_waypoint_id = ''
            # with 3 + 3 questions, indices 1 and 5 are the two inserted
            # waypoint questions (one per section); every other index must
            # be one of the 'target' items
            for index, question_id_str in enumerate(question_id_strs):
                if index == 0:
                    self.assertEqual(str(self._items['target'][index].ident),
                                     question_id_str)
                elif index == 1 or index == 5:
                    self.assertTrue(any(str(t.ident) == question_id_str
                                        for t in self._items['waypoint1']))
                    if index == 1:
                        first_waypoint_id = self._extract_item_id(question_id_str)
                    else:
                        second_waypoint_id = self._extract_item_id(question_id_str)
                else:
                    self.assertEqual(str(self._items['target'][index - 1].ident),
                                     question_id_str)
            if first_waypoint_id == second_waypoint_id:
                # record the duplicate; no need to keep looping
                number_identical_waypoints += 1
                break
        # now delete / finish the taken
        self._assessment_bank.delete_assessment_taken(taken.ident)
        # should not get any duplicates across sections
        self.assertFalse(number_identical_waypoints > 0)
    def test_target_items_do_not_repeat_across_assessments(self):
        """The waypoint item inserted for a wrong answer in one assessment
        must not reappear as the inserted waypoint of a second, separately
        created assessment.

        NOTE(review): despite the method name, the comparison below is on
        inserted waypoint item ids, not target items. The second half is
        repeated 15 times (fresh taken each round), presumably because the
        inserted item is chosen nondeterministically -- TODO confirm.
        """
        self.num_parts(0)
        number_identical_waypoints = 0
        taken = self.create_taken(number_waypoints=2)
        self.num_parts(2, sequestered=False)
        self.num_parts(6, sequestered=True)
        sections = self.get_questions_for_taken(taken.ident)
        self.validate_number_sections_and_questions(sections, (2, (2, 2)))
        # answer the second question of section 2 wrong so a waypoint
        # question gets inserted; remember its item id for later comparison
        section_2 = sections[1]
        choices = section_2['questions'][1].get_choices()
        wrong_answer = [c for c in choices if c['name'] == 'Choice 1'][0]
        form = self._assessment_bank.get_response_form(section_2['section_id'],
                                                       section_2['questions'][1].ident)
        form.add_choice_id(wrong_answer['id'])
        self._assessment_bank.submit_response(section_2['section_id'],
                                              section_2['questions'][1].ident,
                                              form)
        sections = self.get_questions_for_taken(taken.ident)
        self.validate_number_sections_and_questions(sections, (2, (2, 3)))
        inserted_waypoint_id = sections[1]['questions'][2]._my_map['itemId']
        original_waypoints = self._items['waypoint1']
        # second assessment reuses the waypoints created above
        taken2 = self.create_taken(number_waypoints=2, create_waypoints=False)
        self.num_parts(4, sequestered=False)
        self.num_parts(12, sequestered=True)
        for i in range(0, 15):
            if i > 0:
                taken = self.create_taken_only()
            else:
                taken = taken2
            self.num_parts(4, sequestered=False)
            self.num_parts(12, sequestered=True)
            sections = self.get_questions_for_taken(taken.ident)
            self.validate_number_sections_and_questions(sections, (2, (2, 2)))
            section_2 = sections[1]
            choices = section_2['questions'][1].get_choices()
            wrong_answer = [c for c in choices if c['name'] == 'Choice 1'][0]
            form = self._assessment_bank.get_response_form(section_2['section_id'],
                                                           section_2['questions'][1].ident)
            form.add_choice_id(wrong_answer['id'])
            self._assessment_bank.submit_response(section_2['section_id'],
                                                  section_2['questions'][1].ident,
                                                  form)
            sections = self.get_questions_for_taken(taken.ident)
            self.validate_number_sections_and_questions(sections, (2, (2, 3)))
            self.num_parts(4, sequestered=False)
            # magic parts are not saved to disk ...
            self.num_parts(12, sequestered=True)
            questions = []
            for s in sections:
                questions += s['questions']
            question_id_strs = [q._my_map['itemId'] for q in questions]
            new_inserted_waypoint_id = ''
            # with 2 + 3 questions, index 4 is the inserted waypoint; all
            # other indices must be 'target' items
            for index, question_id_str in enumerate(question_id_strs):
                if index == 4:
                    self.assertTrue(any(str(t.ident) == question_id_str
                                        for t in original_waypoints))
                    new_inserted_waypoint_id = self._extract_item_id(question_id_str)
                else:
                    self.assertEqual(str(self._items['target'][index].ident),
                                     question_id_str)
            if inserted_waypoint_id == new_inserted_waypoint_id:
                number_identical_waypoints += 1
                break
        # now delete / finish the taken
        self._assessment_bank.delete_assessment_taken(taken.ident)
        # should not get any duplicates across assessments
        self.assertFalse(number_identical_waypoints > 0)
class MultiLanguageBaseTestCase(DLKitTestCase):
    """Shared fixtures for multi-language ``DisplayText`` tests.

    Provides one ``DisplayText`` factory per language (English, Hindi,
    Telugu) plus a helper that re-opens the test bank through a proxy
    whose locale is set to a given language code.
    """
    def _english(self):
        """Return a DisplayText wrapping ``self._english_text`` (ENG / Latin script)."""
        return DisplayText(display_text_map={
            'text': self._english_text,
            'languageTypeId': '639-2%3AENG%40ISO',
            'scriptTypeId': '15924%3ALATN%40ISO',
            'formatTypeId': 'TextFormats%3APLAIN%40okapia.net'
        })
    def _hindi(self):
        """Return a DisplayText wrapping ``self._hindi_text`` (HIN / Devanagari script)."""
        return DisplayText(display_text_map={
            'text': self._hindi_text,
            'languageTypeId': '639-2%3AHIN%40ISO',
            'scriptTypeId': '15924%3ADEVA%40ISO',
            'formatTypeId': 'TextFormats%3APLAIN%40okapia.net'
        })
    def _telugu(self):
        """Return a DisplayText wrapping ``self._telugu_text`` (TEL / Telugu script)."""
        # CONSISTENCY FIX: pass the map via the ``display_text_map`` keyword
        # like _english() / _hindi() above (it was previously positional).
        return DisplayText(display_text_map={
            'text': self._telugu_text,
            'languageTypeId': '639-2%3ATEL%40ISO',
            'scriptTypeId': '15924%3ATELU%40ISO',
            'formatTypeId': 'TextFormats%3APLAIN%40okapia.net'
        })
    @staticmethod
    def _str_txt(display_text):
        """Flatten *display_text* into the plain-string map stored in ``_my_map``."""
        return {
            'text': display_text.text,
            'languageTypeId': str(display_text.language_type),
            'scriptTypeId': str(display_text.script_type),
            'formatTypeId': str(display_text.format_type)
        }
    def get_bank_with_proxy_set_to_locale(self, locale_code):
        """Return the test bank through a proxy localized to *locale_code*.

        Expects 'ENG', 'HIN' or 'TEL' as the code (see callers).
        """
        locale = InitializableLocale(language_type_identifier=locale_code)
        condition = PROXY_SESSION.get_proxy_condition()
        condition.set_http_request(self.instructor_req)
        condition.set_locale(locale)
        proxy = PROXY_SESSION.get_proxy(condition)
        am = RUNTIME.get_service_manager('ASSESSMENT',
                                         proxy=proxy)
        return am.get_bank(self._bank.ident)
    def setUp(self):
        super(MultiLanguageBaseTestCase, self).setUp()
        # one bank plus one sample text per supported language
        self._bank = self._get_test_bank()
        self._english_text = 'english'
        self._hindi_text = 'हिंदी'
        self._telugu_text = 'తెలుగు'
    def tearDown(self):
        super(MultiLanguageBaseTestCase, self).tearDown()
class MultiLanguageItemTests(MultiLanguageBaseTestCase):
    def setUp(self):
        # No extra fixtures beyond MultiLanguageBaseTestCase.setUp().
        super(MultiLanguageItemTests, self).setUp()
    def tearDown(self):
        # No extra cleanup beyond MultiLanguageBaseTestCase.tearDown().
        super(MultiLanguageItemTests, self).tearDown()
def test_can_set_multiple_display_texts(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_display_name(self._english())
form.add_display_name(self._hindi())
form.add_display_name(self._telugu())
item = self._bank.create_item(form)
self.assertEqual(
len(item._my_map['displayNames']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['displayNames'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['displayNames'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['displayNames'])
self.assertEqual(
item.description.text,
''
)
self.assertEqual(
item.display_name.text,
self._english_text
)
def test_can_set_multiple_descriptions(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_description(self._english())
form.add_description(self._hindi())
form.add_description(self._telugu())
item = self._bank.create_item(form)
self.assertEqual(
len(item._my_map['descriptions']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['descriptions'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['descriptions'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['descriptions'])
self.assertEqual(
item.display_name.text,
''
)
self.assertEqual(
item.description.text,
self._english_text
)
def test_can_clear_display_names(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_display_name(self._english())
form.add_display_name(self._hindi())
form.add_display_name(self._telugu())
item = self._bank.create_item(form)
self.assertEqual(
len(item._my_map['displayNames']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['displayNames'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['displayNames'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['displayNames'])
form = self._bank.get_item_form_for_update(item.ident)
form.clear_display_names()
item = self._bank.update_item(form)
self.assertEqual(
len(item._my_map['displayNames']),
0
)
self.assertEqual(
item.description.text,
''
)
self.assertEqual(
item.display_name.text,
''
)
def test_can_clear_descriptions(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_description(self._english())
form.add_description(self._hindi())
form.add_description(self._telugu())
item = self._bank.create_item(form)
self.assertEqual(
len(item._my_map['descriptions']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['descriptions'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['descriptions'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['descriptions'])
form = self._bank.get_item_form_for_update(item.ident)
form.clear_descriptions()
item = self._bank.update_item(form)
self.assertEqual(
len(item._my_map['descriptions']),
0
)
self.assertEqual(
item.display_name.text,
''
)
self.assertEqual(
item.description.text,
''
)
def test_can_remove_a_display_name(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_display_name(self._english())
form.add_display_name(self._hindi())
form.add_display_name(self._telugu())
item = self._bank.create_item(form)
self.assertEqual(
len(item._my_map['displayNames']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['displayNames'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['displayNames'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['displayNames'])
form = self._bank.get_item_form_for_update(item.ident)
form.remove_display_name_by_language(self._english().language_type)
item = self._bank.update_item(form)
self.assertEqual(
len(item._my_map['displayNames']),
2
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['displayNames'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['displayNames'])
self.assertEqual(
item.description.text,
''
)
self.assertEqual(
item.display_name.text,
self._hindi_text
)
def test_can_remove_a_description(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_description(self._english())
form.add_description(self._hindi())
form.add_description(self._telugu())
item = self._bank.create_item(form)
self.assertEqual(
len(item._my_map['descriptions']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['descriptions'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['descriptions'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['descriptions'])
form = self._bank.get_item_form_for_update(item.ident)
form.remove_description_by_language(self._english().language_type)
item = self._bank.update_item(form)
self.assertEqual(
len(item._my_map['descriptions']),
2
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['descriptions'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['descriptions'])
self.assertEqual(
item.display_name.text,
''
)
self.assertEqual(
item.description.text,
self._hindi_text
)
def test_can_replace_a_display_name(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_display_name(self._english())
form.add_display_name(self._hindi())
form.add_display_name(self._telugu())
item = self._bank.create_item(form)
self.assertEqual(
len(item._my_map['displayNames']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['displayNames'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['displayNames'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['displayNames'])
form = self._bank.get_item_form_for_update(item.ident)
new_display_name = {
'text': self._telugu().text,
'languageTypeId': str(self._english().language_type),
'formatTypeId': str(self._english().format_type),
'scriptTypeId': str(self._english().script_type)
}
form.edit_display_name(DisplayText(display_text_map=new_display_name))
item = self._bank.update_item(form)
self.assertEqual(
len(item._my_map['displayNames']),
3
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['displayNames'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['displayNames'])
self.assertNotIn(self._str_txt(self._english()), item._my_map['displayNames'])
self.assertEqual(
item.description.text,
''
)
self.assertEqual(
item.display_name.text,
self._telugu_text
)
def test_can_replace_a_description(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_description(self._english())
form.add_description(self._hindi())
form.add_description(self._telugu())
item = self._bank.create_item(form)
self.assertEqual(
len(item._my_map['descriptions']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['descriptions'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['descriptions'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['descriptions'])
form = self._bank.get_item_form_for_update(item.ident)
new_description = {
'text': self._telugu().text,
'languageTypeId': str(self._english().language_type),
'formatTypeId': str(self._english().format_type),
'scriptTypeId': str(self._english().script_type)
}
form.edit_description(DisplayText(display_text_map=new_description))
item = self._bank.update_item(form)
self.assertEqual(
len(item._my_map['descriptions']),
3
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['descriptions'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['descriptions'])
self.assertNotIn(self._str_txt(self._english()), item._my_map['descriptions'])
self.assertEqual(
item.display_name.text,
''
)
self.assertEqual(
item.description.text,
self._telugu_text
)
def test_setting_proxy_locale_gets_item_display_name_in_specified_language(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_display_name(self._english())
form.add_display_name(self._hindi())
form.add_display_name(self._telugu())
item = self._bank.create_item(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.display_name.text,
self._hindi_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.display_name.text,
self._english_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.display_name.text,
self._telugu_text
)
def test_english_default_display_name_if_locale_code_not_available(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_display_name(self._english())
form.add_display_name(self._telugu())
item = self._bank.create_item(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.display_name.text,
self._english_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.display_name.text,
self._english_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.display_name.text,
self._telugu_text
)
def test_first_available_display_name_if_locale_code_and_english_not_available(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_display_name(self._telugu())
item = self._bank.create_item(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.display_name.text,
self._telugu_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.display_name.text,
self._telugu_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.display_name.text,
self._telugu_text
)
def test_setting_proxy_locale_gets_item_description_in_specified_language(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_description(self._english())
form.add_description(self._hindi())
form.add_description(self._telugu())
item = self._bank.create_item(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.description.text,
self._hindi_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.description.text,
self._english_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.description.text,
self._telugu_text
)
def test_english_default_description_if_locale_code_not_available(self):
form = | |
settings to...")
self.file_opt = options = {}
options['defaultextension'] = '.ini'
options['filetypes'] = [ ('INI files', '.ini'),('all files', '.*')]
options['title'] = 'Set filename to export the settings to...'
ans = tkFileDialog.asksaveasfilename(**self.file_opt)
if ans != '' and ans != '.' and ans != ():
ans = os.path.normpath(ans)
parsers.writeSettings(parsers.dictSettings(parsers.readSettings(settingsFile,self.Message)),ans,self.Message)
self.Message.set("Settings are exported to "+ans)
else:
self.Message.set("Exporting settings is cancelled.")
def Settings_Import(self):
self.Message.set("Choosing file to import settings from...")
self.file_opt = options = {}
options['defaultextension'] = '.ini'
options['filetypes'] = [ ('INI files', '.ini'),('all files', '.*')]
options['title'] = 'Set filename to import the settings from...'
ans = tkFileDialog.askopenfilename(**self.file_opt)
if ans != '' and ans != '.' and ans != ():
ans = os.path.normpath(ans)
parsers.writeSettings(parsers.dictSettings(parsers.readSettings(ans,self.Message)),settingsFile,self.Message)
self.Menu_Main()
self.initSettings()
self.Message.set("Settings are imported from "+ans)
else:
self.Message.set("Importing settings is cancelled.")
def Plugins_Add(self):
self.Message.set("Choosing plugin binary file to add.")
if tkMessageBox.askyesno('Add Plugin...','Plug-ins are created by users and independent from the program. File paths of your images and file path of a mask file is passed as arguments to the plug-in binaries. If you have obtained the plug-in from somebody else, use it only of you trust it.\nDo you want to proceed?'):
if os.path.sep == '/':
pext = ''
else:
pext = '.exe'
self.file_opt = options = {}
if pext == '.exe':
options['defaultextension'] = '.exe'
options['filetypes'] = [ ('Binary files', '.exe'),('all files', '.*')]
options['title'] = 'Choose plugin binary file to add...'
ans = tkFileDialog.askopenfilename(**self.file_opt)
if ans != '' and ans != '.' and ans != ():
ans = os.path.normpath(ans)
if os.path.splitext(ans)[1] != pext or not os.path.isfile(ans):
tkMessageBox.showwarning('Error','Chosen file is not an executable binary file.')
self.Message.set("Choose plugin binary file is cancelled.")
return False
else:
incaux = False
if tkMessageBox.askyesno('Add Plugin...','Does the executable need also the files in the same folder and subfolders with it, or is only the executable binary file enough?\nIf you answer \'Yes\', all files in the same directory and all subdirectories will be copied to the plugin directory! ('+PluginsDir+')'):
incaux = True
#test plugin
self.Message.set("Testing plugin.|busy:True")
for resolution in [(640,480),(1024,758),(2560,1440)]:
self.Message.set("Testing for resolution: "+str(resolution))
mahotas.imsave(os.path.join(TmpDir,'plugintestimg.jpg'),np.random.randint(0,256,(resolution[1],resolution[0],3)).astype('uint8'))
mask = np.random.randint(0,2,(resolution[1],resolution[0]))*255
mask = np.dstack((mask,mask,mask)).astype('uint8')
mahotas.imsave(os.path.join(TmpDir,'plugintestmsk.jpg'),mask)
try:
pipe = subprocess.Popen([ans,os.path.join(TmpDir,'plugintestimg.jpg'),os.path.join(TmpDir,'plugintestmsk.jpg')], stdout=subprocess.PIPE)
res = pipe.communicate()
pipe.wait()
(res, err) = (res[0],res[1])
if err is not None:
self.Message.set('Error: '+err+': '+res)
res = res.replace('\n','')
outstr = res
res = res.split(',')
(res_title,res) = (res[0],res[1:])
if len(res) < 3 or len(res)%2!=0:
res = False
for j in range(len(res)/2):
float(res[j*2+1])
self.Message.set("Testing passed. Output: "+outstr)
except:
self.Message.set("Testing failed. Plugin can not be added.\nPlugin output:\n"+outstr)
tkMessageBox.showerror('Error',"Testing failed. Plugin can not be added.\nPlugin output:\n"+outstr)
self.Message.set("Testing failed.|busy:False")
return False
self.Message.set("Testing complete.|busy:False")
name = os.path.splitext(os.path.split(ans)[1])[0]
try:
if os.path.exists(os.path.join(PluginsDir,name)):
shutil.rmtree(os.path.join(PluginsDir,name))
if not incaux:
os.makedirs(os.path.join(PluginsDir,name))
if os.path.isfile(ans):
shutil.copyfile(ans,os.path.join(PluginsDir,name,name)+pext)
self.Message.set("Plugin is copied to the plugin directory.")
else:
shutil.copytree(os.path.split(ans)[0],os.path.join(PluginsDir,name))
self.Message.set("Plugin is copied to the plugin directory.")
calculations.AddPlugin(name)
if self.ActiveMenu.get() == "Analyses":
self.Menu_Main_Calculations()
except:
tkMessageBox.showerror('Error','Problem in copying files. Check file/folder permissions.')
self.Message.set('Problem in copying files.')
else:
self.Message.set("Choose plugin binary file is cancelled.")
return False
def Plugins_Remove(self):
pluglist = []
for p in calcnames_en:
if 'Plug-in: ' in p:
pluglist.append(p.replace('Plug-in: ',''))
if len(pluglist) == 0:
self.Message.set('There is not any plugin to be removed.|dialog:info')
return False
self.plugintoremove = Tkinter.StringVar()
self.plugintoremove.set(pluglist[0])
self.removeplugindialog = Tkinter.Toplevel(self,padx=10,pady=10)
self.removeplugindialog.wm_title('Remove plugins')
Tkinter.Label(self.removeplugindialog,text='Choose plugin to remove:').grid(sticky='w'+'e',row=1,column=1,columnspan=2)
Tkinter.OptionMenu(self.removeplugindialog,self.plugintoremove,*pluglist).grid(sticky='w'+'e',row=2,column=1,columnspan=2)
Tkinter.Button(self.removeplugindialog ,text='Cancel',command=self.removeplugindialog.destroy).grid(sticky='w'+'e',row=3,column=1,columnspan=1)
Tkinter.Button(self.removeplugindialog ,text='OK',command=self.Plugins_Remove_Remove).grid(sticky='w'+'e',row=3,column=2,columnspan=1)
self.centerWindow(self.removeplugindialog)
self.removeplugindialog.grab_set()
self.removeplugindialog.lift()
self.removeplugindialog.wait_window()
if self.ActiveMenu.get() == "Analyses":
self.Menu_Main_Calculations()
def Plugins_Remove_Remove(self):
if os.path.exists(os.path.join(PluginsDir,self.plugintoremove.get())):
try:
shutil.rmtree(os.path.join(PluginsDir,self.plugintoremove.get()))
self.Message.set('Files and directories that belongs to the Plug-in: '+self.plugintoremove.get()+' are removed.')
except:
self.Message.set('File operations problem: Files and directories that belongs to the Plug-in: '+self.plugintoremove.get()+' can not be removed. Check file/directory permissions.')
tkMessageBox.showerror('Error','File operations problem: Files and directories that belongs to the Plug-in: '+self.plugintoremove.get()+' can not be removed. Check file/directory permissions.')
self.removeplugindialog.lift()
return False
calculations.RemovePlugin(self.plugintoremove.get())
self.Message.set('Plug-in: '+self.plugintoremove.get()+' is removed from the plugin list.')
tkMessageBox.showinfo('Remove plugin','Plug-in: '+self.plugintoremove.get()+' is removed.')
self.removeplugindialog.destroy()
self.grab_set()
    def Tools_Comparison(self):
        """Open the modal 'Comparison tool' window where the user picks the
        product, reference and (optional) classification data sources plus
        the comparison type, then hands off to SetupComparison() via the
        'Next>' button.
        """
        self.Win1 = Tkinter.Toplevel(self,padx=10,pady=10)
        self.Win1.grab_set()
        self.Win1.wm_title('Comparison tool')
        self.Win1.columnconfigure(2, minsize=150)
        self.Win1.columnconfigure(3, minsize=450)
        # product data selector
        self.Win1.ProdName = Tkinter.StringVar()
        self.Win1.ProdName.set("FMIPROT Time series results - Snow Cover Fraction")
        Tkinter.Label(self.Win1,text="Product data",anchor='w').grid(sticky='w'+'e',row=1,column=2,columnspan=1)
        Tkinter.OptionMenu(self.Win1,self.Win1.ProdName,*auxnamelist).grid(sticky='w'+'e',row=1,column=3,columnspan=1)
        # reference data selector
        self.Win1.RefrName = Tkinter.StringVar()
        self.Win1.RefrName.set("MONIMET Visual observations - Snow Cover Fraction")
        Tkinter.Label(self.Win1,text="Reference data",anchor='w').grid(sticky='w'+'e',row=2,column=2,columnspan=1)
        Tkinter.OptionMenu(self.Win1,self.Win1.RefrName,*auxnamelist).grid(sticky='w'+'e',row=2,column=3,columnspan=1)
        # optional classification data selector ("None" disables it)
        self.Win1.ClsdName = Tkinter.StringVar()
        self.Win1.ClsdName.set("None")
        Tkinter.Label(self.Win1,text="Classification data",anchor='w').grid(sticky='w'+'e',row=3,column=2,columnspan=1)
        Tkinter.OptionMenu(self.Win1,self.Win1.ClsdName,*(["None"]+auxnamelist)).grid(sticky='w'+'e',row=3,column=3,columnspan=1)
        # comparison type (multi-class is not offered yet, see comment below)
        self.AuxSourcesComparisonType = Tkinter.StringVar()
        self.AuxSourcesComparisonType.set('Continuous statistics')
        Tkinter.Label(self.Win1,text="Comparison type:",anchor='w').grid(sticky='w'+'e',row=6,column=2,columnspan=1)
        Tkinter.OptionMenu(self.Win1,self.AuxSourcesComparisonType,'Binary statistics','Continuous statistics').grid(sticky='w'+'e',row=6,column=3,columnspan=1)
        #,'Multi-class statistics'
        Tkinter.Button(self.Win1 ,text='Cancel',command=self.CloseWin_1).grid(sticky='w'+'e',row=7,column=2,columnspan=1)
        Tkinter.Button(self.Win1 ,text='Next>',command=self.SetupComparison).grid(sticky='w'+'e',row=7,column=3,columnspan=1)
        self.centerWindow(self.Win1)
        # modal: block until the window is destroyed
        self.Win1.wait_window()
def SetupComparison(self):
    """Validate the Win1 source selections and open the setup dialog (Win2).

    Shows an error and returns False when the selected sources do not
    fit the chosen comparison type (static maps, binary vs continuous
    value types). Otherwise builds Win2 with data directories, per-source
    value settings, optional contingency-matrix classes, resampling
    parameters and spatial/temporal extents, and wires 'Run comparison'
    to CompareBinary or CompareContinuous. Blocks until Win2 is closed.
    """
    # --- validation of the selected sources ---------------------------
    if auxlist[self.Win1.ProdName.get()]["metadata"]["temporal"] == "static" or auxlist[self.Win1.RefrName.get()]["metadata"]["temporal"] == "static":
        tkMessageBox.showerror('Comparison not applicable','Product and reference data should not be a static map.')
        return False
    if self.Win1.ClsdName.get() != "None" and auxlist[self.Win1.ClsdName.get()]["metadata"]["temporal"] != "static":
        tkMessageBox.showerror('Comparison not applicable','Classification data should be a static map.')
        return False
    if self.AuxSourcesComparisonType.get() == 'Binary statistics':
        # NOTE(review): this rejects the comparison when EITHER source is
        # continuous (i.e. both must be binary), yet the message text and
        # the continuous-value branches further below (and in
        # CompareBinary) suggest one continuous source was meant to be
        # allowed -- confirm intent.
        if auxlist[self.Win1.ProdName.get()]["metadata"]["valuetype"] == "continuous" or auxlist[self.Win1.RefrName.get()]["metadata"]["valuetype"] == "continuous":
            tkMessageBox.showerror('Comparison not applicable','Selected data sources are not applicable for binary statistics. At least one of product and reference data should be binary.')
            self.Win1.lift()
            self.Win1.grab_set()
            return False
    if self.AuxSourcesComparisonType.get() == 'Continuous statistics':
        if auxlist[self.Win1.ProdName.get()]["metadata"]["valuetype"] == "binary":
            tkMessageBox.showerror('Comparison not applicable','Selected data sources are not applicable for continous statistics. Product data should be continous.')
            self.Win1.lift()
            self.Win1.grab_set()
            return False
    # --- setup window --------------------------------------------------
    self.Win2 = Tkinter.Toplevel(self,padx=10,pady=10)
    self.Win2.grab_set()
    self.Win2.wm_title('Comparison setup')
    self.Win2.columnconfigure(1, minsize=100)
    self.Win2.columnconfigure(2, minsize=100)
    self.Win2.columnconfigure(3, minsize=100)
    self.Win2.columnconfigure(4, minsize=100)
    r = 1  # running grid-row index
    # --- data directories ----------------------------------------------
    Tkinter.Label(self.Win2,text="Choose the directory of the datasets. Default values for each data source can be set up in settings menu.",anchor='w',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor).grid(sticky='w'+'e',row=r,column=1,columnspan=4)
    self.Win2.ProdDir = Tkinter.StringVar()
    self.Win2.RefrDir = Tkinter.StringVar()
    self.Win2.ClsdDir = Tkinter.StringVar()
    # Directory defaults come from each source's settings in auxlist.
    self.Win2.ProdDir.set(auxlist[self.Win1.ProdName.get()]["settings"]["datadir"])
    self.Win2.RefrDir.set(auxlist[self.Win1.RefrName.get()]["settings"]["datadir"])
    r += 1
    Tkinter.Label(self.Win2,text="Product data",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.ProdDir,justify="left").grid(sticky='w'+'e',row=r,column=2,columnspan=2)
    Tkinter.Button(self.Win2,text='Browse...',command=self.BrowseProdDir).grid(sticky='w'+'e',row=r,column=4)
    r += 1
    Tkinter.Label(self.Win2,text="Reference data",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.RefrDir,justify="left").grid(sticky='w'+'e',row=r,column=2,columnspan=2)
    Tkinter.Button(self.Win2,text='Browse...',command=self.BrowseRefrDir).grid(sticky='w'+'e',row=r,column=4)
    if self.Win1.ClsdName.get() == "None":
        self.Win2.ClsdDir.set("")
    else:
        self.Win2.ClsdDir.set(auxlist[self.Win1.ClsdName.get()]["settings"]["datadir"])
    r += 1
    Tkinter.Label(self.Win2,text="Classification data",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.ClsdDir,justify="left").grid(sticky='w'+'e',row=r,column=2,columnspan=2)
    Tkinter.Button(self.Win2,text='Browse...',command=self.BrowseClsdDir).grid(sticky='w'+'e',row=r,column=4)
    r += 1
    Tkinter.Label(self.Win2,text="Choose values for comparison in the table. Use comma between multiple values and slash for value ranges as in the example.",anchor='w',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor).grid(sticky='w'+'e',row=r,column=1,columnspan=4)
    r += 1
    # Remember this row so the reference-data column (columns 3-4) can be
    # laid out alongside the product-data column (columns 1-2).
    row_sep = deepcopy(r)
    # --- product data value settings (columns 1-2) ---------------------
    Tkinter.Label(self.Win2,text="Product data",anchor='c').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
    if auxlist[self.Win1.ProdName.get()]["metadata"]["valuetype"] == "binary":
        self.Win2.ValsProdTrue = Tkinter.StringVar()
        self.Win2.ValsProdFalse = Tkinter.StringVar()
        self.Win2.ValsProdTrue.set(auxlist[self.Win1.ProdName.get()]["metadata"]["truevalue"])
        self.Win2.ValsProdFalse.set(auxlist[self.Win1.ProdName.get()]["metadata"]["falsevalue"])
        r += 1
        Tkinter.Label(self.Win2,text="True values",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsProdTrue,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
        r += 1
        Tkinter.Label(self.Win2,text="False values",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsProdFalse,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
    if auxlist[self.Win1.ProdName.get()]["metadata"]["valuetype"] == "continuous":
        self.Win2.ValsProdRange = Tkinter.StringVar()
        self.Win2.ValsProdRange.set(auxlist[self.Win1.ProdName.get()]["metadata"]["valuerange"])
        r += 1
        Tkinter.Label(self.Win2,text="Value range",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsProdRange,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
        if self.AuxSourcesComparisonType.get() == 'Binary statistics':
            # NOTE(review): the default threshold is (max-min)/2, i.e. half
            # the span of the "min-max" range string; for ranges not
            # starting at zero the midpoint (max+min)/2 may have been
            # intended -- confirm.
            self.Win2.ValsProdTrueMin = Tkinter.StringVar()
            self.Win2.ValsProdTrueMin.set((float(auxlist[self.Win1.ProdName.get()]["metadata"]["valuerange"].split('-')[1])-float(auxlist[self.Win1.ProdName.get()]["metadata"]["valuerange"].split('-')[0]))/2)
            self.Win2.ValsProdFalseMax = Tkinter.StringVar()
            self.Win2.ValsProdFalseMax.set((float(auxlist[self.Win1.ProdName.get()]["metadata"]["valuerange"].split('-')[1])-float(auxlist[self.Win1.ProdName.get()]["metadata"]["valuerange"].split('-')[0]))/2)
            r += 1
            Tkinter.Label(self.Win2,text="True value threshold (>=)",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
            Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsProdTrueMin,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
            r += 1
            Tkinter.Label(self.Win2,text="False value threshold (>)",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
            Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsProdFalseMax,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
        if self.AuxSourcesComparisonType.get() == 'Continuous statistics':
            self.Win2.ValsProdScale = Tkinter.StringVar()
            self.Win2.ValsProdScale.set(auxlist[self.Win1.ProdName.get()]["metadata"]["valuescale"])
            self.Win2.ValsProdBias = Tkinter.StringVar()
            self.Win2.ValsProdBias.set(auxlist[self.Win1.ProdName.get()]["metadata"]["valuebias"])
            r += 1
            Tkinter.Label(self.Win2,text="Correction scale",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
            Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsProdScale,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
            r += 1
            Tkinter.Label(self.Win2,text="Correction bias",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
            Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsProdBias,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
    # --- reference data value settings (columns 3-4) -------------------
    r = row_sep
    Tkinter.Label(self.Win2,text="Reference data",anchor='c').grid(sticky='w'+'e',row=r,column=3,columnspan=2)
    if auxlist[self.Win1.RefrName.get()]["metadata"]["valuetype"] == "binary":
        self.Win2.ValsRefrTrue = Tkinter.StringVar()
        self.Win2.ValsRefrFalse = Tkinter.StringVar()
        self.Win2.ValsRefrTrue.set(auxlist[self.Win1.RefrName.get()]["metadata"]["truevalue"])
        self.Win2.ValsRefrFalse.set(auxlist[self.Win1.RefrName.get()]["metadata"]["falsevalue"])
        r += 1
        Tkinter.Label(self.Win2,text="True values",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsRefrTrue,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
        r += 1
        Tkinter.Label(self.Win2,text="False values",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsRefrFalse,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
    if auxlist[self.Win1.RefrName.get()]["metadata"]["valuetype"] == "continuous":
        self.Win2.ValsRefrRange = Tkinter.StringVar()
        self.Win2.ValsRefrRange.set(auxlist[self.Win1.RefrName.get()]["metadata"]["valuerange"])
        r += 1
        Tkinter.Label(self.Win2,text="Value range",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsRefrRange,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
        if self.AuxSourcesComparisonType.get() == 'Binary statistics':
            # Same (max-min)/2 default as for the product data above.
            self.Win2.ValsRefrTrueMin = Tkinter.StringVar()
            self.Win2.ValsRefrTrueMin.set((float(auxlist[self.Win1.RefrName.get()]["metadata"]["valuerange"].split('-')[1])-float(auxlist[self.Win1.RefrName.get()]["metadata"]["valuerange"].split('-')[0]))/2)
            self.Win2.ValsRefrFalseMax = Tkinter.StringVar()
            self.Win2.ValsRefrFalseMax.set((float(auxlist[self.Win1.RefrName.get()]["metadata"]["valuerange"].split('-')[1])-float(auxlist[self.Win1.RefrName.get()]["metadata"]["valuerange"].split('-')[0]))/2)
            r += 1
            Tkinter.Label(self.Win2,text="True value threshold (>=)",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
            Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsRefrTrueMin,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
            r += 1
            Tkinter.Label(self.Win2,text="False value threshold (>)",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
            Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsRefrFalseMax,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
        if self.AuxSourcesComparisonType.get() == 'Continuous statistics':
            self.Win2.ValsRefrScale = Tkinter.StringVar()
            self.Win2.ValsRefrScale.set(auxlist[self.Win1.RefrName.get()]["metadata"]["valuescale"])
            self.Win2.ValsRefrBias = Tkinter.StringVar()
            self.Win2.ValsRefrBias.set(auxlist[self.Win1.RefrName.get()]["metadata"]["valuebias"])
            r += 1
            Tkinter.Label(self.Win2,text="Correction scale",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
            Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsRefrScale,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
            r += 1
            Tkinter.Label(self.Win2,text="Correction bias",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
            Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsRefrBias,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
    # --- uncertain-value handling for the reference data ----------------
    self.Win2.ValsRefrInvs = Tkinter.StringVar()
    self.Win2.ValsRefrThrs = Tkinter.StringVar()
    self.Win2.ValsClsdThrs = Tkinter.StringVar()
    self.Win2.ValsContMatr = Tkinter.StringVar()
    self.Win2.ValsRefrInvs.set(auxlist[self.Win1.RefrName.get()]["metadata"]["invisvalue"])
    self.Win2.ValsRefrThrs.set(auxlist[self.Win1.RefrName.get()]["metadata"]["invisthreshold"])
    self.Win2.ValsContMatr.set('None')
    if self.Win1.ClsdName.get() != "None":
        self.Win2.ValsClsdThrs.set(auxlist[self.Win1.ClsdName.get()]["metadata"]["invisthreshold"])
    r += 1
    Tkinter.Label(self.Win2,text="Uncertain values",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsRefrInvs,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
    r += 1
    Tkinter.Label(self.Win2,text="Uncertain value threshold (>=)",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsRefrThrs,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
    # --- contingency matrix classes (edited in ContMatrEdit/Win3) -------
    r += 1
    Tkinter.Label(self.Win2,text="Choose values for the classes to be used in the contingency matrix (optional).",anchor='w',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor).grid(sticky='w'+'e',row=r,column=1,columnspan=4)
    r += 1
    Tkinter.Label(self.Win2,textvariable=self.Win2.ValsContMatr,anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=3)
    Tkinter.Button(self.Win2 ,text='Edit...',command=self.ContMatrEdit).grid(sticky='w'+'e',row=r,column=4,columnspan=1)
    # --- classification data classes (only when a source is selected) ---
    if self.Win1.ClsdName.get() != "None":
        self.Win2.ValsClsdVals = Tkinter.StringVar()
        self.Win2.ValsClsdNams = Tkinter.StringVar()
        self.Win2.ValsClsdVals.set(auxlist[self.Win1.ClsdName.get()]["metadata"]["classvalues"])
        self.Win2.ValsClsdNams.set(auxlist[self.Win1.ClsdName.get()]["metadata"]["classnames"])
        r += 1
        Tkinter.Label(self.Win2,text="Choose values for the classes to be used from the classification data. Use semicolon between classes, comma between multiple values and slash for value ranges as in the example.",anchor='w',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor).grid(sticky='w'+'e',row=r,column=1,columnspan=4)
        r += 1
        Tkinter.Label(self.Win2,text="Class names:",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsClsdNams,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=3)
        r += 1
        Tkinter.Label(self.Win2,text="Class values:",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsClsdVals,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=3)
        r += 1
        Tkinter.Label(self.Win2,text="Fraction threshold (>=)",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ValsClsdThrs,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=3)
    # --- resampling parameters -----------------------------------------
    r += 1
    Tkinter.Label(self.Win2,text="Choose values for resampling parameters. Resampling is done using Gaussian distribution.",anchor='w',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor).grid(sticky='w'+'e',row=r,column=1,columnspan=4)
    self.Win2.ResampleRadiusRefr = Tkinter.StringVar()
    self.Win2.ResampleSigmaRefr = Tkinter.StringVar()
    self.Win2.ResampleRadiusRefr.set("Auto")
    self.Win2.ResampleSigmaRefr.set("Auto")
    self.Win2.ResampleRadiusClsd = Tkinter.StringVar()
    self.Win2.ResampleSigmaClsd = Tkinter.StringVar()
    self.Win2.ResampleRadiusClsd.set("Auto")
    self.Win2.ResampleSigmaClsd.set("Auto")
    self.Win2.ValsClsdOnto = Tkinter.StringVar()
    self.Win2.ValsClsdOnto.set('Reference data')
    if self.Win1.ClsdName.get() != "None":
        # Two resampling columns: reference data and classification data.
        r+= 1
        Tkinter.Label(self.Win2,text="Reference Data",anchor='w',justify="center").grid(sticky='w'+'e',row=r,column=3,columnspan=1)
        Tkinter.Label(self.Win2,text="Classification Data",anchor='w',justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
        r += 1
        Tkinter.Label(self.Win2,text="Radius of influence (m)",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ResampleRadiusRefr,justify="center").grid(sticky='w'+'e',row=r,column=3,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ResampleRadiusClsd,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
        r += 1
        Tkinter.Label(self.Win2,text="Sigma (See documentation) (m)",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ResampleSigmaRefr,justify="center").grid(sticky='w'+'e',row=r,column=3,columnspan=1)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ResampleSigmaClsd,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
        r += 1
        Tkinter.Label(self.Win2,text="Reproject onto",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
        Tkinter.OptionMenu(self.Win2,self.Win2.ValsClsdOnto,*['Product data','Reference data']).grid(sticky='w'+'e',row=r,column=4,columnspan=2)
    else:
        # Single resampling column (reference data only).
        r += 1
        Tkinter.Label(self.Win2,text="Radius of influence (m)",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ResampleRadiusRefr,justify="center").grid(sticky='w'+'e',row=r,column=3,columnspan=2)
        r += 1
        Tkinter.Label(self.Win2,text="Sigma (See documentation) (m)",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
        Tkinter.Entry(self.Win2,textvariable=self.Win2.ResampleSigmaRefr,justify="center").grid(sticky='w'+'e',row=r,column=3,columnspan=2)
    # --- spatial and temporal extents ----------------------------------
    # Jump to a fixed row so the bottom section lines up regardless of how
    # many optional rows were added above.
    r = 40
    Tkinter.Label(self.Win2,text="Enter the spatial extent for the validation to be done in. Use slash for value ranges as in the example.",anchor='w',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor).grid(sticky='w'+'e',row=r,column=1,columnspan=4)
    self.Win2.LatitudeRange = Tkinter.StringVar()
    self.Win2.LatitudeRange.set("-90/90")
    self.Win2.LongitudeRange = Tkinter.StringVar()
    self.Win2.LongitudeRange.set("-180/180")
    r += 1
    Tkinter.Label(self.Win2,text="Latitude range (degrees)",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.LatitudeRange,justify="center").grid(sticky='w'+'e',row=r,column=3,columnspan=2)
    r += 1
    Tkinter.Label(self.Win2,text="Longitude range (degrees)",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=2)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.LongitudeRange,justify="center").grid(sticky='w'+'e',row=r,column=3,columnspan=2)
    r += 1
    # Maximum allowed time difference between matched product/reference
    # records (IntVar.set accepts these numeric strings).
    self.Win2.TemporalRangeDays = Tkinter.IntVar()
    self.Win2.TemporalRangeHours = Tkinter.IntVar()
    self.Win2.TemporalRangeMinutes = Tkinter.IntVar()
    self.Win2.TemporalRangeSeconds = Tkinter.IntVar()
    self.Win2.TemporalRangeDays.set("0")
    self.Win2.TemporalRangeHours.set("11")
    self.Win2.TemporalRangeMinutes.set("59")
    self.Win2.TemporalRangeSeconds.set("59")
    Tkinter.Label(self.Win2,text="Enter maximum temporal difference for datasets to be compared.",anchor='w',bg=self.MenuTitleBgColor,fg=self.MenuTitleTextColor).grid(sticky='w'+'e',row=r,column=1,columnspan=4)
    r += 1
    Tkinter.Label(self.Win2,text="Days",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.TemporalRangeDays,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
    Tkinter.Label(self.Win2,text="Hours",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.TemporalRangeHours,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
    r += 1
    Tkinter.Label(self.Win2,text="Minutes",anchor='w').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.TemporalRangeMinutes,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
    Tkinter.Label(self.Win2,text="Seconds",anchor='w').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
    Tkinter.Entry(self.Win2,textvariable=self.Win2.TemporalRangeSeconds,justify="center").grid(sticky='w'+'e',row=r,column=4,columnspan=1)
    # --- action buttons -------------------------------------------------
    r += 1
    Tkinter.Button(self.Win2 ,text='Cancel',command=self.CloseWin_2).grid(sticky='w'+'e',row=r,column=1,columnspan=2)
    if self.AuxSourcesComparisonType.get() == 'Binary statistics':
        Tkinter.Button(self.Win2 ,text='Run comparison',command=self.CompareBinary).grid(sticky='w'+'e',row=r,column=3,columnspan=2)
    if self.AuxSourcesComparisonType.get() == 'Continuous statistics':
        Tkinter.Button(self.Win2 ,text='Run comparison',command=self.CompareContinuous).grid(sticky='w'+'e',row=r,column=3,columnspan=2)
    self.centerWindow(self.Win2)
    self.Win2.wait_window()  # block until the dialog is destroyed
def BrowseProdDir(self):
    """Ask for the product data directory and store it in Win2.ProdDir."""
    options = {}
    self.file_opt = options
    options['title'] = 'Select data directory...'
    chosen = str(os.path.normpath(tkFileDialog.askdirectory(**self.file_opt)))
    # An empty answer or '.' means the dialog was cancelled.
    if chosen not in ('', '.'):
        self.Win2.ProdDir.set(chosen)
    # Bring the setup window back to the front after the native dialog.
    self.Win2.grab_set()
    self.Win2.lift()
    self.Win2.geometry("")
def BrowseRefrDir(self):
    """Ask for the reference data directory and store it in Win2.RefrDir."""
    options = {}
    self.file_opt = options
    options['title'] = 'Select data directory...'
    chosen = str(os.path.normpath(tkFileDialog.askdirectory(**self.file_opt)))
    # An empty answer or '.' means the dialog was cancelled.
    if chosen not in ('', '.'):
        self.Win2.RefrDir.set(chosen)
    # Bring the setup window back to the front after the native dialog.
    self.Win2.grab_set()
    self.Win2.lift()
    self.Win2.geometry("")
def BrowseClsdDir(self):
    """Ask for the classification data directory and store it in Win2.ClsdDir."""
    options = {}
    self.file_opt = options
    options['title'] = 'Select data directory...'
    chosen = str(os.path.normpath(tkFileDialog.askdirectory(**self.file_opt)))
    # An empty answer or '.' means the dialog was cancelled.
    if chosen not in ('', '.'):
        self.Win2.ClsdDir.set(chosen)
    # Bring the setup window back to the front after the native dialog.
    self.Win2.grab_set()
    self.Win2.lift()
    self.Win2.geometry("")
def ContMatrEdit(self):
    """Open the contingency-matrix class editor window (Win3).

    Parses the current definition in ``self.Win2.ValsContMatr`` — a comma
    separated list of conditions of the form ``<min> <eq> Value <eq> <max>``
    — into per-class lists on Win3, then builds one editable row per class
    plus Add/OK/Cancel buttons. Blocks until Win3 is closed.
    """
    self.Win3 = Tkinter.Toplevel(self,padx=10,pady=10)
    self.Win3.grid_propagate(1)
    self.Win3.grab_set()
    self.Win3.wm_title('Comparison setup')
    self.Win3.columnconfigure(1, minsize=100)
    self.Win3.columnconfigure(2, minsize=30)
    self.Win3.columnconfigure(3, minsize=30)
    self.Win3.columnconfigure(4, minsize=40)
    self.Win3.columnconfigure(5, minsize=30)
    self.Win3.columnconfigure(6, minsize=30)
    self.Win3.columnconfigure(7, minsize=30)
    # Bug fix: these lists were previously only created in the 'None'
    # branch, so opening the editor with an existing definition raised
    # AttributeError on the .append calls below.
    self.Win3.ValsMin = []
    self.Win3.ValsMax = []
    self.Win3.EqsMin = []
    self.Win3.EqsMax = []
    if self.Win2.ValsContMatr.get() != 'None':
        for cond in self.Win2.ValsContMatr.get().split(','):
            # Tokens: [min, min-eq, 'Value', max-eq, max]
            cond = cond.split()
            self.Win3.ValsMin.append(cond[0])
            self.Win3.EqsMin.append(cond[1])
            self.Win3.ValsMax.append(cond[4])
            self.Win3.EqsMax.append(cond[3])
    r = 0
    r += 1
    Tkinter.Button(self.Win3,text='Add',command=self.ContMatrAdd).grid(sticky='w'+'e',row=r,column=3)
    Tkinter.Button(self.Win3,text='OK',command=self.ContMatrOK).grid(sticky='w'+'e',row=r,column=4)
    Tkinter.Button(self.Win3,text='Cancel',command=self.CloseWin_3).grid(sticky='w'+'e',row=r,column=5)
    r += 1
    Tkinter.Label(self.Win3,text="Class",anchor='c').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
    Tkinter.Label(self.Win3,text="Min",anchor='c').grid(sticky='w'+'e',row=r,column=2,columnspan=1)
    Tkinter.Label(self.Win3,text="Eq.",anchor='c').grid(sticky='w'+'e',row=r,column=3,columnspan=1)
    Tkinter.Label(self.Win3,text="Eq.",anchor='c').grid(sticky='w'+'e',row=r,column=5,columnspan=1)
    Tkinter.Label(self.Win3,text="Max",anchor='c').grid(sticky='w'+'e',row=r,column=6,columnspan=1)
    for i in range(len(self.Win3.ValsMin)):
        r += 1
        Tkinter.Label(self.Win3,text="Class "+str(i+1),anchor='c').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
        # Per-row Tkinter variables are stored as Win3.ValMin<i>, EqMin<i>,
        # EqMax<i>, ValMax<i>; setattr replaces the previous exec/eval
        # string building (same attribute names, no dynamic code).
        valmin = Tkinter.StringVar()
        valmin.set(self.Win3.ValsMin[i])
        setattr(self.Win3, 'ValMin' + str(i), valmin)
        Tkinter.Entry(self.Win3,textvariable=valmin,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
        eqmin = Tkinter.StringVar()
        eqmin.set(self.Win3.EqsMin[i])
        setattr(self.Win3, 'EqMin' + str(i), eqmin)
        Tkinter.OptionMenu(self.Win3,eqmin,*['<','<=']).grid(sticky='w'+'e',row=r,column=3,columnspan=1)
        Tkinter.Label(self.Win3,text="Value",anchor='c').grid(sticky='w'+'e',row=r,column=4,columnspan=1)
        eqmax = Tkinter.StringVar()
        eqmax.set(self.Win3.EqsMax[i])
        setattr(self.Win3, 'EqMax' + str(i), eqmax)
        Tkinter.OptionMenu(self.Win3,eqmax,*['>','>=']).grid(sticky='w'+'e',row=r,column=5,columnspan=1)
        valmax = Tkinter.StringVar()
        valmax.set(self.Win3.ValsMax[i])
        setattr(self.Win3, 'ValMax' + str(i), valmax)
        Tkinter.Entry(self.Win3,textvariable=valmax,justify="center").grid(sticky='w'+'e',row=r,column=6,columnspan=1)
        # FIXME: 'Del' is wired to self.BrowseProdDir (copy/paste error);
        # a real row-deletion handler does not exist yet.
        Tkinter.Button(self.Win3,text='Del',command=self.BrowseProdDir).grid(sticky='w'+'e',row=r,column=7)
    self.centerWindow(self.Win3)
    self.Win3.wait_window()
def ContMatrAdd(self):
    """Append one editable class row to the contingency-matrix editor (Win3).

    Extends the Win3 value/equality lists with defaults and grids a new
    row of widgets below the existing ones (rows 1-2 hold the buttons
    and the column headers).
    """
    r = 3 + len(self.Win3.ValsMin)
    i = len(self.Win3.ValsMin)
    # Defaults for a freshly added class.
    self.Win3.ValsMin.append('0')
    self.Win3.EqsMin.append('<')
    self.Win3.ValsMax.append('0')
    self.Win3.EqsMax.append('>')
    Tkinter.Label(self.Win3,text="Class "+str(i+1),anchor='c').grid(sticky='w'+'e',row=r,column=1,columnspan=1)
    # Per-row Tkinter variables are stored as Win3.ValMin<i>, EqMin<i>,
    # EqMax<i>, ValMax<i>; setattr replaces the previous exec/eval string
    # building (same attribute names, no dynamic code).
    valmin = Tkinter.StringVar()
    valmin.set(self.Win3.ValsMin[i])
    setattr(self.Win3, 'ValMin' + str(i), valmin)
    Tkinter.Entry(self.Win3,textvariable=valmin,justify="center").grid(sticky='w'+'e',row=r,column=2,columnspan=1)
    eqmin = Tkinter.StringVar()
    eqmin.set(self.Win3.EqsMin[i])
    setattr(self.Win3, 'EqMin' + str(i), eqmin)
    Tkinter.OptionMenu(self.Win3,eqmin,*['<','<=']).grid(sticky='w'+'e',row=r,column=3,columnspan=1)
    Tkinter.Label(self.Win3,text="Value",anchor='c').grid(sticky='w'+'e',row=r,column=4,columnspan=1)
    eqmax = Tkinter.StringVar()
    eqmax.set(self.Win3.EqsMax[i])
    setattr(self.Win3, 'EqMax' + str(i), eqmax)
    Tkinter.OptionMenu(self.Win3,eqmax,*['>','>=']).grid(sticky='w'+'e',row=r,column=5,columnspan=1)
    valmax = Tkinter.StringVar()
    valmax.set(self.Win3.ValsMax[i])
    setattr(self.Win3, 'ValMax' + str(i), valmax)
    Tkinter.Entry(self.Win3,textvariable=valmax,justify="center").grid(sticky='w'+'e',row=r,column=6,columnspan=1)
    # FIXME: 'Del' is wired to self.BrowseProdDir (copy/paste error);
    # a real row-deletion handler does not exist yet.
    Tkinter.Button(self.Win3,text='Del',command=self.BrowseProdDir).grid(sticky='w'+'e',row=r,column=7)
def ContMatrOK(self):
    """Confirm the contingency-matrix editor (Win3).

    NOTE(review): not implemented yet -- this should collect the
    ValMin<i>/EqMin<i>/EqMax<i>/ValMax<i> variables from Win3 and update
    self.Win2.ValsContMatr; currently it only returns False and leaves
    the editor window open.
    """
    #update self.Win2.ValsContMatr
    return False
def CompareBinary(self):
    """Run a binary comparison between the selected product and reference data.

    Gathers the value settings from the setup window (Win2), calls
    comparators.compareBinary, stores any returned output under the
    current output path, then closes both tool windows and opens the
    results menu. Asks for confirmation first, since the comparison can
    take a long time.
    """
    # String fix: the confirmation dialog previously said "amout".
    if tkMessageBox.askyesno("Binary comparison","Depending on the amount of the data, binary comparison can take a long time to be completed. Do you want to proceed?"):
        resultspath = self.outputpath.get()
        if not os.path.exists(resultspath):
            os.makedirs(resultspath)
        # Value tuples are shaped by the source's value type: binary
        # sources pass true/false values, continuous sources pass a value
        # range plus true/false thresholds.
        if auxlist[self.Win1.ProdName.get()]["metadata"]["valuetype"] == "binary":
            ProdVals = (self.Win2.ValsProdTrue.get(),self.Win2.ValsProdFalse.get())
        if auxlist[self.Win1.ProdName.get()]["metadata"]["valuetype"] == "continuous":
            ProdVals = (self.Win2.ValsProdRange.get(),self.Win2.ValsProdTrueMin.get(),self.Win2.ValsProdFalseMax.get())
        if auxlist[self.Win1.RefrName.get()]["metadata"]["valuetype"] == "binary":
            RefrVals = (self.Win2.ValsRefrTrue.get(),self.Win2.ValsRefrFalse.get(),self.Win2.ValsRefrInvs.get(),self.Win2.ValsRefrThrs.get())
        if auxlist[self.Win1.RefrName.get()]["metadata"]["valuetype"] == "continuous":
            RefrVals = (self.Win2.ValsRefrRange.get(),self.Win2.ValsRefrTrueMin.get(),self.Win2.ValsRefrFalseMax.get(),self.Win2.ValsRefrInvs.get(),self.Win2.ValsRefrThrs.get())
        if self.Win1.ClsdName.get() == "None":
            # No classification data selected: create placeholder variables
            # so the compareBinary call below can read them uniformly.
            self.Win2.ValsClsdVals = Tkinter.StringVar()
            self.Win2.ValsClsdNams = Tkinter.StringVar()
            self.Win2.ValsClsdVals.set("None")
            self.Win2.ValsClsdNams.set("None")
            ClsdVals = None
        else:
            ClsdVals = (self.Win2.ValsClsdVals.get(),self.Win2.ValsClsdThrs.get(),self.Win2.ValsClsdOnto.get())
        # Spatial (lat/lon ranges) and temporal (max matching difference)
        # extents collected from Win2.
        extent = (self.Win2.LatitudeRange.get(),self.Win2.LongitudeRange.get(),self.Win2.TemporalRangeDays.get(),self.Win2.TemporalRangeHours.get(),self.Win2.TemporalRangeMinutes.get(),self.Win2.TemporalRangeSeconds.get())
        output = comparators.compareBinary(self.Win1.ProdName.get(),self.Win2.ProdDir.get(),ProdVals,self.Win1.RefrName.get(),self.Win2.RefrDir.get(),RefrVals, self.Win1.ClsdName.get(),self.Win2.ClsdDir.get(),self.Win2.ValsClsdNams.get(), ClsdVals,self.Win2.ResampleRadiusRefr.get(),self.Win2.ResampleSigmaRefr.get(),self.Win2.ResampleRadiusClsd.get(),self.Win2.ResampleSigmaClsd.get(),extent,self.Message)
        analysis_captions = {'source': '', 'analysis': 'Binary statistics', 'scenario': self.Win1.ProdName.get()+' vs '+self.Win1.RefrName.get(), 'network': ''}
        if output:
            filelabel = os.path.join(resultspath,'Comparison')
            storeData(filelabel,analysis_captions,output,self.Message)
        # NOTE(review): the windows are closed and the results menu opened
        # even when the comparison produced no output -- confirm intended.
        self.CloseWin_2()
        self.CloseWin_1()
        self.Menu_Main_Results()
        self.ResultFolderNameVariable.set(resultspath)
def CompareContinuous(self):
    """Run a continuous comparison between the selected product and reference data.

    Gathers the value settings from the setup window (Win2), calls
    comparators.compareContinuous, stores any returned output under the
    current output path, then closes both tool windows and opens the
    results menu. Asks for confirmation first, since the comparison can
    take a long time.
    """
    # String fix: the confirmation dialog previously said "amout".
    if tkMessageBox.askyesno("Continuous comparison","Depending on the amount of the data, continuous comparison can take a long time to be completed. Do you want to proceed?"):
        resultspath = self.outputpath.get()
        if not os.path.exists(resultspath):
            os.makedirs(resultspath)
        # Value tuples are shaped by the source's value type: binary
        # sources pass true/false values, continuous sources pass a value
        # range plus correction scale/bias.
        if auxlist[self.Win1.ProdName.get()]["metadata"]["valuetype"] == "binary":
            ProdVals = (self.Win2.ValsProdTrue.get(),self.Win2.ValsProdFalse.get())
        if auxlist[self.Win1.ProdName.get()]["metadata"]["valuetype"] == "continuous":
            ProdVals = (self.Win2.ValsProdRange.get(),self.Win2.ValsProdScale.get(),self.Win2.ValsProdBias.get())
        if auxlist[self.Win1.RefrName.get()]["metadata"]["valuetype"] == "binary":
            RefrVals = (self.Win2.ValsRefrTrue.get(),self.Win2.ValsRefrFalse.get(),self.Win2.ValsRefrInvs.get(),self.Win2.ValsRefrThrs.get())
        if auxlist[self.Win1.RefrName.get()]["metadata"]["valuetype"] == "continuous":
            RefrVals = (self.Win2.ValsRefrRange.get(),self.Win2.ValsRefrScale.get(),self.Win2.ValsRefrBias.get(),self.Win2.ValsRefrInvs.get(),self.Win2.ValsRefrThrs.get())
        if self.Win1.ClsdName.get() == "None":
            # No classification data selected: create placeholder variables
            # so the compareContinuous call below can read them uniformly.
            self.Win2.ValsClsdVals = Tkinter.StringVar()
            self.Win2.ValsClsdNams = Tkinter.StringVar()
            self.Win2.ValsClsdVals.set("None")
            self.Win2.ValsClsdNams.set("None")
            ClsdVals = None
        else:
            ClsdVals = (self.Win2.ValsClsdVals.get(),self.Win2.ValsClsdThrs.get(),self.Win2.ValsClsdOnto.get())
        # Spatial (lat/lon ranges) and temporal (max matching difference)
        # extents collected from Win2.
        extent = (self.Win2.LatitudeRange.get(),self.Win2.LongitudeRange.get(),self.Win2.TemporalRangeDays.get(),self.Win2.TemporalRangeHours.get(),self.Win2.TemporalRangeMinutes.get(),self.Win2.TemporalRangeSeconds.get())
        output = comparators.compareContinuous(self.Win1.ProdName.get(),self.Win2.ProdDir.get(),ProdVals,self.Win1.RefrName.get(),self.Win2.RefrDir.get(),RefrVals,self.Win1.ClsdName.get(),self.Win2.ClsdDir.get(),self.Win2.ValsClsdNams.get(), ClsdVals ,self.Win2.ResampleRadiusRefr.get(),self.Win2.ResampleSigmaRefr.get(),self.Win2.ResampleRadiusClsd.get(),self.Win2.ResampleSigmaClsd.get(),extent,self.Message)
        analysis_captions = {'source': '', 'analysis': 'Continuous statistics', 'scenario': self.Win1.ProdName.get()+' vs '+self.Win1.RefrName.get(), 'network': ''}
        if output:
            filelabel = os.path.join(resultspath,'Comparison')
            storeData(filelabel,analysis_captions,output,self.Message)
        # NOTE(review): the windows are closed and the results menu opened
        # even when the comparison produced no output -- confirm intended.
        self.CloseWin_2()
        self.CloseWin_1()
        self.Menu_Main_Results()
        self.ResultFolderNameVariable.set(resultspath)
def Tools_Georectification(self):
self.UpdateSetup()
from georectification import georectificationTool, transSingle, transExtent
analysis = self.setup[self.AnalysisNoVariable.get()-1]['analysis-'+str(self.CalculationNoVariable.get())]
geoparams = paramnames[calcids.index('GEOREC001')]
geoopts = paramopts[calcids.index('GEOREC001')]
corrparams = paramnames[calcids.index('IMGCORR01')]
message = ''
message += 'This tool simulates the georectification using VTK and | |
xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
def _id_callback(self, event):
    """Yield the dataframe IDs of the features selected in a pick event."""
    # event.ind is the index into event's x and y data.
    #
    # event.artist.ind is the index of the entire artist into the original
    # dataframe.
    #
    # NOTE(review): DataFrame.ix was deprecated in pandas 0.20 and removed
    # in 1.0; this presumably needs .iloc or .loc depending on whether
    # event.artist.ind holds positions or labels -- confirm before porting.
    subset_df = event.artist.df.ix[event.artist.ind]
    for i in event.ind:
        _id = subset_df.index[i]
        yield _id
def _default_callback(self, i):
    """Default pick handler: print the data row for the selected feature.

    NOTE(review): DataFrame.ix was removed in pandas 1.0; .loc/.iloc is
    needed on modern pandas -- confirm whether `i` is a label or position.
    """
    print(self.data.ix[i])
class DifferentialExpressionResults(ResultsTable):
    __doc__ = _base_doc % dedent("""
    A ResultsTable subclass for working with differential expression results.

    Adds methods for up/down regulation, ma_plot, and sets class variables for
    which columns should be considered for pval, log fold change, and mean
    values. This class acts as a parent for subclasses like DESeqResults,
    EdgeRResults, and others.
    """)

    # Column names used by the helper methods below; subclasses (e.g.
    # EdgeRResults) override these to match their tool's output format.
    pval_column = 'padj'
    lfc_column = 'log2FoldChange'
    mean_column = 'baseMean'

    def __init__(self, data, db=None, header_check=True, **kwargs):
        """
        Parameters
        ----------
        data : str or DataFrame
            Filename of a results table, or an already-imported frame.
        db :
            Passed through to ResultsTable.
        header_check : bool
            When True and `data` is a filename, set import defaults
            (comment character and NaN values) before importing.
        """
        import_kwargs = kwargs.pop('import_kwargs', {})
        if header_check and isinstance(data, str):
            comment_char = import_kwargs.get('comment', '#')
            import_kwargs['comment'] = comment_char
            import_kwargs['na_values'] = ['nan']
        # Use the first column as the index unless the caller overrides it.
        import_kwargs['index_col'] = import_kwargs.pop('index_col', 0)
        super(DifferentialExpressionResults, self).__init__(
            data=data, db=db, import_kwargs=import_kwargs, **kwargs)

    def changed(self, alpha=0.1, lfc=0, idx=True):
        """
        Changed features.

        Helper function to get where the pval is <= alpha and the
        absolute value log2foldchange is >= lfc.

        Parameters
        ----------
        alpha : float
        lfc : float
        idx : bool
            If True, a boolean index will be returned. If False, a new object
            will be returned that has been subsetted.
        """
        ind = (
            (self.data[self.pval_column] <= alpha) &
            (np.abs(self.data[self.lfc_column]) >= lfc)
        )
        if idx:
            return ind
        return self[ind]

    def unchanged(self, alpha=0.1, lfc=0, idx=True):
        """
        Unchanged features.

        Helper function to get the complement of changed(): features where
        the pval is > alpha OR the absolute value of the log2foldchange
        is < lfc.

        Parameters
        ----------
        alpha : float
        lfc : float
        idx : bool
            If True, a boolean index will be returned. If False, a new object
            will be returned that has been subsetted.
        """
        # ~((p <= alpha) & (|lfc| >= lfc)) == (p > alpha) | (|lfc| < lfc)
        ind = ~self.changed(alpha, lfc, idx)
        if idx:
            return ind
        return self[ind]

    def upregulated(self, alpha=0.1, lfc=0, idx=True):
        """
        Upregulated features.

        Helper function to get where the pval is <= alpha and the
        log2foldchange is >= lfc.

        Parameters
        ----------
        alpha : float
        lfc : float
        idx : bool
            If True, a boolean index will be returned. If False, a new object
            will be returned that has been subsetted.
        """
        ind = (
            (self.data[self.pval_column] <= alpha) &
            (self.data[self.lfc_column] >= lfc)
        )
        if idx:
            return ind
        return self[ind]

    def downregulated(self, alpha=0.1, lfc=0, idx=True):
        """
        Downregulated features.

        Helper function to get where the pval is <= alpha and the
        log2foldchange is <= lfc.

        Parameters
        ----------
        alpha : float
        lfc : float
        idx : bool
            If True, a boolean index will be returned. If False, a new object
            will be returned that has been subsetted.
        """
        ind = (
            (self.data[self.pval_column] <= alpha) &
            (self.data[self.lfc_column] <= lfc)
        )
        if idx:
            return ind
        return self[ind]

    def ma_plot(self, alpha, up_kwargs=None, dn_kwargs=None,
                zero_line=None, **kwargs):
        """
        MA plot.

        Plots the average read count across treatments (x-axis) vs the log2
        fold change (y-axis).

        Additional kwargs are passed to self.scatter (useful ones might include
        `genes_to_highlight`)

        Parameters
        ----------
        alpha : float
            Features with values <= `alpha` will be highlighted in the plot.
        up_kwargs, dn_kwargs : None or dict
            Kwargs passed to matplotlib's scatter(), used for styling up/down
            regulated features (defined by `alpha` and `col`)
        zero_line : None or dict
            Kwargs passed to matplotlib.axhline(0). No line is drawn when
            None or empty.
        """
        # Bug fix: copy the caller's list before appending, otherwise the
        # up/down highlight entries accumulate in the caller's list across
        # repeated ma_plot calls.
        genes_to_highlight = list(kwargs.pop('genes_to_highlight', []))
        genes_to_highlight.append(
            (self.upregulated(alpha),
             up_kwargs or dict(color='r')))
        genes_to_highlight.append(
            (self.downregulated(alpha),
             dn_kwargs or dict(color='b')))
        if zero_line is None:
            zero_line = {}
        x = self.mean_column
        y = self.lfc_column
        if 'xfunc' not in kwargs:
            # Mean counts span orders of magnitude; default to a log x-axis.
            kwargs['xfunc'] = np.log
        ax = self.scatter(
            x=x,
            y=y,
            genes_to_highlight=genes_to_highlight,
            **kwargs)
        # An empty dict is falsy, so zero_line=None (the default) draws
        # nothing; pass e.g. dict(color='k') to get the y=0 line.
        if zero_line:
            ax.axhline(0, **zero_line)
        return ax
class EdgeRResults(DifferentialExpressionResults):
    # Docstring is built from the shared _base_doc template at import time.
    __doc__ = _base_doc % dedent(
        """
        Class for working with results from edgeR.
        Just like a DifferentialExpressionResults object, but sets the
        pval_column, lfc_column, and mean_column to the names used in edgeR's
        output.
        """)
    # Column names as they appear in edgeR's output table.
    pval_column = 'FDR'
    lfc_column = 'logFC'
    mean_column = 'logCPM'
class DESeqResults(DifferentialExpressionResults):
    __doc__ = _base_doc % dedent(
        """
        Class for working with results from DESeq.
        Just like a DifferentialExpressionResults object, but sets the
        pval_column, lfc_column, and mean_column to the names used in DESeq
        (v1) output.
        """)

    def colormapped_bedfile(self, genome, cmap=None):
        """
        Create a BED file with padj encoded as color

        Features will be colored according to adjusted pval (phred
        transformed). Downregulated features have the sign flipped.

        Parameters
        ----------
        genome : str or dict
            Passed to pybedtools' truncate_to_chrom to clip features to
            chromosome bounds.
        cmap : matplotlib colormap
            Default is matplotlib.cm.RdBu_r

        Notes
        -----
        Requires a FeatureDB to be attached.
        """
        if self.db is None:
            raise ValueError("FeatureDB required")
        db = gffutils.FeatureDB(self.db)

        def scored_feature_generator(d):
            # Yield one colored BED12+ interval per row of the results table.
            for i in range(len(d)):
                try:
                    # NOTE(review): pandas `.ix` is removed in modern pandas;
                    # this path assumes a legacy pandas version -- confirm
                    # before upgrading.
                    feature = db[d.ix[i]]
                except gffutils.FeatureNotFoundError:
                    raise gffutils.FeatureNotFoundError(d.ix[i])
                # Phred-transform padj; the sign encodes direction of change.
                score = -10 * np.log10(d.padj[i])
                lfc = d.log2FoldChange[i]
                if np.isnan(lfc):
                    score = 0
                if lfc < 0:
                    score *= -1
                feature.score = str(score)
                feature = featurefuncs.extend_fields(
                    featurefuncs.gff2bed(
                        gffutils.helpers.asinterval(feature)), 9)
                fields = feature.fields[:]
                # thickStart/thickEnd mirror the feature's start/stop.
                fields[6] = fields[1]
                fields[7] = fields[2]
                fields.append(str(d.padj[i]))
                fields.append(str(d.pval[i]))
                fields.append('%.3f' % d.log2FoldChange[i])
                # Bug fix: baseMeanB was previously appended twice; the
                # extra fields must match autosql_file(), which declares
                # basemeana followed by basemeanb.
                fields.append('%.3f' % d.baseMeanA[i])
                fields.append('%.3f' % d.baseMeanB[i])
                yield pybedtools.create_interval_from_list(fields)

        x = pybedtools.BedTool(scored_feature_generator(self)).saveas()
        norm = x.colormap_normalize()
        if cmap is None:
            cmap = matplotlib.cm.RdBu_r
        # Re-center the colormap on zero so color encodes direction.
        cmap = colormap_adjust.cmap_center_point_adjust(
            cmap, [norm.vmin, norm.vmax], 0)

        def score_zeroer(f):
            # BED scores must be 0-1000; color already carries the signal.
            f.score = '0'
            return f

        return x.each(featurefuncs.add_color, cmap=cmap, norm=norm)\
                .sort()\
                .each(score_zeroer)\
                .truncate_to_chrom(genome)\
                .saveas()

    def autosql_file(self):
        """
        Generate the autosql for DESeq results (to create bigBed)

        Returns a temp filename containing the autosql defining the extra
        fields.

        This for creating bigBed files from BED files created by
        colormapped_bed. When a user clicks on a feature, the DESeq results
        will be reported.
        """
        fn = pybedtools.BedTool._tmp()
        AUTOSQL = dedent(
            """
            table example
            "output from DESeq"
            (
            string chrom; "chromosome"
            uint chromStart; "start coord"
            uint chromEnd; "stop coord"
            string name; "name of feature"
            uint score; "always zero"
            char[1] strand; "+ or - for strand"
            uint thickStart; "Coding region start"
            uint thickEnd; "Coding region end"
            uint reserved; "color according to score"
            string padj; "DESeq adjusted p value"
            string pval; "DESeq raw p value"
            string logfoldchange; "DESeq log2 fold change"
            string basemeana; "DESeq baseMeanA"
            string basemeanb; "DESeq baseMeanB"
            )
            """)
        # Context manager guarantees the temp file is flushed and closed.
        with open(fn, 'w') as fout:
            fout.write(AUTOSQL)
        return fn
class DESeq2Results(DESeqResults):
    # Docstring fix: previously claimed the column names were those used by
    # edgeR; they are the DESeq2 output column names.
    __doc__ = _base_doc % dedent(
        """
        Class for working with results from DESeq2.
        Just like a DifferentialExpressionResults object, but sets the
        pval_column, lfc_column, and mean_column to the names used in DESeq2's
        output.
        """)
    # Column names as they appear in DESeq2 output tables.
    pval_column = 'padj'
    lfc_column = 'log2FoldChange'
    mean_column = 'baseMean'
class LazyDict(object):
def __init__(self, fn_dict, index_file=None, index_from=None, extra=None,
cls=DESeqResults):
"""
Dictionary-like object that lazily-loads ResultsTable objects.
Primary use-case for this is for organizing a large number of
differential expression results that you'll be cross-comparing and 1)
you don't want to immediately load EVERYTHING into a dataframe, and 2)
you want to ensure the indexes are aligned across dataframes.
Only upon accessing a value from the LazyDict will the data be loaded
into a DataFrame.
Parameters
----------
fn_dict : dict
Keys of `fn_dict` will be the keys of this LazyDict object. Values
should be filenames which will be loaded into ResultsTable object
upon access for the first time.
index_file : str
Path to a file that contains one ID per line. This file is used to
ensure all ResultsTable objects are aligned to the same index. If
you don't want to provide this, then set it to None and consider
the `index_from` kwarg.
index_from : str
Key of the dataframe whose index will be used to align other
dataframes. If this is provided, this dataframe will have to be
loaded before any others, so the first access will trigger two
dataframe loads.
cls : ResultsTable class or subclass
Each filename in `fn_dict` will be converted using this class.
"""
self.fn_dict = fn_dict
# this acts as the cache
self._dict = {}
if index_file is not None and index_from is not None:
raise ValueError(
"Only one of `index_file` or `index_from` should be provided."
)
self.index_file = index_file
self.index_from = index_from
if index_file:
self.index = [i.strip() for i in open(index_file)]
else:
self.index | |
<reponame>CommonRoad/commonroad-io<filename>commonroad/visualization/icons.py
"""Module for drawing obstacle icons."""
from typing import Union
import matplotlib as mpl
import numpy as np
import math
from commonroad.geometry.transform import rotate_translate
from commonroad.scenario.obstacle import ObstacleType
__author__ = "<NAME>"
__copyright__ = "TUM Cyber-Physical Systems Group"
__version__ = "2022.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Released"
def _obstacle_icon_assignment():
    """Map each supported obstacle type to its icon-drawing function."""
    return {
        ObstacleType.CAR: draw_car_icon,
        ObstacleType.PARKED_VEHICLE: draw_car_icon,
        ObstacleType.TAXI: draw_car_icon,
        ObstacleType.TRUCK: draw_truck_icon,
        ObstacleType.BUS: draw_bus_icon,
        ObstacleType.BICYCLE: draw_bicycle_icon,
    }
def supported_icons():
    """Return the obstacle types for which an icon drawing function exists."""
    return list(_obstacle_icon_assignment())
def get_obstacle_icon_patch(obstacle_type: ObstacleType, pos_x: Union[int, float], pos_y: Union[int, float],
                            orientation: Union[int, float], vehicle_length: Union[int, float],
                            vehicle_width: Union[int, float], zorder: float = 5, vehicle_color: str = "#ffffff",
                            edgecolor="black", lw=0.5, ):
    """Get a list of mpl.patches to draw an icon specific to `obstacle_type`.

    Raises TypeError for obstacle types without an icon (see
    supported_icons()).
    """
    assignment = _obstacle_icon_assignment()
    if obstacle_type not in assignment:
        error_string = (f"There is no icon available for vehicle type: {str(obstacle_type)}\n\nEnsure to call the "
                        f"get_obstacle_icon_patch(...) function\nonly for vehicle types supported.\nThese can be "
                        f"retrieved by "
                        f"calling commonroad.visualization.icons.supported_icons()")
        raise TypeError(error_string)
    # Dispatch to the drawing function registered for this obstacle type.
    return assignment[obstacle_type](pos_x=pos_x, pos_y=pos_y, orientation=orientation,
                                     vehicle_length=vehicle_length, vehicle_width=vehicle_width, zorder=zorder,
                                     vehicle_color=vehicle_color, edgecolor=edgecolor, lw=lw, )
def _transform_to_global(vertices: list, pos_x: Union[int, float], pos_y: Union[int, float],
                         orientation: Union[int, float], vehicle_length: Union[int, float],
                         vehicle_width: Union[int, float], ):
    """Transform normed icon vertices to global (scenario) coordinates.

    Vertices are given in a normed square (-50..50 in both axes); they are
    scaled to the vehicle dimensions, then rotated and translated to the
    vehicle pose.

    Args:
        vertices: Shape: (N,2)
        pos_x: -
        pos_y: -
        orientation: -
        vehicle_length: -
        vehicle_width: -

    Returns:
        np_array: transformed absolute coordinates in the form (x,y)
        (shape: (N,2))
    """
    # Normalise from the -50..50 square to -0.5..0.5, then scale to the
    # actual vehicle dimensions.
    scaled = np.array(vertices) * 0.01
    scaled[:, 0] = scaled[:, 0] * vehicle_length
    scaled[:, 1] = scaled[:, 1] * vehicle_width
    # Rotate about the origin and translate to the vehicle position.
    return rotate_translate(scaled, np.array([pos_x, pos_y]), orientation)
def draw_bus_icon(pos_x: Union[int, float], pos_y: Union[int, float], orientation: Union[int, float],
                  vehicle_length: Union[int, float] = 12, vehicle_width: Union[int, float] = 2.5, zorder: float = 5,
                  vehicle_color: str = "#ffffff", edgecolor="black", lw=0.5, ):
    """Return the patches of the bus icon.

    Vertices are defined in a normed rectangle
    (-50 <= x <= 50 and -50 <= y <= 50) and then scaled/rotated/translated
    to the vehicle pose by _transform_to_global.
    """
    # Windows are filled with the edge color so they read as dark glass.
    window_color = edgecolor
    # Body and roof detail, defined in the normed square (x: length axis).
    outline = np.array([[-50, -50], [50, -50], [50, 50], [-50, 50]])
    front_window = np.array([[47, -42], [50, -46], [50, 46], [47, 42]])
    right_window = np.array([[-20, -50], [-15, -42], [40, -42], [45, -50]])
    left_window = np.array([[-20, 50], [-15, 42], [40, 42], [45, 50]])
    roof_hatch = np.array([[-40, -27], [-15, -27], [-15, 27], [-40, 27]])
    # Centers of the three ventilation hatches on the roof.
    hatch_circles = [[-35, 0], [-27.5, 0], [-20, 0]]
    roof_line = np.array([[-7, -27], [-7, 27]])
    bus_list = [outline, roof_hatch, roof_line]
    window_list = [front_window, right_window, left_window]
    # Transform every vertex set into global coordinates.
    bus_list = [_transform_to_global(vertices=part, pos_x=pos_x, pos_y=pos_y, orientation=orientation,
                                     vehicle_length=vehicle_length, vehicle_width=vehicle_width, ) for part in bus_list]
    window_list = [_transform_to_global(vertices=window, pos_x=pos_x, pos_y=pos_y, orientation=orientation,
                                        vehicle_length=vehicle_length, vehicle_width=vehicle_width, ) for window in
                   window_list]
    hatch_circles = _transform_to_global(vertices=hatch_circles, pos_x=pos_x, pos_y=pos_y, orientation=orientation,
                                         vehicle_length=vehicle_length, vehicle_width=vehicle_width, )
    # Windows and hatches are drawn one z-level above the body so they are
    # not hidden by the outline polygon.
    bus_list_patches = [mpl.patches.Polygon(part, fc=vehicle_color, ec=edgecolor, lw=lw, zorder=zorder, closed=True, )
                        for part in bus_list]
    window_list_patches = [
        mpl.patches.Polygon(window, fc=window_color, ec=edgecolor, lw=lw, zorder=zorder + 1, closed=True, ) for window
        in window_list]
    hatch_circle_patches = [
        mpl.patches.Circle(point, radius=vehicle_length * 2.5 / 100, facecolor=vehicle_color, zorder=zorder + 1,
                           linewidth=lw, edgecolor=edgecolor, ) for point in hatch_circles]
    return bus_list_patches + window_list_patches + hatch_circle_patches
def draw_truck_icon(pos_x: Union[int, float], pos_y: Union[int, float], orientation: Union[int, float],
                    vehicle_length: Union[int, float] = 10, vehicle_width: Union[int, float] = 2.5, zorder: float = 5,
                    vehicle_color: str = "#ffffff", edgecolor="black", lw=0.5, ):
    """Return the patches of the truck icon.

    Vertices are defined in a normed rectangle
    (-50 <= x <= 50 and -50 <= y <= 50) and then scaled/rotated/translated
    to the vehicle pose by _transform_to_global.
    Credits to <NAME> for defining the vertices.
    """
    # region Define your points in the norm square (-50<=x<=50, -50<=y<=50)
    # x -> length | y -> width
    v_trailer = np.array([[-50, -46], [20, -46], [20, 46], [-50, 46]])
    v_driver_cabin = np.array([[25, -42], [50, -42], [50, 42], [25, 42]])
    v_roof = np.array([[25, -34], [44, -34], [44, 34], [25, 34]])
    # A-pillars connect the roof corners to the cabin corners.
    v_a_col_l = np.array([v_roof[2], v_driver_cabin[2]])
    v_a_col_r = np.array([v_roof[1], v_driver_cabin[1]])
    # Narrowed joint between trailer and cabin.
    v_connection = np.array([v_trailer[2], [v_driver_cabin[3][0], v_driver_cabin[3][1] - 3],
                             [v_driver_cabin[0][0], v_driver_cabin[0][1] + 3], v_trailer[1], ])
    v_mirror_l = np.array([[43, 42], [41, 42], [41, 50], [43, 50]])
    v_mirror_r = np.array([[43, -42], [41, -42], [41, -50], [43, -50]])
    # endregion
    # Transform your coords
    truck = [v_trailer, v_driver_cabin, v_roof, v_a_col_l, v_a_col_r, v_connection, v_mirror_l, v_mirror_r, ]
    truck = [_transform_to_global(vertices=part, pos_x=pos_x, pos_y=pos_y, orientation=orientation,
                                  vehicle_length=vehicle_length, vehicle_width=vehicle_width, ) for part in truck]
    patch_list = [mpl.patches.Polygon(part, fc=vehicle_color, ec=edgecolor, lw=lw, zorder=zorder, closed=True) for part
                  in truck]
    return patch_list
def draw_bicycle_icon(pos_x: Union[int, float], pos_y: Union[int, float], orientation: Union[int, float],
                      vehicle_length: Union[int, float] = 2.5, vehicle_width: Union[int, float] = 0.8,
                      zorder: float = 5, vehicle_color: str = "#ffffff", edgecolor="black", lw=0.5, ):
    """Return the patches of the bicycle icon.

    Vertices are defined in a normed rectangle
    (-50 <= x <= 50 and -50 <= y <= 50) and then scaled/rotated/translated
    to the vehicle pose by _transform_to_global.
    Credits to <NAME> for defining the vertices.
    """
    def elliptic_arc(center, major, minor, start_angle, end_angle):
        """Create the vertices of an elliptic arc (50 samples)."""
        arc = []
        angle_list = np.linspace(start_angle, end_angle, 50)
        for angle in angle_list:
            arc.append([center[0] + major * math.cos(angle), center[1] + minor * math.sin(angle)])
        return np.array(arc)
    # region Define your points in the norm square (-50<=x<=50, -50<=y<=50)
    # x -> length | y -> width
    # Wheels are flat ellipses (top view); full 0..2*pi arcs.
    v_front_wheel = elliptic_arc((30, 0), 20, 6, 0, 2 * np.pi)
    v_rear_wheel = elliptic_arc((-30, 0), 20, 6, 0, 2 * np.pi)
    v_handlebar = np.array([[18, 50], [16, 50], [16, -50], [18, -50]])
    v_frame = np.array([[18, 3], [18, -3], [-30, -3], [-30, 3]])
    # Rider torso: partial arc, closed by the two arms below.
    v_body = elliptic_arc((5, 0), 20, 40, np.pi / 2 + 0.2, np.pi * 3 / 2 - 0.2)
    v_arm_r = np.array([v_body[-1], v_handlebar[3], [v_handlebar[3][0], v_handlebar[3][1] + 7.5],
                        [v_body[-1][0], v_body[-1][1] + 15], ])
    v_arm_l = np.array([[v_body[0][0], v_body[0][1] - 15], [v_handlebar[0][0], v_handlebar[0][1] - 7.5], v_handlebar[0],
                        v_body[0], ])
    v_body = np.concatenate([v_body, v_arm_r, v_arm_l])
    v_head = elliptic_arc((3, 0), 6, 15, 0, 2 * np.pi)
    # endregion
    # Transform your coords
    list_bicycle = [v_front_wheel, v_frame, v_rear_wheel, v_handlebar, v_body, v_head]
    list_bicycle = [_transform_to_global(vertices=part, pos_x=pos_x, pos_y=pos_y, orientation=orientation,
                                         vehicle_length=vehicle_length, vehicle_width=vehicle_width, ) for part in
                    list_bicycle]
    patch_list = [mpl.patches.Polygon(part, fc=vehicle_color, ec=edgecolor, lw=lw, zorder=zorder, closed=True) for part
                  in list_bicycle]
    # Return this patch collection
    return patch_list
def draw_car_icon(pos_x: Union[int, float], pos_y: Union[int, float], orientation: Union[int, float],
vehicle_length: Union[int, float] = 5, vehicle_width: Union[int, float] = 2, zorder: float = 5,
vehicle_color: str = "#ffffff", edgecolor="black", lw=0.5, ):
"""Return the patches of the car icon.
Define vertices in a normed rectangle.
-50 <= x <= 50 and -50 <= y <= 50
"""
window_color = edgecolor
front_window = np.array(
[[-21.36, -38.33], [-23.93, -27.66], [-24.98, -12.88], [-25.28, -0.3], [-25.29, -0.3], [-25.28, -0.04],
[-25.29, 0.22], [-25.28, 0.22], [-24.98, 12.8], [-23.93, 27.58], [-21.36, 38.24], [-14.65, 36.18],
[-7.64, 33.19], [-8.32, 19.16], [-8.62, -0.04], [-8.32, -19.24], [-7.64, -33.27], [-14.65, -36.27], ])
rear_window = np.array(
[[37.68, -34.02], [26.22, -32.15], [27.43, -14.56], [27.8, -0.41], [27.43, 13.74], [26.22, 31.32],
[37.68, 33.19], [40.17, 21.22], [41.3, -0.34], [40.17, -21.91], [40.17, -21.91], ])
left_window = np.array(
[[4.32, -38.7], [25.84, -37.76], [27.35, -36.27], [15.06, -32.71], [-0.1, -32.71], [-13.6, -37.95],
[0.84, -38.78], ])
left_mirror = np.array(
[[-12.62, -49.78], [-13.3, -50.0], [-15.11, -46.63], [-16.78, -41.24], [-17.23, -39.56], [-14.92, -39.45],
[-14.52, -40.68], [-13.97, -41.47], ])
engine_hood = np.array(
[[-21.67, -38.04], [-32.98, -34.96], [-40.1, -29.77], [-46.78, -18.96], [-49.04, 2.65], [-46.78, 19.35],
[-40.33, 29.6], [-32.98, 35.35], [-21.67, 38.44], ])
right_window = np.array(
[[4.32, 38.7], [25.84, 37.76], [27.35, 36.27], [15.06, 32.71], [-0.1, 32.71], [-13.6, 37.95],
[0.84, 38.78], ])
right_mirror = np.array(
[[-12.62, 49.78], [-13.3, 50.0], [-15.11, 46.63], [-16.78, 41.24], [-17.23, 39.56], [-14.92, 39.45],
[-14.52, 40.68], [-13.97, 41.47], ])
outline = np.array(
[[0.78, -45.23], [-38.09, -42.38], [-45.85, -36.08], [-49.16, -15.15], [-49.99, 1.79], [-50.0, 1.79],
[-50.0, 2.0], [-50.0, 2.22], [-49.99, 2.22], [-49.16, 14.1], [-45.85, 35.03], [-38.09, 41.33],
[0.78, 44.18], [30.15, 42.88], [44.88, 37.96], [47.6, 32.77], [49.58, 14.36], [50.0, 3.86], [50.0, 0.14],
[49.58, -15.41], [47.6, -33.82], [44.88, -39.01], [30.15, -43.93], ])
windows = [-front_window, -rear_window, -left_window, -right_window]
car = [-outline, -left_mirror, -right_mirror, -engine_hood]
windows = [_transform_to_global(vertices=window, pos_x=pos_x, pos_y=pos_y, orientation=orientation,
vehicle_length=vehicle_length, vehicle_width=vehicle_width, ) for window in windows]
car = [_transform_to_global(vertices=part, pos_x=pos_x, pos_y=pos_y, orientation=orientation,
vehicle_length=vehicle_length, vehicle_width=vehicle_width, ) for part in car]
window_patches = [
mpl.patches.Polygon(window, fc=window_color, ec=edgecolor, lw=lw, zorder=zorder + 1, closed=True, ) for window
in windows]
car_patches = [mpl.patches.Polygon(part, fc=vehicle_color, ec=edgecolor, lw=lw, zorder=zorder, closed=True) for part
in car]
| |
"Xds": -793, "cXr": -794, "liX": -795, "crX": -796, "iXn": -797,
" zX": -798, "Xya": -799, "nXm": -800, "X2": -801, "kanlIGI yXk ": 802,
"zalarInda Xl": 803, " in Xldugun": 804, "itelerin yX": 805,
"y yerinde X": 806, "Sinin de Xl": 807, "kerinin Xl": 808,
"e dXkumanl": 809, " olan yXk ": 810, "da dXndurd": -811,
"li dXkuman": 812, "zasInda Xl": 813, "en kiSi Xl": -814,
"rdan yXk ": 815, "i kXyunun": -816, "sunda Xn ": 817,
"lis Xldu ": 818, "leri sXku": 819, "san Xldu ": 820,
"n kim Xlm": 821, "nce Xlmus": 822, "ada Xldu ": 823,
"SInda yXk": 824, " ve gXre ": -825, "sitesi yX": 826,
"rlanan Xn": 827, "cuGun Xld": 828, "arak Xlur": 829,
"le kXyu ": -830, "ken kXyu": 831, "san Xlmu": 832,
"kan kXk ": -833, "ri kXyun": 834, "amlIk kX": -835,
"Glarla X": 836, "un kXyu ": -837, "in kXruk": 838,
"ilen Xn ": 839, "kXpuklu ": 840, "yIlI yXk": 841,
"mada Xn ": 842, "Cilen yX": 843, "elere Xn": 844,
"dUn Xlmu": 845, "Olen Xlu": 846, "rtIk Xld": 847,
"imin Xnu": 848, "edi Xn ": -849, "iyi Xn ": -850,
"kle Xn ": 851, "uzla gX": 852, "rda Xn ": 853,
"sun Xn ": -854, "eve dXn": 855, "anca gX": 856,
"Ica yXk": 857, "bXldur ": -858, "hep Xn ": 859, " Xldug": 860,
" kXku ": 861, " gXlov": 862, "7 Xrt ": 863, "sXylus": -864,
"gut Xn": 865, "Xkumuz": -866, "l bXn ": 867, "i Xpen": 868,
"Xpukte": 869, " tXban": 870, "kXruk ": 871, "l o bX": -872,
"nal Xn": 873, "dur gX": 874, "tek Xp": 875, " kXran": 876,
" Xgunl": 877, "r CXkt": -878, "m tXr ": 879, "uat Xr": -880,
"fer gX": 881, "kXni ": 882, "Xnig ": 883, "sXmek": 884,
" Xrgo": 885, "sXkto": 886, "lstrX": -887, "brXta": 888,
"G Xn ": 889, " dXsu": 890, "CXrus": 891, "3 Xgu": 892,
"kXrug": 893, "Xrteg": -894, "gXlko": 895, "Xgusk": 896,
" Xsta": 897, "Xreck": -898, "sistX": 899, " Xter": 900,
"Xrdog": 901, "ntrXn": 902, "Xlsen": -903, "i gaX": 904,
"rkXz ": 905, " tXs ": 906, "duktX": 907, "Xrmuk": 908,
"Xpus ": -909, "tUnkX": 910, "Xnsez": 911, "dikdX": 912,
" akXz": 913, "Xlero": -914, " tXb ": 915, "Xmurt": -916,
"onglX": 917, "ardXn": -918, " o X ": 919, "Xrtec": 920,
"Xcuna": -921, " Xvus": -922, " inXn": 923, "gesX": 924,
"ndXv": 925, "gXss": -926, "zakX": -927, "hXh ": 928,
" mfX": 929, " Xhc": 930, " asX": -931, " Xgm": 932,
"Xge ": 933, "hXch": 934, "yyXr": 935, "dXro": -936,
"anXn": -937, "dXrf": -938, "Xng ": -939, "Xrla": -940,
"Xgra": -941, "Xsev": -942, "gXf": -943, "gXh": -944,
"Xiz": -945, "hXz": -946, "zgX": 947, "Xyg": -948,
"ana nIn Xl": 949, "e sonra Xl": 950, "rken Xldu ": 951,
"elerin Xnu": 952, "izinden X": 953, "ler kXyun": 954,
"ana Xldu ": 955, " In Xldug": 956, "Xndurulme": 957, "kIn dXgus": 958,
"nserden X": 959, "Once yXk ": 960, "celiyle X": 961, "metin yXk": 962,
"ve Xldu ": 963, "zla kXyu": -964, "a da Xn ": 965, "yla yXk ": 966,
"nra yXk ": 967, "Ore yXk ": 968, "ken yXk ": 969, " In yXk ": 970,
"bir yXk ": 971, "nIn Xnu ": 972, " kXrle ": -973, "ani Xn ": -974,
"zar gXl": 975, "eman gX": 976, "nde Xn ": 977, "Ina Xn ": 978,
"k dXkuy": -979, "yas gX": 980, "sli Xn": -981, " dXksu": 982,
"nI sX ": 983, "Xpukle": 984, "kXrdov": 985, " kXk ": -986,
"bat gX": 987, "pardXs": 988, "Xdunda": -989, "Un Xn ": 990,
"a dXn ": 991, "vinatX": -992, "ora kX": -993, " Xver ": -994,
"bXrtu": 995, "Xkumc": 996, "razXz": 997, "ertXr": 998, "gXmes": -999,
"eryXn": 1000, " d Xr": -1001, "Xruko": 1002, "kXstl": 1003,
"the X": -1004, " sXbu": 1005, "Xnces": 1006, "kXyk": 1007,
" lX ": 1008, "nrIX": 1009, "IgXl": 1010, "Xyli": 1011,
" Xdi": -1012, "CXle": 1013, "Xcek": 1014, "Xnoz": 1015,
"Xnuv": 1016, "dXnl": -1017, "dXra": -1019, "Xnez": -1020,
"Xrto": -1021, " tXy": -1022, "Xzla": -1023, "Xnta": -1024,
" fXr": -1025, "Xley": -1026, "Xzle": 1027, "gXst": 1028,
"Xyy": 1029, "SXo": 1030, "cXz": -1031, "gkX": -1032,
"CgX": 1033, "Xt ": -1034, "dXy": -1035, "Xbb": -1036,
"fXn": -1037, " jX": -1038, "babasI Xlmu": 1039,
"Xlumsuzdur": 1040, "arak Xlmus": 1041, "asInda Xn ": 1042,
"sunda yXk ": 1043, "Cin Xldu ": 1044, "ucu Xldu ": 1045,
" ile yXk ": 1046, " iCin yXk": 1047, "ClIktan X": 1048,
"de kXyu ": -1049, "iden Xn ": 1050, "Glam yXk": 1051,
"I sXkup": 1052, " Xrdugu": 1053, "rI dXnd": -1054,
" en kXy": -1055, " hXrt ": 1056, "pXrsum": 1057,
" Xluyu": 1058, " Xker": 1059, "presX": 1060,
" Xnis": 1061, "Xlusp": -1062, "gXne ": -1063, "dXnay": 1064,
" Xkse": 1065, "nkXr ": 1066, "tXzum": 1067, " sXre": -1068,
"Xmuri": -1069, "rXves": 1070, "gXmez": -1071, "rkXru": 1072,
"etXr ": 1073, " Xtem": 1074, "dXven": 1075, "CXpur": -1076,
"bu Xn ": -1077, "u Xn ": 1078, " Xtuy": 1079, "dXrdu": 1080,
"Xngor": 1081, "iXk ": 1082, "kXko": -1083, "Xrod": -1084,
"Xmea": 1085, "bXgr": 1086, "dXri": -1087, "gXp ": -1088,
" ihX": 1089, "Xdeo": -1090, "Xdug": -1091, "izXr": 1092,
"rXme": -1093, "Xska": -1094, "fiX": -1095, "Xti": -1096,
"Xbi": -1097, "Xrr": -1098, "yaSInda Xldu ": 1099,
"an kXyun ": 1100, " kiSi Xlu": 1101, " yeni yXk": 1102,
"ogan gX": 1103, "ler Xn ": 1104, "laya gX": 1105, "dnan CX": -1106,
" kXyu ": -1107, "Xlmusle": 1108, "orce X": -1109,
"un Xn ": 1110, "Xburlu": -1111, "baS dX": 1112,
"ye dXn": 1113, "Xverm": -1114, "Xdesa": -1115,
" ikX ": 1116, "kXktu": -1117, " Xpey": 1118, "Xngoz": 1119,
"IlXte": 1120, "rXmiy": 1121, " Xrer": 1122, "Xkunt": 1123,
"Xcunm": -1124, "sXvd": 1125, "hXne": -1126, "Xvid": -1127,
"jisX": 1128, " SXy": 1129, "gXru": 1130, "Xlv": -1131,
"OnX": 1132, "sanlar Xlu": 1133, " Xlunun ": 1134, " kale Xn": 1135,
"Xldugune": 1136, "Xldukle": 1137, "geri dX": 1138, "Xtariu": -1139,
"rXlant": 1140, " Xngur": 1141, "Xldule": 1142, "Xteler": 1143,
"Xrdub": 1144, "schXn": 1145, "l Xke": 1146, "kXrpe": 1147,
"Xguto": 1148, "Xgutl": 1149, "Xlcek": 1150, "Xntma": -1151,
"Xkule": 1152, "jXrk ": 1153, "lpXz": 1154, "OrdX": 1155,
"Xldo": -1156, " Xrl": -1157, "gXv ": -1158, "rkXy": 1159,
"Xvgu": 1160, "agXl": 1161, "fXz": -1162, "Xsp": -1163,
"Xlk": -1164, "Xdy": -1165, "a dXnduruld": 1166,
" van gXl": 1167, "nin Xnu ": 1168, " gXre ": -1169,
"sXnuyo": 1170, "k pXrc": 1171, "la Xn ": 1172,
"Xkulup": 1173, "Xrunde": 1174, " Xgud": 1175, "kXpug": 1176,
" m X ": 1177, "minXr": 1178, "Xnesc": -1179, "Xtary": -1180,
" Xnge": 1181, "Xleg": -1182, "yXng": -1183, "CXml": 1184,
"dXrn": -1185, "sXvu": 1186, "Xniz": -1187, "erXn": -1188,
"larak Xn ": 1189, "l kXyun ": 1190, " ile Xn ": 1191,
"n sXkup": 1192, "lIk Xn ": 1193, " kXyun": 1194,
"Xnkolo": -1195, " Xnsec": 1196, "173 X": -1197,
"kXper": -1198, "mirXz": 1199, " Xsyo": -1200,
"Xksur": 1201, "Xkpit": -1202, "Xregi": 1203,
" Xnel": 1204, "kXyis": 1205, "sXgus": 1206,
"kkeX": 1207, "CXme": 1208, "Xnev": -1209,
"SbXl": 1210, " fkX": 1211, "efkX": -1212,
"dXnk": -1213, "Xnec": 1214, "IrXz": 1215,
"Xdo": -1216, " pX": -1217, "Xno": -1218, "r kXyunu": -1219,
"dXndurur": 1220, "n kXku ": 1221, "ik Xn ": 1222, "kOk sX": 1223,
"isarX": 1224, "Xlude": 1225, "schrX": 1226, " Xruy": 1227,
" Xrfi": 1228, " mXsy": 1229, "bXgur": 1230, " Xkke": 1231,
"CXlun": 1232, " yXne": 1233, "nSXr": 1234, "fXtr": 1235,
"syXd": 1236, "OsyX": 1237, "Xnma": -1238, "odX": -1239, "gXk": 1240,
"arak Xld": 1241, " van gX": -1242, "tuz gX": 1243, " Xperi": 1244,
"Xkulu ": -1245, "opXrta": 1246, "trXst": 1247, " Xylu": -1248,
"dXkup": 1249, " Xlse": 1250, "Xdess": -1251, "Xkuml": 1252,
"rXmor": 1253, "gXve": -1254, "Xkuk": 1255, "Xbek": 1256, "Xndurule": 1257,
"Si Xlmu": 1258, "wim kX": | |
<gh_stars>10-100
import pandas as pd
from xlsxwriter.format import Format
class GPTable:
"""
A Good Practice Table. Stores a table and metadata for writing a table
to excel.
Attributes
----------
table : pandas.DataFrame
table to be written to an Excel workbook
title : str
description of the table
subtitles : list
subtitles as a list of strings
scope : str
description of scope/basis of data in table
units : str or dict
units used in all (str) or each (dict) column of `table`
legend : list
descriptions of special notation used in `table`
annotations : dict
notes that are referenced in header or table elements (excluding data)
notes : list
notes that are not referenced
index_columns : dict
mapping an index level to a 0-indexed column as {level: column}.
Default is a level two index in the first column ({2: 0}).
additional_formatting : dict
table-specific formatting for columns, rows or individual cells
include_index_column_headings : bool
indicate whether index column headings should be retained in output
"""
def __init__(self,
table,
title,
scope,
units,
source,
subtitles=[],
legend=[],
annotations={},
notes=[],
index_columns={2:0},
additional_formatting=[],
include_index_column_headings=False
):
# Attributes
self.title = None
self.subtitles = []
self.scope = None
self.units = None # str or {units (str):column index (int)} dict
self._VALID_INDEX_LEVELS = [1, 2, 3]
self.index_levels = 0
self.index_columns = {} # {index level (int): column index (int)}
self._column_headings = set() # Non-index column headings
self.table = pd.DataFrame()
self.source = None
self.legend = []
self.annotations = {}
self.notes = []
self.additional_formatting = []
self.include_index_column_headings=include_index_column_headings
# Valid format labels from XlsxWriter
self._valid_format_labels = [
attr.replace("set_", "")
for attr in Format().__dir__()
if attr.startswith('set_')
and callable(getattr(Format(), attr))
]
# Call methods to set attributes
self.set_title(title)
self.set_subtitles(subtitles)
self.set_scope(scope)
self.set_table(table, index_columns, units)
self.set_source(source)
self.set_legend(legend)
self.set_annotations(annotations)
self.set_notes(notes)
self.set_additional_formatting(additional_formatting)
def set_table(self, new_table, new_index_columns = None, new_units = None):
"""
Set the `table`, `index_columns` and 'units' attributes. Overwrites
existing values for these attributes.
"""
if not isinstance(new_table, pd.DataFrame):
raise TypeError("`table` must be a pandas DataFrame")
default_index = pd.Index(range(new_table.shape[0]))
if not all(new_table.index == default_index) and not new_table.empty:
msg = ("`table` index must not contain index data. It can be reset"
" before adding to a GPTable (see DataFrame.reset_index())."
" Please ensure that index data is stored in the first 1-3"
" columns of `table` and is indicated in `index_columns`.")
raise ValueError(msg)
self.table = new_table.reset_index(drop=True)
if new_index_columns is None:
new_index_columns = self.index_columns
self.set_index_columns(new_index_columns)
if new_units is None:
new_units = self.units
self.set_units(new_units)
def set_index_columns(self, new_index_columns):
"""
Set the `index_columns` attribute. Overwrites any existing values.
A dict must be supplied. This dict should map index level to a
single 0-indexed column number. All other columns will be considered
as data columns.
"""
if isinstance(new_index_columns, dict):
# Check if levels and values are valid
valid_levels = all(level in self._VALID_INDEX_LEVELS for level in new_index_columns.keys())
if not valid_levels:
msg = ("`index_columns` dictionary keys must be valid index"
f" levels: {self._VALID_INDEX_LEVELS}")
raise ValueError(msg)
if not all(isinstance(col, int) for col in new_index_columns.values()):
# Convert col name to numeric index
for key, value in new_index_columns.items():
col_iloc = self.table.columns.get_loc(value)
new_index_columns.update({key: col_iloc})
column_indexes = [col for col in new_index_columns.values()]
valid_columns = all(self._valid_column_index(col) for col in column_indexes)
if not valid_columns:
msg = ("Out of range - `index_columns` dictionary values must"
"be valid, 0-indexed column numbers")
raise ValueError(msg)
self.index_levels = len(new_index_columns.keys())
self.index_columns = new_index_columns
self._set_column_headings()
else:
msg = ("`index_columns` must be a dict mapping a valid index level"
" to a 0-indexed column number")
raise ValueError(msg)
def _valid_column_index(self, column_index):
"""
Check if `column_index` is valid, given the `table` shape.
"""
return column_index in range(self.table.shape[1])
def _set_column_headings(self):
"""
Sets the `column_headings` attribute to the set of column indexes that
are not assigned to `index_columns`.
"""
index_cols = set(self.index_columns.values())
self._column_headings = {x for x in range(self.table.shape[1])} - index_cols
def set_title(self, new_title):
"""
Set the `title` attribute.
"""
self._validate_text(new_title, "title")
self.title = new_title
def add_subtitle(self, new_subtitle):
"""
Add a single subtitle to the existing list of `subtitles`.
"""
self._validate_text(new_subtitle, "subtitles")
self.subtitles.append(new_subtitle)
def set_subtitles(self, new_subtitles, overwrite=True):
"""
Set a list of subtitles to the `subtitles` attribute. Overwrites
existing ist of subtitles by default. If `overwrite` is False, new list
is appended to existing list of subtitles.
"""
if new_subtitles is None:
new_subtitles = []
if not isinstance(new_subtitles, (list)):
msg =("`subtitles` must be provided as a list containing strings"
" and/or lists of strings and format dictionaries"
" (rich text)")
raise TypeError(msg)
for text in new_subtitles:
self._validate_text(text, "subtitles")
if overwrite:
self.subtitles = new_subtitles
else:
self.subtitles += new_subtitles
def set_scope(self, new_scope):
"""
Set the `scope` attribute.
"""
self._validate_text(new_scope, "scope")
self.scope = new_scope
    def set_units(self, new_units):
        """
        Set the `units` attribute using the supplied str, list or dictionary.
        Units supplied as a list must match the length of column headings,
        excluding index columns. Units as a dict should be in the format
        {column: units_text}. Column can be column name or 0-indexed column
        number in `table`. Index columns cannot have units.

        Internally, a dict is converted to a list aligned with the non-index
        (data) columns; positions without units remain None.
        """
        if isinstance(new_units, str) or new_units is None:
            self._validate_text(new_units, "units")
        elif isinstance(new_units, list):
            self._validate_text(new_units, "units")
            # Rich-text units (lists containing format dicts) are exempt
            # from the length check.
            rich_text = any(type(_) == dict for _ in new_units)
            if len(new_units) != len(self._column_headings) and not rich_text:
                msg = ("length of `units` list must match the number of"
                       " non-index columns in the `table`")
                raise ValueError(msg)
        elif isinstance(new_units, dict) and len(new_units) > 0:
            # One slot per data (non-index) column.
            units_list = [None for _ in range(len(self._column_headings))]
            for key, value in new_units.items():
                self._validate_text(value, "units")
                if not isinstance(key, int):
                    # Column given by name: convert to positional index.
                    iloc = self.table.columns.get_loc(key)
                else:
                    iloc = key
                # NOTE(review): this offset assumes the index columns are the
                # first `index_levels` columns of `table` -- confirm for
                # tables with index columns elsewhere.
                units_list[iloc - self.index_levels] = value
            new_units = units_list
        else:
            msg = ("`units` attribute must be a string, list or dictionary"
                   " ({column: units_text})")
            raise TypeError(msg)
        self.units = new_units
def set_source(self, new_source):
    """Validate the supplied string and assign it to the `source` attribute."""
    self._validate_text(new_source, "source")
    self.source = new_source
def add_legend(self, new_legend):
    """
    Add a single legend entry to the existing `legend` list.
    """
    self._validate_text(new_legend, "legend")
    # BUG FIX: this previously appended to `self.subtitles`, silently
    # placing legend entries in the subtitles list. Append to
    # `self.legend`, as the docstring and the sibling `set_legend`
    # method intend.
    self.legend.append(new_legend)
def set_legend(self, new_legend, overwrite=True):
    """
    Assign a list of legend entries to the `legend` attribute.

    Existing entries are replaced by default. When `overwrite` is False,
    the new entries are appended to the current `legend` list. Passing
    None clears the legend.
    """
    if new_legend is None:
        self.legend = []
        return
    if not isinstance(new_legend, list):
        raise TypeError("`legend` must be provided as a list of text elements")
    # Validate every entry before mutating the attribute.
    for entry in new_legend:
        self._validate_text(entry, "legend")
    if overwrite:
        self.legend = new_legend
    else:
        self.legend += new_legend
def add_annotation(self, new_annotation):
    """
    Merge one or more annotation entries into the existing
    `annotations` dictionary.
    """
    if not isinstance(new_annotation, dict):
        raise TypeError("`annotations` entries must be dictionaries")
    # Only the note texts (values) require validation.
    for note in new_annotation.values():
        self._validate_text(note, "annotations")
    self.annotations.update(new_annotation)
def set_annotations(self, new_annotations, overwrite=True):
    """
    Set a dictionary of notes to the `annotations` attribute.

    Overwrites the existing `annotations` dict by default. If `overwrite`
    is False, new entries are used to update the `annotations` dict.
    Passing None clears the annotations.
    """
    # Consistency fix: the sibling setters (`set_legend`, `set_notes`)
    # accept None as "clear"; previously None raised TypeError here.
    if new_annotations is None:
        self.annotations = {}
        return
    if not isinstance(new_annotations, dict):
        msg = ("annotations must be provided as a dictionary of"
               " {reference: note}")
        raise TypeError(msg)
    if not all(isinstance(key, str) for key in new_annotations.keys()):
        raise TypeError("`annotations` keys must be strings")
    for text in new_annotations.values():
        self._validate_text(text, "annotations")
    if overwrite:
        self.annotations = new_annotations
    else:
        self.annotations.update(new_annotations)
def add_note(self, new_note):
    """Validate a single note and append it to the `notes` list."""
    self._validate_text(new_note, "notes")
    self.notes.append(new_note)
def set_notes(self, new_notes, overwrite=True):
    """
    Assign a list of notes to the `notes` attribute.

    The existing `notes` list is replaced by default. When `overwrite`
    is False, the new entries are appended instead. Passing None clears
    the notes.
    """
    if new_notes is None:
        self.notes = []
        return
    if not isinstance(new_notes, list):
        raise TypeError("`notes` must be a list of text elements")
    # Validate every entry before mutating the attribute.
    for entry in new_notes:
        self._validate_text(entry, "notes")
    if overwrite:
        self.notes = new_notes
    else:
        self.notes += new_notes
def set_additional_formatting(self, new_formatting):
"""
Set a dictionary of additional formatting to be applied to this table.
"""
if not isinstance(new_formatting, list):
msg = ("`additional_formatting` must be a list | |
cand)
return larger
def func_7b0a748f0d1748a7826768d5660e8fea(budget, cc, placed):
    """Auto-extracted variant of a Code Jam roulette (37-slot wheel) search loop.

    NOTE(review): this function cannot execute as written — `queue`, `ret`
    and `get_expected` are undefined in this scope (the first statement
    raises NameError), `xrange` is Python 2 only, and
    `ret = max(ret, cand)('Case #%d: ...')` attempts to call a float.
    Code preserved verbatim; reconcile with the original solution before
    use. Returns `can_replicate` from the last loop iteration.
    """
    queue = sorted(set(queue))  # NOTE(review): `queue` is unbound here
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        # cost to raise every bet (37 slots total) up to `lowest`
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):  # NOTE(review): Python 2 `xrange`
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)('Case #%d: %.10lf' % (cc + 1, ret))  # NOTE(review): calls a float
    return can_replicate
def func_10daafd3c0f648b48d0c629b51d9a68d(budget, cc, placed):
    """Auto-extracted variant of a Code Jam roulette (37-slot wheel) search loop.

    NOTE(review): this function cannot execute as written — `queue`, `ret`
    and `get_expected` are undefined in this scope (the first statement
    raises NameError), `xrange` is Python 2 only, and
    `ret = max(ret, cand)('Case #%d: ...')` attempts to call a float.
    Code preserved verbatim; reconcile with the original solution before
    use. Returns `needed_budget` from the last loop iteration.
    """
    queue = sorted(set(queue))  # NOTE(review): `queue` is unbound here
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        # cost to raise every bet (37 slots total) up to `lowest`
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):  # NOTE(review): Python 2 `xrange`
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)('Case #%d: %.10lf' % (cc + 1, ret))  # NOTE(review): calls a float
    return needed_budget
def func_c7d838ac8c7a4a8d9d8e58f411210adb(budget, cc, placed):
    """Auto-extracted variant of a Code Jam roulette (37-slot wheel) search loop.

    NOTE(review): this function cannot execute as written — `queue`, `ret`
    and `get_expected` are undefined in this scope (the first statement
    raises NameError), `xrange` is Python 2 only, and
    `ret = max(ret, cand)('Case #%d: ...')` attempts to call a float.
    Code preserved verbatim; reconcile with the original solution before
    use. Returns `p`, the last element iterated from `placed`.
    """
    queue = sorted(set(queue))  # NOTE(review): `queue` is unbound here
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        # cost to raise every bet (37 slots total) up to `lowest`
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):  # NOTE(review): Python 2 `xrange`
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)('Case #%d: %.10lf' % (cc + 1, ret))  # NOTE(review): calls a float
    return p
def func_d17f06f02f8a459ab2621e9229925209(budget, cc, placed):
    """Auto-extracted variant of a Code Jam roulette (37-slot wheel) search loop.

    NOTE(review): this function cannot execute as written — `queue`, `ret`
    and `get_expected` are undefined in this scope (the first statement
    raises NameError), `xrange` is Python 2 only, and
    `ret = max(ret, cand)('Case #%d: ...')` attempts to call a float.
    Code preserved verbatim; reconcile with the original solution before
    use. Returns `partial` from the last loop iteration.
    """
    queue = sorted(set(queue))  # NOTE(review): `queue` is unbound here
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        # cost to raise every bet (37 slots total) up to `lowest`
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):  # NOTE(review): Python 2 `xrange`
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)('Case #%d: %.10lf' % (cc + 1, ret))  # NOTE(review): calls a float
    return partial
def func_193c4bae9b8c4b288a64ffceaa1357da(budget, cc, placed):
    """Auto-extracted variant of a Code Jam roulette (37-slot wheel) search loop.

    NOTE(review): this function cannot execute as written — `queue`, `ret`
    and `get_expected` are undefined in this scope (the first statement
    raises NameError), `xrange` is Python 2 only, and
    `ret = max(ret, cand)('Case #%d: ...')` attempts to call a float.
    Code preserved verbatim; reconcile with the original solution before
    use. Returns `remaining_budget` from the last loop iteration.
    """
    queue = sorted(set(queue))  # NOTE(review): `queue` is unbound here
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        # cost to raise every bet (37 slots total) up to `lowest`
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):  # NOTE(review): Python 2 `xrange`
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)('Case #%d: %.10lf' % (cc + 1, ret))  # NOTE(review): calls a float
    return remaining_budget
def func_f2cea0e383194413943eeeb713f132a0(budget, cc, placed):
    """Auto-extracted variant of a Code Jam roulette (37-slot wheel) search loop.

    NOTE(review): this function cannot execute as written — `queue`, `ret`
    and `get_expected` are undefined in this scope (the first statement
    raises NameError), `xrange` is Python 2 only, and
    `ret = max(ret, cand)('Case #%d: ...')` attempts to call a float.
    Code preserved verbatim; reconcile with the original solution before
    use. Returns `larger` from the last loop iteration.
    """
    queue = sorted(set(queue))  # NOTE(review): `queue` is unbound here
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        # cost to raise every bet (37 slots total) up to `lowest`
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):  # NOTE(review): Python 2 `xrange`
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)('Case #%d: %.10lf' % (cc + 1, ret))  # NOTE(review): calls a float
    return larger
def func_79e6f9e552d54076b8120b78f1b74c16(budget, cc, placed):
    """Auto-extracted variant of a Code Jam roulette (37-slot wheel) search loop.

    NOTE(review): this function cannot execute as written — `queue`, `ret`
    and `get_expected` are undefined in this scope (the first statement
    raises NameError), `xrange` is Python 2 only, and
    `ret = max(ret, cand)('Case #%d: ...')` attempts to call a float.
    Code preserved verbatim; reconcile with the original solution before
    use. Returns `queue` (empty after the while loop drains it).
    """
    queue = sorted(set(queue))  # NOTE(review): `queue` is unbound here
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        # cost to raise every bet (37 slots total) up to `lowest`
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
        else:
            can_replicate = remaining_budget / lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in xrange(0, min(remaining_budget, partial) + 1):  # NOTE(review): Python 2 `xrange`
            cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
            ret = max(ret, cand)('Case #%d: %.10lf' % (cc + 1, ret))  # NOTE(review): calls a float
    return queue
def func_acb5e7a6c4d542c796f44042db9c89b8(budget, cc, placed):
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
| |
16:46:00+00:00 56644.02 56663.43 56605.17 56605.18 27.573654
2021-05-02 16:47:00+00:00 56605.18 56657.55 56605.17 56625.76 14.615437
2021-05-02 16:48:00+00:00 56625.75 56643.60 56614.32 56623.01 5.895843
Close time Quote volume \\
Open time
2021-05-02 14:48:00+00:00 2021-05-02 14:48:59.999000+00:00 1.633534e+06
2021-05-02 14:49:00+00:00 2021-05-02 14:49:59.999000+00:00 1.122519e+06
2021-05-02 14:50:00+00:00 2021-05-02 14:50:59.999000+00:00 1.317969e+06
... ... ...
2021-05-02 16:46:00+00:00 2021-05-02 16:46:59.999000+00:00 1.561548e+06
2021-05-02 16:47:00+00:00 2021-05-02 16:47:59.999000+00:00 8.276017e+05
2021-05-02 16:48:00+00:00 2021-05-02 16:48:59.999000+00:00 3.338702e+05
Number of trades Taker base volume \\
Open time
2021-05-02 14:48:00+00:00 991 13.771152
2021-05-02 14:49:00+00:00 816 5.981942
2021-05-02 14:50:00+00:00 1086 10.813757
... ... ...
2021-05-02 16:46:00+00:00 916 14.869411
2021-05-02 16:47:00+00:00 912 7.778489
2021-05-02 16:48:00+00:00 308 2.358130
Taker quote volume
Open time
2021-05-02 14:48:00+00:00 7.835391e+05
2021-05-02 14:49:00+00:00 3.402170e+05
2021-05-02 14:50:00+00:00 6.156418e+05
... ...
2021-05-02 16:46:00+00:00 8.421173e+05
2021-05-02 16:47:00+00:00 4.404362e+05
2021-05-02 16:48:00+00:00 1.335474e+05
[121 rows x 10 columns]
```
"""
@classmethod
def download(cls: tp.Type[BinanceDataT],
             symbols: tp.Labels,
             client: tp.Optional["ClientT"] = None,
             **kwargs) -> BinanceDataT:
    """Override `vectorbt.data.base.Data.download` to instantiate a Binance client."""
    from binance.client import Client
    from vectorbt._settings import settings

    binance_cfg = settings['data']['binance']
    # Split the Client-constructor keywords out of **kwargs, then layer
    # them over the global Binance settings.
    client_kwargs = {
        name: kwargs.pop(name)
        for name in get_func_kwargs(Client)
        if name in kwargs
    }
    client_kwargs = merge_dicts(binance_cfg, client_kwargs)
    if client is None:
        client = Client(**client_kwargs)
    return super(BinanceData, cls).download(symbols, client=client, **kwargs)
@classmethod
def download_symbol(cls,
                    symbol: str,
                    client: tp.Optional["ClientT"] = None,
                    interval: str = '1d',
                    start: tp.DatetimeLike = 0,
                    end: tp.DatetimeLike = 'now UTC',
                    delay: tp.Optional[float] = 500,
                    limit: int = 500,
                    show_progress: bool = True,
                    tqdm_kwargs: tp.KwargsLike = None) -> tp.Frame:
    """Download the symbol.

    Args:
        symbol (str): Symbol.
        client (binance.client.Client): Binance client of type `binance.client.Client`.
        interval (str): Kline interval.

            See `binance.enums`.
        start (any): Start datetime.

            See `vectorbt.utils.datetime_.to_tzaware_datetime`.
        end (any): End datetime.

            See `vectorbt.utils.datetime_.to_tzaware_datetime`.
        delay (float): Time to sleep after each request (in milliseconds).
        limit (int): The maximum number of returned items.
        show_progress (bool): Whether to show the progress bar.
        tqdm_kwargs (dict): Keyword arguments passed to `tqdm`.

    For defaults, see `data.binance` in `vectorbt._settings.settings`.
    """
    if client is None:
        raise ValueError("client must be provided")
    if tqdm_kwargs is None:
        tqdm_kwargs = {}
    # Establish the timestamps
    start_ts = datetime_to_ms(to_tzaware_datetime(start, tz=get_utc_tz()))
    try:
        # Fetch the very first candle so `start` can be clamped to the
        # earliest timestamp the exchange actually has data for.
        first_data = client.get_klines(
            symbol=symbol,
            interval=interval,
            limit=1,
            startTime=0,
            endTime=None
        )
        first_valid_ts = first_data[0][0]
        next_start_ts = start_ts = max(start_ts, first_valid_ts)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Fall back to the requested start.
        next_start_ts = start_ts
    end_ts = datetime_to_ms(to_tzaware_datetime(end, tz=get_utc_tz()))

    def _ts_to_str(ts: tp.DatetimeLike) -> str:
        # Human-readable timestamp for the progress bar description.
        return str(pd.Timestamp(to_tzaware_datetime(ts, tz=get_utc_tz())))

    # Iteratively collect the data
    data: tp.List[list] = []
    with tqdm(disable=not show_progress, **tqdm_kwargs) as pbar:
        pbar.set_description(_ts_to_str(start_ts))
        while True:
            # Fetch the klines for the next interval
            next_data = client.get_klines(
                symbol=symbol,
                interval=interval,
                limit=limit,
                startTime=next_start_ts,
                endTime=end_ts
            )
            if len(data) > 0:
                # Exclude the candle at `next_start_ts` itself to avoid
                # duplicating the last row of the previous batch.
                next_data = list(filter(lambda d: next_start_ts < d[0] < end_ts, next_data))
            else:
                next_data = list(filter(lambda d: d[0] < end_ts, next_data))
            # Update the timestamps and the progress bar
            if not len(next_data):
                break
            data += next_data
            pbar.set_description("{} - {}".format(
                _ts_to_str(start_ts),
                _ts_to_str(next_data[-1][0])
            ))
            pbar.update(1)
            next_start_ts = next_data[-1][0]
            if delay is not None:
                time.sleep(delay / 1000)  # be kind to api
    # Convert data to a DataFrame
    df = pd.DataFrame(data, columns=[
        'Open time',
        'Open',
        'High',
        'Low',
        'Close',
        'Volume',
        'Close time',
        'Quote volume',
        'Number of trades',
        'Taker base volume',
        'Taker quote volume',
        'Ignore'
    ])
    df.index = pd.to_datetime(df['Open time'], unit='ms', utc=True)
    del df['Open time']
    # Binance returns all numeric fields as strings; coerce explicitly.
    for col in ('Open', 'High', 'Low', 'Close', 'Volume',
                'Quote volume', 'Taker base volume', 'Taker quote volume'):
        df[col] = df[col].astype(float)
    df['Close time'] = pd.to_datetime(df['Close time'], unit='ms', utc=True)
    df['Number of trades'] = df['Number of trades'].astype(int)
    del df['Ignore']
    return df
def update_symbol(self, symbol: str, **kwargs) -> tp.Frame:
    """Update the symbol.

    `**kwargs` will override keyword arguments passed to `BinanceData.download_symbol`."""
    # Start from the stored per-symbol download settings, resume at the
    # last downloaded timestamp, and suppress the progress bar.
    defaults = self.select_symbol_kwargs(symbol, self.download_kwargs)
    defaults['start'] = self.data[symbol].index[-1]
    defaults['show_progress'] = False
    return self.download_symbol(symbol, **merge_dicts(defaults, kwargs))
class CCXTData(Data):
"""`Data` for data coming from `ccxt`.
Usage:
* Fetch the 1-minute data of the last 2 hours, wait 1 minute, and update:
```pycon
>>> import vectorbt as vbt
>>> ccxt_data = vbt.CCXTData.download(
... "BTC/USDT",
... start='2 hours ago UTC',
... end='now UTC',
... timeframe='1m'
... )
>>> ccxt_data.get()
2021-05-02 14:50:26.305000+00:00 - 2021-05-02 16:50:00+00:00: : 1it [00:00, 1.96it/s]
Open High Low Close Volume
Open time
2021-05-02 14:51:00+00:00 56934.70 56964.59 56910.00 56948.99 22.158319
2021-05-02 14:52:00+00:00 56948.99 56999.00 56940.04 56977.62 46.958464
2021-05-02 14:53:00+00:00 56977.61 56987.09 56882.98 56885.42 27.752200
... ... ... ... ... ...
2021-05-02 16:48:00+00:00 56625.75 56643.60 56595.47 56596.01 15.452510
2021-05-02 16:49:00+00:00 56596.00 56664.14 56596.00 56640.35 12.777475
2021-05-02 16:50:00+00:00 56640.35 56675.82 56640.35 56670.65 6.882321
[120 rows x 5 columns]
>>> import time
>>> time.sleep(60)
>>> ccxt_data = ccxt_data.update()
>>> ccxt_data.get()
Open High Low Close Volume
Open time
2021-05-02 14:51:00+00:00 56934.70 56964.59 56910.00 56948.99 22.158319
2021-05-02 14:52:00+00:00 56948.99 56999.00 56940.04 56977.62 46.958464
2021-05-02 14:53:00+00:00 56977.61 56987.09 56882.98 56885.42 27.752200
... ... ... ... ... ...
2021-05-02 16:49:00+00:00 56596.00 56664.14 56596.00 56640.35 12.777475
2021-05-02 16:50:00+00:00 56640.35 56689.99 56640.35 56678.33 14.610231
2021-05-02 16:51:00+00:00 56678.33 56688.99 56636.89 56653.42 11.647158
[121 rows x 5 columns]
```
"""
@classmethod
def download_symbol(cls,
symbol: str,
exchange: tp.Union[str, "ExchangeT"] = 'binance',
config: tp.Optional[dict] = None,
timeframe: str = '1d',
start: tp.DatetimeLike = 0,
end: tp.DatetimeLike = 'now UTC',
delay: tp.Optional[float] = None,
limit: tp.Optional[int] = 500,
retries: int = 3,
show_progress: bool = True,
params: tp.Optional[dict] = None,
tqdm_kwargs: tp.KwargsLike = None) -> tp.Frame:
"""Download the symbol.
Args:
symbol (str): Symbol.
exchange (str or object): Exchange identifier or an exchange object of type
`ccxt.base.exchange.Exchange`.
config (dict): Config passed to the exchange upon instantiation.
Will raise an exception if exchange has been already instantiated.
timeframe (str): Timeframe supported by the exchange.
start (any): Start datetime.
See `vectorbt.utils.datetime_.to_tzaware_datetime`.
end (any): End datetime.
See `vectorbt.utils.datetime_.to_tzaware_datetime`.
delay (float): Time to sleep after each request (in milliseconds).
!!! note
Use only if `enableRateLimit` is not set.
limit (int): The maximum number of returned items.
retries (int): The number of retries on failure to fetch data.
show_progress (bool): Whether to show the progress bar.
tqdm_kwargs (dict): Keyword arguments passed to `tqdm`.
params (dict): Exchange-specific key-value parameters.
For defaults, see `data.ccxt` in `vectorbt._settings.settings`.
"""
import ccxt
from vectorbt._settings import settings
ccxt_cfg = settings['data']['ccxt']
if config is None:
config = {}
if tqdm_kwargs is None:
tqdm_kwargs = {}
if params is None:
params = {}
if isinstance(exchange, str):
if not hasattr(ccxt, exchange):
raise ValueError(f"Exchange {exchange} not found")
# Resolve config
default_config = {}
for k, v in ccxt_cfg.items():
# Get general (not per exchange) settings
if k in ccxt.exchanges:
continue
default_config[k] = v
if exchange in ccxt_cfg:
default_config = merge_dicts(default_config, ccxt_cfg[exchange])
config = merge_dicts(default_config, config)
exchange = getattr(ccxt, exchange)(config)
else:
if len(config) > 0:
raise ValueError("Cannot apply config after instantiation of the exchange")
if not exchange.has['fetchOHLCV']:
raise ValueError(f"Exchange {exchange} does not support OHLCV")
if timeframe not in exchange.timeframes:
raise ValueError(f"Exchange {exchange} does not support {timeframe} timeframe")
if exchange.has['fetchOHLCV'] == 'emulated':
warnings.warn("Using emulated OHLCV candles", stacklevel=2)
def _retry(method):
@wraps(method)
def retry_method(*args, **kwargs):
for i in range(retries):
try:
return method(*args, **kwargs)
except (ccxt.NetworkError, ccxt.ExchangeError) as e:
if i == retries - 1:
raise e
if delay is not None:
time.sleep(delay / 1000)
return retry_method
@_retry
def _fetch(_since, _limit):
return exchange.fetch_ohlcv(
symbol,
timeframe=timeframe,
since=_since,
limit=_limit,
params=params
)
# Establish the timestamps
start_ts = datetime_to_ms(to_tzaware_datetime(start, tz=get_utc_tz()))
try:
first_data = _fetch(0, 1)
first_valid_ts = first_data[0][0]
next_start_ts = start_ts = max(start_ts, first_valid_ts)
except:
next_start_ts = start_ts
end_ts = datetime_to_ms(to_tzaware_datetime(end, tz=get_utc_tz()))
def _ts_to_str(ts):
return str(pd.Timestamp(to_tzaware_datetime(ts, tz=get_utc_tz())))
# Iteratively collect the data
data: tp.List[list] = []
with tqdm(disable=not show_progress, **tqdm_kwargs) as pbar:
pbar.set_description(_ts_to_str(start_ts))
while True:
# Fetch the klines for the next interval
next_data = _fetch(next_start_ts, limit)
if len(data) > 0:
next_data = list(filter(lambda d: next_start_ts < d[0] < end_ts, next_data))
else:
next_data = list(filter(lambda d: d[0] < end_ts, next_data))
# Update the timestamps and the progress bar
if not len(next_data):
break
data += next_data
pbar.set_description("{} - {}".format(
_ts_to_str(start_ts),
_ts_to_str(next_data[-1][0])
))
pbar.update(1)
next_start_ts = next_data[-1][0]
if delay is not None:
time.sleep(delay / 1000) # be kind to api
# Convert data to a DataFrame
df = pd.DataFrame(data, columns=[
'Open time',
'Open',
'High',
'Low',
'Close',
'Volume'
])
df.index = pd.to_datetime(df['Open time'], unit='ms', utc=True)
| |
self.sabio_insensitive:
if 'bigg_name' in self.sabio_insensitive.get(met):
bigg_chemicals.append(coefficient + __reformat_met_name(self.sabio_insensitive.get(met)['bigg_name']))
else:
bigg_chemicals.append(coefficient + __reformat_met_name(met))
elif met in self.bigg_insensitive:
if 'bigg_name' in self.bigg_insensitive.get(met):
bigg_chemicals.append(coefficient + __reformat_met_name(self.bigg_insensitive.get(met)['bigg_name']))
else:
bigg_chemicals.append(coefficient + __reformat_met_name(met))
else:
print(f'-->ERROR: The {met} metabolite in is not recognized by BiGG.')
final_length = len(bigg_chemicals)
if original_length == final_length:
return bigg_chemicals, False
else:
return bigg_chemicals, True
def parsing_chemical_list(chemical_list, sabio):
bigg_chemicals = []
sabio_chemicals = []
for met in chemical_list:
if not re.search('[A-Za-z]', met):
continue
met, coefficient = __met_parsing(met)
met = __name_refinement(met)
# assign the proper chemical names
if not sabio:
sabio_chemicals.append(coefficient + __reformat_met_name(self.bigg_to_sabio_metabolites[met]['name'], True))
else:
sabio_chemicals.append(coefficient + __reformat_met_name(met, True))
match = __check_existence(met, coefficient, bigg_chemicals, True)
if not match:
if re.search('D-', met):
met = re.sub('D-', '', met)
bigg_chemicals, match = __check_existence(met, coefficient, bigg_chemicals, sabio)
elif not re.search('D-', met) and not match:
met = 'D-' + met
bigg_chemicals, match = __check_existence(met, coefficient, bigg_chemicals, sabio)
# elif not re.search('D-', met) and not match:
# met = 'D-' + met
return bigg_chemicals, sabio_chemicals
# parse the reactants and products for the specified reaction string
if not sabio:
reaction_split = reaction_string.split(' <-> ')
else:
reaction_split = reaction_string.split(' = ')
reactants_list = reaction_split[0].split(' + ')
products_list = reaction_split[1].split(' + ')
# parse the reactants and products
bigg_reactants, sabio_reactants = parsing_chemical_list(reactants_list, sabio)
bigg_products, sabio_products = parsing_chemical_list(products_list, sabio)
# assemble the chemicals list and reaction string
bigg_compounds = bigg_reactants + bigg_products
sabio_chemicals = sabio_reactants + sabio_products
reactant_string = ' + '.join(bigg_reactants)
product_string = ' + '.join(bigg_products)
reaction_string = ' <-> '.join([reactant_string, product_string])
# if sabio:
# reaction_string = ' = '.join([reactant_string, product_string])
return reaction_string, sabio_chemicals, bigg_compounds
def _refine_scraped_file(self, enzyme_name, ID):
    """Refine the most recently scraped XLS into a per-enzyme CSV.

    Stamps `enzyme_name` into the ``Enzymename`` column, exports the frame
    under a unique CSV path in ``raw_data``, deletes the original XLS, and
    caches the SABIO reaction IDs in ``self.id_bigg_matches`` for parsing.
    """
    # open the most recent file
    xls_files = glob(os.path.join(self.paths['raw_data'], '*.xls'))
    most_recent = max(xls_files, key=os.path.getctime)
    with open(most_recent) as xls:
        df = pandas.read_excel(xls.name)
    # apply the enzyme name information with the BiGG name
    df['Enzymename'] = [enzyme_name for _ in range(len(df['Enzymename']))]
    sabio_ids = df["SabioReactionID"].unique().tolist()
    # export with a unique name: bump a trailing numeric suffix while the
    # target path already exists.
    # FIX: regex literals are now raw strings ('\.' in a non-raw literal is
    # an invalid escape, a SyntaxWarning since Python 3.12); the extension
    # search is also performed once instead of twice per iteration.
    count = -1
    file_extension = ''
    df_path = os.path.join(self.paths['raw_data'], enzyme_name + '.csv')
    while os.path.exists(df_path):
        count += 1
        extension_match = re.search(r'(\.[a-zA-Z]+$)', df_path)
        if extension_match:
            file_extension = extension_match.group()
            # NOTE(review): `file_extension` is used as a regex pattern, so
            # '.' matches any character — acceptable for these paths.
            df_path = re.sub(file_extension, '', df_path)
        if not re.search(r'(-[0-9]+$)', df_path):
            df_path += f'-{count}'
        else:
            df_path = re.sub(r'([0-9]+)$', str(count), df_path)
        df_path += file_extension
    os.remove(most_recent)
    dir = os.path.dirname(df_path)
    if not os.path.exists(dir):
        print(f'missing directory {dir} has been created.')
        os.mkdir(dir)
    df.to_csv(df_path)
    # store the matched content for future access during parsing
    self.id_bigg_matches[enzyme_name] = sabio_ids
    self.id_bigg_matches[ID] = enzyme_name
def _glob_csv(self,):
    """Concatenate all scraped CSV/XLS files into one deduplicated CSV.

    Loads every non-empty raw file, concatenates the frames, drops
    duplicate rows, deletes the individual source files, and exports the
    combined frame to ``self.paths['concatenated_data']``.
    """
    total_dataframes = []
    original_csvs = glob(os.path.join(self.paths['raw_data'], '*.csv'))
    for path in original_csvs:
        if os.path.getsize(path) > 0:
            # BUG FIX: the detected encoding was computed but never used,
            # so non-UTF-8 files crashed read_csv; pass it through.
            with open(path, 'rb') as file:
                encoding = detect(file.read())['encoding'] or 'utf-8'
            total_dataframes.append(pandas.read_csv(path, encoding=encoding))
    remaining_xls = glob(os.path.join(self.paths['raw_data'], '*.xls'))
    for path in remaining_xls:
        if os.path.getsize(path) > 0:
            # Excel files are binary; chardet encoding detection does not
            # apply, so the file is handed straight to read_excel.
            total_dataframes.append(pandas.read_excel(path))
    # All scraped dataframes are combined and duplicate rows are removed
    combined_df = pandas.concat(total_dataframes)
    combined_df = combined_df.fillna('')
    combined_df = combined_df.drop_duplicates()
    # remove the individual dataframes
    for file in original_csvs + remaining_xls:
        os.remove(file)
    # export the concatenated dataframe
    combined_df.to_csv(self.paths['concatenated_data'])
    print('SABIO data has been concatenated.')
def _scrape_entry_id(self,entry_id):
    """Scrape the parameter table for a single SABIO-RK entry ID.

    Drives the Selenium browser through the SABIO-RK search UI, searches
    for `entry_id`, expands its row, and parses the embedded parameter
    table into ``{parameter_name: {column_header: value}}``. Returns
    ``{'content': None}`` when the page never loads within ~60 polls, or
    an empty dict when no "Parameter" table is found.
    """
    entry_id = str(entry_id)
    self.driver.get("http://sabiork.h-its.org/newSearch/index")
    time.sleep(self.parameters['general_delay'])
    self._wait_for_id("resetbtn")
    time.sleep(self.parameters['general_delay'])
    self._click_element_id("resetbtn")
    time.sleep(self.parameters['general_delay']*2)
    self._click_element_id("option")
    self._select_dropdown_id("searchterms", "EntryID")
    text_area = self.driver.find_element_by_id("searchtermField")
    time.sleep(self.parameters['general_delay'])
    text_area.send_keys(entry_id)
    time.sleep(self.parameters['general_delay'])
    self._click_element_id("addsearch")
    # wait for the information expansion to open (poll up to 60 times)
    for delay in range(60):
        try:
            self._click_element_id(entry_id + "img")
            break
        except:  # NOTE(review): bare except retained; a narrower selenium exception would be safer
            if delay == 59:
                return {'content':None}
            time.sleep(self.parameters['general_delay'])
    time.sleep(self.parameters['general_delay'])
    # wait for the table to load inside the entry's dedicated iframe
    self.driver.switch_to.frame(self.driver.find_element_by_xpath("//iframe[@name='iframe_" + entry_id + "']"))
    for delay in range(60):
        try:
            element = self.driver.find_element_by_xpath("//table")
            break
        except:  # NOTE(review): bare except retained, same caveat as above
            if delay == 59:
                return {'content':None}
            time.sleep(self.parameters['general_delay'])
    element = self.driver.find_element_by_xpath("//table")
    html_source = element.get_attribute('innerHTML')
    table_df = pandas.read_html(html_source)
    reaction_parameters_df = pandas.DataFrame()
    counter = 0
    parameters_json = {}
    # locate the sub-table whose first cell reads "Parameter"
    for df in table_df:
        try:
            if df[0][0] == "Parameter":
                reaction_parameters_df = table_df[counter]
        except:
            # malformed table: reset the page and return what was parsed so far
            self.driver.get("http://sabiork.h-its.org/newSearch/index")
            return parameters_json
        counter += 1
    # rows 0-1 are headers; build {parameter_name: {column_header: value}}
    for row in range(2, len(reaction_parameters_df[0])):
        parameter_name = str(reaction_parameters_df[0][row])
        inner_parameters_json = {}
        for col in range(1, len(reaction_parameters_df.columns)):
            parameter_type = str(reaction_parameters_df[col][1])
            inner_parameters_json[parameter_type] = reaction_parameters_df[col][row]
        parameters_json[parameter_name] = inner_parameters_json
    return parameters_json
def _scrape_entryids(self,):
    """Scrape parameter specifications for every unscraped SABIO entry ID.

    Iterates over the EntryIDs in the concatenated dataframe, classifies
    each one ("acceptable", "missing_unit", "missing_values" or
    "erroneous"), and persists both the classification and the accepted
    parameter payloads to JSON after each entry so the scrape can resume.
    """
    self._open_driver()
    self.sabio_df = pandas.read_csv(self.paths['concatenated_data'])
    self._previous_scrape()
    entryids = self.sabio_df["EntryID"].unique().tolist()
    remaining_entryids = set(entryids) - set(self.variables['is_scraped_entryids'])
    # estimate the time to scrape the entryids (~1 minute per enzyme for Step 1)
    seconds_per_enzyme = 1*minute
    scraping_time = seconds_per_enzyme * len(remaining_entryids)
    estimated_completion = datetime.datetime.now() + datetime.timedelta(seconds = scraping_time)
    print(f'Estimated completion of scraping the XLS data for {self.bigg_model_name}: {estimated_completion}, in {scraping_time/hour} hours')
    for entryid in remaining_entryids:
        entryid = str(entryid)
        # only entries that possess calculable units will be processed and accepted
        self.variables['is_scraped_entryids'][entryid] = "erroneous"
        parameters = self._scrape_entry_id(entryid)
        if parameters is not None:
            for param in parameters:
                if not 'unit' in parameters[param]:
                    self.variables['is_scraped_entryids'][entryid] = "missing_unit"
                elif parameters[param]['unit'] == '-':
                    self.variables['is_scraped_entryids'][entryid] = "missing_unit"
                elif parameters[param]['start val.'] == '-' and parameters[param]['end val.'] == '-':
                    self.variables['is_scraped_entryids'][entryid] = "missing_values"
                else:
                    self.variables['is_scraped_entryids'][entryid] = "acceptable"
            if self.variables['is_scraped_entryids'][entryid] == 'acceptable':
                self.variables['entryids'][entryid] = parameters
            # persist progress after each entry so the scrape is resumable.
            # FIX: removed redundant .close() calls — the with-statements
            # already close the files.
            # NOTE(review): flattened source made the nesting of these dumps
            # ambiguous; persisting for every non-None result — confirm.
            with open(self.paths["is_scraped_entryids"], 'w') as outfile:
                json.dump(self.variables['is_scraped_entryids'], outfile, indent = 4)
            with open(self.paths["entryids_path"], 'w') as f:
                json.dump(self.variables['entryids'], f, indent = 4)
        else:
            if self.verbose:
                print(entryid, self.variables['is_scraped_entryids'][entryid])
                pprint(parameters)
        print(f'\rScraped entryID {entryids.index(int(entryid))}/{len(entryids)}\t{datetime.datetime.now()}', end='')
    # update the step counter
    print('The parameter specifications for each entryid have been scraped.')
    def scrape_bigg_xls(self,):
        """Scrape SABIO-RK kinetics (XLS exports) for every reaction of the
        loaded BiGG model, then refine, glob, and post-process the downloads.

        Side effects: opens the Selenium driver (``self._open_driver``),
        writes progress JSON files under ``self.paths``, updates
        ``self.count``/``self.variables``, and advances ``self.step_number``.
        """
        self._open_driver()
        # estimate the time to scrape the XLS files
        # NOTE(review): 0.016*minute is roughly one second per enzyme despite
        # the "minutes_per_enzyme" name -- confirm the intended rate.
        minutes_per_enzyme = 0.016*minute
        scraping_time = minutes_per_enzyme * len(self.model['reactions'])
        estimated_completion = datetime.datetime.now() + datetime.timedelta(seconds = scraping_time)
        print(f'Estimated completion of scraping the XLS data for {self.bigg_model_name}: {estimated_completion}, in {scraping_time/hour} hours')
        # scrape SABIO data based upon various search parameters
        # resume counting from however many enzymes were already scraped
        self.count = len(self.variables["is_scraped"])
        # maps model annotation namespaces to SABIO-RK search-field names
        annotation_search_pairs = {
            "sabiork":"SabioReactionID",
            "metanetx.reaction":"MetaNetXReactionID",
            "ec-code":"ECNumber",
            "kegg.reaction":"KeggReactionID",
            "rhea":"RheaReactionID"
        }
        self.bigg_sabio_enzymes = {}
        self.id_bigg_matches = {}
        for enzyme in self.model['reactions']:
            # search SABIO for reaction kinetics
            # enzyme_name = enzyme['name'].replace("\"", "")
            enzyme_name = enzyme['name']
            if not enzyme_name in self.variables['is_scraped']:
                self.variables['is_scraped'][enzyme_name] = {}
                # NOTE(review): this adds the current enzyme's name to the
                # shared search dict every iteration, so names accumulate
                # across enzymes; harmless only while earlier names never
                # occur in a later enzyme's annotations -- confirm intended.
                annotation_search_pairs.update({
                    enzyme_name:"Enzymename"
                })
                for database in annotation_search_pairs:
                    if database in self.model_contents[enzyme_name]['annotations']:
                        for ID in self.model_contents[enzyme_name]['annotations'][database]:
                            # mark as attempted before scraping; flipped to
                            # True only on a successful download
                            self.variables['is_scraped'][enzyme_name][ID] = False
                            scraped = self._scrape_csv(ID, annotation_search_pairs[database])
                            if scraped:
                                self.variables['is_scraped'][enzyme_name][ID] = True
                                try:
                                    self._refine_scraped_file(enzyme_name, ID)
                                except:
                                    warnings.warn(f'The downloaded XLS file for {enzyme_name} and the {ID} ID could not be opened.')
                # self._change_enzyme_name(enzyme_name)
                self.count += 1
                # NOTE(review): prints count+1 right after incrementing, so
                # the displayed index runs one ahead of actual progress.
                print(f"\rCompleted reaction: {self.count+1}/{len(self.model['reactions'])}\t{datetime.datetime.now()}", end='')
            else:
                print(f'< {enzyme_name} > was either already scraped, or is duplicated in the model.')
            # tracks scraping progress
            with open(self.paths['is_scraped'], 'w') as outfile:
                json.dump(self.variables['is_scraped'], outfile, indent = 4)
                outfile.close()
        if self.export_model_content:
            with open(self.paths['model_contents'], 'w') as out:
                json.dump(self.model_contents, out, indent = 3)
        # process the data
        print(f'SABIO data has been downloaded.')
        self._glob_csv()
        self._scrape_entryids()
        self.step_number = 2
        self._progress_update(self.step_number)
"""
--------------------------------------------------------------------
STEP 2: COMBINE XLS AND ENTRYID DATA INTO A dFBA INPUT JSON FILE
--------------------------------------------------------------------
"""
def _determine_parameter_value(self, unit, original_value):
# parse the unit
numerator = ''
denominator = ''
term = ''
skips = 0
next_denominator = False
for index in range(len(unit)):
if skips > 0:
skips -= 1
continue
ch = unit[index]
term += ch
# parse the unit characters
if index == len(unit)-1:
if next_denominator:
denominator += term
else:
numerator += term
term = ''
elif index+1 == len(unit)-1:
if next_denominator:
denominator += term
else:
numerator += term
term = ''
elif unit[index+1] == '^':
if unit[index+2:index+6] == '(-1)':
denominator += term
skips = 5
term = ''
else:
print(unit, term)
elif unit[index+1] == '/':
numerator += term
term = ''
skips += 1
next_denominator = True
if term != '':
print(unit, term)
unit_dic = {
'numerator':numerator,
'denominator': denominator
}
# determine the mathematically equivalent value in base units
# print('original_value', original_value)
if original_value is None or original_value in ['None']:
return original_value, unit_dic
value = float(original_value)
for group in unit_dic:
term = unit_dic[group]
if | |
ConfigCACsrCa object.
:param str expiry: (optional) The expiration for the root CA certificate.
:param float pathlength: (optional) The pathlength field is used to limit
CA certificate hierarchy. 0 means that the CA cannot issue CA certs, only
entity certificates. 1 means that the CA can issue both.
"""
self.expiry = expiry
self.pathlength = pathlength
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConfigCACsrCa':
"""Initialize a ConfigCACsrCa object from a json dictionary."""
args = {}
if 'expiry' in _dict:
args['expiry'] = _dict.get('expiry')
if 'pathlength' in _dict:
args['pathlength'] = _dict.get('pathlength')
return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigCACsrCa object from a json dictionary."""
        # Private alias; simply delegates to from_dict.
        return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'expiry') and self.expiry is not None:
_dict['expiry'] = self.expiry
if hasattr(self, 'pathlength') and self.pathlength is not None:
_dict['pathlength'] = self.pathlength
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Private alias; simply delegates to to_dict.
        return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ConfigCACsrCa object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ConfigCACsrCa') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ConfigCACsrCa') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfigCACsrKeyrequest():
    """
    ConfigCACsrKeyrequest.

    :attr str algo: The algorithm to use for CSRs.
    :attr float size: The size of the key for CSRs.
    """

    def __init__(self,
                 algo: str,
                 size: float) -> None:
        """
        Initialize a ConfigCACsrKeyrequest object.

        :param str algo: The algorithm to use for CSRs.
        :param float size: The size of the key for CSRs.
        """
        self.algo = algo
        self.size = size

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ConfigCACsrKeyrequest':
        """Initialize a ConfigCACsrKeyrequest object from a json dictionary."""
        args = {}
        # Both properties are required by the service schema.
        for prop in ('algo', 'size'):
            if prop not in _dict:
                raise ValueError('Required property \'' + prop + '\' not present in ConfigCACsrKeyrequest JSON')
            args[prop] = _dict.get(prop)
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigCACsrKeyrequest object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Emit only attributes that exist and are non-None.
        for prop in ('algo', 'size'):
            value = getattr(self, prop, None)
            if value is not None:
                serialized[prop] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ConfigCACsrKeyrequest object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ConfigCACsrKeyrequest') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'ConfigCACsrKeyrequest') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ConfigCACsrNamesItem():
    """
    ConfigCACsrNamesItem.

    :attr str c:
    :attr str st:
    :attr str l: (optional)
    :attr str o:
    :attr str ou: (optional)
    """

    def __init__(self,
                 c: str,
                 st: str,
                 o: str,
                 *,
                 l: str = None,
                 ou: str = None) -> None:
        """
        Initialize a ConfigCACsrNamesItem object.

        :param str c:
        :param str st:
        :param str o:
        :param str l: (optional)
        :param str ou: (optional)
        """
        self.c = c
        self.st = st
        self.l = l
        self.o = o
        self.ou = ou

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ConfigCACsrNamesItem':
        """Initialize a ConfigCACsrNamesItem object from a json dictionary."""
        args = {}
        # JSON keys are upper-case X.500 attribute names; C, ST, and O are
        # required, L and OU are optional.
        for json_key, attr, required in (('C', 'c', True), ('ST', 'st', True),
                                         ('L', 'l', False), ('O', 'o', True),
                                         ('OU', 'ou', False)):
            if json_key in _dict:
                args[attr] = _dict.get(json_key)
            elif required:
                raise ValueError('Required property \'' + json_key + '\' not present in ConfigCACsrNamesItem JSON')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigCACsrNamesItem object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for json_key, attr in (('C', 'c'), ('ST', 'st'), ('L', 'l'),
                               ('O', 'o'), ('OU', 'ou')):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[json_key] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ConfigCACsrNamesItem object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ConfigCACsrNamesItem') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'ConfigCACsrNamesItem') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ConfigCADbTls():
    """
    ConfigCADbTls.

    :attr List[str] certfiles: (optional)
    :attr ConfigCADbTlsClient client: (optional)
    :attr bool enabled: (optional) Set to true if TLS is to be used between the CA
          and its database, else false.
    """

    def __init__(self,
                 *,
                 certfiles: List[str] = None,
                 client: 'ConfigCADbTlsClient' = None,
                 enabled: bool = None) -> None:
        """
        Initialize a ConfigCADbTls object.

        :param List[str] certfiles: (optional)
        :param ConfigCADbTlsClient client: (optional)
        :param bool enabled: (optional) Set to true if TLS is to be used between
               the CA and its database, else false.
        """
        self.certfiles = certfiles
        self.client = client
        self.enabled = enabled

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ConfigCADbTls':
        """Initialize a ConfigCADbTls object from a json dictionary."""
        args = {}
        for prop in ('certfiles', 'enabled'):
            if prop in _dict:
                args[prop] = _dict.get(prop)
        if 'client' in _dict:
            # The nested client section is itself a model object.
            args['client'] = ConfigCADbTlsClient.from_dict(_dict.get('client'))
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigCADbTls object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'certfiles', None) is not None:
            serialized['certfiles'] = self.certfiles
        if getattr(self, 'client', None) is not None:
            serialized['client'] = self.client.to_dict()
        if getattr(self, 'enabled', None) is not None:
            serialized['enabled'] = self.enabled
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ConfigCADbTls object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ConfigCADbTls') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'ConfigCADbTls') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ConfigCADbTlsClient():
"""
ConfigCADbTlsClient.
:attr str certfile: The TLS certificate for client TLS as base 64 encoded PEM.
:attr str keyfile: The TLS private key for client TLS as base 64 encoded PEM.
"""
    def __init__(self,
                 certfile: str,
                 keyfile: str) -> None:
        """
        Initialize a ConfigCADbTlsClient object.

        :param str certfile: The TLS certificate for client TLS as base 64 encoded
               PEM.
        :param str keyfile: The TLS private key for client TLS as base 64 encoded
               PEM.
        """
        # Values are stored verbatim; serialization happens in to_dict().
        self.certfile = certfile
        self.keyfile = keyfile
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConfigCADbTlsClient':
"""Initialize a ConfigCADbTlsClient object from a json dictionary."""
args = {}
if 'certfile' in _dict:
args['certfile'] = _dict.get('certfile')
else:
raise ValueError('Required property \'certfile\' not present in ConfigCADbTlsClient JSON')
if 'keyfile' in _dict:
args['keyfile'] = _dict.get('keyfile')
else:
raise ValueError('Required property \'keyfile\' not present in ConfigCADbTlsClient JSON')
return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigCADbTlsClient object from a json dictionary."""
        # Private alias; simply delegates to from_dict.
        return cls.from_dict(_dict)
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'certfile') and self.certfile is not None:
_dict['certfile'] = self.certfile
if hasattr(self, 'keyfile') and self.keyfile is not None:
_dict['keyfile'] = self.keyfile
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Private alias; simply delegates to to_dict.
        return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ConfigCADbTlsClient object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ConfigCADbTlsClient') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
| |
"""Core functionality for foamPy."""
from __future__ import division, print_function
import numpy as np
import os
import re
import datetime
import sys
import time
import subprocess
import pandas
import glob
from .dictionaries import *
from .templates import *
def gen_stripped_lines(fpath):
    """Yield the lines of *fpath* with parentheses replaced by spaces.

    OpenFOAM writes vectors as ``(x y z)``; removing the parentheses makes
    the file digestible by ``numpy.loadtxt``.
    """
    with open(fpath) as f:
        # Iterate the file object lazily instead of f.readlines() so large
        # data files are not read into memory at once.
        for line in f:
            yield line.replace("(", " ").replace(")", " ")


def load_forces(casedir="./", object_name="forces", start_time=0):
    """Load forces and moments as a pandas DataFrame.

    Reads the newest ``forces*.dat`` under
    ``postProcessing/<object_name>/<start_time>`` and returns per-component
    pressure/viscous/porous columns plus their sums (``fx`` .. ``mz``).
    """
    glob_string = os.path.join(
        casedir,
        "postProcessing/{}/{}/forces*.dat".format(object_name, start_time)
    )
    fpath = sorted(glob.glob(glob_string))[-1]
    data = np.loadtxt(gen_stripped_lines(fpath))
    df = pandas.DataFrame()
    df["time"] = data[:, 0]
    # Column layout after the time column:
    #   f pressure(xyz), f viscous(xyz), f porous(xyz),
    #   m pressure(xyz), m viscous(xyz), m porous(xyz)
    groups = ["pressure", "viscous", "porous"]
    for i_fm, fm in enumerate(["f", "m"]):
        for i_c, component in enumerate(["x", "y", "z"]):
            for i_g, group in enumerate(groups):
                col = 1 + 9*i_fm + 3*i_g + i_c
                df[fm + component + "_" + group] = data[:, col]
    # Total force/moment components are the sum of the three contributions
    for fm in ["f", "m"]:
        for component in ["x", "y", "z"]:
            df[fm + component] = df[fm + component + "_pressure"] \
                               + df[fm + component + "_viscous"] \
                               + df[fm + component + "_porous"]
    return df


def load_probes_data(casedir="./", object_name="probes", start_time=0,
                     field_name="U"):
    """Load probes data as pandas ``DataFrame``.

    Columns are keyed by each probe's location tuple; vector fields yield
    tuples per sample, scalar fields yield plain values.
    """
    fpath = os.path.join(casedir, "postProcessing", object_name,
                         str(start_time), field_name)
    # First get probe locations to use as column names
    with open(fpath) as f:
        txt = f.read()
    probe_lines = re.findall(r"# Probe \d.*\n", txt)
    probe_locs = []
    for line in probe_lines:
        probe_locs.append(line.split("(")[-1].split(")")[0].split())
    data = np.loadtxt(gen_stripped_lines(fpath))
    df = pandas.DataFrame()
    df["time"] = data[:, 0]
    # Number of components per probe (1 for scalars, 3 for vectors, ...)
    nprobes = len(probe_locs)
    dims = (data.shape[1] - 1) // nprobes
    for n, probe_loc in enumerate(probe_locs):
        probe_loc = [float(pl) for pl in probe_loc]
        # BUG FIX: probe n occupies columns [1 + n*dims, 1 + (n+1)*dims);
        # the previous slice [n+1 : n+dims+1] was only correct for scalar
        # fields (dims == 1) and mixed probes' components otherwise.
        d = data[:, 1 + n*dims:1 + (n + 1)*dims]
        if dims > 1:
            d = [tuple(p) for p in d]
        else:
            # Flatten to 1-D so the column assignment is well-formed
            d = d[:, 0]
        df[tuple(probe_loc)] = d
    return df
def load_torque_drag(casedir="", folder="0", filename=None,
                     torque_axis="z", drag_axis="x"):
    """Load time, torque, and drag from a forces function-object file.

    Case name can be left empty if running within a case folder.

    :param casedir: case directory (may be "")
    :param folder: time folder under ``postProcessing/forces``
    :param filename: forces file name; defaults to ``forces.dat``
    :param torque_axis: axis of the moment to return ("x", "y", or "z")
    :param drag_axis: axis of the force to return ("x", "y", or "z")
    :returns: ``(t, torque, drag)`` NumPy arrays, where torque is the sum of
        the pressure and viscous moments about *torque_axis* and drag is the
        sum of the pressure and viscous forces along *drag_axis*
    :raises ValueError: if either axis is not "x", "y", or "z"
        (previously an unsupported axis raised ``UnboundLocalError``; "y"
        is now supported as well)
    """
    axis_index = {"x": 0, "y": 1, "z": 2}
    if torque_axis not in axis_index or drag_axis not in axis_index:
        raise ValueError("torque_axis and drag_axis must be 'x', 'y', or 'z'")
    if casedir:
        casedir += "/"
    if not filename:
        filename = "forces.dat"
    fpath = casedir + "postProcessing/forces/" + str(folder) + "/" + filename
    rows = []
    with open(fpath, "r") as f:
        for line in f:
            # Strip OpenFOAM vector delimiters and tokenize
            tokens = line.replace("(", "").replace(")", "") \
                         .replace(",", " ").split()
            # Skip blank lines and "# ..." header lines
            if tokens and tokens[0] != "#":
                rows.append([float(x) for x in tokens])
    data = np.asarray(rows)
    if data.size == 0:
        # Empty file: return empty arrays, matching the old behavior
        return np.array([]), np.array([]), np.array([])
    # Column layout: t, fp(xyz), fv(xyz), fpo(xyz), mp(xyz), mv(xyz), mpo(xyz)
    t = data[:, 0]
    i_t = axis_index[torque_axis]
    i_d = axis_index[drag_axis]
    torque = data[:, 10 + i_t] + data[:, 13 + i_t]
    drag = data[:, 1 + i_d] + data[:, 4 + i_d]
    return t, torque, drag
def load_all_torque_drag(casedir="", torque_axis="z", drag_axis="x"):
    """Concatenate torque and drag histories from every forces file in
    every time folder under ``postProcessing/forces``."""
    t = np.array([])
    torque = np.array([])
    drag = np.array([])
    if casedir:
        casedir += "/"
    forces_dir = casedir + "postProcessing/forces"
    for folder in sorted(os.listdir(forces_dir)):
        for fname in sorted(os.listdir(forces_dir + "/" + folder)):
            t_i, torque_i, drag_i = load_torque_drag(casedir=casedir,
                                                     folder=folder,
                                                     filename=fname,
                                                     torque_axis=torque_axis,
                                                     drag_axis=drag_axis)
            t = np.append(t, t_i)
            torque = np.append(torque, torque_i)
            drag = np.append(drag, drag_i)
    return t, torque, drag
def load_theta_omega(casedir="", t_interp=[], theta_units="degrees"):
    """Import omega from ``dynamicMeshDict`` table. Returns t, theta,
    omega (rad/s) where theta is calculated using the trapezoidal rule.
    `t_interp` is a keyword argument for an array over which omega and theta
    will be interpolated.
    """
    # NOTE(review): the mutable default `t_interp=[]` is shared between
    # calls; it is only read here, but None would be a safer default.
    t = []
    omega = []
    if casedir != "":
        casedir += "/"
    with open(casedir+"constant/dynamicMeshDict", "r") as f:
        # NOTE(review): the dot is unescaped, so it matches any character
        # (e.g. "123" matches as "1"+"2"+"3"); presumably r"\d+\.\d+" was
        # intended -- confirm against the dynamicMeshDict table format.
        regex = r"\d+.\d+"
        for line in f.readlines():
            match = re.findall(regex, line)
            # table rows are expected to contain exactly two numbers:
            # (time omega)
            if len(match)==2:
                t.append(float(match[0]))
                omega.append(float(match[1]))
    omega = np.asarray(omega)
    t = np.asarray(t)
    # Integrate omega to obtain theta
    # NOTE(review): omega[:n]/t[:n] excludes the current sample, so
    # theta[n] is the integral only up to t[n-1]; confirm whether [:n+1]
    # was intended.
    theta = np.zeros(len(t))
    for n in range(len(t)):
        theta[n] = np.trapz(omega[:n], t[:n])
    # If provided, interpolate omega to match t vector
    if len(t_interp) > 0:
        omega = np.interp(t_interp, t, omega)
        theta = np.interp(t_interp, t, theta)
    if theta_units == "degrees":
        theta = theta/np.pi*180
    return t, theta, omega
def load_set(casedir="./", name="profile", quantity="U", fmt="xy", axis="xyz"):
    """Import text data created with the OpenFOAM sample utility.

    Returns a dict with a sorted "time" list plus, for each time, a dict of
    coordinate and (for ``quantity="U"``) velocity component arrays.
    """
    folder = os.path.join(casedir, "postProcessing", "sets")
    # BUG FIX: the old code removed entries from the list it was iterating
    # over, which skips the element following each removed one; build the
    # list of valid (numeric) time folders instead.
    t = []
    for time_name in os.listdir(folder):
        try:
            float(time_name)
        except ValueError:
            continue  # not a time folder
        try:
            t.append(int(time_name))
        except ValueError:
            t.append(float(time_name))
    t.sort()
    data = {"time" : t}
    for ts in t:
        filename = "{folder}/{time}/{name}_{q}.{fmt}".format(folder=folder,
                time=ts, name=name, q=quantity, fmt=fmt)
        with open(filename) as f:
            d = np.loadtxt(f)
        if quantity == "U":
            # Velocity components follow the len(axis) coordinate columns
            data[ts] = {"u" : d[:, len(axis)],
                        "v" : d[:, len(axis)+1],
                        "w" : d[:, len(axis)+2]}
            if len(axis) == 1:
                data[ts][axis] = d[:,0]
            else:
                data[ts]["x"] = d[:,0]
                data[ts]["y"] = d[:,1]
                data[ts]["z"] = d[:,2]
    return data
def load_sample_xy(casedir="./", profile="U"):
    """Import text data created with the OpenFOAM sample utility.

    Supports ``profile="U"`` (velocity) and ``profile="R"`` (Reynolds
    stresses); returns a dict of time vector, y vector, and per-time column
    arrays.
    """
    folder = os.path.join(casedir, "postProcessing", "sets")
    # BUG FIX: the old code removed entries from the list it was iterating
    # over (skipping elements), and then used the lengths of two different
    # lists; build one filtered, sorted time list and use it throughout.
    t = []
    for time_name in os.listdir(folder):
        try:
            float(time_name)
        except ValueError:
            continue  # not a time folder
        try:
            t.append(int(time_name))
        except ValueError:
            t.append(float(time_name))
    t.sort()
    ntimes = len(t)
    # Load a y vector from a single file since they are identical
    with open(folder+"/0/profile_"+profile+".xy") as f:
        y = np.loadtxt(f)[:,0]
    if profile == "U":
        u = np.zeros((len(y), ntimes))
        v = np.zeros((len(y), ntimes))
    elif profile == "R":
        uu = np.zeros((len(y), ntimes))
        uv = np.zeros((len(y), ntimes))
        uw = np.zeros((len(y), ntimes))
        vv = np.zeros((len(y), ntimes))
        vw = np.zeros((len(y), ntimes))
        ww = np.zeros((len(y), ntimes))
    for n in range(ntimes):
        with open(folder+"/"+str(t[n])+"/profile_"+profile+".xy") as f:
            data = np.loadtxt(f)
        if profile == "U":
            u[:,n] = data[:,1]
            v[:,n] = data[:,2]
        elif profile == "R":
            uu[:,n] = data[:,1]
            uv[:,n] = data[:,2]
            uw[:,n] = data[:,3]
            vv[:,n] = data[:,4]
            vw[:,n] = data[:,5]
            ww[:,n] = data[:,6]
    t = np.asarray(t, dtype=float)
    if profile == "U":
        data = {"t" : t, "u" : u, "v": v, "y" : y}
    elif profile == "R":
        data = {"t" : t, "uu" : uu, "vv": vv, "ww" : ww,
                "uv" : uv, "y" : y}
    return data
def get_endtime(casedir="./"):
    """Get run ``endTime`` from ``system/controlDict``.

    :param casedir: case directory; defaults to the current directory so
        existing no-argument callers keep working (added for consistency
        with ``get_deltat``)
    :returns: the last ``endTime`` value as a float, or None if the keyword
        is absent (previously this raised ``NameError``)
    """
    fpath = os.path.join(casedir, "system", "controlDict")
    endtime = None
    with open(fpath) as f:
        for raw in f:
            tokens = raw.replace(";", "").split()
            # Match only lines whose first token is the keyword, so e.g.
            # "stopAt endTime;" is not picked up
            if tokens and tokens[0] == "endTime":
                endtime = float(tokens[1])
    return endtime
def get_deltat(casedir="./"):
    """Get run ``deltaT`` from ``system/controlDict``."""
    fpath = os.path.join(casedir, "system", "controlDict")
    with open(fpath) as f:
        for raw in f:
            tokens = raw.replace(";", "").split()
            # Only lines whose first token is the keyword count; the last
            # occurrence wins, as before
            if tokens and tokens[0] == "deltaT":
                deltat = float(tokens[1])
    return deltat
def get_ncells(casedir="./", logname="log.checkMesh", keyword="cells",
               autogen=True):
    """Read a mesh count (default: cells) from a checkMesh log, running
    ``checkMesh -time 0`` in the case directory first when the log is
    missing and *autogen* is True."""
    fpath = os.path.join(casedir, logname)
    if autogen and not os.path.isfile(fpath):
        prev_dir = os.getcwd()
        os.chdir(casedir)
        run("checkMesh", args="-time 0")
        os.chdir(prev_dir)
    if keyword == "cells":
        # checkMesh reports the count on a "cells:" line
        keyword = "cells:"
    with open(fpath) as f:
        for raw in f:
            tokens = raw.split()
            # Last matching line wins, as before
            if tokens and tokens[0] == keyword:
                value = tokens[1]
    return int(value)
def get_max_courant_no():
    """Read ``maxCo`` from ``system/controlDict`` in the current directory;
    returns None implicitly when the keyword is absent."""
    with open("system/controlDict") as f:
        for raw in f:
            if ";" not in raw:
                continue
            tokens = raw.replace(";", " ").split()
            if tokens[0] == "maxCo":
                return float(tokens[1])
def read_dict(dictname=None, dictpath=None, casedir="./"):
    """Read an OpenFOAM dict into a Python dict. Right now this is quite
    crude, but gets the job done decently for 1 word parameters."""
    if dictpath is None and dictname is not None:
        # Resolve well-known dict names to their standard subdirectory
        if dictname in system_dicts:
            subpath = "system/" + dictname
        elif dictname in constant_dicts:
            subpath = "constant/" + dictname
        dictpath = os.path.join(casedir, subpath)
    foamdict = {}
    with open(dictpath) as f:
        for raw in f:
            # Only statement lines (terminated by ";") are parsed
            if ";" not in raw:
                continue
            tokens = raw.replace(";", "").split()
            if len(tokens) > 1:
                foamdict[tokens[0]] = tokens[1]
    return foamdict
def read_case():
    """Will eventually read all case dicts and put in a hierarchy of dicts."""
    # Placeholder: not yet implemented.
    return None
def gen_dynmeshdict(U, R, meantsr, cellzone="AMIsurface", rpm_fluc=3.7,
npoints=400, axis="(0 0 1)", direction=1):
"""Generates a dynamicMeshDict for a given U, R, meantsr, and an optional
rpm fluctuation amplitude. Phase is fixed."""
meanomega = meantsr*U/R
if npoints > 0:
amp_omega = rpm_fluc*2*np.pi/60.0
endtime = get_endtime()
t = np.linspace(0, endtime, npoints)
omega = meanomega + amp_omega*np.sin(3*meanomega*t - np.pi/1.2)
# Write to file
top = \
r"""/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / <NAME>eld | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 2.3.x |
| | |
# -*- coding: utf-8 -*-
"""
2014, LAAS/CNRS
@author: <NAME>
"""
from __future__ import print_function
from dynamic_graph import plug
import numpy as np
from dynamic_graph.sot.core.latch import Latch
from dynamic_graph.sot.core.operator import Selec_of_vector, Mix_of_vector
from dynamic_graph.sot.torque_control.numerical_difference import NumericalDifference
from dynamic_graph.sot.torque_control.joint_torque_controller import JointTorqueController
from dynamic_graph.sot.torque_control.joint_trajectory_generator import JointTrajectoryGenerator
from sot_talos_balance.nd_trajectory_generator import NdTrajectoryGenerator
from dynamic_graph.sot.torque_control.se3_trajectory_generator import SE3TrajectoryGenerator
from dynamic_graph.sot.torque_control.control_manager import ControlManager
from dynamic_graph.sot.torque_control.current_controller import CurrentController
from sot_talos_balance.simple_admittance_controller import SimpleAdmittanceController as AdmittanceController
from dynamic_graph.sot.torque_control.position_controller import PositionController
from dynamic_graph.tracer_real_time import TracerRealTime
from dynamic_graph.sot.torque_control.talos.motors_parameters import NJ
from dynamic_graph.sot.torque_control.talos.motors_parameters import *
from dynamic_graph.sot.torque_control.talos.sot_utils_talos import Bunch
from dynamic_graph.sot.torque_control.utils.filter_utils import create_butter_lp_filter_Wn_05_N_3
#from dynamic_graph.sot.torque_control.talos.joint_pos_ctrl_gains import *
def get_default_conf():
    """Gather the default TALOS torque-control configuration modules into a
    single Bunch."""
    import dynamic_graph.sot.torque_control.talos.balance_ctrl_conf as balance_ctrl_conf
    import dynamic_graph.sot.torque_control.talos.base_estimator_conf as base_estimator_conf
    import dynamic_graph.sot.torque_control.talos.control_manager_conf as control_manager_conf
    import dynamic_graph.sot.torque_control.talos.current_controller_conf as current_controller_conf
    import dynamic_graph.sot.torque_control.talos.force_torque_estimator_conf as force_torque_estimator_conf
    import dynamic_graph.sot.torque_control.talos.joint_torque_controller_conf as joint_torque_controller_conf
    import dynamic_graph.sot.torque_control.talos.joint_pos_ctrl_gains as pos_ctrl_gains
    import dynamic_graph.sot.torque_control.talos.motors_parameters as motor_params
    import dynamic_graph.sot.torque_control.talos.ddp_controller_conf as ddp_controller_conf
    conf = Bunch()
    for attr, module in (("balance_ctrl", balance_ctrl_conf),
                         ("base_estimator", base_estimator_conf),
                         ("control_manager", control_manager_conf),
                         ("current_ctrl", current_controller_conf),
                         ("force_torque_estimator", force_torque_estimator_conf),
                         ("joint_torque_controller", joint_torque_controller_conf),
                         ("pos_ctrl_gains", pos_ctrl_gains),
                         ("motor_params", motor_params),
                         ("ddp_controller", ddp_controller_conf)):
        setattr(conf, attr, module)
    return conf
def get_sim_conf():
    """Gather the simulation-flavored TALOS configuration modules into a
    single Bunch."""
    import dynamic_graph.sot.torque_control.talos.balance_ctrl_sim_conf as balance_ctrl_conf
    import dynamic_graph.sot.torque_control.talos.base_estimator_sim_conf as base_estimator_conf
    import dynamic_graph.sot.torque_control.talos.control_manager_sim_conf as control_manager_conf
    import dynamic_graph.sot.torque_control.talos.current_controller_sim_conf as current_controller_conf
    import dynamic_graph.sot.torque_control.talos.force_torque_estimator_conf as force_torque_estimator_conf
    import dynamic_graph.sot.torque_control.talos.joint_torque_controller_conf as joint_torque_controller_conf
    import dynamic_graph.sot.torque_control.talos.joint_pos_ctrl_gains_sim as pos_ctrl_gains
    import dynamic_graph.sot.torque_control.talos.motors_parameters as motor_params
    import dynamic_graph.sot.torque_control.talos.ddp_controller_conf as ddp_controller_conf
    conf = Bunch()
    for attr, module in (("balance_ctrl", balance_ctrl_conf),
                         ("base_estimator", base_estimator_conf),
                         ("control_manager", control_manager_conf),
                         ("current_ctrl", current_controller_conf),
                         ("force_torque_estimator", force_torque_estimator_conf),
                         ("joint_torque_controller", joint_torque_controller_conf),
                         ("pos_ctrl_gains", pos_ctrl_gains),
                         ("motor_params", motor_params),
                         ("ddp_controller", ddp_controller_conf)):
        setattr(conf, attr, module)
    return conf
def create_encoders(robot):
    """Select the NJ joint positions (skipping the 6 free-flyer entries)
    from the robot state vector."""
    joint_pos = Selec_of_vector('qn')
    plug(robot.device.robotState, joint_pos.sin)
    joint_pos.selec(6, NJ + 6)
    return joint_pos
def create_encoders_velocity(robot):
    """Select the NJ joint velocities (skipping the 6 free-flyer entries)
    from the robot velocity vector."""
    joint_vel = Selec_of_vector('dqn')
    plug(robot.device.robotVelocity, joint_vel.sin)
    joint_vel.selec(6, NJ + 6)
    return joint_vel
def create_joint_pos_selector(robot, conf):
    """Select the controlled joint's position from the robot state
    (offset by the 6 free-flyer entries)."""
    selector = Selec_of_vector('selecDdpJointPos')
    plug(robot.device.robotState, selector.sin)
    selector.selec(conf.controlled_joint + 6, conf.controlled_joint + 7)
    return selector
def create_joint_vel_selector(robot, conf):
    """Select the controlled joint's velocity from the robot velocity
    (offset by the 6 free-flyer entries)."""
    selector = Selec_of_vector('selecDdpJointVel')
    plug(robot.device.robotVelocity, selector.sin)
    selector.selec(conf.controlled_joint + 6, conf.controlled_joint + 7)
    return selector
def create_joint_torque_selector(robot, conf):
    """Select the controlled joint's measured torque from the device."""
    selector = Selec_of_vector('selecDdpJointTorque')
    plug(robot.device.ptorque, selector.sin)
    selector.selec(conf.controlled_joint, conf.controlled_joint + 1)
    return selector
def create_pos_des_selector(robot, conf):
    """Select the controlled joint's desired position from the trajectory
    generator."""
    selector = Selec_of_vector('selecDdpJointPosDes')
    plug(robot.traj_gen.q, selector.sin)
    selector.selec(conf.controlled_joint, conf.controlled_joint + 1)
    return selector
def create_motor_pos_selector(robot, conf):
    """Select the controlled joint's motor angle from the device."""
    selector = Selec_of_vector('selecDdpMotorPos')
    plug(robot.device.motor_angles, selector.sin)
    selector.selec(conf.controlled_joint, conf.controlled_joint + 1)
    return selector
def create_tau_des_selector(robot, conf):
    """Select the controlled joint's desired torque from the inverse
    dynamics controller."""
    selector = Selec_of_vector('selecDdpTauDes')
    plug(robot.inv_dyn.tau_des, selector.sin)
    selector.selec(conf.controlled_joint, conf.controlled_joint + 1)
    return selector
def create_torque_des_selector(robot, conf):
    """Select elements [0, 31) of the torque controller output."""
    selector = Selec_of_vector('selecDdpTorqueDes')
    plug(robot.torque_ctrl.u, selector.sin)
    selector.selec(0, 31)
    return selector
def create_torque_des_selector2(robot, conf):
    """Select element [31, 32) of the torque controller output."""
    selector = Selec_of_vector('selecDdpTorqueDes2')
    plug(robot.torque_ctrl.u, selector.sin)
    selector.selec(31, 32)
    return selector
def create_signal_mixer(robot, conf):
    """Build a two-input vector mixer: the default stream comes from
    ``robot.torque_des_selec_ddp`` and stream 1 (the DDP controller torque)
    overrides element [0, 1) via ``addSelec``."""
    mixer = Mix_of_vector('mix')
    mixer.setSignalNumber(2)
    plug(robot.torque_des_selec_ddp.sout, mixer.default)
    plug(robot.ddp_ctrl.tau, mixer.sin1)
    # Take only the first element from stream 1
    mixer.addSelec(1, 0, 1)
    return mixer
def create_base_estimator(robot, dt, conf, robot_name="robot"):
    """Create and initialize the floating-base estimator entity.

    Wires filtered foot force/torque signals (and their derivatives),
    encoder positions/velocities, and filtered IMU signals into a
    ``BaseEstimator``, applies the gains and geometry from ``conf``, then
    calls ``init(dt, robot_name)`` and returns the entity.
    """
    from dynamic_graph.sot.torque_control.base_estimator import BaseEstimator
    base_estimator = BaseEstimator('base_estimator');
    plug(robot.encoders.sout, base_estimator.joint_positions);
    #plug(robot.device.forceRLEG, base_estimator.forceRLEG);
    #plug(robot.device.forceLLEG, base_estimator.forceLLEG);
    # Feet F/T inputs come from the low-pass filters rather than the raw
    # device signals (commented out above).
    plug(robot.filters.ft_LF_filter.x_filtered, base_estimator.forceLLEG)
    plug(robot.filters.ft_RF_filter.x_filtered, base_estimator.forceRLEG)
    plug(robot.filters.ft_LF_filter.dx, base_estimator.dforceLLEG)
    plug(robot.filters.ft_RF_filter.dx, base_estimator.dforceRLEG)
    plug(robot.filters.estimator_kin.dx, base_estimator.joint_velocities);
    plug(robot.imu_filter.imu_quat, base_estimator.imu_quaternion);
    #plug(robot.imu_offset_compensation.accelerometer_out, base_estimator.accelerometer);
    #plug(robot.imu_offset_compensation.gyrometer_out, base_estimator.gyroscope);
    plug(robot.filters.gyro_filter.x_filtered, base_estimator.gyroscope);
    plug(robot.filters.acc_filter.x_filtered, base_estimator.accelerometer);
    base_estimator.K_fb_feet_poses.value = conf.K_fb_feet_poses;
    # w_lf_in/w_rf_in are treated as optional configuration values.
    # NOTE(review): the bare except silently swallows any error here, not
    # just a missing attribute -- consider `except AttributeError`.
    try:
        base_estimator.w_lf_in.value = conf.w_lf_in;
        base_estimator.w_rf_in.value = conf.w_rf_in;
    except:
        pass;
    base_estimator.set_imu_weight(conf.w_imu);
    base_estimator.set_stiffness_right_foot(conf.K);
    base_estimator.set_stiffness_left_foot(conf.K);
    base_estimator.set_zmp_std_dev_right_foot(conf.std_dev_zmp)
    base_estimator.set_zmp_std_dev_left_foot(conf.std_dev_zmp)
    base_estimator.set_normal_force_std_dev_right_foot(conf.std_dev_fz)
    base_estimator.set_normal_force_std_dev_left_foot(conf.std_dev_fz)
    base_estimator.set_zmp_margin_right_foot(conf.zmp_margin)
    base_estimator.set_zmp_margin_left_foot(conf.zmp_margin)
    base_estimator.set_normal_force_margin_right_foot(conf.normal_force_margin)
    base_estimator.set_normal_force_margin_left_foot(conf.normal_force_margin)
    base_estimator.set_right_foot_sizes(conf.RIGHT_FOOT_SIZES)
    base_estimator.set_left_foot_sizes(conf.LEFT_FOOT_SIZES)
    base_estimator.init(dt, robot_name);
    return base_estimator;
def create_imu_offset_compensation(robot, dt):
    """Build an ImuOffsetCompensation entity fed by the raw IMU signals.

    :param robot: robot wrapper exposing ``device.accelerometer`` and
                  ``device.gyrometer``.
    :param dt: control time step in seconds.
    :returns: the initialized ImuOffsetCompensation entity.
    """
    from dynamic_graph.sot.torque_control.imu_offset_compensation import ImuOffsetCompensation
    compensator = ImuOffsetCompensation('imu_offset_comp')
    # Wire the raw IMU measurements into the compensator.
    plug(robot.device.accelerometer, compensator.accelerometer_in)
    plug(robot.device.gyrometer, compensator.gyrometer_in)
    compensator.init(dt)
    return compensator
def create_imu_filter(robot, dt):
    """Create a Madgwick AHRS attitude filter for the IMU.

    The filter consumes the offset-compensated accelerometer and gyrometer
    signals and produces an orientation estimate.
    """
    from dynamic_graph.sot.core.madgwickahrs import MadgwickAHRS
    ahrs = MadgwickAHRS('imu_filter')
    ahrs.init(dt)
    # Feed the filter with the offset-compensated IMU outputs.
    plug(robot.imu_offset_compensation.accelerometer_out, ahrs.accelerometer)
    plug(robot.imu_offset_compensation.gyrometer_out, ahrs.gyroscope)
    return ahrs
def create_com_traj_gen(robot, dt):
    """Create a 3-D trajectory generator initialized at the current CoM."""
    traj_gen = NdTrajectoryGenerator("com_traj_gen")
    # Start from the CoM given by the dynamic model and trigger immediately.
    traj_gen.initial_value.value = robot.dynamic.com.value
    traj_gen.trigger.value = 1.0
    traj_gen.init(dt, 3)
    return traj_gen
def create_force_traj_gen(name, initial_value, dt):
    """Create a 6-D (wrench) trajectory generator with the given start value.

    :param name: entity name for the generator.
    :param initial_value: 6-D initial wrench value.
    :param dt: control time step in seconds.
    """
    generator = NdTrajectoryGenerator(name)
    generator.initial_value.value = initial_value
    generator.init(dt, 6)
    return generator
def create_waist_traj_gen(name, robot, dt):
    """Create an SE(3) trajectory generator initialized at the waist pose.

    The initial value is the root-joint placement encoded as the 3-D
    translation followed by the 3x3 rotation matrix flattened to 9 values.
    """
    traj_gen = SE3TrajectoryGenerator(name)
    waist_pose = robot.dynamic.data.oMi[robot.dynamic.model.getJointId('root_joint')]
    # 12-D encoding: translation (3) + flattened rotation matrix (9).
    start = np.concatenate((waist_pose.translation, waist_pose.rotation.reshape(9)))
    traj_gen.initial_value.value = tuple(start)
    traj_gen.trigger.value = 1.0
    traj_gen.init(dt)
    return traj_gen
def create_trajectory_switch():
    """Create the latch used to trigger several trajectory generators at once."""
    return Latch("traj_sync")
def connect_synchronous_trajectories(switch, list_of_traj_gens):
    """Plug the switch output into the trigger of every given generator."""
    for generator in list_of_traj_gens:
        plug(switch.out, generator.trigger)
def create_free_flyer_locator(ent, robot_name="robot"):
    """Create a FreeFlyerLocator estimating the base pose from the feet.

    :param ent: entity container exposing ``device``, ``filters`` and
                (optionally) ``dynamic``.
    :param robot_name: robot name passed to the locator at init time.
    :returns: the initialized FreeFlyerLocator entity.
    """
    from dynamic_graph.sot.torque_control.free_flyer_locator import FreeFlyerLocator
    ff_locator = FreeFlyerLocator("ffLocator")
    plug(ent.device.robotState, ff_locator.base6d_encoders)
    plug(ent.filters.estimator_kin.dx, ff_locator.joint_velocities)
    try:
        # Best-effort: the dynamic entity may be absent in simulation.
        plug(ff_locator.base6dFromFoot_encoders, ent.dynamic.position)
    except Exception:  # was a bare except: keep SystemExit/KeyboardInterrupt fatal
        print("[WARNING] Could not connect to dynamic entity, probably because you are in simulation")
    ff_locator.init(robot_name)
    return ff_locator
def create_flex_estimator(robot, dt=0.001):
    """Create the HRP-2 flexibility estimator (IMU + force based), initially off."""
    from dynamic_graph.sot.application.state_observation.initializations.hrp2_model_base_flex_estimator_imu_force import HRP2ModelBaseFlexEstimatorIMUForce
    estimator = HRP2ModelBaseFlexEstimatorIMUForce(robot, useMocap=False, dt=dt)
    estimator.setOn(False)
    estimator.interface.setExternalContactPresence(False)
    # Only the two feet are declared in contact (order: lf, rf, lh, rh).
    estimator.interface.enabledContacts_lf_rf_lh_rh.value = (1, 1, 0, 0)
    # All velocity inputs come from the free-flyer locator.
    for velocity_sink in (estimator.leftFootVelocity.sin2,
                          estimator.rightFootVelocity.sin2,
                          estimator.inputVel.sin2,
                          estimator.DCom.sin2):
        plug(robot.ff_locator.v, velocity_sink)
    return estimator
def create_floatingBase(robot):
    """Create the entity mapping the flexibility estimate to a global base pose.

    Also builds a Selec_of_vector extracting the 6 base velocity components
    (without flexibility) from the free-flyer velocity.
    """
    from dynamic_graph.sot.application.state_observation.initializations.hrp2_model_base_flex_estimator_imu_force import FromLocalToGLobalFrame
    from dynamic_graph.sot.core import Selec_of_vector
    floating_base = FromLocalToGLobalFrame(robot.flex_est, "FloatingBase")
    plug(robot.ff_locator.freeflyer_aa, floating_base.sinPos)
    # Keep only the first 6 components of v: the base velocity.
    base_velocity = Selec_of_vector('base_vel_no_flex')
    plug(robot.ff_locator.v, base_velocity.sin)
    base_velocity.selec(0, 6)
    plug(base_velocity.sout, floating_base.sinVel)
    return floating_base
def create_position_controller(robot, gains, dt=0.001, robot_name="robot"):
    """Create and initialize a joint-space PID position controller.

    :param robot: robot wrapper with device, encoders and (optionally) a
                  trajectory generator.
    :param gains: object holding kp/kd/ki position gain tables keyed by the
                  rounded control period.
    :param dt: control time step in seconds (used to look up the gains).
    :param robot_name: name passed to the controller at init time.
    :returns: the initialized PositionController, plugged to device.control.
    """
    posCtrl = PositionController('pos_ctrl')
    # Gain tables are indexed by the (rounded) control period.
    posCtrl.Kp.value = tuple(gains.kp_pos[round(dt, 3)])
    posCtrl.Kd.value = tuple(gains.kd_pos[round(dt, 3)])
    posCtrl.Ki.value = tuple(gains.ki_pos[round(dt, 3)])
    posCtrl.dqRef.value = NJ * (0.0,)
    plug(robot.device.robotState, posCtrl.base6d_encoders)
    try:  # this works only in simulation
        #plug(robot.device.jointsVelocities, posCtrl.jointsVelocities);
        plug(robot.encoders_velocity.sout, posCtrl.jointsVelocities)
    except Exception:  # was a bare except
        # On the real robot fall back to the filtered velocity estimate.
        plug(robot.filters.estimator_kin.dx, posCtrl.jointsVelocities)
    plug(posCtrl.pwmDes, robot.device.control)
    try:
        # Best-effort: the trajectory generator may not exist yet.
        plug(robot.traj_gen.q, posCtrl.qRef)
    except Exception:  # was a bare except
        pass
    posCtrl.init(dt, robot_name)
    return posCtrl
def create_trajectory_generator(robot, dt=0.001, robot_name="robot"):
    """Create a joint trajectory generator plugged to the robot state."""
    traj_gen = JointTrajectoryGenerator("jtg")
    plug(robot.device.robotState, traj_gen.base6d_encoders)
    traj_gen.init(dt, robot_name)
    return traj_gen
def create_filters(robot, conf, motor_params, dt):
    """Create and initialize all numerical-differentiation / low-pass filters.

    Builds filters for the motor currents, the four force/torque sensors,
    the IMU accelerometer and gyrometer, and the encoder kinematics, plugs
    their inputs from the device / IMU compensation, and initializes each
    one with the delays from ``conf``.

    :param robot: robot wrapper exposing device signals and the IMU
        offset compensation entity.
    :param conf: configuration object providing DELAY_* constants.
    :param motor_params: motor parameters (only used by the commented-out
        force/torque estimator code below).
    :param dt: control time step in seconds.
    :returns: a Bunch holding all created filter entities.
    """
    filters = Bunch()
    # create low-pass filter for motor currents
    filters.current_filter = create_butter_lp_filter_Wn_05_N_3('current_filter', dt, NJ)
    #filters.current_filter = NumericalDifference("current_filter");
    # Force/torque sensor filters (feet and hands), IMU filters, and the
    # encoder-based kinematics estimator.
    filters.ft_RF_filter = NumericalDifference("ft_RF_filter");
    filters.ft_LF_filter = NumericalDifference("ft_LF_filter");
    filters.ft_RH_filter = NumericalDifference("ft_RH_filter");
    filters.ft_LH_filter = NumericalDifference("ft_LH_filter");
    filters.acc_filter = NumericalDifference("dv_filter");
    filters.gyro_filter = NumericalDifference("w_filter");
    filters.estimator_kin = NumericalDifference("estimator_kin");
    # Plug raw device / IMU signals into the filter inputs.
    plug(robot.encoders.sout, filters.estimator_kin.x);
    plug(robot.imu_offset_compensation.accelerometer_out, filters.acc_filter.x);
    plug(robot.imu_offset_compensation.gyrometer_out, filters.gyro_filter.x);
    plug(robot.device.forceRLEG, filters.ft_RF_filter.x);
    plug(robot.device.forceLLEG, filters.ft_LF_filter.x);
    plug(robot.device.forceRARM, filters.ft_RH_filter.x);
    plug(robot.device.forceLARM, filters.ft_LH_filter.x);
    plug(robot.device.currents, filters.current_filter.x);
    # NOTE: the two triple-quoted blocks below are disabled force/torque
    # estimator wiring kept for reference; they are no-op string statements.
    '''plug(filters.acc_filter.x_filtered, estimator_ft.accelerometer);
    plug(filters.gyro_filter.x_filtered, estimator_ft.gyro);
    plug(filters.gyro_filter.dx, estimator_ft.dgyro);
    plug(filters.ft_RF_filter.x_filtered, estimator_ft.ftSensRightFoot);
    plug(filters.ft_LF_filter.x_filtered, estimator_ft.ftSensLeftFoot);
    plug(filters.ft_RH_filter.x_filtered, estimator_ft.ftSensRightHand);
    plug(filters.ft_LH_filter.x_filtered, estimator_ft.ftSensLeftHand);
    plug(filters.current_filter.x_filtered, estimator_ft.current);'''
    '''plug(filters.estimator_kin.x_filtered, estimator_ft.q_filtered);
    plug(filters.estimator_kin.dx, estimator_ft.dq_filtered);
    plug(filters.estimator_kin.ddx, estimator_ft.ddq_filtered);
    try:
        plug(robot.traj_gen.dq, estimator_ft.dqRef);
        plug(robot.traj_gen.ddq, estimator_ft.ddqRef);
    except:
        pass;
    estimator_ft.wCurrentTrust.value = tuple(NJ*[conf.CURRENT_TORQUE_ESTIMATION_TRUST,])
    estimator_ft.saturationCurrent.value = tuple(NJ*[conf.SATURATION_CURRENT,])
    estimator_ft.motorParameterKt_p.value = tuple(motor_params.Kt_p)
    estimator_ft.motorParameterKt_n.value = tuple(motor_params.Kt_n)
    estimator_ft.motorParameterKf_p.value = tuple(motor_params.Kf_p)
    estimator_ft.motorParameterKf_n.value = tuple(motor_params.Kf_n)
    estimator_ft.motorParameterKv_p.value = tuple(motor_params.Kv_p)
    estimator_ft.motorParameterKv_n.value = tuple(motor_params.Kv_n)
    estimator_ft.motorParameterKa_p.value = tuple(motor_params.Ka_p)
    estimator_ft.motorParameterKa_n.value = tuple(motor_params.Ka_n)
    estimator_ft.rotor_inertias.value = motor_params.ROTOR_INERTIAS;
    estimator_ft.gear_ratios.value = motor_params.GEAR_RATIOS;
    estimator_ft.init(True);'''
    # Initialize each filter: (dt, signal size, delay in seconds, order).
    #filters.current_filter.init(dt,NJ, conf.DELAY_CURRENT*dt,1)
    filters.ft_RF_filter.init(dt, 6, conf.DELAY_FORCE*dt,1)
    filters.ft_LF_filter.init(dt, 6, conf.DELAY_FORCE*dt,1)
    filters.ft_RH_filter.init(dt, 6, conf.DELAY_FORCE*dt,1)
    filters.ft_LH_filter.init(dt, 6, conf.DELAY_FORCE*dt,1)
    filters.gyro_filter.init(dt, 3, conf.DELAY_GYRO*dt,1)
    filters.acc_filter.init(dt, 3, conf.DELAY_ACC*dt,1)
    filters.estimator_kin.init(dt,NJ, conf.DELAY_ENC*dt,2);
    return filters;
def create_torque_controller(robot, conf, motor_params, dt=0.001, robot_name="robot"):
    """Create and initialize the joint torque controller.

    :param robot: robot wrapper with encoders, filters and device signals.
    :param conf: configuration object with torque/velocity PID gains.
    :param motor_params: motor model parameters (Kt/Kf/Kv/Ka, +/- directions).
    :param dt: control time step in seconds.
    :param robot_name: name passed to the controller at init time.
    :returns: the initialized JointTorqueController entity.
    """
    torque_ctrl = JointTorqueController("jtc")
    # Measured joint state: positions from encoders, derivatives from filters.
    plug(robot.encoders.sout, torque_ctrl.jointsPositions)
    plug(robot.filters.estimator_kin.dx, torque_ctrl.jointsVelocities)
    plug(robot.filters.estimator_kin.ddx, torque_ctrl.jointsAccelerations)
    #plug(robot.estimator_ft.jointsTorques, torque_ctrl.jointsTorques);
    plug(robot.device.ptorque, torque_ctrl.jointsTorques)  # New
    # Desired signals default to zero until a higher-level controller plugs in.
    zeros = NJ * (0.0,)
    torque_ctrl.jointsTorquesDesired.value = zeros
    torque_ctrl.jointsTorquesDerivative.value = zeros
    torque_ctrl.dq_des.value = zeros
    # Torque PID gains and velocity-loop gains.
    torque_ctrl.KpTorque.value = tuple(conf.k_p_torque)
    torque_ctrl.KdTorque.value = tuple(conf.k_d_torque)
    torque_ctrl.KiTorque.value = tuple(conf.k_i_torque)
    torque_ctrl.KdVel.value = tuple(conf.k_d_vel)
    torque_ctrl.KiVel.value = tuple(conf.k_i_vel)
    torque_ctrl.torque_integral_saturation.value = tuple(conf.torque_integral_saturation)
    torque_ctrl.coulomb_friction_compensation_percentage.value = NJ * (conf.COULOMB_FRICTION_COMPENSATION_PERCENTAGE,)
    # Motor model parameters, positive and negative velocity directions.
    for param in ('Kt_p', 'Kt_n', 'Kf_p', 'Kf_n', 'Kv_p', 'Kv_n', 'Ka_p', 'Ka_n'):
        getattr(torque_ctrl, 'motorParameter' + param).value = tuple(getattr(motor_params, param))
    torque_ctrl.polySignDq.value = NJ * (conf.poly_sign_dq,)
    torque_ctrl.init(dt, robot_name)
    return torque_ctrl
def create_balance_controller(robot, conf, motor_params, dt, robot_name='robot'):
    """Create and initialize the inverse-dynamics balance controller.

    Plugs base state, foot wrenches and task reference signals into the
    controller, then sets all task gains, weights and contact parameters
    from ``conf``.

    :param robot: robot wrapper exposing estimators, trajectory generators
                  and the torque controller.
    :param conf: configuration object with gains, weights and contact data.
    :param motor_params: motor parameters (gear ratios, rotor inertias).
    :param dt: control time step in seconds.
    :param robot_name: name passed to the controller at init time.
    :returns: the initialized InverseDynamicsBalanceController entity.
    """
    from dynamic_graph.sot.torque_control.inverse_dynamics_balance_controller import InverseDynamicsBalanceController
    ctrl = InverseDynamicsBalanceController("invDynBalCtrl");
    try:
        # Prefer the base estimator; fall back to the free-flyer locator.
        plug(robot.base_estimator.q, ctrl.q);
        plug(robot.base_estimator.v, ctrl.v);
    except Exception:  # was a bare except: keep SystemExit/KeyboardInterrupt fatal
        plug(robot.ff_locator.base6dFromFoot_encoders, ctrl.q);
        plug(robot.ff_locator.v, ctrl.v);
    try:
        # Expose the desired joint accelerations (drop the 6 base components).
        from dynamic_graph.sot.core import Selec_of_vector
        robot.ddq_des = Selec_of_vector('ddq_des')
        plug(ctrl.dv_des, robot.ddq_des.sin);
        robot.ddq_des.selec(6,NJ+6);
        #plug(robot.ddq_des.sout, robot.estimator_ft.ddqRef);
    except Exception:  # was a bare except
        print("WARNING: Could not connect dv_des from BalanceController to ForceTorqueEstimator")
    #plug(robot.estimator_ft.contactWrenchRightSole, ctrl.wrench_right_foot);
    #plug(robot.estimator_ft.contactWrenchLeftSole, ctrl.wrench_left_foot);
    plug(robot.device.forceRLEG, ctrl.wrench_right_foot); # New
    plug(robot.device.forceLLEG, ctrl.wrench_left_foot); # New
    plug(ctrl.tau_des, robot.torque_ctrl.jointsTorquesDesired);
    #plug(ctrl.dq_admittance, robot.torque_ctrl.dq_des);
    # robot.torque_ctrl.dq_des.value = NJ*(0.0,);
    #plug(ctrl.tau_des, robot.estimator_ft.tauDes);
    # Foot / hand task references: fixed at the current pose, zero vel/acc.
    plug(ctrl.right_foot_pos, robot.rf_traj_gen.initial_value);
    ctrl.rf_ref_pos.value = robot.rf_traj_gen.initial_value.value
    ctrl.rf_ref_vel.value = 12*(0.0,)
    ctrl.rf_ref_acc.value = 12*(0.0,)
    # plug(robot.rf_traj_gen.x, ctrl.rf_ref_pos);
    # plug(robot.rf_traj_gen.dx, ctrl.rf_ref_vel);
    # plug(robot.rf_traj_gen.ddx, ctrl.rf_ref_acc);
    plug(ctrl.left_foot_pos, robot.lf_traj_gen.initial_value);
    ctrl.lf_ref_pos.value = robot.lf_traj_gen.initial_value.value
    ctrl.lf_ref_vel.value = 12*(0.0,)
    ctrl.lf_ref_acc.value = 12*(0.0,)
    # plug(robot.lf_traj_gen.x, ctrl.lf_ref_pos);
    # plug(robot.lf_traj_gen.dx, ctrl.lf_ref_vel);
    # plug(robot.lf_traj_gen.ddx, ctrl.lf_ref_acc);
    plug(ctrl.right_hand_pos, robot.rh_traj_gen.initial_value);
    ctrl.rh_ref_pos.value = robot.rh_traj_gen.initial_value.value
    ctrl.rh_ref_vel.value = 12*(0.0,)
    ctrl.rh_ref_acc.value = 12*(0.0,)
    # plug(robot.rh_traj_gen.x, ctrl.rh_ref_pos);
    # plug(robot.rh_traj_gen.dx, ctrl.rh_ref_vel);
    # plug(robot.rh_traj_gen.ddx, ctrl.rh_ref_acc);
    plug(ctrl.left_hand_pos, robot.lh_traj_gen.initial_value);
    ctrl.lh_ref_pos.value = robot.lh_traj_gen.initial_value.value
    ctrl.lh_ref_vel.value = 12*(0.0,)
    ctrl.lh_ref_acc.value = 12*(0.0,)
    # plug(robot.lh_traj_gen.x, ctrl.lh_ref_pos);
    # plug(robot.lh_traj_gen.dx, ctrl.lh_ref_vel);
    # plug(robot.lh_traj_gen.ddx, ctrl.lh_ref_acc);
    # Posture / CoM / waist references, fixed at the current configuration.
    ctrl.posture_ref_pos.value = robot.halfSitting[7:]
    ctrl.posture_ref_vel.value = 32*(0.0,)
    ctrl.posture_ref_acc.value = 32*(0.0,)
    ctrl.com_ref_pos.value = robot.dynamic.com.value
    ctrl.com_ref_vel.value = 3*(0.0,)
    ctrl.com_ref_acc.value = 3*(0.0,)
    ctrl.waist_ref_pos.value = robot.waist_traj_gen.initial_value.value
    ctrl.waist_ref_vel.value = 12*(0.0,)
    ctrl.waist_ref_acc.value = 12*(0.0,)
    # plug(robot.traj_gen.q, ctrl.posture_ref_pos);
    # plug(robot.traj_gen.dq, ctrl.posture_ref_vel);
    # plug(robot.traj_gen.ddq, ctrl.posture_ref_acc);
    # plug(robot.com_traj_gen.x, ctrl.com_ref_pos);
    # plug(robot.com_traj_gen.dx, ctrl.com_ref_vel);
    # plug(robot.com_traj_gen.ddx, ctrl.com_ref_acc);
    # plug(robot.waist_traj_gen.x, ctrl.waist_ref_pos);
    # plug(robot.waist_traj_gen.dx, ctrl.waist_ref_vel);
    # plug(robot.waist_traj_gen.ddx, ctrl.waist_ref_acc);
    # plug(robot.rf_force_traj_gen.x, ctrl.f_ref_right_foot);
    # plug(robot.lf_force_traj_gen.x, ctrl.f_ref_left_foot);
    # rather than giving to the controller the values of gear ratios and rotor inertias
    # it is better to compute directly their product in python and pass the result
    # to the C++ entity, because otherwise we get a loss of precision
    # ctrl.rotor_inertias.value = conf.ROTOR_INERTIAS;
    # ctrl.gear_ratios.value = conf.GEAR_RATIOS;
    ctrl.rotor_inertias.value = tuple([g*g*r for (g,r) in
                                       zip(motor_params.GEAR_RATIOS, motor_params.ROTOR_INERTIAS)])
    ctrl.gear_ratios.value = NJ*(1.0,);
    # Contact model and friction parameters.
    ctrl.contact_normal.value = conf.FOOT_CONTACT_NORMAL;
    ctrl.contact_points.value = conf.RIGHT_FOOT_CONTACT_POINTS;
    ctrl.f_min.value = conf.fMin;
    ctrl.f_max_right_foot.value = conf.fMax;
    ctrl.f_max_left_foot.value = conf.fMax;
    ctrl.mu.value = conf.mu[0];
    ctrl.weight_contact_forces.value = (1e2, 1e2, 1e0, 1e3, 1e3, 1e3);
    # Task gains.
    ctrl.kp_com.value = 3*(conf.kp_com,);
    ctrl.kd_com.value = 3*(conf.kd_com,);
    ctrl.kp_constraints.value = 6*(conf.kp_constr,);
    ctrl.kd_constraints.value = 6*(conf.kd_constr,);
    ctrl.kp_feet.value = 6*(conf.kp_feet,);
    ctrl.kd_feet.value = 6*(conf.kd_feet,);
    ctrl.kp_hands.value = 6*(conf.kp_hands,);
    ctrl.kd_hands.value = 6*(conf.kd_hands,);
    ctrl.kp_posture.value = conf.kp_posture;
    ctrl.kd_posture.value = conf.kd_posture;
    ctrl.kp_pos.value = conf.kp_pos;
    ctrl.kd_pos.value = conf.kd_pos;
    ctrl.kp_waist.value = 6*(conf.kp_waist,);
    ctrl.kd_waist.value = 6*(conf.kd_waist,);
    # Task weights.
    ctrl.w_com.value = conf.w_com;
    ctrl.w_feet.value = conf.w_feet;
    ctrl.w_hands.value = conf.w_hands;
    ctrl.w_forces.value = conf.w_forces;
    ctrl.w_posture.value = conf.w_posture;
    ctrl.w_base_orientation.value = conf.w_base_orientation;
    ctrl.w_torques.value = conf.w_torques;
    ctrl.init(dt, robot_name);
    return ctrl;
def create_simple_inverse_dyn_controller(robot, conf, dt, robot_name='robot'):
from dynamic_graph.sot.torque_control.simple_inverse_dyn import SimpleInverseDyn
ctrl = SimpleInverseDyn("invDynCtrl")
q = Mix_of_vector('selecJointConf')
q.setSignalNumber(2);
plug(robot.device.robotState, q.default)
q.sin1.value = robot.halfSitting
q.addSelec(1, 0, 6)
plug(q.sout, ctrl.q)
plug(robot.device.robotVelocity, ctrl.v)
# plug(robot.base_estimator.q, ctrl.q)
# plug(robot.base_estimator.v, ctrl.v)
# plug(robot.device.robotState, ctrl.q)
# plug(robot.device.robotVelocity, ctrl.v)
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.