input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
try:
sf = c.parent_block().constraint_transformed_scaling_factor.get(c, default)
except AttributeError:
sf = default # when there is no suffix
return sf
def __unset_constraint_transform_applied_scaling_factor(c):
    """PRIVATE FUNCTION: Remove the scale factor recorded as having been used
    to transform constraint c, e.g. when a constraint transformation is being
    undone.

    A missing suffix (AttributeError) or a missing entry for c (KeyError) are
    both silently ignored -- there is simply nothing to clean up.
    """
    try:
        suffix = c.parent_block().constraint_transformed_scaling_factor
        del suffix[c]
    except (AttributeError, KeyError):
        pass  # no suffix or no recorded factor -- nothing to undo
def constraint_scaling_transform(c, s, overwrite=True):
    """This transforms a constraint by the argument s. The scaling factor
    applies to the original constraint (e.g. if one were to call this twice in
    a row for a constraint with a scaling factor of 2, the original constraint
    would still only be scaled by a factor of 2).

    Args:
        c: Pyomo constraint
        s: scale factor applied to the constraint as originally written
        overwrite: overwrite existing scaling factors if present (default=True)

    Returns:
        None
    """
    if not isinstance(c, _ConstraintData):
        raise TypeError(f"{c} is not a constraint or is an indexed constraint")
    # Scale factor previously applied to this constraint (None if it has
    # never been transformed).
    st = get_constraint_transform_applied_scaling_factor(c, default=None)
    if not overwrite and st is not None:
        # Existing scaling factor and overwrite False, do nothing
        return
    if st is None:
        # If no existing scaling factor, use value of 1
        st = 1
    # Divide out the previously applied factor so the net scaling relative
    # to the ORIGINAL constraint is exactly s.
    v = s/st
    c.set_value(
        (__none_mult(c.lower, v), __none_mult(c.body, v), __none_mult(c.upper, v)))
    # Record s so a later call (or undo) knows the cumulative factor applied.
    __set_constraint_transform_applied_scaling_factor(c, s)
def constraint_scaling_transform_undo(c):
    """This undoes the scaling transform previously applied to a constraint.

    Args:
        c: Pyomo constraint

    Returns:
        None
    """
    if not isinstance(c, _ConstraintData):
        raise TypeError(f"{c} is not a constraint or is an indexed constraint")
    v = get_constraint_transform_applied_scaling_factor(c)
    if v is None:
        return # hasn't been transformed, so nothing to do.
    # Multiply by 1/v to reverse the earlier multiplication by v.
    c.set_value(
        (__none_mult(c.lower, 1/v), __none_mult(c.body, 1/v), __none_mult(c.upper, 1/v)))
    # Forget the recorded factor so the constraint reads as untransformed.
    __unset_constraint_transform_applied_scaling_factor(c)
def unscaled_variables_generator(blk, descend_into=True, include_fixed=False):
    """Yield variables on a block that have no scaling factor set.

    Args:
        blk: block to search for unscaled variables
        descend_into: if True, also search sub-blocks
        include_fixed: if True, also report fixed variables

    Yields:
        variables with no scale factor
    """
    for var in blk.component_data_objects(pyo.Var, descend_into=descend_into):
        if not include_fixed and var.fixed:
            continue  # fixed variables are skipped unless requested
        if get_scaling_factor(var) is None:
            yield var
def unscaled_constraints_generator(blk, descend_into=True):
    """Yield active constraints on a block that have neither a scaling factor
    nor a recorded scaling-transform factor.

    Args:
        blk: block to search for unscaled constraints
        descend_into: if True, also search sub-blocks

    Yields:
        constraints with no scale factor
    """
    for con in blk.component_data_objects(
            pyo.Constraint, active=True, descend_into=descend_into):
        no_factor = get_scaling_factor(con) is None
        no_transform_factor = (
            get_constraint_transform_applied_scaling_factor(con) is None)
        if no_factor and no_transform_factor:
            yield con
def constraints_with_scale_factor_generator(blk, descend_into=True):
    """Yield (constraint, scale factor) pairs for active constraints that have
    a scaling factor set; the constraint may or may not have been transformed.

    Args:
        blk: block to search
        descend_into: if True, also search sub-blocks

    Yields:
        constraint with a scale factor, scale factor
    """
    for con in blk.component_data_objects(
            pyo.Constraint, active=True, descend_into=descend_into):
        factor = get_scaling_factor(con)
        if factor is None:
            continue
        yield con, factor
def badly_scaled_var_generator(
        blk, large=1e4, small=1e-3, zero=1e-10, descend_into=True, include_fixed=False):
    """This provides a rough check for variables with poor scaling based on
    their current scale factors and values. For each potentially poorly scaled
    variable it returns the var and its current scaled value.

    Args:
        blk: pyomo block
        large: Magnitude that is considered to be too large
        small: Magnitude that is considered to be too small
        zero: Magnitude that is considered to be zero, variables with a value of
            zero are okay, and not reported.
        descend_into: if True, also check variables on sub-blocks
        include_fixed: if True, also check fixed variables

    Yields:
        variable data object, current absolute value of scaled value
    """
    for v in blk.component_data_objects(pyo.Var, descend_into=descend_into):
        if v.fixed and not include_fixed:
            continue
        # Skip variables with no current value rather than raising.
        val = pyo.value(v, exception=False)
        if val is None:
            continue
        # Missing scale factor is treated as 1 (i.e. unscaled).
        sf = get_scaling_factor(v, default=1)
        sv = abs(val * sf)  # scaled value
        if sv > large:
            yield v, sv
        elif sv < zero:
            # effectively-zero values are considered fine, not reported
            continue
        elif sv < small:
            yield v, sv
def constraint_autoscale_large_jac(
    m,
    ignore_constraint_scaling=False,
    ignore_variable_scaling=False,
    max_grad=100,
    min_scale=1e-6,
    no_scale=False
):
    """Automatically scale constraints based on the Jacobian. This function
    imitates Ipopt's default constraint scaling. This scales constraints down
    to avoid extremely large values in the Jacobian. This function also returns
    the unscaled and scaled Jacobian matrixes and the Pynumero NLP which can be
    used to identify the constraints and variables corresponding to the rows
    and columns.

    Args:
        m: model to scale
        ignore_constraint_scaling: ignore existing constraint scaling
        ignore_variable_scaling: ignore existing variable scaling
        max_grad: maximum value in Jacobian after scaling, subject to minimum
            scaling factor restriction.
        min_scale: minimum scaling factor allowed, keeps constraints from being
            scaled too much.
        no_scale: just calculate the Jacobian and scaled Jacobian, don't scale
            anything

    Returns:
        unscaled Jacobian CSR form, scaled Jacobian CSR form, Pynumero NLP
    """
    # Pynumero requires an objective, but I don't, so let's see if we have one
    n_obj = 0
    for c in m.component_data_objects(pyo.Objective, active=True):
        n_obj += 1
    # Add an objective if there isn't one
    if n_obj == 0:
        dummy_objective_name = unique_component_name(m, "objective")
        setattr(m, dummy_objective_name, pyo.Objective(expr=0))
    # Create NLP and calculate the objective
    nlp = PyomoNLP(m)
    jac = nlp.evaluate_jacobian().tocsr()
    # Get lists of variables and constraints to translate Jacobian indexes;
    # save them on the NLP for later, since generating them seems to take a while
    nlp.clist = clist = nlp.get_pyomo_constraints()
    nlp.vlist = vlist = nlp.get_pyomo_variables()
    # Create a scaled Jacobian to account for variable scaling, for now ignore
    # constraint scaling
    jac_scaled = jac.copy()
    for i, c in enumerate(clist):
        for j in jac_scaled[i].indices:
            v = vlist[j]
            if ignore_variable_scaling:
                sv = 1
            else:
                sv = get_scaling_factor(v, default=1)
            # Column j corresponds to variable v; dividing by the variable
            # scale factor gives the gradient w.r.t. the scaled variable.
            jac_scaled[i,j] = jac_scaled[i,j]/sv
    # calculate constraint scale factors
    for i, c in enumerate(clist):
        sc = get_scaling_factor(c, default=1)
        if not no_scale:
            if (ignore_constraint_scaling or get_scaling_factor(c) is None):
                # No user-supplied factor: pick one that caps the largest
                # absolute entry in this row at max_grad, but never go below
                # min_scale.
                row = jac_scaled[i]
                for d in row.indices:
                    row[0,d] = abs(row[0,d])
                mg = row.max()
                if mg > max_grad:
                    sc = max(min_scale, max_grad/mg)
                set_scaling_factor(c, sc)
        for j in jac_scaled[i].indices:
            # update the scaled jacobian
            jac_scaled[i,j] = jac_scaled[i,j]*sc
    # delete dummy objective
    if n_obj == 0:
        delattr(m, dummy_objective_name)
    return jac, jac_scaled, nlp
def get_jacobian(m, scaled=True):
    """Return the Jacobian matrix at the current model values.

    Also returns the Pynumero NLP, which can be used to identify the
    constraints and variables corresponding to the rows and columns.

    Args:
        m: model to get Jacobian from
        scaled: if True return scaled Jacobian, else get unscaled

    Returns:
        Jacobian matrix in Scipy CSR format, Pynumero nlp
    """
    # no_scale=True: only compute; never modify the model's scale factors.
    unscaled_jac, scaled_jac, nlp = constraint_autoscale_large_jac(
        m, no_scale=True)
    chosen = scaled_jac if scaled else unscaled_jac
    return chosen, nlp
def extreme_jacobian_entries(
        m=None, scaled=True, large=1e4, small=1e-4, zero=1e-10, jac=None, nlp=None):
    """
    Show very large and very small Jacobian entries.

    Args:
        m: model (used only if jac/nlp are not supplied)
        scaled: if true use scaled Jacobian
        large: >= to this value is considered large
        small: <= to this and > zero is considered small
        zero: <= to this is treated as zero and not reported
        jac: (optional) previously computed Jacobian
        nlp: (optional) previously computed Pynumero NLP

    Returns:
        (list of tuples), Jacobian entry, Constraint, Variable
    """
    if jac is None or nlp is None:
        jac, nlp = get_jacobian(m, scaled)
    extreme = []
    for i, con in enumerate(nlp.clist):
        for j in jac[i].indices:
            entry = abs(jac[i, j])
            # Report entries that are suspiciously small (but nonzero) or
            # suspiciously large.
            too_small = zero < entry <= small
            if too_small or entry >= large:
                extreme.append((entry, con, nlp.vlist[j]))
    return extreme
def jacobian_cond(m=None, scaled=True, ord=None, pinv=False, jac=None):
    """
    Get the condition number of the scaled or unscaled Jacobian matrix of a model.

    Args:
        m: calculate the condition number of the Jacobian from this model.
        scaled: if True use scaled Jacobian, else use unscaled
        ord: norm order, None = Frobenius, see scipy.sparse.linalg.norm for more
        pinv: Use pseudoinverse, works for non-square matrixes
        jac: (optional) previously calculated Jacobian

    Returns:
        (float) Condition number
    """
    if jac is None:
        jac, nlp = get_jacobian(m, scaled)
    # CSC format is required by the sparse inverse routine below.
    jac = jac.tocsc()
    if jac.shape[0] != jac.shape[1] and not pinv:
        # A non-square Jacobian has no inverse; fall back to pseudoinverse.
        _log.warning("Nonsquare Jacobian using pseudo inverse")
        pinv = True
    if not pinv:
        # cond(J) = ||J|| * ||J^-1||
        jac_inv = spla.inv(jac)
        return spla.norm(jac, ord)*spla.norm(jac_inv, ord)
    else:
        # Dense pseudoinverse -- may be expensive for very large Jacobians.
        jac_inv = la.pinv(jac.toarray())
        return spla.norm(jac, ord)*la.norm(jac_inv, ord)
class CacheVars(object):
    """Context manager that records the values of a list of variables on
    entry and restores them on exit, typically around a solve or calculation
    that temporarily perturbs them.
    """
    def __init__(self, vardata_list):
        self.vars = vardata_list
        self.cache = [None for _ in self.vars]

    def __enter__(self):
        # Snapshot current values.
        self.cache = [var.value for var in self.vars]
        return self

    def __exit__(self, ex_type, ex_value, ex_traceback):
        # Restore the snapshot, even if an exception occurred.
        for var, saved in zip(self.vars, self.cache):
            var.set_value(saved)
class FlattenedScalingAssignment(object):
"""
A class to assist in the calculation of scaling factors when a
variable-constraint assignment can be constructed, especially when
the variables and constraints are all indexed by some common set(s).
"""
def __init__(self, scaling_factor, varconlist=None, nominal_index=()):
"""
Args:
scaling_factor: A Pyomo scaling_factor Suffix that will hold all
the scaling factors calculated
varconlist: A list of variable, constraint tuples. These variables
and constraints should be indexed by the same sets,
so | |
<reponame>gabemery/gammapy
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import abc
import logging
from subprocess import call
from tempfile import NamedTemporaryFile
from copy import deepcopy
from collections import OrderedDict, namedtuple
import numpy as np
from numpy.lib.arraypad import _validate_lengths
from ..extern import six
from astropy.io import fits
from astropy.coordinates import SkyCoord, Angle
from astropy.coordinates.angle_utilities import angular_separation
from astropy.convolution import Tophat2DKernel
from astropy import units as u
from astropy.nddata.utils import Cutout2D
from regions import PixCoord, PixelRegion, SkyRegion
from astropy.wcs import WCS, WcsError
from astropy.wcs.utils import pixel_to_skycoord, skycoord_to_pixel, proj_plane_pixel_scales
from ..utils.fits import SmartHDUList, fits_header_to_meta_dict
from ..utils.scripts import make_path
from ..utils.wcs import get_resampled_wcs
from ..image.utils import make_header
__all__ = ['SkyImage']
log = logging.getLogger(__name__)
_DEFAULT_WCS_ORIGIN = 0
_DEFAULT_WCS_MODE = 'all'
@six.add_metaclass(abc.ABCMeta)
class MapBase(object):
    """Map base class.

    This is just a temp solution to put code that's common
    between `SkyImage` and `SkyCube`.

    .. note::
        A new set of map and cube classes is being developed in `gammapy.maps`
        and long-term will replace the existing `gammapy.image.SkyImage` and
        `gammapy.cube.SkyCube` classes. Please consider trying out `gammapy.maps`
        and changing your scripts to use those new classes. See :ref:`maps`.
    """

    @property
    def is_mask(self):
        """Is this a mask (check values, not dtype)."""
        if self.data.dtype == bool:
            return True
        values = self.data
        # Every element must be exactly 0 or 1 to count as a mask.
        return ((values == 0) | (values == 1)).all()

    def _check_is_mask(self):
        if not self.is_mask:
            raise ValueError('This method is only available for masks.')
class SkyImage(MapBase):
"""Sky image.
.. note::
A new set of map and cube classes is being developed in `gammapy.maps`
and long-term will replace the existing `gammapy.image.SkyImage` and
`gammapy.cube.SkyCube` classes. Please consider trying out `gammapy.maps`
and changing your scripts to use those new classes. See :ref:`maps`.
For further information, see :ref:`image`.
Parameters
----------
name : str
Name of the image.
data : `~numpy.ndarray`
Data array.
wcs : `~astropy.wcs.WCS`
WCS transformation object.
unit : str
String specifying the data units.
meta : `~collections.OrderedDict`
Dictionary to store meta data.
"""
_AxisIndex = namedtuple('AxisIndex', ['x', 'y'])
_ax_idx = _AxisIndex(x=1, y=0)
def __init__(self, name=None, data=None, wcs=None, unit='', meta=None):
    """Store image name, data, WCS, unit and meta data."""
    # TODO: validate inputs
    self.name = name
    self.data = data
    self.wcs = wcs
    # Always keep meta as an OrderedDict, copying any mapping passed in.
    self.meta = OrderedDict() if meta is None else OrderedDict(meta)
    self.unit = u.Unit(unit)
@property
def center_pix(self):
    """Center pixel coordinate of the image (`~regions.PixCoord`)."""
    nx = self.data.shape[self._ax_idx.x]
    ny = self.data.shape[self._ax_idx.y]
    # Pixel centers run 0 .. n-1, so the center is (n - 1) / 2.
    return PixCoord(x=0.5 * (nx - 1), y=0.5 * (ny - 1))
@property
def center(self):
    """Center sky coordinate of the image (`~astropy.coordinates.SkyCoord`)."""
    pix = self.center_pix
    coord = SkyCoord.from_pixel(
        xp=pix.x,
        yp=pix.y,
        wcs=self.wcs,
        origin=_DEFAULT_WCS_ORIGIN,
        mode=_DEFAULT_WCS_MODE,
    )
    return coord
@classmethod
def read(cls, filename, hdu=None, **kwargs):
    """Read image from FITS file (`SkyImage`).

    Parameters are passed to `~gammapy.utils.fits.SmartHDUList`.
    """
    hdus = SmartHDUList.open(filename, **kwargs)
    image_hdu = hdus.get_hdu(hdu=hdu, hdu_type='image')
    return cls.from_image_hdu(image_hdu)
def write(self, filename, *args, **kwargs):
    """Write image to FITS file.

    Parameters
    ----------
    filename : str
        Name of the FITS file.
    *args : list
        Arguments passed to `~astropy.io.fits.ImageHDU.writeto`.
    **kwargs : dict
        Keyword arguments passed to `~astropy.io.fits.ImageHDU.writeto`.
    """
    path = str(make_path(filename))
    self.to_image_hdu().writeto(path, *args, **kwargs)
@classmethod
def from_image_hdu(cls, image_hdu):
    """
    Create image from ImageHDU.

    Parameters
    ----------
    image_hdu : `astropy.io.fits.ImageHDU`
        Source image HDU.

    Examples
    --------
    >>> from astropy.io import fits
    >>> from gammapy.image import SkyImage
    >>> hdu_list = fits.open('data.fits')
    >>> image = SkyImage.from_image_hdu(hdu_list['myimage'])
    """
    header = image_hdu.header
    # Prefer HDUNAME, fall back to EXTNAME (either may be absent).
    name = header.get('HDUNAME')
    if name is None:
        name = header.get('EXTNAME')
    try:
        # Validate unit string
        unit = u.Unit(header['BUNIT'], format='fits').to_string()
    except (KeyError, ValueError):
        unit = ''  # missing or unparsable BUNIT
    meta = fits_header_to_meta_dict(header)
    obj = cls(name, image_hdu.data, WCS(header), unit, meta)
    # For now, we give the user a copy of the header as a
    # private, undocumented attribute, because it's sometimes
    # useful to have.
    obj._header = header
    return obj
def to_image_hdu(self):
    """
    Convert image to a `~astropy.io.fits.PrimaryHDU`.

    Returns
    -------
    hdu : `~astropy.io.fits.PrimaryHDU`
        Primary image hdu object.
    """
    header = fits.Header()
    header.update(self.meta)
    if self.wcs is not None:
        # Re-derive the WCS keywords, since the WCS may have changed.
        header.update(self.wcs.to_header())
    if self.unit is not None:
        header['BUNIT'] = u.Unit(self.unit).to_string('fits')
    if self.name is not None:
        header['EXTNAME'] = self.name
        header['HDUNAME'] = self.name
    return fits.PrimaryHDU(data=self.data, header=header)
@classmethod
def empty(cls, name=None, nxpix=200, nypix=200, binsz=0.02, xref=0, yref=0,
          fill=0, proj='CAR', coordsys='GAL', xrefpix=None, yrefpix=None,
          dtype='float64', unit='', meta=None):
    """
    Create an empty image from scratch.

    Uses the same parameter names as the Fermi tool ``gtbin``
    (see https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtbin.txt).

    If no reference pixel position is given it is assumed to be
    at the center of the image.

    Parameters
    ----------
    name : str
        Name of the image.
    nxpix : int, optional
        Number of pixels in x axis. Default is 200.
    nypix : int, optional
        Number of pixels in y axis. Default is 200.
    binsz : float, optional
        Bin size for x and y axes in units of degrees. Default is 0.02.
    xref : float, optional
        Coordinate system value at reference pixel for x axis. Default is 0.
    yref : float, optional
        Coordinate system value at reference pixel for y axis. Default is 0.
    fill : float, optional
        Fill image with constant value. Default is 0.
    proj : string, optional
        Any valid WCS projection type. Default is 'CAR' (cartesian).
    coordsys : {'CEL', 'GAL'}, optional
        Coordinate system, either Galactic ('GAL') or Equatorial ('CEL').
        Default is 'GAL' (Galactic).
    xrefpix : float, optional
        Coordinate system reference pixel for x axis. Default is None.
    yrefpix: float, optional
        Coordinate system reference pixel for y axis. Default is None.
    dtype : str, optional
        Data type, default is 'float64'.
    unit : str or `~astropy.units.Unit`
        Data unit.
    meta : `~collections.OrderedDict`
        Meta data attached to the image.

    Returns
    -------
    image : `~gammapy.image.SkyImage`
        Empty image.
    """
    header = make_header(nxpix, nypix, binsz, xref, yref,
                         proj, coordsys, xrefpix, yrefpix)
    data = fill * np.ones((nypix, nxpix), dtype=dtype)
    wcs = WCS(header)
    # NOTE(review): assumes astropy's Header.update accepts meta=None
    # without error -- confirm before relying on the default.
    header.update(meta)
    return cls(name=name, data=data, wcs=wcs, unit=unit, meta=header)
@classmethod
def empty_like(cls, image, name=None, unit='', fill=0, meta=None):
    """
    Create an empty image like the given image.

    The WCS is copied over, the data array is filled with the ``fill`` value.

    Parameters
    ----------
    image : `~gammapy.image.SkyImage` or `~astropy.io.fits.ImageHDU`
        Instance of `~gammapy.image.SkyImage`.
    fill : float, optional
        Fill image with constant value. Default is 0.
    name : str
        Name of the image.
    unit : str
        String specifying the data units.
    meta : `~collections.OrderedDict`
        Dictionary to store meta data.
    """
    if isinstance(image, SkyImage):
        wcs = image.wcs.copy()
    elif isinstance(image, (fits.ImageHDU, fits.PrimaryHDU)):
        wcs = WCS(image.header)
    else:
        raise TypeError("Can't create image from type {}".format(type(image)))
    header = wcs.to_header()
    # NOTE(review): assumes astropy's Header.update accepts meta=None
    # without error -- confirm before relying on the default.
    header.update(meta)
    fill_data = fill * np.ones_like(image.data)
    return cls(name, fill_data, wcs, unit, meta=header)
def fill_events(self, events, weights=None):
    """Fill events (modifies ``data`` attribute).

    Calls `numpy.histogramdd`

    Parameters
    ----------
    events : `~gammapy.data.EventList`
        Event list
    weights : str, optional
        Column to use as weights (none by default)
    """
    # Resolve the weights column name to its values, if given.
    weight_values = None if weights is None else events.table[weights]
    xx, yy = self.wcs_skycoord_to_pixel(events.radec)
    # Histogram in (y, x) order to match the data array layout.
    counts = np.histogramdd([yy, xx], self._bins_pix,
                            weights=weight_values)[0]
    self.data = self.data + counts
@property
def _bins_pix(self):
    # Pixel bin edges: one more edge than pixels, shifted half a pixel so
    # integer pixel coordinates fall at bin centers.
    edges_y = np.arange(self.data.shape[0] + 1) - 0.5
    edges_x = np.arange(self.data.shape[1] + 1) - 0.5
    return edges_y, edges_x
def coordinates_pix(self, mode='center'):
    """
    Pixel sky coordinate images.

    Parameters
    ----------
    mode : {'center', 'edges'}
        Return coordinate values at the pixels edges or pixel centers.

    Returns
    -------
    x, y : tuple
        Return arrays representing the coordinates of a sky grid.
    """
    if mode == 'center':
        y, x = np.indices(self.data.shape)
    elif mode == 'edges':
        ny, nx = self.data.shape
        # One extra row/column of edges, shifted half a pixel.
        y, x = np.indices((ny + 1, nx + 1))
        y = y - 0.5
        x = x - 0.5
    else:
        raise ValueError('Invalid mode to compute coordinates.')
    return PixCoord(x, y)
def coordinates(self, mode='center'):
    """
    Sky coordinate images.

    Parameters
    ----------
    mode : {'center', 'edges'}
        Return coordinate values at the pixels edges or pixel centers.

    Returns
    -------
    coordinates : `~astropy.coordinates.SkyCoord`
        Position on the sky.
    """
    pix = self.coordinates_pix(mode=mode)
    return self.wcs_pixel_to_skycoord(xp=pix.x, yp=pix.y)
def contains(self, position):
"""
Check if given position on the sky is contained in the image.
Parameters
----------
position : `~astropy.coordinates.SkyCoord`
Position on the sky.
Returns
-------
containment : array
Bool array
"""
ny, nx = self.data.shape
x, y = self.wcs_skycoord_to_pixel(coords=position)
return (x >= 0.5) & (x <= nx + 0.5) & (y >= 0.5) & | |
<reponame>wendbv/pluvo-python
from mock import call, patch
import pytest
import pluvo
from pluvo import PluvoResultSet, DEFAULT_API_URL, DEFAULT_PAGE_SIZE
import datetime
class Multiple:
    """Return one pre-canned page per call to :meth:`results`.

    Used as a side-effect stand-in for mocked request functions that must
    return a different page of data on each successive call.
    """

    def __init__(self, pages):
        self.pages = pages
        # Track the call count on the instance rather than as a class
        # attribute: a class-level counter is shadowed on the first
        # increment and invites accidental shared state between instances.
        self.call_nr = 0

    def results(self, *args, **kwargs):
        """Return the next page; positional/keyword args are ignored."""
        result = self.pages[self.call_nr]
        self.call_nr += 1
        return result
def test_pluvo_resultset_get_page(mocker):
    """_get_page fetches pages lazily via GET and caches fetched pages."""
    pages = [
        {'count': 4, 'data': [0, 1]},
        {'count': 4, 'data': [2, 3]}
    ]
    i = iter(pages)
    # Each _request call consumes the next canned page.
    request_mock = mocker.MagicMock(
        side_effect=lambda *args, **kwargs: next(i))
    pluvo_mock = mocker.MagicMock(page_size=2, _request=request_mock)
    p = PluvoResultSet(pluvo_mock, 'endpoint')
    page0 = p._get_page(0)
    # make sure that fetching twice doesn't generate a new request
    page0_again = p._get_page(0)
    page1 = p._get_page(1)
    assert page0 == page0_again
    assert page0 == pages[0]['data']
    assert page1 == pages[1]['data']
    # Exactly two GETs, with limit/offset pagination params.
    pluvo_mock._request.assert_has_calls([
        call('GET', 'endpoint', params={'limit': 2, 'offset': 0}),
        call('GET', 'endpoint', params={'limit': 2, 'offset': 2}),
    ])
def test_pluvo_resultset_post_page(mocker):
    """POST-backed result sets request pages via POST with the pagination
    arguments in the request body."""
    pages = [
        {'count': 4, 'data': [0, 1]},
        {'count': 4, 'data': [2, 3]}
    ]
    i = iter(pages)
    request_mock = mocker.MagicMock(
        side_effect=lambda *args, **kwargs: next(i))
    pluvo_mock = mocker.MagicMock(page_size=2, _request=request_mock)
    p = PluvoResultSet(pluvo_mock, 'endpoint', method='POST')
    page0 = p._get_page(0)
    assert page0 == [0, 1]
    # BUG FIX: the original called `pluvo_mock._request_assert_has_calls`,
    # which is just an auto-created MagicMock attribute -- that "assertion"
    # could never fail. Call assert_has_calls on the _request mock instead.
    pluvo_mock._request.assert_has_calls([
        call('POST', 'endpoint', data={'limit': 2, 'offset': 0})
    ])
def test_pluvo_resultset_get_page_key_offset(mocker):
    """_get_page_key_offset maps a flat item index to (page, offset)."""
    pluvo_mock = mocker.MagicMock(page_size=2)
    p = PluvoResultSet(pluvo_mock, 'endpoint')
    assert p._get_page_key_offset(0) == (0, 0)
    assert p._get_page_key_offset(1) == (0, 1)
    assert p._get_page_key_offset(7) == (3, 1)


def test_pluvo_resultset_getitem(mocker):
    """Indexing and slicing behave like a list: negative indices,
    empty/overlong slices, and IndexError past the end."""
    pages = [
        [0, 1],
        [2, 3],
        [4, 5],
        [6, 7],
    ]
    get_page_mock = mocker.MagicMock(side_effect=lambda x: pages[x])
    pluvo_mock = mocker.MagicMock(page_size=2)
    p = PluvoResultSet(pluvo_mock, 'endpoint')
    # Pre-set the count so no count request is needed.
    p._count = 8
    with patch.object(p, '_get_page', get_page_mock):
        assert p[:] == [0, 1, 2, 3, 4, 5, 6, 7]
        assert p[-1] == 7
        assert p[:1] == [0]
        assert p[-3:] == [5, 6, 7]
        assert p[:-3] == [0, 1, 2, 3, 4]
        assert p[8:1] == []
        assert p[0:1] == [0]
        assert p[3:7] == [3, 4, 5, 6]
        with pytest.raises(IndexError):
            p[8]


def test_pluvo_resultset_len(mocker):
    """len() triggers at most one request; the count is cached."""
    pages = [
        {'count': 4, 'data': [0, 1]},
        {'count': 4, 'data': [2, 3]}
    ]
    i = iter(pages)
    request_mocker = mocker.MagicMock(
        side_effect=lambda *args, **kwargs: next(i))
    pluvo_mock = mocker.MagicMock(page_size=2, _request=request_mocker)
    p = PluvoResultSet(pluvo_mock, 'endpoint')
    assert len(p) == 4
    assert len(p) == 4
    # assert that even though len is called twice, we do not make 2 requests
    assert pluvo_mock._request.call_count == 1


def test_pluvo_resultset_iter(mocker):
    """Iterating walks every page in order, including a short last page."""
    pages = [
        [1, 2],
        [3, 4],
        [5, 6],
        [7],
    ]
    get_page_mock = mocker.MagicMock(side_effect=lambda x: pages[x])
    pluvo_mock = mocker.MagicMock(page_size=2)
    p = PluvoResultSet(pluvo_mock, 'endpoint')
    p._count = 7
    with patch.object(p, '_get_page', get_page_mock):
        assert list(iter(p)) == [1, 2, 3, 4, 5, 6, 7]
    get_page_mock.assert_has_calls([
        call(0), call(1), call(2), call(3)
    ])
def test_pluvo_init_client_credentials():
    """client_id + client_secret is a valid credential pair; defaults apply."""
    p = pluvo.Pluvo(client_id='client_id', client_secret='client_secret')
    assert p.client_id == 'client_id'
    assert p.client_secret == 'client_secret'
    assert p.token is None
    assert p.api_url == DEFAULT_API_URL
    assert p.page_size == DEFAULT_PAGE_SIZE


def test_pluvo_init_client_credentials_missing_one():
    """Supplying only one of client_id/client_secret is a misconfiguration."""
    with pytest.raises(pluvo.PluvoMisconfigured):
        pluvo.Pluvo(client_id='client_id')
    with pytest.raises(pluvo.PluvoMisconfigured):
        pluvo.Pluvo(client_secret='client_secret')


def test_pluvo_init_token():
    """A bare token is a valid credential on its own."""
    p = pluvo.Pluvo(token='token')
    assert p.client_id is None
    assert p.client_secret is None
    assert p.token == 'token'
    assert p.api_url == DEFAULT_API_URL
    assert p.page_size == DEFAULT_PAGE_SIZE


def test_pluvo_init_client_credentials_too_many():
    """A token combined with client credentials is rejected."""
    with pytest.raises(pluvo.PluvoMisconfigured):
        pluvo.Pluvo(client_id='client_id', client_secret='client_secret',
                    token='token')


def test_pluvo_init_no_credentials():
    """No credentials at all (missing, empty, or None) are rejected."""
    with pytest.raises(pluvo.PluvoMisconfigured):
        pluvo.Pluvo()
    with pytest.raises(pluvo.PluvoMisconfigured):
        pluvo.Pluvo(client_id='', client_secret='')
    with pytest.raises(pluvo.PluvoMisconfigured):
        pluvo.Pluvo(client_id=None, client_secret=None)


def test_pluvo_init_api_url():
    """An explicit api_url overrides the default."""
    p = pluvo.Pluvo(token='token', api_url='api_url')
    assert p.client_id is None
    assert p.client_secret is None
    assert p.token == 'token'
    assert p.api_url == 'api_url'
    assert p.page_size == DEFAULT_PAGE_SIZE


def test_pluvo_init_page_size():
    """An explicit page_size overrides the default (stored as-is)."""
    p = pluvo.Pluvo(token='token', page_size='page_size')
    assert p.client_id is None
    assert p.client_secret is None
    assert p.token == 'token'
    assert p.api_url == DEFAULT_API_URL
    assert p.page_size == 'page_size'


def test_pluvo_set_auth_headers():
    """Client credentials are merged into the request headers."""
    p = pluvo.Pluvo(client_id='client_id', client_secret='client_secret')
    retval = p._set_auth_headers()
    assert retval == {'client_id': 'client_id',
                      'client_secret': 'client_secret'}
    retval = p._set_auth_headers(headers={'test': 1})
    assert retval == {'client_id': 'client_id',
                      'client_secret': 'client_secret', 'test': 1}


def test_pluvo_set_auth_params():
    """A token is merged into the query params."""
    p = pluvo.Pluvo(token='token')
    retval = p._set_auth_params()
    assert retval == {'token': 'token'}
    retval = p._set_auth_params(params={'test': 1})
    assert retval == {'token': 'token', 'test': 1}
def test_pluvo_get_with_client_credentials(mocker):
    """GET with client credentials authenticates via headers."""
    p = pluvo.Pluvo(client_id='client_id', client_secret='client_secret')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(status_code=200))
    p.session.request = requests_mock
    retval = p._request('GET', 'url')
    assert retval == requests_mock.return_value.json()
    requests_mock.assert_called_once_with(
        'GET', '{}url'.format(DEFAULT_API_URL), params={}, json=None,
        headers={'client_id': 'client_id', 'client_secret': 'client_secret'})


def test_pluvo_get_with_token(mocker):
    """GET with a token authenticates via query params, not headers."""
    p = pluvo.Pluvo(token='token')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(status_code=200))
    p.session.request = requests_mock
    retval = p._request('GET', 'url')
    assert retval == requests_mock.return_value.json()
    requests_mock.assert_called_once_with(
        'GET', '{}url'.format(DEFAULT_API_URL), params={'token': 'token'},
        json=None, headers={})


def test_pluvo_get_with_params_and_token(mocker):
    """Caller-supplied params are merged with the auth token."""
    p = pluvo.Pluvo(token='token')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(status_code=200))
    p.session.request = requests_mock
    retval = p._request('GET', 'url', params={'param': 1})
    assert retval == requests_mock.return_value.json()
    requests_mock.assert_called_once_with(
        'GET', '{}url'.format(DEFAULT_API_URL),
        json=None, params={'param': 1, 'token': 'token'}, headers={})


def test_pluvo_get_request_error(mocker):
    """A 4xx response with an error body raises PluvoAPIException."""
    p = pluvo.Pluvo(token='token')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(
            status_code=400, json=mocker.MagicMock(
                return_value={'error': 'error message'})))
    p.session.request = requests_mock
    with pytest.raises(pluvo.PluvoAPIException) as exc_info:
        p._request('GET', 'url')
    assert exc_info.value.status_code == 400
    assert exc_info.value.message == 'error message'
    assert str(exc_info.value) == 'HTTP status 400 - error message'


def test_pluvo_request_500_error(mocker):
    """A 500 with an unparsable body raises the generic PluvoException."""
    p = pluvo.Pluvo(token='token')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(
            status_code=500, json=mocker.MagicMock(side_effect=ValueError())))
    p.session.request = requests_mock
    with pytest.raises(pluvo.PluvoException):
        p._request('GET', 'url')


def test_pluvo_request_no_json_response(mocker):
    """A 200 whose body is not JSON raises PluvoException."""
    p = pluvo.Pluvo(token='token')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(
            status_code=200, json=mocker.MagicMock(side_effect=ValueError())))
    p.session.request = requests_mock
    with pytest.raises(pluvo.PluvoException):
        p._request('GET', 'url')


def test_pluvo_request_error_no_error_data(mocker):
    """An error response whose JSON lacks error details still raises.

    NOTE(review): ``{''}`` is a *set* literal, not a dict -- presumably
    ``{}`` was intended; confirm against the library's error handling.
    """
    p = pluvo.Pluvo(token='token')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(
            status_code=404, json=mocker.MagicMock(return_value={''})))
    p.session.request = requests_mock
    with pytest.raises(pluvo.PluvoException):
        p._request('GET', 'url')
def test_pluvo_get_multiple(mocker):
    """_get_multiple wraps the endpoint in a PluvoResultSet."""
    p = pluvo.Pluvo(token='token')
    pluvo_generator_mock = mocker.patch('pluvo.pluvo.PluvoResultSet')
    p._get_multiple('endpoint', params='params', method='POST')
    pluvo_generator_mock.assert_called_once_with(
        pluvo=p, endpoint='endpoint', params='params', method='POST')


def test_pluvo_put(mocker):
    """PUT sends the payload as JSON with auth params and headers applied."""
    p = pluvo.Pluvo(token='token')
    mocker.patch.object(p, '_set_auth_params')
    mocker.patch.object(p, '_set_auth_headers')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(status_code=200))
    p.session.request = requests_mock
    retval = p._request('PUT', 'endpoint', {'test': 1}, params='params')
    assert retval == requests_mock.return_value.json()
    p._set_auth_params.assert_called_once_with('params')
    p._set_auth_headers.assert_called_once_with()
    requests_mock.assert_called_once_with(
        'PUT', '{}endpoint'.format(DEFAULT_API_URL),
        params=p._set_auth_params.return_value,
        headers=p._set_auth_headers.return_value, json={"test": 1})


def test_pluvo_put_request_error(mocker):
    """A failed PUT surfaces status code and message via PluvoAPIException."""
    p = pluvo.Pluvo(token='token')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(
            status_code=400, json=mocker.MagicMock(
                return_value={'error': 'error message'})))
    p.session.request = requests_mock
    with pytest.raises(pluvo.PluvoAPIException) as exc_info:
        p._request('PUT', 'url', 'data')
    assert exc_info.value.status_code == 400
    assert exc_info.value.message == 'error message'
    assert str(exc_info.value) == 'HTTP status 400 - error message'


def test_pluvo_post(mocker):
    """POST sends the payload as JSON with auth params and headers applied."""
    p = pluvo.Pluvo(token='token')
    mocker.patch.object(p, '_set_auth_params')
    mocker.patch.object(p, '_set_auth_headers')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(status_code=200))
    p.session.request = requests_mock
    retval = p._request('POST', 'endpoint', {'test': 1}, params='params')
    assert retval == requests_mock.return_value.json()
    p._set_auth_params.assert_called_once_with('params')
    p._set_auth_headers.assert_called_once_with()
    requests_mock.assert_called_once_with(
        'POST', '{}endpoint'.format(DEFAULT_API_URL),
        params=p._set_auth_params.return_value,
        headers=p._set_auth_headers.return_value, json={"test": 1})


def test_pluvo_post_request_error(mocker):
    """A failed POST surfaces status code and message via PluvoAPIException."""
    p = pluvo.Pluvo(token='token')
    requests_mock = mocker.patch(
        'requests.request', return_value=mocker.MagicMock(
            status_code=400, json=mocker.MagicMock(
                return_value={'error': 'error message'})))
    p.session.request = requests_mock
    with pytest.raises(pluvo.PluvoAPIException) as exc_info:
        p._request('POST', 'url', 'data')
    assert exc_info.value.status_code == 400
    assert exc_info.value.message == 'error message'
    assert str(exc_info.value) == 'HTTP status 400 - error message'


def test_pluvo_set_course_put(mocker):
    """set_course with an existing id updates via PUT."""
    p = pluvo.Pluvo(token='token')
    mocker.patch.object(p, '_request')
    retval = p.set_course({'id': 1})
    assert retval == p._request.return_value
    p._request.assert_called_once_with('PUT', 'course/1/', {'id': 1})


def test_delete_course(mocker):
    """delete_course issues a DELETE to the course endpoint."""
    p = pluvo.Pluvo(token='token')
    mocker.patch.object(p, '_request')
    retval = p.delete_course(1)
    assert retval == p._request.return_value
    p._request.assert_called_once_with('DELETE', 'course/1/')


def test_pluvo_set_course_post(mocker):
    """set_course without an id creates via POST."""
    p = pluvo.Pluvo(token='token')
    mocker.patch.object(p, '_request')
    retval = p.set_course({'test': 1})
    assert retval == p._request.return_value
    p._request.assert_called_once_with('POST', 'course/', {'test': 1})
def test_pluvo_get_course(mocker):
p = pluvo.Pluvo(token='token')
mocker.patch.object(p, '_request')
retval = p.get_course(1)
assert retval == p._request.return_value
p._request.assert_called_once_with('GET', 'course/1/')
def test_pluvo_copy_course(mocker):
    """copy_course POSTs the new creator id to the copy endpoint."""
    client = pluvo.Pluvo(token='token')
    request_mock = mocker.patch.object(client, '_request')
    result = client.copy_course(1, 2)
    assert result == request_mock.return_value
    request_mock.assert_called_once_with(
        'POST', 'course/1/copy/', data={'creator_id': 2})


def test_pluvo_get_lti_info(mocker):
    """get_lti_info fetches the LTI data of a course with GET."""
    client = pluvo.Pluvo(token='token')
    request_mock = mocker.patch.object(client, '_request')
    result = client.get_lti_info(1)
    assert result == request_mock.return_value
    request_mock.assert_called_once_with('GET', 'course/1/lti/')
def test_pluvo_get_courses(mocker):
    """get_courses forwards every filter to _get_multiple as POST params."""
    client = pluvo.Pluvo(token='token')
    multiple_mock = mocker.patch.object(client, '_get_multiple')
    result = client.get_courses(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
    assert result == multiple_mock.return_value
    expected_params = {
        'offset': 1,
        'limit': 2,
        'title': 3,
        'description': 4,
        'published_from': 5,
        'published_to': 6,
        'student_id': 7,
        'creator_id': 8,
        'creation_date_from': 9,
        'creation_date_to': 10,
        'order_by': 11,
        'id': 12,
        'include_version_numbers': False,
    }
    multiple_mock.assert_called_once_with(
        'course/', params=expected_params, method='POST')
def test_pluvo_set_organisation_put(mocker):
    """An organisation dict with an 'id' is updated via PUT."""
    client = pluvo.Pluvo(token='token')
    request_mock = mocker.patch.object(client, '_request')
    result = client.set_organisation({'id': 1})
    assert result == request_mock.return_value
    request_mock.assert_called_once_with('PUT', 'organisation/1/', {'id': 1})


def test_pluvo_set_organisation_post(mocker):
    """An organisation dict without an 'id' is created via POST."""
    client = pluvo.Pluvo(token='token')
    request_mock = mocker.patch.object(client, '_request')
    result = client.set_organisation({'test': 1})
    assert result == request_mock.return_value
    request_mock.assert_called_once_with('POST', 'organisation/', {'test': 1})


def test_pluvo_get_s3_upload_token(mocker):
    """S3 upload tokens are requested with filename and media type."""
    client = pluvo.Pluvo(token='token')
    request_mock = mocker.patch.object(client, '_request')
    result = client.get_s3_upload_token('filename.jpg', 'image/jpeg')
    assert result == request_mock.return_value
    request_mock.assert_called_once_with(
        'GET', 'media/s3_upload_token/',
        params={'filename': 'filename.jpg', 'media_type': 'image/jpeg'})
def test_pluvo_get_token(mocker):
    """Student tokens are fetched with user and course ids as params."""
    client = pluvo.Pluvo(token='token')
    request_mock = mocker.patch.object(client, '_request')
    result = client.get_token('student', 1, 2)
    assert result == request_mock.return_value
    request_mock.assert_called_once_with(
        'GET', 'user/token/student/', params={'user_id': 1, 'course_id': 2})


def test_pluvo_get_trainer_token(mocker):
    """Trainer tokens additionally pass the trainer id."""
    client = pluvo.Pluvo(token='token')
    request_mock = mocker.patch.object(client, '_request')
    result = client.get_token('trainer', 1, 2, 3)
    assert result == request_mock.return_value
    request_mock.assert_called_once_with(
        'GET', 'user/token/trainer/',
        params={'user_id': 1, 'course_id': 2, 'trainer_id': 3})
def test_pluvo_get_user(mocker):
    """get_user fetches a single user by id with GET."""
    client = pluvo.Pluvo(token='token')
    request_mock = mocker.patch.object(client, '_request')
    result = client.get_user(1)
    assert result == request_mock.return_value
    request_mock.assert_called_once_with('GET', 'user/1/')


def test_pluvo_get_users(mocker):
    """get_users forwards every filter to _get_multiple."""
    client = pluvo.Pluvo(token='token')
    multiple_mock = mocker.patch.object(client, '_get_multiple')
    result = client.get_users(1, 2, 3, 4, 5, 6, 7)
    assert result == multiple_mock.return_value
    expected_params = {
        'offset': 1, 'limit': 2, 'name': 3,
        'creation_date_from': 4, 'creation_date_to': 5,
        'created_course_id': 6, 'following_course_id': 7
    }
    multiple_mock.assert_called_once_with('user/', params=expected_params)


def test_pluvo_set_user_put(mocker):
    """A user dict with an 'id' is updated via PUT."""
    client = pluvo.Pluvo(token='token')
    request_mock = mocker.patch.object(client, '_request')
    result = client.set_user({'id': 1})
    assert result == request_mock.return_value
    request_mock.assert_called_once_with('PUT', 'user/1/', {'id': 1})


def test_pluvo_set_user_post(mocker):
    """A user dict without an 'id' is created via POST."""
    client = pluvo.Pluvo(token='token')
    request_mock = mocker.patch.object(client, '_request')
    result = client.set_user({'test': 1})
    assert result == request_mock.return_value
    request_mock.assert_called_once_with('POST', 'user/', {'test': 1})
def test_pluvo_get_progress_report(mocker):
p = pluvo.Pluvo(token='token')
mocker.patch.object(p, '_get_multiple')
dt1 = datetime.datetime(1900, 1, 2, 3, 4, 5, 6)
dt2 = datetime.datetime(1901, 2, 3, 4, 5, 6, 7)
retval = p.get_progress_report(
[1, | |
import dataclasses
import datetime
import enum
import typing
import pytest
from faker import Faker
from dataclasses_avroschema import AvroModel, exceptions, fields, types
from . import consts
faker = Faker()
def test_invalid_type_container_field():
    """An unsupported container type (typing.Set) is rejected with ValueError."""
    name = "test_field"
    msg = f"Invalid Type for field {name}. Accepted types are list, tuple, dict or typing.Union"
    with pytest.raises(ValueError, match=msg):
        fields.AvroField(name, typing.Set, default=dataclasses.MISSING)
@pytest.mark.parametrize("sequence, python_primitive_type,python_type_str", consts.SEQUENCES_AND_TYPES)
def test_sequence_type(sequence, python_primitive_type, python_type_str):
    """
    When the type is List, the Avro field type should be array
    with the items attribute present.
    """
    name = "an_array_field"
    python_type = sequence[python_primitive_type]
    array_schema = {"type": "array", "name": name, "items": python_type_str}

    # No default: only name and type appear in the schema.
    avro_field = fields.AvroField(name, python_type, default=dataclasses.MISSING)
    assert avro_field.to_dict() == {"name": name, "type": array_schema}

    # default=None maps to an empty-list default.
    avro_field = fields.AvroField(name, python_type, default=None)
    assert avro_field.to_dict() == {"name": name, "type": array_schema, "default": []}

    # bytes values show up in the schema default as decoded strings.
    if python_type_str == fields.BYTES:
        values = [b"hola", b"hi"]
        default = ["hola", "hi"]
    else:
        values = default = faker.pylist(2, True, python_primitive_type)
    avro_field = fields.AvroField(name, python_type, default=default, default_factory=lambda: values)
    assert avro_field.to_dict() == {"name": name, "type": array_schema, "default": default}
@pytest.mark.parametrize(
    "sequence,python_primitive_type,python_type_str,value",
    consts.SEQUENCES_LOGICAL_TYPES,
)
def test_sequence_with_logical_type(sequence, python_primitive_type, python_type_str, value):
    """
    When the type is List, the Avro field type should be array
    with the items attribute present.
    """
    name = "an_array_field"
    python_type = sequence[python_primitive_type]
    array_schema = {"type": "array", "name": name, "items": python_type_str}

    avro_field = fields.AvroField(name, python_type, default=dataclasses.MISSING)
    assert avro_field.to_dict() == {"name": name, "type": array_schema}

    avro_field = fields.AvroField(name, python_type, default=None)
    assert avro_field.to_dict() == {"name": name, "type": array_schema, "default": []}

    # Logical-type defaults are serialized by the matching field class.
    values = [value]
    avro_field = fields.AvroField(name, python_type, default=values, default_factory=lambda: values)
    to_avro = fields.LOGICAL_TYPES_FIELDS_CLASSES[python_primitive_type].to_avro
    assert avro_field.to_dict() == {
        "name": name,
        "type": array_schema,
        "default": [to_avro(item) for item in values],
    }
@pytest.mark.parametrize("union,items,default", consts.ARRAY_WITH_UNION_TYPES)
def test_sequence_with_union_type(union, items, default):
    """Arrays of unions keep the union members as the items schema."""
    name = "an_array_field"
    python_type = typing.List[union]

    avro_field = fields.AvroField(name, python_type, default=dataclasses.MISSING)
    assert avro_field.to_dict() == {
        "name": name,
        "type": {"type": "array", "name": name, "items": items},
    }

    avro_field = fields.AvroField(name, python_type, default_factory=lambda: default)
    assert avro_field.to_dict() == {
        "name": name,
        "type": {"type": "array", "name": name, "items": items},
        "default": default,
    }

    # A None default makes the union nullable: null is prepended to items.
    avro_field = fields.AvroField(name, python_type, default=None)
    items.insert(0, fields.NULL)
    assert avro_field.to_dict() == {
        "name": name,
        "type": {"type": "array", "name": name, "items": items},
        "default": [],
    }
@pytest.mark.parametrize("mapping,python_primitive_type,python_type_str", consts.MAPPING_AND_TYPES)
def test_mapping_type(mapping, python_primitive_type, python_type_str):
    """
    When the type is Dict, the Avro field type should be map
    with the values attribute present. The keys are always string type.
    """
    name = "a_map_field"
    python_type = mapping[str, python_primitive_type]
    map_schema = {"type": "map", "name": name, "values": python_type_str}

    # No default: only name and type appear in the schema.
    avro_field = fields.AvroField(name, python_type, default=dataclasses.MISSING)
    assert avro_field.to_dict() == {"name": name, "type": map_schema}

    # default=None maps to an empty-dict default.
    avro_field = fields.AvroField(name, python_type, default=None)
    assert avro_field.to_dict() == {"name": name, "type": map_schema, "default": {}}

    # bytes values appear in the schema default as decoded strings.
    if python_type_str == fields.BYTES:
        value = {"hola": b"hi"}
        default = {"hola": "hi"}
    else:
        value = default = faker.pydict(2, True, python_primitive_type)
    avro_field = fields.AvroField(name, python_type, default=default, default_factory=lambda: value)
    assert avro_field.to_dict() == {"name": name, "type": map_schema, "default": default}
def test_invalid_map():
    """Dict fields whose keys are not strings raise InvalidMap."""
    with pytest.raises(exceptions.InvalidMap) as excinfo:
        fields.AvroField("a_map_field", typing.Dict[int, str], default=dataclasses.MISSING)
    expected_msg = "Invalid map on field a_map_field. Keys must be string not <class 'int'>"
    assert str(excinfo.value) == expected_msg
@pytest.mark.parametrize("mapping,python_primitive_type,python_type_str,value", consts.MAPPING_LOGICAL_TYPES)
def test_mapping_logical_type(mapping, python_primitive_type, python_type_str, value):
    """
    When the type is Dict, the Avro field type should be map
    with the values attribute present. The keys are always string type.
    """
    name = "a_map_field"
    python_type = mapping[str, python_primitive_type]
    map_schema = {"type": "map", "name": name, "values": python_type_str}

    avro_field = fields.AvroField(name, python_type, default=dataclasses.MISSING)
    assert avro_field.to_dict() == {"name": name, "type": map_schema}

    avro_field = fields.AvroField(name, python_type, default=None)
    assert avro_field.to_dict() == {"name": name, "type": map_schema, "default": {}}

    # Logical-type values are serialized by the matching field class.
    values = {"key": value}
    avro_field = fields.AvroField(name, python_type, default=dataclasses.MISSING, default_factory=lambda: values)
    to_avro = fields.LOGICAL_TYPES_FIELDS_CLASSES[python_primitive_type].to_avro
    assert avro_field.to_dict() == {
        "name": name,
        "type": map_schema,
        "default": {k: to_avro(v) for k, v in values.items()},
    }
@pytest.mark.parametrize("primitive_types, avro_types, default", consts.UNION_PRIMITIVE_ELEMENTS)
def test_union_type(primitive_types, avro_types, default):
    """A typing.Union maps to a plain list of the member Avro types."""
    name = "an_union_field"
    avro_field = fields.AvroField(name, typing.Union[primitive_types])
    assert avro_field.to_dict() == {"name": name, "type": list(avro_types)}


# Tests to make sure defaults work, and that defaults are sorted to the beginning of the union
@pytest.mark.parametrize("primitive_types, avro_types, default", consts.UNION_PRIMITIVE_ELEMENTS_DEFAULTS)
def test_union_type_with_default(primitive_types, avro_types, default):
    """Union defaults are serialized into their Avro representation."""
    name = "an_union_field"
    avro_field = fields.AvroField(name, typing.Union[primitive_types], default=default)
    if isinstance(default, datetime.datetime):
        # datetimes serialize as milliseconds since the Unix epoch
        default = (default - datetime.datetime(1970, 1, 1)).total_seconds() * 1000
    elif isinstance(default, bytes):
        default = default.decode()
    assert avro_field.to_dict() == {"name": name, "type": list(avro_types), "default": default}


@pytest.mark.parametrize("complex_type, avro_types", consts.UNION_WITH_ARRAY)
def test_union_with_arrays(complex_type, avro_types):
    """A union containing a list renders an array schema as its first member."""
    name = "an_union_field"
    avro_field = fields.AvroField(name, typing.Union[complex_type])
    array_member = {"type": "array", "name": name, "items": avro_types[0]}
    assert avro_field.to_dict() == {"name": name, "type": [array_member, avro_types[1]]}


@pytest.mark.parametrize("complex_type, avro_types", consts.UNION_WITH_MAP)
def test_union_with_maps(complex_type, avro_types):
    """A union containing a dict renders a map schema as its first member."""
    name = "an_union_field"
    avro_field = fields.AvroField(name, typing.Union[complex_type])
    map_member = {"type": "map", "name": name, "values": avro_types[0]}
    assert avro_field.to_dict() == {"name": name, "type": [map_member, avro_types[1]]}
@pytest.mark.parametrize("complex_type, avro_type", consts.OPTIONAL_UNION_COMPLEX_TYPES)
def test_union_as_optional_with_complex_types(complex_type, avro_type):
    """
    Test cases when typing.Optional is used.
    The result of typing.Optional[Any] is typing.Union[Any, NoneType]
    Always NoneType is placed at the end
    """
    name = "optional_field"
    avro_field = fields.AvroField(name, typing.Optional[complex_type])
    assert avro_field.to_dict() == {"name": name, "type": [avro_type, "null"]}


@pytest.mark.parametrize("primitive_type, avro_type", consts.PRIMITIVE_TYPES)
def test_union_as_optional_with_primitives(primitive_type, avro_type):
    """
    Test cases when typing.Optional is used.
    The result of typing.Optional[Any] is typing.Union[Any, NoneType]
    Always NoneType is placed at the end
    """
    name = "an_optional_union_field"
    avro_field = fields.AvroField(name, typing.Optional[primitive_type])
    assert avro_field.to_dict() == {"name": name, "type": [avro_type, "null"]}
def test_union_type_with_records():
    """Unions of AvroModel subclasses render nested record schemas."""
    class User(AvroModel):
        "User"
        first_name: str

    class Car(AvroModel):
        "Car"
        engine_name: str

    class UnionRecord(AvroModel):
        an_union_field: typing.Union[User, Car]

    user_schema = {
        "name": "User",
        "type": "record",
        "doc": "User",
        "fields": [{"name": "first_name", "type": "string"}],
    }
    car_schema = {
        "name": "Car",
        "type": "record",
        "doc": "Car",
        "fields": [{"name": "engine_name", "type": "string"}],
    }
    schema = UnionRecord.avro_schema_to_python()
    assert schema["fields"][0] == {
        "name": "an_union_field",
        "type": [user_schema, car_schema],
    }
def test_union_type_with_record_default():
    """Union-of-record fields support None defaults and factory defaults."""
    class User(AvroModel):
        "User"
        first_name: str

    class Car(AvroModel):
        "Car"
        engine_name: str

    user_schema = {
        "name": "User",
        "type": "record",
        "doc": "User",
        "fields": [{"name": "first_name", "type": "string"}],
    }
    car_schema = {
        "name": "Car",
        "type": "record",
        "doc": "Car",
        "fields": [{"name": "engine_name", "type": "string"}],
    }

    class UnionRecord(AvroModel):
        an_union_field: typing.Optional[typing.Union[User, Car]] = None

    # A None default makes the union nullable: null comes first.
    schema = UnionRecord.avro_schema_to_python()
    assert schema["fields"][0] == {
        "name": "an_union_field",
        "type": [fields.NULL, user_schema, car_schema],
        "default": None,
    }

    class UnionRecordTwo(AvroModel):
        an_union_field: typing.Union[User, Car] = dataclasses.field(default_factory=lambda: {"first_name": "a name"})

    # A factory default keeps the member order and carries the dict default.
    schema = UnionRecordTwo.avro_schema_to_python()
    assert schema["fields"][0] == {
        "name": "an_union_field",
        "type": [user_schema, car_schema],
        "default": {"first_name": "a name"},
    }
def test_fixed_type():
    """
    When the type is types.Fixed, the Avro field type should be fixed
    with size attribute present.
    """
    name = "a_fixed_field"
    namespace = "md5"
    aliases = ["md5", "hash"]
    default = types.Fixed(16, namespace=namespace, aliases=aliases)
    avro_field = fields.AvroField(name, types.Fixed, default=default)
    assert avro_field.to_dict() == {
        "name": name,
        "type": {
            "type": "fixed",
            "name": name,
            "size": default.size,
            "namespace": namespace,
            "aliases": aliases,
        },
    }
def test_enum_type():
"""
When the type is enum.Enum, the Avro field type should be Enum
with symbols attribute present.
"""
name = "an_enum_field"
namespace = "my_enum"
aliases = ["enum", "first enum"]
class CardType(enum.Enum):
SPADES = "SPADES"
HEARTS = "HEARTS"
DIAMONDS = "DIAMONDS"
CLUBS = "CLUBS"
class Meta:
namespace = "my_enum"
aliases = ["enum", "first enum"]
python_type = CardType
field = fields.AvroField(name, python_type, default=CardType.CLUBS)
symbols = ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
expected = {
"name": name,
"type": {
"type": "enum",
"name": name,
"symbols": symbols,
"namespace": namespace,
"aliases": aliases,
},
"default": CardType.CLUBS.value,
}
assert expected == field.to_dict()
class CardType(enum.Enum):
SPADES = "SPADES"
HEARTS = "HEARTS"
DIAMONDS = "DIAMONDS"
CLUBS = "CLUBS"
class Meta:
namespace = "my_enum"
python_type = CardType
field = fields.AvroField(name, python_type)
expected = {"name": name, "type": {"type": "enum", "name": name, "symbols": symbols, "namespace": namespace}}
assert expected == field.to_dict()
class CardType(enum.Enum):
SPADES = "SPADES"
HEARTS = "HEARTS"
DIAMONDS = "DIAMONDS"
CLUBS = "CLUBS"
python_type | |
running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "pyqtgraph." prefix from the module.
"""
    # Comma-separated list of "Class.function" qualnames to profile, read
    # from the PYQTGRAPHPROFILE environment variable at import time.
    _profilers = os.environ.get("PYQTGRAPHPROFILE", "")
    # Nesting depth of currently-active profilers; controls output indent.
    _depth = 0
    # Class-level buffer of (format, args) messages, flushed by the
    # outermost profiler in finish().
    _msgs = []
    class DisabledProfiler(object):
        """No-op profiler substitute handed out when profiling is disabled,
        so call sites can use the profiler API unconditionally."""
        def __init__(self, *args, **kwds):
            pass
        def __call__(self, *args):
            pass
        def finish(self):
            pass
        def mark(self, msg=None):
            pass
    # Shared singleton returned by __new__ whenever profiling is off.
    _disabledProfiler = DisabledProfiler()
    if _profilers:
        _profilers = _profilers.split(",")
        # Profiling is enabled for at least one qualname: __new__ decides
        # per call site whether to return a real profiler or the no-op one.
        def __new__(cls, msg=None, disabled='env', delayed=True):
            """Optionally create a new profiler based on caller's qualname.
            """
            if disabled is True:
                return cls._disabledProfiler
            # determine the qualified name of the caller function
            caller_frame = sys._getframe(1)
            try:
                caller_object_type = type(caller_frame.f_locals["self"])
            except KeyError: # we are in a regular function
                qualifier = caller_frame.f_globals["__name__"].split(".", 1)[1]
            else: # we are in a method
                qualifier = caller_object_type.__name__
            func_qualname = qualifier + "." + caller_frame.f_code.co_name
            if func_qualname not in cls._profilers: # don't do anything
                return cls._disabledProfiler
            # create an actual profiling object
            cls._depth += 1
            obj = super(Profiler, cls).__new__(cls)
            obj._name = msg or func_qualname
            obj._delayed = delayed
            obj._markCount = 0
            obj._finished = False
            obj._firstTime = obj._lastTime = ptime.time()
            obj._newMsg("> Entering " + obj._name)
            return obj
    else:
        # Profiling globally disabled: Profiler(...) is a cheap no-op
        # callable, avoiding all frame inspection overhead.
        def __new__(cls, delayed=True):
            return lambda msg=None: None
def __call__(self, msg=None):
"""Register or print a new message with timing information.
"""
if msg is None:
msg = str(self._markCount)
self._markCount += 1
newTime = ptime.time()
self._newMsg(" %s: %0.4f ms",
msg, (newTime - self._lastTime) * 1000)
self._lastTime = newTime
def mark(self, msg=None):
self(msg)
def _newMsg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
print(msg % args)
def __del__(self):
self.finish()
    def finish(self, msg=None):
        """Add a final message; flush the message list if no parent profiler.
        """
        # finish() may run both explicitly and from __del__; only once.
        if self._finished:
            return
        self._finished = True
        if msg is not None:
            self(msg)
        self._newMsg("< Exiting %s, total time: %0.4f ms",
                     self._name, (ptime.time() - self._firstTime) * 1000)
        type(self)._depth -= 1
        # Only the outermost profiler flushes the shared message buffer.
        if self._depth < 1 and self._msgs:
            print("\n".join([m[0]%m[1] for m in self._msgs]))
            type(self)._msgs = []
def profile(code, name='profile_run', sort='cumulative', num=30):
    """Common-use wrapper around cProfile: run `code`, dump raw stats to the
    file `name`, print the top `num` entries sorted by `sort`, and return
    the pstats.Stats object for further inspection."""
    cProfile.run(code, name)
    stats = pstats.Stats(name)
    stats.sort_stats(sort).print_stats(num)
    return stats
#### Code for listing (nearly) all objects in the known universe
#### http://utcc.utoronto.ca/~cks/space/blog/python/GetAllObjects
# Recursively expand slist's objects
# into olist, using seen to track
# already processed objects.
def _getr(slist, olist, first=True):
i = 0
for e in slist:
oid = id(e)
typ = type(e)
if oid in olist or typ is int: ## or e in olist: ## since we're excluding all ints, there is no longer a need to check for olist keys
continue
olist[oid] = e
if first and (i%1000) == 0:
gc.collect()
tl = gc.get_referents(e)
if tl:
_getr(tl, olist, first=False)
i += 1
# The public function.
def get_all_objects():
    """Return a dict mapping id -> object for all live Python objects
    (excluding ints), minus this call's own bookkeeping objects."""
    gc.collect()
    roots = gc.get_objects()
    found = {}
    _getr(roots, found)
    # Remove this function's own working objects from the result.
    del found[id(found)]
    del found[id(roots)]
    del found[id(sys._getframe())]
    return found


def lookup(oid, objects=None):
    """Return an object given its ID, if it exists."""
    table = get_all_objects() if objects is None else objects
    return table[oid]
class ObjTracker(object):
"""
Tracks all objects under the sun, reporting the changes between snapshots: what objects are created, deleted, and persistent.
This class is very useful for tracking memory leaks. The class goes to great (but not heroic) lengths to avoid tracking
its own internal objects.
Example:
ot = ObjTracker() # takes snapshot of currently existing objects
... do stuff ...
ot.diff() # prints lists of objects created and deleted since ot was initialized
... do stuff ...
ot.diff() # prints lists of objects created and deleted since last call to ot.diff()
# also prints list of items that were created since initialization AND have not been deleted yet
# (if done correctly, this list can tell you about objects that were leaked)
arrays = ot.findPersistent('ndarray') ## returns all objects matching 'ndarray' (string match, not instance checking)
## that were considered persistent when the last diff() was run
describeObj(arrays[0]) ## See if we can determine who has references to this array
"""
    allObjs = {} ## keep track of all objects created and stored within class instances
    allObjs[id(allObjs)] = None
    def __init__(self):
        """Create the tracker and immediately snapshot the current object
        set as the baseline for later diff() calls."""
        self.startRefs = {} ## list of objects that exist when the tracker is initialized {oid: weakref}
                            ## (If it is not possible to weakref the object, then the value is None)
        self.startCount = {}
        self.newRefs = {} ## list of objects that have been created since initialization
        self.persistentRefs = {} ## list of objects considered 'persistent' when the last diff() was called
        self.objTypes = {}
        # Register this tracker and all of its bookkeeping containers in the
        # class-level registry so they are excluded from scans (see isObjVar).
        ObjTracker.allObjs[id(self)] = None
        self.objs = [self.__dict__, self.startRefs, self.startCount, self.newRefs, self.persistentRefs, self.objTypes]
        self.objs.append(self.objs)
        for v in self.objs:
            ObjTracker.allObjs[id(v)] = None
        self.start()
def findNew(self, regex):
"""Return all objects matching regex that were considered 'new' when the last diff() was run."""
return self.findTypes(self.newRefs, regex)
def findPersistent(self, regex):
"""Return all objects matching regex that were considered 'persistent' when the last diff() was run."""
return self.findTypes(self.persistentRefs, regex)
def start(self):
"""
Remember the current set of objects as the comparison for all future calls to diff()
Called automatically on init, but can be called manually as well.
"""
refs, count, objs = self.collect()
for r in self.startRefs:
self.forgetRef(self.startRefs[r])
self.startRefs.clear()
self.startRefs.update(refs)
for r in refs:
self.rememberRef(r)
self.startCount.clear()
self.startCount.update(count)
#self.newRefs.clear()
#self.newRefs.update(refs)
def diff(self, **kargs):
"""
Compute all differences between the current object set and the reference set.
Print a set of reports for created, deleted, and persistent objects
"""
refs, count, objs = self.collect() ## refs contains the list of ALL objects
## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)
delRefs = {}
for i in self.startRefs.keys():
if i not in refs:
delRefs[i] = self.startRefs[i]
del self.startRefs[i]
self.forgetRef(delRefs[i])
for i in self.newRefs.keys():
if i not in refs:
delRefs[i] = self.newRefs[i]
del self.newRefs[i]
self.forgetRef(delRefs[i])
#print "deleted:", len(delRefs)
## Which refs have appeared since call to start() or diff()
persistentRefs = {} ## created since start(), but before last diff()
createRefs = {} ## created since last diff()
for o in refs:
if o not in self.startRefs:
if o not in self.newRefs:
createRefs[o] = refs[o] ## object has been created since last diff()
else:
persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)
#print "new:", len(newRefs)
## self.newRefs holds the entire set of objects created since start()
for r in self.newRefs:
self.forgetRef(self.newRefs[r])
self.newRefs.clear()
self.newRefs.update(persistentRefs)
self.newRefs.update(createRefs)
for r in self.newRefs:
self.rememberRef(self.newRefs[r])
#print "created:", len(createRefs)
## self.persistentRefs holds all objects considered persistent.
self.persistentRefs.clear()
self.persistentRefs.update(persistentRefs)
print("----------- Count changes since start: ----------")
c1 = count.copy()
for k in self.startCount:
c1[k] = c1.get(k, 0) - self.startCount[k]
typs = list(c1.keys())
typs.sort(lambda a,b: cmp(c1[a], c1[b]))
for t in typs:
if c1[t] == 0:
continue
num = "%d" % c1[t]
print(" " + num + " "*(10-len(num)) + str(t))
print("----------- %d Deleted since last diff: ------------" % len(delRefs))
self.report(delRefs, objs, **kargs)
print("----------- %d Created since last diff: ------------" % len(createRefs))
self.report(createRefs, objs, **kargs)
print("----------- %d Created since start (persistent): ------------" % len(persistentRefs))
self.report(persistentRefs, objs, **kargs)
def __del__(self):
self.startRefs.clear()
self.startCount.clear()
self.newRefs.clear()
self.persistentRefs.clear()
del ObjTracker.allObjs[id(self)]
for v in self.objs:
del ObjTracker.allObjs[id(v)]
@classmethod
def isObjVar(cls, o):
return type(o) is cls or id(o) in cls.allObjs
def collect(self):
print("Collecting list of all objects...")
gc.collect()
objs = get_all_objects()
frame = sys._getframe()
del objs[id(frame)] ## ignore the current | |
<filename>src/deploy/osp_deployer/director.py
#!/usr/bin/env python
# Copyright (c) 2015-2019 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from settings.config import Settings
from checkpoints import Checkpoints
from collections import defaultdict
from infra_host import InfraHost
from auto_common import Scp
import json
import logging
import os
import re
import subprocess
import sys
import tempfile
import time
import yaml
logger = logging.getLogger("osp_deployer")
# NOTE(review): exitFlag is not referenced in the visible part of this
# module — possibly leftover thread-control state; confirm before removing.
exitFlag = 0
# Ceph pools present in a default install
# Pools expected to hold the bulk of the cluster's data.
HEAVY_POOLS = ['images',
               'vms',
               'volumes']
# Remaining default pools (RADOS gateway, backups, telemetry metrics).
OTHER_POOLS = ['.rgw.buckets',
               '.rgw.root',
               'backups',
               'default.rgw.buckets.data',
               'default.rgw.buckets.index',
               'default.rgw.control',
               'default.rgw.log',
               'default.rgw.meta',
               'metrics']
# tempest configuration file name (located under the tempest workspace)
TEMPEST_CONF = "tempest.conf"
class Director(InfraHost):
def __init__(self):
self.settings = Settings.settings
self.user = self.settings.director_install_account_user
self.ip = self.settings.director_node.public_api_ip
self.provisioning_ip = self.settings.director_node.provisioning_ip
self.pwd = self.settings.director_install_account_pwd
self.root_pwd = self.settings.director_node.root_password
self.home_dir = "/home/" + self.user
self.pilot_dir = os.path.join(self.home_dir, "pilot")
self.sanity_dir = os.path.join(self.pilot_dir, "deployment-validation")
self.images_dir = os.path.join(self.pilot_dir, "images")
self.templates_dir = os.path.join(self.pilot_dir, "templates")
self.nic_configs_dir = os.path.join(self.templates_dir,
"nic-configs")
self.validation_dir = os.path.join(self.pilot_dir,
"deployment-validation")
self.source_stackrc = 'source ' + self.home_dir + "/stackrc;"
self.tempest_directory = os.path.join(self.home_dir,
self.settings.tempest_workspace)
self.tempest_conf = os.path.join(self.tempest_directory,
"etc", TEMPEST_CONF)
cmd = "mkdir -p " + self.pilot_dir
self.run(cmd)
def apply_internal_repos(self):
# Add the internal repo. if going down that road,
# Pull the target rpm's
if self.settings.internal_repos is True:
logger.debug(
"Applying internal repo's to the "
"director vm & reinstall rdo manager")
count = 1
for repo in self.settings.internal_repos_urls:
cmd = 'curl ' + \
repo + \
" > /etc/yum.repos.d/internal_" + \
str(count) + \
".repo"
self.run_as_root(cmd)
self.run_as_root("sed -i '/enabled=1/a priority=1' "
"/etc/yum.repos.d/internal_" +
str(count) + ".repo")
count += 1
else:
for repo in self.settings.rhsm_repos:
_, std_err, _ = self.run_as_root('subscription-manager repos '
'--enable=' + repo)
if std_err:
logger.error("Unable to enable repo {}: {}".format(
repo, std_err))
sys.exit(1)
def upload_update_conf_files(self):
logger.debug("tar up the required pilot files")
os.system("cd " +
self.settings.foreman_configuration_scripts +
";tar -zcvf /root/pilot.tar.gz pilot common")
self.upload_file("/root/pilot.tar.gz",
self.home_dir + "/pilot.tar.gz")
self.run('cd;tar zxvf pilot.tar.gz')
cmds = [
'sed -i "s|undercloud_hostname = .*|undercloud_hostname = ' +
self.settings.director_node.hostname + "." +
self.settings.domain +
'|" pilot/undercloud.conf',
'sed -i "s|local_ip = .*|local_ip = ' +
self.settings.director_node.provisioning_ip +
'/24|" pilot/undercloud.conf',
'sed -i "s|local_interface = .*|'
'local_interface = eth1|" pilot/undercloud.conf',
'sed -i "s|masquerade_network = .*|masquerade_network = ' +
self.settings.provisioning_network +
'|" pilot/undercloud.conf',
'sed -i "s|dhcp_start = .*|dhcp_start = ' +
self.settings.provisioning_net_dhcp_start +
'|" pilot/undercloud.conf',
'sed -i "s|dhcp_end = .*|dhcp_end = ' +
self.settings.provisioning_net_dhcp_end +
'|" pilot/undercloud.conf',
'sed -i "s|cidr = .*|cidr = ' +
self.settings.provisioning_network +
'|" pilot/undercloud.conf',
'sed -i "s|gateway = .*|gateway = ' +
self.settings.director_node.provisioning_ip +
'|" pilot/undercloud.conf',
'sed -i "s|inspection_iprange = .*|inspection_iprange = ' +
self.settings.discovery_ip_range +
'|" pilot/undercloud.conf',
'sed -i "s|undercloud_ntp_servers = .*|undercloud_ntp_servers = ' +
self.settings.sah_node.provisioning_ip +
'|" pilot/undercloud.conf'
]
for cmd in cmds:
self.run(cmd)
if self.settings.version_locking_enabled is True:
yaml = "/overcloud_images.yaml"
source_file = self.settings.lock_files_dir + yaml
dest_file = self.home_dir + yaml
self.upload_file(source_file, dest_file)
unity_lock = "/unity_container_lock.ini"
unity_lock_file = self.settings.lock_files_dir + unity_lock
if self.settings.enable_unity_backend is True:
cmd = "grep cinder_unity_container_version " + \
unity_lock_file + \
" | awk -F '=' '{print $2}'"
self.settings.cinder_unity_container_version = \
self.run_tty(cmd)
if self.settings.enable_unity_manila_backend is True:
cmd = "grep manila_unity_container_version " + \
unity_lock_file + \
" | awk -F '=' '{print $2}'"
self.settings.manila_unity_container_version = \
self.run_tty(cmd)
def install_director(self):
logger.info("Installing the undercloud")
if self.settings.use_satellite:
cmd = '~/pilot/install-director.sh --dns ' + \
self.settings.name_server + ' --satellite_hostname ' + \
self.settings.satellite_hostname + ' --satellite_org ' + \
self.settings.satellite_org + ' --satellite_key ' + \
self.settings.satellite_activation_key
if self.settings.pull_containers_from_satellite is True:
cmd += " --containers_prefix " + \
self.settings.containers_prefix
else:
cmd = '~/pilot/install-director.sh --dns ' + \
self.settings.name_server + \
" --sm_user " + \
self.settings.subscription_manager_user + \
" --sm_pwd " + \
self.settings.subscription_manager_password + \
" --sm_pool " + \
self.settings.subscription_manager_vm_ceph
if len(self.settings.overcloud_nodes_pwd) > 0:
cmd += " --nodes_pwd " + self.settings.overcloud_nodes_pwd
stdout, stderr, exit_status = self.run(cmd)
if exit_status:
raise AssertionError("Director/Undercloud did not " +
"install properly - see " +
"/pilot/install-director.log" +
" for details")
tester = Checkpoints()
tester.verify_undercloud_installed()
def upload_cloud_images(self):
if self.settings.pull_images_from_cdn is False:
logger.debug("Uploading cloud images to the Director vm")
self.run("mkdir -p " + self.images_dir)
self.upload_file(self.settings.discovery_ram_disk_image,
self.images_dir + "/discovery-ramdisk.tar")
self.upload_file(self.settings.overcloud_image,
self.images_dir + "/overcloud-full.tar")
else:
pass
    def node_discovery(self):
        """Produce ~/instackenv.json on the director VM.

        Either uploads a user-supplied instack file, or scans the iDRACs
        (DHCP pool plus any statically configured addresses) with
        discover_nodes.py.  Raises AssertionError when the scan result is
        empty/too small or the node count does not match the settings.
        """
        setts = self.settings
        if setts.use_custom_instack_json is True:
            logger.debug(
                "Using custom instack.json file - NOT scannings nodes")
            # Replace any stale file, upload the custom one and fix ownership.
            cmd = "rm " + self.home_dir + "/instackenv.json -f"
            self.run_tty(cmd)
            remote_file = self.home_dir + "/instackenv.json"
            self.upload_file(setts.custom_instack_json,
                             remote_file)
            cmd = "sudo chown " + setts.director_install_account_user + ":" + \
                setts.director_install_account_user + " " + remote_file
            self.run_tty(cmd)
        else:
            # In 13g servers, the iDRAC sends out a DHCP req every 3 seconds
            # for 1 minute. If it still hasn't received a response, it sleeps
            # for 20 seconds and then repeats. As a result, we sleep for 30
            # seconds here to make sure that every iDRAC has had a chance to
            # get a DHCP address prior to launching node discovery.
            time.sleep(30)
            setts = self.settings
            cmd = "cd ~/pilot/discover_nodes;./discover_nodes.py -u " + \
                setts.ipmi_user + \
                " -p '" + setts.ipmi_password + "'"
            # Discover the nodes using DHCP for the iDRAC
            cmd += ' ' + setts.management_allocation_pool_start + "-" + \
                setts.management_allocation_pool_end
            # Discover the nodes using static IPs for the iDRAC
            for node in (self.settings.controller_nodes +
                         self.settings.compute_nodes +
                         self.settings.ceph_nodes):
                if hasattr(node, "idrac_ip"):
                    cmd += ' ' + node.idrac_ip
            cmd += '> ~/instackenv.json'
            self.run_tty(cmd)
            # Sanity check: a near-empty file means no nodes answered.
            cmd = "ls -la ~/instackenv.json | awk '{print $5;}'"
            size = \
                self.run_tty(cmd)[0]
            if int(size) <= 50:
                logger.fatal("did not manage to pick up the nodes..")
                raise AssertionError(
                    "Unable to scan all the nodes ... need to go & pull "
                    "the plug(s) - " +
                    size + " - " +
                    size[0])
            else:
                logger.debug("nodes appear to have been picked up")
            logger.debug("Verify the number of nodes picked match up to settings")
            expected_nodes = len(self.settings.controller_nodes) + len(
                self.settings.compute_nodes) + len(
                self.settings.ceph_nodes)
            found = self.run_tty(
                "grep pm_addr ~/instackenv.json | wc -l")[0].rstrip()
            logger.debug("Found " + found + " Expected : " + str(expected_nodes))
            if int(found) == expected_nodes:
                pass
            else:
                raise AssertionError(
                    "Number of nodes in instackenv.json does not add up"
                    " to the number of nodes defined in .properties file")
        if setts.use_ipmi_driver is True:
            # Swap the DRAC driver for plain IPMI when requested.
            logger.debug("Using pxe_ipmi driver")
            cmd = 'sed -i "s|pxe_drac|pxe_ipmitool|" ~/instackenv.json'
            self.run_tty(cmd)
def configure_idracs(self):
nodes = list(self.settings.controller_nodes)
nodes.extend(self.settings.compute_nodes)
nodes.extend(self.settings.ceph_nodes)
cmd = "~/pilot/config_idracs.py "
json_config = defaultdict(dict)
for node in nodes:
if hasattr(node, 'idrac_ip'):
node_id = node.idrac_ip
else:
node_id = node.service_tag
if hasattr(node, 'pxe_nic'):
json_config[node_id]["pxe_nic"] = node.pxe_nic
new_ipmi_password = self.settings.new_ipmi_password
if new_ipmi_password:
json_config[node_id]["password"] = <PASSWORD>
if node.skip_nic_config:
json_config[node_id]["skip_nic_config"] = node.skip_nic_config
if json_config.items():
cmd += "-j '{}'".format(json.dumps(json_config))
stdout, stderr, exit_status = self.run(cmd)
if exit_status:
raise AssertionError("An error occurred while running "
"config_idracs. exit_status: {}, "
"error: {}, stdout: {}".format(exit_status,
stderr,
stdout))
def import_nodes(self):
stdout, stderr, exit_status = self.run(self.source_stackrc +
"~/pilot/import_nodes.py")
if exit_status:
raise AssertionError("Unable to import nodes into Ironic. "
"exit_status: {}, error: {}, "
"stdout: {}".format(
exit_status, stderr, stdout))
tester = Checkpoints()
tester.verify_nodes_registered_in_ironic()
def node_introspection(self):
setts = self.settings
stdout, stderr, exit_status = self.run(
self.source_stackrc + "~/pilot/prep_overcloud_nodes.py")
if exit_status:
raise AssertionError("An error occurred while running "
"prep_overcloud_nodes. exit_status: {}, "
"error: {}, stdout: {}".format(exit_status,
stderr,
stdout))
# Turning LLDP off before introspection
lldp_off_cmd = "sudo sed -i 's/ipa-collect-lldp=1/ipa-collect-lldp=0/g' /httpboot/inspector.ipxe" # noqa
self.run(lldp_off_cmd)
introspection_cmd = self.source_stackrc + "~/pilot/introspect_nodes.py"
if setts.use_in_band_introspection is True:
introspection_cmd += " -i"
stdout, stderr, exit_status = self.run(introspection_cmd)
if exit_status:
raise AssertionError("Unable to introspect nodes. "
"exit_status: {}, error: {}, "
"stdout: {}".format(
exit_status, stderr, stdout))
tester = Checkpoints()
tester.verify_introspection_sucessfull()
def assign_role(self, node, role, index):
assign_role_command = self._create_assign_role_command(
node, role, index)
stdout, stderr, exit_status = self.run(self.source_stackrc +
"cd ~/pilot;" +
assign_role_command)
if exit_status:
if hasattr(node, 'service_tag'):
node_identifier = "service tag " + node.service_tag
else:
node_identifier = "ip " + node.idrac_ip
raise AssertionError("Failed to assign {} role to {}: stdout={}, "
"stderr={}, exit_status={}".format(
role,
| |
# -*- coding: utf-8 -*-
from ..util import log
from ..util.log import debug
from ..util import urlopen
import json, re
import requests
from bs4 import BeautifulSoup
from ..util.base import clean_html
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100'
def get_tmdb_api_key():
    """Return {'host', 'key'} to use for TMDB API requests.

    The built-in defaults are returned as-is on Kodi 18+ or when not
    running inside Kodi at all; on older Kodi installs the key/host are
    read from the bundled metadata.common.themoviedb.org scraper's
    tmdb.xml, when available.
    """
    key = '<KEY>'  # NOTE(review): value looks redacted - confirm default key
    host = 'api.themoviedb.org'
    from ..util import filesystem
    try:
        from ..kodi.compat import translatePath
        from ..util.string import decode_string
        import xbmc
        home_path = decode_string(translatePath('special://home'))
        major = xbmc.getInfoLabel("System.BuildVersion").split(".")[0]
        if int(major) > 17:
            # Kodi 18+: keep the built-in defaults.
            return {'host': host, 'key': key }
    except ImportError:
        # Not running inside Kodi: keep the built-in defaults.
        # cur = filesystem.dirname(__file__)
        # home_path = filesystem.join(cur, '../..')
        return {'host': host, 'key': key }
    try:
        # Kodi <= 17: borrow key and host from the bundled TMDB scraper.
        xml_path = filesystem.join(home_path, 'addons', 'metadata.common.themoviedb.org', 'tmdb.xml')
        with filesystem.fopen(xml_path, 'r') as xml:
            content = xml.read()
            match = re.search(r'api_key=(\w+)', content)
            if match:
                key = match.group(1)
                debug('get_tmdb_api_key: ok')
            m = re.search(r'://(.+)/3/', content)
            if m:
                host = m.group(1)
    except BaseException as e:
        # Best-effort: fall back to the defaults on any failure.
        debug('get_tmdb_api_key: ' + str(e))
    return {'host': host, 'key': key }
def attr_text(s):
    """Extract the plain text of a soup element."""
    value = s.get_text()
    return value
def attr_split_slash(s):
    """Split the element's text on '/' and strip whitespace from each part."""
    return [part.strip() for part in s.get_text().split('/')]
def attr_year(s):
    """Return the first 4-digit year found in the element's text, or None.

    Uses the module-level ``re`` import; the previous function-local
    ``import re`` needlessly shadowed it.
    """
    m = re.search(r'(\d\d\d\d)', s.get_text())
    if m:
        return m.group(1)
    return None  # explicit: no year found
def attr_genre(s):
    """Collect the text of every <a> link inside the element (genre names)."""
    names = []
    for anchor in s.find_all('a'):
        names.append(anchor.get_text())
    return names
class IDs(object):
    """Bidirectional in-memory mapping between IMDb ids and Kinopoisk ids.

    Both tables are class-level caches shared by all users of the class.
    """
    kp_by_imdb = {}  # imdb id -> kinopoisk numeric id (string)
    imdb_by_kp = {}  # kinopoisk numeric id (string) -> imdb id

    @staticmethod
    def id_by_kp_url(url):
        """Pull the numeric film id out of a Kinopoisk URL (None if absent)."""
        found = re.search(r"(\d\d+)", url)
        return found.group(1) if found else None

    @staticmethod
    def get_by_kp(kp_url):
        """IMDb id previously registered for this Kinopoisk URL, or None."""
        return IDs.imdb_by_kp.get(IDs.id_by_kp_url(kp_url))

    @staticmethod
    def get_by_imdb(imdb_id):
        """Kinopoisk id previously registered for this IMDb id, or None."""
        return IDs.kp_by_imdb.get(imdb_id)

    @staticmethod
    def set(imdb_id, kp_url):
        """Register the pair in both directions; ignores empty arguments."""
        if not (imdb_id and kp_url):
            return
        kp_id = IDs.id_by_kp_url(kp_url)
        IDs.imdb_by_kp[kp_id] = imdb_id
        IDs.kp_by_imdb[imdb_id] = kp_id

    @staticmethod
    def has_imdb(imdb_id):
        """True when a Kinopoisk id is known for this IMDb id."""
        return imdb_id in IDs.kp_by_imdb

    @staticmethod
    def has_kp(kp_url):
        """True when an IMDb id is known for this Kinopoisk URL."""
        return IDs.id_by_kp_url(kp_url) in IDs.imdb_by_kp
from ..base.soup_base import soup_base
class world_art_soup(soup_base):
    """soup_base specialised for world-art.ru: always sends the fixed
    request headers the site expects."""
    headers = {
        'Host': 'www.world-art.ru',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': user_agent,
        'X-Compress': 'null',
    }
    def __init__(self, url):
        soup_base.__init__(self, url, self.headers)
class world_art_actors(world_art_soup):
    """Parser for a world-art.ru "full cast" page.

    Exposes the cast as a list of dicts with (non-empty) keys
    ru_name / en_name / role.
    """
    def __init__(self, url):
        world_art_soup.__init__(self, url)
        self._actors = []

    @property
    def actors(self):
        """Cast entries, parsed lazily from the table after the bold
        'Актёры' (Actors) heading and cached on first access."""
        if not self._actors:
            def append_actor(tr):
                # One table row -> one actor dict; cells are
                # [index, ru name (link), en name, role].
                tds = tr.find_all('td', recursive=False)
                a = tds[1].find('a')
                act = {}
                if a:
                    id = a['href'].split('?id=')[-1]
                    id = id.split('&')[0]
                    #if td.find('img', attrs={'src': "../img/photo.gif"}):
                    #    act['photo'] = 'http://www.world-art.ru/img/people/10000/{}.jpg'.format(int(id))
                    act['ru_name'] = tds[1].get_text()
                    act['en_name'] = tds[2].get_text()
                    act['role'] = tds[3].get_text()
                # BUG FIX: dict.iteritems() is Python-2-only; .items()
                # behaves identically there and also works on Python 3.
                res = {}
                for k, v in act.items():
                    if v:
                        res[k] = v
                self._actors.append(res)
            for b in self.soup.find_all('b'):
                if b.get_text() == u'Актёры':
                    # The cast rows live in the second table following the
                    # one containing the heading.
                    table = b.find_parent('table')
                    table = table.find_next_siblings('table')[1]
                    for tr_next in table.find_all('tr'):
                        append_actor(tr_next)
        return self._actors

    def __getitem__(self, i):
        """Index by position (int) or by Russian/English name (string)."""
        from ..util.string import is_string_type
        if isinstance(i, int):
            return self.actors[i]
        elif is_string_type(i):
            for act in self.actors:
                if act['ru_name'] == i:
                    return act
                if act['en_name'] == i:
                    return act
        raise KeyError
class world_art_info(world_art_soup):
    """A single world-art.ru film page; lazily extracts film attributes,
    cast, plot and external (IMDb / Kinopoisk) links."""
    Request_URL = "http://www.world-art.ru/%s"
    # (page label, attribute name exposed on this object, cell extractor)
    attrs = [
        (u'Названия', 'knowns', attr_split_slash),
        (u'Производство', 'country', attr_text),
        (u'Хронометраж', 'runtime', attr_text),
        (u'Жанр', 'genre', attr_genre),
        (u'Первый показ', 'year', attr_year),
        (u'Режиссёр', 'director', attr_text),
    ]
    def __init__(self, url):
        world_art_soup.__init__(self, self.Request_URL % url)
        self._info_data = dict()
        self._actors = None
    @property
    def actors(self):
        # Cast lives on a separate "full cast" page of the same film.
        if not self._actors:
            self._actors = world_art_actors(self.url.replace('cinema.php', 'cinema_full_cast.php'))
        return self._actors.actors
    @property
    def data(self):
        """Dict of extracted attributes (see ``attrs``), parsed once."""
        def next_td(td, fn):
            # The value cell follows the label cell (skip whitespace node).
            return fn(td.next_sibling.next_sibling)
        if not self._info_data:
            data = {}
            for td in self.soup.find_all('td', class_='review'):
                td_text = td.get_text()
                find = [item for item in self.attrs if td_text in item]
                if find:
                    item = find[0]
                    data[item[1]] = next_td(td, item[2])
            self._info_data = data.copy()
        return self._info_data
    def __getattr__(self, name):
        # Expose parsed attributes (knowns, country, genre, ...) directly.
        names = [i[1] for i in self.attrs]
        if name in names:
            return self.data[name]
        raise AttributeError
    @property
    def imdb(self):
        """IMDb id ('tt...') linked from the page, or None."""
        a = self.soup.select('a[href*=imdb.com]')
        if a:
            for part in a[0]['href'].split('/'):
                if part.startswith('tt'):
                    return part
    @property
    def kp_url(self):
        """Kinopoisk URL linked from the page (IndexError when absent)."""
        a = self.soup.select('a[href*=kinopoisk.ru]')
        return a[0]['href']
    @property
    def plot(self):
        """Plot paragraph text, or None when the page has no synopsis."""
        p = self.soup.find('p', attrs ={'class':'review', 'align': 'justify'})
        if p:
            return p.get_text()
class world_art(world_art_soup):
    """Search front-end for world-art.ru: finds the film page that best
    matches a title, optionally narrowed by year / IMDb id / Kinopoisk URL."""
    Request_URL = "http://www.world-art.ru/search.php?public_search=%s&global_sector=cinema"
    def __init__(self, title, year=None, imdbid=None, kp_url=None):
        from ..util import quote_plus
        # The site expects cp1251-encoded query strings.
        url = self.Request_URL % quote_plus(title.encode('cp1251'))
        world_art_soup.__init__(self, url)
        self._title = title
        self._year = year
        self._imdbid = imdbid
        self._kp_url = kp_url
        self._info = None
    @property
    def info(self):
        """Best-matching world_art_info; raises AttributeError when no
        candidate survives the filters (result cached, including failure)."""
        if not self._info:
            results = self.search_by_title(self._title)
            #filter by year
            if self._year:
                results = [ item for item in results if item.year == self._year ]
            if self._imdbid:
                results = [ item for item in results if item.imdb == self._imdbid ]
                if results:
                    self._info = results[0]
                    return self._info
            if self._kp_url:
                results = [ item for item in results if IDs.id_by_kp_url(item.kp_url) == IDs.id_by_kp_url(self._kp_url) ]
                if results:
                    self._info = results[0]
                    return self._info
            # filter by title
            for item in results:
                if self._title in item.knowns:
                    self._info = item
                    return self._info
            # Sentinel value: remember that the search failed.
            self._info = 'No info'
            #for info in results:
            #    imdb = info.imdb
        if self._info == 'No info':
            raise AttributeError
        return self._info
    def search_by_title(self, title):
        """All candidate world_art_info pages from the search results
        (both the direct-redirect case and the result-list case)."""
        result = []
        for meta in self.soup.find_all('meta'):
            # meta <meta content="0; url=/cinema/cinema.php?id=68477" http-equiv="Refresh"/> Tag
            if meta.get('http-equiv') == "Refresh" and 'url=/cinema/cinema.php?id=' in meta.get('content'):
                url = meta.get('content').split('url=/')[-1]
                info = world_art_info(url)
                info.year = self._year
                #info.knowns = [ self._title ]
                result.append( info )
        for a in self.soup.find_all('a', class_="estimation"):
            info = world_art_info(a['href'])
            # Climb to the enclosing row to read the year cell.
            tr = a
            while tr.name != 'tr':
                tr = tr.parent
            info.year = tr.find('td').get_text()
            td = a.parent
            info.knowns = [ i.get_text() for i in td.find_all('i') ]
            result.append( info )
        return result
    def plot(self):
        """Plot text of the matched film (may raise AttributeError)."""
        return self.info.plot
    #def trailer(self):
    #    info = self.info
    def director(self):
        """List of director names; [] on any failure (best-effort)."""
        try:
            result = self.info.director
            result = result.replace(u'и другие', '')
            return [d.strip() for d in result.split(',')]
        except:
            return []
    def actors(self):
        """Cast of the matched film; [] on any failure (best-effort)."""
        try:
            return self.info.actors
        except:
            return []
class tmdb_movie_item(object):
    """Thin wrapper over a single TMDB API JSON payload (movie or TV show)
    that converts it to Kodi-style art and info-label dicts."""

    def __init__(self, json_data, type='movie'):
        self.json_data_ = json_data
        self.type = type

    def poster(self):
        """Full w500 poster URL, or '' when the payload has no poster."""
        try:
            return 'http://image.tmdb.org/t/p/w500' + self.json_data_['poster_path']
        except BaseException:
            return ''

    def fanart(self):
        """Full-size backdrop URL, or '' when the payload has no backdrop."""
        try:
            return 'http://image.tmdb.org/t/p/original' + self.json_data_['backdrop_path']
        except BaseException:
            return ''

    def get_art(self):
        """Kodi art dict: thumb/poster/thumbnailImage share the poster URL."""
        poster_url = self.poster()
        return {
            'thumb': poster_url,
            'poster': poster_url,
            'thumbnailImage': poster_url,
            'fanart': self.fanart(),
        }

    def get_info(self):
        """Translate the TMDB payload into a Kodi info-labels dict."""
        info = {}
        data = self.json_data_
        if 'genres' in data:
            info['genre'] = u', '.join([g['name'] for g in data['genres']])
        # TMDB field -> Kodi label; later entries overwrite earlier ones.
        for xbmc_tag, tmdb_tag in [
            ('aired', 'release_date'),
            ('plot', 'overview'),
            ('title', 'name'),
            ('originaltitle', 'original_title'),
            ('originaltitle', 'original_name'),
        ]:
            if tmdb_tag in data:
                info[xbmc_tag] = data[tmdb_tag]
        # First date-like field containing a 4-digit year wins.
        for tag in ['first_air_date', 'aired', 'release_date']:
            if tag in data:
                aired = data[tag]
                if aired:
                    m = re.search(r'(\d{4})', aired)
                    if m:
                        info['year'] = int(m.group(1))
                        break
        try:
            vid_item = data['videos']['results'][0]
            if vid_item['site'] == 'YouTube':
                info['trailer'] = 'plugin://plugin.video.youtube/?action=play_video&videoid=' + vid_item['key']
        except BaseException:
            pass
        # Fields copied verbatim when present.
        for item in ['director', 'mpaa', 'title', 'originaltitle', 'duration', 'studio', 'code', 'album', 'votes', 'thumb']:
            if item in data:
                info[item] = data[item]
        # 'credits',
        return info

    def imdb(self):
        """IMDb id from the payload (top level or external_ids), else None."""
        try:
            data = self.json_data_
            if 'imdb_id' in data:
                return data['imdb_id']
            if 'external_ids' in data and 'imdb_id' in data['external_ids']:
                return data['external_ids']['imdb_id']
        except BaseException:
            return None

    def tmdb_id(self):
        """TMDB numeric id, or None when missing."""
        return self.json_data_.get('id', None)
class Object(object):
    """Generic empty class (attribute container)."""
    pass
class KinopoiskAPI(object):
    """Scraper for kinopoisk.ru film pages.

    Fetches pages through a shared requests session (optionally via a
    SOCKS proxy or Google's web cache, per settings) and parses them with
    BeautifulSoup.  Responses are cached per URL at class level.
    """
    # Common session for KP requests
    session = None
    # Class-level response cache: [{'url': ..., 'response': ...}, ...]
    kp_requests = []
    @staticmethod
    def make_url_by_id(kp_id):
        """Canonical film-page URL for a numeric Kinopoisk id."""
        return 'http://www.kinopoisk.ru/film/' + str(kp_id) + '/'
    def __init__(self, kinopoisk_url = None, settings = None):
        # Settings drive proxy / Google-cache behaviour; default is empty.
        from settings import Settings
        self.settings = settings if settings else Settings('')
        self.kinopoisk_url = kinopoisk_url
        self.soup = None
        self._actors = None
    def _http_get(self, url):
        """GET *url* with the shared cache; falls back to the Google cache
        when configured or when KP serves a captcha page."""
        for resp in KinopoiskAPI.kp_requests:
            if resp['url'] == url:
                return resp['response']
        r = requests.Response()
        if self.session is None:
            self.session = requests.session()
        try:
            if self.settings.kp_googlecache:
                r = self.get_google_cache(url)
            else:
                # SOCKS proxy used to work around regional blocking.
                proxy = 'socks5h://socks.zaborona.help:1488'
                proxies = { 'http': proxy, 'https': proxy } if self.settings.kp_usezaborona else None
                headers = {'user-agent': user_agent}
                r = self.session.get(url, headers=headers, proxies=proxies, timeout=5.0)
        except requests.exceptions.ConnectionError as ce:
            # Synthesise a 503 response so callers can check status_code.
            r = requests.Response()
            r.status_code = requests.codes.service_unavailable
            debug(str(ce))
        except requests.exceptions.Timeout as te:
            # Synthesise a 408 response on timeout.
            r = requests.Response()
            r.status_code = requests.codes.request_timeout
            debug(str(te))
        if not self.settings.kp_googlecache:
            # KP answered with a captcha page: retry via the Google cache.
            if 'captcha' in r.text:
                r = self.get_google_cache(url)
        KinopoiskAPI.kp_requests.append({'url': url, 'response': r})
        return r
    def get_google_cache(self, url):
        """Fetch *url* through Google's web cache; empty Response on failure."""
        from ..util import quote_plus
        search_url = "http://www.google.com/search?q=" + quote_plus(url)
        headers = {'user-agent': user_agent}
        r = self.session.get(search_url, headers=headers, timeout=2.0)
        try:
            soup = BeautifulSoup(clean_html(r.text), 'html.parser')
            a = soup.find('a', class_='fl')
            if a:
                cache_url = a['href']
                from ..util import urlparse, urlunparse, ParseResult
                res = urlparse(cache_url)
                # NOTE(review): the fallback netloc below looks garbled or
                # redacted - expected 'webcache.googleusercontent.com';
                # confirm before relying on this code path.
                res = ParseResult(res.scheme if res.scheme else 'https',
                                  res.netloc if res.netloc else 'webcache.googleusercontent.<EMAIL>',
                                  res.path, res.params, res.query, res.fragment)
                cache_url = urlunparse(res)
                #print cache_url
                r = self.session.get(cache_url, headers=headers, timeout=2.0)
                # Drop everything before the <html> tag (cache banner etc.).
                indx = r._content.find('<html')
                r._content = r._content[indx:]
                resp = r
                return resp
        except BaseException as e:
            debug(str(e))
        return requests.Response()
    def makeSoup(self):
        """Lazily fetch and parse the film page into ``self.soup``."""
        if self.kinopoisk_url and self.soup is None:
            r = self._http_get(self.kinopoisk_url)
            if r.status_code == requests.codes.ok:
                text = clean_html(r.content)
                self.soup = BeautifulSoup(text, 'html.parser')
            else:
                pass
    def title(self):
        """Russian title, or None when the page is unavailable."""
        title = None
        self.makeSoup()
        if self.soup:
            s = self.soup.find('span', class_='moviename-title-wrapper')
            if s:
                title = s.get_text().strip('\t\r\n ')
        return title
    def originaltitle(self):
        """Original (non-Russian) title, or None when unavailable."""
        title = None
        self.makeSoup()
        if self.soup:
            span = self.soup.find('span', attrs = {'itemprop': 'alternativeHeadline'})
            if span:
                title = span.get_text().strip('\t\r\n ')
        return title
    def year(self):
        """Release year as text; raises AttributeError when not found."""
        self.makeSoup()
        if self.soup:
            for a in self.soup.find_all('a'):
                if '/lists/m_act%5Byear%5D/' in a.get('href', ''):
                    return a.get_text()
        raise AttributeError
    def director(self):
        """List of director names; raises AttributeError when not found."""
        self.makeSoup()
        if self.soup:
            #<td itemprop="director"><a href="/name/535852/" data-popup-info="enabled"><NAME></a></td>
            td = self.soup.find('td', attrs={"itemprop": "director"})
            if td:
                return [ a.get_text() for a in td.find_all('a') if '/name' in a['href'] ]
        raise AttributeError
    def plot(self):
        """Plot text (NBSPs normalised), or None when the page has no
        description; raises AttributeError when the page is unavailable."""
        plot = None
        self.makeSoup()
        if self.soup:
            div = self.soup.find('div', attrs={"itemprop": "description"})
            if div:
                plot = div.get_text().replace(u'\xa0', u' ')
            return plot
        raise AttributeError
    def base_actors_list(self):
        """Comma-separated actor names from the film page itself, or ''."""
        actors = []
        self.makeSoup()
        if self.soup:
            for li in self.soup.find_all('li', attrs={'itemprop': 'actors'}):
                a = li.find('a')
                if a:
                    actors.append(a.get_text())
            # The trailing '...' entry is a "more" placeholder, not a name.
            if '...' in actors:
                actors.remove('...')
        if actors:
            return ', '.join(actors)
        else:
            return ''
    def actors(self):
        """Detailed cast (photo / ru_name / en_name / role dicts) from the
        /cast/ page; cached after the first call."""
        if self._actors is not None:
            return self._actors
        self._actors = []
        if self.kinopoisk_url:
            cast_url = self.kinopoisk_url + 'cast/'
            r = self._http_get(cast_url)
            if r.status_code == requests.codes.ok:
                text = clean_html(r.text)
                soup = BeautifulSoup(text, 'html.parser')
                if not soup:
                    return []
                for actorInfo in soup.find_all('div', class_='actorInfo'):
                    photo = actorInfo.select('div.photo a')[0]['href']
                    #http://st.kp.yandex.net/images/actor_iphone/iphone360_30098.jpg
                    #/name/7627/
                    photo = photo.replace('/', '').replace('name', '')
                    photo = 'http://st.kp.yandex.net/images/actor_iphone/iphone360_' + photo + '.jpg'
                    ru_name = actorInfo.select('div.info .name a')[0].get_text()
                    en_name = actorInfo.select('div.info .name span')[0].get_text()
                    role = actorInfo.select('div.info .role')[0].get_text().replace('... ', '')
                    role = role.split(',')[0]
                    self._actors.append({'photo': photo,'ru_name': ru_name,'en_name': en_name,'role': role})
        return self._actors
    def __trailer(self, element):
        """Scan rows after *element* for an HQ trailer link; None if absent."""
        for parent in element.parents:
            #debug(parent.tag)
            if parent.name == 'tr':
                for tr in parent.next_siblings:
                    if not hasattr(tr, 'select'):
                        continue
                    if tr.name != 'tr':
                        continue
                    for a_cont in tr.select('a.continue'):
                        if u'Высокое качество' in a_cont.get_text():
                            trailer = a_cont['href']
                            trailer = re.search('link=(.+?)$', trailer).group(1)
                            try:
                                debug('trailer: ' + trailer)
                            except:
                                pass
                            return trailer
        return None
    def trailer(self):
        """High-quality trailer URL from the /video/type/1/ page, or None."""
        if self.kinopoisk_url:
            trailer_page = self.kinopoisk_url + 'video/type/1/'
            r = self._http_get(trailer_page)
            if r.status_code == requests.codes.ok:
                text = clean_html(r.text)
                soup = BeautifulSoup(text, 'html.parser')
                if not soup:
                    return None
                for div in soup.select('tr td div div.flag2'):
                    trailer = self.__trailer(div)
                    if trailer:
                        return trailer
                for a in soup.select('a.all'):
                    return self.__trailer(a)
        return None
    def poster(self):
        # Not provided by this source.
        raise AttributeError
class imdb_cast(soup_base):
    """Full-credits page of an IMDb title; exposes the cast-table rows.

    NOTE: the row parsing was left unfinished by the original author;
    this fix repairs initialisation and iteration but the per-actor
    fields are still TODO.
    """
    def __init__(self, url):
        # BUG FIX: the original called ``soup_base(self, ...)``, which
        # constructs a throwaway object instead of initialising *this*
        # instance.
        soup_base.__init__(self, url + '/fullcredits')
        self._actors = []

    @property
    def actors(self):
        """Lazily parsed cast entries; [] when the cast table is missing."""
        if not self._actors:
            tbl = self.soup.find('table', class_='cast_list')
            if tbl:
                # BUG FIX: find_all('tr') - find('tr') returns one tag only.
                for tr in tbl.find_all('tr'):
                    # Data rows carry an odd/even class attribute.
                    if 'class' in tr.attrs:
                        act = {}
                        # TODO: populate act (name/role/photo).  Sample photo
                        # URL shapes left by the original author:
                        # https://images-na.ssl-images-amazon.com/images/M/MV5BMTkxMzk2MDkwOV5BMl5BanBnXkFtZTcwMDAxODQwMg@@._V1_UX32_CR0,0,32,44_AL_.jpg
                        # https://images-na.ssl-images-amazon.com/images/M/MV5BMjExNzA4MDYxN15BMl5BanBnXkFtZTcwOTI1MDAxOQ@@._V1_SY1000_CR0,0,721,1000_AL_.jpg
                        # https://images-na.ssl-images-amazon.com/images/M/MV5BMjExNzA4MDYxN15BMl5BanBnXkFtZTcwOTI1MDAxOQ@@._V1_UY317_CR7,0,214,317_AL_.jpg
                        #img = tr.find('img')
                        self._actors.append(act)
        # BUG FIX: the property previously returned None implicitly.
        return self._actors
class ImdbAPI(object):
    """Scraper for an IMDb title page, requested with Russian-preferring
    Accept-Language headers."""
    headers = { 'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7' }
    def __init__(self, imdb_id):
        # NOTE(review): self.page is only set on HTTP 200; every accessor
        # below raises AttributeError if the request failed.
        resp = requests.get('http://www.imdb.com/title/' + imdb_id + '/', headers=self.headers)
        if resp.status_code == requests.codes.ok:
            text = clean_html(resp.content)
            self.page = BeautifulSoup(text, 'html.parser')
    def year(self):
        """Release year: first 4 chars of the release-info link text."""
        a = self.page.find('a', href=re.compile(r'releaseinfo\?ref_=tt_ov_rdat'))
        if a:
            result = a.get_text()
            return result[:4]
        else:
            raise AttributeError
    def rating(self):
        """Aggregate rating with ',' normalised to '.' as decimal point."""
        span = self.page.find('span', class_=re.compile(r'AggregateRatingButton__RatingScore'))
        if span:
            return span.get_text().replace(',', '.')
        else:
            raise AttributeError
    def runtime(self):
        """Runtime text (digits followed by 'h' or 'min') from the title
        metadata block; raises AttributeError when not present."""
        div = self.page.find('div', class_=re.compile(r'TitleBlock__TitleMetaDataContainer'))
        for li in div.find_all('li'):
            if re.match(r'\d+(h|min)', li.get_text()):
                return li.get_text()
        raise AttributeError
    def mpaa(self):
        """Certificate text from the parental-guide certificates link."""
        pattern = r"/title/tt\d+/parentalguide/certificates"
        a = self.page.find('a', href=re.compile(pattern))
        if a:
            return a.get_text()
        else:
            raise AttributeError
    def title(self):
        """Displayed title text, with non-breaking spaces normalised."""
        from ..util.string import uni_type
        h1 = self.page.find('h1', class_=re.compile(r'TitleHeader__TitleText'))
        if h1:
            return uni_type( h1.contents[0] ).replace(u'\xa0', u' ').strip()
        else:
            raise AttributeError
    def originaltitle(self):
        """Original title with the 'Original title: ' prefix removed."""
        from ..util.string import uni_type
        div = self.page.find('div', class_=re.compile(r'OriginalTitle__OriginalTitleText'))
        if div:
            return uni_type( div.contents[0] ).replace(u'\xa0', u' ').strip().replace('Original title: ', '')
        else:
            raise AttributeError
    def type(self):
        """'tvshow' when an episodes link exists on the page, else 'movie'."""
        a = self.page.find('a', href=re.compile(r'/title/tt\d+/episodes'))
        return 'tvshow' if a else 'movie'
class KinopoiskAPI2(KinopoiskAPI):
movie_cc = {}
token = '037313259a17be837be3bd04a51bf678'
def __init__(self, kinopoisk_url = None, settings = None):
if kinopoisk_url:
self.kp_id = IDs.id_by_kp_url(kinopoisk_url)
return super(KinopoiskAPI2, self).__init__(kinopoisk_url, settings)
else:
self.kp_id = None
@property
def data_cc(self):
if self.kp_id is None:
return {}
if self.kp_id in self.movie_cc:
return self.movie_cc[self.kp_id]
url = 'http://getmovie.cc/api/kinopoisk.json?id=%s&token=%s' % (self.kp_id, self.token)
r = requests.get(url)
if r.status_code == requests.codes.ok:
self.movie_cc[self.kp_id] = r.json()
return self.movie_cc[self.kp_id]
return {}
def title(self):
return self.data_cc.get('name_ru')
def | |
- start) / n_rows)
# The exact end point is therefore the ncolumns*nrows away from the start
end = start + n_columns * n_rows
ep = np.reshape(pp[start:end], (n_rows, n_columns))
if scale == "log":
ep = np.log10(ep)
# Reshape the freq into n_rowss of n_columnss & create arays
ef = np.reshape(ff[start:end], (n_rows, n_columns))
x_f = (ef[0, :] - ef[0, 0]) % deltanu
y_f = ef[:, 0]
return ep, x_f, y_f
def plot_echelle(
self,
deltanu=None,
numax=None,
minimum_frequency=None,
maximum_frequency=None,
smooth_filter_width=0.1,
scale="linear",
ax=None,
cmap="Blues",
):
"""Plots an echelle diagram of the periodogram by stacking the
periodogram in slices of deltanu.
Modes of equal radial degree should appear approximately vertically aligned.
If no structure is present, you are likely dealing with a faulty deltanu
value or a low signal to noise case.
This method is adapted from work by <NAME> & <NAME>.
Parameters
----------
deltanu : float
Value for the large frequency separation of the seismic mode
frequencies in the periodogram. Assumed to have the same units as
the frequencies, unless given an Astropy unit.
Is assumed to be in the same units as frequency if not given a unit.
numax : float
Value for the frequency of maximum oscillation. If a numax is
passed, a suitable range one FWHM of the mode envelope either side
of the will be shown. This is overwritten by custom frequency ranges.
Is assumed to be in the same units as frequency if not given a unit.
minimum_frequency : float
The minimum frequency at which to display the echelle
Is assumed to be in the same units as frequency if not given a unit.
maximum_frequency : float
The maximum frequency at which to display the echelle.
Is assumed to be in the same units as frequency if not given a unit.
smooth_filter_width : float
If given a value, will smooth periodogram used to plot the echelle
diagram using the periodogram.smooth(method='boxkernel') method with
a filter width of `smooth_filter_width`. This helps visualise the
echelle diagram. Is assumed to be in the same units as the
periodogram frequency.
scale: str
Set z axis to be "linear" or "log". Default is linear.
cmap : str
The name of the matplotlib colourmap to use in the echelle diagram.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
if (minimum_frequency is None) & (maximum_frequency is None):
numax = self._validate_numax(numax)
deltanu = self._validate_deltanu(deltanu)
if (not hasattr(numax, "unit")) & (numax is not None):
numax = numax * self.periodogram.frequency.unit
if (not hasattr(deltanu, "unit")) & (deltanu is not None):
deltanu = deltanu * self.periodogram.frequency.unit
ep, x_f, y_f = self._clean_echelle(
numax=numax,
deltanu=deltanu,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
smooth_filter_width=smooth_filter_width,
)
# Plot the echelle diagram
with plt.style.context(MPLSTYLE):
if ax is None:
_, ax = plt.subplots()
extent = (x_f[0].value, x_f[-1].value, y_f[0].value, y_f[-1].value)
figsize = plt.rcParams["figure.figsize"]
a = figsize[1] / figsize[0]
b = (extent[3] - extent[2]) / (extent[1] - extent[0])
vmin = np.nanpercentile(ep.value, 1)
vmax = np.nanpercentile(ep.value, 99)
im = ax.imshow(
ep.value,
cmap=cmap,
aspect=a / b,
origin="lower",
extent=extent,
vmin=vmin,
vmax=vmax,
)
cbar = plt.colorbar(im, ax=ax, extend="both", pad=0.01)
if isinstance(self.periodogram, SNRPeriodogram):
ylabel = "Signal to Noise Ratio (SNR)"
elif self.periodogram.power.unit == cds.ppm:
ylabel = "Amplitude [{}]".format(
self.periodogram.power.unit.to_string("latex")
)
else:
ylabel = "Power Spectral Density [{}]".format(
self.periodogram.power.unit.to_string("latex")
)
if scale == "log":
ylabel = "log10(" + ylabel + ")"
cbar.set_label(ylabel)
ax.set_xlabel(r"Frequency mod. {:.2f}".format(deltanu))
ax.set_ylabel(
r"Frequency [{}]".format(
self.periodogram.frequency.unit.to_string("latex")
)
)
ax.set_title("Echelle diagram for {}".format(self.periodogram.label))
return ax
def _make_echelle_elements(
self,
deltanu,
cmap="viridis",
minimum_frequency=None,
maximum_frequency=None,
smooth_filter_width=0.1,
scale="linear",
plot_width=490,
plot_height=340,
title="Echelle",
):
"""Helper function to make the elements of the echelle diagram for bokeh plotting."""
if not hasattr(deltanu, "unit"):
deltanu = deltanu * self.periodogram.frequency.unit
if smooth_filter_width:
pgsmooth = self.periodogram.smooth(filter_width=smooth_filter_width)
freq = pgsmooth.frequency # Makes code below more readable below
else:
freq = self.periodogram.frequency # Makes code below more readable
ep, x_f, y_f = self._clean_echelle(
deltanu=deltanu,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
smooth_filter_width=smooth_filter_width,
scale=scale,
)
fig = figure(
plot_width=plot_width,
plot_height=plot_height,
x_range=(0, 1),
y_range=(y_f[0].value, y_f[-1].value),
title=title,
tools="pan,box_zoom,reset",
toolbar_location="above",
border_fill_color="white",
)
fig.yaxis.axis_label = r"Frequency [{}]".format(freq.unit.to_string())
fig.xaxis.axis_label = r"Frequency / {:.3f} Mod. 1".format(deltanu)
lo, hi = np.nanpercentile(ep.value, [0.1, 99.9])
vlo, vhi = 0.3 * lo, 1.7 * hi
vstep = (lo - hi) / 500
color_mapper = LogColorMapper(palette="RdYlGn10", low=lo, high=hi)
fig.image(
image=[ep.value],
x=0,
y=y_f[0].value,
dw=1,
dh=y_f[-1].value,
color_mapper=color_mapper,
name="img",
)
stretch_slider = RangeSlider(
start=vlo,
end=vhi,
step=vstep,
title="",
value=(lo, hi),
orientation="vertical",
width=10,
height=230,
direction="rtl",
show_value=False,
sizing_mode="fixed",
name="stretch",
)
def stretch_change_callback(attr, old, new):
"""TPF stretch slider callback."""
fig.select("img")[0].glyph.color_mapper.high = new[1]
fig.select("img")[0].glyph.color_mapper.low = new[0]
stretch_slider.on_change("value", stretch_change_callback)
return fig, stretch_slider
    def interact_echelle(self, notebook_url="localhost:8888", **kwargs):
        """Display an interactive Jupyter notebook widget showing an Echelle diagram.
        This feature only works inside an active Jupyter Notebook, and
        requires an optional dependency, ``bokeh`` (v1.0 or later).
        This dependency can be installed using e.g. `conda install bokeh`.
        Parameters
        ----------
        notebook_url : str
            Location of the Jupyter notebook page (default: "localhost:8888")
            When showing Bokeh applications, the Bokeh server must be
            explicitly configured to allow connections originating from
            different URLs. This parameter defaults to the standard notebook
            host and port. If you are running on a different location, you
            will need to supply this value for the application to display
            properly. If no protocol is supplied in the URL, e.g. if it is
            of the form "localhost:8888", then "http" will be used.
        """
        try:
            import bokeh
            if bokeh.__version__[0] == "0":
                warnings.warn(
                    "interact() requires Bokeh version 1.0 or later", LightkurveWarning
                )
        except ImportError:
            # bokeh is an optional dependency; bail out gracefully.
            log.error(
                "The interact() tool requires the `bokeh` Python package; "
                "you can install bokeh using e.g. `conda install bokeh`."
            )
            return None
        # Default the frequency window to the full periodogram range.
        maximum_frequency = kwargs.pop(
            "maximum_frequency", self.periodogram.frequency.max().value
        )
        minimum_frequency = kwargs.pop(
            "minimum_frequency", self.periodogram.frequency.min().value
        )
        # Start from a previously estimated deltanu when available,
        # otherwise seed the slider with an arbitrary 1/30 of f_max.
        if not hasattr(self, "deltanu"):
            dnu = SeismologyQuantity(
                quantity=self.periodogram.frequency.max() / 30,
                name="deltanu",
                method="echelle",
            )
        else:
            dnu = self.deltanu
        def create_interact_ui(doc):
            # Build the echelle image figure and its stretch slider.
            fig_tpf, stretch_slider = self._make_echelle_elements(
                dnu,
                maximum_frequency=maximum_frequency,
                minimum_frequency=minimum_frequency,
                **kwargs
            )
            maxdnu = self.periodogram.frequency.max().value / 5
            # Interactive slider widgets
            dnu_slider = Slider(
                start=0.01,
                end=maxdnu,
                value=dnu.value,
                step=0.01,
                title="Delta Nu",
                width=290,
            )
            r_button = Button(label=">", button_type="default", width=30)
            l_button = Button(label="<", button_type="default", width=30)
            rr_button = Button(label=">>", button_type="default", width=30)
            ll_button = Button(label="<<", button_type="default", width=30)
            def update(attr, old, new):
                """Callback to take action when dnu slider changes"""
                dnu = SeismologyQuantity(
                    quantity=dnu_slider.value * u.microhertz,
                    name="deltanu",
                    method="echelle",
                )
                # Recompute the echelle image and refresh the bokeh image.
                ep, _, _ = self._clean_echelle(
                    deltanu=dnu,
                    minimum_frequency=minimum_frequency,
                    maximum_frequency=maximum_frequency,
                    **kwargs
                )
                fig_tpf.select("img")[0].data_source.data["image"] = [ep.value]
                fig_tpf.xaxis.axis_label = r"Frequency / {:.3f} Mod. 1".format(dnu)
            def go_right_by_one_small():
                """Step forward in time by a single cadence"""
                existing_value = dnu_slider.value
                if existing_value < maxdnu:
                    dnu_slider.value = existing_value + 0.002
            def go_left_by_one_small():
                """Step back in time by a single cadence"""
                existing_value = dnu_slider.value
                if existing_value > 0:
                    dnu_slider.value = existing_value - 0.002
            def go_right_by_one():
                """Step forward in time by a single cadence"""
                existing_value = dnu_slider.value
                if existing_value < maxdnu:
                    dnu_slider.value = existing_value + 0.01
            def go_left_by_one():
                """Step back in time by a single cadence"""
                existing_value = dnu_slider.value
                if existing_value > 0:
                    dnu_slider.value = existing_value - 0.01
            # Wire widgets to their callbacks.
            dnu_slider.on_change("value", update)
            r_button.on_click(go_right_by_one_small)
            l_button.on_click(go_left_by_one_small)
            rr_button.on_click(go_right_by_one)
            ll_button.on_click(go_left_by_one)
            widgets_and_figures = layout(
                [fig_tpf, [Spacer(height=20), stretch_slider]],
                [
                    ll_button,
                    Spacer(width=30),
                    l_button,
                    Spacer(width=25),
                    dnu_slider,
                    Spacer(width=30),
                    r_button,
                    Spacer(width=23),
                    rr_button,
                ],
            )
            doc.add_root(widgets_and_figures)
        output_notebook(verbose=False, hide_banner=True)
        return show(create_interact_ui, notebook_url=notebook_url)
def estimate_numax(self, method="acf2d", **kwargs):
"""Returns the frequency of the peak of the seismic oscillation modes envelope.
At present, the only method supported is based on using a
2D autocorrelation function (ACF2D). This method is implemented by the
`~lightkurve.seismology.estimate_numax_acf2d` function which accepts
the parameters `numaxs`, `window_width`, and `spacing`.
For details and literature references, please read the detailed
docstring of this function by typing ``lightkurve.seismology.estimate_numax_acf2d?``
in a Python terminal or notebook.
Parameters
----------
method : str
Method to use. Only ``"acf2d"`` is supported at this time.
Returns
-------
numax : `~lightkurve.seismology.SeismologyQuantity`
Numax of the periodogram, including details on the units and method.
"""
method = validate_method(method, supported_methods=["acf2d"])
if method == "acf2d":
from .numax_estimators import estimate_numax_acf2d
result = estimate_numax_acf2d(self.periodogram, **kwargs)
self.numax = result
return result
def diagnose_numax(self, numax=None):
"""Create diagnostic plots showing how numax was estimated."""
numax = self._validate_numax(numax)
return numax.diagnostics_plot_method(numax, self.periodogram)
def estimate_deltanu(self, method="acf2d", numax=None):
"""Returns the average value of the large frequency spacing, DeltaNu,
of the seismic oscillations of | |
'ipfs key list -l'. Default:. (string) Default: self
:returns: A parsed dict result of:
.. code-block:: python
{
"Name": "<string>"
"Value": "<string>"
}
"""
endpoint = 'name/publish'
args = [(ipfs_path, 'string')]
return await self.client.get_parsed(endpoint, args, kwargs)
async def name_pubsub_cancel(self, name, **kwargs):
"""
Cancel a name subscription
:param name: Name to cancel the subscription for. (string)
:returns: A parsed dict result of:
.. code-block:: python
{
"Canceled": "<bool>"
}
"""
endpoint = 'name/pubsub/cancel'
args = [(name, 'string')]
return await self.client.get_parsed(endpoint, args, kwargs)
async def name_pubsub_state(self, **kwargs):
"""
Query the state of IPNS pubsub
:returns: A parsed dict result of:
.. code-block:: python
{
"Enabled": "<bool>"
}
"""
endpoint = 'name/pubsub/state'
args = []
return await self.client.get_parsed(endpoint, args, kwargs)
async def name_pubsub_subs(self, **kwargs):
"""
Show current name subscriptions
:returns: A parsed dict result of:
.. code-block:: python
{
"Strings": [
"<string>"
]
}
"""
endpoint = 'name/pubsub/subs'
args = []
return await self.client.get_parsed(endpoint, args, kwargs)
async def name_resolve(self, **kwargs):
"""
Resolve IPNS names.
:param name: The IPNS name to resolve. Defaults to your node's peerID. (string) Default: ""
:param recursive: Resolve until the result is not an IPNS name. (bool) Default: False
:param nocache: Do not use cached entries. (bool) Default: False
:param dht_record_count: Number of records to request for DHT resolution. (uint) Default: 0
:param dht_timeout: Max time to collect values during DHT resolution eg "30s". Pass 0 for no timeout. (string) Default: ""
:returns: A parsed dict result of:
.. code-block:: python
{
"Path": "<string>"
}
"""
endpoint = 'name/resolve'
args = []
return await self.client.get_parsed(endpoint, args, kwargs)
def object_data(self, key, **kwargs):
"""
Output the raw bytes of an IPFS object.
:param key: Key of the object to retrieve, in base58-encoded multihash format. (string)
:returns: A aiohttp.ClientResponse object that can be read like a file.
"""
endpoint = 'object/data'
args = [(key, 'string')]
return self.client.get(endpoint, args, kwargs)
async def object_diff(self, obj_a, obj_b, **kwargs):
"""
Display the diff between two ipfs objects.
:param obj_a: Object to diff against. (string)
:param obj_b: Object to diff. (string)
:param verbose: Print extra information. (bool) Default: False
:returns: A parsed dict result of:
.. code-block:: python
{
"Changes": [
{
"Type": "<int>"
"Path": "<string>"
"Before": "<string>"
"After": "<string>"
}
]
}
"""
endpoint = 'object/diff'
args = [(obj_a, 'string'), (obj_b, 'string')]
return await self.client.get_parsed(endpoint, args, kwargs)
async def object_get(self, key, **kwargs):
"""
Get and serialize the DAG node named by <key>.
:param key: Key of the object to retrieve, in base58-encoded multihash format. (string)
:returns: A parsed dict result of:
.. code-block:: python
{
"Links": [
{
"Name": "<string>"
"Hash": "<string>"
"Size": "<uint64>"
}
]
"Data": "<string>"
}
"""
endpoint = 'object/get'
args = [(key, 'string')]
return await self.client.get_parsed(endpoint, args, kwargs)
async def object_links(self, key, **kwargs):
"""
Output the links pointed to by the specified object.
:param key: Key of the object to retrieve, in base58-encoded multihash format. (string)
:param headers: Print table headers (Hash, Size, Name). (bool) Default: False
:returns: A parsed dict result of:
.. code-block:: python
{
"Hash": "<string>"
"Links": [
{
"Name": "<string>"
"Hash": "<string>"
"Size": "<uint64>"
}
]
}
"""
endpoint = 'object/links'
args = [(key, 'string')]
return await self.client.get_parsed(endpoint, args, kwargs)
async def object_new(self, **kwargs):
"""
Create a new object from an ipfs template.
:param template: Template to use. Optional. (string) Default: ""
:returns: A parsed dict result of:
.. code-block:: python
{
"Hash": "<string>"
"Links": [
{
"Name": "<string>"
"Hash": "<string>"
"Size": "<uint64>"
}
]
}
"""
endpoint = 'object/new'
args = []
return await self.client.get_parsed(endpoint, args, kwargs)
async def object_patch_add_link(self, root, name, ref, **kwargs):
"""
Add a link to a given object.
:param root: The hash of the node to modify. (string)
:param name: Name of link to create. (string)
:param ref: IPFS object to add link to. (string)
:param create: Create intermediary nodes. (bool) Default: False
:returns: A parsed dict result of:
.. code-block:: python
{
"Hash": "<string>"
"Links": [
{
"Name": "<string>"
"Hash": "<string>"
"Size": "<uint64>"
}
]
}
"""
endpoint = 'object/patch/add-link'
args = [(root, 'string'), (name, 'string'), (ref, 'string')]
return await self.client.get_parsed(endpoint, args, kwargs)
async def object_patch_append_data(self, root, data, **kwargs):
"""
Append data to the data segment of a dag node.
:param root: The hash of the node to modify. (string)
:param data: Data to append. (string)
:returns: A parsed dict result of:
.. code-block:: python
{
"Hash": "<string>"
"Links": [
{
"Name": "<string>"
"Hash": "<string>"
"Size": "<uint64>"
}
]
}
"""
endpoint = 'object/patch/append-data'
args = [(root, 'string'), (data, 'file')]
return await self.client.get_parsed(endpoint, args, kwargs)
async def object_patch_rm_link(self, root, link, **kwargs):
"""
Remove a link from an object.
:param root: The hash of the node to modify. (string)
:param link: Name of the link to remove. (string)
:returns: A parsed dict result of:
.. code-block:: python
{
"Hash": "<string>"
"Links": [
{
"Name": "<string>"
"Hash": "<string>"
"Size": "<uint64>"
}
]
}
"""
endpoint = 'object/patch/rm-link'
args = [(root, 'string'), (link, 'string')]
return await self.client.get_parsed(endpoint, args, kwargs)
async def object_patch_set_data(self, root, data, **kwargs):
"""
Set the data field of an IPFS object.
:param root: The hash of the node to modify. (string)
:param data: The data to set the object to. (string)
:returns: A parsed dict result of:
.. code-block:: python
{
"Hash": "<string>"
"Links": [
{
"Name": "<string>"
"Hash": "<string>"
"Size": "<uint64>"
}
]
}
"""
endpoint = 'object/patch/set-data'
args = [(root, 'string'), (data, 'file')]
return await self.client.get_parsed(endpoint, args, kwargs)
async def object_put(self, data, **kwargs):
"""
Store input as a DAG object, print its key.
:param data: Data to be stored as a DAG object. (string)
:param inputenc: Encoding type of input data. One of: {"protobuf", "json"}. (string) Default: json
:param datafieldenc: Encoding type of the data field, either "text" or "base64". (string) Default: text
:param pin: Pin this object when adding. (bool) Default: False
:param quiet: Write minimal output. (bool) Default: False
:returns: A parsed dict result of:
.. code-block:: python
{
"Hash": "<string>"
"Links": [
{
"Name": "<string>"
"Hash": "<string>"
"Size": "<uint64>"
}
]
}
"""
endpoint = 'object/put'
args = [(data, 'file')]
return await self.client.get_parsed(endpoint, args, kwargs)
async def object_stat(self, key, **kwargs):
"""
Get stats for the DAG node named by <key>.
:param key: Key of the object to retrieve, in base58-encoded multihash format. (string)
:returns: A parsed dict result of:
.. code-block:: python
{
"Hash": "<string>"
"NumLinks": "<int>"
"BlockSize": "<int>"
"LinksSize": "<int>"
"DataSize": "<int>"
"CumulativeSize": "<int>"
}
"""
endpoint = 'object/stat'
args = [(key, 'string')]
return await self.client.get_parsed(endpoint, args, kwargs)
def p2p_listener_close(self, **kwargs):
"""
Close active p2p listener.
:param protocol: P2P listener protocol (string) Default: ""
:param all: Close all listeners. (bool) Default: False
:returns: A aiohttp.ClientResponse object that can be read like a file.
"""
endpoint = 'p2p/listener/close'
args = []
return self.client.get(endpoint, args, kwargs)
async def p2p_listener_ls(self, **kwargs):
"""
List active p2p listeners.
:param headers: Print table headers (HandlerID, Protocol, Local, Remote). (bool) Default: False
:returns: A parsed dict result of:
.. code-block:: python
{
"Listeners": [
{
"Protocol": "<string>"
"Address": "<string>"
}
]
}
"""
endpoint = 'p2p/listener/ls'
args = []
return await self.client.get_parsed(endpoint, args, kwargs)
def p2p_listener_open(self, protocol, address, **kwargs):
"""
Forward p2p connections to a network multiaddr.
:param protocol: Protocol identifier. (string)
:param address: Request handling application address. (string)
:returns: A aiohttp.ClientResponse object that can be read like a file.
"""
endpoint = 'p2p/listener/open'
args = [(protocol, 'string'), (address, 'string')]
return self.client.get(endpoint, args, kwargs)
def p2p_stream_close(self, **kwargs):
"""
Close active p2p stream.
:param handlerid: Stream HandlerID (string) Default: ""
:param all: Close all streams. (bool) Default: False
:returns: A aiohttp.ClientResponse object that can be read like a file.
"""
endpoint = 'p2p/stream/close'
args = []
return self.client.get(endpoint, args, kwargs)
def p2p_stream_dial(self, peer, protocol, **kwargs):
"""
Dial to a p2p listener.
:param peer: Remote peer to connect to (string)
:param protocol: Protocol identifier. (string)
| |
import abc
from collections import OrderedDict
import time
import gtimer as gt
import numpy as np
from rlkit.core import logger, eval_util
from rlkit.data_management.env_replay_buffer import MultiTaskReplayBuffer,EnvReplayBuffer
from rlkit.data_management.path_builder import PathBuilder
from rlkit.samplers.in_place import SMMInPlacePathSampler, InPlacePathSampler,SeedInPlacePathSampler, ExpInPlacePathSampler,ExpInPlacePathSamplerSimple
from rlkit.torch import pytorch_util as ptu
from rlkit.smm.smm_policy import hard_smm_point
from rlkit.smm.smm_sampler import SMMSampler
from rlkit.policies.base import ExplorationPolicy
import pickle
import torch
class MetaRLAlgorithm(metaclass=abc.ABCMeta):
def __init__(
self,
env,
agent,
train_tasks,
eval_tasks,
meta_batch=64,
num_iterations=100,
num_train_steps_per_itr=1000,
num_initial_steps=100,
num_tasks_sample=100,
num_steps_prior=100,
num_steps_posterior=100,
num_extra_rl_steps_posterior=100,
num_evals=10,
num_steps_per_eval=1000,
batch_size=1024,
embedding_batch_size=1024,
embedding_mini_batch_size=1024,
max_path_length=1000,
discount=0.99,
replay_buffer_size=1000000,
reward_scale=1,
num_exp_traj_eval=1,
update_post_train=1,
eval_deterministic=False,
render=False,
save_replay_buffer=False,
save_algorithm=False,
save_environment=False,
render_eval_paths=False,
dump_eval_paths=False,
plotter=None,
use_SMM=False,
load_SMM =False,
use_history=False,
SMM_path=None,
num_skills = 1,
seed_sample=False,
attention=False,
snail=False,
sample_interval=5
):
"""
:param env: training env
:param agent: agent that is conditioned on a latent variable z that rl_algorithm is responsible for feeding in
:param train_tasks: list of tasks used for training
:param eval_tasks: list of tasks used for eval
see default experiment config file for descriptions of the rest of the arguments
"""
self.env = env
self.agent = agent
self.exploration_agent = agent # Can potentially use a different policy purely for exploration rather than also solving tasks, currently not being used
self.train_tasks = train_tasks
self.eval_tasks = eval_tasks
self.meta_batch = meta_batch
self.num_iterations = num_iterations
self.num_train_steps_per_itr = num_train_steps_per_itr
self.num_initial_steps = num_initial_steps
self.num_tasks_sample = num_tasks_sample
self.num_steps_prior = num_steps_prior
self.num_steps_posterior = num_steps_posterior
self.num_extra_rl_steps_posterior = num_extra_rl_steps_posterior
self.num_evals = num_evals
self.num_steps_per_eval = num_steps_per_eval
self.batch_size = batch_size
self.embedding_batch_size = embedding_batch_size
self.embedding_mini_batch_size = embedding_mini_batch_size
self.max_path_length = max_path_length
self.discount = discount
self.replay_buffer_size = replay_buffer_size
self.reward_scale = reward_scale
self.update_post_train = update_post_train
self.num_exp_traj_eval = num_exp_traj_eval
self.eval_deterministic = eval_deterministic
self.render = render
self.save_replay_buffer = save_replay_buffer
self.save_algorithm = save_algorithm
self.save_environment = save_environment
self.eval_statistics = None
self.render_eval_paths = render_eval_paths
self.dump_eval_paths = dump_eval_paths
self.plotter = plotter
self.use_SMM = use_SMM
self.load_SMM = load_SMM
self.use_history = use_history,
self.SMM_path = SMM_path
self.num_skills = num_skills
self.seed_sample = seed_sample
self.sampler = InPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
)
if self.seed_sample:
self.seedsampler = SeedInPlacePathSampler(
env=env,
policy=agent,
max_path_length=self.max_path_length,
sample_interval=sample_interval
)
if self.use_SMM:
self.smm_sampler = SMMSampler(
env=env,
max_path_length=max_path_length,
agent = agent,
load_SMM=self.load_SMM,
use_history=self.use_history,
SMM_path=self.SMM_path,
num_skills = self.num_skills
)
# separate replay buffers for
# - training RL update
# - training encoder update
self.replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self.enc_replay_buffer = MultiTaskReplayBuffer(
self.replay_buffer_size,
env,
self.train_tasks,
)
self._n_env_steps_total = 0
self._n_train_steps_total = 0
self._n_rollouts_total = 0
self._do_train_time = 0
self._epoch_start_time = None
self._algo_start_time = None
self._old_table_keys = None
self._current_path_builder = PathBuilder()
self._exploration_paths = []
    def make_exploration_policy(self, policy):
        """Hook to wrap *policy* for exploration; identity by default."""
        return policy
    def make_eval_policy(self, policy):
        """Hook to wrap *policy* for evaluation; identity by default."""
        return policy
def sample_task(self, is_eval=False):
'''
sample task randomly
'''
if is_eval:
idx = np.random.randint(len(self.eval_tasks))
else:
idx = np.random.randint(len(self.train_tasks))
return idx
    def train(self):
        '''
        meta-training loop

        Per iteration: (1) on the first iteration only, seed the buffers with
        initial rollouts from every train task; (2) sample data from a random
        subset of train tasks; (3) run gradient updates on meta-batches;
        (4) attempt evaluation.
        '''
        self.pretrain()
        params = self.get_epoch_snapshot(-1)
        logger.save_itr_params(-1, params)
        gt.reset()
        gt.set_def_unique(False)
        self._current_path_builder = PathBuilder()
        # at each iteration, we first collect data from tasks, perform meta-updates, then try to evaluate
        for it_ in gt.timed_for(
                range(self.num_iterations),
                save_itrs=True,
        ):
            self._start_epoch(it_)
            self.training_mode(True)
            if it_ == 0:
                print('collecting initial pool of data for train and eval')
                # temp for evaluating
                for idx in self.train_tasks:
                    self.task_idx = idx
                    self.env.reset_task(idx)
                    # Initial data uses z ~ prior only (posterior rate = inf).
                    if not self.use_SMM:
                        if not self.seed_sample:
                            self.collect_data(self.num_initial_steps, 1, np.inf)
                        else:
                            self.collect_data(self.num_initial_steps, 1, np.inf)
                            self.collect_data_seed(self.num_initial_steps, 1, np.inf,accumulate_context=False)
                    else:
                        # SMM rollouts feed the encoder; policy rollouts feed RL.
                        self.collect_data_smm(self.num_initial_steps)
                        self.collect_data_policy(self.num_initial_steps, 1, np.inf)
            # Sample data from train tasks.
            for i in range(self.num_tasks_sample):
                idx = np.random.randint(len(self.train_tasks))
                self.task_idx = idx
                self.env.reset_task(idx)
                # Encoder buffer holds only fresh data for the current task.
                self.enc_replay_buffer.task_buffers[idx].clear()
                if not self.use_SMM:
                    if not self.seed_sample:
                        # collect some trajectories with z ~ prior
                        if self.num_steps_prior > 0:
                            self.collect_data(self.num_steps_prior, 1, np.inf)
                        # collect some trajectories with z ~ posterior
                        if self.num_steps_posterior > 0:
                            self.collect_data(self.num_steps_posterior, 1, self.update_post_train)
                        # even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
                        if self.num_extra_rl_steps_posterior > 0:
                            self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,
                                              add_to_enc_buffer=False)
                    else:
                        # Seed-sampled variant mirrors each collection phase.
                        if self.num_steps_prior > 0:
                            self.collect_data(self.num_steps_prior, 1, np.inf)
                            self.collect_data_seed(self.num_steps_prior, 1, np.inf,accumulate_context=False)
                        if self.num_steps_posterior > 0:
                            self.collect_data(self.num_steps_posterior, 1, self.update_post_train)
                            self.collect_data_seed(self.num_steps_posterior, 1, self.update_post_train)
                        if self.num_extra_rl_steps_posterior > 0:
                            self.collect_data(self.num_extra_rl_steps_posterior, 1, self.update_post_train,
                                              add_to_enc_buffer=False)
                            self.collect_data_seed(self.num_extra_rl_steps_posterior, 1, self.update_post_train,
                                                   add_to_enc_buffer=False)
                else:
                    if self.num_steps_prior > 0:
                        self.collect_data_smm(self.num_steps_prior)
                        self.collect_data_policy(self.num_steps_prior, 1, np.inf)
                    # collect some trajectories with z ~ posterior
                    if self.num_steps_posterior > 0:
                        self.collect_data_smm(self.num_steps_posterior)
                        self.collect_data_policy(self.num_steps_posterior, 1, self.update_post_train)
                    # even if encoder is trained only on samples from the prior, the policy needs to learn to handle z ~ posterior
                    if self.num_extra_rl_steps_posterior > 0:
                        self.collect_data_policy(self.num_extra_rl_steps_posterior, 1, self.update_post_train)
            # Sample train tasks and compute gradient updates on parameters.
            for train_step in range(self.num_train_steps_per_itr):
                indices = np.random.choice(self.train_tasks, self.meta_batch)
                self._do_training(indices)
                self._n_train_steps_total += 1
            gt.stamp('train')
            self.training_mode(False)
            # eval
            self._try_to_eval(it_)
            gt.stamp('eval')
            self._end_epoch()
    def pretrain(self):
        """
        Hook for any setup before the main training phase; subclasses may
        override. Default is a no-op.
        """
        pass
def collect_data_smm(self,num_samples):
'''
Notice that SMM data should only be available for the encoder
:param num_samples: number of transitions to sample
:return:
'''
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.smm_sampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=np.inf)
num_transitions += n_samples
self.enc_replay_buffer.add_paths(self.task_idx, paths)
self._n_env_steps_total += num_transitions
gt.stamp('smm sample')
def collect_data_policy(self, num_samples, resample_z_rate, update_posterior_rate):
'''
get trajectories from current env in batch mode with given policy
collect complete trajectories until the number of collected transitions >= num_samples
:param agent: policy to rollout
:param num_samples: total number of transitions to sample
:param resample_z_rate: how often to resample latent context z (in units of trajectories)
:param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
:param add_to_enc_buffer: whether to add collected data to encoder replay buffer
'''
# start from the prior
self.agent.clear_z()
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=update_posterior_rate,
accum_context=False,
resample=resample_z_rate)
num_transitions += n_samples
self.replay_buffer.add_paths(self.task_idx, paths)
if update_posterior_rate != np.inf:
context = self.sample_context(self.task_idx)
self.agent.infer_posterior(context)
self._n_env_steps_total += num_transitions
gt.stamp('policy sample')
def collect_data(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True,add_to_policy_buffer=True):
'''
get trajectories from current env in batch mode with given policy
collect complete trajectories until the number of collected transitions >= num_samples
:param agent: policy to rollout
:param num_samples: total number of transitions to sample
:param resample_z_rate: how often to resample latent context z (in units of trajectories)
:param update_posterior_rate: how often to update q(z | c) from which z is sampled (in units of trajectories)
:param add_to_enc_buffer: whether to add collected data to encoder replay buffer
'''
# start from the prior
self.agent.clear_z()
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.sampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=update_posterior_rate,
accum_context=False,
resample=resample_z_rate)
num_transitions += n_samples
#for p in paths:
# print(p['actions'],p['rewards'])
if add_to_policy_buffer:
self.replay_buffer.add_paths(self.task_idx, paths)
if add_to_enc_buffer:
self.enc_replay_buffer.add_paths(self.task_idx, paths)
if update_posterior_rate != np.inf:
context = self.sample_context(self.task_idx)
self.agent.infer_posterior(context)
self._n_env_steps_total += num_transitions
gt.stamp('sample')
def collect_data_seed(self, num_samples, resample_z_rate, update_posterior_rate, add_to_enc_buffer=True,add_to_policy_buffer=True,accumulate_context=True):
self.agent.clear_z()
num_transitions = 0
while num_transitions < num_samples:
paths, n_samples = self.seedsampler.obtain_samples(max_samples=num_samples - num_transitions,
max_trajs=1,
accum_context=accumulate_context
)
num_transitions += n_samples
if add_to_policy_buffer:
self.replay_buffer.add_paths(self.task_idx, paths)
if add_to_enc_buffer:
self.enc_replay_buffer.add_paths(self.task_idx, paths)
#if update_posterior_rate != np.inf:
# context = self.prepare_context(self.task_idx)
# self.agent.infer_posterior(context)
self._n_env_steps_total += num_transitions
gt.stamp('sample')
    def _try_to_eval(self, epoch):
        """
        Run evaluation for this epoch (if allowed) and dump tabular logs.

        Logger table keys must be identical across iterations; this is
        asserted below against the previous epoch's key set.
        """
        logger.save_extra_data(self.get_extra_data_to_save(epoch))
        if self._can_evaluate():
            self.evaluate(epoch)
            params = self.get_epoch_snapshot(epoch)
            logger.save_itr_params(epoch, params)
            table_keys = logger.get_table_key_set()
            if self._old_table_keys is not None:
                assert table_keys == self._old_table_keys, (
                    "Table keys cannot change from iteration to iteration."
                )
            self._old_table_keys = table_keys
            logger.record_tabular(
                "Number of train steps total",
                self._n_train_steps_total,
            )
            logger.record_tabular(
                "Number of env steps total",
                self._n_env_steps_total,
            )
            logger.record_tabular(
                "Number of rollouts total",
                self._n_rollouts_total,
            )
            # Pull per-iteration timings from the gtimer stamps recorded in
            # train() / the collect_data* methods; stamp names must match.
            times_itrs = gt.get_times().stamps.itrs
            train_time = times_itrs['train'][-1]
            if not self.use_SMM:
                sample_time = times_itrs['sample'][-1]
            else:
                sample_time = times_itrs['policy sample'][-1]
            # No eval stamp exists yet on epoch 0.
            eval_time = times_itrs['eval'][-1] if epoch > 0 else 0
            epoch_time = train_time + sample_time + eval_time
            total_time = gt.get_times().total
            logger.record_tabular('Train Time (s)', train_time)
            logger.record_tabular('(Previous) Eval Time (s)', eval_time)
            logger.record_tabular('Sample Time (s)', sample_time)
            logger.record_tabular('Epoch Time (s)', epoch_time)
            logger.record_tabular('Total Train Time (s)', total_time)
            logger.record_tabular("Epoch", epoch)
            logger.dump_tabular(with_prefix=False, with_timestamp=False)
        else:
            logger.log("Skipping eval for now.")
    def _can_evaluate(self):
        """
        One annoying thing about the logger table is that the keys at each
        iteration need to be the exact same. So unless you can compute
        everything, skip evaluation.
        A common example for why you might want to skip evaluation is that at
        the beginning of training, you may not have enough data for a
        validation and training set.
        :return: bool; always True here since eval collects its own context.
        """
        # eval collects its own context, so can eval any time
        return True
def _can_train(self):
return all([self.replay_buffer.num_steps_can_sample(idx) >= self.batch_size for idx in self.train_tasks])
def _get_action_and_info(self, agent, observation):
"""
Get an action to take in the environment.
| |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is based on: https://github.com/nutonomy/second.pytorch.git
#
# MIT License
# Copyright (c) 2018
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
from enum import Enum
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from utils import box_torch_ops
from models.pointpillars import PillarFeatureNet, PointPillarsScatter, RPN
from utils.torchprofiler import Profiler
import os

# Import the quantization stubs only when the quantized workflow is enabled.
# os.environ.get avoids a KeyError crash when W_QUANT is not set at all
# (os.environ["W_QUANT"] raised for any run that did not export the variable).
if os.environ.get("W_QUANT") == '1':
    from pytorch_nndct.nn import QuantStub, DeQuantStub
class LossNormType(Enum):
    """Loss-normalization modes; string values match the config file names."""
    NormByNumPositives = "norm_by_num_positives"
    NormByNumExamples = "norm_by_num_examples"
    NormByNumPosNeg = "norm_by_num_pos_neg"
def get_paddings_indicator(actual_num, max_num, axis=0):
    """Build a boolean mask flagging real (non-padded) entries.

    Given per-row element counts ``actual_num`` (e.g. shape [N]) and the
    padded length ``max_num``, returns a bool tensor (e.g. [N, max_num])
    that is True at positions holding real data and False at padding.

    Args:
        actual_num: integer tensor of actual element counts per row.
        max_num: padded length along the new axis.
        axis: axis of ``actual_num`` after which the mask axis is inserted.

    Returns:
        Bool tensor, True where position index < actual count.
    """
    counts = torch.unsqueeze(actual_num, axis + 1)
    # Row of position indices 0..max_num-1, shaped to broadcast against counts.
    view_shape = [1] * len(counts.shape)
    view_shape[axis + 1] = -1
    positions = torch.arange(
        max_num, dtype=torch.int, device=counts.device).view(view_shape)
    return counts.int() > positions
class PreProcess(object):
    """Normalize raw pillar point features into the network input layout.

    Point x/y/z coordinates are shifted and scaled into [0, 1] using the
    point-cloud range, padded rows are zeroed out, and the tensor is permuted
    to a batched channel-first layout.
    """

    def __init__(self,
                 with_distance=False,
                 voxel_size=(0.2, 0.2, 4),
                 pc_range=(0, -40, -3, 70.4, 40, 1)):
        """
        Args:
            with_distance: append per-point Euclidean norm as an extra feature.
            voxel_size: (vx, vy, vz) pillar dimensions.
            pc_range: (x0, y0, z0, x1, y1, z1) point-cloud extent.
        """
        # Need pillar (voxel) size and x/y offset in order to calculate pillar offset
        self.vx = voxel_size[0]
        self.vy = voxel_size[1]
        self.vz = voxel_size[2]
        self.x_offset = self.vx / 2 + pc_range[0]
        self.y_offset = self.vy / 2 + pc_range[1]
        self._with_distance = with_distance
        # BUG FIX: store the range as an ndarray. __call__ slices it and feeds
        # it to torch.from_numpy, which rejects plain tuples/lists, so the
        # default tuple pc_range previously crashed; tuple slicing also cannot
        # be subtracted elementwise.
        self.pc_range = np.asarray(pc_range, dtype=np.float32)

    def __call__(self, features, num_voxels, coors):
        """Normalize and mask pillar features.

        Args:
            features: float tensor of per-pillar point features; the first
                three channels are x/y/z (assumed (P, N, C) — padded points
                per pillar along dim 1; verify against caller).
            num_voxels: per-pillar count of real points.
            coors: pillar coordinates (unused here; kept for interface parity).

        Returns:
            Tensor with batch dim added and channels first.
        """
        device = features.device
        # NOTE: the cluster-center / pillar-center feature decorations from the
        # original PointPillars implementation are intentionally disabled here;
        # only range-normalized coordinates (plus optional distance) are used.
        pc_start = torch.from_numpy(self.pc_range[:3]).to(device)
        pc_len = torch.from_numpy(self.pc_range[3:6] - self.pc_range[:3]).to(device)
        features[:, :, :3] = features[:, :, :3] - pc_start
        features[:, :, :3] = features[:, :, :3] / pc_len
        features_ls = [features]
        if self._with_distance:
            points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True)
            features_ls.append(points_dist)
        features = torch.cat(features_ls, dim=-1)
        # The feature decorations were calculated without regard to whether
        # pillar was empty. Need to ensure that empty pillars remain set to zeros.
        voxel_count = features.shape[1]
        mask = get_paddings_indicator(num_voxels, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(features)
        features *= mask
        # (P, N, C) -> (1, C, P, N): add batch dim, channels first.
        features = features.permute(2, 0, 1).contiguous().unsqueeze(0)
        return features
class PostProcess(object):
    def __init__(self,
                 num_class=2,
                 use_direction_classifier=True,
                 use_sigmoid_score=False,
                 encode_background_as_zeros=True,
                 use_rotate_nms=True,
                 multiclass_nms=False,
                 nms_score_threshold=0.5,
                 nms_pre_max_size=1000,
                 nms_post_max_size=20,
                 nms_iou_threshold=0.1,
                 target_assigner=None):
        """Store decoding and NMS configuration for post-processing.

        Args:
            num_class: number of foreground classes.
            use_direction_classifier: whether direction logits are predicted.
            use_sigmoid_score: apply sigmoid (vs softmax) to class logits.
            encode_background_as_zeros: background has no explicit logit slot.
            use_rotate_nms: use rotated-box NMS instead of axis-aligned NMS.
            multiclass_nms: run NMS per class.
            nms_score_threshold: minimum score kept before NMS.
            nms_pre_max_size: max boxes fed into NMS.
            nms_post_max_size: max boxes kept after NMS.
            nms_iou_threshold: IoU threshold for suppression.
            target_assigner: provides the box coder used to decode predictions.
        """
        super().__init__()
        # Box coder comes from the target assigner used at training time.
        self._box_coder = target_assigner.box_coder
        self._num_class = num_class
        self._encode_background_as_zeros = encode_background_as_zeros
        self._use_direction_classifier = use_direction_classifier
        self._use_sigmoid_score = use_sigmoid_score
        self._use_rotate_nms = use_rotate_nms
        self._multiclass_nms = multiclass_nms
        self._nms_pre_max_size = nms_pre_max_size
        self._nms_post_max_size = nms_post_max_size
        self._nms_iou_threshold = nms_iou_threshold
        self._nms_score_threshold = nms_score_threshold
def __call__(self, example, preds_dict):
t = time.time()
batch_size = example['anchors'].shape[0]
batch_anchors = example["anchors"].view(batch_size, -1, 7)
# self._total_inference_count += batch_size
batch_rect = example["rect"]
batch_Trv2c = example["Trv2c"]
batch_P2 = example["P2"]
if "anchors_mask" not in example:
batch_anchors_mask = [None] * batch_size
else:
batch_anchors_mask = example["anchors_mask"].view(batch_size, -1)
batch_imgidx = example['image_idx']
batch_box_preds = preds_dict["box_preds"]
batch_cls_preds = preds_dict["cls_preds"]
batch_box_preds = batch_box_preds.view(batch_size, -1,
self._box_coder.code_size)
num_class_with_bg = self._num_class
if not self._encode_background_as_zeros:
num_class_with_bg = self._num_class + 1
batch_cls_preds = batch_cls_preds.view(batch_size, -1, num_class_with_bg)
batch_box_preds = self._box_coder.decode_torch(batch_box_preds, batch_anchors)
if self._use_direction_classifier:
batch_dir_preds = preds_dict["dir_cls_preds"]
batch_dir_preds = batch_dir_preds.view(batch_size, -1, 2)
else:
batch_dir_preds = [None] * batch_size
predictions_dicts = []
for box_preds, cls_preds, dir_preds, rect, Trv2c, P2, img_idx, a_mask in zip(
batch_box_preds, batch_cls_preds, batch_dir_preds, batch_rect,
batch_Trv2c, batch_P2, batch_imgidx, batch_anchors_mask
):
if a_mask is not None:
box_preds = box_preds[a_mask]
cls_preds = cls_preds[a_mask]
if self._use_direction_classifier:
if a_mask is not None:
dir_preds = dir_preds[a_mask]
# print(dir_preds.shape)
dir_labels = torch.max(dir_preds, dim=-1)[1]
if self._encode_background_as_zeros:
# this does not support softmax
assert self._use_sigmoid_score is True
total_scores = torch.sigmoid(cls_preds)
else:
# encode background as first element in one-hot vector
if self._use_sigmoid_score:
total_scores = torch.sigmoid(cls_preds)[..., 1:]
else:
total_scores = F.softmax(cls_preds, dim=-1)[..., 1:]
# Apply NMS in birdeye view
if self._use_rotate_nms:
nms_func = box_torch_ops.rotate_nms
else:
nms_func = box_torch_ops.nms
selected_boxes = None
selected_labels = None
selected_scores = None
selected_dir_labels = None
if self._multiclass_nms:
# currently only supports class-agnostic boxes.
boxes_for_nms = box_preds[:, [0, 1, 3, 4, 6]]
if not self._use_rotate_nms:
box_preds_corners = box_torch_ops.center_to_corner_box2d(
boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],
boxes_for_nms[:, 4])
boxes_for_nms = box_torch_ops.corner_to_standup_nd(
box_preds_corners)
boxes_for_mcnms = boxes_for_nms.unsqueeze(1)
selected_per_class = box_torch_ops.multiclass_nms(
nms_func=nms_func,
boxes=boxes_for_mcnms,
scores=total_scores,
num_class=self._num_class,
pre_max_size=self._nms_pre_max_size,
post_max_size=self._nms_post_max_size,
iou_threshold=self._nms_iou_threshold,
score_thresh=self._nms_score_threshold,
)
selected_boxes, selected_labels, selected_scores = [], [], []
selected_dir_labels = []
for i, selected in enumerate(selected_per_class):
if selected is not None:
num_dets = selected.shape[0]
selected_boxes.append(box_preds[selected])
selected_labels.append(
torch.full([num_dets], i, dtype=torch.int64))
if self._use_direction_classifier:
selected_dir_labels.append(dir_labels[selected])
selected_scores.append(total_scores[selected, i])
if len(selected_boxes) > 0:
selected_boxes = torch.cat(selected_boxes, dim=0)
selected_labels = torch.cat(selected_labels, dim=0)
selected_scores = torch.cat(selected_scores, dim=0)
if self._use_direction_classifier:
selected_dir_labels = torch.cat(
selected_dir_labels, dim=0)
else:
selected_boxes = None
selected_labels = None
selected_scores = None
selected_dir_labels = None
else:
# get highest score per prediction, then apply nms
# to remove overlapped box.
if num_class_with_bg == 1:
top_scores = total_scores.squeeze(-1)
top_labels = torch.zeros(
total_scores.shape[0],
device=total_scores.device,
dtype=torch.long)
else:
top_scores, top_labels = torch.max(total_scores, dim=-1)
if self._nms_score_threshold > 0.0:
thresh = torch.tensor(
[self._nms_score_threshold],
device=total_scores.device).type_as(total_scores)
top_scores_keep = (top_scores >= thresh)
top_scores = top_scores.masked_select(top_scores_keep)
if top_scores.shape[0] != 0:
if self._nms_score_threshold > 0.0:
box_preds = box_preds[top_scores_keep]
if self._use_direction_classifier:
dir_labels = dir_labels[top_scores_keep]
top_labels = top_labels[top_scores_keep]
boxes_for_nms = box_preds[:, [0, 1, 3, 4, 6]]
if not self._use_rotate_nms:
box_preds_corners = box_torch_ops.center_to_corner_box2d(
boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],
boxes_for_nms[:, 4])
boxes_for_nms = box_torch_ops.corner_to_standup_nd(
box_preds_corners)
# the nms in 3d detection just remove overlap boxes.
selected = nms_func(
boxes_for_nms,
top_scores,
pre_max_size=self._nms_pre_max_size,
post_max_size=self._nms_post_max_size,
iou_threshold=self._nms_iou_threshold,
)
else:
selected = None
if selected is not None:
selected_boxes = box_preds[selected]
if self._use_direction_classifier:
selected_dir_labels = dir_labels[selected]
selected_labels = top_labels[selected]
selected_scores = top_scores[selected]
# finally generate predictions.
if selected_boxes is not None:
box_preds = selected_boxes
scores = selected_scores
label_preds = selected_labels
if self._use_direction_classifier:
dir_labels = selected_dir_labels
# opp_labels = (box_preds[..., -1] > 0) ^ dir_labels.byte()
opp_labels = (box_preds[..., -1] > 0) ^ dir_labels.bool()
box_preds[..., -1] += torch.where(
opp_labels,
torch.tensor(np.pi).type_as(box_preds),
torch.tensor(0.0).type_as(box_preds))
# box_preds[..., -1] += (
# ~(dir_labels.byte())).type_as(box_preds) * np.pi
final_box_preds = box_preds
final_scores = scores
final_labels = label_preds
final_box_preds_camera = box_torch_ops.box_lidar_to_camera(
final_box_preds, rect, Trv2c)
locs = final_box_preds_camera[:, :3]
dims = final_box_preds_camera[:, 3:6]
angles = final_box_preds_camera[:, 6]
camera_box_origin = [0.5, 1.0, 0.5]
box_corners = box_torch_ops.center_to_corner_box3d(
locs, dims, angles, camera_box_origin, axis=1)
box_corners_in_image = box_torch_ops.project_to_image(
box_corners, P2)
# box_corners_in_image: [N, | |
folder to save features
data_type: The type of features, train/test/val
crop_type: The method to crop the images.
Options are 'none' (no cropping)
'bbox' (crop using bounding box coordinates),
'context' (A region containing pedestrian and their local surround)
'surround' (only the region around the pedestrian. Pedestrian appearance
is suppressed)
crop_mode: How to resize and/or pad the cropped images (see utils.img_pad)
crop_resize_ratio: The ratio by which the image is enlarged to capture the context
Used by crop types 'context' and 'surround'.
target_dim: Dimension of final visual features
regen_data: Whether to regenerate visual features. This will overwrite the cached features
Returns:
Numpy array of visual features
Tuple containing the size of features
"""
# load the feature files if exists
print("Generating {} features crop_type={} crop_mode={}\
\nsave_path={}, ".format(data_type, crop_type, crop_mode, save_path))
sequences = []
bbox_seq = bbox_sequences.copy()
i = -1
# flow size (h,w)
flow_size = read_flow_file(img_sequences[0][0].replace('images', 'optical_flow').replace('png', 'flo')).shape
img_size = cv2.imread(img_sequences[0][0]).shape
# A ratio to adjust the dimension of bounding boxes (w,h)
box_resize_coef = (flow_size[1] / img_size[1], flow_size[0] / img_size[0])
for seq, pid in zip(img_sequences, ped_ids):
i += 1
update_progress(i / len(img_sequences))
flow_seq = []
for imp, b, p in zip(seq, bbox_seq[i], pid):
flip_image = False
set_id, vid_id, img_name = PurePath(imp)[-3:]
set_id = set_id.split('.')[0]
optflow_save_folder = os.path.join(save_path, set_id, vid_id)
ofp = imp.replace('images', 'optical_flow').replace('png', 'flo')
# Modify the path depending on crop mode
if crop_type == 'none':
optflow_save_path = os.path.join(optflow_save_folder, img_name + '.flo')
else:
optflow_save_path = os.path.join(optflow_save_folder, img_name + '_' + p[0] + '.flo')
# Check whether the file exists
if os.path.exists(optflow_save_path) and not regen_data:
if not self._generator:
ofp_data = read_flow_file(optflow_save_path)
else:
if 'flip' in imp:
ofp = ofp.replace('_flip', '')
flip_image = True
if crop_type == 'none':
ofp_image = read_flow_file(ofp)
ofp_data = cv2.resize(ofp_image, target_dim)
if flip_image:
ofp_data = cv2.flip(ofp_data, 1)
else:
ofp_image = read_flow_file(ofp)
# Adjust the size of bbox according to the dimensions of flow map
b = list(map(int, [b[0] * box_resize_coef[0], b[1] * box_resize_coef[1],
b[2] * box_resize_coef[0], b[3] * box_resize_coef[1]]))
if flip_image:
ofp_image = cv2.flip(ofp_image, 1)
if crop_type == 'bbox':
cropped_image = ofp_image[b[1]:b[3], b[0]:b[2], :]
ofp_data = img_pad(cropped_image, mode=crop_mode, size=target_dim[0])
elif 'context' in crop_type:
bbox = jitter_bbox(imp, [b], 'enlarge', crop_resize_ratio)[0]
bbox = squarify(bbox, 1, ofp_image.shape[1])
bbox = list(map(int, bbox[0:4]))
cropped_image = ofp_image[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
ofp_data = img_pad(cropped_image, mode='pad_resize', size=target_dim[0])
elif 'surround' in crop_type:
b_org = b.copy()
bbox = jitter_bbox(imp, [b], 'enlarge', crop_resize_ratio)[0]
bbox = squarify(bbox, 1, ofp_image.shape[1])
bbox = list(map(int, bbox[0:4]))
ofp_image[b_org[1]:b_org[3], b_org[0]: b_org[2], :] = 0
cropped_image = ofp_image[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
ofp_data = img_pad(cropped_image, mode='pad_resize', size=target_dim[0])
else:
raise ValueError('ERROR: Undefined value for crop_type {}!'.format(crop_type))
# Save the file
if not os.path.exists(optflow_save_folder):
os.makedirs(optflow_save_folder)
write_flow(ofp_data, optflow_save_path)
# if using the generator save the cached features path and size of the features
if self._generator:
flow_seq.append(optflow_save_path)
else:
flow_seq.append(ofp_data)
sequences.append(flow_seq)
sequences = np.array(sequences)
# compute size of the features after the processing
if self._generator:
feat_shape = read_flow_file(sequences[0][0]).shape
if not isinstance(feat_shape, tuple):
feat_shape = (feat_shape,)
feat_shape = (np.array(bbox_sequences).shape[1],) + feat_shape
else:
feat_shape = sequences.shape[1:]
return sequences, feat_shape
def get_data_sequence(self, data_type, data_raw, opts):
    """
    Generates raw observation sequences (sliding windows) from a given dataset.

    Args:
        data_type: Split type of data, whether it is train, test or val
        data_raw: Raw tracks from the dataset (dict of per-pedestrian sequences)
        opts: Options for generating data samples; keys used here are
              'balance_data', 'obs_length', 'time_to_event', 'normalize_boxes', 'overlap'
    Returns:
        A dict of data samples extracted from raw data
        Negative and positive sample counts
    """
    print('\n#####################################')
    print('Generating raw data')
    print('#####################################')
    d = {'center': data_raw['center'].copy(),
         'box': data_raw['bbox'].copy(),
         'ped_id': data_raw['pid'].copy(),
         'crossing': data_raw['activities'].copy(),
         'image': data_raw['image'].copy()}
    # Only the training split is ever balanced.
    balance = opts['balance_data'] if data_type == 'train' else False
    obs_length = opts['obs_length']
    time_to_event = opts['time_to_event']
    normalize = opts['normalize_boxes']
    try:
        d['speed'] = data_raw['obd_speed'].copy()
    except KeyError:
        # JAAD has no OBD speed; fall back to discrete vehicle actions.
        d['speed'] = data_raw['vehicle_act'].copy()
        print('Jaad dataset does not have speed information')
        print('Vehicle actions are used instead')
    if balance:
        self.balance_data_samples(d, data_raw['image_dimension'][0])
    # Keep the unmodified boxes: feature extraction later crops from full frames.
    d['box_org'] = d['box'].copy()
    d['tte'] = []
    if isinstance(time_to_event, int):
        # Fixed time-to-event: one window per track, ending exactly
        # time_to_event frames before the event.
        for k in d.keys():
            for i in range(len(d[k])):
                d[k][i] = d[k][i][- obs_length - time_to_event:-time_to_event]
        d['tte'] = [[time_to_event]] * len(data_raw['bbox'])
    else:
        # time_to_event is a (min, max) range: slide a window over the interval
        # with a stride derived from the requested overlap.
        overlap = opts['overlap']  # if data_type == 'train' else 0.0
        olap_res = obs_length if overlap == 0 else int((1 - overlap) * obs_length)
        olap_res = 1 if olap_res < 1 else olap_res
        for k in d.keys():
            seqs = []
            for seq in d[k]:
                start_idx = len(seq) - obs_length - time_to_event[1]
                end_idx = len(seq) - obs_length - time_to_event[0]
                seqs.extend([seq[i:i + obs_length] for i in
                             range(start_idx, end_idx + 1, olap_res)])
            d[k] = seqs
        # tte values are derived from the original (unsliced) bbox tracks.
        for seq in data_raw['bbox']:
            start_idx = len(seq) - obs_length - time_to_event[1]
            end_idx = len(seq) - obs_length - time_to_event[0]
            d['tte'].extend([[len(seq) - (i + obs_length)] for i in
                             range(start_idx, end_idx + 1, olap_res)])
    if normalize:
        # Normalization drops the first frame of every window; 'box' and 'center'
        # additionally become offsets relative to that first frame.
        for k in d.keys():
            if k != 'tte':
                if k != 'box' and k != 'center':
                    for i in range(len(d[k])):
                        d[k][i] = d[k][i][1:]
                else:
                    for i in range(len(d[k])):
                        d[k][i] = np.subtract(d[k][i][1:], d[k][i][0]).tolist()
                d[k] = np.array(d[k])
            # NOTE(review): when normalizing, 'tte' stays a plain list while every
            # other key becomes a numpy array - confirm downstream code expects this.
    else:
        for k in d.keys():
            d[k] = np.array(d[k])
    # Reduce per-frame crossing labels to a single label per window.
    d['crossing'] = np.array(d['crossing'])[:, 0, :]
    pos_count = np.count_nonzero(d['crossing'])
    neg_count = len(d['crossing']) - pos_count
    print("Negative {} and positive {} sample counts".format(neg_count, pos_count))
    return d, neg_count, pos_count
def balance_data_samples(self, d, img_width, balance_tag='crossing'):
    """
    Balances the ratio of positive and negative data samples. The less represented
    class is augmented by horizontally flipping its sequences; if counts still differ
    after augmentation, surplus samples of the majority class are randomly dropped.

    Args:
        d: Dictionary of data sample sequences (modified in place)
        img_width: Width of the images, used to mirror coordinates when flipping
        balance_tag: The key in `d` whose labels drive the balancing
    """
    print("Balancing with respect to {} tag".format(balance_tag))
    gt_labels = [gt[0] for gt in d[balance_tag]]
    num_pos_samples = np.count_nonzero(np.array(gt_labels))
    num_neg_samples = len(gt_labels) - num_pos_samples
    if num_neg_samples == num_pos_samples:
        print('Positive and negative samples are already balanced')
        return
    print('Unbalanced: \t Positive: {} \t Negative: {}'.format(num_pos_samples, num_neg_samples))
    # Augment whichever label value is under-represented.
    if num_neg_samples > num_pos_samples:
        gt_augment = 1
    else:
        gt_augment = 0
    # Iterate over the original samples only; newly appended (flipped)
    # copies must not be re-augmented.
    num_samples = len(d[balance_tag])
    for i in range(num_samples):
        if d[balance_tag][i][0][0] == gt_augment:
            for k in d:
                if k == 'center':
                    # Mirror the x coordinate of center points.
                    flipped = d[k][i].copy()
                    flipped = [[img_width - c[0], c[1]]
                               for c in flipped]
                    d[k].append(flipped)
                if k == 'box':
                    # Mirror bounding boxes: x1/x2 reflect and swap roles.
                    flipped = d[k][i].copy()
                    flipped = [np.array([img_width - b[2], b[1], img_width - b[0], b[3]])
                               for b in flipped]
                    d[k].append(flipped)
                if k == 'image':
                    # Point at the pre-generated flipped image files.
                    flipped = d[k][i].copy()
                    flipped = [im.replace('.png', '_flip.png') for im in flipped]
                    d[k].append(flipped)
                if k in ['speed', 'ped_id', 'crossing', 'walking', 'looking']:
                    # These modalities are invariant under horizontal flips.
                    d[k].append(d[k][i].copy())
    # Recompute counts after augmentation; randomly drop the surplus of the
    # majority class (fixed seed for reproducibility).
    gt_labels = [gt[0] for gt in d[balance_tag]]
    num_pos_samples = np.count_nonzero(np.array(gt_labels))
    num_neg_samples = len(gt_labels) - num_pos_samples
    if num_neg_samples > num_pos_samples:
        rm_index = np.where(np.array(gt_labels) == 0)[0]
    else:
        rm_index = np.where(np.array(gt_labels) == 1)[0]
    # Calculate the difference of sample counts
    dif_samples = abs(num_neg_samples - num_pos_samples)
    # shuffle the indices
    np.random.seed(42)
    np.random.shuffle(rm_index)
    # FIX: convert the kept indices to a set for O(1) membership tests. The
    # original tested `i not in rm_index` against a numpy array inside the
    # comprehension below, which is O(n) per element (O(n^2) overall).
    rm_index = set(rm_index[0:dif_samples].tolist())
    # update the data
    for k in d:
        seq_data_k = d[k]
        d[k] = [seq_data_k[i] for i in range(0, len(seq_data_k)) if i not in rm_index]
    new_gt_labels = [gt[0] for gt in d[balance_tag]]
    num_pos_samples = np.count_nonzero(np.array(new_gt_labels))
    print('Balanced:\t Positive: %d \t Negative: %d\n'
          % (num_pos_samples, len(d[balance_tag]) - num_pos_samples))
def get_context_data(self, model_opts, data, data_type, feature_type):
    """
    Generates (or loads cached) visual context features for the given split.

    Maps `feature_type` onto cropping parameters, builds the cache path for the
    features, and dispatches to the optical-flow or image feature pipeline.

    Args:
        model_opts: Model options dict ('process', 'enlarge_ratio', 'dataset', 'target_dim')
        data: Data dict with 'image', 'box_org' and 'ped_id' sequences
        data_type: Split type (train/test/val)
        feature_type: Name of the feature stream, e.g. 'local_context', 'surround',
                      'optical_flow', ... (substring matching selects the crop type)
    Returns:
        Whatever the underlying feature generator returns (features and their shape)
    """
    print('\n#####################################')
    print('Generating {} {}'.format(feature_type, data_type))
    print('#####################################')
    process = model_opts.get('process', True)
    aux_name = [self._backbone]
    if not process:
        aux_name.append('raw')
    aux_name = '_'.join(aux_name).strip('_')
    eratio = model_opts['enlarge_ratio']
    dataset = model_opts['dataset']
    data_gen_params = {'data_type': data_type, 'crop_type': 'none',
                       'target_dim': model_opts.get('target_dim', (224, 224))}
    # Substring matching: more specific names MUST be tested before their prefixes.
    if 'local_box' in feature_type:
        data_gen_params['crop_type'] = 'bbox'
        data_gen_params['crop_mode'] = 'pad_resize'
    elif 'mask_cnn' in feature_type:
        data_gen_params['crop_type'] = 'mask_cnn'
    # BUGFIX: the original tested `'context_split' in feature_type` for BOTH of the
    # next two branches, making the plain 'context_split' branch unreachable.
    # Test the more specific substring first.
    elif 'context_split_surround' in feature_type:
        data_gen_params['crop_type'] = 'context_split_surround'
    elif 'context_split' in feature_type:
        data_gen_params['crop_type'] = 'context_split'
    elif 'mask_vit' in feature_type:
        data_gen_params['crop_type'] = 'mask_vit'
    elif 'context_vit32' in feature_type:
        data_gen_params['crop_type'] = 'context_vit32'
    elif 'mask' in feature_type:
        data_gen_params['crop_type'] = 'mask'
        # data_gen_params['crop_mode'] = 'pad_resize'
    elif 'local_context_cnn' in feature_type:
        data_gen_params['crop_type'] = 'local_context_cnn'
    elif 'local_context' in feature_type:
        data_gen_params['crop_type'] = 'context'
        data_gen_params['crop_resize_ratio'] = eratio
    elif 'surround' in feature_type:
        data_gen_params['crop_type'] = 'surround'
        data_gen_params['crop_resize_ratio'] = eratio
    elif 'scene_context' in feature_type:
        data_gen_params['crop_type'] = 'scene_context'
    # Cache folder name encodes backbone (non-flow) and enlargement ratio where relevant.
    save_folder_name = feature_type
    if 'flow' not in feature_type:
        save_folder_name = '_'.join([feature_type, aux_name])
        if 'local_context' in feature_type or 'surround' in feature_type:
            save_folder_name = '_'.join([save_folder_name, str(eratio)])
    data_gen_params['save_path'], _ = get_path(save_folder=save_folder_name,
                                               dataset=dataset, save_root_folder='data/features')
    if 'flow' in feature_type:
        return self.get_optical_flow(data['image'],
                                     data['box_org'],
                                     data['ped_id'],
                                     **data_gen_params)
    else:
        return self.load_images_crop_and_process(data['image'],
                                                 data['box_org'],
                                                 data['ped_id'],
                                                 process=process,
                                                 **data_gen_params)
def get_data(self, data_type, data_raw, model_opts):
"""
Generates data train/test/val data
Args:
data_type: Split type | |
for next cycle of loop
return result_dict, result_path
def _check_manifest_matches(self, parent_trace, manifest_filename, manifest_dict,
                            manifest_api_name, namespace, name, kind, minimal_version):
    '''
    Helper method for self.findLatestVersionManifest. It verifies that the manifest_dict has
    all the fields as stated in the parameters (up to YAML equivalence).

    Returns True if it does, False if it doesn't. Raises an error if in the process it finds
    any corruption (e.g., fields inside the manifest_dict not matching the expectations
    of the parameters).

    @param manifest_api_name A string representing the Apodeixi API defining the YAML schemas for the
                manifest kinds subsumed under such API. The search for manifests is filtered to those
                whose YAML representation declares itself as falling under this API.
                Example: 'delivery-planning.journeys.a6i.io'
    @param manifest_dict A dict object representing the YAML content of a manifest
    @param minimal_version An int, stating the minimal value that the version field must have for this
                manifest to be considered a match
    '''
    UTILS = DictionaryUtils()

    my_trace = parent_trace.doing("Checking apiVersion matches")
    if True:
        api_found, api_suffix_found = ManifestUtils().get_manifest_apiversion(my_trace, manifest_dict)
        if api_found != manifest_api_name:
            # This is a manifest for a different API, just happens to have one schema named the same kind as ours
            return False

    my_trace = parent_trace.doing("Checking namespace matches")
    if True:
        UTILS.validate_path(parent_trace=my_trace,
                            root_dict=manifest_dict,
                            root_dict_name=manifest_filename,
                            path_list=['metadata', 'namespace'],
                            valid_types=[str])
        namespace_found = manifest_dict['metadata']['namespace']
        if not StringUtils().equal_as_yaml(namespace_found, namespace):
            # This YAML file is corrupted, since it is under the namespace directory but internally has a different
            # namespace
            raise ApodeixiError(my_trace, "Encountered corrupted YAML file: inconsistent namespace",
                                data={"YAML file": str(manifest_filename),
                                      "namespace in YAML file": str(namespace_found),
                                      "namespace in folder structure": str(namespace)})

    my_trace = parent_trace.doing("Checking name matches")
    if True:
        UTILS.validate_path(parent_trace=my_trace,
                            root_dict=manifest_dict,
                            root_dict_name=manifest_filename,
                            path_list=['metadata', 'name'],
                            valid_types=[str])
        name_found = manifest_dict['metadata']['name']
        if not StringUtils().equal_as_yaml(name_found, name):
            # This YAML file is corrupted, since it is under the name's directory but internally has a different
            # name
            raise ApodeixiError(my_trace, "Encountered corrupted YAML file: inconsistent name",
                                data={"YAML file": str(manifest_filename),
                                      "name in YAML file": str(name_found),
                                      "name in folder structure": str(name)})
        # FIX: removed a stray `return False` that followed the raise above. It was
        # at best unreachable dead code, and at worst (if dedented one level) caused
        # every name-consistent manifest to be rejected before the kind and version
        # checks below could run.

    my_trace = parent_trace.doing("Checking kind matches")
    if True:
        UTILS.validate_path(parent_trace=my_trace,
                            root_dict=manifest_dict,
                            root_dict_name=manifest_filename,
                            path_list=['kind'],
                            valid_types=[str])
        kind_found = manifest_dict['kind']
        if not StringUtils().equal_as_yaml(kind_found, kind):
            # This YAML file is corrupted, since it is named after a different kind than what it internally has
            raise ApodeixiError(my_trace, "Encountered corrupted YAML file: inconsistent kind",
                                data={"YAML file": str(manifest_filename),
                                      "kind in YAML file": str(kind_found),
                                      "kind in filename": str(kind)})

    my_trace = parent_trace.doing("Checking version is high enough")
    if True:
        UTILS.validate_path(parent_trace=my_trace,
                            root_dict=manifest_dict,
                            root_dict_name=manifest_filename,
                            path_list=['metadata', 'version'],
                            valid_types=[int])
        version_found = manifest_dict['metadata']['version']
        if version_found < minimal_version:
            return False

    # If we got this far then all checks pass
    return True
def _remember_posting_write(self, parent_trace, relative_path):
'''
Helper method. If we are in a transaction, it will remember the relative path of a write
for a posting
'''
current_env = self.current_environment(parent_trace)
env_name = current_env.name(parent_trace)
if env_name in self._transaction_events_dict.keys():
transaction_events = self._transaction_events_dict[env_name]
transaction_events.remember_posting_write(relative_path)
def _remember_posting_delete(self, parent_trace, relative_path):
'''
Helper method. If we are in a transaction, it will remember the relative path of a delete
for a posting
'''
current_env = self.current_environment(parent_trace)
env_name = current_env.name(parent_trace)
if env_name in self._transaction_events_dict.keys():
transaction_events = self._transaction_events_dict[env_name]
transaction_events.remember_posting_delete(relative_path)
def _remember_manifest_write(self, parent_trace, relative_path):
'''
Helper method. If we are in a transaction, it will remember the relative path of a write
for a manifest
'''
current_env = self.current_environment(parent_trace)
env_name = current_env.name(parent_trace)
if env_name in self._transaction_events_dict.keys():
transaction_events = self._transaction_events_dict[env_name]
transaction_events.remember_manifest_write(relative_path)
def _remember_clientURL_write(self, parent_trace, relative_path):
'''
Helper method. If we are in a transaction, it will remember the relative path of a write
for a clientURL file
'''
current_env = self.current_environment(parent_trace)
env_name = current_env.name(parent_trace)
if env_name in self._transaction_events_dict.keys():
transaction_events = self._transaction_events_dict[env_name]
transaction_events.remember_clientURL_write(relative_path)
def _remember_clientURL_delete(self, parent_trace, relative_path):
'''
Helper method. If we are in a transaction, it will remember the relative path of a delete
for a posting
'''
current_env = self.current_environment(parent_trace)
env_name = current_env.name(parent_trace)
if env_name in self._transaction_events_dict.keys():
transaction_events = self._transaction_events_dict[env_name]
transaction_events.remember_clientURL_delete(relative_path)
def retrieveManifest(self, parent_trace, manifest_handle):
    '''
    Returns a dict and a string.

    The dict represents the unique manifest in the store that is identified by the `manifest handle`.
    The string represents the full pathname for the manifest.

    If none exists, it returns None, None. Raises an ApodeixiError if multiple manifests
    match the handle, since a handle is supposed to be unique.

    @param manifest_handle A ManifestHandle instance that uniquely identifies the manifest we seek to retrieve.
    '''
    matching_manifests = [] # List of dictionaries, one per manifest
    matching_filenames = [] # List of filename strings. Will be 1-1 lined up with matching_manifests

    # NOTE(review): sibling methods resolve the environment via self.current_environment(parent_trace),
    # but this one reads self._current_env directly - confirm this is intentional.
    folder = self._current_env.manifestsURL(parent_trace) + '/' \
        + manifest_handle.namespace + '/' + manifest_handle.name

    manifests, filenames = self._getMatchingManifests( parent_trace = parent_trace,
                                                       folder = folder,
                                                       manifest_handle = manifest_handle)
    matching_manifests.extend(manifests)
    matching_filenames.extend(filenames)
    # Multiple matches mean the store is corrupted: a handle must identify at most one manifest.
    if len(matching_filenames) > 1:
        raise ApodeixiError(parent_trace, "Found multiple manifests for given handle",
                            data = {'manifest_handle': str(manifest_handle),
                                    'matching files': str(matching_filenames)},
                            origination = {
                                'concrete class': str(self.__class__.__name__),
                                'signaled_from': __file__})
    if len(matching_filenames) == 0:
        return None, None
    # By now we know there is exactly one match - that must be the manifest we are after
    manifest_path = folder + "/" + matching_filenames[0]
    return matching_manifests[0], manifest_path
def _getMatchingManifests(self, parent_trace, folder, manifest_handle):
    '''
    Returns two lists of the same length:

    * A list of dictionaries, one per manifest that matches the given manifest handle
    * A list of filenames, which is where each of those manifests was retrieved from

    The search is done over the space of objects in the store that lie "at or below the folder", where
    the notion of "folder" depends on the concrete store class. For filesystem-based stores, "folder" would
    literally be a directory of some filesystem mount.

    @param folder A string scoping a subset of the store
    @param manifest_handle A ManifestHandle instance that (should) uniquely identify a single manifest in the store
    '''
    # Collect (filename, manifest_dict) pairs whose inferred handle equals the one we seek.
    matches = []
    for filename in self._getFilenames(parent_trace, folder):
        loop_trace = parent_trace.doing("Loading manifest from file",
                                        data = {'filename': filename,
                                                'folder': folder},
                                        origination = {
                                            'concrete class': str(self.__class__.__name__),
                                            'signaled_from': __file__})
        manifest_dict = YAML_Utils().load(loop_trace, folder + '/' + filename)
        inferred_handle = ManifestUtils().inferHandle(loop_trace, manifest_dict)
        if inferred_handle == manifest_handle:
            matches.append((filename, manifest_dict))
    matching_manifests = [manifest_dict for _, manifest_dict in matches]
    matching_filenames = [filename for filename, _ in matches]
    return matching_manifests, matching_filenames
def _getFilenames(self, parent_trace, folder):
'''
Helper method that looks at all files in the given folder that end in the "yaml" suffix and returns their filenames
'''
matches = []
if _os.path.isdir(folder):
matches = [filename for filename in _os.listdir(folder) if filename.endswith(".yaml")]
return matches
def searchManifests(self, parent_trace, kinds_of_interest, manifest_filter):
'''
Returns a list of dict objects, each representing the content of a manifest in the store for
one of the kinds in the `kinds_of_interest` list.
The returned list comprises all such manifests known to the KnowledgeBaseStore that pass the `manifest_filter`.
I.e., it returns a list of objects `manifest_dict` such that
`manifest_filter(parent_trace, manifest_dict) == True` and `manifest_dict["kind"]` is in `kinds_of_interest`
If `manifest_filter` is None, then no filter is applied and all manifests in the store are returned.
@param manifest_filter A function that takes two parameters: a FunctionalTrace and a dict object, and returns
a boolean.
'''
result = []
for currentdir, dirs, files in _os.walk(self.current_environment(parent_trace).manifestsURL(parent_trace)):
loop_trace = parent_trace.doing("Scanning directory", data = {'currentdir': currentdir})
for a_file in files:
tokens = a_file.split(".")
# We are only interested in files like "big-rock.2.yaml" with tokens ["big-rock", "2", "yaml"]
if | |
<filename>tweetarchiver/__init__.py<gh_stars>0
import time
import json
import logging
from hashlib import md5
from calendar import timegm
from urllib.parse import urlparse
from typing import Generator, BinaryIO, Optional, List, Tuple, Union, NamedTuple
import requests
import sqlalchemy as sqla
from sqlalchemy import func as sql_func
from sqlalchemy.orm import exc as sql_exc
from sqlalchemy.orm import relationship, Session
from sqlalchemy.ext.declarative import declarative_base
from bs4 import BeautifulSoup
DeclarativeBase = declarative_base()
__VERSION__ = "0.1"
LOG_FORMAT_TERM = logging.Formatter("[%(levelname)s] %(message)s")
LOGGER = logging.getLogger("tweetarchiver")
LOGGER.setLevel(logging.DEBUG)
TH = logging.StreamHandler()
TH.setLevel(logging.INFO)
TH.setFormatter(LOG_FORMAT_TERM)
LOGGER.addHandler(TH)
HTML_PARSER = "html.parser"
USER_AGENT = "".join(
["TweetArchiver/", __VERSION__,
"(+https://github.com/rmmbear/tweet-archiver)"
]
)
# Note that session should be closed by the batch functions which benefit from
# connection pooling (scrape_tweets, for example)
# it should also clear automatically in case of uncaught exception if module was
# called from __main__.main()
TWITTER_SESSION = requests.Session()
TWITTER_SESSION.headers["User-Agent"] = USER_AGENT
TWITTER_SESSION.headers["Accept-Language"] = "en-US,en;q=0.5"
TWITTER_SESSION.headers["x-twitter-client-language"] = "en"
#TWITTER_SESSION.headers["Accept-Encoding"] = "gzip, deflate"
#TWITTER_SESSION.headers["Accept"] = "gzip, deflate"
#TWITTER_SESSION.headers["Connection"] = "keep-alive"
#https://twitter.com/intent/user?user_id=XXX
#
def set_guest_token() -> None:
    """Set the authorization and guest token in twitter
    session's headers. This is only necessary for videos, all
    other parts of the site can be accessed without any authorization.

    Raises:
        RuntimeError: if the activation response does not contain a guest token.
    """
    TWITTER_SESSION.headers["Authorization"] = "Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw"
    link = "https://api.twitter.com/1.1/guest/activate.json"
    response_json = download(link, method="POST").response.text
    response_json = json.loads(response_json)
    try:
        guest_token = response_json["guest_token"]
    # FIX: was a bare `except:` - now only the missing-key case is handled, and the
    # original cause is chained onto the RuntimeError for easier debugging.
    except KeyError as err:
        LOGGER.error("Did not receive guest token ")
        LOGGER.error("Contents of response: \n %s", json.dumps(response_json, indent=4))
        raise RuntimeError("Could not retrieve twitter guest token") from err
    LOGGER.debug("setting guest token to %s", guest_token)
    TWITTER_SESSION.headers["x-guest-token"] = guest_token
class Response(NamedTuple):
    """Bundle a completed HTTP response with optional download bookkeeping
    (byte count and md5 digest of a payload streamed to a file).
    """
    # The underlying requests.Response object.
    response: requests.Response
    # Number of bytes written when downloading to a file; 0 when no file was written.
    size: int = 0
    # md5 hex digest of the downloaded file; empty string when no file was written.
    hash: str = ""
def download(link: str,
             method: str = "GET",
             to_file: Optional[BinaryIO] = None,
             headers: Optional[dict] = None,
             allow_redirects: bool = True,
             max_retries: int = 3) -> "Response":
    """
    Perform an HTTP request with exponential-backoff retries, optionally
    streaming the response body into `to_file`.

    Return Response named tuple
    Response.response - requests.Response object
    Response.size - size of downloaded file, 0 if to_file is None
    Response.hash - md5 hash of the downloaded file, empty string if to_file is None

    Raises:
        requests.HTTPError: on 404, or after exhausting retries on other HTTP errors.
        requests.Timeout: after exhausting retries on timeouts.
        requests.ConnectionError: immediately (assumed client-side; not retried).
        requests.RequestException: on unexpected request errors, or when the number
            of downloaded bytes disagrees with the content-length header.
    """
    # Backoff delays of 2, 4, 8, ... seconds for retries 1, 2, 3, ...
    exp_delay = [2**(x+1) for x in range(max_retries)]
    retry_count = 0
    query = requests.Request(method, link)
    query = TWITTER_SESSION.prepare_request(query)
    LOGGER.debug("Making %s request to %s", method, link)
    if headers:
        query.headers.update(headers)
    while True:
        try:
            response = TWITTER_SESSION.send(query, allow_redirects=allow_redirects, stream=True, timeout=15)
            response.raise_for_status()
            if to_file:
                size = 0
                md5_hash = md5()
                # Stream in 3 MiB chunks to bound memory usage.
                for chunk in response.iter_content(chunk_size=(1024**2)*3):
                    to_file.write(chunk)
                    md5_hash.update(chunk)
                    size += len(chunk)
                # FIX: this used to be a bare `assert`, which is stripped when running
                # under -O and raised KeyError whenever the server omitted the
                # content-length header (e.g. chunked transfer encoding).
                expected_size = response.headers.get("content-length")
                if expected_size is not None and size != int(expected_size):
                    raise requests.RequestException(
                        f"Download truncated: received {size} bytes, "
                        f"content-length announced {expected_size}")
                return Response(response=response, size=size, hash=md5_hash.hexdigest())
            return Response(response)
        except requests.HTTPError:
            LOGGER.error("Received HTTP error code %s", response.status_code)
            if response.status_code in [404] or retry_count >= max_retries:
                raise
        except requests.Timeout:
            LOGGER.error("Connection timed out")
            if retry_count >= max_retries:
                raise
        except requests.ConnectionError:
            LOGGER.error("Could not establish a new connection")
            # most likely a client-side connection error, do not retry
            raise
        except requests.RequestException as err:
            # Also catches the truncated-download error raised above, logging the
            # request details before propagating it.
            LOGGER.error("Unexpected request exception")
            LOGGER.error("request url = %s", query.url)
            LOGGER.error("request method = %s", query.method)
            LOGGER.error("request headers = %s", query.headers)
            LOGGER.error("request body = %s", query.body)
            raise err
        retry_count += 1
        delay = exp_delay[retry_count-1]
        print(f"Retrying ({retry_count}/{max_retries}) in {delay}s")
        LOGGER.error("Retrying (%s/%s) in %ss", retry_count, max_retries, delay)
        time.sleep(delay)
class TweetHTML(DeclarativeBase):
    """Table storing tweets in html form. For testing purposes only.
    """
    __tablename__ = "account_html"
    tweet_id = sqla.Column(sqla.Integer, primary_key=True, nullable=False)
    html = sqla.Column(sqla.String, nullable=False)
    scraped_on = sqla.Column(sqla.Integer, nullable=False)

    def parse(self) -> "Tweet":
        """Re-parse the stored html snippet into a Tweet object."""
        return Tweet.from_html(BeautifulSoup(self.html, HTML_PARSER).select_one(".js-stream-tweet"))

    def __init__(self, tweet_html: BeautifulSoup, timestamp: int) -> None:
        # int() for consistency with Attachment.from_html / Tweet.from_html
        # and with the Integer column type (was previously stored as str).
        self.tweet_id = int(tweet_html.get("data-tweet-id").strip())
        self.html = str(tweet_html)
        self.scraped_on = timestamp

    @classmethod
    def _boundary_tweet(cls, session: Session, aggregate) -> int:
        """Return the tweet_id picked by *aggregate* (sql max/min), or 0 if the table is empty."""
        boundary = session.query(aggregate(cls.tweet_id))
        try:
            tid = session.query(cls).filter(cls.tweet_id == boundary).one().tweet_id
            return int(tid)
        except sql_exc.NoResultFound:
            return 0

    @classmethod
    def newest_tweet(cls, session: Session) -> int:
        """Highest stored tweet id, or 0 when no tweets are stored."""
        return cls._boundary_tweet(session, sql_func.max)

    @classmethod
    def oldest_tweet(cls, session: Session) -> int:
        """Lowest stored tweet id, or 0 when no tweets are stored."""
        return cls._boundary_tweet(session, sql_func.min)
class Attachment(DeclarativeBase):
    """ORM row describing one media attachment (image, 'gif' video or real
    video) belonging to a tweet."""
    __tablename__ = "account_attachments"
    id = sqla.Column(sqla.Integer, primary_key=True)
    url = sqla.Column(sqla.String, nullable=False)
    # while this is not the case 90% of the time, urls can repeat
    tweet_id = sqla.Column(sqla.Integer, sqla.ForeignKey("account_archive.tweet_id"), nullable=False)
    position = sqla.Column(sqla.Integer, nullable=False) # to retain order in which images are displayed
    sensitive = sqla.Column(sqla.Boolean, nullable=False)
    type = sqla.Column(sqla.String, nullable=False)
    size = sqla.Column(sqla.Integer, nullable=True)
    hash = sqla.Column(sqla.String, nullable=True)
    path = sqla.Column(sqla.String, nullable=True)
    attached = relationship("Tweet", back_populates="media")
    @classmethod
    def from_html(cls, tweet_html: BeautifulSoup) -> List["Attachment"]:
        """Extract every image/video attachment from one tweet's html.

        Returns a list of unsaved Attachment rows; ``size``/``hash``/``path``
        stay empty until the media file is actually downloaded.
        """
        tweet_id = int(tweet_html.get("data-tweet-id").strip())
        video_elements = tweet_html.select(".js-stream-tweet .is-video")
        image_elements = tweet_html.select(".js-stream-tweet .AdaptiveMedia-photoContainer img")
        tombstone_label = tweet_html.select_one(".AdaptiveMediaOuterContainer .Tombstone-label")
        # a tombstone label marks the media as sensitive
        sensitive = False
        if tombstone_label:
            tombstone_label = tombstone_label.text
            sensitive = "media may contain sensitive material" in tombstone_label
        media = []
        for num, image in enumerate(image_elements):
            image_url = image.get("src").strip()
            #TODO: detect apngs
            media.append(
                cls(
                    url=image_url,
                    tweet_id=tweet_id,
                    position=num+1,
                    sensitive=sensitive,
                    # e.g. "img:jpg" -- extension taken from the url
                    type=f"img:{image_url.rsplit('.', maxsplit=1)[-1]}"
                )
            )
        if video_elements:
            gif = tweet_html.select_one(".PlayableMedia--gif")
            if gif:
                video_type = "vid:gif"
                # 'gifs' (actually short mp4s) can be downloaded directly, for actual vids m3u fuckery is needed
                # note that in web twitter the furthest descendant of .PlayableMedia-player
                # would be a video tag containing the direct url to the video
                # but because of the approach for accessing twitter, we do not have access to that
                # video tag also contains url to a 'poster' displayed while the video is not playing
                # image is hosted at https://pbs.twimg.com/tweet_video_thumb/{file}
                # and the video at https://video.twimg.com/tweet_video/{file}
                # the poster image and video file always use the same name, so if we know that the
                # image is named EOFhYRnWkAIlIK8.jpg then the url for our video
                # is https://video.twimg.com/tweet_video/EOFhYRnWkAIlIK8.mp4
                # as it happens, the .PlayableMedia-player element contains a style attribute, which
                # includes a background image - this is the exact same file as in the video tag
                # this means we can:
                # 1 grab .PlayableMedia-player element
                # 2 get its style attribute
                # 3 parse it and get the image url
                # 4 place the filename in video url template
                # and we have the url to the video
                player_style = tweet_html.select_one(".PlayableMedia-player").get("style")
                # style string -> {property: value} dict
                player_style = dict([x.strip().split(":", maxsplit=1) for x in player_style.split(";")])
                assert player_style["background-image"].startswith("url")
                # strip the surrounding url('...') wrapper
                image_url = player_style["background-image"][5:-2:]
                image_url = urlparse(image_url)
                # take path -> split on elements, take the last one -> split on extension, take name
                video_name = image_url.path.rsplit("/", maxsplit=1)[-1].rsplit(".", maxsplit=1)[0]
                vid_url = f"https://video.twimg.com/tweet_video/{video_name}.mp4"
            else:
                video_type = "vid:mp4"
                # real videos cannot be resolved here; store the tweet url instead
                vid_url = f"https://twitter.com/user/status/{tweet_id}"
            video = cls(
                url=vid_url,
                tweet_id=tweet_id,
                position=1,
                sensitive=sensitive,
                type=video_type)
            media.append(video)
        return media
    @classmethod
    def with_missing_files(cls, session: Session) -> List["Attachment"]:
        """Return all attachments not yet downloaded (path is NULL), ordered by tweet id."""
        # "== None" is the SQLAlchemy idiom for IS NULL (not a Python comparison)
        attachments_missing_files = session.query(cls).filter(cls.path == None).order_by(cls.tweet_id)
        return attachments_missing_files.all()
class Account(DeclarativeBase):
    """ORM row with the scraped account's profile details, plus columns
    keeping previously-seen values of each mutable field."""
    __tablename__ = "account_details"
    account_id = sqla.Column(sqla.Integer, primary_key=True)
    join_date = sqla.Column(sqla.Integer)
    name = sqla.Column(sqla.String)
    handle = sqla.Column(sqla.String)
    link = sqla.Column(sqla.String)
    description = sqla.Column(sqla.String)
    avatar = sqla.Column(sqla.String)
    location = sqla.Column(sqla.String)
    # previous_* columns record historical values of the fields above;
    # storage format is not visible here -- presumably serialized lists. TODO confirm.
    previous_names = sqla.Column(sqla.String)
    previous_handles = sqla.Column(sqla.String)
    previous_links = sqla.Column(sqla.String)
    previous_descriptions = sqla.Column(sqla.String)
    previous_avatars = sqla.Column(sqla.String)
    previous_locations = sqla.Column(sqla.String)
class Tweet(DeclarativeBase):
__tablename__ = "account_archive"
tweet_id = sqla.Column(sqla.Integer, primary_key=True, nullable=False)
thread_id = sqla.Column(sqla.Integer, nullable=False)
timestamp = sqla.Column(sqla.Integer, nullable=False)
account_id = sqla.Column(sqla.Integer, sqla.ForeignKey("account_details.account_id"), nullable=False)
replying_to = sqla.Column(sqla.Integer, nullable=True)
qrt_id = sqla.Column(sqla.Integer, nullable=True)
poll_data = sqla.Column(sqla.JSON, nullable=True)
poll_finished = sqla.Column(sqla.Boolean, nullable=True) # if false, will need to be updated
has_video = sqla.Column(sqla.Boolean, nullable=False)
image_count = sqla.Column(sqla.Integer, nullable=False)
replies = sqla.Column(sqla.Integer, nullable=False)
retweets = sqla.Column(sqla.Integer, nullable=False)
favorites = sqla.Column(sqla.Integer, nullable=False)
embedded_link = sqla.Column(sqla.String, nullable=True)
text = sqla.Column(sqla.String, nullable=True)
poi = sqla.Column(sqla.String, nullable=True) # format is "{label}:{place_id}"
# author can choose to include label location to the tweet when composing it
# this is different from the location added automatically to tweets if location data is enabled
# I'm deciding to keep this only because it has to be included manually at which point it becomes
# content
withheld_in = sqla.Column(sqla.String, nullable=True)
# two types of values possible: "unknown" if tweet is withheld but where exactly is not known
# otherwise two letter country identifiers (ISO 3166-1 alpha-2) separated with commas
media = relationship(Attachment, order_by=Attachment.position)
@classmethod
def from_html(cls, tweet_html: BeautifulSoup) -> "Tweet":
new_tweet = cls()
new_tweet.tweet_id = int(tweet_html.get("data-tweet-id").strip())
new_tweet.thread_id = int(tweet_html.get("data-conversation-id").strip())
new_tweet.account_id = int(tweet_html.get("data-user-id").strip())
withheld = tweet_html.select_one(".StreamItemContent--withheld")
if withheld:
LOGGER.error("Encountered a withheld tweet %s", new_tweet.tweet_id)
tombstone_label = tweet_html.select_one(".Tombstone .Tombstone-label").text
new_tweet.text = tombstone_label.strip()
if "withheld in response to a report from the copyright holder" in new_tweet.text:
# as per info in https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/user-object
# “XY” - Content is withheld due to a DMCA request.
takedown_type = "XY"
else:
takedown_type = "unknown"
new_tweet.withheld_in = takedown_type
new_tweet.timestamp = 0
new_tweet.has_video = False
new_tweet.image_count = 0
new_tweet.favorites = 0
new_tweet.retweets = 0
new_tweet.replies = 0
# favorites, retweets, replies and timestamp can be looked up through their api,
# but original text and attachments are lost
return new_tweet
new_tweet.timestamp = int(tweet_html.select_one(".js-short-timestamp").get("data-time").strip())
new_tweet.replying_to = None # need a second pass on specific threads | |
# repo: loco-philippe/Environnemental-Sensing
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 23:40:06 2021
@author: <EMAIL>
The `ES.ESObservation` module contains the main class
of Environmental Sensing : `Observation` class.
"""
from ESconstante import ES, _classval
#import ESValue
from ESValue import LocationValue, DatationValue, PropertyValue, \
NamedValue, ExternValue, ESValue, ESValueEncoder
from timeslot import TimeSlot
import datetime
import json, folium, copy, csv, bson, math
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import struct
#import os
#os.chdir('C:/Users/a179227/OneDrive - Alliance/perso Wx/ES standard/python ESstandard/iList')
from ilist import Ilist, CborDecoder
#import ilist
from tabulate import tabulate
import cbor2
#from ESValue import _EsClassValue
class Observation :
"""
An `Observation` is made up of `ES.ilist` object which describe the axes of this object.
*Attributes (for @property see methods)* :
- **option** : Dictionnary with options
- **ilist** : Ilist object (data and axes)
- **name** : textual description
- **mAtt** : namedValue dictionnary (internal parameters)
- **parameter** : namedValue dictionnary (external data)
The methods defined in this class are :
*property (getters)*
- `Observation.axes`
- `Observation.bounds`
- `Observation.complet`
- `Observation.consistent`
- `Observation.dimension`
- `Observation.json`
- `Observation.jsonFeature`
- `Observation.nValueObs`
- `Observation.rate`
- `Observation.score`
- `Observation.setDatation`
- `Observation.setLocation`
- `Observation.setProperty`
- `Observation.setResult`
- `Observation.typeObs`
*getters*
- `Observation.vList`
- `Observation.vListName`
- `Observation.vListSimple`
- `Observation.vListValue`
*add value*
- `Observation.addAttributes`
- `Observation.addJson`
- `Observation.append`
- `Observation.appendList`
- `Observation.appendObs`
*update value*
- `Observation.majList`
- `Observation.majValue`
*selecting*
- `Observation.indexLoc`
- `Observation.iLoc`
- `Observation.iObsIndex`
- `Observation.loc`
*management*
- `Observation.extend`
- `Observation.filter`
- `Observation.full`
- `Observation.sort`
*visualization*
- `Observation.choropleth`
- `Observation.voxel`
- `Observation.plot`
- `Observation.view`
*exports - imports*
- `Observation.from_file`
- `Observation.to_csv`
- `Observation.to_dataFrame`
- `Observation.to_json`
- `Observation.to_numpy`
- `Observation.to_file`
- `Observation.to_xarray`
- `Observation.to_bytes` # à voir
- `Observation.from_bytes` # à voir
"""
#%% constructor
def __init__(self, *args, order = [], idxref = {}, **kwargs):
'''
Several Observation creation modes :
- Observation(dictESValue1, dictESValue2, ...) where dictESValue = {ESValuename : value}
- Observation({ObsDict}) where ObsDict is a dictionnary with the same data as an ObsJSON
- Observation(ObsJSON) where ObsJSON is a string with the JSON format
- Observation(ObsBSON) where ObsBSON is a bytes with the BSON format
- Observation([ESSetDatation, ESSetLocation, ESSetProperty, ESSetResult]) where ESSet is a list of ESValue :
[ESValue1, ESValue2,...] or [ESValue] or ESValue
- Observation(datation=ESSetDatation, location=ESSetLocation,
property=ESSetProperty, result=ESSetResult)
*Note : the parameters 'idxref' and 'order' are used only when an ESSetResult without Index is in arguments.
'order' indicates the order for the Index creation and 'idxref' the linked index (0 for Datation, 1 for Location, 2 for Property).*
'''
kwargs |= {'order' : order, 'idxref' : idxref}
self.name = "observation du " + datetime.datetime.now().isoformat()
self.option = ES.mOption.copy()
self.parameter = ES.nullAtt # json
self.ilist = Ilist()
self.mAtt = {}
#self.mAtt[ES.obs_reference] = 0
self.mAtt[ES.obs_reference] = ES.nullVal
self.mAtt[ES.obs_resultTime] = ES.nullAtt
self.mAtt[ES.obs_id] = ES.nullAtt
self._initObs(*args, **kwargs)
@classmethod
def Ilist(cls, ilist, name='', option=None, parameter=None, mAtt=None):
obs = cls()
obs.ilist = ilist
if name != '': obs.name = name
if option: obs.option |= option
if parameter: obs.parameter = parameter
if mAtt: obs.mAtt |= mAtt
return obs
    def _initObs(self, *args, **kwargs):
        ''' data creation : dispatch *args (json / bson / cbor / dict / list /
        tuple forms) to the dict-mode or list-mode initializers '''
        dic = {}
        if len(args) == 0 and len(kwargs) == 2 : args = [{}]
        for arg in args :
            if type(arg) == str : # creation from a json string "key : [list]"
                try: arg=json.loads(arg, object_hook=CborDecoder().codecbor)
                except: pass
            elif isinstance(arg, bytes):
                # leading-byte heuristic : CBOR map marker vs BSON document
                if arg[len(arg)-1] != 0x00 and (arg[0] & 0b11100000) == 0b10100000:
                    dic=cbor2.loads(arg)
                else: dic = bson.decode(arg)
            if type(arg) == dict : # creation from a dict "key : [list]"
                for k,v in arg.items() :
                    if k not in dic : dic |= {k:v}
        if len(dic) == 1 and list(dic.keys())[0] == ES.obs_valName : dic = dic[ES.obs_valName] #!!!
        if dic != {} or len(args) == 0 or args == [{}] :
            # dict mode : kwargs complete the decoded dict without overriding it
            for k,v in kwargs.items() :
                if k not in dic : dic |= {k:v}
            self._initDict(dic)
            return
        for arg in args :
            if type(arg) == list and len(arg) == 4 : # creation from a value set [[dat], [loc], [prp], [res]]
                self._initList(arg, **kwargs)
                return
            elif type(arg) == tuple : # creation of a single data record dat, loc, prp, res
                self.append(arg[0], arg[1], arg[2], arg[3])
    def _initDict(self, dic) :
        ''' data creation in dict mode : decode short codes, extract the
        Ilist-building keys and delegate the rest to addAttributes '''
        js = {}
        # translate compact keys back to their full names
        for k,v in dic.items():
            if k in ES.invcodeb: js[ES.invcodeb[k]] = v
            else: js[k] = v
        if ES.obs_id in list(js): self.mAtt[ES.obs_id] = js[ES.obs_id]
        if ES.obs_attributes in list(js):
            # unwrap the attributes envelope; abort when it is not a dict
            if type(js[ES.obs_attributes]) == dict: js = js[ES.obs_attributes]
            else: return
        dicilist = {}
        order = []
        # collect datation / location / property sets in the order found
        for classES in ES.esObsClass[0:3] :
            if classES in list(js) :
                dicilist[classES] = js[classES]
                order.append(classES)
        if ES.res_classES in list(js) :
            dicilist[ES.res_classES] = js[ES.res_classES]
        if 'order' in list(js) and len(js['order'])>0: dicilist['order'] = js['order']
        else: dicilist['order'] = sorted(order)
        if 'idxref' in list(js) : dicilist['idxref'] = js['idxref']
        if 'index' in list(js) : dicilist['index'] = js['index']
        self.ilist = Ilist.from_obj(dicilist)
        # normalize index order to alphabetical
        ordern = sorted(self.ilist.idxname)
        if ordern != self.ilist.idxname:
            self.ilist.swapindex([self.ilist.idxname.index(i) for i in ordern])
        self.addAttributes(js)
    def _initList(self, lis, **kwargs) :
        '''data creation in list mode : lis = [datation set, location set,
        property set, result values] '''
        if 'order' in kwargs : order = kwargs['order']
        else : order = ['datation', 'location', 'property']
        # positions of the requested order inside the canonical class list
        ordern = [ES.esObsClass.index(idx) for idx in order]
        if 'idxref' in kwargs : idxref = kwargs['idxref']
        else : idxref = {}
        # default : each of the three axes is independent (refers to itself)
        idxrefn = [0,1,2]
        for key, val in idxref.items() :
            keyn = order.index(key)
            valn = order.index(val)
            # link the later axis to the earlier one
            idxrefn[max(keyn, valn)] = min(keyn, valn)
        self.ilist = Ilist.Iset(valiidx=lis[3],
                                setidx=[lis[0], lis[1], lis[2]],
                                order=ordern, idxref=idxrefn, valname=ES.esObsClass[3],
                                idxname=ES.esObsClass[0:3], defaultidx=False)
#%% special
    def __copy__(self):
        ''' Copy all the data, included ESValue (deep copy, so the clone
        shares nothing with the original)'''
        return copy.deepcopy(self)
@property
def __geo_interface__(self):
'''dict (@property) : return the union of geometry (see shapely)'''
if self.setLocation :
collec = self.vListValue(ES.loc_classES)[0]
first = True
for shap in self.vListValue(ES.loc_classES) :
if not first : collec = collec.union(shap)
first = False
return collec.__geo_interface__
else : return ""
def __ior__(self, other):
''' Add other's index to self's index'''
self.option = other.option | self.option
self.mAtt = other.mAtt | self.mAtt
self.ilist |= other.ilist
newindex = []
nameES = self.ilist.idxname
if ES.dat_classES in nameES: newindex.append(nameES.index(ES.dat_classES))
if ES.loc_classES in nameES: newindex.append(nameES.index(ES.loc_classES))
if ES.prp_classES in nameES: newindex.append(nameES.index(ES.prp_classES))
self.ilist.swapindex(newindex)
return self
def __or__(self, other):
''' Add other's index to self's index and return a new Observation'''
obres = self.__copy__()
obres.__ior__(other)
return obres
    def __getitem__(self, ind):
        ''' return ResValue item (delegates indexing to the underlying Ilist)'''
        return self.ilist[ind]
    def __iadd__(self, other):
        ''' Add other's values to self's values'''
        #self.ilist += other.ilist
        # NOTE: iadd must run *before* the option merge below -- it reads the
        # current value of self.option["unic_index"].
        self.ilist.iadd(other.ilist, unique=self.option["unic_index"])
        self.option = other.option | self.option
        self.mAtt = other.mAtt | self.mAtt
        return self
def __add__(self, other):
''' Add other's values to self's values and return a new Observation'''
obres = self.__copy__()
obres.__iadd__(other)
return obres
    # number of result values = length of the underlying Ilist
    def __len__(self): return len(self.ilist)
    def __repr__(self):
        ''' short text form : ClassName[value count, index count] '''
        return self.__class__.__name__ + '[' + str(len(self)) + ', ' + str(self.ilist.lenidx) + ']'
    def __to_bytes__(self, **option):
        ''' BSON serialization used by the bytes-export path; 'encoded' is the
        only option consulted here '''
        return self.to_json(encoded=option['encoded'], encode_format='bson',
                            json_info=False, json_res_index=True, json_param=True)
#%% properties
@property
def axes(self):
'''
**list of integer (@property)** : list of independant axes in the Observation
(0 for Datation, 1 for Location, 2 for Property)'''
axes =[]
for i in self.ilist.axes :
if self.ilist.idxname[i] in ES.esObsClass:
axes.append(ES.esObsClass.index(self.ilist.idxname[i]))
return axes
    @property
    def bounds(self):
        '''
        **list of `ES.ESValue` (@property)** : `ES.ESValue` bounding box for each axis
        ([datation, location, property]; an entry stays None when that set is absent).'''
        bound = [None, None, None]
        #if self.setDatation : bound[0] = self._boundingBox(DatationValue, self.setDatation)
        #if self.setLocation : bound[1] = self._boundingBox(LocationValue, self.setLocation)
        if self.setDatation : bound[0] = self._boundingBox(self.setDatation)
        if self.setLocation : bound[1] = self._boundingBox(self.setLocation)
        # property has no box: just take the first property value
        if self.setProperty : bound[2] = self.setProperty[0]
        return bound
    @property
    def complet(self):
        '''
        **boolean (@property)** : True if self.ilist is complete (if the number of
        values is consistent with the number of index values)'''
        return self.ilist.complete
    @property
    def consistent(self):
        '''
        **boolean (@property)** : True if Observation is consistent (no duplicate index) '''
        return self.ilist.consistent
    @property
    def dimension(self):
        '''
        **integer (@property)** : number of independant axes in the Observation'''
        return self.ilist.dimension
    @property
    def json(self):
        '''
        **string (@property)** : JSON Observation (ObsJSON format) whit index
        and whitout informations'''
        return self.to_json(encoded=True, encode_format='json',
                            json_info=False, json_res_index=True, json_param=True)
@property
def jsonFeature(self):
'''
**string (@property)** : "FeatureCollection" with ESSetLocation geometry'''
if self.setLocation :
geo = self.__geo_interface__
if geo['type'] == "MultiPolygon": typ = "Polygon"
else : typ = "Point"
lis = list(dict((("type", typ), ("coordinates", geo['coordinates'][i]))) for i in range(len(geo['coordinates'])))
fea = | |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 15:07:09 2016
@author: advena
"""
#import re
from datetime import datetime
#import numpy as np
import pandas as pd
import os
import sys
import shutil
from dateutil import parser
########################################################################
# User Inputs #
########################################################################
# Do not edit the PJM owner list (except to correct it)
pjm_owner_list=[202,205,209,212,215,222,225,320,345]
# Filter the data down to these owners.
#   For PJM only, set compare_owner_list = pjm_owner_list
#   For all units, set compare_owner_list = []
compare_owner_list = pjm_owner_list
#src_dir = r'K:\AFC_MODEL_UPDATES\2016_S\IDC Models'
src_dir = r'K:\AFC_MODEL_UPDATES\2016_S\IDC Models'
# Working directory.  If src_dir != tgt_dir, a copy of the original raw
# files is copied to tgt_dir to prevent corruption of the originals.
# The copy here will have be modified; a new line will be added to the top
# to allow for csv parsing without error.
tgt_dir = r'C:\temp'
# The raw files to compare
raw_file1 = r'sum16idctr1p4_v32.RAW'
raw_file2 = r'sum16idctr1p6_v32.RAW'
# Maximum number of columns in any row, likely 28.
max_cols = 28 # set to zero to automatically determine.
# Literal prefix (compared with ==, not compiled as a regex) that identifies
# the end-of-section marker rows in the raw file.
end_of_section_pattern="0 / "
########################################################################
# Function Dfinitions #
########################################################################
def max_col_cnt(filename):
    '''
    Return the column count of the widest row in *filename*, computed as
    1 + the maximum number of commas found on any line.

    filename: path of the (csv-like) file to scan.
    '''
    max_commas = 0
    # 'with' guarantees the handle is closed (the previous version leaked it)
    with open(filename) as lines:
        for line in lines:
            max_commas = max(max_commas, line.count(','))
    return max_commas + 1
def raw_to_df(src_dir, tgt_dir, filename, max_cols=28):
    '''
    Load a raw file into a DataFrame of strings with generic headers.

    src_dir: directory in which the raw files are located
    tgt_dir: directory in which to copy the files
             (to prevent corrupting originals); pass '', None or src_dir
             to read the file in place.
    filename: name of raw file (exluding path)
    max_cols: The maximim number of columns in any row, likely 28.

    Returns a pandas DataFrame with columns col0..col{max_cols-1}, dtype str.
    '''
    #create generic column headers
    cols=["col"+str(i) for i in range(max_cols)]
    #concatenate path and filename
    src=os.path.join(src_dir,filename)
    #copy the file to the target directory so the original stays untouched
    if src_dir != tgt_dir and tgt_dir!=None and tgt_dir!='':
        print('  copying raw file to working directory: ' + tgt_dir)
        tgt=os.path.join(tgt_dir,filename)
        shutil.copyfile(src, tgt)
    else:
        tgt=src
    print('  reading raw file into datafrme: ' + tgt_dir)
    # pass the path directly: read_csv opens/closes the file itself (the
    # previous open(tgt) handle was never closed) and already returns a
    # DataFrame, so no extra pd.DataFrame(...) wrap is needed.
    return pd.read_csv(tgt, names=cols, dtype=str)
def define_sections(df, end_of_section_pattern=end_of_section_pattern):
    '''Split the raw-file DataFrame into sections.

    Returns a list of [first_row, last_row, section_name] triples; a section
    ends on any row whose first cell starts with the 4-character
    end-of-section marker ("0 / ").
    '''
    sections = []
    start = 3  # data rows begin after the 3-line file header
    for idx, row in df.iterrows():
        if row[0][:4] == end_of_section_pattern:
            # the marker row also carries the section label after position 11
            sections.append([start, idx, row[0][11:]])
            start = idx + 1
    return sections
def parse_raw_header(first_3_rows):
    '''Parse the 3-line raw-file header into a one-row summary DataFrame
    (case name plus two parsed "Updated" timestamps).'''
    # first 7 cells of row 0, plus a slot for the duplicated timestamp
    data = list(first_3_rows.iloc[0,:][0:7])+[None]
    data[7] = data[6]
    # NOTE(review): this first assignment reduces data[5] to a single character
    # taken from after the '/', and the next line then splits that single
    # character again -- looks unintended; confirm against a real header row.
    data[5] = data[5].split('/')[1][-4].strip()
    data[5] = data[5].split('/')[0].strip()
    data += [first_3_rows.iloc[1,:][0]]
    data += [first_3_rows.iloc[2,:][0] + ',' + first_3_rows.iloc[2,:][1]]
    # write the cleaned values back into the caller's frame (in-place)
    for i in range(len(data)):
        first_3_rows.iloc[0,i]=data[i]
    data=[item.strip(' ') for item in data]
    data[6]=parser.parse(data[6])
    data[7]=parser.parse(data[7])
    cols = ['col0','col1','col2','col3','col4','col5','Updated','Updated2',\
            'Case_Name','Updated3']
    dtype_dict = {'col0':'float','col1':'float','col2':'float','col3':'float',\
                  'col4':'float','col5':'str','Updated':'str','Updated2':'str',\
                  'Case_Name':'str','Updated3':'str'}
    df = pd.DataFrame([data])
    #print('raw summary:')
    #print(df)
    df.columns = cols
    # NOTE(review): this sets an arbitrary attribute named 'dtype'; it does NOT
    # convert column dtypes (that would be df.astype(dtype_dict)) -- confirm intent.
    df.dtype = dtype_dict
    return df
def append_owner(df, owner_df):
    ''' Join owner details onto *df*.

    Matches df["Owner"] against owner_df["Owner_Num"] (inner join) and
    drops the redundant Owner_Num key column from the result.
    '''
    merged = pd.merge(df, owner_df, left_on="Owner", right_on="Owner_Num", how="inner")
    return merged.drop(columns="Owner_Num")
def append_bus_info_to_branch(branch_df, bus_df):
    ''' Add Fr_Bus_Name / To_Bus_Name columns to a Branch dataframe.

    Branch rows carry only bus numbers; names are looked up in bus_df via
    two successive inner joins (FROM end first, then TO end).
    '''
    lookup = bus_df.loc[:,['Bus_Num', 'Bus_Name']].copy()
    # FROM bus
    out = pd.merge(branch_df, lookup, left_on="Fr_Bus_Num", right_on="Bus_Num", how="inner")
    out = out.rename(columns={'Bus_Name': 'Fr_Bus_Name'}).drop(columns='Bus_Num')
    # TO bus
    out = pd.merge(out, lookup, left_on="To_Bus_Num", right_on="Bus_Num", how="inner")
    out = out.rename(columns={'Bus_Name': 'To_Bus_Name'}).drop(columns='Bus_Num')
    return out[out['Fr_Bus_Num'].notnull()]
def branch_df_compare(branch_df1, branch_df2):
    '''
    Compare the branch tables of two raw files.

    Branches are matched on the (Fr_Bus_Num, To_Bus_Num) pair, with
    Rate_A_MVA used as the presence/delta indicator.  Returns one dataframe
    containing dropped branches, added branches and Rate_A changes, tagged
    through the 'change_type' and 'change_amt' columns.
    '''
    # dropped branch: present in file1 but unmatched in file2
    dropped = pd.merge(branch_df1, branch_df2, how='left', on=['Fr_Bus_Num','To_Bus_Num'])
    dropped = dropped[(dropped['Rate_A_MVA_x'].notnull()) & (dropped['Rate_A_MVA_y'].isnull())]
    dropped['change_type'] = 'dropped branch (RateA)'
    dropped['change_amt'] = -dropped['Rate_A_MVA_x']
    print('Dropped branch')
    print(dropped.loc[:,['Fr_Bus_Num','To_Bus_Num','Rate_A_MVA_x','change_type','change_amt']].head())
    # added branch: present in file2 but unmatched in file1
    added = pd.merge(branch_df1, branch_df2, how='right', on=['Fr_Bus_Num','To_Bus_Num'])
    added = added[(added['Rate_A_MVA_x'].isnull()) & (added['Rate_A_MVA_y'].notnull())]
    added['change_type'] = 'added branch (RateA)'
    added['change_amt'] = added['Rate_A_MVA_y']
    print('Added branch')
    print(added.loc[:,['Fr_Bus_Num','To_Bus_Num','Rate_A_MVA_y','change_type','change_amt']].head())
    # changes in Rate_A on matched branches
    delta = pd.merge(branch_df1, branch_df2, how='inner', on=['Fr_Bus_Num','To_Bus_Num'])
    delta = delta[delta['Rate_A_MVA_x'] != delta['Rate_A_MVA_y']]
    delta['change_type'] = 'delta branch RateA'
    delta['change_amt'] = delta['Rate_A_MVA_x'] - delta['Rate_A_MVA_y']
    print('Delta Rate_A')
    print(delta.loc[:,['Fr_Bus_Num','To_Bus_Num','Rate_A_MVA_x','Rate_A_MVA_y','change_type','change_amt']].head())
    print(delta.head())
    # DataFrame.append was removed in pandas 2.0 -- concatenate instead
    return pd.concat([dropped, added, delta])
def append_bus_info_to_gen(gen_df, bus_df):
    '''Attach bus name / kV / area / zone columns to each generator row via
    an inner join on bus number.'''
    lookup = bus_df.loc[:,['Bus_Num', 'Bus_Name', 'Bus_kV','Area_Num','Zone_Num']].copy()
    # rename the join key so it survives the merge and can be dropped afterwards
    lookup = lookup.rename(columns={'Bus_Num': 'Bus_Num2'})
    print("bus_slim.columns")
    print(lookup.columns)
    print("gen_df.columns")
    print(gen_df.columns)
    merged = pd.merge(gen_df, lookup, left_on="Bus_Num", right_on="Bus_Num2", how="inner")
    merged = merged.drop(columns='Bus_Num2')
    return merged[merged['Bus_Num'].notnull()]
def gen_df_compare(gen_df1, gen_df2, area_list=compare_owner_list):
    '''
    Compares the generation data from the two raw files.
    Parameters:
        gen_df1: dataframe containing the generation table from raw file1
        gen_df2: dataframe containing the generation table from raw file2
        area_list: currently unused; kept for interface compatibility
    Units are matched on the (Bus_Num, ID) pair, with Pgen used as the
    presence/delta indicator.  Returns one dataframe with dropped gen,
    added gen and Pgen changes, tagged through the 'change_type' and
    'change_amt' columns.
    '''
    # dropped gen: present in file1 but unmatched in file2
    dropped = pd.merge(gen_df1, gen_df2, how='left', on=['Bus_Num','ID'])
    dropped = dropped[(dropped['Pgen_x'].notnull()) & (dropped['Pgen_y'].isnull())] #Pgen picked arbitrarily
    print('Dropped gen')
    dropped['change_type'] = 'dropped gen (Pgen)'
    dropped['change_amt'] = -dropped['Pgen_x']
    print(dropped.loc[:,['Bus_Num','ID','Pgen_x','change_type','change_amt']].head())
    # added gen: present in file2 but unmatched in file1
    added = pd.merge(gen_df1, gen_df2, how='right', on=['Bus_Num','ID'])
    added = added[(added['Pgen_x'].isnull()) & (added['Pgen_y'].notnull())] #Pgen picked arbitrarily
    added['change_type'] = 'added gen (Pgen)'
    added['change_amt'] = added['Pgen_y']
    print('Added gen')
    print(added.loc[:,['Bus_Num','ID','Pgen_y','change_type','change_amt']].head())
    # changes in Pgen on matched units
    delta = pd.merge(gen_df1, gen_df2, how='inner', on=['Bus_Num','ID'])
    delta = delta[delta['Pgen_x'] != delta['Pgen_y']]
    delta['change_type'] = 'delta gen Pgen'
    delta['change_amt'] = delta['Pgen_x'] - delta['Pgen_y']
    print('Delta Pgen')
    print(delta.loc[:,['Bus_Num','ID','Pgen_x','Pgen_y','change_type','change_amt']].head())
    print(delta.head())
    # DataFrame.append was removed in pandas 2.0 -- concatenate instead
    return pd.concat([dropped, added, delta])
########################################################################
# Column Definitions #
########################################################################
# Define the columns in each section
# name -> dtype maps (and one column list) for each raw-file section
bus_dtype={'Bus_Num':'int', 'Bus_Name':'str', 'Bus_kV':'float', 'Code':'int', \
           'Area_Num':'int', 'Zone_Num':'int', \
           'Owner_Num':'int','Voltage_pu':'float','Angle':'float'}
bus_cols=['Bus_Num', 'Bus_Name', 'Bus_kV', 'Code', 'Area_Num', 'Zone_Num', \
          'Owner_Num','Voltage_pu','Angle']
# NOTE(review): 'float1' appears twice below, so the dict keeps only one entry
# (9 keys for 13 columns) -- second one was probably meant to be 'float2'.
load_dtype={'Bus_Num':'int','I_P_RC':'str','Code':'int','Area':'int','Zone':'int',\
            'P':'float','Q':'float','float1':'float','float1':'float',\
            'float3':'float','float4':'float','Owner':'int','In_Service':'int'}
fixed_shunt_dtype={'Bus_Num':'int', 'ID':'str','In_Service':'int',\
                   'float1':'float', 'float2':'float'}
gen_dtype={'Bus_Num':'int', 'ID':'str', 'Pgen':'float', 'Qgen':'float', \
           'Qmax':'float', 'Qmin':'float', 'VSched_pu':'float',\
           'Remote_Bus_Num':'int','Mbase':'float', \
           'R_source_pu':'float', 'X_source_pu':'float',\
           'RTran_pu':'float', 'XTran_pu':'float','Gentap_pu':'float', \
           'In_Service':'int', 'RMPCT':'float','Pmax':'float',\
           'Pmin':'float','Owner':'int','Owner_Fraction':'float'}
branch_dtype={'Fr_Bus_Num':'int','To_Bus_Num':'int','ID':'str','Line_R_pu':'float',\
              'Line_X_pu':'float','Charging_pu':'float','Rate_A_MVA':'float',\
              'Rate_B_MVA':'float','Rate_C_MVA':'float','Line_G_From_pu':'float',\
              'Line_B_From_pu':'float','Line_G_To_pu':'float','Line_B_To_pu':'float',\
              'In_Service':'int','Code':'int','Length':'float','Owner':'int',\
              'Fraction':'float'}
# NOTE(review): this literal uses commas only (no ':' pairs), so it builds a
# *set* of column names, not a name->dtype dict like its siblings -- confirm intent.
xfrmr1_dtype={'Fr_Bus_Num','To_Bus_Num',' Metered_on_Fr_End','ID','??',\
              'Winding1_on_Fr_End','AutoAdj','Magnetizing_G_pu',\
              'Magnetizing_B_pu','Xrmr_name','In_Service',\
              ' Owner1','Fraction_1','Owner_2','Fraction_2',\
              'Owner_3','Fraction_3','Owner_4','Fraction_4'}
xfrmr2_dtype={'Specified_R_pu':'float', 'Specified_X_pu':'float', 'Winding_MVA':'float'}
xfrmr3_dtype={'W1Ratio_pu':'float','W1NominalkV':'float','W1Angle_deg':'float',\
              'RateA_MVA':'float','Rate_B_MVA':'float','Rate_C_MVA':'float',\
              'CtrlMode':'float','CtrlBusNum':'float','R1_Max_deg':'float',\
              'R1_Min_deg':'float','V_max_MW':'float','V_min_MW':'float',\
              'Tap_postition':'float','Impedence_Tbl':'float','Load_Drop_R_pu':'float',\
              'Load_Drop_X_pu':'float','Unk':'float'}
xfrmr4_dtype={'Winding2Ratio_pu':'float', 'Winding2Nominal_kV':'float'}
area_dtype={'Area_Num':'int', 'Gen':'float', 'Ld':'float', \
            'float3':'float', 'Area_Name':'str'}
# empty: section recognized but its columns are not typed
two_term_dc_dtype={}
vsc_dc1_dtype={"Name":'str','Int1':'int','float1':'float', \
               'float2':'float', 'float3':'float'}
vsc_dc2_dtype={'Bus_Num':'int','Terminal':'int','Int1':'int','float1':'float', \
               'pu':'float','MVA1':'float','float2':'float', 'float3':'float', \
               'float4':'float', 'MVA2':'float', 'float5':'float', 'float6':'float', \
               'float7':'float',"Bus_Num":'int', 'float8':'float'}
vsc_dc3_dtype=vsc_dc2_dtype
imped_correction_dtype={'Index':'int', 'float1':'float', 'float2':'float', \
                        'float3':'float', 'float4':'float', 'float5':'float', \
                        'float6':'float', 'float7':'float', 'float8':'float', \
                        'float9':'float', 'float10':'float', 'float11':'float',\
                        'float12':'float', 'float13':'float', 'float14':'float', \
                        'float15':'float', 'float16':'float', 'float17':'float',\
                        'float18':'float', 'float19':'float', 'float20':'float', \
                        'float21':'float', 'float22':'float'}
multi_term_dc_dtype={'Fr_Bus_Num':'int','To_Bus_Num':'int','Amp1':'str',\
                     'Int1':'int','Bus3_Num':'int'}
multi_sctn_line_dtype={'Fr_Bus_Num':'int','To_Bus_Num':'int','Amp1':'str',\
                       'Int1':'int','Bus3_Num':'int'}
zone_dtype={'Zone_Num':'int','Zone_Name':'str'}
xfer_dtype={'unk':'float'}
owner_dtype={'Owner_Num':'int','Owner_Name':'str'}
facts_dtype={'Name':'float', 'Number':'float', 'int1':'float', 'int2':'float', \
             'X':'float', 'R':'float', 'pu':'float', 'col7':'float', \
             'col8':'float', 'col9':'float', 'col10':'float', \
             'col11':'float', 'col12':'float', 'col13':'float', \
             'col14':'float', 'col15':'float', 'col16':'float', \
             'col17':'float', 'col18':'float', 'col19':'float'}
# empty: section recognized but its columns are not typed
sw_shunt_dtype={}
#gne_dtype={}
# ----------------------------------------------------------------------
# Column-name lists applied to each section DataFrame sliced out of the
# parsed raw file; each list must match the width used in the iloc slice
# for its section.
# ----------------------------------------------------------------------
# BUG FIX: the 9th entry was a duplicate 'float1'; the float1..float4 run
# makes 'float2' the clear intent (duplicate DataFrame column labels break
# per-column comparisons).
load_cols = ['Bus_Num', 'I_P_RC', 'Code', 'Area', 'Zone',
             'P', 'Q', 'float1', 'float2',
             'float3', 'float4', 'Owner', 'In_Service']
fixed_shunt_cols = ['Bus_Num', 'ID', 'In_Service',
                    'float1', 'float2']
gen_cols = ['Bus_Num', 'ID', 'Pgen', 'Qgen',
            'Qmax', 'Qmin', 'VSched_pu',
            'Remote_Bus_Num', 'Mbase',
            'R_source_pu', 'X_source_pu',
            'RTran_pu', 'XTran_pu', 'Gentap_pu',
            'In_Service', 'RMPCT', 'Pmax',
            'Pmin', 'Owner', 'Owner_Fraction']
branch_cols = ['Fr_Bus_Num', 'To_Bus_Num', 'ID', 'Line_R_pu',
               'Line_X_pu', 'Charging_pu', 'Rate_A_MVA',
               'Rate_B_MVA', 'Rate_C_MVA', 'Line_G_From_pu',
               'Line_B_From_pu', 'Line_G_To_pu', 'Line_B_To_pu',
               'In_Service', 'Code', 'Length', 'Owner',
               'Fraction']
# NOTE(review): 'In-Service' (hyphen) is inconsistent with the 'In_Service'
# spelling used elsewhere; kept as-is in case downstream code references the
# hyphenated label -- confirm before normalizing.
xfrmr1_cols = ['From_Bus_Num', 'To_Bus_Num', 'Fr_Bus_Num',
               'l1c2', 'To_Bus_Num', 'ID', 'l1c4',
               'l1c5', 'l1c6', 'R_pu', 'X_pu',
               'l1c9', 'Name', 'In-Service',
               'Owner', 'Fraction']
xfrmr2_cols = ['l2c0', 'l2c1', 'l2c2']
xfrmr3_cols = ['Fr_kV', 'To_kV', 'l3c2',
               'Rate_A_MVA', 'Rate_B_MVA', 'Rate_C_MVA',
               'G Fr pu', 'B Fr pu', 'Fr_kV2',
               'To_kV2', 'l3c10', 'l3c11',
               'l3c12', 'l3c13', 'B_To_pu',
               'G_To_pu', 'l3c16']
xfrmr4_cols = ['l4c0', 'l4c1']
area_cols = ['Area_Num', 'Gen', 'Ld',
             'float3', 'Area_Name']
two_term_dc_cols = []
vsc_dc1_cols = ['Name', 'Int1', 'float1',
                'float2', 'float3']
# 'Bus_Num' appears twice, as in the original list; note the companion
# vsc_dc2_dtype dict can only hold that key once.
vsc_dc2_cols = ['Bus_Num', 'Terminal', 'Int1', 'float1',
                'pu', 'MVA1', 'float2', 'float3',
                'float4', 'MVA2', 'float5', 'float6',
                'float7', 'Bus_Num', 'float8']
vsc_dc3_cols = vsc_dc2_cols  # section 3 shares the section 2 layout
imped_correction_cols = ['Index'] + ['float%d' % i for i in range(1, 23)]
multi_term_dc_cols = ['Fr_Bus_Num', 'To_Bus_Num', 'Amp1',
                      'Int1', 'Bus3_Num']
multi_sctn_line_cols = ['Fr_Bus_Num', 'To_Bus_Num', 'Amp1',
                        'Int1', 'Bus3_Num']
zone_cols = ['Zone_Num', 'Zone_Name']
xfer_cols = ['unk']
owner_cols = ['Owner_Num', 'Owner_Name']
facts_cols = ['Name', 'Number', 'int1', 'int2', 'X', 'R', 'pu'] + \
             ['col%d' % i for i in range(7, 20)]
sw_shunt_cols = []
#gne_cols={}
########################################################################
# Main                                                                 #
########################################################################
start = datetime.now()
print('')
print('Starting raw file comparison script')
# Filename-safe timestamp: 'YYYY-MM-DD_HHMMSS' (spaces/colons/dots stripped,
# truncated to 17 characters).
datestr = str(datetime.now()).replace(' ', '_').replace(':', '').replace('.', '')[:17]
# Auto-detect the widest row across both raw files when max_cols is not
# preconfigured (max_cols, tgt_dir, raw_file1/raw_file2, max_col_cnt are
# defined earlier in this script).
if max_cols < 1:
    max_cols = max_col_cnt(os.path.join(tgt_dir, raw_file1))
    max_cols2 = max_col_cnt(os.path.join(tgt_dir, raw_file2))
    if max_cols2 > max_cols:
        max_cols = max_cols2
print('Max column count: ' + str(max_cols))
print('')
print('1. Parsing raw file 1: ' + raw_file1)
# load dataframes
print(' loading raw file')
df1 = raw_to_df(src_dir, tgt_dir, raw_file1, max_cols)
raw1_summary = parse_raw_header(df1[:3])
df1['data_type'] = 'na'
# Find sections within the dataframe
section_def1 = define_sections(df1)
# create section dataframes
print(' splitting raw file sections')
print('')  # BUG FIX: bare `print` was a no-op in Python 3; emit a blank line
for i, sublist in enumerate(section_def1):
#print("\n"+str(sublist[2])+": " )
if 'BUS DATA' in sublist[2].upper():
bus1 = df1[sublist[0]:sublist[1]].copy().iloc[:,0:9]
df1[sublist[0]:sublist[1]]['data_type']='bus'
bus1.columns = bus_cols #[s+'1' for s in bus_cols]
for key in bus_dtype:
bus1[key]=bus1[key].astype(bus_dtype[key])
elif 'LOAD DATA' in sublist[2].upper():
load1 = df1[sublist[0]:sublist[1]].copy().iloc[:,0:13]
df1[sublist[0]:sublist[1]]['data_type']='load'
elif 'FIXED SHUNT DATA' in sublist[2].upper():
fixed_shunt1 = df1[sublist[0]:sublist[1]].copy().iloc[:,0:13]
df1[sublist[0]:sublist[1]]['data_type']='fixed_shunt'
elif 'GENERATOR DATA' in sublist[2].upper():
gen1 = df1[sublist[0]:sublist[1]].copy().iloc[:,0:20]
df1[sublist[0]:sublist[1]]['data_type']='gen'
gen1.columns = gen_cols #[s+'1' for s in gen_cols]
for key in gen_dtype:
gen1[key]=gen1[key].astype(gen_dtype[key])
elif 'BRANCH DATA' in sublist[2].upper():
branch1 = df1[sublist[0]:sublist[1]].copy().iloc[:,0:18]
df1[sublist[0]:sublist[1]]['data_type']='bbranch'
branch1.columns = branch_cols #[s+'1' for s in branch_cols]
for key in branch_dtype:
branch1[key]=branch1[key].astype(branch_dtype[key])
elif 'TRANSFORMER DAT' in sublist[2].upper():
xfrmr1 = df1[sublist[0]:sublist[1]].copy().iloc[:,0:17]
| |
<gh_stars>100-1000
import json
import appscale.infrastructure as iaas
from tornado.testing import AsyncHTTPTestCase, gen_test
from tornado.httpclient import HTTPRequest, HTTPError
from tornado.ioloop import IOLoop
from mock import patch, MagicMock
from appscale.agents.base_agent import AgentRuntimeException
from appscale.agents.ec2_agent import EC2Agent
# Canonical, complete EC2 agent parameter set reused by the handler tests
# below.  'a': 'b' is presumably a junk key included to exercise tolerance
# of unknown parameters -- verify against the handler's validation.
full_params = {
  'a': 'b', 'EC2_URL': 'http://testing.appscale.com:8773/foo/bar',
  'EC2_ACCESS_KEY': 'access_key', 'EC2_SECRET_KEY': 'secret_key',
  'group': 'boogroup',
  'machine': 'booid',
  'infrastructure': 'ec2',
  'instance_type': 'booinstance_type',
  'keyname': 'bookeyname',
  'use_spot_instances': False,
  'region': 'my-zone-1',
  'zone': 'my-zone-1b',
  'autoscale_agent': True
}
class TestInfrastructureManager(AsyncHTTPTestCase):
def get_app(self):
    """Build the tornado application under test with secret "secret"
    (second positional flag's meaning not visible here -- see
    iaas.make_app)."""
    return iaas.make_app("secret", True)
############################################################
# InstancesHandler tests
############################################################
@gen_test
def test_describe_instances(self):
    """GET /instances: 401 on missing/invalid secret, 400 on a missing
    operation_id, 404 on an unknown one, and 200 echoing the stored
    status dict for a known operation_id.

    FIX: assertEquals is a deprecated unittest alias (removed in
    Python 3.12); replaced with assertEqual.
    """
    # No secret header.
    payload_request = HTTPRequest(
        allow_nonstandard_methods=True, method='GET',
        url=self.get_url('/instances'), headers=None, body=None
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 401)
    self.assertEqual(context.exception.message, 'Invalid secret')
    # Invalid secret header.
    payload_request = HTTPRequest(
        allow_nonstandard_methods=True, method='GET',
        url=self.get_url('/instances'),
        headers={'AppScale-Secret': 'invalid-secret'}, body=None
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 401)
    self.assertEqual(context.exception.message, 'Invalid secret')
    # No operation_id.
    payload_request = HTTPRequest(
        allow_nonstandard_methods=True, method='GET',
        url=self.get_url('/instances'),
        headers={'AppScale-Secret': 'secret'}, body=json.dumps({})
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 400)
    self.assertEqual(context.exception.message,
                     'operation_id is a required parameter')
    operation_id = '0000000000'
    # operation_id not valid.
    payload_request = HTTPRequest(
        allow_nonstandard_methods=True, method='GET',
        url=self.get_url('/instances'),
        body=json.dumps({'operation_id': operation_id}),
        headers={'AppScale-Secret': 'secret'}
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 404)
    self.assertEqual(context.exception.message, 'Operation id not found')
    # operation_id valid: seed the module-level operation table and expect
    # the handler to return exactly that status dict.
    vm_info = {
        'public_ips': ['public-ip'],
        'private_ips': ['private-ip'],
        'instance_ids': ['i-id']
    }
    status_info = {
        'success': True,
        'reason': 'received run request',
        'state': iaas.InstancesHandler.STATE_SUCCESS,
        'vm_info': vm_info
    }
    iaas.operation_ids[operation_id] = status_info
    result = iaas.operation_ids.get(operation_id)
    payload_request = HTTPRequest(
        allow_nonstandard_methods=True,
        method='GET', url=self.get_url('/instances'),
        body=json.dumps({'operation_id': operation_id}),
        headers={'AppScale-Secret': 'secret'}
    )
    response = yield self.http_client.fetch(payload_request)
    self.assertEqual(response.code, 200)
    self.assertEqual(result, json.loads(response.body))
@gen_test
def test_run_instances_fail_cases(self):
    """Success cases are done in the test_{cloud}_agent files.

    POST /instances: secret and parameter validation failures, then two
    accepted requests with the agent credentials check and the IOLoop
    callback patched out.

    FIX: assertEquals is a deprecated unittest alias (removed in
    Python 3.12); replaced with assertEqual.
    """
    # No secret header.
    payload_request = HTTPRequest(
        method='POST', url=self.get_url('/instances'), headers=None,
        body=json.dumps({})
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 401)
    self.assertEqual(context.exception.message, 'Invalid secret')
    # Invalid secret header.
    payload_request = HTTPRequest(
        method='POST', url=self.get_url('/instances'),
        headers={'AppScale-Secret': 'invalid-secret'}, body=json.dumps({})
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 401)
    self.assertEqual(context.exception.message, 'Invalid secret')
    # Missing parameters.
    params1 = json.dumps({})
    payload_request = HTTPRequest(
        method='POST', url=self.get_url('/instances'),
        headers={'AppScale-Secret': 'secret'}, body=params1
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 400)
    self.assertEqual(context.exception.message,
                     'infrastructure is a required parameter')
    params2 = json.dumps(full_params)
    payload_request = HTTPRequest(
        method='POST', url=self.get_url('/instances'),
        headers={'AppScale-Secret': 'secret'}, body=params2
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 400)
    self.assertEqual(context.exception.message,
                     'num_vms is a required parameter')
    params_copy = full_params.copy()
    params_copy['num_vms'] = 0
    params = json.dumps(params_copy)
    payload_request = HTTPRequest(
        method='POST', url=self.get_url('/instances'),
        headers={'AppScale-Secret': 'secret'}, body=params
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 400)
    self.assertEqual(context.exception.message, 'Invalid VM count: 0')
    params_copy = full_params.copy()
    params_copy['num_vms'] = 1
    run_params = json.dumps(params_copy)
    # Passing parameter verification and calling __spawn_vms.
    # Successful calls ignoring callback of __spawn_vms
    with patch.object(EC2Agent, 'assert_credentials_are_valid'):
        with patch.object(IOLoop, 'spawn_callback', return_value=None):
            payload_request = HTTPRequest(
                method='POST', url=self.get_url('/instances'),
                headers={'AppScale-Secret': 'secret'}, body=run_params
            )
            response = yield self.http_client.fetch(payload_request)
            self.assertEqual(response.code, 200)
            self.assertTrue(iaas.PARAM_OPERATION_ID in json.loads(response.body))
            operation_id = json.loads(response.body)[iaas.PARAM_OPERATION_ID]
            vm_info = {
                'public_ips': ['public-ip'],
                'private_ips': ['private-ip'],
                'instance_ids': ['i-id']
            }
            status_info = {
                'success': True,
                'reason': 'received run request',
                'state': iaas.InstancesHandler.STATE_SUCCESS,
                'vm_info': vm_info
            }
            iaas.operation_ids[operation_id] = status_info
            result = iaas.operation_ids.get(operation_id)
            payload_request = HTTPRequest(
                allow_nonstandard_methods=True,
                method='GET', url=self.get_url('/instances'),
                body=json.dumps({'operation_id': operation_id}),
                headers={'AppScale-Secret': 'secret'}
            )
            response = yield self.http_client.fetch(payload_request)
            self.assertEqual(response.code, 200)
            self.assertEqual(result, json.loads(response.body))
            payload_request = HTTPRequest(
                method='POST', url=self.get_url('/instances'),
                headers={'AppScale-Secret': 'secret'}, body=run_params
            )
            response = yield self.http_client.fetch(payload_request)
            self.assertEqual(response.code, 200)
            self.assertTrue(iaas.PARAM_OPERATION_ID in json.loads(response.body))
            operation_id = json.loads(response.body)[iaas.PARAM_OPERATION_ID]
            # operation_id valid.
            status_info = {
                'success': False,
                'reason': str(AgentRuntimeException),
                'state': iaas.InstancesHandler.STATE_FAILED,
            }
            iaas.operation_ids[operation_id] = status_info
            result = iaas.operation_ids.get(operation_id)
            payload_request = HTTPRequest(
                allow_nonstandard_methods=True,
                method='GET', url=self.get_url('/instances'),
                body=json.dumps({'operation_id': operation_id}),
                headers={'AppScale-Secret': 'secret'}
            )
            response = yield self.http_client.fetch(payload_request)
            self.assertEqual(response.code, 200)
            self.assertEqual(result, json.loads(response.body))
@gen_test
def test_terminate_instances(self):
    """Success cases are done in the test_{cloud}_agent files.

    DELETE /instances: secret and parameter validation failures, then
    two accepted terminate requests with the agent credentials check and
    the IOLoop callback patched out.

    FIX: assertEquals is a deprecated unittest alias (removed in
    Python 3.12); replaced with assertEqual.
    """
    # No secret header.
    payload_request = HTTPRequest(
        method='DELETE', url=self.get_url('/instances'), headers={}
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 401)
    self.assertEqual(context.exception.message, 'Invalid secret')
    # Invalid secret header.
    payload_request = HTTPRequest(
        method='DELETE', url=self.get_url('/instances'),
        headers={'AppScale-Secret': 'invalid-secret'}
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 401)
    self.assertEqual(context.exception.message, 'Invalid secret')
    payload_request = HTTPRequest(
        allow_nonstandard_methods=True,
        method='DELETE', url=self.get_url('/instances'),
        headers={'AppScale-Secret': 'secret'}, body=json.dumps({})
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 400)
    self.assertEqual(context.exception.message,
                     'infrastructure is a required parameter')
    payload_request = HTTPRequest(
        allow_nonstandard_methods=True,
        method='DELETE', url=self.get_url('/instances'),
        headers={'AppScale-Secret': 'secret'}, body=json.dumps(full_params)
    )
    with self.assertRaises(HTTPError) as context:
        yield self.http_client.fetch(payload_request)
    self.assertEqual(context.exception.code, 400)
    self.assertEqual(context.exception.message,
                     'instance_ids is a required parameter')
    params_copy = full_params.copy()
    params_copy['instance_ids'] = ['i-foobar']
    terminate_params = json.dumps(params_copy)
    # Successful calls ignoring callback of __kill_vms
    with patch.object(EC2Agent, 'assert_credentials_are_valid'):
        with patch.object(IOLoop, 'spawn_callback', return_value=None):
            payload_request = HTTPRequest(
                allow_nonstandard_methods=True,
                method='DELETE',
                url=self.get_url('/instances'), body=terminate_params,
                headers={'AppScale-Secret': 'secret'}
            )
            response = yield self.http_client.fetch(payload_request)
            self.assertEqual(response.code, 200)
            self.assertTrue(iaas.PARAM_OPERATION_ID in json.loads(response.body))
            operation_id = json.loads(response.body)[iaas.PARAM_OPERATION_ID]
            # operation_id valid.
            status_info = {
                'success': True,
                'reason': 'received kill request',
                'state': iaas.InstancesHandler.STATE_SUCCESS,
            }
            iaas.operation_ids[operation_id] = status_info
            result = iaas.operation_ids.get(operation_id)
            payload_request = HTTPRequest(
                allow_nonstandard_methods=True,
                method='GET',
                url=self.get_url('/instances'),
                body=json.dumps({'operation_id': operation_id}),
                headers={'AppScale-Secret': 'secret'}
            )
            response = yield self.http_client.fetch(payload_request)
            self.assertEqual(response.code, 200)
            self.assertEqual(result, json.loads(response.body))
            # AgentRuntimeException raised.
            payload_request = HTTPRequest(
                allow_nonstandard_methods=True,
                method='DELETE',
                url=self.get_url('/instances'), body=terminate_params,
                headers={'AppScale-Secret': 'secret'}
            )
            response = yield self.http_client.fetch(payload_request)
            self.assertEqual(response.code, 200)
            self.assertTrue(iaas.PARAM_OPERATION_ID in json.loads(response.body))
            operation_id = json.loads(response.body)[iaas.PARAM_OPERATION_ID]
            # operation_id valid.
            status_info = {
                'success': False,
                'state': iaas.InstancesHandler.STATE_FAILED,
                'reason': str(AgentRuntimeException)
            }
            iaas.operation_ids[operation_id] = status_info
            result = iaas.operation_ids.get(operation_id)
            payload_request = HTTPRequest(
                allow_nonstandard_methods=True,
                method='GET', url=self.get_url('/instances'),
                body=json.dumps({'operation_id': operation_id}),
                headers={'AppScale-Secret': 'secret'}
            )
            response = yield self.http_client.fetch(payload_request)
            self.assertEqual(response.code, 200)
            self.assertEqual(result, json.loads(response.body))
############################################################
# InstancesHandler helper methods tests
############################################################
@gen_test
def test_spawn_vms(self):
    # _spawn_vms is driven entirely through mocks.  The side_effect lists
    # below are order-critical (one entry consumed per call), so the three
    # scenarios -- success, exception-but-VMs-started, exception-and-no-VMs
    # -- must run in exactly this sequence.
    no_vms = ([], [], [])
    vm_info_return = (['i-id'], ['public-ip'], ['private-ip'])
    describe_vms_return = (['public-ip'], ['private-ip'], ['i-id'])
    agent_exception = AgentRuntimeException("Runtime Exception")
    mocked_agent = EC2Agent()
    mocked_agent.assert_credentials_are_valid = MagicMock()
    mocked_agent.run_instances = MagicMock(side_effect=[vm_info_return,
                                                        agent_exception,
                                                        agent_exception])
    mocked_agent.describe_instances = MagicMock(side_effect=[no_vms,
                                                             describe_vms_return])
    mocked_agent.configure_instance_security = MagicMock()
    # Scenario 1: run_instances succeeds -> operation ends STATE_SUCCESS
    # with the described VM info recorded.
    initial_status_info = {
        'success': False,
        'reason': 'received run request',
        'state': iaas.InstancesHandler.STATE_PENDING
    }
    iaas.operation_ids['op_id'] = initial_status_info
    iaas.InstancesHandler._spawn_vms(mocked_agent, 1, full_params, 'op_id')
    vm_info = {
        'public_ips': ['public-ip'],
        'private_ips': ['private-ip'],
        'instance_ids': ['i-id']
    }
    result_status_info = {
        'success': True,
        'reason': 'received run request',
        'state': iaas.InstancesHandler.STATE_SUCCESS,
        'vm_info': vm_info
    }
    self.assertEqual(iaas.operation_ids['op_id'], result_status_info)
    # Exception happened but vms were started: state is SUCCESS with
    # vm_info, but success stays False and the reason is the exception.
    mocked_agent.describe_instances = MagicMock(side_effect=[no_vms,
                                                             describe_vms_return])
    initial_status_info = {
        'success': False,
        'reason': 'received run request',
        'state': iaas.InstancesHandler.STATE_PENDING
    }
    iaas.operation_ids['op_id_2'] = initial_status_info
    iaas.InstancesHandler._spawn_vms(mocked_agent, 1, full_params, 'op_id_2')
    result_status_info = {
        'success': False,
        'reason': str(agent_exception),
        'state': iaas.InstancesHandler.STATE_SUCCESS,
        'vm_info': vm_info
    }
    self.assertEqual(iaas.operation_ids['op_id_2'], result_status_info)
    # Exception happened but vms were not started: operation fails outright.
    mocked_agent.describe_instances = MagicMock(side_effect=[no_vms, no_vms])
    initial_status_info = {
        'success': False,
        'reason': 'received run request',
        'state': iaas.InstancesHandler.STATE_PENDING
    }
    iaas.operation_ids['op_id_3'] = initial_status_info
    iaas.InstancesHandler._spawn_vms(mocked_agent, 1, full_params, 'op_id_3')
    result_status_info = {
        'success': False,
        'reason': str(agent_exception),
        'state': iaas.InstancesHandler.STATE_FAILED,
    }
    self.assertEqual(iaas.operation_ids['op_id_3'], result_status_info)
@gen_test
def test_kill_vms(self):
    # terminate_instances is mocked with an order-critical side_effect
    # list: first call succeeds, second raises AgentRuntimeException.
    agent_exception = AgentRuntimeException("Runtime Exception")
    mocked_agent = EC2Agent()
    mocked_agent.terminate_instances = MagicMock(side_effect=[None,
                                                              agent_exception])
    # Scenario 1: successful termination -> STATE_SUCCESS, success True.
    initial_status_info = {
        'success': False,
        'reason': 'received kill request',
        'state': iaas.InstancesHandler.STATE_PENDING,
        'vm_info': None
    }
    iaas.operation_ids['op_id'] = initial_status_info
    terminate_params = full_params.copy()
    terminate_params['instance_ids'] = ['i-foobar']
    iaas.InstancesHandler._kill_vms(mocked_agent, terminate_params, 'op_id')
    result_status_info = {
        'success': True,
        'reason': 'received kill request',
        'state': iaas.InstancesHandler.STATE_SUCCESS,
        'vm_info': None
    }
    self.assertEqual(iaas.operation_ids['op_id'], result_status_info)
    # Scenario 2: agent raises -> STATE_FAILED with the exception text.
    initial_status_info = {
        'success': False,
        'reason': 'received kill request',
        'state': iaas.InstancesHandler.STATE_PENDING,
        'vm_info': None
    }
    iaas.operation_ids['op_id_2'] = initial_status_info
    iaas.InstancesHandler._kill_vms(mocked_agent, terminate_params, 'op_id_2')
    result_status_info = {
        'success': False,
        'reason': str(agent_exception),
        'state': iaas.InstancesHandler.STATE_FAILED,
        'vm_info': None
    }
    self.assertEqual(iaas.operation_ids['op_id_2'], result_status_info)
@gen_test
def test_describe_vms(self):
    """_describe_vms returns the agent's tuple unchanged and propagates
    AgentRuntimeException.

    FIX: assertEquals is a deprecated unittest alias (removed in
    Python 3.12); replaced with assertEqual.
    """
    agent_exception = AgentRuntimeException("Runtime Exception")
    vm_info_return = (['public-ip'], ['private-ip'], ['i-id'])
    mocked_agent = EC2Agent()
    mocked_agent.describe_instances = MagicMock(side_effect=[vm_info_return,
                                                             agent_exception])
    # Test describe vms returns values.
    expected = vm_info_return
    actual = iaas.InstancesHandler._describe_vms(mocked_agent, full_params)
    self.assertEqual(actual, expected)
    # Test describe vms runs into exception.
    with self.assertRaises(AgentRuntimeException):
        iaas.InstancesHandler._describe_vms(mocked_agent, full_params)
    # Earlier successful result is unaffected by the failing call.
    self.assertEqual(actual, expected)
############################################################
# InstanceHandler tests
############################################################
@gen_test
def test_attach_disk(self):
"""Success cases are done in the test_{cloud}_agent files."""
# No secret header.
payload_request = HTTPRequest(
method='POST', url=self.get_url('/instance'), headers=None,
body=json.dumps({})
)
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(payload_request)
self.assertEqual(context.exception.code, 401)
self.assertEqual(context.exception.message, 'Invalid secret')
# Invalid secret header.
payload_request = HTTPRequest(
method='POST', url=self.get_url('/instance'),
headers={'AppScale-Secret': 'invalid-secret'}, body=json.dumps({})
)
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(payload_request)
self.assertEqual(context.exception.code, 401)
self.assertEqual(context.exception.message, 'Invalid secret')
# Missing parameters.
params1 = json.dumps({})
payload_request = HTTPRequest(
method='POST', url=self.get_url('/instance'),
headers={'AppScale-Secret': 'secret'}, body=params1
)
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(payload_request)
self.assertEqual(context.exception.code, 400)
self.assertEqual(context.exception.message,
'disk_name is a required parameter')
params2 = json.dumps({'disk_name': 'foo'})
payload_request = HTTPRequest(
method='POST', url=self.get_url('/instance'),
headers={'AppScale-Secret': 'secret'}, body=params2
)
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(payload_request)
self.assertEqual(context.exception.code, 400)
self.assertEqual(context.exception.message,
'instance_id is a required parameter')
params3 = json.dumps({'disk_name': 'foo', 'instance_id': 'i-foobar'})
payload_request = HTTPRequest(
method='POST', url=self.get_url('/instance'),
headers={'AppScale-Secret': 'secret'}, body=params3
)
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(payload_request)
self.assertEqual(context.exception.code, 400)
self.assertEqual(context.exception.message,
'infrastructure is a required parameter')
params_copy = full_params.copy()
params_copy.update({'disk_name': 'foo', 'instance_id': 'i-foobar'})
attach_params = json.dumps(params_copy)
attach_exception = AgentRuntimeException("Runtime Exception")
with patch.object(EC2Agent, 'assert_credentials_are_valid'):
with patch.object(EC2Agent, 'attach_disk', side_effect=[
attach_exception, '/dev/sdc']):
payload_request = HTTPRequest(
method='POST', url=self.get_url('/instance'),
headers={'AppScale-Secret': 'secret'}, body=attach_params
)
with self.assertRaises(HTTPError) as | |
import sys, time, os, importlib, random
import numpy as np
import torch
import torch.nn.functional as F
from scipy.io import wavfile
from config.args import parser
from tools import load_yaml_config, load_speaker_model_parameters, reverb, reverb_np
# Parse CLI arguments, then overlay values from the YAML config file on
# top of them (see tools.load_yaml_config).
args = parser.parse_args()
args = load_yaml_config(args, parser)
# Restrict which GPU(s) are visible; must be set before any CUDA call.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# set random seed for reproducibility
def seed_torch(seed):
    """Seed every RNG source used by the attack (python `random`, numpy,
    torch CPU and CUDA) and force deterministic cuDNN behaviour so runs
    are reproducible."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    # Disable cuDNN autotuning/non-deterministic kernels entirely.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.enabled = False
# Seed all RNGs once at startup with the configured seed.
seed_torch(args.seed)
# load wav data with shape (1, SampleNumbers)
def loadWAV(filename):
    """Read *filename* as PCM audio and return it as a float64 array of
    shape (1, num_samples).  The sample rate is read but discarded.

    NOTE(review): assumes mono input -- a stereo file would yield shape
    (1, num_samples, 2); confirm the dataset is mono.

    BUG FIX: ``np.float`` was removed in NumPy 1.24; use ``np.float64``,
    the exact type the old alias referred to.
    """
    sample_rate, audio = wavfile.read(filename)
    feat = np.stack([audio], axis=0).astype(np.float64)
    return feat
# perform attack
def Attack(speaker_model, enroll_speaker_feat, train_speaker, speaker, target, args):
    '''
    Two-stage PGD that crafts a universal adversarial perturbation making
    `speaker` be verified as `target` by the ASV model.

    speaker_model: the ASV model we want to attack.
    speaker: original speaker name.
    target: targeted speaker name.
    enroll_speaker_feat: all enrolled targeted speaker embeddings.
    train_speaker: original speakers with their train audio set.
    args: hyper-parameters

    Returns (result1, step1, loss, result, step2, loss1_value, loss2_value):
    the best perturbation / iteration count / final loss of step 1,
    followed by the step-2 perturbation, its iteration count, and both
    step-2 loss terms.
    NOTE(review): assumes args.max_steps >= 1 -- with 0 steps several of
    the returned names would be unbound.
    '''
    # Set parameter
    num_train = args.num_train
    adv_len = int(args.noise_len * 16000)  # perturbation length in samples at 16 kHz
    max_steps = args.max_steps
    lr = args.lr
    momentum = args.momentum
    max_delta = args.max_delta  # L-inf bound on perturbation amplitude
    e1 = args.e1  # convergence threshold for the ASV (attack) loss
    e2 = args.e2  # convergence threshold for the spectral loss
    # Load wav data
    wavs = train_speaker[speaker]
    wav_input_np = []
    max_len = 0
    for i in range(num_train):
        wav = os.path.join(args.wav_path, wavs[i])
        wav_data = loadWAV(wav)
        if wav_data.shape[1] > max_len:
            max_len = wav_data.shape[1]
        wav_input_np.append(wav_data)
    # repeat wav data to max audio length in train audio set
    for i in range(num_train):
        wav_input_np[i] = np.tile(wav_input_np[i], (1, max_len // wav_input_np[i].shape[1] + 1))[:, 0:max_len]
    wav_input_np = np.asarray(wav_input_np).squeeze(axis=1)
    # with rir simulation, load train rir path list
    if args.rir:
        rir_wavs_train = os.listdir(args.attack_rir_path)
    # initialize tensors
    # NOTE(review): the int16 cast quantizes the random start; all later
    # updates are float, so the perturbation stops being int16 after step 1.
    adv_noise_np = np.random.uniform(-max_delta, max_delta, adv_len).astype(np.int16)
    target_feat = torch.FloatTensor(enroll_speaker_feat[target]).cuda()
    wav_input = torch.FloatTensor(wav_input_np).cuda()
    # Step1 maximize the attack on ASV model
    # NOTE(review): integer division drops the trailing
    # num_train % args.batch clips from every epoch.
    batch_num = num_train // args.batch # we do this for GPU memory limit
    grad_pre = 0
    result = adv_noise_np
    for i in range(max_steps):
        loss = 0
        grad = 0
        dist = []
        for b in range(batch_num):
            wav_input_batch = wav_input[b*args.batch:(b+1)*args.batch]
            # optimized tensors
            adv_noise = torch.FloatTensor(adv_noise_np).cuda()
            adv_noise.requires_grad = True
            # pad the adversarial perturbation to have a same length with train audios
            adv_noise_tmp = adv_noise.repeat(max_len // adv_len + 1)[:max_len]
            # if doing rir simulation, we have to reverberate the audio and perturbation
            if not args.rir:
                wav_adv_input = wav_input_batch + adv_noise_tmp.unsqueeze(0)
            else:
                rir_tensor = torch.FloatTensor(loadWAV(os.path.join(args.attack_rir_path, random.choice(rir_wavs_train)))).cuda()
                rir_tensor = rir_tensor / torch.norm(rir_tensor) # Maintain the energy of the original signal
                adv_noise_tmp = reverb(adv_noise_tmp.unsqueeze(0), rir_tensor)
                wav_input_batch = reverb(wav_input_batch, rir_tensor)
                wav_adv_input = wav_input_batch + adv_noise_tmp
            wav_adv_input = torch.clamp(wav_adv_input, -2**15, 2**15-1) # clip the input data to a normal format
            wav_feat = speaker_model.forward(wav_adv_input)
            dist_batch = F.cosine_similarity(wav_feat, target_feat)
            # loss1: hinge pushing similarity above args.thresh + args.margine
            loss1 = torch.sum(torch.clamp(args.thresh - dist_batch + args.margine, min=0.))
            loss1.backward()
            loss += loss1.item()
            grad += adv_noise.grad.data.cpu().numpy()
            dist.extend(list(dist_batch.detach().cpu().numpy()))
        # Collect and average the loss and gradient
        loss /= batch_num
        grad /= batch_num
        print('step1: %02d, %s to %s, loss: % 2.4f' % (i, speaker, target, loss))
        # if convergence break
        result = adv_noise_np
        if loss < e1:
            break
        # PGD can also be interpreted as an iterative algorithm to solve the problem
        grad_new = momentum * grad_pre + grad
        grad_pre = grad_new
        adv_noise_np = adv_noise_np - lr * np.sign(grad_new)
        adv_noise_np = np.clip(adv_noise_np, -max_delta, max_delta)
    # record the iteration steps and best adversarial perturbation for step1
    step1 = i
    result1 = result
    # Step2 minimize impact on ASR model
    # initialize the adversarial perturbation from best result from step1
    grad_pre = 0
    for i in range(max_steps):
        loss1_value = 0
        loss2_value = 0
        grad1 = 0
        grad2 = 0
        dist = []
        for b in range(batch_num):
            wav_input_batch = wav_input[b*args.batch:(b+1)*args.batch]
            # optimized tensors
            adv_noise = torch.FloatTensor(adv_noise_np).cuda()
            adv_noise.requires_grad = True
            # pad the adversarial perturbation to have a same length with train audios
            adv_noise_tmp = adv_noise.repeat(max_len // adv_len + 1)[:max_len]
            # if doing rir simulation, we have to reverberate the audio and perturbation
            if not args.rir:
                wav_adv_input = wav_input_batch + adv_noise_tmp.unsqueeze(0)
            else:
                rir_tensor = torch.FloatTensor(loadWAV(os.path.join(args.attack_rir_path, random.choice(rir_wavs_train)))).cuda()
                rir_tensor = rir_tensor / torch.norm(rir_tensor) # Maintain the energy of the original signal
                adv_noise_tmp = reverb(adv_noise_tmp.unsqueeze(0), rir_tensor)
                wav_input_batch = reverb(wav_input_batch, rir_tensor)
                wav_adv_input = wav_input_batch + adv_noise_tmp
            wav_adv_input = torch.clamp(wav_adv_input, -2**15, 2**15-1) # clip the input data to a normal format
            wav_feat = speaker_model.forward(wav_adv_input)
            dist_batch = F.cosine_similarity(wav_feat, target_feat)
            # loss1
            loss1 = torch.sum(torch.clamp(args.thresh - dist_batch + args.margine, min=0.))
            # loss2: mean STFT magnitude of the perturbation (imperceptibility term)
            spec_stft = torch.stft(adv_noise/2**15, n_fft=320, hop_length=160, win_length=320, window=torch.hamming_window(320).cuda())
            loss2 = torch.mean(torch.sqrt(torch.square(spec_stft[:,:,0]) + torch.square(spec_stft[:,:,1])))
            loss1_value += loss1.item()
            loss2_value += loss2.item()
            # caculate gradient separately
            grad1 += torch.autograd.grad(loss1, adv_noise)[0].detach().cpu().numpy()
            grad2 += torch.autograd.grad(loss2, adv_noise)[0].detach().cpu().numpy()
            dist.extend(list(dist_batch.detach().cpu().numpy()))
        # Collect and average the loss and gradient
        loss1_value /= batch_num
        loss2_value /= batch_num
        grad1 /= batch_num
        grad2 /= batch_num
        print('step2: %02d, %s to %s, loss1: %2.4f, loss2: %2.4f' % (i, speaker, target, loss1_value, loss2_value))
        # if convergence break
        result = adv_noise_np
        if loss1_value <= e1 and loss2_value <= e2:
            break
        # PGD can also be interpreted as an iterative algorithm to solve the problem:
        # only loss terms still above their thresholds contribute to the update.
        alpha = args.gamma if loss1_value > e1 else 0
        beta = 1 if loss2_value > e2 else 0
        # NOTE(review): grad2's L1 norm has no epsilon guard (grad1's does);
        # the nan_to_num below masks a potential division by zero.
        grad_new = momentum * grad_pre + alpha * grad1 / (np.linalg.norm(grad1, 1) + 1e-12) + beta * grad2 / np.linalg.norm(grad2, 1)
        grad_new = np.nan_to_num(grad_new)
        grad_pre = grad_new
        adv_noise_np = adv_noise_np - lr * np.sign(grad_new)
        adv_noise_np = np.clip(adv_noise_np, -max_delta, max_delta)
    step2 = i
    # return the best result, iteration steps, loss value for step1 and step2,
    return result1, step1, loss, result, step2, loss1_value, loss2_value
def Test(speaker_model, enroll_speaker_feat, test_speaker, speaker, target, adv_noise, args):
    '''Evaluate an adversarial perturbation against the ASV model.

    Args:
        speaker_model: the ASV model we want to attack.
        enroll_speaker_feat: dict mapping speaker name -> enrolled embedding.
        test_speaker: dict mapping speaker name -> list of test wav files.
        speaker: original speaker name.
        target: targeted speaker name.
        adv_noise: adversarial perturbation (1-D numpy array) that should make
            the ASV model verify `speaker` as `target`.
        args: hyper-parameters.

    Returns:
        (success_count, total_count, success_rate_percent)
    '''
    wavs = test_speaker[speaker]
    target_feat = torch.FloatTensor(enroll_speaker_feat[target]).cuda()
    success = 0
    # with rir simulation, load test rir path list
    if args.rir:
        rir_wavs_test = os.listdir(args.attack_rir_path)
    # evaluate test audios one by one
    for wav in wavs:
        wav = os.path.join(args.wav_path, wav)
        wav_data = loadWAV(wav)
        # pad (tile) the adversarial perturbation to the same length as this
        # test audio, then truncate to the exact sample count
        adv_noise_tmp = np.tile(adv_noise, wav_data.shape[1] // adv_noise.shape[0] + 1)[0:wav_data.shape[1]]
        # if doing rir simulation, we have to reverberate the audio and perturbation
        if not args.rir:
            wav_data = wav_data + np.expand_dims(adv_noise_tmp, axis=0)
        else:
            # pick a random test RIR; normalize its energy before convolving
            rir_np = loadWAV(os.path.join(args.attack_rir_path, random.choice(rir_wavs_test)))
            rir_np = rir_np / np.linalg.norm(rir_np)
            adv_noise_tmp = reverb_np(np.expand_dims(adv_noise_tmp, axis=0), rir_np)
            wav_data = reverb_np(wav_data, rir_np)
            wav_data = wav_data + adv_noise_tmp
        # clip to valid 16-bit PCM range
        wav_data = np.clip(wav_data, -2**15, 2**15-1)
        wav_data_tensor = torch.FloatTensor(wav_data).cuda()
        wav_feat = speaker_model.forward(wav_data_tensor).detach()
        # use cosine similarity or Euclidean distance metric; the score is
        # oriented so that larger always means "more similar"
        if args.cosine_similarity:
            dist = F.cosine_similarity(wav_feat, target_feat).cpu().numpy()
            score = 1 * np.mean(dist)
        else:
            wav_feat = F.normalize(wav_feat, p=2, dim=1)
            target_feat = F.normalize(target_feat, p=2, dim=1)
            dist = F.pairwise_distance(wav_feat.unsqueeze(-1), target_feat.unsqueeze(-1).transpose(0,2)).detach().cpu().numpy()
            # negate so that a higher score still means closer to the target
            score = -1 * np.mean(dist)
        print(score)
        # attack succeeds on this utterance if the score clears the
        # verification threshold
        if score > args.thresh:
            success += 1
    print('%s to %s attack success rate: %2.1f' % (speaker, target, success * 1.0 / len(wavs) * 100))
    return success, len(wavs), success * 1.0 / len(wavs) * 100
####################################################################################################################
####################################################################################################################
### step 1: Set Parameters ###
enroll_file = args.enroll_file
train_file = args.train_file
test_file = args.test_file
wav_path = args.wav_path
enroll_path = args.enroll_path
libri_speaker_file = './datas/gender_librispeech_test_clean.txt'
def _read_speaker_map(list_file):
    """Parse a '<speaker> <wav_path>' list file into {speaker: [wav, ...]}.

    Replaces three identical copy-pasted parsing loops for the enroll,
    train and test lists.
    """
    speaker_map = {}
    with open(list_file, 'r') as f:
        for line in f:
            speaker = line.split(" ")[0]
            # second whitespace-separated field is the wav path
            speaker_map.setdefault(speaker, []).append(line.split(" ")[1].strip())
    return speaker_map
# speaker -> list of wav paths, for each data split
enroll_speaker = _read_speaker_map(enroll_file)
train_speaker = _read_speaker_map(train_file)
test_speaker = _read_speaker_map(test_file)
### step 2: Load speaker model ###
print('Loading speaker model...')
# Resolve the model class dynamically from models/<args.model>.py.
speaker_model = importlib.import_module('models.' + args.model).__getattribute__('MainModel')
speaker_model = speaker_model(**vars(args)).cuda()
if args.initial_model != "":
    # Restore pretrained weights when a checkpoint path is given.
    speaker_model = load_speaker_model_parameters(speaker_model, args.initial_model)
    print("Model %s loaded!"%args.initial_model)
speaker_model.eval()
### step 3: enrolling speakers ###
print('Enrolling...')
# speaker name -> mean embedding over all of that speaker's enrollment wavs
enroll_speaker_feat = {}
with torch.no_grad():
    for k,v in enroll_speaker.items():
        enroll_speaker_feat[k] = 0
        for i in range(len(v)):
            wav = os.path.join(wav_path, v[i])
            wav_data = torch.FloatTensor(loadWAV(wav)).cuda()
            wav_feat = speaker_model.forward(wav_data)
            # accumulate on CPU as numpy to keep GPU memory flat
            enroll_speaker_feat[k] += wav_feat.detach().cpu().numpy()
            del wav_feat, wav_data
        # average the embeddings over all enrollment utterances
        enroll_speaker_feat[k] /= len(v)
print('Enrolling ok!')
### step 4: match original and target speaker ###
female_speaker = []
male_speaker = []
# Split LibriSpeech test-clean speakers by gender; each line of the file
# is "<speaker> <M|F>".
with open(libri_speaker_file, 'r') as f:
    for line in f.readlines():
        if line.split(" ")[1].strip() == 'M':
            male_speaker.append(line.split(" ")[0])
        else:
            female_speaker.append(line.split(" ")[0])
original_speaker = []
target_speaker = []
# intra-gender
for speaker in female_speaker:
| |
<filename>Lib/fontParts/test/test_anchor.py
import unittest
import collections
from fontParts.base import FontPartsError
class TestAnchor(unittest.TestCase):
def getAnchor_generic(self):
    """Return a standalone anchor preloaded with known test values."""
    anchor, _ = self.objectGenerator("anchor")
    anchor.name = "Anchor Attribute Test"
    anchor.x = 1
    anchor.y = 2
    anchor.color = None
    return anchor
# ----
# repr
# ----
def test_reprContents(self):
    """_reprContents must return a list of strings."""
    anchor = self.getAnchor_generic()
    value = anchor._reprContents()
    self.assertIsInstance(value, list)
    for i in value:
        self.assertIsInstance(i, str)
def test_reprContents_noGlyph(self):
    """Same contract for an orphan anchor (no parent glyph)."""
    anchor, _ = self.objectGenerator("anchor")
    value = anchor._reprContents()
    self.assertIsInstance(value, list)
    for i in value:
        self.assertIsInstance(i, str)
def test_reprContents_color(self):
    """Same contract when a color is set."""
    anchor = self.getAnchor_generic()
    anchor.color = (1, 0, 1, 1)
    value = anchor._reprContents()
    self.assertIsInstance(value, list)
    for i in value:
        self.assertIsInstance(i, str)
def test_reprContents_noGlyph_color(self):
    """Same contract for an orphan anchor with a color set."""
    anchor, _ = self.objectGenerator("anchor")
    anchor.color = (1, 0, 1, 1)
    value = anchor._reprContents()
    self.assertIsInstance(value, list)
    for i in value:
        self.assertIsInstance(i, str)
# ----------
# Attributes
# ----------
# Name
def test_get(self):
    """Name set in the fixture is readable."""
    anchor = self.getAnchor_generic()
    self.assertEqual(anchor.name, "Anchor Attribute Test")
def test_set_valid(self):
    anchor = self.getAnchor_generic()
    anchor.name = u"foo"
    self.assertEqual(anchor.name, u"foo")
def test_set_none(self):
    # name is clearable
    anchor = self.getAnchor_generic()
    anchor.name = None
    self.assertIsNone(anchor.name)
def test_set_invalid(self):
    # non-string names are rejected
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.name = 123
# Color
def test_color_get_none(self):
    # color defaults to None in the fixture
    anchor = self.getAnchor_generic()
    self.assertIsNone(anchor.color)
def test_color_set_valid_max(self):
    anchor = self.getAnchor_generic()
    anchor.color = (1, 1, 1, 1)
    self.assertEqual(anchor.color, (1, 1, 1, 1))
def test_color_set_valid_min(self):
    anchor = self.getAnchor_generic()
    anchor.color = (0, 0, 0, 0)
    self.assertEqual(anchor.color, (0, 0, 0, 0))
def test_color_set_valid_decimal(self):
    anchor = self.getAnchor_generic()
    anchor.color = (0.1, 0.2, 0.3, 0.4)
    self.assertEqual(anchor.color, (0.1, 0.2, 0.3, 0.4))
def test_color_set_none(self):
    anchor = self.getAnchor_generic()
    anchor.color = None
    self.assertIsNone(anchor.color)
def test_color_set_invalid_over_max(self):
    # channel values above 1 are rejected
    anchor = self.getAnchor_generic()
    with self.assertRaises(ValueError):
        anchor.color = (1.1, 0.2, 0.3, 0.4)
def test_color_set_invalid_under_min(self):
    # renamed from test_color_set_invalid_uner_min (typo);
    # channel values below 0 are rejected
    anchor = self.getAnchor_generic()
    with self.assertRaises(ValueError):
        anchor.color = (-0.1, 0.2, 0.3, 0.4)
def test_color_set_invalid_too_few(self):
    # color must have exactly four channels
    anchor = self.getAnchor_generic()
    with self.assertRaises(ValueError):
        anchor.color = (0.1, 0.2, 0.3)
def test_color_set_invalid_string(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.color = "0.1,0.2,0.3,0.4"
def test_color_set_invalid_int(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.color = 123
# Identifier
def test_identifier_get_none(self):
    # identifier is None until explicitly generated
    anchor = self.getAnchor_generic()
    self.assertIsNone(anchor.identifier)
def test_identifier_generated_type(self):
    anchor = self.getAnchor_generic()
    anchor.generateIdentifier()
    self.assertIsInstance(anchor.identifier, str)
def test_identifier_consistency(self):
    anchor = self.getAnchor_generic()
    anchor.generateIdentifier()
    # get: twice to test consistency
    self.assertEqual(anchor.identifier, anchor.identifier)
def test_identifier_cannot_set(self):
    # identifier is a read-only property
    anchor = self.getAnchor_generic()
    with self.assertRaises(FontPartsError):
        anchor.identifier = "ABC"
def test_identifier_force_set(self):
    # the private _setIdentifier API may force a specific value
    identifier = "ABC"
    anchor = self.getAnchor_generic()
    anchor._setIdentifier(identifier)
    self.assertEqual(anchor.identifier, identifier)
# Index
def getAnchor_index(self):
    """Return a glyph holding three anchors, for index tests."""
    glyph, _ = self.objectGenerator("glyph")
    glyph.appendAnchor("anchor 0", (0, 0))
    glyph.appendAnchor("anchor 1", (0, 0))
    glyph.appendAnchor("anchor 2", (0, 0))
    return glyph
def test_get_index_noParent(self):
    # an orphan anchor has no index
    anchor, _ = self.objectGenerator("anchor")
    self.assertIsNone(anchor.index)
def test_get_index(self):
    # anchors report their position within the parent glyph
    glyph = self.getAnchor_index()
    for i, anchor in enumerate(glyph.anchors):
        self.assertEqual(anchor.index, i)
def test_set_index_noParent(self):
    # index is read-only: assignment always raises
    anchor, _ = self.objectGenerator("anchor")
    with self.assertRaises(FontPartsError):
        anchor.index = 1
def test_set_index_positive(self):
    glyph = self.getAnchor_index()
    anchor = glyph.anchors[0]
    with self.assertRaises(FontPartsError):
        anchor.index = 2
def test_set_index_negative(self):
    glyph = self.getAnchor_index()
    anchor = glyph.anchors[1]
    with self.assertRaises(FontPartsError):
        anchor.index = -1
# x
def test_x_get(self):
    """x set in the fixture is readable."""
    anchor = self.getAnchor_generic()
    self.assertEqual(anchor.x, 1)
def test_x_set_valid_positive(self):
    anchor = self.getAnchor_generic()
    anchor.x = 100
    self.assertEqual(anchor.x, 100)
def test_x_set_valid_negative(self):
    anchor = self.getAnchor_generic()
    anchor.x = -100
    self.assertEqual(anchor.x, -100)
def test_x_set_valid_zero(self):
    anchor = self.getAnchor_generic()
    anchor.x = 0
    self.assertEqual(anchor.x, 0)
def test_x_set_valid_positive_decimal(self):
    anchor = self.getAnchor_generic()
    anchor.x = 1.1
    self.assertEqual(anchor.x, 1.1)
def test_x_set_valid_negative_decimal(self):
    anchor = self.getAnchor_generic()
    anchor.x = -1.1
    self.assertEqual(anchor.x, -1.1)
def test_x_set_invalid_none(self):
    # x is required: None is rejected
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.x = None
def test_x_set_invalid_string(self):
    # renamed from test_x_set_valid_string: a string is an invalid value
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.x = "ABC"
# y
def test_y_get(self):
    """y set in the fixture is readable."""
    anchor = self.getAnchor_generic()
    self.assertEqual(anchor.y, 2)
def test_y_set_valid_positive(self):
    anchor = self.getAnchor_generic()
    anchor.y = 100
    self.assertEqual(anchor.y, 100)
def test_y_set_valid_negative(self):
    anchor = self.getAnchor_generic()
    anchor.y = -100
    self.assertEqual(anchor.y, -100)
def test_y_set_valid_zero(self):
    anchor = self.getAnchor_generic()
    anchor.y = 0
    self.assertEqual(anchor.y, 0)
def test_y_set_valid_positive_decimal(self):
    anchor = self.getAnchor_generic()
    anchor.y = 1.1
    self.assertEqual(anchor.y, 1.1)
def test_y_set_valid_negative_decimal(self):
    anchor = self.getAnchor_generic()
    anchor.y = -1.1
    self.assertEqual(anchor.y, -1.1)
def test_y_set_invalid_none(self):
    # y is required: None is rejected
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.y = None
def test_y_set_invalid_string(self):
    # renamed from test_y_set_valid_string: a string is an invalid value
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.y = "ABC"
# -------
# Methods
# -------
def getAnchor_copy(self):
    """Return an anchor with name and color set, for copy tests."""
    anchor = self.getAnchor_generic()
    anchor.color = (0.1, 0.2, 0.3, 0.4)
    return anchor
# copy
def test_copy_separate_objects(self):
    # renamed from test_copy_seperate_objects (typo);
    # a copy must be a distinct object
    anchor = self.getAnchor_copy()
    copied = anchor.copy()
    self.assertIsNot(anchor, copied)
def test_copy_same_name(self):
    anchor = self.getAnchor_copy()
    copied = anchor.copy()
    self.assertEqual(anchor.name, copied.name)
def test_copy_same_color(self):
    anchor = self.getAnchor_copy()
    copied = anchor.copy()
    self.assertEqual(anchor.color, copied.color)
def test_copy_same_identifier(self):
    # the fixture never generates an identifier, so both values must match
    anchor = self.getAnchor_copy()
    copied = anchor.copy()
    self.assertEqual(anchor.identifier, copied.identifier)
def test_copy_generated_identifier_different(self):
    # independently generated identifiers must not collide
    anchor = self.getAnchor_copy()
    copied = anchor.copy()
    anchor.generateIdentifier()
    copied.generateIdentifier()
    self.assertNotEqual(anchor.identifier, copied.identifier)
def test_copy_same_x(self):
    anchor = self.getAnchor_copy()
    copied = anchor.copy()
    self.assertEqual(anchor.x, copied.x)
def test_copy_same_y(self):
    anchor = self.getAnchor_copy()
    copied = anchor.copy()
    self.assertEqual(anchor.y, copied.y)
# transform
def test_transformBy_valid_no_origin(self):
    # (1, 2) under [[2, 0], [0, 3]] + (-3, 2) -> (-1, 8)
    anchor = self.getAnchor_generic()
    anchor.transformBy((2, 0, 0, 3, -3, 2))
    self.assertEqual(anchor.x, -1)
    self.assertEqual(anchor.y, 8)
def test_transformBy_valid_origin(self):
    # scaling about the anchor's own position leaves it unchanged
    anchor = self.getAnchor_generic()
    anchor.transformBy((2, 0, 0, 2, 0, 0), origin=(1, 2))
    self.assertEqual(anchor.x, 1)
    self.assertEqual(anchor.y, 2)
def test_transformBy_invalid_one_string_value(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.transformBy((1, 0, 0, 1, 0, "0"))
def test_transformBy_invalid_all_string_values(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.transformBy("1, 0, 0, 1, 0, 0")
def test_transformBy_invalid_int_value(self):
    # the matrix must be a 6-tuple, not a bare number
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.transformBy(123)
# moveBy
def test_moveBy_valid(self):
    anchor = self.getAnchor_generic()
    anchor.moveBy((-1, 2))
    self.assertEqual(anchor.x, 0)
    self.assertEqual(anchor.y, 4)
def test_moveBy_invalid_one_string_value(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.moveBy((-1, "2"))
def test_moveBy_invalid_all_strings_value(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.moveBy("-1, 2")
def test_moveBy_invalid_int_value(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.moveBy(1)
# scaleBy
def test_scaleBy_valid_one_value_no_origin(self):
    # a single value scales both axes
    anchor = self.getAnchor_generic()
    anchor.scaleBy((-2))
    self.assertEqual(anchor.x, -2)
    self.assertEqual(anchor.y, -4)
def test_scaleBy_valid_two_values_no_origin(self):
    anchor = self.getAnchor_generic()
    anchor.scaleBy((-2, 3))
    self.assertEqual(anchor.x, -2)
    self.assertEqual(anchor.y, 6)
def test_scaleBy_valid_two_values_origin(self):
    # scaling about the anchor's own position leaves it unchanged
    anchor = self.getAnchor_generic()
    anchor.scaleBy((-2, 3), origin=(1, 2))
    self.assertEqual(anchor.x, 1)
    self.assertEqual(anchor.y, 2)
def test_scaleBy_invalid_one_string_value(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.scaleBy((-1, "2"))
def test_scaleBy_invalid_two_string_values(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.scaleBy("-1, 2")
def test_scaleBy_invalid_tuple_too_many_values(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(ValueError):
        anchor.scaleBy((-1, 2, -3))
# rotateBy
def test_rotateBy_valid_no_origin(self):
    anchor = self.getAnchor_generic()
    anchor.rotateBy(45)
    self.assertAlmostEqual(anchor.x, -0.707, places=3)
    self.assertAlmostEqual(anchor.y, 2.121, places=3)
def test_rotateBy_valid_origin(self):
    # rotating about the anchor's own position leaves it unchanged
    anchor = self.getAnchor_generic()
    anchor.rotateBy(45, origin=(1, 2))
    self.assertAlmostEqual(anchor.x, 1)
    self.assertAlmostEqual(anchor.y, 2)
def test_rotateBy_invalid_string_value(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(TypeError):
        anchor.rotateBy("45")
def test_rotateBy_invalid_too_large_value_positive(self):
    # angles are limited to +/-360 degrees
    anchor = self.getAnchor_generic()
    with self.assertRaises(ValueError):
        anchor.rotateBy(361)
def test_rotateBy_invalid_too_large_value_negative(self):
    anchor = self.getAnchor_generic()
    with self.assertRaises(ValueError):
        anchor.rotateBy(-361)
# skewBy
def test_skewBy_valid_no_origin_one_value(self):
    # a single value skews only the x axis
    anchor = self.getAnchor_generic()
    anchor.skewBy(100)
    self.assertAlmostEqual(anchor.x, -10.343, places=3)
    self.assertEqual(anchor.y, 2.0)
def test_skewBy_valid_no_origin_two_values(self):
    anchor = self.getAnchor_generic()
    anchor.skewBy((100, 200))
    self.assertAlmostEqual(anchor.x, -10.343, places=3)
    self.assertAlmostEqual(anchor.y, 2.364, places=3)
def test_skewBy_valid_origin_one_value(self):
    # skewing about the anchor's own position leaves it unchanged
    anchor = self.getAnchor_generic()
    anchor.skewBy(100, origin=(1, 2))
    self.assertEqual(anchor.x, 1)
    self.assertEqual(anchor.y, 2)
def test_skewBy_valid_origin_two_values(self):
    anchor = self.getAnchor_generic()
    anchor.skewBy((100, 200), origin=(1, 2))
    self.assertEqual(anchor.x, 1)
    self.assertEqual(anchor.y, 2)
# round
def getAnchor_round(self):
    """Return an anchor with fractional coordinates, for rounding tests."""
    anchor = self.getAnchor_generic()
    anchor.x = 1.1
    anchor.y = 2.5
    return anchor
def test_round_close_to(self):
    anchor = self.getAnchor_round()
    anchor.round()
    self.assertEqual(anchor.x, 1)
def test_round_at_half(self):
    # exactly .5 is expected to round up to 3, not down
    anchor = self.getAnchor_round()
    anchor.round()
    self.assertEqual(anchor.y, 3)
# ----
# Hash
# ----
def test_hash_object_self(self):
    # an object's hash is stable
    anchor_one = self.getAnchor_generic()
    self.assertEqual(
        hash(anchor_one),
        hash(anchor_one)
    )
def test_hash_object_other(self):
    # distinct objects hash differently (identity-based hashing)
    anchor_one = self.getAnchor_generic()
    anchor_two = self.getAnchor_generic()
    self.assertNotEqual(
        hash(anchor_one),
        hash(anchor_two)
    )
def test_hash_object_self_variable_assignment(self):
    anchor_one = self.getAnchor_generic()
    a = anchor_one
    self.assertEqual(
        hash(anchor_one),
        hash(a)
    )
def test_hash_object_other_variable_assignment(self):
    anchor_one = self.getAnchor_generic()
    anchor_two = self.getAnchor_generic()
    a = anchor_one
    self.assertNotEqual(
        hash(anchor_two),
        hash(a)
    )
def test_is_hashable(self):
    """Anchors must be hashable (usable as dict keys / set members)."""
    # collections.Hashable was a deprecated alias removed in Python 3.10;
    # the canonical ABC lives in collections.abc.
    import collections.abc
    anchor_one = self.getAnchor_generic()
    self.assertTrue(
        isinstance(anchor_one, collections.abc.Hashable)
    )
# -------
# Parents
# -------
def test_get_parent_font(self):
    # anchor reaches its font through glyph -> layer -> font
    font, _ = self.objectGenerator("font")
    layer = font.newLayer("L")
    glyph = layer.newGlyph("X")
    anchor = glyph.appendAnchor("anchor 0", (0, 0))
    self.assertIsNotNone(anchor.font)
    self.assertEqual(
        anchor.font,
        font
    )
def test_get_parent_noFont(self):
    # a layer without a font yields anchor.font is None
    layer, _ = self.objectGenerator("layer")
    glyph = layer.newGlyph("X")
    anchor = glyph.appendAnchor("anchor 0", (0, 0))
    self.assertIsNone(anchor.font)
def test_get_parent_layer(self):
    layer, _ = self.objectGenerator("layer")
    glyph = layer.newGlyph("X")
    anchor = glyph.appendAnchor("anchor 0", (0, 0))
    self.assertIsNotNone(anchor.layer)
    self.assertEqual(
        anchor.layer,
        layer
    )
def test_get_parent_noLayer(self):
    # a glyph without a layer yields neither font nor layer
    glyph, _ = self.objectGenerator("glyph")
    anchor = glyph.appendAnchor("anchor 0", (0, 0))
    self.assertIsNone(anchor.font)
    self.assertIsNone(anchor.layer)
def test_get_parent_glyph(self):
    glyph, _ = self.objectGenerator("glyph")
    anchor = glyph.appendAnchor("anchor 0", (0, 0))
    self.assertIsNotNone(anchor.glyph)
    self.assertEqual(
        anchor.glyph,
        glyph
    )
def test_get_parent_noGlyph(self):
    # an orphan anchor has no parents at all
    anchor, _ = self.objectGenerator("anchor")
    self.assertIsNone(anchor.font)
    self.assertIsNone(anchor.layer)
    self.assertIsNone(anchor.glyph)
def test_set_parent_glyph(self):
    # an orphan anchor may be assigned a glyph once
    glyph, _ = self.objectGenerator("glyph")
    anchor = self.getAnchor_generic()
    anchor.glyph = glyph
    self.assertIsNotNone(anchor.glyph)
    self.assertEqual(
        anchor.glyph,
        glyph
    )
def test_set_parent_glyph_none(self):
    anchor, _ = self.objectGenerator("anchor")
    anchor.glyph = None
    self.assertIsNone(anchor.glyph)
def test_set_parent_glyph_exists(self):
    # re-parenting an anchor that already has a glyph is forbidden
    glyph, _ = self.objectGenerator("glyph")
    otherGlyph, _ = self.objectGenerator("glyph")
    anchor = glyph.appendAnchor("anchor 0", (0, 0))
    with self.assertRaises(AssertionError):
        anchor.glyph = otherGlyph
# --------
# Equality
# --------
def test_object_equal_self(self):
    anchor_one = self.getAnchor_generic()
    self.assertEqual(
        anchor_one,
        anchor_one
    )
def test_object_not_equal_other(self):
    # equality is identity-based: equal attributes are not enough
    anchor_one = self.getAnchor_generic()
    anchor_two = self.getAnchor_generic()
    self.assertNotEqual(
        anchor_one,
        anchor_two
    )
def test_object_equal_variable_assignment_self(self):
    # mutating through an alias does not break identity equality
    anchor_one = self.getAnchor_generic()
    a = anchor_one
    a.moveBy((-1, 2))
    self.assertEqual(
        anchor_one,
        a
    )
def test_object_not_equal_variable_assignment_other(self):
    anchor_one = self.getAnchor_generic()
    anchor_two = self.getAnchor_generic()
    a = anchor_one
    self.assertNotEqual(
        anchor_two,
        a
    )
# ---------
# Selection
# ---------
def test_selected_true(self):
    anchor = self.getAnchor_generic()
    # selection is optional for environments: skip if unsupported
    try:
        anchor.selected = False
    except NotImplementedError:
        return
    anchor.selected = True
    self.assertEqual(
        anchor.selected,
        True
    )
def test_selected_false(self):
anchor = self.getAnchor_generic()
try:
anchor.selected = False
except NotImplementedError:
return
| |
0:
time.sleep(0.003)
if BotUtils.detect_xprompt(gamename):
break
try:
newx, newy = Looting.grab_farloot_locations(gamename, rect)[
0]
time_taken = time.time() - loop_time
movementx = confirmed[0] - newx
speed = movementx/time_taken
if speed != 0:
time_remaining = abs(
relx/speed) - time_taken
rect = [newx-100, newy-30, newx+100, newy+30]
except:
try:
time.sleep(time_remaining)
break
except:
return False
for key in ["left", "right"]:
CustomInput.release_key(CustomInput.key_map[key], key)
BotUtils.move_towards(rely, "y")
start_time = time.time()
if rely < 0:
expected_time = abs(rely/7.5)
else:
expected_time = abs(rely/5.5)
while not BotUtils.detect_xprompt(gamename):
time.sleep(0.005)
# After moving in opposite direction
if time.time() - start_time > 10:
# If have moved opposite with no result for equal amount
if time.time() - start_time > 10 + 2*(1 + expected_time):
for key in ["up", "down"]:
CustomInput.release_key(CustomInput.key_map[key], key)
# Return falsepos so that it will ignore this detection
return "falsepos"
# If no result for 3 seconds
elif time.time() - start_time > 1 + expected_time:
# Try moving in the opposite direction
for key in ["up", "down"]:
CustomInput.release_key(CustomInput.key_map[key], key)
BotUtils.move_towards(-1*rely, "y")
start_time -= 8.5
for key in ["up", "down"]:
CustomInput.release_key(CustomInput.key_map[key], key)
pydirectinput.press("x")
return True
def grab_farloot_locations(gamename=False, rect=False, return_image=False):
    """Locate loot-label rows on screen and return their center points.

    Args:
        gamename: window title to capture from; when falsy a bundled test
            image is used instead of a live screenshot.
        rect: optional [left, top, right, bottom] capture region.
        return_image: when True, also return the captured image plus the
            x/y offsets applied to the returned points.

    Returns:
        list of (x, y) centers, or False when nothing usable was found.
        With return_image=True: (points, image, offset_x, offset_y).
    """
    # Default capture region. Hoisted so it is always bound: the original
    # only defined rect1 inside the gamename-and-no-rect branch, which made
    # the return_image path raise NameError when no gamename was given.
    rect1 = [100, 160, 1223, 688]
    if gamename:
        if not rect:
            wincap = WindowCapture(gamename, rect1)
        else:
            wincap = WindowCapture(gamename, rect)
        original_image = wincap.get_screenshot()
    else:
        # No window: fall back to the bundled test screenshot.
        original_image = cv2.imread(os.path.dirname(
            os.path.abspath(__file__)) + "/testimages/lootscene.jpg")
    # Isolate the loot-label hue band, then smear horizontally so each
    # text row merges into a single blob.
    filter = HsvFilter(15, 180, 0, 20, 255, 63, 0, 0, 0, 0)
    output_image = BotUtils.filter_blackwhite_invert(
        filter, original_image, True, 0, 180)
    output_image = cv2.blur(output_image, (8, 1))
    output_image = cv2.blur(output_image, (8, 1))
    output_image = cv2.blur(output_image, (8, 1))
    _, thresh = cv2.threshold(output_image, 127, 255, 0)
    contours, _ = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    if len(contours) < 2:
        return False
    # Drop the largest contour — presumably the background/frame blob.
    contours.pop(0)
    rectangles = []
    for contour in contours:
        (x, y), _ = cv2.minEnclosingCircle(contour)
        # Appended twice so cv2.groupRectangles (groupThreshold=1) keeps
        # singleton detections instead of discarding them.
        rectangles.append([x-50, y, 100, 5])
        rectangles.append([x-50, y, 100, 5])
    rectangles, _ = cv2.groupRectangles(
        rectangles, groupThreshold=1, eps=0.9)
    if len(rectangles) < 1:
        return False
    points = []
    for (x, y, w, h) in rectangles:
        # Translate back to full-window coordinates.
        if rect:
            x += rect[0]
            y += rect[1]
        else:
            # NOTE(review): offsets (100, 135) differ from rect1's top of
            # 160 — looks intentional for the test image, but confirm.
            x += 100
            y += 135
        center_x = x + int(w/2)
        center_y = y + int(h/2)
        points.append((center_x, center_y))
    if return_image:
        if rect:
            return points, original_image, rect[0], rect[1]
        else:
            return points, original_image, rect1[0], rect1[1]
    return points
class Events:
    """Pixel-probe detectors and click helpers for in-game event screens.

    NOTE(review): methods are declared without self/@staticmethod and are
    evidently invoked through the class (e.g. Events.detect_go(gamename)),
    which works as plain functions in Python 3.
    """
    def choose_random_reward(gamename):
        # Click a random cell of the reward grid (3 columns x 4 rows),
        # then click the accept button.
        wincap = WindowCapture(gamename)
        posx = wincap.window_rect[0] + (460+(180*random.randint(0, 2)))
        posy = wincap.window_rect[1] + (200+(132*random.randint(0, 3)))
        pydirectinput.click(int(posx), int(posy))
        time.sleep(0.1)
        # Now accept the reward
        pydirectinput.click(
            wincap.window_rect[0]+750, wincap.window_rect[1]+720)
    def detect_reward_choice_open(gamename):
        # Sample the two end pixels of a thin strip; the summed channel
        # thresholds fingerprint the reward-choice dialog.
        wincap = WindowCapture(gamename, [503, 90, 535, 92])
        image = wincap.get_screenshot()
        a, b, c = [int(i) for i in image[0][0]]
        d, e, f = [int(i) for i in image[0][-1]]
        if a + d > 400:
            if b + e > 500:
                if c + f < 105:
                    return True
        return False
    def detect_move_reward_screen(gamename):
        wincap = WindowCapture(gamename, [581, 270, 593, 272])
        image = wincap.get_screenshot()
        a, b, c = [int(i) for i in image[0][0]]
        d, e, f = [int(i) for i in image[0][-1]]
        # channel sums must fall inside narrow bands for a match
        if a + d > 360 and a + d < 400:
            if b + e > 360 and b + e < 400:
                if c + f < 10:
                    return True
        return False
    def detect_endlevel_chest(gamename):
        wincap = WindowCapture(gamename, [454, 250, 525, 252])
        image = wincap.get_screenshot()
        a, b, c = [int(i) for i in image[0][0]]
        d, e, f = [int(i) for i in image[0][-1]]
        if a + d < 50:
            if b + e > 480:
                if c + f > 290 and c+f < 320:
                    return True
        return False
    def detect_endlevel_bonus_area(gamename):
        wincap = WindowCapture(gamename, [503, 487, 514, 589])
        image = wincap.get_screenshot()
        a, b, c = [int(i) for i in image[0][0]]
        d, e, f = [int(i) for i in image[0][-1]]
        if a + d > 400:
            if b + e > 400:
                if c + f > 400:
                    return True
        return False
    def detect_in_dungeon(wincap=False):
        # The window title is read from gamename.txt when no capture is
        # provided by the caller.
        if not wincap:
            with open("gamename.txt") as f:
                gamename = f.readline()
            wincap = WindowCapture(gamename, [1090, 331, 1092, 353])
        image = wincap.get_screenshot()
        a, b, c = [int(i) for i in image[0][0]]
        # note: second sample is the first column of the LAST row here
        d, e, f = [int(i) for i in image[-1][0]]
        if d < 20:
            if a + b + e > 400 and a+b+e < 500:
                if c + f > 480:
                    return True
        return False
    def detect_go(gamename):
        wincap = WindowCapture(gamename, [623, 247, 628, 249])
        image = wincap.get_screenshot()
        a, b, c = [int(i) for i in image[0][0]]
        if a < 30:
            if b > 240:
                if c > 140:
                    return True
        return False
    def detect_one_card(gamename):
        # Cards only show up once one has been picked
        # Therefore need to check against bronze, gold, silver
        wincap = WindowCapture(gamename, [833, 44, 835, 46])
        image = wincap.get_screenshot()
        a, b, c = [int(i) for i in image[0][0]]
        # Bronze
        if a == 27:
            if b == 48:
                if c == 87:
                    return True
        # Silver
        if a == 139:
            if b == 139:
                if c == 139:
                    return True
        # Gold
        if a == 38:
            if b == 129:
                if c == 160:
                    return True
        return False
    def detect_yes_no(gamename):
        # OCR the button region and look for the literal text "Yes".
        wincap = WindowCapture(gamename, [516, 426, 541, 441])
        image = wincap.get_screenshot()
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=Yes'
        result = pytesseract.image_to_string(
            rgb, lang='eng', config=tess_config)[:-2]
        if result == "Yes":
            return True
        return False
    def detect_resurrect_prompt(gamename):
        wincap = WindowCapture(gamename, [763, 490, 818, 492])
        image = wincap.get_screenshot()
        a, b, c = [int(i) for i in image[0][0]]
        d, e, f = [int(i) for i in image[-1][0]]
        if a + d > 500:
            if b + e > 500:
                if c + f > 500:
                    return True
        return False
    def detect_store(gamename=False):
        if not gamename:
            with open("gamename.txt") as f:
                gamename = f.readline()
        wincap = WindowCapture(gamename, [1084, 265, 1099, 267])
        image = wincap.get_screenshot()
        a, b, c = [int(i) for i in image[0][0]]
        d, e, f = [int(i) for i in image[-1][0]]
        if a + d > 500:
            if b + e > 500:
                if c + f > 500:
                    return True
        return False
class RHClick:
    """Fixed-coordinate click helpers for menu/UI navigation.

    All click points are offsets from the window's top-left corner
    (wincap.window_rect[0], wincap.window_rect[1]). Methods are declared
    without self and are invoked through the class, e.g.
    RHClick.click_yes(gamename).
    """
    def click_yes(gamename):
        # "Yes" button of the yes/no dialog.
        wincap = WindowCapture(gamename)
        pydirectinput.click(
            wincap.window_rect[0]+528, wincap.window_rect[1]+433)
    def click_no(gamename):
        # "No" button of the yes/no dialog.
        wincap = WindowCapture(gamename)
        pydirectinput.click(
            wincap.window_rect[0]+763, wincap.window_rect[1]+433)
    def click_otherworld_ok(gamename):
        wincap = WindowCapture(gamename)
        pydirectinput.click(
            wincap.window_rect[0]+503, wincap.window_rect[1]+487)
    def click_otherworld_no(gamename):
        wincap = WindowCapture(gamename)
        pydirectinput.click(
            wincap.window_rect[0]+778, wincap.window_rect[1]+487)
    def click_choose_map(gamename):
        wincap = WindowCapture(gamename)
        pydirectinput.click(
            wincap.window_rect[0]+1150, wincap.window_rect[1]+210)
    def click_explore_again(gamename):
        wincap = WindowCapture(gamename)
        pydirectinput.click(
            wincap.window_rect[0]+1150, wincap.window_rect[1]+152)
    def click_back_to_town(gamename):
        wincap = WindowCapture(gamename)
        pydirectinput.click(
            wincap.window_rect[0]+1150, wincap.window_rect[1]+328)
    def click_map_number(gamename, mapnum):
        """Click the map-selection entry for `mapnum` (supported: 5-18)."""
        wincap = WindowCapture(gamename)
        map_to_clickpoints = {
            5: (728, 521),
            6: (640, 631),
            7: (605, 455),
            8: (542, 350),
            9: (293, 297),
            10: (777, 406),
            11: (140, 370),
            12: (500, 246),
            13: (500, 672),
            14: (419, 478),
            15: (423, 263),
            16: (563, 562),
            17: (642, 432),
            18: (249, 325)
        }
        x, y = map_to_clickpoints[mapnum]
        pydirectinput.click(wincap.window_rect[0]+x, wincap.window_rect[1]+y)
    def choose_difficulty_and_enter(gamename, diff):
        """Select difficulty ('N', 'H', 'VH' or 'BM'), then enter the dungeon.

        Bug fix: the original used ``num_clicks == 2`` / ``num_clicks == 3``
        (a comparison, i.e. a no-op) for 'VH' and 'BM', so those
        difficulties were never actually selected.
        """
        wincap = WindowCapture(gamename)
        # Number of clicks on the difficulty toggle, counted from 'N';
        # unknown values fall back to 0 clicks, matching the original.
        num_clicks = {"N": 0, "H": 1, "VH": 2, "BM": 3}.get(diff, 0)
        for _ in range(num_clicks):
            pydirectinput.click(
                wincap.window_rect[0]+618, wincap.window_rect[1]+333)
            time.sleep(0.3)
        # Then click on enter dungeon
        pydirectinput.click(
            wincap.window_rect[0]+1033, wincap.window_rect[1]+736)
    def go_to_change_character(gamename):
        # Open the esc menu if it is not already open, then click the
        # "change character" entry.
        if not BotUtils.detect_menu_open(gamename):
            pydirectinput.press('esc')
        wincap = WindowCapture(gamename)
        pydirectinput.click(
            wincap.window_rect[0]+640, wincap.window_rect[1]+363)
    def exit_game(gamename):
        # Open the esc menu if needed, click exit, then confirm.
        if not BotUtils.detect_menu_open(gamename):
            pydirectinput.press('esc')
        wincap = WindowCapture(gamename)
        pydirectinput.click(
            wincap.window_rect[0]+640, wincap.window_rect[1]+480)
        time.sleep(0.2)
        pydirectinput.click(
            wincap.window_rect[0]+640, wincap.window_rect[1]+428)
    def choose_character(gamename, charnum):
        """Select character slot `charnum` (1-16; slots >8 live on page 2)."""
        wincap = WindowCapture(gamename)
        char_clickpoints = {
            1: (1100, 140),
            2: (1100, 210),
            3: (1100, 280),
            4: (1100, 350),
            5: (1100, 420),
            6: (1100, 490),
            7: (1100, 560),
            8: (1100, 630)
        }
        if charnum > 8:
            # Switch to the second character page first.
            pydirectinput.click(
                wincap.window_rect[0]+1165, wincap.window_rect[1]+680)
            x, y = char_clickpoints[charnum-8]
        else:
            pydirectinput.click(
                wincap.window_rect[0]+1035, wincap.window_rect[1]+680)
            x, y = char_clickpoints[charnum]
        time.sleep(0.2)
        pydirectinput.click(wincap.window_rect[0]+x, wincap.window_rect[1]+y)
        time.sleep(0.2)
        pydirectinput.click(
            wincap.window_rect[0]+640, wincap.window_rect[1]+765)
class Vision:
def __init__(self, needle_img_path, method=cv2.TM_CCOEFF_NORMED):
    """Load the template image and remember the matching method."""
    self.needle_img = cv2.imread(needle_img_path, cv2.IMREAD_UNCHANGED)
    # Cache the template dimensions (shape is height-first).
    self.needle_h, self.needle_w = self.needle_img.shape[0], self.needle_img.shape[1]
    # Supported methods: TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR,
    # TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED
    self.method = method
def find(self, haystack_img, threshold=0.7, max_results=15, epsilon=0.5):
result = cv2.matchTemplate(haystack_img, self.needle_img, self.method)
locations = np.where(result >= threshold)
locations = list(zip(*locations[::-1]))
if not locations:
return np.array([], dtype=np.int32).reshape(0, 4)
rectangles = []
for loc in locations:
rect = [int(loc[0]), int(loc[1]), | |
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'frr-exclude-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('tiebreaker', REFERENCE_ENUM_CLASS, 'IsisInterfaceFrrTiebreakerEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisInterfaceFrrTiebreakerEnum',
[], [],
''' Tiebreaker for which configuration
applies
''',
'tiebreaker',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Preference order among tiebreakers
''',
'index',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-tiebreaker',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers',
False,
[
_MetaInfoClassMember('interface-frr-tiebreaker', REFERENCE_LIST, 'InterfaceFrrTiebreaker' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers.InterfaceFrrTiebreaker',
[], [],
''' Configure tiebreaker for multiple
backups
''',
'interface_frr_tiebreaker',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-tiebreakers',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable',
False,
[
_MetaInfoClassMember('frr-exclude-interfaces', REFERENCE_CLASS, 'FrrExcludeInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces',
[], [],
''' FRR exclusion configuration
''',
'frr_exclude_interfaces',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-remote-lfa-max-metrics', REFERENCE_CLASS, 'FrrRemoteLfaMaxMetrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics',
[], [],
''' Remote LFA maxmimum metric
''',
'frr_remote_lfa_max_metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-remote-lfa-types', REFERENCE_CLASS, 'FrrRemoteLfaTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes',
[], [],
''' Remote LFA Enable
''',
'frr_remote_lfa_types',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frr-types', REFERENCE_CLASS, 'FrrTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes',
[], [],
''' Type of FRR computation per level
''',
'frr_types',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frrlfa-candidate-interfaces', REFERENCE_CLASS, 'FrrlfaCandidateInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces',
[], [],
''' FRR LFA candidate configuration
''',
'frrlfa_candidate_interfaces',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('frrtilfa-types', REFERENCE_CLASS, 'FrrtilfaTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes',
[], [],
''' TI LFA Enable
''',
'frrtilfa_types',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-frr-tiebreaker-defaults', REFERENCE_CLASS, 'InterfaceFrrTiebreakerDefaults' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakerDefaults',
[], [],
''' Interface FRR Default tiebreaker
configuration
''',
'interface_frr_tiebreaker_defaults',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-frr-tiebreakers', REFERENCE_CLASS, 'InterfaceFrrTiebreakers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.InterfaceFrrTiebreakers',
[], [],
''' Interface FRR tiebreakers configuration
''',
'interface_frr_tiebreakers',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-frr-table',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.MplsLdp' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.MplsLdp',
False,
[
_MetaInfoClassMember('sync-level', ATTRIBUTE, 'int' , None, None,
[('0', '2')], [],
''' Enable MPLS LDP Synchronization for an
IS-IS level
''',
'sync_level',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'mpls-ldp',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics.AutoMetric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics.AutoMetric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('proactive-protect', ATTRIBUTE, 'int' , None, None,
[('1', '16777214')], [],
''' Allowed auto metric:<1-63> for narrow
,<1-16777214> for wide
''',
'proactive_protect',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'auto-metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics',
False,
[
_MetaInfoClassMember('auto-metric', REFERENCE_LIST, 'AutoMetric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics.AutoMetric',
[], [],
''' AutoMetric Proactive-Protect
configuration. Legal value depends on
the metric-style specified for the
topology. If the metric-style defined is
narrow, then only a value between <1-63>
is allowed and if the metric-style is
defined as wide, then a value between
<1-16777214> is allowed as the
auto-metric value.
''',
'auto_metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'auto-metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags.AdminTag' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags.AdminTag',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('admin-tag', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Tag to associate with connected routes
''',
'admin_tag',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'admin-tag',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags',
False,
[
_MetaInfoClassMember('admin-tag', REFERENCE_LIST, 'AdminTag' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags.AdminTag',
[], [],
''' Admin tag for advertised interface
connected routes
''',
'admin_tag',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'admin-tags',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceLinkGroup' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceLinkGroup',
False,
[
_MetaInfoClassMember('level', ATTRIBUTE, 'int' , None, None,
[('0', '2')], [],
''' Level in which link group will be
effective
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('link-group', ATTRIBUTE, 'str' , None, None,
[(0, 40)], [],
''' Link Group
''',
'link_group',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-link-group',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum' : _MetaInfoEnum('MetricEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg',
{
'maximum':'maximum',
}, 'Cisco-IOS-XR-clns-isis-cfg', _yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg']),
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('metric', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False, [
_MetaInfoClassMember('metric', REFERENCE_ENUM_CLASS, 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum',
[], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metric', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Allowed metric: <1-63> for narrow,
<1-16777215> for wide
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
]),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metric',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics',
False,
[
_MetaInfoClassMember('metric', REFERENCE_LIST, 'Metric' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric',
[], [],
''' Metric configuration. Legal value depends on
the metric-style specified for the topology. If
the metric-style defined is narrow, then only a
value between <1-63> is allowed and if the
metric-style is defined as wide, then a value
between <1-16777215> is allowed as the metric
value. All routers exclude links with the
maximum wide metric (16777215) from their SPF
''',
'metric',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'metrics',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights.Weight' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights.Weight',
False,
[
_MetaInfoClassMember('level', REFERENCE_ENUM_CLASS, 'IsisInternalLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisInternalLevelEnum',
[], [],
''' Level to which configuration applies
''',
'level',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('weight', ATTRIBUTE, 'int' , None, None,
[('1', '16777214')], [],
''' Weight to be configured under interface for
Load Balancing. Allowed weight: <1-16777215>
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weight',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights',
False,
[
_MetaInfoClassMember('weight', REFERENCE_LIST, 'Weight' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights.Weight',
[], [],
''' Weight configuration under interface for load
balancing
''',
'weight',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'weights',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName',
False,
[
_MetaInfoClassMember('topology-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Topology Name
''',
'topology_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('admin-tags', REFERENCE_CLASS, 'AdminTags' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags',
[], [],
''' admin-tag configuration
''',
'admin_tags',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('auto-metrics', REFERENCE_CLASS, 'AutoMetrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics',
[], [],
''' AutoMetric configuration
''',
'auto_metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-af-state', REFERENCE_ENUM_CLASS, 'IsisInterfaceAfStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'IsisInterfaceAfStateEnum',
[], [],
''' Interface state
''',
'interface_af_state',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-frr-table', REFERENCE_CLASS, 'InterfaceFrrTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable',
[], [],
''' Fast-ReRoute configuration
''',
'interface_frr_table',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('interface-link-group', REFERENCE_CLASS, 'InterfaceLinkGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceLinkGroup',
[], [],
''' Provide link group name and level
''',
'interface_link_group',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('metrics', REFERENCE_CLASS, 'Metrics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics',
[], [],
''' Metric configuration
''',
'metrics',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('mpls-ldp', REFERENCE_CLASS, 'MplsLdp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.MplsLdp',
[], [],
''' MPLS LDP configuration
''',
'mpls_ldp',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('prefix-sid', REFERENCE_CLASS, 'PrefixSid' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.PrefixSid',
[], [],
''' Assign prefix SID to an interface,
ISISPHPFlag will be rejected if set to
disable, ISISEXPLICITNULLFlag will
override the value of ISISPHPFlag
''',
'prefix_sid',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('running', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' The presence of this object allows an
address-family to be run over the
interface in question.This must be the
first object created under the
InterfaceAddressFamily container, and the
last one deleted
''',
'running',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('weights', REFERENCE_CLASS, 'Weights' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights',
[], [],
''' Weight configuration
''',
'weights',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'topology-name',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf',
False,
[
_MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'IsisAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisAddressFamilyEnum',
[], [],
''' Address family
''',
'af_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'IsisSubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes', 'IsisSubAddressFamilyEnum',
[], [],
''' Sub address family
''',
'saf_name',
'Cisco-IOS-XR-clns-isis-cfg', True),
_MetaInfoClassMember('interface-af-data', REFERENCE_CLASS, 'InterfaceAfData' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData',
[], [],
''' Data container.
''',
'interface_af_data',
'Cisco-IOS-XR-clns-isis-cfg', False),
_MetaInfoClassMember('topology-name', REFERENCE_LIST, 'TopologyName' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg', 'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName',
[], [],
''' keys: topology-name
''',
'topology_name',
'Cisco-IOS-XR-clns-isis-cfg', False),
],
'Cisco-IOS-XR-clns-isis-cfg',
'interface-af',
_yang_ns._namespaces['Cisco-IOS-XR-clns-isis-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg'
),
},
'Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs' : {
'meta_info' : _MetaInfoClass('Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs',
False,
[
_MetaInfoClassMember('interface-af', | |
extract_text(data['awardID']['awardContractID']['modNumber'])
except (KeyError, TypeError):
unique_string += "-none-"
unique_string += "_"
try:
unique_string += extract_text(data['awardID']['referencedIDVID']['PIID'])
except (KeyError, TypeError):
unique_string += "-none-"
unique_string += "_"
try:
unique_string += extract_text(data['awardID']['awardContractID']['transactionNumber'])
except (KeyError, TypeError):
unique_string += "-none-"
else:
try:
unique_string += extract_text(data['contractID']['IDVID']['agencyID'])
except (KeyError, TypeError):
unique_string += "-none-"
# referenced_idv_agency_iden not used in IDV identifier, just set it to "-none-"
unique_string += "_-none-_"
try:
unique_string += extract_text(data['contractID']['IDVID']['PIID'])
except (KeyError, TypeError):
unique_string += "-none-"
unique_string += "_"
try:
unique_string += extract_text(data['contractID']['IDVID']['modNumber'])
except (KeyError, TypeError):
unique_string += "-none-"
# parent_award_id not used in IDV identifier and transaction_number not in IDV feed, just set them to "-none-"
unique_string += "_-none-_-none-"
return unique_string
def create_processed_data_list(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code,
                               state_code_list, country_list, exec_comp_dict):
    """ Create a list of processed data

        Args:
            data: an object containing the data gathered from the feed
            contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
            sess: the database connection
            sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
            county_by_name: a dictionary containing all county codes, keyed by state and county name
            county_by_code: a dictionary containing all county names, keyed by state and county code
            state_code_list: a dictionary containing all state names, keyed by state code
            country_list: a dictionary containing all country names, keyed by country code
            exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number

        Returns:
            A list containing the processed and calculated data.
    """
    # NOTE: the previous docstring listed `sess` before `contract_type`,
    # opposite of the actual signature; fixed above.
    # One processed dict per feed entry (comprehension replaces append loop).
    return [process_data(value['content'][contract_type], sess, atom_type=contract_type,
                         sub_tier_list=sub_tier_list, county_by_name=county_by_name,
                         county_by_code=county_by_code, state_code_list=state_code_list,
                         country_list=country_list, exec_comp_dict=exec_comp_dict)
            for value in data]
def add_processed_data_list(data, sess):
    """ Bulk-insert already-processed FPDS rows; fall back to per-row upserts on duplicates.

        Args:
            data: a list of dicts of DetachedAwardProcurement column values
            sess: the database connection
    """
    try:
        # Fast path: one bulk insert for the whole batch.
        sess.bulk_save_objects([DetachedAwardProcurement(**fpds_data) for fpds_data in data])
        sess.commit()
    except IntegrityError:
        # At least one row already exists; undo the batch and retry each row
        # as an upsert keyed on detached_award_proc_unique.
        sess.rollback()
        logger.error("Attempted to insert duplicate FPDS data. Inserting each row in batch individually.")
        for fpds_obj in data:
            insert_statement = insert(DetachedAwardProcurement).values(**fpds_obj).\
                on_conflict_do_update(index_elements=['detached_award_proc_unique'], set_=fpds_obj)
            sess.execute(insert_statement)
        sess.commit()
def process_and_add(data, contract_type, sess, sub_tier_list, county_by_name, county_by_code, state_code_list,
                    country_list, exec_comp_dict, now, threaded=False):
    """ Start the processing for data and add it to the DB.

        Args:
            data: an object containing the data gathered from the feed
            contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
            sess: the database connection
            sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
            county_by_name: a dictionary containing all county codes, keyed by state and county name
            county_by_code: a dictionary containing all county names, keyed by state and county code
            state_code_list: a dictionary containing all state names, keyed by state code
            country_list: a dictionary containing all country names, keyed by country code
            exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number
            now: a timestamp indicating the time to set the updated_at to
            threaded: a boolean indicating whether the process is running as a thread or not
    """
    if threaded:
        # Threaded path: always upsert (insert-or-update keyed on
        # detached_award_proc_unique) and leave committing to the caller.
        for value in data:
            tmp_obj = process_data(value['content'][contract_type], sess, atom_type=contract_type,
                                   sub_tier_list=sub_tier_list, county_by_name=county_by_name,
                                   county_by_code=county_by_code, state_code_list=state_code_list,
                                   country_list=country_list, exec_comp_dict=exec_comp_dict)
            tmp_obj['updated_at'] = now
            insert_statement = insert(DetachedAwardProcurement).values(**tmp_obj).\
                on_conflict_do_update(index_elements=['detached_award_proc_unique'], set_=tmp_obj)
            sess.execute(insert_statement)
    else:
        # Non-threaded path: optimistic insert per row, committed immediately;
        # on a duplicate, roll back and update the existing row instead.
        for value in data:
            tmp_obj = process_data(value['content'][contract_type], sess, atom_type=contract_type,
                                   sub_tier_list=sub_tier_list, county_by_name=county_by_name,
                                   county_by_code=county_by_code, state_code_list=state_code_list,
                                   country_list=country_list, exec_comp_dict=exec_comp_dict)
            try:
                statement = insert(DetachedAwardProcurement).values(**tmp_obj)
                sess.execute(statement)
                sess.commit()
            except IntegrityError:
                sess.rollback()
                # Row already exists: refresh it in place with the new values.
                tmp_obj['updated_at'] = now
                sess.query(DetachedAwardProcurement).\
                    filter_by(detached_award_proc_unique=tmp_obj['detached_award_proc_unique']).\
                    update(tmp_obj, synchronize_session=False)
                sess.commit()
def get_with_exception_hand(url_string):
    """ Retrieve data from FPDS, allow for multiple retries and timeouts """
    # Progressive back-off schedule (seconds) between retries; the request
    # timeout also grows by 60s after every failed attempt.
    backoff_schedule = [5, 30, 60, 180, 300, 360, 420, 480, 540, 600]
    max_retries = len(backoff_schedule)
    attempt = -1
    request_timeout = 60
    while attempt < max_retries:
        try:
            resp = requests.get(url_string, timeout=request_timeout)
            break
        except (ConnectionResetError, ReadTimeoutError, ConnectionError, ReadTimeout) as e:
            attempt += 1
            request_timeout += 60
            if attempt >= max_retries:
                logger.info('Connection to FPDS feed lost, maximum retry attempts exceeded.')
                raise e
            logger.info('Connection exception. Sleeping {}s and then retrying with a max wait of {}s...'
                        .format(backoff_schedule[attempt], request_timeout))
            time.sleep(backoff_schedule[attempt])
    return resp
def get_total_expected_records(base_url):
    """ Retrieve the total number of expected records based on the last paginated URL """
    # get a single call so we can find the last page
    initial_request = get_with_exception_hand(base_url)
    initial_request_xml = xmltodict.parse(initial_request.text, process_namespaces=True, namespaces=FPDS_NAMESPACES)
    # retrieve all URLs
    try:
        urls_list = list_data(initial_request_xml['feed']['link'])
    except KeyError:
        # No pagination links at all on the first page.
        urls_list = []
    # retrieve the "last" URL from the list
    final_request_url = None
    for url in urls_list:
        if url['@rel'] == 'last':
            final_request_url = url['@href']
            # NOTE: `continue` (not `break`) means that if several 'last'
            # links were ever present, the final occurrence would win.
            continue
    # retrieve the count from the URL of the last page
    if not final_request_url:
        # Single-page feed: the entry count of the first page is the total.
        try:
            return len(list_data(initial_request_xml['feed']['entry']))
        except KeyError:
            return 0
    # retrieve the page from the final_request_url
    # (the record offset of the last page is encoded in its 'start' parameter)
    final_request_count = int(final_request_url.split('&start=')[-1])
    # retrieve the last page of data
    final_request = get_with_exception_hand(final_request_url)
    final_request_xml = xmltodict.parse(final_request.text, process_namespaces=True, namespaces=FPDS_NAMESPACES)
    try:
        entries_list = list_data(final_request_xml['feed']['entry'])
    except KeyError:
        raise Exception("Initial count failed, no entries in last page of request.")
    # Total = offset of the last page + number of entries on it.
    return final_request_count + len(entries_list)
def get_data(contract_type, award_type, now, sess, sub_tier_list, county_by_name, county_by_code, state_code_list,
country_list, exec_comp_dict, last_run=None, threaded=False, start_date=None, end_date=None, metrics=None):
""" Get the data from the atom feed based on contract/award type and the last time the script was run.
Args:
contract_type: a string indicating whether the atom feed being checked is 'award' or 'IDV'
award_type: a string indicating what the award type of the feed being checked is
now: a timestamp indicating the time to set the updated_at to
sess: the database connection
sub_tier_list: a dictionary containing all the sub tier agency information keyed by sub tier agency code
county_by_name: a dictionary containing all county codes, keyed by state and county name
county_by_code: a dictionary containing all county names, keyed by state and county code
state_code_list: a dictionary containing all state names, keyed by state code
country_list: a dictionary containing all country names, keyed by country code
exec_comp_dict: a dictionary containing all the data for Executive Compensation data keyed by DUNS number
last_run: a date indicating the last time the pull was run
threaded: a boolean indicating whether the process is running as a thread or not
start_date: a date indicating the first date to pull from (must be provided with end_date)
end_date: a date indicating the last date to pull from (must be provided with start_date)
metrics: a dictionary to gather metrics for the script in
"""
if not metrics:
metrics = {}
data = []
yesterday = now - datetime.timedelta(days=1)
utcnow = datetime.datetime.utcnow()
# if a date that the script was last successfully run is not provided, get all data
if not last_run:
params = 'SIGNED_DATE:[2016/10/01,' + yesterday.strftime('%Y/%m/%d') + '] '
metrics['start_date'] = '2016/10/01'
metrics['end_date'] = yesterday.strftime('%Y/%m/%d')
# if a date that the script was last successfully run is provided, get data since that date
else:
last_run_date = last_run - relativedelta(days=1)
params = 'LAST_MOD_DATE:[' + last_run_date.strftime('%Y/%m/%d') + ',' + yesterday.strftime('%Y/%m/%d') + '] '
metrics['start_date'] = last_run_date.strftime('%Y/%m/%d')
metrics['end_date'] = yesterday.strftime('%Y/%m/%d')
if start_date and end_date:
params = 'LAST_MOD_DATE:[' + start_date + ',' + end_date + '] '
metrics['start_date'] = start_date
metrics['end_date'] = end_date
base_url = feed_url + params + 'CONTRACT_TYPE:"' + contract_type.upper() + '" AWARD_TYPE:"' + award_type + '"'
logger.info('Starting get feed: %s', base_url)
# retrieve the total count of expected records for this pull
total_expected_records = get_total_expected_records(base_url)
logger.info('{} record(s) expected from this feed'.format(total_expected_records))
entries_processed = 0
while True:
async def atom_async_get(entries_already_processed):
response_list = []
loop = asyncio.get_event_loop()
futures = [
loop.run_in_executor(
None,
get_with_exception_hand,
base_url + "&start=" + str(entries_already_processed + (start_offset * MAX_ENTRIES))
)
for start_offset in range(REQUESTS_AT_ONCE)
]
for response in await asyncio.gather(*futures):
response_list.append(response.text)
pass
return response_list
# End async get requests def
loop = asyncio.get_event_loop()
full_response = loop.run_until_complete(atom_async_get(entries_processed))
for next_resp in full_response:
response_dict = xmltodict.parse(next_resp, process_namespaces=True, namespaces=FPDS_NAMESPACES)
try:
entries_per_response = list_data(response_dict['feed']['entry'])
except KeyError:
continue
if last_run:
for entry in entries_per_response:
data.append(entry)
entries_processed += 1
else:
data.extend(create_processed_data_list(entries_per_response, contract_type, sess, sub_tier_list,
county_by_name, county_by_code, state_code_list, country_list,
exec_comp_dict))
entries_processed += len(entries_per_response)
if len(data) % SPOT_CHECK_COUNT == 0 and entries_processed > total_expected_records:
# Find entries that don't have FPDS content and print them all
for next_resp in full_response:
response_dict = xmltodict.parse(next_resp, process_namespaces=True, namespaces=FPDS_NAMESPACES)
try:
list_data(response_dict['feed']['entry'])
except KeyError:
logger.info(response_dict)
continue
raise Exception("Total number of expected records has changed\nExpected: {}\nRetrieved so far: {}"
.format(total_expected_records, len(data)))
if data:
# Log which one we're on so we can keep track of how far | |
'bizzenphew',
'bizzenphooey',
'bizzenpocket',
'bizzenpoof',
'bizzenpop',
'bizzenpounce',
'bizzenpow',
'bizzenpretzel',
'bizzenquack',
'bizzenroni',
'bizzenscooter',
'bizzenscreech',
'bizzensmirk',
'bizzensnooker',
'bizzensnoop',
'bizzensnout',
'bizzensocks',
'bizzenspeed',
'bizzenspinner',
'bizzensplat',
'bizzensprinkles',
'bizzensticks',
'bizzenstink',
'bizzenswirl',
'bizzenteeth',
'bizzenthud',
'bizzentoes',
'bizzenton',
'bizzentoon',
'bizzentooth',
'bizzentwist',
'bizzenwhatsit',
'bizzenwhip',
'bizzenwig',
'bizzenwoof',
'bizzenzaner',
'bizzenzap',
'bizzenzapper',
'bizzenzilla',
'bizzenzoom',
'bla',
'blabbing',
'black',
'black-eyed',
'blackbeard',
"blackbeard's",
'blackbeards',
'blackbeared',
'blackbelt',
'blackberries',
'blackberry',
'blackbird',
'blackboard',
'blackboards',
'blackdeath',
'blacked',
'blackened',
'blackest',
'blackguard',
'blackguards',
'blackhaerts',
'blackhawk',
'blackheads',
'blackheart',
"blackheart's",
'blackhearts',
'blacking',
'blackish',
'blackjack',
'blackjacks',
'blacklist',
'blacklisted',
'blacklisting',
'blackness',
'blackout',
'blackoutaviation',
'blackouts',
'blackrage',
'blackrose',
'blacksail',
'blacksmith',
"blacksmith's",
'blacksmithing',
'blacksmiths',
'blackthorn',
'blackwatch',
'blackwater',
'bladder',
'bladders',
"blade's",
'bladebreakerr',
'blademasters',
'blades',
'bladeskulls',
'bladestorm',
'blaggards',
'blah',
'blair',
'blaise',
'blake',
'blakeley',
"blakeley's",
'blame',
'blamed',
'blamer',
'blamers',
'blames',
'blaming',
'blanada',
'blanago',
'blanca',
"blanca's",
'blanche',
'bland',
'blank',
'blanked',
'blanket',
'blankets',
'blanking',
'blankly',
'blanks',
'blanos',
'blaring',
'blast',
"blast'em",
'blasted',
'blaster',
'blasters',
"blastin'",
'blasting',
'blastoise',
'blasts',
'blasty',
'blat',
'blaze',
'bldg',
'bldgs',
'bleached',
'bleak',
'bleary',
'bled',
'bleep',
'bleeped',
'bleeper',
'bleepin',
'bleeping',
'bleeps',
'blend',
'blended',
'blender',
'blenders',
'blending',
'blends',
'blenny',
'bless',
'blessed',
'blesses',
'blessing',
'blessings',
'bleu',
'blew',
'bligh',
'blight',
'blighters',
'blimey',
'blimp',
'blind',
'blinded',
'blinder',
'blinders',
'blindfold',
'blindfolded',
'blinding',
'blindly',
'blindness',
'blinds',
'blindsided',
'bling',
'bling-bling',
'blingbling',
'blinged',
'blinging',
'blings',
'blink',
'blinked',
'blinker',
'blinkers',
'blinking',
'blinks',
'blinky',
'blip',
'blipping',
'bliss',
'blissfully',
'blister',
'blistering',
'blisters',
'blitz',
'blizzard',
'blizzards',
'bloat',
'bloated',
'bloats',
'blob',
'blobby',
'blobs',
'bloc',
'block',
"block's",
'blockade',
'blockader',
'blockades',
'blockading',
'blockbuster',
'blocked',
'blocker',
'blockers',
'blocking',
'blockout',
'blocks',
'blocky',
'bloke',
'blokes',
'blomma',
'blond',
'blonde',
"blonde's",
'blondes',
'blondie',
'blonds',
'bloodbrothers',
'bloodhounds',
'bloodless',
'bloodshot',
'bloodsucker',
'bloodsuckers',
'bloodthrushers',
'bloom',
'bloomers',
'blooming',
'blooms',
'bloop',
'bloopa',
'bloopers',
'blossom',
'blossoms',
'blossum',
'blot',
'blots',
'blouse',
'blowfish',
'blowy',
'blu-ray',
'blub',
'blubber',
'blubberbee',
'blubberberry',
'blubberblabber',
'blubberbocker',
'blubberboing',
'blubberboom',
'blubberbounce',
'blubberbouncer',
'blubberbrains',
'blubberbubble',
'blubberbumble',
'blubberbump',
'blubberbumper',
'blubberburger',
'blubberchomp',
'blubbercorn',
'blubbercrash',
'blubbercrumbs',
'blubbercrump',
'blubbercrunch',
'blubberdoodle',
'blubberdorf',
'blubberface',
'blubberfidget',
'blubberfink',
'blubberfish',
'blubberflap',
'blubberflapper',
'blubberflinger',
'blubberflip',
'blubberflipper',
'blubberfoot',
'blubberfuddy',
'blubberfussen',
'blubbergadget',
'blubbergargle',
'blubbergloop',
'blubberglop',
'blubbergoober',
'blubbergoose',
'blubbergrooven',
'blubberhoffer',
'blubberhopper',
'blubbering',
'blubberjinks',
'blubberklunk',
'blubberknees',
'blubbermarble',
'blubbermash',
'blubbermonkey',
'blubbermooch',
'blubbermouth',
'blubbermuddle',
'blubbermuffin',
'blubbermush',
'blubbernerd',
'blubbernoodle',
'blubbernose',
'blubbernugget',
'blubberphew',
'blubberphooey',
'blubberpocket',
'blubberpoof',
'blubberpop',
'blubberpounce',
'blubberpow',
'blubberpretzel',
'blubberquack',
'blubberroni',
'blubberscooter',
'blubberscreech',
'blubbersmirk',
'blubbersnooker',
'blubbersnoop',
'blubbersnout',
'blubbersocks',
'blubberspeed',
'blubberspinner',
'blubbersplat',
'blubbersprinkles',
'blubbersticks',
'blubberstink',
'blubberswirl',
'blubberteeth',
'blubberthud',
'blubbertoes',
'blubberton',
'blubbertoon',
'blubbertooth',
'blubbertwist',
'blubberwhatsit',
'blubberwhip',
'blubberwig',
'blubberwoof',
'blubberzaner',
'blubberzap',
'blubberzapper',
'blubberzilla',
'blubberzoom',
'bludgeon',
'bludgeoning',
'blue',
"blue's",
'bluebeards',
'bluebell',
'blueberries',
'blueberry',
'bluebird',
'bluebirds',
'blueblood',
'bluefishes',
'bluegrass',
'bluejay',
'blueprints',
'blues',
'bluff',
'bluffed',
'bluffer',
'bluffers',
'bluffing',
'bluffs',
'blunder',
'blundering',
'bluntly',
'blur',
'blurb',
'blurbs',
'blurred',
'blurry',
'blurs',
'blurting',
'blush',
'blushed',
'blushes',
'blushing',
'blustery',
'blut',
'blynken',
'bman',
'bo',
'boa',
'boar',
'board',
"board's",
'boardbot',
'boardbots',
'boarded',
'boarder',
'boarders',
'boarding',
'boards',
'boardwalk',
'boardwalks',
'boarhound',
'boars',
'boas',
'boast',
'boastful',
'boasting',
'boat',
"boat's",
'boated',
'boater',
'boaters',
'boathouse',
'boating',
'boatload',
'boatloads',
'boats',
'boatswain',
"boatswain's",
'boatswains',
'boatyard',
'bob',
'bobbed',
'bobber',
'bobbidi',
'bobble',
'bobbleheads',
'bobby',
"bobby's",
'bobbys',
'boberts',
'bobo',
'bobsled',
'bobsleded',
'bobsleding',
'bobsleds',
'bobsleigh',
'bobsleighes',
'bock',
'bodacious',
'bode',
'bodeguita',
'bodice',
'bodices',
'bodied',
'bodies',
'bodily',
'body',
"body's",
'bodyguard',
'bodyguards',
'boffo',
'bog',
'bogart',
'bogey',
'bogger',
'boggle',
'boggles',
'boggy',
'bogie',
'bogs',
'bogus',
'boi',
'boil',
'boiled',
'boiler',
'boiling',
'boils',
'boingenbee',
'boingenberry',
'boingenblabber',
'boingenbocker',
'boingenboing',
'boingenboom',
'boingenbounce',
'boingenbouncer',
'boingenbrains',
'boingenbubble',
'boingenbumble',
'boingenbump',
'boingenbumper',
'boingenburger',
'boingenchomp',
'boingencorn',
'boingencrash',
'boingencrumbs',
'boingencrump',
'boingencrunch',
'boingendoodle',
'boingendorf',
'boingenface',
'boingenfidget',
'boingenfink',
'boingenfish',
'boingenflap',
'boingenflapper',
'boingenflinger',
'boingenflip',
'boingenflipper',
'boingenfoot',
'boingenfuddy',
'boingenfussen',
'boingengadget',
'boingengargle',
'boingengloop',
'boingenglop',
'boingengoober',
'boingengoose',
'boingengrooven',
'boingenhoffer',
'boingenhopper',
'boingenjinks',
'boingenklunk',
'boingenknees',
'boingenmarble',
'boingenmash',
'boingenmonkey',
'boingenmooch',
'boingenmouth',
'boingenmuddle',
'boingenmuffin',
'boingenmush',
'boingennerd',
'boingennoodle',
'boingennose',
'boingennugget',
'boingenphew',
'boingenphooey',
'boingenpocket',
'boingenpoof',
'boingenpop',
'boingenpounce',
'boingenpow',
'boingenpretzel',
'boingenquack',
'boingenroni',
'boingenscooter',
'boingenscreech',
'boingensmirk',
'boingensnooker',
'boingensnoop',
'boingensnout',
'boingensocks',
'boingenspeed',
'boingenspinner',
'boingensplat',
'boingensprinkles',
'boingensticks',
'boingenstink',
'boingenswirl',
'boingenteeth',
'boingenthud',
'boingentoes',
'boingenton',
'boingentoon',
'boingentooth',
'boingentwist',
'boingenwhatsit',
'boingenwhip',
'boingenwig',
'boingenwoof',
'boingenzaner',
'boingenzap',
'boingenzapper',
'boingenzilla',
'boingenzoom',
'boingyboro',
'bokugeki',
'bokuji',
'bokuzama',
'bold',
'bolder',
'boldest',
'boldly',
'bole',
'bolivia',
'bollard',
'bologna',
'bolt',
"bolt's",
'bolted',
'bolton',
'bolts',
'boma',
'boma-boma',
'bombard',
'bombarding',
'bombardment',
'bombe',
'bombed',
'bomber',
'bombers',
'bombing',
'bombs',
'bombshell',
'bon',
'bonaam',
'bonanza',
'bonbons',
'bond',
'bonded',
'bonding',
'bonds',
'bondsman',
'bonehead',
'boneheads',
'boneless',
'boneyard',
'boneyards',
'bonfire',
'bonfires',
'bongo',
'bonita',
"bonita's",
'bonito',
'bonjour',
'bonkers',
'bonkl',
'bonnet',
'bonnets',
'bonney',
'bonnie',
'bonny',
'bono',
'bonsai',
'bonsoir',
'bonus',
'bonuses',
'bony',
'bonzo',
'boo',
"boo's",
'boo-yaa',
'booed',
'booger',
'boogers',
'boogey',
'boogie',
'boogie-woogie',
'boogied',
'boogies',
'boohoo',
'book',
"book's",
'bookball',
'bookcase',
'bookcases',
'booked',
'bookie',
'booking',
'bookings',
'bookkeeper',
'bookkeepers',
'bookkeeping',
'booklet',
'bookmaker',
'bookmakers',
'bookmark',
'bookmarked',
'books',
'bookshelf',
'bookshelves',
'bookstore',
'bookworm',
'boom',
'boomcrash',
'boomed',
'boomer',
'boomerang',
'boomers',
'booming',
'booms',
'boon',
'boonies',
'booo',
'boooo',
'booooh',
'boooom',
'booooo',
'booooom',
'booooomin',
'boooooo',
'boooooooooooooooooooo',
'boooooooooooooooooooooooooommm',
'boop',
'boor',
'boos',
'boost',
'boosted',
'booster',
'boosters',
'boosting',
'boosts',
'boot',
"boot'n'ears",
'booted',
'booth',
"booth's",
'booths',
'bootiful',
'booting',
'bootleggers',
'boots',
'bootstrap',
'bootstraps',
'bootsy',
'booyah',
'bop',
'bopper',
'bord',
'border',
"border's",
'bordered',
'borderer',
'bordering',
'borderings',
'borderline',
'borders',
'bore',
'borealis',
'bored',
'boredom',
'borer',
'bores',
'boring',
'boris',
'bork',
'borks',
'borksalot',
'born',
'borrow',
'borrowed',
'borrower',
'borrowers',
'borrowing',
'borrowings',
'borrows',
'bos',
'boss',
"boss'",
'bossbot',
'bossbots',
'bossed',
'bosses',
'bossily',
'bossing',
'bossy',
'bossyboots',
'bot',
"bot's",
'botched',
'both',
'bother',
'bothered',
'bothering',
'bothers',
'bothersome',
'bots',
'bottle',
"bottle's",
'bottled',
'bottleneck',
'bottler',
'bottlers',
'bottles',
'bottling',
'bottom',
'bottomed',
'bottomer',
'bottomfeeder',
'bottomfeeders',
'bottoming',
'bottomless',
'bottoms',
'bough',
'boughs',
'bought',
'boulder',
'boulders',
'boulevard',
"boulevard's",
'boulevards',
'bounce',
'bounced',
'bouncer',
'bouncers',
'bounces',
'bouncing',
'bouncy',
'bound',
'boundaries',
'boundary',
"boundary's",
'bounding',
'bounds',
'bounteous',
'bounties',
'bountiful',
'bounty',
'bountyhunter',
'bourbon',
'bourse',
'bout',
'boutique',
'bouts',
'bovel',
'bovine',
'bow',
"bow's",
'bowdash',
'bowed',
'bowers',
'bowie',
'bowing',
'bowl',
"bowl's",
'bowlegged',
'bowler',
'bowling',
'bowls',
'bowman',
'bows',
'bowser',
'box',
'boxcar',
'boxed',
'boxer',
'boxes',
'boxfish',
'boxing',
'boy',
"boy's",
'boycott',
'boycotting',
'boyfriend',
"boyfriend's",
'boyfriends',
'boyish',
'boys',
'boysenberry',
'bozo',
"bozo's",
'bozos',
'brace',
'bracelet',
'bracelets',
'braces',
'bracket',
'bracketing',
'brad',
"brad's",
'bradley',
'brady',
'brag',
'braggart',
'braggarts',
'bragged',
'bragger',
'braggers',
'bragging',
'brags',
'braid',
'braided',
'braiding',
'braids',
'brail',
'brain',
"brain's",
'brained',
'braining',
'brainless',
'brains',
'brainstorm',
'brainwash',
'brainwashed',
'brainy',
'brake',
'braken',
'brakes',
'braking',
'bran',
'branch',
'branched',
'branches',
'branching',
'branchings',
'brand',
'brandishing',
'brandon',
"brandon's",
'brands',
'brandy',
"brandy's",
'brantley',
'brash',
'brass',
'brat',
'brats',
'bratty',
'brave',
'braved',
'bravely',
'braver',
'bravery',
'braves',
'bravest',
'braving',
'bravo',
"bravo's",
'brawl',
'brawny',
'bray',
'brazen',
'brazil',
'brb',
'breach',
'breached',
'bread',
"bread's",
'bread-buttering',
'breadcrumbs',
'breaded',
'breading',
'breads',
'breadstick',
"breadstick's",
'breadth',
'break',
'breakable',
'breakdown',
'breaker',
'breakers',
'breakfast',
'breakfasted',
'breakfaster',
'breakfasters',
'breakfasting',
'breakfasts',
'breaking',
'breakout',
'breaks',
'breakup',
'breath',
'breathe',
'breathed',
'breather',
'breathers',
'breathes',
'breathing',
'breathless',
'breaths',
'breathtaking',
'bred',
'breech',
'breeches',
'breeze',
"breeze's",
'breezed',
'breezes',
'breezest',
'breezing',
'breezy',
'brenda',
"brenda's",
'brethern',
'brethren',
'brevrend',
'brew',
'brewed',
'brewer',
'brewers',
'brewing',
'brews',
'brian',
'briar',
'briars',
'briarstone',
'briarstones',
'bribe',
'bribed',
'bribery',
'bribes',
'bribing',
'brick',
'bricked',
'bricker',
'bricking',
'bricks',
'bridal',
'bride',
"bride's",
'brides',
'bridesmaid',
'bridge',
"bridge's",
'bridged',
'bridges',
'bridget',
'bridging',
'brie',
'brief',
'briefed',
'briefer',
'briefest',
'briefing',
'briefings',
'briefly',
'briefs',
'brig',
"brig's",
'brigad',
'brigade',
'brigadeers',
'brigades',
'brigadier',
'brigadiers',
'brigand',
'brigands',
'brigantine',
'brigantines',
'bright',
'brighten',
'brightens',
'brighter',
'brightest',
'brighting',
'brightly',
'brightness',
'brights',
'brigs',
'brilliance',
'brilliant',
'brilliantly',
'brim',
'brimming',
'brimstone',
'brine',
'briney',
'bring',
'bringer',
'bringers',
'bringing',
'brings',
'bringthemadness',
'brining',
'brink',
'brinks',
'briny',
'brio',
'briquettes',
'brisk',
'brisket',
'britannia',
'britches',
'british',
'bro',
"bro's",
'broached',
'broad',
'broadband',
'broadcast',
'broadcasts',
'broaden',
'broadens',
'broader',
'broadest',
'broadly',
'broads',
'broadside',
"broadside's",
'broadsided',
'broadsides',
'broadsword',
'broadway',
'broccoli',
'brochure',
'brogan',
'brogans',
'broil',
'broiled',
'broke',
'broken',
'brokenly',
'broker',
'brokers',
'broking',
'bronco',
'broncos',
'bronze',
'bronzed',
'bronzy',
'brood',
'brook',
"brook's",
'brooks',
'broom',
"broom's",
'brooms',
'broomstick',
"broomstick's",
'broomsticks',
'bros',
'broth',
'brother',
"brother's",
'brotherhood',
"brotherhood's",
'brotherhoods',
'brothering',
'brotherly',
'brothers',
"brothers'",
'broths',
'brought',
'brouhaha',
'brow',
'brown',
'browncoats',
'browner',
'brownie',
'brownies',
'browning',
'brownish',
'browns',
'brows',
'browse',
'browsed',
'browser',
'browsers',
'browsing',
'brrr',
'brrrgh',
'brt',
'bruce',
"bruce's",
'bruckheimer',
'bruh',
'bruin',
'bruise',
'bruised',
'bruiser',
'bruises',
'bruising',
'brulee',
'brume',
'brunch',
'brunette',
'brunettes',
'brunt',
'brush',
'brushed',
'brusher',
'brushes',
'brushing',
'brushoff',
'brussels',
'brute',
'brutish',
'bryan',
'bryanna',
'bryson',
'btb',
'btl',
'btw',
'bubba',
'bubble',
"bubble's",
'bubbled',
'bubblegum',
'bubbles',
'bubbling',
'bubbloon',
'bubbly',
'bubo',
'bubs',
"buc's",
'bucaneer',
'bucanneers',
'buccaneer',
"buccaneer's",
'buccaneers',
'bucco',
'buckaroo',
"buckaroo's",
'buckaroos',
'bucket',
"bucket's",
'bucketed',
'bucketing',
'buckets',
'buckeye',
'buckeyes',
'buckle',
"buckle's",
'buckled',
'buckler',
'buckles',
'bucko',
"bucko's",
'buckos',
'bucks',
'buckshot',
'buckskin',
'buckwheat',
'bucs',
'bud',
"bud's",
'budd',
"budd's",
'buddies',
'buddy',
"buddy's",
'budge',
'budged',
'budget',
'budgeted',
'budgeter',
'budgeters',
'budgeting',
'budgets',
'budgie',
'budging',
'buds',
'bueno',
'buff',
'buffalo',
"buffalo's",
'buffalos',
'buffed',
'buffer',
'buffet',
"buffet's",
'buffets',
'buffoon',
'buffoons',
'buffs',
'bug',
"bug's",
'bug-eye',
'bugalicious',
'bugariffic',
'bugbear',
'bugbears',
'bugeye',
'bugged',
'buggered',
'buggers',
'buggier',
'buggies',
'buggiest',
'bugging',
'buggy',
'bugle',
'bugles',
'bugs',
'bugsit',
'bugtastic',
'buh',
'build',
'builded',
'builder',
"builder's",
'builders',
'building',
"building's",
'buildings',
'builds',
'buildup',
'buillion',
'built',
'bulb',
"bulb's",
'bulbasaur',
| |
import torch
import argparse
import torch.nn.functional as F
import copy
from utils import tps
import time
from collections import defaultdict
from torch.autograd import gradcheck
# When True, every manually-derived gradient in DenseCorr.backward is checked
# against torch.autograd's numerical result (slow; debugging only).
LOCAL_CHECKS = False
# When True, DenseCorr.backward prints a per-stage timing breakdown.
PROFILE = False
class DenseCorr(torch.autograd.Function):
    """Memory-efficient dense correlation loss with a hand-written backward.

    The forward pass computes, per batch element, the softmax-normalised
    correlation between two feature maps and weights it by the spatial
    distance between sampling grids.  The backward pass re-derives the
    softmax/matmul gradients manually (instead of relying on autograd) so
    that the large intermediate correlation tensors never need to be kept
    alive by the autograd graph.
    """

    @staticmethod
    def forward(ctx, feats1, feats2, xxyy, batch_grid_u, stride, pow=0.5):
        """Compute the folded dense correlation loss forward pass.
        Args:
            feats1 (torch.Tensor): N x C x h x h tensor of features
            feats2 (torch.Tensor): N x C x h x w tensor of features
            xxyy (torch.Tensor): H x W x 2 grid of uniform sampling locations.
            batch_grid_u (torch.Tensor): N x h x w x 2 grid of sampling
                locations.
            stride (int): the stride to be applied to the image grid to match
                the spatial dimensions of the features (so that
                `H = h * stride`).
            pow (float :: 0.5): power by which to raise the root distances
                between pixel locations.
        Returns:
            (torch.Tensor): The total loss for the given minibatch of inputs.
        """
        # NOTE(review): `pow` shadows the builtin; kept for interface
        # compatibility with existing callers.
        with torch.no_grad():
            B, C, H, W = feats1.shape
            # Scalars must be stashed as tensors to survive save_for_backward.
            params = torch.IntTensor([B, C, H, W, stride])
            pow_tensor = torch.FloatTensor([pow])
            ctx.save_for_backward(feats1, feats2, xxyy, batch_grid_u,
                                  params, pow_tensor)
            loss = 0.
            for b in range(B):
                f1 = feats1[b].reshape(C, H * W)  # source
                f2 = feats2[b].reshape(C, H * W)  # target
                # (H*W) x (H*W) pairwise feature correlation.
                corr = torch.matmul(f1.t(), f2)
                corr = corr.reshape(H, W, H, W)
                # Euclidean distance between each target grid point and each
                # (strided) uniform grid point, raised to `pow`.
                diff = batch_grid_u[b, :, :, None, None, :] - \
                    xxyy[None, None, ::stride, ::stride, :]
                diff = (diff * diff).sum(4).sqrt()
                diff = diff.pow(pow)
                # Softmax over all target locations for each source location.
                smcorr = F.softmax(corr.reshape(H, W, -1), dim=2)
                smcorr = smcorr.reshape(corr.shape)
                L = diff * smcorr
                loss += L.sum()
        return loss / (H * W * B)

    @staticmethod
    def backward(ctx, grad_output):
        """Compute the folded dense correlation loss backward pass.
        Args:
            grad_output (torch.Tensor): The gradient of the total loss with
                respect to the output of the dense correlation loss.
        Returns:
            (torch.Tensor): N x C x h x h tensor of gradients for `feats1`
            (torch.Tensor): N x C x h x w tensor of gradients for `feats2`
            (None): no gradient for `xxyy`
            (None): no gradient for `batch_grid_u`
            (None): no gradient for `stride`
            (None): no gradient for `pow`
        """
        if PROFILE:
            batch_tic = time.time()
            tic = time.time()
            timings = defaultdict(float)
        feats1, feats2, xxyy, batch_grid_u, params, pow = ctx.saved_tensors
        """We needed to store the integers as part of a tensor, so the
        unpacking code here is a little convoluted."""
        B, C, H, W, stride = [x.item() for x in params]
        pow = pow.item()
        """This is a pattern that is very convenient - at the top of backward
        unpack saved_tensors and initialize all gradients w.r.t. inputs to
        None. Thanks to the fact that additional trailing Nones are
        ignored, the return statement is simple even when the function has
        optional inputs."""
        grad_feats1 = grad_feats2 = grad_xxyy = grad_batch_u = None
        grad_stride = grad_pow = None
        """Returning gradients for inputs that don't require it is
        not an error."""
        assert ctx.needs_input_grad[0], "expected feats1 to need grad"
        assert ctx.needs_input_grad[1], "expected feats2 to need grad"
        assert not ctx.needs_input_grad[2], "expected xxyy does not need grad"
        assert not ctx.needs_input_grad[3], "expected batch_grid_u does not need grad"
        assert not ctx.needs_input_grad[4], "expected stride does not need grad"
        if PROFILE:
            timings["back-init"] = time.time() - tic
            tic = time.time()
        with torch.no_grad():
            # Allocate gradient buffers up-front, matching device and dtype.
            if feats1.is_cuda:
                # TODO: clean up types here
                if feats1.dtype == torch.float32:
                    grad_feats1 = torch.cuda.FloatTensor(B, C, H, W).fill_(0)
                    grad_feats2 = torch.cuda.FloatTensor(B, C, H, W).fill_(0)
                elif feats1.dtype == torch.float16:
                    grad_feats1 = torch.cuda.HalfTensor(B, C, H, W).fill_(0)
                    grad_feats2 = torch.cuda.HalfTensor(B, C, H, W).fill_(0)
            else:
                grad_feats1 = torch.zeros((B, C, H, W), dtype=feats1.dtype)
                grad_feats2 = torch.zeros((B, C, H, W), dtype=feats2.dtype)
            # Undo the forward pass's mean normalisation.
            grad_loss = grad_output / (H * W * B)
            if PROFILE:
                timings["data transfer"] = time.time() - batch_tic
            for b in range(B):
                if PROFILE:
                    tic = time.time()
                # Recompute the distance grid (cheaper than storing it).
                with torch.no_grad():
                    diff = batch_grid_u[b, :, :, None, None, :] - \
                        xxyy[None, None, ::stride, ::stride, :]
                    diff = (diff * diff).sum(4).sqrt()
                    diff = diff.pow(pow)
                if PROFILE:
                    timings["diff-grid"] += time.time() - tic
                    tic = time.time()
                # loss gradient for the current minibatch element (expand to tensor)
                grad_loss_b = grad_loss
                # dL/d(smcorr): the loss is diff * smcorr summed.
                grad_smcorr = grad_loss_b * diff
                if LOCAL_CHECKS:
                    grad_loss_b_ = torch.ones(diff.shape, dtype=diff.dtype) * grad_loss
                    smcorr_ = torch.randn(diff.shape, dtype=torch.double, requires_grad=True)
                    with torch.autograd.enable_grad():
                        L_ = diff * smcorr_
                        d_smcorr = torch.autograd.grad(
                            outputs=L_,
                            inputs=smcorr_,
                            grad_outputs=grad_loss_b_,
                        )
                    grad_smcorr = grad_loss_b * diff
                    rel_diff(grad_smcorr, d_smcorr, "smax")
                grad_smcorr = grad_smcorr.view(H, W, -1)
                f1_ = feats1[b].view(C, H * W)
                f2_ = feats2[b].view(C, H * W)
                if PROFILE:
                    timings["scale-feats"] += time.time() - tic
                    tic = time.time()
                # This is where the memory usage gets serious
                corr = torch.matmul(f1_.t(), f2_)
                if PROFILE:
                    timings["correlation"] += time.time() - tic
                    tic = time.time()
                # Direct backward pass for softmax:
                # d(corr) = smcorr * (d(smcorr) - sum(d(smcorr) * smcorr)).
                corr = corr.view(H, W, -1)
                smcorr = F.softmax(corr, dim=2)
                smcorr = smcorr.view(corr.shape)
                sum_term = torch.sum(grad_smcorr * smcorr, dim=2, keepdim=True)
                grad_corr = smcorr * (grad_smcorr - sum_term)
                if PROFILE:
                    timings["softmax"] += time.time() - tic
                    tic = time.time()
                # safety checks
                if LOCAL_CHECKS:
                    with torch.enable_grad():
                        corr_num = corr.clone().requires_grad_()
                        smcorr_num = F.softmax(corr_num, dim=2)
                        grad_corr_num = torch.autograd.grad(
                            outputs=smcorr_num,
                            inputs=(corr_num,),
                            grad_outputs=grad_smcorr,
                        )
                        rel_diff(grad_corr, grad_corr_num[0], "smax")
                """The main work is done by some fairly beefy MM ops to compute
                pairwise correlations:"""
                grad_corr = grad_corr.view(H * W, H * W)
                grad_f1 = torch.matmul(grad_corr, f2_.t()).t()
                grad_f2 = torch.matmul(f1_, grad_corr)
                if PROFILE:
                    timings["corr-back"] += time.time() - tic
                    tic = time.time()
                if LOCAL_CHECKS:
                    with torch.enable_grad():
                        f1_num = f1_.clone().requires_grad_()
                        f2_num = f2_.clone().requires_grad_()
                        corr_num = torch.matmul(f1_num.t(), f2_num)
                        grad_f1_num, grad_f2_num = torch.autograd.grad(
                            outputs=corr_num,
                            inputs=(f1_num, f2_num),
                            grad_outputs=grad_corr,
                        )
                        rel_diff(grad_f1, grad_f1_num, "corr->f1")
                        rel_diff(grad_f2, grad_f2_num, "corr->f2")
                        grad_f1_inner = grad_f1_num
                        grad_f2_inner = grad_f2_num
                # safety checks over the whole inner loop
                if LOCAL_CHECKS:
                    with torch.enable_grad():
                        f1_num = feats1[b].clone().detach().requires_grad_()
                        f2_num = feats2[b].clone().detach().requires_grad_()
                        # BLock 1 ------------------------------------------
                        f1_num = f1_num.reshape(C, H * W)  # source
                        f2_num = f2_num.reshape(C, H * W)  # target
                        # BLock 1 ------------------------------------------
                        # BLock 2 ------------------------------------------
                        corr_num = torch.matmul(f1_num.t(), f2_num)
                        corr_num = corr_num.reshape(H, W, H, W)
                        # BLock 2 ------------------------------------------
                        corr_num = corr_num.reshape(H, W, -1)
                        smcorr_num = F.softmax(corr_num, dim=2)
                        smcorr_num = smcorr_num.reshape(corr_num.shape)
                        grad_f1_num, grad_f2_num = torch.autograd.grad(
                            outputs=smcorr_num,
                            inputs=(f1_num, f2_num),
                            grad_outputs=grad_smcorr,
                        )
                        grad_f1_outer = grad_f1_num
                        grad_f2_outer = grad_f2_num
                        rel_diff(grad_f1, grad_f1_num, "df1_")
                        rel_diff(grad_f2, grad_f2_num, "df2_")
                grad_feats1[b] = grad_f1.reshape((C, H, W))
                grad_feats2[b] = grad_f2.reshape((C, H, W))
                if PROFILE:
                    timings["feat-assign"] += time.time() - tic
            """Distribute the gradients back among the input tensor features that
            require them."""
            # grad_feats1 = grad_feats1.unsqueeze(0).repeat(B, 1, 1, 1)
            # grad_feats2 = grad_feats2.unsqueeze(0).repeat(B, 1, 1, 1)
            if LOCAL_CHECKS:
                # Full end-to-end check: rebuild the forward graph with
                # autograd enabled and compare against the manual gradients.
                with torch.enable_grad():
                    loss = 0.
                    grad_loss_ = grad_loss * (H * W * B)  # unscale
                    for b in range(B):
                        f1 = feats1[b].reshape(C, H * W)  # source
                        f2 = feats2[b].reshape(C, H * W)  # target
                        corr = torch.matmul(f1.t(), f2)
                        corr = corr.reshape(H, W, H, W)
                        with torch.no_grad():
                            diff = batch_grid_u[b, :, :, None, None, :] - \
                                xxyy[None, None, ::stride, ::stride, :]
                            diff = (diff * diff).sum(4).sqrt()
                            diff = diff.pow(pow)
                        smcorr = F.softmax(corr.reshape(H, W, -1), dim=2).reshape(corr.shape)
                        L = diff * smcorr
                        loss += L.sum()
                    loss = loss / (H * W * B)
                    grad_f1_num, grad_f2_num = torch.autograd.grad(
                        outputs=loss,
                        inputs=(feats1, feats2),
                        grad_outputs=grad_loss_,
                    )
                    rel_diff(grad_feats1, grad_f1_num, "full-loop f2")
                    rel_diff(grad_feats2, grad_f2_num, "full-loop f2")
        if PROFILE:
            tic = time.time()
        """Clear up all intermediate structures to avoid autograd
        implosions."""
        del grad_loss_b
        del b
        del grad_f1
        del grad_f2
        del smcorr
        del corr
        del diff
        del params
        if PROFILE:
            timings["cleanup"] += time.time() - tic
        if PROFILE:
            timings["minibatch"] = time.time() - batch_tic
            print("==============")
            total_ratios = 0
            for key in timings:
                ratio = 100 * timings[key] / timings["minibatch"]
                msg = "{:.3f} ({:.2f}%) >>> {}"
                print(msg.format(timings[key], ratio, key))
                total_ratios += ratio
            msg = "{:.3f}s >>> ratio total {}"
            print(msg.format(timings["minibatch"], total_ratios - 100))
            print("==============")
        return (grad_feats1, grad_feats2, grad_xxyy, grad_batch_u,
                grad_stride, grad_pow)
def rel_diff(x1, x2, name):
    """Print and return a relative-difference diagnostic between two tensors.

    The metric is ``|x1 - x2|.sum() / |x2|.mean()``, where ``x2`` is treated
    as the reference value.

    Args:
        x1 (torch.Tensor): candidate tensor (e.g. a manual gradient).
        x2 (torch.Tensor): reference tensor (e.g. an autograd gradient).
        name (str): label used in the printed message.

    Returns:
        (torch.Tensor): the scalar relative difference, so callers can
        inspect or assert on it (previously the value was only printed).

    NOTE(review): if ``x2`` is all zeros the denominator is zero and the
    result is inf/nan — acceptable for a debug diagnostic.
    """
    out = torch.abs(x1 - x2).sum() / torch.abs(x2).mean()
    print("rel diff for {}: {}".format(name, out))
    return out
def dense_corr_check():
# gradcheck takes a tuple of tensors as input, check if your gradient
# evaluated with these tensors are close enough to numerical
# approximations and returns True if they all verify this condition.
dense_corr = DenseCorr.apply
dve_dim = 4
stride = 2
B, C, H, W = 4, dve_dim, 4, 4
common = {"dtype": torch.double, "requires_grad": True}
| |
# <gh_stars>0  (repository-export metadata; commented out so the file parses)
# -*- coding: utf-8 -*-
import functools
import random
import time
from collections import namedtuple
from py_zipkin._encoding_helpers import create_endpoint
from py_zipkin.exception import ZipkinError
from py_zipkin.logging_helper import zipkin_logger
from py_zipkin.logging_helper import ZipkinLoggerHandler
from py_zipkin.logging_helper import ZipkinLoggingContext
from py_zipkin.stack import ThreadLocalStack
from py_zipkin.util import generate_random_128bit_string
from py_zipkin.util import generate_random_64bit_string
"""
Holds the basic attributes needed to log a zipkin trace
:param trace_id: Unique trace id
:param span_id: Span Id of the current request span
:param parent_span_id: Parent span Id of the current request span
:param flags: stores flags header. Currently unused
:param is_sampled: pre-computed boolean whether the trace should be logged
"""
ZipkinAttrs = namedtuple(
'ZipkinAttrs',
['trace_id', 'span_id', 'parent_span_id', 'flags', 'is_sampled'],
)
STANDARD_ANNOTATIONS = {
'client': {'cs', 'cr'},
'server': {'ss', 'sr'},
}
STANDARD_ANNOTATIONS_KEYS = frozenset(STANDARD_ANNOTATIONS.keys())
ERROR_KEY = 'error'
class zipkin_span(object):
"""Context manager/decorator for all of your zipkin tracing needs.
Usage #1: Start a trace with a given sampling rate
This begins the zipkin trace and also records the root span. The required
params are service_name, transport_handler, and sample_rate.
# Start a trace with do_stuff() as the root span
def some_batch_job(a, b):
with zipkin_span(
service_name='my_service',
span_name='my_span_name',
transport_handler=some_handler,
port=22,
sample_rate=0.05,
):
do_stuff()
Usage #2: Trace a service call.
The typical use case is instrumenting a framework like Pyramid or Django. Only
ss and sr times are recorded for the root span. Required params are
service_name, zipkin_attrs, transport_handler, and port.
# Used in a pyramid tween
def tween(request):
zipkin_attrs = some_zipkin_attr_creator(request)
with zipkin_span(
service_name='my_service,'
span_name='my_span_name',
zipkin_attrs=zipkin_attrs,
transport_handler=some_handler,
port=22,
) as zipkin_context:
response = handler(request)
zipkin_context.update_binary_annotations(
some_binary_annotations)
return response
Usage #3: Log a span within the context of a zipkin trace
If you're already in a zipkin trace, you can use this to log a span inside. The
only required param is service_name. If you're not in a zipkin trace, this
won't do anything.
# As a decorator
@zipkin_span(service_name='my_service', span_name='my_function')
def my_function():
do_stuff()
# As a context manager
def my_function():
with zipkin_span(service_name='my_service', span_name='do_stuff'):
do_stuff()
"""
def __init__(
self,
service_name,
span_name='span',
zipkin_attrs=None,
transport_handler=None,
max_span_batch_size=None,
annotations=None,
binary_annotations=None,
port=0,
sample_rate=None,
include=('client', 'server'),
add_logging_annotation=False,
report_root_timestamp=False,
use_128bit_trace_id=False,
host=None,
context_stack=None,
firehose_handler=None
):
"""Logs a zipkin span. If this is the root span, then a zipkin
trace is started as well.
:param service_name: The name of the called service
:type service_name: string
:param span_name: Optional name of span, defaults to 'span'
:type span_name: string
:param zipkin_attrs: Optional set of zipkin attributes to be used
:type zipkin_attrs: ZipkinAttrs
:param transport_handler: Callback function that takes a message parameter
and handles logging it
:type transport_handler: function
:param max_span_batch_size: Spans in a trace are sent in batches,
max_span_batch_size defines max size of one batch
:type max_span_batch_size: int
:param annotations: Optional dict of str -> timestamp annotations
:type annotations: dict of str -> int
:param binary_annotations: Optional dict of str -> str span attrs
:type binary_annotations: dict of str -> str
:param port: The port number of the service. Defaults to 0.
:type port: int
:param sample_rate: Rate at which to sample; 0.0 - 100.0. If passed-in
zipkin_attrs have is_sampled=False and the sample_rate param is > 0,
a new span will be generated at this rate. This means that if you
propagate sampling decisions to downstream services, but still have
sample_rate > 0 in those services, the actual rate of generated
spans for those services will be > sampling_rate.
:type sample_rate: float
:param include: which annotations to include
can be one of {'client', 'server'}
corresponding to ('cs', 'cr') and ('ss', 'sr') respectively
:type include: iterable
:param add_logging_annotation: Whether to add a 'logging_end'
annotation when py_zipkin finishes logging spans
:type add_logging_annotation: boolean
:param report_root_timestamp: Whether the span should report timestamp
and duration. Only applies to "root" spans in this local context,
so spans created inside other span contexts will always log
timestamp/duration. Note that this is only an override for spans
that have zipkin_attrs passed in. Spans that make their own
sampling decisions (i.e. are the root spans of entire traces) will
always report timestamp/duration.
:type report_root_timestamp: boolean
:param use_128bit_trace_id: If true, generate 128-bit trace_ids
:type use_128bit_trace_id: boolean
:param host: Contains the ipv4 value of the host. The ipv4 value isn't
automatically determined in a docker environment
:type host: string
:param context_stack: explicit context stack for storing
zipkin attributes
:type context_stack: object
:param firehose_handler: [EXPERIMENTAL] Similar to transport_handler,
except that it will receive 100% of the spans regardless of trace
sampling rate
:type firehose_handler: function
"""
self.service_name = service_name
self.span_name = span_name
self.zipkin_attrs = zipkin_attrs
self.transport_handler = transport_handler
self.max_span_batch_size = max_span_batch_size
self.annotations = annotations or {}
self.binary_annotations = binary_annotations or {}
self.port = port
self.sample_rate = sample_rate
self.include = include
self.add_logging_annotation = add_logging_annotation
self.report_root_timestamp_override = report_root_timestamp
self.use_128bit_trace_id = use_128bit_trace_id
self.host = host
self._context_stack = context_stack or ThreadLocalStack()
self.firehose_handler = firehose_handler
self.logging_context = None
self.logging_configured = False
self.do_pop_attrs = False
# Spans that log a 'cs' timestamp can additionally record a
# 'sa' binary annotation that shows where the request is going.
self.sa_endpoint = None
# Validation checks
if self.zipkin_attrs or self.sample_rate is not None:
if self.transport_handler is None:
raise ZipkinError(
'Root spans require a transport handler to be given')
if self.sample_rate is not None and not (0.0 <= self.sample_rate <= 100.0):
raise ZipkinError('Sample rate must be between 0.0 and 100.0')
if not set(include).issubset(STANDARD_ANNOTATIONS_KEYS):
raise ZipkinError(
'Only %s are supported as annotations' %
STANDARD_ANNOTATIONS_KEYS
)
else:
# get a list of all of the mapped annotations
self.annotation_filter = set()
for include_name in include:
self.annotation_filter.update(STANDARD_ANNOTATIONS[include_name])
    def __call__(self, f):
        """Support use as a decorator: each call of *f* runs inside a new
        zipkin_span built from this instance's configuration.

        NOTE(review): max_span_batch_size, add_logging_annotation,
        report_root_timestamp and use_128bit_trace_id set on this instance
        are not forwarded to the inner span -- confirm this is intentional.
        """
        @functools.wraps(f)  # preserve the wrapped function's name/docstring
        def decorated(*args, **kwargs):
            with zipkin_span(
                service_name=self.service_name,
                span_name=self.span_name,
                zipkin_attrs=self.zipkin_attrs,
                transport_handler=self.transport_handler,
                annotations=self.annotations,
                binary_annotations=self.binary_annotations,
                port=self.port,
                sample_rate=self.sample_rate,
                include=self.include,
                host=self.host,
                context_stack=self._context_stack,
                firehose_handler=self.firehose_handler,
            ):
                return f(*args, **kwargs)
        return decorated
    def __enter__(self):
        """Context-manager entry point; all of the work happens in start()."""
        return self.start()
def start(self):
"""Enter the new span context. All annotations logged inside this
context will be attributed to this span. All new spans generated
inside this context will have this span as their parent.
In the unsampled case, this context still generates new span IDs and
pushes them onto the threadlocal stack, so downstream services calls
made will pass the correct headers. However, the logging handler is
never attached in the unsampled case, so the spans are never logged.
"""
self.do_pop_attrs = False
# If zipkin_attrs are passed in or this span is doing its own sampling,
# it will need to actually log spans at __exit__.
perform_logging = bool(self.zipkin_attrs or
self.sample_rate is not None or
self.firehose_handler is not None)
report_root_timestamp = False
if self.sample_rate is not None:
if self.zipkin_attrs and not self.zipkin_attrs.is_sampled:
report_root_timestamp = True
self.zipkin_attrs = create_attrs_for_span(
sample_rate=self.sample_rate,
trace_id=self.zipkin_attrs.trace_id,
use_128bit_trace_id=self.use_128bit_trace_id,
)
elif not self.zipkin_attrs:
report_root_timestamp = True
self.zipkin_attrs = create_attrs_for_span(
sample_rate=self.sample_rate,
use_128bit_trace_id=self.use_128bit_trace_id,
)
if not self.zipkin_attrs:
# This span is inside the context of an existing trace
existing_zipkin_attrs = self._context_stack.get()
if existing_zipkin_attrs:
self.zipkin_attrs = ZipkinAttrs(
trace_id=existing_zipkin_attrs.trace_id,
span_id=generate_random_64bit_string(),
parent_span_id=existing_zipkin_attrs.span_id,
flags=existing_zipkin_attrs.flags,
is_sampled=existing_zipkin_attrs.is_sampled,
)
elif self.firehose_handler is not None:
# If it has gotten here, the only thing that is
# causing a trace is the firehose. So we force a trace
# with sample rate of 0
report_root_timestamp = True
self.zipkin_attrs = create_attrs_for_span(
sample_rate=0.0,
use_128bit_trace_id=self.use_128bit_trace_id,
)
# If zipkin_attrs are not set up by now, that means this span is not
# configured to perform logging itself, and it's not in an existing
# Zipkin trace. That means there's nothing else to do and it can exit
# early.
if not self.zipkin_attrs:
return self
self._context_stack.push(self.zipkin_attrs)
self.do_pop_attrs = True
self.start_timestamp = time.time()
if perform_logging:
# Don't set up any logging if we're not sampling
if not self.zipkin_attrs.is_sampled and not self.firehose_handler:
return self
endpoint = create_endpoint(self.port, self.service_name, self.host)
client_context = set(self.include) == {'client'}
self.log_handler = ZipkinLoggerHandler(self.zipkin_attrs)
self.logging_context = ZipkinLoggingContext(
self.zipkin_attrs,
endpoint,
self.log_handler,
self.span_name,
self.transport_handler,
report_root_timestamp or self.report_root_timestamp_override,
binary_annotations=self.binary_annotations,
add_logging_annotation=self.add_logging_annotation,
client_context=client_context,
max_span_batch_size=self.max_span_batch_size,
firehose_handler=self.firehose_handler,
)
self.logging_context.start()
self.logging_configured = True
return self
else:
# Patch the ZipkinLoggerHandler.
# Be defensive about logging setup. Since ZipkinAttrs are local to
# the thread, multithreaded frameworks can get in strange states.
# The logging is not going to be correct in these cases, so we set
# a flag that turns off logging on __exit__.
try:
# Assume there's only a | |
first need to check if it is Multipolygon
if self.Multipolygon:
#then we append all the floor and roof fottprints into one with associate height
for idx1,poly1 in enumerate(DB.geometry.coordinates[:-1]):
for idx2,poly2 in enumerate(DB.geometry.coordinates[idx1+1:]):
if poly1 == poly2:
polycoor = []
for j in poly1[0]:
new = (j[0], j[1])
new_coor = new#[]
# for ii in range(len(RefCoord)):
# new_coor.append((new[ii] - RefCoord[ii]))
polycoor.append(tuple(new_coor))
if polycoor[0]==polycoor[-1]:
polycoor = polycoor[:-1]
#even before skewed angle, we need to check for tiny edge below the tolerance onsdered aftward (0.5m)
pt2remove = []
for edge in Polygon2D(polycoor).edges:
if edge.length < DistTol:
pt2remove.append(edge.p2)
for pt in pt2remove:
if len(polycoor)>3:
polycoor.remove(pt)
newpolycoor, node = core_perim.CheckFootprintNodes(polycoor,5)
node2remove.append(node)
#polycoor.reverse()
coord.append(polycoor)
BlocHeight.append(round(abs(DB.geometry.poly3rdcoord[idx1]-DB.geometry.poly3rdcoord[idx2+idx1+1]),1))
#these following lines are here to highlight holes in footprint and split it into two blocs...
#it may appear some errors for other building with several blocs and some with holes (these cases havn't been checked)
poly2merge = []
for idx, coor in enumerate(coord):
for i in range(len(coord)-idx-1):
if Polygon(coor).contains(Polygon(coord[idx+i+1])):
poly2merge.append([idx,idx+i+1])
try:
for i,idx in enumerate(poly2merge):
new_surfaces = break_polygons(Polygon3D(coord[idx[0]]), Polygon3D(coord[idx[1]]))
xs,ys,zs = zip(*list(new_surfaces[0]))
coord[idx[0]] = [(xs[nbv],ys[nbv]) for nbv in range(len(xs))]
xs,ys,zs = zip(*list(new_surfaces[1]))
coord[idx[1]] = [(xs[nbv],ys[nbv]) for nbv in range(len(xs))]
BlocHeight[idx[1]] = BlocHeight[idx[0]]
msg ='[Geom Cor] There is a hole that will split the main surface in two blocs \n'
GrlFct.Write2LogFile(msg, LogFile)
except:
msg = '[Poly Error] Some error are present in the polygon parts. Some are identified as being inside others...\n'
print(msg[:-1])
GrlFct.Write2LogFile(msg, LogFile)
import matplotlib.pyplot as plt
fig = plt.figure(0)
for i in coord:
xs,ys = zip(*i)
plt.plot(xs,ys,'-.')
#titre = 'FormularId : '+str(DB.properties['FormularId'])+'\n 50A_UUID : '+str(DB.properties['50A_UUID'])
# plt.title(titre)
# plt.savefig(self.name+ '.png')
# plt.close(fig)
#we need to clean the footprint from the node2remove but not if there are part of another bloc
newbloccoor= []
for idx,coor in enumerate(coord):
newcoor = []
FilteredNode2remove = []
single = False
for node in node2remove[idx]:
single = True
for idx1,coor1 in enumerate(coord):
if idx!=idx1:
if coor[node] in coor1 and coor[node] not in [n for i,n in enumerate(coor1[idx1]) if i in node2remove[idx1]]:
single =False
if single:
FilteredNode2remove.append(node)
for nodeIdx,node in enumerate(coor):
if not nodeIdx in FilteredNode2remove:
newcoor.append(node)
newbloccoor.append(newcoor)
coord = newbloccoor
else:
#for dealing with 2D files
for j in DB.geometry.coordinates[0]:
new = (j[0], j[1])
new_coor = new#[]
# for ii in range(len(self.RefCoord)):
# new_coor.append((new[ii] - self.RefCoord[ii]))
coord.append(tuple(new_coor))
BlocNbFloor.append(nbfloor)
BlocHeight.append(self.height)
newpolycoor, node = core_perim.CheckFootprintNodes(coord, 5)
coord= [newpolycoor]
#before submitting the full coordinates, we need to check correspondance in case of multibloc
coord, validFootprint = CheckMultiBlocFootprint(coord,tol = DistTol)
if not validFootprint:
msg = '[Poly Error] The different bloc are not adjacent...\n'
#print(msg[:-1])
GrlFct.Write2LogFile(msg, LogFile)
return
# multibloc should share at least one edge and not a polygon as bld:a3848e24-d29e-44bc-a395-a25b5fd26598 in area : 0180C3170 of Sodermalm_V4
SmallEdge = False
for bloc in coord:
if [val for val in Polygon2D(bloc).edges_length if val < 2]:
SmallEdge = True
if SmallEdge:
msg = '[Geom Warning] This building has at least one edge length below 2m\n'
#print(msg[:-1])
GrlFct.Write2LogFile(msg, LogFile)
return coord, BlocHeight, BlocNbFloor
def EvenFloorCorrection(self,BlocHeight,nbfloor,BlocNbFloor,coord,LogFile):
# we compute a storey height as well to choosen the one that correspond to the highest part of the building afterward
BlocNbFloor=[] #the number of blocks is reset to comply with the old 2D geojson files is anyway empty for multipolygons files
StoreyHeigth = 3
if nbfloor !=0:
storeyRatio = StoreyHeigth / (max(BlocHeight) / nbfloor) if (max(BlocHeight) / nbfloor) > 0.5 else 1
msg = '[Geom Info] The max bloc height is : ' + str(round(max(BlocHeight), 2)) + ' for ' + str(
nbfloor) + ' floors declared in the EPC \n'
else:
nbfloor= round(max(BlocHeight)/StoreyHeigth)
try:
storeyRatio = StoreyHeigth / (max(BlocHeight) / nbfloor) if (max(BlocHeight) / nbfloor) > 0.5 else 1
except:
storeyRatio = 0
msg = '[Geom Info] The max bloc height is : ' + str(round(max(BlocHeight), 2)) + ' for ' + str(
nbfloor) + ' floors computed from max bloc height\n'
GrlFct.Write2LogFile(msg, LogFile)
msg = '[Geom Cor] A ratio of ' + str(storeyRatio) + ' will be applied on each bloc height\n'
GrlFct.Write2LogFile(msg, LogFile)
for height in range(len(BlocHeight)):
BlocHeight[height] *= storeyRatio
for idx, Height in enumerate(BlocHeight):
val = int(round(Height, 1) / StoreyHeigth)
BlocNbFloor.append(max(1, val)) # the height is ed to the closest 10cm
BlocHeight[idx] = BlocNbFloor[-1] * StoreyHeigth
msg = '[Geom Info] Bloc height : ' + str(BlocHeight[idx]) + ' with ' + str(BlocNbFloor[-1]) + ' nb of floors\n'
GrlFct.Write2LogFile(msg, LogFile)
msg = '[Geom Info] This bloc has a footprint with : ' + str(len(coord[idx])) + ' vertexes\n'
GrlFct.Write2LogFile(msg, LogFile)
if val == 0:
try:
LogFile.write(
'[WARNING] /!\ This bloc as a height below 3m, it has been raized to 3m to enable construction /!\ \n')
except:
pass
return BlocHeight, BlocNbFloor, StoreyHeigth
def getEPHeatedArea(self,LogFile):
"get the heated area based on the footprint and the number of floors"
self.BlocFootprintArea=[]
EPHeatedArea = 0
for i,foot in enumerate(self.footprint):
EPHeatedArea += Polygon(foot).area*self.BlocNbFloor[i]
self.BlocFootprintArea.append(Polygon(foot).area)
msg = '[Geom Info] Blocs footprint areas : '+ str(self.BlocFootprintArea)+'\n'
GrlFct.Write2LogFile(msg, LogFile)
msg = '[Geom Info] The total heated area is : ' + str(EPHeatedArea)+' for a declared ATemp of : '+str(self.ATemp)+' --> discrepancy of : '+str(round((self.ATemp-EPHeatedArea)/self.ATemp*100,2))+'\n'
GrlFct.Write2LogFile(msg, LogFile)
return EPHeatedArea
def getsurface(self,DB, DBL,LogFile):
"Get the surface from the input file, ATemp"
try: ATemp = int(getDBValue(DB.properties, DBL['surface_key']))
except: ATemp = 1
if ATemp == 1:
msg = '[Geom Error] Atemp not recognized as number, fixed to 1\n'
GrlFct.Write2LogFile(msg, LogFile)
ATemp = checkLim(ATemp,DBL['surface_lim'][0],DBL['surface_lim'][1])
self.ATempOr= ATemp #this is to keep the original value as some correction might done afterward if more then 1 bld is present in 1 Id
return ATemp
def getnbfloor(self,DB, DBL,LogFile):
"Get the number of floor above ground"
try: nbfloor=int(getDBValue(DB.properties, DBL['nbfloor_key']))
except: nbfloor = 0
if nbfloor == 0:
msg = '[EPCs Warning] The nb of floors is 0. It will be defined using the max bloc height and a storey height of 3m\n'
GrlFct.Write2LogFile(msg, LogFile)
nbfloor = checkLim(nbfloor,DBL['nbfloor_lim'][0],DBL['nbfloor_lim'][1])
return nbfloor
def getnbStairwell(self,DB, DBL):
"Get the number of stariwell, need for natural stack effect on infiltration"
try: nbStairwell = int(getDBValue(DB.properties, DBL['nbStairwell_key']))
except: nbStairwell=0
nbStairwell = checkLim(nbStairwell,DBL['nbStairwell_lim'][0],DBL['nbStairwell_lim'][1])
return nbStairwell
def getnbBasefloor(self,DB, DBL):
"Get the number of floor below ground"
try: nbBasefloor = int(getDBValue(DB.properties, DBL['nbBasefloor_key']))
except: nbBasefloor = 0
nbBasefloor = checkLim(nbBasefloor,DBL['nbBasefloor_lim'][0],DBL['nbBasefloor_lim'][1])
return nbBasefloor
def getyear(self,DB, DBL):
"Get the year of construction in the input file"
try: year = int(getDBValue(DB.properties, DBL['year_key']))
except: year = 1900
year = checkLim(year,DBL['year_lim'][0],DBL['year_lim'][1])
return year
    def getEPCMeters(self,DB,EPC,LogFile):
        "Get the EPC meters values"
        # Returns {category: {meter_name: value}} for every '*_key' entry of
        # the EPC mapping found in DB.properties.
        Meters = {}
        for key1 in EPC:
            Meters[key1] = {}
            for key2 in EPC[key1]:
                if '_key' in key2:
                    try:
                        # First store the raw property value, then overwrite it
                        # with the value converted to int and scaled by the
                        # matching '<name>COP' coefficient. If the conversion or
                        # the COP lookup fails, the raw value (when fetched)
                        # remains -- appears to be a deliberate fallback; a
                        # missing property leaves the meter out entirely.
                        Meters[key1][key2[:-4]] = DB.properties[EPC[key1][key2]]
                        Meters[key1][key2[:-4]] = int(DB.properties[EPC[key1][key2]])*EPC[key1][key2[:-4]+'COP']
                    except:
                        pass
        return Meters
def getnbAppartments(self, DB, DBL):
"Get the number of appartment in the building"
try: nbApp = int(getDBValue(DB.properties, DBL['nbAppartments_key']))
except: nbApp = 0
nbApp = checkLim(nbApp,DBL['nbAppartments_lim'][0],DBL['nbAppartments_lim'][1])
return nbApp
def getheight(self, DB, DBL):
"Get the building height from the input file, but not used if 3D coordinates in the footprints"
try: height = int(getDBValue(DB.properties, DBL['height_key']))
except: height = 0
height = checkLim(height,DBL['height_lim'][0],DBL['height_lim'][1])
return height
    def getAggregatedFootprint(self):
        # Merge all bloc footprints into one closed external outline by
        # repeatedly unioning any remaining bloc that touches the aggregate.
        # NOTE(review): if some bloc never intersects the aggregate, idx runs
        # past the end of RemainingBlocs (IndexError). Presumably adjacency is
        # guaranteed by the upstream CheckMultiBlocFootprint -- confirm.
        # lets compute the aggregaded external footprint of the different blocs
        # starting with the first one
        AggregFootprint = self.footprint[0]
        RemainingBlocs = self.footprint[1:]
        idx = 0
        while RemainingBlocs:
            Intersectionline = Polygon(AggregFootprint).intersection(Polygon(RemainingBlocs[idx]))
            # a pure Point contact does not count as shared boundary
            if Intersectionline and type(Intersectionline) != Point:
                AggregFootprint = list(Polygon(AggregFootprint).union(Polygon(RemainingBlocs[idx])).exterior.coords)
                RemainingBlocs.remove(RemainingBlocs[idx])
                idx = 0  # restart scan: the grown outline may now touch earlier blocs
            else:
                idx += 1
        # in order to close the loop if not already done
        # NOTE(review): when no union occurred, AggregFootprint still aliases
        # self.footprint[0], so this append mutates the first bloc's footprint.
        if AggregFootprint[0] != AggregFootprint[-1]:
            AggregFootprint.append(AggregFootprint[0])
        return AggregFootprint
def getshade(self, DB,Shadingsfile,Buildingsfile,GE,LogFile,PlotOnly = True):
"Get all the shading surfaces to be build for surrounding building effect"
shades = {}
try:
shadesID = DB.properties[GE['ShadingIdKey']]
except:
return shades
ModifiedShadeVertexes ={'ShadeId' : [], 'OldCoord': [], 'NewCoord' : []} #this dict will log the changes in the vertex coordinate to adjust other shading if necesseray afterward
RelativeAgregFootprint = [(node[0] - self.RefCoord[0], node[1] - self.RefCoord[1]) for node in self.AggregFootprint]
Meancoordx = list(Polygon(RelativeAgregFootprint).centroid.coords)[0][0]
Meancoordy = list(Polygon(RelativeAgregFootprint).centroid.coords)[0][1]
currentRef = self.getRefCoord()
ref = (0, 0) if currentRef==self.RefCoord else self.RefCoord
idlist = [-1]
for m in re.finditer(';', | |
import argparse
import multiprocessing
from queue import Empty
import random
import time
import os
import re
import glob
import pickle
import numpy as np
import time
import pommerman
from pommerman.agents import BaseAgent, SimpleAgent
from pommerman import constants
from keras.models import Model, load_model, model_from_json
from keras.layers import Input, Conv2D, Flatten, Dense
from keras.callbacks import EarlyStopping
from keras.initializers import RandomNormal
import keras.backend as K
import tensorflow as tf
NUM_AGENTS = 4  # FFA Pommerman is played by four agents
NUM_ACTIONS = len(constants.Action)  # size of the discrete action space
class MCTSNode(object):
    """A single state node of the Monte-Carlo search tree.

    Holds per-action statistics: mean value Q, total value W, visit
    counts N and the prior P from the policy network.
    """
    def __init__(self, p):
        # per-action statistics
        self.Q = np.zeros(NUM_ACTIONS)  # mean action value W/N
        self.W = np.zeros(NUM_ACTIONS)  # accumulated rollout reward per action
        self.N = np.zeros(NUM_ACTIONS, dtype=np.uint32)  # visit counts
        assert p.shape == (NUM_ACTIONS,)
        self.P = p  # prior action probabilities from the policy network

    def action(self):
        """Select the action maximizing Q + U (PUCT rule).

        Ties are broken uniformly at random so a fresh node (all scores
        equal) does not always pick action 0.
        """
        U = args.mcts_c_puct * self.P * np.sqrt(np.sum(self.N)) / (1 + self.N)
        score = self.Q + U
        best = np.flatnonzero(score == np.max(score))
        return int(np.random.choice(best))

    def update(self, action, reward):
        """Fold one rollout reward for `action` into the running statistics."""
        self.W[action] += reward
        self.N[action] += 1
        self.Q[action] = self.W[action] / self.N[action]

    def probs(self, temperature=1):
        """Visit-count distribution; temperature 0 yields a one-hot argmax."""
        if temperature == 0:
            p = np.zeros(NUM_ACTIONS)
            p[np.argmax(self.N)] = 1
            return p
        else:
            Nt = self.N ** (1.0 / temperature)
            return Nt / np.sum(Nt)
class MCTSAgent(BaseAgent):
    """Pommerman agent that plans with Monte-Carlo tree search guided by a
    policy/value network, and can generate self-play training data.

    In train mode the agent owns its own environment (self.env) in which it
    both performs MCTS rollouts and steps the real episode.
    """
    def __init__(self, model_file=None, train=False, agent_id=0):
        super().__init__()
        self.agent_id = agent_id  # this agent's position in the env's agent list
        if train:
            self.env = self.make_env()
        if model_file is None:
            self.model = make_model()
        else:
            self.model = load_model(model_file)
        self.reset_tree()
    def make_env(self):
        """Create an FFA environment with this agent plus SimpleAgent opponents."""
        agents = []
        for agent_id in range(NUM_AGENTS):
            if agent_id == self.agent_id:
                agents.append(self)
            else:
                agents.append(SimpleAgent())
        return pommerman.make('PommeFFACompetition-v0', agents)
    def reset_tree(self):
        """Drop the search tree and the per-rollout statistics."""
        self.tree = {}  # maps serialized game state (JSON string) -> MCTSNode
        # for statistics
        self.hit_probs = []
        self.avg_lengths = []
        self.entropies = []
        self.iters_sec = []
    def observation_to_features(self, obs):
        """Convert one raw observation dict into a (board, board, 17) feature stack.

        Planes: 9 board-item bitmaps, bomb blast strength, bomb life,
        ammo/blast-strength/can-kick broadcast over the board, own position,
        teammate bitmap, merged enemies bitmap.
        """
        # TODO: history of n moves?
        board = obs['board']
        # convert board items into bitmaps
        maps = [board == i for i in range(1, 10)]
        maps.append(obs['bomb_blast_strength'])
        maps.append(obs['bomb_life'])
        # duplicate ammo, blast_strength and can_kick over entire map
        maps.append(np.full(board.shape, obs['ammo']))
        maps.append(np.full(board.shape, obs['blast_strength']))
        maps.append(np.full(board.shape, obs['can_kick']))
        # add my position as bitmap
        position = np.zeros(board.shape)
        position[obs['position']] = 1
        maps.append(position)
        # add teammate
        if obs['teammate'] is not None:
            maps.append(board == obs['teammate'].value)
        else:
            maps.append(np.zeros(board.shape))
        # add enemies
        enemies = [board == e.value for e in obs['enemies']]
        maps.append(np.any(enemies, axis=0))
        return np.stack(maps, axis=2)
    def search(self, root, num_iters, temperature=1):
        """Run `num_iters` MCTS simulations from game state `root`.

        Each simulation walks the tree by the PUCT rule until it reaches a
        state not yet in the tree, expands that leaf with the network's
        priors, and backs the (discounted) rollout reward up the trace.
        Returns the visit-count action distribution of the root.

        NOTE(review): this resets and re-steps self.env, then restores it via
        set_json_info(); it must not run concurrently with the real episode.
        """
        # remember current game state
        self.env._init_game_state = root
        root = str(self.env.get_json_info())
        # for statistics
        hits = 0
        misses = 0
        total_length = 0
        start_time = time.time()
        for i in range(num_iters):
            # restore game state to root node
            obs = self.env.reset()
            print('\rStep %d: iteration %d' % (self.env._step_count, i + 1), end=' ')
            # serialize game state
            state = str(self.env.get_json_info())
            trace = []
            done = False
            while not done:
                if state in self.tree:
                    node = self.tree[state]
                    # choose actions based on Q + U
                    action = node.action()
                    trace.append((node, action))
                    #print("Action from tree:", constants.Action(action).name)
                    hits += 1
                else:
                    # initialize action probabilities with policy network
                    feats = self.observation_to_features(obs[self.agent_id])
                    feats = feats[np.newaxis, ...]
                    probs, values = self.model.predict(feats)
                    probs = probs[0]
                    reward = values[0, 0]
                    # add Dirichlet noise to root node for added exploration
                    # Hex people didn't find it necessary
                    #if len(trace) == 0:
                    #    noise = np.random.dirichlet([args.mcts_dirichlet_alpha] * len(probs))
                    #    probs = (1 - args.mcts_dirichlet_epsilon) * probs + args.mcts_dirichlet_epsilon * noise
                    # add new node to the tree
                    self.tree[state] = MCTSNode(probs)
                    misses += 1
                    #print("Leaf node")
                    # stop at leaf node
                    break
                # ensure we are not called recursively
                assert self.env.training_agent == self.agent_id
                # make other agents act
                actions = self.env.act(obs)
                # add my action to list of actions
                actions.insert(self.agent_id, action)
                # step environment forward
                obs, rewards, done, info = self.env.step(actions)
                reward = rewards[self.agent_id]
                #print("Rewards:", rewards)
                state = str(self.env.get_json_info())
            total_length += len(trace)
            #print("Finished rollout, length:", len(trace))
            #print("Backpropagating reward:", reward)
            # update tree nodes with rollout results
            # (reward is either the env reward at a terminal state or the
            # network's value estimate at the expanded leaf)
            for node, action in reversed(trace):
                node.update(action, reward)
                reward *= args.discount
            #print("Root Q:")
            #print(self.tree[root].Q)
            #print("Root N:")
            #print(self.tree[root].N)
        print(self.tree[root].N, self.tree[root].Q, end='')
        #print("(tree hits: %0.2f, avg. len: %0.2f, tree size: %d)" % (hits / (hits + misses), total_length / num_iters, len(self.tree)))
        elapsed = time.time() - start_time
        self.iters_sec.append(num_iters / elapsed)
        self.hit_probs.append(hits / (hits + misses))
        self.avg_lengths.append(total_length / num_iters)
        # reset env back where we were
        self.env.set_json_info()
        self.env._init_game_state = None
        # return action probabilities
        pi = self.tree[root].probs(temperature)
        print()
        idx = (pi != 0)
        self.entropies.append(-np.sum(pi[idx] * np.log(pi[idx])))
        return pi
    def rollout(self, shared_buffer, finished):
        """Play one self-play episode, choosing every move by MCTS.

        Pulls fresh network weights (pickled) from `shared_buffer` each step
        and stops early when the shared `finished` flag is set.
        Returns (trace of (features, pi) pairs, own reward, all rewards).
        """
        # reset search tree in the beginning of each rollout
        self.reset_tree()
        # guarantees that we are not called recursively
        # and episode ends when this agent dies
        self.env.training_agent = self.agent_id
        obs = self.env.reset()
        trace = []
        done = False
        while not done and not finished.value:
            if args.render:
                self.env.render()
            # copy weights from trainer
            self.model.set_weights(pickle.loads(shared_buffer.raw))
            # use temperature 1 for first 30 steps and temperature 0 afterwards
            #temp = 0 if self.env._step_count < 30 else 0
            # TODO: only works when agent has access to the env
            root = self.env.get_json_info()
            # do Monte-Carlo tree search
            pi = self.search(root, args.mcts_iters, args.temperature)
            # sample action from probabilities
            action = np.random.choice(NUM_ACTIONS, p=pi)
            # record observations and action probabilities
            feats = self.observation_to_features(obs[self.agent_id])
            trace.append((feats, pi))
            # ensure we are not called recursively
            assert self.env.training_agent == self.agent_id
            # make other agents act
            actions = self.env.act(obs)
            # add my action to list of actions
            actions.insert(self.agent_id, action)
            # step environment
            obs, rewards, done, info = self.env.step(actions)
            assert self == self.env._agents[self.agent_id]
            print("Agent:", self.agent_id, "Step:", self.env._step_count, "Actions:", [constants.Action(a).name for a in actions], "Probs:", [round(p, 2) for p in pi], "Entropy: %.2f" % self.entropies[-1], "Iters/s: %.2f" % self.iters_sec[-1], "Rewards:", rewards, "Done:", done)
        #print("Rollout finished:", finished.value)
        reward = rewards[self.agent_id]
        #print("Agent:", self.agent_id, "Reward:", reward, "Len trace:", len(trace))
        return trace, reward, rewards
    def act(self, obs, action_space):
        """Greedy play from the raw policy network (no tree search)."""
        obs = self.observation_to_features(obs)
        obs = np.array([obs])
        probs, reward = self.model.predict(obs)
        probs = probs[0]
        return np.argmax(probs)
        # NOTE(review): the line below is unreachable (after return) and
        # references an undefined `pi`; kept as a disabled alternative.
        # sample action from probabilities
        #return np.random.choice(NUM_ACTIONS, p=pi)
class ReplayMemory(object):
    """Fixed-capacity ring buffer of (observation, policy, value) samples.

    Once full, the oldest samples are overwritten in FIFO order.
    """
    def __init__(self, size=100000):
        # Pre-allocate storage; slots beyond `count` hold garbage until written.
        self.observations = np.empty((size, constants.BOARD_SIZE, constants.BOARD_SIZE, 17))
        self.action_probs = np.empty((size, NUM_ACTIONS))
        self.state_values = np.empty((size,))
        self.size = size
        self.current = 0  # next write slot
        self.count = 0    # number of valid samples
    def add_sample(self, obs, pi, z):
        """Store one sample, overwriting the oldest slot when full."""
        slot = self.current
        self.observations[slot] = obs
        self.action_probs[slot] = pi
        self.state_values[slot] = z
        self.current = (slot + 1) % self.size
        self.count = min(self.count + 1, self.size)
    def dataset(self):
        """Return (observations, action_probs, state_values) views of the filled part."""
        n = self.count
        return self.observations[:n], self.action_probs[:n], self.state_values[:n]
def make_model():
    """Build and compile the two-headed policy/value network.

    Input: a (BOARD_SIZE, BOARD_SIZE, 17) feature stack (see
    MCTSAgent.observation_to_features). Outputs: a "policy" softmax over the
    action space and a "value" tanh scalar in [-1, 1].
    """
    c = x = Input(shape=(constants.BOARD_SIZE, constants.BOARD_SIZE, 17))
    # shared convolutional trunk
    for i in range(args.conv_layers):
        c = Conv2D(args.conv_filters, args.conv_filter_size, activation='relu', padding='valid')(c)
    h = Flatten()(c)
    for i in range(args.hidden_layers):
        h = Dense(args.hidden_nodes, activation='relu')(h)
    # policy head
    hp = h
    for i in range(args.policy_hidden_layers):
        hp = Dense(args.hidden_nodes, activation='relu')(hp)
    # NUM_ACTIONS instead of a hard-coded 6 so the head always matches the
    # action space used everywhere else in this file
    p = Dense(NUM_ACTIONS, activation='softmax', kernel_initializer=RandomNormal(0.0, 0.001), name="policy")(hp)
    # value head
    hv = h
    for i in range(args.value_hidden_layers):
        hv = Dense(args.hidden_nodes, activation='relu')(hv)
    v = Dense(1, activation='tanh', kernel_initializer=RandomNormal(0.0, 0.001), name="value")(hv)
    model = Model(x, [p, v])
    model.compile(optimizer='adam', loss=['categorical_crossentropy', 'mse'])
    return model
def init_tensorflow():
    """Configure the (TF1.x-style) session so GPU memory grows on demand."""
    # make sure TF does not allocate all memory
    # NB! this needs to be done also in subprocesses!
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=config))
def runner(id, model_file, shared_buffer, fifo, finished, _args):
    """Subprocess entry point: generate self-play episodes until `finished`.

    Each completed rollout (trace, rewards and tree statistics) is pushed to
    `fifo` for the trainer process; network weights are read back from
    `shared_buffer` inside MCTSAgent.rollout.
    """
    import sys
    # reattach stdin in the child process -- presumably to allow interactive
    # debugging from a subprocess; confirm it is still needed
    sys.stdin = open("/dev/stdin", "r")
    # make args accessible to MCTSAgent
    global args
    args = _args
    # initialize tensorflow
    init_tensorflow()
    # make sure agents play at all positions
    agent_id = id % NUM_AGENTS
    agent = MCTSAgent(model_file, train=True, agent_id=agent_id)
    while not finished.value:
        # do rollout
        trace, reward, rewards = agent.rollout(shared_buffer, finished)
        # don't put last trace into fifo
        if finished.value:
            break
        # add data samples to training set
        fifo.put((trace, reward, rewards, agent_id, agent.hit_probs, agent.avg_lengths, len(agent.tree), agent.entropies, agent.iters_sec))
        #print("Runner finished:", finished.value)
    #print("Runner done")
def trainer(num_episodes, fifos, shared_buffer, model, memory, writer):
callbacks = [EarlyStopping(monitor='val_loss', min_delta=0.001, patience=5, verbose=1, mode='auto')]
while num_episodes < args.num_episodes:
while True:
# pick random fifo (agent)
fifo = random.choice(fifos)
try:
# wait for a new trajectory and statistics
trace, reward, rewards, agent_id, hit_probs, avg_lengths, tree_size, entropies, iters_sec = fifo.get(timeout=args.queue_timeout)
# break out of the infinite loop
break
except Empty:
# just ignore empty fifos
pass
num_episodes += 1
# add samples to replay memory
# TODO: add_batch would be more efficient?
for obs, pi in trace:
memory.add_sample(obs, pi, reward)
add_summary(writer, "tree/size", tree_size, num_episodes)
add_summary(writer, "tree/mean_hit_prob", float(np.mean(hit_probs)), num_episodes)
add_summary(writer, "tree/mean_rollout_len", float(np.mean(avg_lengths)), num_episodes)
add_summary(writer, "tree/iters_sec", float(np.mean(iters_sec)), num_episodes)
add_histogram(writer, "tree/hit_probability", hit_probs, num_episodes)
add_histogram(writer, "tree/rollout_length", avg_lengths, num_episodes)
add_histogram(writer, "tree/entropies", entropies, num_episodes)
add_summary(writer, "episode/mean_entropy", float(np.mean(entropies)), num_episodes)
add_summary(writer, "episode/reward", reward, num_episodes)
add_summary(writer, "episode/length", len(trace), num_episodes)
add_summary(writer, "rewards/agent_id", agent_id, num_episodes)
for i in range(len(rewards)):
add_summary(writer, "rewards/agent%d" % i, rewards[i], num_episodes)
add_summary(writer, "replay_memory/size", memory.size, num_episodes)
add_summary(writer, "replay_memory/count", memory.count, num_episodes)
add_summary(writer, "replay_memory/current", memory.current, num_episodes)
#print("Replay memory size: %d, count: %d, current: %d" % | |
# repo: oholsen/hagedag
import asyncio
import math
from math import sin, cos, pi
import cmath
from dataclasses import dataclass, replace
import logging
import time
import random
import RobotMessages
import RobotModel
from state import State
from PID import PID
from Plotting import Plot
from abc import ABC
from shapely.geometry import Point, Polygon, MultiPolygon, LineString, JOIN_STYLE
from shapely.ops import nearest_points
log = logging.getLogger(__name__)
class Control(ABC):
    """Base class for motion controllers.

    update() returns a (speed, omega) command for the given time and state;
    end() reports whether the controller has finished.
    """
    def update(self, t: float, state: State): # -> (speed, omega)
        # Default: no command.
        return None, None
    def end(self, t: float, state) -> bool:
        # Default: never finishes on its own.
        return False
class GetStateControl(Control):
    """Terminates immediately just to get current state"""
    def update(self, t: float, state: State): # -> (speed, omega)
        # end() is True from the start, so update() must never be reached.
        assert False
    def end(self, t: float, state) -> bool:
        return True
class CompositeControl2(Control):
    """Use sequence of controls, end() on the current control triggers using next control"""
    def __init__(self, controls):
        # NOTE(review): .send() is used in update(), so `controls` must be a
        # generator (iter() on a generator returns the generator itself).
        self.controls = iter(controls)
        self.control = next(self.controls)
        log.info("Starting %s", self.control)
    def update(self, t: float, state: State): # -> (speed, omega)
        """Delegate to the active control, advancing past any finished ones."""
        assert self.control is not None
        # log.debug("Composite update %s %f %s", self.control, t, state)
        while self.control.end(t, state):
            # log.debug("Composite end %s %f %s", self.control, t, state)
            try:
                # Feed (t, state) back into the generator so it can choose the
                # next control based on the state the previous one ended in.
                self.control = self.controls.send((t, state))
            except (StopAsyncIteration, StopIteration):
                log.info("Stopped CompositeControl2")
                self.control = None
                return None, None
            log.info("Starting %s %s", t, self.control)
        return self.control.update(t, state)
    def end(self, t: float, state) -> bool:
        return self.control is None
class StraightControl(Control):
    """Drive straight ahead at a fixed speed until the end predicate fires."""

    def __init__(self, speed: float, end):
        self.speed = speed
        self._end = end  # termination predicate (t, state) -> bool

    def __str__(self):
        return f"StraightControl({self.speed})"

    def update(self, t: float, state: State):  # -> (speed, omega)
        # No turning: constant forward speed, zero angular rate.
        return self.speed, 0

    def end(self, t: float, state: State) -> bool:
        return self._end(t, state)
def norm_angle(a: float) -> float:
    """Normalize an angle to the half-open interval (-pi, pi].

    Equivalent to repeatedly adding/subtracting 2*pi (including mapping
    -pi to +pi), but O(1) even for angles many turns away from zero.
    """
    if -pi < a <= pi:
        return a
    # k = ceil((a - pi) / (2*pi)) is the unique integer with a - 2*pi*k in (-pi, pi]
    return a - 2 * pi * math.ceil((a - pi) / (2 * pi))
class HLineControl(Control):
    """Follow the horizontal line y = const, moving in +x (right=True) or -x,
    until x passes end_x.

    The target heading bends smoothly toward the line: ~90 degrees when far
    from it, tangent to it when on it.
    """
    def __init__(self, y: float, right: bool, end_x: float, speed=0.2, omega=0.2):
        self.pid = PID(0.4, 1, 1.5, 0.2)  # cross-track PID (currently disabled in update)
        self.y = y            # line to follow
        self.right = right    # True: travel in +x, False: in -x
        self.end_x = end_x    # stop once x passes this value (direction-dependent)
        self.speed = speed    # cruise speed cap
        self.omega = omega    # angular rate cap
        self.t_last = None    # time of previous update (for the PID dt)
    def __str__(self):
        return f"HLineControl({self.right},{self.end_x})"
    def reset(self):
        """Clear the PID integrator state."""
        self.pid.clear()
    def update(self, t: float, state: State): # -> (speed, omega)
        """Return (speed, omega) steering toward and then along the line."""
        d = self.y - state.y  # signed cross-track error
        # angle is 90 for large d
        theta = (pi/2) * (1 - math.exp(-(d/0.1)**2))
        if d < 0:
            theta = -theta
        if not self.right:
            theta = pi - theta
        dtheta = norm_angle(theta - state.theta)  # heading error
        # reduce speed if theta is very wrong, 1 at 0, 0.2 at pi/2
        # also reduce speed if will overshoot in one iteration:
        # sin(theta) * speed * 1s <= abs(d)
        # speed <= abs(d) / sin(theta)
        speed = self.speed * math.exp(-abs(4*dtheta)**2)
        if math.sin(theta) > 0:
            speed = min(speed, abs(d) / math.sin(theta) + 0.02)
        omega = math.copysign(min(abs(dtheta) + 0.02, self.omega), dtheta)
        domega = 0
        # NOTE(review): the PID correction below is deliberately disabled via
        # `if False and ...`; remove the flag (and the discontinuity it warns
        # about) or delete the branch once a decision is made.
        if False and self.t_last is not None and abs(d) < 0.4:
            # only use PID if near the line
            # TODO: remove this discontinuity
            domega = -self.pid.update(d, t - self.t_last)
            if self.right:
                domega = -domega
        else:
            # TODO: reset PID? or update PID without using result?
            pass
        #print(t, d, theta, state.theta, dtheta, speed, domega)
        self.t_last = t
        return speed, omega + domega
    def end(self, t: float, state: State) -> bool:
        # finished once x has passed end_x in the direction of travel
        return self.right == (state.x >= self.end_x)
class LineControl(Control):
    """Follow the line segment p0 -> p1, terminating near p1.

    Points are handled as complex numbers; the end condition compares the
    distance to p0 with the distance to p2 = p1 mirrored-through... i.e. the
    controller stops once the robot is closer to the far side than the start.
    """
    def __init__(self, p0, p1, speed, omega):
        self.line = LineString([p0, p1])  # kept for __str__ / plotting
        self.p0 = complex(*p0)
        self.p1 = complex(*p1)
        self.dp = self.p1 - self.p0       # direction vector of the segment
        self.p2 = self.p1 + self.dp # p2 is p0 mirrored around p1
        self.theta = cmath.phase(self.dp) # heading of the line
        self.speed = speed                # cruise speed cap
        self.omega = omega                # angular rate cap
        self._end = nearest(self.p0.real, self.p0.imag, self.p2.real, self.p2.imag)
    def __str__(self):
        return f"LineControl({self.line})"
    def update(self, t: float, state: State): # -> (speed, omega)
        """Return (speed, omega) steering onto and then along the segment."""
        p = complex(state.x, state.y)
        # cross product: gives distance and which side
        dp = p - self.p0
        d = dp.real * self.dp.imag - dp.imag * self.dp.real
        d /= abs(self.dp)  # signed perpendicular distance to the line
        # angle wrt line. dtheta is 90 for large d
        dtheta = (pi/2) * (1 - math.exp(-(d/0.2)**2))
        # limit overshoot - could account for omega too,
        # find arc to hit line
        speed = math.inf
        if math.sin(dtheta) > 0:
            speed = abs(d) / math.sin(dtheta) + 0.02
        # which side is it on????
        if d < 0:
            dtheta = -dtheta
        dtheta = norm_angle(self.theta + dtheta - state.theta)  # heading error
        # print("theta", self.theta, state.theta, dtheta)
        # reduce speed if theta is very wrong, 1 at 0, 0.2 at pi/2
        speed = min(speed, self.speed * math.exp(-abs(4*dtheta)**2))
        # also reduce speed to not overshoot crossing line
        d1 = abs(p - self.p1)  # distance to the segment's far end
        if d1 < 2 * state.speed:
            speed = min(d1/4 + 0.02, speed)
        omega = math.copysign(min(abs(dtheta), self.omega), dtheta)
        # print(t, d, theta, state.theta, dtheta, speed)
        return speed, omega
    def end(self, t: float, state: State) -> bool:
        #return False
        return self._end(t, state)
class PointControl(Control):
    """Steer toward a fixed target point until the end predicate fires."""

    def __init__(self, x: float, y: float, speed: float, omega: float, end):
        self.x = x            # target x
        self.y = y            # target y
        self.speed = speed    # cruise speed cap
        self.omega = omega    # angular rate cap
        self._end = end       # termination predicate (t, state) -> bool

    def __str__(self):
        return f"PointControl({self.x},{self.y})"

    def update(self, t: float, state: State):  # -> (speed, omega)
        ex = self.x - state.x
        ey = self.y - state.y
        bearing = math.atan2(ey, ex)
        heading_err = norm_angle(bearing - state.theta)
        # Slow sharply when pointing the wrong way (gaussian falloff in error).
        speed = self.speed * math.exp(-abs(heading_err / 0.1) ** 2)
        dist = math.hypot(ex, ey)
        if dist < 2 * speed:
            # Within roughly two update periods of the target: creep in to
            # avoid overshooting at the ~1 s update rate.
            speed = min(speed, dist / 3 + 0.05)
        # Turn toward the target, capped at the configured angular rate.
        omega = math.copysign(min(abs(heading_err), self.omega), heading_err)
        return speed, omega

    def end(self, t: float, state: State) -> bool:
        return self._end(t, state)
def distance(x, y, r):
    """Build an end-predicate that fires when the robot is within r of (x, y).

    Args:
        x, y: target point coordinates.
        r: trigger radius.

    Returns:
        Callable ``(t, state) -> bool`` returning True once the robot at
        (state.x, state.y) is closer than r to the target.
    """
    # Fixed: the inner function used to be named `d` and then shadowed
    # itself with a local `d`; it also carried a `State` annotation that
    # made the closure unusable without that type in scope.
    def end(t, state):
        return math.hypot(x - state.x, y - state.y) < r
    return end
def nearest(x0, y0, x1, y1):
    """Build an end-predicate that fires once (x1, y1) is at least as close
    to the robot as (x0, y0) — i.e. the robot has passed the midline
    between the two points.

    Returns:
        Callable ``(t, state) -> bool``.
    """
    def end(t, state):
        # Compare squared distances: sqrt is monotonic, so this is
        # equivalent to comparing math.hypot() results but cheaper.
        d0_sq = (x0 - state.x) ** 2 + (y0 - state.y) ** 2
        d1_sq = (x1 - state.x) ** 2 + (y1 - state.y) ** 2
        return d1_sq <= d0_sq
    return end
class TimeControl(Control):
    """Drive at a fixed (speed, omega) for a set duration.

    The clock starts on the first update() call, not at construction.
    """

    def __init__(self, speed: float, omega: float, time: float):
        self.speed = speed
        self.omega = omega
        self.time = time  # seconds
        self.t0 = None  # set on the first update() call

    def __str__(self):
        return f"TimeControl({self.time})"

    def update(self, t: float, state: State):  # -> (speed, omega)
        if self.t0 is None:
            # remember when motion actually started
            self.t0 = t
        return self.speed, self.omega

    def end(self, t: float, state: State) -> bool:
        """Done once `time` seconds have elapsed since the first update."""
        if self.t0 is None:
            return False
        return t >= self.t0 + self.time
class TimeControl2(Control):
    """Drive at a fixed (speed, omega) until an absolute end time."""

    def __init__(self, speed: float, omega: float, end_time: float):
        self.speed = speed
        self.omega = omega
        self.end_time = end_time

    def __str__(self):
        return f"TimeControl2"

    def update(self, t: float, state: State):  # -> (speed, omega)
        # constant command; termination is handled entirely by end()
        return self.speed, self.omega

    def end(self, t: float, state: State) -> bool:
        """Done once the clock reaches the absolute end time."""
        return t >= self.end_time
def start_arc(radius, speed, omega, direction):
    """Clamp (speed, omega) so an arc of the given radius is drivable.

    Keeps the pair consistent with omega = speed / radius: whichever of
    the requested speed or omega is the binding constraint wins and the
    other is derived from it.  `direction` False flips the turn sign.

    Returns:
        (speed, omega) tuple consistent with the requested radius.
    """
    # speed and omega may be too high for outer motor in tight turn;
    # outer motor speed is speed + omega * wheelbase / 2
    turn_rate = speed / radius
    if turn_rate < omega:
        omega = turn_rate
    else:
        speed = omega * radius
    # sanity-check the outer wheel against the robot's speed limit
    outer_speed = speed + omega * RobotModel.WHEEL_BASE / 2
    assert outer_speed < RobotModel.MAX_SPEED
    if not direction:
        omega = -omega
    return speed, omega
class ArcControl(Control):
    """Follow an arc at fixed (speed, omega) until the heading first
    crosses `end_theta`."""

    def __init__(self, speed, omega, end_theta):
        self.end_theta = end_theta
        self.dtheta_last = None  # heading error seen on the previous end() call
        self.speed = speed
        self.omega = omega

    def __str__(self):
        return f"ArcControl({self.end_theta})"

    def update(self, t: float, state: State):  # -> (speed, omega)
        # TODO: slow down (both!) near end state such that it just overshoots
        # zero crossing on the next update
        return self.speed, self.omega

    def end(self, t: float, state: State) -> bool:
        """Terminate at the first sign change of the heading error.

        Requiring |error| < 0.9 rejects spurious +-pi wrap-around jumps.
        """
        err = norm_angle(state.theta - self.end_theta)
        if self.dtheta_last is None:
            # first sample: only record, never terminate immediately
            self.dtheta_last = err
            return False
        if abs(err) < 0.9:
            crossed_up = self.dtheta_last < 0 and err >= 0
            crossed_down = self.dtheta_last > 0 and err <= 0
            if crossed_up or crossed_down:
                return True
        self.dtheta_last = err
        return False
def LineTest(xl, xr, y0):
end_x = {True: xr, False: xl}
speed_max = 0.20
omega_max | |
<filename>edl/kegg.py
#! /usr/bin/python
"""
Library of methods and regular expressions for parsing files from the
KEGG gene ontology
"""
import logging
import os
import re
import sys
logger = logging.getLogger(__name__)
##############
# Classes #
##############
################
# compiled REs #
################
# Any KO identifier (K followed by 5 digits) appearing as a whole word.
koRE = re.compile(r'\b(K\d{5})\b')
# ENTRY line of a record in the "ko" flat file; captures the KO id.
kokoRE = re.compile(r'^ENTRY\s+(K\d{5})\b')
# A line starting with a non-space marks the end of the current section.
endSectionRE = re.compile(r'^\S')
# DEFINITION line; captures the definition text.
definitionRE = re.compile(r'^DEFINITION\s+(\S.*\S)\s*$')
# CLASS line; captures the class text.
classRE = re.compile(r'^CLASS\s+(\S.*)$')
# EC number embedded in a DEFINITION line, e.g. "[EC:1.1.1.1]".
ecRE = re.compile(r'^DEFINITION.*\[(EC:[-0-9\.]+)\]')
# GENES line that opens the per-organism gene listing.
genesRE = re.compile(r'^GENES\s+(\S.*\S)\s*$')
# Trailing "[...]" annotation at the end of a line.
trailingBracketRE = re.compile(r'\s*\[[^\[\]]+\]\s*$')
# "D" rows of .keg BRITE files; captures KO id and trailing description
# (tolerates an optional closing </a> from HTML-derived files).
kegkoRE = re.compile(r'^D\s+.+\b(K\d{5})(?:<\/a>)?\s*(.*)$')
# Generic BRITE row: level letter, KO id, description.
britekoRE = re.compile(r'^[A-Z]\s+(K\d{5})\s*(.*)$')
# Individual gene tokens following whitespace in a GENES string.
geneListRE = re.compile(r'(?<=\s)([a-zA-Z0-9_.-]+)\b')
# Organism prefix at the start of a GENES string, e.g. "HSA:".
orgRE = re.compile('^([A-Z]{3,4}):')
# COG description text with its trailing "[category]" group.
cogGroupRE = re.compile(r'(.+)\[(.+)\]')
# "whog"-style row: [category] COGid description.
cogMapRE = re.compile(r'^\[(\S+)\]\s+(\S+)\s+(\S.+)$')
#############
# Functions #
#############
def readSEEDTree(treeFile):
    """Parse the SEED hierarchy file into per-level role->name maps.

    The file is tab separated with 4 columns:
    "role\tsubsystem\tlevel 2\t level 1"

    Returns:
        dict keyed by level ('1', '2', '3'), each mapping role -> name.
    """
    levels = {'1': {}, '2': {}, '3': {}}
    with open(treeFile) as handle:
        for row in handle:
            role, level3, level2, level1 = row.rstrip().split('\t')
            levels['1'][role] = level1
            levels['2'][role] = level2
            levels['3'][role] = level3
    return levels
def readCogTree(mapFile):
    """Parse the CDD-to-COG map file.

    Returns:
        dict with keys 'gene', 'description' and 'group' (plus alias '3'),
        each mapping a CDD id to the COG id, description text, or list of
        COG category names respectively.
    """
    cogMap = {'gene': {}, 'description': {}, 'group': {}}
    with open(mapFile) as handle:
        for row in handle:
            cdd, cog, gene, description, count = row.rstrip().split('\t')
            # the description text ends with a "[CATEGORY]" suffix
            description, group = cogGroupRE.match(description).groups()
            cogMap['gene'][cdd] = cog
            cogMap['description'][cdd] = description
            # categories may be slash-separated; collapse internal runs of spaces
            categories = [re.sub(' +', ' ', g.strip()) for g in group.split("/")]
            cogMap['group'][cdd] = categories
    # hack to make things work with the previous methods (ie SEED)
    cogMap['3'] = cogMap['group']
    return cogMap
def readCogTreeFromWhog(mapFile):
    """Parse a COG "whog" listing into per-COG description and category.

    Mapped lines look like "[C] COG0001 some description".

    Returns:
        dict with keys 'gene' (COG id -> description) and
        'group' (COG id -> category string).
    """
    cogMap = {'gene': {}, 'group': {}}
    with open(mapFile) as f:
        for line in f:
            # fixed: str.rstrip() returns a new string; the result was
            # previously discarded, leaving the trailing newline in place
            line = line.rstrip()
            # fixed typo: was cogMapRE.maatch(), an AttributeError at runtime
            m = cogMapRE.match(line)
            if m:
                category = m.group(1)
                cog = m.group(2)
                description = m.group(3)
                cogMap['gene'][cog] = description
                cogMap['group'][cog] = category
    return cogMap
def parseSeedMap(mapFile):
    """
    Return a dictionary mapping from accession to a list of subsystems.

    The SEED map (refseq2md52role.gz) starts with two summary lines followed
    by three tab-separated columns (accession\thash/sum\tSubsystem), e.g.:

        Mapped roles:9004(subsys.txt 2 subsystems2peg)
        Unmapped roles:521(s ubsys.txt)
        YP_921846...\t...\tCO dehydrogenases maturation factor, CoxF family
    """
    accMap = {}
    with open(mapFile) as f:
        # skip the two summary lines; fixed: f.next() is Python 2 only,
        # this file otherwise uses Python 3 (f-strings), so use next(f)
        next(f)
        next(f)
        for line in f:
            # split at most twice: the subsystem text may contain tabs-free text
            (acc, code, subsys) = line.rstrip('\r\n').split('\t', 2)
            accMap.setdefault(acc.strip(), []).append(subsys.strip())
    return accMap
def _stripKeggKeyPrefix(key):
    """Drop the database prefix ("ko:", "hsa:", ...) from a KEGG key.

    Only the first ":" is significant; raises IndexError if none present.
    """
    parts = key.split(":", 1)
    return parts[1]
def parseLinkFile(mapFile, stripKeys=False, stripVals=True):
    """
    Parse the gene_ko.list file from KEGG
        hsa:10001       ko:K15128
        hsa:10002       ko:K08546
        hsa:10003       ko:K01301
    with the possibility of duplicate records.

    Returns:
        dict mapping key -> list of values, or None if mapFile is None.
    """
    if mapFile is None:
        return None
    logger.info("parsing map file: %s" % (mapFile))
    translation = {}
    rows = 0
    badRows = 0
    # fixed: the file handle was previously never closed
    with open(mapFile) as handle:
        for line in handle:
            cells = line.split('\t')
            if len(cells) > 1:
                rows += 1
                key = cells[0].strip()
                if stripKeys:
                    key = _stripKeggKeyPrefix(key)
                value = cells[1].strip()
                if stripVals:
                    # strip 'ko:' from start of each value
                    value = _stripKeggKeyPrefix(value)
                # BUG FIX: the old code compared against a lastKey that was
                # never updated, so repeated keys silently overwrote earlier
                # values instead of accumulating them.
                translation.setdefault(key, []).append(value)
            else:
                badRows += 1
    if badRows > 0:
        # logger.warn() is deprecated; use warning()
        logger.warning("%d rows in map file had too few columns!" % (badRows))
    logger.info(
        "Read %d records from %d lines of %s" %
        (len(translation), rows, mapFile))
    return translation
def parseModuleMap(mapFile):
    """Parse a KEGG module link file into a dict.

    Keys are stripped of their database prefix; values are kept verbatim.
    """
    return parseLinkFile(mapFile, stripKeys=True, stripVals=False)
def parseGeneKOMap(koFile):
    """
    Scan a KEGG "ko" flat file and build a map from gene names to KOs.

    Returns:
        dict mapping "org:geneid" -> list of KO ids.
    """
    koMap = {}
    koCount = 0
    inGenes = False
    ko = None  # current KO record; set when an ENTRY line is seen
    logger.info("Reading kos and gene names from %s" % koFile)
    # fixed: the file handle was previously never closed
    with open(koFile) as handle:
        for line in handle:
            # an ENTRY line starts a new KO record
            match = kokoRE.match(line)
            if match:
                ko = match.group(1)
                logger.debug("Start of %s" % (ko))
                koCount += 1
                # fixed: the old guard used getEffectiveLevel() >= DEBUG,
                # which is true for INFO and above too; isEnabledFor() is
                # the correct check (and use the module logger, not root)
                if logger.isEnabledFor(logging.DEBUG):
                    if koCount % 1000 == 0:
                        logger.debug("Parsed %d KOs" % koCount)
                continue
            # look for information on this KO
            if not inGenes:
                # looking for the GENES line that opens the gene section
                match = genesRE.match(line)
                if match:
                    inGenes = True
                    geneString = match.group(1)
                    logger.debug("found genes: %s" % (geneString))
                    _mapGenes(koMap, ko, geneString)
                continue
            # every line is a gene line until further notice
            if not endSectionRE.match(line):
                # not the end of gene section: reading more genes
                geneString = line.strip()
                logger.debug("found genes: %s" % (geneString))
                _mapGenes(koMap, ko, geneString)
            else:
                # found all gene strings
                logger.debug("End of genes")
                inGenes = False
                ko = None
    logger.info("Mapped %d genes to %d kos" % (len(koMap), koCount))
    return koMap
def _mapGenes(koMap, ko, geneString):
    """Register every gene in geneString under KO `ko` in koMap.

    geneString looks like "ORG: geneid(genename) geneid(genename) geneid",
    while ids in KeggGenes look like "org:geneid" — so the organism prefix
    is lowercased and re-attached to each gene id.
    """
    org = orgRE.match(geneString).group(1).lower()
    for gene in geneListRE.findall(geneString):
        kGene = "%s:%s" % (org, gene)
        koMap.setdefault(kGene, []).append(ko)
def parse_KEGG_file(k_file, kegg_level):
    """Dispatch a KEGG metadata file to the right parser.

    Runs parse_ko_file if the base filename is "ko", and parse_keg_file
    if the file extension is ".keg".

    Raises:
        Exception: if the filename matches neither convention.
    """
    base_name = os.path.basename(k_file)
    if base_name == 'ko':
        return parse_ko_file(k_file, kegg_level)
    # idiom fix: use endswith() instead of a manual slice comparison
    elif len(k_file) > 4 and k_file.endswith(".keg"):
        return parse_keg_file(k_file, kegg_level)
    else:
        raise Exception("I don't know what to do with file: %s"
                        % (base_name))
def parse_ko_file(ko_file, level):
    """ Parse KEGG metadata from the ko metadata file:
        level: one of
          * PATH, PATHWAY, or PATHWAYS
          * NAME, DEFINITION, DESCRIPTION, or FUNCTION
          * 1, 2 or 3 (for a level in the BRITE:ko00001 hierarchy)
          * EG: ko00002:2 (for level 2 of BRITE:ko00002)

        Returns a dict mapping each KO id to a string (NAME/DEFINITION)
        or to a list of strings (PATHWAY, BRITE levels).
    """
    # synonyms
    if level in ['PATHWAYS', 'PATH', 'PATHS']:
        level = 'PATHWAY'
    if level in ['DESCRIPTION', 'FUNCTION']:
        level = 'DEFINITION'
    if level == 'EC':
        level = 'ko01000:4'

    # BUG FIX: `results` was never initialized, so every branch below
    # raised NameError on the first record it tried to store.
    results = {}

    if re.match(r'(ko\d\d\d\d\d:)?(\d+)', str(level)):
        # these are in the BRITE heirachy
        # eg: ko00001:2 for level two of the main hierarchy
        brite_hier, brite_level = \
            re.match(r'(?:(ko\d\d\d\d\d):)?(\d+)', str(level)).groups()
        brite_level = int(brite_level)
        if brite_hier is None:
            # if its just a number, assume ko00001
            brite_hier = 'ko00001'
        logger.debug(f"Looking for level {brite_level} in {brite_hier}")

    with open(ko_file) as ko_handle:
        # look for single line per entry
        if level in ['NAME', 'DEFINITION']:
            kw_expr = re.compile(r'^(ENTRY|{})(\s+)(\S.*)?'.format(level))
            try:
                for i, line in enumerate(ko_handle):
                    m = kw_expr.match(line)
                    if m:
                        keyword, spaces, value = m.groups()
                        if keyword == 'ENTRY':
                            # remember which KO the following lines describe
                            ko = value.split()[0].strip()
                        elif keyword == level:
                            results[ko] = value.strip()
            except Exception as exc:
                print(f'Error on line {i}:\n{line}')
                raise exc

        # there can be multiple pathways after and including the PATHWAY line
        elif level == 'PATHWAY':
            kw_expr = re.compile(r'^(ENTRY|{})(\s+)(\S.*)?'.format(level))

            def skip(line, indent, pathways):
                # ignore lines outside a PATHWAY section
                return

            def add_pathway(line, indent, pathways):
                # strip the keyword column and the trailing newline
                pathways.append(line[indent:-1])

            pathways, indent = None, 0
            for line in ko_handle:
                m = kw_expr.match(line)
                if m:
                    keyword, spaces, value = m.groups()
                    if keyword == 'ENTRY':
                        ko = value.split()[0].strip()
                        indent = 5 + len(spaces)
                        process_line = skip
                        continue
                    elif keyword == level:
                        # the PATHWAY line itself carries the first pathway,
                        # so fall through to process_line below
                        process_line = add_pathway
                        pathways = results.setdefault(ko, [])
                    else:
                        process_line = skip
                        continue
                process_line(line, indent, pathways)

        else:
            # BRITE
            entry_rexp = re.compile(r'^ENTRY\s+(K\d+)')
            brite_rexp = \
                re.compile(r'^((?:BRITE)?\s+)(\S.+\S)\s*\[BR:(ko\d+)\]')
            end_brite_rexp = re.compile(r'^\S')
            level_rexp = re.compile(r'^(\s+)(\S.+)')
            lines = iter(enumerate(ko_handle))
            try:
                # outer loop looping over Entries
                while True:
                    # find next Entry line
                    for i, line in lines:
                        m = entry_rexp.match(line)
                        if m:
                            ko = m.group(1)
                            break
                    else:
                        # no more entries
                        break
                    # find start of BRITE
                    for i, line in lines:
                        m = brite_rexp.match(line)
                        if m:
                            spaces, name, hierarchy = m.groups()
                            if hierarchy == brite_hier:
                                brite_indent = len(spaces)
                                brite_levels = results.setdefault(ko, [])
                                break
                    # process BRITE lines
                    for i, line in lines:
                        if end_brite_rexp.match(line) or \
                                brite_rexp.match(line):
                            # start of next hierarchy or next keyword section
                            break
                        spaces, level_name = level_rexp.match(line).groups()
                        # level is number of spaces beyond original indent
                        if len(spaces) - brite_indent == brite_level:
                            brite_levels.append(level_name)
                # end while outer loop
            except StopIteration:
                # I don't think we ever get here
                pass
            except Exception as exc:
                print(f"error on line {i}:\n{line}")
                print(f"found {len(results)} kos so far")
                raise exc

    return results
def parse_keg_file(keg_file, level):
""" Parse KEGG metadata from brite .keg files
level: one of
* PATH, PATHWAY, or PATHWAYS
* 1 - 6 or A - F
* DEFINITION, DESCRIPTION, or FUNCITON
"""
# synonyms
if level in ['PATHWAYS', 'PATHWAY', 'PATHS']:
level = 'PATH'
if level in ['DESCRIPTION', 'FUNCTION']:
level = 'DEFINITION'
if str(level) in {'1', '2', '3', '4', '5', '6'}:
level = 'ABCDEF'[int(level) - 1]
ko_def_rexp = re.compile(r'^[B-F]\s+(K\d\d\d\d\d)\s+(\S.+\S)\s*$')
level_rexp = re.compile(r'^([A-F])\s*(\S.+)')
path_rexp = re.compile(r'\s*\[PATH:\s*ko\d+\s*\]')
html_rexp = re.compile(r'</?[a-z]+/?>')
results = {}
with open(keg_file) as keg_handle:
# two types of parsing
if level == 'DEFINITION':
# just looking for the line with the K# and description
# (ignore hierarchy)
for line in keg_handle:
m = ko_def_rexp.match(line)
if m:
| |
<gh_stars>1-10
#!/usr/bin/env python
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import os
import heapq
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.colors as colors
import matplotlib.cm as cm
from numpy import *
from random import sample, seed, randint
from os.path import getsize as getFileSize
import math
import random
import csv
from cycler import cycler
from io import StringIO
#np.set_printoptions(threshold=np.nan)
from collections import Counter
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import AxesGrid
from astropy import units as u
from astropy import cosmology
import matplotlib.ticker as mtick
import PlotScripts
import ReadScripts
import AllVars
import GalaxyPhotoion as photo
import ObservationalData as Obs
import gnedin_analytic as ga
from mpi4py import MPI
import sys
comm = MPI.COMM_WORLD
rank = comm.Get_rank()  # this process's MPI rank (0 == master)
size = comm.Get_size()  # total number of MPI processes
# Load simulation parameters/constants and plotting defaults.
AllVars.Set_Params_Kali()
AllVars.Set_Constants()
PlotScripts.Set_Params_Plot()
# File extension for all saved figures.
output_format = ".png"
# For the Tiamat extended results there is a weird hump when calculating the escape fraction.
# This hump occurs at a halo mass of approximately 10.3.
# The calculation of fesc skips this hump range (defined from kink_low to kink_high)
kink_low = 10.3
kink_high = 10.30000001
m_low = 7.0 # We only sum the photons coming from halos within the mass range m_low < Halo Mass < m_high
m_high = 15.0
# Galaxy stellar-mass range (log10 units, matching m_low/m_high above).
m_gal_low = 3.0
m_gal_high = 12.0
# Same halo-mass bounds converted to SAGE internal units (1e10 Msun/h).
m_low_SAGE = pow(10, m_low)/1.0e10 * AllVars.Hubble_h
m_high_SAGE = pow(10, m_high)/1.0e10 * AllVars.Hubble_h
# Histogram binning: width in dex and resulting bin counts.
bin_width = 0.2
NB = int((m_high - m_low) / bin_width)
NB_gal = int((m_gal_high - m_gal_low) / bin_width)
# Ejected-fraction binning (fraction runs 0..1).
fej_low = 0.0
fej_high = 1.0
fej_bin_width = 0.05
NB_fej = int((fej_high - fej_low) / fej_bin_width)
def raise_list_power(my_list, n):
    """Return a list with every element of my_list raised to the power n."""
    return [element ** n for element in my_list]
def raise_power_list(my_list, n):
    """Return a list of n raised to each exponent in my_list."""
    return [n ** exponent for exponent in my_list]
def calculate_beta(MUV, z):
    '''
    Calculation of the dust attenuation parameter Beta.
    Fit values are from Bouwens (2015) ApJ 793, 115.
    For z = 5 and 6, Bouwens uses a piece-wise linear relationship and a
    linear relationship for higher redshift.

    Parameters
    ----------
    MUV : float
        Absolute magnitude in the UV (generally M1600) in the AB system.
    z : float
        Redshift the attenuation is calculated at.  Must lie in
        [4.5, 10.5); no fit is defined outside that range.

    Returns
    -------
    beta : float
        Value of the UV continuum parameter beta.

    Raises
    ------
    ValueError
        If z is outside the fitted redshift range.  (Previously this case
        fell through all branches and raised an obscure UnboundLocalError.)
    '''
    if 4.5 <= z < 5.5:  # z = 5 fits (piece-wise in MUV).
        dB = -0.08 if MUV > -18.8 else -0.17
        B = -2.05
        offset = 18.8
    elif 5.5 <= z < 6.5:  # z = 6 fits (piece-wise in MUV).
        dB = -0.08 if MUV > -18.8 else -0.24
        B = -2.22
        offset = 18.8
    elif 6.5 <= z < 7.5:  # z = 7 fits.
        dB = -0.20
        B = -2.05
        offset = 19.5
    elif 7.5 <= z < 8.5:  # z = 8 fits.
        dB = -0.15
        B = -2.13
        offset = 19.5
    elif 8.5 <= z < 9.5:  # z = 9 fits.
        dB = -0.16
        B = -2.19
        offset = 19.5
    elif 9.5 <= z < 10.5:  # z = 10 fits.
        dB = -0.16
        B = -2.16
        offset = 19.5
    else:
        raise ValueError("calculate_beta: no fit defined for z = %s" % z)

    beta = dB * (MUV + offset) + B
    return beta
def multiply(array):
    '''
    Element-wise product of the input sequence.

    Parameters
    ----------
    array : sequence of numbers
        Values to multiply together.

    Returns
    -------
    total : number
        Product of all elements (1 for an empty sequence).
    '''
    total = 1
    for value in array:
        total *= value
    return total
##
def Sum_Log(array):
    '''
    Element-wise sum of an array whose elements are stored in log10-space.

    Parameters
    ----------
    array : sequence of numbers
        Elements in log-space.

    Returns
    -------
    sum_total : float
        Sum of 10**element over all elements (0.0 for an empty input).
    '''
    # start from 0.0 so the result is always a float, as before
    return sum((10 ** value for value in array), 0.0)
##
def Std_Log(array, mean):
    '''
    Standard deviation of an array whose elements are stored in log10-space.

    Parameters
    ----------
    array : sequence of numbers
        Elements in log-space.
    mean : float
        Mean of the array (in linear, not log, units).

    Returns
    -------
    std : float
        Standard deviation of 10**elements about `mean`.
    '''
    sum_sq = sum((10 ** value - mean) ** 2 for value in array)
    # multiply by the reciprocal (not divide) to match the original arithmetic
    variance = sum_sq * (1.0 / len(array))
    return np.sqrt(variance)
###
def collect_across_tasks(mean_per_task, std_per_task, N_per_task, SnapList,
                         BinSnapList=[], binned=False, m_bin_low=0.0,
                         m_bin_high=0.0, my_bin_width=bin_width):
    """
    Reduces arrays that are unique to each task onto the master task.

    The dimensions of the input arrays will change slightly if we are
    collecting a statistic that is binned across e.g., halo mass or galaxy
    stellar mass.

    Parameters
    ----------
    mean_per_task, std_per_task, N_per_task: Nested 2D (or 3D if binned == True) arrays of floats.
        Outer length is equal to the number of models.
        Inner length is equal to the number of snapshots the data has been calculated for.
        Most inner length is equal to the number of bins.
        Contains the mean/standard deviation/number of objects unique for each task.

    SnapList: Nested 2D arrays of integers. Outer length is equal to the number of models.
        Contains the snapshot numbers the data has been calculated for each model.

    BinSnapList: Nested 2D arrays of integers. Outer length is equal to the number of models.
        Often statistics are calculated for ALL snapshots but we only wish to plot for a subset.
        This variable allows the binned data to be collected for only a subset of the snapshots.

    binned: Boolean.
        Dictates whether the collected data is a 2D or 3D array with the
        inner-most array being binned across e.g., halo mass.

    Returns
    ----------
    master_mean, master_std, master_N: Nested 2D (or 3D if binned == True) arrays of floats.
        Shape is identical to the input mean_per_task etc.
        If rank == 0 these contain the collected statistics.
        Otherwise these will be none.

    master_bin_middle: Array of floats.
        Contains the location of the middle of the bins for the data.
    """
    # NOTE(review): mutable default BinSnapList=[] is shared across calls;
    # harmless here because it is only read, never mutated.
    master_mean = []
    master_std = []
    master_N = []
    master_bin_middle = []
    for model_number in range(0, len(SnapList)):
        master_mean.append([])
        master_std.append([])
        master_N.append([])
        master_bin_middle.append([])
        # If we're collecting a binned statistic (e.g., binned across halo mass), then we need to perform the collecting per snapshot.
        if binned:
            count = 0
            for snapshot_idx in range(len(SnapList[model_number])):
                # only collect for the snapshots selected in BinSnapList
                if SnapList[model_number][snapshot_idx] == BinSnapList[model_number][count]:
                    master_mean[model_number], master_std[model_number], master_N[model_number] = calculate_pooled_stats(master_mean[model_number], master_std[model_number], master_N[model_number], mean_per_task[model_number][snapshot_idx], std_per_task[model_number][snapshot_idx], N_per_task[model_number][snapshot_idx])
                    # bin centres: left edges plus half a bin width
                    master_bin_middle[model_number].append(np.arange(m_bin_low,
                                                                     m_bin_high+my_bin_width,
                                                                     my_bin_width)[:-1]
                                                           + my_bin_width* 0.5)
                    count += 1
                    if count == len(BinSnapList[model_number]):
                        break
        else:
            master_mean[model_number], master_std[model_number], master_N[model_number] = calculate_pooled_stats(master_mean[model_number], master_std[model_number], master_N[model_number],
                                                                                                                 mean_per_task[model_number], std_per_task[model_number],
                                                                                                                 N_per_task[model_number])
            # only the master rank holds meaningful pooled values
            if rank == 0:
                master_mean[model_number] = master_mean[model_number][0]
                master_std[model_number] = master_std[model_number][0]
                master_N[model_number] = master_N[model_number][0]
    return master_mean, master_std, master_N, master_bin_middle
###
def calculate_pooled_stats(mean_pool, std_pool, N_pool, mean_local, std_local, N_local):
'''
Calculates the pooled mean and standard deviation from multiple processors and appends it to an input array.
Formulae taken from https://en.wikipedia.org/wiki/Pooled_variance
As we only care about these stats on the rank 0 process, we make use of junk inputs/outputs for other ranks.
NOTE: Since the input data may be an array (e.g. pooling the mean/std for a stellar mass function).
Parameters
----------
mean_pool, std_pool, N_pool : array of floats.
Arrays that contain the current pooled means/standard deviation/number of data points (for rank 0) or just a junk input (for other ranks).
mean_local, mean_std : float or array of floats.
The non-pooled mean and standard deviation unique for each process.
N_local : floating point number or array of floating point numbers.
Number of data points used to calculate the mean/standard deviation that is going to be added to the pool.
NOTE: Use floating point here so we can use MPI.DOUBLE for all MPI functions.
Returns
-------
mean_pool, std_pool : array of floats.
Original array with the new pooled mean/standard deviation appended (for rank 0) or the new pooled mean/standard deviation only (for other ranks).
Units
-----
All units are the same as the input.
All inputs MUST BE real-space (not log-space).
'''
if isinstance(mean_local, list) == True:
if len(mean_local) != len(std_local):
print("len(mean_local) = {0} \t len(std_local) = {1}".format(len(mean_local), len(std_local)))
raise ValueError("Lengths of mean_local and std_local should be equal")
if ((type(mean_local).__module__ == np.__name__) == True or | |
Returns
----------
(file_handle, filename, metadata)
See `modulegraph._find_module()` for details.
Raises
----------
ImportError
If this module is _not_ found.
"""
if parent is not None:
# assert path is not None
fullname = parent.identifier + '.' + name
else:
fullname = name
node = self.findNode(fullname)
if node is not None:
self.msg(3, "find_module: already included?", node)
raise ImportError(name)
if path is None:
if name in sys.builtin_module_names:
return (None, None, ("", "", imp.C_BUILTIN))
path = self.path
return self._find_module_path(fullname, name, path)
def _find_module_path(self, fullname, module_name, search_dirs):
    """
    3-tuple describing the physical location of the module with the passed
    name if this module is physically findable _or_ raise `ImportError`.

    This low-level function is a variant on the standard `imp.find_module()`
    function with additional support for:

    * Multiple search paths. The passed list of absolute paths will be
      iteratively searched for the first directory containing a file
      corresponding to this module.
    * Compressed (e.g., zipped) packages.

    For efficiency, the higher level `ModuleGraph._find_module()` method
    wraps this function with support for module caching.

    Parameters
    ----------
    module_name : str
        Fully-qualified name of the module to be found.
    search_dirs : list
        List of the absolute paths of all directories to search for this
        module (in order). Searching will halt at the first directory
        containing this module.

    Returns
    ----------
    (file_handle, filename, metadata)
        3-tuple describing the physical location of this module, where:

        * `file_handle` is an open read-only file handle from which the
          on-disk contents of this module may be read if this is a
          pure-Python module or `None` otherwise (e.g., if this is a
          package or C extension).
        * `filename` is the absolute path of this file.
        * `metadata` is itself a 3-tuple `(filetype, open_mode, imp_type)`.
          See the `_IMPORTABLE_FILETYPE_TO_METADATA` dictionary for
          details.

    Raises
    ----------
    ImportError
        If this module is _not_ found.
    """
    self.msgin(4, "_find_module_path <-", fullname, search_dirs)

    # TODO: Under:
    #
    # * Python 3.3, the following logic should be replaced by logic
    #   leveraging only the "importlib" module.
    # * Python 3.4, the following logic should be replaced by a call to
    #   importlib.util.find_spec().

    # Top-level 3-tuple to be returned.
    path_data = None

    # File handle to be returned.
    file_handle = None

    # List of the absolute paths of all directories comprising the
    # namespace package to which this module belongs if any.
    namespace_dirs = []

    try:
        for search_dir in search_dirs:
            # PEP 302-compliant importer making loaders for this directory.
            importer = pkgutil.get_importer(search_dir)

            # If this directory is not importable, continue.
            if importer is None:
                # self.msg(4, "_find_module_path importer not found", search_dir)
                continue

            # Get the PEP 302-compliant loader object loading this module.
            #
            # If this importer defines the PEP 302-compliant find_loader()
            # method, prefer that.
            if hasattr(importer, 'find_loader'):
                loader, loader_namespace_dirs = importer.find_loader(
                    module_name)
                namespace_dirs.extend(loader_namespace_dirs)
            # Else if this importer defines the Python 2-specific
            # find_module() method, fall back to that. Despite the method
            # name, this method returns a loader rather than a module.
            elif hasattr(importer, 'find_module'):
                loader = importer.find_module(module_name)
            # Else, raise an exception.
            else:
                raise ImportError(
                    "Module %r importer %r loader unobtainable" % (module_name, importer))

            # If this module is not loadable from this directory, continue.
            if loader is None:
                # self.msg(4, "_find_module_path loader not found", search_dir)
                continue

            # 3-tuple of metadata to be returned.
            metadata = None

            # Absolute path of this module. If this module resides in a
            # compressed archive, this is the absolute path of this module
            # after extracting this module from that archive and hence
            # should not exist; else, this path should typically exist.
            pathname = None

            # If this loader defines the PEP 302-compliant get_filename()
            # method, preferably call that method first. Most if not all
            # loaders (including zipimporter objects) define this method.
            if hasattr(loader, 'get_filename'):
                pathname = loader.get_filename(module_name)
            # Else if this loader provides a "path" attribute, defer to that.
            elif hasattr(loader, 'path'):
                pathname = loader.path
            # Else, raise an exception.
            else:
                raise ImportError(
                    "Module %r loader %r path unobtainable" % (module_name, loader))

            # If no path was found, this is probably a namespace package. In
            # such case, continue collecting namespace directories.
            if pathname is None:
                self.msg(4, "_find_module_path path not found", pathname)
                continue

            # If this loader defines the PEP 302-compliant is_package()
            # method returning True, this is a non-namespace package.
            if hasattr(loader, 'is_package') and loader.is_package(module_name):
                metadata = ('', '', imp.PKG_DIRECTORY)
            # Else, this is either a module or C extension.
            else:
                # In either case, this path must have a filetype.
                # os.path.splitext won't work here since we sometimes need
                # to match more than just the file extension.
                filetype = [filetype
                            for filetype in _IMPORTABLE_FILETYPE_EXTS
                            if pathname.endswith(filetype)]
                if filetype:
                    # at least one extension matched,
                    # pick the first (longest) one
                    filetype = filetype[0]
                else:
                    raise ImportError(
                        'Non-package module %r path %r has no filetype' % (module_name, pathname))

                # 3-tuple of metadata specific to this filetype.
                metadata = _IMPORTABLE_FILETYPE_TO_METADATA.get(
                    filetype, None)
                if metadata is None:
                    raise ImportError(
                        'Non-package module %r filetype %r unrecognized' % (pathname, filetype))

                # See "_IMPORTABLE_FILETYPE_TO_METADATA" for details.
                open_mode = metadata[1]
                imp_type = metadata[2]

                # If this is a C extension, leave this path unopened.
                if imp_type == imp.C_EXTENSION:
                    pass
                # Else, this is a module.
                #
                # If this loader defines the PEP 302-compliant get_source()
                # method, open the returned string as a file-like buffer.
                elif imp_type == imp.PY_SOURCE and hasattr(loader, 'get_source'):
                    file_handle = StringIO(loader.get_source(module_name))
                # If this loader defines the PEP 302-compliant get_code()
                # method, open the returned object as a file-like buffer.
                elif imp_type == imp.PY_COMPILED and hasattr(loader, 'get_code'):
                    try:
                        code_object = loader.get_code(module_name)
                        if code_object is None:
                            # stub .pyc header standing in for missing bytecode
                            file_handle = BytesIO(b'\0\0\0\0\0\0\0\0')
                        else:
                            file_handle = _code_to_file(code_object)
                    except ImportError:
                        # post-bone the ImportError until load_module
                        file_handle = BytesIO(b'\0\0\0\0\0\0\0\0')
                # If this is an uncompiled file under Python 3, open this
                # file for encoding-aware text reading.
                elif imp_type == imp.PY_SOURCE and sys.version_info[0] == 3:
                    with open(pathname, 'rb') as file_handle:
                        encoding = util.guess_encoding(file_handle)
                    file_handle = open(
                        pathname, open_mode, encoding=encoding)
                # Else, this is either a compiled file or an uncompiled
                # file under Python 2. In either case, open this file.
                else:
                    file_handle = open(pathname, open_mode)

            # Return such metadata.
            path_data = (file_handle, pathname, metadata)
            break
        # Else if this is a namespace package, return such metadata.
        else:
            if namespace_dirs:
                path_data = (None, namespace_dirs[0], (
                    '', namespace_dirs, imp.PKG_DIRECTORY))
    except UnicodeDecodeError as exc:
        self.msgout(1, "_find_module_path -> unicode error", exc)
    # Ensure that exceptions are logged, as this function is typically
    # called by the import_module() method which squelches ImportErrors.
    except Exception as exc:
        self.msgout(4, "_find_module_path -> exception", exc)
        raise

    # If this module was not found, raise an exception.
    self.msgout(4, "_find_module_path ->", path_data)
    if path_data is None:
        raise ImportError("No module named " + repr(module_name))

    return path_data
def create_xref(self, out=None):
global header, footer, entry, contpl, contpl_linked, imports
if out is None:
out = sys.stdout
scripts = []
mods = []
for mod in self.flatten():
name = os.path.basename(mod.identifier)
if isinstance(mod, Script):
scripts.append((name, mod))
else:
mods.append((name, mod))
scripts.sort()
mods.sort()
scriptnames = [sn for sn, m in scripts]
scripts.extend(mods)
mods = scripts
title = "modulegraph cross reference for " + ', '.join(scriptnames)
print(header % {"TITLE": title}, file=out)
def sorted_namelist(mods):
lst = [os.path.basename(mod.identifier) for mod in mods if mod]
lst.sort()
return lst
for name, m in mods:
content = ""
if isinstance(m, BuiltinModule):
content = contpl % {"NAME": name,
"TYPE": "<i>(builtin module)</i>"}
elif isinstance(m, Extension):
content = contpl % {"NAME": name,
"TYPE": "<tt>%s</tt>" % m.filename}
else:
url = pathname2url(m.filename or "")
content = contpl_linked % {"NAME": name, "URL": url,
'TYPE': m.__class__.__name__}
oute, ince = map(sorted_namelist, self.get_edges(m))
if oute:
links = []
for n in oute:
links.append(""" <a href="#%s">%s</a>\n""" % (n, n))
# #8226 = bullet-point; can't use html-entities since the
# test-suite uses xml.etree.ElementTree.XMLParser, which
# does't supprot them.
links = " • ".join(links)
content += imports % {"HEAD": "imports", "LINKS": links}
if ince:
links = []
for | |
= [strategies]*self.num_reservoirs
if type(spectral_scales) not in [list, np.ndarray]:
spectral_scales = [spectral_scales]*self.num_reservoirs
if type(offsets) not in [list, np.ndarray]:
offsets = [offsets]*self.num_reservoirs
if sparsities is not None:
assert len(sparsities) == self.num_reservoirs
else:
sparsities = [sparsity]*self.num_reservoirs
for i, (strat, scale, offset, sp) in enumerate(
zip(strategies, spectral_scales, offsets, sparsities)
):
self.reservoirs[i].initialize_reservoir_weights(strat, scale, offset, sparsity=sp)
@abstractmethod
def __forward_routing_rule__(self, u_n):
    """Route the input signal from layer to layer.

    Subclasses take the input signal u_n and return an array containing
    the concatenated states of all reservoirs.  This base implementation
    returns a zero-dimensional array, which reduces the network to plain
    linear regression on the input signal alone.
    """
    return np.array(0)
@abstractmethod
def __reservoir_input_size_rule__(self, *args):
    """Abstract hook that decides each reservoir's input size; subclasses must override."""
def forward(self, u_n, calculate_output=True, add_bias=True):
    """
    Forward-propagate one input sample through the network.

    If ``calculate_output`` is True the readout signal y_n is returned;
    otherwise the concatenated reservoir states x_n are returned.
    ``add_bias`` appends a constant 1 to the input before the readout.
    """
    signal = u_n.squeeze()
    # Input must either be a scalar (K == 1) or carry exactly K entries.
    assert (self.K == 1 and signal.shape == ()) or len(signal) == self.K
    states = self.__forward_routing_rule__(signal)
    if add_bias:
        signal = np.hstack((signal, 1))
    if not calculate_output:
        return states
    # Extended state: reservoir states followed by the (biased) input.
    extended = np.append(states, signal)
    return self.output_activation(np.dot(self.W_out, extended)).squeeze()
def train(self, X, y, add_bias=True):
    """
    Fit the linear readout weights W_out by ridge regression.

    Parameters
    ----------
    X : array, shape (T, K) -- input time series.
    y : array, shape (T, L) -- target time series.
    add_bias : bool -- append a constant-1 column to the collected states.
    """
    assert X.shape[1] == self.K, "Training data has unexpected dimensionality (%s). K = %d." % (X.shape, self.K)
    X = X.reshape(-1, self.K)
    y = y.reshape(-1, self.L)
    # First, run a few inputs into the reservoir to get it echoing
    initial_data = X[:self.init_echo_timesteps]
    for u_n in initial_data:
        _ = self.forward(u_n, calculate_output=False, add_bias=False)
    # Now train the output weights
    X_train = X[self.init_echo_timesteps:]
    D = y[self.init_echo_timesteps:]
    S = np.zeros((X_train.shape[0], self.N + self.K))
    for n, u_n in enumerate(X_train):
        x_n = self.forward(u_n, calculate_output=False, add_bias=False)
        S[n, :] = np.append(x_n, u_n)
    if add_bias:
        S = np.hstack([S, np.ones((S.shape[0], 1))])
    # Ridge regression: W_out = D^T S (S^T S + lambda I)^-1.
    # BUG FIX: the regulariser identity must match S's actual column
    # count. It was hard-coded to K + N + 1, which crashed with a shape
    # mismatch whenever add_bias=False (S then has only K + N columns).
    T1 = np.dot(D.T, S)
    T2 = la.inv(np.dot(S.T, S) + self.regulariser * np.eye(S.shape[1]))
    self.W_out = np.dot(T1, T2)
def reset_reservoir_states(self):
    """Zero out every reservoir's internal state vector (in place)."""
    for res in self.reservoirs:
        # In-place multiply keeps the state array object (and its shape) alive.
        res.state *= 0.
def getInputSize(self):
    """Return K, the input dimensionality."""
    return self.K

def getOutputSize(self):
    """Return L, the output dimensionality."""
    return self.L
def info(self):
    """Return a human-readable summary string of the network's hyper-parameters."""
    # One entry per reservoir for each hyper-parameter.
    inp_scales = [r.input_weights_scale for r in self.reservoirs]
    spec_scales = [r.spectral_scale for r in self.reservoirs]
    echo_prms = [r.echo_param for r in self.reservoirs]
    sps = [r.sparsity for r in self.reservoirs]
    # NOTE(review): the triple-quoted template keeps its literal leading/
    # trailing newlines as part of the returned string.
    out = """
num_res: %d\nres_sizes:%s\necho_params:%s\ninput_scales:%s\nspectral_scales:%s
""" % (self.num_reservoirs, self.reservoir_sizes, echo_prms, inp_scales, spec_scales)
    out += 'sparsities:%s\nregulariser: %f' % (sps, self.regulariser)
    return out
class DHESN(LayeredESN):
def __init__(self, *args, **kwargs):
    """
    Deep/hierarchical ESN: reservoirs are chained through
    dimensionality-reducing encoders (PCA or VAE).

    Keyword arguments (popped before the base-class __init__):
      dims_reduce   : list of encoder output sizes (required).
      train_epochs  : VAE training epochs (default 2).
      train_batches : VAE batch size (default 64).
      encoder_type  : 'PCA' (default) or 'VAE'.
      encode_norm   : normalise encoder outputs, batch-norm style (default False).
    """
    # BUG FIX: the original assert evaluated kwargs['dims_reduce'] when the
    # key was missing, raising KeyError instead of the intended AssertionError
    # (and the message had a typo).
    assert 'dims_reduce' in kwargs, "MUST INCLUDE DIMS AS LIST."
    self.dims_reduce = kwargs.pop('dims_reduce')
    # kwargs.pop(key, default) replaces the repetitive "in kwargs / else / del"
    # blocks with identical behaviour.
    self.train_epochs = kwargs.pop('train_epochs', 2)     # should be VAE-specific but being quick for now
    self.train_batches = kwargs.pop('train_batches', 64)  # should be VAE-specific but being quick for now
    self.encoder_type = kwargs.pop('encoder_type', 'PCA')
    # Similar to batch norm (without trained std/mean): normalise AFTER encoding.
    self.encode_norm = kwargs.pop('encode_norm', False)
    super(DHESN, self).__init__(*args, **kwargs)
    self.data_mean = None
    # Normalisation statistics for raw reservoir outputs ...
    self.reservoir_means = [np.zeros(N_i) for N_i in self.reservoir_sizes]
    self.reservoir_stds = [np.zeros(N_i) for N_i in self.reservoir_sizes]
    # ... and for encoder outputs.
    self.encoder_means = [np.zeros(N_i) for N_i in self.dims_reduce]
    self.encoder_stds = [np.zeros(N_i) for N_i in self.dims_reduce]
    # One encoder sits between each consecutive pair of reservoirs.
    self.encoders = []
    if self.encoder_type == 'PCA':
        for j in range(1, self.num_reservoirs):
            self.encoders.append(PCA(n_components=self.dims_reduce[j - 1]))
    elif self.encoder_type == 'VAE':
        for j in range(1, self.num_reservoirs):
            self.encoders.append(VAE(input_size=self.reservoir_sizes[j - 1],
                                     latent_variable_size=self.dims_reduce[j - 1],
                                     epochs=self.train_epochs,
                                     batch_size=self.train_batches))
    else:
        raise NotImplementedError('non-PCA/VAE encodings not done yet')
    # Recorded output signal of each encoder (one list per encoder).
    self.encoder_signals = [[] for _ in range(self.num_reservoirs - 1)]
def __reservoir_input_size_rule__(self, reservoir_sizes, echo_params, activation):
    """
    Build the reservoir chain: the first reservoir sees the raw K-dim input,
    and every later reservoir i+1 sees the output of encoder i
    (dims_reduce[i] units).
    """
    self.reservoirs.append(Reservoir(self.K, reservoir_sizes[0], echo_params[0],
                                     idx=0, debug=self.debug))
    # BUG FIX: the original used zip(...)[1:], which raises TypeError on
    # Python 3 (zip returns an iterator, not a list).  Slicing the inputs
    # before zipping is equivalent on both Python 2 and 3.
    for i, (size, echo_prm) in enumerate(zip(reservoir_sizes[1:], echo_params[1:])):
        self.reservoirs.append(Reservoir(
            input_size=self.dims_reduce[i], num_units=size, echo_param=echo_prm,
            idx=i + 1, activation=activation, debug=self.debug
        ))
def __forward_routing_rule__(self, u_n):
    """
    Route one input sample through the reservoir->encoder chain and return
    the concatenation of every (encoded) reservoir state.
    """
    x_n = np.zeros(0)
    # u_n = (u_n.reshape(-1, self.K) - self.data_mean).squeeze()
    # zip() pairs each reservoir with the encoder that follows it; the last
    # reservoir (which has no encoder) is handled after the loop.
    for i, (reservoir, encoder) in enumerate(zip(self.reservoirs, self.encoders)):
        u_n = np.array(reservoir.forward(u_n))
        # u_n -= self.reservoir_means[i]
        if self.encoder_type == 'PCA':
            # normalising prior to PCA could be good or bad (https://stats.stackexchange.com/questions/69157/why-do-we-need-to-normalize-data-before-principal-component-analysis-pca)
            #u_n -= self.reservoir_means[i]
            #u_n /= self.reservoir_stds[i]
            u_n = encoder.transform(u_n.reshape(1, -1)).squeeze()
        elif self.encoder_type == 'VAE':
            # we must normalise the input prior to VAE input
            u_n -= self.reservoir_means[i]
            u_n /= self.reservoir_stds[i]
            # [0] keeps the first element of encode()'s return -- presumably
            # the latent mean; TODO confirm against VAE.encode.
            u_n = encoder.encode(Variable(th.FloatTensor(u_n)))[0].data.numpy()
        # normalise the outputs of the encoders (batch-norm style)
        if self.encode_norm:
            u_n = np.array((u_n - self.encoder_means[i])/self.encoder_stds[i])
        # store the encoded signals of each encoder
        self.encoder_signals[i].append(u_n.tolist())
        x_n = np.append(x_n, u_n)
    # Last reservoir: no encoder follows, so append its raw state.
    u_n = self.reservoirs[-1].forward(u_n)
    x_n = np.append(x_n, u_n)
    return x_n
def train(self, X, y, debug_info=False, add_bias=True):
    """(needs different train() because reservoirs+encoders have to be warmed up+trained one at a time.)

    For each stage i: warm the reservoir up, collect its states over the
    remaining inputs, fit encoder i on those states, encode them, and use
    the encoded sequence as the input stream for stage i+1.  The
    concatenated (encoded) states of all stages plus the raw input form
    the regression matrix S for the ridge-regression readout.
    """
    assert X.shape[1] == self.K, "Training data has unexpected dimensionality (%s). K = %d." % (X.shape, self.K)
    X = X.reshape(-1, self.K)
    y = y.reshape(-1, self.L)
    #assert self.encoder_type != 'PCA' or np.mean(X) < 1e-3, "Input data must be zero-mean to use PCA encoding."
    self.data_mean = np.mean(X, axis=0)[0]
    # Each stage consumes init_echo_timesteps of warm-up data, so the number
    # of usable training rows shrinks by that much per reservoir.
    T = len(X) - self.init_echo_timesteps*self.num_reservoirs
    S = np.zeros((T, np.sum(self.dims_reduce)+self.K+self.reservoirs[-1].N))
    # S: collection of extended system states (encoder outputs plus inputs)
    # at each time-step t
    S[:, -self.K:] = X[self.init_echo_timesteps*self.num_reservoirs:]
    # delim[i]:delim[i+1] are S's columns for stage i
    # (encoders 0..n-2 first, then the last reservoir's raw state).
    delim = np.array([0]+self.dims_reduce+[self.reservoirs[-1].N])
    for i in range(1, len(delim)):
        delim[i] += delim[i-1]
    burn_in = X[:self.init_echo_timesteps] # feed a unique input set to all reservoirs
    inputs = X[self.init_echo_timesteps:]
    # Now send data into each reservoir one at a time,
    # and train each encoder one at a time
    for i in range(self.num_reservoirs):
        reservoir = self.reservoirs[i]
        # burn-in period (init echo timesteps) ===============================================
        for u_n in burn_in:
            _ = reservoir.forward(u_n)
        # ==================
        N_i = reservoir.N
        S_i = np.zeros((np.shape(inputs)[0], N_i)) # reservoir i's states over T timesteps
        # Now collect the real state data for encoders to train on
        for n, u_n in enumerate(inputs):
            S_i[n, :] = reservoir.forward(u_n)
        # All reservoirs except the last output into an autoencoder
        if i != self.num_reservoirs - 1:
            encoder = self.encoders[i]
            res_mean = np.mean(S_i, axis=0)
            # Epsilon guards against division by zero for dead units.
            res_std = np.std(S_i, axis=0) + 1e-8
            self.reservoir_means[i] = res_mean
            self.reservoir_stds[i] = res_std
            # Now train the encoder using the gathered state data
            if self.encoder_type == 'PCA':
                encoder.fit(S_i) # sklearn PCA automatically zero-means the data
                S_i = np.array(encoder.transform(S_i))
            elif self.encoder_type == 'VAE':
                # VAEs need normalised inputs.
                S_i -= res_mean
                S_i /= res_std
                # NOTE(review): the last 1000 rows are held out as the VAE
                # validation set -- assumes len(S_i) > 1000; TODO confirm.
                S_i_train = np.array(S_i[:-1000, :])
                S_i_test = np.array(S_i[-1000:, :])
                encoder.train_full(th.FloatTensor(S_i_train), th.FloatTensor(S_i_test))
                S_i = np.array(encoder.encode(Variable(th.FloatTensor(S_i)))[0].data.numpy())
            # compute the mean output of the encoders
            enc_mean = np.mean(S_i, axis=0)
            enc_std = np.std(S_i, axis=0)+1e-8
            self.encoder_means[i] = np.array(enc_mean) # this would be ~0 anyway because we normalise prior to encoding (but still...)
            self.encoder_stds[i] = np.array(enc_std)
            # first few are for the next burn-in
            burn_in = S_i[:self.init_echo_timesteps, :]
            # rest are the next inputs
            inputs = S_i[self.init_echo_timesteps:, :]
        lb, ub = delim[i], delim[i+1]
        # Later stages have consumed fewer remaining burn-in steps, so drop
        # the leading rows to keep every column group time-aligned with S.
        S[:, lb:ub] = S_i[(self.init_echo_timesteps*(self.num_reservoirs-i-1)):, :]
        if debug_info:
            print('res %d mean state magnitude: %.4f' % (i, np.mean(np.abs(S_i))))
    if add_bias:
        S = np.hstack([S, np.ones((S.shape[0], 1))])
    D = y[self.init_echo_timesteps*self.num_reservoirs:]
    # Solve linear system
    T1 = np.dot(D.T, S)
    # NOTE(review): the "+1" in the identity size assumes add_bias=True;
    # with add_bias=False the identity will not match S's column count.
    T2 = la.inv(np.dot(S.T, S) + self.regulariser * np.eye(np.sum(self.dims_reduce)+self.K+self.reservoirs[-1].N+1))
    self.W_out = np.dot(T1, T2)
@property
def input_size(self):
    """Input dimensionality K (read-only alias)."""
    return self.K
@property
def output_size(self):
return | |
True
elif reply == QMessageBox.Cancel:
self.saveToken = False
if self.saveToken:
for f in os.listdir('temp_audio'):
os.remove('temp_audio\%s' % f)
cmd = ['ffmpeg.exe', '-i', videoPath]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.wait()
try:
for l in p.stdout.readlines(): # FFMpeg这蛋疼的视频信息格式
l = l.decode('gb18030', 'ignore')
if 'Duration' in l:
self.duration = calSubTime(l.split(' ')[3][:-1])
self.bitrate = int(l.split(' ')[-2])
if 'Stream' in l and 'DAR' in l:
args = l.split(',')
for resolution in args[2:5]:
resolution = resolution.replace(' ', '')
if '[' in resolution:
resolution = resolution.split('[')[0]
if 'x' in resolution:
self.videoWidth, self.videoHeight = map(int, resolution.split('x'))
for arg in args:
if 'fps' in arg:
self.fps = float(arg.split('fps')[0])
break
except:
self.duration = 114514 # 万一读取不上来视频长度就先随便分配个 之后在timeout修正
# 向输出页面发送视频信息
self.videoDecoder.setDefault(videoPath, self.videoWidth, self.videoHeight, self.duration,
self.bitrate, self.fps, self.subtitleDict, self.styleNameList)
self.subPreview = os.path.splitext(videoPath)[0] # 设置主界面实时预览字幕路径 采用k-lite解码器读取视频目录下同名的ass文件来加载
if os.path.exists(self.subPreview + '.ass'): # 防止覆盖已存在的ass文件 若有 复制一个备份
with codecs.open(self.subPreview + '.ass', 'r', 'utf_8_sig') as sub:
sub = sub.readlines()
if os.path.getsize(self.subPreview + '.ass') != 682 or len(sub) != 20: # 字幕文件大小不等于烤肉机默认输出大小或行数不等于20
self.backupASS = '%s_备份_%s.ass' % (self.subPreview, time.strftime("%H%M%S", time.localtime()))
shutil.copy(self.subPreview + '.ass', self.backupASS)
else:
self.backupASS = ''
self.subPreview += '.ass'
self.videoDecoder.writeAss(self.subPreview, False, True) # 创建空白文件
self.position = 0
self.oldPosition = 0
url = QUrl.fromLocalFile(videoPath)
self.stack.setCurrentIndex(1)
self.player.stop()
self.player.setMedia(url)
self.player.setPlaybackRate(1)
self.playStatus = True
self.saveToken = False
self.videoSlider.setEnabled(True)
w, h = self.videoWindowSizePreset[self.videoWindowSizeIndex]
self.stack.setFixedSize(w, h)
self.view.setFixedSize(w, h)
self.scene.setSceneRect(5, 5, w - 10, h - 10)
self.playerWidget.setSize(QSizeF(w, h))
self.playRange = [0, self.duration] # 播放范围
if self.sepMain.isRunning: # 检测上一个视频的切片主音频进程是否还在进行
self.sepMain.terminate()
self.sepMain.quit()
self.sepMain.wait()
self.sepMain = sepMainAudio(videoPath, self.duration) # 开始切片主音频
self.sepMain.mainAudioWave.connect(self.addMainAudioWave)
self.sepMain.start()
self.refreshMainAudioToken = False # 刷新主音频
self.mainAudioWaveX = []
self.mainAudioWaveY = []
self.refreshVoiceToken = False # 刷新AI识别人声音频
self.voiceWaveX = []
self.voiceWaveY = []
self.bgmWaveY = []
self.secondeMedia = False # False: 播放原音轨;True: 播放第二音轨
self.voiceMedia = True # True: 人声音轨;False: BGM音轨
self.mainAudio.plot([0], [0], 0, 1)
self.voiceAudio.plot([0], [0], True, 0, 1)
self.mediaPlay()
self.timer.stop()
self.subTimer.stop()
self.graphTimer.stop()
try: # 尝试断开三个timer
self.timer.timeout.disconnect(self.timeOut)
except:
pass
try:
self.subTimer.timeout.disconnect(self.subTimeOut)
except:
pass
try:
self.graphTimer.timeout.disconnect(self.refreshGraph)
except:
pass
self.timer.start()
self.timer.timeout.connect(self.timeOut)
self.subTimer.start()
self.subTimer.timeout.connect(self.subTimeOut)
self.graphTimer.start() # 音频图timer启动
self.graphTimer.timeout.connect(self.refreshGraph)
def changeSetting(self, settingDict):
    """Apply user settings: table refresh mode, red-line position and timer rates."""
    interval_options = (15, 30, 50, 100)
    self.settingDict = settingDict
    # Option index '0' means "table refresh enabled".
    self.tableRefresh = (True, False)[int(settingDict['tableRefresh'])]
    # Red-line position comes in 10% steps: 0, 10, ..., 100.
    self.redLineLeft = (0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100)[int(settingDict['redLinePos'])]
    self.redLineRight = 100 - self.redLineLeft
    self.tableRefreshLimit = interval_options[int(settingDict['tableRefreshFPS'])]
    self.timer.setInterval(self.tableRefreshLimit)
    self.graphTimer.setInterval(interval_options[int(settingDict['graphRefreshFPS'])])
def popDnld(self):
    """Show the download window (release the keyboard grab first)."""
    self.releaseKeyboard()
    self.dnldWindow.hide()  # hide-then-show raises the window if already open
    self.dnldWindow.show()
def popPreview(self):
    """Show the subtitle-preview window."""
    self.releaseKeyboard()
    self.previewSubtitle.hide()
    self.previewSubtitle.show()
def popSeparate(self):
    """Show the audio-separation window, primed with the current video context."""
    self.releaseKeyboard()
    self.separate.setDefault(self.videoPath, self.duration, self.subtitleDict)
    self.separate.hide()
    self.separate.show()
def popAnime4K(self):
    """Show the Anime4K upscaling window, primed with the current video context."""
    self.releaseKeyboard()
    self.anime4KWindow.setDefault(self.videoPath, self.duration, self.videoWidth, self.videoHeight)
    self.anime4KWindow.hide()
    self.anime4KWindow.show()
def reloadVideo(self):
    """Force the media player to re-open the current video, keeping the playhead position."""
    position = self.player.position()
    self.player.stop()
    # Loading an empty URL first makes the player drop its handle on the
    # file, so the following setMedia() re-reads it from disk.
    self.player.setMedia(QUrl.fromLocalFile(''))
    self.player.stop()
    self.player.setMedia(QUrl.fromLocalFile(self.videoPath))
    self.player.setPosition(position)
def addMainAudioWave(self, x, y):
    """Store the decoded main-audio waveform and mark it ready for plotting."""
    self.mainAudioWaveX = x
    self.mainAudioWaveY = y
    # Display ceiling: half of the peak amplitude, truncated to a
    # multiple of 50 (floor-divide by 100, then times 50).
    peak = max(self.mainAudioWaveY)
    self.mainAudioMax = peak // 100 * 50
    self.refreshMainAudioToken = True
def addVoiceWave(self, x, y, bgm):
    """Store the AI-separated vocal and BGM waveforms and flag them for plotting."""
    self.voiceWaveX, self.voiceWaveY = x, y
    self.bgmWaveY = bgm
    self.refreshVoiceToken = True
def refreshGraph(self, force=False):
    """
    Redraw the waveform plots around the current playhead.

    Redraws only when the playhead or the global interval changed (or
    ``force`` is True).  The main-audio plot and the AI-separated
    vocal/BGM plot refresh independently, gated by their tokens.
    """
    position = self.player.position()
    step = int(self.globalInterval / 100) + 1  # slice stride through the waveform lists
    if self.oldPosition != position or self.oldGlobalInterval != self.globalInterval or force:
        self.oldPosition = position
        self.oldGlobalInterval = self.globalInterval
        # BUG FIX: subtitleLine used to be created only inside the
        # main-audio branch, so the vocal branch crashed with a NameError
        # whenever it ran without the main branch. Define it up front;
        # the main branch still populates it exactly as before.
        subtitleLine = {0: [], 1: [], 2: [], 3: [], 4: []}
        if self.refreshMainAudioToken:  # draw the main audio waveform
            pos = int((position / self.duration) * len(self.mainAudioWaveX))
            if pos > len(self.mainAudioWaveX):
                pos = len(self.mainAudioWaveX) - int(self.globalInterval * self.redLineRight) - 1
            start = pos - int(self.globalInterval * self.redLineLeft)
            if start < 0:
                start = 0
            start = start // step * step
            end = pos + int(self.globalInterval * self.redLineRight)  # show the "current interval x100" region
            if end > len(self.mainAudioWaveX):
                end = len(self.mainAudioWaveX)
            end = end // step * step
            xList = self.mainAudioWaveX[start:end:step]
            yList = self.mainAudioWaveY[start:end:step]
            # Collect the subtitle segments overlapping the visible window.
            for x, subData in self.subtitleDict.items():
                for sub_start in subData.keys():
                    sub_end = sub_start + subData[sub_start][0]
                    if (sub_start > start and sub_start < end) or (sub_end > start and sub_end < end) or\
                            (sub_start < start and sub_end > end):
                        subtitleLine[x].append([sub_start, sub_end])
            self.mainAudio.mp3Path = os.path.join(self.dir, r'temp_audio\audio_original.aac')
            self.mainAudio.plot(xList, yList, position, step, [-self.mainAudioMax, self.mainAudioMax], subtitleLine)
        if self.refreshVoiceToken:  # draw the AI-separated vocal/BGM waveform
            pos = int((position / self.duration) * len(self.voiceWaveX))
            if pos > len(self.voiceWaveX):
                pos = len(self.voiceWaveX) - int(self.globalInterval * self.redLineRight) - 1
            start = pos - int(self.globalInterval * self.redLineLeft)
            if start < 0:
                start = 0
            start = start // step * step
            end = pos + int(self.globalInterval * self.redLineRight)
            if end > len(self.voiceWaveX):
                end = len(self.voiceWaveX)
            end = end // step * step
            xList = self.voiceWaveX[start:end:step]
            if self.voiceMedia:
                yList = self.voiceWaveY[start:end:step]
            else:
                yList = self.bgmWaveY[start:end:step]
            self.voiceAudio.mp3Path = self.dir + r'\temp_audio'
            self.voiceAudio.plot(xList, yList, self.voiceMedia, position, step,
                                 [-self.mainAudioMax, self.mainAudioMax], subtitleLine)
def playMainAudio(self):
    """Switch audible output back to the original (main) audio track."""
    self.secondeMedia = False
    # Mute the vocal player, unmute the main one.
    self.player_vocal.setMuted(True)
    self.player.setMuted(False)
def playVocal(self):
    """Toggle audible output between the AI-separated vocal track and the BGM track."""
    if os.path.exists(r'temp_audio\vocals.mp3') and self.videoPath:
        self.player.setMuted(True)
        self.player_vocal.setMuted(False)
        if not self.secondeMedia:  # switch from the original track to the second track
            self.secondeMedia = not self.secondeMedia
            if self.player_vocal.mediaStatus() == QMediaPlayer.MediaStatus.NoMedia:
                url = QUrl.fromLocalFile(r'temp_audio\vocals.mp3')
                self.player_vocal.setMedia(url)
        else:  # the second track toggles back and forth between vocals and BGM
            if self.voiceMedia:  # switch vocals -> BGM
                url = QUrl.fromLocalFile(r'temp_audio\bgm.mp3')
                self.player_vocal.setMedia(url)
            else:  # switch BGM -> vocals
                url = QUrl.fromLocalFile(r'temp_audio\vocals.mp3')
                self.player_vocal.setMedia(url)
            self.voiceMedia = not self.voiceMedia
            self.refreshGraph(True)
        self.player_vocal.setPosition(self.player.position())
        # playStatus is True when paused (see mediaPlay), hence the
        # inverted-looking condition: keep the vocal player in sync.
        if not self.playStatus:
            self.player_vocal.play()
        else:
            self.player_vocal.pause()
def setTablePreset(self, preset):
    """Remember the (fill-text, output-column) preset used by auto-subtitling."""
    self.tablePreset = preset
def clearSubtitle(self, index, videoStart, videoEnd):
    """Delete every subtitle on track ``index`` that overlaps [videoStart, videoEnd] (minutes)."""
    window_lo = videoStart * 60000  # minutes -> ms
    window_hi = videoEnd * 60000
    # Iterate over a snapshot so deleting from the live dict is safe.
    for begin, subData in copy.deepcopy(self.subtitleDict[index]).items():
        finish = begin + subData[0]
        # Keep the entry only if neither endpoint falls inside the window.
        if (window_lo <= begin <= window_hi) or (window_lo <= finish <= window_hi):
            del self.subtitleDict[index][begin]
    self.updateBackend()
    self.refreshTable()
def setAutoSubtitle(self, voiceList):
    """Insert AI-detected (start, duration) segments into the subtitle dict using the table preset."""
    fill_text, column = self.tablePreset
    for begin, duration in voiceList:
        self.subtitleDict[column][begin] = [duration, fill_text]
    self.updateBackend()
    self.refreshTable()  # repaint the subtitle table
    self.refreshSubPreview()
def updateTranslateResult(self, result):
    """Write one translation result (and optionally its source line) back into the subtitle dict."""
    begin, duration, src_text, dst_text, src_idx, dst_idx = result
    # src index 5 appears to be the "no source track" sentinel -- TODO confirm.
    if src_idx != 5:
        self.subtitleDict[src_idx][begin] = [duration, src_text]
    self.subtitleDict[dst_idx][begin] = [duration, dst_text]
    self.refreshTable()  # repaint the subtitle table
    self.refreshSubPreview()
def decode(self):
    """Open the export/decode window, primed with the current video and subtitles."""
    self.releaseKeyboard()
    self.videoDecoder.setDefault(self.videoPath, self.videoWidth, self.videoHeight, self.duration,
                                 self.bitrate, self.fps, self.subtitleDict, self.styleNameList)
    self.videoDecoder.hide()  # hide-then-show raises the window if already open
    self.videoDecoder.show()
def popPayment(self):
    """Show the payment/donation window."""
    self.pay.hide()
    self.pay.show()
def popHotKeyInfo(self):
    """Show the hotkey reference window."""
    self.hotKeyInfo.hide()
    self.hotKeyInfo.show()
def popSettingPage(self):
    """Show the settings window."""
    self.setting.hide()
    self.setting.show()
def popTutorial(self):
    """Open the video tutorial in the default browser."""
    QDesktopServices.openUrl(QUrl('https://www.bilibili.com/video/BV1p5411b7o7'))
def popReleases(self):
    """Show the releases/changelog window."""
    self.releases.hide()
    self.releases.show()
def mediaPlay(self):
    """Toggle play/pause. playStatus==True means "currently paused, should play"."""
    if self.playStatus:
        self.stack.setCurrentIndex(1)
        self.player.play()
        try:  # the vocal player may have no media loaded yet
            self.player_vocal.play()
        except:
            pass
        self.grabKeyboard()
        self.timeStart()
        self.playStatus = False
        self.playAction.setIcon(self.pauseIcon)
        self.playAction.setText('暂停')
    else:
        self.player.pause()
        try:
            self.player_vocal.pause()
        except:
            pass
        self.timeStop()
        self.playStatus = True
        self.playAction.setIcon(self.playIcon)
        self.playAction.setText('播放')
def mediaPlayOnly(self):
    """Parse the manually edited position text ("mm:ss") and seek both players there."""
    self.grabKeyboard()
    try:
        # NOTE(review): both arguments of this replace() render as ASCII ':'
        # here; presumably the intent is normalising a full-width '：' --
        # verify against the original file's encoding.
        timeText = self.videoPositionEdit.text().replace(':', ':').split(':')
        m, s = timeText[:2]
        # Empty fields default to "00"; over-long fields are truncated.
        if not m:
            m = '00'
        if not s:
            s = '00'
        if len(m) > 3:
            m = m[:3]
        if len(s) > 2:
            s = s[:2]
        m = int(m)
        s = int(s)
        if s > 60:
            s = 60
        # Clamp minutes to the media's total length.
        total_m = self.player.duration() // 60000
        if m > total_m:
            m = total_m
        self.player.setPosition(m * 60000 + s * 1000)
        self.player_vocal.setPosition(m * 60000 + s * 1000)
        # self.videoSlider.setValue(self.player.position() * self.videoSlider.width() / self.player.duration())
    except:
        # Malformed input: leave the playhead untouched.
        pass
    self.videoPositionEdit.setReadOnly(True)
    self.videoSlider.setValue(self.player.position() * self.videoSlider.width() // self.player.duration())
def mediaPauseOnly(self):
    """Pause playback and unlock the position edit for manual seeking."""
    self.releaseKeyboard()
    self.videoPositionEdit.setReadOnly(False)
    self.player.pause()
    self.timeStop()
    self.playStatus = True  # True means "paused" in this class
    action = self.playAction
    action.setIcon(self.playIcon)
    action.setText('播放')
def timeOut(self):
    """Periodic timer tick: keep the slider/labels in sync and enforce the play range."""
    if self.duration == 114514 or not self.duration:
        # 114514 is the placeholder assigned when the ffmpeg duration probe
        # failed; correct it from the player once media is loaded.
        self.duration = self.player.duration()
    position = 0
    if self.player.position() <= self.playRange[0] or self.player.position() >= self.playRange[1]:  # loop playback
        if self.player.position() >= self.playRange[1] and self.replay == 2:  # single-shot playback ran past its range
            position = self.playRange[0]
            self.player.setPosition(position)
            self.player_vocal.setPosition(position)
            self.videoSlider.setValue(position * self.videoSlider.width() // self.player.duration())
            self.setTimeLabel(position)
            self.replay = 0  # restore the full play range
            self.playRange = [0, self.duration]
            if not self.playStatus:
                # mediaPlay() toggles; playStatus False means "currently
                # playing", so this call pauses at the range start.
                self.mediaPlay()
        else:
            self.player.setPosition(self.playRange[0])
            self.player_vocal.setPosition(self.playRange[0])
    self.refreshTable(position)
    try:  # player may report duration 0 before media is loaded
        self.videoSlider.setValue(self.player.position() * self.videoSlider.width() / self.player.duration())
        self.setTimeLabel()
    except:
        pass
def timeStop(self):
    """Halt the table-refresh timer."""
    self.timer.stop()

def timeStart(self):
    """(Re)start the table-refresh timer."""
    self.timer.start()
def videoSliderClick(self, p):
    """Jump playback to the position corresponding to a click on the seek slider."""
    width = self.videoSlider.width()
    # Clamp the click's x coordinate into [0, slider width].
    x = min(max(p.x(), 0), width)
    self.videoSlider.setValue(x)
    position = x * self.duration // width
    self.player.setPosition(position)
    self.player_vocal.setPosition(position)
    self.refreshTable(position)
    self.setTimeLabel(position)
def setVolume(self, p):
    """Set both players' volume from a click on the volume slider; update the mute icon."""
    volume = max(0, min(p.x(), 100))  # clamp to [0, 100]
    self.volumeValue = volume
    self.volSlider.setValue(volume)
    self.player.setVolume(volume)
    self.player_vocal.setVolume(volume)
    self.volSlider.setToolTip(str(self.volSlider.value()))
    if volume:
        self.volumeStatus = True
        self.volumeAction.setIcon(self.volumeIcon)
    else:
        # Zero volume shows the muted icon.
        self.volumeStatus = False
        self.volumeAction.setIcon(self.volumeMuteIcon)
def volumeMute(self):
    """Toggle mute: stash the current volume when muting, restore it when un-muting."""
    if not self.volumeStatus:
        # Un-mute: restore the stashed volume.
        self.volumeStatus = True
        self.player.setVolume(self.old_volumeValue)
        self.volSlider.setValue(self.old_volumeValue)
        self.volumeAction.setIcon(self.volumeIcon)
        return
    # Mute: remember the live volume first so un-muting can restore it.
    self.volumeStatus = False
    self.old_volumeValue = self.player.volume()
    self.player.setVolume(0)
    self.volSlider.setValue(0)
    self.volumeAction.setIcon(self.volumeMuteIcon)
def setTimeLabel(self, pos=0):
    """Refresh the "current / total" time readout; pos==0 means "use the player position"."""
    # NOTE(review): a genuine position of 0 ms is indistinguishable from
    # the default here; both fall back to the player's own position.
    current = splitTime(self.player.position() if not pos else pos)
    total = splitTime(self.player.duration())
    self.videoPositionEdit.setText(current)
    self.videoPositionLabel.setText(' / %s ' % total)
def setSaveToken(self, token):
    """Record the save-state flag used elsewhere in this window."""
    self.saveToken = token
def eventFilter(self, obj, event):
if obj == self.subtitle.verticalScrollBar(): # 过滤表格滚轮事件 用于刷新超出表格视窗范围的滚动
if event.type() == QEvent.Wheel:
scrollBarValue = self.subtitle.verticalScrollBar().value()
if scrollBarValue == self.oldScrollBarValue:
delta = event.delta() // 30 # 滚轮四倍速!!!(120 / 30)
if scrollBarValue > 0 and delta < | |
the new symmetry blocks to be added to the `basis_dict` attribute of the class,
as dictionaries or `hamiltonian` objects.
basis_con : :obj:`basis`
Basis constructor used to build the basis objects to create the new block diagonal Hamiltonians.
basis_args : tuple
This argument is passed as the first argument for `basis_con`.
Contains all required arguments for the basis.
compute_all_blocks : bool, optional
Flag which tells the `block_ops` class to compute all symmetry blocks at initialization.
Default is `False`.
"""
blocks = list(blocks)
for block in blocks:
if str(block) not in self._basis_dict.keys():
b = basis_con(*basis_args,**block)
if b.Ns > 0:
self._basis_dict[str(block)]=b
if compute_all_blocks:
self.compute_all_blocks()
def compute_all_blocks(self):
    """Precompute and cache the projector and Hamiltonian for every symmetry block.

    Examples
    --------
    The example below builds on the code snippet shown in the description of the `block_ops` class.
    .. literalinclude:: ../../doc_examples/block_ops-example.py
        :linenos:
        :language: python
        :lines: 57-58
    """
    from ..operators import hamiltonian
    for key, b in _iteritems(self._basis_dict):
        # Build the projector for this block if it is not cached yet.
        if self._P_dict.get(key) is None:
            self._P_dict[key] = b.get_proj(self.dtype, **self._get_proj_kwargs)
        if self._H_dict.get(key) is not None:
            continue
        # Run the expensive consistency checks only for the first
        # Hamiltonian built; later ones skip them.
        checks = self._checks if not self._checked else self._no_checks
        H = hamiltonian(self._static, self._dynamic, basis=b, dtype=self.dtype, **checks)
        self._checked = True
        self._H_dict[key] = H
def _get_P(self, key):
    """Return the projector for symmetry block ``key``, caching it when save mode is on."""
    p = self._P_dict.get(key)
    if p is not None:
        return p
    p = self._basis_dict[key].get_proj(self.dtype, **self._get_proj_kwargs)
    if self._save:
        self._P_dict[key] = p
    return p
def _get_H(self, key):
    """Return the Hamiltonian for symmetry block ``key``, building and optionally caching it on demand."""
    from ..operators import hamiltonian
    H = self._H_dict.get(key)
    if H is not None:
        return H
    # Only the first construction runs the expensive input checks.
    checks = self._checks if not self._checked else self._no_checks
    H = hamiltonian(self._static, self._dynamic, basis=self._basis_dict[key],
                    dtype=self.dtype, **checks)
    self._checked = True
    if self._save:
        self._H_dict[key] = H
    return H
def evolve(self,psi_0,t0,times,iterate=False,n_jobs=1,block_diag=False,stack_state=False,imag_time=False,solver_name="dop853",**solver_args):
    """Creates symmetry blocks of the Hamiltonian and then uses them to run `hamiltonian.evolve()` in parallel.
    **Arguments NOT described below can be found in the documentation for the `hamiltonian.evolve()` method.**
    Examples
    --------
    The example below builds on the code snippet shown in the description of the `block_ops` class.
    .. literalinclude:: ../../doc_examples/block_ops-example.py
        :linenos:
        :language: python
        :lines: 69-
    Parameters
    -----------
    psi_0 : numpy.ndarray, list, tuple
        Quantum state defined on the full Hilbert space of the problem.
        Does not need to obey any sort of symmetry.
    t0 : float
        Initial time to start the evolution at.
    times : numpy.ndarray, list
        Contains the times to compute the solution at. Must be an iterable object.
    iterate : bool, optional
        Flag to return generator when set to `True`. Otherwise the output is an array of states.
        Default is 'False'.
    n_jobs : int, optional
        Number of processes requested for the computation time evolution dynamics.
        NOTE: one of those processes is used to gather results. For best performance, all blocks
        should be approximately the same size and `n_jobs-1` must be a common divisor of the number of
        blocks, such that there is roughly an equal workload for each process. Otherwise the computation
        will be as slow as the slowest process.
    block_diag : bool, optional
        When set to `True`, this flag puts the Hamiltonian matrices for the separate symmetry blocks
        into a list and then loops over it to do time evolution. When set to `False`, it puts all
        blocks in a single giant sparse block diagonal matrix. Default is `False`.
        This flag is useful if there are a lot of smaller-sized blocks.
    Returns
    --------
    obj
        if `iterate = True`, returns generator which generates the time dependent state in the
        full H-space basis.
        if `iterate = False`, returns `numpy.ndarray` which has the time-dependent states in the
        full H-space basis in the rows.
    Raises
    ------
    ValueError
        Variable `imag_time=True` option on `hamiltonian.evolve()` method not supported.
    ValueError
        `iterate=True` requires `times` to be an array or a list.
    RuntimeError
        Terminates when initial state has no projection onto the specified symmetry blocks.
    """
    if imag_time:
        raise ValueError("imaginary time not supported for block evolution.")
    P = []
    H_list = []
    psi_blocks = []
    # Project the initial state onto every stored symmetry block and keep
    # only those blocks carrying non-negligible weight.
    for key,b in _iteritems(self._basis_dict):
        p = self._get_P(key)
        if _sp.issparse(psi_0):
            psi = p.H.dot(psi_0).toarray()
        else:
            psi = p.H.dot(psi_0)
        psi = _np.asarray(psi).ravel()
        if _np.linalg.norm(psi) > 1000*_np.finfo(self.dtype).eps:
            psi_blocks.append(psi)
            P.append(p.tocoo())
            H_list.append(self._get_H(key))
    if block_diag and H_list:
        # Merge the blocks into roughly n_jobs block-diagonal Hamiltonians
        # so that each worker process handles a single matrix.
        N_H = len(H_list)
        n_pp = N_H//n_jobs
        n_left = n_pp + N_H%n_jobs
        H_list_prime = []
        psi_blocks_prime = []
        if n_left != 0:
            H_list_prime.append(block_diag_hamiltonian(H_list[:n_left],None,None,None,None,self._dtype,get_proj=False,**self._no_checks))
            # BUG FIX: this append (and the one in the loop below) wrote to
            # the undefined name `psi_list_prime`, raising NameError
            # whenever block_diag=True; the intended target is
            # `psi_blocks_prime`, which is assigned back to psi_blocks.
            psi_blocks_prime.append(_np.hstack(psi_blocks[:n_left]))
        for i in range(n_jobs-1):
            i1 = n_left + i*n_pp
            i2 = n_left + (i+1)*n_pp
            H_list_prime.append(block_diag_hamiltonian(H_list[i1:i2],None,None,None,None,self._dtype,get_proj=False,**self._no_checks))
            psi_blocks_prime.append(_np.hstack(psi_blocks[i1:i2]))
        H_list = H_list_prime
        psi_blocks = psi_blocks_prime
    if len(H_list) > 0:
        P = _sp.hstack(P,format="csr")
        if iterate:
            if _np.isscalar(times):
                raise ValueError("If iterate=True times must be a list/array.")
            return _block_evolve_iter(psi_blocks,H_list,P,t0,times,stack_state,imag_time,solver_name,solver_args,n_jobs)
        else:
            # Evolve each block in parallel, then stack and project the
            # results back into the full Hilbert space.
            psi_t = _Parallel(n_jobs = n_jobs)(_delayed(_block_evolve_helper)(H,psi,t0,times,stack_state,imag_time,solver_name,solver_args) for psi,H in _izip(psi_blocks,H_list))
            psi_t = _np.vstack(psi_t)
            psi_t = P.dot(psi_t)
            return psi_t
    else:
        raise RuntimeError("initial state has no projection on to specified blocks.")
def expm(self,psi_0,H_time_eval=0.0,iterate=False,n_jobs=1,block_diag=False,a=-1j,start=None,stop=None,endpoint=None,num=None,shift=None):
"""Creates symmetry blocks of the Hamiltonian and then uses them to run `_expm_multiply()` in parallel.
**Arguments NOT described below can be found in the documentation for the `exp_op` class.**
Examples
--------
The example below builds on the code snippet shown in the description of the `block_ops` class.
.. literalinclude:: ../../doc_examples/block_ops-example.py
:linenos:
:language: python
:lines: 60-67
Parameters
-----------
psi_0 : numpy.ndarray, list, tuple
Quantum state which defined on the full Hilbert space of the problem.
Does not need to obey and sort of symmetry.
t0 : float
Inistial time to start the evolution at.
H_time_eval : numpy.ndarray, list
Times to evaluate the Hamiltonians at when doing the matrix exponentiation.
iterate : bool, optional
Flag to return generator when set to `True`. Otherwise the output is an array of states.
Default is 'False'.
n_jobs : int, optional
Number of processes requested for the computation time evolution dynamics.
NOTE: one of those processes is used to gather results. For best performance, all blocks
should be approximately the same size and `n_jobs-1` must be a common devisor of the number of
blocks, such that there is roughly an equal workload for each process. Otherwise the computation
will be as slow as the slowest process.
block_diag : bool, optional
When set to `True`, this flag puts the Hamiltonian matrices for the separate symemtri blocks
into a list and then loops over it to do time evolution. When set to `False`, it puts all
blocks in a single giant sparse block diagonal matrix. Default is `False`.
This flag is useful if there are a lot of smaller-sized blocks.
Returns
--------
obj
if `iterate = True`, returns generator which generates the time dependent state in the
full H-space basis.
if `iterate = False`, returns `numpy.ndarray` which has the time-dependent states in the
full H-space basis in the rows.
Raises
------
ValueError
Various `ValueError`s of `exp_op` class.
RuntimeError
Terminates when initial state has no projection onto the specified symmetry blocks.
"""
from ..operators import hamiltonian
if iterate:
if start is None and stop is None:
raise ValueError("'iterate' can only be True with time discretization. must specify 'start' and 'stop' points.")
if num is not None:
if type(num) is not int:
raise ValueError("expecting integer for 'num'.")
else:
num = 50
if endpoint is not None:
if type(endpoint) is not bool:
raise ValueError("expecting bool for 'endpoint'.")
else:
endpoint = True
else:
if start is None and stop is None:
if num != None:
raise ValueError("unexpected argument 'num'.")
if endpoint != None:
raise ValueError("unexpected argument 'endpoint'.")
else:
if not (_np.isscalar(start) and _np.isscalar(stop)):
raise ValueError("expecting scalar values for 'start' and 'stop'")
if not (_np.isreal(start) and _np.isreal(stop)):
raise ValueError("expecting real values for 'start' and 'stop'")
if num is not None:
if type(num) is not int:
raise ValueError("expecting integer for 'num'.")
else:
num = 50
if endpoint is not None:
if type(endpoint) is not bool:
raise ValueError("expecting bool for 'endpoint'.")
else:
endpoint = True
P = []
H_list = []
psi_blocks = []
for key,b in _iteritems(self._basis_dict):
p = self._get_P(key)
if _sp.issparse(psi_0):
psi = p.H.dot(psi_0).toarray()
else:
psi = p.H.dot(psi_0)
psi = psi.ravel()
if _np.linalg.norm(psi) > 1000*_np.finfo(self.dtype).eps:
psi_blocks.append(psi)
P.append(p.tocoo())
H = self._get_H(key)
H = H(H_time_eval)*a
if shift is not None:
H += a*shift*_sp.identity(b.Ns,dtype=self.dtype)
H_list.append(H)
if block_diag and H_list:
N_H = len(H_list)
n_pp = N_H//n_jobs
n_left = n_pp + N_H%n_jobs
H_list_prime = []
psi_blocks_prime = []
psi_block = _np.hstack(psi_blocks[:n_left])
H_block = _sp.block_diag(H_list[:n_left],format="csr")
H_list_prime.append(H_block)
psi_blocks_prime.append(psi_block)
for i in range(n_jobs-1):
i1 = n_left + i*n_pp
i2 = n_left + (i+1)*n_pp
psi_block = _np.hstack(psi_blocks[i1:i2])
H_block = _sp.block_diag(H_list[i1:i2],format="csr")
H_list_prime.append(H_block)
psi_blocks_prime.append(psi_block)
H_list = H_list_prime
psi_blocks = psi_blocks_prime
H_is_complex = _np.iscomplexobj([_np.float32(1.0).astype(H.dtype) for H in H_list])
if H_list:
P = _sp.hstack(P,format="csr")
if iterate:
return _block_expm_iter(psi_blocks,H_list,P,start,stop,num,endpoint,n_jobs)
else:
ver = [int(v) for v in _scipy.__version__.split(".")]
if H_is_complex and (start,stop,num,endpoint) != (None,None,None,None) and ver[1] < 19:
mats = _block_expm_iter(psi_blocks,H_list,P,start,stop,num,endpoint,n_jobs)
return _np.array([mat for mat in mats]).T
else:
psi_t = _Parallel(n_jobs = n_jobs)(_delayed(_expm_multiply)(H,psi,start=start,stop=stop,num=num,endpoint=endpoint) for psi,H in _izip(psi_blocks,H_list))
psi_t = _np.hstack(psi_t).T
psi_t = P.dot(psi_t)
return psi_t
else:
raise RuntimeError("initial state has no projection on to specified blocks.")
'''
# TO DO
=======
class block_diag_ensemble(object):
def __init__(self,blocks,static,dynamic,basis_con,basis_args,dtype,get_proj_kwargs={},save_previous_data=True,compute_all_blocks=False,check_symm=True,check_herm=True,check_pcon=True):
"""
This class is used to split the dynamics of a state up over various symmetry sectors if the initial state does
not obey the symmetry but the hamiltonian does. Moreover we provide a multiprocessing option which allows the
user to split the dynamics up over multiple cores.
---arguments---
* blocks: (required) list/tuple/iterator which contains the blocks the user would like to put into the hamiltonian as dictionaries.
* static: (required) the static operator list which is used to construct the block hamiltonians. follows hamiltonian format.
* dynamic: (required) the dynamic operator list which is used to construct the block hamiltonians. follows hamiltonian format.
* basis_con: (required) the basis constructor used to construct the basis objects which will create the block diagonal hamiltonians.
* basis_args: (required) tuple which gets passed as the first argument for basis_con, contains required arguments.
* check_symm: (optional) flag to check symmetry
* dtype: (required) the data type to construct the hamiltonian with.
* save_previous_data: (optional) when doing the evolution this class has to construct the hamiltonians. this takes
some time and so by setting this to true, the class will keep | |
from sys import exit
import pygame
import MathStuff as MATH
import GUICreator
StaticButton = GUICreator.StaticButton
DynamicButton = GUICreator.DynamicButton
StaticTextButton = GUICreator.StaticTextButton
DynamicTextButton = GUICreator.DynamicTextButton
class MathStuffGUI:
"""Create a GUI for my calculators."""
def __init__(self):
"""Initialize variables."""
pygame.init()
self.manage_fps = pygame.time.Clock()
self.FPS = 30
# Initialize the screen.
display_info = pygame.display.Info()
max_width = display_info.current_w
max_height = display_info.current_h
display_ratio = max_width/max_height
if display_ratio <= 16/9:
x = int(max_width * 0.8)
y = int(max_width/(16/9) * 0.8)
else:
x = int(max_height*(16/9) * 0.8)
y = int(max_height * 0.8)
self.screen = pygame.display.set_mode((x, y), pygame.RESIZABLE)
pygame.display.set_caption("Math Stuff")
self.screen_rect = self.screen.get_rect()
self.create_all_gui()
self.universal_menu_variables()
    def run_program(self):
        """Run the main loop forever.

        Each frame: cap the frame rate, process pending events, redraw.
        """
        while True:
            self.manage_fps.tick(self.FPS)
            self.get_events()
            self.update_screen()
def universal_menu_variables(self):
"""Create flags for the menus."""
self.active_menu = 'main_menu'
self.active_button = ''
self.input_box_1 = ''
self.input_box_2 = ''
self.input_box_3 = ''
self.input_box_4 = ''
self.input_box_5 = ''
self.input_box_6 = ''
self.input_box_7 = ''
self.input_box_1_mod = ''
self.input_box_2_mod = ''
self.input_box_3_mod = ''
self.input_box_4_mod = ''
self.input_box_5_mod = ''
self.input_box_6_mod = ''
self.input_box_7_mod = ''
self.input_value_1 = 0
self.input_value_2 = 0
self.input_value_3 = 0
self.input_value_4 = 0
self.input_value_5 = 0
self.input_value_6 = 0
self.input_value_7 = 0
self.answer = ''
self.answer_text_x_mod = 0
self.hover_text = ''
self.input_buttons = 0
def reset_variables(self, menu):
"""Reset the variables and change the active menu."""
self.active_button = ''
self.active_menu = menu
self.answer = ''
self.input_box_1 = ''
self.input_box_2 = ''
self.input_box_3 = ''
self.input_box_4 = ''
self.input_box_5 = ''
self.input_box_6 = ''
self.input_box_7 = ''
self.input_box_1_mod = ''
self.input_box_2_mod = ''
self.input_box_3_mod = ''
self.input_box_4_mod = ''
self.input_box_5_mod = ''
self.input_box_6_mod = ''
self.input_box_7_mod = ''
self.input_value_1 = 0
self.input_value_2 = 0
self.input_value_3 = 0
self.input_value_4 = 0
self.input_value_5 = 0
self.input_value_6 = 0
self.input_value_7 = 0
self.answer_text_x_mod = 0
def event_check_number(self, event):
"""Check that the key that was pressed is valid for a number."""
if (event.unicode == '1'
or event.unicode == '2'
or event.unicode == '3'
or event.unicode == '4'
or event.unicode == '5'
or event.unicode == '6'
or event.unicode == '7'
or event.unicode == '8'
or event.unicode == '9'
or event.unicode == '0'
or event.unicode == '.'
or event.unicode == '-'):
return True
else:
return False
def event_check_letter(self, event):
"""Check that the key that was pressed is a letter."""
if (event.unicode == 'a'
or event.unicode == 'b'
or event.unicode == 'c'
or event.unicode == 'd'
or event.unicode == 'e'
or event.unicode == 'f'
or event.unicode == 'g'
or event.unicode == 'h'
or event.unicode == 'i'
or event.unicode == 'j'
or event.unicode == 'k'
or event.unicode == 'l'
or event.unicode == 'm'
or event.unicode == 'n'
or event.unicode == 'o'
or event.unicode == 'p'
or event.unicode == 'q'
or event.unicode == 'r'
or event.unicode == 's'
or event.unicode == 't'
or event.unicode == 'u'
or event.unicode == 'v'
or event.unicode == 'w'
or event.unicode == 'x'
or event.unicode == 'y'
or event.unicode == 'z'
or event.unicode == 'A'
or event.unicode == 'B'
or event.unicode == 'C'
or event.unicode == 'D'
or event.unicode == 'E'
or event.unicode == 'F'
or event.unicode == 'G'
or event.unicode == 'H'
or event.unicode == 'I'
or event.unicode == 'J'
or event.unicode == 'K'
or event.unicode == 'L'
or event.unicode == 'M'
or event.unicode == 'N'
or event.unicode == 'O'
or event.unicode == 'P'
or event.unicode == 'Q'
or event.unicode == 'R'
or event.unicode == 'S'
or event.unicode == 'T'
or event.unicode == 'U'
or event.unicode == 'V'
or event.unicode == 'W'
or event.unicode == 'X'
or event.unicode == 'Y'
or event.unicode == 'Z'):
return True
else:
return False
# Event handling.
def get_events(self):
"""Get pygame events."""
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
elif event.type == pygame.VIDEORESIZE:
self.screen_rect = self.screen.get_rect()
self.create_all_gui()
elif event.type == pygame.KEYDOWN:
self.manage_keydown(event)
elif event.type == pygame.KEYUP:
self.manage_keyup(event)
elif event.type == pygame.MOUSEBUTTONDOWN:
self.manage_mousebuttondown(event)
elif event.type == pygame.MOUSEBUTTONUP:
self.manage_mousebuttonup(event)
elif event.type == pygame.MOUSEMOTION:
self.manage_mousemotion(event)
    def manage_mousemotion(self, event):
        """Update self.hover_text based on what the cursor is over.

        The generic buttons (answer, calculate, back, help) are checked
        first for every calculator menu; the per-menu input buttons may
        then override the text. The top-level category menus get no hover
        help. NOTE: some menus use repeated `if` (not `elif`) checks, so a
        later matching button wins — preserved as written.
        """
        mouse_pos = pygame.mouse.get_pos()
        # Only calculator menus (not the category menus) have these buttons.
        if (self.active_menu != 'main_menu'
                and self.active_menu != 'factors_menu'
                and self.active_menu != 'converters_menu'
                and self.active_menu != 'geometry_menu'
                and self.active_menu != 'algebra_menu'
                and self.active_menu != 'data_processing_menu'):
            if self.answer_button.button.collidepoint(mouse_pos):
                self.hover_text = 'Scroll to move the text left and right.'
            elif self.calculate_button.button.collidepoint(mouse_pos):
                self.hover_text = 'Click to calculate answer, or press the enter key.'
            elif self.back_button.button.collidepoint(mouse_pos):
                self.hover_text = 'Return to previous menu.'
            elif self.hover_text_help.button.collidepoint(mouse_pos):
                self.hover_text = 'Do I need to explain everything?'
            else:
                # Default prompt when not over any generic button.
                self.hover_text = 'Hover cursor over a button for help.'
            # Get factors menu.
            if self.active_menu == 'get_factors_menu':
                if self.input_button_4.button.collidepoint(mouse_pos):
                    self.hover_text = 'Enter the number you want to factor.'
            # Maintain ratio menu.
            elif self.active_menu == 'mar_menu':
                if self.input_button_1.button.collidepoint(mouse_pos):
                    self.hover_text = 'Width of source surface.'
                elif self.input_button_3.button.collidepoint(mouse_pos):
                    self.hover_text = 'Height of source surface.'
                elif self.input_button_5.button.collidepoint(mouse_pos):
                    self.hover_text = 'Width of destination surface.'
                elif self.input_button_7.button.collidepoint(mouse_pos):
                    self.hover_text = 'Height of destination surface.'
            # Solid polygon menu.
            elif self.active_menu == 'solid_polygon_menu':
                if self.input_button_4.button.collidepoint(mouse_pos):
                    self.hover_text = 'Enter the number of sides of a base.'
            # Temperature convert menu.
            elif self.active_menu == 'temp_menu':
                if self.input_button_2.button.collidepoint(mouse_pos):
                    self.hover_text = 'Original temperature.'
                elif self.input_button_4.button.collidepoint(mouse_pos):
                    self.hover_text = 'Current temperature scale (F, C, K).'
                elif self.input_button_6.button.collidepoint(mouse_pos):
                    self.hover_text = 'Temperature scale to convert to (F, C, K).'
            # Root menu.
            elif self.active_menu == 'root_menu':
                if self.input_button_3.button.collidepoint(mouse_pos):
                    self.hover_text = 'Radicand.'
                elif self.input_button_5.button.collidepoint(mouse_pos):
                    self.hover_text = 'Index.'
            # Extrapolate menu.
            elif self.active_menu == 'extrapolate_menu':
                if self.input_button_1.button.collidepoint(mouse_pos):
                    self.hover_text = 'Start number.'
                if self.input_button_3.button.collidepoint(mouse_pos):
                    self.hover_text = 'Modifier.'
                if self.input_button_5.button.collidepoint(mouse_pos):
                    self.hover_text = 'Operation (Add, subtract, multiply, divide, exponent, root).'
                if self.input_button_7.button.collidepoint(mouse_pos):
                    self.hover_text = 'Number of values from the start number to return.'
            # Other menus.
            elif (self.active_menu == 'common_factors_menu'
                    or self.active_menu == 'gcf_menu'
                    or self.active_menu == 'lcf_menu'
                    or self.active_menu == 'average_menu'
                    or self.active_menu == 'median_menu'
                    or self.active_menu == 'mode_menu'
                    or self.active_menu == 'range_menu'):
                if self.input_button_1.button.collidepoint(mouse_pos):
                    self.hover_text = "First number (You don't need to use all four)."
                if self.input_button_3.button.collidepoint(mouse_pos):
                    self.hover_text = "Second number (You don't need to use all four)."
                if self.input_button_5.button.collidepoint(mouse_pos):
                    self.hover_text = "Third number (You don't need to use all four)."
                if self.input_button_7.button.collidepoint(mouse_pos):
                    self.hover_text = "Fourth number (You don't need to use all four)."
def manage_keydown(self, event):
"""Manage pygame KEYDOWN events."""
if event.key == pygame.K_ESCAPE:
exit()
# Get factors input menu.
elif self.active_menu == 'get_factors_input_1':
if event.key == pygame.K_RETURN:
if self.input_box_4 != '':
self.input_value_4 = int(self.input_box_4)
self.answer = str(MATH.get_factors_(self.input_value_4))
self.answer_text_x_mod = 0
self.active_button = ''
self.active_menu = 'get_factors_menu'
elif event.key == pygame.K_BACKSPACE:
self.input_box_4 = self.input_box_4[:-1]
elif len(self.input_box_4) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_4 += event.unicode
# Common factors input menu.
elif (self.active_menu == 'common_factors_input_1'
or self.active_menu == 'common_factors_input_2'
or self.active_menu == 'common_factors_input_3'
or self.active_menu == 'common_factors_input_4'):
if event.key == pygame.K_RETURN:
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.common_factors_(value_list))
self.answer_text_x_mod = 0
self.active_menu = 'common_factors_menu'
self.active_button = ''
if self.active_menu == 'common_factors_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_1 = self.input_box_1[:-1]
elif len(self.input_box_1) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_1 += event.unicode
elif self.active_menu == 'common_factors_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
elif len(self.input_box_3) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_3 += event.unicode
elif self.active_menu == 'common_factors_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
elif len(self.input_box_5) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_5 += event.unicode
elif self.active_menu == 'common_factors_input_4':
if event.key == pygame.K_BACKSPACE:
self.input_box_7 = self.input_box_7[:-1]
elif len(self.input_box_7) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_7 += event.unicode
# Greatest common factors input menu.
elif (self.active_menu == 'gcf_input_1'
or self.active_menu == 'gcf_input_2'
or self.active_menu == 'gcf_input_3'
or self.active_menu == 'gcf_input_4'):
if event.key == pygame.K_RETURN:
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
| |
from PIL import Image
from PIL import ImageChops
import datetime
import screenshooter.saves
class Differ:
"""
Differ is a service that allows the diffing of images.
"""
def __init__(self, user_config):
if user_config is None:
raise UnboundLocalError("A configuration file has not been referenced, please provide one.")
self.config = user_config
self.imgs = dict()
self.diff = None
self.original_img = None
self.modified_img = None
self.archive_time = None
self.img_type = self.config.picture_type
def equals(self, first_img, second_img):
"""
Identifies if first_img and second_img are identical to each other.
Args:
first_img: a PIL image object of the original image
second_img: a PIL image object of the modified image
Returns:
A boolean stating whether or not the two images are identical, True means they are
identical.
"""
if first_img is None or second_img is None:
return False
self.original_img = first_img
self.modified_img = second_img
self.diff = ImageChops.difference(first_img, second_img)
if self.diff.getbbox() != None:
return False
else:
self.diff = None
return True
def archive_imgs(self, img_loc):
"""
Archive the image given by img_loc and it's corresponding diff and change images.
Args:
img_loc: a dictionary representing the location of the image within the multi-dimensional
dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
"""
view = img_loc['View']
date = img_loc['Date']
function = img_loc['Function']
if self.archive_time is None:
self.archive_time = datetime.datetime.now().isoformat()
if self.archive_time not in self.imgs[view]:
self.imgs[view][self.archive_time] = dict()
self.imgs[view][self.archive_time]["new" + function] = self.imgs[view][date][function]
img_name = function.partition('.')
if img_name[0] + "Diff" + self.img_type in self.imgs[view][date]:
self.imgs[view][self.archive_time]["new" + img_name[0] + "Diff.png"] = self.imgs[view][date][img_name[0] + "Diff" + self.img_type]
if img_name[0] + "Change" + self.img_type in self.imgs[view][date]:
self.imgs[view][self.archive_time]["new" + img_name[0] + "Change.png"] = self.imgs[view][date][img_name[0] + "Change" + self.img_type]
def store_screenshot(self, img_loc):
"""
Moves the screenshot from it's temporary location in the multi-dimensional dictionary to a portion
where it will actually be saved.
Args:
img_loc: a dictionary representing the location of the image within the multi-dimensional
dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
Returns:
A boolean stating whether or not it was successful, True means successful.
"""
if img_loc is None:
return False
view = img_loc['View']
date = img_loc['Date']
function = img_loc['Function']
try:
if view not in self.imgs:
self.imgs[view] = dict()
if date not in self.imgs[view]:
self.imgs[view][date] = dict()
if function in self.imgs[view][date]:
self.archive_imgs(img_loc)
self.imgs[view][date]["new" + function] = self.imgs['tmp'][view][date][function]
except KeyError:
del self.imgs[view][date]["new" + function]
return False
return True
def store_diff(self, img_loc, diff_img):
"""
Adds the diff image to the multi-dimensional dictionary so it can be saved.
Args:
img_loc: a dictionary representing the location of the image that was used to create the diff image within
the multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
diff_img: a PIL image object of the diff image
Returns:
A boolean stating whether or not it was successful, True means successful.
"""
if diff_img is None:
return False
view = img_loc['View']
date = img_loc['Date']
img_name = img_loc['Function'].partition('.')
diff_name = "new" + img_name[0] + "Diff" + self.img_type
self.imgs[view][date][diff_name] = diff_img
return True
def store_change(self, img_loc, change_img):
"""
Adds the change image to the multi-dimensional dictionary so it can be saved.
Args:
img_loc: a dictionary representing the location of the image that was used to create the change image within
the multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
change_img: a PIL image object of the diff image
Returns:
A boolean stating whether or not it was successful, True means successful.
"""
if change_img is None:
return False
view = img_loc['View']
date = img_loc['Date']
img_name = img_loc['Function'].partition('.')
change_name = "new" + img_name[0] + "Change" + self.img_type
self.imgs[view][date][change_name] = change_img
return True
def store(self, img_loc = None, diff_img = None, change_img = None):
"""
Adds the screenshot and it's corresponding diff and change images to the multi-dimensional dictionary
so they can be saved.
Args:
img_loc: a dictionary representing the location of the screenshot within the
multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
diff_img: a PIL image object of the diff image
change_img: a PIL image object of the change image
Returns:
A boolean stating whether or not it was successful, True means successful.
"""
if img_loc is None and diff_img is None and change_img is None:
return False
if self.store_screenshot(img_loc):
self.store_diff(img_loc, diff_img)
self.store_change(img_loc, change_img)
else:
return False
return True
def locate_img_for_diff(self, loc, service_name = self.config.service):
"""
Locates the image to diff against.
Args:
loc: a dictionary representing the location of the modified image within the
multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
service_name: the name of the service to grab the image from (Defaults to the service in the config file)
Returns:
A dictionary representing the location of the original image to diff against in the
multi-dimensional dictionary
"""
if service_name.upper() == "S3":
service = screenshooter.saves.s3_service()
elif service_name.upper() == "FILESYSTEM":
service = screenshooter.saves.fs_service()
return service.collect_img(self.imgs, loc)
def get_img(self, loc, tmp = False):
"""
Retrieves the PIL image object from the multi-dimensional dictionary.
Args:
loc: a dictionary representing the location of the image within the
multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
tmp: a boolean stating whether or not to grab from the tmp dictionary, True means grab from the tmp
dictionary (Defaults to False)
Returns:
A PIL image object
"""
view = loc['View']
date = loc['Date']
function = loc['Function']
try:
if tmp:
return self.imgs['tmp'][view][date][function]
return self.imgs[view][date][function]
except KeyError:
return None
    def sanitize_for_diff(self, original_loc, modified_loc):
        """
        Checks to make sure all the information is acceptable before performing the diff.

        Args:
            original_loc: a dictionary representing the location of the original image within the
                multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
            modified_loc: a dictionary representing the location of the modified image within the
                multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}

        Returns:
            The pixel difference between the two images, or None when the images
            are identical or could not be diffed.

        Raises:
            UnboundLocalError: The class level images are null and the parameters do not provide locations to open them.
        """
        # No locations supplied, no cached images, and no cached diff:
        # there is nothing to work with.
        if (original_loc is None and modified_loc is None) and (self.original_img is None or self.modified_img is None) and self.diff is None:
            raise UnboundLocalError("The class level images are null and the parameters" +
                " do not provide locations to open them")
        if original_loc is not None and modified_loc is not None:
            # Fresh locations win over any cached state. The modified image is
            # read from the tmp dictionary (tmp=True), the original from the
            # saved one.
            try:
                self.original_img = self.get_img(original_loc)
                self.modified_img = self.get_img(modified_loc, True)
                self.diff = ImageChops.difference(self.original_img, self.modified_img)
                if self.diff.getbbox() is None:
                    # Identical images: no diff to report.
                    return None
                return self.diff
            except (IOError, KeyError, TypeError):
                # Missing/unopenable image (get_img returned None): no diff.
                return None
        elif self.diff is not None:
            # Fall back to the diff computed by a previous equals()/diff call.
            return self.diff
        else:
            return None
def get_diff(self, original_loc = None, modified_loc = None):
"""
Gets the difference between two images and applies a highlight over them if specified.
Args:
original_loc: a dictionary representing the location of the original image within the
multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
(Defaults to None)
modified_loc: a dictionary representing the location of the modified image within the
multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
(Defaults to None)
Returns:
A PIL image object of the diff image.
"""
dif = self.sanitize_for_diff(original_loc, modified_loc)
if dif is None:
return None
color = self.config.highlight_color
dif = ImageChops.invert(dif)
if not color:
return Image.blend(dif, self.original_img, 0.2)
width = dif.size[0]
height = dif.size[1]
pixel = dif.load()
for x in range(width):
for y in range(height):
if pixel[x, y] == (255, 255, 255, 255):
continue
pixel[x, y] = color
return Image.blend(dif, self.original_img, 0.2)
def subtract_pixels(self, first, second):
"""
Subtract two pixels.
Args:
first: An RGBA pixel value
second: An RGBA pixel value
Returns:
A tuple containing the result of the absolute value of the subtraction of the two pixels.
"""
return tuple([abs(first[0] - second[0]), abs(first[1] - second[1]), abs(first[2] - second[2]), abs(first[3] - second[3])])
def get_change(self, original_loc = None, modified_loc = None):
"""
Gets the changed difference between two images and applies a highlight over them if specified.
Args:
original_loc: a dictionary representing the location of the original image within the
multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
(Defaults to None)
modified_loc: a dictionary representing the location of the modified image within the
multi-dimensional dictionary i.e. {'View': 'SomeView', 'Date': 'SomeDate', 'Function': 'SomeFunction'}
(Defaults | |
# -*- coding: utf-8 -*-
"""Copie de DLII_project.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1c4gMlzfMedMPnITOScXl3ihKCval8Rtk
"""
# Colab-only setup: download and unpack the data archive, then install the
# MNIST reader. The `!`-prefixed lines are IPython shell magics and only run
# inside a notebook environment.
colab = True
if colab:
    from google_drive_downloader import GoogleDriveDownloader as gdd
    gdd.download_file_from_google_drive(file_id='1nnsLbtFQ944iU-bw06I34kuQS7n-VBdL', dest_path='./data.zip')
    from zipfile import ZipFile
    with ZipFile('data.zip', 'r') as zipObj:
        # Extract all the contents of zip file in current directory
        zipObj.extractall()
# Unzip mnist data
!gzip -d ./data/t10k-images-idx3-ubyte.gz ./data/t10k-labels-idx1-ubyte.gz ./data/train-labels-idx1-ubyte.gz ./data/train-images-idx3-ubyte.gz
# Install idx2numpy for reading MNIST
!pip install idx2numpy
"""<h1 align='center'>Projet Deep Learning II</h1>
<h4 align='center'>Authors : <NAME> & <NAME></h4>
## Table of Contents
1. [Introduction](#intro)
2. [Données](#donnees)
3. [Fonctions élémentaires](#fctelementaire)
+ [Construction d'un RBM et test sur Binary AlphaDigits](#constructionRBM)
+ [Construction d’un DBN et test sur Binary AlphaDigits](#constructionDBN)
+ [Construction d’un DNN et test sur MNIST](#constructionDNN)
4. [Travail préliminaire (Binary AlphaDigit)](#travailprelim)
5. [Etude à réaliser (MNIST)](#mnist)
"""
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import random
import copy
import scipy.io
import scipy
from scipy.special import softmax
import idx2numpy
"""# <a id='intro'>1. Introduction</a>
L’objectif du projet est de réaliser un réseau de neurones profond pré-entraîné ou non pour la classification de chiffres manuscrits. On va comparer les performances, en terme de taux de bonnes classifications, d’un réseau pré-entrainé et d’un réseau initialisé aléatoirement, en fonction du nombre de données d’apprentissage, du nombre de couches du réseau et enfin du nombre de neurones par couches.
# <a id='donnees'>2. Données</a>
**To download :**
+ http://yann.lecun.com/exdb/mnist/ (les 4 fichiers)
+ https://cs.nyu.edu/~roweis/data.html (Binary Alphadigits)
"""
# Root directory for all downloaded data files.
path_to_data = './data/'
# Function for Binary Alphadigits dataset
# Load the Binary AlphaDigits .mat file once; lire_alpha_digit reads from it.
alphadigs = scipy.io.loadmat(path_to_data + 'binaryalphadigs.mat')
def lire_alpha_digit(index_carac):
    """
    Get the Binary AlphaDigits data for the given characters.

    Parameters
    ----------
    index_carac : list
        Indices of the character classes to load (rows of the 'dat' matrix).

    Returns
    -------
    data : ndarray
        Matrix containing the data with rows corresponding to samples and
        columns to pixels (float64, one flattened image per row).
    """
    # FIX: the original seeded an uninitialized np.empty row, grew the array
    # with repeated np.append (quadratic copying), then sliced the dummy row
    # off. Collect the flattened images and stack them once instead.
    samples = [img.flatten()
               for i in index_carac
               for img in alphadigs['dat'][i, :]]
    # float64 matches the dtype the original produced via np.empty + append.
    return np.array(samples, dtype=np.float64)
# Quick sanity check: load one character class and inspect the matrix shape.
alphadigs_data = lire_alpha_digit([10])
alphadigs_data.shape
# Function for MNIST dataset
def to_black_white(images):
    """Threshold grayscale pixel values to binary {0, 1}.

    Pixels below 128 become 0; pixels at or above 128 become 1.
    The input array is modified in place and also returned.
    """
    white_mask = images >= 128
    images[~white_mask] = 0
    images[white_mask] = 1
    return images
def lire_MNIST():
    """
    Load the MNIST train/test images and labels.

    Images are flattened to vectors and thresholded to binary {0, 1}.
    (FIX: the original docstring was a copy-paste of lire_alpha_digit's.)

    Returns
    -------
    train_image : ndarray
        Binarized, flattened training images, one per row.
    train_label : ndarray
        Training labels.
    test_image : ndarray
        Binarized, flattened test images, one per row.
    test_label : ndarray
        Test labels.
    """
    def load_images(path):
        # Read the IDX file and binarize each flattened image.
        raw = idx2numpy.convert_from_file(path)
        return to_black_white(np.array([img.flatten() for img in raw]))

    train_image = load_images('data/train-images-idx3-ubyte')
    test_image = load_images('data/t10k-images-idx3-ubyte')
    train_label = idx2numpy.convert_from_file('data/train-labels-idx1-ubyte')
    test_label = idx2numpy.convert_from_file('data/t10k-labels-idx1-ubyte')
    return train_image, train_label, test_image, test_label
# Load MNIST once at import time and report the dataset dimensions.
train_image, train_label, test_image, test_label = lire_MNIST()
nb_labels = np.unique(train_label).shape[0]  # number of distinct classes
print(train_image.shape)
print(train_label.shape)
print(test_image.shape)
print(test_label.shape)
# Display one sample as a quick visual check.
plt.imshow(np.reshape(train_image[1], newshape=(28,28)), cmap=plt.cm.binary)
"""# <a id='fctelementaire'>3. Fonctions élémentaires</a>
## <a id='constructionRBM'>3.1 Construction d'un RBM et test sur Binary AlphaDigits</a>
"""
def sigmoid(x):
    """
    Logistic sigmoid, 1 / (1 + exp(-x)).

    Implemented as exp(-logaddexp(0, -x)), which is algebraically identical
    but avoids the overflow RuntimeWarning the naive form emits for large
    negative inputs (where exp(-x) overflows to inf).

    Parameters
    ----------
    x : float or ndarray

    Returns
    -------
    result : float or ndarray
        Value(s) in (0, 1), same shape as x.
    """
    return np.exp(-np.logaddexp(0, -x))
def init_RBM(p, q):
    """
    Build a fresh RBM parameter dictionary.

    Weights are drawn from N(0, 0.1^2); both bias vectors start at zero.

    Parameters
    ----------
    p : int
        Number of visible units.
    q : int
        Number of hidden units.

    Returns
    -------
    RBM : dict
        Keys: 'W' (p x q weights), 'a' (1 x p visible bias),
        'b' (1 x q hidden bias), 'p', 'q'.
    """
    return {
        "W": 0.1 * np.random.randn(p, q),
        "a": np.zeros(shape=(1, p)),
        "b": np.zeros(shape=(1, q)),
        "p": p,
        "q": q,
    }
def entree_sortie_RBM(RBM, input_data):
    """
    Forward pass: hidden-unit activation probabilities given visible data.

    Parameters
    ----------
    RBM : dict
        A RBM structure (uses keys 'W' and 'b').
    input_data : ndarray
        Visible-layer data, one sample per row.

    Returns
    -------
    result : ndarray
        Hidden-unit probabilities, one sample per row.
    """
    pre_activation = input_data.dot(RBM['W']) + RBM['b']
    return sigmoid(pre_activation)
def sortie_entree_RBM(RBM, output_data):
    """
    Backward pass: visible-unit activation probabilities given hidden data.

    Parameters
    ----------
    RBM : dict
        A RBM structure (uses keys 'W' and 'a').
    output_data : ndarray
        Hidden-layer data, one sample per row.

    Returns
    -------
    result : ndarray
        Visible-unit probabilities, one sample per row.
    """
    pre_activation = output_data.dot(RBM['W'].T) + RBM['a']
    return sigmoid(pre_activation)
def train_RBM(RBM, input_data, nb_iter=100, lr=0.1, batch_size=32, verbose=True):
    """
    Train a RBM with one-step Contrastive Divergence (CD-1).

    Parameters
    ----------
    RBM : dict
        A RBM structure.
    input_data : ndarray
        Training data, one sample per row. Shuffled in place each epoch.
    nb_iter : int
        Number of epochs.
    lr : float
        Learning rate.
    batch_size : int
        Mini-batch size.
    verbose : bool
        Print the reconstruction error after each epoch.

    Returns
    -------
    RBM : dict
        The trained RBM structure (updated in place).
    """
    n = input_data.shape[0]
    p = RBM['p']
    q = RBM['q']
    # FIX: the original looped `for batch in range(batch_size)`, i.e. it ran
    # batch_size mini-batches per epoch regardless of the data set size, and
    # clamped out-of-range indices to n-1 (repeating the last sample). Use
    # the number of batches needed to cover the data, with plain slicing.
    nb_batches = (n + batch_size - 1) // batch_size
    for epoch in range(nb_iter):
        np.random.shuffle(input_data)
        for batch in range(nb_batches):
            x = input_data[batch * batch_size:(batch + 1) * batch_size, :]
            v_0 = x
            # One Gibbs step: sample hidden units, then visible units.
            h_0 = (np.random.uniform(0, 1, size=(x.shape[0], q)) < entree_sortie_RBM(RBM, x)).astype(int)
            v_1 = (np.random.uniform(0, 1, size=(x.shape[0], p)) < sortie_entree_RBM(RBM, h_0)).astype(int)
            # CD-1 gradient estimates.
            da = np.sum(v_0 - v_1, axis=0)
            db = np.sum(entree_sortie_RBM(RBM, x) - entree_sortie_RBM(RBM, v_1), axis=0)
            dW = v_0.T.dot(entree_sortie_RBM(RBM, x)) - v_1.T.dot(entree_sortie_RBM(RBM, v_1))
            RBM['W'] += lr * dW / batch_size
            RBM['a'] += lr * da / batch_size
            RBM['b'] += lr * db / batch_size
        if verbose:
            # Quadratic reconstruction error over the full data set.
            sortie = entree_sortie_RBM(RBM, input_data)
            new_entree = sortie_entree_RBM(RBM, sortie)
            erreur_reconstruction = np.sum((input_data - new_entree)**2/n)
            print("iteration %d \t : \t erreur reconstruction %.2f" % (epoch, erreur_reconstruction))
    return RBM
def generer_image_RBM(RBM, image_shape, nb_images=3, nb_iter_gibbs=100, plot=True):
    """Draw samples from a trained RBM with Gibbs sampling.

    Parameters
    ----------
    RBM : dict
        A trained RBM structure.
    image_shape : tuple
        Shape used to reshape each sample for display.
    nb_images : int
        Number of samples to generate.
    nb_iter_gibbs : int
        Number of Gibbs-sampling iterations per sample.
    plot : bool
        Display the generated images.

    Returns
    -------
    images : list of arrays
        Generated binary samples (row vectors of length p).
    """
    p = RBM['p']
    q = RBM['q']
    images = []
    if plot:
        plt.figure(figsize=(20, 20))
    for idx in range(nb_images):
        # Start the chain from a uniformly random binary visible vector.
        v = (np.random.uniform(0, 1, size=(1, p)) < 0.5).astype(int)
        for _ in range(nb_iter_gibbs):
            # Alternate hidden <- visible and visible <- hidden sampling.
            h = (np.random.uniform(0, 1, size=(1, q)) < entree_sortie_RBM(RBM, v)).astype(int)
            v = (np.random.uniform(0, 1, size=(1, p)) < sortie_entree_RBM(RBM, h)).astype(int)
        images.append(v)
        if plot:
            img = np.reshape(v, newshape=image_shape)
            plt.subplot(1, nb_images, idx + 1)
            plt.axis('off')
            plt.imshow(img, cmap='gray')
    return images
def principal_RBM_alpha(q, index_carac, nb_images, nb_iter=100, lr=0.1, batch_size=32, nb_iter_gibbs=100, verbose=1, plot=True):
    """Learn Binary AlphaDigits characters with an RBM and sample from it.

    Parameters
    ----------
    q : int
        Number of hidden units.
    index_carac : list
        Indices of the characters to learn.
    nb_images : int
        Number of images to generate after training.
    nb_iter : int
        Number of iterations in the gradient descent.
    lr : float
        Learning rate.
    batch_size : int
        Mini-batch size.
    nb_iter_gibbs : int
        Number of Gibbs-sampling iterations.
    verbose : bool
        Enable verbose output.
    plot : bool
        Plot the generated images.

    Returns
    -------
    RBM : dict
        The trained RBM structure.
    """
    data = lire_alpha_digit(index_carac)
    # Binary AlphaDigits images are 20x16 pixels.
    image_shape = (20, 16)
    rbm = init_RBM(data.shape[1], q)
    rbm = train_RBM(rbm, data, nb_iter, lr=lr, batch_size=batch_size, verbose=verbose)
    generer_image_RBM(rbm, image_shape, nb_images, nb_iter_gibbs, plot=plot)
    return rbm
"""## <a id='constructionDBN'>3.2 Construction d'un DBN et test sur Binary AlphaDigits</a>"""
def init_DBN(p, q=32, n_layers=2):
    """Create a DBN (a stack of RBMs) with initialized weights and biases.

    Parameters
    ----------
    p : int
        Number of units of the visible layer.
    q : int
        Number of units of each hidden layer.
    n_layers : int
        Number of additional hidden layers.
        NOTE(review): n_layers + 1 RBMs are created in total (one p->q RBM
        plus n_layers q->q RBMs) — confirm this is the intended depth.

    Returns
    -------
    DBN : list of dicts
    """
    stack = [init_RBM(p, q)]
    stack.extend(init_RBM(q, q) for _ in range(n_layers))
    return stack
def train_DBN(DBN, input_data, nb_iter=100, lr=0.1, batch_size=32, verbose=True):
    """Train a DBN greedily, one RBM layer at a time.

    Each layer is trained on a binary sample of the previous layer's hidden
    activities, starting from the raw input data.

    Parameters
    ----------
    DBN : list of dicts
        A DBN structure (list of RBMs); updated in place.
    input_data : ndarray
        Input data.
    nb_iter : int
        Number of iterations in the gradient descent.
    lr : float
        Learning rate.
    batch_size : int
        Mini-batch size.
    verbose : bool
        Enable verbose output.

    Returns
    -------
    DBN : list of dicts
        The trained DBN structure.
    """
    layer_input = input_data
    for k in range(len(DBN)):
        DBN[k] = train_RBM(DBN[k], layer_input, nb_iter, lr=lr, batch_size=batch_size, verbose=verbose)
        # Propagate a binary sample of the hidden probabilities upward to
        # serve as training data for the next layer.
        proba = entree_sortie_RBM(DBN[k], layer_input)
        layer_input = (np.random.uniform(0, 1, size=(layer_input.shape[0], proba.shape[1])) < proba).astype(int)
    return DBN
def generer_image_DBN(DBN, image_shape, nb_images=3, nb_iter_gibbs=100, plot=True):
"""
Generate samples following a DBN.
Parameters
----------
DBN : list of dicts
A DBN structure.
image_shape : tuple
Shape of the images
nb_images : int
Number of | |
an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_Timestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_Timestamp: If op_Timestamp is specified, the field named in this input will be compared to the value in Timestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Timestamp must be specified if op_Timestamp is specified.
:type val_f_Timestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_Timestamp: If op_Timestamp is specified, this value will be compared to the value in Timestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Timestamp must be specified if op_Timestamp is specified.
:type val_c_Timestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_UpdatedAt: The operator to apply to the field UpdatedAt. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. UpdatedAt: The date and time this record was last modified. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_UpdatedAt: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_UpdatedAt: If op_UpdatedAt is specified, the field named in this input will be compared to the value in UpdatedAt using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_UpdatedAt must be specified if op_UpdatedAt is specified.
:type val_f_UpdatedAt: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_UpdatedAt: If op_UpdatedAt is specified, this value will be compared to the value in UpdatedAt using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_UpdatedAt must be specified if op_UpdatedAt is specified.
:type val_c_UpdatedAt: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` GroupID
:param sort: The data field(s) to use for sorting the output. Default is GroupID. Valid values are GroupID, GroupName, Criteria, Rank, SNMPPolling, FlowCollection, PerfFrequency, Timestamp, MemberCount, UpdatedAt.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfGroupDefn. Valid values are GroupID, GroupName, Criteria, Rank, SNMPPolling, FlowCollection, PerfFrequency, Timestamp, MemberCount, UpdatedAt. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_group_defns: An array of the IfGroupDefn objects that match the specified input criteria.
:rtype if_group_defns: Array of IfGroupDefn
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def create(self, **kwargs):
    """Creates a new if group defn.

    **Inputs**

    :param Criteria: The group belongs to defined criteria in an interface group. (required)
    :type Criteria: String

    :param GroupName: The name of the group in the Interface. (required)
    :type GroupName: String

    :param Rank: The rank specified for the group in an interface. (required)
    :type Rank: Integer

    :param FlowCollection: The level of flow collection in an interface group.
    :type FlowCollection: Integer

    :param MemberCount: The total number of member in an interface group.
    :type MemberCount: Integer

    :param PerfFrequency: The performance frequency level in an interface group.
    :type PerfFrequency: Integer

    :param SNMPPolling: The SNMP polling state in an interface group definition.
    :type SNMPPolling: Integer

    :param Timestamp: The date and time this record was collected.
    :type Timestamp: DateTime

    :param UpdatedAt: The date and time this record was last modified.
    :type UpdatedAt: DateTime

    **Outputs**

    :return id: The id of the newly created if group defn.
    :rtype id: Integer

    :return model: The class name of the newly created if group defn.
    :rtype model: String

    :return uri: A URI that may be used to retrieve the newly created if group defn.
    :rtype uri: String

    :return if_group_defn: The newly created if group defn.
    :rtype if_group_defn: IfGroupDefn
    """
    return self.api_request(self._get_method_fullname("create"), kwargs)
def destroy(self, **kwargs):
"""Deletes the specified if group defn from NetMRI.
**Inputs**
| ``api | |
###%file:dart_kernel.py
#
# MyBatch Jupyter Kernel
# generated by MyPython
#
from math import exp
from queue import Queue
from threading import Thread
from ipykernel.kernelbase import Kernel
from pexpect import replwrap, EOF
from jinja2 import Environment, PackageLoader, select_autoescape,Template
from abc import ABCMeta, abstractmethod
from typing import List, Dict, Tuple, Sequence
from shutil import copyfile,move
from urllib.request import urlopen
import base64
import urllib.request
import urllib.parse
import platform
import pexpect
import signal
import typing
import typing as t
import re
import signal
import subprocess
import tempfile
import os
import stat
import sys
import traceback
import os.path as path
import codecs
import time
import importlib
import importlib.util
import inspect
from plugins.ISpecialID import IStag,IDtag,IBtag,ITag,ICodePreproc
from plugins._filter2_magics import Magics
#
# MyPython Jupyter Kernel
#
###%include:../src/head.py
###%include:../src/common.py
class IREPLWrapper(replwrap.REPLWrapper):
    """A REPLWrapper that can stream child-process output incrementally.

    When _expect_prompt is called with timeout=None (i.e. code from a
    Jupyter cell is running), each chunk of child output is forwarded
    through line_output_callback as it arrives instead of being buffered
    until the prompt returns.
    """
    def __init__(self, write_to_stdout, write_to_stderr, read_from_stdin,
            cmd_or_spawn,replsetip, orig_prompt, prompt_change,
            extra_init_cmd=None, line_output_callback=None):
        self._write_to_stdout = write_to_stdout
        self._write_to_stderr = write_to_stderr
        self._read_from_stdin = read_from_stdin
        self.line_output_callback = line_output_callback
        # Sentinel text that marks the end of the REPL start-up output.
        self.replsetip=replsetip
        # True while the REPL is still printing its start-up banner.
        self.startflag=True
        # Maximum seconds to wait for the start-up banner to finish.
        self.startexpecttimeout=60
        # x = time.localtime(time.time())
        self.start_time = time.time()
        replwrap.REPLWrapper.__init__(self, cmd_or_spawn, orig_prompt,
            prompt_change,extra_init_cmd=extra_init_cmd)
    def _expect_prompt(self, timeout=-1):
        if timeout ==None :
            # "None" means we are executing code from a Jupyter cell by way of the run_command
            # in the do_execute() code below, so do incremental output.
            retry=0          # (unused)
            received=False   # (unused)
            cmdstart_time = time.time()
            cmdexectimeout=10
            while True:
                if self.startflag :
                    # During start-up wait indefinitely per chunk, but bail
                    # out once the overall start-up timeout is exceeded.
                    cmdexectimeout=None
                    run_time = time.time() - cmdstart_time
                    if run_time > self.startexpecttimeout:
                        self.startflag=False
                        self.line_output_callback(self.child.before + '\r\n')
                        # self.line_output_callback("\nEnd of startup process\n")
                        break
                try:
                    # Index 0: prompt, 1: continuation prompt,
                    # 2: start-up sentinel, 3: EOF, 4: timeout.
                    pos = self.child.expect_exact([self.prompt, self.continuation_prompt, self.replsetip, pexpect.EOF, pexpect.TIMEOUT],timeout=cmdexectimeout)
                    if pos == 2:
                        # Start-up sentinel received: the REPL is ready.
                        if self.child.terminated:
                            self.line_output_callback("\nprocess.terminated\n")
                        self.line_output_callback(self.child.before +self.replsetip+ '\r\n')
                        self.line_output_callback("\nEnd of startup process\n")
                        self.replsetip=u'\r\n'
                        cmdexectimeout=None
                        self.startflag=False
                        break
                    elif pos ==3:
                        # EOF: the child process has exited; flush remains.
                        if len(self.child.before) != 0:
                            self.line_output_callback(self.child.before + '\r\n')
                        self.line_output_callback('The process has exited.\r\n')
                        break
                    elif pos == 0:
                        # Prompt seen: forward everything that preceded it.
                        self.line_output_callback(self.child.before + '\n')
                        cmdstart_time = time.time()
                        if self.prompt!="\r\n":break
                    else:
                        if len(self.child.before) != 0:
                            # prompt received, but partial line precedes it
                            self.line_output_callback(self.child.before)
                            cmdstart_time = time.time()
                        else:
                            if self.startflag :
                                continue
                            # No output for 10 seconds: stop waiting.
                            run_time = time.time() - cmdstart_time
                            if run_time > 10:
                                break
                except Exception as e:
                    # self.line_output_callback(self.child.before)
                    self._write_to_stderr("[MyCkernel] Error:Executable _expect_prompt error! "+str(e)+"\n")
        else:
            # Otherwise, use existing non-incremental code
            pos = replwrap.REPLWrapper._expect_prompt(self, timeout=timeout)
        # Prompt received, so return normally
        # NOTE(review): in the incremental branch `pos` may be unbound if an
        # exception fires before the first expect returns — confirm intended.
        return pos
class RealTimeSubprocess(subprocess.Popen):
    """A subprocess.Popen whose stdout/stderr are drained by daemon threads
    so output can be forwarded to the Jupyter frontend while it runs.

    The magic marker `inputRequest`, when found in the child's stdout,
    triggers a blocking stdin read from the frontend.
    """
    inputRequest = "<inputRequest>"
    # Back-reference to the owning kernel (used for logging and magics).
    kobj=None
    def setkobj(self,k=None):
        # Attach the owning kernel after construction.
        self.kobj=k
    def __init__(self, cmd, write_to_stdout, write_to_stderr, read_from_stdin,
            cwd=None,shell=False,env=None,kobj=None,outencode='UTF-8'):
        self.outencode=outencode
        self.kobj=kobj
        self._write_to_stdout = write_to_stdout
        self._write_to_stderr = write_to_stderr
        self._read_from_stdin = read_from_stdin
        # An empty env dict would wipe the child's environment; treat it as
        # "inherit the parent environment" instead.
        if env!=None and len(env)<1:env=None
        super().__init__(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE,
            bufsize=0,cwd=cwd,shell=shell,env=env)
        # Daemon reader threads push raw byte chunks into queues so the
        # main thread never blocks on the pipes.
        self._stdout_queue = Queue()
        self._stdout_thread = Thread(target=RealTimeSubprocess._enqueue_output, args=(self.stdout, self._stdout_queue))
        self._stdout_thread.daemon = True
        self._stdout_thread.start()
        self._stderr_queue = Queue()
        self._stderr_thread = Thread(target=RealTimeSubprocess._enqueue_output, args=(self.stderr, self._stderr_queue))
        self._stderr_thread.daemon = True
        self._stderr_thread.start()
    @staticmethod
    def _enqueue_output(stream, queue):
        # Read up to 4096 bytes at a time until EOF, then close the stream.
        for line in iter(lambda: stream.read(4096), b''):
            queue.put(line)
        stream.close()
    def write_contents(self,magics=None):
        """Drain both queues and forward their contents to the frontend."""
        def read_all_from_queue(queue):
            # Drain only what is queued right now (qsize is a snapshot).
            res = b''
            size = queue.qsize()
            while size != 0:
                res += queue.get_nowait()
                size -= 1
            return res
        stderr_contents = read_all_from_queue(self._stderr_queue)
        if stderr_contents:
            # Prefer the kernel's logger (level 3) when a kernel is attached.
            if self.kobj!=None:
                self.kobj._logln(stderr_contents.decode('UTF-8', errors='ignore'),3)
            else:
                self._write_to_stderr(stderr_contents.decode('UTF-8', errors='ignore'))
        stdout_contents = read_all_from_queue(self._stdout_queue)
        if stdout_contents:
            # Image output is passed through as raw bytes, undecoded.
            if self.kobj.get_magicsSvalue(magics,"outputtype").startswith("image"):
                self._write_to_stdout(stdout_contents,magics)
                ## reset outputtype
                magics['_st']["outputtype"]="text/plain"
                return
            contents=''
            if self.outencode=='UTF-8':
                contents = stdout_contents.decode('UTF-8', errors='ignore')
            else:
                contents = stdout_contents.decode(self.outencode, errors='ignore')
            # if there is input request, make output and then
            # ask frontend for input
            start = contents.find(self.__class__.inputRequest)
            if(start >= 0):
                contents = contents.replace(self.__class__.inputRequest, '')
                if(len(contents) > 0):
                    self._write_to_stdout(contents,magics)
                readLine = ""
                while(len(readLine) == 0):
                    readLine = self._read_from_stdin()
                # need to add newline since it is not captured by frontend
                readLine += "\n"
                self.stdin.write(readLine.encode())
            else:
                self._write_to_stdout(contents,magics)
    def wait_end(self,magics):
        """Busy-poll until the child exits, streaming text output as it
        arrives; flush once more at the end. Returns the exit code."""
        while self.poll() is None:
            # Stream incrementally only for text output; binary output is
            # flushed in one piece after the process ends.
            if self.kobj.get_magicsSvalue(magics,"outputtype").startswith("text"):
                self.write_contents(magics)
                pass
            continue
        self.write_contents(magics)
        if self.kobj==None:
            self._write_to_stdout("The process end:"+str(self.pid)+"\n",magics)
        else:
            self.kobj._logln("The process end:"+str(self.pid))
        ############################################
        # self.write_contents(magics)
        # wait for threads to finish, so output is always shown
        self._stdout_thread.join()
        self._stderr_thread.join()
        # self.write_contents(magics)
        return self.returncode
class MyKernel(Kernel):
implementation = 'jupyter-MyPython-kernel'
implementation_version = '1.0'
language = 'Python'
language_version = sys.version.split()[0]
language_info = {'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {
'name': 'ipython',
'version': sys.version_info[0]
},
'pygments_lexer': 'ipython%d' % 3,
'nbconvert_exporter': 'python',
'file_extension': '.py'}
banner = "MyPython kernel.\n" \
"Uses gcc, compiles in C11, and creates source code files and executables in temporary folder.\n"
kernelinfo="[MyPython]"
main_head = "\n" \
"\n" \
"int main(List<String> arguments){\n"
main_foot = "\nreturn 0;\n}"
def __init__(self, *args, **kwargs):
    """Initialize the kernel: execution flags, platform info, the plugin
    system, and the background thread that reaps finished REPL processes."""
    super(MyKernel, self).__init__(*args, **kwargs)
    self._allow_stdin = True
    self.readOnlyFileSystem = False
    self.bufferedOutput = True
    self.linkMaths = True # always link math library
    self.wAll = True # show all warnings by default
    self.wError = False # but keep compiling for warnings
    self.sys = platform.system()
    self.subsys=self.getossubsys()
    self.files = []
    self.isdstr=False
    self.issstr=False
    self._loglevel='1'
    # mastertemp = tempfile.mkstemp(suffix='.out')
    # os.close(mastertemp[0])
    # self.master_path = mastertemp[1]
    # self.resDir = path.join(path.dirname(path.realpath(__file__)), 'resources')
    # BUG FIX: Thread's `args` must be a tuple. `args=(self.g_rtsps)` is just
    # the dict itself, so Thread would iterate its keys as positional
    # arguments and chk_replexit would never receive the registry (with an
    # empty dict the thread dies with a TypeError on start).
    self.chk_replexit_thread = Thread(target=self.chk_replexit, args=(self.g_rtsps,))
    self.chk_replexit_thread.daemon = True
    self.chk_replexit_thread.start()
    self.init_plugin()
    self.mag=Magics(self,self.plugins,self.ICodePreprocs)
# Shell helper snippet sourced into generated scripts: waits for a single
# keypress before continuing (raw-mode read via stty/dd).
pausestr='''
get_char()
{
SAVEDSTTY=`stty -g`
stty -echo
stty cbreak
dd if=/dev/tty bs=1 count=1 2> /dev/null
stty -raw
stty echo
stty $SAVEDSTTY
}
echo ""
echo "Press any key to start...or Press Ctrl+c to cancel"
char=`get_char`
echo "OK"
'''
# Whether the current cell is executed silently (set per execution request).
silent=None
# Shared Jinja2 environment used for rendering templates.
jinja2_env = Environment()
# Registry of running REPL subprocesses.
g_rtsps={}
# Keeps the chk_replexit reaper thread's loop alive while True.
g_chkreplexit=True
def get_retinfo(self, rettype: int = 0):
    """Build the standard successful execute_reply dictionary.

    `rettype` is accepted for interface compatibility but is not used.
    """
    return {
        'status': 'ok',
        'execution_count': self.execution_count,
        'payload': [],
        'user_expressions': {},
    }
def chkjoptions(self, magics, jarfile, targetdir):
    """Ensure the java `-cp` classpath in magics['_st']['joptions'] includes
    `jarfile` and `targetdir`.

    If no `-cp` flag is present yet, one is appended (with an empty ':'
    placeholder value); the classpath value following `-cp` is then extended
    with jarfile and targetdir. The list is modified in place.
    """
    joptions = self.addmagicsSkey(magics, 'joptions')
    try:
        cp_index = joptions.index('-cp')
    except ValueError:
        # No -cp yet: append the flag plus an empty classpath placeholder.
        joptions += ['-cp', ':']
        cp_index = len(joptions) - 2
    # BUG FIX: the original advanced the index twice when '-cp' was found
    # (index = index + 1, then joptions[index + 1]), reading and writing one
    # element past the classpath value — an IndexError for ['-cp', value].
    # The classpath value is the element immediately after '-cp'. The
    # always-true `len(...) > -1` guard was removed as dead code.
    val_index = cp_index + 1
    cpstr = joptions[val_index] + ":" + jarfile + ":" + targetdir
    if cpstr.strip().startswith(':'):
        cpstr = cpstr[1:]
    # self._log(cpstr)
    joptions[val_index] = cpstr
def resolving_enveqval(self, envstr):
    """Parse `NAME=value` pairs out of `envstr` and seed them into
    os.environ.

    Existing environment variables are left untouched (setdefault).
    Returns os.environ in all cases.
    """
    if envstr is None or len(envstr.strip()) < 1:
        return os.environ
    # Collapse double spaces and strip spaces around '=' before matching.
    normalized = self.replacemany(
        self.replacemany(self.replacemany(envstr.strip(), ('  '), ' '), ('= '), '='), ' =', '=')
    pattern = re.compile(r'([^\s*]*)="(.*?)"|([^\s*]*)=(\'.*?\')|([^\s*]*)=(.[^\s]*)')
    for match in pattern.findall(normalized):
        # Each match tuple has exactly one (name, value) pair populated.
        pair = [piece for piece in match if piece != '']
        os.environ.setdefault(str(pair[0]), pair[1])
    return os.environ
def resolving_eqval2dict(self, argsstr):
    """Parse a `NAME=value [NAME=value ...]` string into a dict.

    Returns None for empty/blank input, otherwise a dict mapping each NAME
    to its (possibly quoted) value.

    This is a thin alias of _filter_dict, kept for backward compatibility;
    the previously duplicated parsing logic was removed so the regex and
    normalization live in one place.
    """
    return self._filter_dict(argsstr)
##//%include:../src/_templateHander.py
##//%include:../src/_readtemplatefile.py
##内核公共代码部分2
def get_magicsSvalue(self,magics:Dict,key:str):
    # Read (creating the entry if absent) the magic `key` from the
    # '_st' single-value table.
    return self.addmagicsSkey(magics,key)
def get_magicsBvalue(self,magics:Dict,key:str):
    # Read (creating the entry if absent) the magic `key` from the
    # '_bt' block table.
    return self.addmagicsBkey(magics,key)
def get_magicsbykey(self,magics:Dict,key:str):
    # Read (creating the entry if absent) the top-level magics entry `key`.
    return self.addkey2dict(magics,key)
def addmagicsSLkey(self,magics:Dict,key:str,value=None,func=None):
    # Ensure `key` exists in the '_sline' (single-line) magic table.
    return self.addmagicskey2(magics=magics,key=key,type='_sline',func=func,value=value)
def addmagicsSkey(self,magics:Dict,key:str,func=None):
    # Ensure `key` exists in the '_st' (single-value) magic table.
    return self.addmagicskey2(magics=magics,key=key,type='_st',func=func)
def addmagicsBkey(self,magics:Dict,key:str,value=None,func=None):
    # Ensure `key` exists in the '_bt' (block) magic table.
    return self.addmagicskey2(magics=magics,key=key,type='_bt',func=func,value=value)
def addmagicskey2(self,magics:Dict,key:str,type:str,func=None,value=None):
    """Ensure magics[type][key] and magics[type+'f'][key] both exist.

    The entry defaults to an empty list (or `value` when given); the
    parallel `type+'f'` table holds the key's callback functions, to which
    `func` is appended when provided. Returns magics[type][key].
    """
    if not magics[type].__contains__(key):
        # Add the key (default: empty list, or the provided value).
        d={key:[]}
        if value!=None:
            d={key:value}
        magics[type].update(d)
    if not magics[type+'f'].__contains__(key):
        # Add the key's callback-function list.
        d={key:[]}
        magics[type+'f'].update(d)
    if func!=None:
        magics[type+'f'][key]+=[func]
    return magics[type][key]
def addkey2dict(self, magics: Dict, key: str, type: str = None):
    """Ensure `key` exists in `magics` and return magics[key].

    A missing key is initialized to an empty list, or to an empty dict
    when type == "dict".
    """
    if key not in magics:
        magics[key] = {} if type == "dict" else []
    return magics[key]
##内核公共代码部分
# Sleep for x microseconds.
usleep = lambda x: time.sleep(x/1000000.0)
# Replace every occurrence of to_be_replaced in our_str with replace_with,
# repeating until no occurrence remains (handles re-created occurrences).
def replacemany(self,our_str, to_be_replaced:str, replace_with:str):
    # NOTE(review): this loops forever if replace_with still contains
    # to_be_replaced — callers must not pass such pairs.
    while (to_be_replaced in our_str):
        our_str = our_str.replace(to_be_replaced, replace_with)
    return our_str
# Parse an "a=x b=y"-style argument string into a dict.
def _filter_dict(self,argsstr):
    """Parse `NAME=value` pairs (double-quoted, single-quoted or bare
    values) from argsstr.

    Returns None when argsstr is empty/blank, otherwise a dict mapping each
    NAME to its value; later duplicates overwrite earlier ones.
    """
    if not argsstr or len(argsstr.strip())<1:
        return None
    env_dict={}
    # Collapse double spaces and strip spaces around '=' before matching.
    argsstr=self.replacemany(self.replacemany(self.replacemany(argsstr.strip(),('  '),' '),('= '),'='),' =','=')
    pattern = re.compile(r'([^\s*]*)="(.*?)"|([^\s*]*)=(\'.*?\')|([^\s*]*)=(.[^\s]*)')
    for argument in pattern.findall(argsstr):
        li=list(argument)
        # Each match tuple has exactly one (name, value) pair populated;
        # drop the empty alternation slots.
        li= [i for i in li if i != '']
        env_dict[str(li[0])]=li[1]
    return env_dict
# File handler: move/copy the generated source file to the requested paths.
def _fileshander(self,files:List,srcfilename,magics)->str:
    """Move srcfilename to files[0] and copy it to every further entry.

    If a destination already exists and the 'overwritefile' magic is not
    set, ".new.py" is appended to the destination name. Missing destination
    directories are created. Errors are logged (level 2), not raised.
    Returns files[0] (possibly rewritten with the adjusted name).
    """
    index=-1
    fristfile=srcfilename
    try:
        for newsrcfilename in files:
            index=index+1
            newsrcfilename = os.path.join(os.path.abspath(''),newsrcfilename)
            if os.path.exists(newsrcfilename):
                # Do not clobber an existing file unless the
                # 'overwritefile' magic is set.
                if magics!=None and len(self.addkey2dict(magics,'overwritefile'))<1:
                    newsrcfilename +=".new.py"
            if not os.path.exists(os.path.dirname(newsrcfilename)) :
                os.makedirs(os.path.dirname(newsrcfilename))
            if index==0:
                # First entry: move the generated source into place.
                os.rename(srcfilename,newsrcfilename)
                fristfile=newsrcfilename
                files[0]=newsrcfilename
            else:
                # Later entries: copy from the first destination.
                self._write_to_stdout("copy to :"+newsrcfilename+"\n")
                copyfile(fristfile,newsrcfilename)
    except Exception as e:
        self._log(str(e),2)
    return files[0]
def _is_specialID(self, line):
    """Return True when the line is a kernel directive ('##%' or '//%')."""
    return line.strip().startswith(('##%', '//%'))
def _is_test_begin(self,line):
    # Detect a '##test_begin' / '//test_begin' marker line.
    # None/empty input yields '' (falsy) rather than False.
    if line==None or line=='':return ''
    return line.strip().startswith('##test_begin') or line.strip().startswith('//test_begin')
def _is_test_end(self,line):
    # Detect a '##test_end' / '//test_end' marker line.
    if line==None or line=='':return ''
    return line.strip().startswith('##test_end') or line.strip().startswith('//test_end')
def _is_dqm_begin(self,line):
    # True when the line opens a triple-double-quote comment block.
    # A '="""' assignment marks a real string literal instead: isdstr is
    # set so the matching quotes are not treated as a comment block.
    if line==None or line=='':return ''
    line=self.replacemany(line.strip(),(' '),'')
    if '=\"\"\"' in line:
        self.isdstr=True
        return False
    if '\"\"\"' in line:
        if self.isdstr:return False
        self.isdstr=False
        return True
    return line.lstrip().startswith('\"\"\"')
def _is_dqm_end(self,line):
    # True when the line closes the currently open triple-double-quote
    # block (only meaningful while self.isdqm is set).
    if line==None or line=='':return ''
    if self.isdqm:
        return line.rstrip().endswith('\"\"\"')
    return False
def _is_sqm_begin(self,line):
    # Triple-single-quote counterpart of _is_dqm_begin (state: issstr).
    if line==None or line=='':return ''
    line=self.replacemany(line.strip(),(' '),'')
    if '=\'\'\'' in line:
        self.issstr=True
        return False
    if '\'\'\'' in line:
        if self.issstr:return False
        self.issstr=False
        return True
    return line.lstrip().startswith('\'\'\'')
def _is_sqm_end(self,line):
    # Triple-single-quote counterpart of _is_dqm_end.
    if line==None or line=='':return ''
    if self.issqm:
        return line.rstrip().endswith('\'\'\'')
    return False
def cleanCdqm(self,code):
    # Strip C-style /* ... */ block comments (may span lines).
    return re.sub(r"/\*.*?\*/", "", code, flags=re.M|re.S)
def cleanCnotes(self,code):
    # Strip C++-style // line comments to end of line.
    return re.sub(r"//.*", "", code)
def cleannotes(self,line):
    # Blank out '## ' / '//' comment lines, but keep kernel directives
    # (lines matched by _is_specialID, i.e. '##%' / '//%').
    return '' if (not self._is_specialID(line)) and (line.lstrip().startswith('## ') or line.lstrip().startswith('//')) else line
# State flag: currently inside a triple-double-quote comment block.
isdqm=False
def cleandqmA(self,code):
    # Strip all triple-double-quote blocks from a whole code string.
    return re.sub(r"\"\"\".*?\"\"\"", "", code, flags=re.M|re.S)
def cleandqm(self,line):
    # Line-by-line version of cleandqmA: blanks lines belonging to a
    # triple-double-quote block, tracking the open/closed state in
    # self.isdqm across calls.
    if not self.isdqm:
        istb=self._is_dqm_begin(line)
        if istb:
            self.isdqm=True
            if len(line.strip())>5:
                # The opening and closing quotes may share one line.
                iste=self._is_dqm_end(line)
                if iste:self.isdqm=False
            return ''
    iste=self._is_dqm_end(line)
    if iste:
        self.isdqm=False
        return ''
    line= "" if self.isdqm else line
    return line
# State flag: currently inside a triple-single-quote comment block.
issqm=False
def cleansqmA(self,code):
    # Strip all triple-single-quote blocks from a whole code string.
    return re.sub(r"\'\'\'.*?\'\'\'", "", code, flags=re.M|re.S)
def cleansqm(self,line):
    # Line-by-line version of cleansqmA: blanks lines belonging to a
    # triple-single-quote block, tracking state in self.issqm across calls.
    if not self.issqm:
        istb=self._is_sqm_begin(line)
        if istb:
            self.issqm=True
            if len(line.strip())>5:
                # The opening and closing quotes may share one line.
                iste=self._is_sqm_end(line)
                if iste:self.issqm=False
            return ''
    iste=self._is_sqm_end(line)
    if iste:
        self.issqm=False
        return ''
    line= "" if self.issqm else line
    return line
# State flag: currently inside a test_begin/test_end region.
istestcode=False
def cleantestcodeA(self,code):
    # Strip //test_begin...//test_end and ##test_begin...##test_end
    # regions from a whole code string.
    code=re.sub(r"\/\/test_begin.*?\/\/test_end", "", code, flags=re.M|re.S)
    return re.sub(r"\#\#test_begin.*?\#\#test_end", "", code, flags=re.M|re.S)
def cleantestcode(self,line):
    # Line-by-line version of cleantestcodeA: blanks lines between the
    # test markers, tracking state in self.istestcode across calls.
    if not self.istestcode:
        istb=self._is_test_begin(line)
        if istb:
            self.istestcode=True
            if len(line.strip())>5:
                # Begin and end markers may share one line.
                iste=self._is_test_end(line)
                if iste:self.istestcode=False
            return ''
    iste=self._is_test_end(line)
    if iste:
        self.istestcode=False
        return ''
    line= "" if self.istestcode else line
    return line
def repl_listpid(self, cmd=None):
    """Print the keys (pids) of all registered REPL subprocesses."""
    if not self.g_rtsps:
        self._write_to_stdout("--------All replpid--------\nNone\n")
        return
    self._write_to_stdout("--------All replpid--------\n")
    for pid_key in self.g_rtsps:
        self._write_to_stdout(pid_key + "\n")
def chk_replexit(self,grtsps): | |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Macsec.Mka.Interfaces.Interface.Session.SessionSummary.OuterTag' : {
'meta_info' : _MetaInfoClass('Macsec.Mka.Interfaces.Interface.Session.SessionSummary.OuterTag',
False,
[
_MetaInfoClassMember('cfi', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' cfi
''',
'cfi',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('etype', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' etype
''',
'etype',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' priority
''',
'priority',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('vlan-id', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' vlan id
''',
'vlan_id',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
],
'Cisco-IOS-XR-crypto-macsec-mka-oper',
'outer-tag',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-macsec-mka-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper'
),
},
'Macsec.Mka.Interfaces.Interface.Session.SessionSummary.InnerTag' : {
'meta_info' : _MetaInfoClass('Macsec.Mka.Interfaces.Interface.Session.SessionSummary.InnerTag',
False,
[
_MetaInfoClassMember('cfi', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' cfi
''',
'cfi',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('etype', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' etype
''',
'etype',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' priority
''',
'priority',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('vlan-id', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' vlan id
''',
'vlan_id',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
],
'Cisco-IOS-XR-crypto-macsec-mka-oper',
'inner-tag',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-macsec-mka-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper'
),
},
'Macsec.Mka.Interfaces.Interface.Session.SessionSummary' : {
'meta_info' : _MetaInfoClass('Macsec.Mka.Interfaces.Interface.Session.SessionSummary',
False,
[
_MetaInfoClassMember('algo-agility', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Alogorithm Agility
''',
'algo_agility',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('capability', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' MACSec Capability
''',
'capability',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('cipher-str', ATTRIBUTE, 'str' , None, None,
[], [],
''' Cipher String
''',
'cipher_str',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('confidentiality-offset', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Confidentiality Offset
''',
'confidentiality_offset',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('delay-protect', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Delay Protect
''',
'delay_protect',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('inherited-policy', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is Inherited Policy
''',
'inherited_policy',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('inner-tag', REFERENCE_CLASS, 'InnerTag' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper', 'Macsec.Mka.Interfaces.Interface.Session.SessionSummary.InnerTag',
[], [],
''' VLAN Inner TAG
''',
'inner_tag',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' macsec configured interface
''',
'interface_name',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('key-chain', ATTRIBUTE, 'str' , None, None,
[], [],
''' Key Chain name
''',
'key_chain',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('mac-sec-desired', ATTRIBUTE, 'bool' , None, None,
[], [],
''' MACSec Desired
''',
'mac_sec_desired',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('my-mac', ATTRIBUTE, 'str' , None, None,
[], [],
''' My MAC
''',
'my_mac',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('outer-tag', REFERENCE_CLASS, 'OuterTag' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper', 'Macsec.Mka.Interfaces.Interface.Session.SessionSummary.OuterTag',
[], [],
''' VLAN Outer TAG
''',
'outer_tag',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('policy', ATTRIBUTE, 'str' , None, None,
[], [],
''' Policy Name
''',
'policy',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Key Server Priority
''',
'priority',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('replay-protect', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Replay Protect
''',
'replay_protect',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('window-size', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Replay Window Size
''',
'window_size',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
],
'Cisco-IOS-XR-crypto-macsec-mka-oper',
'session-summary',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-macsec-mka-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper'
),
},
'Macsec.Mka.Interfaces.Interface.Session.Vp' : {
'meta_info' : _MetaInfoClass('Macsec.Mka.Interfaces.Interface.Session.Vp',
False,
[
_MetaInfoClassMember('cipher-suite', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SAK Cipher Suite
''',
'cipher_suite',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('latest-an', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Latest SAK AN
''',
'latest_an',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('latest-ki', ATTRIBUTE, 'str' , None, None,
[], [],
''' Latest SAK KI
''',
'latest_ki',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('latest-kn', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Latest SAK KN
''',
'latest_kn',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('latest-rx', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Latest Rx status
''',
'latest_rx',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('latest-tx', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Latest Tx status
''',
'latest_tx',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('my-sci', ATTRIBUTE, 'str' , None, None,
[], [],
''' Local SCI(MAC)
''',
'my_sci',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('old-an', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Old SAK AN
''',
'old_an',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('old-ki', ATTRIBUTE, 'str' , None, None,
[], [],
''' Old SAK KI
''',
'old_ki',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('old-kn', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Old SAK KN
''',
'old_kn',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('old-rx', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Old Rx status
''',
'old_rx',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('old-tx', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Old Tx status
''',
'old_tx',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('retire-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SAK Retire time
''',
'retire_time',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('ssci', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SSCI of the Local TxSC
''',
'ssci',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('virtual-port-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Virtual Port ID
''',
'virtual_port_id',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('wait-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SAK Transmit Wait Time
''',
'wait_time',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
],
'Cisco-IOS-XR-crypto-macsec-mka-oper',
'vp',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-macsec-mka-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper'
),
},
'Macsec.Mka.Interfaces.Interface.Session.Ca.LivePeer' : {
'meta_info' : _MetaInfoClass('Macsec.Mka.Interfaces.Interface.Session.Ca.LivePeer',
False,
[
_MetaInfoClassMember('mi', ATTRIBUTE, 'str' , None, None,
[], [],
''' Member ID
''',
'mi',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('mn', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message Number
''',
'mn',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' KS Priority
''',
'priority',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('sci', ATTRIBUTE, 'str' , None, None,
[], [],
''' Rx SCI
''',
'sci',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('ssci', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Peer SSCI
''',
'ssci',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
],
'Cisco-IOS-XR-crypto-macsec-mka-oper',
'live-peer',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-macsec-mka-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper'
),
},
'Macsec.Mka.Interfaces.Interface.Session.Ca.PotentialPeer' : {
'meta_info' : _MetaInfoClass('Macsec.Mka.Interfaces.Interface.Session.Ca.PotentialPeer',
False,
[
_MetaInfoClassMember('mi', ATTRIBUTE, 'str' , None, None,
[], [],
''' Member ID
''',
'mi',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('mn', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message Number
''',
'mn',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' KS Priority
''',
'priority',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('sci', ATTRIBUTE, 'str' , None, None,
[], [],
''' Rx SCI
''',
'sci',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('ssci', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Peer SSCI
''',
'ssci',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
],
'Cisco-IOS-XR-crypto-macsec-mka-oper',
'potential-peer',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-macsec-mka-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper'
),
},
'Macsec.Mka.Interfaces.Interface.Session.Ca.DormantPeer' : {
'meta_info' : _MetaInfoClass('Macsec.Mka.Interfaces.Interface.Session.Ca.DormantPeer',
False,
[
_MetaInfoClassMember('mi', ATTRIBUTE, 'str' , None, None,
[], [],
''' Member ID
''',
'mi',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('mn', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message Number
''',
'mn',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' KS Priority
''',
'priority',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('sci', ATTRIBUTE, 'str' , None, None,
[], [],
''' Rx SCI
''',
'sci',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('ssci', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Peer SSCI
''',
'ssci',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
],
'Cisco-IOS-XR-crypto-macsec-mka-oper',
'dormant-peer',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-macsec-mka-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper'
),
},
'Macsec.Mka.Interfaces.Interface.Session.Ca' : {
'meta_info' : _MetaInfoClass('Macsec.Mka.Interfaces.Interface.Session.Ca',
False,
[
_MetaInfoClassMember('authenticator', ATTRIBUTE, 'bool' , None, None,
[], [],
''' authenticator
''',
'authenticator',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('ckn', ATTRIBUTE, 'str' , None, None,
[], [],
''' CKN
''',
'ckn',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('dormant-peer', REFERENCE_LIST, 'DormantPeer' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper', 'Macsec.Mka.Interfaces.Interface.Session.Ca.DormantPeer',
[], [],
''' Dormant Peer List
''',
'dormant_peer',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('first-ca', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is First CA
''',
'first_ca',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('is-key-server', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is Key Server
''',
'is_key_server',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('live-peer', REFERENCE_LIST, 'LivePeer' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_macsec_mka_oper', 'Macsec.Mka.Interfaces.Interface.Session.Ca.LivePeer',
[], [],
''' Live Peer List
''',
'live_peer',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('my-mi', ATTRIBUTE, 'str' , None, None,
[], [],
''' Member Identifier
''',
'my_mi',
'Cisco-IOS-XR-crypto-macsec-mka-oper', False),
_MetaInfoClassMember('my-mn', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message Number
''',
| |
= drone_1.position[5]
except IndexError:
print("map was nut fully updated yet")
class Local_Map:
    """Occupancy-grid wrapper kept in sync by a ROS OccupancyGrid callback.

    Cell values follow the map server's convention:
    -1: unknown, 0: free, ~80: safety distance, 100: occupied.
    """

    def __init__(self, resolution, map_origin, map_matrix):
        # {"width": int, "height": int, "pixel_size": float}
        self.resolution = resolution
        # [x, y, z, qx, qy, qz, qw] pose of the map origin
        self.map_origin = map_origin
        # 2-D array indexed [row=y][col=x]
        self.map_matrix = map_matrix

    def map_Callback(self, msg):
        """ROS callback: cache grid metadata and reshape the flat data
        vector into a (height, width) matrix.

        The OccupancyGrid data is row-major, so a single numpy reshape
        replaces the original nested index loops.
        """
        pixel_size = round(msg.info.resolution, 2)
        width = msg.info.width
        height = msg.info.height
        origin = [msg.info.origin.position.x,
                  msg.info.origin.position.y,
                  msg.info.origin.position.z,
                  msg.info.origin.orientation.x,
                  msg.info.origin.orientation.y,
                  msg.info.origin.orientation.z,
                  msg.info.origin.orientation.w]
        self.resolution = {"width": width, "height": height, "pixel_size": pixel_size}
        self.map_origin = origin
        # dtype=float matches the original np.zeros() matrix.
        self.map_matrix = np.asarray(msg.data, dtype=float).reshape(height, width)

    def get_occupancy_status(self, indexes):
        """Return True when cell [x, y] is free (value <= 30), False when
        occupied; None while the matrix is not populated yet.

        Fixed: queries this instance's matrix instead of reaching for the
        module-level `local_map` singleton as the original code did.
        """
        x = indexes[0]
        y = indexes[1]
        # map_matrix is indexed [row=y][col=x].
        try:
            if self.map_matrix[y][x] > 30:
                return False
            else:
                return True
        except IndexError:
            # Width/height already updated but the matrix is not saved yet.
            print("Map matrix not ready yet!")

    def getOccupancyStatus30andUP(self, indexes):
        """Like get_occupancy_status but treats value == 30 as occupied too."""
        x = indexes[0]
        y = indexes[1]
        try:
            if self.map_matrix[y][x] >= 30:
                return False
            else:
                return True
        except IndexError:
            print("Map matrix not ready yet!")
def calc_direction(target_waypoint, min_distance=True):
    """Return the yaw angle (radians) from the drone towards *target_waypoint*.

    Args:
        target_waypoint: [x, y] target in map coordinates (east = x, north = y).
        min_distance: when True and the waypoint is very close (squared
            distance <= 0.5), keep the current heading (drone_1.position[5])
            instead of re-computing it, to avoid yaw jitter near the goal.

    Bug fix: the original quadrant-by-quadrant atan() reconstruction
    contained a dead `elif():` branch and, for straight north/south moves
    (d_x == 0, d_y != 0), computed the correct +-pi/2 only to clobber it
    with 0 in a trailing `else` (printing a CAUTION).  math.atan2 yields
    the intended value in every quadrant, including the axis-aligned
    cases (atan2(0, 0) == 0 preserves the old d_x == d_y == 0 result).
    """
    d_x = target_waypoint[0] - drone_1.position[0]
    d_y = target_waypoint[1] - drone_1.position[1]
    # Close to the target: hold the current yaw.
    if min_distance and (d_x * d_x + d_y * d_y <= 0.5):
        return drone_1.position[5]
    return math.atan2(d_y, d_x)
def recalculate_cost(path, cells_passing_rim=0):
    """Return the total 2-D length (cost) of *path* starting from the drone.

    Args:
        path: list of [x, y, ...] waypoints.
        cells_passing_rim: number of grid cells where the path clips the
            safety rim; each adds 0.8 * pixel_size of penalty.

    Returns:
        float('inf') when the drone is in position-hold (a safe path was
        just deleted -- considers the dynamic behavior of the drone), or
        when a single-waypoint path is closer than 0.5 m; otherwise the
        summed Euclidean segment lengths plus the rim penalty.

    Bug fix: for the first segment the original code computed
    (path[1][1] - path[1][1])**2 == 0 instead of the y-delta between
    path[0] and path[1], under-counting that segment's cost.
    """
    if drone_1.pos_hold == True:
        return float('inf')
    if len(path) == 1:
        dist = math.hypot(path[0][0] - drone_1.position[0],
                          path[0][1] - drone_1.position[1])
        return float('inf') if dist <= 0.5 else dist
    # First leg: current drone position -> path[0].
    cost = math.hypot(path[0][0] - drone_1.position[0],
                      path[0][1] - drone_1.position[1])
    # Then every consecutive waypoint pair (y-delta bug fixed).
    for p, q in zip(path, path[1:]):
        cost += math.hypot(q[0] - p[0], q[1] - p[1])
    cost += cells_passing_rim * local_map.resolution.get('pixel_size') * 0.8
    return cost
def get_voxel_index(vec): #vec = [x,y,z] in true position
    """Convert a map-frame position to grid indices.

    Returns [x_index, y_index] for positions inside the map, or the pair
    (NaN, NaN) for positions outside it.  NOTE(review): the two outcomes
    have different container types (list vs tuple); callers unpack both,
    but confirm nothing depends on the exact type.
    """
    #print(vec)
    x = vec[0]
    y = vec[1]
    map_res = local_map.resolution.get('pixel_size')
    map_width = local_map.resolution.get('width') #width: x-axis
    map_height = local_map.resolution.get('height') #height: y-axis
    # Below the origin corner -> outside the map.
    if x < local_map.map_origin[0] or y < local_map.map_origin[1]:
        #print(vec)
        #print("Requested position is outside of the map!")
        return float('NaN'), float('NaN')
    else:
        # Beyond the far corner -> outside the map.
        if x > local_map.map_origin[0] + map_width*map_res or y > local_map.map_origin[1] + map_height*map_res:
            #print("Requested position is outside of the map!!!")
            return float('NaN'), float('NaN')
        else:
            # Floor-divide the offset from the origin by the cell size.
            # NOTE(review): the extra -map_res ... +1 dance shifts which
            # boundary a point exactly on a cell edge lands in -- confirm
            # against the map server's cell convention.
            x_index = int((x - local_map.map_origin[0] - map_res) // map_res) + 1
            y_index = int((y - local_map.map_origin[1] - map_res) // map_res) + 1
            # Clamp tiny negative results (positions within the first cell).
            if x_index <0:
                x_index = 0
            if y_index <0:
                y_index = 0
            index = [x_index, y_index]
            return index
def subscripe():
    """Register all ROS topic subscriptions for this node.

    (The name 'subscripe' is a typo, kept because external callers may
    reference it.)  Callbacks feed the module-level drone_1, mission,
    local_map and rrt_path objects.
    """
    rospy.Subscriber("/mavros/local_position/pose", geometry_msgs.msg.PoseStamped, drone_1.position_Callback )
    rospy.Subscriber('/mavros/local_position/velocity_local', geometry_msgs.msg.TwistStamped, drone_1.velocity_Callback)
    rospy.Subscriber('/mavros/trajectory/desired', mavros_msgs.msg.Trajectory, mission.trajectory_Callback )
    rospy.Subscriber('/local_map', OccupancyGrid, local_map.map_Callback )
    rospy.Subscriber('/safe_path', Path_coll, rrt_path.safe_waypoints )
def publish_trajectory(msg):
    """Publish a mavros Trajectory message on /mavros/trajectory/generated.

    NOTE(review): a new Publisher is constructed on every call; in rospy
    the first publish after construction can be dropped before the
    connection to subscribers is established.  This presumably works here
    because the function is called continuously -- confirm, or hoist the
    Publisher to module level.
    """
    pub = rospy.Publisher("/mavros/trajectory/generated", mavros_msgs.msg.Trajectory, queue_size= 10)
    pub.publish(msg)
#when x_1 = 2 and x_2 = -2 dx = -4 not 4
def distance(x_1, x_2):
    """Signed displacement from x_1 to x_2.

    Both abs()-based branches of the original reduce to x_2 - x_1; the
    equality case is kept separate so it still returns the integer 0.
    """
    if x_1 == x_2:
        return 0
    return x_2 - x_1
def get_list_of_voxels_to_check(start, end): #d_x & d_y in: +- ; angle = simple_angle ; start = [x, y] in meter; end = [x,y]
    """Sample the straight segment from *start* to *end* every map-cell
    step and return the grid cells it crosses as [x_index, y_index]
    pairs (new samples are prepended, so the list runs end -> start).

    Relies on calc_angle() and get_voxel_index() plus the local_map
    singleton.  Out-of-map samples are skipped.
    """
    angle_1, angle_2 = calc_angle(start, end)
    simple_angle = math.radians(angle_1)
    d_x = distance(start[0], end[0])
    d_y = distance(start[1], end[1])
    resolution = local_map.resolution.get('pixel_size')
    voxel_to_check= []
    multiplier_x = 1 #in positive direction
    multiplier_y = 1 #in pos direction
    if d_x < 0:
        multiplier_x = -1
    if d_y < 0:
        multiplier_y = -1
    i = 0
    # Pure x-movement: force a 90-degree sampling angle.
    if(d_y ==0 and d_x != 0) :
        simple_angle = math.radians(90)
    # Loop guard: keep stepping while the travelled sampling distance has
    # not yet covered |d_x| or |d_y|.
    # NOTE(review): these initial conditions use cos for x / sin for y,
    # while every update inside the loop uses sin for x / cos for y --
    # confirm the asymmetry is intentional (it only affects iteration 0).
    condition_1 = (abs(d_x) - abs((i*resolution*math.cos(simple_angle))))
    condition_2 = (abs(d_y) - abs((i*resolution*math.sin(simple_angle))))
    while ( ( condition_1 >= 0) or ( condition_2 >=0) ):
        # Walk from the end point back towards start, one cell per step.
        x = start[0] + (d_x - multiplier_x*i*math.sin(simple_angle)*resolution)
        y = start[1] + (d_y - multiplier_y*i*math.cos(simple_angle)*resolution)
        #######CAUTION !!!!!!!!!!!!!
        # Safety valve: abort the whole node if the loop runs away.
        if (i >= 2500):
            print("SOMETHING WENT WRONG!!!!")
            sys.exit() #PX4 will go into failsafe
        if (d_x == 0):
            break
        ############################
        x_ind, y_ind = get_voxel_index([x, y])
        # Skip samples flagged as outside the map, then advance.
        if(x_ind == -1 or y_ind == -1):
            i = i+1
            condition_1 = (abs(d_x) - abs((i*resolution*math.sin(simple_angle))))
            condition_2 = (abs(d_y) - abs((i*resolution*math.cos(simple_angle))))
            continue
        # get_voxel_index returns (NaN, NaN) for out-of-map positions.
        if (math.isnan(x_ind) == True or math.isnan(y_ind) == True ):
            i = i+1
            condition_1 = (abs(d_x) - abs((i*resolution*math.sin(simple_angle))))
            condition_2 = (abs(d_y) - abs((i*resolution*math.cos(simple_angle))))
            continue
        check_voxel = [x_ind, y_ind] # width, height = x , y
        # Prepend unless it repeats an existing entry.
        # NOTE(review): the duplicate check compares against the list's
        # LAST element (the oldest, since new cells are prepended), not
        # the most recently added one -- consecutive duplicates can slip
        # through.  Confirm whether [0] was intended instead.
        if (len(voxel_to_check) <=0):
            voxel_to_check.insert(0, check_voxel)
        elif ( voxel_to_check[len(voxel_to_check) -1] != check_voxel):
            voxel_to_check.insert(0 , check_voxel)
        i = i +1
        condition_1 = (abs(d_x) - abs((i*resolution*math.sin(simple_angle))))
        condition_2 = (abs(d_y) - abs((i*resolution*math.cos(simple_angle))))
    return voxel_to_check
def cell_no_to_position(cell_no): #cell_no = [x,y]
    """Convert grid indices [x, y] into a map-frame position [x, y, z].

    x/y are placed at a cell centre; z is taken from the mission's
    current incremental waypoint.  Returns [] when the cell indices
    exceed the map dimensions.
    """
    width = local_map.resolution.get('width')
    height = local_map.resolution.get('height')
    resolution = local_map.resolution.get('pixel_size')
    x_origin = local_map.map_origin[0]
    y_origin = local_map.map_origin[1]
    x_cell = cell_no[0] +1 #0...resolution is cell no 0
    y_cell = cell_no[1] +1
    # Cell centre = origin + (index+1)*res + half a cell.
    # NOTE(review): this is one full cell beyond the conventional
    # origin + index*res + res/2 -- confirm the +1 offset is intended
    # (it mirrors the -map_res ... +1 shift in get_voxel_index).
    x_pos = x_origin + x_cell*resolution + resolution/2
    y_pos = y_origin + y_cell*resolution + resolution/2
    if cell_no[0] > width or cell_no[1] > height:
        return []
    try:
        # z from the waypoint of the current mission increment.
        z_pos = mission.incremental_wp[mission._current_increment][2]
        return [x_pos, y_pos, z_pos]
    except IndexError:
        # Increment ran past the waypoint list: fall back to the only
        # waypoint (single-waypoint mission) or to the previous increment.
        if mission._current_increment == 1 and len(mission.incremental_wp) <= 1:
            z_pos = mission.incremental_wp[0][2]
            return [x_pos, y_pos, z_pos]
        else:
            z_pos = mission.incremental_wp[mission._current_increment -1 ][2]
            return [x_pos, y_pos, z_pos]
def calc_3D_distance(vec_1, vec_2):
    """Euclidean distance between two 3-D points given as [x, y, z, ...]
    sequences (only the first three components are used)."""
    deltas = (vec_2[axis] - vec_1[axis] for axis in range(3))
    return math.sqrt(sum(delta * delta for delta in deltas))
def go_safe_position():
    """Command the drone back to its last known safe position.

    Builds a trajectory setpoint with only point_1 valid (keeping the
    current altitude) and publishes it via publish_trajectory().
    """
    #print("going to safe")
    msg = copy.deepcopy(mission.empty_msg)
    msg.point_1.yaw_rate = -0.0
    msg.header.stamp = rospy.get_rostime()
    # Only the first of the five trajectory points carries valid data.
    msg.point_valid = [1, 0, 0, 0, 0]
    # 65535 presumably marks 'no command' for each point -- confirm
    # against the mavros Trajectory message docs.
    msg.command = [65535, 65535, 65535, 65535, 65535]
    # Face towards the safe position while flying to it.
    msg.point_1.yaw = calc_direction(drone_1.last_safe_position)
    msg.point_1.position.x = drone_1.last_safe_position[0]
    msg.point_1.position.y = drone_1.last_safe_position[1]
    # Keep the CURRENT altitude, not the safe position's stored z.
    msg.point_1.position.z = drone_1.position[2]
    publish_trajectory(msg)
def request_RRT_Star_path(mission_index = None):
    """Ask the RRT* planner node (topic /start_planning) for a new path.

    mission_index: optional index into mission.incremental_wp that
    overrides the current increment's waypoint as the planning target.

    NOTE(review): the Publisher is created per call and published to
    immediately; with queue_size=1 the first message after creation can
    be dropped before subscribers connect -- consider a module-level
    publisher.
    """
    pub_start_request = rospy.Publisher("/start_planning", RRT_Star_Call, queue_size=1)
    msg = RRT_Star_Call()
    msg.stamp = rospy.get_rostime()
    msg.start_planning = True
    try:
        # Default target: waypoint of the current mission increment.
        msg.target_position = mission.incremental_wp[mission._current_increment]
        if mission_index is not None:
            #if mission._current_increment +2 <= len(mission.incremental_wp):
            # INDEX = get_voxel_index([mission.incremental_wp[mission._current_increment]])
            msg.target_position = mission.incremental_wp[mission_index]
        pub_start_request.publish(msg)
    except IndexError:
        # Increment ran past the waypoint list: on the first increment,
        # fall back to the first waypoint; otherwise send nothing.
        if mission._current_increment == 1:
            msg.target_position = mission.incremental_wp[0]
            pub_start_request.publish(msg)
| |
# Repository: ocobacho/flexx
""" HVLayout
The HVLayout and its subclasses provide a simple mechanism to horizontally
or vertically stack child widgets. This can be done in different *modes*:
box mode is suited for aligning content where natural size matters. The
fix mode and split mode are more suited for high-level layout. See
the HVLayout class for details.
Interactive Box layout example:
.. UIExample:: 200
from flexx import app, event, ui
class Example(ui.HBox):
def init(self):
self.b1 = ui.Button(text='Horizontal', flex=0)
self.b2 = ui.Button(text='Vertical', flex=1)
self.b3 = ui.Button(text='Horizontal reversed', flex=2)
self.b4 = ui.Button(text='Vertical reversed', flex=3)
@event.reaction('b1.pointer_down')
def _to_horizontal(self, *events):
self.set_orientation('h')
@event.reaction('b2.pointer_down')
def _to_vertical(self, *events):
self.set_orientation('v')
@event.reaction('b3.pointer_down')
def _to_horizontal_rev(self, *events):
self.set_orientation('hr')
@event.reaction('b4.pointer_down')
def _to_vertical_r(self, *events):
self.set_orientation('vr')
Also see examples: :ref:`app_layout.py`, :ref:`splitters.py`,
:ref:`box_vs_fix_layout.py`, :ref:`mondriaan.py`.
"""
"""
## Notes on performance and layout boundaries.
In layout one can see multiple streams of information:
- Information about available size streams downward.
- Information about minimum and maxium allowed sizes streams upward.
- Information about natural sizes streams upward.
The first two streams are not problematic, as they are very much
one-directional, and minimum/maximum sizes are often quite static.
The flow of natural size is important to obtain good looking layouts, but
adds complications because of its recursive effect; a change in size may
need several document reflows to get the layout right, which can cause
severe performance penalties if many elements are involved. Therefore it
is important to introduce "layout boundaries" in the higher levels of a UI
so that layout can be established within individual parts of the UI without
affecting the other parts.
This module implements horizontal/vertical layouts that support natural sizes
(box) and layouts that do not (fix and split). The former is implemented with
CSS flexbox (the browser does all the work, and maintains the upward stream
of natural sizes). The latter is implemented with absolute positioning (we make
JavaScript do all the work). We realize good compatibility by maintaining the
first two streams of information.
To clarify, it would be possible to implement split and fix with flexbox,
and this could result in a "nicety" that a VSplit with content can still
have a natural horizontal size (and used as such in an HBox with flex 0).
However, one can see how this will require additional document reflows
(since a change in width can change the natural height and vice versa).
Split and Fix layouts provide an intuitive way to introduce layout boundaries.
For an element to be a layout boundary it must:
- Not be display inline or inline-block
- Not have a percentage height value.
- Not have an implicit or auto height value.
- Not have an implicit or auto width value.
- Have an explicit overflow value (scroll, auto or hidden).
- Not be a descendant of a <table> element.
Most Widgets inside a HVLayout in split or fix mode conform to this:
they are typically not table elements, the Widget sets overflow, the layout
itself uses CSS to set display, and sets height and weight.
More reading:
- http://wilsonpage.co.uk/introducing-layout-boundaries/
- https://css-tricks.com/snippets/css/a-guide-to-flexbox/
"""
from ... import event, app
from ...event import Property
from . import Layout
class OrientationProp(Property):
    """ A property holding an HVLayout orientation: one of 'h', 'v',
    'hr' or 'vr'.  String aliases such as 'horizontal', 'left-to-right',
    'vertical', 'bottom-to-top' are normalized by _validate.

    (The previous docstring -- "a pair of float values ... set using a
    scalar" -- described a different property class.)
    """
    _default = 'h'

    def _validate(self, v, name, data):
        # Normalize string input (lowercase, drop hyphens), then map the
        # long-form aliases onto the canonical two-letter codes.
        if isinstance(v, str):
            v = v.lower().replace('-', '')
            # NOTE(review): the 0/1 keys below are unreachable -- integer
            # input never enters this isinstance(str) branch, so 0/1 are
            # rejected by the check that follows.  Confirm whether ints
            # were meant to be accepted.
            v = {'horizontal': 'h', 0: 'h', 'lefttoright': 'h',
                 'vertical': 'v', 1: 'v', 'toptobottom': 'v',
                 'righttoleft': 'hr', 'bottomtotop': 'vr'}.get(v, v)
        # Anything not normalized to a canonical code is an error.
        if v not in ('h', 'v', 'hr', 'vr'):
            raise ValueError('%s.orientation got unknown value %r' % (self.id, v))
        return v
class HVLayout(Layout):
""" A layout widget to distribute child widgets horizontally or vertically.
This is a versatile layout class which can operate in different
orientations (horizontal, vertical, reversed), and in different modes:
In 'fix' mode, all available space is simply distributed corresponding
to the children's flex values. This can be convenient to e.g. split
a layout in two halves.
In 'box' mode, each widget gets at least its natural size (if available),
and any *additional* space is distributed corresponding to the children's
flex values. This is convenient for low-level layout of widgets, e.g. to
align one or more buttons. It is common to use flex values of zero to
give widgets just the size that they need and use an empty widget with a
flex of 1 to fill up any remaining space. This mode is based on CSS flexbox.
In 'split' mode, all available space is initially distributed corresponding
to the children's flex values. The splitters between the child widgets
can be dragged by the user and positioned via an action. This is useful
to give the user more control over the (high-level) layout.
In all modes, the layout is constrained by the minimum and maximum size
of the child widgets (as set via style/CSS). Note that flexbox (and thus
box mode) may not honour min/max sizes of widgets in child layouts.
Note that widgets with a flex value of zero may collapse if used inside
a fix/split layout, or in a box layout but lacking a natural size. This
can be resolved by assigning a minimum width/height to the widget. The
exception is if all child widgets have a flex value of zero, in which
case the available space is divided equally.
The ``node`` of this widget is a
`<div> <https://developer.mozilla.org/docs/Web/HTML/Element/div>`_. The
outer nodes of the child widgets are laid out using JavaScript or CSS,
depending on the mode.
Also see the convenience classes: HFix, VFix, HBox, VBox, HSplit, VSplit.
"""
_DEFAULT_ORIENTATION = 'h'
_DEFAULT_MODE = 'box'
CSS = """
/* === for box layout === */
.flx-HVLayout > .flx-Widget {
margin: 0; /* the layout handles the margin */
}
.flx-box {
display: -webkit-flex;
display: -ms-flexbox; /* IE 10 */
display: -ms-flex; /* IE 11 */
display: -moz-flex;
display: flex;
/* How space is divided when all flex-factors are 0:
start, end, center, space-between, space-around */
-webkit-justify-content: space-around;
-ms-justify-content: space-around;
-moz-justify-content: space-around;
justify-content: space-around;
/* How items are aligned in the other direction:
center, stretch, baseline */
-webkit-align-items: stretch;
-ms-align-items: stretch;
-moz-align-items: stretch;
align-items: stretch;
}
.flx-box.flx-horizontal {
-webkit-flex-flow: row;
-ms-flex-flow: row;
-moz-flex-flow: row;
flex-flow: row;
width: 100%;
}
.flx-box.flx-vertical {
-webkit-flex-flow: column;
-ms-flex-flow: column;
-moz-flex-flow: column;
flex-flow: column;
height: 100%; width: 100%;
}
.flx-box.flx-horizontal.flx-reversed {
-webkit-flex-flow: row-reverse;
-ms-flex-flow: row-reverse;
-moz-flex-flow: row-reverse;
flex-flow: row-reverse;
}
.flx-box.flx-vertical.flx-reversed {
-webkit-flex-flow: column-reverse;
-ms-flex-flow: column-reverse;
-moz-flex-flow: column-reverse;
flex-flow: column-reverse;
}
/* Make child widgets (and layouts) size correctly */
.flx-box.flx-horizontal > .flx-Widget {
height: auto;
width: auto;
}
.flx-box.flx-vertical > .flx-Widget {
width: auto;
height: auto;
}
/* If a boxLayout is in a compound widget, we need to make that widget
a flex container (done with JS in Widget class), and scale here */
.flx-Widget > .flx-box {
flex-grow: 1;
flex-shrink: 1;
}
/* === For split and fix layout === */
.flx-split > .flx-Widget {
/* Let child widgets position well, and help them become a layout
* boundary. We cannot do "display: block;", as that would break stuff.
/* overflow is set in Widget.CSS, setting here breaks scrollable widgets
*/
position: absolute;
}
.flx-split.flx-dragging { /* Fix for odd drag behavior on Chrome */
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
.flx-split.flx-dragging iframe { /* disable iframe during drag */
pointer-events: none;
}
.flx-split.flx-horizontal > .flx-split-sep,
.flx-split.flx-horizontal.flx-dragging {
cursor: ew-resize;
}
.flx-split.flx-vertical > .flx-split-sep,
.flx-split.flx-vertical.flx-dragging {
cursor: ns-resize;
}
.flx-split-sep {
z-index: 2;
position: absolute;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
box-sizing: border-box;
background: rgba(0, 0, 0, 0); /* transparent */
/* background: #fff; /* hide underlying widgets */
}
"""
mode = event.EnumProp(('box', 'fix', 'split'), settable=True, doc="""
The mode in which this layout operates:
* 'BOX': (default) each widget gets at least its natural size, and
additional space is distributed corresponding to the flex values.
* 'FIX': all available space is distributed corresponding to the flex values.
* 'SPLIT': available space is initially distributed corresponding to the
flex values, and can be modified by the user by dragging the splitters.
""")
orientation = OrientationProp(settable=True, doc="""
The orientation of the child | |
# 11. Conference Win Pct.
df_tenure['Win_Pct_Conf'] = df_tenure.apply(
lambda row: row['W_Conf'] / row['G_Conf'] if row['G_Conf'] != 0 else 0, axis = 1)
# if (len(df_tenure) == 1) and (int(df_tenure['G_Conf']) == 0):
# df_tenure['Win_Pct_Conf'] = 0
# else:
# df_tenure['Win_Pct_Conf'] = df_tenure.apply(lambda row: row['W_Conf'] / row['G_Conf']
# if row['G_Conf'] != 0 else 0, axis = 1)
# 12. Create top 25 opponent win/loss flag
list_top25_results = []
for index, row in df_tenure.iterrows():
if (row['Result'] == 'W') and (~np.isnan(row['Rank_Opp'])):
list_top25_results.append('W')
elif (row['Result'] == 'L') and (~np.isnan(row['Rank_Opp'])):
list_top25_results.append('L')
elif (row['Result'] == 'T') and (~np.isnan(row['Rank_Opp'])):
list_top25_results.append('T')
else:
list_top25_results.append('')
df_tenure['Result_Top25_Opp'] = list_top25_results
# 13. Wins vs. AP Top-25
df_tenure['W_vs_Rank'] = df_tenure.Result_Top25_Opp.eq('W').cumsum()
# 14. Losses vs. AP Top-25
df_tenure['L_vs_Rank'] = df_tenure.Result_Top25_Opp.eq('L').cumsum()
# 15. Ties vs AP Top-25
df_tenure['T_vs_Rank'] = df_tenure.Result_Top25_Opp.eq('T').cumsum()
# 16. Win Pct. vs AP Top-25
df_tenure['Win_Pct_vs_Rank'] = df_tenure.apply(
lambda row: row['W_vs_Rank'] / (row['W_vs_Rank'] + row['L_vs_Rank'] + row['T_vs_Rank'])
if (row['W_vs_Rank'] + row['L_vs_Rank'] + row['T_vs_Rank']) != 0 else 0, axis = 1)
# 17. Total bowl games
df_tenure['Bowl_G'] = df_tenure.Notes.str.contains('Bowl').eq(True).cumsum()
# 18. Create bowl win/loss flag
list_bowl_results = []
for index, row in df_tenure.iterrows():
if (row['Result'] == 'W') and ('Bowl' in str(row['Notes'])):
list_bowl_results.append('W')
elif (row['Result'] == 'L') and ('Bowl' in str(row['Notes'])):
list_bowl_results.append('L')
elif (row['Result'] == 'T') and ('Bowl' in str(row['Notes'])):
list_bowl_results.append('T')
else:
list_bowl_results.append('')
df_tenure['Result_Bowl'] = list_bowl_results
# 19. Bowl Wins
df_tenure['Bowl_W'] = df_tenure.Result_Bowl.eq('W').cumsum()
# 20. Bowl Losses
df_tenure['Bowl_L'] = df_tenure.Result_Bowl.eq('L').cumsum()
# 21. Bowl Ties
df_tenure['Bowl_T'] = df_tenure.Result_Bowl.eq('T').cumsum()
# 22. Bowl Win Pct.
df_tenure['Win_Pct_Bowl'] = df_tenure.apply(
lambda row: row['Bowl_W'] / (row['Bowl_W'] + row['Bowl_L'] + row['Bowl_T'])
if (row['Bowl_W'] + row['Bowl_L'] + row['Bowl_T']) != 0 else 0, axis = 1)
# 23. Calculate # of seasons with pre-post season AP Top 25 rankings
list_AP_Pre_counts = []
list_AP_Post_25_counts = []
list_AP_Post_10_counts = []
list_AP_Post_5_counts = []
list_game_counts = []
for season, grp in df_tenure.groupby('Season'):
list_AP_Pre_counts = list_AP_Pre_counts + [1 if ~np.isnan(grp.AP_Pre.iloc[0]) else 0]
list_AP_Post_25_counts = list_AP_Post_25_counts + [1 if grp.AP_Post.iloc[0] <= 25 else 0]
list_AP_Post_10_counts = list_AP_Post_10_counts + [1 if grp.AP_Post.iloc[0] <= 10 else 0]
list_AP_Post_5_counts = list_AP_Post_5_counts + [1 if grp.AP_Post.iloc[0] <= 5 else 0]
list_game_counts = list_game_counts + [len(grp)]
series_AP_Pre_counts = pd.Series(list_AP_Pre_counts).cumsum()
series_AP_Post_25_counts = pd.Series(list_AP_Post_25_counts).cumsum()
series_AP_Post_10_counts = pd.Series(list_AP_Post_10_counts).cumsum()
series_AP_Post_5_counts = pd.Series(list_AP_Post_5_counts).cumsum()
# 24. Total Years in AP Top-25 (Preaseason)
df_tenure['AP_Pre_count'] = sum([[x]*y for x,y in zip(series_AP_Pre_counts, list_game_counts)], [])
# 25. Total Years in AP Top-25 (Postseason)
df_tenure['AP_Post_25_count'] = sum([[x]*y for x,y in zip(series_AP_Post_25_counts, list_game_counts)], [])
# 26. Total Years in AP Top-10 (Postseason)
df_tenure['AP_Post_10_count'] = sum([[x]*y for x,y in zip(series_AP_Post_10_counts, list_game_counts)], [])
# 27. Total Years in AP Top-5 (Postseason)
df_tenure['AP_Post_5_count'] = sum([[x]*y for x,y in zip(series_AP_Post_5_counts, list_game_counts)], [])
# 28. Total Weeks in AP Top-25
df_tenure['Weeks_Ranked'] = list(pd.Series([1 if ~np.isnan(x) else 0 for x in df_tenure.Rank]).cumsum())
# 29. Weeks Ranked in AP Top-25 Pct.
df_tenure['Weeks_Ranked_Pct.'] = df_tenure.apply(lambda row: row['Weeks_Ranked'] / row['G'], axis = 1)
# 30. Season Conference Wins
list_conf_wins = []
for season, grp in df_tenure.groupby(['Season']):
list_conf_wins = list_conf_wins + list(grp.Result_Conf.eq('W').cumsum())
df_tenure['W_Sn_Conf'] = list_conf_wins
# 31. Season Conference Losses
list_conf_losses = []
for season, grp in df_tenure.groupby(['Season']):
list_conf_losses = list_conf_losses + list(grp.Result_Conf.eq('L').cumsum())
df_tenure['L_Sn_Conf'] = list_conf_losses
# 31. Season Conference Ties
list_conf_ties = []
for season, grp in df_tenure.groupby(['Season']):
list_conf_ties = list_conf_ties + list(grp.Result_Conf.eq('T').cumsum())
df_tenure['T_Sn_Conf'] = list_conf_ties
# 32. Season Win Pct.
df_tenure['Win_Pct_Sn'] = df_tenure.apply(lambda row: row['W_Sn'] / row['Week'], axis = 1)
# 33. Season Conference Win Pct.
df_tenure['Win_Pct_Sn_Conf'] = df_tenure.apply(
lambda row: row['W_Sn_Conf'] / (row['W_Sn_Conf'] + row['L_Sn_Conf'] + row['T_Sn_Conf'])
if (row['W_Sn_Conf'] + row['L_Sn_Conf'] + row['T_Sn_Conf']) != 0 else 0, axis = 1)
# 34. Winning Seasons
list_final_win_pct = list(df_tenure.groupby('Season').tail(1).Win_Pct_Sn)
list_winning_seasons = [1 if x > .5 else 0 for x in list_final_win_pct]
list_win_sn_cnt = []
for idx in range(0,len(row_counts)):
list_win_sn_cnt = list_win_sn_cnt + ([list_winning_seasons[idx]] * row_counts[idx])
df_tenure['Winning_Sns'] = list_win_sn_cnt
# 35. Create a flag for win/loss vs Power 5 teams
list_p5_results = []
for index, row in df_tenure.iterrows():
if (row['Result'] == 'W') and (row['Power5_Opp'] == True):
list_p5_results.append('W')
elif (row['Result'] == 'L') and (row['Power5_Opp'] == True):
list_p5_results.append('L')
elif (row['Result'] == 'T') and (row['Power5_Opp'] == True):
list_p5_results.append('T')
else:
list_p5_results.append('')
df_tenure['Results_P5'] = list_p5_results
# 36. Games vs. Power 5 teams
df_tenure['G_P5'] = df_tenure.Results_P5.ne('').cumsum()
# 37. Wins vs. Power 5 teams
df_tenure['W_P5'] = df_tenure.Results_P5.eq('W').cumsum()
# 38. Losses vs. Power 5 teams
df_tenure['L_P5'] = df_tenure.Results_P5.eq('L').cumsum()
# 39. Ties vs. Power 5 teams
df_tenure['T_P5'] = df_tenure.Results_P5.eq('T').cumsum()
# 40. Win Pct. vs Power 5 teams
df_tenure['Win_Pct_P5'] = df_tenure.apply(
lambda row: row['W_P5'] / row['G_P5'] if row['G_P5'] != 0 else 0, axis = 1)
# 41. Create a flag for win/loss vs. teams with > .500 records
list_winning_results = []
for index, row in df_tenure.iterrows():
if (row['Result'] == 'W') and (row['Opp_Winning_Record'] == True):
list_winning_results.append('W')
elif (row['Result'] == 'L') and (row['Opp_Winning_Record'] == True):
list_winning_results.append('L')
elif (row['Result'] == 'T') and (row['Opp_Winning_Record'] == True):
list_winning_results.append('T')
else:
list_winning_results.append('')
df_tenure['Results_vs_Winning'] = list_winning_results
# 42. Games vs. teams with winning (> .500) records
df_tenure['G_vs_Winning'] = df_tenure.Results_vs_Winning.ne('').cumsum()
# 43. Wins vs. teams with winning (> .500) records
df_tenure['W_vs_Winning'] = df_tenure.Results_vs_Winning.eq('W').cumsum()
# 44. Losses vs. teams with winning (> .500) records
df_tenure['L_vs_Winning'] = df_tenure.Results_vs_Winning.eq('L').cumsum()
# 45. Ties vs. teams with winning (> .500) records
df_tenure['T_vs_Winning'] = df_tenure.Results_vs_Winning.eq('T').cumsum()
# 46. Win Pct. vs. teams with winning (> .500 ) records
df_tenure['Win_Pct_vs_Winning'] = df_tenure.apply(
lambda row: row['W_vs_Winning'] / row['G_vs_Winning'] if row['G_vs_Winning'] != 0 else 0, axis = 1)
# 47. Create a flag for win/loss vs. teams with > .500 records in conference
list_winning_results_conf = []
for index, row in df_tenure.iterrows():
if ((row['Result'] == 'W') and (
row['Opp_Conf_Winning_Record'] == True)) and (
row['Conf'] == row['Conf_Opp']):
list_winning_results_conf.append('W')
elif ((row['Result'] == 'L') and (
row['Opp_Conf_Winning_Record'] == True)) and (
row['Conf'] == row['Conf_Opp']):
list_winning_results_conf.append('L')
elif ((row['Result'] == 'T') and (
row['Opp_Conf_Winning_Record'] == True)) and (
row['Conf'] == row['Conf_Opp']):
list_winning_results_conf.append('T')
else:
list_winning_results_conf.append('')
df_tenure['Results_vs_Winning_Conf'] = list_winning_results_conf
# 48. Games vs. teams with winning (> .500) records in conference
df_tenure['G_vs_Winning_Conf'] = df_tenure.Results_vs_Winning_Conf.ne('').cumsum()
# 49. Wins vs. teams with winning (> .500) records in conference
df_tenure['W_vs_Winning_Conf'] = df_tenure.Results_vs_Winning_Conf.eq('W').cumsum()
# 50. Losses vs. teams with winning (> .500) records in conference
df_tenure['L_vs_Winning_Conf'] = df_tenure.Results_vs_Winning_Conf.eq('L').cumsum()
# 51. Ties vs. teams with winning (> .500) records in conference
df_tenure['T_vs_Winning_Conf'] = df_tenure.Results_vs_Winning_Conf.eq('T').cumsum()
# 52. Win Pct. vs. teams with winning (> .500) records in conference
df_tenure['Win_Pct_vs_Winning_Conf'] = df_tenure.apply(
lambda row: row['W_vs_Winning_Conf'] / row['G_vs_Winning_Conf'] if row['G_vs_Winning_Conf'] != 0 else 0, axis = 1)
# test = df_tenure[['Season', 'Week', 'Opponent', 'Win_Pct_Opp', 'Opp_Winning_Record', 'Results_vs_Winning', 'G_vs_Winning', 'W_vs_Winning', 'L_vs_Winning', 'Win_Pct_vs_Winning']]
# test = df_tenure[['Season', 'Week', 'Opponent', 'Win_Pct_Conf_Opp', 'Opp_Conf_Winning_Record',
# 'Results_vs_Winning_Conf', 'G_vs_Winning_Conf',
# 'W_vs_Winning_Conf', 'L_vs_Winning_Conf', 'Win_Pct_vs_Winning_Conf']]
# 53. Calculate the coach's winning pct at the same number of games as SF's current total
if len(df_tenure) >= games_sf:
df_tenure['Win_Pct_at_SF'] = [float(df_tenure[df_tenure['G'] == games_sf]['Win_Pct'])] * len(df_tenure)
else:
df_tenure['Win_Pct_at_SF'] = [np.nan] * len(df_tenure)
# 54. Reorder columns
df_tenure = df_tenure[['Season', 'Week', 'Date', 'Day', 'Rank', 'School',
'Coach', 'Conf', 'Power5', 'Home_Away', 'Rank_Opp',
'Opponent', 'Conf_Opp', 'Power5_Opp', 'Result', 'Pts', 'Pts_Opp',
'Sn', 'G', 'W', 'L', 'T', 'Win_Pct',
'G_Conf', 'W_Conf', 'L_Conf', 'T_Conf', 'Win_Pct_Conf',
'G_P5', 'W_P5', 'L_P5', 'T_P5', 'Win_Pct_P5',
'G_vs_Winning', 'W_vs_Winning', 'L_vs_Winning', 'T_vs_Winning', 'Win_Pct_vs_Winning',
'G_vs_Winning_Conf', 'W_vs_Winning_Conf', 'L_vs_Winning_Conf', 'T_vs_Winning_Conf', 'Win_Pct_vs_Winning_Conf',
'W_Sn', 'L_Sn', 'T_Sn', 'Win_Pct_Sn',
'W_Sn_Conf', 'L_Sn_Conf', 'T_Sn_Conf', 'Win_Pct_Sn_Conf',
'W_vs_Rank', 'L_vs_Rank', 'T_vs_Rank', 'Win_Pct_vs_Rank',
'Winning_Sns',
'Bowl_G', 'Bowl_W', 'Bowl_L', 'Bowl_T', 'Win_Pct_Bowl',
'AP_Pre', 'AP_High', 'AP_Post',
'AP_Pre_count', 'AP_Post_25_count', 'AP_Post_10_count', 'AP_Post_5_count',
'Weeks_Ranked', 'Weeks_Ranked_Pct.',
'Win_Pct_at_SF',
'Notes', 'url_boxscore']]
return df_tenure
#==============================================================================
# Working Code
#==============================================================================
# Set the project working directory
# path_dir = pathlib.Path(r'C:\Users\reideej1\Projects\a_Personal\cfbAnalysis')
path_dir = pathlib.Path(os.getcwd())
# If launched from outside the repo checkout, assume the `cfbAnalysis`
# project folder is a subdirectory of the current directory.
if 'cfbAnalysis' not in str(path_dir):
    path_dir = path_dir.joinpath('cfbAnalysis')
# Subsequent relative paths (e.g. data/raw/Team History) resolve against
# the project root.
os.chdir(path_dir)
#------------------------------------------------------------------------------
# Scrape and compile data for individual team games
#------------------------------------------------------------------------------
# Scrape updated results for most recent season
# NOTE(review): season year is hard-coded -- bump for future seasons.
scrapeCfbResultsAllYears(2021)
# Accumulator for every team's game-by-game records.
df_all_games = pd.DataFrame()
for fname in tqdm.tqdm(list(path_dir.joinpath('data/raw/Team History').glob('records*.csv'))):
# load file
df = pd.read_csv(fname)
# drop rows without scores
df = df[df['Result'].notna()]
if '2020' in str(fname):
list_years = []
for | |
#!/usr/bin/env python
# To do:
# - Allow choice of gene label.
# - Make transparent background.
# - What if there are nan's in the probability file?
"""
Classes:
ClusterData
PlotLayout Holds the layout for each individual piece of the plot.
HeatmapLayout
ColorbarLayout
DendrogramLayout
GeneDendrogramLayout
ArrayDendrogramLayout
GeneClusterLayout
ArrayClusterLayout
GeneLabelLayout
ArrayLabelLayout
Functions:
process_data_set
make_layout
calc_coords_for_layout
convert_to_pcl
filter_matrix
normalize_matrix
cluster_matrix
pretty_scale_matrix
find_data_files
read_data_set
write_data_set
plot
plot_matrix
plot_dendrogram
plot_gene_clusters
plot_array_clusters
plot_gene_labels
plot_array_labels
_cluster
_cleanup_cluster
_get_gene_ids Return unique IDs for the genes.
_get_array_ids Return unique IDs for the arrays.
_get_gene_labels Return pretty labels for the genes.
_get_array_labels Return pretty labels for the arrays.
_parse_gene_names
_parse_color
_calc_colorbar_size
_calc_colorbar_ticks
_get_color
_exists_nz
"""
import os, sys
## Detect if I'm being run from within GenePattern. If so, then add
## the current directory to the library path.
#if os.path.split(__file__)[0].endswith("gp_pybinreg"):
# sys.path.append(os.path.split(__file__)[0])
MIN_FONTSIZE = 6
MAX_MEGAPIXELS = 256 # No more than 256 megapixel plot.
class ClusterData:
    """Container for the results of clustering the genes and the arrays.

    Simply stores the six objects handed to the constructor under
    attributes of the same names.
    """

    def __init__(
        self, gene_tree, array_tree, gene_tree_cluster, array_tree_cluster,
        gene_cluster, array_cluster):
        # Hierarchical trees (presumably (left, right, distance) node
        # lists -- see DendrogramLayout) for each axis.
        self.gene_tree, self.array_tree = gene_tree, array_tree
        # Cluster assignments for the tree nodes.
        self.gene_tree_cluster = gene_tree_cluster
        self.array_tree_cluster = array_tree_cluster
        # Flat cluster assignments for the items themselves.
        self.gene_cluster, self.array_cluster = gene_cluster, array_cluster
class PlotLayout:
    """Aggregate of the per-component layout objects for the whole plot.

    Each argument is stored under an attribute of the same name.
    """

    def __init__(self, heatmap, colorbar, gene_dendrogram, array_dendrogram,
                 gene_cluster, array_cluster, gene_label, array_label):
        self.heatmap = heatmap
        self.colorbar = colorbar
        self.gene_dendrogram, self.array_dendrogram = (
            gene_dendrogram, array_dendrogram)
        self.gene_cluster, self.array_cluster = gene_cluster, array_cluster
        self.gene_label, self.array_label = gene_label, array_label
class PlotCoords:
    """Upper-left (x, y) pixel coordinates for each plot component.

    Attribute names follow the two-letter component prefixes:
    hm=heatmap, cb=colorbar, gd/ad=gene/array dendrogram,
    gc/ac=gene/array cluster strip, gl/al=gene/array labels.
    """

    def __init__(self, hm_x, hm_y, cb_x, cb_y, gd_x, gd_y, ad_x, ad_y,
                 gc_x, gc_y, ac_x, ac_y, gl_x, gl_y, al_x, al_y):
        pairs = (("hm", hm_x, hm_y), ("cb", cb_x, cb_y),
                 ("gd", gd_x, gd_y), ("ad", ad_x, ad_y),
                 ("gc", gc_x, gc_y), ("ac", ac_x, ac_y),
                 ("gl", gl_x, gl_y), ("al", al_x, al_y))
        for prefix, x, y in pairs:
            setattr(self, prefix + "_x", x)
            setattr(self, prefix + "_y", y)
class HeatmapLayout:
    """Pixel geometry for the heatmap: an nrow x ncol grid of colored
    boxes, separated by optional grid lines and framed by a border.
    """

    def __init__(
        self, nrow, ncol, boxwidth, boxheight, scale_border, grid,
        inverse_colors, black0, color_fn):
        self.nrow = nrow
        self.ncol = ncol
        self.boxwidth = boxwidth
        self.boxheight = boxheight
        self.inverse_colors = inverse_colors
        self.black0 = black0
        self.color_fn = color_fn
        # Border is ~20% (scaled) and grid lines ~10% of the box's
        # shorter side; the grid collapses to 0 when disabled.
        short_side = min(boxwidth, boxheight)
        self.BORDER = int(round(short_side * 0.20) * scale_border)
        self.GRID_SIZE = int(round(short_side * 0.10)) if grid else 0
        assert self.GRID_SIZE <= self.BORDER

    def width(self):
        return self.size()[0]

    def height(self):
        return self.size()[1]

    def size(self):
        """Return (width, height) of the whole heatmap in pixels."""
        cells_w = self.ncol * self.boxwidth + (self.ncol - 1) * self.GRID_SIZE
        cells_h = self.nrow * self.boxheight + (self.nrow - 1) * self.GRID_SIZE
        return cells_w + 2 * self.BORDER, cells_h + 2 * self.BORDER

    def coord(self, row, col):
        """Return (x, y, width, height) of the box at (row, col)."""
        x = self.BORDER + col * (self.boxwidth + self.GRID_SIZE)
        y = self.BORDER + row * (self.boxheight + self.GRID_SIZE)
        return x, y, self.boxwidth, self.boxheight

    def color(self, x):
        """Map x in [0, 1] to the nearest RGB color; None/NaN means missing."""
        import math
        if x is None or math.isnan(x):
            # Missing value: draw a white box.
            return (255, 255, 255)
        assert x >= 0 and x <= 1, "x out of range: %g" % x
        return _get_color(
            x, self.color_fn, flip_colors=self.inverse_colors,
            black0=self.black0)
class ColorbarLayout:
    """Pixel layout for the colorbar, its tick marks and tick labels.

    The bar maps the signal range [signal_0, signal_1] onto the color
    gradient. Orientation is inferred from the bar's aspect ratio: taller
    than wide means a vertical bar (see is_vertical).
    """
    def __init__(
        self, cb_width, cb_height, signal_0, signal_1,
        ticks, tick_labels, label_sizes, fontsize, inverse_colors, color_fn):
        # Tick geometry, expressed as fractions of the bar's short side.
        TICK_SIZE = 0.15 # relative to BAR_SHORT
        TICK_BUFFER = 0.15 # relative to BAR_SHORT
        # One label string and one pre-measured label size per tick.
        assert len(ticks) == len(tick_labels)
        assert len(ticks) == len(label_sizes)
        self.TICK_SIZE = TICK_SIZE
        self.TICK_BUFFER = TICK_BUFFER
        self._cb_width = cb_width # Width of just the bar.
        self._cb_height = cb_height
        self._signal_0 = signal_0
        self._signal_1 = signal_1
        self._ticks = ticks
        self._tick_labels = tick_labels
        self._label_sizes = label_sizes
        self._fontsize = fontsize
        self._inverse_colors = inverse_colors
        #self._black0 = black0
        self._color_fn = color_fn
    def is_vertical(self):
        # Vertical when the bar is taller than it is wide.
        return self._cb_height > self._cb_width
    def width(self):
        """Total width in pixels; a vertical bar adds ticks and labels."""
        width = self._cb_width
        if self.is_vertical():
            # Vertical skinny colorbar.
            # Tick mark.
            width += self._cb_width * self.TICK_SIZE
            # BUFFER between tick mark and label.
            width += self._cb_width * self.TICK_BUFFER
            # Text.
            text_width = max([x[1] for x in self._label_sizes])
            # PIL doesn't calculate text widths very accurately.
            # Compensate with a fudge factor. 2 is not big enough.
            text_width *= 2.5
            width += text_width
        width = int(width)
        return width
    def height(self):
        """Total height in pixels; a horizontal bar adds ticks and labels."""
        height = self._cb_height
        # Bug: For vertical colorbar, does not take into account
        # height of labels. Might be cut off.
        if not self.is_vertical():
            # Horizontal colorbar.
            # Tick mark.
            height += self._cb_height * self.TICK_SIZE
            # BUFFER between tick mark and label.
            height += self._cb_height * self.TICK_BUFFER
            # Text.
            text_height = max([x[0] for x in self._label_sizes])
            height += text_height
        height = int(height)
        return height
    def size(self):
        # Size taken by the entire color bar, including labels.
        return self.width(), self.height()
    def bar_width(self):
        # Size of just the bar.
        return self._cb_width
    def bar_height(self):
        return self._cb_height
    def num_ticks(self):
        return len(self._tick_labels)
    def tick_coord(self, i):
        """Return the (x, y, width, height) box of tick mark i."""
        assert i >= 0 and i < len(self._ticks)
        tick = self._ticks[i]
        # Fractional position of this tick along the signal range.
        perc = float(tick-self._signal_0)/(self._signal_1-self._signal_0)
        if self.is_vertical():
            width = self._cb_width * self.TICK_SIZE
            height = 1
            x = self._cb_width
            #y = perc * self._cb_height # high numbers on bottom
            y = (1.0-perc) * self._cb_height # high numbers on top
            y = min(y, self._cb_height-height)
        else:
            width = 1
            height = self._cb_height * self.TICK_SIZE
            x = perc * self._cb_width
            y = self._cb_height
            x = min(x, self._cb_width-width)
        x, y, width, height = int(x), int(y), int(width), int(height)
        return x, y, width, height
    def tick_label(self, i):
        # Label text for tick i.
        assert i >= 0 and i < len(self._tick_labels)
        return self._tick_labels[i]
    def label_coord(self, i):
        """Return the (x, y) pixel position for tick label i, centered
        on its tick mark."""
        x = self.tick_coord(i)
        tick_x, tick_y, tick_width, tick_height = x
        # NOTE(review): label sizes are unpacked here as (width, height),
        # but width()/height() above index them as (height, width) --
        # one of the two orderings looks inverted; confirm against the
        # _calc_colorbar_* helpers.
        label_width, label_height = self._label_sizes[i]
        if self.is_vertical():
            x = tick_x + tick_width + self._cb_width*self.TICK_BUFFER
            y = tick_y - label_height/2.0
        else:
            x = tick_x - label_width/2.0
            y = tick_y + tick_height + self._cb_height*self.TICK_BUFFER
        x, y = int(x), int(y)
        return x, y
    def label_size(self, i):
        # Pre-measured size of label i, as given to __init__.
        return self._label_sizes[i]
    def fontsize(self):
        return self._fontsize
    def color(self, x):
        # x is from [0, 1]. find the nearest color.
        assert x >= 0 and x <= 1, "x out of range: %g" % x
        return _get_color(x, self._color_fn, flip_colors=self._inverse_colors)
class DendrogramLayout:
    def __init__(
        self, num_items, num_other_items,
        pixels_per_item, pixels_per_other_item,
        size_scale, thickness_scale, tree, tree_cluster, color_fn):
        """Compute the pixel geometry of one dendrogram.

        num_items/pixels_per_item describe the axis this dendrogram spans;
        num_other_items/pixels_per_other_item describe the perpendicular
        axis (used only to keep the two dendrograms symmetric).
        tree is an iterable of (left, right, distance) nodes; tree_cluster
        maps node id -> cluster number (may be falsy for "no clusters").
        """
        # This dendrogram is measured in 2 dimensions: the dimension
        # that spans across the branches, and the dimension that spans
        # across the phylogenetic distance. color_fn is for the
        # clusters.
        import math
        self.num_items = num_items
        self.pixels_per_item = pixels_per_item
        self.tree = tree
        self.tree_cluster = tree_cluster
        self.color_fn = color_fn
        self.max_cluster = None
        if self.tree_cluster:
            self.max_cluster = max(self.tree_cluster.values())
        self._item_size = num_items * pixels_per_item
        # Should be the same height as the heatmap. The width should
        # be 0.625x the height (perfect ratio).
        RATIO = 1.0 / 1.6 / 2.0
        # Both dendrograms should have symmetric sizes. So base the
        # RATIO on the smaller of the two dimensions.
        x1 = num_items * pixels_per_item
        x2 = num_other_items * pixels_per_other_item
        x = min(x1, x2)
        #x = max(x1, x2)
        self._dist_size = int(math.ceil(x * RATIO * size_scale))
        # Convert the distances from clustering to percentages. The
        # percentages indicate how far across the plot to place the
        # node. 0% is the furthest distance, while 100% is the
        # closest.
        # These are actually similarity metrics, so put 1.0 at 100%,
        # and the lowest value given in the tree (can be negative) at
        # 0%.
        lowest = highest = None
        for node in tree:
            left, right, distance = node
            if lowest is None or distance < lowest:
                lowest = distance
            if highest is None or distance > highest:
                highest = distance
        # NOTE(review): assumes a non-empty tree -- with no nodes,
        # highest stays None and this comparison raises.
        assert highest <= 1.0
        # Set the closest to always be 1.0.
        highest = 1.0
        # Set a small border at the end for the root.
        self.ROOT_SIZE = 0.15 * (highest - lowest)
        lowest -= self.ROOT_SIZE
        self.lowest, self.highest = lowest, highest
        # Line thickness: ~20% of the smaller pixels-per-item, scaled by
        # thickness_scale and clamped to [1, pixels_per_item].
        min_ppi = min(pixels_per_item, pixels_per_other_item)
        #min_ppi = pixels_per_item
        x = int(math.ceil(min_ppi*0.20 * thickness_scale))
        #x = min(max(x, 1), min_ppi)
        x = min(max(x, 1), pixels_per_item)
        self.LINEWIDTH = x
        #print min_ppi, thickness_scale, min_ppi, self.LINEWIDTH
def vthicken(self, x, y, width, height):
import math
np = self.LINEWIDTH - width
if np <= 0:
return x, y, width, height
hnp = int(math.floor(np/2.0))
return x-hnp, y, width+np, height
def hthicken(self, x, y, width, height):
import math
np = self.LINEWIDTH - height
if np <= 0:
return x, y, width, height
hnp = int(math.floor(np/2.0))
return x, y-hnp, width, height+np
    def item_size(self):
        # Pixel extent along the branch axis (num_items * pixels_per_item).
        return self._item_size
    def dist_size(self):
        # Pixel extent along the distance axis (see RATIO in __init__).
        return self._dist_size
def color(self, id):
c = 0, 0, 0
n = None
if self.tree_cluster:
n = self.tree_cluster[id]
if n is not None and self.max_cluster:
# If requested, should I use the inverse of the color | |
* gradient(theta,
scaling_params['range_min'],
scaling_params['range_max'])
)
# Step error function
elif config['angular_scaling']['err_func_type'] == "step":
scaling_params = config['angular_scaling']['step']
break_points = list(zip(scaling_params['transition_pos'],
scaling_params['transition_widths']))
self._aeff['Area_new'] = self._aeff['Area_new'] * (
1 + scaling_params['scale'] * step(theta, break_points)
)
else:
raise ValueError("Aeff angular scaling: unknown scaling function type '{:s}'"
.format(config['angular_scaling']['err_func_type']))
# ------------------------------------------
# Recording the scaled Aeff
input_irf_file['Effective area'].data['EffArea'][0] = self._aeff['Area_new'].transpose()
def _scale_edisp(self, input_irf_file, config):
"""
This internal method scales the IRF energy dispersion through the Migration Matrix.
Two scalings can be applied: (1) vs energy and (2) vs off-axis angle. In both cases
the scaling function is taken as (1 + scale * tanh((x-x0)/dx)). In case (1) the scaling
is performed in log-energy.
Parameters
----------
input_irf_file: pyfits.HDUList
Open pyfits IRF file, which contains the Migration Matrix that should be scaled.
config: dict
A dictionary with the scaling settings. Must have following keys defined:
"energy_scaling": dict
Contains setting for the energy scaling (see the structure below).
"angular_scaling": dict
Contains setting for the off-center angle scaling (see the structure below).
In both cases, internally the above dictionaries should contain:
"err_func_type": str
The name of the scaling function to use. Accepted values are: "constant",
"gradient" and "step".
If err_func_type == "constant":
scale: float
The scale factor. passing 1.0 results in no scaling.
If err_func_type == "gradient":
scale: float
The scale factor. passing 0.0 results in no scaling.
range_min: float
The x value (energy or off-center angle), that corresponds to -1 scale.
range_max: float
The x value (energy or off-center angle), that corresponds to +1 scale.
If err_func_type == "step":
scale: float
The scale factor. passing 0.0 results in no scaling.
transition_pos: list
The list of x values (energy or off-center angle), at which
step-like transitions occur. If scaling the energy dependence,
values must be in TeVs, if angular - in degrees.
transition_widths: list
The list of step-like transition widths, that correspond to transition_pos.
For energy scaling the widths must be in log10 scale.
Returns
-------
None
"""
# Reading the MATRIX parameters
self._edisp = dict()
self._edisp['Elow'] = input_irf_file['ENERGY DISPERSION'].data['ETRUE_LO'][0].copy()
self._edisp['Ehigh'] = input_irf_file['ENERGY DISPERSION'].data['ETRUE_HI'][0].copy()
self._edisp['ThetaLow'] = input_irf_file['ENERGY DISPERSION'].data['THETA_LO'][0].copy()
self._edisp['ThetaHi'] = input_irf_file['ENERGY DISPERSION'].data['THETA_HI'][0].copy()
self._edisp['Mlow'] = input_irf_file['ENERGY DISPERSION'].data['MIGRA_LO'][0].copy()
self._edisp['Mhigh'] = input_irf_file['ENERGY DISPERSION'].data['MIGRA_HI'][0].copy()
self._edisp['Matrix_'] = input_irf_file['ENERGY DISPERSION'].data['MATRIX'][0].transpose().copy()
self._edisp['E'] = scipy.sqrt(self._edisp['Elow'] * self._edisp['Ehigh'])
self._edisp['M'] = (self._edisp['Mlow'] + self._edisp['Mhigh']) / 2.0
self._edisp['T'] = (self._edisp['ThetaLow'] + self._edisp['ThetaHi']) / 2.0
# Creating the energy-migration matix-theta mesh grid
energy, migration, theta = scipy.meshgrid(self._edisp['E'], self._edisp['M'], self._edisp['T'], indexing='ij')
# -------------------------------------------
# Scaling the Matrix energy dependence
# Constant error function
if config['energy_scaling']['err_func_type'] == "constant":
self._edisp['Matrix_new'] = self._edisp['Matrix_'] * config['energy_scaling']['constant']['scale']
# Gradients error function
elif config['energy_scaling']['err_func_type'] == "gradient":
scaling_params = config['energy_scaling']['gradient']
self._edisp['Matrix_new'] = self._edisp['Matrix_'] * (
1. + scaling_params['scale'] * gradient(scipy.log10(energy),
scipy.log10(scaling_params['range_min']),
scipy.log10(scaling_params['range_max']))
)
# Step error function
elif config['energy_scaling']['err_func_type'] == "step":
scaling_params = config['energy_scaling']['step']
break_points = list(zip(scipy.log10(scaling_params['transition_pos']),
scaling_params['transition_widths']))
self._edisp['Matrix_new'] = self._edisp['Matrix_'] * (
1 + scaling_params['scale'] * step(scipy.log10(energy), break_points)
)
else:
raise ValueError("Edisp energy scaling: unknown scaling function type '{:s}'"
.format(config['energy_scaling']['err_func_type'])
)
# -------------------------------------------
# Scaling the Matrix off-axis angle dependence
# Constant error function
if config['angular_scaling']['err_func_type'] == "constant":
self._edisp['Matrix_new'] = self._edisp['Matrix_new'] * config['angular_scaling']['constant']['scale']
# Gradients error function
elif config['angular_scaling']['err_func_type'] == "gradient":
scaling_params = config['angular_scaling']['gradient']
self._edisp['Matrix_new'] = self._edisp['Matrix_new'] * (
1. + scaling_params['scale'] * gradient(scipy.log10(theta),
scipy.log10(scaling_params['range_min']),
scipy.log10(scaling_params['range_max']))
)
# Step error function
elif config['angular_scaling']['err_func_type'] == "step":
scaling_params = config['angular_scaling']['step']
break_points = list(zip(scipy.log10(scaling_params['transition_pos']),
scaling_params['transition_widths']))
self._edisp['Matrix_new'] = self._edisp['Matrix_new'] * (
1 + scaling_params['scale'] * step(scipy.log10(theta), break_points)
)
else:
raise ValueError("Edisp angular scaling: unknown scaling function type '{:s}'"
.format(config['energy_scaling']['err_func_type'])
)
# ------------------------------------------
# Recording the scaled Matrix
input_irf_file['ENERGY DISPERSION'].data['MATRIX'][0] = self._edisp['Matrix_new'].transpose()
# ------------------------------------------
def _append_irf_to_db(self, output_irf_name, output_irf_file_name):
"""
This internal method appends the new IRF data to the existing calibration data base.
Parameters
----------
output_irf_name: str
The name of the IRF to append, e.g. "Aeff_modified". Current IRF name will be added as a prefix.
output_irf_file_name: str
Name of the file, which stores the new IRF, e.g. "irf_North_z20_50h_modified.fits"
Returns
-------
None
"""
db_file_path = '{path:s}/data/cta/{caldb:s}/caldb.indx'.format(path=self.caldb_path, caldb=self.caldb_name)
# Making a backup
shutil.copy(db_file_path, db_file_path + '.bak')
# Opening the database file
db_file = pyfits.open(db_file_path)
# Creating a new IRF table which will contain 4 more entries - new PSF/Aeff/Edisp/bkg.
nrows_orig = len(db_file['CIF'].data)
nrows_new = nrows_orig + 4
hdu = pyfits.BinTableHDU.from_columns(db_file['CIF'].columns, nrows=nrows_new)
# Aeff entry data
aeff_vals = ['CTA', self.caldb_name, 'NONE', 'NONE', 'ONLINE',
'data/cta/{db:s}/bcf/{irf:s}'.format(db=self.caldb_name, irf=self.irf),
output_irf_file_name,
'BCF', 'DATA', 'EFF_AREA', 'NAME({:s})'.format(output_irf_name), 1,
'2014-01-30', '00:00:00', 51544.0, 0, '14/01/30', 'CTA effective area']
# PSF entry data
psf_vals = ['CTA', self.caldb_name, 'NONE', 'NONE', 'ONLINE',
'data/cta/{db:s}/bcf/{irf:s}'.format(db=self.caldb_name, irf=self.irf),
output_irf_file_name,
'BCF', 'DATA', 'RPSF', 'NAME({:s})'.format(output_irf_name), 1,
'2014-01-30', '00:00:00', 51544.0, 0, '14/01/30', 'CTA point spread function']
# Edisp entry data
edisp_vals = ['CTA', self.caldb_name, 'NONE', 'NONE', 'ONLINE',
'data/cta/{db:s}/bcf/{irf:s}'.format(db=self.caldb_name, irf=self.irf),
output_irf_file_name,
'BCF', 'DATA', 'EDISP', 'NAME({:s})'.format(output_irf_name), 1,
'2014-01-30', '00:00:00', 51544.0, 0, '14/01/30', 'CTA energy dispersion']
# Background entry data
bkg_vals = ['CTA', self.caldb_name, 'NONE', 'NONE', 'ONLINE',
'data/cta/{db:s}/bcf/{irf:s}'.format(db=self.caldb_name, irf=self.irf),
output_irf_file_name,
'BCF', 'DATA', 'BKG', 'NAME({:s})'.format(output_irf_name), 1,
'2014-01-30', '00:00:00', 51544.0, 0, '14/01/30', 'CTA background']
# Filling the columns of the new table
for col_i, colname in enumerate(hdu.columns.names):
# First fill the previously existing data
hdu.data[colname][:nrows_orig] = db_file['CIF'].data[colname]
# Now fill the newly created entries
hdu.data[colname][nrows_orig + 0] = aeff_vals[col_i]
hdu.data[colname][nrows_orig + 1] = psf_vals[col_i]
hdu.data[colname][nrows_orig + 2] = edisp_vals[col_i]
hdu.data[colname][nrows_orig + 3] = bkg_vals[col_i]
# Replacing the old IRF table
db_file['CIF'].data = hdu.data
# Saving the data base
db_file.writeto(db_file_path, overwrite=True)
db_file.close()
def scale_irf(self, config):
"""
This method performs scaling of the loaded IRF - both PSF and Aeff, if necessary.
For the collection area two scalings can be applied: (1) vs energy and
(2) vs off-axis angle. In both cases the scaling function is taken as
(1 + scale * tanh((x-x0)/dx)). In case (1) the scaling value x is log10(energy).
Parameters
----------
config: dict
A dictionary with the scaling settings. Must have following keys defined:
"general", "aeff", "psf".
Key "general" must be a dictionary, containing the following:
caldb: str
CALDB name, e.g. '1dc' or 'prod3b'.
irf: str
IRF name, e.g. 'South_z20_50h'
output_irf_name: str
The name of output IRF, say "my_irf".
output_irf_file_name: str:
The name of the output IRF file, e.g. 'irf_scaled_version.fits' (the name
must follow the "irf_*.fits" template, "irf_scaled_version.fits"). The file
will be put to the main directory of the chosen IRF.
Keys "aeff" and "psf" must be dictionaries, containing the following:
"energy_scaling": dict
Contains setting for the energy scaling (see the structure below).
"angular_scaling": dict
Contains setting for the off-center angle scaling (see the structure below).
In both cases, internally the above dictionaries should contain:
"err_func_type": str
The name of the scaling function to use. Accepted values are: "constant",
"gradient" and "step".
If err_func_type == "constant":
scale: float
The scale factor. passing 1.0 results in no scaling.
If err_func_type == "gradient":
scale: float
The scale factor. passing 0.0 results in no scaling.
range_min: float
The x value (energy or off-center angle), that corresponds to -1 scale.
range_max: float
The x value (energy or off-center angle), that corresponds to +1 scale.
If err_func_type == "step":
scale: float
The scale factor. passing 0.0 results in no scaling.
transition_pos: list
The list of x values (energy or off-center angle), at which
step-like transitions occur. If scaling the energy dependence,
values must be in TeVs, if angular - in degrees.
transition_widths: list
The list of step-like transition widths, that correspond to transition_pos.
For energy scaling the widths must be in log10 scale.
Returns
-------
None
"""
if self.am_ok:
# Opening the IRF input file
input_irf_file = pyfits.open(self.input_irf_file_name, 'readonly')
# Scaling the PSF
self._scale_psf(input_irf_file, config['psf'])
# Scaling the Aeff
self._scale_aeff(input_irf_file, config['aeff'])
# Scaling the Edisp
self._scale_edisp(input_irf_file, config['edisp'])
# Getting the new IRF and output file names
# IRF name
output_irf_name = config['general']['output_irf_name']
# Output file name
output_irf_file_name = config['general']['output_irf_file_name']
# Figuring out the output path
output_path = '{path:s}/data/cta/{caldb:s}/bcf/{irf:s}'.format(path=self.caldb_path,
caldb=self.caldb_name,
irf=self.irf)
# Writing the scaled IRF
input_irf_file.writeto(output_path + "/" + output_irf_file_name, overwrite=True)
# Updating the calibration data base with the new IRF
self._append_irf_to_db(output_irf_name, output_irf_file_name)
else:
print("ERROR: something's wrong with the CALDB/IRF names. So can | |
create the storages versions.
"""
print("Realizing setup...")
# read, load, adjust configs from the file
self._load_config() # check and load config file
self._setup_logger()
self.logger.info("Continuing setup...")
self._load_app_key()
self.logger.debug("Creating local storage...")
self.local_storage = localstorage.LocalStorage(self)
self.logger.debug("Local storage created. Creating remote storage...")
self.remote_storage = remote_storage_type(self)
self.logger.debug(
"Remote storage created. Realizing the remote storage setup..."
)
self.remote_storage.setup()
self.logger.debug("Remote storage setup done.")
self._load_salted_key() # load the salted password, the user key
self.logger.info("Setup done.")
def _pull_group_set(self, signatures_set, files_dict):
"""
Acquire once each file by the signature and copy into FileNode paths given by the list.
:param signatures_set set(str): set of keys to select which files to acquire.
:param files_dict dict(key=signature, value=list(FileNode,...)): dictionary with signature as key and a list of FileNode as values.
"""
temp_file_path = self.local_storage.get_new_tempfile()
temp_file_path2 = self.local_storage.get_new_tempfile()
counter1 = 0
counter2 = 0
try:
for signature in signatures_set:
counter1 += 1
# Acquire it once, into the temp file
if self.remote_storage.get_file(signature, temp_file_path):
# Unpack it on the temp file 2
if self.local_storage.unpack(temp_file_path, temp_file_path2):
# If we succesfully unpacked, we check the signature before proceeding.
if (
self.local_storage.get_signature(temp_file_path2)
== signature
):
# Copy the correct file on all places its copy is needed.
for file_node in files_dict[signature]:
counter2 += 1
if self.local_storage.ensure_folder_exists(
file_node.parent
):
self.local_storage.copy(
temp_file_path2,
self.local_storage.abs_path(file_node),
)
else:
self.logger.error(
"Could not ensure that parent folder exists, for this file: {}".format(
str(file_node.get_path())
)
)
else:
self.logger.error(
"File unpacked resulted in a different signature from original, affecting those files:\n{}".format(
"\n".join(
[
str(file_node.get_path())
for file_node in files_dict[signature]
]
)
)
)
else:
self.logger.error(
"Could not unpack file by signature: {}".format(signature)
)
else:
self.logger.error(
"Could not acquire file signature: {}".format(signature)
)
finally:
self.local_storage.dispose_tempfile(temp_file_path)
self.local_storage.dispose_tempfile(temp_file_path2)
self.logger.debug(
"(total files added on local:{} / files acquired from remote: {} / files copied from downloaded: {})".format(
counter2, counter1, counter2 - counter1
)
)
    def pull(self, authorized=False, local_manifest=None, remote_manifest=None):
        """
        Pull files from RemoteStorage into LocalStorage, warns you before continuing if the result
        operation will delete or overwrite any file or folder.
        This operation follow the file system structure described on the pickled remote Manifest.
        Files are decrypted, decompressed and signature checked before copied into LocalStorage.
        :param authorized bool: If False, when a file deletion is required a confirmation will be asked from the user.
        Otherwise it will just proceed without confirmation.
        :param local_manifest manifest.Manifest: cached version the local manifest, used to avoid reload/rebuilding, passed by sync
        :param remote_manifest manifest.Manifest: cached version the remote manifest, used to avoid downloading, passed by sync
        """
        self.logger.info("Pulling...")
        if self.remote_storage.has_files():
            if self.remote_storage.has_manifest():
                if remote_manifest is None:
                    remote_manifest = self.remote_storage.get_manifest()
                # check if all files in remote manifest are present on remote storage
                if not self.remote_storage.check_manifest_consistency(remote_manifest):
                    self.logger.error(
                        "Not all files in the manifest are present, can't proceed."
                    )
                    self._exit(1)
                if self.local_storage.has_files():
                    # local_storage has files, we need to clean up before acquiring more files
                    if not authorized:
                        if self._confirm_proceeding(
                            "Proceeding with the 'pull' may cause some files/folders in {} to be REPLACE or ERASED!".format(
                                self.config.local_folder_path
                            )
                        ):
                            authorized = True
                        else:
                            self.logger.error(
                                "Could not proceed with clean up, operation not authorized by the user."
                            )
                            self._exit(1)
                    # do a local filesystem scan
                    if local_manifest is None:
                        local_manifest = self.local_storage.build_manifest()
                    # group files by signatures
                    local_group_dict = local_manifest.get_signatures_dict()
                    remote_group_dict = remote_manifest.get_signatures_dict()
                    # counter1: deletions, counter2: moves, counter3: local copies
                    counter1 = 0
                    counter2 = 0
                    counter3 = 0
                    # keys = signatures that only exist on local (excluding the ones shared with remote)
                    # delete files that exist only on local
                    for signature in local_group_dict.keys() - remote_group_dict.keys():
                        for file_node in local_group_dict[signature]:
                            self.local_storage.delete_node(file_node)
                            counter1 += 1
                        del local_group_dict[signature]
                    # move or copy local files: on files that exist on remote and local
                    for signature in local_group_dict.keys() & remote_group_dict.keys():
                        # all files in this loop have the same signature
                        # here files that already exist on local
                        local_files_dict = {
                            file_node.get_path_str(): file_node
                            for file_node in local_group_dict[signature]
                        }
                        # here files that need to exist, following what the remote manifest says
                        remote_files_dict = {
                            file_node.get_path_str(): file_node
                            for file_node in remote_group_dict[signature]
                        }
                        # keys = we use the path as a common hashable key between local and remote
                        # iterate over files that need to be created
                        for remote_key in (
                            remote_files_dict.keys() - local_files_dict.keys()
                        ):
                            # check if we have a local file that is in the wrong place and move to the path needed in the remote_files_dict[remote_key]
                            # NOTE: the dict is mutated inside the loop, so each surplus local
                            # path is consumed (moved) at most once.
                            for local_key in (
                                local_files_dict.keys() - remote_files_dict.keys()
                            ):
                                self.local_storage.ensure_folder_exists(
                                    remote_files_dict[remote_key]
                                )
                                self.local_storage.move_node(
                                    local_files_dict[local_key],
                                    remote_files_dict[remote_key],
                                )
                                local_files_dict[remote_key] = local_files_dict[
                                    local_key
                                ]  # add the new path
                                del local_files_dict[local_key]  # delete previous path
                                counter2 += 1
                                break  # execute once
                            # this 'else' here only executes if we finished the above 'for' without a break (counts for case where len() == 0)
                            else:
                                # get any existing local file and make a copy to the path needed in the remote_files_dict[remote_key]
                                for local_key in local_files_dict.keys():
                                    self.local_storage.ensure_folder_exists(
                                        remote_files_dict[remote_key]
                                    )
                                    self.local_storage.copy_node(
                                        local_files_dict[local_key],
                                        remote_files_dict[remote_key],
                                    )
                                    local_files_dict[remote_key] = local_files_dict[
                                        local_key
                                    ]
                                    counter3 += 1
                                    break  # execute once
                        # delete exceeding files
                        for local_key in (
                            local_files_dict.keys() - remote_files_dict.keys()
                        ):
                            self.local_storage.delete_node(local_files_dict[local_key])
                            counter1 += 1
                    del local_files_dict, remote_files_dict
                    self.logger.info(
                        "(files removed from local: {} / files moved from local: {} / files copied from local: {})".format(
                            counter1, counter2, counter3
                        )
                    )
                    # acquire missing files
                    self._pull_group_set(
                        remote_group_dict.keys() - local_group_dict.keys(),
                        remote_group_dict,
                    )
                    del local_group_dict, remote_group_dict
                    # do a fast scan and delete all folders that shouldnt exist
                    local_manifest = self.local_storage.build_manifest(
                        include_files=False
                    )
                    for folder_node in local_manifest.diff_nodes(
                        remote_manifest, include_files=False
                    ):
                        self.local_storage.delete_empty_folders(folder_node)
                    # do another fast scan and create empty folders existing on remote_folder and not on local (yet)
                    local_manifest = self.local_storage.build_manifest(
                        include_files=False
                    )
                    for folder_node in remote_manifest.diff_nodes(
                        local_manifest, include_files=False
                    ):
                        self.local_storage.ensure_folder_exists(folder_node)
                else:
                    # there's no local files, no need to clean up and all files need to be acquired from zero
                    remote_group_dict = remote_manifest.get_signatures_dict()
                    self._pull_group_set(remote_group_dict.keys(), remote_group_dict)
                # Do a final scan for consistency check
                local_manifest = self.local_storage.build_manifest()
                # If they are not equal, exit with error.
                if local_manifest != remote_manifest:
                    self.logger.error("Local consistency check failed.")
                    # Log both asymmetric differences to aid debugging.
                    diff = [
                        file_node.get_path_str()
                        for file_node in local_manifest.diff_nodes(remote_manifest)
                    ]
                    self.logger.warn("local-remote files:\n{}".format("\n".join(diff)))
                    diff = [
                        file_node.get_path_str()
                        for file_node in remote_manifest.diff_nodes(local_manifest)
                    ]
                    self.logger.warn("remote-local files:\n{}".format("\n".join(diff)))
                    self._exit(1)
                # Otherwise replace the local manifest
                else:
                    self.local_storage.set_manifest(remote_manifest)
                    self.logger.info("Local consistency checked.")
                self.logger.info("Pull finished.")
            else:
                # Remote has files but no manifest: offer to wipe the orphaned files.
                if not self._confirm_proceeding(
                    "Since remote_manifest was not found and there's orphaned files into the {folder}/files folder.\nProceeding will cause all files into {folder}/files folder do be ERASED!".format(
                        folder=self.config.remote_folder_path
                    )
                ):
                    self.logger.error(
                        "Could not proceed with clean up, operation not authorized by the user."
                    )
                    self._exit(1)
                self.remote_storage.clear_all_files()
                self.logger.error("No files on the server to pull from.")
                self._exit(1)
        else:
            self.logger.error("No files on the server to pull from.")
def push(self, authorized=False, local_manifest=None, remote_manifest=None):
"""
Push files from LocalStorage into RemoteStorage, warns you before continuing if the resulting
operation will delete or overwrite any file or folder.
This operation create files into the remote folder files, which their signature being used as name.
Files are compressed and encrypted before copied into RemoteStorage.
:param authorized bool: If False, when a file deletion is required a confirmation will be asked from the user.
Otherwise it will just proceed without confirmation.
:param local_manifest manifest.Manifest: cached version the local manifest, used to avoid reload/rebuilding, passed by sync
:param remote_manifest manifest.Manifest: cached version the remote manifest, used to avoid downloading, passed by sync
"""
self.logger.info("Pushing...")
# check if theres local files, otherwise there's nothing to push
if self.local_storage.has_files():
if local_manifest is None:
local_manifest = (
self.local_storage.build_manifest()
) # scan local filesystem and build a manifest to compare next
if self.remote_storage.has_manifest():
if remote_manifest is None:
remote_manifest = self.remote_storage.get_manifest()
if self.remote_storage.has_files():
if not authorized:
if self._confirm_proceeding(
"Proceeding with the 'push' may cause some files/folders in {} to be REPLACE or ERASED!".format(
self.config.remote_folder_path
)
):
authorized = True
else:
self.logger.error(
"Could not proceed with clean up, operation not authorized by the user."
)
self._exit(1)
# we got remote manifest and remote files:
if not self.remote_storage.check_manifest_consistency(
remote_manifest
):
self.logger.error(
"Not all files in the manifest are present, | |
# Repository: typeworld/appengine
# project
import typeworldserver
from typeworldserver import mq
from typeworldserver import definitions
from typeworldserver import classes
from typeworldserver import helpers
# other
from google.cloud import ndb
import logging
import typeworld
import typeworld.client
import time
import urllib
import re
import random
import json
import semver
from flask import abort, g, redirect, Response
import traceback
# Register this module with the application's module list.
typeworldserver.app.config["modules"].append("api")
# NOTE(review): 'a' looks like leftover debug code; confirm nothing imports it before removing.
a = 10
def getStat(domain="api"):
    """Return the SystemStatistics entity for the current month, scoped to *domain*.

    For the "api" domain the scope is further split into "testing" or "live",
    depending on the request's ``testing`` form field.
    """
    current_month = time.strftime("%Y-%m")
    stat = classes.SystemStatistics.get_or_insert(current_month)
    domain_keys = [domain]
    if domain == "api":
        if g.form._get("testing") == "true":
            domain_keys.append("testing")
        else:
            domain_keys.append("live")
    stat.setDomain(domain_keys)
    return stat
@typeworldserver.app.route("/registerNewAPIEndpoint", methods=["POST"])
def registerNewAPIEndpoint():
    """Register an API endpoint (by its ``canonicalURL``) to the logged-in user.

    Returns rendered HTML; a 900 status signals a validation warning to the client.
    """
    if not g.user:
        return abort(401)
    # Validate the URL format only; reachability is deliberately not checked (see below).
    success, message = typeworld.client.urlIsValid(g.form._get("canonicalURL"))
    if not success:
        g.html.warning(message)
        return g.html.generate(), 900
    # Skipping this because server may not be reachable from here
    # success, message = twClient.getEndpointCommand(g.form._get('canonicalURL'))
    # if not success:
    #     g.html.warning(message)
    #     return g.html.generate(), 900
    endpoint = classes.APIEndpoint.get_or_insert(g.form._get("canonicalURL")) # , read_consistency=ndb.STRONG
    # An endpoint may belong to at most one user account.
    if endpoint.userKey and endpoint.userKey != g.user.key:
        g.html.warning("This API Endpoint is already registered with another user account.")
        return g.html.generate(), 900
    endpoint.userKey = g.user.key
    endpoint.updateJSON()
    endpoint.put()
    # Optionally trigger a client-side reload of the originating page.
    if not g.form._get("dataContainer") and g.form._get("reloadURL"):
        g.html.SCRIPT()
        g.html.T(
            "AJAX('#stage', '%s');"
            % helpers.addAttributeToURL(urllib.parse.unquote(g.form._get("reloadURL")), "inline=true")
        )
        g.html._SCRIPT()
    return g.html.generate()
# Tab definitions (URL, HTML label) shared by the /account pages.
userAccountTabs = [
    ["/account", '<span class="material-icons-outlined">account_circle</span> Account'],
    [
        "/account/signin",
        '<span class="material-icons-outlined">app_registration</span> Sign-In Apps & Websites',
    ],
]
@typeworldserver.app.route("/account", methods=["POST", "GET"])
def account():
    """Render the logged-in user's account page (profile + Pro subscription area)."""
    if not g.user:
        return redirect("/")
    g.html.tabs(userAccountTabs, "/account")
    g.html.DIV(class_="content")
    g.html.area("Type.World Account")
    g.user.container("userAccountView")
    g.html._area()
    # Stripe client library plus the site's billing glue, required by the subscription view below.
    g.html.T('<script src="https://js.stripe.com/v3/"></script>')
    g.html.T('<script src="/static/js/billing-stripe.js?v=' + g.instanceVersion + '"></script>')
    g.html.area("Pro User Subscription")
    g.user.container(
        "accountSubscriptionsView",
        parameters={"products": ["world.type.professional_user_plan"]},
    )
    g.html._area()
    g.html._DIV()
    return g.html.generate()
@typeworldserver.app.route("/account/signin", methods=["POST", "GET"])
def account_signin():
    """List the user's active (non-revoked) OAuth sign-in authorizations with revoke buttons."""
    if not g.user:
        return redirect("/")
    g.html.tabs(userAccountTabs, "/account/signin")
    g.html.DIV(class_="content")
    g.html.area("Sign-In Apps & Websites")
    tokens = classes.OAuthToken.query(
        classes.OAuthToken.userKey == g.user.key, classes.OAuthToken.revoked == False # noqa E712
    ).fetch()
    if tokens:
        for token in tokens:
            app = token.getApp()
            g.html.P()
            g.html.T(f'<b>{app.name}</b> at <a href="{app.websiteURL}">{app.websiteURL}</a>')
            g.html.BR()
            # Human-readable names of the scopes this token grants.
            g.html.T(
                "Scopes:"
                f" <em>{', '.join([definitions.SIGNINSCOPES[x]['name'] for x in token.oauthScopes.split(',')])}</em>"
            )
            g.html.BR()
            g.html.T(f"Authorization first given: {token.created}")
            g.html.BR()
            g.html.T(f"Last data access by app/website: {token.lastAccess}")
            g.html.BR()
            # Revoke button: asks for confirmation, then posts the token to /auth/revoketoken.
            g.html.A(
                class_="button",
                onclick=(
                    "if(confirm('Are you sure that you want to revoke this authorization? Hint: This will not delete"
                    " data of yours that the app/website already has. But it will disable future access to that"
                    f" data.')) {{ AJAX('#action', '/auth/revoketoken', {{'token': '{token.authToken}'}}); }}"
                ),
            )
            g.html.T("Revoke This Authorization")
            g.html._A()
            g.html._P()
    else:
        g.html.P()
        g.html.T("You have no running app/website authorizations.")
        g.html._P()
    g.html._area()
    g.html._DIV()
    return g.html.generate()
@typeworldserver.app.route("/addTestUserForAPIEndpoint", methods=["POST", "GET"])
def addTestUserForAPIEndpoint():
    """Add another Type.World user as a test user of an API endpoint.

    Expects ``canonicalURL`` (the endpoint's key) and ``email`` (the user to
    add) as form parameters. Renders an HTML warning on failure, otherwise
    registers the test user and optionally triggers a client-side reload.
    """
    # Security
    if not g.user:
        return abort(401)
    # Bug fix: look the endpoint up WITHOUT creating it. get_or_insert()
    # created a stub entity for any URL, which made the 404 branch below
    # unreachable and polluted the datastore.
    endpoint = classes.APIEndpoint.get_by_id(g.form._get("canonicalURL"))
    # Endpoint doesn't exist
    if not endpoint:
        return abort(404)
    # User doesn't hold endpoint
    if endpoint not in g.user.APIEndpoints():
        return abort(403)
    # Process
    testUsersForAPIEndpoint = endpoint.testUsers()
    testUsers = [x.userKey.get(read_consistency=ndb.STRONG) for x in testUsersForAPIEndpoint]
    if len(testUsers) >= definitions.AMOUNTTESTUSERSFORAPIENTPOINT:
        g.html.warning("Maximum amount of test users reached.")
        return g.html.generate()
    newUser = classes.User.query(classes.User.email == g.form._get("email")).get(read_consistency=ndb.STRONG)
    if not newUser:
        g.html.warning("User account doesn’t exist.")
        return g.html.generate()
    if newUser in testUsers:
        g.html.warning("User has already been added.")
        return g.html.generate()
    # Success:
    newTestUserForAPIEndpoint = classes.TestUserForAPIEndpoint(parent=endpoint.key, userKey=newUser.key)
    newTestUserForAPIEndpoint.put()
    # Optionally trigger a client-side reload of the originating page.
    if g.form._get("reloadURL"):
        g.html.SCRIPT()
        g.html.T(
            "AJAX('#stage', '%s');"
            % helpers.addAttributeToURL(urllib.parse.unquote(g.form._get("reloadURL")), "inline=true")
        )
        g.html._SCRIPT()
    return g.html.generate()
@typeworldserver.app.route("/stat", defaults={"month": None}, methods=["GET", "POST"])
@typeworldserver.app.route("/stat/<month>", methods=["GET", "POST"])
def statistics(month):
    """Admin-only statistics overview; optionally shows one month's raw JSON stats."""
    if not g.admin:
        return abort(401)
    g.html.mediumSeparator()
    g.html.DIV(class_="content")
    g.html.area("Server")
    g.html.P()
    g.html.T(f"GAE: {typeworldserver.GAE}")
    g.html.BR()
    g.html.T(f"LIVE: {typeworldserver.LIVE}")
    g.html._P()
    g.html._area()
    allStats = classes.SystemStatistics.query().fetch(read_consistency=ndb.STRONG)
    # Entities are keyed by "YYYY-MM", so sorting by key id sorts chronologically.
    allStats.sort(key=lambda x: x.key.id())
    g.html.area("Statistics")
    g.html.P()
    for stat in allStats:
        # NOTE(review): links point at /statistics/<month> but this handler is
        # routed at /stat/<month> — confirm a /statistics route exists elsewhere.
        g.html.A(href=f"/statistics/{stat.key.id()}")
        g.html.T(stat.key.id())
        g.html._A()
        g.html.BR()
    g.html._P()
    g.html._area()
    if month:
        g.html.area(month)
        stat = classes.SystemStatistics.get_or_insert(month)
        g.html.PRE()
        g.html.T(json.dumps(stat.jsonStats, indent=4, sort_keys=True))
        g.html._PRE()
        g.html._area()
    g.html._DIV()
    return g.html.generate()
def billNonCumulativeMetrics():
    """Trigger non-cumulative metrics billing for all API endpoints and sign-in apps.

    Results are printed (key/name, success flag, message) for inspection in the logs.
    """
    # Type.World Font Distribution: bill every endpoint that belongs to a user.
    for endpoint in classes.APIEndpoint.query().fetch():
        if not endpoint.user():
            continue
        success, message = endpoint.billNonCumulativeMetrics()
        print(endpoint.key, success, message)
    # Type.World Sign-In: bill every registered app.
    for app in classes.SignInApp.query().fetch():
        success, message = app.billNonCumulativeMetrics()
        print(app.name, success, message)
    print("done")
def saveStatistics():
    """Snapshot the global user and API-endpoint counts into this month's statistics entity."""
    monthly_stat = getStat("statistics")
    monthly_stat.bump(["amountUsers"], equals=classes.User.query().count())
    monthly_stat.bump(["amountAPIEndpoint"], equals=classes.APIEndpoint.query().count())
    monthly_stat.put()
    return "done"
@typeworldserver.app.route("/verifyemail/<code>", methods=["GET"])
def verifyemail(code):
    """Confirm a user's email address via the verification code from the email link.

    Also finalizes a pending email-address change, then redirects to the
    stored redirect URL if one was set, otherwise renders a confirmation page.
    """
    user = classes.User.query(classes.User.emailVerificationCode == code).get(read_consistency=ndb.STRONG)
    if not user:
        g.html.DIV(class_="content")
        g.html.T("No user could be found to verify for this code.")
        g.html._DIV()
        return g.html.generate()
    else:
        # Save before overwriting
        redirectURL = user.emailVerificationRedirectURL
        # Mark verified and invalidate the one-time code.
        user.emailVerified = True
        user.emailVerificationCode = None
        user.emailVerificationRedirectURL = None
        # Finalize a pending email-address change, if any.
        if user.emailToChange:
            user.email = user.emailToChange
            user.emailToChange = None
        user.putnow()
        user.propagateEmailChange()
        user.announceChange()
        if redirectURL:
            return redirect(redirectURL)
        else:
            g.html.DIV(class_="content")
            g.html.T(
                "Your email address has been verified, thank you. You may close this and return to the Type.World App."
            )
            g.html._DIV()
            return g.html.generate()
@typeworldserver.app.route("/v1/<commandName>", methods=["POST"])
def v1(commandName):
    """Central dispatcher for the v1 JSON API.

    Validates the command name, client version and required parameters, then
    dispatches to the module-level function of the same name, which mutates
    the ``responses`` dict in place. Always returns a JSON response.
    """
    # import cProfile
    # profile = cProfile.Profile()
    # profile.enable()
    # Log
    # stat = getStat()
    starttime = time.time()
    responses = {
        "response": "success",
    }
    # Test hooks: clients can request simulated server failures.
    testScenario = g.form._get("testScenario")
    if testScenario == "simulateCentralServerProgrammingError":
        # Deliberately raises (undefined name) to simulate a server-side crash.
        assert ewrfgnorebg # noqa F821
    if testScenario == "simulateCentralServerErrorInResponse":
        responses["response"] = "simulateCentralServerErrorInResponse"
        logging.warning("API Call Finished: %.2f s. Responses: %s" % (time.time() - starttime, responses))
        return Response(json.dumps(responses), mimetype="application/json")
    # Check if command exists
    if not commandName or commandName not in definitions.APICOMMANDS:
        responses["response"] = "commandUnknown"
        # stat.bump(['commandUnknown', '__undefined__'], 1)
        # stat.put()
        logging.warning("API Call Finished: %.2f s. Responses: %s" % (time.time() - starttime, responses))
        return Response(json.dumps(responses), mimetype="application/json")
    command = definitions.APICOMMANDS[commandName]
    print(
        "API Call: %s, Parameters: %s"
        % (
            commandName,
            ", ".join([f"{x}: {g.form._get(x)}" for x in list(command["parameters"])]),
        )
    )
    logging.warning(
        "API Call: %s, Parameters: %s"
        % (
            commandName,
            ", ".join([f"{x}: {g.form._get(x)}" for x in list(command["parameters"])]),
        )
    )
    # Version
    # Reject syntactically invalid client versions when the command requires one.
    if (
        g.form._get("clientVersion")
        and "clientVersion" in command["parameters"]
        and command["parameters"]["clientVersion"]["required"] is True
    ):
        try:
            semver.parse(g.form._get("clientVersion"))
        except ValueError:
            responses["response"] = "clientVersionInvalid"
            # stat.bump(['clientVersionInvalid'], 1)
            # stat.put()
            logging.warning("API Call Finished: %.2f s. Responses: %s" % (time.time() - starttime, responses))
            return Response(json.dumps(responses), mimetype="application/json")
    # logging.warning('API Call %s after version was verified: %.2f s'
    # % (commandName, time.time() - starttime))
    # Check that every required parameter is present.
    parameterStrings = []
    for parameter in command["parameters"]:
        parameterStrings.append("%s=%s" % (parameter, g.form._get(parameter)))
        if command["parameters"][parameter]["required"] is True and not g.form._get(parameter):
            responses["response"] = "Required parameter %s is missing." % parameter
            # stat.bump(['command', commandName, 'failed', 'missingParameter',
            # parameter], 1)
            # stat.put()
            logging.warning(
                "API Call %s finished: %.2f s. Responses: %s" % (commandName, time.time() - starttime, responses)
            )
            return Response(json.dumps(responses), mimetype="application/json")
    # logging.warning('API Call %s after parameters were verified: %.2f s' %
    # (commandName, time.time() - starttime))
    # Call method
    # Dispatch to the module-level handler of the same name; it mutates `responses`.
    if commandName in globals():
        globals()[commandName](responses)
        logging.warning("API Call %s after method was called: %.2f s" % (commandName, time.time() - starttime))
    else:
        responses["response"] = "commandUnknown"
        # stat.bump(['commandUnknown', commandName], 1)
        # stat.put()
        logging.warning(
            "API Call %s finished: %.2f s. Responses: %s" % (commandName, time.time() - starttime, responses)
        )
        return Response(json.dumps(responses), mimetype="application/json")
    # profile.disable()
    # profile.print_stats(sort="time")
    # stat.bump(['command', commandName, responses['response']], 1)
    # stat.put()
    logging.warning("API Call %s finished: %.2f s. Responses: %s" % (commandName, time.time() - starttime, responses))
    return Response(json.dumps(responses), mimetype="application/json")
def validateAPIEndpoint(responses):
    """Run the typeworld endpoint validator against the submitted subscription URL.

    Uses the versioned App Engine instance URL when running on a named
    instance, otherwise the production API endpoint.
    """
    if g.instanceVersion:
        MOTHERSHIP = f"https://{g.instanceVersion}-dot-typeworld2.appspot.com/v1"
    else:
        MOTHERSHIP = "https://api.type.world/v1"
    profiles = g.form._get("profiles").split(",")
    import typeworld.tools.validator

    # NOTE(review): rebinding the local name `responses` does not propagate the
    # result back to the caller (v1 ignores return values); this only works if
    # the validator also mutates the passed-in dict in place — confirm.
    responses = typeworld.tools.validator.validateAPIEndpoint(
        g.form._get("subscriptionURL"),
        profiles,
        endpointURL=MOTHERSHIP,
        responses=responses,
    )
def linkTypeWorldUserAccount(responses):
    """Link an anonymous app instance to an existing user account.

    Expects ``anonymousUserID``, ``secretKey`` and ``anonymousAppID`` form
    parameters. On success, fills ``userEmail``/``userName`` into *responses*;
    on failure, sets ``responses["response"]`` to an error code.
    """
    userKey = ndb.Key(urlsafe=g.form._get("anonymousUserID").encode())
    if not userKey.id():
        responses["response"] = "userUnknown"
        return
    user = userKey.get(read_consistency=ndb.STRONG)
    # Bug fix: a syntactically valid key may reference a deleted entity;
    # without this check the attribute accesses below raise AttributeError.
    if user is None:
        responses["response"] = "userUnknown"
        return
    if g.form._get("secretKey") != user.secretKey:
        responses["response"] = "secretKeyInvalid"
        return
    # Free accounts may link only one app instance; more require the Pro plan.
    if (
        not user.stripeSubscriptionReceivesService("world.type.professional_user_plan")
        and len(user.appInstances()) == 1
    ):
        responses["response"] = "linkingMoreAppInstancesRequiresProUserAccount"
        return
    # Create (or refresh) the app instance under the user's key.
    appInstance = classes.AppInstance(parent=userKey, id=g.form._get("anonymousAppID"))
    appInstance.updateUsage()
    appInstance.put()
    responses["userEmail"] = user.email
    responses["userName"] = user.name
def unlinkTypeWorldUserAccount(responses):
    """Unlink an app instance from a user account.

    Expects ``anonymousUserID``, ``secretKey`` and ``anonymousAppID`` form
    parameters; sets ``responses["response"]`` to an error code on failure.
    """
    if not g.form._get("anonymousUserID"):
        responses["response"] = "userUnknown"
        return
    k = ndb.Key(urlsafe=g.form._get("anonymousUserID").encode())
    # Fetch the entity once; the original code issued the same strongly
    # consistent datastore read twice in a row.
    user = k.get(read_consistency=ndb.STRONG) if k.id() else None
    if not user:
        responses["response"] = "userUnknown"
        return
    if g.form._get("secretKey") != user.secretKey:
        responses["response"] = "secretKeyInvalid"
        return
    # Delete every app instance matching the submitted ID.
    deleted = False
    for appInstance in user.appInstances():
        if appInstance.key.id() == g.form._get("anonymousAppID"):
            appInstance.key.delete()
            deleted = True
    if not deleted:
        responses["response"] = "appInstanceUnknown"
        return
@ndb.transactional()
def createUserAccount(responses):
    """Create a new user account from the request's form data.

    On success, fills ``anonymousUserID``, ``secretKey`` and ``name`` into
    *responses*; otherwise sets ``responses["response"]`` to an error code.
    Runs inside an NDB transaction.
    """
    previousUser = classes.User.query(classes.User.email == g.form._get("email")).get(read_consistency=ndb.STRONG)
    if previousUser:
        responses["response"] = "userExists"
        return
    # Verify the email address before touching the datastore.
    if helpers.verifyEmail(g.form._get("email")) is False:
        responses["response"] = "emailInvalid"
        return
    # Actually create the user.
    user = classes.User()
    user.email = g.form._get("email")
    user.secretKey = helpers.Garbage(40)
    success, message = user.setPassword(g.form._get("password"))
    if not success:
        responses["response"] = message
        return
    user.name = g.form._get("name")
    # Email verification: test runs authenticated via the shared secret skip it;
    # re-registrations arriving from the verification flow don't re-send the link.
    # (Leftover debug print of 'redirected_from' removed.)
    if g.form._get("SECRETKEY") == typeworldserver.secret("TEST"):
        user.emailVerified = True
    elif g.form._get("redirected_from") != "email-verification":
        user.sendEmailVerificationLink()
    user.put()
    responses["anonymousUserID"] = user.publicID()
    responses["secretKey"] = user.secretKey
    responses["name"] = user.name
@ndb.transactional()
def deleteUserAccount(responses):
    """Delete the user account identified by the email/password form parameters.

    Sets ``responses["response"]`` to an error code on failure and verifies
    the deletion with a strongly consistent re-query afterwards.
    """
    user = classes.User.query(classes.User.email == g.form._get("email")).get(read_consistency=ndb.STRONG)
    if not user:
        responses["response"] = "userUnknown"
        return
    if not user.checkPassword(g.form._get("password")):
        responses["response"] = "passwordInvalid"
        return
    # Delete. (Leftover debug print removed.)
    user.key.delete()
    # Paranoia check: make sure the entity is really gone.
    user = classes.User.query(classes.User.email == g.form._get("email")).get(read_consistency=ndb.STRONG)
    if user:
        responses["response"] = "userStillExists"
        return
def logInUserAccount(responses):
    """Authenticate a user by email/password and return their credentials.

    On failure, writes an error code into `responses` instead.
    """
    account = classes.User.query(
        classes.User.email == g.form._get("email")
    ).get(read_consistency=ndb.STRONG)
    # guard clauses: unknown user, then wrong password
    if not account:
        responses["response"] = "userUnknown"
        return
    if not account.checkPassword(g.form._get("password")):
        responses["response"] = "passwordInvalid"
        return
    responses["anonymousUserID"] = account.publicID()
    responses["secretKey"] = account.secretKey
    responses["name"] = account.name
def versions(responses):
    """Report the version of the typeworld API library in use."""
    responses["typeworld"] = typeworld.api.VERSION
def registerAPIEndpoint(responses):
if typeworld.client.urlIsValid(g.form._get("url"))[0] is False:
responses["response"] = "urlInvalid"
return
endpoint = classes.APIEndpoint.get_or_insert(g.form._get("url")) # , read_consistency=ndb.STRONG
updated, message = endpoint.updateJSON()
if updated is True:
| |
import json
import boto3
from datetime import datetime, timedelta
from botocore.exceptions import ClientError
import os
import time
def get_message_for_slack(event_details, event_type, affected_accounts, affected_entities, slack_webhook):
    """Build the Slack payload for an AWS Health event.

    slack_webhook == "webhook" selects the rich "attachments" layout,
    anything else a flat dict of fields. event_type ("create"/"resolve")
    selects the NEW vs RESOLVED wording; other values return "".
    """
    event = event_details['successfulSet'][0]['event']
    message = ""
    summary = ""
    if slack_webhook == "webhook":
        # multi-line account/entity lists for the attachment layout
        if len(affected_entities) >= 1:
            affected_entities = "\n".join(affected_entities)
            if affected_entities == "UNKNOWN":
                affected_entities = "All resources\nin region"
        else:
            affected_entities = "All resources\nin region"
        if len(affected_accounts) >= 1:
            affected_accounts = "\n".join(affected_accounts)
        else:
            affected_accounts = "All accounts\nin region"
        fields = [
            {"title": "Account(s)", "value": affected_accounts, "short": True},
            {"title": "Resource(s)", "value": affected_entities, "short": True},
            {"title": "Service", "value": event['service'], "short": True},
            {"title": "Region", "value": event['region'], "short": True},
            {"title": "Start Time (UTC)", "value": cleanup_time(event['startTime']), "short": True},
        ]
        color = None
        if event_type == "create":
            summary += (
                f":rotating_light:*[NEW] AWS Health reported an issue with the {event['service'].upper()} service in "
                f"the {event['region'].upper()} region.*"
            )
            color = "danger"
        elif event_type == "resolve":
            summary += (
                f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event['service'].upper()} service in "
                f"the {event['region'].upper()} region is now resolved.*"
            )
            color = "00ff00"
            # resolved events additionally carry an end time
            fields.append({"title": "End Time (UTC)", "value": cleanup_time(event['endTime']), "short": True})
        if color is not None:
            fields += [
                {"title": "Status", "value": event['statusCode'], "short": True},
                {"title": "Event ARN", "value": event['arn'], "short": False},
                {"title": "Updates", "value": get_last_aws_update(event_details), "short": False},
            ]
            message = {
                "text": summary,
                "attachments": [{"color": color, "fields": fields}],
            }
    else:
        # flat (non-webhook) format uses single-line fallbacks
        if len(affected_entities) >= 1:
            affected_entities = "\n".join(affected_entities)
            if affected_entities == "UNKNOWN":
                affected_entities = "All resources\nin region"
        else:
            affected_entities = "All resources in region"
        if len(affected_accounts) >= 1:
            affected_accounts = "\n".join(affected_accounts)
        else:
            affected_accounts = "All accounts in region"
        if event_type == "create":
            summary += (
                f":rotating_light:*[NEW] AWS Health reported an issue with the {event['service'].upper()} service in "
                f"the {event['region'].upper()} region.*"
            )
        elif event_type == "resolve":
            summary += (
                f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event['service'].upper()} service in "
                f"the {event['region'].upper()} region is now resolved.*"
            )
        if event_type in ("create", "resolve"):
            # create and resolve share the same flat schema (no end_time)
            message = {
                "text": summary,
                "accounts": affected_accounts,
                "resources": affected_entities,
                "service": event['service'],
                "region": event['region'],
                "start_time": cleanup_time(event['startTime']),
                "status": event['statusCode'],
                "event_arn": event['arn'],
                "updates": get_last_aws_update(event_details)
            }
    print("Message sent to Slack: ", message)
    return message
def get_message_for_eventbridge(event_details, event_type, affected_accounts, affected_entities):
    """Build the EventBridge payload for an AWS Health event.

    Returns an "attachments" dict for "create"/"resolve" events, otherwise
    the empty-string placeholder.
    """
    event = event_details['successfulSet'][0]['event']
    message = ""
    if len(affected_entities) >= 1:
        affected_entities = "\n".join(affected_entities)
        if affected_entities == "UNKNOWN":
            affected_entities = "All resources\nin region"
    else:
        affected_entities = "All resources\nin region"
    if len(affected_accounts) >= 1:
        affected_accounts = "\n".join(affected_accounts)
    else:
        affected_accounts = "All accounts\nin region"
    if event_type in ("create", "resolve"):
        fields = [
            {"title": "Account(s)", "value": affected_accounts, "short": True},
            {"title": "Resource(s)", "value": affected_entities, "short": True},
            {"title": "Service", "value": event['service'], "short": True},
            {"title": "Region", "value": event['region'], "short": True},
            {"title": "Start Time (UTC)", "value": cleanup_time(event['startTime']), "short": True},
        ]
        if event_type == "resolve":
            # resolved events additionally report when the issue ended
            fields.append({"title": "End Time (UTC)", "value": cleanup_time(event['endTime']), "short": True})
        fields += [
            {"title": "Status", "value": event['statusCode'], "short": True},
            {"title": "Event ARN", "value": event['arn'], "short": False},
            {"title": "Updates", "value": get_last_aws_update(event_details), "short": False},
        ]
        message = {"attachments": [{"fields": fields}]}
    print("SHD Message generated for EventBridge : ", message)
    return message
def get_org_message_for_eventbridge(event_details, event_type, affected_org_accounts, affected_org_entities):
    """Build the EventBridge payload for an organizational AWS Health event.

    Returns an "attachments" dict for "create"/"resolve" events, otherwise
    the empty-string placeholder.

    Fix: removed a stray `json.dumps(message)` whose return value was
    discarded (dead code doing pointless serialization work).
    """
    event = event_details['successfulSet'][0]['event']
    message = ""
    if len(affected_org_entities) >= 1:
        affected_org_entities = "\n".join(affected_org_entities)
    else:
        affected_org_entities = "All resources\nin region"
    if len(affected_org_accounts) >= 1:
        affected_org_accounts = "\n".join(affected_org_accounts)
    else:
        affected_org_accounts = "All accounts\nin region"
    if event_type in ("create", "resolve"):
        fields = [
            {"title": "Account(s)", "value": affected_org_accounts, "short": True},
            {"title": "Resource(s)", "value": affected_org_entities, "short": True},
            {"title": "Service", "value": event['service'], "short": True},
            {"title": "Region", "value": event['region'], "short": True},
            {"title": "Start Time (UTC)", "value": cleanup_time(event['startTime']), "short": True},
        ]
        if event_type == "resolve":
            # resolved events additionally report when the issue ended
            fields.append({"title": "End Time (UTC)", "value": cleanup_time(event['endTime']), "short": True})
        fields += [
            {"title": "Status", "value": event['statusCode'], "short": True},
            {"title": "Event ARN", "value": event['arn'], "short": False},
            {"title": "Updates", "value": get_last_aws_update(event_details), "short": False},
        ]
        message = {"attachments": [{"fields": fields}]}
    print("PHD/SHD Message generated for Event Bridge: ", message)
    return message
def get_org_message_for_slack(event_details, event_type, affected_org_accounts, affected_org_entities, slack_webhook):
    """Build the Slack payload for an organizational AWS Health event.

    slack_webhook == "webhook" selects the rich "attachments" layout,
    anything else a flat dict of fields. event_type ("create"/"resolve")
    selects the NEW vs RESOLVED wording; other values return "".

    Fix: removed a stray `json.dumps(message)` whose return value was
    discarded (dead code doing pointless serialization work).
    """
    event = event_details['successfulSet'][0]['event']
    message = ""
    summary = ""
    if slack_webhook == "webhook":
        # multi-line account/entity lists for the attachment layout
        if len(affected_org_entities) >= 1:
            affected_org_entities = "\n".join(affected_org_entities)
        else:
            affected_org_entities = "All resources\nin region"
        if len(affected_org_accounts) >= 1:
            affected_org_accounts = "\n".join(affected_org_accounts)
        else:
            affected_org_accounts = "All accounts\nin region"
        fields = [
            {"title": "Account(s)", "value": affected_org_accounts, "short": True},
            {"title": "Resource(s)", "value": affected_org_entities, "short": True},
            {"title": "Service", "value": event['service'], "short": True},
            {"title": "Region", "value": event['region'], "short": True},
            {"title": "Start Time (UTC)", "value": cleanup_time(event['startTime']), "short": True},
        ]
        color = None
        if event_type == "create":
            summary += (
                f":rotating_light:*[NEW] AWS Health reported an issue with the {event['service'].upper()} service in "
                f"the {event['region'].upper()} region.*"
            )
            color = "danger"
        elif event_type == "resolve":
            summary += (
                f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event['service'].upper()} service in "
                f"the {event['region'].upper()} region is now resolved.*"
            )
            color = "00ff00"
            # resolved events additionally carry an end time
            fields.append({"title": "End Time (UTC)", "value": cleanup_time(event['endTime']), "short": True})
        if color is not None:
            fields += [
                {"title": "Status", "value": event['statusCode'], "short": True},
                {"title": "Event ARN", "value": event['arn'], "short": False},
                {"title": "Updates", "value": get_last_aws_update(event_details), "short": False},
            ]
            message = {
                "text": summary,
                "attachments": [{"color": color, "fields": fields}],
            }
    else:
        # flat (non-webhook) format uses single-line fallbacks
        if len(affected_org_entities) >= 1:
            affected_org_entities = "\n".join(affected_org_entities)
        else:
            affected_org_entities = "All resources in region"
        if len(affected_org_accounts) >= 1:
            affected_org_accounts = "\n".join(affected_org_accounts)
        else:
            affected_org_accounts = "All accounts in region"
        if event_type == "create":
            summary += (
                f":rotating_light:*[NEW] AWS Health reported an issue with the {event['service'].upper()} service in "
                f"the {event['region'].upper()} region.*"
            )
        elif event_type == "resolve":
            summary += (
                f":heavy_check_mark:*[RESOLVED] The AWS Health issue with the {event['service'].upper()} service in "
                f"the {event['region'].upper()} region is now resolved.*"
            )
        if event_type in ("create", "resolve"):
            # create and resolve share the same flat schema (no end_time)
            message = {
                "text": summary,
                "accounts": affected_org_accounts,
                "resources": affected_org_entities,
                "service": event['service'],
                "region": event['region'],
                "start_time": cleanup_time(event['startTime']),
                "status": event['statusCode'],
                "event_arn": event['arn'],
                "updates": get_last_aws_update(event_details)
            }
    print("Message sent to Slack: ", message)
    return message
def | |
<filename>dev/archery/archery/release.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import abstractmethod
from collections import defaultdict
import functools
import re
import pathlib
import shelve
import warnings
from git import Repo
from jira import JIRA
from semver import VersionInfo as SemVer
from .utils.source import ArrowSources
from .utils.report import JinjaReport
from .utils.logger import logger
def cached_property(fn):
    # Memoize a zero-argument method as a read-only property.
    # NOTE(review): lru_cache(maxsize=1) keys on `self`, so the single cache
    # slot is shared across instances (a second instance evicts the first)
    # and the cache keeps the last `self` alive. functools.cached_property
    # would avoid both — confirm callers don't rely on eviction before
    # changing.
    return property(functools.lru_cache(maxsize=1)(fn))
class Version(SemVer):
    """Semantic version enriched with JIRA release metadata."""

    __slots__ = ('released', 'release_date')

    def __init__(self, released=False, release_date=None, **kwargs):
        super().__init__(**kwargs)
        self.released = released
        self.release_date = release_date

    @classmethod
    def parse(cls, version, **kwargs):
        """Parse a semver string, forwarding extra metadata keyword args."""
        parsed = SemVer.parse(version)
        return cls(**parsed.to_dict(), **kwargs)

    @classmethod
    def from_jira(cls, jira_version):
        """Build a Version from a JIRA version resource."""
        release_date = getattr(jira_version, 'releaseDate', None)
        return cls.parse(jira_version.name,
                         released=jira_version.released,
                         release_date=release_date)
class Issue:
    """Lightweight view of a JIRA issue: key, type and summary."""

    def __init__(self, key, type, summary):
        self.key = key
        self.type = type
        self.summary = summary

    @classmethod
    def from_jira(cls, jira_issue):
        """Build an Issue from a JIRA issue resource."""
        jira_fields = jira_issue.fields
        return cls(key=jira_issue.key,
                   type=jira_fields.issuetype.name,
                   summary=jira_fields.summary)

    @property
    def project(self):
        """Project prefix of the key, e.g. 'ARROW' for 'ARROW-123'."""
        parts = self.key.split('-')
        return parts[0]

    @property
    def number(self):
        """Numeric part of the key, e.g. 123 for 'ARROW-123'."""
        parts = self.key.split('-')
        return int(parts[1])
class Jira(JIRA):
    """JIRA client wrapper that returns archery's Version/Issue types."""

    def __init__(self, url='https://issues.apache.org/jira'):
        super().__init__(url)

    def project_version(self, version_string, project='ARROW'):
        # query the version from jira to populate it with additional metadata
        versions = {str(v): v for v in self.project_versions(project)}
        return versions[version_string]

    def project_versions(self, project):
        """Return the project's versions as Version objects, newest first."""
        versions = []
        for v in super().project_versions(project):
            try:
                versions.append(Version.from_jira(v))
            except ValueError:
                # ignore invalid semantic versions like JS-0.4.0
                continue
        return sorted(versions, reverse=True)

    def issue(self, key):
        """Fetch a single issue and wrap it as an Issue."""
        return Issue.from_jira(super().issue(key))

    def project_issues(self, version, project='ARROW'):
        """All issues whose fixVersion is `version`, as Issue objects."""
        query = "project={} AND fixVersion={}".format(project, version)
        issues = super().search_issues(query, maxResults=False)
        return list(map(Issue.from_jira, issues))
class CachedJira:
    """Proxy around a Jira client that memoizes method results on disk.

    Method calls are keyed by (name, args, kwargs) and persisted with
    shelve at `cache_path`; plain attributes are passed straight through.
    """

    def __init__(self, cache_path, jira=None):
        self.jira = jira or Jira()
        self.cache_path = cache_path

    def __getattr__(self, name):
        target = getattr(self.jira, name)
        if not callable(target):
            return target
        return self._cached(name, target)

    def _cached(self, name, method):
        def wrapper(*args, **kwargs):
            # stringified call signature serves as the shelf key
            key = str((name, args, kwargs))
            with shelve.open(self.cache_path) as cache:
                if key in cache:
                    return cache[key]
                result = method(*args, **kwargs)
                cache[key] = result
                return result
        return wrapper
# Parses commit headlines of the form
# "ARROW-1234: [Component1][Component2] Summary"; the issue key, the MINOR
# marker and the bracketed component list are all optional.
_TITLE_REGEX = re.compile(
    r"(?P<issue>(?P<project>(ARROW|PARQUET))\-\d+)?\s*:?\s*"
    r"(?P<minor>(MINOR))?\s*:?\s*"
    r"(?P<components>\[.*\])?\s*(?P<summary>.*)"
)
_COMPONENT_REGEX = re.compile(r"\[([^\[\]]+)\]")


class CommitTitle:
    """Structured representation of a commit headline."""

    def __init__(self, summary, project=None, issue=None, minor=None,
                 components=None):
        self.project = project
        self.issue = issue
        self.components = components or []
        self.summary = summary
        self.minor = bool(minor)

    def __str__(self):
        return self.to_string()

    def __eq__(self, other):
        return (self.summary == other.summary and
                self.project == other.project and
                self.issue == other.issue and
                self.minor == other.minor and
                self.components == other.components)

    def __hash__(self):
        return hash(
            (self.summary, self.project, self.issue, tuple(self.components))
        )

    @classmethod
    def parse(cls, headline):
        """Parse a commit headline, falling back to a bare summary."""
        match = _TITLE_REGEX.match(headline)
        if match is None:
            warnings.warn(
                "Unable to parse commit message `{}`".format(headline)
            )
            return CommitTitle(headline)
        groups = match.groupdict()
        components = _COMPONENT_REGEX.findall(groups.get('components') or '')
        return CommitTitle(
            groups['summary'],
            project=groups.get('project'),
            issue=groups.get('issue'),
            minor=groups.get('minor'),
            components=components
        )

    def to_string(self, with_issue=True, with_components=True):
        """Render back to headline form, optionally omitting parts."""
        pieces = []
        if with_issue and self.issue:
            pieces.append("{}: ".format(self.issue))
        if with_components and self.components:
            pieces.extend("[{}]".format(c) for c in self.components)
            pieces.append(" ")
        pieces.append(self.summary)
        return "".join(pieces)
class Commit:
    """Git commit wrapper exposing the parsed-title attributes as its own."""

    def __init__(self, wrapped):
        self._title = CommitTitle.parse(wrapped.summary)
        self._wrapped = wrapped

    def __getattr__(self, attr):
        # prefer attributes of the parsed title, then fall back to the
        # underlying git commit object
        title = self._title
        if hasattr(title, attr):
            return getattr(title, attr)
        return getattr(self._wrapped, attr)

    def __repr__(self):
        return (f'<Commit sha={self.hexsha!r} issue={self.issue!r} '
                f'components={self.components!r} summary={self.summary!r}>')

    @property
    def url(self):
        """GitHub URL of this commit in the apache/arrow repository."""
        return f'https://github.com/apache/arrow/commit/{self.hexsha}'

    @property
    def title(self):
        """The parsed CommitTitle of this commit."""
        return self._title
class ReleaseCuration(JinjaReport):
    """Console report of how commits map onto the release's JIRA tickets."""
    templates = {
        'console': 'release_curation.txt.j2'
    }
    # names of the context variables handed to the template
    fields = [
        'release',
        'within',
        'outside',
        'nojira',
        'parquet',
        'nopatch',
        'minimal'
    ]
class JiraChangelog(JinjaReport):
    """Changelog report (markdown or html) of the release's issues."""
    templates = {
        'markdown': 'release_changelog.md.j2',
        'html': 'release_changelog.html.j2'
    }
    # names of the context variables handed to the template
    fields = [
        'release',
        'categories'
    ]
class Release:
def __new__(self, version, jira=None, repo=None):
if isinstance(version, str):
version = Version.parse(version)
elif not isinstance(version, Version):
raise TypeError(version)
# decide the type of the release based on the version number
if version.patch == 0:
if version.minor == 0:
klass = MajorRelease
elif version.major == 0:
# handle minor releases before 1.0 as major releases
klass = MajorRelease
else:
klass = MinorRelease
else:
klass = PatchRelease
return super().__new__(klass)
    def __init__(self, version, jira, repo):
        """Normalize and validate the jira client, git repo and version.

        `jira` may be None (default server), a URL string, or a client;
        `repo` may be None (local arrow checkout), a path, or a Repo;
        `version` may be a string (resolved against JIRA) or a Version.
        """
        if jira is None:
            jira = Jira()
        elif isinstance(jira, str):
            # a bare server URL is accepted and turned into a client
            jira = Jira(jira)
        elif not isinstance(jira, (Jira, CachedJira)):
            raise TypeError("`jira` argument must be a server url or a valid "
                            "Jira instance")
        if repo is None:
            # default to the arrow checkout this archery install lives in
            arrow = ArrowSources.find()
            repo = Repo(arrow.path)
        elif isinstance(repo, (str, pathlib.Path)):
            repo = Repo(repo)
        elif not isinstance(repo, Repo):
            raise TypeError("`repo` argument must be a path or a valid Repo "
                            "instance")
        if isinstance(version, str):
            # resolve against JIRA to pick up released/release_date metadata
            version = jira.project_version(version, project='ARROW')
        elif not isinstance(version, Version):
            raise TypeError(version)
        self.version = version
        self.jira = jira
        self.repo = repo
def __repr__(self):
if self.version.released:
status = "released_at={self.version.release_date!r}"
else:
status = "pending"
return f"<{self.__class__.__name__} {self.version!r} {status}>"
    @staticmethod
    def from_jira(version, jira=None, repo=None):
        """Factory: build the appropriate Release subclass for `version`."""
        return Release(version, jira, repo)
    @property
    def is_released(self):
        """True when JIRA marks this version as released."""
        return self.version.released
    @property
    def tag(self):
        """Git tag name of this release, e.g. 'apache-arrow-4.0.0'."""
        return f"apache-arrow-{self.version}"
    @property
    @abstractmethod
    def branch(self):
        """
        Target branch that serves as the base for the release.

        Subclasses return the maintenance or main branch name.
        """
        ...
    @property
    @abstractmethod
    def siblings(self):
        """
        Releases to consider when calculating previous and next releases.

        Subclasses return the relevant subset of JIRA versions, newest first.
        """
        ...
@cached_property
def previous(self):
# select all non-patch releases
position = self.siblings.index(self.version)
try:
previous = self.siblings[position + 1]
except IndexError:
# first release doesn't have a previous one
return None
else:
return Release.from_jira(previous, jira=self.jira, repo=self.repo)
@cached_property
def next(self):
# select all non-patch releases
position = self.siblings.index(self.version)
if position <= 0:
raise ValueError("There is no upcoming release set in JIRA after "
f"version {self.version}")
upcoming = self.siblings[position - 1]
return Release.from_jira(upcoming, jira=self.jira, repo=self.repo)
    @cached_property
    def issues(self):
        """Mapping of issue key -> Issue for this release's fixVersion."""
        issues = self.jira.project_issues(self.version, project='ARROW')
        return {i.key: i for i in issues}
    @cached_property
    def commits(self):
        """
        All commits applied between two versions.

        Ranges from the previous release's tag (or the start of history)
        to this release's tag, or to its release branch if not yet
        released. Returns [] with a warning if the branch is missing.
        """
        if self.previous is None:
            # first release: empty lower bound means start of history
            lower = ''
        else:
            lower = self.repo.tags[self.previous.tag]
        if self.version.released:
            # released versions are tagged; pending ones only have a branch
            upper = self.repo.tags[self.tag]
        else:
            try:
                upper = self.repo.branches[self.branch]
            except IndexError:
                warnings.warn(f"Release branch `{self.branch}` doesn't exist.")
                return []
        commit_range = f"{lower}..{upper}"
        return list(map(Commit, self.repo.iter_commits(commit_range)))
def curate(self, minimal=False):
# handle commits with parquet issue key specially and query them from
# jira and add it to the issues
release_issues = self.issues
within, outside, nojira, parquet = [], [], [], []
for c in self.commits:
if c.issue is None:
nojira.append(c)
elif c.issue in release_issues:
within.append((release_issues[c.issue], c))
elif c.project == 'PARQUET':
parquet.append((self.jira.issue(c.issue), c))
else:
outside.append((self.jira.issue(c.issue), c))
# remaining jira tickets
within_keys = {i.key for i, c in within}
nopatch = [issue for key, issue in release_issues.items()
if key not in within_keys]
return ReleaseCuration(release=self, within=within, outside=outside,
nojira=nojira, parquet=parquet, nopatch=nopatch,
minimal=minimal)
def changelog(self):
issue_commit_pairs = []
# get organized report for the release
curation = self.curate()
# jira tickets having patches in the release
issue_commit_pairs.extend(curation.within)
# parquet patches in the release
issue_commit_pairs.extend(curation.parquet)
# jira tickets without patches
for issue in curation.nopatch:
issue_commit_pairs.append((issue, None))
# organize issues into categories
issue_types = {
'Bug': 'Bug Fixes',
'Improvement': 'New Features and Improvements',
'New Feature': 'New Features and Improvements',
'Sub-task': 'New Features and Improvements',
'Task': 'New Features and Improvements',
'Test': 'Bug Fixes',
'Wish': 'New Features and Improvements',
}
categories = defaultdict(list)
for issue, commit in issue_commit_pairs:
categories[issue_types[issue.type]].append((issue, commit))
# sort issues by the issue key in ascending order
for issues in categories.values():
issues.sort(key=lambda pair: (pair[0].project, pair[0].number))
return JiraChangelog(release=self, categories=categories)
def commits_to_pick(self, exclude_already_applied=True):
# collect commits applied on the main branch since the root of the
# maintenance branch (the previous major release)
commit_range = f"{self.previous.tag}..master"
# keeping the original order of the commits helps to minimize the merge
# conflicts during cherry-picks
commits = map(Commit, self.repo.iter_commits(commit_range))
# exclude patches that have been already applied to the maintenance
# branch, we cannot identify patches based on sha because it changes
# after the cherry pick so use commit title instead
if exclude_already_applied:
already_applied = {c.title for c in self.commits}
else:
already_applied = set()
# iterate over the commits applied on the main branch and filter out
# the ones that are included in the jira release
patches_to_pick = [c for c in commits if
c.issue in self.issues and
c.title not in already_applied]
return | |
"z", "delta", "extent", "origin"]
# self.nlevels = 7
self.set_max_nlevels = 8
self.list_nlevels = [0 for it in range(len(self.list_iterations))]
# storage
self.dfile_matrix = [0 for it in range(len(self.list_iterations))]
self.grid_matrix = [0 for it in range(len(self.list_iterations))]
self.grid_data_matrix = [[[np.zeros(0,)
for v_n in range(len(self.list_grid_v_ns))]
for rl in range(self.set_max_nlevels)]
for it in range(len(self.list_iterations))]
    def update_storage_lists(self, new_iterations=np.zeros(0,), new_times=np.zeros(0,)):
        """
        In case iteration/times are updated -- call to replace the lists and
        invalidate all per-iteration caches (h5 files, grids, grid data).

        NOTE(review): the empty-array defaults are shared objects but are
        never mutated here, so the mutable-default pitfall does not apply.
        :return:
        """
        if len(new_iterations) > 0 or len(new_times) > 0:
            assert len(new_iterations) == len(new_times)
            self.list_iterations = list(new_iterations)
            self.list_times = np.array(new_times)
            #
            # reset caches: 0 marks "not loaded", empty arrays "no grid data"
            self.dfile_matrix = [0 for it in range(len(self.list_iterations))]
            self.grid_matrix = [0 for it in range(len(self.list_iterations))]
            self.grid_data_matrix = [[[np.zeros(0,)
                                       for v_n in range(len(self.list_grid_v_ns))]
                                      for rl in range(self.set_max_nlevels)]
                                     for it in range(len(self.list_iterations))]
def check_prof_v_n(self, v_n):
if not v_n in self.list_prof_v_ns:
raise NameError("v_n:{} not in list of profile v_ns:{}"
.format(v_n, self.list_prof_v_ns))
def check_it(self, it):
if not int(it) in self.list_iterations:
raise NameError("it:{} not in list of iterations:{}"
.format(it, self.list_iterations))
    def i_it(self, it):
        """Index of iteration `it` within self.list_iterations."""
        return int(self.list_iterations.index(it))
def check_grid_v_n(self, v_n):
if not v_n in self.list_grid_v_ns:
raise NameError("v_n:{} not in list_grid_v_ns"
.format(v_n, self.list_grid_v_ns))
    def i_grid_v_n(self, v_n):
        """Index of grid variable `v_n` within self.list_grid_v_ns."""
        return int(self.list_grid_v_ns.index(v_n))
# ---
def load_dfile(self, it):
fpath = self.profpath + str(it) + ".h5"
if not os.path.isfile(fpath):
raise IOError("Expected file:{} NOT found"
.format(fpath))
try:
dfile = h5py.File(fpath, "r")
except IOError:
raise IOError("Cannot open file: {}".format(fpath))
reflevels = 0
for key in dfile.keys():
if key.__contains__("reflevel="):
reflevels += 1
# print("it:{} len(dfile.keys():{} dfile.keys():{} | {}".format(it, len(dfile.keys()), dfile.keys(), reflevels))
self.list_nlevels[self.i_it(it)] = reflevels
self.dfile_matrix[self.i_it(it)] = dfile
    def is_dfile_loaded(self, it):
        """Lazily open the profile file for `it` (int 0 marks 'not loaded')."""
        if isinstance(self.dfile_matrix[self.i_it(it)], int):  # dfile! not grid_matrix
            self.load_dfile(it)
def get_profile_dfile(self, it):
self.check_it(it)
self.is_dfile_loaded(it)
return self.dfile_matrix[self.i_it(it)]
# self.symmetry = symmetry
# self.nlevels = 7
# self.profile = fname
# self.dfile = h5py.File(fname, "r")
# group_0 = self.dfile["reflevel={}".format(0)]
# self.time = group_0.attrs["time"] * 0.004925794970773136 * 1e-3 # [sec]
# self.iteration = group_0.attrs["iteration"]
# print("\t\t symmetry: {}".format(self.symmetry))
# print("\t\t time: {}".format(self.time))
# print("\t\t iteration: {}".format(self.iteration))
# self.grid = self.read_carpet_grid(self.dfile)
#
# # print("grid: {}".format(self.grid))
#
#
#
# if self.symmetry == "pi" and not str(self.profile).__contains__("_PI"):
# raise NameError("profile {} does not seem to have a pi symmetry. Check"
# .format(self.profile))
def get_nlevels(self, it):
self.check_it(it)
self.is_dfile_loaded(it)
return int(self.list_nlevels[self.i_it(it)])
# ---
def get_group(self, it, rl):
self.check_it(it)
dfile = self.get_profile_dfile(it)
return dfile["reflevel={}".format(int(rl))]
def get_prof_time(self, it):
group0 = self.get_group(it, 0)
time = group0.attrs["time"] * 0.004925794970773136 * 1e-3 # [sec]
return time
# ---
    def read_carpet_grid(self, it):
        """Build and cache a scidata carpet grid object for iteration `it`.

        With enforce_xy_grid a 2D grid is built (z direction dropped from
        delta/origin); otherwise the full 3D grid. For 'pi' symmetry the
        x origin is mirrored from y. Levels are sorted by refinement level
        before being stored in self.grid_matrix.
        """
        import scidata.carpet.grid as grid
        L = []
        dfile = self.get_profile_dfile(it)
        nlevels = self.get_nlevels(it)
        if self.enforce_xy_grid:
            for il in range(nlevels):
                gname = "reflevel={}".format(il)
                group = dfile[gname]
                level = grid.basegrid()
                # drop the z component of the spacing for the 2D grid
                level.delta = np.array(group.attrs["delta"])[:-1]
                # print(level.delta); exit(1)
                # print("delta: {} ".format(np.array(group.attrs["delta"]))); exit(1)
                level.dim = 2
                level.time = group.attrs["time"]
                # level.timestep = group.attrs["timestep"]
                level.directions = range(2)
                level.iorigin = np.array([0, 0], dtype=np.int32)
                # extent[0::2] holds the lower bounds of each direction
                if self.symmetry == 'pi':
                    origin = np.array(group.attrs["extent"][0::2])
                    origin[0] = origin[1]  # x = y extend
                elif self.symmetry == None:
                    origin = np.array(group.attrs["extent"][0::2])
                    # print(origin)
                else:
                    raise NameError("symmetry is not recognized in a parfile. Set None or pi. Given:{}"
                                    .format(self.symmetry))
                level.origin = origin[:-1]  # [-1044. -1044. -20.]
                # print("sym: {} origin {} ".format(self.symmetry, origin)); exit()
                # level.n = np.array(group["rho"].shape, dtype=np.int32)
                level.n = np.array(self.get_prof_arr(it, il, 'rho').shape, dtype=np.int32)
                level.rlevel = il
                L.append(level)
        else:
            for il in range(nlevels):
                gname = "reflevel={}".format(il)
                group = dfile[gname]
                level = grid.basegrid()
                level.delta = np.array(group.attrs["delta"])
                # print("delta: {} ".format(np.array(group.attrs["delta"]))); exit(1)
                level.dim = 3
                level.time = group.attrs["time"]
                # level.timestep = group.attrs["timestep"]
                level.directions = range(3)
                level.iorigin = np.array([0, 0, 0], dtype=np.int32)
                # extent[0::2] holds the lower bounds of each direction
                if self.symmetry == 'pi':
                    origin = np.array(group.attrs["extent"][0::2])
                    origin[0] = origin[1]  # x = y extend
                elif self.symmetry == None:
                    origin = np.array(group.attrs["extent"][0::2])
                else:
                    raise NameError("symmetry is not recognized in a parfile. Set None or pi. Given:{}"
                                    .format(self.symmetry))
                level.origin = origin
                # print("sym: {} origin {} ".format(self.symmetry, origin)); exit()
                # level.n = np.array(group["rho"].shape, dtype=np.int32)
                level.n = np.array(self.get_prof_arr(it, il, 'rho').shape, dtype=np.int32)
                level.rlevel = il
                L.append(level)
        self.grid_matrix[self.i_it(it)] = \
            grid.grid(sorted(L, key=lambda x: x.rlevel))
def is_grid_extracted(self, it):
if isinstance(self.grid_matrix[self.i_it(it)], int):
self.read_carpet_grid(it)
def get_grid(self, it):
self.check_it(it)
self.is_grid_extracted(it)
return self.grid_matrix[self.i_it(it)]
# ---
def extract_prof_grid_data(self, it, rl):
if self.enforce_xy_grid:
grid = self.get_grid(it)
x, y = grid.mesh()[rl]
delta = grid[rl].delta
extent = self.get_group(it, rl).attrs["extent"]
origin = grid[rl].origin
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("x")] = x
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("y")] = y
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("delta")] = delta
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("extent")] = extent
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("origin")] = origin
else:
grid = self.get_grid(it)
x, y, z = grid.mesh()[rl]
delta = grid[rl].delta
extent = self.get_group(it, rl).attrs["extent"]
origin = grid[rl].origin
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("x")] = x
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("y")] = y
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("z")] = z
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("delta")] = delta
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("extent")] = extent
self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("origin")] = origin
def is_grid_data_extracted(self, it, rl):
if len(self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n("x")]) == 0:
self.extract_prof_grid_data(it, rl)
def get_grid_data(self, it, rl, v_n):
self.check_it(it)
self.check_grid_v_n(v_n)
self.is_grid_data_extracted(it, rl)
return self.grid_data_matrix[self.i_it(it)][rl][self.i_grid_v_n(v_n)]
# ---
def get_prof_arr(self, it, rl, v_n):
self.check_it(it)
self.check_prof_v_n(v_n)
group = self.get_group(it, rl)# self.dfile["reflevel={}".format(rl)]
try:
if self.enforce_xy_grid:
arr = np.array(group[v_n])[:, :, 0]
if self.symmetry == 'pi':
# print("rl: {} x:({}):[{:.1f},{:.1f}] y:({}):[{:.1f},{:.1f}] z:({}):[{:.1f},{:.1f}]"
# .format(rl, arr.shape, arr[0, 0, 0], arr[-1, 0, 0],
# arr.shape, arr[0, 0, 0], arr[0, -1, 0],
# arr.shape, arr[0, 0, 0], arr[0, 0, -1]))
### removing ghosts x[-2] x[-1] | x[0] x[1] x[2], to attach the x[-1] ... x[2] x[1] there
arr = np.delete(arr, 0, axis=0)
arr = np.delete(arr, 0, axis=0)
arr = np.delete(arr, 0, axis=0)
## flipping the array to get the following: Consider for section of xy plane:
## y>0 empy | [1] y>0 [2][::-1] | [1]
## y<0 empy | [2] -> y<0 [1][::-1] | [2]
## x<0 x>0 x<0 x>0
## This fills the grid from -x[-1] to x[-1], reproduing Pi symmetry.
arr_n = arr[::-1, ::-1]
arr = np.concatenate((arr_n, arr), axis=0)
# print("rl: {} x:({}):[{:.1f},{:.1f}] y:({}):[{:.1f},{:.1f}] z:({}):[{:.1f},{:.1f}]"
# .format(rl, arr.shape, arr[0, 0, 0], arr[-1, 0, 0],
# arr.shape, arr[0, 0, 0], arr[0, -1, 0],
# arr.shape, arr[0, 0, 0], arr[0, 0, -1]))
else:
arr = np.array(group[v_n])
if self.symmetry == 'pi':
# print("rl: {} x:({}):[{:.1f},{:.1f}] y:({}):[{:.1f},{:.1f}] z:({}):[{:.1f},{:.1f}]"
# .format(rl, arr.shape, arr[0, 0, 0], arr[-1, 0, 0],
# arr.shape, arr[0, 0, 0], arr[0, -1, 0],
# arr.shape, arr[0, 0, 0], arr[0, 0, -1]))
### removing ghosts x[-2] x[-1] | x[0] x[1] x[2], to attach the x[-1] ... x[2] x[1] there
arr = np.delete(arr, 0, axis=0)
arr = np.delete(arr, 0, axis=0)
arr = np.delete(arr, 0, axis=0)
## flipping the array to get the following: Consider for section of xy plane:
## y>0 empy | [1] y>0 [2][::-1] | [1]
## y<0 empy | [2] -> y<0 [1][::-1] | [2]
## x<0 x>0 x<0 x>0
## This fills the grid from -x[-1] to x[-1], reproduing Pi symmetry.
arr_n = arr[::-1, ::-1, :]
arr = np.concatenate((arr_n, arr), axis=0)
# print("rl: {} x:({}):[{:.1f},{:.1f}] y:({}):[{:.1f},{:.1f}] z:({}):[{:.1f},{:.1f}]"
# .format(rl, arr.shape, arr[0, 0, 0], arr[-1, 0, 0],
# arr.shape, arr[0, 0, 0], arr[0, -1, 0],
# arr.shape, arr[0, 0, 0], arr[0, 0, -1]))
except:
print('\nAvailable Parameters:')
print(list(v_n_aval for v_n_aval in group))
print('\n')
raise ValueError('Error extracting v_n:{} from profile for it:{} rl:{}'.format(v_n, it, rl))
return arr
# def __delete__(self, instance):
#
# instance.dfile_matrix = [0
# for it in range(len(self.list_iterations))]
# instance.grid_matrix = [0
# for it in range(len(self.list_iterations))]
# instance.grid_data_matrix = [[[np.zeros(0,)
# for v_n in range(len(self.list_grid_v_ns))]
# for rl in range(7)]
# for it in range(len(self.list_iterations))]
class COMPUTE_STORE(LOAD_PROFILE):
def __init__(self, sim, symmetry=None):
LOAD_PROFILE.__init__(self, sim, symmetry)
self.list_comp_v_ns = [
"density", "vup", "metric", "shift",
"enthalpy", "shvel", "u_0", "hu_0",
"vlow", "vphi", "vr",
"dens_unb_geo", "dens_unb_bern", "dens_unb_garch",
"ang_mom", "ang_mom_flux",
"theta", "r", "phi" # assumes cylindircal coordinates. r = x^2 + y^2
]
self.list_all_v_ns = self.list_prof_v_ns + \
self.list_grid_v_ns + \
self.list_comp_v_ns
self.data_matrix = [[[np.zeros(0,)
for y in range(len(self.list_all_v_ns))]
for x in range(self.set_max_nlevels)]
for i in range(len(self.list_iterations))]
def check_v_n(self, v_n):
if v_n not in self.list_all_v_ns:
raise NameError("v_n:{} not in the v_n list \n{}"
.format(v_n, self.list_all_v_ns))
def i_v_n(self, v_n):
self.check_v_n(v_n)
return int(self.list_all_v_ns.index(v_n))
def set_data(self, it, rl, v_n, arr):
self.data_matrix[self.i_it(it)][rl][self.i_v_n(v_n)] = arr
def extract_data(self, it, rl, v_n):
data = self.get_prof_arr(it, rl, v_n)
self.data_matrix[self.i_it(it)][rl][self.i_v_n(v_n)] = data
def extract_grid_data(self, it, rl, v_n):
if v_n in ["x", "y", "z"]:
self.data_matrix[self.i_it(it)][rl][self.i_v_n("x")] = self.get_grid_data(it, rl, "x")
self.data_matrix[self.i_it(it)][rl][self.i_v_n("y")] = self.get_grid_data(it, rl, "y")
self.data_matrix[self.i_it(it)][rl][self.i_v_n("z")] = self.get_grid_data(it, rl, "z")
elif v_n == "delta":
self.data_matrix[self.i_it(it)][rl][self.i_v_n("delta")] = self.get_grid_data(it, rl, "delta")
| |
self._leakDetector._index2containerId2len[self._index] = {}
ids = self._leakDetector.getContainerIds()
# record the current len of each container
for objId in ids:
yield None
try:
for result in self._leakDetector.getContainerByIdGen(objId):
yield None
container = result
except Exception, e:
# this container no longer exists
if self.notify.getDebug():
for contName in self._leakDetector.getContainerNameByIdGen(objId):
yield None
self.notify.debug(
'%s no longer exists; caught exception in getContainerById (%s)' % (
contName, e))
self._leakDetector.removeContainerById(objId)
continue
if container is None:
# this container no longer exists
if self.notify.getDebug():
for contName in self._leakDetector.getContainerNameByIdGen(objId):
yield None
self.notify.debug('%s no longer exists; getContainerById returned None' %
contName)
self._leakDetector.removeContainerById(objId)
continue
try:
cLen = len(container)
except Exception, e:
# this container no longer exists
if self.notify.getDebug():
for contName in self._leakDetector.getContainerNameByIdGen(objId):
yield None
self.notify.debug(
'%s is no longer a container, it is now %s (%s)' %
(contName, safeRepr(container), e))
self._leakDetector.removeContainerById(objId)
continue
self._leakDetector._index2containerId2len[self._index][objId] = cLen
# compare the current len of each container to past lens
if self._index > 0:
idx2id2len = self._leakDetector._index2containerId2len
for objId in idx2id2len[self._index]:
yield None
if objId in idx2id2len[self._index-1]:
diff = idx2id2len[self._index][objId] - idx2id2len[self._index-1][objId]
"""
# this check is too spammy
if diff > 20:
if diff > idx2id2len[self._index-1][objId]:
minutes = (self._leakDetector._index2delay[self._index] -
self._leakDetector._index2delay[self._index-1]) / 60.
name = self._leakDetector.getContainerNameById(objId)
if idx2id2len[self._index-1][objId] != 0:
percent = 100. * (float(diff) / float(idx2id2len[self._index-1][objId]))
try:
for container in self._leakDetector.getContainerByIdGen(objId):
yield None
except:
# TODO
self.notify.debug('caught exception in getContainerByIdGen (1)')
else:
self.notify.warning(
'%s (%s) grew %.2f%% in %.2f minutes (%s items at last measurement, current contents: %s)' % (
name, itype(container), percent, minutes, idx2id2len[self._index][objId],
fastRepr(container, maxLen=CheckContainers.ReprItems)))
yield None
"""
if (self._index > 2 and
objId in idx2id2len[self._index-2] and
objId in idx2id2len[self._index-3]):
diff2 = idx2id2len[self._index-1][objId] - idx2id2len[self._index-2][objId]
diff3 = idx2id2len[self._index-2][objId] - idx2id2len[self._index-3][objId]
if self._index <= 4:
if diff > 0 and diff2 > 0 and diff3 > 0:
name = self._leakDetector.getContainerNameById(objId)
try:
for container in self._leakDetector.getContainerByIdGen(objId):
yield None
except:
# TODO
self.notify.debug('caught exception in getContainerByIdGen (2)')
else:
msg = ('%s (%s) consistently increased in size over the last '
'3 periods (%s items at last measurement, current contents: %s)' %
(name, itype(container), idx2id2len[self._index][objId],
fastRepr(container, maxLen=CheckContainers.ReprItems)))
self.notify.warning(msg)
yield None
elif (objId in idx2id2len[self._index-4] and
objId in idx2id2len[self._index-5]):
# if size has consistently increased over the last 5 checks,
# send out a warning
diff4 = idx2id2len[self._index-3][objId] - idx2id2len[self._index-4][objId]
diff5 = idx2id2len[self._index-4][objId] - idx2id2len[self._index-5][objId]
if diff > 0 and diff2 > 0 and diff3 > 0 and diff4 > 0 and diff5 > 0:
name = self._leakDetector.getContainerNameById(objId)
try:
for container in self._leakDetector.getContainerByIdGen(objId):
yield None
except:
# TODO
self.notify.debug('caught exception in getContainerByIdGen (3)')
else:
msg = ('leak detected: %s (%s) consistently increased in size over the last '
'5 periods (%s items at last measurement, current contents: %s)' %
(name, itype(container), idx2id2len[self._index][objId],
fastRepr(container, maxLen=CheckContainers.ReprItems)))
self.notify.warning(msg)
yield None
messenger.send(self._leakDetector.getLeakEvent(), [container, name])
if config.GetBool('pdb-on-leak-detect', 0):
import pdb;pdb.set_trace()
pass
except Exception, e:
print 'CheckContainers job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
class FPTObjsOfType(Job):
    """Cooperative Job that Finds Paths To all tracked objects whose type
    name contains the given substring, printing each path as it is found.
    Python 2 generator-job: each 'yield None' returns control to the job
    manager."""
    def __init__(self, name, leakDetector, otn, doneCallback=None):
        # otn: substring matched (case-insensitively) against type names
        Job.__init__(self, name)
        self._leakDetector = leakDetector
        self.notify = self._leakDetector.notify
        self._otn = otn
        self._doneCallback = doneCallback
        # tear ourselves down if the leak detector is destroyed mid-run
        self._ldde = self._leakDetector._getDestroyEvent()
        self.accept(self._ldde, self._handleLDDestroy)
        # keep the leak detector from tracking this job's own containers
        ContainerLeakDetector.addPrivateObj(self.__dict__)
    def destroy(self):
        self.ignore(self._ldde)
        self._leakDetector = None
        self._doneCallback = None
        ContainerLeakDetector.removePrivateObj(self.__dict__)
        Job.destroy(self)
    def _handleLDDestroy(self):
        self.destroy()
    def getPriority(self):
        return Job.Priorities.High
    def run(self):
        ids = self._leakDetector.getContainerIds()
        try:
            for id in ids:
                # NOTE(review): substring test — any otn whose lowercase form
                # is a substring of 'dict' (e.g. 'ict') disables getInstance;
                # was `!= 'dict'` intended? confirm against callers
                getInstance = (self._otn.lower() not in 'dict')
                yield None
                try:
                    for container in self._leakDetector.getContainerByIdGen(
                        id, getInstance=getInstance):
                        yield None
                except:
                    # container lookup failed (stale reference); skip it
                    pass
                else:
                    if hasattr(container, '__class__'):
                        cName = container.__class__.__name__
                    else:
                        cName = container.__name__
                    if (self._otn.lower() in cName.lower()):
                        try:
                            for ptc in self._leakDetector.getContainerNameByIdGen(
                                id, getInstance=getInstance):
                                yield None
                        except:
                            # name lookup failed; skip it
                            pass
                        else:
                            print 'GPTC(' + self._otn + '):' + self.getJobName() + ': ' + ptc
        except Exception, e:
            print 'FPTObjsOfType job caught exception: %s' % e
            if __dev__:
                raise
        yield Job.Done
    def finished(self):
        if self._doneCallback:
            self._doneCallback(self)
class FPTObjsNamed(Job):
    """Cooperative Job that Finds Paths To all tracked objects whose name
    contains the given substring, printing each path as it is found.
    Python 2 generator-job: each 'yield None' returns control to the job
    manager."""
    def __init__(self, name, leakDetector, on, doneCallback=None):
        # on: substring matched (case-insensitively) against container names
        Job.__init__(self, name)
        self._leakDetector = leakDetector
        self.notify = self._leakDetector.notify
        self._on = on
        self._doneCallback = doneCallback
        # tear ourselves down if the leak detector is destroyed mid-run
        self._ldde = self._leakDetector._getDestroyEvent()
        self.accept(self._ldde, self._handleLDDestroy)
        # keep the leak detector from tracking this job's own containers
        ContainerLeakDetector.addPrivateObj(self.__dict__)
    def destroy(self):
        self.ignore(self._ldde)
        self._leakDetector = None
        self._doneCallback = None
        ContainerLeakDetector.removePrivateObj(self.__dict__)
        Job.destroy(self)
    def _handleLDDestroy(self):
        self.destroy()
    def getPriority(self):
        return Job.Priorities.High
    def run(self):
        ids = self._leakDetector.getContainerIds()
        try:
            for id in ids:
                yield None
                try:
                    for container in self._leakDetector.getContainerByIdGen(id):
                        yield None
                except:
                    # container lookup failed (stale reference); skip it
                    pass
                else:
                    name = self._leakDetector._id2ref[id].getFinalIndirectionStr()
                    if self._on.lower() in name.lower():
                        try:
                            for ptc in self._leakDetector.getContainerNameByIdGen(id):
                                yield None
                        except:
                            # name lookup failed; skip it
                            pass
                        else:
                            print 'GPTCN(' + self._on + '):' + self.getJobName() + ': ' + ptc
        except Exception, e:
            print 'FPTObjsNamed job caught exception: %s' % e
            if __dev__:
                raise
        yield Job.Done
    def finished(self):
        if self._doneCallback:
            self._doneCallback(self)
class PruneObjectRefs(Job):
    """
    Job to destroy any container refs that are no longer valid.
    Checks validity by asking each ref for its container; any lookup that
    raises is treated as a stale reference and removed. Prunes the leak
    detector's main table and both start-ref tables of the find job.
    """
    def __init__(self, name, leakDetector):
        Job.__init__(self, name)
        self._leakDetector = leakDetector
        self.notify = self._leakDetector.notify
        # keep the leak detector from tracking this job's own containers
        ContainerLeakDetector.addPrivateObj(self.__dict__)
    def destroy(self):
        ContainerLeakDetector.removePrivateObj(self.__dict__)
        Job.destroy(self)
    def getPriority(self):
        return Job.Priorities.Normal
    def run(self):
        # Python 2 generator-job: each 'yield None' returns control to the
        # job manager.
        try:
            # 1) prune the main id->ref table
            ids = self._leakDetector.getContainerIds()
            for id in ids:
                yield None
                try:
                    for container in self._leakDetector.getContainerByIdGen(id):
                        yield None
                except:
                    # reference is invalid, remove it
                    self._leakDetector.removeContainerById(id)
            # 2) prune the find job's base start-refs
            _id2baseStartRef = self._leakDetector._findContainersJob._id2baseStartRef
            ids = _id2baseStartRef.keys()
            for id in ids:
                yield None
                try:
                    for container in _id2baseStartRef[id].getContainerGen():
                        yield None
                except:
                    # reference is invalid, remove it
                    del _id2baseStartRef[id]
            # 3) prune the find job's discovered start-refs
            _id2discoveredStartRef = self._leakDetector._findContainersJob._id2discoveredStartRef
            ids = _id2discoveredStartRef.keys()
            for id in ids:
                yield None
                try:
                    for container in _id2discoveredStartRef[id].getContainerGen():
                        yield None
                except:
                    # reference is invalid, remove it
                    del _id2discoveredStartRef[id]
        except Exception, e:
            print 'PruneObjectRefs job caught exception: %s' % e
            if __dev__:
                raise
        yield Job.Done
class ContainerLeakDetector(Job):
"""
Low-priority Python object-graph walker that looks for leaking containers.
To reduce memory usage, this does a random walk of the Python objects to
discover containers rather than keep a set of all visited objects; it may
visit the same object many times but eventually it will discover every object.
Checks container sizes at ever-increasing intervals.
"""
notify = directNotify.newCategory("ContainerLeakDetector")
# set of containers that should not be examined
PrivateIds = set()
    def __init__(self, name, firstCheckDelay = None):
        # firstCheckDelay: seconds until the first container-length check;
        # defaults to 15 minutes (then halved, see below)
        Job.__init__(self, name)
        self._serialNum = serialNum()
        # child jobs, created lazily by run()/_checkForLeaks()/_pruneObjectRefs()
        self._findContainersJob = None
        self._checkContainersJob = None
        self._pruneContainersJob = None
        if firstCheckDelay is None:
            firstCheckDelay = 60. * 15.
        # divide by two, since the first check just takes length measurements and
        # doesn't check for leaks
        self._nextCheckDelay = firstCheckDelay/2.
        self._checkDelayScale = config.GetFloat('leak-detector-check-delay-scale', 1.5)
        self._pruneTaskPeriod = config.GetFloat('leak-detector-prune-period', 60. * 30.)
        # main dict of id(container)->containerRef
        self._id2ref = {}
        # storage for results of check-container job
        self._index2containerId2len = {}
        self._index2delay = {}
        # optional self-tests: deliberately create leaks to exercise detection
        if config.GetBool('leak-container', 0):
            _createContainerLeak()
        if config.GetBool('leak-tasks', 0):
            _createTaskLeak()
        # don't check our own tables for leaks
        ContainerLeakDetector.addPrivateObj(ContainerLeakDetector.PrivateIds)
        ContainerLeakDetector.addPrivateObj(self.__dict__)
        # lowest priority: this walker should never starve real work
        self.setPriority(Job.Priorities.Min)
        jobMgr.add(self)
    def destroy(self):
        # announce destruction first so dependent jobs (FPT*, etc.) can
        # clean themselves up before our tables go away
        messenger.send(self._getDestroyEvent())
        self.ignoreAll()
        # check/prune jobs may not have been started yet; find job always is
        if self._pruneContainersJob is not None:
            jobMgr.remove(self._pruneContainersJob)
            self._pruneContainersJob = None
        if self._checkContainersJob is not None:
            jobMgr.remove(self._checkContainersJob)
            self._checkContainersJob = None
        jobMgr.remove(self._findContainersJob)
        self._findContainersJob = None
        del self._id2ref
        del self._index2containerId2len
        del self._index2delay
def _getDestroyEvent(self):
# sent when leak detector is about to be destroyed
return 'cldDestroy-%s' % self._serialNum
def getLeakEvent(self):
# sent when a leak is detected
# passes description string as argument
return 'containerLeakDetected-%s' % self._serialNum
@classmethod
def addPrivateObj(cls, obj):
cls.PrivateIds.add(id(obj))
@classmethod
def removePrivateObj(cls, obj):
cls.PrivateIds.remove(id(obj))
def _getCheckTaskName(self):
return 'checkForLeakingContainers-%s' % self._serialNum
def _getPruneTaskName(self):
return 'pruneLeakingContainerRefs-%s' % self._serialNum
def getContainerIds(self):
return self._id2ref.keys()
def getContainerByIdGen(self, id, **kwArgs):
# return a generator to look up a container
return self._id2ref[id].getContainerGen(**kwArgs)
    def getContainerById(self, id):
        # exhaust the lookup generator synchronously, keeping the last
        # yielded value; if the generator yields nothing, `result` stays
        # unbound and this raises UnboundLocalError (deliberately loud)
        for result in self._id2ref[id].getContainerGen():
            pass
        return result
def getContainerNameByIdGen(self, id, **kwArgs):
return self._id2ref[id].getEvalStrGen(**kwArgs)
def getContainerNameById(self, id):
if id in self._id2ref:
return repr(self._id2ref[id])
return '<unknown container>'
def removeContainerById(self, id):
if id in self._id2ref:
self._id2ref[id].destroy()
del self._id2ref[id]
    def run(self):
        # generator-job body: kick off the container walker and the two
        # periodic doLater tasks, then sleep forever — all real work happens
        # in the child jobs/tasks
        # start looking for containers
        self._findContainersJob = FindContainers(
            '%s-findContainers' % self.getJobName(), self)
        jobMgr.add(self._findContainersJob)
        self._scheduleNextLeakCheck()
        self._scheduleNextPruning()
        while True:
            yield Job.Sleep
def getPathsToContainers(self, name, ot, doneCallback=None):
j = FPTObjsOfType(name, self, ot, doneCallback)
jobMgr.add(j)
return j
def getPathsToContainersNamed(self, name, on, doneCallback=None):
j = FPTObjsNamed(name, self, on, doneCallback)
jobMgr.add(j)
return j
def _scheduleNextLeakCheck(self):
taskMgr.doMethodLater(self._nextCheckDelay, self._checkForLeaks,
self._getCheckTaskName())
# delay between checks
# fib: 1 1 2 3 5 8 13 21 34 55 89
# * 2.: 1 2 4 8 16 32 64 128 256 512 1024
# * 1.5: 1 1.5 2.3 3.4 5.1 7.6 11.4 17.1 25.6 38.4 57.7
#
| |
# Repository: coldfix/probnum
"""
Continuous-Time priors for ODE solvers.
Currently, they are only relevant in the context of ODEs.
If needed in a more general setting, it is easy to move
them to statespace module (->thoughts?)
Matern will be easy to implement, just reuse the template
provided by IOUP and change parameters
"""
import math
import warnings

import numpy as np
from scipy.special import binom  # for Matern

from probnum.filtsmooth.statespace.continuous import LTISDEModel
from probnum.prob import RandomVariable, Normal
class ODEPrior(LTISDEModel):
    """
    Prior dynamic model for ODE filtering and smoothing.

    A continuous LTI state space model of integration order :math:`q`
    and spatial (ODE) dimension :math:`d`. Instead of the plain LTI SDE

    .. math:: d X(t) = [F X(t) + u] dt + L dB(t)

    the ODE prior evolves according to the preconditioned dynamics

    .. math:: dX(t) = P F P^{-1} X(t) dt + P L dB(t),

    with no drift term (:math:`u = 0`) and :math:`B(t)` an
    :math:`s`-dimensional Brownian motion with unit diffusion. The
    preconditioner

    .. math:: P = I_d \\otimes \\text{diag }(1, h, h^2, ..., h^q)

    maps the filter iterates to a Nordsieck-like vector, where :math:`h`
    is an expected average step size (factorials are deliberately
    ignored). Choosing :math:`h = 1` recovers :math:`P = I_{d(q+1)}`,
    i.e. no preconditioning; this is the default when no expected step
    size is available, and the natural choice for fixed-step algorithms.
    The preconditioning keeps the predictive covariances inside each
    filter iteration well-conditioned: for IBM(:math:`q`) priors their
    condition number then depends only on :math:`q`, not on the step
    size — provided all required derivatives of the ODE right-hand side
    are supplied (none for IBM(1), the Jacobian for IBM(2), the Hessian
    for IBM(3)); otherwise it still helps, just less so. Without it,
    predictive covariances can become numerically singular for small
    steps and high orders, which destabilizes smoothing in particular.
    """

    def __init__(self, driftmat, dispmat, ordint, spatialdim, precond_step=1.0):
        """
        Parameters
        ----------
        driftmat : np.ndarray
            Drift matrix :math:`F` (before preconditioning).
        dispmat : np.ndarray
            Dispersion matrix :math:`L` (before preconditioning).
        ordint : int
            Order of integration :math:`q`.
        spatialdim : int
            Spatial dimension :math:`d` of the underlying ODE.
        precond_step : float, optional
            Expected average step size :math:`h` used to build the
            preconditioner; ``1.0`` (default) disables preconditioning.
        """
        self.ordint = ordint
        self.spatialdim = spatialdim
        self.precond, self.invprecond = self.precond2nordsieck(precond_step)
        precond_drift = self.precond @ driftmat @ self.invprecond
        precond_disp = self.precond @ dispmat
        zero_force = np.zeros(len(driftmat))  # ODE priors have no drift term
        unit_diff = np.eye(spatialdim)        # unit diffusion of B(t)
        super().__init__(precond_drift, zero_force, precond_disp, unit_diff)

    def proj2coord(self, coord):
        """
        Projection matrix to the :math:`i`-th coordinates.

        Computes :math:`H_i = [I_d \\otimes e_i] P^{-1}`, which projects
        to the :math:`i`-th coordinate (the :math:`(i-1)`-th derivative
        estimate) of **each** dimension of a multidimensional ODE.

        Parameters
        ----------
        coord : int
            Coordinate index :math:`i` to project to, expected in range
            :math:`0 \\leq i \\leq q + 1`.

        Returns
        -------
        np.ndarray, shape=(d, d*(q+1))
            Projection matrix :math:`H_i`.
        """
        # unit row vector e_coord^T of length q+1
        unit_row = np.eye(1, self.ordint + 1, coord)
        block_proj = np.kron(np.eye(self.spatialdim), unit_row)
        return block_proj @ self.invprecond

    def precond2nordsieck(self, step):
        """
        Computes the Nordsieck-inspired preconditioner.

        Builds :math:`P = I_d \\otimes \\text{diag}(1, h, h^2, ..., h^q)`
        and its inverse :math:`P^{-1}`.

        Parameters
        ----------
        step : float
            Step size :math:`h` used for preconditioning. If
            :math:`h^q < 10^{-15}` (below machine precision), a
            RuntimeWarning is issued and :math:`h` is clamped to
            :math:`(10^{-15})^{1/q}`.

        Returns
        -------
        precond : np.ndarray, shape=(d(q+1), d(q+1))
            Preconditioner matrix :math:`P`.
        invprecond : np.ndarray, shape=(d(q+1), d(q+1))
            Inverse preconditioner matrix :math:`P^{-1}`.
        """
        smallval = step ** self.ordint
        if smallval < 1e-15:
            warnmsg = (
                "Preconditioner contains values below "
                "machine precision (%.1e)" % smallval
            )
            warnings.warn(message=warnmsg, category=RuntimeWarning)
            step = 1e-15 ** (1 / self.ordint)
        scales = step ** np.arange(self.ordint + 1)
        eye_d = np.eye(self.spatialdim)
        precond = np.kron(eye_d, np.diag(scales))
        invprecond = np.kron(eye_d, np.diag(1.0 / scales))
        return precond, invprecond

    @property
    def preconditioner(self):
        """Preconditioner matrix :math:`P`, shape (d(q+1), d(q+1))."""
        return self.precond

    @property
    def inverse_preconditioner(self):
        """Inverse preconditioner :math:`P^{-1}`, shape (d(q+1), d(q+1))."""
        return self.invprecond
class IBM(ODEPrior):
"""
Integrated Brownian motion of order :math:`q` prior.
The integrated Brownian motion prior is represented through
the LTI SDE
.. math:: dX(t) = F X(t) dt + L dB(t)
where for readibility reasons we did not write the preconditioner
matrix :math:`P`; see :class:`ODEPrior` for explanations.
- It has driftmatrix :math:`F` given by
.. math:: F = I_d \\otimes \\tilde F, \\quad
\\tilde F = \\begin{pmatrix} 0 & I_q \\\\ 0 & 0 \\end{pmatrix}
where the top left zero-vector has :math:`q` rows and 1 column.
- It has dispersion matrix :math:`L` given by
.. math:: L = I_d \\otimes \\tilde L, \\quad
\\tilde L = \\sigma \\, e_{q+1}
where :math:`\\sigma` is the diffusion constant, that is,
:math:`\\sigma^2` is the intensity of each dimension of the
:math:`d`-dimensional Brownian motion driving the SDE and
:math:`e_{q+1}=(0, ..., 0, 1)` is the :math:`(q+1)`-st unit vector.
- The Brownian motion :math:`B=B(t)` driving the SDE has unit
diffusion :math:`Q = I_d`.
Parameters
----------
ordint : int
Order of integration :math:`q`. The higher :math:`q`, the higher
the order of the ODE filter.
spatialdim : int
Spatial dimension :math:`d` of the ordinary differential
equation that is to be modelled.
diffconst : float
Diffusion constant :math:`sigma` of the stochastic process.
precond_step : float, optional
Expected step size :math:`h` used in the ODE filter.
This quantity is used for preconditioning, see :class:`ODEPrior`
for a clear explanation. Default is :math:`h=1`.
"""
def __init__(self, ordint, spatialdim, diffconst, precond_step=1.0):
"""
ordint : this is "q"
spatialdim : d
diffconst : sigma
"""
self.diffconst = diffconst
driftmat = _driftmat_ibm(ordint, spatialdim)
dispmat = _dispmat(ordint, spatialdim, diffconst)
super().__init__(driftmat, dispmat, ordint, spatialdim, precond_step)
def chapmankolmogorov(self, start, stop, step, randvar, *args, **kwargs):
"""
Closed form solution to the Chapman-Kolmogorov equations
for the integrated Brownian motion.
This is more stable than the matrix-exponential implementation
in :meth:`super().chapmankolmogorov` which is relevant for
combinations of large order :math:`q` and small steps :math:`h`.
In these cases even the preconditioning is subject to numerical
instability if the transition matrices :math:`A(h)`
and :math:`Q(h)` are computed with matrix exponentials.
"step" variable is obsolent here and is ignored.
"""
mean, covar = randvar.mean(), randvar.cov()
ah = self._trans_ibm(start, stop)
qh = self._transdiff_ibm(start, stop)
mpred = ah @ mean
crosscov = covar @ ah.T
cpred = ah @ crosscov + qh
return RandomVariable(distribution=Normal(mpred, cpred)), crosscov
def _trans_ibm(self, start, stop):
"""
Computes closed form solution for the transition matrix A(h).
"""
step = stop - start
ah_1d = np.array(
[
[
self._trans_ibm_element(step, row, col)
for col in range(self.ordint + 1)
]
for row in range(self.ordint + 1)
]
)
ah = np.kron(np.eye(self.spatialdim), ah_1d)
return self.precond @ ah @ self.invprecond
def _trans_ibm_element(self, stp, rw, cl):
"""Closed form for A(h)_ij"""
if rw <= cl:
return stp ** (cl - rw) / np.math.factorial(cl - rw)
else:
return 0.0
def _transdiff_ibm(self, start, stop):
"""
Computes closed form solution for the diffusion matrix Q(h).
"""
step = stop - start
qh_1d = np.array(
[
[
self._transdiff_ibm_element(step, row, col)
for col in range(self.ordint + 1)
]
for row in range(self.ordint + 1)
]
)
qh = np.kron(np.eye(self.spatialdim), qh_1d)
return self.precond @ qh @ self.precond.T
def _transdiff_ibm_element(self, stp, rw, cl):
"""Closed form for | |
self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
visual_shape = list(input_shape)
visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
visual_shape = torch.Size(visual_shape)
final_shape = list(input_shape)
final_shape[1] += visual_shape[1]
final_shape = torch.Size(final_shape)
visual_bbox = self._calc_visual_bbox(self.config.image_feature_pool_shape, bbox, device, final_shape)
final_bbox = torch.cat([bbox, visual_bbox], dim=1)
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
visual_attention_mask = torch.ones(visual_shape, device=device)
final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if position_ids is None:
seq_length = input_shape[1]
position_ids = self.embeddings.position_ids[:, :seq_length]
position_ids = position_ids.expand(input_shape)
visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
input_shape[0], 1
)
final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
text_layout_emb = self._calc_text_embeddings(
input_ids=input_ids,
bbox=bbox,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
)
visual_emb = self._calc_img_embeddings(
image=image,
bbox=visual_bbox,
position_ids=visual_position_ids,
)
final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)
extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
final_emb,
extended_attention_mask,
bbox=final_bbox,
position_ids=final_position_ids,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"""
LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the
final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual
    embeddings), e.g. for document image classification tasks such as the `RVL-CDIP
<https://www.cs.cmu.edu/~aharley/rvl-cdip/>`__ dataset.
""",
LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2ForSequenceClassification(LayoutLMv2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.layoutlmv2 = LayoutLMv2Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # 3 * hidden_size: the classifier consumes the concatenation of the
        # [CLS] hidden state with two average-pooled visual embeddings
        # (initial and final), per the class docstring
        self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
Examples:
```python
>>> from transformers import LayoutLMv2Processor, LayoutLMv2ForSequenceClassification
>>> from PIL import Image
>>> import torch
>>> processor = LayoutLMv2Processor.from_pretrained('microsoft/layoutlmv2-base-uncased')
>>> model = LayoutLMv2ForSequenceClassification.from_pretrained('microsoft/layoutlmv2-base-uncased')
>>> image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB")
>>> encoding = processor(image, return_tensors="pt")
>>> sequence_label = torch.tensor([1])
>>> outputs = model(**encoding, labels=sequence_label)
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
visual_shape = list(input_shape)
visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
visual_shape = torch.Size(visual_shape)
final_shape = list(input_shape)
final_shape[1] += visual_shape[1]
final_shape = torch.Size(final_shape)
visual_bbox = self.layoutlmv2._calc_visual_bbox(
self.config.image_feature_pool_shape, bbox, device, final_shape
)
visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
input_shape[0], 1
)
initial_image_embeddings = self.layoutlmv2._calc_img_embeddings(
image=image,
bbox=visual_bbox,
position_ids=visual_position_ids,
)
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
sequence_output, final_image_embeddings = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
cls_final_output = sequence_output[:, 0, :]
# average-pool the visual embeddings
pooled_initial_image_embeddings = initial_image_embeddings.mean(dim=1)
pooled_final_image_embeddings = final_image_embeddings.mean(dim=1)
# concatenate with cls_final_output
sequence_output = torch.cat(
[cls_final_output, pooled_initial_image_embeddings, pooled_final_image_embeddings], dim=1
)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
    """
    LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden
    states) e.g. for sequence labeling (information extraction) tasks such as `FUNSD
    <https://guillaumejaume.github.io/FUNSD/>`__, `SROIE <https://rrc.cvc.uab.es/?ch=13>`__, `CORD
    <https://github.com/clovaai/cord>`__ and `Kleister-NDA <https://github.com/applicaai/kleister-nda>`__.
    """,
    LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.layoutlmv2 = LayoutLMv2Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # Word embedding table of the underlying LayoutLMv2 encoder.
        return self.layoutlmv2.embeddings.word_embeddings

    @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        bbox=None,
        image=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification
        >>> from PIL import Image

        >>> processor = LayoutLMv2Processor.from_pretrained('microsoft/layoutlmv2-base-uncased', revision="no_ocr")
        >>> model = LayoutLMv2ForTokenClassification.from_pretrained('microsoft/layoutlmv2-base-uncased')

        >>> image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB")
        >>> words = ["hello", "world"]
        >>> boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]  # make sure to normalize your bounding boxes
        >>> word_labels = [0, 1]

        >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt")

        >>> outputs = model(**encoding)
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        if return_dict is None:
            return_dict = self.config.use_return_dict

        outputs = self.layoutlmv2(
            input_ids=input_ids,
            bbox=bbox,
            image=image,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # The encoder output carries [text tokens | visual tokens]; only the
        # text part is classified.
        seq_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)
        text_states = self.dropout(outputs[0][:, :seq_length])
        logits = self.classifier(text_states)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is None:
                loss = loss_fct(flat_logits, flat_labels)
            else:
                # restrict the loss to non-padded positions
                active = attention_mask.view(-1) == 1
                loss = loss_fct(flat_logits[active], flat_labels[active])

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
"""
LayoutLMv2 Model with a span classification head on top for extractive question-answering tasks such as `DocVQA
<https://rrc.cvc.uab.es/?ch=17>`__ (a linear layer on top of the text part of the hidden-states output to compute
`span start logits` and `span end logits`).
""",
LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2ForQuestionAnswering(LayoutLMv2PreTrainedModel):
    def __init__(self, config, has_visual_segment_embedding=True):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Record the choice on the config so the base model (and checkpoint
        # reloads) see the same setting.
        config.has_visual_segment_embedding = has_visual_segment_embedding
        self.layoutlmv2 = LayoutLMv2Model(config)
        # Maps each token state to per-label logits (presumably num_labels == 2
        # for start/end span logits -- confirm against the QA head convention).
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        """Return the word embedding module of the underlying LayoutLMv2 model."""
        return self.layoutlmv2.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
Returns:
| |
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
The matrix module defines an abstract :class:`Matrix` object and several
implementations. Matrix objects support basic addition and subtraction
operations and provide a consistent interface for solving linear systems.
Matrices can be converted into other forms suitable for external processing via
the ``export`` method.
"""
from . import numpy, numeric, warnings, cache, types, config, util
import abc, sys, ctypes, treelog as log
class MatrixError(Exception): pass
class Backend(metaclass=abc.ABCMeta):
    'backend base class'

    def __enter__(self):
        global _current_backend
        if hasattr(self, '_old_backend'):
            raise RuntimeError('This context manager is not reentrant.')
        # stash the active backend so it can be restored on exit
        self._old_backend = _current_backend
        _current_backend = self
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        global _current_backend
        if not hasattr(self, '_old_backend'):
            raise RuntimeError('This context manager is not yet entered.')
        # restore the backend that was active before __enter__
        _current_backend = self._old_backend
        del self._old_backend

    @abc.abstractmethod
    def assemble(self, data, index, shape):
        '''Assemble a (sparse) tensor based on index-value pairs.

        .. Note:: This function is abstract.
        '''
class Matrix(metaclass=types.CacheMeta):
    'matrix base class'

    def __init__(self, shape):
        # matrices are strictly two-dimensional
        assert len(shape) == 2
        self.shape = shape

    @abc.abstractmethod
    def __add__(self, other):
        'add two matrices'

    @abc.abstractmethod
    def __mul__(self, other):
        'multiply matrix with a scalar'

    @abc.abstractmethod
    def __neg__(self):
        # BUG FIX: unary negation takes no operand; the previous signature
        # ``__neg__(self, other)`` did not match Python's number protocol.
        'negate matrix'

    def __sub__(self, other):
        # subtraction is addition of the negation
        return self.__add__(-other)

    def __rmul__(self, other):
        # scalar multiplication commutes
        return self.__mul__(other)

    def __truediv__(self, other):
        return self.__mul__(1/other)

    @property
    @abc.abstractmethod
    def T(self):
        'transpose matrix'

    @property
    def size(self):
        # total number of entries: nrows * ncols
        return numpy.prod(self.shape)

    def rowsupp(self, tol=0):
        'return row indices with nonzero/non-small entries'
        data, (row, col) = self.export('coo')
        supp = numpy.zeros(self.shape[0], dtype=bool)
        supp[row[abs(data) > tol]] = True
        return supp

    @abc.abstractmethod
    def solve(self, rhs=None, *, lhs0=None, constrain=None, rconstrain=None, **solverargs):
        '''Solve system given right hand side vector and/or constraints.

        Args
        ----
        rhs : :class:`float` vector or :any:`None`
            Right hand side vector. `None` implies all zeros.
        lhs0 : :class:`float` vector or :any:`None`
            Initial values. `None` implies all zeros.
        constrain : :class:`float` or :class:`bool` array, or :any:`None`
            Column constraints. For float values, a number signifies a constraint,
            NaN signifies a free dof. For boolean, a True value signifies a
            constraint to the value in `lhs0`, a False value signifies a free dof.
            `None` implies no constraints.
        rconstrain : :class:`bool` array or :any:`None`
            Row constraints. A True value signifies a constraint, a False value a
            free dof. `None` implies that the constraints follow those defined in
            `constrain` (by implication the matrix must be square).

        Returns
        -------
        :class:`numpy.ndarray`
            Left hand side vector.
        '''

    @abc.abstractmethod
    def submatrix(self, rows, cols):
        '''Create submatrix from selected rows, columns.

        Args
        ----
        rows : :class:`bool`/:class:`int` array selecting rows for keeping
        cols : :class:`bool`/:class:`int` array selecting columns for keeping

        Returns
        -------
        :class:`Matrix`
            Matrix instance of reduced dimensions
        '''

    def export(self, form):
        '''Export matrix data to any of supported forms.

        Args
        ----
        form : :class:`str`
            - "dense" : return matrix as a single dense array
            - "csr" : return matrix as 3-tuple of (data, indices, indptr)
            - "coo" : return matrix as 2-tuple of (data, (row, col))
        '''
        raise NotImplementedError('cannot export {} to {!r}'.format(self.__class__.__name__, form))

    def __repr__(self):
        return '{}<{}x{}>'.format(type(self).__qualname__, *self.shape)
def preparesolvearguments(wrapped):
    '''Make rhs optional, add lhs0, constrain, rconstrain arguments.

    See Matrix.solve.'''

    def solve(self, rhs=None, *, lhs0=None, constrain=None, rconstrain=None, **solverargs):
        nrows, ncols = self.shape
        # initial guess: zeros unless lhs0 is supplied
        if lhs0 is None:
            x = numpy.zeros(ncols)
        else:
            x = numpy.array(lhs0, dtype=float)
            assert x.shape == (ncols,)
        # J marks the free (unconstrained) columns
        if constrain is None:
            J = numpy.ones(ncols, dtype=bool)
        else:
            assert constrain.shape == (ncols,)
            if constrain.dtype == bool:
                J = ~constrain
            else:
                J = numpy.isnan(constrain)
                # float constraints carry the prescribed values directly
                x[~J] = constrain[~J]
        # I marks the retained rows; defaults to J for square systems
        if rconstrain is None:
            assert nrows == ncols
            I = J
        else:
            # BUG FIX: validate rconstrain's own dtype; the previous check read
            # constrain.dtype, which tests the wrong array and raises an
            # AttributeError when constrain is None.
            assert rconstrain.shape == (nrows,) and rconstrain.dtype == bool
            I = ~rconstrain
        assert I.sum() == J.sum(), 'constrained matrix is not square: {}x{}'.format(I.sum(), J.sum())
        if rhs is None:
            rhs = 0.
        # residual restricted to the free rows
        b = (rhs - self.matvec(x))[J]
        if b.any():
            x[J] += wrapped(self if I.all() and J.all() else self.submatrix(I, J), b, **solverargs)
            if not numpy.isfinite(x).all():
                raise MatrixError('solver returned non-finite left hand side')
            log.info('solver returned with residual {:.0e}'.format(numpy.linalg.norm((rhs - self.matvec(x))[J])))
        else:
            log.info('skipping solver because initial vector is exact')
        return x

    return log.withcontext(solve)
## NUMPY BACKEND
class Numpy(Backend):
    '''matrix backend based on numpy array'''

    def assemble(self, data, index, shape):
        # accumulate duplicate index entries into a dense array; only rank-2
        # results are wrapped as a matrix, lower ranks are returned as-is
        dense = numeric.accumulate(data, index, shape)
        if len(shape) == 2:
            return NumpyMatrix(dense)
        return dense
class NumpyMatrix(Matrix):
    '''matrix based on numpy array'''

    def __init__(self, core):
        assert numeric.isarray(core)
        self.core = core
        super().__init__(core.shape)

    def __add__(self, other):
        if isinstance(other, NumpyMatrix) and self.shape == other.shape:
            return NumpyMatrix(self.core + other.core)
        return NotImplemented

    def __mul__(self, other):
        if numeric.isnumber(other):
            return NumpyMatrix(self.core * other)
        return NotImplemented

    def __neg__(self):
        return NumpyMatrix(-self.core)

    @property
    def T(self):
        return NumpyMatrix(self.core.T)

    def matvec(self, vec):
        return numpy.dot(self.core, vec)

    def export(self, form):
        if form == 'dense':
            return self.core
        if form == 'coo':
            nonzero = self.core.nonzero()
            return self.core[nonzero], nonzero
        if form == 'csr':
            rows, cols = self.core.nonzero()
            # indptr[i] is the position of the first entry of row i
            indptr = rows.searchsorted(numpy.arange(self.shape[0] + 1))
            return self.core[rows, cols], cols, indptr
        raise NotImplementedError('cannot export NumpyMatrix to {!r}'.format(form))

    def rowsupp(self, tol=0):
        # dense override of the base implementation: no need to go via 'coo'
        return numpy.greater(abs(self.core), tol).any(axis=1)

    @preparesolvearguments
    def solve(self, rhs):
        try:
            return numpy.linalg.solve(self.core, rhs)
        except numpy.linalg.LinAlgError as e:
            raise MatrixError(e) from e

    def submatrix(self, rows, cols):
        return NumpyMatrix(self.core[numpy.ix_(rows, cols)])
## SCIPY BACKEND
try:
import scipy.sparse.linalg
except ImportError:
pass
else:
class Scipy(Backend):
    '''matrix backend based on scipy's sparse matrices'''

    def assemble(self, data, index, shape):
        # scalars and vectors are accumulated densely; only rank-2 data
        # becomes a sparse matrix
        if len(shape) < 2:
            return numeric.accumulate(data, index, shape)
        if len(shape) == 2:
            csr = scipy.sparse.csr_matrix((data, index), shape)
            return ScipyMatrix(csr)
        raise MatrixError('{}d data not supported by scipy backend'.format(len(shape)))
class ScipyMatrix(Matrix):
'''matrix based on any of scipy's sparse matrices'''
    def __init__(self, core):
        # core may be any scipy sparse format; conversions happen on demand.
        # NOTE(review): unlike NumpyMatrix.__init__ there is no type assertion
        # on ``core`` here -- presumably intentional, but worth confirming.
        self.core = core
        super().__init__(core.shape)
    def __add__(self, other):
        # only same-shaped ScipyMatrix operands are supported
        if not isinstance(other, ScipyMatrix) or self.shape != other.shape:
            return NotImplemented
        return ScipyMatrix(self.core + other.core)
    def __sub__(self, other):
        # overrides the base-class add-negation fallback with a direct
        # sparse subtraction
        if not isinstance(other, ScipyMatrix) or self.shape != other.shape:
            return NotImplemented
        return ScipyMatrix(self.core - other.core)
    def __mul__(self, other):
        # scalar multiplication only; matrix-matrix products are unsupported
        if not numeric.isnumber(other):
            return NotImplemented
        return ScipyMatrix(self.core * other)
    def __neg__(self):
        # unary negation: negate all stored entries
        return ScipyMatrix(-self.core)
    def matvec(self, vec):
        # sparse matrix-vector product
        return self.core.dot(vec)
def export(self, form):
if form == 'dense':
return self.core.toarray()
if form == 'csr':
csr = self.core.tocsr()
return csr.data, csr.indices, csr.indptr
if form == 'coo':
coo = self.core.tocoo()
return coo.data, (coo.row, coo.col)
raise NotImplementedError('cannot export NumpyMatrix to {!r}'.format(form))
    @property
    def T(self):
        # transpose as a new ScipyMatrix view
        return ScipyMatrix(self.core.transpose())
    @preparesolvearguments
    def solve(self, rhs, atol=0, solver='spsolve', callback=None, precon=None, **solverargs):
        # direct sparse solve is the default and needs no tolerance
        if solver == 'spsolve':
            log.info('solving system using sparse direct solver')
            return scipy.sparse.linalg.spsolve(self.core, rhs)
        # iterative path: scipy's stopping criterion is relative to the rhs,
        # so the absolute tolerance is converted below via normalization
        assert atol, 'tolerance must be specified for iterative solver'
        rhsnorm = numpy.linalg.norm(rhs)
        if rhsnorm <= atol:
            # zero vector already satisfies the tolerance
            return numpy.zeros(self.shape[1])
        log.info('solving system using {} iterative solver'.format(solver))
        solverfun = getattr(scipy.sparse.linalg, solver)
        myrhs = rhs / rhsnorm # normalize right hand side vector for best control over scipy's stopping criterion
        mytol = atol / rhsnorm
        niter = numpy.array(0)
        def mycallback(arg):
            niter[...] += 1
            # some solvers provide the residual, others the left hand side vector
            res = numpy.linalg.norm(myrhs - self.matvec(arg)) if numpy.ndim(arg) == 1 else float(arg)
            if callback:
                callback(res)
            with log.context('residual {:.2e} ({:.0f}%)'.format(res, 100. * numpy.log10(res) / numpy.log10(mytol) if res > 0 else 0)):
                pass
        # precon may be a named built-in (str), a factory (callable), or a
        # ready-made preconditioner object
        M = self.getprecon(precon) if isinstance(precon, str) else precon(self.core) if callable(precon) else precon
        mylhs, status = solverfun(self.core, myrhs, M=M, tol=mytol, callback=mycallback, **solverargs)
        if status != 0:
            raise MatrixError('{} solver failed with status {}'.format(solver, status))
        log.info('solver converged in {} iterations'.format(niter))
        # undo the rhs normalization
        return mylhs * rhsnorm
def getprecon(self, name):
name = name.lower()
assert self.shape[0] == self.shape[1], 'constrained matrix must be square'
log.info('building {} preconditioner'.format(name))
if name == 'splu':
try:
precon = scipy.sparse.linalg.splu(self.core.tocsc()).solve
except RuntimeError as | |
2}
else:
self.bands_rgb = {}
for k in bands_rgb.keys():
band = bands_rgb[k]
ind = band_prefixes.index(band)
self.bands_rgb[k] = ind
else:
for f in self.files:
extension = f.split('.')[-1]
if extension == 'fz' or extension == 'gz':
extension = '.'.join(f.split('.')[-2:])
if extension in self.known_file_types:
try:
astro_img = AstroImage([f],
file_type=extension,
fits_index=fits_index)
images[astro_img.name] = astro_img
except Exception as e:
msg = "Cannot read image " + f + "\n \
Exception is: " + (str)(e)
logging_tools.log(msg, level="ERROR")
if len(list(images.keys())) == 0:
msg = "No images found, Astronomaly cannot proceed."
logging_tools.log(msg, level="ERROR")
raise IOError(msg)
try:
self.window_size_x = window_size[0]
self.window_size_y = window_size[1]
except TypeError:
self.window_size_x = window_size
self.window_size_y = window_size
# Allows sliding windows
if window_shift is not None:
try:
self.window_shift_x = window_shift[0]
self.window_shift_y = window_shift[1]
except TypeError:
self.window_shift_x = window_shift
self.window_shift_y = window_shift
else:
self.window_shift_x = self.window_size_x
self.window_shift_y = self.window_size_y
self.images = images
self.transform_function = transform_function
if display_transform_function is None:
self.display_transform_function = transform_function
else:
self.display_transform_function = display_transform_function
self.plot_square = plot_square
self.plot_cmap = plot_cmap
self.catalogue = catalogue
self.display_image_size = display_image_size
self.band_prefixes = band_prefixes
self.metadata = pd.DataFrame(data=[])
if self.catalogue is None:
self.create_catalogue()
else:
self.convert_catalogue_to_metadata()
print('A catalogue of ', len(self.metadata),
'sources has been provided.')
if 'original_image' in self.metadata.columns:
for img in np.unique(self.metadata.original_image):
if img not in images.keys():
logging_tools.log('Image ' + img + """ found in catalogue
but not in provided image data. Removing from
catalogue.""", level='WARNING')
msk = self.metadata.original_image == img
self.metadata.drop(self.metadata.index[msk], inplace=True)
print('Catalogue reduced to ', len(self.metadata),
'sources')
self.index = self.metadata.index.values
def create_catalogue(self):
"""
If a catalogue is not supplied, this will generate one by cutting up
the image into cutouts.
"""
print('No catalogue found, one will automatically be generated by \
splitting the image into cutouts governed by the window_size..')
for image_name in list(self.images.keys()):
astro_img = self.images[image_name]
img_shape = astro_img.get_image_shape()
# Remember, numpy array index of [row, column]
# corresponds to [y, x]
xvals = np.arange(self.window_size_x // 2,
img_shape[1] - self.window_size_x // 2,
self.window_shift_x)
yvals = np.arange(self.window_size_y // 2,
img_shape[0] - self.window_size_y // 2,
self.window_shift_y)
X, Y = np.meshgrid(xvals, yvals)
x_coords = X.ravel()
y_coords = Y.ravel()
ra, dec = astro_img.get_coords(x_coords, y_coords)
original_image_names = [image_name] * len(x_coords)
new_df = pd.DataFrame(data={
'original_image': original_image_names,
'x': x_coords,
'y': y_coords,
'ra': ra,
'dec': dec,
'peak_flux': [-1] * len(ra)})
self.metadata = pd.concat((self.metadata, new_df),
ignore_index=True)
self.metadata.index = self.metadata.index.astype('str')
print('A catalogue of ', len(self.metadata), 'cutouts has been \
created.')
print('Done!')
def convert_catalogue_to_metadata(self):
if 'original_image' not in self.catalogue.columns:
if len(self.images) > 1:
logging_tools.log("""If multiple fits images are used the
original_image column must be provided in
the catalogue to identify which image the
source belongs to.""",
level='ERROR')
raise ValueError("Incorrect input supplied")
else:
self.catalogue['original_image'] = \
[list(self.images.keys())[0]] * len(self.catalogue)
if 'objid' not in self.catalogue.columns:
self.catalogue['objid'] = np.arange(len(self.catalogue))
if 'peak_flux' not in self.catalogue.columns:
self.catalogue['peak_flux'] = [np.NaN] * len(self.catalogue)
cols = ['original_image', 'x', 'y']
for c in cols[1:]:
if c not in self.catalogue.columns:
logging_tools.log("""If a catalogue is provided the x and y
columns (corresponding to pixel values) must be present""",
level='ERROR')
raise ValueError("Incorrect input supplied")
if 'ra' in self.catalogue.columns:
cols.append('ra')
if 'dec' in self.catalogue.columns:
cols.append('dec')
if 'peak_flux' in self.catalogue.columns:
cols.append('peak_flux')
met = {}
for c in cols:
met[c] = self.catalogue[c].values
the_index = np.array(self.catalogue['objid'].values, dtype='str')
self.metadata = pd.DataFrame(met, index=the_index)
self.metadata['x'] = self.metadata['x'].astype('int')
self.metadata['y'] = self.metadata['y'].astype('int')
    def get_sample(self, idx):
        """
        Returns the data for a single sample in the dataset as indexed by idx.

        Parameters
        ----------
        idx : string
            Index of sample

        Returns
        -------
        nd.array
            Array of image cutout
        """
        x0 = self.metadata.loc[idx, 'x']
        y0 = self.metadata.loc[idx, 'y']
        original_image = self.metadata.loc[idx, 'original_image']
        this_image = self.images[original_image]
        # half-widths of the cutout window
        x_wid = self.window_size_x // 2
        y_wid = self.window_size_y // 2
        y_start = y0 - y_wid
        y_end = y0 + y_wid
        x_start = x0 - x_wid
        x_end = x0 + x_wid
        # NOTE(review): here y is bounded by NAXIS1 and x by NAXIS2, while
        # get_display_data clips x against NAXIS1 and y against NAXIS2 -- the
        # two orderings disagree; confirm against the AstroImage axis
        # convention.
        invalid_y = y_start < 0 or y_end > this_image.metadata['NAXIS1']
        invalid_x = x_start < 0 or x_end > this_image.metadata['NAXIS2']
        if invalid_y or invalid_x:
            # window falls (partly) outside the image: return an all-NaN
            # cutout of the expected shape instead of raising
            naxis3_present = 'NAXIS3' in this_image.metadata.keys()
            if naxis3_present and this_image.metadata['NAXIS3'] > 1:
                shp = [self.window_size_y,
                       self.window_size_x,
                       this_image.metadata['NAXIS3']]
            else:
                shp = [self.window_size_y, self.window_size_x]
            cutout = np.ones((shp)) * np.nan
        else:
            cutout = this_image.get_image_data(y_start, y_end, x_start, x_end)
        # lazily cache peak_flux on first access (-1 marks "not computed yet")
        if self.metadata.loc[idx, 'peak_flux'] == -1:
            if np.any(np.isnan(cutout)):
                flx = -1
            else:
                flx = np.max(cutout)
            self.metadata.loc[idx, 'peak_flux'] = flx
        cutout = apply_transform(cutout, self.transform_function)
        return cutout
    def get_display_data(self, idx):
        """
        Returns a single instance of the dataset in a form that is ready to be
        displayed by the web front end.

        Parameters
        ----------
        idx : str
            Index (should be a string to avoid ambiguity)

        Returns
        -------
        png image object
            Object ready to be passed directly to the frontend
        """
        try:
            img_name = self.metadata.loc[idx, 'original_image']
        except KeyError:
            # unknown index: nothing to display
            return None
        this_image = self.images[img_name]
        x0 = self.metadata.loc[idx, 'x']
        y0 = self.metadata.loc[idx, 'y']
        # display window is 1.5x the cutout window on each side for context
        factor = 1.5
        xmin = (int)(x0 - self.window_size_x * factor)
        xmax = (int)(x0 + self.window_size_x * factor)
        ymin = (int)(y0 - self.window_size_y * factor)
        ymax = (int)(y0 + self.window_size_y * factor)
        # NOTE(review): x is clipped against NAXIS1 and y against NAXIS2 here,
        # which is the opposite of the bounds checks in get_sample; confirm
        # which axis convention AstroImage.metadata actually uses.
        xstart = max(xmin, 0)
        xend = min(xmax, this_image.metadata['NAXIS1'])
        ystart = max(ymin, 0)
        yend = min(ymax, this_image.metadata['NAXIS2'])
        tot_size_x = int(2 * self.window_size_x * factor)
        tot_size_y = int(2 * self.window_size_y * factor)
        naxis3_present = 'NAXIS3' in this_image.metadata.keys()
        if naxis3_present and this_image.metadata['NAXIS3'] > 1:
            shp = [tot_size_y, tot_size_x, this_image.metadata['NAXIS3']]
        else:
            shp = [tot_size_y, tot_size_x]
        # zero-padded canvas; data is pasted at the clipped offsets below
        cutout = np.zeros(shp)
        img_data = this_image.get_image_data(ystart, yend, xstart, xend)
        cutout[ystart - ymin:yend - ymin,
               xstart - xmin:xend - xmin] = img_data
        cutout = np.nan_to_num(cutout)
        cutout = apply_transform(cutout, self.display_transform_function)
        # multi-band images are reduced to an RGB composite using the
        # configured band indices
        if len(cutout.shape) > 2 and cutout.shape[-1] >= 3:
            new_cutout = np.zeros([cutout.shape[0], cutout.shape[1], 3])
            new_cutout[:, :, 0] = cutout[:, :, self.bands_rgb['r']]
            new_cutout[:, :, 1] = cutout[:, :, self.bands_rgb['g']]
            new_cutout[:, :, 2] = cutout[:, :, self.bands_rgb['b']]
            cutout = new_cutout
        if self.plot_square:
            # draw a max-brightness box marking the actual cutout region
            offset_x = (tot_size_x - self.window_size_x) // 2
            offset_y = (tot_size_y - self.window_size_y) // 2
            x1 = offset_x
            x2 = tot_size_x - offset_x
            y1 = offset_y
            y2 = tot_size_y - offset_y
            mx = cutout.max()
            cutout[y1:y2, x1] = mx
            cutout[y1:y2, x2] = mx
            cutout[y1, x1:x2] = mx
            cutout[y2, x1:x2] = mx
        # rescale (preserving aspect ratio) so the longest edge matches the
        # requested display size
        min_edge = min(cutout.shape[:2])
        max_edge = max(cutout.shape[:2])
        if max_edge != self.display_image_size:
            new_max = self.display_image_size
            new_min = int(min_edge * new_max / max_edge)
            if cutout.shape[0] <= cutout.shape[1]:
                new_shape = [new_min, new_max]
            else:
                new_shape = [new_max, new_min]
            if len(cutout.shape) > 2:
                new_shape.append(cutout.shape[-1])
            cutout = resize(cutout, new_shape, anti_aliasing=False)
        return convert_array_to_image(cutout, plot_cmap=self.plot_cmap)
class ImageThumbnailsDataset(Dataset):
def __init__(self, display_image_size=128, transform_function=None,
display_transform_function=None,
catalogue=None, additional_metadata=None, **kwargs):
"""
Read in a set of images that have already been cut into thumbnails.
This would be uncommon with astronomical data but is needed to read a
dataset like galaxy zoo. Inherits from Dataset class.
Parameters
----------
filename : str
If a single file (of any time) is to be read from, the path can be
given using this kwarg.
directory : str
A directory can be given instead of an explicit list of files. The
child class will load all appropriate files in this directory.
list_of_files : list
Instead of the above, a list of files to be loaded can be
explicitly given.
output_dir : str
The directory to save the log file and all outputs to. Defaults to
display_image_size : The size of the image to be displayed on the
web page. If the image is smaller than this, it will be
interpolated up to the higher number of pixels. If larger, it will
be downsampled.
transform_function : function or list, optional
The transformation function or list of functions that will be
applied to each cutout. The function should take an input 2d array
(the cutout) and return an output 2d array. If a list is provided,
each function is applied in the order of the list.
catalogue : pandas.DataFrame or similar
A catalogue of the positions of sources around which cutouts will
be extracted. Note that a cutout of size | |
# pylint: disable=invalid-name
"""
SAS generic computation and sld file readers
"""
from __future__ import print_function
import os
import sys
import copy
import logging
from periodictable import formula
from periodictable import nsf
import numpy as np
from . import _sld2i
from .BaseComponent import BaseComponent
logger = logging.getLogger(__name__)
# Python 2/3 compatibility shim: the C extension returns byte strings, which
# must be decoded to text on Python 3 but passed through unchanged on Python 2.
if sys.version_info[0] < 3:
    def decode(s):
        # Python 2: byte strings are the native str type; no conversion needed
        return s
else:
    def decode(s):
        # Python 3: convert bytes to str, leave str untouched
        return s.decode() if isinstance(s, bytes) else s
# Magnetization-to-magnetic-SLD conversion factor for input in A/m [A^(-2)]
# (see mag2sld docstring for the derivation)
MFACTOR_AM = 2.853E-12
# Magnetization-to-magnetic-SLD conversion factor for input in mT [A^(-2)]
MFACTOR_MT = 2.3164E-9
# Meters to Angstroms
METER2ANG = 1.0E+10
#Avogadro constant [1/mol]
NA = 6.02214129e+23
def mag2sld(mag, v_unit=None):
    """
    Convert magnetization to magnetic SLD.

    sldm = Dm * mag where Dm = gamma * classical elec. radius/(2*Bohr magneton)
    Dm ~ 2.853E-12 [A^(-2)] ==> Shouldn't be 2.90636E-12 [A^(-2)]???

    :param mag: magnetization value (scalar or array)
    :param v_unit: unit of ``mag``, either "A/m" or "mT"
    :return: magnetic scattering length density
    :raises ValueError: if ``v_unit`` is not a recognized unit
    """
    if v_unit == "A/m":
        factor = MFACTOR_AM
    elif v_unit == "mT":
        factor = MFACTOR_MT
    else:
        # BUG FIX: previous message "Invalid valueunit" named neither the
        # parameter nor the accepted units
        raise ValueError("Invalid v_unit %r; expected 'A/m' or 'mT'" % (v_unit,))
    sld_m = factor * mag
    return sld_m
def transform_center(pos_x, pos_y, pos_z):
    """Re-center each coordinate array so its midpoint lies at the origin.

    :param pos_x: x coordinates (numpy array)
    :param pos_y: y coordinates (numpy array)
    :param pos_z: z coordinates (numpy array)
    :return: posx, posy, posz [arrays]
    """
    def recenter(vals):
        # Subtract the midpoint of the bounding interval, element-wise.
        return vals - (min(vals) + max(vals)) / 2.0
    return recenter(pos_x), recenter(pos_y), recenter(pos_z)
class GenSAS(BaseComponent):
    """
    Generic SAS computation Model based on sld (n & m) arrays.

    Computes scattering intensity from voxelised nuclear and magnetic
    scattering-length-density data through the compiled ``_sld2i``
    extension.  :meth:`set_sld_data` must be called before evaluating the
    model: it populates the ``data_*`` arrays and the total volume used
    for intensity normalisation.
    """
    def __init__(self):
        """
        Initialise with empty SLD data and default parameter values.
        """
        # Initialize BaseComponent
        BaseComponent.__init__(self)
        self.sld_data = None        # MagSLD object, set by set_sld_data()
        self.data_pos_unit = None   # unit string of the position arrays
        self.data_x = None          # voxel x positions
        self.data_y = None          # voxel y positions
        self.data_z = None          # voxel z positions
        self.data_sldn = None       # nuclear SLD per voxel
        self.data_mx = None         # magnetic SLD, x component, per voxel
        self.data_my = None         # magnetic SLD, y component, per voxel
        self.data_mz = None         # magnetic SLD, z component, per voxel
        self.data_vol = None #[A^3]
        self.is_avg = False         # orientation-averaged (1D) evaluation flag
        ## Name of the model
        self.name = "GenSAS"
        ## Define parameters
        self.params = {}
        self.params['scale'] = 1.0
        self.params['background'] = 0.0
        self.params['solvent_SLD'] = 0.0
        self.params['total_volume'] = 1.0
        self.params['Up_frac_in'] = 1.0
        self.params['Up_frac_out'] = 1.0
        self.params['Up_theta'] = 0.0
        self.description = 'GenSAS'
        ## Parameter details [units, min, max]
        self.details = {}
        self.details['scale'] = ['', 0.0, np.inf]
        self.details['background'] = ['[1/cm]', 0.0, np.inf]
        self.details['solvent_SLD'] = ['1/A^(2)', -np.inf, np.inf]
        self.details['total_volume'] = ['A^(3)', 0.0, np.inf]
        self.details['Up_frac_in'] = ['[u/(u+d)]', 0.0, 1.0]
        self.details['Up_frac_out'] = ['[u/(u+d)]', 0.0, 1.0]
        self.details['Up_theta'] = ['[deg]', -np.inf, np.inf]
        # fixed parameters
        self.fixed = []
    def set_pixel_volumes(self, volume):
        """
        Set the volume of a pixel in (A^3) unit.

        :Param volume: pixel volume [float]
        :raises TypeError: if no volume data exists yet, i.e. if
            set_sld_data() has not been called first.
        """
        if self.data_vol is None:
            raise TypeError("data_vol is missing")
        self.data_vol = volume
    def set_is_avg(self, is_avg=False):
        """
        Set the orientation-average flag. is_avg: [bool]
        """
        self.is_avg = is_avg
    def _gen(self, qx, qy):
        """
        Evaluate the model intensity at the given q values.

        :Param qx: array of qx values
        :Param qy: array of qy values; when empty, a 1D (qx-only)
            computation is performed instead of a 2D one
        :return: scaled intensity array with background added
        """
        pos_x = self.data_x
        pos_y = self.data_y
        pos_z = self.data_z
        # NOTE(review): is_avg is initialised to False and set_is_avg() is
        # documented to store a bool, so this `is None` test is normally
        # never True and the re-centering is unreachable unless a caller
        # assigns None directly -- confirm whether `if self.is_avg:` was
        # intended.
        if self.is_avg is None:
            pos_x, pos_y, pos_z = transform_center(pos_x, pos_y, pos_z)
        # Copy before subtracting the solvent SLD so the stored data is
        # left unmodified for subsequent evaluations.
        sldn = copy.deepcopy(self.data_sldn)
        sldn -= self.params['solvent_SLD']
        # **** WARNING **** new_GenI holds pointers to numpy vectors
        # be sure that they are contiguous double precision arrays and make
        # sure the GC doesn't eat them before genicom is called.
        # TODO: rewrite so that the parameters are passed directly to genicom
        args = (
            (1 if self.is_avg else 0),
            pos_x, pos_y, pos_z,
            sldn, self.data_mx, self.data_my,
            self.data_mz, self.data_vol,
            self.params['Up_frac_in'],
            self.params['Up_frac_out'],
            self.params['Up_theta'])
        model = _sld2i.new_GenI(*args)
        if len(qy):
            # 2D computation over the (qx, qy) point list.
            qx, qy = _vec(qx), _vec(qy)
            I_out = np.empty_like(qx)
            #print("npoints", qx.shape, "npixels", pos_x.shape)
            _sld2i.genicomXY(model, qx, qy, I_out)
            #print("I_out after", I_out)
        else:
            # 1D computation over qx only.
            qx = _vec(qx)
            I_out = np.empty_like(qx)
            _sld2i.genicom(model, qx, I_out)
        # data_total_volume is only assigned in set_sld_data(); calling
        # _gen() before it raises AttributeError.
        vol_correction = self.data_total_volume / self.params['total_volume']
        result = (self.params['scale'] * vol_correction * I_out
                  + self.params['background'])
        return result
    def set_sld_data(self, sld_data=None):
        """
        Set the SLD data arrays from a MagSLD-like object and record the
        total sample volume used for normalisation.
        """
        self.sld_data = sld_data
        self.data_pos_unit = sld_data.pos_unit
        self.data_x = _vec(sld_data.pos_x)
        self.data_y = _vec(sld_data.pos_y)
        self.data_z = _vec(sld_data.pos_z)
        self.data_sldn = _vec(sld_data.sld_n)
        self.data_mx = _vec(sld_data.sld_mx)
        self.data_my = _vec(sld_data.sld_my)
        self.data_mz = _vec(sld_data.sld_mz)
        self.data_vol = _vec(sld_data.vol_pix)
        self.data_total_volume = sum(sld_data.vol_pix)
        self.params['total_volume'] = sum(sld_data.vol_pix)
    def getProfile(self):
        """
        Get SLD profile.

        : return: sld_data
        """
        return self.sld_data
    def run(self, x=0.0):
        """
        Evaluate the model for 1D data.

        :param x: list [qx, qy] where qy must be empty (1D evaluation)
        :return: (I value)
        :raises ValueError: if x is not a list or qy is non-empty
        """
        if isinstance(x, list):
            if len(x[1]) > 0:
                msg = "Not a 1D."
                raise ValueError(msg)
            # 1D I is found at y =0 in the 2D pattern
            out = self._gen(x[0], [])
            return out
        else:
            msg = "Q must be given as list of qx's and qy's"
            raise ValueError(msg)
    def runXY(self, x=0.0):
        """
        Evaluate the model for 2D data.

        :param x: list [qx, qy] of q arrays
        :return: I value
        :Use this runXY() for the computation
        :raises ValueError: if x is not a list
        """
        if isinstance(x, list):
            return self._gen(x[0], x[1])
        else:
            msg = "Q must be given as list of qx's and qy's"
            raise ValueError(msg)
    def evalDistribution(self, qdist):
        """
        Evaluate a distribution of q-values.

        :param qdist: list [qx,qy] where qx,qy are 1D ndarrays; an empty
            qy dispatches to the 1D run(), otherwise runXY() is used.
        :raises RuntimeError: if qdist is not a list
        """
        if isinstance(qdist, list):
            return self.run(qdist) if len(qdist[1]) < 1 else self.runXY(qdist)
        else:
            mesg = "evalDistribution is expecting an ndarray of "
            mesg += "a list [qx,qy] where qx,qy are arrays."
            raise RuntimeError(mesg)
def _vec(v):
return np.ascontiguousarray(v, 'd')
class OMF2SLD(object):
    """
    Convert OMFData to MagSLD data.

    Expands the 1-D axis definitions of an OMF grid into full per-voxel
    position/magnetisation arrays and packages them as a MagSLD object
    (available from :meth:`get_output` / :meth:`get_magsld`).
    """
    def __init__(self):
        """
        Initialise with empty arrays; call set_data() to populate.
        """
        self.pos_x = None     # per-voxel x positions
        self.pos_y = None     # per-voxel y positions
        self.pos_z = None     # per-voxel z positions
        self.mx = None        # per-voxel magnetisation, x component
        self.my = None        # per-voxel magnetisation, y component
        self.mz = None        # per-voxel magnetisation, z component
        self.sld_n = None     # per-voxel nuclear SLD (zeros by default)
        self.vol_pix = None   # per-voxel volume (not set by set_data)
        self.output = None    # resulting MagSLD object
        self.omfdata = None   # the source OMFData object
    def set_data(self, omfdata, shape='rectangular'):
        """
        Build per-voxel arrays from *omfdata* and store a MagSLD result.

        :Param omfdata: OMFData-like object with axis extents/steps and
            optional mx/my/mz arrays
        :Param shape: 'rectangular' keeps every voxel; 'ellipsoid' masks
            out voxels outside the inscribed ellipsoid
        """
        self.omfdata = omfdata
        length = int(omfdata.xnodes * omfdata.ynodes * omfdata.znodes)
        # Build the 1-D coordinate axes from min/step/node-count.
        # NOTE(review): np.arange with a float step can occasionally yield
        # one extra point; a resulting length mismatch is caught by
        # _check_data_length() below.
        pos_x = np.arange(omfdata.xmin,
                          omfdata.xnodes*omfdata.xstepsize + omfdata.xmin,
                          omfdata.xstepsize)
        pos_y = np.arange(omfdata.ymin,
                          omfdata.ynodes*omfdata.ystepsize + omfdata.ymin,
                          omfdata.ystepsize)
        pos_z = np.arange(omfdata.zmin,
                          omfdata.znodes*omfdata.zstepsize + omfdata.zmin,
                          omfdata.zstepsize)
        # Expand the axes to the full 3-D lattice: x varies fastest, then
        # y, then z (tile/repeat emulate a meshgrid flattened in that order).
        self.pos_x = np.tile(pos_x, int(omfdata.ynodes * omfdata.znodes))
        self.pos_y = pos_y.repeat(int(omfdata.xnodes))
        self.pos_y = np.tile(self.pos_y, int(omfdata.znodes))
        self.pos_z = pos_z.repeat(int(omfdata.xnodes * omfdata.ynodes))
        self.mx = omfdata.mx
        self.my = omfdata.my
        self.mz = omfdata.mz
        self.sld_n = np.zeros(length)
        # Missing magnetisation components default to zero arrays.
        if omfdata.mx is None:
            self.mx = np.zeros(length)
        if omfdata.my is None:
            self.my = np.zeros(length)
        if omfdata.mz is None:
            self.mz = np.zeros(length)
        self._check_data_length(length)
        # Called with both flags False, so this is a no-op here; presumably
        # kept as a hook for callers that enable removal/recentering.
        self.remove_null_points(False, False)
        mask = np.ones(len(self.sld_n), dtype=bool)
        if shape.lower() == 'ellipsoid':
            # Keep only voxels inside the ellipsoid inscribed in the grid.
            try:
                # Pixel (step) size included
                x_c = max(self.pos_x) + min(self.pos_x)
                y_c = max(self.pos_y) + min(self.pos_y)
                z_c = max(self.pos_z) + min(self.pos_z)
                x_d = max(self.pos_x) - min(self.pos_x)
                y_d = max(self.pos_y) - min(self.pos_y)
                z_d = max(self.pos_z) - min(self.pos_z)
                x_r = (x_d + omfdata.xstepsize) / 2.0
                y_r = (y_d + omfdata.ystepsize) / 2.0
                z_r = (z_d + omfdata.zstepsize) / 2.0
                x_dir2 = ((self.pos_x - x_c / 2.0) / x_r)
                x_dir2 *= x_dir2
                y_dir2 = ((self.pos_y - y_c / 2.0) / y_r)
                y_dir2 *= y_dir2
                z_dir2 = ((self.pos_z - z_c / 2.0) / z_r)
                z_dir2 *= z_dir2
                mask = (x_dir2 + y_dir2 + z_dir2) <= 1.0
            except Exception as exc:
                # Fall back to the all-True mask on any failure.
                logger.error(exc)
        self.output = MagSLD(self.pos_x[mask], self.pos_y[mask],
                             self.pos_z[mask], self.sld_n[mask],
                             self.mx[mask], self.my[mask], self.mz[mask])
        self.output.set_pix_type('pixel')
        self.output.set_pixel_symbols('pixel')
    def get_omfdata(self):
        """
        Return the source OMFData object.
        """
        return self.omfdata
    def get_output(self):
        """
        Return the converted MagSLD output.
        """
        return self.output
    def _check_data_length(self, length):
        """
        Check that every per-voxel array has the expected length.

        :Params length: expected data length
        :raises ValueError: on any mismatch
        """
        parts = (self.pos_x, self.pos_y, self.pos_z, self.mx, self.my, self.mz)
        if any(len(v) != length for v in parts):
            raise ValueError("Error: Inconsistent data length.")
    def remove_null_points(self, remove=False, recenter=False):
        """
        Optionally remove voxels where mx, my and mz are all 0, and/or
        re-center the position arrays about the origin.
        """
        if remove:
            # Indices where at least one magnetisation component is nonzero.
            is_nonzero = (np.fabs(self.mx) + np.fabs(self.my) +
                          np.fabs(self.mz)).nonzero()
            if len(is_nonzero[0]) > 0:
                self.pos_x = self.pos_x[is_nonzero]
                self.pos_y = self.pos_y[is_nonzero]
                self.pos_z = self.pos_z[is_nonzero]
                self.sld_n = self.sld_n[is_nonzero]
                self.mx = self.mx[is_nonzero]
                self.my = self.my[is_nonzero]
                self.mz = self.mz[is_nonzero]
        if recenter:
            self.pos_x -= (min(self.pos_x) + max(self.pos_x)) / 2.0
            self.pos_y -= (min(self.pos_y) + max(self.pos_y)) / 2.0
            self.pos_z -= (min(self.pos_z) + max(self.pos_z)) / 2.0
    def get_magsld(self):
        """
        Return the converted MagSLD output (same object as get_output()).
        """
        return self.output
class OMFReader(object):
"""
Class to load omf/ascii files (3 columns w/header).
"""
## File type
type_name = "OMF ASCII"
## Wildcards
type = ["OMF files (*.OMF, *.omf)|*.omf"]
## List of allowed extensions
ext = ['.omf', '.OMF']
def read(self, path):
"""
Load data file
:param path: file path
:return: x, y, z, sld_n, sld_mx, sld_my, sld_mz
"""
desc = ""
mx = np.zeros(0)
my = np.zeros(0)
mz = np.zeros(0)
try:
input_f = open(path, 'rb')
buff = decode(input_f.read())
lines = buff.split('\n')
input_f.close()
output = OMFData()
valueunit = None
for line in lines:
line = line.strip()
# Read data
if line and not line.startswith('#'):
try:
toks = line.split()
_mx = float(toks[0])
_my = float(toks[1])
_mz = float(toks[2])
_mx = mag2sld(_mx, valueunit)
_my = mag2sld(_my, valueunit)
_mz = mag2sld(_mz, valueunit)
mx = np.append(mx, _mx)
my = np.append(my, _my)
mz = np.append(mz, _mz)
except Exception as exc:
# Skip non-data | |
Electric (Australia)",
"000C82": "NETWORK TECHNOLOGIES INC",
"000C83": "Logical Solutions",
"000C84": "Eazix, Inc.",
"000C85": "CISCO SYSTEMS, INC.",
"000C86": "CISCO SYSTEMS, INC.",
"000C87": "AMD",
"000C88": "Apache Micro Peripherals, Inc.",
"000C89": "AC Electric Vehicles, Ltd.",
"000C8A": "Bose Corporation",
"000C8B": "Connect Tech Inc",
"000C8C": "KODICOM CO.,LTD.",
"000C8D": "MATRIX VISION GmbH",
"000C8E": "Mentor Engineering Inc",
"000C8F": "Nergal s.r.l.",
"000C90": "Octasic Inc.",
"000C91": "Riverhead Networks Inc.",
"000C92": "WolfVision Gmbh",
"000C93": "Xeline Co., Ltd.",
"000C94": "United Electronic Industries, Inc. (EUI)",
"000C95": "PrimeNet",
"000C96": "OQO, Inc.",
"000C97": "NV ADB TTV Technologies SA",
"000C98": "LETEK Communications Inc.",
"000C99": "HITEL LINK Co.,Ltd",
"000C9A": "Hitech Electronics Corp.",
"000C9B": "EE Solutions, Inc",
"000C9C": "Chongho information & communications",
"000C9D": "UbeeAirWalk, Inc.",
"000C9E": "MemoryLink Corp.",
"000C9F": "NKE Corporation",
"000CA0": "StorCase Technology, Inc.",
"000CA1": "SIGMACOM Co., LTD.",
"000CA2": "Harmonic Video Network",
"000CA3": "Rancho Technology, Inc.",
"000CA4": "Prompttec Product Management GmbH",
"000CA5": "Naman NZ LTd",
"000CA6": "Mintera Corporation",
"000CA7": "Metro (Suzhou) Technologies Co., Ltd.",
"000CA8": "Garuda Networks Corporation",
"000CA9": "Ebtron Inc.",
"000CAA": "Cubic Transportation Systems Inc",
"000CAB": "COMMEND International",
"000CAC": "Citizen Watch Co., Ltd.",
"000CAD": "BTU International",
"000CAE": "Ailocom Oy",
"000CAF": "TRI TERM CO.,LTD.",
"000CB0": "Star Semiconductor Corporation",
"000CB1": "Salland Engineering (Europe) BV",
"000CB2": "Comstar Co., Ltd.",
"000CB3": "ROUND Co.,Ltd.",
"000CB4": "AutoCell Laboratories, Inc.",
"000CB5": "Premier Technolgies, Inc",
"000CB6": "NANJING SEU MOBILE & INTERNET TECHNOLOGY CO.,LTD",
"000CB7": "Nanjing Huazhuo Electronics Co., Ltd.",
"000CB8": "MEDION AG",
"000CB9": "LEA",
"000CBA": "Jamex, Inc.",
"000CBB": "ISKRAEMECO",
"000CBC": "Iscutum",
"000CBD": "Interface Masters, Inc",
"000CBE": "Innominate Security Technologies AG",
"000CBF": "Holy Stone Ent. Co., Ltd.",
"000CC0": "Gener<NAME>",
"000CC1": "Cooper Industries Inc.",
"000CC2": "ControlNet (India) Private Limited",
"000CC3": "BeWAN systems",
"000CC4": "Tiptel AG",
"000CC5": "Nextlink Co., Ltd.",
"000CC6": "Ka-Ro electronics GmbH",
"000CC7": "Intelligent Computer Solutions Inc.",
"000CC8": "Xytronix Research & Design, Inc.",
"000CC9": "ILWOO DATA & TECHNOLOGY CO.,LTD",
"000CCA": "HGST a Western Digital Company",
"000CCB": "Design Combus Ltd",
"000CCC": "Aeroscout Ltd.",
"000CCD": "IEC - TC57",
"000CCE": "CISCO SYSTEMS, INC.",
"000CCF": "CISCO SYSTEMS, INC.",
"000CD0": "Symetrix",
"000CD1": "SFOM Technology Corp.",
"000CD2": "Schaffner EMV AG",
"000CD3": "Prettl Elektronik Radeberg GmbH",
"000CD4": "Positron Public Safety Systems inc.",
"000CD5": "Passave Inc.",
"000CD6": "PARTNER TECH",
"000CD7": "Nallatech Ltd",
"000CD8": "<NAME> GmbH & Co",
"000CD9": "Itcare Co., Ltd",
"000CDA": "FreeHand Systems, Inc.",
"000CDB": "Brocade Communications Systems, Inc",
"000CDC": "BECS Technology, Inc",
"000CDD": "AOS Technologies AG",
"000CDE": "ABB STOTZ-KONTAKT GmbH",
"000CDF": "PULNiX America, Inc",
"000CE0": "Trek Diagnostics Inc.",
"000CE1": "The Open Group",
"000CE2": "Rolls-Royce",
"000CE3": "Option International N.V.",
"000CE4": "NeuroCom International, Inc.",
"000CE5": "ARRIS Group, Inc.",
"000CE6": "Meru Networks Inc",
"000CE7": "MediaTek Inc.",
"000CE8": "GuangZhou AnJuBao Co., Ltd",
"000CE9": "BLOOMBERG L.P.",
"000CEA": "aphona Kommunikationssysteme",
"000CEB": "CNMP Networks, Inc.",
"000CEC": "Spectracom Corp.",
"000CED": "Real Digital Media",
"000CEE": "jp-embedded",
"000CEF": "Open Networks Engineering Ltd",
"000CF0": "M & N GmbH",
"000CF1": "Intel Corporation",
"000CF2": "GAMESA E\u00f3lica",
"000CF3": "CALL IMAGE SA",
"000CF4": "AKATSUKI ELECTRIC MFG.CO.,LTD.",
"000CF5": "InfoExpress",
"000CF6": "Sitecom Europe BV",
"000CF7": "Nortel Networks",
"000CF8": "Nortel Networks",
"000CF9": "Xylem Water Solutions",
"000CFA": "Digital Systems Corp",
"000CFB": "Korea Network Systems",
"000CFC": "S2io Technologies Corp",
"000CFD": "Hyundai ImageQuest Co.,Ltd.",
"000CFE": "Grand Electronic Co., Ltd",
"000CFF": "MRO-TEK LIMITED",
"000D00": "Seaway Networks Inc.",
"000D01": "P&E Microcomputer Systems, Inc.",
"000D02": "NEC AccessTechnica, Ltd.",
"000D03": "Matrics, Inc.",
"000D04": "Foxboro Eckardt Development GmbH",
"000D05": "cybernet manufacturing inc.",
"000D06": "Compulogic Limited",
"000D07": "Calrec Audio Ltd",
"000D08": "AboveCable, Inc.",
"000D09": "Yuehua(Zhuhai) Electronic CO. LTD",
"000D0A": "Projectiondesign as",
"000D0B": "Buffalo Inc.",
"000D0C": "MDI Security Systems",
"000D0D": "ITSupported, LLC",
"000D0E": "Inqnet Systems, Inc.",
"000D0F": "Finlux Ltd",
"000D10": "Embedtronics Oy",
"000D11": "DENTSPLY - Gendex",
"000D12": "AXELL Corporation",
"000D13": "Wilhelm Rutenbeck GmbH&Co.KG",
"000D14": "Vtech Innovation LP dba Advanced American Telephones",
"000D15": "Voipac s.r.o.",
"000D16": "UHS Systems Pty Ltd",
"000D17": "Turbo Networks Co.Ltd",
"000D18": "Mega-Trend Electronics CO., LTD.",
"000D19": "ROBE Show lighting",
"000D1A": "Mustek System Inc.",
"000D1B": "Kyoto Electronics Manufacturing Co., Ltd.",
"000D1C": "Amesys Defense",
"000D1D": "HIGH-TEK HARNESS ENT. CO., LTD.",
"000D1E": "Control Techniques",
"000D1F": "AV Digital",
"000D20": "ASAHIKASEI TECHNOSYSTEM CO.,LTD.",
"000D21": "WISCORE Inc.",
"000D22": "Unitronics LTD",
"000D23": "Smart Solution, Inc",
"000D24": "SENTEC E&E CO., LTD.",
"000D25": "SANDEN CORPORATION",
"000D26": "Primagraphics Limited",
"000D27": "MICROPLEX Printware AG",
"000D28": "CISCO SYSTEMS, INC.",
"000D29": "CISCO SYSTEMS, INC.",
"000D2A": "Scanmatic AS",
"000D2B": "Racal Instruments",
"000D2C": "Patapsco Designs Ltd",
"000D2D": "NCT Deutschland GmbH",
"000D2E": "Matsushita Avionics Systems Corporation",
"000D2F": "AIN Comm.Tech.Co., LTD",
"000D30": "IceFyre Semiconductor",
"000D31": "Compellent Technologies, Inc.",
"000D32": "DispenseSource, Inc.",
"000D33": "Prediwave Corp.",
"000D34": "Shell International Exploration and Production, Inc.",
"000D35": "PAC International Ltd",
"000D36": "Wu Han Routon Electronic Co., Ltd",
"000D37": "WIPLUG",
"000D38": "NISSIN INC.",
"000D39": "Network Electronics",
"000D3A": "Microsoft Corp.",
"000D3B": "Microelectronics Technology Inc.",
"000D3C": "i.Tech Dynamic Ltd",
"000D3D": "Hammerhead Systems, Inc.",
"000D3E": "APLUX Communications Ltd.",
"000D3F": "VTI Instruments Corporation",
"000D40": "Verint Loronix Video Solutions",
"000D41": "Siemens AG ICM MP UC RD IT KLF1",
"000D42": "Newbest Development Limited",
"000D43": "DRS Tactical Systems Inc.",
"000D44": "Audio BU - Logitech",
"000D45": "Tottori SANYO Electric Co., Ltd.",
"000D46": "Parker SSD Drives",
"000D47": "Collex",
"000D48": "AEWIN Technologies Co., Ltd.",
"000D49": "Triton Systems of Delaware, Inc.",
"000D4A": "Steag ETA-Optik",
"000D4B": "Roku, LLC",
"000D4C": "Outline Electronics Ltd.",
"000D4D": "Ninelanes",
"000D4E": "NDR Co.,LTD.",
"000D4F": "Kenwood Corporation",
"000D50": "Galazar Networks",
"000D51": "DIVR Systems, Inc.",
"000D52": "Comart system",
"000D53": "Beijing 5w Communication Corp.",
"000D54": "3Com Ltd",
"000D55": "SANYCOM Technology Co.,Ltd",
"000D56": "Dell Inc",
"000D57": "Fujitsu I-Network Systems Limited.",
"000D58": "PRIVATE",
"000D59": "Amity Systems, Inc.",
"000D5A": "Tiesse SpA",
"000D5B": "Smart Empire Investments Limited",
"000D5C": "Robert Bosch GmbH, VT-ATMO",
"000D5D": "Raritan Computer, Inc",
"000D5E": "NEC Personal Products",
"000D5F": "Minds Inc",
"000D60": "IBM Corp",
"000D61": "Giga-Byte Technology Co., Ltd.",
"000D62": "Funkwerk Dabendorf GmbH",
"000D63": "DENT Instruments, Inc.",
"000D64": "COMAG Handels AG",
"000D65": "CISCO SYSTEMS, INC.",
"000D66": "CISCO SYSTEMS, INC.",
"000D67": "Ericsson",
"000D68": "Vinci Systems, Inc.",
"000D69": "TMT&D Corporation",
"000D6A": "Redwood Technologies LTD",
"000D6B": "Mita-Teknik A/S",
"000D6C": "M-Audio",
"000D6D": "K-Tech Devices Corp.",
"000D6E": "K-Patents Oy",
"000D6F": "Ember Corporation",
"000D70": "Datamax Corporation",
"000D71": "boca systems",
"000D72": "2Wire, Inc",
"000D73": "Technical Support, Inc.",
"000D74": "Sand Network Systems, Inc.",
"000D75": "Kobian Pte Ltd - Taiwan Branch",
"000D76": "Hokuto Denshi Co,. Ltd.",
"000D77": "FalconStor Software",
"000D78": "Engineering & Security",
"000D79": "Dynamic Solutions Co,.Ltd.",
"000D7A": "DiGATTO Asia Pacific Pte Ltd",
"000D7B": "Consensys Computers Inc.",
"000D7C": "Codian Ltd",
"000D7D": "Afco Systems",
"000D7E": "Axiowave Networks, Inc.",
"000D7F": "MIDAS COMMUNICATION TECHNOLOGIES PTE LTD ( Foreign Branch)",
"000D80": "Online Development Inc",
"000D81": "Pepperl+Fuchs GmbH",
"000D82": "PHS srl",
"000D83": "Sanmina-SCI Hungary Ltd.",
"000D84": "Makus Inc.",
"000D85": "Tapwave, Inc.",
"000D86": "Huber + | |
<gh_stars>0
import sys
import os
import glob
import time
from warnings import warn
from threading import Event
from threading import Timer
from threading import Thread
from threading import Lock
import subprocess
from astropy import units as u
import Pyro4
from pocs.utils import current_time
from pocs.utils.logger import get_root_logger
from pocs.utils import load_module
from pocs.camera import AbstractCamera
from huntsman.utils import load_config
# Enable local display of remote tracebacks: Pyro4's excepthook prints the
# remote traceback carried by exceptions raised from proxy calls.
sys.excepthook = Pyro4.util.excepthook
class Camera(AbstractCamera):
"""
Class representing the client side interface to a distributed camera
"""
def __init__(self,
uri,
name='Pyro Camera',
model='pyro',
*args, **kwargs):
super().__init__(name=name, port=uri, model=model, *args, **kwargs)
self._uri = uri
# Connect to camera
self.connect()
# Properties
@AbstractCamera.uid.getter
def uid(self):
# Need to overide this because the base class only returns the 1st 6 characters of the
# serial number, which is not a unique identifier for most of the camera types.
return self._serial_number
@property
def ccd_temp(self):
"""
Current temperature of the camera's image sensor.
"""
return self._proxy.ccd_temp * u.Celsius
@property
def ccd_set_point(self):
"""
Current value of the CCD set point, the target temperature for the camera's
image sensor cooling control.
Can be set by assigning an astropy.units.Quantity.
"""
return self._proxy.ccd_set_point * u.Celsius
@ccd_set_point.setter
def ccd_set_point(self, set_point):
if isinstance(set_point, u.Quantity):
set_point = set_point.to(u.Celsius).value
self._proxy.ccd_set_point = float(set_point)
@property
def ccd_cooling_enabled(self):
"""
Current status of the camera's image sensor cooling system (enabled/disabled).
For some cameras it is possible to change this by assigning a boolean
"""
return self._proxy.ccd_cooling_enabled
@ccd_cooling_enabled.setter
def ccd_cooling_enabled(self, enabled):
self._proxy.ccd_cooling_enabled = bool(enabled)
@property
def ccd_cooling_power(self):
"""
Current power level of the camera's image sensor cooling system (typically as
a percentage of the maximum).
"""
return self._proxy.ccd_cooling_power
# Methods
    def connect(self):
        """
        (Re)connect to the distributed camera.

        Creates a Pyro proxy for the remote camera, forces the remote
        object to initialise by requesting its uid, then caches static
        camera properties locally. On failure a warning is issued, the
        error is logged, and the method returns without marking the
        camera as connected.
        """
        self.logger.debug('Connecting to {} on {}'.format(self.name, self._uri))
        # Get a proxy for the camera
        try:
            self._proxy = Pyro4.Proxy(self._uri)
        except Pyro4.errors.NamingError as err:
            msg = "Couldn't get proxy to camera {}: {}".format(self.name, err)
            warn(msg)
            self.logger.error(msg)
            return
        # Set sync mode
        Pyro4.asyncproxy(self._proxy, asynchronous=False)
        # Force camera proxy to connect by getting the camera uid.
        # This will trigger the remote object creation & (re)initialise the camera & focuser,
        # which can take a long time with real hardware.
        uid = self._proxy.get_uid()
        if not uid:
            msg = "Couldn't connect to {} on {}!".format(self.name, self._uri)
            warn(msg)
            self.logger.error(msg)
            return
        # Connection succeeded: cache static camera properties locally so
        # later reads don't need a round trip.
        self._connected = True
        self._serial_number = uid
        self.model = self._proxy.model
        self._file_extension = self._proxy.file_extension
        self._readout_time = self._proxy.readout_time
        self.filter_type = self._proxy.filter_type
        self.logger.debug("{} connected".format(self))
    def take_exposure(self,
                      seconds=1.0 * u.second,
                      filename=None,
                      dark=False,
                      blocking=False,
                      timeout=None,
                      *args,
                      **kwargs):
        """
        Take exposure for a given number of seconds and saves to provided filename.
        Args:
            seconds (u.second, optional): Length of exposure
            filename (str, optional): Image is saved to this filename
            dark (bool, optional): Exposure is a dark frame (don't open shutter), default False
            blocking (bool, optional): If False (default) returns immediately after starting
                the exposure, if True will block until it completes.
            timeout (u.second, optional): Length of time beyond the length the exposure to wait
                for exposures to complete. If not given will wait indefinitely.
        Returns:
            threading.Event: Event that will be set when exposure is complete
        """
        # NOTE(review): the assert messages call self.logger.error/warning,
        # which return None, so a failing assert carries no message text and
        # only logs as a side effect; asserts are also stripped under
        # `python -O` -- consider explicit exceptions instead.
        assert self.is_connected, self.logger.error("Camera must be connected for take_exposure!")
        assert filename is not None, self.logger.warning("Must pass filename for take_exposure")
        # Want exposure time as a builtin type for Pyro serialisation
        if isinstance(seconds, u.Quantity):
            seconds = seconds.to(u.second).value
        seconds = float(seconds)
        if isinstance(timeout, u.Quantity):
            timeout = timeout.to(u.second).value
        if timeout is not None:
            timeout = float(timeout)
        dir_name, base_name = os.path.split(filename)
        # Make sure dir_name has one and only one trailing slash, otherwise rsync may fail
        dir_name = os.path.normpath(dir_name) + '/'
        # Make sure proxy is in async mode
        Pyro4.asyncproxy(self._proxy, asynchronous=True)
        # Start the exposure
        self.logger.debug('Taking {} second exposure on {}: {}'.format(
            seconds, self.name, base_name))
        # Remote method call to start the exposure; returns a Pyro future.
        exposure_result = self._proxy.take_exposure(seconds=seconds,
                                                    base_name=base_name,
                                                    dark=bool(dark),
                                                    *args,
                                                    **kwargs)
        # Tag the file transfer on the end.
        exposure_result = exposure_result.then(self._file_transfer, dir_name)
        # Tag empty directory cleanup on the end & keep future result to check for completion
        exposure_result = exposure_result.then(self._clean_directories)
        # Start a thread that will set an event once exposure has completed.
        # The Timer delays the wait until the exposure + readout should have
        # finished before _async_wait starts waiting on the future.
        exposure_event = Event()
        exposure_thread = Timer(interval=seconds + self.readout_time,
                                function=self._async_wait,
                                args=(exposure_result, 'exposure', exposure_event, timeout))
        exposure_thread.start()
        if blocking:
            exposure_event.wait()
        return exposure_event
def autofocus(self,
seconds=None,
focus_range=None,
focus_step=None,
thumbnail_size=None,
keep_files=None,
take_dark=None,
merit_function='vollath_F4',
merit_function_kwargs={},
mask_dilations=None,
coarse=False,
make_plots=False,
blocking=False,
timeout=None,
*args, **kwargs):
"""
Focuses the camera using the specified merit function. Optionally performs
a coarse focus to find the approximate position of infinity focus, which
should be followed by a fine focus before observing.
Args:
seconds (scalar, optional): Exposure time for focus exposures, if not
specified will use value from config.
focus_range (2-tuple, optional): Coarse & fine focus sweep range, in
encoder units. Specify to override values from config.
focus_step (2-tuple, optional): Coarse & fine focus sweep steps, in
encoder units. Specify to override values from config.
thumbnail_size (int, optional): Size of square central region of image
to use, default 500 x 500 pixels.
keep_files (bool, optional): If True will keep all images taken
during focusing. If False (default) will delete all except the
first and last images from each focus run.
take_dark (bool, optional): If True will attempt to take a dark frame
before the focus run, and use it for dark subtraction and hot
pixel masking, default True.
merit_function (str, optional): Merit function to use as a
focus metric, default vollath_F4.
merit_function_kwargs (dict, optional): Dictionary of additional
keyword arguments for the merit function.
mask_dilations (int, optional): Number of iterations of dilation to perform on the
saturated pixel mask (determine size of masked regions), default 10
coarse (bool, optional): Whether to perform a coarse focus, otherwise will perform
a fine focus. Default False.
make_plots (bool, optional: Whether to write focus plots to images folder, default
False.
blocking (bool, optional): Whether to block until autofocus complete, default False.
timeout (u.second, optional): Total length of time to wait for autofocus sequences
to complete. If not given will wait indefinitely.
Returns:
threading.Event: Event that will be set when autofocusing is complete
"""
# Make certain that all the argument are builtin types for easy Pyro serialisation
if isinstance(seconds, u.Quantity):
seconds = seconds.to(u.second).value
if seconds is not None:
seconds = float(seconds)
if focus_range is not None:
focus_range = (int(limit) for limit in focus_range)
if focus_step is not None:
focus_step = (int(step) for step in focus_step)
if keep_files is not None:
keep_files = bool(keep_files)
if take_dark is not None:
take_dark = bool(take_dark)
if thumbnail_size is not None:
thumbnail_size = int(thumbnail_size)
merit_function = str(merit_function)
merit_function_kwargs = dict(merit_function_kwargs)
if mask_dilations is not None:
mask_dilations = int(mask_dilations)
if coarse is not None:
coarse = bool(coarse)
if make_plots is not None:
make_plots = bool(make_plots)
if isinstance(timeout, u.Quantity):
timeout = timeout.to(u.second).value
if timeout is not None:
timeout = float(timeout)
# Compile aruments into a dictionary
autofocus_kwargs = {'seconds': seconds,
'focus_range': focus_range,
'focus_step': focus_step,
'keep_files': keep_files,
'take_dark': take_dark,
'thumbnail_size': thumbnail_size,
'merit_function': merit_function,
'merit_function_kwargs': merit_function_kwargs,
'mask_dilations': mask_dilations,
'coarse': coarse,
'make_plots': make_plots}
autofocus_kwargs.update(kwargs)
focus_dir = os.path.join(os.path.abspath(self.config['directories']['images']), 'focus/')
# Make sure proxy is in async mode
Pyro4.asyncproxy(self._proxy, asynchronous=True)
# Start autofocus
autofocus_result = {}
self.logger.debug('Starting autofocus on {}'.format(self.name))
# Remote method call to start the autofocus
autofocus_result = self._proxy.autofocus(*args, **autofocus_kwargs)
# Tag the file transfer on the end.
autofocus_result = autofocus_result.then(self._file_transfer, focus_dir)
# Tag empty directory cleanup on the end & keep future result to check for completion
autofocus_result = autofocus_result.then(self._clean_directories)
# Start a thread that will set an event once autofocus has completed
autofocus_event = Event()
autofocus_thread = Thread(target=self._async_wait,
args=(autofocus_result, 'autofocus', autofocus_event, timeout))
autofocus_thread.start()
if blocking:
autofocus_event.wait()
return autofocus_event
# Private Methods
def _clean_directories(self, source):
"""
Clean up empty directories left behind by rsysc.
Args:
source (str): remote path to clean up empty directories from, in
user@host:/directory/subdirectory format.
"""
user_at_host, path = source.split(':')
path_root = path.split('/./')[0]
try:
result = subprocess.run(['ssh',
user_at_host,
'find {} -empty -delete'.format(path_root)],
check=True)
except subprocess.CalledProcessError as err:
msg = "Clean up of empty directories in {}:{} failed".format(user_at_host, path_root)
warn(msg)
self.logger.error(msg)
raise err
self.logger.debug("Clean up of empty directories in {}:{} complete".format(user_at_host,
path_root))
return source
def _file_transfer(self, source, destination):
"""
Used rsync to | |
do corpo lúteo'),
('N83.2', 'Outros cistos ovarianos e os não especificados'),
('N83.3', 'Atrofia adquirida do ovário e da trompa de Falópio'),
('N83.4', 'Prolapso e hérnia do ovário e da trompa de Falópio'),
('N83.5', 'Torção do ovário, do pedículo ovariano e da trompa de Falópio'),
('N83.6', 'Hematossalpinge'),
('N83.7', 'Hematoma do ligamento largo'),
('N83.8', 'Outros transtornos não-inflamatórios do ovário, da trompa de Falópio e do ligamento largo'),
('N83.9', 'Transtornos não-inflamatórios do ovário, da trompa de Falópio e do ligamento largo, não especificados'),
('N84.0', 'Pólipo do corpo do útero'),
('N84.1', 'Pólipo do colo do útero'),
('N84.2', 'Pólipo da vagina'),
('N84.3', 'Pólipo da vulva'),
('N84.8', 'Pólipo de outras partes do trato genital feminino'),
('N84.9', 'Pólipo do trato genital feminino não especificado'),
('N85.0', 'Hiperplasia glandular endometrial'),
('N85.1', 'Hiperplasia adenomatosa endometrial'),
('N85.2', 'Hipertrofia do útero'),
('N85.3', 'Subinvolução do útero'),
('N85.4', 'Posição anormal do útero'),
('N85.5', 'Inversão do útero'),
('N85.6', 'Sinéquias intra-uterinas'),
('N85.7', 'Hematometra'),
('N85.8', 'Outros transtornos não-inflamatórios especificados do útero'),
('N85.9', 'Transtornos não-inflamatórios do útero, não especificados'),
('N86', 'Erosão e ectrópio do colo do útero'),
('N87.0', 'Displasia cervical leve'),
('N87.1', 'Displasia cervical moderada'),
('N87.2', 'Displasia cervical grave, não classificada em outra parte'),
('N87.9', 'Displasia do colo do útero, não especificada'),
('N88.0', 'Leucoplasia do colo do útero'),
('N88.1', 'Laceração antiga do colo do útero'),
('N88.2', 'Estreitamento e estenose do colo do útero'),
('N88.3', 'Incompetência do colo do útero'),
('N88.4', 'Alongamento hipertrófico do colo do útero'),
('N88.8', 'Outros transtornos não-inflamatórios especificados do colo do útero'),
('N88.9', 'Transtorno não-inflamatório e não especificado do colo do útero'),
('N89.0', 'Displasia vaginal leve'),
('N89.1', 'Displasia vaginal moderada'),
('N89.2', 'Displasia vaginal grave, não classificada em outra parte'),
('N89.3', 'Displasia da vagina, não especificada'),
('N89.4', 'Leucoplasia da vagina'),
('N89.5', 'Estreitamento e atresia da vagina'),
('N89.6', 'Anel himenal apertado'),
('N89.7', 'Hematocolpos'),
('N89.8', 'Outros transtornos não-inflamatórios especificados da vagina'),
('N89.9', 'Transtorno não-inflamatório da vagina, não especificado'),
('N90.0', 'Displasia vulvar leve'),
('N90.1', 'Displasia vulvar moderada'),
('N90.2', 'Displasia vulvar grave, não classificada em outra parte'),
('N90.3', 'Displasia de vulva, não especificada'),
('N90.4', 'Leucoplasia de vulva'),
('N90.5', 'Atrofia da vulva'),
('N90.6', 'Hipertrofia da vulva'),
('N90.7', 'Cisto vulvar'),
('N90.8', 'Outros transtornos não-inflamatórios especificados da vulva e do períneo');
INSERT INTO servicos_cid (cid_id, descricao) VALUES
('N90.9', 'Transtorno não-inflamatório e não especificado da vulva e do períneo'),
('N91.0', 'Amenorréia primária'),
('N91.1', 'Amenorréia secundária'),
('N91.2', 'Amenorréia, não especificada'),
('N91.3', 'Oligomenorréia primária'),
('N91.4', 'Oligomenorréia secundária'),
('N91.5', 'Oligomenorréia, não especificada'),
('N92.0', 'Menstruação excessiva e freqüente com ciclo regular'),
('N92.1', 'Menstruação excessiva e freqüente com ciclo irregular'),
('N92.2', 'Menstruação excessiva na puberdade'),
('N92.3', 'Sangramento da ovulação'),
('N92.4', 'Sangramento abundante na pré-menopausa'),
('N92.5', 'Outros tipos especificados de irregularidade da menstruação'),
('N92.6', 'Menstruação irregular, não especificada'),
('N93.0', 'Sangramentos pós-coito ou de contato'),
('N93.8', 'Outros sangramentos anormais especificados do útero e da vagina'),
('N93.9', 'Sangramento anormal do útero ou da vagina, não especificado'),
('N94.0', 'Ovulação dolorosa [Mittelschmerz]'),
('N94.1', 'Dispareunia'),
('N94.2', 'Vaginismo'),
('N94.3', 'Síndrome de tensão pré-menstrual'),
('N94.4', 'Dismenorréia primária'),
('N94.5', 'Dismenorréia secundária'),
('N94.6', 'Dismenorréia não especificada'),
('N94.8', 'Outras afecções especificadas associadas com os órgãos genitais femininos e com o ciclo menstrual'),
('N94.9', 'Afecções não especificadas associadas com os órgãos genitais femininos e com o ciclo menstrual'),
('N95.0', 'Sangramento pós-menopausa'),
('N95.1', 'Estado da menopausa e do climatério feminino'),
('N95.2', 'Vaginite atrófica pós-menopausa'),
('N95.3', 'Condições associadas com a menopausa artificial'),
('N95.8', 'Outros transtornos especificados da menopausa e da perimenopausa'),
('N95.9', 'Transtorno não especificado da menopausa e da perimenopausa'),
('N96', 'Abortamento habitual'),
('N97.0', 'Infertilidade feminina associada à anovulação'),
('N97.1', 'Infertilidade feminina de origem tubária'),
('N97.2', 'Infertilidade feminina de origem uterina'),
('N97.3', 'Infertilidade feminina de origem cervical'),
('N97.4', 'Infertilidade feminina associada à fatores do parceiro'),
('N97.5', 'Infertilidade feminina de outra origem'),
('N97.9', 'Infertilidade feminina não especificada'),
('N98.0', 'Infecção associada à inseminação artificial'),
('N98.1', 'Hiperestimulação dos ovários'),
('N98.2', 'Complicações relacionadas com a tentativa de introdução do óvulo fecundado artificialmente (in vitro)'),
('N98.3', 'Complicações relacionadas com a tentativa de transferência do embrião'),
('N98.8', 'Outras complicações associadas à fecundação artificial'),
('N98.9', 'Complicações não especificadas associadas à fecundação artificial'),
('N99.0', 'Insuficiência renal pós-procedimentos'),
('N99.1', 'Estreitamento de uretra pós-procedimentos'),
('N99.2', 'Aderências pós-operatórias da vagina'),
('N99.3', 'Prolapso de cúpula de vagina pós-histerectomia'),
('N99.4', 'Aderências do peritônio pélvico pós-procedimentos'),
('N99.5', 'Mau funcionamento de abertura externa (estoma) do trato urinário'),
('N99.8', 'Outros transtornos pós-procedimentos do aparelho geniturinário'),
('N99.9', 'Transtorno pós-procedimento não especificado do aparelho geniturinário'),
('O00.0', 'Gravidez abdominal'),
('O00.1', 'Gravidez tubária'),
('O00.2', 'Gravidez ovariana'),
('O00.8', 'Outras formas de gravidez ectópica'),
('O00.9', 'Gravidez ectópica, não especificada'),
('O01.0', 'Mola hidatiforme clássica'),
('O01.1', 'Mola hidatiforme incompleta ou parcial'),
('O01.9', 'Mola hidatiforme não especificada'),
('O02.0', 'Ovo claro e mola não-hidatiforme'),
('O02.1', 'Aborto retido'),
('O02.8', 'Outros produtos anormais da concepção especificados'),
('O02.9', 'Produto anormal da concepção, não especificado'),
('O03.0', 'Aborto espontâneo - incompleto, complicado por infecção do trato genital ou dos órgãos pélvicos'),
('O03.1', 'Aborto espontâneo - incompleto, complicado por hemorragia excessiva ou tardia'),
('O03.2', 'Aborto espontâneo - incompleto, complicado por embolia'),
('O03.3', 'Aborto espontâneo - incompleto, com outras complicações ou com complicações não especificadas'),
('O03.4', 'Aborto espontâneo - incompleto, sem complicações'),
('O03.5', 'Aborto espontâneo - completo ou não especificado, complicado por infecções do trato genital ou dos órgãos pélvicos'),
('O03.6', 'Aborto espontâneo - completo ou não especificado, complicado por hemorragia excessiva ou tardia'),
('O03.7', 'Aborto espontâneo - completo ou não especificado, complicado por embolia'),
('O03.8', 'Aborto espontâneo - completo ou não especificado, com outras complicações ou com complicações não especificadas'),
('O03.9', 'Aborto espontâneo - completo ou não especificado, sem complicações'),
('O04.0', 'Aborto por razões médicas e legais - incompleto, complicado por infecção do trato genital ou dos órgãos pélvicos'),
('O04.1', 'Aborto por razões médicas e legais - incompleto, complicado por hemorragia excessiva ou tardia'),
('O04.2', 'Aborto por razões médicas e legais - incompleto, complicado por embolia'),
('O04.3', 'Aborto por razões médicas e legais - incompleto, com outras complicações ou com complicações não especificadas'),
('O04.4', 'Aborto por razões médicas e legais - incompleto, sem complicações'),
('O04.5', 'Aborto por razões médicas e legais - completo ou não especificado, complicado por infecções do trato genital ou dos órgãos pélvicos'),
('O04.6', 'Aborto por razões médicas e legais - completo ou não especificado, complicado por hemorragia excessiva ou tardia'),
('O04.7', 'Aborto por razões médicas e legais - completo ou não especificado, complicado por embolia'),
('O04.8', 'Aborto por razões médicas e legais - completo ou não especificado, com outras complicações ou com complicações não especificadas'),
('O04.9', 'Aborto por razões médicas e legais - completo ou não especificado, sem complicações'),
('O05.0', 'Outros tipos de aborto - incompleto, complicado por infecção do trato genital ou dos órgãos pélvicos'),
('O05.1', 'Outros tipos de aborto - incompleto, complicado por hemorragia excessiva ou tardia'),
('O05.2', 'Outros tipos de aborto - incompleto, complicado por embolia'),
('O05.3', 'Outros tipos de aborto - incompleto, com outras complicações ou com complicações não especificadas'),
('O05.4', 'Outros tipos de aborto - incompleto, sem complicações'),
('O05.5', 'Outros tipos de aborto - completo ou não especificado, complicado por infecções do trato genital ou dos órgãos pélvicos'),
('O05.6', 'Outros tipos de aborto - completo ou não especificado, complicado por hemorragia excessiva ou tardia'),
('O05.7', 'Outros tipos de aborto - completo ou não especificado, complicado por embolia'),
('O05.8', 'Outros tipos de aborto - completo ou não especificado, com outras complicações ou com complicações não especificadas'),
('O05.9', 'Outros tipos de aborto - completo ou não especificado, sem complicações'),
('O06.0', 'Aborto não especificado - incompleto, complicado por infecção do trato genital ou dos órgãos pélvicos'),
('O06.1', 'Aborto não especificado - incompleto, complicado por hemorragia excessiva ou tardia'),
('O06.2', 'Aborto não especificado - incompleto, complicado por embolia'),
('O06.3', 'Aborto não especificado - incompleto, com outras complicações ou com complicações não especificadas'),
('O06.4', 'Aborto não especificado - incompleto, sem complicações'),
('O06.5', 'Aborto não especificado - completo ou não especificado, complicado por infecções do trato genital ou dos órgãos pélvicos'),
('O06.6', 'Aborto não especificado - completo ou não especificado, complicado por hemorragia excessiva ou tardia'),
('O06.7', 'Aborto não especificado - completo ou não especificado, complicado por embolia'),
('O06.8', 'Aborto não especificado - completo ou não especificado, com outras complicações ou com complicações não especificadas'),
('O06.9', 'Aborto não especificado - completo ou não especificado, sem complicações'),
('O07.0', 'Falha de aborto provocado por razões médicas, complicado por infecção do trato genital ou dos órgãos pélvicos'),
('O07.1', 'Falha de aborto provocado por razões médicas, complicado por hemorragia tardia ou excessiva'),
('O07.2', 'Falha de aborto provocado por razões médicas, complicado por embolia'),
('O07.3', 'Falha de aborto provocado por razões médicas, com outras complicações ou com complicações não especificadas'),
('O07.4', 'Falha de aborto provocado por razões médicas, sem complicações'),
('O07.5', 'Outras formas, e as não especificadas, de falha na provocação de aborto, complicadas por infecção do trato genital e por infecção dos órgãos pélvicos'),
('O07.6', 'Outras formas, e as não especificadas, de falha na provocação de aborto, complicadas por hemorragia tardia ou excessiva'),
('O07.7', 'Outras formas, e as não especificadas, de falha na provocação de aborto, complicadas por embolia'),
('O07.8', 'Outras formas, e as não | |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF-agents suite for loading Adversarial environments.
Adds two new functions: reset_agent, and step_adversary in addition to usual
RL env functions. Therefore we have the following environment functions:
env.reset(): completely resets the environment and removes anything the
adversary has built.
env.reset_agent(): resets the position of the agent, but does not
remove the obstacles the adversary has created when building the env.
env.step(): steps the agent as before in the environment. i.e. if the agent
passes action 'left' it will move left.
env.step_adversary(): processes an adversary action, which involves choosing
the location of the agent, goal, or an obstacle.
Adds additional functions for logging metrics related to the generated
environments, like the shortest path length to the goal.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import gym
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.environments import batched_py_environment
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
from tf_agents.environments import wrappers
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts_lib
from tf_agents.utils import nest_utils
@gin.configurable
def load(environment_name,
         discount=1.0,
         max_episode_steps=None,
         gym_env_wrappers=(),
         env_wrappers=(),
         spec_dtype_map=None,
         gym_kwargs=None,
         auto_reset=True):
  """Loads the named adversarial environment and applies the given wrappers.

  By default a TimeLimit wrapper caps episode lengths at the limit registered
  with the environment's spec.

  Args:
    environment_name: Name of the registered Gym environment to load.
    discount: Discount factor for the environment.
    max_episode_steps: If None, falls back to the step limit registered in the
      environment's spec. A value of 0 (or a spec without a limit) disables
      the TimeLimit wrapper entirely.
    gym_env_wrappers: Iterable of wrapper classes applied directly to the raw
      gym environment, in order.
    env_wrappers: Iterable of wrapper classes applied to the gym-wrapped
      (PyEnvironment) environment, in order.
    spec_dtype_map: Optional dict mapping gym specs to tf dtypes to use as the
      default dtype for the tensors. Configure a custom mapping through Gin by
      defining a gin-configurable function returning the desired mapping, e.g.
      `suite_gym.load.spec_dtype_map = @get_custom_mapping()`.
    gym_kwargs: Optional kwargs forwarded to the Gym environment class.
    auto_reset: If True (default), reset the environment automatically after a
      terminal state is reached.

  Returns:
    A PyEnvironment instance.
  """
  gym_spec = gym.spec(environment_name)
  raw_env = gym_spec.make(**(gym_kwargs or {}))

  # Fall back to the registered episode limit when the caller gave none.
  if max_episode_steps is None and gym_spec.max_episode_steps is not None:
    max_episode_steps = gym_spec.max_episode_steps

  for gym_wrapper_cls in gym_env_wrappers:
    raw_env = gym_wrapper_cls(raw_env)

  env = AdversarialGymWrapper(
      raw_env,
      discount=discount,
      spec_dtype_map=spec_dtype_map,
      auto_reset=auto_reset,
  )

  if max_episode_steps is not None and max_episode_steps > 0:
    env = wrappers.TimeLimit(env, max_episode_steps)

  for wrapper_cls in env_wrappers:
    env = wrapper_cls(env)

  return env
class AdversarialGymWrapper(gym_wrapper.GymWrapper):
  """Wrapper implementing PyEnvironment interface for adversarial environments.

  Implements special reset_agent and step_adversary functions that are not
  present in a normal Gym environment, and builds separate specs
  (adversary_observation_spec, adversary_action_spec, adversary_time_step_spec)
  describing the adversary's interaction with the environment.
  """

  def __init__(self,
               gym_env,
               discount=1.0,
               spec_dtype_map=None,
               match_obs_space_dtype=True,
               auto_reset=False,
               simplify_box_bounds=True):
    """Wraps `gym_env` and derives the adversary's specs.

    Args:
      gym_env: Adversarial gym environment; must expose
        `adversary_observation_space` and `adversary_action_space` in
        addition to the usual gym spaces.
      discount: Discount used when building transition time steps.
      spec_dtype_map: Optional dict mapping gym specs to tf dtypes.
      match_obs_space_dtype: Whether observations are cast to the spec dtype.
      auto_reset: If True, `step` resets the agent after a terminal state.
      simplify_box_bounds: Forwarded to the parent GymWrapper.
    """
    super(AdversarialGymWrapper, self).__init__(
        gym_env, discount, spec_dtype_map, match_obs_space_dtype, auto_reset,
        simplify_box_bounds)
    # Build the adversary's specs from the extra adversary_* spaces the
    # wrapped gym environment provides.
    self.adversary_observation_spec = gym_wrapper.spec_from_gym_space(
        self._gym_env.adversary_observation_space, name='observation')
    self.adversary_action_spec = gym_wrapper.spec_from_gym_space(
        self._gym_env.adversary_action_space, name='action')
    self.adversary_time_step_spec = ts_lib.time_step_spec(
        self.adversary_observation_spec, self.reward_spec())
    # Flattened adversary observation spec, cached for the per-element dtype
    # matching done in _adversary_to_obs_space_dtype.
    self.adversary_flat_obs_spec = tf.nest.flatten(
        self.adversary_observation_spec)

  def _reset(self):
    """Fully resets the environment, removing anything the adversary built.

    NOTE(review): the observation is converted with the *adversary's*
    observation spec — presumably a full reset yields the adversary's
    observation since the adversary acts first; confirm against the env.
    """
    observation = self._gym_env.reset()
    self._info = None
    self._done = False
    if self._match_obs_space_dtype:
      observation = self._adversary_to_obs_space_dtype(observation)
    reset_step = ts_lib.restart(observation, reward_spec=self.reward_spec())
    return reset_step

  def reset_random(self):
    """Resets via the env's reset_random and returns the agent's restart step."""
    observation = self._gym_env.reset_random()
    self._info = None
    self._done = False
    if self._match_obs_space_dtype:
      # Agent-side observation: use the (parent class) agent spec conversion.
      observation = self._to_obs_space_dtype(observation)
    self._current_time_step = ts_lib.restart(
        observation, reward_spec=self.reward_spec())
    return self._current_time_step

  def reset_agent(self):
    """Resets only the agent, keeping the environment the adversary created."""
    observation = self._gym_env.reset_agent()
    self._info = None
    self._done = False
    if self._match_obs_space_dtype:
      observation = self._to_obs_space_dtype(observation)
    self._current_time_step = ts_lib.restart(
        observation, reward_spec=self.reward_spec())
    return self._current_time_step

  def _adversary_to_obs_space_dtype(self, observation):
    """Casts an adversary observation to the dtypes of the adversary spec."""
    # Make sure we handle cases where observations are provided as a list.
    flat_obs = nest_utils.flatten_up_to(
        self.adversary_observation_spec, observation)
    matched_observations = []
    for spec, obs in zip(self.adversary_flat_obs_spec, flat_obs):
      matched_observations.append(np.asarray(obs, dtype=spec.dtype))
    return tf.nest.pack_sequence_as(self.adversary_observation_spec,
                                    matched_observations)

  def _step(self, action):
    """Steps the *agent* and returns a transition/termination time step."""
    # Automatically reset the environments on step if they need to be reset.
    if self._auto_reset and self._done:
      return self.reset_agent()
    # Discrete actions arrive as 0-d arrays; unwrap to a Python scalar.
    action = action.item() if self._action_is_discrete else action
    observation, reward, self._done, self._info = self._gym_env.step(action)
    if self._match_obs_space_dtype:
      observation = self._to_obs_space_dtype(observation)
    reward = np.asarray(reward, dtype=self.reward_spec().dtype)
    outer_dims = nest_utils.get_outer_array_shape(reward, self.reward_spec())
    if self._done:
      return ts_lib.termination(observation, reward, outer_dims=outer_dims)
    else:
      return ts_lib.transition(observation, reward, self._discount,
                               outer_dims=outer_dims)

  def step_adversary(self, action):
    """Processes one adversary action (placing the agent, goal, or obstacle)."""
    action = action.item() if self._action_is_discrete else action
    observation, reward, self._done, self._info = self._gym_env.step_adversary(
        action)
    if self._match_obs_space_dtype:
      observation = self._adversary_to_obs_space_dtype(observation)
    reward = np.asarray(reward, dtype=self.reward_spec().dtype)
    outer_dims = nest_utils.get_outer_array_shape(reward, self.reward_spec())
    if self._done:
      return ts_lib.termination(observation, reward, outer_dims=outer_dims)
    else:
      return ts_lib.transition(observation, reward, self._discount,
                               outer_dims=outer_dims)
@gin.configurable
class AdversarialBatchedPyEnvironment(
    batched_py_environment.BatchedPyEnvironment):
  """Batch together multiple adversarial py environments acting as single batch.

  The environments should only access shared python variables using
  shared mutex locks (from the threading module).

  In addition to the usual batched-environment API, this exposes the
  adversary specs of the first environment plus batched accessors for
  environment-generation metrics (blocks placed, distance to goal,
  shortest path length, etc.).
  """

  def __init__(self, envs, multithreading=True):
    super(AdversarialBatchedPyEnvironment, self).__init__(
        envs, multithreading=multithreading)
    # Adversary specs are assumed identical across all envs in the batch,
    # so the first environment's specs stand in for the whole batch.
    self.adversary_action_spec = self._envs[0].adversary_action_spec
    self.adversary_observation_spec = self._envs[0].adversary_observation_spec
    self.adversary_time_step_spec = self._envs[0].adversary_time_step_spec

  def _batch_metric(self, fn):
    """Returns fn(env) cast to float32 for each env, batched together.

    Args:
      fn: Callable mapping a single py environment to a scalar metric.

    Returns:
      A float32 tensor with one entry per environment in the batch.
    """
    if self._num_envs == 1:
      return nest_utils.batch_nested_tensors(
          tf.cast(fn(self._envs[0]), tf.float32))
    # BUG FIX: the original passed a lambda and the env list directly to
    # tf.stack, which is invalid — tf.stack takes a list of tensors (and an
    # integer axis), so the metric must be evaluated per environment first.
    return tf.stack([tf.cast(fn(env), tf.float32) for env in self._envs])

  def get_num_blocks(self):
    """Number of obstacles the adversary placed, per environment."""
    return self._batch_metric(lambda env: env.n_clutter_placed)

  def get_distance_to_goal(self):
    """Distance from the agent to the goal, per environment."""
    return self._batch_metric(lambda env: env.distance_to_goal)

  def get_deliberate_placement(self):
    """Whether the adversary deliberately placed the agent, per environment."""
    return self._batch_metric(lambda env: env.deliberate_agent_placement)

  def get_goal_x(self):
    """Goal x coordinate, per environment."""
    return self._batch_metric(lambda env: env.get_goal_x())

  def get_goal_y(self):
    """Goal y coordinate, per environment."""
    return self._batch_metric(lambda env: env.get_goal_y())

  def get_passable(self):
    """Whether a path from agent to goal exists, per environment."""
    return self._batch_metric(lambda env: env.passable)

  def get_shortest_path_length(self):
    """Shortest path length from agent to goal, per environment."""
    return self._batch_metric(lambda env: env.shortest_path_length)

  def reset_agent(self):
    """Resets only the agents, keeping the adversary-built environments."""
    if self._num_envs == 1:
      return nest_utils.batch_nested_array(self._envs[0].reset_agent())
    else:
      time_steps = self._execute(lambda env: env.reset_agent(), self._envs)
      return nest_utils.stack_nested_arrays(time_steps)

  def reset_random(self):
    """Resets every environment to a random configuration."""
    if self._num_envs == 1:
      return nest_utils.batch_nested_array(self._envs[0].reset_random())
    else:
      time_steps = self._execute(lambda env: env.reset_random(), self._envs)
      return nest_utils.stack_nested_arrays(time_steps)

  def step_adversary(self, actions):
    """Steps every environment's adversary with the batched `actions`.

    Args:
      actions: Batched adversary actions, one per environment.

    Returns:
      Batched adversary time steps.

    Raises:
      ValueError: if the leading dimension of `actions` does not match the
        batch size.
    """
    if self._num_envs == 1:
      actions = nest_utils.unbatch_nested_array(actions)
      time_steps = self._envs[0].step_adversary(actions)
      return nest_utils.batch_nested_array(time_steps)
    else:
      unstacked_actions = batched_py_environment.unstack_actions(actions)
      if len(unstacked_actions) != self.batch_size:
        raise ValueError(
            'Primary dimension of action items does not match '
            'batch size: %d vs. %d' % (len(unstacked_actions), self.batch_size))
      time_steps = self._execute(
          lambda env_action: env_action[0].step_adversary(env_action[1]),
          zip(self._envs, unstacked_actions))
      return nest_utils.stack_nested_arrays(time_steps)
class AdversarialTFPyEnvironment(tf_py_environment.TFPyEnvironment):
"""Override TFPyEnvironment to add support for additional adversary functions.
Note that the 'step' function resets the agent, but 'reset' resets the whole
environment. Therefore use 'reset_agent' to reset just the agent to its
initial location, without resetting the environment the adversary has created.
The time_step_spec and other specs relate to the agent's observations, and
there are additional specs for the adversarial policy that alters the
environment.
The adversary's specs should match the output of reset(), step_adversary(),
_current_time_step(), and self.time_step, while the agent's specs should
match reset_agent(), step(), _current_agent_time_step(), and
self._agent_time_step.
"""
def __init__(self, environment, check_dims=False, isolation=False):
"""Calls parent constructors and initializes adversary specs.
Args:
environment: A tf-agents PyEnvironment, or a `callable` that returns
an environment of this form.
check_dims: Whether the batch dimensions should be checked in the 'step'
function.
isolation: If True, create a dedicated thread for interactions with the
environment. If Falso, interactions with the environment occur within
whichever thread calls a method belonging to this class. See tf-agents
parent class documentation for more details.
"""
# Prevent parent class from using its own batched environment
super(AdversarialTFPyEnvironment, self).__init__(
environment, check_dims=check_dims, isolation=isolation)
if not environment.batched:
self._env = AdversarialBatchedPyEnvironment(
[environment], multithreading=not self._pool)
self._agent_time_step = None
self.adversary_action_spec = tensor_spec.from_spec(
self._env.adversary_action_spec)
self.adversary_time_step_spec = tensor_spec.from_spec(
self._env.adversary_time_step_spec)
self.adversary_observation_spec = tensor_spec.from_spec(
self._env.adversary_observation_spec)
self._adversary_time_step_dtypes = [
s.dtype for s in tf.nest.flatten(self.adversary_time_step_spec)
]
# Make sure this is called without conversion from tf.function.
@tf.autograph.experimental.do_not_convert()
def reset_agent(self):
def _reset_py():
with tf_py_environment._check_not_called_concurrently(self._lock): # pylint:disable=protected-access
self._agent_time_step = self._env.reset_agent()
def _isolated_reset_py():
return self._execute(_reset_py)
with tf.name_scope('reset_agent'):
reset_op = tf.numpy_function(
_isolated_reset_py,
[], # No inputs.
[],
name='reset_py_func')
| |
function: ascardlist()
try:
self.cat_name = cards['CAT-NAME']
return hdu
except KeyError:
try:
self.cat_name = cards['EXTNAME']
return hdu
except KeyError:
self.cat_name = self.class_module.catid
pass
#No CAT-NAME or EXTNAME found, just return second HDU
if len(fits_cat)>=2:
return fits_cat[1]
#Only one HDU, no name info, return None
return
    def _get_ids(self):
        """Find source name information and return it as an array.

        Scans the FITS header cards for a column holding source names, in
        order of preference: a TBUCD card declaring ID_MAIN, a UCD declared
        inside a card comment, or a TTYPE card whose value looks like a name
        column. Returns self.hdu.data.field(name_key) for the matched column.
        Raises KeyError (after printing a diagnostic) if the key lookup fails.
        """
        name_key = ''
        # deprecated, removed at 3.2
        #cards = self.hdu.header.ascardlist()
        #cards = pf.CardList(self.hdu.header.cards)
        cards = self.hdu.header.cards
        #First check for UCD in header
        for card in cards:
            # NOTE(review): `card in [...]` compares the Card object itself
            # against strings; this was probably meant to be `card.value in
            # [...]` — confirm, since as written this branch may never match.
            # Also `card.key` below is the deprecated spelling of
            # `card.keyword`, and the result is a Card (no `.value`), unlike
            # the comment-UCD branch below which takes `.value`.
            if card.keyword[:5]=='TBUCD' and card in ['ID_MAIN','meta.id;meta.main']:
                name_key = cards['TTYPE'+card.key[5:8]]
                break
            #Sometimes UCDs are declared in comments
            #May be fragile - depends on specific format for comments as in gamma-egr catalog
            value_comment = card.image.split('/') ##ascardimage().split('/')
            if len(value_comment)>1:
                comment = value_comment[1]
                # Take the first whitespace-delimited token after 'UCD' in the
                # comment, expecting something like 'UCD=ID_MAIN.'.
                ucd_string = comment[comment.find('UCD'):].split()
                if ucd_string:
                    try:
                        if ucd_string[0].split('=')[1].strip('.')=='ID_MAIN':
                            # changed pyfits
                            name_key = cards[''.join(['TTYPE',card.keyword[5:8]])].value
                            break
                    except IndexError:
                        pass
            # Fall back to TTYPE cards whose value is a known name column.
            if card.keyword[:5]=='TTYPE' and (
                card.value.upper() in ['NAME','ID','PSR_NAME','SOURCE_NAME', 'SOURCE NAME' ]
                or card.value.upper().endswith('NAME')
                ):
                name_key = card.value
                break
        if name_key=='':
            print 'Catalog %s: did not find name column' %self.class_module
        try:
            return self.hdu.data.field(name_key)
        except KeyError:
            print 'srcid: key %s not found in list %s' % (name_key, self.hdu.data.field)
            raise
        # NOTE(review): unreachable — the try block above always returns or
        # re-raises.
        return
    def _get_positions(self):
        """Find the header columns containing position info.

        Searches for equatorial coordinates first (by POS_EQ_* UCDs, then a
        series of known column-name conventions), falling back to galactic
        coordinates (setting self.coords to SkyDir.GALACTIC in that case).

        Returns:
            A tuple (lon, lat) of float arrays read from the matched columns.
            (Despite the original docstring, this returns arrays, not SkyDirs.)

        Raises:
            SrcidError: if no position columns could be identified.
        """
        #the Header class has been rewritten, and the CardList class is deprecated.
        #http://stsdas.stsci.edu/stsci_python_sphinxdocs_2.13/pyfits/users_guide/users_headers.html
        #cards = pf.CardList(self.hdu.header.cards)
        #ucds = cards.filter_list('TBUCD*') #### Use :meth:`Header.values` instead.
        #ttypes = cards.filter_list('TTYPE*') ####
        # Wildcard header lookups return filtered Header views keyed by card.
        ucds = self.hdu.header['TBUCD*']
        ttypes= self.hdu.header['TTYPE*']
        lon_key = lat_key = ''
        # Equatorial coordinates: this guard is always true here (lon_key was
        # just cleared); kept parallel to the galactic fallback guard below.
        if not lon_key:
            if 'POS_EQ_RA_MAIN' in ucds.values():
                # Map the UCD card back to its TTYPE card to get the column.
                ucd = ucds.keys()[ucds.values().index('POS_EQ_RA_MAIN')]
                lon_key = ttypes[''.join(['TTYPE',ucd[5:8]])]
                #Assumes that if POS_EQ_RA_MAIN exists, POS_EQ_DEC_MAIN does too.
                ucd = ucds.keys()[ucds.values().index('POS_EQ_DEC_MAIN')]
                lat_key = ttypes[''.join(['TTYPE',ucd[5:8]])]
            elif 'RAdeg' in ttypes.values():
                lon_key = ttypes[ttypes.keys()[ttypes.values().index('RAdeg')]]
                lat_key = ttypes[ttypes.keys()[ttypes.values().index('DEdeg')]]
            elif '_RAJ2000' in ttypes.values():
                lon_key = ttypes[ttypes.keys()[ttypes.values().index('_RAJ2000')]]
                lat_key = ttypes[ttypes.keys()[ttypes.values().index('_DEJ2000')]]
            elif 'RAJ2000' in ttypes.values():
                lon_key = ttypes[ttypes.keys()[ttypes.values().index('RAJ2000')]]
                lat_key = ttypes[ttypes.keys()[ttypes.values().index('DEJ2000')]]
            elif 'RAJD' in ttypes.values(): # only for bigbfile?
                lon_key = ttypes[ttypes.keys()[ttypes.values().index('RAJD')]]
                lat_key = ttypes[ttypes.keys()[ttypes.values().index('DECJD')]]
            elif 'RA' in ttypes.values():
                lon_key = ttypes[ttypes.keys()[ttypes.values().index('RA')]]
                # The matching declination column may be named 'DE' or 'DEC'.
                try:
                    lat_key = ttypes[ttypes.keys()[ttypes.values().index('DE')]]
                except ValueError:
                    lat_key = ttypes[ttypes.keys()[ttypes.values().index('DEC')]]
            elif 'RA (J2000.0)' in ttypes.values(): # new bzcat, at least
                lon_key = ttypes[ttypes.keys()[ttypes.values().index('RA (J2000.0)')]]
                lat_key = ttypes[ttypes.keys()[ttypes.values().index('Dec (J2000.0)')]]
        # Galactic fallback: no equatorial columns found.
        if not lon_key:
            self.coords = skymaps.SkyDir.GALACTIC
            if 'POS_GAL_LON' in ucds.values():
                lon_key = ucds.keys()[ucds.values().index('POS_GAL_LON')]
                lat_key = ucds.keys()[ucds.values().index('POS_GAL_LAT')]
            elif '_GLON' in ttypes.values():
                lon_key = ttypes[ttypes.keys()[ttypes.values().index('_GLON')]]
                lat_key = ttypes[ttypes.keys()[ttypes.values().index('_GLAT')]]
            elif 'GLON' in ttypes.values():
                lon_key = ttypes[ttypes.keys()[ttypes.values().index('GLON')]]
                lat_key = ttypes[ttypes.keys()[ttypes.values().index('GLAT')]]
        if lon_key:
            return (self.hdu.data.field(lon_key).astype('float'),
                    self.hdu.data.field(lat_key).astype('float'))
        else:
            raise SrcidError('did not find position info in catalog' )
    def _make_selection(self):
        """Make selections specified in class module.

        Translates each selection expression from the class module (which
        references columns as @CATID_FIELD and may use DEFNULL(...) and
        '||'/'&&' operators) into a numpy expression, evaluates it, and ANDs
        the results into a boolean mask over self.hdu.data. ANGSEP-based and
        empty selections are skipped. Returns the combined boolean mask.
        """
        selections = [x for x in self.class_module.selection if (x != '' and 'ANGSEP' not in x)]
        dat = self.hdu.data
        mask = np.array([True]*len(dat))
        if not selections: return mask
        # Matches @CATID_FIELD references, capturing the field name.
        catid_pattern = re.compile('@%s_([A-Za-z0-9]+)'%self.class_module.catid.upper())
        fields = {}
        for sel in selections:
            field_names = catid_pattern.findall(sel)
            for f in field_names:
                field = dat.field(f)
                # Rewrite only the first @CATID_ reference each pass; the
                # loop visits one captured field name per iteration.
                sel = catid_pattern.sub(f,sel,1)
                # DEFNULL(field, value): substitute `value` for NaNs in-place.
                defnull_pattern = re.compile('DEFNULL\(%s,([0-9e\+\.]+)\)'%f)
                dn_matches = defnull_pattern.findall(sel)
                for dnm in dn_matches:
                    field[np.isnan(field)]=dnm
                fields[f]=field
                sel = defnull_pattern.sub(f,sel)
                # Replace the bare field name with a lookup into `fields`,
                # which the eval below resolves.
                sel = sel.replace(f,'fields["%s"]'%f)
            # Rewrite C-style boolean operators as numpy logical functions.
            if '||' in sel:
                sel = 'np.logical_or(%s)'%(sel.replace('||',',').strip('()'))
            elif '&&' in sel:
                sel = 'np.logical_and(%s)'%(sel.replace('&&',',').strip('()'))
            try:
                # NOTE(review): eval of a string built from the class-module
                # config — acceptable only because class modules are trusted
                # local code; never feed this untrusted input.
                mask = np.logical_and(mask,eval(sel))
            except:
                print 'failed to evaluate selection "%s"' % sel
        return mask
    def _get_foms(self):
        """Compute figure of merit for sources, as specified in class module.

        Rewrites the class module's figure_of_merit expression (same
        @CATID_FIELD / DEFNULL syntax as _make_selection, plus exp/LOG10)
        into a numpy expression, evaluates it over the catalog columns, and
        assigns the result to each source in self.sources by name. No-op if
        the class module defines no figure of merit.
        """
        fom = self.class_module.figure_of_merit
        dat = self.hdu.data
        if not fom:
            return
        catid_pattern = re.compile('@%s_([A-Za-z0-9_]+)'%self.class_module.catid.upper())
        # Keyed by field name; 'Name' holds the per-row source names used to
        # build the final name->fom mapping.
        fields = {'Name':self._get_ids()}
        field_names = catid_pattern.findall(fom)
        for f in field_names:
            field = dat.field(f)
            fom = catid_pattern.sub(f,fom,1)
            #Treat DEFNULL in a log slightly differently
            #This is an awful kludge to deal with the pulsar_fom catalog.
            #Hopefully it won't break anything in the future.
            # DEFNULL outside LOG10: replace NaNs only.
            defnull_pattern = re.compile('(?<!LOG10\()DEFNULL\(%s,([0-9e\.\+]+)\)'%f)
            dn_matches = defnull_pattern.findall(fom)
            for dnm in dn_matches:
                field[np.isnan(field)] = dnm
            # DEFNULL inside LOG10: also replace zeros, which would make the
            # log blow up.
            defnull_pattern2 = re.compile('(?<=LOG10\()DEFNULL\(%s,([0-9e\.\+]+)\)'%f)
            dn_matches2 = defnull_pattern2.findall(fom)
            for dnm in dn_matches2:
                field[np.logical_or(np.isnan(field),field==0)] = dnm
            fields[f] = field
            fom = defnull_pattern.sub(f,fom)
            fom = defnull_pattern2.sub(f,fom)
            fom = fom.replace(f,'fields["%s"]'%f)
        # Map the expression's math functions onto numpy equivalents.
        fom = fom.replace('exp','np.exp')
        fom = fom.replace('LOG10','np.log10')
        # NOTE(review): eval of a config-derived string — trusted class
        # modules only (see _make_selection).
        fom_dict = dict(zip([' '.join([self.name_prefix,x]).strip() for x in fields['Name']],eval(fom)))
        for source in self.sources:
            source.fom = fom_dict[source.name]
def select_circle(self,position,radius,trapezoid=False):
"""Return an array of CatalogSources within radius degrees of position.
Arguments:
position : SkyDir for center of selection region.
radius : radius of selection region.
"""
ras,decs = self.ras,self.decs
if trapezoid:
tmask = trap_mask(self.ras,self.decs,position,radius)
sources = self.sources[tmask]
rmask = fitstools.rad_mask(self.ras[tmask],self.decs[tmask],position,radius,mask_only=True)
return sources[rmask]
else:
rmask = fitstools.rad_mask(self.ras,self.decs,position,radius,mask_only=True)
return self.sources[rmask]
def local_density(self,position,radius=4,fom=1.0,trap_mask=False):
"""Return the local density of catalog sources in a radius-degree region about position.
Only counts sources with figures of merit >= fom. The default fom for CatalogSources should be 1.,
so the default fom=0 should not cut anything out. However, this method ought to be independent
of the implementation of the fom in CatalogSource, so this should get refactored at some point."""
n_sources = sum([1. for source in self.select_circle(position,radius,trapezoid = trap_mask) if
source.fom >= fom])
#If no sources within radius, set n_sources = 1 to give lower limit on density
#Maybe better to expand the radius in this case?
if n_sources < 1 : n_sources = 1
solid_angle = (1-math.cos(np.radians(radius)))*2*math.pi
solid_angle = np.degrees(np.degrees(solid_angle))
return n_sources/solid_angle
    def associate(self,position,error_ellipse,unique = True,trap_mask=False,accept_in_r95=False):
        """Given a skydir and error ellipse, return associations.
        Arguments:
            position : A SkyDir representing the location of the source to be associated.
            error_ellipse : Sequence of length 3 representing major and minor axes and position angle of an
                            error ellipse in degrees
            unique : If True, renormalize so at most one counterpart per LAT source is favored.
            trap_mask : Passed through to select_circle / posterior_probability.
            accept_in_r95 : If True, accept anything within r95 as an association
        Returns a list of (source, posterior_probability, delta_ts) tuples for
        all sources with posterior probability greater than prob_threshold,
        sorted by decreasing probability.
        """
        # EAFP check of the ellipse argument: a scalar raises TypeError on
        # len(), a wrong-length sequence fails the assert.
        try:
            assert(len(error_ellipse)==3)
        except TypeError:
            if self.verbosity > 0:
                print("Got scalar instead of sequence for error ellipse: Assuming this is 1-sigma error radius in degrees")
            error_ellipse = [error_ellipse]*2+[0]
        except AssertionError:
            print("Wrong length for error_ellipse: Needed length 3, got %i"%len(error_ellipse))
            return
        # Derive a search radius from the semi-major axis when none was set.
        # conv95 presumably scales 1-sigma to 95% containment -- TODO confirm.
        if self.source_mask_radius is None:
            self.source_mask_radius = error_ellipse[0]*conv95*5
            # Guard against degenerate ellipses: fall back to 1 degree.
            if np.isnan(self.source_mask_radius) or self.source_mask_radius == 0.:
                self.source_mask_radius = 1.
        #filter sources by position, ~5-sigma radius
        sources = self.select_circle(position,self.source_mask_radius,trapezoid=trap_mask)
        post_probs = np.array([source.posterior_probability(position,error_ellipse,trap_mask=trap_mask)
                               for source in sources])
        delta_ts_list = np.array([2*source.delta_logl(position, error_ellipse) for source in sources])
        #If desired, require no more than 1 counterpart per LAT source.
        if unique:
            inv_probs = 1-post_probs
            phk = np.zeros(post_probs.shape) #P(Hk) as defined in 1FGL paper
            # Normalization starts with the "no counterpart" hypothesis
            # (product of all non-association probabilities).
            norm = inv_probs.prod()
            for i in xrange(post_probs.shape[0]):
                # P(Hk): source i is the counterpart, all others are not.
                mask = np.zeros(post_probs.shape,dtype='bool')
                mask[i] = True
                phk[i] = post_probs[mask]*inv_probs[~mask].prod()
                norm += phk[i]
            post_probs = phk/norm
        #return sources above threshold with posterior probability, sorted by posterior probability
        if accept_in_r95:
            source_list = [(source, prob, delta_ts) for prob,source, delta_ts in zip(post_probs,sources,delta_ts_list)\
                           if prob > self.prob_threshold or np.degrees(source.skydir.difference(position))<error_ellipse[0]*conv95]
        else:
            source_list = [(source, prob, delta_ts) for prob,source, delta_ts in zip(post_probs,sources,delta_ts_list)\
                           if prob > self.prob_threshold]
        source_list.sort(key = lambda x:-x[1])
        #source_dict = dict((el[1].name,el) for el in source_list[:self.max_counterparts])
        return source_list
class GammaCatalog(Catalog):
    """A catalog of gamma-ray sources (i.e. sources with error circles comparable to LAT)"""
    def __init__(self,class_module,catalog_dir,verbosity = 1):
        """Load the catalog and build GammaRaySource objects.

        Arguments:
            class_module : module describing the catalog (catid, new_quantity, ...).
            catalog_dir : directory containing the catalog file.
            verbosity : verbosity level passed through to Catalog.init.
        """
        self.names,lons,lats = self.init(class_module,catalog_dir,verbosity = verbosity)
        errors = self.get_position_errors()
        # Search radius: three times the largest positional error in the catalog.
        self.source_mask_radius = 3*max(errors)
        self.mask = self._make_selection()
        self.sources = np.array([GammaRaySource(self,name,skymaps.SkyDir(lon,lat,self.coords),error)
                                 for name,lon,lat,error in zip(self.names,lons,lats,errors)])[self.mask]
        #Save ras and decs for use in select_circle. MUCH faster than using the SkyDirs.
        if self.coords == skymaps.SkyDir.GALACTIC:
            # Catalog stores galactic coordinates; convert once to RA/Dec.
            self.ras = np.array([source.skydir.ra() for source in self.sources])
            self.decs = np.array([source.skydir.dec() for source in self.sources])
        else:
            self.ras,self.decs = lons[self.mask],lats[self.mask]
    def get_position_errors(self):
        """Extract the per-source position uncertainties (degrees).

        Picks the first new_quantity expression that is not a LAT/ECOM one,
        rewrites its '@CATID_FIELD' token into a FITS column access, and
        evaluates the leading factor of the expression.
        Raises CatalogError if no position-uncertainty field can be found.
        """
        q = [x for x in self.class_module.new_quantity if
             (('LAT' not in x) and ('ECOM' not in x))][0]
        patt = re.compile('@%s_([A-Za-z0-9_]+)'%self.class_module.catid.upper())
        lhs,rhs = q.split('=')
        match = patt.search(rhs)
        if match:
            error_field = match.groups()[0]
            rhs = patt.sub('self.hdu.data.field("%s")'%error_field,rhs)
            # Only the first '*'-separated factor carries the column access.
            return eval(rhs.split('*')[0])
        else:
            raise CatalogError(self.cat_file,'Could not find position uncertainties.')
class ExtendedCatalog(Catalog):
    """A catalog of extended sources"""
    def __init__(self,class_module,catalog_dir,verbosity = 1):
        """Load the catalog and build ExtendedSource objects.

        Arguments:
            class_module : module describing the catalog (catid, new_quantity, ...).
            catalog_dir : directory containing the catalog file.
            verbosity : verbosity level passed through to Catalog.init.
        """
        self.names,lons,lats= self.init(class_module,catalog_dir,verbosity = verbosity)
        radii = self.get_radii()
        # Search radius: three times the largest source extension.
        self.source_mask_radius = max(radii)*3
        self.mask = self._make_selection()
        self.sources = np.array([ExtendedSource(self,name,skymaps.SkyDir(lon,lat,self.coords),radius)
                                 for name,lon,lat,radius in zip(self.names,lons,lats,radii)])[self.mask]
        # Cache RA/Dec arrays for select_circle; much faster than SkyDirs.
        if self.coords == skymaps.SkyDir.GALACTIC:
            self.ras = np.array([source.skydir.ra() for source in self.sources])
            self.decs = np.array([source.skydir.dec() for source in self.sources])
        else:
            self.ras,self.decs = lons[self.mask],lats[self.mask]
    def get_radii(self):
        """Extract per-source angular radii from the class module.

        Parses the first new_quantity expression, finds the additive term
        containing the '@CATID_' token (of the form FIELD/denominator),
        rewrites it into a FITS column access and evaluates it.
        """
        q = self.class_module.new_quantity[0]
        lhs,rhs = q.split('=')
        terms = rhs.split('+')
        # The radius term is the one referencing a catalog column.
        rad_term = [term.strip() for term in terms if '@%s'%self.class_module.catid.upper() in term][0]
        radius = rad_term.replace('@%s_'%self.class_module.catid.upper(),'')
        num,denom = radius.split('/')
        num = 'self.hdu.data.field("%s")'%num
        radius = '/'.join([num,denom])
        return eval(radius)
class CatalogSource(object):
    """A single counterpart source drawn from a counterpart catalog.

    Attributes:
        catalog : the Catalog instance this source belongs to.
        name : source name within its catalog.
        skydir : SkyDir giving the source position.
        fom : figure of merit; starts at 1. until the catalog assigns one.
    """
    def __init__(self, catalog, name, skydir):
        self.catalog = catalog
        self.name = name
        self.skydir = skydir
        # Default figure of merit; Catalog._get_foms may overwrite it later.
        self.fom = 1.
    def __str__(self):
        # Tab-separated record: catalog name, source name, RA, Dec.
        columns = [self.catalog.cat_name,
                   self.name,
                   str(self.skydir.ra()),
                   str(self.skydir.dec())]
        return '\t'.join(columns)
| |
<gh_stars>1-10
import caffe
from caffe import layers as L
from caffe import params as P
def fc_relu_drop(bottom, fc_param, dropout_ratio=0.5):
    """Build a fully-connected layer with in-place ReLU and Dropout.

    Arguments:
        bottom : input blob for the InnerProduct layer.
        fc_param : dict with keys 'num_output', 'weight_type', 'weight_std'
            and 'bias_value' configuring the layer and its fillers.
        dropout_ratio : dropout probability (default 0.5).
    Returns the (fc, relu, drop) layer triple; relu and drop operate
    in place on the fc blob.
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    w_filler = dict(type=fc_param['weight_type'], std=fc_param['weight_std'])
    b_filler = dict(type='constant', value=fc_param['bias_value'])
    fc = L.InnerProduct(bottom,
                        num_output=fc_param['num_output'],
                        param=lr_params,
                        weight_filler=w_filler,
                        bias_filler=b_filler)
    relu = L.ReLU(fc, in_place=True)
    drop = L.Dropout(fc, in_place=True,
                     dropout_param=dict(dropout_ratio=dropout_ratio))
    return fc, relu, drop
def factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Build a Convolution followed by in-place BatchNorm, Scale and ReLU.

    Arguments:
        bottom : input blob.
        num_output, kernel_size, stride, pad : convolution geometry.
    Returns the (conv, bn, scale, relu) layer tuple; the last three
    operate in place on the conv blob.
    """
    mult = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    w_filler = dict(type='xavier', std=1)
    b_filler = dict(type='constant', value=0.2)
    conv = L.Convolution(bottom, num_output=num_output, kernel_size=kernel_size,
                         stride=stride, pad=pad, param=mult,
                         weight_filler=w_filler, bias_filler=b_filler)
    conv_bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    conv_scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    conv_relu = L.ReLU(conv, in_place=True)
    return conv, conv_bn, conv_scale, conv_relu
def inception(bottom, conv_output):
    """Build a GoogLeNet (Inception v1) module on top of ``bottom``.

    Four parallel branches -- 1x1 conv, 1x1->3x3 convs, 1x1->5x5 convs, and
    3x3 max-pool followed by a 1x1 projection -- are concatenated channel-wise.

    Arguments:
        bottom : input blob.
        conv_output : dict giving num_output per branch, with keys
            'conv_1x1', 'conv_3x3_reduce', 'conv_3x3', 'conv_5x5_reduce',
            'conv_5x5' and 'pool_proj'.
    Returns all created layers in order, ending with the Concat output.
    """
    # Shared LR/decay multipliers for (weights, bias).
    mult = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    # BUGFIX: these fillers previously used the invalid key 'weight_std';
    # caffe's FillerParameter defines 'std' (see the correct usage in
    # factorization_conv_bn_scale_relu), so serialization would fail.
    xavier_filler = dict(type='xavier', std=1)
    bias_filler = dict(type='constant', value=0.2)
    conv_1x1 = L.Convolution(bottom, kernel_size=1, num_output=conv_output['conv_1x1'],
                             param=mult, weight_filler=xavier_filler, bias_filler=bias_filler)
    conv_1x1_relu = L.ReLU(conv_1x1, in_place=True)
    conv_3x3_reduce = L.Convolution(bottom, kernel_size=1, num_output=conv_output['conv_3x3_reduce'],
                                    param=mult, weight_filler=xavier_filler, bias_filler=bias_filler)
    conv_3x3_reduce_relu = L.ReLU(conv_3x3_reduce, in_place=True)
    conv_3x3 = L.Convolution(conv_3x3_reduce, kernel_size=3, num_output=conv_output['conv_3x3'], pad=1,
                             param=mult, weight_filler=xavier_filler, bias_filler=bias_filler)
    conv_3x3_relu = L.ReLU(conv_3x3, in_place=True)
    conv_5x5_reduce = L.Convolution(bottom, kernel_size=1, num_output=conv_output['conv_5x5_reduce'],
                                    param=mult, weight_filler=xavier_filler, bias_filler=bias_filler)
    conv_5x5_reduce_relu = L.ReLU(conv_5x5_reduce, in_place=True)
    conv_5x5 = L.Convolution(conv_5x5_reduce, kernel_size=5, num_output=conv_output['conv_5x5'], pad=2,
                             param=mult, weight_filler=xavier_filler, bias_filler=bias_filler)
    conv_5x5_relu = L.ReLU(conv_5x5, in_place=True)
    pool = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.MAX)
    # pool_proj keeps the original plain-xavier filler (no std was given here).
    pool_proj = L.Convolution(pool, kernel_size=1, num_output=conv_output['pool_proj'],
                              param=mult, weight_filler=dict(type='xavier'),
                              bias_filler=bias_filler)
    pool_proj_relu = L.ReLU(pool_proj, in_place=True)
    concat = L.Concat(conv_1x1, conv_3x3, conv_5x5, pool_proj)
    return conv_1x1, conv_1x1_relu, conv_3x3_reduce, conv_3x3_reduce_relu, conv_3x3, conv_3x3_relu, conv_5x5_reduce, \
        conv_5x5_reduce_relu, conv_5x5, conv_5x5_relu, pool, pool_proj, pool_proj_relu, concat
def inception_bn(bottom, conv_output):
    """Build an Inception module where every convolution is followed by
    BatchNorm + Scale + ReLU (via factorization_conv_bn_scale_relu).

    Arguments:
        bottom : input blob.
        conv_output : dict giving num_output per branch, with keys
            'conv_1x1', 'conv_3x3_reduce', 'conv_3x3', 'conv_5x5_reduce',
            'conv_5x5' and 'pool_proj'.
    Returns all created layers in a fixed order, ending with the Concat
    output; callers unpack this tuple positionally, so the order matters.
    """
    # Branch 1: 1x1 convolution.
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=conv_output['conv_1x1'], kernel_size=1)
    # Branch 2: 1x1 reduction followed by 3x3 convolution.
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=conv_output['conv_3x3_reduce'], kernel_size=1)
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=conv_output['conv_3x3'], kernel_size=3, pad=1)
    # Branch 3: 1x1 reduction followed by 5x5 convolution.
    conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=conv_output['conv_5x5_reduce'], kernel_size=1)
    conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu = \
        factorization_conv_bn_scale_relu(conv_5x5_reduce, num_output=conv_output['conv_5x5'], kernel_size=5, pad=2)
    # Branch 4: 3x3 max-pool followed by 1x1 projection.
    pool = L.Pooling(bottom, kernel_size=3, stride=1, pad=1, pool=P.Pooling.MAX)
    pool_proj, pool_proj_bn, pool_proj_scale, pool_proj_relu = \
        factorization_conv_bn_scale_relu(pool, num_output=conv_output['pool_proj'], kernel_size=1)
    # Concatenate the four branch outputs along the channel axis.
    concat = L.Concat(conv_1x1, conv_3x3, conv_5x5, pool_proj)
    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_3x3_reduce, conv_3x3_reduce_bn, \
           conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, \
           conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu, conv_5x5, conv_5x5_bn, \
           conv_5x5_scale, conv_5x5_relu, pool, pool_proj, pool_proj_bn, pool_proj_scale, pool_proj_relu, concat
class InceptionV1(object):
def __init__(self, lmdb_train, lmdb_test, num_output):
self.train_data = lmdb_train
self.test_data = lmdb_test
self.classifier_num = num_output
def inception_v1_proto(self, batch_size, phase='TRAIN'):
n = caffe.NetSpec()
if phase == 'TRAIN':
source_data = self.train_data
mirror = True
else:
source_data = self.test_data
mirror = False
n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=mirror))
n.conv1_7x7_s2 = L.Convolution(n.data, num_output=64, kernel_size=7, stride=2, pad=3,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
weight_filler=dict(type='xavier', weight_std=1),
bias_filler=dict(type='constant', value=0.2))
n.conv1_relu_7x7 = L.ReLU(n.conv1_7x7_s2, in_place=True)
n.pool1_3x3_s2 = L.Pooling(n.conv1_7x7_s2, kernel_size=3, stride=1, pad=1, pool=P.Pooling.MAX)
n.pool1_norm1 = L.LRN(n.pool1_3x3_s2, local_size=5, alpha=1e-4, beta=0.75)
n.conv2_3x3_reduce = L.Convolution(n.pool1_norm1, kernel_size=1, num_output=64, stride=1,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
weight_filler=dict(type='xavier', weight_std=1),
bias_filler=dict(type='constant', value=0.2))
n.conv2_relu_3x3_reduce = L.ReLU(n.conv2_3x3_reduce, in_place=True)
n.conv2_3x3 = L.Convolution(n.conv2_3x3_reduce, num_output=192, kernel_size=3, stride=1, pad=1,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
weight_filler=dict(type='xavier', weight_std=1),
bias_filler=dict(type='constant', value=0.2))
n.conv2_relu_3x3 = L.ReLU(n.conv2_3x3, in_place=True)
n.conv2_norm2 = L.LRN(n.conv2_3x3, local_size=5, alpha=1e-4, beta=0.75)
n.pool2_3x3_s2 = L.Pooling(n.conv2_norm2, kernel_size=3, stride=1, pad=1, pool=P.Pooling.MAX)
n.inception_3a_1x1, n.inception_3a_relu_1x1, n.inception_3a_3x3_reduce, n.inception_3a_relu_3x3_reduce, \
n.inception_3a_3x3, n.inception_3a_relu_3x3, n.inception_3a_5x5_reduce, n.inception_3a_relu_5x5_reduce, \
n.inception_3a_5x5, n.inception_3a_relu_5x5, n.inception_3a_pool, n.inception_3a_pool_proj, \
n.inception_3a_relu_pool_proj, n.inception_3a_output = \
inception(n.pool2_3x3_s2, dict(conv_1x1=64, conv_3x3_reduce=96, conv_3x3=128, conv_5x5_reduce=16,
conv_5x5=32, pool_proj=32))
n.inception_3b_1x1, n.inception_3b_relu_1x1, n.inception_3b_3x3_reduce, n.inception_3b_relu_3x3_reduce, \
n.inception_3b_3x3, n.inception_3b_relu_3x3, n.inception_3b_5x5_reduce, n.inception_3b_relu_5x5_reduce, \
n.inception_3b_5x5, n.inception_3b_relu_5x5, n.inception_3b_pool, n.inception_3b_pool_proj, \
n.inception_3b_relu_pool_proj, n.inception_3b_output = \
inception(n.inception_3a_output, dict(conv_1x1=128, conv_3x3_reduce=128, conv_3x3=192, conv_5x5_reduce=32,
conv_5x5=96, pool_proj=64))
n.pool3_3x3_s2 = L.Pooling(n.inception_3b_output, kernel_size=3, stride=2, pool=P.Pooling.MAX)
n.inception_4a_1x1, n.inception_4a_relu_1x1, n.inception_4a_3x3_reduce, n.inception_4a_relu_3x3_reduce, \
n.inception_4a_3x3, n.inception_4a_relu_3x3, n.inception_4a_5x5_reduce, n.inception_4a_relu_5x5_reduce, \
n.inception_4a_5x5, n.inception_4a_relu_5x5, n.inception_4a_pool, n.inception_4a_pool_proj, \
n.inception_4a_relu_pool_proj, n.inception_4a_output = \
inception(n.pool3_3x3_s2, dict(conv_1x1=192, conv_3x3_reduce=96, conv_3x3=208, conv_5x5_reduce=16,
conv_5x5=48, pool_proj=64))
# loss 1
n.loss1_ave_pool = L.Pooling(n.inception_4a_output, kernel_size=5, stride=3, pool=P.Pooling.AVE)
n.loss1_conv = L.Convolution(n.loss1_ave_pool, num_output=128, kernel_size=1, stride=1,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
weight_filler=dict(type='xavier', weight_std=1),
bias_filler=dict(type='constant', value=0.2))
n.loss1_relu_conv = L.ReLU(n.loss1_conv, in_place=True)
n.loss1_fc, n.loss1_relu_fc, n.loss1_drop_fc = \
fc_relu_drop(n.loss1_conv, dict(num_output=1024, weight_type='xavier', weight_std=1, bias_type='constant',
bias_value=0.2), dropout_ratio=0.7)
n.loss1_classifier = L.InnerProduct(n.loss1_fc, num_output=self.classifier_num,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant', value=0))
n.loss1_loss = L.SoftmaxWithLoss(n.loss1_classifier, n.label, loss_weight=0.3)
if phase == 'TRAIN':
pass
else:
n.loss1_accuracy_top1 = L.Accuracy(n.loss1_classifier, n.label, include=dict(phase=1))
n.loss1_accuracy_top5 = L.Accuracy(n.loss1_classifier, n.label, include=dict(phase=1),
accuracy_param=dict(top_k=5))
n.inception_4b_1x1, n.inception_4b_relu_1x1, n.inception_4b_3x3_reduce, n.inception_4b_relu_3x3_reduce, \
n.inception_4b_3x3, n.inception_4b_relu_3x3, n.inception_4b_5x5_reduce, n.inception_4b_relu_5x5_reduce, \
n.inception_4b_5x5, n.inception_4b_relu_5x5, n.inception_4b_pool, n.inception_4b_pool_proj, \
n.inception_4b_relu_pool_proj, n.inception_4b_output = \
inception(n.inception_4a_output, dict(conv_1x1=160, conv_3x3_reduce=112, conv_3x3=224, conv_5x5_reduce=24,
conv_5x5=64, pool_proj=64))
n.inception_4c_1x1, n.inception_4c_relu_1x1, n.inception_4c_3x3_reduce, n.inception_4c_relu_3x3_reduce, \
n.inception_4c_3x3, n.inception_4c_relu_3x3, n.inception_4c_5x5_reduce, n.inception_4c_relu_5x5_reduce, \
n.inception_4c_5x5, n.inception_4c_relu_5x5, n.inception_4c_pool, n.inception_4c_pool_proj, \
n.inception_4c_relu_pool_proj, n.inception_4c_output = \
inception(n.inception_4b_output, dict(conv_1x1=128, conv_3x3_reduce=128, conv_3x3=256, conv_5x5_reduce=24,
conv_5x5=64, pool_proj=64))
n.inception_4d_1x1, n.inception_4d_relu_1x1, n.inception_4d_3x3_reduce, n.inception_4d_relu_3x3_reduce, \
n.inception_4d_3x3, n.inception_4d_relu_3x3, n.inception_4d_5x5_reduce, n.inception_4d_relu_5x5_reduce, \
n.inception_4d_5x5, n.inception_4d_relu_5x5, n.inception_4d_pool, n.inception_4d_pool_proj, \
n.inception_4d_relu_pool_proj, n.inception_4d_output = \
inception(n.inception_4c_output, dict(conv_1x1=112, conv_3x3_reduce=144, conv_3x3=288, conv_5x5_reduce=32,
conv_5x5=64, pool_proj=64))
# loss 2
n.loss2_ave_pool = L.Pooling(n.inception_4d_output, kernel_size=5, stride=3, pool=P.Pooling.AVE)
n.loss2_conv = L.Convolution(n.loss2_ave_pool, num_output=128, kernel_size=1, stride=1,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
weight_filler=dict(type='xavier', weight_std=1),
bias_filler=dict(type='constant', value=0.2))
n.loss2_relu_conv = L.ReLU(n.loss2_conv, in_place=True)
n.loss2_fc, n.loss2_relu_fc, n.loss2_drop_fc = \
fc_relu_drop(n.loss2_conv, dict(num_output=1024, weight_type='xavier', weight_std=1, bias_type='constant',
bias_value=0.2), dropout_ratio=0.7)
n.loss2_classifier = L.InnerProduct(n.loss2_fc, num_output=self.classifier_num,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant', value=0))
n.loss2_loss = L.SoftmaxWithLoss(n.loss2_classifier, n.label, loss_weight=0.3)
if phase == 'TRAIN':
pass
else:
n.loss2_accuracy_top1 = L.Accuracy(n.loss2_classifier, n.label, include=dict(phase=1))
n.loss2_accuracy_top5 = L.Accuracy(n.loss2_classifier, n.label, include=dict(phase=1),
accuracy_param=dict(top_k=5))
n.inception_4e_1x1, n.inception_4e_relu_1x1, n.inception_4e_3x3_reduce, n.inception_4e_relu_3x3_reduce, \
n.inception_4e_3x3, n.inception_4e_relu_3x3, n.inception_4e_5x5_reduce, n.inception_4e_relu_5x5_reduce, \
n.inception_4e_5x5, n.inception_4e_relu_5x5, n.inception_4e_pool, n.inception_4e_pool_proj, \
n.inception_4e_relu_pool_proj, n.inception_4e_output = \
inception(n.inception_4d_output, dict(conv_1x1=256, conv_3x3_reduce=160, conv_3x3=320, conv_5x5_reduce=32,
conv_5x5=128, pool_proj=128))
n.pool4_3x3_s2 = L.Pooling(n.inception_4e_output, kernel_size=3, stride=2, pool=P.Pooling.MAX)
n.inception_5a_1x1, n.inception_5a_relu_1x1, n.inception_5a_3x3_reduce, n.inception_5a_relu_3x3_reduce, \
n.inception_5a_3x3, n.inception_5a_relu_3x3, n.inception_5a_5x5_reduce, n.inception_5a_relu_5x5_reduce, \
n.inception_5a_5x5, n.inception_5a_relu_5x5, n.inception_5a_pool, n.inception_5a_pool_proj, \
n.inception_5a_relu_pool_proj, n.inception_5a_output = \
inception(n.pool4_3x3_s2, dict(conv_1x1=256, conv_3x3_reduce=160, conv_3x3=320, conv_5x5_reduce=32,
conv_5x5=128, pool_proj=128))
n.inception_5b_1x1, n.inception_5b_relu_1x1, n.inception_5b_3x3_reduce, n.inception_5b_relu_3x3_reduce, \
n.inception_5b_3x3, n.inception_5b_relu_3x3, n.inception_5b_5x5_reduce, n.inception_5b_relu_5x5_reduce, \
n.inception_5b_5x5, n.inception_5b_relu_5x5, n.inception_5b_pool, n.inception_5b_pool_proj, \
n.inception_5b_relu_pool_proj, n.inception_5b_output = \
inception(n.inception_5a_output, dict(conv_1x1=384, conv_3x3_reduce=192, conv_3x3=384, conv_5x5_reduce=48,
conv_5x5=128, pool_proj=128))
n.pool5_7x7_s1 = L.Pooling(n.inception_5b_output, kernel_size=7, stride=1, pool=P.Pooling.AVE)
n.pool5_drop_7x7_s1 = L.Dropout(n.pool5_7x7_s1, in_place=True,
dropout_param=dict(dropout_ratio=0.4))
n.loss3_classifier = L.InnerProduct(n.pool5_7x7_s1, num_output=self.classifier_num,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant', value=0))
n.loss3_loss = L.SoftmaxWithLoss(n.loss3_classifier, n.label, loss_weight=1)
if phase == 'TRAIN':
pass
else:
n.loss3_accuracy_top1 = L.Accuracy(n.loss3_classifier, n.label, include=dict(phase=1))
n.loss3_accuracy_top5 = L.Accuracy(n.loss3_classifier, n.label, include=dict(phase=1),
accuracy_param=dict(top_k=5))
return n.to_proto()
def inception_bn_proto(self, batch_size, phase='TRAIN'): # inception_bn
n = caffe.NetSpec()
if phase == 'TRAIN':
source_data = self.train_data
mirror = True
else:
source_data = self.test_data
mirror = False
n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=mirror))
n.conv1_7x7_s2, n.conv1_7x7_s2_bn, n.conv1_7x7_s2_scale, n.conv1_7x7_relu = \
factorization_conv_bn_scale_relu(n.data, num_output=64, kernel_size=7, stride=2, pad=3)
n.pool1_3x3_s2 = L.Pooling(n.conv1_7x7_s2, kernel_size=3, stride=2, pool=P.Pooling.MAX)
n.conv2_3x3_reduce, n.conv2_3x3_reduce_bn, n.conv2_3x3_reduce_scale, n.conv2_3x3_reduce_relu = \
factorization_conv_bn_scale_relu(n.pool1_3x3_s2, num_output=64, kernel_size=1)
n.conv2_3x3, n.conv2_3x3_bn, n.conv2_3x3_scale, n.conv2_3x3_relu = \
factorization_conv_bn_scale_relu(n.conv2_3x3_reduce, num_output=192, kernel_size=3, pad=1)
n.pool2_3x3_s2 = L.Pooling(n.conv2_3x3, kernel_size=3, stride=2, pool=P.Pooling.MAX)
n.inception_3a_1x1, n.inception_3a_1x1_bn, n.inception_3a_1x1_scale, n.inception_3a_relu_1x1, \
n.inception_3a_3x3_reduce, n.inception_3a_3x3_reduce_bn, n.inception_3a_3x3_reduce_scale, \
n.inception_3a_relu_3x3_reduce, n.inception_3a_3x3, n.inception_3a_3x3_bn, n.inception_3a_3x3_scale, \
n.inception_3a_relu_3x3, n.inception_3a_5x5_reduce, n.inception_3a_5x5_reduce_bn, \
n.inception_3a_5x5_reduce_scale, n.inception_3a_relu_5x5_reduce, n.inception_3a_5x5, n.inception_3a_5x5_bn, \
n.inception_3a_5x5_scale, n.inception_3a_relu_5x5, n.inception_3a_pool, n.inception_3a_pool_proj, \
n.inception_3a_pool_proj_bn, n.inception_3a_pool_proj_scale, n.inception_3a_relu_pool_proj, \
n.inception_3a_output = \
inception_bn(n.pool2_3x3_s2, dict(conv_1x1=64, conv_3x3_reduce=96, conv_3x3=128, conv_5x5_reduce=16,
conv_5x5=32, pool_proj=32))
n.inception_3b_1x1, n.inception_3b_1x1_bn, n.inception_3b_1x1_scale, n.inception_3b_relu_1x1, \
n.inception_3b_3x3_reduce, n.inception_3b_3x3_reduce_bn, n.inception_3b_3x3_reduce_scale, \
n.inception_3b_relu_3x3_reduce, n.inception_3b_3x3, n.inception_3b_3x3_bn, n.inception_3b_3x3_scale, \
n.inception_3b_relu_3x3, n.inception_3b_5x5_reduce, n.inception_3b_5x5_reduce_bn, \
n.inception_3b_5x5_reduce_scale, n.inception_3b_relu_5x5_reduce, n.inception_3b_5x5, n.inception_3b_5x5_bn, \
n.inception_3b_5x5_scale, n.inception_3b_relu_5x5, n.inception_3b_pool, n.inception_3b_pool_proj, \
n.inception_3b_pool_proj_bn, n.inception_3b_pool_proj_scale, n.inception_3b_relu_pool_proj, \
n.inception_3b_output = \
inception_bn(n.inception_3a_output, dict(conv_1x1=128, conv_3x3_reduce=128, conv_3x3=192,
conv_5x5_reduce=32, conv_5x5=96, pool_proj=64))
n.pool3_3x3_s2 = L.Pooling(n.inception_3b_output, kernel_size=3, stride=2, pool=P.Pooling.MAX)
n.inception_4a_1x1, n.inception_4a_1x1_bn, n.inception_4a_1x1_scale, n.inception_4a_relu_1x1, \
n.inception_4a_3x3_reduce, n.inception_4a_3x3_reduce_bn, n.inception_4a_3x3_reduce_scale, \
n.inception_4a_relu_3x3_reduce, n.inception_4a_3x3, n.inception_4a_3x3_bn, n.inception_4a_3x3_scale, \
n.inception_4a_relu_3x3, n.inception_4a_5x5_reduce, n.inception_4a_5x5_reduce_bn, \
n.inception_4a_5x5_reduce_scale, n.inception_4a_relu_5x5_reduce, n.inception_4a_5x5, n.inception_4a_5x5_bn, \
n.inception_4a_5x5_scale, n.inception_4a_relu_5x5, n.inception_4a_pool, n.inception_4a_pool_proj, \
n.inception_4a_pool_proj_bn, n.inception_4a_pool_proj_scale, n.inception_4a_relu_pool_proj, \
n.inception_4a_output = \
inception_bn(n.pool3_3x3_s2, dict(conv_1x1=192, conv_3x3_reduce=96, conv_3x3=208, conv_5x5_reduce=16,
conv_5x5=48, pool_proj=64))
# loss 1
n.loss1_ave_pool = L.Pooling(n.inception_4a_output, kernel_size=5, stride=3, pool=P.Pooling.AVE)
n.loss1_conv, n.loss1_conv_bn, n.loss1_conv_scale, n.loss1_relu_conv = \
factorization_conv_bn_scale_relu(n.loss1_ave_pool, num_output=128, kernel_size=1)
n.loss1_fc, n.loss1_relu_fc, n.loss1_drop_fc = \
fc_relu_drop(n.loss1_conv, dict(num_output=1024, weight_type='xavier', weight_std=1,
bias_type='constant', bias_value=0.2), dropout_ratio=0.7)
n.loss1_classifier = L.InnerProduct(n.loss1_fc, num_output=self.classifier_num,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
weight_filler=dict(type='xavier'),
bias_filler=dict(type='constant', value=0))
n.loss1_loss = L.SoftmaxWithLoss(n.loss1_classifier, n.label, loss_weight=0.3)
if phase == 'TRAIN':
pass
else:
n.loss1_accuracy_top1 = L.Accuracy(n.loss1_classifier, n.label, include=dict(phase=1))
n.loss1_accuracy_top5 = L.Accuracy(n.loss1_classifier, n.label, include=dict(phase=1),
accuracy_param=dict(top_k=5))
n.inception_4b_1x1, n.inception_4b_1x1_bn, n.inception_4b_1x1_scale, n.inception_4b_relu_1x1, \
n.inception_4b_3x3_reduce, n.inception_4b_3x3_reduce_bn, n.inception_4b_3x3_reduce_scale, \
n.inception_4b_relu_3x3_reduce, n.inception_4b_3x3, n.inception_4b_3x3_bn, n.inception_4b_3x3_scale, \
n.inception_4b_relu_3x3, n.inception_4b_5x5_reduce, n.inception_4b_5x5_reduce_bn, \
n.inception_4b_5x5_reduce_scale, n.inception_4b_relu_5x5_reduce, n.inception_4b_5x5, n.inception_4b_5x5_bn, \
n.inception_4b_5x5_scale, n.inception_4b_relu_5x5, n.inception_4b_pool, n.inception_4b_pool_proj, \
n.inception_4b_pool_proj_bn, n.inception_4b_pool_proj_scale, n.inception_4b_relu_pool_proj, \
n.inception_4b_output = \
inception_bn(n.inception_4a_output, dict(conv_1x1=160, conv_3x3_reduce=112, conv_3x3=224,
conv_5x5_reduce=24, conv_5x5=64, pool_proj=64))
n.inception_4c_1x1, n.inception_4c_1x1_bn, n.inception_4c_1x1_scale, n.inception_4c_relu_1x1, \
n.inception_4c_3x3_reduce, n.inception_4c_3x3_reduce_bn, n.inception_4c_3x3_reduce_scale, \
n.inception_4c_relu_3x3_reduce, n.inception_4c_3x3, n.inception_4c_3x3_bn, n.inception_4c_3x3_scale, \
n.inception_4c_relu_3x3, n.inception_4c_5x5_reduce, n.inception_4c_5x5_reduce_bn, \
n.inception_4c_5x5_reduce_scale, n.inception_4c_relu_5x5_reduce, n.inception_4c_5x5, n.inception_4c_5x5_bn, \
n.inception_4c_5x5_scale, n.inception_4c_relu_5x5, n.inception_4c_pool, n.inception_4c_pool_proj, \
n.inception_4c_pool_proj_bn, n.inception_4c_pool_proj_scale, n.inception_4c_relu_pool_proj, \
n.inception_4c_output = \
inception_bn(n.inception_4b_output, dict(conv_1x1=128, conv_3x3_reduce=128, conv_3x3=256,
conv_5x5_reduce=24, conv_5x5=64, pool_proj=64))
n.inception_4d_1x1, n.inception_4d_1x1_bn, n.inception_4d_1x1_scale, n.inception_4d_relu_1x1, \
n.inception_4d_3x3_reduce, n.inception_4d_3x3_reduce_bn, n.inception_4d_3x3_reduce_scale, \
n.inception_4d_relu_3x3_reduce, n.inception_4d_3x3, n.inception_4d_3x3_bn, n.inception_4d_3x3_scale, \
n.inception_4d_relu_3x3, n.inception_4d_5x5_reduce, n.inception_4d_5x5_reduce_bn, \
n.inception_4d_5x5_reduce_scale, n.inception_4d_relu_5x5_reduce, n.inception_4d_5x5, n.inception_4d_5x5_bn, \
n.inception_4d_5x5_scale, n.inception_4d_relu_5x5, n.inception_4d_pool, n.inception_4d_pool_proj, \
n.inception_4d_pool_proj_bn, n.inception_4d_pool_proj_scale, n.inception_4d_relu_pool_proj, \
n.inception_4d_output = \
inception_bn(n.inception_4c_output, dict(conv_1x1=112, conv_3x3_reduce=144, | |
parameter `name` when calling `proxy_get_namespaced_node`")
resource_path = '/api/v1/proxy/nodes/{name}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_head_namespaced_node(self, name, **kwargs):
"""
proxy HEAD requests to Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_head_namespaced_node(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the Node (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method proxy_head_namespaced_node" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `proxy_head_namespaced_node`")
resource_path = '/api/v1/proxy/nodes/{name}'.replace('{format}', 'json')
method = 'HEAD'
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def proxy_put_namespaced_node(self, name, **kwargs):
    """
    proxy PUT requests to Node

    Synchronous by default; pass a `callback` callable to perform the
    request asynchronously and receive the response in that callable.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.proxy_put_namespaced_node(name, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the Node (required)
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    # Reject any keyword argument this endpoint does not understand.
    for key in kwargs:
        if key not in ('name', 'callback'):
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method proxy_put_namespaced_node" % key
            )
    # verify the required parameter 'name' is set
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `proxy_put_namespaced_node`")

    resource_path = '/api/v1/proxy/nodes/{name}'.replace('{format}', 'json')
    path_params = {'name': name}

    header_params = {}
    # HTTP header `Accept`: only sent when the client selected one.
    accept = self.api_client.select_header_accept(['*/*'])
    if accept:
        header_params['Accept'] = accept
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(['*/*'])

    # Authentication setting (none required for this endpoint)
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    {},
                                    header_params,
                                    body=None,
                                    post_params={},
                                    files={},
                                    response_type='str',
                                    auth_settings=[],
                                    callback=kwargs.get('callback'))
def proxy_post_namespaced_node(self, name, **kwargs):
    """
    proxy POST requests to Node

    Synchronous by default; pass a `callback` callable to perform the
    request asynchronously and receive the response in that callable.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.proxy_post_namespaced_node(name, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the Node (required)
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    # Reject any keyword argument this endpoint does not understand.
    for key in kwargs:
        if key not in ('name', 'callback'):
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method proxy_post_namespaced_node" % key
            )
    # verify the required parameter 'name' is set
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `proxy_post_namespaced_node`")

    resource_path = '/api/v1/proxy/nodes/{name}'.replace('{format}', 'json')
    path_params = {'name': name}

    header_params = {}
    # HTTP header `Accept`: only sent when the client selected one.
    accept = self.api_client.select_header_accept(['*/*'])
    if accept:
        header_params['Accept'] = accept
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(['*/*'])

    # Authentication setting (none required for this endpoint)
    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    {},
                                    header_params,
                                    body=None,
                                    post_params={},
                                    files={},
                                    response_type='str',
                                    auth_settings=[],
                                    callback=kwargs.get('callback'))
def proxy_delete_namespaced_node(self, name, **kwargs):
    """
    proxy DELETE requests to Node

    Synchronous by default; pass a `callback` callable to perform the
    request asynchronously and receive the response in that callable.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.proxy_delete_namespaced_node(name, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the Node (required)
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    # Reject any keyword argument this endpoint does not understand.
    for key in kwargs:
        if key not in ('name', 'callback'):
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method proxy_delete_namespaced_node" % key
            )
    # verify the required parameter 'name' is set
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `proxy_delete_namespaced_node`")

    resource_path = '/api/v1/proxy/nodes/{name}'.replace('{format}', 'json')
    path_params = {'name': name}

    header_params = {}
    # HTTP header `Accept`: only sent when the client selected one.
    accept = self.api_client.select_header_accept(['*/*'])
    if accept:
        header_params['Accept'] = accept
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(['*/*'])

    # Authentication setting (none required for this endpoint)
    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    {},
                                    header_params,
                                    body=None,
                                    post_params={},
                                    files={},
                                    response_type='str',
                                    auth_settings=[],
                                    callback=kwargs.get('callback'))
def proxy_options_namespaced_node(self, name, **kwargs):
    """
    proxy OPTIONS requests to Node

    Synchronous by default; pass a `callback` callable to perform the
    request asynchronously and receive the response in that callable.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.proxy_options_namespaced_node(name, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the Node (required)
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    # Reject any keyword argument this endpoint does not understand.
    for key in kwargs:
        if key not in ('name', 'callback'):
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method proxy_options_namespaced_node" % key
            )
    # verify the required parameter 'name' is set
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `proxy_options_namespaced_node`")

    resource_path = '/api/v1/proxy/nodes/{name}'.replace('{format}', 'json')
    path_params = {'name': name}

    header_params = {}
    # HTTP header `Accept`: only sent when the client selected one.
    accept = self.api_client.select_header_accept(['*/*'])
    if accept:
        header_params['Accept'] = accept
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(['*/*'])

    # Authentication setting (none required for this endpoint)
    return self.api_client.call_api(resource_path, 'OPTIONS',
                                    path_params,
                                    {},
                                    header_params,
                                    body=None,
                                    post_params={},
                                    files={},
                                    response_type='str',
                                    auth_settings=[],
                                    callback=kwargs.get('callback'))
def proxy_get_namespaced_node_19(self, name, path, **kwargs):
"""
proxy GET requests to Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_get_namespaced_node_19(name, path, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the Node (required)
:param str path: path to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'path']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
| |
# --- required fields ---
transaction_type: ty.Literal[
    "withdrawal", "deposit", "transfer", "reconciliation", "opening balance"
]
date: datetime
value_date: datetime
amount: float
description: str
# --- endpoints: either the id or the name must be set for each side
# (enforced by is_valid; resolve() fills ids from names via the API) ---
source_id: ty.Optional[int] = None
source_name: ty.Optional[str] = None
destination_id: ty.Optional[int] = None
destination_name: ty.Optional[str] = None
# --- optional classification ---
budget_id: ty.Optional[int] = None
budget_name: ty.Optional[str] = None
category_name: ty.Optional[str] = None
bill_name: ty.Optional[str] = None
tags: ty.List[str] = field(default_factory=list)  # factory avoids a shared mutable default
notes: ty.Optional[str] = None
# SEPA mandate/creditor identifiers, optional.
sepa_mandate_identifier: ty.Optional[str] = None
sepa_creditor_identifier: ty.Optional[str] = None
def is_valid(self):
    """A transaction is valid when both endpoints are identified.

    Each endpoint (source and destination) must be given either by its
    id or by its name.
    """
    has_source = self.source_id is not None or self.source_name is not None
    has_destination = (
        self.destination_id is not None or self.destination_name is not None
    )
    return has_source and has_destination
def resolve(self, api: "FireflyClient"):
    """Resolve account/budget names into Firefly ids through the API.

    Only names that are set trigger a lookup; any pre-existing id for a
    set name is overwritten with the freshly resolved value.
    """
    if self.source_name:
        self.source_id = api.get_account(self.source_name).instance_id
    if self.destination_name:
        self.destination_id = api.get_account(self.destination_name).instance_id
    if self.budget_name:
        # get_budget returns the raw API dict, hence the ["id"] access.
        self.budget_id = api.get_budget(self.budget_name)["id"]
def is_equivalent(self, other: "FireflyTransaction") -> bool:
    """Return True when `self` and `other` describe the same transaction.

    Two transactions are equivalent when all identifying scalar fields
    match and they carry the same *set* of tags (order and duplicates
    are ignored).
    """
    scalar_fields = [
        "transaction_type",
        "date",
        "value_date",
        "amount",
        "sepa_mandate_identifier",
        "sepa_creditor_identifier",
        "category_name",
    ]
    if not all(getattr(self, attr) == getattr(other, attr) for attr in scalar_fields):
        return False
    # Bug fix: the previous code zipped the two *sets* element-wise
    # (`zip(set(a), set(b))`), which compared arbitrary pairings and
    # silently truncated to the shorter set. Compare the sets directly.
    return set(self.tags) == set(other.tags)
def update_with(self, other: "FireflyTransaction") -> None:
    """Overwrite self's fields with every non-None field of `other`.

    Fields that are None on `other` are left untouched on `self`.
    """
    # NOTE(review): `self.attributes` is not defined in this block —
    # presumably the list of field names exposed by the API dataclass
    # base; confirm against FireflyAPIDataClass.
    for attr in self.attributes:
        val_other = getattr(other, attr)
        if val_other is not None:
            setattr(self, attr, val_other)
def __repr__(self) -> str:
    # Delegate to the shared representation helper (defined outside this block).
    return self._get_representation()
@staticmethod
def summary_format(
    transaction_type,
    date,
    description,
    amount,
    source_name,
    destination_name,
    category,
    tags,
):
    """Format the summary string correctly.

    This helper exists because ANSI escape sequences that may be present
    in the values break the alignment of a plain string.format template.
    """
    # (fill function, value, column width); None means "emit verbatim".
    spec = [
        (_rfill, transaction_type, 10),
        (_rfill, date, 10),
        (_rfill, description, 32),
        (_lfill, amount, 8),
        (_lfill, source_name, 32),
        (None, "=>", None),
        (_rfill, destination_name, 32),
        (_lfill, category, 20),
        (_lfill, tags, 30),
    ]
    columns = []
    for fill, value, width in spec:
        columns.append(value if fill is None else fill(value, width))
    # The amount column carries a currency suffix.
    columns[3] += "€"
    return " ".join(columns)
@staticmethod
def summary_str_header() -> str:
    """Return the column-header line matching summary_str()'s layout."""
    labels = {
        "transaction_type": "type",
        "date": "date",
        "description": "description",
        "amount": "amount",
        "source_name": "source",
        "destination_name": "destination",
        "category": "category",
        "tags": "tags",
    }
    return FireflyTransaction.summary_format(**labels)
def summary_str(self) -> str:
    """Return a one-line, human-readable summary of this transaction."""
    # Withdrawals are displayed as negative amounts.
    sign = -1 if self.transaction_type == "withdrawal" else 1
    return FireflyTransaction.summary_format(
        transaction_type=self.transaction_type,
        date=str(self.date.date()),
        description=self.description.strip(),
        amount=f"{sign * self.amount:.2f}",
        source_name=self.source_name,
        destination_name=self.destination_name,
        category=self.category_name or "NO_CATEGORY",
        tags=",".join(self.tags) or "NO_TAG",
    )
@staticmethod
def summary_diff_lines(
    old_transaction: "FireflyTransaction", new_transaction: "FireflyTransaction"
) -> ty.Tuple[str, str]:
    """Render before/after summary lines, highlighting changed columns.

    Columns whose values differ are colored red in the old line and
    green in the new one.
    """

    def as_columns(t: "FireflyTransaction"):
        # Column order mirrors summary_format's keyword arguments below.
        signed = -t.amount if t.transaction_type == "withdrawal" else t.amount
        return [
            t.transaction_type,
            str(t.date.date()),
            t.description.strip(),
            f"{signed:.2f}",
            t.source_name,
            t.destination_name,
            t.category_name or "NO_CATEGORY",
            ",".join(t.tags) or "NO_TAGS",
        ]

    old_cols = as_columns(old_transaction)
    new_cols = as_columns(new_transaction)
    for i, (old_val, new_val) in enumerate(zip(old_cols, new_cols)):
        if old_val != new_val:
            old_cols[i] = colorama.Fore.RED + old_val + colorama.Style.RESET_ALL
            new_cols[i] = colorama.Fore.GREEN + new_val + colorama.Style.RESET_ALL
    keys = (
        "transaction_type",
        "date",
        "description",
        "amount",
        "source_name",
        "destination_name",
        "category",
        "tags",
    )
    return (
        FireflyTransaction.summary_format(**dict(zip(keys, old_cols))),
        FireflyTransaction.summary_format(**dict(zip(keys, new_cols))),
    )
@dataclass
class FireflyAccount(FireflyAPIDataClass):
    """A Firefly III account.

    Identity (hashing/equality) is IBAN-based when both sides have an
    IBAN, and name-based otherwise.
    """

    # Dataclass attribute names whose API field name differs.
    _ATTRIBUTE_TO_API_MAPPING: ty.ClassVar[ty.Mapping[str, str]] = {
        "account_type": "type",
    }
    # API fields never pushed back to the server.
    _IGNORED_ATTRIBUTES: ty.ClassVar[ty.Set[str]] = {"created_at", "updated_at"}

    def __hash__(self):
        # Must stay consistent with __eq__: IBAN when available, else name.
        return hash(self.iban) if self.iban is not None else hash(self.name)

    def __eq__(self, other):
        # Bug fix: a second, name-only __eq__ used to be defined further
        # down the class body; being later, it silently shadowed this one
        # and made equality inconsistent with __hash__. The duplicate has
        # been removed and this IBAN-aware definition kept.
        return isinstance(other, FireflyAccount) and (
            self.iban == other.iban
            if (self.iban is not None and other.iban is not None)
            else self.name == other.name
        )

    name: str
    account_type: ty.Literal[
        "asset",
        "expense",
        "import",
        "revenue",
        "cash",
        "liability",
        "liabilities",
        "initial-balance",
        "reconciliation",
    ]
    iban: ty.Optional[str] = None
    bic: ty.Optional[str] = None
    account_number: ty.Optional[str] = None
    opening_balance: ty.Optional[float] = None
    opening_balance_date: ty.Optional[datetime] = None
    currency_code: ty.Optional[str] = None
    account_role: ty.Optional[
        ty.Literal[
            "defaultAsset",
            "sharedAsset",
            "savingAsset",
            "ccAsset",
            "cashWalletAsset",
        ]
    ] = None
    credit_card_type: ty.Optional[ty.Literal["monthlyFull"]] = None
    monthly_payment_date: ty.Optional[datetime] = None
    liability_type: ty.Optional[ty.Literal["loan", "debt", "mortgage"]] = None
    liability_direction: ty.Optional[ty.Literal["debit", "credit"]] = None
    interest: ty.Optional[float] = None
    interest_period: ty.Optional[ty.Literal["daily", "monthly", "yearly"]] = None
    notes: ty.Optional[str] = None
    created_at: ty.Optional[datetime] = None
    updated_at: ty.Optional[datetime] = None

    def is_valid(self) -> bool:
        """Validate the conditional field requirements:

        - asset accounts need an account_role;
        - ccAsset roles need credit_card_type and monthly_payment_date;
        - liability accounts need liability_type, interest and
          interest_period.
        """
        return (
            (self.account_type != "asset" or self.account_role is not None)
            and (
                self.account_role != "ccAsset"
                or (
                    self.credit_card_type is not None
                    and self.monthly_payment_date is not None
                )
            )
            and (
                self.account_type != "liability"
                or (
                    self.liability_type is not None
                    and self.interest is not None
                    and self.interest_period is not None
                )
            )
        )

    def __repr__(self) -> str:
        return self._get_representation()
class FireflyApi:
    """Thin HTTP wrapper around the Firefly III REST API.

    Holds one requests session with the bearer-token headers and exposes
    the four verbs used by the client layer.
    """

    def __init__(self, api_hostname: str, token: str):
        self.session = requests.sessions.Session()
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "Authorization": f"Bearer {token}",
        }
        self.api_url = f"{api_hostname.rstrip('/')}/api/v1/"

    def __build_uri(self, endpoint: str):
        return f"{self.api_url}{endpoint}"

    def __log_status(self, verb: str, uri: str, response, expected_status: int) -> None:
        # Expected status goes to debug, anything else is a warning.
        log = logger.debug if response.status_code == expected_status else logger.warning
        log(f"{verb} {uri} returned {response.status_code}.")

    def post(self, endpoint: str, payload: JSON) -> JSON:
        uri = self.__build_uri(endpoint)
        response = self.session.post(url=uri, json=payload, headers=self.headers)
        self.__log_status("POST", uri, response, 200)
        return response.json()

    def get(self, endpoint, params=None):
        uri = self.__build_uri(endpoint)
        response = self.session.get(url=uri, params=params, headers=self.headers)
        self.__log_status("GET", uri, response, 200)
        return response.json()

    def delete(self, endpoint, params=None) -> None:
        # DELETE answers 204 No Content on success, so there is no body.
        uri = self.__build_uri(endpoint)
        response = self.session.delete(url=uri, params=params, headers=self.headers)
        self.__log_status("DELETE", uri, response, 204)

    def put(self, endpoint: str, payload: JSON):
        uri = self.__build_uri(endpoint)
        response = self.session.put(url=uri, json=payload, headers=self.headers)
        self.__log_status("PUT", uri, response, 200)
        return response.json()
class FireflyClient:
    """High-level Firefly III client.

    Wraps :class:`FireflyApi` with pagination handling and conversion of
    raw API payloads into the dataclasses defined in this module.
    """

    def __init__(self, api_hostname: str, token: str):
        self.api = FireflyApi(api_hostname, token)

    def get_custom(self, endpoint: str, params: ty.Optional[ty.Dict] = None):
        """GET `endpoint`, raising when the API answers with an error message."""
        response = self.api.get(endpoint, params=params)
        if "message" in response:
            raise Exception(response["message"])
        return response

    def _iterate_over(
        self, endpoint: str, params: ty.Optional[ty.Dict] = None
    ) -> ty.Iterable[ty.Dict]:
        """Yield every `data` entry of a paginated endpoint.

        Bug fix: the caller's `params` dict used to be mutated in place
        (the "page" key deleted, then rewritten on every page); we now
        work on a private copy so the bookkeeping does not leak back.
        """
        params = {} if params is None else dict(params)
        params.pop("page", None)  # pagination is driven from here
        api_answer = self.get_custom(endpoint, params)
        yield from api_answer["data"]
        # Fetch the remaining pages, if any (page numbering is 1-based).
        for page_number in range(
            api_answer["meta"]["pagination"]["current_page"] + 1,
            api_answer["meta"]["pagination"]["total_pages"] + 1,
        ):
            params["page"] = page_number
            yield from self.get_custom(endpoint, params)["data"]

    def iterate_over_accounts(
        self,
        params: ty.Optional[ty.Dict] = None,
    ) -> ty.Iterable[ty.Tuple[int, ty.Dict]]:
        """Yield (account_id, account_attributes) for every account."""
        if params is None:
            params = dict()
        yield from (
            (account_dict["id"], account_dict["attributes"])
            for account_dict in self._iterate_over("accounts", params=params)
        )

    def iterate_over_transactions(
        self,
        params: ty.Optional[ty.Dict] = None,
    ) -> ty.Iterable[ty.Tuple[int, ty.Dict]]:
        """Yield (journal_id, transaction) for every transaction split."""
        if params is None:
            params = dict()
        for transactions_dict in self._iterate_over("transactions", params=params):
            for transaction in transactions_dict["attributes"]["transactions"]:
                yield transaction["transaction_journal_id"], transaction

    def iterate_over_account_transactions(
        self,
        account: FireflyAccount,
        params: ty.Optional[ty.Dict] = None,
    ) -> ty.Iterable[ty.Tuple[int, ty.Dict]]:
        """Yield (journal_id, transaction) for one account's transactions."""
        if params is None:
            params = dict()
        for transactions_dict in self._iterate_over(
            f"accounts/{account.instance_id}/transactions", params=params
        ):
            for transaction in transactions_dict["attributes"]["transactions"]:
                yield transaction["transaction_journal_id"], transaction

    def get_account(self, account_name: str) -> FireflyAccount:
        """Return the account named `account_name`, raising if absent."""
        matches: ty.List[ty.Tuple[int, ty.Dict[str, ty.Any]]] = [
            (id_, account)
            for id_, account in self.iterate_over_accounts()
            if account["name"] == account_name
        ]
        if len(matches) == 0:
            raise Exception(f"account '{account_name}' not found.")
        return FireflyAPIDataClass.from_json(
            FireflyAccount, matches[0][1], matches[0][0]
        )

    def get_accounts(self) -> ty.List[FireflyAccount]:
        """Perform an API call to recover all the accounts."""
        accounts: ty.List[FireflyAccount] = [
            FireflyAPIDataClass.from_json(FireflyAccount, account, id_)
            for id_, account in self.iterate_over_accounts()
        ]
        return accounts

    def create_account(self, account: FireflyAccount) -> FireflyAccount:
        """Create `account` on the server and return the stored version."""
        response = self.api.post(endpoint="accounts", payload=account.to_dict())
        if "errors" in response:
            logger.error(response)
            raise Exception(
                f"request error: {response['message']} Fields {list(response['errors'].keys())}"
            )
        data = response["data"]
        logger.info(
            f" => Account {account} added with id {data['id']} at {data['attributes']['created_at']}"
        )
        return FireflyAPIDataClass.from_json(
            FireflyAccount, data["attributes"], data["id"]
        )

    def create_account_if_not_present(self, account: FireflyAccount) -> FireflyAccount:
        """Create `account` unless one with the same name already exists.

        Returns the server-side account either way.
        """
        account_name = account.name
        potential_accounts = [
            (account_id, existing)
            for account_id, existing in self.iterate_over_accounts()
            if existing["name"] == account_name
        ]
        if not potential_accounts:
            account = self.create_account(account)
        else:
            account = FireflyAPIDataClass.from_json(
                FireflyAccount, potential_accounts[0][1], potential_accounts[0][0]
            )
        return account

    def delete_all_accounts(self) -> None:
        """Delete every account on the server.

        NOTE(review): this deletes while iterating the paginated listing;
        if the server re-paginates after each deletion some accounts may
        be skipped — confirm against the Firefly III API behavior.
        """
        for account in self._iterate_over("accounts"):
            logger.info(f"Deleting account '{account['attributes']['name']}'.")
            self.api.delete(endpoint=f"accounts/{account['id']}")

    def get_budget(self, budget_name: str) -> ty.Dict:
        """Return the raw API dict of the budget named `budget_name`."""
        budgets = self.get_custom("budgets")
        matching = [
            budget
            for budget in budgets["data"]
            if budget["attributes"]["name"] == budget_name
        ]
        if len(matching) == 0:
            raise Exception(f"budget '{budget_name}' not found.")
        return matching[0]

    def insert_transaction(self, transaction: "FireflyTransaction"):
        """Store a new transaction on the server."""
        response = self.api.post(
            endpoint="transactions", payload={"transactions": [transaction.to_dict()]}
        )
        if "errors" in response:
            logger.error(response)
            raise Exception(
                f"request error: {response['message']} Fields {list(response['errors'].keys())}\n"
                f"Transaction: {transaction}."
            )
        data = response["data"]
        logger.info(
            f" => Transaction {transaction} added with id {data['id']} at {data['attributes']['created_at']}"
        )

    def last_transaction_date(self, account: FireflyAccount) -> datetime:
        """Return the date of the most recent transaction on `account`.

        Falls back to a date roughly 100 years in the past when the
        account has no transaction yet.
        """
        long_before = (
            datetime.now(tz=timezone(timedelta(hours=1))) - timedelta(weeks=52 * 100)
        ).replace(minute=0, second=0, microsecond=0)
        # Fixed the loop variable names: the iterator yields
        # (journal_id, attributes-dict); the previous names were swapped
        # even though the from_json call compensated for it.
        return max(
            (
                FireflyAPIDataClass.from_json(
                    FireflyTransaction, tx_attributes, journal_id
                ).date
                for journal_id, tx_attributes in self.iterate_over_account_transactions(
                    account
                )
            ),
            default=long_before,
        )
def update_transaction_with_rules(
    transaction: FireflyTransaction, rules: Rules
) -> FireflyTransaction:
    """Update the given transaction with the given rules.

    Only deposits and withdrawals are processed; any other transaction
    type is returned untouched.
    """
    if transaction.transaction_type not in ["deposit", "withdrawal"]:
        return transaction
    # Flatten the transaction into the key/value mapping the rules engine
    # understands (tags are joined into one comma-separated string).
    fields = {
        "instance_id": transaction.instance_id,
        "source": transaction.source_name,
        "destination": transaction.destination_name,
        "date": transaction.date,
        "value_date": transaction.value_date,
        "amount": transaction.amount,
        "description": transaction.description,
        "notes": transaction.notes,
        "sepa_mandate_identifier": transaction.sepa_mandate_identifier,
        "sepa_creditor_identifier": transaction.sepa_creditor_identifier,
        "tags": ",".join(transaction.tags),
    }
    is_deposit = transaction.transaction_type == "deposit"
    context = {
        "operation_type": transaction.transaction_type,
        "linked_account": (
            transaction.destination_name if is_deposit else transaction.source_name
        ),
    }
    info: InformationContainer = rules.apply_rules(
        InformationContainer(fields, context)
    )
    # Back from the comma-separated representation to a clean tag list.
    info["tags"] = [tag.strip() for tag in info["tags"].split(",") if tag.strip()]
    update_information_keys_to_firefly_inplace(info)
    return FireflyTransaction(**info)
def update_firefly_transactions(
firefly_url: str,
firefly_token: str,
rules_path: Path,
) -> None:
with RunningOperation("Connecting | |
# examples/paper_replication/runner.py
import datetime
import logging
import os
import random
import shutil
import sys
from collections import OrderedDict
import numpy as np
import pandas as pd
import torch
import fire
import tqdm
import util
from aum import AUMCalculator
from losses import losses
from models import models
from torchvision import datasets
from torchvision import models as tvmodels
from torchvision import transforms
class _Dataset(torch.utils.data.Dataset):
"""
A wrapper around existing torch datasets to add purposefully mislabeled samplesa and threshold samples.
:param :obj:`torch.utils.data.Dataset` base_dataset: Dataset to wrap
:param :obj:`torch.LongTensor` indices: List of indices of base_dataset to include (used to create valid. sets)
:param dict flip_dict: (optional) List mapping sample indices to their (incorrect) assigned label
:param bool use_threshold_samples: (default False) Whether or not to add threshold samples to this datasets
:param bool threshold_samples_set_idx: (default 1) Which set of threshold samples to use.
"""
def __init__(self,
base_dataset,
indices=None,
flip_dict=None,
use_threshold_samples=False,
threshold_samples_set_idx=1):
super().__init__()
self.dataset = base_dataset
self.flip_dict = flip_dict or {}
self.indices = torch.arange(len(self.dataset)) if indices is None else indices
# Create optional extra class (for threshold samples)
self.use_threshold_samples = use_threshold_samples
if use_threshold_samples:
num_threshold_samples = len(self.indices) // (self.targets.max().item() + 1)
start_index = (threshold_samples_set_idx - 1) * num_threshold_samples
end_index = (threshold_samples_set_idx) * num_threshold_samples
self.threshold_sample_indices = torch.randperm(len(self.indices))[start_index:end_index]
@property
def targets(self):
"""
(Hidden) ground-truth labels
"""
if not hasattr(self, "_target_memo"):
try:
self.__target_memo = torch.tensor(self.dataset.targets)[self.indices]
except Exception:
self.__target_memo = torch.tensor([target
for _, target in self.dataset])[self.indices]
if torch.is_tensor(self.__target_memo):
return self.__target_memo
else:
return torch.tensor(self.__target_memo)
@property
def assigned_targets(self):
"""
(Potentially incorrect) assigned labels
"""
if not hasattr(self, "_assigned_target_memo"):
self._assigned_target_memo = self.targets.clone()
# Change labels of mislabeled samples
if self.flip_dict is not None:
for i, idx in enumerate(self.indices.tolist()):
if idx in self.flip_dict.keys():
self._assigned_target_memo[i] = self.flip_dict[idx]
# Change labels of threshold samples
if self.use_threshold_samples:
extra_class = (self.targets.max().item() + 1)
self._assigned_target_memo[self.threshold_sample_indices] = extra_class
return self._assigned_target_memo
def __len__(self):
return len(self.indices)
def __getitem__(self, index):
input, _ = self.dataset[self.indices[index].item()]
target = self.assigned_targets[index].item()
res = input, target, index
return res
class Runner(object):
"""
Main module for running experiments. Can call `load`, `save`, `train`, `test`, etc.
:param str data: Directory to load data from
:param str save: Directory to save model/results
:param str dataset: (cifar10, cifar100, tiny_imagenet, webvision50, clothing100k)
:param int num_valid: (default 5000) What size validation set to use (comes from train set, indices determined by seed)
:param int seed: (default 0) Random seed
:param int split_seed: (default 0) Which random seed to use for creating trian/val split and for flipping random labels.
If this arg is not supplied, the split_seed will come from the `seed` arg.
:param float perc_mislabeled: (default 0.) How many samples will be intentionally mislabeled.
Default is 0. - i.e. regular training without flipping any labels.
:param str noise_type: (uniform, flip) Mislabeling noise model to use.
:param bool use_threshold_samples: (default False) Whether to add indictaor samples
:param bool threshold_samples_set_idx: (default 1) Which set of threshold samples to use (based on index)
:param str loss_type: (default cross-entropy) Loss type
:param bool oracle_training: (default False) If true, the network will be trained only on clean data
(i.e. all training points with flipped labels will be discarded).
:param str net_type: (resnet, densenet, wide_resnet) Which network to use.
:param **model_args: Additional argumets to pass to the model
"""
def __init__(self,
             data,
             save,
             dataset="cifar10",
             num_valid=5000,
             seed=0,
             split_seed=None,
             noise_type="uniform",
             perc_mislabeled=0.,
             use_threshold_samples=False,
             threshold_samples_set_idx=1,
             loss_type="cross-entropy",
             oracle_training=False,
             net_type="resnet",
             pretrained=False,
             **model_args):
    """Set up directories, RNG seeding, logging and the model.

    See the class docstring for the meaning of every argument.
    """
    # Create the save directory if needed and make sure it is usable.
    if not os.path.exists(save):
        os.makedirs(save)
    if not os.path.isdir(save):
        raise Exception('%s is not a dir' % save)
    self.data = data
    self.savedir = save
    self.perc_mislabeled = perc_mislabeled
    self.noise_type = noise_type
    self.dataset = dataset
    self.net_type = net_type
    self.num_valid = num_valid
    self.use_threshold_samples = use_threshold_samples
    self.threshold_samples_set_idx = threshold_samples_set_idx
    # split_seed falls back to `seed` when not supplied.
    self.split_seed = split_seed if split_seed is not None else seed
    self.seed = seed
    self.loss_func = losses[loss_type]
    self.oracle_training = oracle_training
    self.pretrained = pretrained

    # Seed
    # NOTE(review): the RNGs are seeded with the literal 0 here even
    # though a `seed` argument is accepted and stored — confirm whether
    # this is intentional (self.split_seed IS honored in _make_datasets).
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)
    random.seed(0)

    # Logging: everything goes both to stdout and a timestamped log file.
    self.timestring = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    logging.basicConfig(
        format='%(message)s',
        handlers=[
            logging.StreamHandler(sys.stdout),
            logging.FileHandler(os.path.join(self.savedir, 'log-%s.log' % self.timestring)),
        ],
        level=logging.INFO,
    )
    logging.info('Data dir:\t%s' % data)
    logging.info('Save dir:\t%s\n' % save)

    # Make model
    # NOTE(review): self.test_set / self.train_set are not assigned in
    # this method — presumably lazy properties defined elsewhere in the
    # class; confirm.
    self.num_classes = self.test_set.targets.max().item() + 1
    if use_threshold_samples:
        # Threshold samples occupy one extra label.
        self.num_classes += 1
    self.num_data = len(self.train_set)
    logging.info(f"\nDataset: {self.dataset}")
    logging.info(f"Num train: {self.num_data}")
    logging.info(f"Num valid: {self.num_valid}")
    logging.info(f"Extra class: {self.use_threshold_samples}")
    logging.info(f"Num classes: {self.num_classes}")
    if self.perc_mislabeled:
        logging.info(f"Noise type: {self.noise_type}")
        logging.info(f"Flip perc: {self.perc_mislabeled}\n")
    if self.oracle_training:
        logging.info(f"Training with Oracle Only")

    # Model
    # Large datasets use torchvision architectures; everything else uses
    # the local `models` registry.
    if self.dataset == "imagenet" or "webvision" in self.dataset or "clothing" in self.dataset:
        big_models = dict((key, val) for key, val in tvmodels.__dict__.items())
        self.model = big_models[self.net_type](pretrained=False, num_classes=self.num_classes)
        if self.pretrained:
            # Best-effort weight loading: shape mismatches are ignored.
            try:
                self.model.load_state_dict(
                    big_models[self.net_type](pretrained=True).state_dict(), strict=False)
            except RuntimeError:
                pass
        # Fix pooling issues
        # NOTE(review): this sets `avgpool_1a` on the Runner itself, not
        # on the model — looks like it was meant to be
        # `self.model.avgpool_1a`; confirm.
        if "inception" in self.net_type:
            self.avgpool_1a = torch.nn.AdaptiveAvgPool2d((1, 1))
    else:
        self.model = models[self.net_type](
            num_classes=self.num_classes,
            initial_stride=(2 if "tiny" in self.dataset.lower() else 1),
            **model_args)
    logging.info(f"Model type: {self.net_type}")
    logging.info(f"Model args:")
    for key, val in model_args.items():
        logging.info(f" - {key}: {val}")
    logging.info(f"Loss type: {loss_type}")
    logging.info("")
def _make_datasets(self):
try:
dataset_cls = getattr(datasets, self.dataset.upper())
self.big_model = False
except Exception:
dataset_cls = datasets.ImageFolder
if "tiny" in self.dataset.lower():
self.big_model = False
else:
self.big_model = True
# Get constants
if dataset_cls == datasets.ImageFolder:
tmp_set = dataset_cls(root=os.path.join(self.data, "train"))
else:
tmp_set = dataset_cls(root=self.data, train=True, download=True)
if self.dataset.upper() == 'CIFAR10':
tmp_set.targets = tmp_set.train_labels
num_train = len(tmp_set) - self.num_valid
num_valid = self.num_valid
num_classes = int(max(tmp_set.targets)) + 1
# Create train/valid split
torch.manual_seed(self.split_seed)
torch.cuda.manual_seed_all(self.split_seed)
random.seed(self.split_seed)
train_indices, valid_indices = torch.randperm(num_train + num_valid).split(
[num_train, num_valid])
# dataset indices flip
flip_dict = {}
if self.perc_mislabeled:
# Generate noisy labels from random transitions
transition_matrix = torch.eye(num_classes)
if self.noise_type == "uniform":
transition_matrix.mul_(1 - self.perc_mislabeled * (num_classes / (num_classes - 1)))
transition_matrix.add_(self.perc_mislabeled / (num_classes - 1))
elif self.noise_type == "flip":
source_classes = torch.arange(num_classes)
target_classes = (source_classes + 1).fmod(num_classes)
transition_matrix.mul_(1 - self.perc_mislabeled)
transition_matrix[source_classes, target_classes] = self.perc_mislabeled
else:
raise ValueError(f"Unknonwn noise type {self.noise}")
true_targets = (torch.tensor(tmp_set.targets) if hasattr(tmp_set, "targets") else
torch.tensor([target for _, target in self]))
transition_targets = torch.distributions.Categorical(
probs=transition_matrix[true_targets, :]).sample()
# Create a dictionary of transitions
if not self.oracle_training:
flip_indices = torch.nonzero(transition_targets != true_targets).squeeze(-1)
flip_targets = transition_targets[flip_indices]
for index, target in zip(flip_indices, flip_targets):
flip_dict[index.item()] = target.item()
else:
# In the oracle setting, don't add transitions
oracle_indices = torch.nonzero(transition_targets == true_targets).squeeze(-1)
train_indices = torch.from_numpy(
np.intersect1d(oracle_indices.numpy(), train_indices.numpy())).long()
# Reset the seed for dataset/initializations
torch.manual_seed(self.split_seed)
torch.cuda.manual_seed_all(self.split_seed)
random.seed(self.split_seed)
# Define trainsforms
if self.big_model:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
test_transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(227 if "inception" in self.net_type else 224),
transforms.ToTensor(),
normalize,
])
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(227 if "inception" in self.net_type else 224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
elif self.dataset == "tiny_imagenet":
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
test_transforms = transforms.Compose([
transforms.ToTensor(),
normalize,
])
train_transforms = transforms.Compose([
transforms.RandomCrop(64, padding=8),
transforms.RandomHorizontalFlip(),
test_transforms,
])
elif self.dataset == "cifar10":
normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],
std=[0.2471, 0.2435, 0.2616])
test_transforms = transforms.Compose([
transforms.ToTensor(),
normalize,
])
train_transforms = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
test_transforms,
])
elif self.dataset == "cifar100":
normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],
std=[0.2471, 0.2435, 0.2616])
test_transforms = transforms.Compose([
transforms.ToTensor(),
normalize,
])
train_transforms = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
test_transforms,
])
elif self.dataset == "mnist":
normalize = transforms.Normalize(mean=(0.1307, ), std=(0.3081, ))
test_transforms = transforms.Compose([
transforms.ToTensor(),
normalize,
])
train_transforms = test_transforms
else:
raise ValueError(f"Unknown dataset {self.dataset}")
# Get train set
if dataset_cls == datasets.ImageFolder:
self._train_set_memo = _Dataset(
dataset_cls(
root=os.path.join(self.data, "train"),
transform=train_transforms,
),
flip_dict=flip_dict,
indices=train_indices,
use_threshold_samples=self.use_threshold_samples,
threshold_samples_set_idx=self.threshold_samples_set_idx,
)
if os.path.exists(os.path.join(self.data, "test")):
self._valid_set_memo = _Dataset(
dataset_cls(root=os.path.join(self.data, "val"), transform=test_transforms))
self._test_set_memo = _Dataset(
dataset_cls(root=os.path.join(self.data, "test"), transform=test_transforms))
else:
self._valid_set_memo = _Dataset(
dataset_cls(root=os.path.join(self.data, "train"), transform=test_transforms),
indices=valid_indices,
) if len(valid_indices) else None
self._test_set_memo = _Dataset(
dataset_cls(root=os.path.join(self.data, "val"), transform=test_transforms))
else:
self._train_set_memo = _Dataset(
dataset_cls(root=self.data, train=True, transform=train_transforms),
flip_dict=flip_dict,
indices=train_indices,
use_threshold_samples=self.use_threshold_samples,
threshold_samples_set_idx=self.threshold_samples_set_idx,
)
self._valid_set_memo = _Dataset(dataset_cls(
root=self.data, train=True, transform=test_transforms),
indices=valid_indices) if len(valid_indices) else None
self._test_set_memo = _Dataset(
dataset_cls(root=self.data, train=False, transform=test_transforms))
@property
def test_set(self):
if not hasattr(self, "_test_set_memo"):
self._make_datasets()
return self._test_set_memo
@property
def train_set(self):
if not hasattr(self, "_train_set_memo"):
self._make_datasets()
return self._train_set_memo
@property
def valid_set(self):
if not hasattr(self, "_valid_set_memo"):
self._make_datasets()
return self._valid_set_memo
def generate_aum_details(self, load=None):
"""
Script for accumulating both aum values and other sample details at the end of training.
It makes a dataframe that contains AUMs Clean for all samples
The results are saved to the file `aum_details.csv` in the model folder.
:param str load: (optional) If set to some value - it will assemble aum info from the model stored in the `load` folder.
Otherwise - it will comptue aums from the runner's model.
:return: self
"""
load = load or | |
(bool): Right button status.
mbutton (bool): Middle button status.
lbutton_pressed (bool): Left button pressed event.
rbutton_pressed (bool): Right button pressed event.
mbutton_pressed (bool): Middle button pressed event.
wheel_up (bool): Wheel up event.
wheel_down (bool): Wheel down event.
"""
def __init__(self, x=0, y=0, dx=0, dy=0, cx=0, cy=0, dcx=0, dcy=0,
**kargs):
if isinstance(x, ffi.CData):
self.cdata = x
return
self.cdata = ffi.new('TCOD_mouse_t*')
self.x = x
self.y = y
self.dx = dx
self.dy = dy
self.cx = cx
self.cy = cy
self.dcx = dcx
self.dcy = dcy
for attr, value in kargs.items():
setattr(self, attr, value)
def __repr__(self):
"""Return a representation of this Mouse object."""
params = []
for attr in ['x', 'y', 'dx', 'dy', 'cx', 'cy', 'dcx', 'dcy']:
if getattr(self, attr) == 0:
continue
params.append('%s=%r' % (attr, getattr(self, attr)))
for attr in ['lbutton', 'rbutton', 'mbutton',
'lbutton_pressed', 'rbutton_pressed', 'mbutton_pressed',
'wheel_up', 'wheel_down']:
if getattr(self, attr):
params.append('%s=%r' % (attr, getattr(self, attr)))
return 'tcod.Mouse(%s)' % ', '.join(params)
def bsp_new_with_size(x, y, w, h):
    """Return a new BSP node covering the given rectangle.

    Args:
        x (int): Rectangle left coordinate.
        y (int): Rectangle top coordinate.
        w (int): Rectangle width.
        h (int): Rectangle height.

    Returns:
        BSP: A new BSP instance.

    .. deprecated:: 2.0
        Call the :any:`BSP` class instead.
    """
    return Bsp(x, y, w, h)
def bsp_split_once(node, horizontal, position):
    """Split *node* once at *position*, horizontally if requested.

    .. deprecated:: 2.0
        Use :any:`BSP.split_once` instead.
    """
    node.split_once(horizontal, position)
def bsp_split_recursive(node, randomizer, nb, minHSize, minVSize, maxHRatio,
                        maxVRatio):
    """Recursively split *node*; the randomizer is passed through last.

    .. deprecated:: 2.0
        Use :any:`BSP.split_recursive` instead.
    """
    node.split_recursive(
        nb, minHSize, minVSize, maxHRatio, maxVRatio, randomizer)
@deprecate("Assign values via attribute instead.")
def bsp_resize(node, x, y, w, h):
"""
.. deprecated:: 2.0
Assign directly to :any:`BSP` attributes instead.
"""
node.x = x
node.y = y
node.width = w
node.height = h
@deprecate("Access children with 'node.children' instead.")
def bsp_left(node):
"""
.. deprecated:: 2.0
Use :any:`BSP.children` instead.
"""
return None if not node.children else node.children[0]
@deprecate("Access children with 'node.children' instead.")
def bsp_right(node):
"""
.. deprecated:: 2.0
Use :any:`BSP.children` instead.
"""
return None if not node.children else node.children[1]
@deprecate("Get the parent with 'node.parent' instead.")
def bsp_father(node):
"""
.. deprecated:: 2.0
Use :any:`BSP.parent` instead.
"""
return node.parent
@deprecate("Check for children with 'bool(node.children)' instead.")
def bsp_is_leaf(node):
"""
.. deprecated:: 2.0
Use :any:`BSP.children` instead.
"""
return not node.children
@deprecate("Use 'node.contains' instead.")
def bsp_contains(node, cx, cy):
"""
.. deprecated:: 2.0
Use :any:`BSP.contains` instead.
"""
return node.contains(cx, cy)
@deprecate("Use 'node.find_node' instead.")
def bsp_find_node(node, cx, cy):
"""
.. deprecated:: 2.0
Use :any:`BSP.find_node` instead.
"""
return node.find_node(cx, cy)
def _bsp_traverse(node_iter, callback, userData):
"""pack callback into a handle for use with the callback
_pycall_bsp_callback
"""
for node in node_iter:
callback(node, userData)
@deprecate("Iterate over nodes using 'for n in node.pre_order():' instead.")
def bsp_traverse_pre_order(node, callback, userData=0):
"""Traverse this nodes hierarchy with a callback.
.. deprecated:: 2.0
Use :any:`BSP.pre_order` instead.
"""
_bsp_traverse(node.pre_order(), callback, userData)
@deprecate("Iterate over nodes using 'for n in node.in_order():' instead.")
def bsp_traverse_in_order(node, callback, userData=0):
"""Traverse this nodes hierarchy with a callback.
.. deprecated:: 2.0
Use :any:`BSP.in_order` instead.
"""
_bsp_traverse(node.in_order(), callback, userData)
@deprecate("Iterate over nodes using 'for n in node.post_order():' instead.")
def bsp_traverse_post_order(node, callback, userData=0):
"""Traverse this nodes hierarchy with a callback.
.. deprecated:: 2.0
Use :any:`BSP.post_order` instead.
"""
_bsp_traverse(node.post_order(), callback, userData)
@deprecate("Iterate over nodes using 'for n in node.level_order():' instead.")
def bsp_traverse_level_order(node, callback, userData=0):
"""Traverse this nodes hierarchy with a callback.
.. deprecated:: 2.0
Use :any:`BSP.level_order` instead.
"""
_bsp_traverse(node.level_order(), callback, userData)
@deprecate("Iterate over nodes using "
"'for n in node.inverted_level_order():' instead.")
def bsp_traverse_inverted_level_order(node, callback, userData=0):
"""Traverse this nodes hierarchy with a callback.
.. deprecated:: 2.0
Use :any:`BSP.inverted_level_order` instead.
"""
_bsp_traverse(node.inverted_level_order(), callback, userData)
@deprecate("Delete bsp children using 'node.children = ()' instead.")
def bsp_remove_sons(node):
"""Delete all children of a given node. Not recommended.
.. note::
This function will add unnecessary complexity to your code.
Don't use it.
.. deprecated:: 2.0
BSP deletion is automatic.
"""
node.children = ()
@deprecate("libtcod objects are deleted automatically.")
def bsp_delete(node):
# type: (Any) -> None
"""Exists for backward compatibility. Does nothing.
BSP's created by this library are automatically garbage collected once
there are no references to the tree.
This function exists for backwards compatibility.
.. deprecated:: 2.0
BSP deletion is automatic.
"""
def color_lerp(c1, c2, a):
    """Blend linearly between two colors.

    ``a`` is the interpolation value: 0 returns ``c1``, 1 returns ``c2``,
    and 0.5 returns a color halfway between both.

    Args:
        c1 (Union[Tuple[int, int, int], Sequence[int]]):
            The first color.  At a=0.
        c2 (Union[Tuple[int, int, int], Sequence[int]]):
            The second color.  At a=1.
        a (float): The interpolation value,

    Returns:
        Color: The interpolated Color.
    """
    blended = lib.TCOD_color_lerp(c1, c2, a)
    return Color._new_from_cdata(blended)
def color_set_hsv(c, h, s, v):
    """Set a color from hue, saturation, and value parameters.

    Does not return a new Color.  ``c`` is modified inplace.

    Args:
        c (Union[Color, List[Any]]): A Color instance, or a list of any kind.
        h (float): Hue, from 0 to 360.
        s (float): Saturation, from 0 to 1.
        v (float): Value, from 0 to 1.
    """
    converted = ffi.new('TCOD_color_t*')
    lib.TCOD_color_set_HSV(converted, h, s, v)
    c[:] = converted.r, converted.g, converted.b
def color_get_hsv(c):
    """Return the (hue, saturation, value) decomposition of a color.

    Args:
        c (Union[Tuple[int, int, int], Sequence[int]]):
            An (r, g, b) sequence or Color instance.

    Returns:
        Tuple[float, float, float]:
            A tuple with (hue, saturation, value) values, from 0 to 1.
    """
    out = ffi.new('float [3]')
    lib.TCOD_color_get_HSV(c, out, out + 1, out + 2)
    return out[0], out[1], out[2]
def color_scale_HSV(c, scoef, vcoef):
    """Scale a color's saturation and value in place.

    Does not return a new Color.  ``c`` is modified inplace.

    Args:
        c (Union[Color, List[int]]): A Color instance, or an [r, g, b] list.
        scoef (float): Saturation multiplier, from 0 to 1.
                       Use 1 to keep current saturation.
        vcoef (float): Value multiplier, from 0 to 1.
                       Use 1 to keep current value.
    """
    scratch = ffi.new('TCOD_color_t*')
    scratch.r, scratch.g, scratch.b = c.r, c.g, c.b
    lib.TCOD_color_scale_HSV(scratch, scoef, vcoef)
    c[:] = scratch.r, scratch.g, scratch.b
def color_gen_map(colors, indexes):
    """Return a smoothly defined scale of colors.

    If ``indexes`` is [0, 3, 9] for example, the first color from ``colors``
    will be returned at 0, the 2nd will be at 3, and the 3rd will be at 9.
    All in-betweens will be filled with a gradient.

    Args:
        colors (Iterable[Union[Tuple[int, int, int], Sequence[int]]]):
            Array of colors to be sampled.
        indexes (Iterable[int]): A list of indexes.

    Returns:
        List[Color]: A list of Color instances.

    Example:
        >>> tcod.color_gen_map([(0, 0, 0), (255, 128, 0)], [0, 5])
        [Color(0,0,0), Color(51,25,0), Color(102,51,0), Color(153,76,0), \
Color(204,102,0), Color(255,128,0)]
    """
    c_colors = ffi.new('TCOD_color_t[]', colors)
    c_indexes = ffi.new('int[]', indexes)
    c_out = ffi.new('TCOD_color_t[]', max(indexes) + 1)
    lib.TCOD_color_gen_map(c_out, len(colors), c_colors, c_indexes)
    return [Color._new_from_cdata(cdata) for cdata in c_out]
def console_init_root(
        w: int, h: int, title: Optional[AnyStr]=None, fullscreen: bool=False,
        renderer: Optional[int]=None, order: str='C') -> tcod.console.Console:
    """Set up the primary display and return the root console.

    `w` and `h` are the columns and rows of the new window (in tiles.)

    `title` is an optional string to display on the windows title bar; when
    omitted the running script's filename is used.

    `fullscreen` determines if the window will start in fullscreen.
    Fullscreen mode is unreliable unless the renderer is set to
    `RENDERER_SDL2` or `RENDERER_OPENGL2`.

    `renderer` is the rendering back-end that libtcod will use.  Options are:

    * `tcod.RENDERER_SDL`
    * `tcod.RENDERER_OPENGL`
    * `tcod.RENDERER_GLSL`
    * `tcod.RENDERER_SDL2`
    * `tcod.RENDERER_OPENGL2`

    `order` will affect how the array attributes of the returned root console
    are indexed.  `order='C'` is the default, but `order='F'` is recommended.

    .. versionchanged:: 4.3
        Added `order` parameter.
        `title` parameter is now optional.

    .. versionchanged:: 8.0
        The default `renderer` is now automatic instead of always being
        `RENDERER_SDL`.
    """
    if title is None:
        # Use the scripts filename as the title.
        title = os.path.basename(sys.argv[0])
    if renderer is None:
        renderer = RENDERER_GLSL  # Stable for now.
    lib.TCOD_console_init_root(w, h, _bytes(title), fullscreen, renderer)
    return tcod.console.Console._get_root(order)
def console_set_custom_font(fontFile: AnyStr, flags:
int=FONT_LAYOUT_ASCII_INCOL,
nb_char_horiz: int=0, nb_char_vertic: int=0):
"""Load the custom font file at `fontFile`.
Call this before function before calling :any:`tcod.console_init_root`.
Flags can be a mix of the following:
* tcod.FONT_LAYOUT_ASCII_INCOL:
Decode tileset raw in column-major order.
* tcod.FONT_LAYOUT_ASCII_INROW:
Decode tileset raw in row-major order.
* tcod.FONT_TYPE_GREYSCALE:
Force tileset to be read as greyscale.
* tcod.FONT_TYPE_GRAYSCALE
* tcod.FONT_LAYOUT_TCOD:
Unique layout used by libtcod.
* tcod.FONT_LAYOUT_CP437:
Decode a row-major Code Page 437 tileset into Unicode.
`nb_char_horiz` and `nb_char_vertic` are the columns and rows of the font
file respectfully.
"""
if not os.path.exists(fontFile):
raise RuntimeError("File not found:\n\t%s"
% (os.path.realpath(fontFile),))
| |
baton, char relpath,
char name, svn_string_t value, apr_pool_t pool) -> svn_error_t
"""
return apply(_ra.svn_ra_invoke_get_wc_prop_func, args)
def svn_ra_invoke_set_wc_prop_func(*args):
    """
    svn_ra_invoke_set_wc_prop_func(svn_ra_set_wc_prop_func_t _obj, void baton, char path,
        char name, svn_string_t value, apr_pool_t pool) -> svn_error_t
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_set_wc_prop_func(*args)
def svn_ra_invoke_push_wc_prop_func(*args):
    """
    svn_ra_invoke_push_wc_prop_func(svn_ra_push_wc_prop_func_t _obj, void baton, char path,
        char name, svn_string_t value, apr_pool_t pool) -> svn_error_t
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_push_wc_prop_func(*args)
def svn_ra_invoke_invalidate_wc_props_func(*args):
    """
    svn_ra_invoke_invalidate_wc_props_func(svn_ra_invalidate_wc_props_func_t _obj, void baton,
        char path, char name, apr_pool_t pool) -> svn_error_t
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_invalidate_wc_props_func(*args)
def svn_ra_invoke_get_latest_revnum_func(*args):
    """
    svn_ra_invoke_get_latest_revnum_func(svn_ra_get_latest_revnum_func_t _obj, void session_baton,
        svn_revnum_t latest_revnum) -> svn_error_t
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_get_latest_revnum_func(*args)
def svn_ra_invoke_get_client_string_func(*args):
    """
    svn_ra_invoke_get_client_string_func(svn_ra_get_client_string_func_t _obj, void baton, char name,
        apr_pool_t pool) -> svn_error_t
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_get_client_string_func(*args)
def svn_ra_invoke_file_rev_handler(*args):
    """
    svn_ra_invoke_file_rev_handler(svn_ra_file_rev_handler_t _obj, void baton, char path,
        svn_revnum_t rev, apr_hash_t rev_props, svn_txdelta_window_handler_t delta_handler,
        void delta_baton, apr_array_header_t prop_diffs,
        apr_pool_t pool) -> svn_error_t
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_file_rev_handler(*args)
def svn_ra_invoke_lock_callback(*args):
    """
    svn_ra_invoke_lock_callback(svn_ra_lock_callback_t _obj, void baton, char path,
        svn_boolean_t do_lock, svn_lock_t lock, svn_error_t ra_err,
        apr_pool_t pool) -> svn_error_t
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_lock_callback(*args)
def svn_ra_invoke_progress_notify_func(*args):
    """
    svn_ra_invoke_progress_notify_func(svn_ra_progress_notify_func_t _obj, apr_off_t progress,
        apr_off_t total, void baton, apr_pool_t pool)
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_progress_notify_func(*args)
def svn_ra_invoke_replay_revstart_callback(*args):
    """
    svn_ra_invoke_replay_revstart_callback(svn_ra_replay_revstart_callback_t _obj, svn_revnum_t revision,
        void replay_baton, svn_delta_editor_t editor,
        void edit_baton, apr_hash_t rev_props,
        apr_pool_t pool) -> svn_error_t
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_replay_revstart_callback(*args)
def svn_ra_invoke_replay_revfinish_callback(*args):
    """
    svn_ra_invoke_replay_revfinish_callback(svn_ra_replay_revfinish_callback_t _obj, svn_revnum_t revision,
        void replay_baton, svn_delta_editor_t editor,
        void edit_baton, apr_hash_t rev_props,
        apr_pool_t pool) -> svn_error_t
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_replay_revfinish_callback(*args)
def svn_ra_invoke_init_func(*args):
    """
    svn_ra_invoke_init_func(svn_ra_init_func_t _obj, int abi_version, apr_pool_t pool,
        apr_hash_t hash) -> svn_error_t
    """
    # Forward to the C wrapper.  Fixed: replaced the long-deprecated apply()
    # builtin (removed in Python 3) with equivalent argument unpacking.
    return _ra.svn_ra_invoke_init_func(*args)
class svn_ra_get_wc_prop_func_t:
    """Proxy of C svn_ra_get_wc_prop_func_t struct"""
    # SWIG-generated (Python 2) callable proxy for a C function-pointer type.
    # Instances come from the C layer; the constructor is deliberately blocked.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_ra_get_wc_prop_func_t, name, value)
    __swig_getmethods__ = {}
    # NOTE: this lambda is shadowed by the def __getattr__ defined below.
    __getattr__ = lambda self, name: _swig_getattr(self, svn_ra_get_wc_prop_func_t, name)
    # Python 2 raise syntax: this file targets Python 2 only.
    def __init__(self, *args, **kwargs): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_ra_get_wc_prop_func_t"""
        import libsvn.core, weakref
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weakly track the owning pool's _is_valid flag so later accesses
            # can detect that the underlying APR pool memory was freed.
            self.__dict__["_is_valid"] = weakref.ref(
              self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        # If this member was assigned before (tracked in _members), copy the
        # previous object's Python-side state onto the value SWIG returned.
        members = self.__dict__.get("_members")
        if members is not None:
            old_value = members.get(name)
            if (old_value is not None and value is not None and
                  value is not old_value):
                try:
                    value.__dict__.update(old_value.__dict__)
                except AttributeError:
                    pass
        if hasattr(value, "assert_valid"):
            value.assert_valid()
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        # Remember assignments so __getattr__ can restore their state later.
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Calling the proxy delegates to the module-level invoke helper.
        return svn_ra_invoke_get_wc_prop_func(self, *args)
svn_ra_get_wc_prop_func_t_swigregister = _ra.svn_ra_get_wc_prop_func_t_swigregister
svn_ra_get_wc_prop_func_t_swigregister(svn_ra_get_wc_prop_func_t)
class svn_ra_set_wc_prop_func_t:
    """Proxy of C svn_ra_set_wc_prop_func_t struct"""
    # SWIG-generated (Python 2) callable proxy for a C function-pointer type.
    # Instances come from the C layer; the constructor is deliberately blocked.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_ra_set_wc_prop_func_t, name, value)
    __swig_getmethods__ = {}
    # NOTE: this lambda is shadowed by the def __getattr__ defined below.
    __getattr__ = lambda self, name: _swig_getattr(self, svn_ra_set_wc_prop_func_t, name)
    # Python 2 raise syntax: this file targets Python 2 only.
    def __init__(self, *args, **kwargs): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_ra_set_wc_prop_func_t"""
        import libsvn.core, weakref
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weakly track the owning pool's _is_valid flag so later accesses
            # can detect that the underlying APR pool memory was freed.
            self.__dict__["_is_valid"] = weakref.ref(
              self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        # If this member was assigned before (tracked in _members), copy the
        # previous object's Python-side state onto the value SWIG returned.
        members = self.__dict__.get("_members")
        if members is not None:
            old_value = members.get(name)
            if (old_value is not None and value is not None and
                  value is not old_value):
                try:
                    value.__dict__.update(old_value.__dict__)
                except AttributeError:
                    pass
        if hasattr(value, "assert_valid"):
            value.assert_valid()
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        # Remember assignments so __getattr__ can restore their state later.
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Calling the proxy delegates to the module-level invoke helper.
        return svn_ra_invoke_set_wc_prop_func(self, *args)
svn_ra_set_wc_prop_func_t_swigregister = _ra.svn_ra_set_wc_prop_func_t_swigregister
svn_ra_set_wc_prop_func_t_swigregister(svn_ra_set_wc_prop_func_t)
class svn_ra_push_wc_prop_func_t:
    """Proxy of C svn_ra_push_wc_prop_func_t struct"""
    # SWIG-generated (Python 2) callable proxy for a C function-pointer type.
    # Instances come from the C layer; the constructor is deliberately blocked.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_ra_push_wc_prop_func_t, name, value)
    __swig_getmethods__ = {}
    # NOTE: this lambda is shadowed by the def __getattr__ defined below.
    __getattr__ = lambda self, name: _swig_getattr(self, svn_ra_push_wc_prop_func_t, name)
    # Python 2 raise syntax: this file targets Python 2 only.
    def __init__(self, *args, **kwargs): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_ra_push_wc_prop_func_t"""
        import libsvn.core, weakref
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weakly track the owning pool's _is_valid flag so later accesses
            # can detect that the underlying APR pool memory was freed.
            self.__dict__["_is_valid"] = weakref.ref(
              self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        # If this member was assigned before (tracked in _members), copy the
        # previous object's Python-side state onto the value SWIG returned.
        members = self.__dict__.get("_members")
        if members is not None:
            old_value = members.get(name)
            if (old_value is not None and value is not None and
                  value is not old_value):
                try:
                    value.__dict__.update(old_value.__dict__)
                except AttributeError:
                    pass
        if hasattr(value, "assert_valid"):
            value.assert_valid()
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        # Remember assignments so __getattr__ can restore their state later.
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Calling the proxy delegates to the module-level invoke helper.
        return svn_ra_invoke_push_wc_prop_func(self, *args)
svn_ra_push_wc_prop_func_t_swigregister = _ra.svn_ra_push_wc_prop_func_t_swigregister
svn_ra_push_wc_prop_func_t_swigregister(svn_ra_push_wc_prop_func_t)
class svn_ra_invalidate_wc_props_func_t:
    """Proxy of C svn_ra_invalidate_wc_props_func_t struct"""
    # SWIG-generated (Python 2) callable proxy for a C function-pointer type.
    # Instances come from the C layer; the constructor is deliberately blocked.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_ra_invalidate_wc_props_func_t, name, value)
    __swig_getmethods__ = {}
    # NOTE: this lambda is shadowed by the def __getattr__ defined below.
    __getattr__ = lambda self, name: _swig_getattr(self, svn_ra_invalidate_wc_props_func_t, name)
    # Python 2 raise syntax: this file targets Python 2 only.
    def __init__(self, *args, **kwargs): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_ra_invalidate_wc_props_func_t"""
        import libsvn.core, weakref
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weakly track the owning pool's _is_valid flag so later accesses
            # can detect that the underlying APR pool memory was freed.
            self.__dict__["_is_valid"] = weakref.ref(
              self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        # If this member was assigned before (tracked in _members), copy the
        # previous object's Python-side state onto the value SWIG returned.
        members = self.__dict__.get("_members")
        if members is not None:
            old_value = members.get(name)
            if (old_value is not None and value is not None and
                  value is not old_value):
                try:
                    value.__dict__.update(old_value.__dict__)
                except AttributeError:
                    pass
        if hasattr(value, "assert_valid"):
            value.assert_valid()
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        # Remember assignments so __getattr__ can restore their state later.
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Calling the proxy delegates to the module-level invoke helper.
        return svn_ra_invoke_invalidate_wc_props_func(self, *args)
svn_ra_invalidate_wc_props_func_t_swigregister = _ra.svn_ra_invalidate_wc_props_func_t_swigregister
svn_ra_invalidate_wc_props_func_t_swigregister(svn_ra_invalidate_wc_props_func_t)
class svn_ra_get_latest_revnum_func_t:
    """Proxy of C svn_ra_get_latest_revnum_func_t struct"""
    # SWIG-generated (Python 2) callable proxy for a C function-pointer type.
    # Instances come from the C layer; the constructor is deliberately blocked.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_ra_get_latest_revnum_func_t, name, value)
    __swig_getmethods__ = {}
    # NOTE: this lambda is shadowed by the def __getattr__ defined below.
    __getattr__ = lambda self, name: _swig_getattr(self, svn_ra_get_latest_revnum_func_t, name)
    # Python 2 raise syntax: this file targets Python 2 only.
    def __init__(self, *args, **kwargs): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_ra_get_latest_revnum_func_t"""
        import libsvn.core, weakref
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weakly track the owning pool's _is_valid flag so later accesses
            # can detect that the underlying APR pool memory was freed.
            self.__dict__["_is_valid"] = weakref.ref(
              self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        # If this member was assigned before (tracked in _members), copy the
        # previous object's Python-side state onto the value SWIG returned.
        members = self.__dict__.get("_members")
        if members is not None:
            old_value = members.get(name)
            if (old_value is not None and value is not None and
                  value is not old_value):
                try:
                    value.__dict__.update(old_value.__dict__)
                except AttributeError:
                    pass
        if hasattr(value, "assert_valid"):
            value.assert_valid()
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        # Remember assignments so __getattr__ can restore their state later.
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Calling the proxy delegates to the module-level invoke helper.
        return svn_ra_invoke_get_latest_revnum_func(self, *args)
svn_ra_get_latest_revnum_func_t_swigregister = _ra.svn_ra_get_latest_revnum_func_t_swigregister
svn_ra_get_latest_revnum_func_t_swigregister(svn_ra_get_latest_revnum_func_t)
class svn_ra_get_client_string_func_t:
    """Proxy of C svn_ra_get_client_string_func_t struct"""
    # SWIG-generated (Python 2) callable proxy for a C function-pointer type.
    # Instances come from the C layer; the constructor is deliberately blocked.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, svn_ra_get_client_string_func_t, name, value)
    __swig_getmethods__ = {}
    # NOTE: this lambda is shadowed by the def __getattr__ defined below.
    __getattr__ = lambda self, name: _swig_getattr(self, svn_ra_get_client_string_func_t, name)
    # Python 2 raise syntax: this file targets Python 2 only.
    def __init__(self, *args, **kwargs): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    def set_parent_pool(self, parent_pool=None):
        """Create a new proxy object for svn_ra_get_client_string_func_t"""
        import libsvn.core, weakref
        self.__dict__["_parent_pool"] = \
            parent_pool or libsvn.core.application_pool;
        if self.__dict__["_parent_pool"]:
            # Weakly track the owning pool's _is_valid flag so later accesses
            # can detect that the underlying APR pool memory was freed.
            self.__dict__["_is_valid"] = weakref.ref(
              self.__dict__["_parent_pool"]._is_valid)
    def assert_valid(self):
        """Assert that this object is using valid pool memory"""
        if "_is_valid" in self.__dict__:
            assert self.__dict__["_is_valid"](), "Variable has already been deleted"
    def __getattr__(self, name):
        """Get an attribute from this object"""
        self.assert_valid()
        value = _swig_getattr(self, self.__class__, name)
        # If this member was assigned before (tracked in _members), copy the
        # previous object's Python-side state onto the value SWIG returned.
        members = self.__dict__.get("_members")
        if members is not None:
            old_value = members.get(name)
            if (old_value is not None and value is not None and
                  value is not old_value):
                try:
                    value.__dict__.update(old_value.__dict__)
                except AttributeError:
                    pass
        if hasattr(value, "assert_valid"):
            value.assert_valid()
        return value
    def __setattr__(self, name, value):
        """Set an attribute on this object"""
        self.assert_valid()
        # Remember assignments so __getattr__ can restore their state later.
        self.__dict__.setdefault("_members",{})[name] = value
        return _swig_setattr(self, self.__class__, name, value)
    def __call__(self, *args):
        # Calling the proxy delegates to the module-level invoke helper.
        return svn_ra_invoke_get_client_string_func(self, *args)
svn_ra_get_client_string_func_t_swigregister = _ra.svn_ra_get_client_string_func_t_swigregister
svn_ra_get_client_string_func_t_swigregister(svn_ra_get_client_string_func_t)
class svn_ra_file_rev_handler_t:
"""Proxy of C svn_ra_file_rev_handler_t struct"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, svn_ra_file_rev_handler_t, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda | |
== "'":
# Remove padding \x00 in the value string
ValStr = "'%s'" % ValStr[1:-1].rstrip('\x00')
Struct = ConfigDict['struct']
if Struct in self._StructType:
# Format the array using its struct type
Unit = int(Struct[4:]) // 8
Value = Array2Val(ConfigDict['value'])
Loop = ConfigDict['length'] // Unit
Values = []
for Each in range(Loop):
Values.append (Value & ((1 << (Unit * 8)) - 1))
Value = Value >> (Unit * 8)
ValStr = '{ ' + ', '.join ([('0x%%0%dX' % (Unit * 2)) % x for x in Values]) + ' }'
return ValStr
    def FormatListValue(self, ConfigDict):
        """Normalize a list-style config value into a canonical byte-array string.

        Rewrites ConfigDict['value'] in place as '{0xAA,0xBB,...}' using the
        element width implied by ConfigDict['struct'] (e.g. UINT16 -> 2 bytes,
        emitted least-significant byte first).  Returns '' on success, or None
        when the struct type is not a known array element type.
        """
        Struct = ConfigDict['struct']
        if Struct not in self._StructType:
            return
        DataList = self.ValueToList(ConfigDict['value'], ConfigDict['length'])
        # Element size in bytes, parsed from the type name (UINT8/16/32/64).
        Unit = int(Struct[4:]) // 8
        if int(ConfigDict['length']) != Unit * len(DataList):
            # Total size does not match the declared element width; fall back
            # to treating every list entry as a single byte.
            Unit = 1
            if int(ConfigDict['length']) != len(DataList):
                raise Exception("Array size is not proper for '%s' !" % ConfigDict['cname'])
        ByteArray = []
        for Value in DataList:
            # Emit each element LSB-first.
            for Loop in range(Unit):
                ByteArray.append("0x%02X" % (Value & 0xFF))
                Value = Value >> 8
        NewValue = '{' + ','.join(ByteArray) + '}'
        ConfigDict['value'] = NewValue
        return ""
def GetOrderNumber (self, Offset, Order, BitOff = 0):
if isinstance(Order, int):
if Order == -1:
Order = Offset << 16
else:
(Major, Minor) = Order.split('.')
Order = (int (Major, 16) << 16) + ((int (Minor, 16) & 0xFF) << 8)
return Order + (BitOff & 0xFF)
def SubtituteLine (self, Line, Args):
Args = Args.strip()
Vars = Args.split(':')
Line = self.ExpandMacros(Line, True)
for Idx in range(len(Vars)-1, 0, -1):
Line = Line.replace('$(%d)' % Idx, Vars[Idx].strip())
return Line
def CfgDuplicationCheck (self, CfgDict, Name):
if not self.Debug:
return
if Name == 'Dummy':
return
if Name not in CfgDict:
CfgDict[Name] = 1
else:
print ("WARNING: Duplicated item found '%s' !" % ConfigDict['cname'])
def AddBsfChildPage (self, Child, Parent = 'root'):
def AddBsfChildPageRecursive (PageTree, Parent, Child):
Key = next(iter(PageTree))
if Parent == Key:
PageTree[Key].append({Child : []})
return True
else:
Result = False
for Each in PageTree[Key]:
if AddBsfChildPageRecursive (Each, Parent, Child):
Result = True
break
return Result
return AddBsfChildPageRecursive (self._CfgPageTree, Parent, Child)
def ParseDscFile (self, DscFile):
self._DscLines = []
self._CfgItemList = []
self._CfgPageDict = {}
self._CfgBlkDict = {}
self._BsfTempDict = {}
self._CfgPageTree = {'root' : []}
self._DscFile = DscFile
CfgDict = {}
SectionNameList = ["Defines".lower(), "PcdsFeatureFlag".lower(),
"PcdsDynamicVpd.Tmp".lower(), "PcdsDynamicVpd.Upd".lower()]
IsDefSect = False
IsPcdSect = False
IsUpdSect = False
IsTmpSect = False
TemplateName = ''
IfStack = []
ElifStack = []
Error = 0
ConfigDict = {}
DscFd = open(DscFile, "r")
DscLines = DscFd.readlines()
DscFd.close()
BsfRegExp = re.compile("(%s):{(.+?)}(?:$|\s+)" % '|'.join(self._BsfKeyList))
HdrRegExp = re.compile("(%s):{(.+?)}" % '|'.join(self._HdrKeyList))
CfgRegExp = re.compile("^([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+|\*)\s*\|\s*(\d+|0x[0-9a-fA-F]+)\s*\|\s*(.+)")
TksRegExp = re.compile("^(g[_a-zA-Z0-9]+\.)(.+)")
SkipLines = 0
while len(DscLines):
DscLine = DscLines.pop(0).strip()
if SkipLines == 0:
self._DscLines.append (DscLine)
else:
SkipLines = SkipLines - 1
if len(DscLine) == 0:
continue
Handle = False
Match = re.match("^\[(.+)\]", DscLine)
if Match is not None:
IsDefSect = False
IsPcdSect = False
IsUpdSect = False
IsTmpSect = False
SectionName = Match.group(1).lower()
if SectionName == SectionNameList[0]:
IsDefSect = True
if SectionName == SectionNameList[1]:
IsPcdSect = True
elif SectionName == SectionNameList[2]:
IsTmpSect = True
elif SectionName == SectionNameList[3]:
ConfigDict = {
'header' : 'ON',
'page' : '',
'name' : '',
'find' : '',
'struct' : '',
'embed' : '',
'marker' : '',
'option' : '',
'comment' : '',
'condition' : '',
'order' : -1,
'subreg' : []
}
IsUpdSect = True
Offset = 0
else:
if IsDefSect or IsPcdSect or IsUpdSect or IsTmpSect:
Match = False if DscLine[0] != '!' else True
if Match:
Match = re.match("^!(else|endif|ifdef|ifndef|if|elseif|include)\s*(.+)?$", DscLine)
Keyword = Match.group(1) if Match else ''
Remaining = Match.group(2) if Match else ''
Remaining = '' if Remaining is None else Remaining.strip()
if Keyword in ['if', 'elseif', 'ifdef', 'ifndef', 'include'] and not Remaining:
raise Exception ("ERROR: Expression is expected after '!if' or !elseif' for line '%s'" % DscLine)
if Keyword == 'else':
if IfStack:
IfStack[-1] = not IfStack[-1]
else:
raise Exception ("ERROR: No paired '!if' found for '!else' for line '%s'" % DscLine)
elif Keyword == 'endif':
if IfStack:
IfStack.pop()
Level = ElifStack.pop()
if Level > 0:
del IfStack[-Level:]
else:
raise Exception ("ERROR: No paired '!if' found for '!endif' for line '%s'" % DscLine)
elif Keyword == 'ifdef' or Keyword == 'ifndef':
Result = self.EvaulateIfdef (Remaining)
if Keyword == 'ifndef':
Result = not Result
IfStack.append(Result)
ElifStack.append(0)
elif Keyword == 'if' or Keyword == 'elseif':
Result = self.EvaluateExpress(Remaining)
if Keyword == "if":
ElifStack.append(0)
IfStack.append(Result)
else: #elseif
if IfStack:
IfStack[-1] = not IfStack[-1]
IfStack.append(Result)
ElifStack[-1] = ElifStack[-1] + 1
else:
raise Exception ("ERROR: No paired '!if' found for '!elif' for line '%s'" % DscLine)
else:
if IfStack:
Handle = reduce(lambda x,y: x and y, IfStack)
else:
Handle = True
if Handle:
if Keyword == 'include':
Remaining = self.ExpandMacros(Remaining)
# Relative to DSC filepath
IncludeFilePath = os.path.join(os.path.dirname(DscFile), Remaining)
if not os.path.exists(IncludeFilePath):
# Relative to repository to find dsc in common platform
IncludeFilePath = os.path.join(os.path.dirname (os.path.realpath(__file__)), "../..", Remaining)
try:
IncludeDsc = open(IncludeFilePath, "r")
except:
raise Exception ("ERROR: Cannot open file '%s'." % IncludeFilePath)
NewDscLines = IncludeDsc.readlines()
IncludeDsc.close()
DscLines = NewDscLines + DscLines
del self._DscLines[-1]
else:
if DscLine.startswith('!'):
raise Exception ("ERROR: Unrecoginized directive for line '%s'" % DscLine)
if not Handle:
continue
if IsDefSect:
Match = re.match("^\s*(?:DEFINE\s+)*(\w+)\s*=\s*(.+)", DscLine)
if Match:
self._MacroDict[Match.group(1)] = Match.group(2)
if self.Debug:
print ("INFO : DEFINE %s = [ %s ]" % (Match.group(1), Match.group(2)))
elif IsPcdSect:
Match = re.match("^\s*([\w\.]+)\s*\|\s*(\w+)", DscLine)
if Match:
self._PcdsDict[Match.group(1)] = Match.group(2)
if self.Debug:
print ("INFO : PCD %s = [ %s ]" % (Match.group(1), Match.group(2)))
elif IsTmpSect:
# !BSF DEFT:{GPIO_TMPL:START}
Match = re.match("^\s*#\s+(!BSF)\s+DEFT:{(.+?):(START|END)}", DscLine)
if Match:
if Match.group(3) == 'START' and not TemplateName:
TemplateName = Match.group(2).strip()
self._BsfTempDict[TemplateName] = []
if Match.group(3) == 'END' and (TemplateName == Match.group(2).strip()) and TemplateName:
TemplateName = ''
else:
if TemplateName:
Match = re.match("^!include\s*(.+)?$", DscLine)
if Match:
continue
self._BsfTempDict[TemplateName].append(DscLine)
else:
Match = re.match("^\s*#\s+(!BSF|!HDR)\s+(.+)", DscLine)
if Match:
Remaining = Match.group(2)
if Match.group(1) == '!BSF':
Result = BsfRegExp.findall (Remaining)
if Result:
for Each in Result:
Key = Each[0]
Remaining = Each[1]
if Key == 'BLOCK':
Match = re.match("NAME:\"(.+)\"\s*,\s*VER:\"(.+)\"\s*", Remaining)
if Match:
self._CfgBlkDict['name'] = Match.group(1)
self._CfgBlkDict['ver'] = Match.group(2)
elif Key == 'SUBT':
#GPIO_TMPL:1:2:3
Remaining = Remaining.strip()
Match = re.match("(\w+)\s*:", Remaining)
if Match:
TemplateName = Match.group(1)
for Line in self._BsfTempDict[TemplateName][::-1]:
NewLine = self.SubtituteLine (Line, Remaining)
DscLines.insert(0, NewLine)
SkipLines += 1
elif Key == 'PAGES':
# !BSF PAGES:{HSW:"Haswell System Agent", LPT:"Lynx Point PCH"}
PageList = Remaining.split(',')
for Page in PageList:
Page = Page.strip()
Match = re.match('(\w+):(\w*:)?\"(.+)\"', Page)
if Match:
PageName = Match.group(1)
ParentName = Match.group(2)
if not ParentName or ParentName == ':' :
ParentName = 'root'
else:
ParentName = ParentName[:-1]
if not self.AddBsfChildPage (PageName, ParentName):
raise Exception("Cannot find parent page '%s'!" % ParentName)
self._CfgPageDict[PageName] = Match.group(3)
else:
raise Exception("Invalid page definitions '%s'!" % Page)
elif Key in ['NAME', 'HELP', 'OPTION'] and Remaining.startswith('+'):
# Allow certain options to be extended to multiple lines
ConfigDict[Key.lower()] += Remaining[1:]
else:
if Key == 'NAME':
Remaining = Remaining.strip()
elif Key == 'CONDITION':
Remaining = self.ExpandMacros(Remaining.strip())
ConfigDict[Key.lower()] = Remaining
else:
Match = HdrRegExp.match(Remaining)
if Match:
Key = Match.group(1)
Remaining = Match.group(2)
if Key == 'EMBED':
Parts = Remaining.split(':')
Names = Parts[0].split(',')
DummyDict = ConfigDict.copy()
if len(Names) > 1:
Remaining = Names[0] + ':' + ':'.join(Parts[1:])
DummyDict['struct'] = Names[1]
else:
DummyDict['struct'] = Names[0]
DummyDict['cname'] = 'Dummy'
DummyDict['name'] = ''
DummyDict['embed'] = Remaining
DummyDict['offset'] = Offset
DummyDict['length'] = 0
DummyDict['value'] = '0'
DummyDict['type'] = 'Reserved'
DummyDict['help'] = ''
DummyDict['subreg'] = []
self._CfgItemList.append(DummyDict)
else:
ConfigDict[Key.lower()] = Remaining
# Check CFG line
# gCfgData.VariableName | * | 0x01 | 0x1
Clear = False
Match = TksRegExp.match (DscLine)
if Match:
DscLine = 'gCfgData.%s' % Match.group(2)
if DscLine.startswith('gCfgData.'):
Match = CfgRegExp.match(DscLine[9:])
else:
Match = None
if Match:
ConfigDict['space'] = 'gCfgData'
ConfigDict['cname'] = Match.group(1)
if Match.group(2) != '*':
Offset = int (Match.group(2), 16)
ConfigDict['offset'] = Offset
ConfigDict['order'] = self.GetOrderNumber (ConfigDict['offset'], ConfigDict['order'])
Value = Match.group(4).strip()
if Match.group(3).startswith("0x"):
Length = int (Match.group(3), 16)
else :
Length = int (Match.group(3))
Offset += Length
ConfigDict['length'] = Length
Match = re.match("\$\((\w+)\)", Value)
if Match:
| |
from exchangelib import Q, Message, ExtendedProperty
from exchangelib.errors import ErrorDeleteDistinguishedFolder, ErrorObjectTypeChanged, DoesNotExist, \
MultipleObjectsReturned, ErrorItemSave, ErrorItemNotFound
from exchangelib.folders import Calendar, DeletedItems, Drafts, Inbox, Outbox, SentItems, JunkEmail, Messages, Tasks, \
Contacts, Folder, RecipientCache, GALContacts, System, AllContacts, MyContactsExtended, Reminders, Favorites, \
AllItems, ConversationSettings, Friends, RSSFeeds, Sharing, IMContactList, QuickContacts, Journal, Notes, \
SyncIssues, MyContacts, ToDoSearch, FolderCollection, DistinguishedFolderId, Files, \
DefaultFoldersChangeHistory, PassThroughSearchResults, SmsAndChatsSync, GraphAnalytics, Signal, \
PdpProfileV2Secured, VoiceMail, FolderQuerySet, SingleFolderQuerySet, SHALLOW, RootOfHierarchy, Companies, \
OrganizationalContacts, PeopleCentricConversationBuddies
from exchangelib.properties import Mailbox, InvalidField
from exchangelib.services import GetFolder
from .common import EWSTest, get_random_string, get_random_int, get_random_bool, get_random_datetime, get_random_bytes,\
get_random_byte
def get_random_str_tuple(tuple_length, str_length):
    """Return a tuple of *tuple_length* random strings, each *str_length* chars long and space-free."""
    items = []
    for _ in range(tuple_length):
        items.append(get_random_string(str_length, spaces=False))
    return tuple(items)
class FolderTest(EWSTest):
    def test_folders(self):
        """Smoke-test access to every folder in the mailbox, plus the well-known shortcut properties."""
        for f in self.account.root.walk():
            if isinstance(f, System):
                # No access to system folder, apparently
                continue
            f.test_access()
        # Test shortcuts
        for f, cls in (
                (self.account.trash, DeletedItems),
                (self.account.drafts, Drafts),
                (self.account.inbox, Inbox),
                (self.account.outbox, Outbox),
                (self.account.sent, SentItems),
                (self.account.junk, JunkEmail),
                (self.account.contacts, Contacts),
                (self.account.tasks, Tasks),
                (self.account.calendar, Calendar),
        ):
            with self.subTest(f=f, cls=cls):
                self.assertIsInstance(f, cls)
                f.test_access()
                # Test item field lookup
                self.assertEqual(f.get_item_field_by_fieldname('subject').name, 'subject')
                with self.assertRaises(ValueError):
                    f.get_item_field_by_fieldname('XXX')
def test_find_folders(self):
folders = list(FolderCollection(account=self.account, folders=[self.account.root]).find_folders())
self.assertGreater(len(folders), 40, sorted(f.name for f in folders))
    def test_find_folders_with_restriction(self):
        """find_folders() honors Q restrictions: exact, prefix, and case-(in)sensitive matching."""
        # Exact match
        folders = list(FolderCollection(account=self.account, folders=[self.account.root])
                       .find_folders(q=Q(name='Top of Information Store')))
        self.assertEqual(len(folders), 1, sorted(f.name for f in folders))
        # Startswith
        folders = list(FolderCollection(account=self.account, folders=[self.account.root])
                       .find_folders(q=Q(name__startswith='Top of ')))
        self.assertEqual(len(folders), 1, sorted(f.name for f in folders))
        # Wrong case
        folders = list(FolderCollection(account=self.account, folders=[self.account.root])
                       .find_folders(q=Q(name__startswith='top of ')))
        self.assertEqual(len(folders), 0, sorted(f.name for f in folders))
        # Case insensitive
        folders = list(FolderCollection(account=self.account, folders=[self.account.root])
                       .find_folders(q=Q(name__istartswith='top of ')))
        self.assertEqual(len(folders), 1, sorted(f.name for f in folders))
    def test_get_folders(self):
        """GetFolder works on a plain collection and accepts DistinguishedFolderId members."""
        folders = list(FolderCollection(account=self.account, folders=[self.account.root]).get_folders())
        self.assertEqual(len(folders), 1, sorted(f.name for f in folders))
        # Test that GetFolder can handle FolderId instances
        folders = list(FolderCollection(account=self.account, folders=[DistinguishedFolderId(
            id=Inbox.DISTINGUISHED_FOLDER_ID,
            mailbox=Mailbox(email_address=self.account.primary_smtp_address)
        )]).get_folders())
        self.assertEqual(len(folders), 1, sorted(f.name for f in folders))
    def test_get_folders_with_distinguished_id(self):
        """GetFolder with a DistinguishedFolderId yields the specialized folder class, not a generic one."""
        # Test that we return an Inbox instance and not a generic Messages or Folder instance when we call GetFolder
        # with a DistinguishedFolderId instance with an ID of Inbox.DISTINGUISHED_FOLDER_ID.
        inbox = list(GetFolder(account=self.account).call(
            folders=[DistinguishedFolderId(
                id=Inbox.DISTINGUISHED_FOLDER_ID,
                mailbox=Mailbox(email_address=self.account.primary_smtp_address))
            ],
            shape='IdOnly',
            additional_fields=[],
        ))[0]
        self.assertIsInstance(inbox, Inbox)
    def test_folder_grouping(self):
        """Every folder's folder_class string matches the distinguished folder class it maps to."""
        # If you get errors here, you probably need to fill out [folder class].LOCALIZED_NAMES for your locale.
        for f in self.account.root.walk():
            with self.subTest(f=f):
                if isinstance(f, (
                        Messages, DeletedItems, AllContacts, MyContactsExtended, Sharing, Favorites, SyncIssues,
                        MyContacts
                )):
                    self.assertEqual(f.folder_class, 'IPF.Note')
                elif isinstance(f, Companies):
                    self.assertEqual(f.folder_class, 'IPF.Contact.Company')
                elif isinstance(f, OrganizationalContacts):
                    self.assertEqual(f.folder_class, 'IPF.Contact.OrganizationalContacts')
                elif isinstance(f, PeopleCentricConversationBuddies):
                    self.assertEqual(f.folder_class, 'IPF.Contact.PeopleCentricConversationBuddies')
                elif isinstance(f, GALContacts):
                    self.assertEqual(f.folder_class, 'IPF.Contact.GalContacts')
                elif isinstance(f, RecipientCache):
                    self.assertEqual(f.folder_class, 'IPF.Contact.RecipientCache')
                elif isinstance(f, Contacts):
                    self.assertEqual(f.folder_class, 'IPF.Contact')
                elif isinstance(f, Calendar):
                    self.assertEqual(f.folder_class, 'IPF.Appointment')
                elif isinstance(f, (Tasks, ToDoSearch)):
                    self.assertEqual(f.folder_class, 'IPF.Task')
                elif isinstance(f, Reminders):
                    self.assertEqual(f.folder_class, 'Outlook.Reminder')
                elif isinstance(f, AllItems):
                    self.assertEqual(f.folder_class, 'IPF')
                elif isinstance(f, ConversationSettings):
                    self.assertEqual(f.folder_class, 'IPF.Configuration')
                elif isinstance(f, Files):
                    self.assertEqual(f.folder_class, 'IPF.Files')
                elif isinstance(f, Friends):
                    self.assertEqual(f.folder_class, 'IPF.Note')
                elif isinstance(f, RSSFeeds):
                    self.assertEqual(f.folder_class, 'IPF.Note.OutlookHomepage')
                elif isinstance(f, IMContactList):
                    self.assertEqual(f.folder_class, 'IPF.Contact.MOC.ImContactList')
                elif isinstance(f, QuickContacts):
                    self.assertEqual(f.folder_class, 'IPF.Contact.MOC.QuickContacts')
                elif isinstance(f, Journal):
                    self.assertEqual(f.folder_class, 'IPF.Journal')
                elif isinstance(f, Notes):
                    self.assertEqual(f.folder_class, 'IPF.StickyNote')
                elif isinstance(f, DefaultFoldersChangeHistory):
                    self.assertEqual(f.folder_class, 'IPM.DefaultFolderHistoryItem')
                elif isinstance(f, PassThroughSearchResults):
                    self.assertEqual(f.folder_class, 'IPF.StoreItem.PassThroughSearchResults')
                elif isinstance(f, SmsAndChatsSync):
                    self.assertEqual(f.folder_class, 'IPF.SmsAndChatsSync')
                elif isinstance(f, GraphAnalytics):
                    self.assertEqual(f.folder_class, 'IPF.StoreItem.GraphAnalytics')
                elif isinstance(f, Signal):
                    self.assertEqual(f.folder_class, 'IPF.StoreItem.Signal')
                elif isinstance(f, PdpProfileV2Secured):
                    self.assertEqual(f.folder_class, 'IPF.StoreItem.PdpProfileSecured')
                elif isinstance(f, VoiceMail):
                    self.assertEqual(f.folder_class, 'IPF.Note.Microsoft.Voicemail')
                else:
                    # Unmapped folder types must still be generic Folder instances.
                    self.assertIn(f.folder_class, (None, 'IPF'), (f.name, f.__class__.__name__, f.folder_class))
                    self.assertIsInstance(f, Folder)
    def test_counts(self):
        """total_count, unread_count and child_folder_count track item and subfolder changes after refresh()."""
        # Test count values on a folder
        f = Folder(parent=self.account.inbox, name=get_random_string(16)).save()
        f.refresh()
        self.assertEqual(f.total_count, 0)
        self.assertEqual(f.unread_count, 0)
        self.assertEqual(f.child_folder_count, 0)
        # Create some items
        items = []
        for i in range(3):
            subject = 'Test Subject %s' % i
            item = Message(account=self.account, folder=f, is_read=False, subject=subject, categories=self.categories)
            item.save()
            items.append(item)
        # Refresh values and see that total_count and unread_count changes
        f.refresh()
        self.assertEqual(f.total_count, 3)
        self.assertEqual(f.unread_count, 3)
        self.assertEqual(f.child_folder_count, 0)
        for i in items:
            i.is_read = True
            i.save()
        # Refresh values and see that unread_count changes
        f.refresh()
        self.assertEqual(f.total_count, 3)
        self.assertEqual(f.unread_count, 0)
        self.assertEqual(f.child_folder_count, 0)
        self.bulk_delete(items)
        # Refresh values and see that total_count changes
        f.refresh()
        self.assertEqual(f.total_count, 0)
        self.assertEqual(f.unread_count, 0)
        self.assertEqual(f.child_folder_count, 0)
        # Create some subfolders
        subfolders = []
        for i in range(3):
            subfolders.append(Folder(parent=f, name=get_random_string(16)).save())
        # Refresh values and see that child_folder_count changes
        f.refresh()
        self.assertEqual(f.total_count, 0)
        self.assertEqual(f.unread_count, 0)
        self.assertEqual(f.child_folder_count, 3)
        for sub_f in subfolders:
            sub_f.delete()
        # Refresh values and see that child_folder_count changes
        f.refresh()
        self.assertEqual(f.total_count, 0)
        self.assertEqual(f.unread_count, 0)
        self.assertEqual(f.child_folder_count, 0)
        f.delete()
    def test_refresh(self):
        """refresh() restores server-side field values and raises ValueError without a root or an id."""
        # Test that we can refresh folders
        for f in self.account.root.walk():
            with self.subTest(f=f):
                if isinstance(f, System):
                    # Can't refresh the 'System' folder for some reason
                    continue
                old_values = {}
                for field in f.FIELDS:
                    old_values[field.name] = getattr(f, field.name)
                    if field.name in ('account', 'id', 'changekey', 'parent_folder_id'):
                        # These are needed for a successful refresh()
                        continue
                    if field.is_read_only:
                        continue
                    # Scramble writable fields; refresh() must undo this.
                    setattr(f, field.name, self.random_val(field))
                f.refresh()
                for field in f.FIELDS:
                    if field.name == 'changekey':
                        # folders may change while we're testing
                        continue
                    if field.is_read_only:
                        # count values may change during the test
                        continue
                    self.assertEqual(getattr(f, field.name), old_values[field.name], (f, field.name))
        # Test refresh of root
        all_folders = sorted(f.name for f in self.account.root.walk())
        self.account.root.refresh()
        # Refreshing the root must drop its subfolder cache.
        self.assertIsNone(self.account.root._subfolders)
        self.assertEqual(
            sorted(f.name for f in self.account.root.walk()),
            all_folders
        )
        folder = Folder()
        with self.assertRaises(ValueError):
            folder.refresh()  # Must have root folder
        folder.root = self.account.root
        with self.assertRaises(ValueError):
            folder.refresh()  # Must have an id
def test_parent(self):
self.assertEqual(
self.account.calendar.parent.name,
'Top of Information Store'
)
self.assertEqual(
self.account.calendar.parent.parent.name,
'root'
)
def test_children(self):
self.assertIn(
'Top of Information Store',
[c.name for c in self.account.root.children]
)
def test_parts(self):
self.assertEqual(
[p.name for p in self.account.calendar.parts],
['root', 'Top of Information Store', self.account.calendar.name]
)
def test_absolute(self):
self.assertEqual(
self.account.calendar.absolute,
'/root/Top of Information Store/' + self.account.calendar.name
)
def test_walk(self):
self.assertGreaterEqual(len(list(self.account.root.walk())), 20)
self.assertGreaterEqual(len(list(self.account.contacts.walk())), 2)
def test_tree(self):
self.assertTrue(self.account.root.tree().startswith('root'))
    def test_glob(self):
        """glob() supports wildcards, parent navigation ('..', '/') and recursive '**' patterns."""
        self.assertGreaterEqual(len(list(self.account.root.glob('*'))), 5)
        self.assertEqual(len(list(self.account.contacts.glob('GAL*'))), 1)
        self.assertEqual(len(list(self.account.contacts.glob('gal*'))), 1)  # Test case-insensitivity
        self.assertGreaterEqual(len(list(self.account.contacts.glob('/'))), 5)
        self.assertGreaterEqual(len(list(self.account.contacts.glob('../*'))), 5)
        self.assertEqual(len(list(self.account.root.glob('**/%s' % self.account.contacts.name))), 1)
        self.assertEqual(len(list(self.account.root.glob('Top of*/%s' % self.account.contacts.name))), 1)
def test_collection_filtering(self):
self.assertGreaterEqual(self.account.root.tois.children.all().count(), 0)
self.assertGreaterEqual(self.account.root.tois.walk().all().count(), 0)
self.assertGreaterEqual(self.account.root.tois.glob('*').all().count(), 0)
    def test_empty_collections(self):
        """QuerySets over empty folder collections yield empty results in every access mode."""
        self.assertEqual(self.account.trash.children.all().count(), 0)
        self.assertEqual(self.account.trash.walk().all().count(), 0)
        self.assertEqual(self.account.trash.glob('XXX').all().count(), 0)
        self.assertEqual(list(self.account.trash.glob('XXX').get_folders()), [])
        self.assertEqual(list(self.account.trash.glob('XXX').find_folders()), [])
def test_div_navigation(self):
self.assertEqual(
(self.account.root / 'Top of Information Store' / self.account.calendar.name).id,
self.account.calendar.id
)
self.assertEqual(
(self.account.root / 'Top of Information Store' / '..').id,
self.account.root.id
)
self.assertEqual(
(self.account.root / '.').id,
self.account.root.id
)
    def test_double_div_navigation(self):
        """'//' navigates without populating the folder cache; '..' is unsupported in that mode."""
        self.account.root.refresh()  # Clear the cache
        # Test normal navigation
        self.assertEqual(
            (self.account.root // 'Top of Information Store' // self.account.calendar.name).id,
            self.account.calendar.id
        )
        # The cache must remain unpopulated after '//' navigation.
        self.assertIsNone(self.account.root._subfolders)
        # Test parent ('..') syntax. Should not work
        with self.assertRaises(ValueError) as e:
            _ = self.account.root // 'Top of Information Store' // '..'
        self.assertEqual(e.exception.args[0], 'Cannot get parent without a folder cache')
        self.assertIsNone(self.account.root._subfolders)
        # Test self ('.') syntax
        self.assertEqual(
            (self.account.root // '.').id,
            self.account.root.id
        )
        self.assertIsNone(self.account.root._subfolders)
    def test_extended_properties(self):
        """Extended properties can be (de)registered on Folder and RootOfHierarchy classes, but not instances."""
        # Test extended properties on folders and folder roots. This extended prop gets the size (in bytes) of a folder
        class FolderSize(ExtendedProperty):
            property_tag = 0x0e08
            property_type = 'Integer'
        try:
            Folder.register('size', FolderSize)
            self.account.inbox.refresh()
            self.assertGreater(self.account.inbox.size, 0)
        finally:
            # Always deregister so later tests see a clean Folder class.
            Folder.deregister('size')
        try:
            RootOfHierarchy.register('size', FolderSize)
            self.account.root.refresh()
            self.assertGreater(self.account.root.size, 0)
        finally:
            RootOfHierarchy.deregister('size')
        # Register is only allowed on Folder and RootOfHierarchy classes
        with self.assertRaises(TypeError):
            self.account.calendar.register(FolderSize)
        with self.assertRaises(TypeError):
            self.account.root.register(FolderSize)
    def test_create_update_empty_delete(self):
        """Folder create, rename, empty(), wipe() and delete(), plus distinguished-folder delete protection."""
        f = Messages(parent=self.account.inbox, name=get_random_string(16))
        f.save()
        self.assertIsNotNone(f.id)
        self.assertIsNotNone(f.changekey)
        new_name = get_random_string(16)
        f.name = new_name
        f.save()
        f.refresh()
        self.assertEqual(f.name, new_name)
        with self.assertRaises(ErrorObjectTypeChanged):
            # FolderClass may not be changed
            f.folder_class = get_random_string(16)
            f.save(update_fields=['folder_class'])
        # Create a subfolder
        Messages(parent=f, name=get_random_string(16)).save()
        self.assertEqual(len(list(f.children)), 1)
        # empty() without delete_sub_folders leaves the subfolder in place.
        f.empty()
        self.assertEqual(len(list(f.children)), 1)
        f.empty(delete_sub_folders=True)
        self.assertEqual(len(list(f.children)), 0)
        # Create a subfolder again, and delete it by wiping
        Messages(parent=f, name=get_random_string(16)).save()
        self.assertEqual(len(list(f.children)), 1)
        f.wipe()
        self.assertEqual(len(list(f.children)), 0)
        f.delete()
        with self.assertRaises(ValueError):
            # No longer has an ID
            f.refresh()
        # Delete all subfolders of inbox
        for c in self.account.inbox.children:
            c.delete()
        with self.assertRaises(ErrorDeleteDistinguishedFolder):
            self.account.inbox.delete()
def test_generic_folder(self):
f = Folder(parent=self.account.inbox, name=get_random_string(16))
f.save()
f.name = get_random_string(16)
f.save()
f.delete()
def test_folder_query_set(self):
# Create a folder hierarchy and test a folder queryset
#
# -f0
# - f1
# - f2
# - f21
# - f22
f0 = Folder(parent=self.account.inbox, name=get_random_string(16)).save()
f1 = Folder(parent=f0, name=get_random_string(16)).save()
f2 = Folder(parent=f0, name=get_random_string(16)).save()
f21 = Folder(parent=f2, name=get_random_string(16)).save()
f22 = Folder(parent=f2, name=get_random_string(16)).save()
folder_qs = SingleFolderQuerySet(account=self.account, folder=f0)
try:
# Test all()
self.assertSetEqual(
set(f.name for f in folder_qs.all()),
{f.name for f in (f1, f2, f21, f22)}
)
# Test only()
self.assertSetEqual(
set(f.name for f in folder_qs.only('name').all()),
{f.name for f in (f1, f2, f21, f22)}
)
self.assertSetEqual(
set(f.child_folder_count for f in folder_qs.only('name').all()),
{None}
)
# Test depth()
self.assertSetEqual(
set(f.name for f in folder_qs.depth(SHALLOW).all()),
{f.name for f in (f1, f2)}
)
# Test filter()
self.assertSetEqual(
set(f.name for f in folder_qs.filter(name=f1.name)),
{f.name for f in (f1,)}
)
self.assertSetEqual(
set(f.name for f in folder_qs.filter(name__in=[f1.name, f2.name])),
{f.name for | |
"hum_frac = br_mr*0+0.64" '+tmpdir+'/clm_params.nc '+tmpdir+'/clm_params.nc')
os.system(myncap+' -O -s "humhol_dist = br_mr*0+1.0" '+tmpdir+'/clm_params.nc '+tmpdir+'/clm_params.nc')
os.system(myncap+' -O -s "qflx_h2osfc_surfrate = br_mr*0+1.0e-7" '+tmpdir+'/clm_params.nc '+tmpdir+'/clm_params.nc')
os.system(myncap+' -O -s "rsub_top_globalmax = br_mr*0+1.2e-5" '+tmpdir+'/clm_params.nc '+tmpdir+'/clm_params.nc')
os.system('chmod u+w ' +tmpdir+'/clm_params.nc')
#Apply user-specified parameter overrides from options.parm_file to the
#temporary clm_params.nc.  Each non-comment line is either
#"<name> <value>" (set the whole variable) or "<name> <index> <value>"
#(set a single PFT/element; a negative index sets the whole variable).
if (options.parm_file != ''):
    pftfile = tmpdir+'/clm_params.nc'
    if ('/' not in options.parm_file):
        #assume in pointclm directory
        parmfile = open(PTCLMdir+'/'+options.parm_file)
    else: #assume full path given
        parmfile = open(os.path.abspath(options.parm_file))
    for s in parmfile:
        if s[0:1] != '#':
            values = s.split()
            thisvar = nffun.getvar(pftfile, values[0])
            if (len(values) == 2):
                thisvar[...] = float(values[1])
            elif (len(values) == 3):
                #Bug fix: use >= 0 so that index 0 is settable, matching
                #the parallel parm_vals and parm_file_P code paths.
                if (float(values[1]) >= 0):
                    thisvar[int(values[1])] = float(values[2])
                else:
                    thisvar[...] = float(values[2])
            ierr = nffun.putvar(pftfile, values[0], thisvar)
    parmfile.close()
if (options.parm_vals != ''):
pftfile = tmpdir+'/clm_params.nc'
parms = options.parm_vals.split('/')
nparms = len(parms)
for n in range(0,nparms):
parm_data=parms[n].split(',')
thisvar = nffun.getvar(pftfile, parm_data[0])
if (len(parm_data) == 2):
thisvar[...] = float(parm_data[1])
elif (len(parm_data) == 3):
if (float(parm_data[1]) >= 0):
thisvar[int(parm_data[1])] = float(parm_data[2])
else:
thisvar[...] = float(parm_data[2])
ierr = nffun.putvar(pftfile, parm_data[0], thisvar)
#parameter (soil order dependent) modifications if desired ::X.YANG
if (options.mymodel == 'ELM'):
if (options.mod_parm_file_P != ''):
os.system('cp '+options.mod_parm_file_P+' '+tmpdir+'/CNP_parameters.nc')
else:
os.system('cp '+options.ccsm_input+'/lnd/clm2/paramdata/CNP_parameters_'+CNPstamp+'.nc ' \
+tmpdir+'/CNP_parameters.nc')
os.system('chmod u+w ' +tmpdir+'/CNP_parameters.nc')
if (options.parm_file_P != ''):
soilorderfile = tmpdir+'/CNP_parameters.nc'
if ('/' not in options.parm_file_P):
#assume in pointclm directory
input = open(PTCLMdir+'/'+options.parm_file_P)
else: #assume full path given
input = open(os.path.abspath(options.parm_file_P))
input = open(os.path.abspath(options.parm_file_P))
for s in input:
if s[0:1] != '#':
values = s.split()
thisvar = nffun.getvar(soilorderfile, values[0])
if (len(values) == 2):
thisvar[...] = float(values[1])
elif (len(values) == 3):
if (float(values[1]) >= 0):
thisvar[int(values[1])] = float(values[2])
else:
thisvar[...] = float(values[2])
ierr = nffun.putvar(soilorderfile, values[0], thisvar)
input.close()
#set number of run years for ad, exit spinup cases
if (options.ny_ad != options.run_n and options.ad_spinup):
options.run_n = options.ny_ad
elif (options.exit_spinup):
options.run_n = 1
#create new case
cmd = './create_newcase --case '+casedir+' --mach '+options.machine+' --compset '+ \
options.compset+' --res '+options.res+' --mpilib '+ \
options.mpilib+' --walltime '+str(options.walltime)+ \
':00:00 '+'--handle-preexisting-dirs u'
if (options.mymodel == 'CLM5'):
cmd = cmd+' --run-unsupported'
if (options.project != ''):
cmd = cmd+' --project '+options.project
if (options.compiler != ''):
cmd = cmd+' --compiler '+options.compiler
cmd = cmd+' > create_newcase.log'
result = os.system(cmd)
if (os.path.isdir(casedir)):
print(casedir+' created. See create_newcase.log for details')
os.system('mv create_newcase.log '+casedir+'/')
else:
print('Error: runcase.py Failed to create case. See create_newcase.log for details')
sys.exit(1)
os.chdir(casedir)
#env_build
result = os.system('./xmlchange SAVE_TIMING=FALSE')
result = os.system('./xmlchange EXEROOT='+exeroot)
if (options.mymodel == 'ELM'):
result = os.system('./xmlchange MOSART_MODE=NULL')
#if (options.debug):
# result = os.system('./xmlchange DEBUG=TRUE')
#clm 4_5 cn config options
#clmcn_opts = "'-phys clm4_5 -cppdefs -DMODAL_AER'"
#if (options.mymodel == 'ELM'):
# os.system("./xmlchange CLM_CONFIG_OPTS="+clmcn_opts)
if (options.machine == 'userdefined'):
os.system("./xmlchange COMPILER="+options.compiler)
os.system("./xmlchange OS=linux")
os.system("./xmlchange EXEROOT="+runroot+'/'+casename+"/bld")
#-------------- env_run.xml modifications -------------------------
os.system('./xmlchange RUNDIR='+rundir)
os.system('./xmlchange DOUT_S=TRUE')
os.system('./xmlchange DOUT_S_ROOT='+runroot+'/archive/'+casename)
os.system('./xmlchange DIN_LOC_ROOT='+options.ccsm_input)
#define mask and resoultion
if (isglobal == False):
os.system('./xmlchange CLM_USRDAT_NAME='+str(numxpts)+'x'+str(numypts)+'pt_'+options.site)
if (options.ad_spinup):
if (options.mymodel == 'ELM'):
os.system("./xmlchange --append CLM_BLDNML_OPTS='-bgc_spinup on'")
elif (options.mymodel == 'CLM5'):
os.system('./xmlchange CLM_ACCELERATED_SPINUP=on')
os.system('./xmlchange CLM_FORCE_COLDSTART=on')
if (int(options.run_startyear) > -1):
os.system('./xmlchange RUN_STARTDATE='+str(options.run_startyear)+'-01-01')
print("Setting run start date to "+str(options.run_startyear)+'-01-01')
if (options.domainfile == ''):
os.system('./xmlchange ATM_DOMAIN_PATH="\${RUNDIR}"')
os.system('./xmlchange LND_DOMAIN_PATH="\${RUNDIR}"')
os.system('./xmlchange ATM_DOMAIN_FILE=domain.nc')
os.system('./xmlchange LND_DOMAIN_FILE=domain.nc')
else:
domainpath = '/'.join(options.domainfile.split('/')[:-1])
domainfile = options.domainfile.split('/')[-1]
os.system('./xmlchange ATM_DOMAIN_PATH='+domainpath)
os.system('./xmlchange LND_DOMAIN_PATH='+domainpath)
os.system('./xmlchange ATM_DOMAIN_FILE='+domainfile)
os.system('./xmlchange LND_DOMAIN_FILE='+domainfile)
#turn off archiving
os.system('./xmlchange DOUT_S=FALSE')
#datm options
if (not cpl_bypass):
if (use_reanalysis):
os.system('./xmlchange DATM_MODE=CLMCRUNCEP')
else:
if (isglobal == False):
os.system('./xmlchange DATM_MODE=CLM1PT')
os.system('./xmlchange DATM_CLMNCEP_YR_START='+str(startyear))
os.system('./xmlchange DATM_CLMNCEP_YR_END='+str(endyear))
if (options.align_year == -999):
os.system('./xmlchange DATM_CLMNCEP_YR_ALIGN=1')
else:
os.system('./xmlchange DATM_CLMNCEP_YR_ALIGN='+str(options.align_year))
#Change simulation timestep
if (options.tstep != 0.5):
os.system('./xmlchange ATM_NCPL='+str(int(24/float(options.tstep))))
#Branch run options
if (options.branch or options.exit_spinup):
    # Branch / exit-spinup runs continue from a reference case.
    # NOTE(review): finidat_yst looks like 'yNNNN'; [1:] strips the prefix.
    os.system('./xmlchange RUN_TYPE=branch')
    os.system('./xmlchange RUN_REFDATE='+finidat_yst[1:]+'-01-01')
    os.system('./xmlchange RUN_REFCASE='+options.finidat_case)
else:
    # Non-branch BGC runs (not AD spinup, not coldstart) still need a
    # reference date for the initial-condition file.
    if (('CN' in compset or 'BGC' in compset) and options.ad_spinup \
        == False and options.coldstart==False):
        os.system('./xmlchange RUN_REFDATE='+finidat_yst[1:]+'-01-01')
#adds capability to run with transient CO2
if ('20TR' in compset):
    os.system('./xmlchange CCSM_BGC=CO2A')
    os.system('./xmlchange CLM_CO2_TYPE=diagnostic')
    if (options.run_startyear == -1):
        # Transient runs default to the historical start year.
        os.system('./xmlchange RUN_STARTDATE=1850-01-01')
#No pnetcdf for small cases on compy
if ('compy' in options.machine and int(options.np) < 80):
    os.system('./xmlchange PIO_TYPENAME=netcdf')
# Assign MPI tasks/threads for every model component.
# NOTE: Python-2 print statements below — this script requires Python 2.
comps = ['ATM','LND','ICE','OCN','CPL','GLC','ROF','WAV']
for c in comps:
    print 'Setting NTASKS_'+c+' to '+str(options.np)
    os.system('./xmlchange NTASKS_'+c+'='+str(options.np))
    os.system('./xmlchange NTHRDS_'+c+'=1')
if (int(options.np) > 1):
    # Multi-task runs: cap tasks per node at the machine's processors/node.
    os.system('./xmlchange MAX_TASKS_PER_NODE='+str(ppn))
    os.system('./xmlchange MAX_MPITASKS_PER_NODE='+str(ppn))
if (int(options.ninst) > 1):
    # Multi-instance (ensemble) land model: one task per instance.
    os.system('./xmlchange NINST_LND='+options.ninst)
    os.system('./xmlchange NTASKS_LND='+options.ninst)
# Run length and restart frequency.
os.system('./xmlchange STOP_OPTION='+options.run_units)
os.system('./xmlchange STOP_N='+str(options.run_n))
if (options.rest_n > 0):
    print 'Setting REST_N to '+str(options.rest_n)
    os.system('./xmlchange REST_N='+str(options.rest_n))
# user-defined PFT numbers (default is 17)
if (options.maxpatch_pft != 17):
    # Prepend -maxpft to the existing CLM_BLDNML_OPTS value.
    print 'resetting maxpatch_pft to '+str(options.maxpatch_pft)
    xval = subprocess.check_output('./xmlquery --value CLM_BLDNML_OPTS', cwd=casedir, shell=True)
    xval = '-maxpft '+str(options.maxpatch_pft)+' '+xval
    os.system("./xmlchange --id CLM_BLDNML_OPTS --val '" + xval + "'")
# for spinup and transient runs, PIO_TYPENAME is pnetcdf, which now does not work well
if('mac' in options.machine or 'cades' in options.machine):
    os.system("./xmlchange --id PIO_TYPENAME --val netcdf ")
#--------------------------CESM setup ----------------------------------------
if (options.clean_config):
    # Wipe any previous case configuration before re-configuring.
    result = os.system('./case.setup -clean')
    if (result > 0):
        print('Error: PointCLM.py failed to setup case. Aborting')
        sys.exit(1)
    # NOTE(review): removal of Macros / user namelists assumed to belong to
    # the clean branch — confirm original indentation.
    os.system('rm -f Macro')
    os.system('rm -f user-nl-*')
# Add options for FFLAGS to Macros file here
#clm namelist modifications
for i in range(1,int(options.ninst)+1):
if (int(options.ninst) == 1):
output = open("user_nl_clm",'w')
else:
if (i < 10):
output = open("user_nl_clm_000"+str(i),'w')
elif (i < 100):
output = open("user_nl_clm_00"+str(i),'w')
elif (i < 1000):
output = open("user_nl_clm_0"+str(i),'w')
output.write('&clm_inparm\n')
if (options.namelist_file != ''):
#First assume located in OLMT folder:
if (os.path.isfile(PTCLMdir+'/'+options.namelist_file)):
namelist_in = open(PTCLMdir+'/'+options.namelist_file,'r')
elif (os.path.isfile(options.namelist_file)):
namelist_in = open(options.namelist_file,'r')
else:
print('Error: namelist_file does not exist. Aborting')
sys.exit(1)
for s in namelist_in:
output.write(s)
namelist_in.close()
#history file options
#outputs for SPRUCE MiP and Jiafu's diagnostics code:
var_list_hourly = ['GPP', 'NEE', 'NEP', 'NPP', 'LEAFC_ALLOC', 'AGNPP', 'MR', \
'CPOOL_TO_DEADSTEMC', 'LIVECROOTC_XFER_TO_LIVECROOTC', 'DEADCROOTC_XFER_TO_DEADCROOTC', \
'CPOOL_TO_LIVECROOTC', 'CPOOL_TO_DEADCROOTC', 'FROOTC_ALLOC', 'AR', 'LEAF_MR', 'CPOOL_LEAF_GR',
'TRANSFER_LEAF_GR', 'CPOOL_LEAF_STORAGE_GR', 'LIVESTEM_MR', 'CPOOL_LIVESTEM_GR', \
'TRANSFER_LIVESTEM_GR', 'CPOOL_LIVESTEM_STORAGE_GR', 'CPOOL_DEADSTEM_GR', \
'TRANSFER_DEADSTEM_GR', 'CPOOL_DEADSTEM_STORAGE_GR', 'LIVECROOT_MR', 'CPOOL_LIVECROOT_GR', \
'TRANSFER_LIVECROOT_GR', 'CPOOL_LIVECROOT_STORAGE_GR', 'CPOOL_DEADCROOT_GR', 'TRANSFER_DEADCROOT_GR', 'CPOOL_DEADCROOT_STORAGE_GR', \
'FROOT_MR', 'CPOOL_FROOT_GR', 'TRANSFER_FROOT_GR', 'CPOOL_FROOT_STORAGE_GR', 'FSH', 'EFLX_LH_TOT', \
'Rnet', 'FCTR', 'FGEV', 'FCEV', 'SOILLIQ', 'QOVER', 'QDRAI', 'TOTVEGC', 'LEAFC', 'LIVESTEMC', 'DEADSTEMC', \
'FROOTC', 'LIVECROOTC', 'DEADCROOTC', 'TG', 'TV', 'TSA', 'TSOI', 'DEADSTEMC_STORAGE', \
'LIVESTEMC_STORAGE', 'DEADCROOTC_STORAGE', 'LIVECROOTC_STORAGE', 'CPOOL_TO_DEADSTEMC_STORAGE', \
'CPOOL_TO_LIVESTEMC_STORAGE', 'CPOOL_TO_DEADCROOTC_STORAGE', 'CPOOL_TO_LIVECROOTC_STORAGE', \
'ER', 'HR', 'FROOTC_STORAGE', 'LEAFC_STORAGE', 'LEAFC_XFER', 'FROOTC_XFER', 'LIVESTEMC_XFER', \
'DEADSTEMC_XFER', 'LIVECROOTC_XFER', 'DEADCROOTC_XFER', 'SR', 'HR_vr', 'FIRA',
'FSA', 'FSDS', 'FLDS', 'TBOT', 'RAIN', 'SNOW', 'WIND', 'PBOT', 'QBOT', 'QVEGT', 'QVEGE', 'QSOIL', \
'QFLX_SUB_SNOW', 'QFLX_DEW_GRND', 'QH2OSFC', 'H2OSOI', 'CPOOL_TO_LIVESTEMC', 'TOTLITC', \
'TOTSOMC', 'ZWT', 'SNOWDP', 'TLAI','RH2M','QRUNOFF']
#var_list_hourly_bgc TODO: Separate SP and BGC variables,
var_list_daily = ['TOTLITC', 'TOTSOMC', 'CWDC', 'LITR1C_vr', 'LITR2C_vr', 'LITR3C_vr', 'SOIL1C_vr', \
'SOIL2C_vr', 'SOIL3C_vr', 'H2OSFC', 'ZWT', 'SNOWDP', 'TLAI', 'CPOOL','NPOOL','PPOOL', \
'FPI','FPI_P','FPG','FPG_P','FPI_vr','FPI_P_vr']
var_list_pft = ['GPP', 'NPP', 'LEAFC_ALLOC', 'AGNPP', 'CPOOL_TO_DEADSTEMC', \
'LIVECROOTC_XFER_TO_LIVECROOTC', 'DEADCROOTC_XFER_TO_DEADCROOTC', \
'CPOOL_TO_LIVECROOTC', 'CPOOL_TO_DEADCROOTC', 'FROOTC_ALLOC', 'AR', 'MR', \
'LEAF_MR', 'CPOOL_LEAF_GR', 'TRANSFER_LEAF_GR', 'CPOOL_LEAF_STORAGE_GR', \
'LIVESTEM_MR', 'CPOOL_LIVESTEM_GR', 'TRANSFER_LIVESTEM_GR', \
'CPOOL_LIVESTEM_STORAGE_GR', 'CPOOL_DEADSTEM_GR', 'TRANSFER_DEADSTEM_GR', \
'CPOOL_DEADSTEM_STORAGE_GR', 'LIVECROOT_MR', 'CPOOL_LIVECROOT_GR', \
'TRANSFER_LIVECROOT_GR', 'CPOOL_LIVECROOT_STORAGE_GR', 'CPOOL_DEADCROOT_GR', \
'TRANSFER_DEADCROOT_GR', 'CPOOL_DEADCROOT_STORAGE_GR', 'FROOT_MR', \
'CPOOL_FROOT_GR', 'TRANSFER_FROOT_GR', 'CPOOL_FROOT_STORAGE_GR', 'FCTR', 'FCEV', \
'TOTVEGC', 'LEAFC', 'LIVESTEMC', 'DEADSTEMC', 'FROOTC', 'LIVECROOTC', \
'DEADCROOTC', 'DEADSTEMC_STORAGE', 'LIVESTEMC_STORAGE', 'DEADCROOTC_STORAGE', \
'LIVECROOTC_STORAGE', 'CPOOL_TO_DEADSTEMC_STORAGE', 'CPOOL_TO_LIVESTEMC_STORAGE', \
'CPOOL_TO_DEADCROOTC_STORAGE', 'CPOOL_TO_LIVECROOTC_STORAGE', \
'FROOTC_STORAGE', 'LEAFC_STORAGE', 'LEAFC_XFER', 'FROOTC_XFER', 'LIVESTEMC_XFER', \
'DEADSTEMC_XFER', 'LIVECROOTC_XFER', 'DEADCROOTC_XFER', 'TLAI', 'CPOOL_TO_LIVESTEMC']
if options.var_list_pft != '':
var_list_pft = options.var_list_pft.split(',')
var_list_spinup = ['PPOOL', 'EFLX_LH_TOT', 'RETRANSN', 'PCO2', 'PBOT', 'NDEP_TO_SMINN', 'OCDEP', \
'BCDEP', 'COL_FIRE_CLOSS', 'HDM', 'LNFM', 'NEE', 'GPP', 'FPSN', 'AR', 'HR', \
'MR', 'GR', 'ER', 'NPP', 'TLAI', 'SOIL3C', 'TOTSOMC', 'TOTSOMC_1m', 'LEAFC', \
'DEADSTEMC', 'DEADCROOTC', 'FROOTC', 'LIVESTEMC', 'LIVECROOTC', 'TOTVEGC', 'N_ALLOMETRY','P_ALLOMETRY',\
'TOTCOLC', 'TOTLITC', 'BTRAN', 'SCALARAVG_vr', 'CWDC', 'QVEGE', 'QVEGT', 'QSOIL', 'QDRAI', \
'QRUNOFF', 'FPI', 'FPI_vr', 'FPG', 'FPI_P','FPI_P_vr', 'FPG_P', 'CPOOL','NPOOL', 'PPOOL', 'SMINN', 'HR_vr']
if ('ICBCLM45CB' in compset):
var_list_spinup = ['FPSN','TLAI','QVEGT','QVEGE','QSOIL','EFLX_LH_TOT','FSH','RH2M','TSA','FSDS','FLDS','PBOT', \
'WIND','BTRAN','DAYL','T10','QBOT']
if (options.C14):
var_list_spinup.append('C14_TOTSOMC')
var_list_spinup.append('C14_TOTSOMC_1m')
var_list_spinup.append('C14_TOTVEGC')
#ILAMB diagnostic variables
ilamb_outputs = ['FAREA_BURNED', 'CWDC', 'LEAFC', 'TOTLITC', 'STORVEGC', 'LIVESTEMC', 'DEADSTEMC', \
'TOTPRODC', 'FROOTC', 'LIVECROOTC', 'DEADCROOTC', 'SOIL1C', 'SOIL2C', 'SOIL3C', \
'TOTSOMC', 'TOTVEGC', 'WOODC', 'QSOIL', 'QVEGE', 'COL_FIRE_CLOSS', \
'LITR1C_TO_SOIL1C', 'LITR2C_TO_SOIL2C', 'LITR3C_TO_SOIL3C', 'LAND_USE_FLUX', \
'LITFALL', 'GPP', 'FGR', 'TLAI', 'SNOWLIQ', 'SOILICE', 'SOILLIQ', 'QRUNOFF', \
'QOVER', 'SOILWATER_10CM', 'NBP', 'LEAFC_ALLOC', 'WOODC_ALLOC', 'QINTR', \
'AR', 'GR', 'HR', 'MR', 'FSNO', 'SNOWDP', 'QSNOMELT', 'H2OSNO', 'SNOBCMSL', \
'SNODSTMSL', 'SNOOCMSL', 'QVEGT', 'TSOI', 'WIND', 'EFLX_LH_TOT', 'FCTR', \
'FCEV', 'FGEV', 'FSH', 'RH2M', 'Q2M', 'RAIN', 'SNOW', 'PBOT', 'FLDS', 'FIRE', \
'FSDS', 'FSR', 'TSA', 'QSNOMELT', 'TWS']
if ('CTC' in compset):
var_list_daily.append('SOIL4C_vr')
var_list_spinup.append('SOIL4C')
ilamb_outputs.append('SOIL4C')
if ('20TR' not in compset and int(options.hist_mfilt) == -1):
#default to annual for spinup runs if not specified
options.hist_mfilt = 1
options.hist_nhtfrq = -8760
if (options.hist_mfilt != -1 and not options.diags):
if (options.ad_spinup):
output.write(" hist_mfilt = "+str(options.hist_mfilt)+", "+str(options.hist_mfilt)+"\n")
else:
if (options.dailyrunoff):
#include daily variables related to runoff only
output.write(" hist_mfilt = "+ str(options.hist_mfilt)+",365\n")
if (options.dailyvars):
#include daily column and PFT level output
output.write(" hist_dov2xy = .true., .true., .false.\n")
output.write(" hist_mfilt = "+ str(options.hist_mfilt)+",365,365\n")
else:
output.write(" hist_mfilt = "+ str(options.hist_mfilt)+"\n")
if (options.hist_nhtfrq != -999 and not options.diags):
if (options.ad_spinup):
output.write(" hist_nhtfrq = "+ str(options.hist_nhtfrq)+", "+str(options.hist_nhtfrq)+"\n")
else:
if (options.dailyvars):
output.write(" hist_nhtfrq = "+ str(options.hist_nhtfrq)+",-24,-24\n")
h1varst = "hist_fincl2 = "
h2varst = "hist_fincl3 = "
for v in var_list_hourly:
h1varst | |
import argparse
import json
import logging
import os
import os.path
import time
import argcomplete
import jmespath
import keyring
import requests
from rich.console import Console
from rich.logging import RichHandler
from rich.prompt import Confirm, Prompt
from rich.table import Table
import csv
import io
import yaml
import re
import sys
from . import syncspecs
console = Console()  # shared Rich console for all user-facing output
log = logging.getLogger()  # root logger; handlers installed by config_logging()
def prompt_configuration():
    """Interactively collect API credentials and persist them in the keyring.

    Prompts are pre-filled with any existing configuration. The loop repeats
    until the user confirms storage; the three credentials are then stored
    under the "cxcli" keyring service — the same service that
    get_configuration() reads — and the cached access token is invalidated.
    """
    config = get_configuration()
    if not config:
        config = {"clientid": None, "clientsecret": None, "customerid": None}
    while True:
        config["customerid"] = Prompt.ask("CustomerId", default=config["customerid"])
        config["clientid"] = Prompt.ask("ClientId", default=config["clientid"])
        config["clientsecret"] = Prompt.ask(
            "ClientSecret",
            password=True,
            default=config["clientsecret"],
            show_default=False,  # never echo the secret back
        )
        # NOTE: "keying" in the prompt below looks like a typo for "keyring".
        if Confirm.ask("Please confirm to store this configuration in the keying"):
            # BUG FIX: the service name had been scrubbed to a "<PASSWORD>"
            # placeholder; use the "cxcli" service get_configuration() reads.
            keyring.set_password("cxcli", ":customerid", config["customerid"])
            keyring.set_password("cxcli", ":clientid", config["clientid"])
            keyring.set_password("cxcli", ":clientsecret", config["clientsecret"])
            # invalidate access_token
            keyring.set_password("cxcli", ":access_token_timestamp", "0")
            console.print("Configuration stored successfully.", style="GREEN")
            break
def use_environ_keys():
    """Return True when all three CX* credential variables are in the environment."""
    required = ("CXCUSTOMERID", "CXCLIENTID", "CXCLIENTSECRET")
    return all(name in os.environ for name in required)
def get_configuration():
    """Return the credential configuration as a dict, or None if incomplete.

    Environment variables (CXCUSTOMERID / CXCLIENTID / CXCLIENTSECRET) take
    precedence; otherwise the three values are read from the "cxcli" keyring
    service. Returns None when any of the three values is missing.
    """
    if use_environ_keys():
        config = {
            "customerid": os.environ["CXCUSTOMERID"],
            "clientid": os.environ["CXCLIENTID"],
            "clientsecret": os.environ["CXCLIENTSECRET"],
        }
    else:
        config = {
            "customerid": keyring.get_password("cxcli", ":customerid"),
            "clientid": keyring.get_password("cxcli", ":clientid"),
            # BUG FIX: this line was syntactically broken by a scrubbed
            # "<PASSWORD>" placeholder (missing quote); restore the
            # cxcli/:clientsecret lookup matching the two lines above.
            "clientsecret": keyring.get_password("cxcli", ":clientsecret"),
        }
    if (
        config["customerid"] is None
        or config["clientid"] is None
        or config["clientsecret"] is None
    ):
        return None
    return config
def config_logging(level):
    """Configure root logging at *level*, rendering records through Rich."""
    rich_handler = RichHandler()
    logging.basicConfig(
        level=level,
        format="%(message)s",
        datefmt="[%X]",
        handlers=[rich_handler],
    )
def get_all_services():
    """Discover all API service specs from the local spec cache.

    Syncs the specs on first use (when metacache.dat is missing). Returns a
    dict mapping service name -> service dict with keys "name", "spec" and
    (only when fully loaded) "url". As a performance tweak, the full OpenAPI
    JSON is parsed only for services that appear on the command line
    (sniffed from sys.argv); every other service gets a stub spec holding
    just the cached title — enough to build help text.
    """
    metacache_path = os.path.join(syncspecs.APISPECPATH, "metacache.dat")
    if not os.path.exists(metacache_path):
        console.print("Preparing API specs. Please wait...")
        syncspecs.sync_all()
        console.print("Done")
    # metacache maps spec basename -> service title.
    with open(metacache_path, "r") as fp:
        metacache = json.load(fp)
    services = {}
    for filename in sorted(os.listdir(syncspecs.APISPECPATH)):
        if not filename.endswith(".json"):
            continue
        service = {}
        service["name"] = filename.split(".", 1)[0]
        # Names are "service" or "service_component"; both parts must be
        # mentioned in argv for the spec to be worth loading.
        namesplit = service["name"].split("_")
        if namesplit[0] in sys.argv and (
            len(namesplit) < 2 or namesplit[1] in sys.argv
        ):
            # Performance Tweak: Only load service JSON-files, when we'll use them
            with open(os.path.join(syncspecs.APISPECPATH, filename), "r") as read_file:
                service["spec"] = json.load(read_file)
            service["url"] = service["spec"]["host"]
            if "basePath" in service["spec"]:
                service["url"] += service["spec"]["basePath"]
        else:
            # Stub spec with the cached title (may be absent from the cache).
            try:
                title = metacache[filename.replace(".json", "")]
            except KeyError:
                title = ""
            service["spec"] = {"info": {"title": title}, "paths": {}}
        services[service["name"]] = service
    return services
def should_ignore_parameter(parameter):
    """Return True for header parameters the CLI manages automatically."""
    auto_headers = (
        "Authorization",
        "Accept",
        "Accept-Charset",
        "Citrix-TransactionId",
        "X-ActionName",
    )
    if parameter["in"] != "header":
        return False
    return parameter["name"] in auto_headers
def populate_argpars_component(alloperations, command_subparsers, component_name):
    """Register a top-level subcommand for a service component.

    Records the new parser in alloperations[component_name] and returns the
    nested subparsers object on which the component's services are added.
    """
    description = component_name
    if description == "adm":
        # ToDo: Hardcoded hack
        description = "Citrix ADM Service"
    component_parser = command_subparsers.add_parser(
        component_name, help=description, description=description
    )
    alloperations[component_name] = {"command_parser": component_parser}
    return component_parser.add_subparsers(
        help="Components", dest="commandcomponent"
    )
def populate_argpars_service(alloperations, command_subparsers, service, config):
    """Register one service as a subcommand and populate all its operations.

    The service parser is stored in alloperations keyed by the service's
    original (pre-split) name; every path/method pair of the OpenAPI spec
    then becomes an operation subcommand.
    """
    info = service["spec"]["info"]
    description = info["title"]
    if "description" in info:
        description = info["description"]
    service_parser = command_subparsers.add_parser(
        service["name"], help=description, description=description
    )
    alloperations[service["originalname"]] = {"command_parser": service_parser}
    operation_subparser = service_parser.add_subparsers(help="Operations")
    for path, path_spec in service["spec"]["paths"].items():
        for method, operation_spec in path_spec.items():
            populate_argpars_operation(
                alloperations,
                service,
                config,
                operation_subparser,
                path,
                method,
                operation_spec,
            )
def populate_argpars_operation(
    alloperations, service, config, command_subparser, path, requesttype, requestspec
):
    """Register one OpenAPI operation as an argparse subcommand.

    Skips ping operations and operations marked ServiceKey-only. Records the
    operation spec — augmented in place with its HTTP method and absolute
    URL — in alloperations[originalname][operation_id], then adds one CLI
    option per OpenAPI parameter plus the common output/query options.
    """
    originalname = service["originalname"]
    if "operationId" not in requestspec:
        if "summary" in requestspec:
            # ToDo: Hack for administrators API. Fix upstream.
            # Synthesize an operationId from the title-cased summary.
            requestspec["operationId"] = re.sub(
                "[^a-zA-Z]+", "", requestspec["summary"].title()
            )
        else:
            log.warning(
                f"For {service['url']} skipping {path} as there is no operationId"
            )
            return
    operation_id = requestspec["operationId"]
    if operation_id in (
        "Ping_Get",
        "GetPing",
        "ping",
        "Ping_GetTime",
        "Ping_PingAsync",
        "Ping_GetAsync",
    ):
        # Skip ping operation
        return
    if (
        "summary" in requestspec
        and "[ServiceKey]" in requestspec["summary"]
        and "[BearerToken]" not in requestspec["summary"]
    ):
        # Skip operations that don't indicate that they'll work with BearerToken
        return
    # Record the (mutated) spec so the dispatcher can find method + URL later.
    alloperations[originalname][operation_id] = requestspec
    alloperations[originalname][operation_id]["method"] = requesttype
    alloperations[originalname][operation_id]["url"] = (
        "https://" + service["url"] + path
    )
    help = requestspec["summary"] if "summary" in requestspec else None
    command_parser = command_subparser.add_parser(
        operation_id,
        help=help,
        description=help,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    command_parser.set_defaults(subcommand=operation_id)
    if "parameters" in requestspec:
        for parameter in requestspec["parameters"]:
            populate_argpars_parameter(service, parameter, config, command_parser)
    # --output-as and --output-binary are mutually exclusive result sinks.
    group = command_parser.add_mutually_exclusive_group()
    group.add_argument(
        "--output-as",
        help="Try presenting the result in the specified format",
        default="JSON",
        choices=["json", "yaml", "table", "csv"],
        type=str.lower,
    )
    group.add_argument(
        "--output-binary",
        help="Store the result at the provided path",
        type=argparse.FileType("wb", 0),
        metavar="path_to_file",
        default=argparse.SUPPRESS,
    )
    command_parser.add_argument(
        "--cliquery",
        help="Filter the result using JMESPath (See https://jmespath.org/tutorial.html)",
        default=argparse.SUPPRESS,
    )
def populate_argpars_parameter(service, parameter, config, command_parser):
    """Translate one OpenAPI parameter into argparse option(s).

    $ref indirections are resolved first and headers the CLI fills in
    itself are skipped. A parameter whose schema describes an object is
    flattened into one CLI option per schema property; anything else maps
    to a single option.
    """
    parameter = resolve_openapi_references(service, parameter)
    if should_ignore_parameter(parameter):
        return
    required = "required" in parameter and parameter["required"]
    if "schema" in parameter and "properties" in parameter["schema"]:
        # Object body: query individual schema properties, not the top level.
        schema = parameter["schema"]
        for prop_name, prop_spec in schema["properties"].items():
            if not isinstance(prop_spec, dict) or "type" not in prop_spec:
                continue
            prop_required = (required and "required" not in schema) or (
                "required" in schema and prop_name in schema["required"]
            )
            populate_argpars_parameter_element(
                command_parser, prop_required, prop_name, prop_spec, config
            )
    else:
        if "schema" in parameter and "type" in parameter["schema"]:
            # ToDo: CVADs spec looks like this
            parameter["type"] = parameter["schema"]["type"]
        populate_argpars_parameter_element(
            command_parser, required, parameter["name"], parameter, config
        )
def populate_argpars_parameter_element(
    command_parser, parent_required, elementkey, element, config
):
    """Add one argparse option (or a set, for objects) for a schema element.

    Mutates *element* in place: missing types default to "string", objects
    without properties degrade to strings, and true/false enums are coerced
    to booleans. Known customer-id keys and isCloud get sensible defaults.
    """
    if "type" not in element:
        # Hack: untyped elements are treated as plain strings.
        element["type"] = "string"
    # Don't show None default
    parameter_default = argparse.SUPPRESS
    if element["type"] == "object":
        if "properties" in element:
            # One "--parent-child" option per object property.
            for propertykey, propertyvalue in element["properties"].items():
                required = parent_required and (
                    "required" not in element or propertykey in element["required"]
                )
                command_parser.add_argument(
                    f"--{elementkey}-{propertykey}",
                    help=get_help_from_element(propertyvalue),
                    required=required,
                    default=parameter_default,
                )
            return
        else:
            # Interpret object as string, as we don't know what else to do with it
            element["type"] = "string"
    if element["type"] != "boolean" and "enum" in element:
        # True/False enum values are awkward for argparse - try fixing the type for these
        isbool = True
        for aenum in element["enum"]:
            if str(aenum).lower() not in ("true", "false"):
                isbool = False
                break
        if isbool:
            element["type"] = "boolean"
    if element["type"] in ("string", "integer", "number", "file"):
        if config is not None and elementkey.lower() in (
            "customer",
            "customerid",
            "citrix-customerid",
        ):
            # Populate customerid where possible
            parameter_default = config["customerid"]
        elif elementkey == "isCloud":
            # ADM wants this parameter for Cloud hosted instances
            parameter_default = "true"
        choices = element["enum"] if "enum" in element else None
        try:
            command_parser.add_argument(
                f"--{elementkey}",
                help=get_help_from_element(element),
                required=parent_required and parameter_default == argparse.SUPPRESS,
                default=parameter_default,
                choices=choices,
                type=get_parameter_type(element),
            )
        except argparse.ArgumentError as exc:
            # Duplicate option names can occur across merged specs; log and
            # keep the first registration.
            log.exception(exc)
            pass
    elif element["type"] == "boolean":
        command_parser.add_argument(
            f"--{elementkey}",
            help=get_help_from_element(element),
            required=parent_required and parameter_default == argparse.SUPPRESS,
            default=parameter_default,
            action="store_true",
        )
    elif element["type"] == "array":
        # Required arrays need at least one value ("+"); optional allow none.
        command_parser.add_argument(
            f"--{elementkey}",
            help=get_help_from_element(element),
            required=parent_required and parameter_default == argparse.SUPPRESS,
            default=parameter_default,
            type=str,
            nargs="+" if parent_required else "*",
        )
    else:
        # NOTE(review): "Unhanded" looks like a typo for "Unhandled".
        log.error("Unhanded Type (1): " + element["type"])
def get_parameter_type(element):
    """Map an OpenAPI primitive type name to an argparse-compatible type.

    Raises Exception for any type name outside string/integer/number/file.
    """
    primitive_types = {"string": str, "integer": int, "number": float}
    kind = element["type"]
    if kind in primitive_types:
        return primitive_types[kind]
    if kind == "file":
        return argparse.FileType("rb", 0)
    raise Exception(f'Unhandled Type (2): {kind}')
def get_help_from_element(parameter):
    """Return a non-empty help string: the description, or "-" as filler."""
    text = parameter.get("description")
    if text is None or not text.strip():
        return "-"
    return text
def resolve_openapi_references(service, parameter):
    """Inline OpenAPI "$ref" indirections for a parameter.

    Repeatedly resolves references until a fixed point is reached: a
    top-level "$ref" (looked up in spec["parameters"]), a schema-level
    "$ref", and "$ref"s inside schema properties (both looked up in
    spec["definitions"]). Schemas are rewritten in place; the fully
    resolved parameter dict is returned.
    """
    spec = service["spec"]
    resolved_something = True
    while resolved_something:
        resolved_something = False
        if "$ref" in parameter:
            target = parameter["$ref"].split("/")[-1]
            parameter = spec["parameters"][target]
            resolved_something = True
        if "schema" in parameter:
            schema = parameter["schema"]
            if "$ref" in schema:
                target = schema["$ref"].split("/")[-1]
                parameter["schema"] = spec["definitions"][target]
                schema = parameter["schema"]
                resolved_something = True
            if "properties" in schema:
                replaced = {}
                for prop_name, prop_spec in schema["properties"].items():
                    if "$ref" in prop_spec:
                        target = prop_spec["$ref"].split("/")[-1]
                        replaced[prop_name] = spec["definitions"][target]
                        resolved_something = True
                    else:
                        replaced[prop_name] = prop_spec
                schema["properties"] = replaced
    return parameter
def process_openapi_specs(alloperations, command_subparsers, config):
    """Build the argparse command tree from every cached service spec.

    Services named "component_service" are grouped under a shared component
    subcommand (e.g. the many adm_* services); all others are registered
    directly at the top level.
    """
    component_subparsers = {}
    for service in get_all_services().values():
        service["originalname"] = service["name"]
        if "_" not in service["name"]:
            service["component_name"] = ""
            populate_argpars_service(
                alloperations, command_subparsers, service, config
            )
            continue
        # The adm service has so many operations, that it seems a good idea
        # to split them up by component
        component_name = service["name"].split("_", 1)[0]
        service["component_name"] = component_name
        if component_name not in component_subparsers:
            component_subparsers[component_name] = populate_argpars_component(
                alloperations,
                command_subparsers,
                component_name,
            )
        # Strip the superservice name.  NOTE: with more than one underscore
        # only the second segment survives, matching the original behavior.
        service["name"] = service["name"].split("_")[1]
        populate_argpars_service(
            alloperations,
            component_subparsers[component_name],
            service,
            config,
        )
def get_value(atype, aspec, args):
adict = {}
for parameter in aspec["parameters"]:
if (
"in" not in parameter
or parameter["in"] != atype
or should_ignore_parameter(parameter)
):
continue
argname = parameter["name"].replace("-", "_")
if hasattr(args, argname):
# It's a simple value
value = getattr(args, argname)
if value is not None:
adict[parameter["name"]] = value
elif "schema" in parameter and "properties" in parameter["schema"]:
# It's a more complex structure.. so we need to put a dict together
for elementkey, element in parameter["schema"]["properties"].items():
if not isinstance(element, dict):
# It's not a complex object - move on
break
if element["type"] == "object":
if "properties" in element:
adict[elementkey] = {}
for propertykey in element["properties"].keys():
argname = f"{elementkey}_{propertykey}".replace("-", "_")
try:
value = getattr(args, argname)
if value is not None:
adict[elementkey][propertykey] = value
except AttributeError:
pass
elif element["type"] in ("string", "integer", "number", "array"):
argname = elementkey.replace("-", "_")
try:
value = getattr(args, argname)
if value is not None:
if atype == "body" and isinstance(value, list):
# ToDo: not sure about this, but | |
any(v < 0 for v in token_weights.values()):
max_v = max(token_weights.values())
return {t:math.exp(v - max_v)
for (t,v) in token_weights.items()}
else:
return token_weights
def inverse(token_weights):
    '''Make small values big and big values small. All will be positive in the
    end, in range (1,+Infty).

    >>> pprint(inverse({'a':-1,'b':1}))
    {'a': 3.0, 'b': 1.0}
    >>> pprint(inverse({'a':-100,'b':100}))
    {'a': 201.0, 'b': 1.0}'''
    # Only transform when negatives are present; otherwise pass through.
    if all(v >= 0 for v in token_weights.values()):
        return token_weights
    ceiling = max(token_weights.values()) + 1.
    return {token: ceiling - weight
            for (token, weight) in token_weights.items()}
def wt_neg_feature(tms, tweets, tokenpoints):
    '''Weight by max(weights) + 1 - w, where w is a weight_feature. This means
    that small weights become large, large weights become small, and all
    weights are positive in range (1,+Infty).'''
    # Note: the comprehension variable no longer shadows the result dict.
    feature_name = model_parms['weight_feature']
    raw_weights = {token: model.features()[feature_name]
                   for (token, model) in tms.items()}
    return inverse(raw_weights)
def wt_inv_feature(tms, tweets, tokenpoints):
    '''Weight by the inverse of feature name specified by parameter
    weight_feature). If negative numbers exist, shift all values to be
    positive.'''
    feature_name = model_parms['weight_feature']
    weights = {token: 1 / (1 + model.features()[feature_name])
               for (token, model) in tms.items()}
    return scale(weights)
class Geo_GMM(base.Location_Estimate, sklearn.mixture.GMM):
'''This is a GMM with a geographic interpretation, which also serves as a
location estimate (hence the multiple inheritance). Adds the following
attributes:
samples .......... List of (Point, log probability, component index)
tuples sampled from the model, ordered by
descending log probability. WARNING: These samples
are *not* guaranteed to be in-bounds (i.e., valid
locations on the globe).
samples_inbound .. geos.MultiPoint of above which are in-bounds.'''
# FIXME: Lame to use tuples for the samples list. Better to use objects?
    def __init__(self, *args, **kwargs):
        """Initialize both parent classes from a single **kwargs set.

        u.call_kw presumably forwards only the keywords each base
        __init__ accepts, so Location_Estimate and GMM options can be
        mixed in one call — TODO confirm u.call_kw semantics.
        Samples start empty; they are filled in later (see class docs).
        """
        self.samples = None
        self.samples_inbound = None
        u.call_kw(base.Location_Estimate.__init__, self, **kwargs)
        u.call_kw(sklearn.mixture.GMM.__init__, self, **kwargs)
    @property
    def explanation(self):
        # The (token -> weight) mapping behind this estimate serves as its
        # human-readable explanation (see combine(), which sets .tokens).
        return self.tokens
    @classmethod
    def combine(class_, gmms, weights, coverage):
        '''Combine Geo_GMMs using gmm_combine_f. gmms is an iterable of Geo_GMMs
        (each with exactly one token of weight 1), while weights is a (token,
        weight) mapping that must be a superset of the tokens in gmms.

        GMMs with weights close to zero are omitted; at least one must
        remain. All component SRIDs must be the same, as must all covariance
        types. The result is a prepared Geo_GMM with all the
        Location_Estimate juicyness.

        For example:

        >>> Token.parms_init({'component_sz_min':1})
        >>> mp = geos.MultiPoint(geos.Point(1,2), geos.Point(3,4), srid=4326)
        >>> m1 = Geo_GMM.from_fit(mp, 1, 'foo')
        >>> m2 = Geo_GMM.from_fit(mp, 2, 'bar')
        >>> m3 = Geo_GMM.from_fit(mp, 1, 'baz')
        >>> combined = Geo_GMM.combine([m1, m2, m3],
        ...                            { 'foo':2, 'bar':3, 'baz':1e-6 }, 0.95)
        >>> combined.weights_
        array([ 0.4, 0.3, 0.3])
        >>> pprint(combined.explanation)
        {'bar': 0.6, 'foo': 0.4}
        >>> combined.n_points
        4
        >>> [combined.sample(5) for i in range(100)] and None
        >>> combined.srid
        4326
        >>> combined.pred_region.geom_type
        'MultiPolygon'
        >>> combined.pred_coverage
        0.95
        >>> print(Geo_GMM.combine([m1, m2, m3],
        ...                       { 'foo':0, 'bar':0, 'baz':0 }, 0.95))
        None
        '''
        # sanity checks
        assert (len(gmms) >= 1)
        srid = gmms[0].srid
        covariance_type = gmms[0].covariance_type
        assert (srid is not None)
        # Each input GMM carries exactly one token; its weight comes from
        # the caller-supplied mapping.
        def weight(g):
            return weights[next(iter(g.tokens.keys()))]
        for g in gmms:
            assert (g.srid == srid)
            assert (g.covariance_type == covariance_type)
            assert (len(g.tokens) > 0)
            assert (weight(g) >= 0)
            # following aren't fundamental, just not yet supported
            assert (len(g.tokens) == 1)
            assert (next(iter(g.tokens.values())) == 1.0)
        # remove GMMs that don't have enough weight (relative threshold)
        max_weight = max([weight(g) for g in gmms])
        min_weight = max_weight * model_parms['weight_min']
        gmms = [g for g in gmms if weight(g) > min_weight]
        # all weights are 0. cannot locate.
        if (max_weight == 0):
            return None
        assert (len(gmms) >= 1)
        # renormalize weights over the surviving GMMs so they sum to 1
        relevant_weights = { t: weights[t]
                             for t in sum([list(g.tokens.keys()) for g in gmms], []) }
        total_weight = sum(relevant_weights.values())
        weights = { t: w / total_weight
                    for (t, w) in relevant_weights.items() }
        # build a skeleton GMM with room for every surviving component
        n_components = sum([g.n_components for g in gmms])
        new = class_(n_components=n_components, covariance_type=covariance_type)
        # populate the new GMM: concatenate component parameters; each
        # input's component weights are scaled by its token weight
        new.srid = srid
        new.means_ = np.concatenate([g.means_ for g in gmms])
        new.covars_ = np.concatenate([g.covars_ for g in gmms])
        new.weights_ = np.concatenate([g.weights_ * weight(g) for g in gmms])
        new.converged_ = True
        new.tokens = weights
        new.n_points = sum([g.n_points for g in gmms])
        # prepare (computes samples / prediction region at given coverage)
        new.prepare(coverage)
        return new
@classmethod
def filter_small_components(class_, m, data):
'''Remove components with fewer than component_sz_min points. If none
remain, re-fit with one component.
>>> Token.parms_init({'component_sz_min':2})
>>> x,y = make_blobs(n_samples=100, centers=[[10,10], [20,20]],
... n_features=2, random_state=100)
>>> x = np.vstack((x, [100,100])) # outlier
>>> mp = geos.MultiPoint([geos.Point(tuple(xy)) for xy in x])
>>> m = Geo_GMM.from_fit(mp, 3, 'foo')
>>> m.n_components
2
>>> mp = geos.MultiPoint([geos.Point((10,10)), geos.Point((20,20))])
>>> m = Geo_GMM.from_fit(mp, 2, 'foo')
>>> m.n_components
1'''
cts = Counter(m.predict(data))
tokeep = [idx for (idx,ct) in list(cts.items())
if ct >= model_parms['component_sz_min']]
if len(tokeep) == 0:
m.n_components = 1
m.fit(data)
else:
m.means_ = m.means_[tokeep]
m.covars_ = m.covars_[tokeep]
m.weights_ = m.weights_[tokeep]
m.weights_ = m.weights_ / sum(m.weights_)
m.n_components = len(tokeep)
return m
@classmethod
def from_fit(class_, mp, n_components, tokens=tuple()):
'''Given a MultiPoint, return a new Geo_GMM fitted to those points. If
given, tokens is an iterable of tokens or a single token string.'''
new = class_(n_components=n_components,
covariance_type=model_parms['covariance_type'],
min_covar=model_parms['min_covar'],
random_state=u.rand_np, n_iter=1000)
data = np.array(mp, dtype=np.float) # mp.coords is slow
new.fit(data)
new = Geo_GMM.filter_small_components(new, data)
new.srid = mp.srid
if (isinstance(tokens, str)):
tokens = [tokens]
new.tokens = { t:1 for t in tokens }
new.n_points = mp.num_geom
new.aic_cache = new.aic(data)
new.bic_cache = new.bic(data)
# use average of X and Y variance as the variance
new.var_cache = np.mean((data[:,0].var(), data[:,1].var()))
return new
    def cae(self, pt):
        # Mean geodesic distance from pt to the in-bounds model samples.
        # NOTE(review): self.samples_inbound_mp is not set in the visible
        # code (class docs mention samples_inbound) — presumably created by
        # prepare(); verify the attribute name.
        return np.mean(srs.geodesic_distance_mp(pt, self.samples_inbound_mp))
def contour(self, pt):
score = self.score_pt(pt)
idx = sum(score < i[1] for i in self.samples)
return (idx / len(self.samples))
    def coverst_p_real(self, pt):
        # True when pt's model score exceeds the prediction-region threshold,
        # i.e. pt would be counted as covered by the predicted region.
        return self.score_pt(pt) > self.pred_region_threshold
def likelihood_polygon(self,pg):
'''>>> Token.parms_init({'component_sz_min':1})
>>> mp = geos.MultiPoint(geos.Point(1,1), geos.Point(10,10), srid=4326)
>>> m = Geo_GMM.from_fit(mp, 2, 'foo')
>>> c = Geo_GMM.combine([m], {'foo':1 }, 0.95)
>>> c.likelihood_polygon(geos.Polygon.from_bbox((0.9,0.9,1.1,1.1)))
0.503
>>> c.likelihood_polygon(geos.Polygon.from_bbox((0.95,0.95,1.05,1.05)))
0.385'''
# returns proportion of samples contained in pg
return sum(pg.contains(p[0]) for p in self.samples) / len(self.samples)
def likelihood_polygons(self, polygons, threshold=0.001):
'''Return (index, probability) tuples for the likelihood of each
polygon, trimmed by threshold.
>>> Token.parms_init({'component_sz_min':1})
>>> mp = geos.MultiPoint(geos.Point(1,1), geos.Point(10,10), srid=4326)
>>> m = Geo_GMM.from_fit(mp, 2, 'foo')
>>> combined = Geo_GMM.combine([m], {'foo':1 }, 0.95)
>>> big = geos.Polygon.from_bbox((0.9,0.9,1.1,1.1))
>>> small = geos.Polygon.from_bbox((0.95,0.95,1.05,1.05))
>>> combined.likelihood_polygons([big, small])
[(0, 0.503), (1, 0.387)]'''
scores = [(i, self.likelihood_polygon(p))
for (i,p) in enumerate(polygons)]
return [(i, s) for (i,s) in scores if s >= threshold]
def dump_geoimage(self, basename, width_px):
# FIXME: This method is a mess and needs to be cleaned & split into
# several other methods.
#
# The GDAL documentation for Python is pretty poor, so this is cobbled
# together from a bunch of Googling. Notable sources:
#
# https://gist.github.com/205115
# http://www.gdal.org/gdal_tutorial.html
# http://trac.osgeo.org/gdal/wiki/PythonGotcha
# http://www.gdal.org/frmt_gtiff.html
# Find the bounds and image dimensions in this estimate's SRS, aiming
# for square pixels (which of course may not be square in other SRS).
def t(xy):
return srs.transform(geos.Point(xy, srid=srs.SRID_WGS84), self.srid)
xmin = t((base.GEOIMG_LONMIN, 0)).x
xmax = t((base.GEOIMG_LONMAX, 0)).x
ymin = t((0, base.GEOIMG_LATMIN)).y
ymax = t((0, base.GEOIMG_LATMAX)).y
height_px = int(width_px * (xmax - xmin) / (ymax - ymin))
# Evaluate the model across the world. (FIXME: This could be sped up
# with some smarter choices of bounds.) (FIXME: should we have
# endpoint=False?)
xs = np.linspace(xmin, xmax, num=width_px)
ys = np.linspace(ymin, ymax, num=height_px)
xys = np.dstack(np.meshgrid(xs, ys)).reshape((width_px * height_px, 2))
# FIXME: Token GMMs have a bad self.score, introduced by optimize.py;
# see issue #32. This works around the problem in unpickled objects that
# can't be fixed by simply updating the code; it patches the live
# objects to restore the method. It should be removed when no longer
# needed.
l.warning('workaround code for private issue #32 active')
import numpy
if (isinstance(self.score, numpy.float64)):
l.debug('workaround code for private issue #32 triggered')
from types import MethodType
self.score = MethodType(self.__class__.score, self, self.__class__)
probs = score_to_prob(self.score(xys))
probs = probs.reshape((height_px, width_px))
# FIXME: There is a bug in libgdal
# (http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=695060) which
# prevents it from correctly interpreting files that have distance units
# other than meters. Thus, if we are using one of our SRS with km or Mm
# units, use the following kludge to | |
close().
try:
message1 = ""
message2 = ""
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
reader._fd.xx_close()
reader._fd = "oops"
raise RuntimeError("xyzzy")
except Exception as ex:
message1 = str(ex)
message2 = str(ex.__cause__ or ex.__context__)
assert message1.find("object has no attribute") >= 0
assert message2 == "xyzzy"
def runConversions(filename, zgyReaderFactory):
    """
    Verify that coordinate conversion between index, annot, and world works.
    """
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as demo:
        # Index <-> annotation round trip for one known point.
        annot = demo.indexToAnnot((3, 7))
        assert annot == (1249, 5692)
        assert demo.annotToIndex(annot) == (3, 7)
        # Index <-> world round trips. The expected values encode a world
        # origin of (1000, 1000) with spacing 25 along the first index axis
        # and 30 along the second.
        index_to_world = (
            ((0, 0), (1000, 1000)),
            ((1, 0), (1025, 1000)),
            ((0, 1), (1000, 1030)),
            ((3, 7), (1000 + 3*25, 1000 + 7*30)),
        )
        for index, world in index_to_world:
            w = demo.indexToWorld(index)
            assert w == world
            assert demo.worldToIndex(w) == index
        # Annotation <-> world round trip via the same known point.
        w = demo.annotToWorld(annot)
        assert w == (1000 + 3*25, 1000 + 7*30)
        assert demo.worldToAnnot(w) == (1249, 5692)
def runErrorIfNotOpenForRead(filename, zgyReaderFactory):
    """After close(), both read() and readconst() must raise an error whose
    message contains "ot open for read" (matches "not open for read")."""
    shape = (1, 1, 1)
    origin = (0, 0, 0)
    scratch = np.zeros(shape, dtype=np.float32)
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        reader.close()
        with MustThrow("ot open for read"):
            reader.read(origin, scratch)
        # The old reader has no readconst(); only check it elsewhere.
        if zgyReaderFactory is not oldzgy.ZgyReader:
            with MustThrow("ot open for read"):
                reader.readconst(origin, shape)
def runDumpToDevNull(filename, zgyReaderFactory):
    """Smoke test for the raw metadata dump: it must write something to the
    stream without crashing; the content itself is not checked."""
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        with io.StringIO() as stream:
            reader._meta.dumpRaw(file=stream)
            # No test on the result, only see that it doesn't crash.
            assert len(stream.getvalue()) > 0
def runClone(filename, templatename):
    """Clone a file from a template and verify the expected metadata
    (int8 samples, value range -28..+227) both on write and on read back."""
    expected_range = (-28, +227)
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
                          templatename=templatename) as writer:
        checkmeta(writer, SampleDataType.int8, expected_range)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        checkmeta(reader, SampleDataType.int8, expected_range)
def runUpdate(filename):
    """Open an existing file for update (the template is the file itself)
    and verify the metadata survives both the writer and a read back."""
    expected_range = (-28, +227)
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
                          templatename=filename) as writer:
        checkmeta(writer, SampleDataType.int8, expected_range)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        checkmeta(reader, SampleDataType.int8, expected_range)
def runDumpMembers(filename, templatename):
    """Writer and reader objects must not expose unexpected mutable members;
    only "meta" is accepted."""
    allowed = set(("meta",))
    with newzgy.ZgyWriter(filename, iocontext = SDCredentials(),
                          templatename=templatename) as writer:
        assert not hasMutableMembers(writer, safe=allowed, verbose=True)
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        assert not hasMutableMembers(reader, safe=allowed, verbose=True)
# ----- Separately runnable tests, might need caller to clean up files. ----- #
def testRegisteredCompressors():
    """The ZFP codec must be registered both for compression and for
    decompression, and asking for an unknown codec must raise
    ZgyMissingFeature with a helpful message."""
    for known in (ZgyKnownCompressors(), ZgyKnownDecompressors()):
        assert "ZFP" in known
    with MustThrow('"XYZZY" not recognized. Must be one of', ZgyMissingFeature):
        ZgyCompressFactory("XYZZY", snr=30)
def testProgressWithDots():
    """A 51-dot progress bar over 1000 steps: check the emitted dot count at
    selected milestones; completion appends a newline."""
    with io.StringIO() as line:
        progress = ProgressWithDots(length=51, outfile=line)
        # Nothing is written before the first call.
        assert line.getvalue() == ""
        milestones = (
            (0, "."),
            (1, "."),
            (500, "." * 26),
            (999, "." * 50),
            (1000, "." * 51 + "\n"),
        )
        for done, expect in milestones:
            progress(done, 1000)
            assert line.getvalue() == expect
def testBadArgumentsOnCreate():
    """Every invalid ZgyWriter argument combination must raise ZgyUserError,
    and the checks must run before any file is created on disk."""
    fname = "should-not-exist.zgy"
    try:
        os.remove(fname)
    except FileNotFoundError:
        pass
    # (expected error message, writer keyword arguments) pairs.
    bad_args = (
        ("size must be specified", {}),
        ("size must be at least 1", dict(size=(10,0,20))),
        ("bricksize must be specified in 3 dimensions",
         dict(size=(10,15,20), bricksize=(64,64))),
        ("bricksize must be >= 4 and a power of 2",
         dict(size=(10,15,20), bricksize=(64,64,48))),
        ("datarange must be specified for integral types",
         dict(size=(10,15,20), datatype=SampleDataType.int8)),
        ("datarange must have min < max",
         dict(size=(10,15,20), datatype=SampleDataType.int8, datarange=(3,2))),
        ("datarange must have min < max",
         dict(size=(10,15,20), datatype=SampleDataType.int8, datarange=(3,3))),
        ("datarange must be finite",
         dict(size=(10,15,20), datatype=SampleDataType.int8, datarange=(np.nan,np.nan))),
    )
    for message, kwargs in bad_args:
        with MustThrow(message, newzgy.ZgyUserError):
            with newzgy.ZgyWriter(fname, **kwargs):
                pass
    # The consistency checks should be done before actually creating the file.
    # Which means that the next call should fail.
    with MustThrow(None, FileNotFoundError):
        os.remove(fname)
def testBadArgumentsOnReadWrite(filename):
    """write() and read() must reject buffers that are missing, not numpy,
    of the wrong dtype, or of the wrong rank; read() additionally rejects a
    read-only buffer."""
    origin = (0, 0, 0)
    # The same bad buffers are rejected by both the writer and the reader.
    bad_buffers = (
        None,                                       # no data
        [[[1,1,1]]],                                # not numpy data
        np.array([[[1,1,1]]], dtype=np.int8),       # wrong data type
        np.array([1,1,1], dtype=np.float32),        # wrong number of dimensions
    )
    expect = "Expected a 3d numpy array of np.float32 or np.float32"
    with newzgy.ZgyWriter(filename, size=(10,15,20)) as w:
        for buf in bad_buffers:
            with MustThrow(expect):
                w.write(origin, buf)
    expect = "Expected a writeable 3d numpy array of np.float32 or np.float32"
    with newzgy.ZgyReader(filename) as r:
        for buf in bad_buffers:
            with MustThrow(expect):
                r.read(origin, buf)
        with MustThrow(expect): # buffer not writeable
            a = np.array([[[1,1,1]]], dtype=np.float32)
            a.setflags(write=False)
            r.read(origin, a)
def testAutoDelete():
    """
    Exercise the LocalFileAutoDelete context manager: a file that was never
    created raises FileNotFoundError on exit (unless another exception takes
    precedence), disarm() cancels deletion, created files are removed on
    scope exit, and several contexts compose via contextlib.ExitStack.
    """
    # It is an error if the expected file is missing.
    with MustThrow("", FileNotFoundError):
        with LocalFileAutoDelete("xyzzy", silent=True) as fn:
            pass
    # As above, but if some other error occurred that will have precedence.
    with MustThrow("", IndexError):
        with LocalFileAutoDelete("xyzzy", silent=True) as fn:
            foo = [][1]  # deliberately raises IndexError inside the context
    # No attempt is made to remove, if we explicitly disarmed.
    with LocalFileAutoDelete("xyzzy") as fn:
        # The generated name contains a "tmp-" component regardless of
        # platform path separator.
        assert "/tmp-" in fn.name or "\\tmp-" in fn.name or fn.name[:4] == "tmp-"
        fn.disarm()
    # Actually try creating the file. Auto cleanup happens.
    with LocalFileAutoDelete("xyzzy") as fn:
        assert "/tmp-" in fn.name or "\\tmp-" in fn.name or fn.name[:4] == "tmp-"
        myname = fn.name
        with open(fn.name, "w"):
            pass
        assert os.path.exists(myname)
    assert not os.path.exists(myname)
    # Two auto-delete contexts managed by a single ExitStack: both files
    # exist inside the stack and are both gone afterwards.
    myname = [None, None]
    with ExitStack() as cleanup:
        fn1 = LocalFileAutoDelete("one")
        myname[0] = fn1.name
        cleanup.enter_context(fn1)
        with open(fn1.name, "w"):
            pass
        fn2 = LocalFileAutoDelete("two")
        myname[1] = fn2.name
        cleanup.enter_context(fn2)
        with open(fn2.name, "w"):
            pass
        assert os.path.exists(myname[0])
        assert os.path.exists(myname[1])
    assert not os.path.exists(myname[0])
    assert not os.path.exists(myname[1])
    # As above, but the second file is never created: its context raises,
    # yet the first file must still have been cleaned up.
    myname = [None, None]
    with MustThrow("", FileNotFoundError):
        with ExitStack() as cleanup:
            fn1 = LocalFileAutoDelete("one")
            myname[0] = fn1.name
            cleanup.enter_context(fn1)
            with open(fn1.name, "w"):
                pass
            fn2 = LocalFileAutoDelete("two", silent=True)
            myname[1] = fn2.name
            cleanup.enter_context(fn2)
            # I did not get around to creating the second file.
            # This means the fn2 context will raise an exception.
            # fn1 should still have been deleted though.
    assert not os.path.exists(myname[0])
def testHistogramRangeIsCenterNotEdge(filename):
    """
    When the histogram gets generated by the ZGY writer, the range gives
    the center value of bin 0 and the center value of bin 255. NOT the
    lowest value that maps to bin 0 and the highest value that maps to
    bin 255. Which would arguably also make sense. Verify that behavior.
    """
    with oldzgy.ZgyWriter(filename, iocontext = SDCredentials(),
                          size = (64, 64, 64),
                          datatype = SampleDataType.float,
                          datarange =(0, 255),
                          zstart = 0, zinc = 4,
                          annotstart = (1, 1), annotinc = (1, 1),
                          corners = ((1000, 1000), (1630, 1000),
                                     (1000, 1630), (1630, 1630))
    ) as writer:
        # With the 0..255 histogram range interpreted as the center of the
        # first and last bin, we have the following:
        # slot 0 is -0.5..+0.5, slot 2 is 1.5..2.5, slot 5 is 4.5..5.5
        # If we instead had a 0..256 histogram range interpreted as the
        # extreme edges of the first and last bin, we have this:
        # slot 0 is 0..1, slot 2 is 2..3, slot 5 is 5..6, slot 255: 255..256
        # That would still be approximately correct at least for the first
        # few bins when setting the histogram range to 0..255 instead of
        # 0..256. So if the histogram algorithm chooses to use the range
        # as the extreme limits (which it is NOT supposed to do),
        # 1.8 and 2.2 would end up in different slots. And 4.3 and 4.7
        # would end up in the same slot. It should be the other way around.
        #
        # Sample counts written: 100 samples of 1.8, 1 of 2.2, 50 of 4.3,
        # and 2 of 4.7.
        writer.write((0, 0, 0), np.full((1, 10, 10), 1.8, dtype=np.float32))
        writer.write((1, 0, 0), np.full((1, 1, 1), 2.2, dtype=np.float32))
        writer.write((2, 0, 0), np.full((1, 10, 5), 4.3, dtype=np.float32))
        writer.write((3, 0, 0), np.full((1, 1, 2), 4.7, dtype=np.float32))
    with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
        #print(reader.histogram)
        assert math.isclose(reader.histogram.min, 0.0)
        assert math.isclose(reader.histogram.max, 255.0)
        # 1.8 and 2.2 both land in bin 2 (100 + 1 samples); 4.3 lands in
        # bin 4 (50 samples) and 4.7 in bin 5 (2 samples).
        assert reader.histogram.bin[2] == 101
        assert reader.histogram.bin[4] == 50
        assert reader.histogram.bin[5] == 2
def testEmptyFile(filename, zgyWriterFactory = newzgy.ZgyWriter, zgyReaderFactory = newzgy.ZgyReader):
"""
Create a file without writing bulk data to it; make sure it is
well behaved both on write and on read back. Ideally test both
on-prem and cloud, and test all 9 combinations of ZGY, OpenZGY/C++,
and OpenZGY/Python readers and writers. With the current test
framework it gets a bit tricky to test the two OpenZGY/C++ vs.
OpenZGY/Python cases. Can I make a test that imports | |
# AMZ-Driverless
# Copyright (c) 2019 Authors:
# - <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import importlib
import os
import shutil
import traceback
import zipfile
import rosbag
import yaml
from rbb_tools.common.logging import Logger
from rbb_tools.common.shell import Command
from rbb_tools.extraction.registry import ProductFactory
class AbstractExtractionPlugin(object):
    """Base class for bag-extraction plugins.

    Subclasses implement check_topics()/extract() and describe themselves
    through get_plugin_meta_data(). Configuration values are looked up with
    config() using dotted keys, preferring the user-supplied configuration
    and falling back to get_default_configuration().
    """

    def __init__(self, configuration, logger, resource_directory):
        """Store the user configuration, logger and plugin resource dir."""
        self._configuration = configuration
        self._logger = logger
        self._resource_directory = resource_directory

    def check_topics(self, topics):
        """Return True when the given topic mapping is usable (abstract)."""
        raise NotImplementedError

    def extract(self, bag_file, topics, tmp_dir, output_dir, product_factory):
        """Extract products from bag_file into output_dir (abstract)."""
        raise NotImplementedError

    def get_default_configuration(self):
        """Default configuration values; overridden by user-supplied ones."""
        return {}

    @staticmethod
    def _lookup(node, keys):
        """Walk a nested dict through successive keys.

        Returns (True, value) when every key is found, (False, None)
        otherwise. A falsy root (None or {}) never matches. This replaces
        the previously duplicated lookup code for the user and default
        configurations.
        """
        if not node:
            return (False, None)
        for k in keys:
            if k in node:
                node = node[k]
            else:
                return (False, None)
        return (True, node)

    def config(self, key):
        """Look up a dotted configuration key (e.g. "video.fps").

        The user-supplied configuration wins; otherwise the plugin's
        default configuration is consulted. Returns None when the key is
        present in neither.
        """
        keys = key.split(".")
        # Value is in the user supplied configuration?
        found, value = self._lookup(self._configuration, keys)
        if found:
            return value
        # Value is in the default configuration?
        found, value = self._lookup(self.get_default_configuration(), keys)
        if found:
            return value
        return None

    def get_plugin_meta_data(self):
        """Return a dict with at least 'name' and 'version' keys (abstract)."""
        raise NotImplementedError

    def get_plugin_name_version(self):
        """Human-readable "name vVERSION" string for log output."""
        # Fetch the meta data once instead of twice.
        meta = self.get_plugin_meta_data()
        return "%s v%s" % (meta['name'], meta['version'])
class AbstractMatchingRule(object):
    """Interface for topic-matching rules used by the extractor.

    A concrete rule decides whether a bag's topic list satisfies it and,
    when it does, which topic mappings should be extracted.
    """

    def __init__(self, configuration, logger):
        # The base class keeps no state; subclasses interpret the arguments.
        pass

    def match(self, topics_and_types):
        """Return True when this rule applies to the {topic: type} map."""
        raise NotImplementedError("Abstract")

    def get_mappings(self):
        """Return the topic mappings produced by a successful match()."""
        raise NotImplementedError("Abstract")

    def to_string(self):
        """Human-readable description of this rule."""
        return "AbstractMatchingRule"
class ExtractionProductGenerator:
    """Binds one plugin, one rule name and one concrete topic mapping into
    a pending unit of extraction work."""

    def __init__(self, plugin, rule, topics):
        self._plugin = plugin
        self._rule = rule
        self._topics = topics

    def get_topic_mapping(self):
        """The topic mapping this product will be extracted from."""
        return self._topics

    def get_rule(self):
        """Name of the extraction rule that produced this generator."""
        return self._rule

    def get_plugin(self):
        """The plugin instance responsible for the extraction."""
        return self._plugin

    def generate(self, output_folder, temporary_folder):
        # Intentionally a no-op; the Extractor drives the plugin directly.
        pass
class ExtractionRule:
    """A named extraction rule: a plugin plus the matching rules that decide
    which topic mappings the plugin should be run on."""

    def __init__(self, name, matching_rules, plugin):
        self._name = name
        self._matching_rules = matching_rules
        self._plugin = plugin

    def get_name(self):
        """The rule's configured name."""
        return self._name

    def get_rules(self):
        """The list of matching rules belonging to this extraction rule."""
        return self._matching_rules

    def get_plugin(self):
        """The plugin instance this rule drives."""
        return self._plugin

    def get_product_generators(self, topics_and_types):
        """Build one product generator per mapping of each matching rule
        that matches the given {topic: type} map."""
        return [
            ExtractionProductGenerator(self._plugin, self._name, mapping)
            for rule in self._matching_rules
            if rule.match(topics_and_types)
            for mapping in rule.get_mappings()
        ]
class Extractor(object):
def __init__(self,
configuration_file,
bag,
temp_dir="",
output_dir="",
dry_run=False,
overwrite=False,
rules=[],
logger=Logger()):
if not os.path.exists(configuration_file):
raise RuntimeError("Configuration '%s' does not exist" % (configuration_file))
if not os.path.isfile(bag):
raise RuntimeError("Rosbag file '%s' does not exist" % (bag))
self._configuration = {}
self._extraction_rules = []
self._extraction_product_generators = []
self._products = []
self._configuration_file = configuration_file
self._resource_directory = ""
self._bag = os.path.abspath(bag)
self._only_rules = rules
self._topics_and_types = {}
self._topic_tuples = {}
self._msg_type_hashes = {}
self._bag_size = 0
self._bag_message_count = 0
self._bag_start = 0
self._bag_end = 0
self._bag_duration = 0
self._errors = []
if not temp_dir:
self._temp_dir = os.getcwd() + "/temp"
else:
self._temp_dir = temp_dir
if not output_dir:
self._output_dir = os.getcwd() + "/output_" + os.path.basename(bag)
else:
self._output_dir = output_dir
self._dry_run = dry_run
self._overwrite = overwrite
self._logger = logger # type: Logger
self._create_dirs()
self._load_configuration()
def _match(self):
generator_counter = 1
self._logger.info("Products that can be generated:")
for i in range(len(self._extraction_rules)):
rule = self._extraction_rules[i]
self._logger.info("- %s (%d/%d)" % (rule.get_name(), i+1, len(self._extraction_rules)))
product_generators = rule.get_product_generators(self._topics_and_types)
if len(product_generators) == 0:
self._logger.info(" None")
else:
for product_generator in product_generators:
self._logger.info(" %d. %s" % (generator_counter, str(product_generator.get_topic_mapping())))
generator_counter += 1
self._extraction_product_generators += product_generators
    def _generate(self):
        """Run every queued product generator: validate topics, prepare the
        per-product output directory, invoke the plugin, and collect the
        resulting products. Exceptions from plugins are caught and recorded
        in self._errors; an error-messages product is appended at the end.
        """
        self._logger.info("Starting extraction of products...")
        extraction_id = 0
        for product_generator in self._extraction_product_generators:
            # Products are numbered 1..N; each one gets its own directory.
            extraction_id += 1
            succeeded = True
            plugin_object = product_generator.get_plugin()
            product_directory = self._output_dir + "/" + str(extraction_id)
            self._logger.info("- Extracting product(s) %d/%d (%s)" % (extraction_id,
                                                                      len(self._extraction_product_generators),
                                                                      plugin_object.get_plugin_name_version()))
            # Ensure correct topics are matched
            if succeeded:
                if not plugin_object.check_topics(product_generator.get_topic_mapping()):
                    succeeded = False
                    self._logger.failure("  Invalid topic mapping")
            if succeeded:
                if os.path.exists(product_directory) and not self._overwrite:
                    succeeded = False
                    self._logger.failure("  Product directory already exists")
            # Dry run: validation above still happened, but nothing is
            # written and the success/failure summary is skipped.
            if self._dry_run:
                self._logger.info("  SKIPPING (dry run)")
                continue
            # Generate product
            if succeeded:
                # Create output directory
                if self._overwrite and os.path.exists(product_directory):
                    shutil.rmtree(product_directory)
                if not os.path.exists(product_directory):
                    os.makedirs(product_directory)
                self._logger.debug("  output: %s" % (product_directory))
                try:
                    products = plugin_object.extract(
                        self._bag,
                        product_generator.get_topic_mapping(),
                        self._temp_dir,
                        product_directory,
                        ProductFactory(self._tag,
                                       product_generator.get_rule(),
                                       plugin_object.get_plugin_name_version(),
                                       product_directory))
                except Exception as e:
                    # A failing plugin must not abort the whole run; record
                    # the traceback so it ends up in the manifest.
                    self._logger.failure("  EXCEPTION '%s'" % repr(e))
                    traceback.print_exc()
                    self._errors.append({
                        'type': 'exception',
                        'tag': self._tag,
                        'rule': product_generator.get_rule(),
                        'plugin': plugin_object.get_plugin_name_version(),
                        'message': traceback.format_exc()
                    })
                    products = None
                # A plugin signals success by returning a list of products;
                # anything else (including None) counts as failure.
                if isinstance(products, list):
                    succeeded = True
                    self._products.extend(products)
                    self._logger.info("Generated products:")
                    for p in products:
                        self._logger.info(" - %s: %s" % (p.get_type(), p.get_title()))
                else:
                    succeeded = False
            if succeeded:
                self._logger.info("  SUCCEEDED")
            else:
                self._logger.failure("  FAILED")
        # Always append the (possibly empty) error report as a product.
        self._products.extend(self._get_error_message_product())
def _get_error_message_product(self):
pf = ProductFactory(self._tag, "error-messages", "ATS Errors V1.0", "")
p = pf.new()
p.set_type("error-messages")
p.set_title("Extraction Errors")
p.set_topics([])
p.set_data({'errors': self._errors})
return [p]
def write_manifest(self, path, server_data={}):
products = []
for p in self._products:
products.append(p.to_dict())
data = {
'server_info': {
'server_url': "",
'store_name': "",
'bag_name': "",
} if not 'server_url' in server_data else server_data,
'bag_info': {
'size': 0,
'start_time': 0,
'end_time': 0,
'duration': 0,
'messages': 0,
'topics': []
},
'products': products
}
self._fill_manifest_bag_info(data)
with open(path, 'w') as outfile:
yaml.safe_dump(data, outfile, default_flow_style=False)
def _fill_manifest_bag_info(self, data):
topics = []
for topic in self._topic_tuples:
topic_data = {
'name': topic,
'msg_type': self._topic_tuples[topic].msg_type,
'msg_count': self._topic_tuples[topic].message_count,
'avg_frequency': self._topic_tuples[topic].frequency,
'msg_type_hash': self._msg_type_hashes[self._topic_tuples[topic].msg_type],
'msg_definition': ""
}
topics.append(topic_data)
data['bag_info']['topics'] = topics
data['bag_info']['size'] = self._bag_size
data['bag_info']['start_time'] = datetime.datetime.fromtimestamp(self._bag_start)
data['bag_info']['end_time'] = datetime.datetime.fromtimestamp(self._bag_end)
data['bag_info']['duration'] = self._bag_duration
data['bag_info']['messages'] = self._bag_message_count
    def _read_bag(self, reindex=True):
        """Read topic/type information and basic statistics from the bag.

        Args:
            reindex: When True and the bag turns out to be unindexed, shell
                out to ``rosbag reindex`` once and retry with reindex=False
                (prevents an infinite retry loop).
        """
        self._topics_and_types = {}
        self._logger.debug("Topics in bag:")
        try:
            with rosbag.Bag(self._bag, 'r') as bag:
                info = bag.get_type_and_topic_info()
                for topic in info.topics:
                    self._logger.debug("- %s: %s" % (topic, info.topics[topic].msg_type))
                    self._topics_and_types[topic] = info.topics[topic].msg_type
                    self._topic_tuples[topic] = info.topics[topic]
                for msg_type in info.msg_types:
                    self._msg_type_hashes[msg_type] = info.msg_types[msg_type]
                # Statistics later written into the manifest.
                self._bag_size = bag.size
                self._bag_message_count = bag.get_message_count()
                self._bag_start = bag.get_start_time()
                self._bag_end = bag.get_end_time()
                self._bag_duration = self._bag_end - self._bag_start
        except rosbag.ROSBagUnindexedException:
            self._logger.warning("Bag is UNINDEXED!")
            if reindex:
                self._logger.info("Attempting bag reindexing using rosbag reindex.")
                # Run the external reindex tool in the bag's directory.
                reindex_command = Command("rosbag reindex '%s'" % self._bag, None, os.path.dirname(self._bag))
                reindex_command.run()
                if reindex_command.join() == 0:
                    self._logger.info("Bag reindexed!")
                    self._read_bag(reindex=False)
                else:
                    self._logger.failure("Reindex command failed!")
def run(self, auto_write_manifest=True):
self._logger.info("Starting extraction '%s' (%s)..." % (self._title, self._tag))
self._logger.info("Reading bag metadata...")
self._read_bag()
self._match()
self._generate()
if auto_write_manifest:
self.write_manifest(self._output_dir + "/manifest.yaml")
def cleanup(self):
if os.path.exists(self._temp_dir):
shutil.rmtree(self._temp_dir)
def _create_dirs(self):
if not os.path.exists(self._temp_dir):
os.makedirs(self._temp_dir)
if not os.path.exists(self._temp_dir):
raise RuntimeError("Cannot create temporary directory '%s'" % (self._temp_dir))
if not os.path.exists(self._output_dir):
os.makedirs(self._output_dir)
if not os.path.exists(self._output_dir):
raise RuntimeError("Cannot create output directory '%s'" % (self._output_dir))
    def _load_configuration(self):
        """Load the YAML configuration (optionally packed in a .zip archive)
        and build the extraction rules.

        Raises:
            RuntimeError: When config.yaml is missing from the archive, the
                YAML is invalid, or the configuration is empty.
        """
        filename, file_extension = os.path.splitext(self._configuration_file)
        self._logger.debug("Loading configuration %s" % self._configuration_file)
        if file_extension == '.zip':
            # Zipped config: unpack into the temp dir and expect a top-level
            # config.yaml; the unpacked directory becomes the resource dir.
            self._resource_directory = self._temp_dir + "/config"
            self._logger.debug("Unpacking config archive in: %s" % self._resource_directory)
            if os.path.exists(self._resource_directory):
                shutil.rmtree(self._resource_directory)
            with zipfile.ZipFile(self._configuration_file, 'r') as config_zip:
                config_zip.extractall(self._resource_directory)
            self._configuration_file = self._resource_directory + "/config.yaml"
            if not os.path.isfile(self._configuration_file):
                raise RuntimeError("There is no 'config.yaml' in the provided config archive")
        else:
            # Plain YAML file: resources live next to the configuration.
            self._resource_directory = os.path.dirname(os.path.realpath(self._configuration_file))
        self._logger.debug("Resource directory: %s" % self._resource_directory)
        with open(self._configuration_file, 'r') as stream:
            try:
                self._configuration = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                raise RuntimeError("Invalid YAML in configuration file: %s" % str(exc))
        if not self._configuration:
            raise RuntimeError("Error loading configuration or empty configuration file")
        # Required top-level keys; a KeyError here means a malformed config.
        self._title = self._configuration['title']
        self._tag = self._configuration['tag']
        self._description = self._configuration['description']
        self._logger.info("Extraction rules:")
        self._load_rules_from_configuration()
def _load_rules_from_configuration(self):
for rule_name in self._configuration['rules']:
# Only load the specified rules
if len(self._only_rules) > 0 and not rule_name in self._only_rules:
continue
rule_config = self._configuration['rules'][rule_name]
# Is the plugin available
try:
plugin = importlib.import_module(rule_config['plugin'])
except ImportError as e:
self._logger.debug("ImportError: %s" % str(e))
raise RuntimeError("Cannot find plugin '%s' used by rule '%s'" % (rule_config['plugin'], rule_name))
# Is the plugin correctly defined
try:
plugin_class = plugin.plugin
if not issubclass(plugin_class, AbstractExtractionPlugin):
raise RuntimeError("Plugin '%s' is not a subclass of AbstractExtractionPlugin" % rule_config['plugin'])
except NameError:
raise RuntimeError("Plugin '%s' has no plugin class defined" % (rule_config['plugin']))
plugin_object = plugin.plugin(rule_config['config'], self._logger, self._resource_directory)
# Load the matchers
matching_rules = []
for matcher in rule_config['topic_matchers']:
try:
module = str.join(".", matcher['type'].split(".")[:-1])
class_name = matcher['type'].split(".")[-1]
matcher_module = importlib.import_module(module)
matcher_class = getattr(matcher_module, class_name)
if not issubclass(matcher_class, AbstractMatchingRule):
raise RuntimeError(
"Matcher '%s' is not a subclass of AbstractMatchingRule" % matcher['type'])
except ImportError | |
<gh_stars>0
#!/usr/bin/env python2.7
# Copyright 2017 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This tool takes in multiple manifest files:
* system image and archive manifest files from each package
* Zircon's bootfs.manifest, optionally using a subset selected by the
"group" syntax (e.g. could specify just "core", or "core,misc" or
"core,misc,test").
* "auxiliary" manifests
** one from the toolchain for the target libraries (libc++ et al)
** one from the Zircon/*-ulib build, which has the Zircon ASan libraries
** the unselected parts of the "main" manifests (i.e. Zircon)
It emits final /boot and /system manifests used to make the actual images,
final archive manifests used to make each package, and the build ID map.
The "auxiliary" manifests just supply a pool of files that might be used to
satisfy dependencies; their files are not included in the output a priori.
The tool examines each file in its main input manifests. If it's not an
ELF file, it just goes into the appropriate output manifest. If it's an
ELF file, then the tool figures out what "variant" it is (if any), such as
"asan" and what other ELF files it requires via PT_INTERP and DT_NEEDED.
It then finds those dependencies and includes them in the output manifest,
and iterates on their dependencies. Each dependency is found either in the
*-shared/ toolchain $root_out_dir for the same variant toolchain that built
the root file, or among the files in auxiliary manifests (i.e. toolchain
and Zircon libraries). For things built in the asan variant, it finds the
asan versions of the toolchain/Zircon libraries.
"""
from collections import namedtuple
import argparse
import elfinfo
import fnmatch
import itertools
import manifest
import os
import sys
import variant
# Convenience alias for the ELF-inspection helper from the variant module.
binary_info = variant.binary_info
# An entry for a binary is (manifest.manifest_entry, elfinfo.elf_info).
binary_entry = namedtuple('binary_entry', ['entry', 'info'])
# In recursions of CollectBinaries.AddBinary, this is the type of the
# context argument.
binary_context = namedtuple(
    'binary_context', [
        'variant',
        'soname_map',
        'root_dependent',
    ])
# Each --output argument yields an output_manifest tuple.
output_manifest = namedtuple('output_manifest', ['file', 'manifest'])
# Each --binary argument yields an input_binary tuple.
input_binary = namedtuple('input_binary', ['target_pattern', 'output_group'])
# Collect all the binaries from auxiliary manifests into
# a dictionary mapping entry.target to binary_entry.
def collect_auxiliaries(manifest, examined):
    """Index the binaries found in auxiliary manifests by target path.

    Every examined source file is added to `examined`. Returns a dict
    mapping entry.target to binary_entry; raises when two entries claim
    the same target with different sources.
    """
    aux_binaries = {}
    for entry in manifest:
        # TODO(bwb): Temporary workaround for ZN/GN migration. Remove when all drivers are outside of ZN
        if "libdriver.so" in entry.source and ".zircon" not in entry.source:
            continue
        examined.add(entry.source)
        info = binary_info(entry.source)
        if not info:
            continue
        candidate = binary_entry(entry, info)
        recorded = aux_binaries.setdefault(entry.target, candidate)
        if recorded.entry.source != candidate.entry.source:
            raise Exception(
                "'%s' in both %r and %r" %
                (entry.target, recorded.entry, entry))
    return aux_binaries
# Return an iterable of binary_entry for all the binaries in `manifest` and
# `input_binaries` and their dependencies from `aux_binaries`, and an
# iterable of manifest_entry for all the other files in `manifest`.
def collect_binaries(manifest, input_binaries, aux_binaries, examined):
    """Resolve every binary in `manifest` (plus the --input-binary
    selections) together with its transitive PT_INTERP/DT_NEEDED
    dependencies, pulling missing libraries from `aux_binaries` or the
    variant's shared toolchain output directory.

    Returns (iterable of binary_entry, list of non-binary manifest
    entries). NOTE: uses itervalues()/iterkeys(), i.e. this is Python 2
    code.
    """
    # As we go, we'll collect the actual binaries for the output
    # in this dictionary mapping entry.target to binary_entry.
    unexamined_binaries = {}
    binaries = {}
    # We'll collect entries in the manifest that aren't binaries here.
    nonbinaries = []
    # This maps GN toolchain (from variant.shared_toolchain) to a
    # dictionary mapping DT_SONAME string to binary_entry.
    soname_map_by_toolchain = {}
    def rewrite_binary_group(old_binary, group_override):
        # Same binary, different output group.
        return binary_entry(
            old_binary.entry._replace(group=group_override), old_binary.info)
    def add_binary(binary, context=None, auxiliary=False):
        # TODO(bwb): Temporary workaround for ZN/GN migration. Remove when all drivers are outside of ZN
        if "libdriver.so" in binary.entry.source and ".zircon" not in binary.entry.source:
            return
        # Add a binary by target name.
        def add_auxiliary(target, required, group_override=None):
            if group_override is None:
                group_override = binary.entry.group
                aux_context = context
            else:
                aux_context = None
            # Look for the target in auxiliary manifests.
            aux_binary = aux_binaries.get(target)
            if required:
                assert aux_binary, (
                    "'%s' not in auxiliary manifests, needed by %r via %r" %
                    (target, binary.entry, context.root_dependent))
            if aux_binary:
                add_binary(
                    rewrite_binary_group(aux_binary, group_override),
                    aux_context, True)
                return True
            return False
        existing_binary = binaries.get(binary.entry.target)
        if existing_binary is not None:
            if existing_binary.entry.source != binary.entry.source:
                raise Exception(
                    "%r in both %r and %r" %
                    (binary.entry.target, existing_binary, binary))
            # If the old record was in a later group, we still need to
            # process all the dependencies again to promote them to
            # the new group too.
            if existing_binary.entry.group <= binary.entry.group:
                return
        examined.add(binary.entry.source)
        # If we're not part of a recursion, discover the binary's context.
        if context is None:
            binary_variant, variant_file = variant.find_variant(
                binary.info, binary.entry.target)
            if variant_file is not None:
                # This is a variant that was actually built in a different
                # place than its original name says. Rewrite everything to
                # refer to the "real" name.
                binary = binary_entry(
                    binary.entry._replace(source=variant_file),
                    binary.info.rename(variant_file))
                examined.add(variant_file)
            context = binary_context(
                binary_variant,
                soname_map_by_toolchain.setdefault(
                    binary_variant.shared_toolchain, {}), binary)
        binaries[binary.entry.target] = binary
        assert binary.entry.group is not None, binary
        if binary.info.soname:
            # This binary has a SONAME, so record it in the map.
            soname_binary = context.soname_map.setdefault(
                binary.info.soname, binary)
            if soname_binary.entry.source != binary.entry.source:
                raise Exception(
                    "SONAME '%s' in both %r and %r" %
                    (binary.info.soname, soname_binary, binary))
            if binary.entry.group < soname_binary.entry.group:
                # Update the record to the earliest group.
                context.soname_map[binary.info.soname] = binary
        # The PT_INTERP is implicitly required from an auxiliary manifest.
        if binary.info.interp:
            add_auxiliary('lib/' + binary.info.interp, True)
        # The variant might require other auxiliary binaries too.
        for variant_aux, variant_aux_group in context.variant.aux:
            add_auxiliary(variant_aux, True, variant_aux_group)
        # Handle the DT_NEEDED list.
        for soname in binary.info.needed:
            # The vDSO is not actually a file.
            if soname == 'libzircon.so':
                continue
            lib = context.soname_map.get(soname)
            if lib and lib.entry.group <= binary.entry.group:
                # Already handled this one in the same or earlier group.
                continue
            # The DT_SONAME is libc.so, but the file is ld.so.1 on disk.
            if soname == 'libc.so':
                soname = 'ld.so.1'
            # Translate the SONAME to a target file name.
            target = context.variant.soname_target(soname)
            if add_auxiliary(target, auxiliary):
                # We found it in an existing manifest.
                continue
            # An auxiliary's dependencies must all be auxiliaries too.
            assert not auxiliary, (
                "missing '%s' needed by auxiliary %r via %r" %
                (target, binary, context.root_dependent))
            # Check if it's elsewhere in the input set.
            lib = unexamined_binaries.get(target)
            if lib is None:
                # It must be in the shared_toolchain output directory.
                # Context like group is inherited from the dependent.
                lib_entry = binary.entry._replace(
                    source=os.path.join(
                        context.variant.shared_toolchain, soname),
                    target=target)
                assert os.path.exists(lib_entry.source), (
                    "missing %r needed by %r via %r" %
                    (lib_entry, binary, context.root_dependent))
                # Read its ELF info and sanity-check.
                lib = binary_entry(lib_entry, binary_info(lib_entry.source))
                assert lib.info and lib.info.soname == soname, (
                    "SONAME '%s' expected in %r, needed by %r via %r" %
                    (soname, lib, binary, context.root_dependent))
            # Recurse.
            add_binary(lib, context)
    # Partition the main manifest into binaries and opaque data files.
    for entry in manifest:
        info = None
        # Don't inspect data or firmware resources in the manifest. Regardless
        # of the bits in these files, we treat them as opaque data.
        try:
            if not entry.target.startswith(
                    'data/') and not entry.target.startswith('lib/firmware/'):
                info = binary_info(entry.source)
        except IOError as e:
            raise Exception('%s from %s' % (e, entry))
        if info:
            # Keep the entry from the earliest group for each target.
            if (entry.target not in unexamined_binaries or entry.group <
                    unexamined_binaries[entry.target].entry.group):
                unexamined_binaries[entry.target] = binary_entry(entry, info)
        else:
            nonbinaries.append(entry)
    for binary in unexamined_binaries.itervalues():
        add_binary(binary)
    # Every input binary must have been resolved into the output set.
    for target in unexamined_binaries.iterkeys():
        assert target in binaries
    # Pull in binaries explicitly requested via --input-binary patterns.
    matched_binaries = set()
    for input_binary in input_binaries:
        matches = fnmatch.filter(
            aux_binaries.iterkeys(), input_binary.target_pattern)
        assert matches, (
            "--input-binary='%s' did not match any binaries" %
            input_binary.target_pattern)
        for target in matches:
            assert target not in matched_binaries, (
                "'%s' matched by multiple --input-binary patterns" % target)
            matched_binaries.add(target)
            add_binary(
                rewrite_binary_group(
                    aux_binaries[target], input_binary.output_group),
                auxiliary=True)
    return binaries.itervalues(), nonbinaries
# Take an iterable of binary_entry, and return list of binary_entry (all
# stripped files), a list of binary_info (all debug files), and a boolean
# saying whether any new stripped output files were written in the process.
def strip_binary_manifest(
manifest, stripped_dir, build_id_dir, clang_lib_dir, examined):
new_output = False
def find_debug_file(filename):
# In the Zircon makefile build, the file to be installed is called
# foo.strip and the unstripped file is called foo. In the new Zircon
# GN build, the file to be installed is called foo and the unstripped
# file is called foo.debug. In the Fuchsia GN build, the file to be
# installed is called foo and the unstripped file has the same name in
# the exe.unstripped or lib.unstripped subdirectory.
if | |
def test_rm_doesnt_break_cache(self):
"""
Test building, removing then rebuilding a package. The package
should be correctly rebuilt.
"""
mydir = os.path.dirname(__file__)
build_path = os.path.join(mydir, './build_simple.yml')
command.build('foo/bar', build_path)
command.rm('foo/bar', force=True)
teststore = store.PackageStore(self._store_dir)
assert not os.path.isdir(teststore.package_path(None, 'foo', 'bar'))
mydir = os.path.dirname(__file__)
build_path = os.path.join(mydir, './build_simple.yml')
command.build('foo/bar', build_path)
from quilt.data.foo import bar
assert isinstance(bar.foo(), pd.DataFrame)
    def test_export_nonexistent(self):
        """Export of a missing user or a missing package must raise CommandException."""
        # Ensure export raises correct error when user doesn't exist
        with pytest.raises(command.CommandException, match="Package .* not found"):
            command.export("export_nonexistent_user/package")
        # Ensure export raises correct error when user does exist
        command.build_package_from_contents(None, 'existent_user', 'testpackage', [], '', {'contents': {}})
        # Import of the freshly built package -- presumably here to prove the
        # user/package was actually created before testing the missing-package path.
        from quilt.data.existent_user import testpackage
        with pytest.raises(command.CommandException, match="Package .* not found"):
            command.export("existent_user/nonexistent_package")
def test_export_dir_file_conflict(self):
# This tests how export handles a conflict between a filename and a dirname,
# for example, "foo/bar" and the file "foo". This may not seem very probable,
# but it's a condition that can definitely be created by re-rooting absolute
# paths into the export dir -- for example, '/foo/bar' and 'foo' would create
# this kind of conflict.
Path = pathlib.Path
mydir = Path(__file__).parent
tempdir = Path() / 'test_export_dir_file_conflict'
pkg_name = "testing/dirfileconflict"
data = os.urandom(200)
# prep
tempdir.mkdir()
command.build(pkg_name, str(mydir / 'build_empty.yml'))
# make FileNode with filename 'foo'
node = command.load(pkg_name)
conflict = (tempdir / 'foo')
conflict.write_bytes(data)
node._set(['foofile'], str(conflict))
command.build(pkg_name, node)
conflict.unlink()
# make FileNode with filepath 'foo/baz'
node = command.load(pkg_name)
conflict.mkdir()
conflict_file = (conflict / 'baz')
conflict_file.write_bytes(data)
node._set(['foodir', 'baz'], str(conflict_file))
command.build(pkg_name, node)
conflict_file.unlink()
conflict.rmdir()
# export conflicted files
with pytest.raises(command.CommandException,
match="Invalid Export: Filename\(s\) conflict with folder name\(s\):"):
command.export(pkg_name, str(tempdir))
    def test_export_absolute(self):
        """A node whose stored filepath is absolute should be re-rooted into the
        export dir on export (anchor dropped, remaining parts kept)."""
        Path = pathlib.Path
        mydir = Path(__file__).parent
        tempdir = Path("test_export_absolute")
        pkg_name = "testing/export_absolute"
        data = os.urandom(200)
        # prep
        tempdir.mkdir()
        command.build(pkg_name, str(mydir / 'build_empty.yml'))
        # make FileNode with absolute path
        node = command.load(pkg_name)
        abs_file = tempdir.absolute() / "abs_file"
        abs_file.write_bytes(data)
        # Setting absolute path for a node is not allowed.
        with pytest.raises(ValueError, match='Invalid path:'):
            node._set(['abs_file'], str(abs_file))
        # Set relative path and build dir instead
        node._set(['abs_file'], 'abs_file', str(tempdir))
        # Circumvent absolute path checks by writing value directly
        # (simulates a package whose metadata carries an absolute filepath)
        node.abs_file._meta['_system']['filepath'] = str(abs_file)
        # build
        command.build(pkg_name, node)
        # export
        export_dir = tempdir / 'export'
        # result should be the full absolute path of abs_file, but relative, and exported to export_dir
        # (parts[1:] drops the root/anchor before re-rooting under export_dir)
        expected_file = export_dir.joinpath(*abs_file.parts[1:])
        command.export(pkg_name, export_dir)
        assert expected_file.exists()
        assert expected_file.read_bytes() == data
def test_export(self):
# pathlib will translate paths to windows or posix paths when needed
Path = pathlib.Path
pkg_name = 'testing/foo'
subpkg_name = 'subdir'
single_name = 'single_file'
single_bytes = os.urandom(200)
subdir_test_data = [
(subpkg_name + '/subdir_example', os.urandom(300)),
(subpkg_name + '/9bad-identifier.html', os.urandom(100)),
]
test_data = [
(single_name, single_bytes),
('readme.md', os.urandom(200)),
# these are invalid python identifiers, but should be handled without issue
('3-bad-identifier/bad_parent_identifier.html', os.urandom(100)),
('3-bad-identifier/9{}bad-identifier.html', os.urandom(100)),
] + subdir_test_data
digest = lambda data: hashlib.sha256(data).hexdigest()
temp_dir = Path() / "temp_test_command_export"
install_dir = temp_dir / 'install'
# Create and and install build
for path, data in test_data:
path = install_dir / path
path.parent.mkdir(parents=True, exist_ok=True)
path.write_bytes(data)
command.build(pkg_name, str(install_dir))
## Test export
test_dir = temp_dir / 'test_export'
command.export(pkg_name, str(test_dir))
exported_paths = [path for path in test_dir.glob('**/*') if path.is_file()]
assert len(exported_paths) == len(test_data)
for path, data in test_data:
export_path = test_dir / path
install_path = install_dir / path
# filename matches
assert export_path in exported_paths
# data matches
assert digest(export_path.read_bytes()) == digest(install_path.read_bytes())
## Test export with force=True
# We just exported and checked that the files exist,
# so it's a good spot to check the force option.
command.export(pkg_name, str(test_dir), force=True)
exported_paths = [path for path in test_dir.glob('**/*') if path.is_file()]
assert len(exported_paths) == len(test_data)
for path, data in test_data:
export_path = test_dir / path
install_path = install_dir / path
# filename matches
assert export_path in exported_paths
# data matches
assert digest(export_path.read_bytes()) == digest(install_path.read_bytes())
## Test raise when exporting to overwrite existing files
files = set(f for f in test_dir.glob('*') if f.is_file())
# sorted and reversed means files before their containing dirs
for path in sorted(test_dir.glob('**/*'), reverse=True):
# keep files from root of test_dir
if path in files:
continue
# remove everything else
path.rmdir() if path.is_dir() else path.unlink()
# now there are only files in the export root to conflict with.
with pytest.raises(command.CommandException, match='file already exists'):
command.export(pkg_name, str(test_dir))
## Test raise when exporting to existing dir structure
command.export(pkg_name, str(test_dir), force=True)
for path in test_dir.glob('**/*'):
# leave dirs, remove files
if path.is_dir():
continue
path.unlink()
with pytest.raises(command.CommandException, match='subdir already exists'):
command.export(pkg_name, str(test_dir))
## Test exporting to an unwriteable location
# disabled on windows, for now
# TODO: Windows version of permission failure test
if os.name != 'nt':
test_dir = temp_dir / 'test_write_permissions_fail'
test_dir.mkdir()
try:
orig_mode = test_dir.stat().st_mode
test_dir.chmod(0o111)
with pytest.raises(command.CommandException, match='not writable'):
command.export(pkg_name, str(test_dir))
finally:
if 'orig_mode' in locals(): # may not exist if mode-set failed
# noinspection PyUnboundLocalVariable
test_dir.chmod(orig_mode)
## Test subpackage exports
test_dir = temp_dir / 'test_subpkg_export'
command.export(pkg_name + '/' + subpkg_name, str(test_dir))
exported_paths = [path for path in test_dir.glob('**/*') if path.is_file()]
assert len(exported_paths) == len(subdir_test_data)
for path, data in subdir_test_data:
export_path = test_dir / path
install_path = install_dir / path
# filename matches
assert export_path in exported_paths
# data matches
assert digest(export_path.read_bytes()) == digest(install_path.read_bytes())
## Test single-file exports
test_dir = temp_dir / 'test_single_file_export'
pkg_name_single = pkg_name + '/' + single_name
single_filepath = test_dir / single_name
command.export(pkg_name_single, str(test_dir))
assert single_filepath.exists()
assert digest(single_bytes) == digest(single_filepath.read_bytes())
    def test_export_roundtrip(self):
        """Build -> export -> rebuild from the exported dir -> export again;
        both exports must contain the same file set and equivalent data."""
        Path = pathlib.Path
        temp_dir = Path() / 'test_export_roundtrip'
        export_1_path = temp_dir / 'export_1'
        export_2_path = temp_dir / 'export_2'
        export_1_name = 'export_test/export_1'
        export_2_name = 'export_test/export_2'
        temp_dir.mkdir()
        expected_exports = sorted([
            Path('data/README.md'),
            Path('data/100Rows13Cols.csv'),
            Path('data/100Rows13Cols_tsv.csv'),  # originally tsv
            Path('data/100Rows13Cols_xlsx.csv'),  # originally xlsx
        ])
        # Build, load, and export from build file
        command.build(export_1_name, str(Path(__file__).parent / 'build_export.yml'))
        export_1_node = command.load(export_1_name)
        command.export(export_1_name, export_1_path)
        e1_paths = sorted(path.relative_to(export_1_path) for path in export_1_path.glob('**/*'))
        # the first sorted entry is the 'data' dir itself; drop it before comparing
        assert e1_paths.pop(0) == Path('data')
        assert e1_paths == expected_exports
        # Build, load, and export from export dir
        command.build(export_2_name, str(export_1_path))
        export_2_node = command.load(export_2_name)
        command.export(export_2_name, export_2_path)
        e2_paths = sorted(path.relative_to(export_2_path) for path in export_2_path.glob('**/*'))
        assert e2_paths.pop(0) == Path('data')
        assert e2_paths == expected_exports
        ## Compare dir contents to expectation:
        # byte-for-byte comparison for binary-consistent files
        path = pathlib.Path('data/README.md')
        assert (export_1_path / path).read_bytes() == (export_2_path / path).read_bytes()
        # dataframe comparison for columnar data with floats
        # pandas may import or export these imperfectly, so we just want (at least floats) to be close.
        export_pairs = (
            (export_1_node.data.csv, export_2_node.data.n100Rows13Cols),
            (export_1_node.data.excel, export_2_node.data.n100Rows13Cols_xlsx),
            (export_1_node.data.tsv, export_2_node.data.n100Rows13Cols_tsv),
        )
        for node1, node2 in export_pairs:
            d1, d2 = node1(), node2()
            assert all(d1.columns == d2.columns)
            for column_name in d1.columns:
                if column_name.startswith('Double'):
                    # float columns: approximate equality only (is_close helper)
                    for n in range(len(d1[column_name])):
                        assert is_close(d1[column_name][n], d2[column_name][n])
                else:
                    # non-float columns must match exactly
                    for n in range(len(d1[column_name])):
                        assert d1[column_name][n] == d2[column_name][n]
    @quilt_dev_mode
    def test_export_symlinks(self):
        """Export with symlinks=True should symlink files exported as-is,
        while format-converted files (xlsx/tsv -> csv) stay real files."""
        Path = pathlib.Path
        temp_dir = (Path() / 'test_export_symlinks').absolute()
        links_path = temp_dir / 'with_links'
        nolinks_path = temp_dir / 'without_links'
        datadir = Path(__file__).parent
        islink, notlink = True, False
        temp_dir.mkdir()
        command.build('test_export_symlinks/data', str(datadir / 'build_export_symlinks.yml'))
        # Each expected file paired with whether it should be a symlink when
        # exporting with symlinks=True.
        expected_exports = [
            (Path('data/README.md'), islink),
            (Path('data/100Rows13Cols.xlsx'), islink),
            (Path('data/100Rows13Cols_xlsx.csv'), notlink),
            (Path('data/subdir/foo.txt'), islink),
            (Path('data/subdir/csv_txt.csv'), notlink),
        ]
        # Default export: every file is a real file, never a symlink.
        command.export('test_export_symlinks/data', str(nolinks_path))
        found_paths = [path.relative_to(nolinks_path) for path in nolinks_path.glob('**/*')
                       if not path.is_dir()]
        assert len(found_paths) == len(expected_exports)
        for path, linkstate in expected_exports:
            assert (nolinks_path / path).exists()
            assert not (nolinks_path / path).is_symlink()
        # Symlinked export: per-file link state must match expectations.
        command.export('test_export_symlinks/data', str(links_path), symlinks=True)
        found_paths = [path.relative_to(links_path) for path in links_path.glob('**/*')
                       if not path.is_dir()]
        assert len(found_paths) == len(expected_exports)
        for path, linkstate in expected_exports:
            assert path in found_paths
            path = links_path / path
            assert path.exists()
            assert path.is_symlink() == linkstate
def test_parse_package_names(self):
# good parse strings
expected = (None, 'user', 'package')
assert command.parse_package('user/package') == expected
expected = ('team', 'user', 'package')
assert command.parse_package('team:user/package') == expected
expected = (None, 'user', 'package', ['foo', 'bar'])
assert command.parse_package('user/package/foo/bar', True) == expected
expected = ('team', 'user', 'package', ['foo', 'bar'])
assert command.parse_package('team:user/package/foo/bar', True) == expected
expected = ('team', 'user', 'package', [])
assert command.parse_package('team:user/package', True) == expected
# bad parse strings
with pytest.raises(command.CommandException, message='subdir should be rejected'):
command.parse_package('user/package/subdir', allow_subpath=False)
with pytest.raises(command.CommandException, match="Invalid user name"):
command.parse_package('9user/package')
with pytest.raises(command.CommandException, match='Invalid package name'):
command.parse_package('user/!package')
with pytest.raises(command.CommandException, match='Invalid element in subpath'):
command.parse_package('user/package/&subdir', True)
with pytest.raises(command.CommandException, message='subdir should be rejected'):
command.parse_package('team:user/package/subdir', allow_subpath=False)
with pytest.raises(command.CommandException, match='Invalid team name'):
command.parse_package('team%:user/package/subdir', allow_subpath=True)
with pytest.raises(command.CommandException, match="Invalid user name"):
command.parse_package('team:9user/package')
with pytest.raises(command.CommandException, match='Invalid package name'):
command.parse_package('team:user/!package')
with pytest.raises(command.CommandException, match='Invalid element in subpath'):
command.parse_package('team:user/package/&subdir', True)
# XXX: in this case, should | |
# Copyright 2020 Accenture Global Solutions Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools as it
import re
import typing as tp
import pathlib
import logging
import google.protobuf.descriptor_pb2 as pb_desc
import google.protobuf.compiler.plugin_pb2 as pb_plugin
class LocationContext:
    """State bundle used while recursively walking a protobuf descriptor tree
    to look up source locations.

    src_locations -- SourceCodeInfo locations in scope for the current node
    src_loc_code  -- descriptor field number identifying the declaration kind
    src_loc_index -- index of the declaration within its parent
    indent        -- indentation depth for the code being generated
    """
    def __init__(self, src_locations: tp.List[pb_desc.SourceCodeInfo.Location],
                 src_loc_code: int, src_loc_index: int, indent: int):
        self.src_locations = src_locations
        self.src_loc_code = src_loc_code
        self.src_loc_index = src_loc_index
        self.indent = indent
    def for_index(self, index: int) -> 'LocationContext':
        """Return a new context identical to this one, pointing at a different
        declaration index."""
        return LocationContext(self.src_locations, self.src_loc_code, index, self.indent)
class TracGenerator:
    """Generates Python classes and enums from protobuf descriptors, as part
    of the TRAC protoc plugin."""

    _FieldType = pb_desc.FieldDescriptorProto.Type

    # Static mapping from protobuf scalar field types to Python built-in
    # types; message and enum fields are handled separately in
    # python_base_type.
    PROTO_TYPE_MAPPING = dict({
        _FieldType.TYPE_DOUBLE: float,
        _FieldType.TYPE_FLOAT: float,
        _FieldType.TYPE_INT64: int,
        _FieldType.TYPE_UINT64: int,
        _FieldType.TYPE_INT32: int,
        _FieldType.TYPE_FIXED64: int,
        _FieldType.TYPE_FIXED32: int,
        _FieldType.TYPE_BOOL: bool,
        _FieldType.TYPE_STRING: str,
        # Group type is deprecated and not supported in proto3
        # _FieldType.TYPE_GROUP
        # Do not include a mapping for message type, it will be handled specially
        # _FieldType.TYPE_MESSAGE
        _FieldType.TYPE_BYTES: bytes,  # TODO: Use bytearray?
        _FieldType.TYPE_UINT32: int,
        # Do not include a mapping for enum type, it will be handled specially
        # _FieldType.TYPE_ENUM
        _FieldType.TYPE_SFIXED32: int,
        _FieldType.TYPE_SFIXED64: int,
        _FieldType.TYPE_SINT32: int,
        _FieldType.TYPE_SINT64: int
    })

    INDENT_TEMPLATE = " " * 4

    # One re-export line per generated symbol in the package __init__
    PACKAGE_IMPORT_TEMPLATE = "from .{MODULE_NAME} import {SYMBOL}\n"

    # Top-level layout of a generated module
    FILE_TEMPLATE = \
        """# Code generated by TRAC\n""" \
        """\n""" \
        """{IMPORT_STATEMENTS}\n""" \
        """\n""" \
        """{ENUMS_CODE}\n""" \
        """{MESSAGES_CODE}\n"""

    # Class definition for one protobuf message (nested members included)
    MESSAGE_TEMPLATE = \
        """{INDENT}\n""" \
        """{INDENT}class {CLASS_NAME}:\n""" \
        """{NEXT_INDENT}\n""" \
        """{NEXT_INDENT}\"\"\"\n""" \
        """{DOC_COMMENT}\n""" \
        """{NEXT_INDENT}\"\"\"\n""" \
        """{NEXT_INDENT}\n""" \
        """{NESTED_ENUMS}""" \
        """{NESTED_MESSAGES}""" \
        """{INIT_METHOD}\n"""

    # Constructor for a generated message class
    INIT_METHOD_TEMPLATE = \
        """{INDENT}def __init__(self{INIT_PARAMS}):{PEP_FLAG}\n""" \
        """{NEXT_INDENT}\n""" \
        """{INIT_VARS}\n"""

    # One constructor parameter (rendered as a continuation line)
    INIT_PARAM_TEMPLATE = \
        ",{PEP_FLAG}\n{INDENT}{PARAM_NAME}: {PARAM_TYPE}"

    # One 'self.x = x' assignment plus its doc comment
    INIT_VAR_TEMPLATE = \
        "{INDENT}self.{IVAR_NAME} = {PARAM_NAME}\n" \
        "{IVAR_COMMENT}"

    # Body for a message with no fields
    INIT_PASS_TEMPLATE = \
        "{INDENT}pass\n"

    # Class definition for one protobuf enum
    ENUM_TEMPLATE = \
        """{INDENT}\n""" \
        """{INDENT}class {CLASS_NAME}(enum.Enum):\n""" \
        """{NEXT_INDENT}\n""" \
        """{NEXT_INDENT}\"\"\"\n""" \
        """{DOC_COMMENT}\n""" \
        """{NEXT_INDENT}\"\"\"\n""" \
        """{INDENT}\n""" \
        """{ENUM_VALUES}\n"""

    ENUM_VALUE_TEMPLATE = \
        """{INDENT}{ENUM_VALUE_NAME} = {ENUM_VALUE_NUMBER}, {QUOTED_COMMENT}\n"""

    # Comment layouts for instance variables and enum values
    INLINE_COMMENT_SINGLE_LINE = \
        '\n{INDENT}"""{COMMENT}"""\n' \

    INLINE_COMMENT_MULTI_LINE = \
        '\n{INDENT}"""\n' \
        '{INDENT}{COMMENT}\n' \
        '{INDENT}"""\n'

    ENUM_COMMENT_SINGLE_LINE = \
        '"""{COMMENT}"""'

    ENUM_COMMENT_MULTI_LINE = \
        '"""{COMMENT}\n' \
        '{INDENT}"""'
    def __init__(self):
        """Set up logging and cache the descriptor field numbers used to match
        SourceCodeInfo location paths."""
        # NOTE(review): basicConfig configures the root logger globally, which
        # is unusual for library/plugin code -- confirm this is intentional.
        logging.basicConfig(level=logging.DEBUG)
        self._log = logging.getLogger(TracGenerator.__name__)
        # Field numbers for the descriptor fields whose locations we look up.
        self._enum_type_field = self.get_field_number(pb_desc.FileDescriptorProto, "enum_type")
        self._message_type_field = self.get_field_number(pb_desc.FileDescriptorProto, "message_type")
        self._message_field_field = self.get_field_number(pb_desc.DescriptorProto, "field")
        self._enum_value_field = self.get_field_number(pb_desc.EnumDescriptorProto, "value")
def generate_package(self, package: str, files: tp.List[pb_desc.FileDescriptorProto]) \
-> tp.List[pb_plugin.CodeGeneratorResponse.File]:
output_files = []
# Use the protobuf package as the Python package
package_path = pathlib.Path(*package.split("."))
package_imports = ""
for file_descriptor in files:
# Run the generator to produce code for the Python module
src_locations = file_descriptor.source_code_info.location
file_code = self.generate_file(src_locations, 0, file_descriptor)
# Find the module name inside the package - this is the stem of the .proto file
file_path = pathlib.PurePath(file_descriptor.name)
file_stem = file_path.stem
# Create a generator response for the module
file_response = pb_plugin.CodeGeneratorResponse.File()
file_response.content = file_code
# File name is formed from the python package and the module name (.proto file stem)
file_response.name = str(package_path.joinpath(file_stem + ".py"))
output_files.append(file_response)
# Generate import statements to include in the package-level __init__ file
package_imports += self.generate_package_imports(file_descriptor)
# Add an extra generator response file for the package-level __init__ file
package_init_file = pb_plugin.CodeGeneratorResponse.File()
package_init_file.name = str(package_path.joinpath("__init__.py"))
package_init_file.content = package_imports
output_files.append(package_init_file)
return output_files
def generate_package_imports(self, descriptor: pb_desc.FileDescriptorProto) -> str:
file_path = pathlib.Path(descriptor.name)
module_name = file_path.stem
imports = ""
if len(descriptor.enum_type) > 0 or len(descriptor.message_type) > 0:
imports += "\n"
for enum_type in descriptor.enum_type:
imports += self.PACKAGE_IMPORT_TEMPLATE.format(
MODULE_NAME=module_name,
SYMBOL=enum_type.name)
for message_type in descriptor.message_type:
imports += self.PACKAGE_IMPORT_TEMPLATE.format(
MODULE_NAME=module_name,
SYMBOL=message_type.name)
return imports
def generate_file(self, src_loc, indent: int, descriptor: pb_desc.FileDescriptorProto) -> str:
# print(descriptor.name)
# self._log.info(descriptor.name)
imports = []
imports.append("import typing as tp")
if len(descriptor.enum_type) > 0:
imports.append("import enum")
# Generate imports
for import_proto in descriptor.dependency:
if import_proto.startswith("trac/metadata/"):
import_module = import_proto \
.replace("trac/metadata/", "") \
.replace("/", ".") \
.replace(".proto", "")
imports.append("from .{} import *".format(import_module))
# Generate enums
enum_ctx = self.index_sub_ctx(src_loc, self._enum_type_field, indent)
enum_code = list(it.starmap(self.generate_enum, zip(enum_ctx, descriptor.enum_type)))
# Generate message classes
message_ctx = self.index_sub_ctx(src_loc, self._message_type_field, indent)
message_code = list(it.starmap(self.generate_message, zip(message_ctx, descriptor.message_type)))
# Populate the template
code = self.FILE_TEMPLATE \
.replace("{INDENT}", self.INDENT_TEMPLATE * indent) \
.replace("{IMPORT_STATEMENTS}", "\n".join(imports)) \
.replace("{ENUMS_CODE}", "\n\n".join(enum_code)) \
.replace("{MESSAGES_CODE}", "\n\n".join(message_code))
return code
    def generate_message(self, ctx: LocationContext, descriptor: pb_desc.DescriptorProto) -> str:
        """Generate the Python class definition for one protobuf message,
        including its doc comment, nested enums/messages and __init__."""
        # Generate comments
        filtered_loc = self.filter_src_location(ctx.src_locations, ctx.src_loc_code, ctx.src_loc_index)
        raw_comment = self.comment_for_current_location(filtered_loc)
        formatted_comment = self.comment_block_translation(ctx, raw_comment)
        # Generate nested enums
        enum_ctx = self.index_sub_ctx(filtered_loc, self._enum_type_field, ctx.indent + 1)
        enum_code = list(it.starmap(self.generate_enum, zip(enum_ctx, descriptor.enum_type)))
        # Generate nested message classes (recursive call handles deeper nesting)
        message_ctx = self.index_sub_ctx(filtered_loc, self._message_type_field, ctx.indent + 1)
        message_code = list(it.starmap(self.generate_message, zip(message_ctx, descriptor.nested_type)))
        # Generate init
        init_ctx = LocationContext(filtered_loc, ctx.src_loc_code, ctx.src_loc_index, ctx.indent + 1)
        init_method = self.generate_init_method(init_ctx, descriptor)
        return self.MESSAGE_TEMPLATE \
            .replace("{INDENT}", self.INDENT_TEMPLATE * ctx.indent) \
            .replace("{NEXT_INDENT}", self.INDENT_TEMPLATE * (ctx.indent + 1)) \
            .replace("{CLASS_NAME}", descriptor.name) \
            .replace("{DOC_COMMENT}", formatted_comment) \
            .replace("{NESTED_ENUMS}", "\n".join(enum_code)) \
            .replace("{NESTED_MESSAGES}", "\n".join(message_code)) \
            .replace("{INIT_METHOD}", init_method)
    def generate_init_method(self, ctx: LocationContext, descriptor: pb_desc.DescriptorProto) -> str:
        """Generate the __init__ method for a message class: one optional,
        type-hinted parameter and one 'self.x = x' assignment per field."""
        # Params use indent + 2, vars indent + 1 -- presumably because params
        # render as continuation lines of the def while vars are the body.
        fields_ctx = self.index_sub_ctx(ctx.src_locations, self._message_field_field, ctx.indent + 2)
        params_iter = it.starmap(self.generate_init_param, zip(fields_ctx, descriptor.field, it.repeat(descriptor)))
        fields_ctx = self.index_sub_ctx(ctx.src_locations, self._message_field_field, ctx.indent + 1)
        vars_iter = it.starmap(self.generate_init_var, zip(fields_ctx, descriptor.field))
        # A message with no fields still needs a syntactically valid body
        vars_pass = self.INIT_PASS_TEMPLATE.replace("{INDENT}", self.INDENT_TEMPLATE * (ctx.indent + 1))
        init_params = "".join(params_iter) if len(descriptor.field) > 0 else ""
        init_vars = "\n".join(vars_iter) if len(descriptor.field) > 0 else vars_pass
        # Do not apply the PEP flag if there are no parameters
        pep_flag = " # noqa" if len(descriptor.field) > 0 else ""
        return self.INIT_METHOD_TEMPLATE \
            .replace("{INDENT}", self.INDENT_TEMPLATE * ctx.indent) \
            .replace("{NEXT_INDENT}", self.INDENT_TEMPLATE * (ctx.indent + 1)) \
            .replace("{INIT_PARAMS}", init_params) \
            .replace("{INIT_VARS}", init_vars) \
            .replace("{PEP_FLAG}", pep_flag)
def generate_init_param(self, ctx: LocationContext, descriptor: pb_desc.FieldDescriptorProto,
message: pb_desc.DescriptorProto):
field_index = ctx.src_loc_index
# Do not apply the PEP flag before the first parameter (i.e. against the 'self' parameter)
pep_flag = " # noqa" if field_index > 0 else ""
field_type = self.python_field_type(descriptor, message)
# Make all fields optional for now
field_type = "tp.Optional[" + field_type + "] = None"
# TODO: For dict and list types, use an empty container
# Since minimum Python version for TRAC will now be 3.7,
# We can change the generator to output dataclasses
# For the time being, this implementation allows work on the engine to proceed
return self.INIT_PARAM_TEMPLATE \
.replace("{INDENT}", self.INDENT_TEMPLATE * ctx.indent) \
.replace("{PEP_FLAG}", pep_flag) \
.replace("{PARAM_NAME}", descriptor.name) \
.replace("{PARAM_TYPE}", field_type)
def generate_init_var(self, ctx: LocationContext, descriptor: pb_desc.FieldDescriptorProto):
filtered_loc = self.filter_src_location(ctx.src_locations, ctx.src_loc_code, ctx.src_loc_index)
raw_comment = self.comment_for_current_location(filtered_loc)
formatted_comment = self.comment_inline_translation(ctx, raw_comment)
return self.INIT_VAR_TEMPLATE \
.replace("{INDENT}", self.INDENT_TEMPLATE * ctx.indent) \
.replace("{PARAM_NAME}", descriptor.name) \
.replace("{IVAR_NAME}", descriptor.name) \
.replace("{IVAR_COMMENT}", formatted_comment)
def python_field_type(self, descriptor: pb_desc.FieldDescriptorProto, message: pb_desc.DescriptorProto):
base_type = self.python_base_type(descriptor)
if descriptor.label == descriptor.Label.LABEL_REPEATED:
sub_type_pattern = re.compile("\'{}\\.(.*)\'".format(message.name))
sub_type_match = sub_type_pattern.match(base_type)
if sub_type_match:
sub_type = sub_type_match.group(1)
sub_descriptor = next(filter(lambda msg: msg.name == sub_type, message.nested_type))
key_type = self.python_base_type(sub_descriptor.field[0])
value_type = self.python_base_type(sub_descriptor.field[1])
return "tp.Dict[{}, {}]".format(key_type, value_type)
else:
return "tp.List[{}]".format(base_type)
else:
return base_type
def python_base_type(self, descriptor: pb_desc.FieldDescriptorProto):
# Messages (classes) and enums use the type name declared in the field
if descriptor.type == descriptor.Type.TYPE_MESSAGE or descriptor.type == descriptor.Type.TYPE_ENUM:
type_name = descriptor.type_name
relative_name = type_name.replace(".trac.metadata.", "", 1)
# Quote all object type names for now
# Types that are already declared or imported could be hinted without quotes
# This would require building a map of type names and tracking which ones are already declared
# Quoted names just work everywhere!
# There is no integrity check, but, protoc will already do this
return "'{}'".format(relative_name)
# For built in types, use a static mapping of proto type names
if descriptor.type in self.PROTO_TYPE_MAPPING:
return self.PROTO_TYPE_MAPPING[descriptor.type].__name__
# Any unrecognised type is an error
raise RuntimeError(
"Unknown type in protobuf field descriptor: field = {}, type code = {}"
.format(descriptor.name, descriptor.type))
    def generate_enum(self, ctx: LocationContext, descriptor: pb_desc.EnumDescriptorProto) -> str:
        """Generate a Python enum.Enum class definition for one protobuf enum,
        including its doc comment and commented values."""
        filtered_loc = self.filter_src_location(ctx.src_locations, ctx.src_loc_code, ctx.src_loc_index)
        # Generate enum values
        values_ctx = self.index_sub_ctx(filtered_loc, self._enum_value_field, ctx.indent + 1)
        values_code = list(it.starmap(self.generate_enum_value, zip(values_ctx, descriptor.value)))
        # Doc comment for the enum class itself
        raw_comment = self.comment_for_current_location(filtered_loc)
        formatted_comment = self.comment_block_translation(ctx, raw_comment)
        # Populate the template
        code = self.ENUM_TEMPLATE \
            .replace("{INDENT}", self.INDENT_TEMPLATE * ctx.indent) \
            .replace("{NEXT_INDENT}", self.INDENT_TEMPLATE * (ctx.indent + 1)) \
            .replace("{DOC_COMMENT}", formatted_comment) \
            .replace("{CLASS_NAME}", descriptor.name) \
            .replace("{ENUM_VALUES}", "\n".join(values_code))
        return code
def generate_enum_value(self, ctx: LocationContext, descriptor: | |
test ' + test) #TODO
# ---------------------------------------------------------------------------
# Data-driven replacement for the long per-test-ID if chains.
#
# IDs in _SKIPPED_TEST_RANGES are recognised but not implemented yet: they
# only log a warning (TODO: implement these tests).  IDs in _B210_ALU_ENABLE
# map to the key path of the 'tests' configuration entry they switch on.
# Exactly one branch can match a given ID, as in the original chain.
# ---------------------------------------------------------------------------
_SKIPPED_TEST_RANGES = (
    (15811, 15823),
    (16000, 16005),
    (16100, 16105),
    (16300, 16305),
    (16500, 16505),
    (17000, 17005),
    (17500, 17505),
    (18000, 18005),
    (18500, 18505),
    (18606, 18611),
    (18618, 18623),
    (25500, 25523),
    (25700, 25705),
)
# Test IDs are zero-padded six-digit strings
_SKIPPED_TESTS = frozenset(
    '%06d' % _n
    for _lo, _hi in _SKIPPED_TEST_RANGES
    for _n in range(_lo, _hi + 1))

# Enable map: '0176xx' = remote b210 / fdd / bandrich UE,
# '0186xx' = b210 / tdd / 3276 UE.  Within each family the ID encodes
# protocol (udp/tcp), direction (ul/dl) and bandwidth (5/10/20).
_B210_ALU_ENABLE = {}
for _p, _proto in enumerate(('udp', 'tcp')):
    for _d, _direction in enumerate(('ul', 'dl')):
        for _b, _bw in enumerate(('5', '10', '20')):
            # 0176xx: udp block at offset 0, tcp block at offset 6
            _B210_ALU_ENABLE['0176%02d' % (6 * _p + 3 * _d + _b)] = \
                ('remote b210', 'alu', 'fdd', _bw, 'bandrich', _proto, _direction)
            # 0186xx: udp block at offset 0, tcp block at offset 12
            # (offsets 06-11 and 18-23 are in _SKIPPED_TEST_RANGES)
            _B210_ALU_ENABLE['0186%02d' % (12 * _p + 3 * _d + _b)] = \
                ('b210', 'alu', 'tdd', _bw, '3276', _proto, _direction)

if test in _SKIPPED_TESTS:
    log('WARNING: skip test ' + test)  # TODO: implement these tests
elif test in _B210_ALU_ENABLE:
    _radio, _epc, _duplex, _bw, _ue, _proto, _direction = _B210_ALU_ENABLE[test]
    tests[_radio][_epc][_duplex][_bw][_ue][_proto][_direction] = True
from alu_test import run_b210_alu
#B210 ALU tests
run_b210_alu(tests, openair_dir, oai_user, oai_password, env)
#for test in todo_tests:
# action = test.findtext('class')
# if action != 'lte-softmodem':
# continue
# if not "start_ltebox" in test.findtext('EPC_main_exec'):
# continue
# id = test.get('id')
# log("INFO: Running ALU test " + id)
# logdir = openair_dir + "/cmake_targets/autotests/log/" + id
# quickshell("mkdir -p " + logdir)
# epc_machine = test.findtext('EPC')
# enb_machine = test.findtext('eNB')
# ue_machine = test.findtext('UE')
#
# #event object used to wait for several tasks at once
# event = threading.Event()
#
# #launch HSS, wait for prompt
# log("INFO: " + id + ": run HSS")
# task_hss = Task("actions/alu_hss.bash",
# "ALU HSS",
# epc_machine,
# oai_user,
# oai_password,
# env,
# logdir + "/alu_hss." + epc_machine, event=event)
# task_hss.waitlog('S6AS_SIM-> ')
#
# #then launch EPC, wait for connection on HSS side
# log("INFO: " + id + ": run EPC")
# task = Task("actions/alu_epc.bash",
# "ALU EPC",
# epc_machine,
# oai_user,
# oai_password,
# env,
# logdir + "/alu_epc." + epc_machine)
# ret = task.wait()
# if ret != 0:
# log("ERROR: EPC start failure");
# os._exit(1)
# task_hss.waitlog('Connected\n')
#
# #compile softmodem
# log("INFO: " + id + ": compile softmodem")
# envcomp = list(env)
# envcomp.append('BUILD_ARGUMENTS="' +
# test.findtext('eNB_compile_prog_args') + '"')
# #we don't care about BUILD_OUTPUT but it's required (TODO: change that)
# envcomp.append('BUILD_OUTPUT=/')
# task = Task("actions/compilation.bash",
# "compile softmodem",
# enb_machine,
# oai_user,
# oai_password,
# envcomp,
# logdir + "/compile_softmodem." + enb_machine)
# ret = task.wait()
# if ret != 0:
# log("ERROR: softmodem compilation failure");
# os._exit(1)
#
## #copy wanted configuration file
## quickshell("sshpass -p " + oai_password +
## " scp config/enb.band7.tm1.usrpb210.conf " +
## oai_user + "@" + enb_machine + ":/tmp/enb.conf")
#
# #run softmodem
# log("INFO: " + id + ": run softmodem")
# task_enb = Task("actions/run_enb.bash",
# "run softmodem",
# enb_machine,
# oai_user,
# oai_password,
# env,
# logdir + "/run_softmodem." + enb_machine, event=event)
# task_enb.waitlog('got sync')
#
# #start UE
# log("INFO: " + id + ": start bandrich UE")
# task_ue = Task("actions/start_bandrich.bash",
# "start bandrich UE",
# ue_machine,
# oai_user,
# oai_password,
# env,
# logdir + "/start_bandrich." + ue_machine, event=event)
# task_ue.waitlog("local IP address", event=event)
#
# event.wait()
#
# #at this point one task has died or we have the line in the log
# if task_ue.waitlog_state != WAITLOG_SUCCESS:
# log("ERROR: " + id + ": bandrich UE did not connect")
# os._exit(1)
#
# event.clear()
#
# if ( not task_enb.alive() or
# not task_hss.alive() or
# not task_ue.alive()):
# log("ERROR: " + id + ": eNB or UE tasks died")
# os._exit(1)
#
# #get bandrich UE IP
# l = open(task_ue.logfile, "r").read()
# ue_ip = re.search("local IP address (.*)\n", l).groups()[0]
# log("INFO: " + id + ": bandrich UE IP address: " + ue_ip)
#
# #run traffic
# log("INFO: " + id + ": run downlink TCP traffic")
#
# log("INFO: " + id + ": launch server")
# | |
from abc import ABC, abstractmethod
from typing import Callable, List, Union
import numpy as np
from torch import Tensor
from torch.nn import Module
from torch.optim import Optimizer
from torch.utils.data import DataLoader
# Supported types of loss and metrics values (anything the framework can log):
MetricValueType = Union[int, float, np.ndarray, Tensor]
# Supported types of metrics: a callable taking (y_pred, y_true), or a torch Module:
MetricFunctionType = Union[Callable[[Tensor, Tensor], MetricValueType], Module]
class Callback(ABC):
"""
Abstract class for a callback to MLRun's pytorch framework package. Each callback must implement this class so it
could be used in MLRun's PyTorch interface. If you wish to implement custom training / evaluation, you should
consider using the 'CallbacksHandler' class.
"""
    class _ObjectKeys:
        """
        Keys for the objects dictionary of the callback. Each callback can choose what to store in its objects
        dictionary.
        """
        MODEL = "model"  # the torch Module being trained / evaluated
        TRAINING_SET = "training_set"  # DataLoader used for training
        VALIDATION_SET = "validation_set"  # DataLoader used for validation
        LOSS_FUNCTION = "loss_function"  # loss Module
        OPTIMIZER = "optimizer"  # torch Optimizer
        METRIC_FUNCTIONS = "metric_functions"  # list of metric callables / Modules
        SCHEDULER = "scheduler"  # learning-rate scheduler (type not constrained here)
    @abstractmethod
    def __init__(self):
        """
        Initialize the callback with an empty objects dictionary. The objects should be registered on setup.

        Marked abstract so this base class cannot be instantiated directly; subclasses
        implementing __init__ should call super().__init__() to get '_objects'.
        """
        self._objects = {}
    def on_horovod_check(self, rank: int) -> bool:
        """
        Check whether this callback is fitting to run by the given horovod rank (worker).

        :param rank: The horovod rank (worker) id.

        :return: True if the callback is ok to run on this rank and false if not.

        NOTE(review): the default body returns None (falsy) despite the '-> bool'
        annotation -- confirm callers treat None as "do not run".
        """
        pass
def on_setup(
self,
model: Module = None,
training_set: DataLoader = None,
validation_set: DataLoader = None,
loss_function: Module = None,
optimizer: Optimizer = None,
metric_functions: List[MetricFunctionType] = None,
scheduler=None,
):
"""
Basic setup command, storing all the given objects in the callback's objects dictionary.
:param model: The model to be stored in this callback.
:param training_set: The training set to be stored in this callback.
:param validation_set: The validation set to be stored in this callback.
:param loss_function: The loss function to be stored in this callback.
:param optimizer: The optimizer to be stored in this callback.
:param metric_functions: The metric functions to be stored in this callback.
:param scheduler: The scheduler to be stored in this callback.
"""
self._objects[self._ObjectKeys.MODEL] = model
self._objects[self._ObjectKeys.TRAINING_SET] = training_set
self._objects[self._ObjectKeys.VALIDATION_SET] = validation_set
self._objects[self._ObjectKeys.LOSS_FUNCTION] = loss_function
self._objects[self._ObjectKeys.OPTIMIZER] = optimizer
self._objects[self._ObjectKeys.METRIC_FUNCTIONS] = metric_functions
self._objects[self._ObjectKeys.SCHEDULER] = scheduler
    def on_run_begin(self):
        """
        After the run begins, this method will be called. Does nothing by default;
        override to add behavior.
        """
        pass
    def on_run_end(self):
        """
        Before the run ends, this method will be called. Does nothing by default;
        override to add behavior.
        """
        pass
    def on_epoch_begin(self, epoch: int):
        """
        After the epoch begins, this method will be called. Does nothing by default.

        :param epoch: The epoch that is about to begin.
        """
        pass
    def on_epoch_end(self, epoch: int) -> bool:
        """
        Before the epoch ends, this method will be called.

        :param epoch: The epoch that has just ended.

        :return: Can optionally return a boolean value indicating whether or not to continue the training process.
                 The default implementation always continues (True).
        """
        return True
    def on_train_begin(self):
        """
        After the training of the current epoch begins, this method will be called. Does nothing by default.
        """
        pass
    def on_train_end(self) -> bool:
        """
        Before the training of the current epoch ends, this method will be called.

        :return: Can optionally return a boolean value indicating whether or not to continue the training process.
                 The default implementation always continues (True).
        """
        return True
    def on_validation_begin(self):
        """
        After the validation (in a training case it will be per epoch) begins, this method will be called.
        Does nothing by default.
        """
        pass
    def on_validation_end(
        self, loss_value: MetricValueType, metric_values: List[float]
    ) -> bool:
        """
        Before the validation (in a training case it will be per epoch) ends, this method will be called.

        :param loss_value:    The loss summary of this validation.
        :param metric_values: The metrics summaries of this validation.

        :return: Can optionally return a boolean value indicating whether or not to continue the training /
                 evaluation process. The default implementation always continues (True).
        """
        return True
    def on_train_batch_begin(self, batch: int, x: Tensor, y_true: Tensor):
        """
        After the training of the given batch begins, this method will be called. Does nothing by default.

        :param batch:  The current batch iteration of when this method is called.
        :param x:      The input of the current batch.
        :param y_true: The ground truth value of the current batch.
        """
        pass
    def on_train_batch_end(
        self, batch: int, x: Tensor, y_pred: Tensor, y_true: Tensor
    ) -> bool:
        """
        Before the training of the given batch ends, this method will be called.

        :param batch:  The current batch iteration of when this method is called.
        :param x:      The input of the current batch.
        :param y_pred: The prediction (output) of the model for this batch's input ('x').
        :param y_true: The ground truth value of the current batch.

        :return: Can optionally return a boolean value indicating whether or not to continue the training process.
                 The default implementation always continues (True).
        """
        return True
    def on_validation_batch_begin(self, batch: int, x: Tensor, y_true: Tensor):
        """
        After the validation of the given batch begins, this method will be called. Does nothing by default.

        :param batch:  The current batch iteration of when this method is called.
        :param x:      The input of the current batch.
        :param y_true: The ground truth value of the current batch.
        """
        pass
    def on_validation_batch_end(
        self, batch: int, x: Tensor, y_pred: Tensor, y_true: Tensor
    ) -> bool:
        """
        Before the validation of the given batch ends, this method will be called.

        :param batch:  The current batch iteration of when this method is called.
        :param x:      The input of the current batch.
        :param y_pred: The prediction (output) of the model for this batch's input ('x').
        :param y_true: The ground truth value of the current batch.

        :return: Can optionally return a boolean value indicating whether or not to continue the training /
                 evaluation process. The default implementation always continues (True).
        """
        return True
    def on_inference_begin(self, x: Tensor):
        """
        Before the inference of the current batch sample into the model, this method will be called to process the
        input. Does nothing by default.

        :param x: The input of the current batch.
        """
        pass
    def on_inference_end(self, y_pred: Tensor, y_true: Tensor):
        """
        After the inference of the current batch sample, this method will be called to process the output along side the
        current batch ground truth. Does nothing by default.

        :param y_pred: The prediction (output) of the model for this batch's input ('x').
        :param y_true: The ground truth value of the current batch.
        """
        pass
    def on_train_loss_begin(self):
        """
        Before the training calculation of the loss, this method will be called. Does nothing by default.
        """
        pass
    def on_train_loss_end(self, loss_value: MetricValueType):
        """
        After the training calculation of the loss, this method will be called. Does nothing by default.

        :param loss_value: The recent loss value calculated during training.
        """
        pass
    def on_validation_loss_begin(self):
        """
        Before the validating calculation of the loss, this method will be called. Does nothing by default.
        """
        pass
    def on_validation_loss_end(self, loss_value: MetricValueType):
        """
        After the validating calculation of the loss, this method will be called. Does nothing by default.

        :param loss_value: The recent loss value calculated during validation.
        """
        pass
    def on_train_metrics_begin(self):
        """
        Before the training calculation of the metrics, this method will be called. Does nothing by default.
        """
        pass
    def on_train_metrics_end(self, metric_values: List[MetricValueType]):
        """
        After the training calculation of the metrics, this method will be called. Does nothing by default.

        :param metric_values: The recent metric values calculated during training.
        """
        pass
    def on_validation_metrics_begin(self):
        """
        Before the validating calculation of the metrics, this method will be called. Does nothing by default.
        """
        pass
    def on_validation_metrics_end(self, metric_values: List[MetricValueType]):
        """
        After the validating calculation of the metrics, this method will be called. Does nothing by default.

        :param metric_values: The recent metric values calculated during validation.
        """
        pass
    def on_backward_begin(self):
        """
        Before the backward propagation of the loss function, this method will be called. Does nothing by default.
        """
        pass
    def on_backward_end(self):
        """
        After the backward propagation of the loss function, this method will be called. Does nothing by default.
        """
        pass
    def on_optimizer_step_begin(self):
        """
        Before the optimizer 'step' and 'zero_grad' methods are called, this method will be called.
        Does nothing by default.
        """
        pass
    def on_optimizer_step_end(self):
        """
        After the optimizer 'step' and 'zero_grad' methods are called, this method will be called.
        Does nothing by default.
        """
        pass
    def on_scheduler_step_begin(self):
        """
        Before the scheduler 'step' method is called, this method will be called. Does nothing by default.
        """
        pass
    def on_scheduler_step_end(self):
        """
        After the scheduler 'step' method is called, this method will be called. Does nothing by default.
        """
        pass
def on_call_check(self) -> bool:
"""
Before the callbacks handler is calling its callbacks, this method will be called to know if this callback
should run. For example, in case | |
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from cogitare.core.model import Model
from cogitare.utils import not_training, training
@add_metaclass(ABCMeta)
class SequentialModel(Model):
"""
.. warning:: This module is experimental and its interface may change in future releases.
SequentialModel is an extension of :class:`~cogitare.Model` that includes support for sequential
models. It's designed to work with RNNs, such as LSTM and GRUs, and can be easily used for any
model that operates over timestep per timestep.
If you are using a RNN, but passing the whole sequence as input, you should consider using
the :class:`~cogitare.Model` interface. This interface is designed for timestep per timestep
and can be used for Many-to-Many models and for Many-to-One.
While training, you can use plugins to watch and interact with the model.
The plugin works like an event mechanism, you register a callback function to
a specific event, and then you gain access to some variables of the model at
specific steps of the training process.
Check the :meth:`~cogitare.Model.register_plugin` for more information about the
available events and variables that the model can interact with.
Methods that your model must implement:
- **forward** (data, hidden, timestep, seqlen): receives the data at
the current timestep, the hidden state, the current timestep, and the sequence size;
- **loss** (output, data, hidden, timestep, seqlen): returns the loss
at the current timestep;
- **get_initial_state** (self, batch): start the RNN hidden state.
Expected input on :meth:`~cogitare.Model.learn`:
- **dataset** : an iterator, that returns one batch of samples per
iteration. Each bach is an iterator, containing data for each timestep.
The batch can be of any type (list, numpy array, tensor, string, etcs).
It is recommended to wrap your dataset using
the :class:`~cogitare.data.SequentialDataSet` object,
that provides a high-performance data loading interface.
"""
    def __init__(self):
        # Register the two sequential-model hooks before calling the parent
        # constructor (which presumably consumes self.valid_hooks -- assumes
        # Model declares valid_hooks as a class attribute; TODO confirm).
        self.valid_hooks = self.valid_hooks + ('on_start_timestep', 'on_end_timestep')
        super(SequentialModel, self).__init__()
    @abstractmethod
    def get_initial_state(self, batch):
        """Returns the initial state of the RNN. Subclasses must implement this.

        Args:
            batch: the current batch.

        Returns:
            state (torch.Tensor): the initial state.
        """
        pass
def forward_seq(self, sequence):
"""Forward a whole sequence in the model, and return a list of the output
at each timestep.
Args:
sequence (iterable): an iterable with each item being the data for
the current timestep.
Retuns:
output (iterable): a list with the :meth:`~cogitare.SequentialModel.forward` output for each timestep.
"""
outputs = []
hidden = self.get_initial_state([sequence])
seqlen = len(sequence)
for timestep, data in enumerate(sequence, 1):
output, hidden = self.forward(data, hidden, timestep, seqlen)
outputs.append(output)
return outputs
    @abstractmethod
    def forward(self, data, hidden, timestep, seqlen):
        """
        .. note:: When developing a Model, the class must implement this method.

        The method receives four parameters: the data obtained by the timestep iterator,
        the hidden state at the current timestep, the timestep, and the length of the sequence.

        It must return a tuple with the model output after forwarding the data and the new hidden state.

        Args:
            data: this is the data got from iterating over the timesteps, got from
                iterating over the batches in the dataset provided in the
                :meth:`~cogitare.Model.learn` method. Its type and shape depend exclusively on
                the input dataset, no transformations or type checking are made during training.
                For most models, this will be a tuple containing ``(x_data_t, y_data_t)``, but can be
                anything.
            hidden (torch.Tensor): the hidden state at the current timestep. If this is the first timestep,
                the hidden state is got from :meth:`~cogitare.SequentialModel.get_initial_state`. Otherwise, it is got
                from the :meth:`~cogitare.SequentialModel.forward` returned value.
            timestep (int): indicates the current timestep (from 1 to seqlen)
            seqlen (int): the number of timesteps in the sequence.

        Returns:
            (output, hidden): the data after processing the input data, and the new hidden state.
            Usually, these are :class:`torch.Tensor`.
        """
        pass
    @abstractmethod
    def loss(self, output, data, hidden, timestep, seqlen):
        """
        .. note:: When developing a Model, the class must implement this method.

        It will receive the output and the hidden state of the :meth:`~cogitare.Model.forward` method,
        with the data obtained by the timestep iterator (the same used in forward),
        and must return the model loss considering the model output and expected output.

        If the model is Many-to-Many, it should return a valid loss for each timestep.
        If the model is Many-to-One, it should return a valid loss in the last timestep (
        when timestep == seqlen), and return None otherwise.

        Args:
            output: the :meth:`~cogitare.SequentialModel.forward` output
            data: this is the data got from iterating over the timesteps, got from
                iterating over the batches in the dataset provided in the
                :meth:`~cogitare.Model.learn` method. Its type and shape depend exclusively on
                the input dataset, no transformations or type checking are made during training.
                For most models, this will be a tuple containing ``(x_data_t, y_data_t)``, but can be
                anything.
            hidden (torch.Tensor): the hidden state at the current timestep. If this is the first timestep,
                the hidden state is got from :meth:`~cogitare.SequentialModel.get_initial_state`. Otherwise, it is got
                from the :meth:`~cogitare.SequentialModel.forward` returned value.
            timestep (int): indicates the current timestep (from 1 to seqlen)
            seqlen (int): the number of timesteps in the sequence.

        Returns:
            loss (torch.Tensor, None): the model loss. The loss will be used to backpropagate the errors.
        """
        pass
    def _forward_batch(self, batch_num, batch, optimizer):
        # Train on one batch: forward every timestep, accumulate the per-timestep
        # losses, then backpropagate once through the whole sequence and step
        # the optimizer. Returns the mean of the recorded timestep losses.
        seqlen = len(batch)
        losses = []
        total_loss = 0
        self.state['num_timesteps'] = seqlen
        self.state['losses_timestep'] = losses
        self.state['current_timestep'] = None
        optimizer.zero_grad()
        hidden = self.get_initial_state(batch)
        for timestep, data in enumerate(batch, 1):
            self.state['current_timestep'] = timestep
            self.state['sample_at_timestep'] = data
            self.hook('on_start_timestep')
            output, hidden = self.forward(data, hidden, timestep, seqlen)
            loss = self.loss(output, data, hidden, timestep, seqlen)
            # Many-to-One models return None on non-final timesteps; skip those.
            if loss is not None:
                total_loss += loss
                losses.append(loss.data.item())
            self.state['output_at_timestep'] = output
            self.hook('on_end_timestep')
        self.state['output'] = output
        self.hook('before_backward')
        # NOTE(review): if loss() returned None for every timestep, total_loss
        # is still the int 0 (no .backward()) and len(losses) == 0 makes the
        # final division raise -- confirm callers guarantee at least one loss.
        total_loss.backward()
        self.hook('before_step')
        optimizer.step()
        return sum(losses) / len(losses)
def _start_learn_state(self, dataset, optimizer, validation_dataset, max_epochs):
super(SequentialModel, self)._start_learn_state(dataset, optimizer,
validation_dataset, max_epochs)
self.state.update({'num_timesteps': None,
'losses_timestep': None,
'current_timestep': None,
'sample_at_timestep': None,
'output_at_timestep': None})
    @not_training
    def evaluate(self, dataset, *args, **kwargs):
        """
        Iterate over batches in the dataset and returns a list of the losses of each batch.

        This method does not affect training variables and can be used to evaluate the
        model performance in a different data (such as validation and test sets).

        Args:
            dataset: batch-timestep iterator
            args/kwargs: :meth:`~cogitare.SequentialModel.forward` arguments. If provided, the
                forward will receive these parameters.

        Returns:
            output (list): the losses in the provided batches, one loss per batch.
        """
        losses = []
        for batch in dataset:
            hidden = self.get_initial_state(batch)
            seqlen = len(batch)
            losses_batch = []
            for timestep, data in enumerate(batch, 1):
                output, hidden = self.forward(data, hidden, timestep, seqlen)
                loss = self.loss(output, data, hidden, timestep, seqlen)
                # Many-to-One models return None on non-final timesteps.
                if loss is not None:
                    losses_batch.append(loss.data.item())
            # NOTE(review): raises ZeroDivisionError if loss() returned None for
            # every timestep of a batch -- TODO confirm this cannot happen.
            losses.append(sum(losses_batch) / len(losses_batch))
        return losses
@training
def learn(self, dataset, optimizer, validation_dataset=None, max_epochs=50):
"""
Optimize the model parameters using the dataset. This function use the algorithm::
for epoch in max_epochs:
try:
for batch in data:
# forward the data
hidden = get_initial_state(batch)
seqlen = len(batch[0])
for idx, timestep in enumerate(batch, 1):
output, hidden = forward(timestep, hidden, idx, seqlen)
error = loss(output, timestep, hidden, idx, seqlen)
if error is not None:
# optimize the parameters
backward(error)
optimizer.step()
if validation_dataset:
evaluate_model(validation_dataset)
except StopTraining:
# stop the training process if request by a plugin
If the ``validation_dataset`` is present, it can be used by plugins to evaluate the
validation/test loss/error during training.
To achieve a better performance, and have access to everyday dataset manipulation
features, it's recommended to use the :class:`~cogitare.data.SequentialDataSet` class. It
provides a interface that loads batches using multiple threads/processes
and provides useful tasks such as data splitting, async data loading, shuffling, and more. For
sequential data with variable length, it can automatically pad the sequences such that all of them
have the same length.
Args:
dataset (iterator): an iterator that returns one batch per iteration.
Each batch is an iterator, where each item is a sequence. To have a better
performance and a easy to use interface, it is recommended to
use the :class:`~cogitare.data.SequentialDataSet`.
optimizer (torch.optim): the instance of a :class:`torch.optim.Optimizer` object.
validation_dataset (iterator, optional): if provided, must have the same
caracteristics that the ``dataset``. This may be used by the model and
by plugins | |
= subprocess.check_output(['ioreg', '-rd1', '-c',
'IOPlatformExpertDevice'])
ret = re.search('"IOPlatformSerialNumber" = "(.*)"', output)
if ret:
return ret.group(1)
# Try factory device id
try:
import factory_common # pylint: disable=unused-variable
from cros.factory.test import testlog_goofy
return testlog_goofy.GetDeviceID()
except Exception:
pass
# Try DMI product UUID
try:
with open('/sys/class/dmi/id/product_uuid', 'r') as f:
return f.read().strip()
except Exception:
pass
# Use MAC address if non is available
try:
macs = []
ifaces = sorted(os.listdir('/sys/class/net'))
for iface in ifaces:
if iface == 'lo':
continue
with open('/sys/class/net/%s/address' % iface, 'r') as f:
macs.append(f.read().strip())
return ';'.join(macs)
except Exception:
pass
raise RuntimeError("can't generate machine ID")
  def GetProcessWorkingDirectory(self, pid):
    """Return the current working directory of process `pid`.

    On Linux this reads /proc/<pid>/cwd; on Darwin it calls proc_pidinfo()
    through ctypes. Raises RuntimeError on other platforms.
    """
    if self._platform == 'Linux':
      return os.readlink('/proc/%d/cwd' % pid)
    elif self._platform == 'Darwin':
      # Constants presumably mirror <sys/proc_info.h>: the cwd path lives at a
      # fixed offset inside struct proc_vnodepathinfo -- TODO confirm against
      # the macOS headers for the targeted OS version.
      PROC_PIDVNODEPATHINFO = 9
      proc_vnodepathinfo_size = 2352
      vid_path_offset = 152
      proc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('libproc'))
      buf = ctypes.create_string_buffer('\0' * proc_vnodepathinfo_size)
      proc.proc_pidinfo(pid, PROC_PIDVNODEPATHINFO, 0,
                        ctypes.byref(buf), proc_vnodepathinfo_size)
      # Truncate the raw struct at the first NUL after the path offset.
      # NOTE(review): this is Python-2-style str/bytes handling.
      buf = buf.raw[vid_path_offset:]
      n = buf.index('\0')
      return buf[:n]
    else:
      raise RuntimeError('GetProcessWorkingDirectory: unsupported platform')
  def Reset(self):
    """Reset state and clear request handlers."""
    if self._sock is not None:
      self._sock.Close()
      self._sock = None
    self._reset.clear()
    # Forget outstanding pings and pending request handlers.
    self._last_ping = 0
    self._requests = {}
    self.LoadProperties()
    self._register_status = DISCONNECTED
def SendMessage(self, msg):
"""Serialize the message and send it through the socket."""
self._sock.Send(json.dumps(msg) + _SEPARATOR)
def SendRequest(self, name, args, handler=None,
timeout=_REQUEST_TIMEOUT_SECS):
if handler and not callable(handler):
raise RequestError('Invalid request handler for msg "%s"' % name)
rid = str(uuid.uuid4())
msg = {'rid': rid, 'timeout': timeout, 'name': name, 'params': args}
if timeout >= 0:
self._requests[rid] = [self.Timestamp(), timeout, handler]
self.SendMessage(msg)
def SendResponse(self, omsg, status, params=None):
msg = {'rid': omsg['rid'], 'response': status, 'params': params}
self.SendMessage(msg)
def HandleTTYControl(self, fd, control_str):
msg = json.loads(control_str)
command = msg['command']
params = msg['params']
if command == 'resize':
# some error happened on websocket
if len(params) != 2:
return
winsize = struct.pack('HHHH', params[0], params[1], 0, 0)
fcntl.ioctl(fd, termios.TIOCSWINSZ, winsize)
else:
logging.warn('Invalid request command "%s"', command)
  def SpawnTTYServer(self, unused_var):
    """Spawn a TTY server and forward I/O to the TCP socket."""
    logging.info('SpawnTTYServer: started')
    try:
      if self._tty_device is None:
        # No fixed device: fork a shell on a fresh pseudo-terminal.
        pid, fd = os.forkpty()
        if pid == 0:
          # Child process: register the tty/session, then exec the shell.
          ttyname = os.ttyname(sys.stdout.fileno())
          try:
            server = GhostRPCServer()
            server.RegisterTTY(self._session_id, ttyname)
            server.RegisterSession(self._session_id, os.getpid())
          except Exception:
            # If ghost is launched without RPC server, the call will fail but we
            # can ignore it.
            pass
          # The directory that contains the current running ghost script
          script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
          env = os.environ.copy()
          env['USER'] = os.getenv('USER', 'root')
          env['HOME'] = os.getenv('HOME', '/root')
          env['PATH'] = os.getenv('PATH') + ':%s' % script_dir
          os.chdir(env['HOME'])
          os.execve(_SHELL, [_SHELL], env)
      else:
        # Attach to the configured serial tty device in raw 115200 mode.
        fd = os.open(self._tty_device, os.O_RDWR)
        tty.setraw(fd)
        attr = termios.tcgetattr(fd)
        attr[0] &= ~(termios.IXON | termios.IXOFF)
        attr[2] |= termios.CLOCAL
        attr[2] &= ~termios.CRTSCTS
        attr[4] = termios.B115200
        attr[5] = termios.B115200
        termios.tcsetattr(fd, termios.TCSANOW, attr)
      # Mutable dict stands in for Python 3's `nonlocal` inside _ProcessBuffer.
      nonlocals = {'control_state': None, 'control_str': ''}
      def _ProcessBuffer(buf):
        # Split the incoming stream into terminal data (written to the pty)
        # and control messages framed by _CONTROL_START/_CONTROL_END bytes.
        write_buffer = ''
        while buf:
          if nonlocals['control_state']:
            if chr(_CONTROL_END) in buf:
              index = buf.index(chr(_CONTROL_END))
              nonlocals['control_str'] += buf[:index]
              self.HandleTTYControl(fd, nonlocals['control_str'])
              nonlocals['control_state'] = None
              nonlocals['control_str'] = ''
              buf = buf[index+1:]
            else:
              nonlocals['control_str'] += buf
              buf = ''
          else:
            if chr(_CONTROL_START) in buf:
              nonlocals['control_state'] = _CONTROL_START
              index = buf.index(chr(_CONTROL_START))
              write_buffer += buf[:index]
              buf = buf[index+1:]
            else:
              write_buffer += buf
              buf = ''
        if write_buffer:
          os.write(fd, write_buffer)
      # Drain anything already buffered on the socket, then pump both ways.
      _ProcessBuffer(self._sock.RecvBuf())
      while True:
        rd, unused_wd, unused_xd = select.select([self._sock, fd], [], [])
        if fd in rd:
          self._sock.Send(os.read(fd, _BUFSIZE))
        if self._sock in rd:
          buf = self._sock.Recv(_BUFSIZE)
          if not buf:
            raise RuntimeError('connection terminated')
          _ProcessBuffer(buf)
    except Exception as e:
      logging.error('SpawnTTYServer: %s', e)
    finally:
      self._sock.Close()
    logging.info('SpawnTTYServer: terminated')
    # Runs in its own ghost process; terminate when the session ends.
    sys.exit(0)
def SpawnShellServer(self, unused_var):
"""Spawn a shell server and forward input/output from/to the TCP socket."""
logging.info('SpawnShellServer: started')
# Add ghost executable to PATH
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
env = os.environ.copy()
env['PATH'] = '%s:%s' % (script_dir, os.getenv('PATH'))
# Execute shell command from HOME directory
os.chdir(os.getenv('HOME', '/tmp'))
p = subprocess.Popen(self._shell_command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, env=env)
def make_non_block(fd):
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
make_non_block(p.stdout)
make_non_block(p.stderr)
try:
p.stdin.write(self._sock.RecvBuf())
while True:
rd, unused_wd, unused_xd = select.select(
[p.stdout, p.stderr, self._sock], [], [])
if p.stdout in rd:
self._sock.Send(p.stdout.read(_BUFSIZE))
if p.stderr in rd:
self._sock.Send(p.stderr.read(_BUFSIZE))
if self._sock in rd:
ret = self._sock.Recv(_BUFSIZE)
if not ret:
raise RuntimeError('connection terminated')
try:
idx = ret.index(_STDIN_CLOSED * 2)
p.stdin.write(ret[:idx])
p.stdin.close()
except ValueError:
p.stdin.write(ret)
p.poll()
if p.returncode != None:
break
except Exception as e:
logging.error('SpawnShellServer: %s', e)
finally:
# Check if the process is terminated. If not, Send SIGTERM to process,
# then wait for 1 second. Send another SIGKILL to make sure the process is
# terminated.
p.poll()
if p.returncode is None:
try:
p.terminate()
time.sleep(1)
p.kill()
except Exception:
pass
p.wait()
self._sock.Close()
logging.info('SpawnShellServer: terminated')
sys.exit(0)
  def InitiateFileOperation(self, unused_var):
    """Kick off the pending file operation described by self._file_op.

    self._file_op is a tuple whose first element is the operation name
    ('download' or 'upload') and whose second element is the file path.
    """
    if self._file_op[0] == 'download':
      try:
        size = os.stat(self._file_op[1]).st_size
      except OSError as e:
        logging.error('InitiateFileOperation: download: %s', e)
        sys.exit(1)
      self.SendRequest('request_to_download',
                       {'terminal_sid': self._terminal_session_id,
                        'filename': os.path.basename(self._file_op[1]),
                        'size': size})
    elif self._file_op[0] == 'upload':
      # timeout=-1: do not track a response; the server answers by streaming.
      self.SendRequest('clear_to_upload', {}, timeout=-1)
      self.StartUploadServer()
    else:
      logging.error('InitiateFileOperation: unknown file operation, ignored')
  def StartDownloadServer(self):
    """Stream the file named in self._file_op[1] to the socket, then exit."""
    logging.info('StartDownloadServer: started')
    try:
      with open(self._file_op[1], 'rb') as f:
        while True:
          data = f.read(_BLOCK_SIZE)
          if not data:
            break
          self._sock.Send(data)
    except Exception as e:
      logging.error('StartDownloadServer: %s', e)
    finally:
      self._sock.Close()
    logging.info('StartDownloadServer: terminated')
    # Runs in a dedicated ghost process; terminate when the transfer is done.
    sys.exit(0)
  def StartUploadServer(self):
    """Receive uploaded data from the socket and write it to self._file_op[1]."""
    logging.info('StartUploadServer: started')
    try:
      filepath = self._file_op[1]
      dirname = os.path.dirname(filepath)
      if not os.path.exists(dirname):
        try:
          os.makedirs(dirname)
        except Exception:
          # Best-effort: open() below reports the real failure, if any.
          pass
      with open(filepath, 'wb') as f:
        if self._file_op[2]:
          # Apply the requested permission bits before writing any data.
          os.fchmod(f.fileno(), self._file_op[2])
        # Data that arrived before this server was ready.
        f.write(self._sock.RecvBuf())
        while True:
          rd, unused_wd, unused_xd = select.select([self._sock], [], [])
          if self._sock in rd:
            buf = self._sock.Recv(_BLOCK_SIZE)
            if not buf:
              break
            f.write(buf)
    except socket.error as e:
      logging.error('StartUploadServer: socket error: %s', e)
    except Exception as e:
      logging.error('StartUploadServer: %s', e)
    finally:
      self._sock.Close()
    logging.info('StartUploadServer: terminated')
    sys.exit(0)
  def SpawnPortForwardServer(self, unused_var):
    """Spawn a port forwarding server and forward I/O to the TCP socket."""
    logging.info('SpawnPortForwardServer: started')

    src_sock = None
    try:
      # Connect to the local target port, then pump data both ways.
      src_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      src_sock.settimeout(_CONNECT_TIMEOUT)
      src_sock.connect(('localhost', self._port))

      # Flush anything buffered before the local connection was established.
      src_sock.send(self._sock.RecvBuf())

      while True:
        rd, unused_wd, unused_xd = select.select([self._sock, src_sock], [], [])
        if self._sock in rd:
          data = self._sock.Recv(_BUFSIZE)
          if not data:
            raise RuntimeError('connection terminated')
          src_sock.send(data)

        if src_sock in rd:
          data = src_sock.recv(_BUFSIZE)
          if not data:
            break
          self._sock.Send(data)
    except Exception as e:
      logging.error('SpawnPortForwardServer: %s', e)
    finally:
      if src_sock:
        src_sock.close()
      self._sock.Close()
    logging.info('SpawnPortForwardServer: terminated')
    sys.exit(0)
  def Ping(self):
    """Send a keep-alive ping request with a 5 second response timeout."""
    def timeout_handler(x):
      # The handler is invoked with None when no response arrived in time.
      if x is None:
        raise PingTimeoutError
    self._last_ping = self.Timestamp()
    self.SendRequest('ping', {}, timeout_handler, 5)
  def HandleFileDownloadRequest(self, msg):
    """Handle a 'file_download' request: verify readability, then spawn a
    FILE-mode ghost to serve the transfer."""
    params = msg['params']
    filepath = params['filename']
    if not os.path.isabs(filepath):
      # Relative paths are resolved against $HOME (or /tmp).
      filepath = os.path.join(os.getenv('HOME', '/tmp'), filepath)
    try:
      # Probe that the file exists and is readable before accepting.
      with open(filepath, 'r') as _:
        pass
    except Exception as e:
      return self.SendResponse(msg, str(e))
    self.SpawnGhost(self.FILE, params['sid'],
                    file_op=('download', filepath))
    self.SendResponse(msg, SUCCESS)
def HandleFileUploadRequest(self, msg):
  """Handle a 'file_upload' request.

  Destination resolution order:
    1. params['dest'], when given: made absolute relative to $HOME
       (or /tmp when HOME is unset); if it names an existing directory
       the bare filename is appended.
    2. The working directory of the terminal session identified by
       params['terminal_sid'], when that session is known.
    3. $HOME (or /tmp when HOME is unset).

  The destination is probed for writability so the requester gets an
  immediate error. Unless params['check_only'] is set, a FILE-mode
  ghost is spawned to receive the upload, and SUCCESS is sent back.
  """
  params = msg['params']
  filename = params['filename']
  # An explicit destination takes precedence over everything else.
  # (The previous 'dest_path = filename' pre-assignment was dead code:
  # it was unconditionally overwritten here.)
  dest_path = params.get('dest', '')
  if dest_path:
    if not os.path.isabs(dest_path):
      dest_path = os.path.join(os.getenv('HOME', '/tmp'), dest_path)
    if os.path.isdir(dest_path):
      dest_path = os.path.join(dest_path, filename)
  else:
    target_dir = os.getenv('HOME', '/tmp')
    # Terminal session ID found: upload to its current working directory.
    # ('in' replaces dict.has_key, which does not exist on Python 3.)
    if 'terminal_sid' in params:
      pid = self._terminal_sid_to_pid.get(params['terminal_sid'], None)
      if pid:
        try:
          target_dir = self.GetProcessWorkingDirectory(pid)
        except Exception as e:
          logging.error(e)
    dest_path = os.path.join(target_dir, filename)
  try:
    os.makedirs(os.path.dirname(dest_path))
  except Exception:
    pass  # best effort: the directory may already exist
  try:
    # Writability probe: create/truncate and immediately close.
    with open(dest_path, 'w') as _:
      pass
  except Exception as e:
    return self.SendResponse(msg, str(e))
  # If not check_only, spawn FILE mode ghost agent to handle upload.
  if not params.get('check_only', False):
    self.SpawnGhost(self.FILE, params['sid'],
                    file_op=('upload', dest_path, params.get('perm', None)))
  self.SendResponse(msg, SUCCESS)
def HandleRequest(self, msg):
  """Dispatch an incoming JSON request to the matching handler.

  'terminal', 'shell' and 'forward' are acknowledged with SUCCESS here;
  'file_download' and 'file_upload' send their own responses; 'upgrade'
  and 'clear_to_download' send no response. Unknown commands are
  silently ignored.
  """
  command = msg['name']
  params = msg['params']
  if command == 'upgrade':
    self.Upgrade()
  elif command == 'terminal':
    self.SpawnGhost(self.TERMINAL, params['sid'],
                    tty_device=params['tty_device'])
    self.SendResponse(msg, SUCCESS)
  elif command == 'shell':
    self.SpawnGhost(self.SHELL, params['sid'], command=params['command'])
    self.SendResponse(msg, SUCCESS)
  elif command == 'file_download':
    self.HandleFileDownloadRequest(msg)
  elif command == 'clear_to_download':
    self.StartDownloadServer()
  elif command == 'file_upload':
    self.HandleFileUploadRequest(msg)
  elif command == 'forward':
    self.SpawnGhost(self.FORWARD, params['sid'], port=params['port'])
    self.SendResponse(msg, SUCCESS)
def HandleResponse(self, response):
  """Route a response to the handler registered under its request id.

  The pending request entry is removed before the handler runs; a
  response whose id has no pending request is logged and dropped.
  """
  rid = str(response['rid'])
  pending = self._requests.pop(rid, None)
  if pending is None:
    logging.warning('Received unsolicited response, ignored')
    return
  handler = pending[2]
  if callable(handler):
    handler(response)
def ParseMessage(self, buf, single=True):
  """Parse JSON message(s) out of buf and dispatch them.

  Args:
    buf: raw data read from the socket.
    single: if True, parse at most one message and push the remainder
        back onto the socket buffer; otherwise split on _SEPARATOR and
        parse every complete message, buffering the incomplete tail.
  """
  if single:
    try:
      index = buf.index(_SEPARATOR)
    except ValueError:
      # No complete message yet; push everything back and wait for more.
      self._sock.UnRecv(buf)
      return
    msgs_json = [buf[:index]]
    # NOTE(review): 'index + 2' assumes _SEPARATOR is exactly two
    # characters long (e.g. '\r\n') -- confirm against its definition.
    self._sock.UnRecv(buf[index + 2:])
  else:
    msgs_json = buf.split(_SEPARATOR)
    # The final element is an incomplete tail (possibly empty); keep it
    # buffered for the next read.
    self._sock.UnRecv(msgs_json.pop())
  for msg_json in msgs_json:
    try:
      msg = json.loads(msg_json)
    except ValueError:
      # Ignore mal-formed message.
      logging.error('mal-formed JSON request, ignored')
      continue
    if 'name' in msg:
      self.HandleRequest(msg)
    elif 'response' in msg:
      self.HandleResponse(msg)
    else:  # Ignore mal-formed message.
      logging.error('mal-formed JSON request, ignored')
def ScanForTimeoutRequests(self):
"""Scans for pending requests which have timed out.
If any timed-out requests are discovered, their handler is called with the
special response value of None.
"""
for rid in self._requests.keys()[:]:
request_time, timeout, handler = self._requests[rid]
| |
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy import interpolate, signal
def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15):
    """
    Mimic a hand-drawn line from (x, y) data


    Definition
    ----------
    def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15):


    Input
    -----
    x, y         array_like; arrays to be modified


    Optional Input
    --------------
    xlim, ylim   data range; the assumed plot range for the modification.
                 If not specified, they will be guessed from the data
    mag          float; the magnitude of the distortion (default: 1.0)
    f1, f2, f3   int, float, int; filtering parameters.
                 f1 gives the size of the window (default: 30)
                 f2 gives the high-frequency cutoff (default: 0.05)
                 f3 gives the size of the filter (default: 15)
                 (Defaults corrected here to match the signature; the
                 earlier docstring stated 50 and 0.01.)


    Output
    ------
    x, y         ndarrays; the modified lines


    References
    ----------
    See xkcd below.


    Examples
    --------
    for line in ax.lines:
        x, y = line.get_data()
        x_int, y_int = xkcd_line(x, y, xlim, ylim, mag, f1, f2, f3)
        line.set_data(x_int, y_int)


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License. The JAMS Python package originates from the former UFZ Python library,
    Department of Computational Hydrosystems, Helmholtz Centre for Environmental
    Research - UFZ, Leipzig, Germany.

    Copyright (c) 2013-2019 <NAME>

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.


    History
    -------
    Written, MC, Mar 2013
    """
    # assure array
    x = np.asarray(x)
    y = np.asarray(y)
    # get limits for rescaling
    if xlim is None: xlim = (x.min(), x.max())
    if ylim is None: ylim = (y.min(), y.max())
    # degenerate (constant) axes borrow the other axis' range
    if xlim[1] == xlim[0]: xlim = ylim
    if ylim[1] == ylim[0]: ylim = xlim
    # scale the data
    x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
    y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
    # compute the total distance along the path
    dx = x_scaled[1:] - x_scaled[:-1]
    dy = y_scaled[1:] - y_scaled[:-1]
    dist_tot = np.sum(np.sqrt(dx*dx + dy*dy))
    # number of interpolated points is proportional to the distance
    Nu = int(200 * dist_tot)
    # u spans slightly beyond [0, 1]; the two extra end points are
    # trimmed below when un-scaling (x_int[1:-1]).
    u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
    # interpolate curve at sampled points
    # k = min(3, len(x) - 1)
    k = min(3, x.size - 1)
    res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
    x_int, y_int = interpolate.splev(u, res[0])
    # we perturb perpendicular to the drawn line
    dx = x_int[2:] - x_int[:-2]
    dy = y_int[2:] - y_int[:-2]
    # horizontal or vertical lines
    # np.sign(np.cumsum(np.random.random(dx.size)-0.5)) emulates something like a Brownian motion
    # i.e. auto-correlated random walks around 0; just the sign interests here.
    eps = np.maximum(np.abs(np.amax(x_scaled)-np.amin(x_scaled)), np.abs(np.amax(y_scaled)-np.amin(y_scaled)))/Nu
    if np.all(np.abs(dx) < eps):
        dx = np.sign(np.cumsum(np.random.random(dx.size)-0.5)) * eps
    if np.all(np.abs(dy) < eps):
        dy = np.sign(np.cumsum(np.random.random(dx.size)-0.5)) * eps
    # equal distances
    if np.all(np.sign(dx) == np.sign(dx[0])):
        dx *= np.sign(np.cumsum(np.random.random(dx.size)-0.5))
    if np.all(np.sign(dy) == np.sign(dy[0])):
        dy *= np.sign(np.cumsum(np.random.random(dx.size)-0.5))
    dist = np.sqrt(dx * dx + dy * dy)
    # create a filtered perturbation
    # coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
    coeffs = mag * np.random.normal(0, 0.01, x_int.size - 2)
    b = signal.firwin(f1, f2*dist_tot, window=('kaiser', f3))
    response = signal.lfilter(b, 1, coeffs)
    # offset each interior point along the local normal direction
    x_int[1:-1] += response * dy / dist
    y_int[1:-1] += response * dx / dist
    # un-scale data
    x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
    y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
    return x_int, y_int
def xkcd(ax,
mag=1.0,
f1=50, f2=0.01, f3=15,
bgcolor='w',
title_size=None,
xaxis_loc=None, yaxis_loc=None,
xaxis_arrow='+', yaxis_arrow='+',
ax_extend=0.1,
xlabel_inside=0., ylabel_inside=0.,
ticks=False,
xticks_inside=0., yticks_inside=0.,
):
"""
Make axis look hand-drawn
This adjusts all lines, text, legends, and axes in the figure to look
like xkcd plots, a webcomic from <NAME>. Other plot elements are not modified.
Definition
----------
def xkcd(ax,
mag=1.0,
f1=50, f2=0.01, f3=15,
bgcolor='w',
title_size=None,
xaxis_loc=None, yaxis_loc=None,
xaxis_arrow='+', yaxis_arrow='+',
ax_extend=0.1,
xlabel_inside=0., ylabel_inside=0.,
ticks=False,
xticks_inside=0., yticks_inside=0.,
):
Input
-----
ax Axes instance the axes instance to be modified.
Optional Input
--------------
mag float; the magnitude of the distortion (default: 1.0)
f1, f2, f3 int, float, int; filtering parameters.
f1 gives the size of the window (default: 50)
f2 gives the high-frequency cutoff (default: 0.01)
f3 gives the size of the filter (default: 15)
bgcolor      str; color around lines so that axes look broken,
i.e. lines are overdrawn on axis (default: 'w')
title_size   float; point size of plot title. If None, same size as axis labels.
(default: None)
xaxis_loc, yaxis_loc   float; the locations to draw the x and y axes in data coordinates.
If not specified, they will be drawn from the bottom left of the plot.
(default: None)
xaxis_arrow, yaxis_arrow str; where to draw arrows on the x/y axes
Options are '+', '-', '+-', or '' (default: '+')
ax_extend float; How far (fractionally) to extend the drawn axes beyond
the original axes limits (default: 0.1)
xlabel_inside, ylabel_inside float: By how much the labels are shifted (default: 0.0)
The last two options currently do not work well with mc_plot_template
ticks True: change tick labels; False: no tick labels are drawn (default: False)
xticks_inside, yticks_inside float: By how much the ticks are shifted (default: 0.0)
Output
------
ax is basically empty and all former elements are redrawn on plot.
Note
----
For reproducible plots, seed the random number generator before each new plot.
If a new line was added, the old lines will look the same. The legend will be different though.
References
----------
This is the modified XKCD plot generator of Jake Vanderplas
http://nbviewer.ipython.org/url/jakevdp.github.com/downloads/notebooks/XKCD_plots.ipynb
The idea for this comes from work by <NAME>
http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg25499.html
Examples
--------
import matplotlib.pylab as plt
fig = plt.figure(1)
ax = fig.add_axes([0.1,0.1,0.5,0.5])
ax.plot(range(10), label='Line')
ax.set_title('Title')
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.legend()
xkcd(ax)
License
-------
This file is part of the JAMS Python package, distributed under the MIT License.
Copyright (c) 2013 <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Mar 2013
"""
import matplotlib.pylab as plt
import matplotlib.font_manager as fm
# remember random state for later resetting
random_state = np.random.get_state()
# Get axes aspect
ext = ax.get_window_extent().extents
aspect = (ext[3] - ext[1]) / (ext[2] - ext[0])
xlim = ax.get_xlim()
ylim = | |
<gh_stars>10-100
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Main objects used to represent all the aspects of a measurement (main task,
attached tools, dependencies, ...)
"""
import logging
from collections import OrderedDict, defaultdict
from itertools import chain
from datetime import date, datetime
from atom.api import (Atom, Dict, Str, Typed, ForwardTyped, Bool, Enum,
Value)
from configobj import ConfigObj
from ..tasks.api import RootTask
from ..utils.traceback import format_exc
from ..utils.configobj_ops import include_configobj
from ..utils.atom_util import HasPrefAtom
LOGGER = logging.getLogger(__name__)
def measurement_plugin():
    """Return the MeasurementPlugin class.

    The import is delayed to function-call time to avoid a circular
    import between this module and .plugin.
    """
    from .plugin import MeasurementPlugin
    return MeasurementPlugin
class MeasurementDependencies(Atom):
    """Container used to store the dependencies of a measurement.

    Build dependencies (needed to reconstruct the task hierarchy) and
    runtime dependencies (needed to execute it) are analysed lazily and
    cached; runtime dependencies must be explicitly collected before use
    and released afterwards.
    """

    #: Reference to the Measurement this object is linked to.
    measurement = ForwardTyped(lambda: Measurement)

    def collect_runtimes(self):
        """Collect all the runtime needed to execute the measurement.

        Those can then be accessed using `get_runtime_dependencies`

        Returns
        -------
        result : bool
            Boolean indicating whether or not the collection succeeded. Note
            that even if the collection failed, some dependencies may have
            been collected (others being unavailable) and must hence be
            released.

        msg : str
            String explaining why the operation failed if it failed.

        errors : dict
            Dictionary describing in detail the errors. If some dependencies
            do exist but cannot be accessed at the time of the query an entry
            'unavailable' will be present.

        """
        # A previous successful collection is still live: nothing to do.
        if self._runtime_dependencies:
            return True, '', {}

        res = self._analyse_task_runtime(self.measurement.root_task)
        if not res[0]:
            return res

        workbench = self.measurement.plugin.workbench

        # Check that we know the dependencies of the hooks
        for h_id, h in chain(self.measurement.pre_hooks.items(),
                             self.measurement.post_hooks.items()):
            if h_id not in self._runtime_map:
                deps = h.list_runtimes(workbench)
                if deps is None:
                    continue  # The hook has no runtime dependencies

                if deps.errors:
                    msg = 'Failed to analyse hook %s runtime dependencies.'
                    return False, msg % h_id, deps.errors

                self._runtime_map[h_id] = deps.dependencies
                self._update_runtime_analysis(deps.dependencies)

        return self._collect_analysed_runtimes()

    def collect_task_runtimes(self, task):
        """Collect all the runtime needed to execute a single task.

        Those can then be accessed using `get_runtime_dependencies`

        Parameters
        ----------
        task :
            Task whose runtime dependencies should be analysed and
            collected. May be None, in which case the call fails.

        Returns
        -------
        result : bool
            Boolean indicating whether or not the collection succeeded. Note
            that even if the collection failed, some dependencies may have
            been collected (others being unavailable) and must hence be
            released.

        msg : str
            String explaining why the operation failed if it failed.

        errors : dict
            Dictionary describing in detail the errors. If some dependencies
            do exist but cannot be accessed at the time of the query an entry
            'unavailable' will be present.

        """
        if task is None:
            return False, 'No task was given', {}

        # A previous successful collection is still live: nothing to do.
        if self._runtime_dependencies:
            return True, '', {}

        res = self._analyse_task_runtime(task)
        if not res[0]:
            return res

        return self._collect_analysed_runtimes()

    def release_runtimes(self):
        """Release all the runtimes collected for the execution.
        """
        if not self._runtime_dependencies:
            return

        workbench = self.measurement.plugin.workbench
        core = workbench.get_plugin('enaml.workbench.core')
        cmd = 'exopy.app.dependencies.release_runtimes'
        core.invoke_command(cmd, dict(owner='exopy.measurement',
                                      dependencies=self._runtime_dependencies))

        # Mark the runtimes as no longer held.
        self._runtime_dependencies = None

    def get_build_dependencies(self):
        """Get the build dependencies associated with the main task.

        Returns
        -------
        dependencies : BuildContainer
            BuildContainer as returned by 'exopy.app.dependencies.collect'.
            The errors member should be checked to detect errors.

        """
        workbench = self.measurement.plugin.workbench
        core = workbench.get_plugin('enaml.workbench.core')

        # Analyse once and cache; subsequent calls reuse the analysis.
        if not self._build_analysis:
            cmd = 'exopy.app.dependencies.analyse'
            deps = core.invoke_command(cmd,
                                       {'obj': self.measurement.root_task,
                                        'dependencies': ['build']})
            if deps.errors:
                return deps
            self._build_analysis = deps.dependencies

        # Collect once and cache; collection errors are NOT cached so a
        # later call can retry.
        if not self._build_dependencies:
            cmd = 'exopy.app.dependencies.collect'
            deps = core.invoke_command(cmd,
                                       dict(dependencies=self._build_analysis,
                                            kind='build'))
            if not deps.errors:
                self._build_dependencies = deps
        else:
            deps = self._build_dependencies

        return deps

    def get_runtime_dependencies(self, id):
        """Access the runtime dependencies associated with a hook or the main
        task.

        Those will correspond to the runtime dependencies that were collected.
        Dependencies that have not been collected, because they are not
        available for example, will not appear in here. However it is
        guaranteed that sections corresponding to each kind of runtime
        dependencies will be present even if they are empty.

        Parameters
        ----------
        id: unicode
            Id of the hook for which to retrieve the runtimes or 'main' for
            the main task.

        Returns
        -------
        dependencies : dict
            Dependencies for the requested measurement component.

        Raises
        ------
        RuntimeError :
            Raised if this method is called before collect_runtimes.

        """
        if self._runtime_dependencies is None:
            raise RuntimeError('Runtime dependencies must be collected '
                               '(calling collect_runtimes) before they can be '
                               'queried.')

        valids = self._runtime_map.get(id)
        if not valids:
            return {}

        deps = self._runtime_dependencies
        queried = {}
        for runtime_id, r_deps in valids.items():
            if runtime_id in deps:
                # Only expose the dependencies this component declared.
                queried[runtime_id] = {k: deps[runtime_id][k] for k in r_deps}
            else:
                # Guarantee the section exists even when nothing was collected.
                queried[runtime_id] = {}

        return queried

    def reset(self):
        """Cleanup all cached values.

        Refuses to run while runtime dependencies are held, since they
        would otherwise leak without being released.
        """
        if self._runtime_dependencies:
            raise RuntimeError('Cannot reset dependencies while holding '
                               'runtime dependencies')
        self._build_analysis.clear()
        self._build_dependencies = None
        self._runtime_analysis.clear()
        self._runtime_map.clear()

    # =========================================================================
    # --- Private API ---------------------------------------------------------
    # =========================================================================

    #: Cached build dependencies analysis for the main task.
    #: No actual dependency is stored, this dict can be used to collect them
    _build_analysis = Dict()

    #: Cached build dependencies of the main task.
    #: Contains the actual dependencies.
    _build_dependencies = Value()

    #: Cached runtime dependencies analysis for the main task and the hooks.
    #: No actual dependency is stored, this dict can be used to collect them
    _runtime_analysis = Typed(defaultdict, (set,))

    #: Cached runtime dependencies of the main task and the hooks.
    #: Contains the actual dependencies.
    #: Set to None when dependencies have not been collected.
    _runtime_dependencies = Typed(dict)

    #: Mapping determining which component has which dependency.
    _runtime_map = Dict()

    def _analyse_task_runtime(self, task):
        # Analyse build and runtime dependencies of a task, caching the
        # result under the 'main' key of the runtime map. Returns the same
        # (bool, msg, errors) triple as the public collect methods.
        workbench = self.measurement.plugin.workbench
        core = workbench.get_plugin('enaml.workbench.core')

        # If the dependencies of the main task are not known
        if not self._runtime_map.get('main'):
            cmd = 'exopy.app.dependencies.analyse'
            deps = core.invoke_command(cmd,
                                       {'obj': task,
                                        'dependencies': ['build', 'runtime']})
            b_deps, r_deps = deps
            msg = 'Failed to analyse main task %s dependencies.'
            if b_deps.errors:
                return False, msg % 'build', b_deps.errors
            if r_deps.errors:
                return False, msg % 'runtime', r_deps.errors
            self._build_analysis = b_deps.dependencies
            self._runtime_map['main'] = r_deps.dependencies
            self._update_runtime_analysis(r_deps.dependencies)

        return True, '', {}

    def _collect_analysed_runtimes(self):
        # Collect every runtime dependency gathered so far in
        # _runtime_analysis. On partial availability the collected
        # dependencies are kept (they must still be released) but the
        # call reports failure.
        workbench = self.measurement.plugin.workbench
        core = workbench.get_plugin('enaml.workbench.core')
        cmd = 'exopy.app.dependencies.collect'
        deps = core.invoke_command(cmd,
                                   dict(dependencies=self._runtime_analysis,
                                        owner='exopy.measurement',
                                        kind='runtime'))
        if deps.errors:
            msg = 'Failed to collect some runtime dependencies.'
            return False, msg, deps.errors

        elif deps.unavailable:
            msg = 'Some dependencies are currently unavailable.'
            self._runtime_dependencies = deps.dependencies
            return False, msg, deps.unavailable

        self._runtime_dependencies = deps.dependencies
        return True, '', {}

    def _update_runtime_analysis(self, new):
        """Update the known runtime dependencies.
        """
        analysis = self._runtime_analysis
        for k in new:
            analysis[k].update(new[k])
class Measurement(HasPrefAtom):
"""Object representing all the aspects of a measurement.
"""
#: Name of the measurement.
name = Str().tag(pref=True)
#: Id of that particular iteration of the measurement. This value is used
#: when saving the measurement before running it. It is also communicated
#: to the root task
id = Str().tag(pref=True)
#: Current measurement status.
status = Enum('READY', 'RUNNING', 'PAUSING', 'PAUSED', 'RESUMING',
'STOPPING', 'EDITING', 'SKIPPED', 'FAILED', 'COMPLETED',
'INTERRUPTED')
#: Detailed information about the measurement status.
infos = Str()
#: Path to the last file in which that measurement was saved.
path = Str()
#: Root task holding the measurement logic.
root_task = Typed(RootTask)
#: Dict of active monitor for this measurement.
monitors = Typed(OrderedDict, ())
#: Dict of pre-measurement execution routines.
pre_hooks = Typed(OrderedDict, ())
#: Dict of post-measurement execution routines.
post_hooks = Typed(OrderedDict, ())
#: Reference to the measurement plugin managing this measurement.
plugin = ForwardTyped(measurement_plugin)
#: Flag signaling whether the user chose to enqueue the measurement knowing
#: some tests are failing.
forced_enqueued = Bool()
#: Object handling the collection and access to the measurement
#: dependencies.
dependencies = Typed(MeasurementDependencies)
#: Result object returned by the engine when the root_task has been
#: executed. Can be used by post-execution hook to adapt their behavior.
task_execution_result = Value()
def __init__(self, **kwargs):
super(Measurement, self).__init__(**kwargs)
self.add_tool('pre-hook', 'exopy.internal_checks')
def save(self, path):
"""Save the measurement as a ConfigObj object.
Parameters
----------
path : unicode
Path of the file to which save the measurement.
"""
config = ConfigObj(indent_type=' ', encoding='utf-8')
config.update(self.preferences_from_members())
# First save the task.
core = self.plugin.workbench.get_plugin(u'enaml.workbench.core')
cmd = u'exopy.tasks.save'
task_prefs = core.invoke_command(cmd, {'task': self.root_task,
'mode': 'config'}, self)
config['root_task'] = {}
include_configobj(config['root_task'], task_prefs)
# Save the state of each monitor, pre-hook, post-hook.
for kind in ('monitors', 'pre_hooks', 'post_hooks'):
config[kind] = {}
for id, | |
<reponame>awgu/tutorials
"""
Language Modeling with nn.Transformer and TorchText
===============================================================
This is a tutorial on training a sequence-to-sequence model that uses the
`nn.Transformer <https://pytorch.org/docs/stable/generated/torch.nn.Transformer.html>`__ module.
The PyTorch 1.2 release includes a standard transformer module based on the
paper `Attention is All You Need <https://arxiv.org/pdf/1706.03762.pdf>`__.
Compared to Recurrent Neural Networks (RNNs), the transformer model has proven
to be superior in quality for many sequence-to-sequence tasks while being more
parallelizable. The ``nn.Transformer`` module relies entirely on an attention
mechanism (implemented as
`nn.MultiheadAttention <https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html>`__)
to draw global dependencies between input and output. The ``nn.Transformer``
module is highly modularized such that a single component (e.g.,
`nn.TransformerEncoder <https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoder.html>`__)
can be easily adapted/composed.
.. image:: ../_static/img/transformer_architecture.jpg
"""
######################################################################
# Define the model
# ----------------
#
######################################################################
# In this tutorial, we train a ``nn.TransformerEncoder`` model on a
# language modeling task. The language modeling task is to assign a
# probability for the likelihood of a given word (or a sequence of words)
# to follow a sequence of words. A sequence of tokens are passed to the embedding
# layer first, followed by a positional encoding layer to account for the order
# of the word (see the next paragraph for more details). The
# ``nn.TransformerEncoder`` consists of multiple layers of
# `nn.TransformerEncoderLayer <https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoderLayer.html>`__.
# Along with the input sequence, a square attention mask is required because the
# self-attention layers in ``nn.TransformerEncoder`` are only allowed to attend
# the earlier positions in the sequence. For the language modeling task, any
# tokens on the future positions should be masked. To produce a probability
# distribution over output words, the output of the ``nn.TransformerEncoder``
# model is passed through a linear layer followed by a log-softmax function.
#
import math
from typing import Tuple
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torch.utils.data import dataset
class TransformerModel(nn.Module):
    """Encoder-only transformer language model.

    Token ids are embedded, augmented with sinusoidal positional
    encodings, passed through a stack of TransformerEncoderLayers, and
    projected back onto the vocabulary with a linear decoder.
    """

    def __init__(self, ntoken: int, d_model: int, nhead: int, d_hid: int,
                 nlayers: int, dropout: float = 0.5):
        super().__init__()
        self.model_type = 'Transformer'
        # Submodules are created in the same order as before so that a
        # fixed RNG seed yields identical initial weights.
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        layer = TransformerEncoderLayer(d_model, nhead, d_hid, dropout)
        self.transformer_encoder = TransformerEncoder(layer, nlayers)
        self.encoder = nn.Embedding(ntoken, d_model)
        self.d_model = d_model
        self.decoder = nn.Linear(d_model, ntoken)
        self.init_weights()

    def init_weights(self) -> None:
        """Re-initialize embedding and decoder weights uniformly in [-0.1, 0.1]."""
        bound = 0.1
        nn.init.uniform_(self.encoder.weight, -bound, bound)
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -bound, bound)

    def forward(self, src: Tensor, src_mask: Tensor) -> Tensor:
        """
        Args:
            src: Tensor, shape [seq_len, batch_size]
            src_mask: Tensor, shape [seq_len, seq_len]

        Returns:
            output Tensor of shape [seq_len, batch_size, ntoken]
        """
        embedded = self.encoder(src) * math.sqrt(self.d_model)
        positioned = self.pos_encoder(embedded)
        hidden = self.transformer_encoder(positioned, src_mask)
        return self.decoder(hidden)
def generate_square_subsequent_mask(sz: int) -> Tensor:
    """Generates an upper-triangular matrix of -inf, with zeros on diag.

    Entries strictly above the diagonal are -inf; all others are 0.
    """
    mask = torch.zeros(sz, sz)
    rows, cols = torch.triu_indices(sz, sz, offset=1)
    mask[rows, cols] = float('-inf')
    return mask
######################################################################
# ``PositionalEncoding`` module injects some information about the
# relative or absolute position of the tokens in the sequence. The
# positional encodings have the same dimension as the embeddings so that
# the two can be summed. Here, we use ``sine`` and ``cosine`` functions of
# different frequencies.
#
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position information to token embeddings.

    Even embedding dimensions carry sine terms, odd ones cosine terms,
    with geometrically spaced frequencies (Vaswani et al., 2017).
    """

    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        positions = torch.arange(max_len).unsqueeze(1)
        frequencies = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        encoding = torch.zeros(max_len, 1, d_model)
        encoding[:, 0, 0::2] = torch.sin(positions * frequencies)
        encoding[:, 0, 1::2] = torch.cos(positions * frequencies)
        # Registered as a buffer so it moves with the module but is not a
        # trainable parameter.
        self.register_buffer('pe', encoding)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]
        """
        seq_len = x.size(0)
        return self.dropout(x + self.pe[:seq_len])
######################################################################
# Load and batch data
# -------------------
#
######################################################################
# This tutorial uses ``torchtext`` to generate Wikitext-2 dataset.
# To access torchtext datasets, please install torchdata following instructions at https://github.com/pytorch/data.
#
# The vocab object is built based on the train dataset and is used to numericalize
# tokens into tensors. Wikitext-2 represents rare tokens as `<unk>`.
#
# Given a 1-D vector of sequential data, ``batchify()`` arranges the data
# into ``batch_size`` columns. If the data does not divide evenly into
# ``batch_size`` columns, then the data is trimmed to fit. For instance, with
# the alphabet as the data (total length of 26) and ``batch_size=4``, we would
# divide the alphabet into 4 sequences of length 6:
#
# .. math::
# \begin{bmatrix}
# \text{A} & \text{B} & \text{C} & \ldots & \text{X} & \text{Y} & \text{Z}
# \end{bmatrix}
# \Rightarrow
# \begin{bmatrix}
# \begin{bmatrix}\text{A} \\ \text{B} \\ \text{C} \\ \text{D} \\ \text{E} \\ \text{F}\end{bmatrix} &
# \begin{bmatrix}\text{G} \\ \text{H} \\ \text{I} \\ \text{J} \\ \text{K} \\ \text{L}\end{bmatrix} &
# \begin{bmatrix}\text{M} \\ \text{N} \\ \text{O} \\ \text{P} \\ \text{Q} \\ \text{R}\end{bmatrix} &
# \begin{bmatrix}\text{S} \\ \text{T} \\ \text{U} \\ \text{V} \\ \text{W} \\ \text{X}\end{bmatrix}
# \end{bmatrix}
#
# Batching enables more parallelizable processing. However, batching means that
# the model treats each column independently; for example, the dependence of
# ``G`` and ``F`` can not be learned in the example above.
#
from torchtext.datasets import WikiText2
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator

# Build the vocabulary from the WikiText-2 training split.
train_iter = WikiText2(split='train')
tokenizer = get_tokenizer('basic_english')
vocab = build_vocab_from_iterator(map(tokenizer, train_iter), specials=['<unk>'])
# Out-of-vocabulary tokens map to '<unk>' instead of raising a KeyError.
vocab.set_default_index(vocab['<unk>'])
def data_process(raw_text_iter: dataset.IterableDataset) -> Tensor:
    """Converts raw text into a flat Tensor of token ids.

    Each line is tokenized and numericalized; empty lines are dropped
    before the per-line tensors are concatenated.
    """
    per_line = (torch.tensor(vocab(tokenizer(line)), dtype=torch.long)
                for line in raw_text_iter)
    nonempty = [t for t in per_line if t.numel() > 0]
    return torch.cat(nonempty)
# train_iter was "consumed" by the process of building the vocab,
# so we have to create it again
train_iter, val_iter, test_iter = WikiText2()
train_data = data_process(train_iter)
val_data = data_process(val_iter)
test_data = data_process(test_iter)

# Run on GPU when one is available, otherwise on CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def batchify(data: Tensor, bsz: int) -> Tensor:
    """Divides the data into bsz separate sequences, removing extra elements
    that wouldn't cleanly fit.

    Args:
        data: Tensor, shape [N]
        bsz: int, batch size

    Returns:
        Tensor of shape [N // bsz, bsz], moved to the module-level device
    """
    rows = data.size(0) // bsz
    trimmed = data[:rows * bsz]
    # Lay the bsz sequences out as columns.
    batched = trimmed.view(bsz, rows).t().contiguous()
    return batched.to(device)
# Batch the three splits; a smaller batch is used for evaluation.
batch_size = 20
eval_batch_size = 10
train_data = batchify(train_data, batch_size)  # shape [seq_len, batch_size]
val_data = batchify(val_data, eval_batch_size)
test_data = batchify(test_data, eval_batch_size)
######################################################################
# Functions to generate input and target sequence
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
######################################################################
# ``get_batch()`` generates a pair of input-target sequences for
# the transformer model. It subdivides the source data into chunks of
# length ``bptt``. For the language modeling task, the model needs the
# following words as ``Target``. For example, with a ``bptt`` value of 2,
# we’d get the following two Variables for ``i`` = 0:
#
# .. image:: ../_static/img/transformer_input_target.png
#
# It should be noted that the chunks are along dimension 0, consistent
# with the ``S`` dimension in the Transformer model. The batch dimension
# ``N`` is along dimension 1.
#
# Length of each source chunk fed to the model (backprop-through-time span).
bptt = 35
def get_batch(source: Tensor, i: int) -> Tuple[Tensor, Tensor]:
    """Slice one (input, target) pair out of ``source`` starting at row ``i``.

    Args:
        source: Tensor, shape [full_seq_len, batch_size]
        i: int, starting row

    Returns:
        tuple (data, target): ``data`` has shape [seq_len, batch_size];
        ``target`` holds the same tokens shifted one step ahead, flattened
        to shape [seq_len * batch_size]. ``seq_len`` is at most ``bptt``
        (shorter only for the final chunk).
    """
    span = min(bptt, len(source) - 1 - i)
    inputs = source[i:i + span]
    labels = source[i + 1:i + 1 + span].reshape(-1)
    return inputs, labels
######################################################################
# Initiate an instance
# --------------------
#
######################################################################
# The model hyperparameters are defined below. The vocab size is
# equal to the length of the vocab object.
#
ntokens = len(vocab)  # size of vocabulary
emsize = 200  # embedding dimension
d_hid = 200  # dimension of the feedforward network model in nn.TransformerEncoder
nlayers = 2  # number of nn.TransformerEncoderLayer in nn.TransformerEncoder
nhead = 2  # number of heads in nn.MultiheadAttention
dropout = 0.2  # dropout probability
model = TransformerModel(ntokens, emsize, nhead, d_hid, nlayers, dropout).to(device)
######################################################################
# Run the model
# -------------
#
######################################################################
# We use `CrossEntropyLoss <https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html>`__
# with the `SGD <https://pytorch.org/docs/stable/generated/torch.optim.SGD.html>`__
# (stochastic gradient descent) optimizer. The learning rate is initially set to
# 5.0 and follows a `StepLR <https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.StepLR.html>`__
# schedule. During training, we use `nn.utils.clip_grad_norm\_ <https://pytorch.org/docs/stable/generated/torch.nn.utils.clip_grad_norm_.html>`__
# to prevent gradients from exploding.
#
import copy
import time
criterion = nn.CrossEntropyLoss()
lr = 5.0  # learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# Decay the learning rate by 5% every epoch (step_size=1.0).
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
def train(model: nn.Module) -> None:
    """Run one training epoch over ``train_data``, logging every 200 batches.

    NOTE(review): reads the module-level globals ``train_data``, ``bptt``,
    ``criterion``, ``optimizer``, ``scheduler``, ``ntokens``, ``device`` and
    ``epoch`` (the latter is expected to be set by the surrounding loop).
    """
    model.train()  # enable dropout etc.
    log_interval = 200
    running_loss = 0.
    start_time = time.time()
    src_mask = generate_square_subsequent_mask(bptt).to(device)

    num_batches = len(train_data) // bptt
    for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
        data, targets = get_batch(train_data, i)
        seq_len = data.size(0)
        if seq_len != bptt:
            # final chunk is shorter: shrink the causal mask to match
            src_mask = src_mask[:seq_len, :seq_len]
        output = model(data, src_mask)
        loss = criterion(output.view(-1, ntokens), targets)

        optimizer.zero_grad()
        loss.backward()
        # clip gradients to keep training stable
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()

        running_loss += loss.item()
        if batch % log_interval == 0 and batch > 0:
            lr = scheduler.get_last_lr()[0]
            ms_per_batch = (time.time() - start_time) * 1000 / log_interval
            cur_loss = running_loss / log_interval
            ppl = math.exp(cur_loss)
            print(f'| epoch {epoch:3d} | {batch:5d}/{num_batches:5d} batches | '
                  f'lr {lr:02.2f} | ms/batch {ms_per_batch:5.2f} | '
                  f'loss {cur_loss:5.2f} | ppl {ppl:8.2f}')
            running_loss = 0
            start_time = time.time()
def evaluate(model: nn.Module, eval_data: Tensor) -> float:
model.eval() # turn on evaluation mode
total_loss = 0.
src_mask = generate_square_subsequent_mask(bptt).to(device)
with torch.no_grad():
for i | |
import numpy as np
import typing as _typing
import torch
import pickle
from autogl.module.model.encoders.base_encoder import AutoHomogeneousEncoderMaintainer
from ..model import (
EncoderUniversalRegistry,
DecoderUniversalRegistry,
BaseEncoderMaintainer,
BaseDecoderMaintainer,
BaseAutoModel,
ModelUniversalRegistry
)
from ..hpo import AutoModule
import logging
from .evaluation import Evaluation, get_feval, Acc
from ...utils import get_logger
LOGGER_ES = get_logger("early-stopping")
class _DummyModel(torch.nn.Module):
    """Thin ``nn.Module`` gluing an encoder and an optional decoder together.

    When ``encoder`` is a whole :class:`BaseAutoModel`, its model is used
    directly and any decoder is ignored.
    """

    def __init__(self, encoder: _typing.Union[BaseEncoderMaintainer, BaseAutoModel], decoder: _typing.Optional[BaseDecoderMaintainer]):
        super().__init__()
        if isinstance(encoder, BaseAutoModel):
            # a full auto-model already carries its own head
            self.encoder = encoder.model
            self.decoder = None
        else:
            self.encoder = encoder.encoder
            self.decoder = decoder.decoder if decoder is not None else None

    def __str__(self, ):
        return "DummyModel(encoder={}, decoder={})".format(self.encoder, self.decoder)

    def encode(self, *args, **kwargs):
        """Run the encoder on the raw inputs."""
        return self.encoder(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Run the decoder; identity on the first argument when absent."""
        if self.decoder is None:
            return args[0]
        return self.decoder(*args, **kwargs)

    def forward(self, *args, **kwargs):
        encoded = self.encode(*args, **kwargs)
        return self.decode(encoded, *args, **kwargs)
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience.

    The best weights are kept in memory as a pickled ``state_dict`` and can
    be restored with :meth:`load_checkpoint`.
    """

    def __init__(
        self,
        patience=7,
        verbose=False,
        delta=0,
        path="checkpoint.pt",
        trace_func=LOGGER_ES.info,
    ):
        """
        Args:
            patience (int or None): How long to wait after last time validation
                loss improved; ``None`` falls back to 100.
                Default: 7
            verbose (bool): If True, prints a message for each validation loss
                improvement and for each patience-counter increase.
                Default: False
            delta (float): Minimum change in the monitored quantity to qualify
                as an improvement.
                Default: 0
            path (str): Path for the checkpoint to be saved to (currently
                unused: checkpoints are kept in memory, not written to disk).
                Default: 'checkpoint.pt'
            trace_func (function): trace print function.
                Default: LOGGER_ES.info
        """
        self.patience = 100 if patience is None else patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # np.inf: the capitalized alias np.Inf was removed in NumPy 2.0
        self.val_loss_min = np.inf
        self.delta = delta
        self.path = path
        self.trace_func = trace_func

    def __call__(self, val_loss, model):
        """Record one validation result; update the counter / best snapshot."""
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score <= self.best_score + self.delta:
            # no improvement (within the delta tolerance)
            self.counter += 1
            if self.verbose:  # plain truthiness, consistent with save_checkpoint
                self.trace_func(
                    f"EarlyStopping counter: {self.counter} out of {self.patience}"
                )
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        """Saves model when validation loss decrease."""
        if self.verbose:
            self.trace_func(
                f"Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ..."
            )
        # keep an in-memory snapshot instead of writing to self.path
        self.best_param = pickle.dumps(model.state_dict())
        # torch.save(model.state_dict(), self.path)
        self.val_loss_min = val_loss

    def load_checkpoint(self, model):
        """Load the best saved weights back into ``model`` (warn if none)."""
        if hasattr(self, "best_param"):
            model.load_state_dict(pickle.loads(self.best_param))
        else:
            # Logger.warn is a deprecated alias of Logger.warning
            LOGGER_ES.warning("try to load checkpoint while no checkpoint is saved")
class BaseTrainer(AutoModule):
    # Abstract trainer base: holds an encoder/decoder pair (or a whole
    # BaseAutoModel), the target device, evaluation functions and loss name.
    # Concrete task trainers implement train/predict/evaluate.
    def __init__(
        self,
        encoder: _typing.Union[BaseAutoModel, BaseEncoderMaintainer, None],
        decoder: _typing.Union[BaseDecoderMaintainer, None],
        device: _typing.Union[torch.device, str],
        feval: _typing.Union[
            _typing.Sequence[str], _typing.Sequence[_typing.Type[Evaluation]]
        ] = (Acc,),
        loss: str = "nll_loss",
    ):
        """
        The basic trainer.
        Used to automatically train the problems, e.g., node classification, graph classification, etc.

        Parameters
        ----------
        encoder: `BaseAutoModel`, `BaseEncoderMaintainer` or `None`
            The encoder (or whole auto-model) used to train and predict.
        decoder: `BaseDecoderMaintainer` or `None`
            The decoder; ignored when `encoder` is a whole model.
        device: `torch.device` or `str`
            The device this trainer computes on.
        feval: sequence of `str` or of `Evaluation` types
            The evaluation functions. Default `(Acc,)`.
        loss: `str`
            Name of the loss used for training. Default `"nll_loss"`.
        """
        super().__init__(device)
        self.encoder = encoder
        # a whole BaseAutoModel already contains its own head: drop the decoder
        self.decoder = None if isinstance(encoder, BaseAutoModel) else decoder
        self.feval = feval
        self.loss = loss

    def _compose_model(self):
        # Build the runnable torch module from the maintainers, on self.device.
        return _DummyModel(self.encoder, self.decoder).to(self.device)

    def _initialize(self):
        # Initialize encoder first; the decoder needs the encoder's dims.
        self.encoder.initialize()
        if self.decoder is not None:
            self.decoder.initialize(self.encoder)

    @property
    def feval(self) -> _typing.Sequence[_typing.Type[Evaluation]]:
        # Evaluation classes, always normalized by the setter via get_feval.
        return self.__feval

    @feval.setter
    def feval(
        self,
        _feval: _typing.Union[
            _typing.Sequence[str], _typing.Sequence[_typing.Type[Evaluation]]
        ],
    ):
        # Names are resolved to Evaluation classes here.
        self.__feval: _typing.Sequence[_typing.Type[Evaluation]] = get_feval(_feval)

    @property
    def model(self):
        # compatible with v0.2
        return self.encoder

    @model.setter
    def model(self, model):
        # compatible with v0.2
        self.encoder = model

    def to(self, device: _typing.Union[str, torch.device]):
        """
        Transfer the trainer to another device
        Parameters
        ----------
        device: `str` or `torch.device`
            The device this trainer will use
        """
        self.device = device
        if self.encoder is not None:
            self.encoder.to_device(self.device)
        if self.decoder is not None:
            self.decoder.to_device(self.device)

    def get_feval(
        self, return_major: bool = False
    ) -> _typing.Union[
        _typing.Type[Evaluation], _typing.Sequence[_typing.Type[Evaluation]]
    ]:
        """
        Parameters
        ----------
        return_major: ``bool``
            Wether to return the major ``feval``. Default ``False``.
        Returns
        -------
        ``evaluation`` or list of ``evaluation``:
            If ``return_major=True``, will return the major ``evaluation`` method.
            Otherwise, will return the ``evaluation`` element passed when constructing.
        """
        if return_major:
            if isinstance(self.feval, _typing.Sequence):
                return self.feval[0]
            else:
                return self.feval
        return self.feval

    @classmethod
    def save(cls, instance, path):
        # Pickle a whole trainer instance to `path`.
        with open(path, "wb") as output:
            pickle.dump(instance, output, pickle.HIGHEST_PROTOCOL)

    @classmethod
    def load(cls, path):
        # Counterpart of save(): unpickle a trainer instance from `path`.
        with open(path, "rb") as inputs:
            instance = pickle.load(inputs)
            return instance

    def duplicate_from_hyper_parameter(self, *args, **kwargs) -> "BaseTrainer":
        """Create a new trainer with the given hyper parameter."""
        raise NotImplementedError()

    def train(self, dataset, keep_valid_result):
        """
        Train on the given dataset.
        Parameters
        ----------
        dataset: The dataset used in training.
        keep_valid_result: `bool`
            If True(False), save the validation result after training.
        Returns
        -------
        """
        raise NotImplementedError()

    def predict(self, dataset, mask=None):
        """
        Predict on the given dataset.
        Parameters
        ----------
        dataset: The dataset used in predicting.
        mask: `train`, `val`, or `test`.
            The dataset mask.
        Returns
        -------
        prediction result
        """
        raise NotImplementedError()

    def predict_proba(self, dataset, mask=None, in_log_format=False):
        """
        Predict the probability on the given dataset.
        Parameters
        ----------
        dataset: The dataset used in predicting.
        mask: `train`, `val`, or `test`.
            The dataset mask.
        in_log_format: `bool`.
            If True(False), the probability will (not) be log format.
        Returns
        -------
        The prediction result.
        """
        raise NotImplementedError()

    def get_valid_predict_proba(self):
        """Get the valid result (prediction probability)."""
        raise NotImplementedError()

    def get_valid_predict(self):
        """Get the valid result."""
        raise NotImplementedError()

    def get_valid_score(self, return_major=True):
        """Get the validation score."""
        raise NotImplementedError()

    def __repr__(self) -> str:
        raise NotImplementedError

    def evaluate(self, dataset, mask=None, feval=None):
        """
        Parameters
        ----------
        dataset: The dataset used in evaluation.
        mask: `train`, `val`, or `test`.
            The dataset mask.
        feval: The evaluation methods.
        Returns
        -------
        The evaluation result.
        """
        raise NotImplementedError

    def update_parameters(self, **kwargs):
        """
        Update parameters of this trainer
        """
        for k, v in kwargs.items():
            if k == "feval":
                self.feval = get_feval(v)
            elif k == "device":
                self.to(v)
            elif hasattr(self, k):
                setattr(self, k, v)
            else:
                raise KeyError("Cannot set parameter", k, "for trainer", self.__class__)

    def combined_hyper_parameter_space(self):
        # Merge trainer, encoder and decoder HP spaces for the HPO module.
        return {
            "trainer": self.hyper_parameter_space,
            "encoder": self.encoder.hyper_parameter_space,
            "decoder": [] if self.decoder is None else self.decoder.hyper_parameter_space
        }
class _BaseClassificationTrainer(BaseTrainer):
    """ Base class of trainer for classification tasks.

    Keeps ``num_features``, ``num_classes`` and ``last_dim`` as properties
    whose setters propagate the dimensions into the current encoder/decoder,
    so re-assigning an attribute to itself is a deliberate re-propagation.
    """

    def __init__(
        self,
        encoder: _typing.Union[BaseAutoModel, BaseEncoderMaintainer, str, None],
        decoder: _typing.Union[BaseDecoderMaintainer, str, None],
        num_features: int,
        num_classes: int,
        last_dim: _typing.Union[int, str] = "auto",
        device: _typing.Union[torch.device, str, None] = "auto",
        feval: _typing.Union[
            _typing.Sequence[str], _typing.Sequence[_typing.Type[Evaluation]]
        ] = (Acc,),
        loss: str = "nll_loss",
    ):
        """
        Parameters
        ----------
        encoder: encoder maintainer / whole auto-model / registered name / None
        decoder: decoder maintainer / registered name / None
        num_features: input feature dimension
        num_classes: number of target classes
        last_dim: encoder output dimension ("auto" lets the model decide)
        device: computation device
        feval: evaluation functions
        loss: loss function name
        """
        # placeholders must exist before the property setters run
        self._encoder = None
        self._decoder = None
        self.num_features = num_features
        self.num_classes = num_classes
        self.last_dim: _typing.Union[int, str] = last_dim
        super(_BaseClassificationTrainer, self).__init__(
            encoder, decoder, device, feval, loss
        )

    @property
    def encoder(self):
        return self._encoder

    @encoder.setter
    def encoder(self, enc: _typing.Union[BaseAutoModel, BaseEncoderMaintainer, str, None]):
        # Resolve `enc` from a registered name, a maintainer, a whole model
        # or None, then re-propagate the cached dimensions to it.
        if isinstance(enc, str):
            if enc in EncoderUniversalRegistry:
                self._encoder = EncoderUniversalRegistry.get_encoder(enc)(
                    self.num_features, last_dim=self.last_dim, device=self.device, init=self.initialized
                )
            else:
                # fall back to the whole-model registry for legacy names
                self._encoder = ModelUniversalRegistry.get_model(enc)(
                    self.num_features, last_dim=self.last_dim, device=self.device
                )
        elif isinstance(enc, BaseEncoderMaintainer):
            self._encoder = enc
        elif isinstance(enc, BaseAutoModel):
            self._encoder = enc
            if self.decoder is not None:
                # FIX: logging.warn is a deprecated alias of logging.warning
                logging.warning("will disable decoder since a whole model is passed")
                self.decoder = None
        elif enc is None:
            self._encoder = None
        else:
            raise ValueError("Enc {} is not supported!".format(enc))
        # deliberate self-assignments: run the setters so the new encoder /
        # decoder pick up the dimensions (NOT no-ops)
        self.num_features = self.num_features
        self.num_classes = self.num_classes
        self.last_dim = self.last_dim

    @property
    def decoder(self):
        return self._decoder

    @decoder.setter
    def decoder(self, dec: _typing.Union[BaseDecoderMaintainer, str, None]):
        if isinstance(self.encoder, BaseAutoModel):
            # FIX: logging.warn is a deprecated alias of logging.warning
            logging.warning("Ignore passed dec since enc is a whole model")
            self._decoder = None
            return
        if isinstance(dec, str):
            self._decoder = DecoderUniversalRegistry.get_decoder(dec)(
                self.num_classes, input_dim=self.last_dim, device=self.device, init=self.initialized
            )
        elif isinstance(dec, BaseDecoderMaintainer):
            self._decoder = dec
        elif dec is None:
            self._decoder = None
        else:
            raise ValueError("Dec {} is not supported!".format(dec))
        # deliberate self-assignments, see encoder setter
        self.num_features = self.num_features
        self.num_classes = self.num_classes
        self.last_dim = self.last_dim

    @property
    def num_classes(self):
        return self.__num_classes

    @num_classes.setter
    def num_classes(self, num_classes):
        self.__num_classes = num_classes
        # a whole model owns its output dim; otherwise the decoder does
        if isinstance(self.encoder, BaseAutoModel):
            self.encoder.output_dimension = num_classes
        elif isinstance(self.decoder, BaseDecoderMaintainer):
            self.decoder.output_dimension = num_classes

    @property
    def last_dim(self):
        return self._last_dim

    @last_dim.setter
    def last_dim(self, dim):
        self._last_dim = dim
        if isinstance(self.encoder, AutoHomogeneousEncoderMaintainer):
            self.encoder.final_dimension = self._last_dim

    @property
    def num_features(self):
        return self._num_features

    @num_features.setter
    def num_features(self, num_features):
        self._num_features = num_features
        if self.encoder is not None:
            self.encoder.input_dimension = num_features
class BaseNodeClassificationTrainer(_BaseClassificationTrainer):
    """Classification trainer specialization for node-level tasks.

    Pins the encoder's last dimension to ``num_classes`` so the encoder
    output can feed a classifier directly.
    """

    def __init__(
        self,
        encoder: _typing.Union[BaseAutoModel, BaseEncoderMaintainer, str, None],
        decoder: _typing.Union[BaseDecoderMaintainer, str, None],
        num_features: int,
        num_classes: int,
        device: _typing.Union[torch.device, str, None] = None,
        feval: _typing.Union[
            _typing.Sequence[str], _typing.Sequence[_typing.Type[Evaluation]]
        ] = (Acc,),
        loss: str = "nll_loss",
    ):
        # last_dim is fixed to num_classes for node classification
        super(BaseNodeClassificationTrainer, self).__init__(
            encoder, decoder, num_features, num_classes, num_classes, device, feval, loss
        )

    # override num_classes so it also keeps last_dim in sync
    @property
    def num_classes(self):
        return self.__num_classes

    @num_classes.setter
    def num_classes(self, num_classes):
        self.__num_classes = num_classes
        target = (
            self.encoder if isinstance(self.encoder, BaseAutoModel)
            else self.decoder if isinstance(self.decoder, BaseDecoderMaintainer)
            else None
        )
        if target is not None:
            target.output_dimension = num_classes
        self.last_dim = num_classes
class BaseGraphClassificationTrainer(_BaseClassificationTrainer):
def __init__(
self,
encoder: _typing.Union[BaseAutoModel, BaseEncoderMaintainer, str, None] = None,
decoder: _typing.Union[BaseDecoderMaintainer, str, None] = None,
num_features: _typing.Optional[int] = None,
num_classes: _typing.Optional[int] = None,
num_graph_features: int = 0,
last_dim: _typing.Union[int, str] = "auto",
device: _typing.Union[torch.device, str, | |
<gh_stars>0
import sys
import math
class IV:
    # Initial hash values for SHA-512/t (FIPS-180-4 5.3.6):
    # XOR each SHA-512 constant with 0xa5a5a5a5a5a5a5a5, hash the ASCII
    # string "SHA-512/<t>" using that modified IV, and split the digest
    # into eight 64-bit words.
    # NOTE(review): intended to be called as IV.IV(t) — `t` is the
    # truncation length, not an instance; relies on `hash.sha512_for_t`,
    # which is defined further down this file.
    def IV(t):
        H = [int('6a09e667f3bcc908', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('bb67ae8584caa73b', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('3c6ef372fe94f82b', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('a54ff53a5f1d36f1', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('510e527fade682d1', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('9b05688c2b3e6c1f', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('1f83d9abfb41bd6b', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('5be0cd19137e2179', 16) ^ int('a5a5a5a5a5a5a5a5', 16)]
        t = hash.sha512_for_t('SHA-512/t'.replace('t', str(t)), H)
        # each of the 8 words is 16 hex digits of the returned digest string
        H = []
        for i in range(0, 8):
            H.append(int(t[i*16:(i* 16)+16], 16))
        return H
class sched:
    """Bit operations and message-schedule builders for the SHA family
    (FIPS-180-4).  All values are unsigned ``w``-bit integers."""

    # FIPS-180-4 3.2
    # Rotate the w-bit word x right by n bits
    @staticmethod
    def ROTR(x, n, w=32):
        return ((x >> n) | (x << w - n)) & ((1 << w) - 1)

    # FIPS-180-4 3.2
    # Rotate the w-bit word x left by n bits
    @staticmethod
    def ROTL(x, n, w=32):
        # BUG FIX: previously `w` was not forwarded to ROTR, so rotating a
        # 64-bit word would wrongly rotate within 32 bits (and raise a
        # ValueError for w - n > 32 due to a negative shift count).
        return sched.ROTR(x, w - n, w)

    # FIPS-180-4 4.1.2 (SHA-224/256 small sigma 0)
    @staticmethod
    def sigma0(x):
        return sched.ROTR(x, 7) ^ sched.ROTR(x, 18) ^ (x >> 3)

    # FIPS-180-4 4.1.2 (SHA-224/256 small sigma 1)
    @staticmethod
    def sigma1(x):
        return sched.ROTR(x, 17) ^ sched.ROTR(x, 19) ^ (x >> 10)

    # FIPS-180-4 4.1.3 (SHA-384/512 small sigma 0, 64-bit)
    @staticmethod
    def sigma0_sha384(x):
        return sched.ROTR(x, 1, 64) ^ sched.ROTR(x, 8, 64) ^ (x >> 7)

    # FIPS-180-4 4.1.3 (SHA-384/512 small sigma 1, 64-bit)
    @staticmethod
    def sigma1_sha384(x):
        return sched.ROTR(x, 19, 64) ^ sched.ROTR(x, 61, 64) ^ (x >> 6)

    # FIPS-180-4 6.1.2
    # New-word function for the SHA-1 message schedule
    @staticmethod
    def MIX_sha160(t, init_words, w=32):
        if t >= 16:
            return sched.ROTL(init_words[t-3] ^ init_words[t-8] ^ init_words[t-14] ^ init_words[t-16], 1)
        return init_words[t]

    # New-word function for the SHA-224/256 message schedule (mod 2**32)
    @staticmethod
    def MIX_sha224(t, init_words, w=32):
        if t >= 16:
            return int((sched.sigma1(init_words[t-2]) + init_words[t-7] + sched.sigma0(init_words[t-15]) + init_words[t-16]) % 2 ** 32)
        return init_words[t]

    # New-word function for the SHA-384 message schedule (mod 2**64)
    @staticmethod
    def MIX_sha384(t, init_words, w=64):
        if t >= 16:
            return int((sched.sigma1_sha384(init_words[t-2]) + init_words[t-7] + sched.sigma0_sha384(init_words[t-15]) + init_words[t-16]) % 2 ** 64)
        return init_words[t]

    # New-word function for the SHA-512 message schedule (same as SHA-384)
    @staticmethod
    def MIX_512(t, init_words, w=64):
        if t >= 16:
            return int((sched.sigma1_sha384(init_words[t-2]) + init_words[t-7] + sched.sigma0_sha384(init_words[t-15]) + init_words[t-16]) % 2 ** 64)
        return init_words[t]

    # FIPS-180-4 6.1.2
    # Expand the 16 message words of one block into the 80-word SHA-1 schedule
    @staticmethod
    def create_schedule_sha160(inital_words):
        W = []
        for t in range(0, 16):
            W.append(sched.MIX_sha160(t, inital_words))
        for t in range(16, 80):
            W.append(sched.MIX_sha160(t, W))
        return W

    # 64-word schedule for SHA-224/256
    @staticmethod
    def create_schedule_sha224(inital_words):
        W = []
        for t in range(0, 16):
            W.append(sched.MIX_sha224(t, inital_words))
        for t in range(16, 64):
            W.append(sched.MIX_sha224(t, W))
        return W

    # 80-word schedule for SHA-384
    @staticmethod
    def create_schedule_sha384(inital_words):
        W = []
        for t in range(0, 16):
            W.append(sched.MIX_sha384(t, inital_words))
        for t in range(16, 80):
            W.append(sched.MIX_sha384(t, W))
        return W

    # 80-word schedule for SHA-512
    @staticmethod
    def create_schedule_sha512(inital_words):
        W = []
        for t in range(0, 16):
            W.append(sched.MIX_512(t, inital_words))
        for t in range(16, 80):
            W.append(sched.MIX_512(t, W))
        return W
class ppp:
    """Converters from assorted input representations to an ASCII string."""

    # Identity: the message is already ASCII
    def from_str(message):
        return message

    # Decimal string -> single ASCII character
    def from_int(message):
        return chr(int(message, 10))

    # Hexadecimal string -> single ASCII character
    def from_hex(message):
        return chr(int(message, 16))

    # Binary string -> single ASCII character
    def from_bin(message):
        return chr(int(message, 2))

    # Octal string -> single ASCII character
    def from_oct(message):
        return chr(int(message, 8))

    # File contents -> ASCII string, one character per byte
    def from_file(filename):
        with open(filename, 'rb') as f:
            content = f.read()
        return ''.join(chr(byte) for byte in content)
class prep:
    """SHA-1 / SHA-2 (32-bit word) pre-processing: 512-bit blocks."""

    # FIPS-180-4 5.1.1
    # Pad an ASCII message out to a bit-string whose length is a
    # multiple of block_size bits.
    def padd(message, block_size=512, lenght_block_size=64):
        # message as bits: 8-bit big-endian code of every character
        bits = ''.join(bin(ord(ch))[2:].zfill(8) for ch in message)
        msg_len = len(bits)
        # binary form of the length, padded to at least one byte
        len_bits = bin(msg_len)[2:].zfill(8)
        # the mandatory single '1' bit
        bits += '1'
        # number of '0' bits so the total length is ≡ block_size -
        # lenght_block_size (mod block_size)
        zeros = ((block_size - lenght_block_size) % (block_size)) - (1 + msg_len)
        while zeros < 0:
            zeros += block_size
        bits += '0' * zeros
        # final field: the message length, widened to lenght_block_size bits
        return bits + len_bits.zfill(lenght_block_size)

    # FIPS-180-4 5.2.1
    # Split the padded bit-string into blocks of 16 w-bit integer words.
    def parse(message, block_size=512, w=32):
        blocks = []
        for b in range(len(message) // block_size):
            chunk = message[b * block_size:(b * block_size) + block_size]
            words = [int(chunk[i * w:(i * w) + w], 2) for i in range(16)]
            blocks.append(words)
        return blocks

    # FIPS-180-4 6.2.1
    # Full pre-processing; profile 0 = 512-bit blocks, 32-bit words (SHA-1).
    def prep(message, profile=0):
        block_size = 512
        lenght_block_size = 64
        w = 32
        if profile == 0:
            block_size = 512
            lenght_block_size = 64
            w = 32
        padded = prep.padd(message, block_size, lenght_block_size)
        return prep.parse(padded, block_size, w)
class prep_sha384:
    """SHA-384/512 pre-processing: 1024-bit blocks, 64-bit words."""

    # FIPS-180-4 5.1.2
    # Pad an ASCII message out to a bit-string whose length is a
    # multiple of block_size bits.
    def padd(message, block_size=1024, lenght_block_size=128):
        # message as bits: 8-bit big-endian code of every character
        bits = ''.join(bin(ord(ch))[2:].zfill(8) for ch in message)
        msg_len = len(bits)
        # binary form of the length, padded to at least one byte
        len_bits = bin(msg_len)[2:].zfill(8)
        # the mandatory single '1' bit
        bits += '1'
        # number of '0' bits so the total length is ≡ block_size -
        # lenght_block_size (mod block_size)
        zeros = ((block_size - lenght_block_size) % (block_size)) - (1 + msg_len)
        while zeros < 0:
            zeros += block_size
        bits += '0' * zeros
        # final field: the message length, widened to lenght_block_size bits
        return bits + len_bits.zfill(lenght_block_size)

    # FIPS-180-4 5.2.2
    # Split the padded bit-string into blocks of 16 w-bit integer words.
    def parse(message, block_size=1024, w=64):
        blocks = []
        for b in range(len(message) // block_size):
            chunk = message[b * block_size:(b * block_size) + block_size]
            words = [int(chunk[i * w:(i * w) + w], 2) for i in range(16)]
            blocks.append(words)
        return blocks

    # FIPS-180-4 6.2.1
    # Full pre-processing for the 1024-bit-block hashes (profile unused).
    def prep(message, profile=0):
        block_size = 1024
        lenght_block_size = 128
        w = 64
        padded = prep_sha384.padd(message, block_size, lenght_block_size)
        return prep_sha384.parse(padded, block_size, w)
class hash:
# FIPS-180-4 4.2.2
# Constant values
K_sha160 = []
# FIPS-180-4 5.3.1
# Constant inital hash values
H_sha160 = [int('67452301', 16),
int('efcdab89', 16),
int('98badcfe', 16),
int('10325476', 16),
int('c3d2e1f0', 16)]
K_sha224 = [int('428a2f98', 16),
int('71374491', 16),
int('b5c0fbcf', 16),
int('e9b5dba5', 16),
int('3956c25b', 16),
int('59f111f1', 16),
int('923f82a4', 16),
int('ab1c5ed5', 16),
int('d807aa98', 16),
int('12835b01', 16),
int('243185be', 16),
int('550c7dc3', 16),
int('72be5d74', 16),
int('80deb1fe', 16),
int('9bdc06a7', 16),
int('c19bf174', 16),
int('e49b69c1', 16),
int('efbe4786', 16),
int('0fc19dc6', 16),
int('240ca1cc', 16),
int('2de92c6f', 16),
int('4a7484aa', | |
error_message
self.name = name
self.region_id = region_id
self.state = state
self.update_time = update_time
self.version = version
self.vpc_id = vpc_id
self.sg_id = sg_id
self.cluster_domain = cluster_domain
def validate(self):
self.validate_required(self.cluster_id, 'cluster_id')
self.validate_required(self.cluster_type, 'cluster_type')
self.validate_required(self.creation_time, 'creation_time')
self.validate_required(self.error_message, 'error_message')
self.validate_required(self.name, 'name')
self.validate_required(self.region_id, 'region_id')
self.validate_required(self.state, 'state')
self.validate_required(self.update_time, 'update_time')
self.validate_required(self.version, 'version')
self.validate_required(self.vpc_id, 'vpc_id')
self.validate_required(self.sg_id, 'sg_id')
self.validate_required(self.cluster_domain, 'cluster_domain')
def to_map(self):
result = {}
result['ClusterId'] = self.cluster_id
result['ClusterType'] = self.cluster_type
result['CreationTime'] = self.creation_time
result['ErrorMessage'] = self.error_message
result['Name'] = self.name
result['RegionId'] = self.region_id
result['State'] = self.state
result['UpdateTime'] = self.update_time
result['Version'] = self.version
result['VpcId'] = self.vpc_id
result['SgId'] = self.sg_id
result['ClusterDomain'] = self.cluster_domain
return result
def from_map(self, map={}):
self.cluster_id = map.get('ClusterId')
self.cluster_type = map.get('ClusterType')
self.creation_time = map.get('CreationTime')
self.error_message = map.get('ErrorMessage')
self.name = map.get('Name')
self.region_id = map.get('RegionId')
self.state = map.get('State')
self.update_time = map.get('UpdateTime')
self.version = map.get('Version')
self.vpc_id = map.get('VpcId')
self.sg_id = map.get('SgId')
self.cluster_domain = map.get('ClusterDomain')
return self
class DescribeIngressGatewaysRequest(TeaModel):
    """Request model for the DescribeIngressGateways API."""

    def __init__(self, service_mesh_id=None):
        # ID of the service mesh whose ingress gateways are listed
        self.service_mesh_id = service_mesh_id

    def validate(self):
        pass

    def to_map(self):
        return {'ServiceMeshId': self.service_mesh_id}

    def from_map(self, map={}):
        self.service_mesh_id = map.get('ServiceMeshId')
        return self
class DescribeIngressGatewaysResponse(TeaModel):
    """Response model for the DescribeIngressGateways API.

    ``ingress_gateways`` holds one entry per gateway; on the wire each entry
    is a plain map (dict), not a nested TeaModel.
    """

    def __init__(self, request_id=None, ingress_gateways=None):
        self.request_id = request_id
        # BUG FIX: the constructor argument used to be silently ignored
        # (the attribute was always reset to []).
        self.ingress_gateways = [] if ingress_gateways is None else ingress_gateways

    def validate(self):
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.ingress_gateways, 'ingress_gateways')
        if self.ingress_gateways:
            for k in self.ingress_gateways:
                # entries may be plain dicts; only validate real models
                if k and hasattr(k, 'validate'):
                    k.validate()

    def to_map(self):
        result = {}
        result['RequestId'] = self.request_id
        if self.ingress_gateways is not None:
            result['IngressGateways'] = []
            for k in self.ingress_gateways:
                if not k:
                    result['IngressGateways'].append(None)
                elif hasattr(k, 'to_map'):
                    result['IngressGateways'].append(k.to_map())
                else:
                    # plain dict entry: pass through unchanged
                    result['IngressGateways'].append(k)
        else:
            result['IngressGateways'] = None
        return result

    def from_map(self, map={}):
        self.request_id = map.get('RequestId')
        if map.get('IngressGateways') is not None:
            # BUG FIX: previously did `temp_model = map[string,any]()`,
            # which is Go codegen leaked into Python (NameError at runtime).
            # Entries are plain dicts; keep them as-is.
            self.ingress_gateways = list(map.get('IngressGateways'))
        else:
            self.ingress_gateways = None
        return self
class DescribeUpgradeVersionRequest(TeaModel):
    """Request model for the DescribeUpgradeVersion API."""

    def __init__(self, service_mesh_id=None):
        # ID of the service mesh to query upgrade versions for
        self.service_mesh_id = service_mesh_id

    def validate(self):
        pass

    def to_map(self):
        return {'ServiceMeshId': self.service_mesh_id}

    def from_map(self, map={}):
        self.service_mesh_id = map.get('ServiceMeshId')
        return self
class DescribeUpgradeVersionResponse(TeaModel):
    """Response model for the DescribeUpgradeVersion API."""

    def __init__(self, request_id=None, version=None):
        self.request_id = request_id
        # nested DescribeUpgradeVersionResponseVersion model (or None)
        self.version = version

    def validate(self):
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.version, 'version')
        if self.version:
            self.version.validate()

    def to_map(self):
        result = {'RequestId': self.request_id}
        result['Version'] = self.version.to_map() if self.version is not None else None
        return result

    def from_map(self, map={}):
        self.request_id = map.get('RequestId')
        raw_version = map.get('Version')
        if raw_version is not None:
            self.version = DescribeUpgradeVersionResponseVersion().from_map(raw_version)
        else:
            self.version = None
        return self
class DescribeUpgradeVersionResponseVersion(TeaModel):
    """Version triple (Istio / Istio operator / Kubernetes) in the
    DescribeUpgradeVersion response."""

    def __init__(self, istio_version=None, istio_operator_version=None, kubernetes_version=None):
        self.istio_version = istio_version
        self.istio_operator_version = istio_operator_version
        self.kubernetes_version = kubernetes_version

    def validate(self):
        for value, field_name in (
            (self.istio_version, 'istio_version'),
            (self.istio_operator_version, 'istio_operator_version'),
            (self.kubernetes_version, 'kubernetes_version'),
        ):
            self.validate_required(value, field_name)

    def to_map(self):
        return {
            'IstioVersion': self.istio_version,
            'IstioOperatorVersion': self.istio_operator_version,
            'KubernetesVersion': self.kubernetes_version,
        }

    def from_map(self, map={}):
        for attr, key in (
            ('istio_version', 'IstioVersion'),
            ('istio_operator_version', 'IstioOperatorVersion'),
            ('kubernetes_version', 'KubernetesVersion'),
        ):
            setattr(self, attr, map.get(key))
        return self
class UpdateMeshFeatureRequest(TeaModel):
    """Request model for the UpdateMeshFeature API (tracing, telemetry,
    OPA policy, audit and cluster-domain settings)."""

    def __init__(self, service_mesh_id=None, tracing=None, trace_sampling=None, locality_load_balancing=None, telemetry=None, open_agent_policy=None, opalog_level=None, oparequest_cpu=None, oparequest_memory=None, opalimit_cpu=None, opalimit_memory=None, enable_audit=None, audit_project=None, cluster_domain=None):
        self.service_mesh_id = service_mesh_id
        self.tracing = tracing
        self.trace_sampling = trace_sampling
        self.locality_load_balancing = locality_load_balancing
        self.telemetry = telemetry
        self.open_agent_policy = open_agent_policy
        self.opalog_level = opalog_level
        self.oparequest_cpu = oparequest_cpu
        self.oparequest_memory = oparequest_memory
        self.opalimit_cpu = opalimit_cpu
        self.opalimit_memory = opalimit_memory
        self.enable_audit = enable_audit
        self.audit_project = audit_project
        self.cluster_domain = cluster_domain

    def validate(self):
        # only the mesh ID is mandatory; all feature flags are optional
        self.validate_required(self.service_mesh_id, 'service_mesh_id')

    def to_map(self):
        return {
            'ServiceMeshId': self.service_mesh_id,
            'Tracing': self.tracing,
            'TraceSampling': self.trace_sampling,
            'LocalityLoadBalancing': self.locality_load_balancing,
            'Telemetry': self.telemetry,
            'OpenAgentPolicy': self.open_agent_policy,
            'OPALogLevel': self.opalog_level,
            'OPARequestCPU': self.oparequest_cpu,
            'OPARequestMemory': self.oparequest_memory,
            'OPALimitCPU': self.opalimit_cpu,
            'OPALimitMemory': self.opalimit_memory,
            'EnableAudit': self.enable_audit,
            'AuditProject': self.audit_project,
            'ClusterDomain': self.cluster_domain,
        }

    def from_map(self, map={}):
        for attr, key in (
            ('service_mesh_id', 'ServiceMeshId'),
            ('tracing', 'Tracing'),
            ('trace_sampling', 'TraceSampling'),
            ('locality_load_balancing', 'LocalityLoadBalancing'),
            ('telemetry', 'Telemetry'),
            ('open_agent_policy', 'OpenAgentPolicy'),
            ('opalog_level', 'OPALogLevel'),
            ('oparequest_cpu', 'OPARequestCPU'),
            ('oparequest_memory', 'OPARequestMemory'),
            ('opalimit_cpu', 'OPALimitCPU'),
            ('opalimit_memory', 'OPALimitMemory'),
            ('enable_audit', 'EnableAudit'),
            ('audit_project', 'AuditProject'),
            ('cluster_domain', 'ClusterDomain'),
        ):
            setattr(self, attr, map.get(key))
        return self
class UpdateMeshFeatureResponse(TeaModel):
    """Response of the UpdateMeshFeature API call.

    Carries only the request id echoed back by the service.
    """
    def __init__(self, request_id=None):
        # Unique id assigned to this API call by the service side.
        self.request_id = request_id

    def validate(self):
        """The request id must always be present in a response."""
        self.validate_required(self.request_id, 'request_id')

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {'RequestId': self.request_id}

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.request_id = map.get('RequestId')
        return self
class UpgradeMeshVersionRequest(TeaModel):
    """Request of the UpgradeMeshVersion API call.

    Identifies the mesh to upgrade; no field is validated as required.
    """
    def __init__(self, service_mesh_id=None):
        # Id of the service mesh whose version should be upgraded.
        self.service_mesh_id = service_mesh_id

    def validate(self):
        """No required fields for this request."""
        pass

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {'ServiceMeshId': self.service_mesh_id}

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.service_mesh_id = map.get('ServiceMeshId')
        return self
class UpgradeMeshVersionResponse(TeaModel):
    """Response of the UpgradeMeshVersion API call.

    Carries only the request id echoed back by the service.
    """
    def __init__(self, request_id=None):
        # Unique id assigned to this API call by the service side.
        self.request_id = request_id

    def validate(self):
        """The request id must always be present in a response."""
        self.validate_required(self.request_id, 'request_id')

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {'RequestId': self.request_id}

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.request_id = map.get('RequestId')
        return self
class DescribeServiceMeshesRequest(TeaModel):
    """Request of the DescribeServiceMeshes API call — takes no parameters."""
    def __init__(self):
        pass

    def validate(self):
        """Nothing to validate: the request has no fields."""
        pass

    def to_map(self):
        """Serialize to the wire-format dict (always empty)."""
        return {}

    def from_map(self, map={}):
        """No fields to read; simply return self."""
        return self
class DescribeServiceMeshesResponse(TeaModel):
    """Response of DescribeServiceMeshes: the request id plus a list of
    DescribeServiceMeshesResponseServiceMeshes items."""
    def __init__(self, request_id=None, service_meshes=None):
        self.request_id = request_id
        # BUG FIX: the constructor used to discard the ``service_meshes``
        # argument and always start from an empty list; passing data at
        # construction time had no effect.
        self.service_meshes = [] if service_meshes is None else service_meshes

    def validate(self):
        """Both fields are required; each mesh item validates itself."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.service_meshes, 'service_meshes')
        if self.service_meshes:
            for k in self.service_meshes:
                if k:
                    k.validate()

    def to_map(self):
        """Serialize to the wire-format dict (nested items via their to_map)."""
        result = {}
        result['RequestId'] = self.request_id
        if self.service_meshes is not None:
            result['ServiceMeshes'] = [k.to_map() if k else None
                                       for k in self.service_meshes]
        else:
            result['ServiceMeshes'] = None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.request_id = map.get('RequestId')
        if map.get('ServiceMeshes') is not None:
            self.service_meshes = []
            for k in map.get('ServiceMeshes'):
                temp_model = DescribeServiceMeshesResponseServiceMeshes()
                self.service_meshes.append(temp_model.from_map(k))
        else:
            self.service_meshes = None
        return self
class DescribeServiceMeshesResponseServiceMeshesEndpoints(TeaModel):
    """Endpoint addresses (API server / pilot, intranet and public) of a mesh."""

    def __init__(self, intranet_api_server_endpoint=None, intranet_pilot_endpoint=None, public_api_server_endpoint=None, public_pilot_endpoint=None, reverse_tunnel_endpoint=None):
        self.intranet_api_server_endpoint = intranet_api_server_endpoint
        self.intranet_pilot_endpoint = intranet_pilot_endpoint
        self.public_api_server_endpoint = public_api_server_endpoint
        self.public_pilot_endpoint = public_pilot_endpoint
        self.reverse_tunnel_endpoint = reverse_tunnel_endpoint

    def validate(self):
        """Every endpoint field is mandatory in a response."""
        for attr in ('intranet_api_server_endpoint', 'intranet_pilot_endpoint',
                     'public_api_server_endpoint', 'public_pilot_endpoint',
                     'reverse_tunnel_endpoint'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'IntranetApiServerEndpoint': self.intranet_api_server_endpoint,
            'IntranetPilotEndpoint': self.intranet_pilot_endpoint,
            'PublicApiServerEndpoint': self.public_api_server_endpoint,
            'PublicPilotEndpoint': self.public_pilot_endpoint,
            'ReverseTunnelEndpoint': self.reverse_tunnel_endpoint,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for key, attr in (('IntranetApiServerEndpoint', 'intranet_api_server_endpoint'),
                          ('IntranetPilotEndpoint', 'intranet_pilot_endpoint'),
                          ('PublicApiServerEndpoint', 'public_api_server_endpoint'),
                          ('PublicPilotEndpoint', 'public_pilot_endpoint'),
                          ('ReverseTunnelEndpoint', 'reverse_tunnel_endpoint')):
            setattr(self, attr, map.get(key))
        return self
class DescribeServiceMeshesResponseServiceMeshesServiceMeshInfo(TeaModel):
    """Basic metadata of a service mesh: name, region, state, timestamps."""

    def __init__(self, creation_time=None, error_message=None, name=None, region_id=None, service_mesh_id=None, state=None, update_time=None, version=None):
        self.creation_time = creation_time
        self.error_message = error_message
        self.name = name
        self.region_id = region_id
        self.service_mesh_id = service_mesh_id
        self.state = state
        self.update_time = update_time
        self.version = version

    def validate(self):
        """Every metadata field is mandatory in a response."""
        for attr in ('creation_time', 'error_message', 'name', 'region_id',
                     'service_mesh_id', 'state', 'update_time', 'version'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'CreationTime': self.creation_time,
            'ErrorMessage': self.error_message,
            'Name': self.name,
            'RegionId': self.region_id,
            'ServiceMeshId': self.service_mesh_id,
            'State': self.state,
            'UpdateTime': self.update_time,
            'Version': self.version,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for key, attr in (('CreationTime', 'creation_time'),
                          ('ErrorMessage', 'error_message'),
                          ('Name', 'name'),
                          ('RegionId', 'region_id'),
                          ('ServiceMeshId', 'service_mesh_id'),
                          ('State', 'state'),
                          ('UpdateTime', 'update_time'),
                          ('Version', 'version')):
            setattr(self, attr, map.get(key))
        return self
class DescribeServiceMeshesResponseServiceMeshesSpecLoadBalancer(TeaModel):
    """Load-balancer section of a mesh spec: SLB ids and public EIPs."""

    def __init__(self, api_server_loadbalancer_id=None, api_server_public_eip=None, pilot_public_eip=None, pilot_public_loadbalancer_id=None):
        self.api_server_loadbalancer_id = api_server_loadbalancer_id
        self.api_server_public_eip = api_server_public_eip
        self.pilot_public_eip = pilot_public_eip
        self.pilot_public_loadbalancer_id = pilot_public_loadbalancer_id

    def validate(self):
        """Every load-balancer field is mandatory in a response."""
        for attr in ('api_server_loadbalancer_id', 'api_server_public_eip',
                     'pilot_public_eip', 'pilot_public_loadbalancer_id'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'ApiServerLoadbalancerId': self.api_server_loadbalancer_id,
            'ApiServerPublicEip': self.api_server_public_eip,
            'PilotPublicEip': self.pilot_public_eip,
            'PilotPublicLoadbalancerId': self.pilot_public_loadbalancer_id,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for key, attr in (('ApiServerLoadbalancerId', 'api_server_loadbalancer_id'),
                          ('ApiServerPublicEip', 'api_server_public_eip'),
                          ('PilotPublicEip', 'pilot_public_eip'),
                          ('PilotPublicLoadbalancerId', 'pilot_public_loadbalancer_id')):
            setattr(self, attr, map.get(key))
        return self
class DescribeServiceMeshesResponseServiceMeshesSpecMeshConfig(TeaModel):
    """Mesh-config section of a spec: mTLS, traffic policy, tracing, telemetry."""

    def __init__(self, mtls=None, outbound_traffic_policy=None, strict_mtls=None, tracing=None, telemetry=None):
        self.mtls = mtls
        self.outbound_traffic_policy = outbound_traffic_policy
        self.strict_mtls = strict_mtls
        self.tracing = tracing
        self.telemetry = telemetry

    def validate(self):
        """Every config field is mandatory in a response."""
        for attr in ('mtls', 'outbound_traffic_policy', 'strict_mtls',
                     'tracing', 'telemetry'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        """Serialize to the wire-format dict."""
        return {
            'Mtls': self.mtls,
            'OutboundTrafficPolicy': self.outbound_traffic_policy,
            'StrictMtls': self.strict_mtls,
            'Tracing': self.tracing,
            'Telemetry': self.telemetry,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for key, attr in (('Mtls', 'mtls'),
                          ('OutboundTrafficPolicy', 'outbound_traffic_policy'),
                          ('StrictMtls', 'strict_mtls'),
                          ('Tracing', 'tracing'),
                          ('Telemetry', 'telemetry')):
            setattr(self, attr, map.get(key))
        return self
class DescribeServiceMeshesResponseServiceMeshesSpecNetwork(TeaModel):
    """Network section of a mesh spec: security group, VPC and the list of
    vSwitch ids (plain values, not nested models)."""

    def __init__(self, security_group_id=None, vpc_id=None, v_switches=None):
        self.security_group_id = security_group_id
        self.vpc_id = vpc_id
        # BUG FIX: the constructor used to discard the ``v_switches``
        # argument and always start from an empty list.
        self.v_switches = [] if v_switches is None else v_switches

    def validate(self):
        """All three network fields are mandatory in a response."""
        self.validate_required(self.security_group_id, 'security_group_id')
        self.validate_required(self.vpc_id, 'vpc_id')
        self.validate_required(self.v_switches, 'v_switches')

    def to_map(self):
        """Serialize to the wire-format dict.

        The vSwitch list is copied element-wise (as before) so later
        mutation of ``self.v_switches`` does not leak into the result.
        """
        result = {}
        result['SecurityGroupId'] = self.security_group_id
        result['VpcId'] = self.vpc_id
        if self.v_switches is not None:
            result['VSwitches'] = list(self.v_switches)
        else:
            result['VSwitches'] = None
        return result

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.security_group_id = map.get('SecurityGroupId')
        self.vpc_id = map.get('VpcId')
        v_switches = map.get('VSwitches')
        self.v_switches = list(v_switches) if v_switches is not None else None
        return self
class DescribeServiceMeshesResponseServiceMeshesSpec(TeaModel):
def __init__(self, load_balancer=None, mesh_config=None, network=None):
self.load_balancer = load_balancer
self.mesh_config = mesh_config
self.network = network
def validate(self):
self.validate_required(self.load_balancer, 'load_balancer')
if self.load_balancer:
self.load_balancer.validate()
self.validate_required(self.mesh_config, 'mesh_config')
if self.mesh_config:
self.mesh_config.validate()
self.validate_required(self.network, 'network')
if self.network:
self.network.validate()
def to_map(self):
result = {}
if self.load_balancer is not None:
result['LoadBalancer'] = self.load_balancer.to_map()
else:
result['LoadBalancer'] = None
if self.mesh_config is not None:
result['MeshConfig'] = self.mesh_config.to_map()
else:
result['MeshConfig'] = None
if self.network is not None:
result['Network'] = self.network.to_map()
else:
result['Network'] = None
return result
def from_map(self, map={}):
if map.get('LoadBalancer') is not None:
temp_model = DescribeServiceMeshesResponseServiceMeshesSpecLoadBalancer()
self.load_balancer = temp_model.from_map(map['LoadBalancer'])
else:
self.load_balancer = None
if map.get('MeshConfig') is not None:
temp_model = DescribeServiceMeshesResponseServiceMeshesSpecMeshConfig()
self.mesh_config = temp_model.from_map(map['MeshConfig'])
else:
self.mesh_config = None
if map.get('Network') is not None:
temp_model = DescribeServiceMeshesResponseServiceMeshesSpecNetwork()
| |
from typing import Any, Dict, Iterable, List, Optional, Tuple, Callable
from math import pi as π
from sympy import Matrix as Mat
from numpy import ndarray
from physical_education.links import Link3D, constrain_rel_angle
from physical_education.system import System3D
from physical_education.foot import add_foot, feet, Foot3D
from physical_education.motor import add_torque
from physical_education.drag import add_drag
from physical_education.spring import add_torquespring
from physical_education.damper import add_torquedamper
parameters = {
# # The model below is terribly out of date. If needed, manually
# # uncomment + test it!
# 'model-6': {
# 'source': """
# A model of cheetah 6 from Functional anatomy of the cheetah (Acinonyx jubatus) forelimb and hindlimb
# doi: 10.1111/j.1469-7580.2011.01344.x and 10.1111/j.1469-7580.2010.01310.x
# """,
# 'body_B': {'mass': 17., 'radius': 0.08, 'length': 0.41},
# 'body_F': {'mass': 8., 'radius': 0.08, 'length': 0.21},
# 'tail0': {'mass': 0.4, 'radius': 0.005, 'length': 0.38},
# 'tail1': {'mass': 0.2, 'radius': 0.005, 'length': 0.38},
# 'front': {
# 'thigh': {'mass': 0.171, 'radius': 0.012, 'length': 0.254},
# 'calf': {'mass': 0.068, 'radius': 0.005, 'length': 0.247},
# },
# 'back': {
# 'thigh': {'mass': 0.210, 'radius': 0.010, 'length': 0.281},
# 'calf': {'mass': 0.160, 'radius': 0.011, 'length': 0.287},
# },
# 'friction_coeff': 1.3,
# 'motor_params': {'torque_bounds': (-2., 2.), 'no_load_speed': 50.},
# },
'mean-male': {
'source': """
Parameters for the 'mean' (X) cheetah from
Morphology, Physical Condition, and Growth of the Cheetah (Acinonyx jubatus jubatus)
https://academic.oup.com/jmammal/article/84/3/840/905900
body mass = 45.6 kg ---> majority (42kg?) in body
chest girth = 71.7 cm ---> front radius = 0.717m / (2*pi)
abdomen girth = 59.4 cm ---> back radius = 0.594m / (2*pi)
skull length = 23.4 cm
body length = 125.5 cm ---> body - skull - neck = 125.5 - 23.4 - (20?) = 80cm => front = 0.5m, back = 0.3m
tail length = 76.7 cm ---> 38cm per half
total length = 202.2 cm
total foreleg length = 77 cm
total hind leg length = 81.1 cm
front foot length = 8.2 cm
front foot width = 6.1 cm
hind foot length = 9.2 cm
hind foot width = 6.2 cm
From "Quasi-steady state aerodynamics of the cheetah tail"
fur length on tail = 10mm on average
average tail diameter (no fur) = 31mm
---> radius = 31/2 + 10 = 25.5mm = 0.0255m
Friction coeff of 1.3 from
"Locomotion dynamics of hunting in wild cheetahs"
NOTE: leg measurements mostly cribbed from 'model-6' above. Find proper values!
lengths = same
masses = same * 1.2
radii = same
NOTE: the motor_params values are mostly made up. In any case, different muscle
groups would need different values
""",
'body_B': {'mass': 28., 'radius': 0.594/(2*π), 'length': 0.3},
'body_F': {'mass': 14., 'radius': 0.717/(2*π), 'length': 0.5},
'tail0': {'mass': 0.4, 'radius': 0.0255, 'length': 0.38},
'tail1': {'mass': 0.2, 'radius': 0.0255, 'length': 0.38},
'front': {
'thigh': {'mass': 0.171*1.2, 'radius': 0.012, 'length': 0.254},
'calf': {'mass': 0.068*1.2, 'radius': 0.005, 'length': 0.247},
},
'back': {
'thigh': {'mass': 0.210*1.2, 'radius': 0.010, 'length': 0.281},
# based on ratios
'calf': {'mass': 0.100*1.2, 'radius': 0.011, 'length': 0.287 * 1.1*(33/(33+24.5))},
# from Liams model
'hock': {'mass': 0.060*1.2, 'radius': 0.011, 'length': 0.287 * 1.1*(24.5/(33+24.5))},
},
'friction_coeff': 1.3,
# measured in terms of body weight, based on the observed limits
# of energy efficient gallops and C-turns at 8, 14 and 20 m/s
# for this model
'motor': {
'spine': {'torque_bounds': (-0.7, 0.7), 'no_load_speed': 50.},
'spine-tail0': {'torque_bounds': (-0.25, 0.25), 'no_load_speed': 50.},
'tail0-tail1': {'torque_bounds': (-0.2, 0.2), 'no_load_speed': 50.},
'front': {
'hip-pitch': {'torque_bounds': (-0.5, 0.6), 'no_load_speed': 50.},
'hip-abduct': {'torque_bounds': (-0.5, 0.6), 'no_load_speed': 50.},
'knee': {'torque_bounds': (-0.5, 0.4), 'no_load_speed': 50.},
},
'back': {
'hip-pitch': {'torque_bounds': (-0.6, 0.6), 'no_load_speed': 50.},
'hip-abduct': {'torque_bounds': (-0.4, 0.5), 'no_load_speed': 50.},
'knee': {'torque_bounds': (-0.1, 0.5), 'no_load_speed': 50.},
'ankle': {'torque_bounds': (-0.4, 0.05), 'no_load_speed': 50.},
},
},
},
}
def model(params: Dict[str, Any], with_tail: bool) -> Tuple[System3D, Callable[[System3D], None]]:
    """
    Defines a quadruped model based off a cheetah (see `cheetah-model.png`).
    Roughly 400 000 operations in the equations of motion without simplification,
    and 140 000 if simplified with
    >>> robot.calc_eom(simp_func = lambda x: utils.parsimp(x, nprocs = 14))
    Note that the numbers are probably out of date at this point.

    Args:
        params: one entry of the module-level `parameters` dict
            (link masses/radii/lengths, friction coefficient, motor limits).
        with_tail: whether to attach the two tail segments.

    Returns:
        The assembled System3D and the `add_pyomo_constraints` callback that
        adds joint-range constraints once a pyomo model exists on the robot.
    """
    # create front and back links of body and tail
    body_B = Link3D('base_B', '+x', base=True, **params['body_B'],
                    meta=['spine', 'back'])
    body_F = Link3D('base_F', '+x', start_I=body_B.bottom_I, **params['body_F'],
                    meta=['spine', 'front'])

    # input torques for roll, pitch and yaw of the spine
    # body_B.add_hookes_joint(body_F, about='xyz')
    add_torque(body_B, body_F, about='xyz', **params['motor']['spine'])

    # spring/damper forces on spine.
    # q layout: body_B's last three coords / body_F's first three are the
    # Euler angles (phi, theta, psi) — roll, pitch, yaw respectively.
    phi_b, th_b, psi_b = body_B.q[3:]
    phi_f, th_f, psi_f = body_F.q[:3]
    for angles, dof in [(phi_b - phi_f, 'roll'),
                        (th_b - th_f, 'pitch'),
                        (psi_b - psi_f, 'yaw')]:
        # TODO: actually find these by initialising to 0.5 and bounding to (0.1, 10.)
        # the current fixed values are sort of arbitrary (based on a paper)
        # about humans
        add_torquespring(body_B, body_F, angles, spring_coeff=0.5,
                         # spring_coeff_lims=(0.1, 10.),
                         rest_angle=0,
                         name=f'spine-torquespring-{dof}')
        add_torquedamper(body_B, body_F, angles, damping_coeff=0.5,
                         # damping_coeff_lims=(0.1, 10.),
                         name=f'spine-torquedamper-{dof}')

    # drag on body
    add_drag(body_F, at=body_F.bottom_I, name='body_F-drag-head',
             use_dummy_vars=True, cylinder_top=True)
    add_drag(body_F, at=body_F.Pb_I, name='body_F-drag-body',
             use_dummy_vars=True)
    add_drag(body_B, at=body_B.Pb_I, use_dummy_vars=True)

    if with_tail:
        tail0 = Link3D('tail0', '-x', start_I=body_B.top_I,
                       **params['tail0'], meta=['tail'])
        tail1 = Link3D('tail1', '-x', start_I=tail0.bottom_I,
                       **params['tail1'], meta=['tail'])
        # friction coefficient of 0.1 is arbitrary. Worth setting to 0
        # in case it speeds things up?
        add_foot(tail1, at='bottom', nsides=8, friction_coeff=0.1,
                 GRFxy_max=0.1, GRFz_max=0.1)
        # input torques to tail - pitch and yaw
        body_B.add_hookes_joint(tail0, about='xy')
        add_torque(body_B, tail0, about='xy', **params['motor']['spine-tail0'])
        # torques in the middle of the tail - pitch and yaw
        tail0.add_hookes_joint(tail1, about='xy')
        add_torque(tail0, tail1, about='xy', **params['motor']['tail0-tail1'])
        # drag on tail
        add_drag(tail0, at=tail0.Pb_I, use_dummy_vars=True)
        add_drag(tail1, at=tail1.Pb_I, use_dummy_vars=True)

    def def_leg(body: Link3D, front: bool, right: bool) -> Iterable[Link3D]:
        """Define a leg and attach it to the front/back right/left of `body`.
        Only really makes sense when `body` is aligned along the `x`-axis.

        Returns (thigh, calf) for front legs and (thigh, calf, hock) for
        back legs — back legs get an extra segment plus an ankle torque.
        """
        # maybe flip x (or y)
        # the model is considered to face along the x axis (so front/back
        # refers to changes in the y value).
        def mfx(x): return x if front else -x
        def mfy(y): return y if right else -y

        # hip attachment point on the body surface, in the inertial frame
        start_I = body.Pb_I + \
            body.Rb_I @ Mat([mfx(body.length/2), mfy(body.radius), 0])
        suffix = ('F' if front else 'B') + ('R' if right else 'L')
        frontorback_str = 'front' if front else 'back'
        rightorleft_str = 'right' if right else 'left'
        p = params[frontorback_str]

        thigh = Link3D('U'+suffix, '-z', start_I=start_I, **p['thigh'],
                       meta=['leg', 'thigh', frontorback_str, rightorleft_str])
        calf = Link3D('L'+suffix, '-z', start_I=thigh.bottom_I, **p['calf'],
                      meta=['leg', 'calf', frontorback_str, rightorleft_str])

        # next, all of the muscles and their respective limits
        muscleparams = params['motor'][frontorback_str]
        # input torques: hip pitch and abduct
        body.add_hookes_joint(thigh, about='xy')
        add_torque(body, thigh, name=f'{frontorback_str}-{rightorleft_str}-hip-pitch',
                   about='x', **muscleparams['hip-pitch'])
        add_torque(body, thigh, name=f'{frontorback_str}-{rightorleft_str}-hip-abduct',
                   about='y', **muscleparams['hip-abduct'])

        thigh.add_revolute_joint(calf, about='y')
        add_torque(thigh, calf, about='y', **muscleparams['knee'])

        if front:
            add_foot(calf, at='bottom', nsides=8,
                     friction_coeff=params['friction_coeff'],
                     GRFxy_max=5, GRFz_max=5)
            return thigh, calf
        else:
            hock = Link3D('H'+suffix, '-z', start_I=calf.bottom_I, **p['hock'],
                          meta=['leg', 'calf', frontorback_str, rightorleft_str])
            calf.add_revolute_joint(hock, about='y')
            add_torque(calf, hock, about='y', **muscleparams['ankle'])
            add_foot(hock, at='bottom', nsides=8,
                     friction_coeff=params['friction_coeff'],
                     GRFxy_max=5, GRFz_max=5)
            return thigh, calf, hock

    ufl, lfl = def_leg(body_F, front=True, right=False)
    ufr, lfr = def_leg(body_F, front=True, right=True)
    ubl, lbl, hbl = def_leg(body_B, front=False, right=False)
    ubr, lbr, hbr = def_leg(body_B, front=False, right=True)

    # combine into a robot. Link order matters: add_pyomo_constraints
    # unpacks robot.links in exactly this order.
    tail = [tail0, tail1] if with_tail else []  # type: ignore
    robot = System3D('3D quadruped', [body_B, body_F, *tail,
                                      ufl, lfl, ufr, lfr,
                                      ubl, lbl, ubr, lbr,
                                      hbl, hbr])
    return robot, add_pyomo_constraints
def has_tail(robot: System3D) -> bool:
    """Return True iff any of the robot's links is a tail segment
    (identified by 'tail' appearing in the link name)."""
    for link in robot.links:
        if 'tail' in link.name:
            return True
    return False
def add_pyomo_constraints(robot: System3D) -> None:
# π/3 = 60 degrees
# π/2 = 90 degrees
# π/4 = 45 degrees
assert robot.m is not None,\
'robot does not have a pyomo model defined on it'
if has_tail(robot):
body_B, body_F, tail0, tail1, \
ufl, lfl, ufr, lfr, \
ubl, lbl, ubr, lbr, \
hbl, hbr = [link['q'] for link in robot.links]
else:
body_B, body_F, \
ufl, lfl, ufr, lfr, \
ubl, lbl, ubr, lbr, \
hbl, hbr = [link['q'] for link in robot.links]
tail0 = tail1 = None
# spine can't bend too much:
constrain_rel_angle(robot.m, 'spine_pitch',
-π/4, body_B[:, :, 'theta'], body_F[:, :, 'theta'], π/4)
constrain_rel_angle(robot.m, 'spine_roll',
-π/4, body_B[:, :, 'phi'], body_F[:, :, 'phi'], π/4)
constrain_rel_angle(robot.m, 'spine_yaw',
-π/4, body_B[:, :, 'psi'], body_F[:, :, 'psi'], π/4)
# tail | |
type(json) is unicode) and len(json) > 0:
if json[0] == '@':
# It's a variable, we replace it with the JSON data
# It will return an error if the variable doesn't exist, it's intended
varName = json[1:]
if varData.has_key(varName):
return preprocessJson(varData[varName], varData)
else:
raise Exception("varData doesn't have key `%s`, keys of varData:\n%s" % (varName, str(varData.keys())))
elif '$' in json:
if '$BUILD_PATH' in json:
return preprocessJson(json.replace('$BUILD_PATH', varData['BUILD_PATH']), varData)
elif '$ROOT_PATH' in json:
return preprocessJson(json.replace('$ROOT_PATH', varData['ROOT_PATH']), varData)
elif '$TASK_PATH' in json:
return preprocessJson(json.replace('$TASK_PATH', varData['TASK_PATH']), varData)
else:
return json
else:
return json
elif type(json) is dict:
# It's a dict, we process the values in it
newjson = {}
for k in json.keys():
newjson[k] = preprocessJson(json[k], varData)
return newjson
elif type(json) is list:
# It's a list, we filter the values in it
newjson = map(lambda x: preprocessJson(x, varData), json)
# We remove None values, which are probably undefined variables
while None in newjson:
newjson.remove(None)
return newjson
else:
return json
def waitWithTimeout(subProc, timeout=0):
    """Waits for subProc completion or timeout seconds, whichever comes
    first. A timeout of 0 (or less) means wait indefinitely; otherwise the
    process is killed once the timer fires."""
    if timeout <= 0:
        subProc.wait()
        return
    killTimer = threading.Timer(timeout, subProc.kill)
    try:
        killTimer.start()
        subProc.wait()
    finally:
        # Always disarm the timer so a finished process isn't killed later.
        killTimer.cancel()
def communicateWithTimeout(subProc, timeout=0, input=None):
    """Communicates with subProc until its completion or timeout seconds,
    whichever comes first. A timeout of 0 (or less) means no time limit;
    otherwise the process is killed once the timer fires."""
    if timeout <= 0:
        return subProc.communicate(input=input)
    killTimer = threading.Timer(timeout, subProc.kill)
    try:
        killTimer.start()
        return subProc.communicate(input=input)
    finally:
        # Always disarm the timer so a finished process isn't killed later.
        killTimer.cancel()
def isInRestrict(path):
    """Check whether a path is in the allowed paths for read/write.

    An empty RESTRICT_PATHS list means no restriction at all. Note that a
    path equal to an allowed folder itself (no trailing component) is NOT
    accepted — only strict children are.
    """
    global RESTRICT_PATHS
    if len(RESTRICT_PATHS) == 0:
        return True
    # Hoisted out of the loop: abspath(path) is loop-invariant, there is no
    # need to recompute it for every allowed folder.
    absPath = os.path.abspath(path)
    for folder in RESTRICT_PATHS:
        if absPath.startswith(os.path.abspath(folder) + '/'):
            return True
    return False
def globOfGlobs(folder, globlist):
    """Makes the combined list of files corresponding to a list of globs in a
    folder.

    Matches of each individual glob are sorted, but the order of the globs
    themselves is preserved, so ['test*.in', 'mytest.in'] gives a predictable
    ['test1.in', 'test2.in', 'mytest.in']. A file matched by several globs
    appears only once (first occurrence wins).
    """
    combined = []
    for pattern in globlist:
        for match in sorted(glob.glob(os.path.join(folder, pattern))):
            if match not in combined:
                combined.append(match)
    return combined
def symlink(filefrom, fileto, fromlocal=False, tolocal=False):
    """Make a symlink fileto -> filefrom.

    The *local flags indicate whether the corresponding path must pass the
    isInRestrict() whitelist check before being touched.
    """
    # Check source first, then destination (same order as before).
    for restricted, somePath in ((fromlocal, filefrom), (tolocal, fileto)):
        if restricted and not isInRestrict(somePath):
            raise Exception("Loading file `%s` not allowed." % somePath)
    os.symlink(filefrom, fileto)
def filecopy(filefrom, fileto, fromlocal=False, tolocal=False, makedirs=False):
    """Copy a file. *local variables indicate whether the paths must be
    explicitly allowed (isInRestrict) or not; makedirs creates the target
    directory tree first if needed."""
    if fromlocal and not isInRestrict(filefrom):
        raise Exception("Loading file `%s` not allowed." % filefrom)
    if tolocal and not isInRestrict(fileto):
        raise Exception("Loading file `%s` not allowed." % fileto)
    if makedirs:
        try:
            os.makedirs(os.path.dirname(fileto))
        except OSError:
            # FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit. The directory usually already
            # exists; any real failure will surface in the copy below.
            pass
    shutil.copy2(filefrom, fileto)
def dircopy(originDir, destDir, overwrite=True):
    """Copy all files and subdirectories from a folder to another one.
    If a destination file exists, it will be overwritten if overwrite is True,
    else the original file will not be copied."""
    for (dirpath, dirnames, filenames) in os.walk(originDir):
        # BUG FIX: the relpath arguments were swapped
        # (relpath(originDir, dirpath)), which yielded paths like '..' for
        # subdirectories and scattered their files outside destDir.
        dirRelPath = os.path.relpath(dirpath, originDir)
        try:
            os.makedirs(os.path.join(destDir, dirRelPath))
        except OSError:
            pass  # directory already exists
        for f in filenames:
            target = os.path.join(destDir, dirRelPath, f)
            # BUG FIX: the documented `overwrite` flag was silently ignored.
            if not overwrite and os.path.exists(target):
                continue
            try:
                filecopy(os.path.join(dirpath, f), target)
            except (IOError, OSError):
                # Sometimes "junk" files are written (and vanish) during the
                # walk; skip them instead of aborting the whole copy.
                pass
def isExecError(executionReport, checkContinue=True):
    """Returns whether an execution returned an error according to its exit
    code. checkContinue means that we also return False if the continueOnError
    flag is True."""
    if executionReport['exitCode'] == 0:
        return False
    if checkContinue and executionReport.get('continueOnError', False):
        return False
    return True
def capture(path, name='', truncateSize=-1):
    """Capture a file contents for inclusion into the output JSON as a
    captureReport object.

    The read is capped at min(truncateSize, CFG_MAX_GETFILE) bytes
    (CFG_MAX_GETFILE alone when truncateSize <= 0); 'wasTruncated' records
    whether anything was left unread.
    """
    if not isInRestrict(path):
        # BUG FIX: the message contained a bare `%s` placeholder that was
        # never filled in with the offending path.
        raise Exception("Opening file `%s` for capture is not allowed." % path)
    report = {'name': name,
              'sizeKb': os.path.getsize(path) / 1024}
    fd = open(path, 'r')
    if truncateSize <= 0:
        tSize = CFG_MAX_GETFILE
    else:
        tSize = min(truncateSize, CFG_MAX_GETFILE)
    # Round-trip through UTF-8 to replace invalid sequences
    # (Python 2 str semantics — this file predates Python 3).
    report['data'] = fd.read(tSize).decode('utf-8', errors='replace').encode('utf-8')
    # If a single extra byte can still be read, the capture was truncated.
    report['wasTruncated'] = (len(fd.read(1)) > 0)
    fd.close()
    return report
def removeFeedbackReport(report, noFeedback=False, isChecker=False):
    """Remove the feedback from an execution report, if the test has hidden
    results.

    When noFeedback is set, the command line, files and stderr are blanked;
    stdout keeps only its first line for checkers (the grade) and is emptied
    for everything else. The report is modified in place and returned.
    """
    if not noFeedback:
        return report
    keptData = report['stdout']['data'].split('\n')[0] if isChecker else ''
    report.update({
        'noFeedback': True,
        'commandLine': '',
        'files': [],
        'stderr': {'name': 'stderr',
                   'sizeKb': 0,
                   'data': '',
                   'wasTruncated': False},
        'stdout': {'name': 'stdout',
                   'sizeKb': len(keptData)/1024,
                   'data': keptData,
                   'wasTruncated': False},
    })
    return report
def pyFrenchErrors(report, paths):
    """Transform a Python report with pyFrenchErrors.

    Runs the external CFG_PYFRENCHERRORS tool on the solution/stderr files
    and replaces report['stderr'] with its translated output. Returns the
    report unchanged when paths is False, when stderr is empty, or when the
    tool is not installed.

    `paths` is a dict with keys 'solution', 'stderr', 'outputJson' and
    'output' (file paths passed to / written by the tool).
    """
    if paths is False:
        return report
    if report['stderr']['data'].strip() == '':
        return report
    if not os.path.isfile(CFG_PYFRENCHERRORS):
        logging.error('PyFrenchErrors is not installed, unable to translate report!')
        return report
    # If the first line of the original solution wasn't a shebang, then
    # taskgrader will have added one. We add an empty line so that error
    # message lines are correct again.
    solData = None
    solFile = open(paths['solution'], 'r+')
    if solFile.read(2) != '#!':
        solFile.seek(0)
        solData = solFile.read()
        solFile.seek(0)
        solFile.write('\n' + solData)
    solFile.close()
    # Call pyFrenchErrors; its stdout goes straight into paths['output'].
    cmdLine = [CFG_PYFRENCHERRORS, paths['solution'], paths['stderr'], paths['outputJson']]
    proc = subprocess.Popen(cmdLine, stdin=None, stdout=open(paths['output'], 'w'),
                            stderr=subprocess.PIPE, cwd=os.path.dirname(CFG_PYFRENCHERRORS))
    procOut, procErr = proc.communicate()
    # Restore the solution file to its original contents (undo the
    # blank-line shift added above).
    if solData is not None:
        solFile = open(paths['solution'], 'w')
        solFile.write(solData)
        solFile.close()
    if procErr != '':
        logging.error('pyFrenchErrors stderr: `%s`' % procErr)
    # Check whether there were block ids found; if the tool's JSON output
    # lists at least one blockId, prefer it over the plain-text output.
    useOutputJson = False
    outputJson = open(paths['outputJson'], 'rb').read().decode('utf-8', errors='replace').encode('utf-8')
    try:
        useOutputJson = len(json.loads(outputJson)['blockIds']) > 0
    except:
        # Invalid/absent JSON simply means "no block ids" — fall through.
        pass
    if useOutputJson:
        report['stderr']['data'] = outputJson
    else:
        report['stderr']['data'] = open(paths['output'], 'rb').read().decode('utf-8', errors='replace').encode('utf-8')
    report['stderr']['sizeKb'] = len(report['stderr']['data'])/1024
    return report
def transformReport(report, transformations, programType='', execType=''):
    """Transform a report according to some rules.
    For now, transformations can be "noFeedback" and "pyFrenchErrors".
    transformations must be a dict where keys are transformations types and
    values are options for that transformation. Unknown transformation names
    are silently ignored, as is execType (kept for API compatibility)."""
    for transType, transOptions in transformations.items():
        if transType == 'noFeedback':
            report = removeFeedbackReport(report, transOptions, programType == 'checker')
        elif transType == 'pyFrenchErrors':
            report = pyFrenchErrors(report, transOptions)
    return report
def evaluation(evaluationParams):
"""Full evaluation process."""
global RESTRICT_PATHS
logging.info("Initializing evaluation")
# Check root path and task path
# We need to check the keys exist as the JSON schema check is done later
if not evaluationParams.has_key('rootPath'):
raise Exception("Input JSON missing 'rootPath' key.")
if not os.path.isdir(evaluationParams['rootPath']):
raise Exception("Root path `%s` invalid." % evaluationParams['rootPath'])
evaluationParams['taskPath'] = evaluationParams['taskPath'].replace('$ROOT_PATH', evaluationParams['rootPath'])
if not evaluationParams.has_key('taskPath'):
raise Exception("Input JSON missing 'taskPath' key.")
if not os.path.isdir(evaluationParams['taskPath']):
raise Exception("Task path `%s` invalid (folder not found)." % evaluationParams['taskPath'])
# *** Variables handling
varData = {'ROOT_PATH': evaluationParams['rootPath'],
'TASK_PATH': evaluationParams['taskPath']}
# Load path restriction if present
if evaluationParams.has_key('restrictToPaths'):
RESTRICT_PATHS = evaluationParams['restrictToPaths']
else:
RESTRICT_PATHS = []
# Load a "preprocessing" JSON node or file
defParamsPath = os.path.join(evaluationParams['taskPath'], 'defaultParams.json')
if os.path.isfile(defParamsPath):
try:
varData.update(json.load(open(defParamsPath, 'r')))
except:
raise Exception("defaultParams.json in `%s` is invalid." % evaluationParams['taskPath'])
if evaluationParams.has_key('extraParams'):
exp = evaluationParams.pop('extraParams')
if type(exp) is str and isInRestrict(exp):
varData.update(json.load(open(exp, 'r')))
else:
varData.update(exp)
# Check for evaluation elements
for elem in ['generators', 'generations', 'extraTests', 'sanitizer',
'checker', 'solutions', 'executions']:
if not evaluationParams.has_key(elem):
# Get the default one defined by the task
elemKey = 'defaultEvaluation%s%s' % (elem[0].upper(), elem[1:])
if varData.has_key(elemKey):
evaluationParams[elem] = '@%s' % elemKey
else:
raise Exception("Input JSON doesn't have key '%s', and no default for this key was defined by the task." % elem)
# Path where the evaluation will take place
if evaluationParams.has_key('outputPath'):
if '../' in evaluationParams['outputPath']:
raise Exception("Output path `%s` invalid." % evaluationParams['outputPath'])
baseWorkingDir = os.path.join(CFG_BUILDSDIR, evaluationParams['outputPath'])
else:
# Make a new build folder in the build pool
buildPoolTries = 0
baseWorkingDir = '/'
while os.path.isdir(baseWorkingDir):
baseWorkingDir = os.path.join(CFG_BUILDSDIR, '_build%d/' % random.randint(10000*buildPoolTries, 10000*(buildPoolTries+1)))
buildPoolTries += 1
os.mkdir(baseWorkingDir)
report = {}
varData['BUILD_PATH'] = baseWorkingDir
report['buildPath'] = baseWorkingDir
if len(RESTRICT_PATHS) > 0:
RESTRICT_PATHS.append(baseWorkingDir)
evaluationParams = preprocessJson(evaluationParams, varData)
cache = CacheDatabase()
# We validate the input JSON format
if validate is not None:
try:
validate(evaluationParams, json.load(open(CFG_INPUTSCHEMA, 'r')))
except Exception as err:
raise Exception("Validation failed for input JSON, error message: %s" % str(err))
else:
logging.warning("Unable to import jsonschema library, continuing without input/output JSON validation.")
os.mkdir(baseWorkingDir + "libs/")
os.mkdir(baseWorkingDir + "tests/")
errorSoFar = False
logging.info("Evaluation taking place in dir `%s`" % baseWorkingDir)
# Handle options
evaluationOptions = {
'locale': 'en',
'pyFrenchErrors': True,
'onlyOneCheckerMessage': True,
'multiCheck': CFG_MULTICHECK,
'outputSizeLimit': True
}
if 'defaultEvaluationOptions' in varData:
evaluationOptions.update(varData['defaultEvaluationOptions'])
if 'options' in evaluationParams:
evaluationOptions.update(evaluationParams['options'])
# Create evaluationContext object
# allows to pass different evaluation objects around
evaluationContext = {
'options': evaluationOptions,
'cache': cache
}
# *** Generators
os.mkdir(baseWorkingDir + "generators/")
report['generators'] = []
generators = {}
for gen in evaluationParams['generators']:
| |
import math
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
import numpy as np
import torch
import tensorflow as tf
from paragen.generators import AbstractGenerator, register_generator
from paragen.utils.io import remove
from paragen.utils.runtime import Environment
@register_generator
class LightseqTransformerGenerator(AbstractGenerator):
    """
    SequenceGenerator is combination of a model and search algorithm.
    It processes in a multi-step fashion while model processes only one step.
    It is usually separated into encoder and search with decoder, and is
    exported and load with encoder and search module.

    Args:
        path: path to export or load generator
    """

    def __init__(self,
                 batch_size,
                 path=None, ):
        super().__init__(path)
        # Fixed batch size handed to the LightSeq inference engine in load().
        self._batch_size = batch_size
        env = Environment()
        # Maximum sequence length; also sizes the sinusoidal position tables
        # generated at export time.
        self._maxlen = getattr(env.configs, 'maxlen', 512)
        self._model = None
        self._src_special_tokens, self._tgt_special_tokens = None, None
        # Set lazily by load(); holds the lightseq.inference.Transformer object.
        self._lightseq_model = None

    def build_from_model(self, model, src_special_tokens, tgt_special_tokens):
        """
        Build generator from model and search.

        Args:
            model (paragen.models.EncoderDecoder): an encoder-decoder model to be wrapped
            src_special_tokens (dict): source special token dict
            tgt_special_tokens (dict): target special token dict
        """
        self._model = model
        self._src_special_tokens = src_special_tokens
        self._tgt_special_tokens = tgt_special_tokens

    def forward(self, encoder, decoder, search=None):
        """
        Infer a sample as model in evaluation mode.
        Compute encoder output first and decode results with search module

        Args:
            encoder (tuple): encoder inputs
            decoder (tuple): decoder inputs
            search (tuple): search states

        Returns:
            decoder_output: results inferred by search algorithm on decoder
        """
        # encoder[0] is assumed to be the source token-id tensor -- TODO confirm.
        src = encoder[0].cpu().numpy()
        output, _ = self._lightseq_model.infer(src)
        output = torch.from_numpy(output)
        # Keep only index 0 of axis 1 -- presumably the best-scoring hypothesis
        # in the LightSeq output layout; verify against lightseq docs.
        output = output[:, 0, :]
        return output

    def export(self,
               path,
               net_input,
               lang='en',
               **kwargs):
        """
        Export self to `path` by export model directly

        Args:
            path: path to store serialized model
            net_input: fake net_input for tracing the model (unused here; kept
                for interface compatibility with other generators)
            lang: language
            **kwargs:
                - beam_size: beam search size
                - length_penalty: length penalty
                - extra_decode_length: maximum_generation_length = min(src_length + extra_decode_length, max_step)
                - generation_method: generation method
                - topk: top-k candidates
                - topp: top-p (nucleus) sampling threshold
                - diverse_lambda: lambda in diverse
        """
        assert self._model.encoder._normalize_before and self._model.decoder._normalize_before, 'only pre-norm arch can be exported by LightSeq'
        # Local import: the generated protobuf module only exists when the
        # LightSeq proto has been compiled.
        from .transformer_pb2 import Transformer
        transformer = Transformer()
        encoder_state_dict, decoder_state_dict = self._extract_weight()
        self._fill_weight(transformer, encoder_state_dict, decoder_state_dict, lang=lang)
        self._fill_in_conf(transformer, self._model.encoder._n_head, **kwargs)
        self._write(transformer, path)

    def _fill_weight(self, transformer, encoder_state_dict, decoder_state_dict, lang='en'):
        """Populate the Transformer protobuf with encoder/decoder weights.

        Per-layer weights are copied via the module-level mapping dicts; the
        source/target token embeddings and sinusoidal position tables are
        filled afterwards.
        """
        dec_var_name_list = list(decoder_state_dict.keys())
        enc_var_name_list = list(encoder_state_dict.keys())
        # fill each encoder layer's params
        enc_tensor_names = {}
        for name in enc_var_name_list:
            name_split = name.split(".")
            # Only names whose third component is a layer index (e.g.
            # "_encoder.layers.<i>....") carry per-layer weights.
            if len(name_split) <= 2 or not name_split[2].isdigit():
                continue
            layer_id = int(name_split[2])
            enc_tensor_names.setdefault(layer_id, []).append(name)
        for layer_id in sorted(enc_tensor_names.keys()):
            fill_layer(
                enc_tensor_names[layer_id],
                encoder_state_dict,
                transformer.encoder_stack.add(),
                enc_layer_mapping_dict,
            )
        # fill each decoder layer's params
        dec_tensor_names = {}
        for name in dec_var_name_list:
            name_split = name.split(".")
            if len(name_split) <= 2 or not name.split(".")[2].isdigit():
                continue
            layer_id = int(name.split(".")[2])
            dec_tensor_names.setdefault(layer_id, []).append(name)
        for layer_id in sorted(dec_tensor_names.keys()):
            fill_layer(
                dec_tensor_names[layer_id],
                decoder_state_dict,
                transformer.decoder_stack.add(),
                dec_layer_mapping_dict,
            )
        # fill src_embedding
        fill_layer(
            enc_var_name_list,
            encoder_state_dict,
            transformer.src_embedding,
            src_emb_mapping_dict,
        )
        src_tb = _gather_token_embedding(
            enc_var_name_list, encoder_state_dict, "_embed"
        )
        transformer.src_embedding.token_embedding[:] = src_tb.flatten().tolist()
        pos_emb = _get_position_encoding(length=self._maxlen, hidden_size=src_tb.shape[-1])
        pos_emb_list = pos_emb.numpy().reshape([-1]).tolist()
        transformer.src_embedding.position_embedding[:] = pos_emb_list
        logger.info(
            "model.encoder.embed_positions.weight -> src_embedding.position_embedding, shape: {}, conversion finished!".format(
                (pos_emb.shape)
            )
        )
        # fill trg_embedding
        # NOTE(review): this updates the module-level trg_emb_mapping_dict in
        # place, so a second export would reuse the mutated dict -- confirm
        # that exporting twice in one process is not a supported use case.
        encode_output_mapping_dict = _get_encode_output_mapping_dict(len(dec_tensor_names))
        trg_emb_mapping_dict.update(encode_output_mapping_dict)
        fill_layer(
            dec_var_name_list,
            decoder_state_dict,
            transformer.trg_embedding,
            trg_emb_mapping_dict,
        )
        # assert lang in LANG2ID
        trg_tb = _gather_token_embedding(
            dec_var_name_list, decoder_state_dict, "_embed", lang=lang
        )
        # Unlike the source table, the target embedding is stored transposed.
        transformer.trg_embedding.token_embedding[:] = trg_tb.transpose().flatten().tolist()
        logger.info(
            "token_embedding.weight -> trg_embedding.token_embedding, shape: {}, conversion finished!".format(
                trg_tb.transpose().shape
            )
        )
        pos_emb = _get_position_encoding(length=self._maxlen, hidden_size=trg_tb.shape[-1])
        pos_emb_list = pos_emb.numpy().reshape([-1]).tolist()
        transformer.trg_embedding.position_embedding[:] = pos_emb_list
        logger.info(
            "model.decoder.embed_positions.weight -> trg_embedding.position_embedding, shape: {}, conversion finished!".format(
                (pos_emb.shape)
            )
        )

    def _extract_weight(self):
        """Split the wrapped model's state dict into encoder and decoder parts.

        Returns:
            (encoder_state_dict, decoder_state_dict) keyed by the original
            "_encoder."/"_decoder." prefixed parameter names.
        """
        reloaded = self._model.state_dict()
        encoder_state_dict = {}
        decoder_state_dict = {}
        for k in reloaded:
            if k.startswith("_encoder."):
                encoder_state_dict[k] = reloaded[k]
            if k.startswith("_decoder."):
                decoder_state_dict[k] = reloaded[k]
        # LightSeq needs separate q/k/v cross-attention projections, so split
        # the fused in_proj tensors.
        decoder_state_dict = split_qkv(decoder_state_dict)
        # LightSeq expects the output-projection bias under the "shared_bias" key.
        decoder_state_dict['_decoder.shared_bias'] = decoder_state_dict.pop('_decoder._out_proj_bias')
        return encoder_state_dict, decoder_state_dict

    def _fill_in_conf(self,
                      transformer,
                      nhead,
                      beam_size=4,
                      length_penalty=0.6,
                      extra_decode_length=50,
                      generation_method='beam_search',
                      topk=1,
                      topp=0.75,
                      diverse_lambda=0.,):
        """Fill the protobuf model_conf section with decoding hyper-parameters.

        Args:
            transformer: Transformer protobuf message to mutate
            nhead: number of attention heads of the exported model
            beam_size / length_penalty / extra_decode_length / generation_method /
            topk / topp / diverse_lambda: LightSeq decoding settings
        """
        # fill in conf to transformer
        transformer.model_conf.head_num = nhead
        transformer.model_conf.beam_size = beam_size
        transformer.model_conf.length_penalty = length_penalty
        transformer.model_conf.extra_decode_length = extra_decode_length
        transformer.model_conf.src_padding_id = self._src_special_tokens['pad']
        transformer.model_conf.trg_start_id = self._tgt_special_tokens['bos']
        transformer.model_conf.trg_end_id = self._tgt_special_tokens['eos']
        transformer.model_conf.sampling_method = generation_method
        transformer.model_conf.topk = topk
        transformer.model_conf.topp = topp
        transformer.model_conf.diverse_lambda = diverse_lambda
        # export() asserts pre-norm, so is_post_ln is always False here.
        transformer.model_conf.is_post_ln = False
        transformer.model_conf.no_scale_embedding = False
        transformer.model_conf.use_gelu = False

    def _write(self, transformer, path):
        """Serialize the protobuf to `path`; fall back to HDF5 if PB writing fails
        (e.g. the message exceeds the protobuf size limit)."""
        logger.info("Writing to {0}".format(path))
        try:
            with tf.io.gfile.GFile(path, "wb") as fout:
                fout.write(transformer.SerializeToString())
        except Exception:
            logger.info('Saving PB fails. Save HDF5 instead!')
            # Drop the partially written PB file before switching formats.
            remove(path)
            path = path.replace('pb', 'hdf5')
            import h5py
            f = h5py.File(path, "w")
            save_bart_proto_to_hdf5(transformer, f)
            f.close()

    def load(self):
        """
        Load generator from path
        """
        # Local import keeps lightseq an optional dependency for export-only use.
        import lightseq.inference as lsi
        self._lightseq_model = lsi.Transformer(self._path, self._batch_size)
""" key是proto参数的值,value是一个强大的表达式,每个&&分割tensor name的匹配路径或表达式,每个匹配
路径的子pattern用空格分隔,表达式用expression_开头,可以对每个tensor进行单独操作,支持多个表达式。多个匹配路径
和表达式最后会concat,axis=-1 """
# Maps LightSeq proto field names of one *encoder* layer to checkpoint-tensor
# rules (see the rule-format note above). "expression_.transpose(0, 1)"
# converts PyTorch's (out, in) weight layout to LightSeq's expected layout.
enc_layer_mapping_dict = OrderedDict(
    {
        "multihead_norm_scale": "self_attn_norm.weight",
        "multihead_norm_bias": "self_attn_norm.bias",
        "multihead_project_kernel_qkv": "self_attn.in_proj_weight&&expression_.transpose(0, 1)",
        "multihead_project_bias_qkv": "self_attn.in_proj_bias",
        "multihead_project_kernel_output": "self_attn.out_proj.weight&&expression_.transpose(0, 1)",
        "multihead_project_bias_output": "self_attn.out_proj.bias",
        "ffn_norm_scale": "ffn_norm.weight",
        "ffn_norm_bias": "ffn_norm.bias",
        "ffn_first_kernel": "ffn._fc1.weight&&expression_.transpose(0, 1)",
        "ffn_first_bias": "ffn._fc1.bias",
        "ffn_second_kernel": "ffn._fc2.weight&&expression_.transpose(0, 1)",
        "ffn_second_bias": "ffn._fc2.bias",
    }
)

# Same as above for one *decoder* layer: self-attention, encoder-decoder
# (cross) attention, and feed-forward sublayers.
dec_layer_mapping_dict = OrderedDict(
    {
        "self_norm_scale": "self_attn_norm.weight",
        "self_norm_bias": "self_attn_norm.bias",
        "self_project_kernel_qkv": "self_attn.in_proj_weight&&expression_.transpose(0, 1)",
        "self_project_bias_qkv": "self_attn.in_proj_bias",
        "self_project_kernel_output": "self_attn.out_proj.weight&&expression_.transpose(0, 1)",
        "self_project_bias_output": "self_attn.out_proj.bias",
        "encdec_norm_scale": "multihead_attn_norm.weight",
        "encdec_norm_bias": "multihead_attn_norm.bias",
        "encdec_project_kernel_q": "multihead_attn.q_proj_weight&&expression_.transpose(0, 1)",
        "encdec_project_bias_q": "multihead_attn.q_proj_bias",
        "encdec_project_kernel_output": "multihead_attn.out_proj.weight&&expression_.transpose(0, 1)",
        "encdec_project_bias_output": "multihead_attn.out_proj.bias",
        "ffn_norm_scale": "ffn_norm.weight",
        "ffn_norm_bias": "ffn_norm.bias",
        "ffn_first_kernel": "ffn._fc1.weight&&expression_.transpose(0, 1)",
        "ffn_first_bias": "ffn._fc1.bias",
        "ffn_second_kernel": "ffn._fc2.weight&&expression_.transpose(0, 1)",
        "ffn_second_bias": "ffn._fc2.bias",
    }
)

# Source-embedding block: only the final layer norm lives here; the token and
# position embeddings are filled explicitly in _fill_weight.
src_emb_mapping_dict = OrderedDict(
    {
        "norm_scale": "_norm.weight",
        "norm_bias": "_norm.bias",
    }
)

# Target-embedding block; the encode-output k/v projection rules are merged in
# at export time by _get_encode_output_mapping_dict.
trg_emb_mapping_dict = OrderedDict(
    {
        "norm_scale": "_norm.weight",
        "norm_bias": "_norm.bias",
        "shared_bias": "shared_bias",
    }
)
def check_rule(tensor_name, rule):
    """Return True when `rule` matches the trailing dotted components of `tensor_name`.

    Tensors belonging to optimizer state (names containing "Adam"/"adam")
    never match.
    """
    # Skip optimizer statistics outright.
    if any(tag in tensor_name for tag in ("Adam", "adam")):
        return False
    assert isinstance(rule, str) and rule
    rule_parts = rule.split('.')
    name_parts = tensor_name.split('.')
    if len(name_parts) < len(rule_parts):
        return False
    # The rule must equal the same number of trailing name components.
    return name_parts[-len(rule_parts):] == rule_parts
def fill_layer(tensor_names, state_dict, layer, mapping_dict):
    """Copy weights from `state_dict` into the protobuf `layer` per `mapping_dict`.

    Each mapping value is a "&&"-separated rule: plain parts name checkpoint
    tensors (matched with check_rule), parts starting with "expression_" carry
    a Python expression suffix applied to each matched tensor (or evaluated
    stand-alone when no tensor part is given). Matched/transformed tensors are
    concatenated along the last axis, flattened and assigned to the proto field.
    """
    for proto_name, ckpt_rule in mapping_dict.items():
        expression = [
            ele for ele in ckpt_rule.split("&&") if ele.startswith("expression_")
        ]
        ckpt_rule = [
            ele for ele in ckpt_rule.split("&&") if not ele.startswith("expression_")
        ]
        # Either tensor rules with at most one expression, or no tensor rule
        # and one-or-more stand-alone expressions.
        assert (len(ckpt_rule) > 0 and len(expression) < 2) or (
            len(ckpt_rule) == 0 and len(expression) > 0
        )
        if len(expression) < 2:
            # Strip the "expression" prefix; keep the code after the first "_".
            expression = "" if not expression else expression[0].split("_")[1]
        else:
            expression = [exp.split("_")[1] for exp in expression]
        target_tn = []
        for cr in ckpt_rule:
            tmp = []
            for tn in tensor_names:
                if check_rule(tn, cr):
                    tmp.append(tn)
            if len(tmp) != 1:
                logger.info(f'{tmp} {cr}')
            # Every rule must resolve to exactly one checkpoint tensor.
            assert len(tmp) == 1
            target_tn.extend(tmp)
        target_tensor = [state_dict[name] for name in target_tn]
        tt = {}
        # NOTE(review): exec runs strings assembled from the hard-coded mapping
        # dicts; never feed user-controlled rules through this function.
        if target_tensor:
            exec("tt['save'] = [ele%s for ele in target_tensor]" % expression)
        else:
            if not isinstance(expression, list):
                expression = [expression]
            exec("tt['save'] = [%s]" % ",".join(expression))
        target_tensor = np.concatenate(tt["save"], axis=-1)
        logger.info(
            "%s -> %s, shape: %s, convert finished."
            % (target_tn if target_tn else "created", proto_name, target_tensor.shape)
        )
        exec("layer.%s[:]=target_tensor.flatten().tolist()" % proto_name)
def _get_encode_output_mapping_dict(dec_layer_num):
encode_output_kernel_pattern = [
"{0}.multihead_attn.k_proj_weight&&{0}.multihead_attn.v_proj_weight".format(ele)
for ele in range(dec_layer_num)
]
encode_output_bias_pattern = [
"{0}.multihead_attn.k_proj_bias&&{0}.multihead_attn.v_proj_bias".format(ele)
for ele in range(dec_layer_num)
]
return {
"encode_output_project_kernel_kv": "&&".join(
encode_output_kernel_pattern + ["expression_.transpose(0, 1)"]
),
"encode_output_project_bias_kv": "&&".join(encode_output_bias_pattern),
}
def _get_position_encoding(length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
    """Return positional encoding.

    Calculates the position encoding as a mix of sine and cosine functions with
    geometrically increasing wavelengths, as defined in "Attention is All You
    Need", section 3.5.

    Args:
        length: Sequence length.
        hidden_size: Size of the hidden dimension
        min_timescale: Minimum scale that will be applied at each position
        max_timescale: Maximum scale that will be applied at each position

    Returns:
        Tensor with shape [length, hidden_size]
    """
    with tf.device("/cpu:0"):
        positions = tf.cast(tf.range(length), tf.float32)
        half_dim = hidden_size // 2
        # Geometric progression of inverse wavelengths between the two timescales.
        log_increment = math.log(float(max_timescale) / float(min_timescale)) / (
            tf.cast(half_dim, tf.float32) - 1
        )
        inv_timescales = min_timescale * tf.exp(
            tf.cast(tf.range(half_dim), tf.float32) * -log_increment
        )
        scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
        # First half of each row holds the sines, second half the cosines.
        return tf.concat([tf.math.sin(scaled_time), tf.math.cos(scaled_time)], axis=1)
def _gather_token_embedding(tensor_names, name2var_dict, tn_pattern, lang="en"):
""" use pattern to diff source and target. """
target_tn = []
for tn in tensor_names:
if (tn_pattern in tn.split(".")) and ("weight" in tn.split(".")):
target_tn.append(tn)
continue
target_tensor = [name2var_dict[name] for name in target_tn]
target_tensor = np.concatenate(target_tensor, axis=0)
target_tensor = target_tensor * (target_tensor.shape[1] ** 0.5)
logger.info(
"token embedding shape is %s, scaled by %s"
% (target_tensor.shape, target_tensor.shape[1] ** 0.5))
logger.info("token embedding shape is {}".format(target_tensor.shape))
return target_tensor
def split_qkv(decoder_state_dict):
    """Split fused cross-attention ``in_proj`` tensors into q/k/v entries.

    Keys containing ``multihead_attn.in_proj`` are replaced by three entries
    (q_proj, k_proj, v_proj) holding equal slices of dim 0; every other entry
    is copied through unchanged. Insertion order is preserved.
    """
    out = OrderedDict()
    fused_tag = 'multihead_attn.in_proj'
    for name, tensor in decoder_state_dict.items():
        if fused_tag not in name:
            out[name] = tensor
            continue
        # The fused projection stacks q, k and v along dim 0, in that order.
        size = tensor.size(0) // 3
        chunks = {
            'q_proj': tensor[:size],
            'k_proj': tensor[size:2 * size],
            'v_proj': tensor[2 * size:],
        }
        for proj, chunk in chunks.items():
            out[name.replace(fused_tag, 'multihead_attn.' + proj)] = chunk
    return out
def save_bart_proto_to_hdf5(transformer, f):
"""Convert bart protobuf to hdf5 format to support larger weight."""
MODEL_CONF_KEYS = [
# model_conf
"head_num",
"beam_size",
"extra_decode_length",
"length_penalty",
"src_padding_id",
"trg_start_id",
"diverse_lambda",
"sampling_method",
"topp",
"topk",
"trg_end_id",
"is_post_ln",
"no_scale_embedding",
"use_gelu",
"is_multilingual",
]
EMBEDDING_KEYS = [
# src_embedding
# trg_embedding
"token_embedding",
"position_embedding",
"norm_scale",
"norm_bias",
"encode_output_project_kernel_kv",
"encode_output_project_bias_kv",
"shared_bias",
"lang_emb",
"trg_vocab_mask",
]
ENCODER_LAYER_KEYS = [
# encoder_stack/{i}
"multihead_norm_scale",
"multihead_norm_bias",
"multihead_project_kernel_qkv",
"multihead_project_bias_qkv",
"multihead_project_kernel_output",
"multihead_project_bias_output",
"ffn_norm_scale",
"ffn_norm_bias",
"ffn_first_kernel",
"ffn_first_bias",
"ffn_second_kernel",
"ffn_second_bias",
]
DECODER_LAYER_KEYS = [
# decoder_stack/{i}
"self_norm_scale",
"self_norm_bias",
"self_project_kernel_qkv",
"self_project_bias_qkv",
"self_project_kernel_output",
"self_project_bias_output",
"encdec_norm_scale",
"encdec_norm_bias",
"encdec_project_kernel_q",
"encdec_project_bias_q",
"encdec_project_kernel_output",
"encdec_project_bias_output",
"ffn_norm_scale",
"ffn_norm_bias",
"ffn_first_kernel",
"ffn_first_bias",
"ffn_second_kernel",
| |
# low_prio_sound_gen.py -- IEC 60601-1-8 low-priority alarm sound generator
#!/usr/bin/python
from __future__ import division
from wavtools import getVolumes
from wavtools import volumesOutOfDbRange
from wavtools import hasEnoughHarmonics
from wavtools import hasSignificantVolumesInsideDbRange
from wavtools import getMax
from wavtools import save_wav
from wavtools import IEC_60601_1_8_Pulse
from wavtools import PulseMerger
from wavtools import introduction
from wavtools import maxDiffSoundPressureToPulseFrequencyInDB
from shutil import copyfile
import subprocess
import os
import sys
# ---------------------------------------------------------------------------
# User-tunable configuration (Python 2 script). Later sections override
# earlier defaults; an optional lp_alarm.py config file overrides everything.
# ---------------------------------------------------------------------------
# specify, where you want to save the file and its filename:
#outputFilePath = os.environ['HOME'] + "/Downloads/sounds/low-prio-new.wav"
outputFilePath = "new-lp.wav"
# name where a new configuration is saved:
# IMPORTANT NOTE: name must be the same as in the (next) import statement
# below around line 122
lpConfigFilename = 'lp_alarm'
# if you want to also remote copy it to a device via scp, specify
# the remote target here in the form of a scp target, e.g.
# someuser@host:path. See scp for more details. If you set
# the remote to None, no attempt for scp is made.
remoteSCPtarget = None
# NOTE: you will see several sections of settings the later overwriting the
# priors. This is intented so you can see the history where it began.
###############################################################################
# Default Settings
###############################################################################
# See IEC_60601_1_8_*_Pulse class for spacing, duration, rise and fall ranges
#
# NOTE: there were scripts to generate 8kHz and 44.1kHz bursts
defaultSampleRate_Hz = 44100
# NOTE: pulse spacing in initial script did not cover from 90% of the fall
#       mark to 90% of the rise mark
defaultPulseSpacing_ms = 180
# NOTE: pulse duration in initial script did not cover from 90% of the rise
#       mark to 90% of the fall mark
defaultPulseDuration_ms = 180
# NOTE: default rise and fall time were measured from the output and was
#       actually around 8.8
defaultRiseTime_pc = 20
defaultFallTime_pc = 10
defaultBaseFrequency1 = 505
defaultHarmonicStr1 = "1 3 5 7 9"
defaultVolumeStr1 = "0.75 0.7 0.5 0.3 0.3"
defaultBaseFrequency2 = 400
defaultHarmonicStr2 = defaultHarmonicStr1
defaultVolumeStr2 = defaultVolumeStr1
# spacing between two bursts
defaultBurstSpacing_ms = 25000
# NOTE: the intial script output 5 wave files that had to be merged with an
#       external tool e.g. audacity. Audacity caused clippings whatever way
#       it merged the files.
# gain in (0.0; 1.0]
gain = 0.98
###############################################################################
# Some devices may require a small silence period at the start of a HP alarm
# default should be 0
###############################################################################
defaultStartWithSilence_ms = 0
###############################################################################
###############################################################################
###############################################################################
# A config file given on the command line is copied over lp_alarm.py so the
# import below picks it up.
if len(sys.argv) > 1:
    copyfile(sys.argv[1], 'lp_alarm.py')
# NOTE: this import must come after all initialization
# By the way Python is made lp_alarm must be located in the directory in
# which THIS script (low_prio_sound_gen.py) resides.
try:
    from lp_alarm import defaultSampleRate_Hz
    from lp_alarm import defaultPulseSpacing_ms
    from lp_alarm import defaultPulseDuration_ms
    from lp_alarm import defaultRiseTime_pc
    from lp_alarm import defaultFallTime_pc
    from lp_alarm import defaultBaseFrequency1
    from lp_alarm import defaultHarmonicStr1
    from lp_alarm import defaultVolumeStr1
    from lp_alarm import defaultBaseFrequency2
    from lp_alarm import defaultHarmonicStr2
    from lp_alarm import defaultVolumeStr2
    from lp_alarm import defaultBurstSpacing_ms
    from lp_alarm import defaultStartWithSilence_ms
except:
    # Missing config is fine: the built-in defaults above are used.
    print("No config " + lpConfigFilename + ".py found\n")
    pass
def LP_introduction():
    """Print a short terminology/ASCII diagram for IEC 60601-1-8 low-priority alarms."""
    print "Terminology - Low Priority Alarm:"
    print
    print "              |                burst spacing           |"
    print "  |------- burst -------|                              |"
    print "  | |pulse|pulse|pulse| |                              |"
    print "  | | ___ |space| ___ | |                              |   __..."
    print "  | |/   \\     /   \\| |                              |  /"
    print "  |  /     \\   /     \\  |                             |/"
    print "__|/freq. 1\\___/freq. 2\\|_______________/"
    print ""
    print "Please set the maximum sound pressure difference to the fundamental"
    print "frequency of (at least 4) harmonics in wavtools.py."
# IEC 60601-1-8 allows the pulse fundamental frequency f_0 in [150 Hz, 1000 Hz];
# pulses may have different tones within this range:
baseFrequency_Hz_min = 150
baseFrequency_Hz_max = 1000
class IEC_60601_1_8_Low_Priority_Pulse(IEC_60601_1_8_Pulse):
    """
    Inherits from IEC_60601_1_8_Pulse and sets pulseDuration_ms_min|max
    and pulse_90pc_spacing_ms_min|max for Low Priority alarms.
    """
    # pulses spacing t_s (measured from 90% of the fall to 90% of the rise)
    pulse_90pc_spacing_ms_min=125
    pulse_90pc_spacing_ms_max=250
    # last configured spacing; None until set
    _pulse_90pc_spacing_ms = None
    # low priority alarm pulse duration (t_d): 125ms to 250ms
    pulseDuration_ms_min=125
    pulseDuration_ms_max=250
    # minimum spacing between two bursts in milliseconds
    burstSpacing_ms_min=15000

    def setBurstSpacing_ms(self, bs = None):
        """
        spacing between two bursts in milliseconds (measured from 90% mark of the rise
        to 90% mark of the fall)

        A value of None selects the minimum allowed spacing; negative values
        are clamped to 0. Returns the value actually stored.
        """
        if bs is None:
            self._burstSpacing_ms = self.burstSpacing_ms_min
        else:
            self._burstSpacing_ms = max(0, bs)
        return self._burstSpacing_ms

    def getBurstSpacing_ms(self):
        # Plain accessor for the value stored by setBurstSpacing_ms.
        return self._burstSpacing_ms

    def isBurstSpacingInRange(self):
        # True when the configured spacing satisfies the IEC minimum.
        return self._burstSpacing_ms >= self.burstSpacing_ms_min
###############################################################################
# Interactive part: query every parameter from the user, falling back to the
# defaults above. Python 2: input() evaluates, raw_input() returns a string;
# pressing Enter makes input() raise, which the bare excepts use to select
# the default value.
# NOTE(review): indentation below was reconstructed from a whitespace-stripped
# source -- verify the block structure against the original script.
###############################################################################
print
introduction()
print
LP_introduction()
print
print
sampleRate_Hz = defaultSampleRate_Hz
try:
    select = input("Sample rate\n 0) 8kHz\n 1) 9.6kHz\n 2) 12kHz\n 3) 16kHz\n 4) 19.2kHz\n 5) 24kHz\n 6) 32kHz\n 7) 44.1kHz\n 8) 48kHz\n 9) 96kHz\nSelect [0; 9] (default: " + str(defaultSampleRate_Hz) + "): ")
    if select == 0:
        sampleRate_Hz = 8000
    if select == 1:
        sampleRate_Hz = 9600
    if select == 2:
        sampleRate_Hz = 12000
    if select == 3:
        sampleRate_Hz = 16000
    if select == 4:
        sampleRate_Hz = 19200
    if select == 5:
        sampleRate_Hz = 24000
    if select == 6:
        sampleRate_Hz = 32000
    if select == 7:
        sampleRate_Hz = 44100
    if select == 8:
        sampleRate_Hz = 48000
    if select == 9:
        sampleRate_Hz = 96000
except:
    # Empty/invalid input keeps the default sample rate.
    pass
print("==> Selected sample rate: " + str(sampleRate_Hz) + " Hz\n")
pulse = IEC_60601_1_8_Low_Priority_Pulse()
pulse.setSampleRate_Hz(sampleRate_Hz)
###############################################################################
try:
    pulseSpacing_ms = input("Pulse spacing [" + str(pulse.pulse_90pc_spacing_ms_min) + "ms; " + str(pulse.pulse_90pc_spacing_ms_max) + "ms] (default: " + str(defaultPulseSpacing_ms) + "ms): ")
except:
    pulseSpacing_ms = defaultPulseSpacing_ms
print("==> Selected pulse spacing: " + str(pulse.setPulseSpacing_ms(pulseSpacing_ms)) + " ms")
if (not pulse.isPulseSpacingInRange()):
    print("*** WARNING: pulse spacing out of range")
print
###############################################################################
try:
    pulseDuration_ms = input("Pulse duration [" + str(pulse.pulseDuration_ms_min) + "ms; " + str(pulse.pulseDuration_ms_max) + "ms] (default: " + str(defaultPulseDuration_ms) + "ms): ")
except:
    pulseDuration_ms = defaultPulseDuration_ms
print("==> Selected pulse duration: " + str(pulse.setPulseDuration_ms(pulseDuration_ms)) + " ms")
if (not pulse.isPulseDurationInRange()):
    print("*** WARNING: pulse duration out of range")
print
###############################################################################
try:
    riseTime_pc = input("Rise time [" + str(pulse.riseTime_pc_min) + "%; " + str(pulse.riseTime_pc_max) + "%] (default: " + str(defaultRiseTime_pc) + "%): ")
except:
    riseTime_pc = defaultRiseTime_pc # 100 * (11 / 125) = 8.8% measures with Audaciy
print("==> Selected rise time: " + str(pulse.setRiseTime_pc(riseTime_pc)) + " %")
if (not pulse.isRiseTimeInRange()):
    print("*** WARNING: rise time out of range")
print
###############################################################################
try:
    fallTime_pc = input("Fall time [0%; 100%] (default: " + str(defaultFallTime_pc) + "%): ")
except:
    fallTime_pc = defaultFallTime_pc
print("==> Selected fall time: " + str(pulse.setFallTime_pc(fallTime_pc)) + " %")
if (not pulse.isFallTimeInRange()):
    print("*** WARNING: fall time out of range")
print
###############################################################################
try:
    baseFrequency1 = input("Base frequency of 1st pulse [" + str(baseFrequency_Hz_min) + "Hz; " + str(baseFrequency_Hz_max) + "Hz] (default: " + str(defaultBaseFrequency1) + "Hz): ")
except:
    baseFrequency1 = defaultBaseFrequency1
print("==> Selected base frequency of 1st pulse: " + str(baseFrequency1))
# IEC 60601-1-8 page 17 requires base frequency in range [150Hz, 1000Hz]
if (baseFrequency_Hz_min > baseFrequency1 or baseFrequency1 > baseFrequency_Hz_max):
    print("*** WARNING: pulse frequency f_0 out of range")
print
# to keep this code simple we leave this task to the user: first harmonic is 1 and harmonics
# are sorted ascending.
print("IMPORTANT NOTE: the first harmonic must be 1 and the harmonics must be in ascending order!!!")
harmonicsStr1 = raw_input("Harmonics [positive integer] (default: " + defaultHarmonicStr1 + "): ")
if len(harmonicsStr1) == 0:
    harmonicsStr1 = defaultHarmonicStr1
print("==> Selected harmonics of 1st pulse: " + harmonicsStr1)
harmonics1 = harmonicsStr1.split()
noHarmonics = len(harmonics1)
# hasEnoughHarmonics ensures the correct format of harmonics. Hence call before
# working on harmonics
if (not hasEnoughHarmonics(baseFrequency1, harmonics1)):
    print("*** WARNING: less than required number of harmonics in range [300Hz, 4000Hz]")
print
###############################################################################
# Re-prompt until the number of volumes matches the number of harmonics.
volumes1 = []
while len(volumes1) != noHarmonics:
    volumesStr1 = raw_input(str(noHarmonics) + " Volumes (default: " + defaultVolumeStr1 + "): ")
    if len(volumesStr1) == 0:
        volumesStr1 = defaultVolumeStr1
    volumes1 = getVolumes(volumesStr1)
print("==> Selected volumes of 1st pulse: " + volumesStr1)
dBvalue=str(maxDiffSoundPressureToPulseFrequencyInDB)
if (not hasSignificantVolumesInsideDbRange(volumes1, baseFrequency1, harmonics1)):
    print("*** WARNING: less than 4 harmonics with volumes +-" + dBvalue + "dB from base frequency's volume")
    print(" "),
    vOODbR = volumesOutOfDbRange(volumes1)
    print(" The following volumes differ more than " + dBvalue + "dB from base frequency's volume:")
    for v in vOODbR:
        print(str(v) + ", "),
    print
###############################################################################
try:
    print("NOTE: if you wish to create a low priority alarm with 1 pulse enter frequency 0 here!")
    baseFrequency2 = input("Base frequency of 2nd pulse [" + str(baseFrequency_Hz_min) + "Hz; " + str(baseFrequency_Hz_max) + "Hz] (default: " + str(defaultBaseFrequency2) + "Hz): ")
except:
    baseFrequency2 = defaultBaseFrequency2
print("==> Selected base frequency of 2nd pulse: " + str(baseFrequency2))
# IEC 60601-1-8 page 17 requires base frequency in range [150Hz, 1000Hz]
if baseFrequency2 == 0:
    print("NOTE: Second pulse disabled!")
else:
    if (baseFrequency_Hz_min > baseFrequency2 or baseFrequency2 > baseFrequency_Hz_max):
        print("*** WARNING: pulse frequency f_0 out of range [150Hz, 1000Hz]")
    print
harmonics2 = []
volumes2 = []
if baseFrequency2 != 0:
# to keep this code simple we leave this task to the user:
# first harmonic is 1 and harmonics are sorted ascending.
print("IMPORTANT NOTE: the first harmonic must be 1 | |
# Copyright (c) 2018-2021, Texas Instruments
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import time
import math
import copy
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.onnx
import onnx
import datetime
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import random
import cv2
from colorama import Fore
import progiter
from packaging import version
import warnings
from torchvision.edgeailite import xnn
from torchvision.edgeailite import xvision
from torchvision.edgeailite.xvision.transforms import image_transforms
from torchvision.edgeailite.xvision import losses as pixel2pixel_losses
from .infer_pixel2pixel import compute_accuracy
##################################################
warnings.filterwarnings('ignore', category=torch.jit.TracerWarning)
##################################################
def get_config():
args = xnn.utils.ConfigNode()
args.dataset_config = xnn.utils.ConfigNode()
args.dataset_config.split_name = 'val'
args.dataset_config.max_depth_bfr_scaling = 80
args.dataset_config.depth_scale = 1
args.dataset_config.train_depth_log = 1
args.use_semseg_for_depth = False
# model config
args.model_config = xnn.utils.ConfigNode()
args.model_config.output_type = ['segmentation'] # the network is used to predict flow or depth or sceneflow
args.model_config.output_channels = None # number of output channels
args.model_config.prediction_channels = None # intermediate number of channels before final output_channels
args.model_config.input_channels = None # number of input channels
args.model_config.final_upsample = True # use final upsample to input resolution or not
args.model_config.output_range = None # max range of output
args.model_config.num_decoders = None # number of decoders to use. [options: 0, 1, None]
args.model_config.freeze_encoder = False # do not update encoder weights
args.model_config.freeze_decoder = False # do not update decoder weights
args.model_config.multi_task_type = 'learned' # find out loss multiplier by learning, choices=[None, 'learned', 'uncertainty', 'grad_norm', 'dwa_grad_norm']
args.model_config.target_input_ratio = 1 # Keep target size same as input size
args.model_config.input_nv12 = False # convert input to nv12 format
args.model_config.enable_fp16 = False # faster training if the GPU supports fp16
args.model = None # the model itself can be given from ouside
args.model_name = 'deeplabv2lite_mobilenetv2' # model architecture, overwritten if pretrained is specified
args.dataset_name = 'cityscapes_segmentation' # dataset type
args.transforms = None # the transforms itself can be given from outside
args.input_channel_reverse = False # reverse input channels, for example RGB to BGR
args.data_path = './data/cityscapes' # 'path to dataset'
args.save_path = None # checkpoints save path
args.phase = 'training' # training/calibration/validation
args.date = None # date to add to save path. if this is None, current date will be added.
args.logger = None # logger stream to output into
args.show_gpu_usage = False # Shows gpu usage at the begining of each training epoch
args.split_file = None # train_val split file
args.split_files = None # split list files. eg: train.txt val.txt
args.split_value = None # test_val split proportion (between 0 (only test) and 1 (only train))
args.optimizer = 'adam' # optimizer algorithms, choices=['adam','sgd']
args.scheduler = 'step' # scheduler algorithms, choices=['step','poly', 'cosine']
args.workers = 8 # number of data loading workers
args.epochs = 250 # number of total epochs to run
args.start_epoch = 0 # manual epoch number (useful on restarts)
args.epoch_size = 0 # manual epoch size (will match dataset size if not specified)
args.epoch_size_val = 0 # manual epoch size (will match dataset size if not specified)
args.batch_size = 12 # mini_batch size
args.total_batch_size = None # accumulated batch size. total_batch_size = batch_size*iter_size
args.iter_size = 1 # iteration size. total_batch_size = batch_size*iter_size
args.lr = 1e-4 # initial learning rate
args.lr_clips = None # use args.lr itself if it is None
args.lr_calib = 0.05 # lr for bias calibration
args.warmup_epochs = 5 # number of epochs to warmup
args.warmup_factor = 1e-3 # max lr allowed for the first epoch during warmup (as a factor of initial lr)
args.momentum = 0.9 # momentum for sgd, alpha parameter for adam
args.beta = 0.999 # beta parameter for adam
args.weight_decay = 1e-4 # weight decay
args.bias_decay = None # bias decay
args.sparse = True # avoid invalid/ignored target pixels from loss computation, use NEAREST for interpolation
args.tensorboard_num_imgs = 5 # number of imgs to display in tensorboard
args.pretrained = None # path to pre_trained model
args.resume = None # path to latest checkpoint (default: none)
args.no_date = False # don\'t append date timestamp to folder
args.print_freq = 100 # print frequency (default: 100)
args.milestones = (100, 200) # epochs at which learning rate is divided by 2
args.losses = ['segmentation_loss'] # loss functions to mchoices=['step','poly', 'cosine'],loss multiplication factor')
args.metrics = ['segmentation_metrics'] # metric/measurement/error functions for train/validation
args.multi_task_factors = None # loss mult factors
args.class_weights = None # class weights
args.loss_mult_factors = None # fixed loss mult factors - per loss - not: this is different from multi_task_factors (which is per task)
args.multistep_gamma = 0.5 # steps for step scheduler
args.polystep_power = 1.0 # power for polynomial scheduler
args.rand_seed = 1 # random seed
args.img_border_crop = None # image border crop rectangle. can be relative or absolute
args.target_mask = None # mask rectangle. can be relative or absolute. last value is the mask value
args.rand_resize = None # random image size to be resized to during training
args.rand_output_size = None # output size to be resized to during training
args.rand_scale = (1.0, 2.0) # random scale range for training
args.rand_crop = None # image size to be cropped to
args.img_resize = None # image size to be resized to during evaluation
args.output_size = None # target output size to be resized to
args.count_flops = True # count flops and report
args.shuffle = True # shuffle or not
args.shuffle_val = True # shuffle val dataset or not
args.transform_rotation = 0. # apply rotation augumentation. value is rotation in degrees. 0 indicates no rotation
args.is_flow = None # whether entries in images and targets lists are optical flow or not
args.upsample_mode = 'bilinear' # upsample mode to use, choices=['nearest','bilinear']
args.image_prenorm = True # whether normalization is done before all other the transforms
args.image_mean = (128.0,) # image mean for input image normalization
args.image_scale = (1.0 / (0.25 * 256),) # image scaling/mult for input iamge normalization
args.max_depth = 80 # maximum depth to be used for visualization
args.pivot_task_idx = 0 # task id to select best model
args.parallel_model = True # Usedata parallel for model
args.parallel_criterion = True # Usedata parallel for loss and metric
args.evaluate_start = True # evaluate right at the begining of training or not
args.save_onnx = True # apply quantized inference or not
args.print_model = False # print the model to text
args.run_soon = True # To start training after generating configs/models
args.quantize = False # apply quantized inference or not
#args.model_surgery = None # replace activations with PAct2 activation module. Helpful in quantized training.
args.bitwidth_weights = 8 # bitwidth for weights
args.bitwidth_activations = 8 # bitwidth for activations
args.histogram_range = True # histogram range for calibration
args.bias_calibration = True # apply bias correction during quantized inference calibration
args.per_channel_q = False # apply separate quantizion factor for each channel in depthwise or not
args.constrain_bias = None # constrain bias according to | |
#!/usr/bin/env python
"""
Scheduling tactician.
"""
import os
import logging
import numpy as np
import ephem
from collections import OrderedDict as odict
from obztak import get_survey
from obztak.utils.projector import angsep
from obztak.utils import projector as proj
from obztak.ctio import CTIO
from obztak.utils import constants
from obztak.utils.date import datestring
# Airmass windows, [min, max], keyed by observing-condition name.
# Consumed by ConditionTactician.weight to penalize fields whose current
# airmass falls outside the window for the requested mode.
CONDITIONS = odict([
    (None, [0.0, 2.0]), #default
    ('great', [1.6, 2.0]),
    ('good', [0.0, 2.0]),
    ('complete',[0.0, 2.0]),
    ('maglites',[0.0, 2.0]),
    ('fine', [0.0, 1.9]),
    ('ok', [0.0, 1.6]),
    ('poor', [0.0, 1.5]),
    ('bad', [0.0, 1.4]),
])
class Tactician(object):
    """Base class for survey scheduling tacticians.

    A tactician ranks the available target fields by a scalar ``weight``
    (lower is better), computed from the current observatory state (date,
    moon, sun) and the list of already-completed fields, and selects the
    next field(s) to observe.
    """
    name = 'tactician'

    def __init__(self, fields=None, observatory=None, **kwargs):
        """ Initialize the survey scheduling tactician.

        Parameters:
        -----------
        fields      : The available fields.
        observatory : The observatory (defaults to CTIO)

        Returns:
        --------
        Tactician : The Tactician object
        """
        if not observatory: observatory = CTIO()
        self.observatory = observatory
        self.moon = ephem.Moon()
        self.sun = ephem.Sun()
        self.set_target_fields(fields)
        self.set_completed_fields(None)
        self.set_date(None)

    def set_date(self, date):
        """Set the observation date and recompute moon/sun ephemerides."""
        if date is not None:
            self.observatory.date = ephem.Date(date)
            self.moon.compute(self.observatory)
            self.sun.compute(self.observatory)

    def set_target_fields(self, fields):
        """Store a copy of the target fields (None clears them)."""
        if fields is not None:
            self.fields = fields.copy()
        else:
            self.fields = None

    def set_completed_fields(self, fields):
        """Store a copy of the completed fields (None clears them)."""
        if fields is not None:
            self.completed_fields = fields.copy()
        else:
            self.completed_fields = None

    def set_previous_field(self, field):
        """Deprecated no-op; the previous field is derived from
        ``self.completed_fields`` instead."""
        #if field is not None:
        #    self.previous_field = field.copy()
        #else:
        #    self.previous_field = None
        pass

    @property
    def date(self):
        """Current observatory date (ephem.Date)."""
        return self.observatory.date

    @property
    def hour_angle_limit(self):
        """Per-field hour-angle limit (function of declination)."""
        return self.observatory.hour_angle_limit(self.fields['DEC'])

    @property
    def airmass_limit(self):
        """Per-field airmass limit (function of declination)."""
        return self.observatory.airmass_limit(self.fields['DEC'])

    @property
    def zenith_angle(self):
        """(RA, Dec) of the zenith in degrees."""
        # RA and Dec of zenith
        return np.degrees(self.observatory.radec_of(0, '90'))

    @property
    def airmass(self):
        """ Calculate the airmass of each field. """
        ra_zenith, dec_zenith = self.zenith_angle
        return proj.airmass(ra_zenith, dec_zenith,
                            self.fields['RA'], self.fields['DEC'])

    @property
    def moon_angle(self):
        """Angular separation (deg) between each field and the moon."""
        # See here for ra,dec details: http://rhodesmill.org/pyephem/radec
        ra_moon, dec_moon = np.degrees([self.moon.ra, self.moon.dec])
        return proj.angsep(ra_moon, dec_moon,
                           self.fields['RA'], self.fields['DEC'])

    @property
    def moon_phase(self):
        """Moon illumination (percent)."""
        return self.moon.phase

    @property
    def slew(self):
        """Angular separation to previous field."""
        # Set previous field as last completed field
        previous_field = None
        if (self.completed_fields is not None) and len(self.completed_fields):
            previous_field = self.completed_fields[-1]
            # Ignore if more than 30 minutes has elapsed
            if (self.date - ephem.Date(previous_field['DATE'])) > 30*ephem.minute:
                previous_field = None
        if previous_field:
            return angsep(previous_field['RA'], previous_field['DEC'],
                          self.fields['RA'], self.fields['DEC'])
        else:
            return np.zeros(len(self.fields))

    @property
    def slew_time(self):
        """Estimate of the slew time (Alt/Az telescope)."""
        # Set previous field as last completed field
        previous_field = None
        if (self.completed_fields is not None) and len(self.completed_fields):
            previous_field = self.completed_fields[-1]
            # Ignore if more than 30 minutes has elapsed
            if (self.date - ephem.Date(previous_field['DATE'])) > 30*ephem.minute:
                previous_field = None
        if previous_field:
            # NOTE(review): this is a flat RA/Dec distance rather than a true
            # angular separation -- presumably adequate as a slew proxy; confirm.
            return np.sqrt((previous_field['RA'] - self.fields['RA'])**2 +
                           (previous_field['DEC'] - self.fields['DEC'])**2)
        else:
            return np.zeros(len(self.fields))

    @property
    def hour_angle(self):
        """Hour angle of each field (deg), wrapped into [-180, 180]."""
        ra_zenith, dec_zenith = self.zenith_angle
        hour_angle = np.copy(self.fields['RA']) - ra_zenith
        hour_angle[hour_angle < -180.] += 360.
        hour_angle[hour_angle > 180.] -= 360.
        return hour_angle

    @property
    def viable_fields(self):
        """Boolean mask of fields that may currently be observed."""
        # Check the hour angle restrictions at south pole
        sel_hour_angle = np.fabs(self.hour_angle) < self.hour_angle_limit
        # Blanco airmass restrictions
        sel_airmass = self.airmass < self.airmass_limit
        # Declination restrictions
        sel_declination = self.fields['DEC'] > constants.SOUTHERN_REACH
        # Exclude special fields (unless using special tacticians)
        sel_special = self.fields['PRIORITY'] < 90
        viable = sel_hour_angle & sel_airmass & sel_declination & sel_special
        return viable

    @property
    def weight(self):
        """Default weighting: favor viable, rising, early-tiling, short-slew,
        low-airmass fields. Lower weight is better; np.inf disallows."""
        weight = self.hour_angle
        sel = self.viable_fields
        weight[~sel] = np.inf
        weight += 6. * 360. * self.fields['TILING']  # Was 6, 60
        weight += self.slew**3  # slew**2
        weight += 100. * (self.airmass - 1.)**3
        return weight

    def select_index(self):
        """Return the indices of all exposures sharing the hex and tiling of
        the minimum-weight field."""
        index_select = np.argmin(self.weight)
        # Search for other exposures in the same field
        field_id = self.fields['HEX'][index_select]
        tiling = self.fields['TILING'][index_select]
        index = np.nonzero((self.fields['HEX'] == field_id) &
                           (self.fields['TILING'] == tiling))[0]
        return index

    def select_fields(self):
        """Select the next field(s) and fill in their derived columns
        (AIRMASS, DATE, SLEW, MOONANGLE, HOURANGLE)."""
        index = self.select_index()
        timedelta = constants.FIELDTIME*np.arange(len(index))
        if np.any(self.slew[index] > 5.):
            # Apply a 30 second penalty for slews over 5 deg.
            # This is not completely realistic, but better than nothing
            # WARNING: This is broken when selecting two fields at once
            timedelta += 30*ephem.second
        fields = self.fields[index]
        fields['AIRMASS'] = self.airmass[index]
        # BUGFIX: under Python 3, `map` returns an iterator, which cannot be
        # assigned into a record-array column; materialize it as a list.
        fields['DATE'] = list(map(datestring, self.date + timedelta))
        fields['SLEW'] = self.slew[index]
        fields['MOONANGLE'] = self.moon_angle[index]
        fields['HOURANGLE'] = self.hour_angle[index]
        return fields
class CoverageTactician(Tactician):
    """Tactician that prioritizes raw sky coverage (the default scheme)."""
    name = 'coverage'
    mode = None

    @property
    def weight(self):
        """Per-field scalar weight; lower values are scheduled first."""
        observable = self.viable_fields
        rank = self.hour_angle
        # Disallow non-viable fields outright.
        rank[~observable] = np.inf
        # Strongly prefer earlier tilings, short slews, and low airmass.
        rank += 6. * 360. * self.fields['TILING']  # Was 6, 60
        rank += self.slew**3  # slew**2
        rank += 100. * (self.airmass - 1.)**3
        return rank
class ConditionTactician(Tactician):
    """Tactician that adapts the field weighting to the observing conditions.

    The ``mode`` keyword selects an airmass window from the module-level
    CONDITIONS table; fields outside the window receive a large (but
    finite) penalty.
    """
    name = 'condition'

    def __init__(self, *args, **kwargs):
        super(ConditionTactician, self).__init__(*args, **kwargs)
        # Observing-condition keyword; must be a key of CONDITIONS.
        self.mode = kwargs.get('mode', None)

    @property
    def weight(self):
        """Per-field scalar weight (lower is better); np.inf disallows."""
        airmass = self.airmass
        sel = self.viable_fields
        weight = 2.0 * self.hour_angle
        weight[~sel] = np.inf
        weight += 3. * 360. * self.fields['TILING']
        if self.mode == 'complete':
            # Heavily prioritize finishing earlier tilings.
            weight += 100. * 360. * self.fields['TILING']
        weight += self.slew**3
        airmass_min, airmass_max = CONDITIONS[self.mode]
        airmass_cut = ((airmass < airmass_min) | (airmass > airmass_max))
        # ADW: This should probably also be in there
        weight += 100. * (airmass - 1.)**3
        # Soft penalty (not an exclusion) for fields outside the airmass window.
        weight += 5000. * airmass_cut
        if self.mode == 'great':
            # In great conditions, push toward the far-southern fields.
            weight += 5000. * (self.fields['DEC'] > -80)
        return weight
class SMCNODTactician(Tactician):
    """Tactician that targets the SMC Northern Overdensity (SMCNOD) fields."""

    @property
    def weight(self):
        """Per-field scalar weight; strongly favors the SMCNOD hexes."""
        sel = self.viable_fields
        # Heavily penalize any field that is not in the SMCNOD hex list.
        # BUGFIX: was `obztak.utils.constants.HEX_SMCNOD`, but `obztak` is
        # never imported as a module here -- use the imported `constants`.
        weight = 10000. * np.logical_not(
            np.in1d(self.fields['HEX'], constants.HEX_SMCNOD)).astype(float)
        weight[~sel] = np.inf
        weight += 360. * self.fields['TILING']
        # BUGFIX: `slew` was an undefined name; the `slew` property was intended.
        weight += self.slew
        return weight
class BlissTactician(Tactician):
    """Tactician for the BLISS survey.

    Chooses filters based on moon brightness and altitude, applies
    BLISS-specific airmass windows, and avoids re-observing fields less
    than 10 hours apart.
    """

    # Airmass windows, [min, max], per observing mode.
    CONDITIONS = odict([
        (None, [1.0, 1.4]),
        ('bliss', [1.0, 1.4]),
        #('good', [1.0, 1.4]),
        #('poor', [1.0, 1.2]),
    ])

    def __init__(self, *args, **kwargs):
        super(BlissTactician, self).__init__(*args, **kwargs)
        # Observing-condition keyword; must be a key of self.CONDITIONS.
        self.mode = kwargs.get('mode', None)

    @property
    def weight(self):
        """Per-field scalar weight (lower is better); np.inf disallows."""
        airmass = self.airmass
        moon_angle = self.moon_angle
        sel = self.viable_fields
        weight = np.zeros(len(sel))
        # Moon angle constraints
        moon_limit = 30.
        sel &= (moon_angle > moon_limit)
        # Moon band constraints. Altitudes are in radians; -0.04 rad is
        # roughly the refracted horizon.
        # NOTE(review): np.char.count(literal, filters) counts each field's
        # FILTER string inside the literal; this assumes single-character
        # filter names ('g','r','i','z') -- confirm against the field table.
        if (self.moon.phase >= 80) and (self.moon.alt > -0.04):
            # Moon is very bright; only do z
            sel &= (np.char.count('z', self.fields['FILTER']) > 0)
            # Allow i,z but prefer z
            #sel &= (np.char.count('iz',self.fields['FILTER']) > 0)
            #weight += 1e2 * (np.char.count('i',self.fields['FILTER']) > 0)
        elif (self.moon.phase >= 45) and (self.moon.alt > -0.04):
            # Moon is more than half full; do i,z
            sel &= (np.char.count('iz', self.fields['FILTER']) > 0)
        else:
            # Moon is faint or down; do g,r (unless none available)
            sel &= (np.char.count('gr', self.fields['FILTER']) > 0)
            #weight += 1e8 * (np.char.count('iz',self.fields['FILTER']) > 0)
        if (self.sun.alt > -0.28):
            # No g-band if Sun altitude > -16 deg
            sel &= ~(np.char.count('g', self.fields['FILTER']) > 0)
        # Airmass cut
        airmass_min, airmass_max = self.CONDITIONS[self.mode]
        sel &= ((airmass > airmass_min) & (airmass < airmass_max))
        # Don't allow the same field to be scheduled in different bands
        # less than 10 hours apart
        # BUGFIX: guard against completed_fields being None (matches the
        # base-class convention).
        if (self.completed_fields is not None) and len(self.completed_fields):
            # BUGFIX: under Python 3, np.array(map(...)) wraps the map
            # iterator in a 0-d object array; materialize the list first.
            dates = np.array(list(map(ephem.Date, self.completed_fields['DATE'])))
            recent = self.completed_fields[(self.date - dates) < 10*ephem.hour]
            # Don't allow the same fields twice on one night
            sel &= ~np.in1d(self.fields.field_id, recent.field_id)
            # Higher weight for duplicate HEXs
            weight += 500.0 * np.in1d(self.fields['HEX'], recent['HEX'])
            #weight += 1e9 * np.in1d(self.fields.field_id,recent.field_id)
        # Set the weights for each field. Lower weight means more favorable.
        # Higher weight for rising fields (higher hour angle)
        # HA [min,max] = [-53,54] (for airmass 1.4)
        #weight += 5.0 * self.hour_angle
        weight += 1.0 * self.hour_angle
        #weight += 0.1 * self.hour_angle
        # Higher weight for larger slews
        # slew = 10 deg -> weight = 1e2
        #weight += self.slew**2
        #weight += self.slew
        weight += 1e3 * self.slew
        # Higher weight for higher airmass
        # airmass = 1.4 -> weight = 6.4
        weight += 100. * (airmass - 1.)**3
        # Higher weight for fields close to the moon (when up)
        # angle = 50 -> weight = 6.4
        if (self.moon.alt > -0.04):
            #weight += 100 * (35./moon_angle)**3
            #weight += 10 * (35./moon_angle)**3
            weight += 1 * (35./moon_angle)**3
        # Try hard to do the first tiling
        weight += 1e6 * (self.fields['TILING'] - 1)
        # Prioritize Planet 9 Region late in the survey/night
        #ra_zenith, dec_zenith = np.degrees(self.observatory.radec_of(0,'90'))
        #if ra_zenith > 270:
        #    weight += 1e6 * (self.fields['PRIORITY'] - 1)
        #    # Allow i,z exposures at high penalty
        #    #sel &= (np.char.count('iz',self.fields['FILTER']) > 0)
        #    #weight += 1e8 * (np.char.count('iz',self.fields['FILTER']) > 0)
        # Set infinite weight to all disallowed fields
        weight[~sel] = np.inf
        return weight

    def select_index(self):
        """Return the index of the minimum-weight field; fail loudly (with a
        diagnostic plot and debugger hook) if only disallowed fields remain."""
        weight = self.weight
        index = np.array([np.argmin(weight)], dtype=int)
        if np.any(~np.isfinite(weight[index])):
            #if True:
            msg = "Infinite weight selected"
            print(msg)
            import obztak.utils.ortho, pylab as plt
            airmass_min, airmass_max = self.CONDITIONS[self.mode]
            bmap = obztak.utils.ortho.plotFields(self.completed_fields[-1], self.fields, self.completed_fields, options_basemap=dict(airmass=airmass_max))
            import pdb; pdb.set_trace()
            raise ValueError(msg)
        return index
if __name__ == "__main__":
    # Minimal CLI stub: exposes only --help (using the module docstring as
    # the description); no arguments are currently consumed.
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    args = parser.parse_args()
# if mode == 'airmass':
# airmass_effective = copy.copy(airmass)
# # Do not observe fields that are unavailable
# airmass_effective[np.logical_not(cut)] = np.inf
# # Priorize coverage over multiple tilings
# airmass_effective += self.target_fields['TILING']
# index_select = np.argmin(airmass_effective)
# elif mode == 'ra':
# # Different selection
# #ra_effective = copy.copy(self.target_fields['RA'])
# ra_effective = copy.copy(self.target_fields['RA']) - ra_zenith
# ra_effective[ra_effective > 180.] = | |
<reponame>junxnone/aerial_wildlife_detection<filename>modules/UserHandling/backend/middleware.py
'''
Provides functionality for checking login details,
session validity, and the like.
2019-20 <NAME>
'''
from threading import Thread
from modules.Database.app import Database
import psycopg2
from psycopg2 import sql
from datetime import timedelta
from util.helpers import current_time, checkDemoMode
import secrets
import hashlib
import bcrypt
from .exceptions import *
class UserMiddleware():
    """Checks login details, session-token validity, and account/project
    privileges against the aide_admin database, with a local session cache."""
    # Bytes of entropy fed to secrets.token_urlsafe for session tokens.
    TOKEN_NUM_BYTES = 64
    # bcrypt work factor used when hashing passwords.
    SALT_NUM_ROUNDS = 12
    def __init__(self, config):
        """Store the configuration, open a database connector, and set up
        the in-memory session cache."""
        self.config = config
        self.dbConnector = Database(config)
        self.usersLoggedIn = {}    # username -> {timestamp, sessionToken}
    def _current_time(self):
        """Return the current time (thin wrapper around util.helpers.current_time)."""
        return current_time()
    def _create_token(self):
        """Return a fresh URL-safe random session token
        (TOKEN_NUM_BYTES bytes of entropy)."""
        return secrets.token_urlsafe(self.TOKEN_NUM_BYTES)
def _compare_tokens(self, tokenA, tokenB):
if tokenA is None or tokenB is None:
return False
return secrets.compare_digest(tokenA, tokenB)
    def _check_password(self, providedPass, hashedTargetPass):
        """Constant-time check of *providedPass* against the stored bcrypt hash."""
        return bcrypt.checkpw(providedPass, hashedTargetPass)
def _create_hash(self, password):
hash = bcrypt.hashpw(password, bcrypt.gensalt(self.SALT_NUM_ROUNDS))
return hash
def _get_user_data(self, username):
result = self.dbConnector.execute('SELECT last_login, session_token, secret_token FROM aide_admin.user WHERE name = %s;',
(username,), numReturn=1)
if not len(result):
return None
result = result[0]
return result
    def _extend_session_database(self, username, sessionToken):
        '''
        Updates the last login timestamp of the user to the current
        time and commits the changes to the database.
        Runs in a thread to be non-blocking.
        '''
        def _extend_session():
            # Capture the timestamp inside the worker so it reflects the
            # moment the update actually runs.
            now = self._current_time()
            self.dbConnector.execute('''UPDATE aide_admin.user SET last_login = %s,
                    session_token = %s
                    WHERE name = %s
                ''',
                (now, sessionToken, username,),
                numReturn=None)
            # also update local cache
            # NOTE(review): assumes usersLoggedIn[username] already exists;
            # callers populate it before invoking this -- confirm.
            self.usersLoggedIn[username]['timestamp'] = now
        eT = Thread(target=_extend_session)
        eT.start()
def _init_or_extend_session(self, username, sessionToken=None):
'''
Establishes a "session" for the user (i.e., sets 'time_login'
to now).
Also creates a new sessionToken if None provided.
'''
now = self._current_time()
if sessionToken is None:
sessionToken = self._create_token()
# new session created; add to database
self.dbConnector.execute('''UPDATE aide_admin.user SET last_login = %s, session_token = %s
WHERE name = %s
''',
(now, sessionToken, username,),
numReturn=None)
# store locally
self.usersLoggedIn[username] = {
'timestamp': now,
'sessionToken': sessionToken
}
# update local cache as well
if not username in self.usersLoggedIn:
self.usersLoggedIn[username] = {
'timestamp': now,
'sessionToken': sessionToken
}
else:
self.usersLoggedIn[username]['timestamp'] = now
self.usersLoggedIn[username]['sessionToken'] = sessionToken
# also tell DB about updated tokens
self._extend_session_database(username, sessionToken)
expires = now + timedelta(0, self.config.getProperty('UserHandler', 'time_login', type=int))
return sessionToken, now, expires
def _invalidate_session(self, username):
if username in self.usersLoggedIn:
del self.usersLoggedIn[username]
self.dbConnector.execute(
'UPDATE aide_admin.user SET session_token = NULL WHERE name = %s',
(username,),
numReturn=None)
#TODO: feedback that everything is ok?
def _check_account_exists(self, username, email):
response = {
'username': True,
'email': True
}
if username is None or not len(username): username = ''
if email is None or not len(email): email = ''
result = self.dbConnector.execute('SELECT COUNT(name) AS c FROM aide_admin.user WHERE name = %s UNION ALL SELECT COUNT(name) AS c FROM aide_admin.user WHERE email = %s',
(username,email,),
numReturn=2)
response['username'] = (result[0]['c'] > 0)
response['email'] = (result[1]['c'] > 0)
return response
def _check_logged_in(self, username, sessionToken):
now = self._current_time()
time_login = self.config.getProperty('UserHandler', 'time_login', type=int)
if not username in self.usersLoggedIn:
# check database
result = self._get_user_data(username)
if result is None:
# account does not exist
return False
# check for session token
if not self._compare_tokens(result['session_token'], sessionToken):
# invalid session token provided
return False
# check for timestamp
time_diff = (now - result['last_login']).total_seconds()
if time_diff <= time_login:
# user still logged in
if not username in self.usersLoggedIn:
self.usersLoggedIn[username] = {
'timestamp': now,
'sessionToken': sessionToken
}
else:
self.usersLoggedIn[username]['timestamp'] = now
# extend user session (commit to DB) if needed
if time_diff >= 0.75 * time_login:
self._extend_session_database(username, sessionToken)
return True
else:
# session time-out
return False
# generic error
return False
else:
# check locally
if not self._compare_tokens(self.usersLoggedIn[username]['sessionToken'],
sessionToken):
# invalid session token provided; check database if token has updated
# (can happen if user logs in again from another machine)
result = self._get_user_data(username)
if not self._compare_tokens(result['session_token'],
sessionToken):
return False
else:
# update local cache
self.usersLoggedIn[username]['sessionToken'] = result['session_token']
self.usersLoggedIn[username]['timestamp'] = now
if (now - self.usersLoggedIn[username]['timestamp']).total_seconds() <= time_login:
# user still logged in
return True
else:
# local cache session time-out; check if database holds more recent timestamp
result = self._get_user_data(username)
if (now - result['last_login']).total_seconds() <= time_login:
# user still logged in; update
self._init_or_extend_session(username, sessionToken)
else:
# session time-out
return False
# generic error
return False
# generic error
return False
def _check_authorized(self, project, username, admin, return_all=False):
'''
Verifies whether a user has access rights to a project.
If "return_all" is set to True, a dict with the following bools
is returned:
- enrolled: if the user is member of the project
- isAdmin: if the user is a project administrator
- isPublic: if the project is publicly visible (*)
- demoMode: if the project runs in demo mode (*)
(* note that these are here for convenience, but do not count
as authorization tokens)
If "return_all" is False, only a single bool is returned, with
criteria as follows:
- if "admin" is set to True, the user must be a project admini-
strator
- else, the user must be enrolled, admitted, and not blocked for
the current date and time
In this case, options like the demo mode and public flag are not
relevant for the decision.
'''
now = current_time()
response = {
'enrolled': False,
'isAdmin': False,
'isPublic': False
}
queryStr = sql.SQL('''
SELECT * FROM aide_admin.authentication AS auth
JOIN (SELECT shortname, demoMode, isPublic FROM aide_admin.project) AS proj
ON auth.project = proj.shortname
WHERE project = %s AND username = %s;
''')
try:
result = self.dbConnector.execute(queryStr, (project, username,), 1)
if len(result):
response['isAdmin'] = result[0]['isadmin']
response['isPublic'] = result[0]['ispublic']
admitted_until = True
blocked_until = False
if result[0]['admitted_until'] is not None:
admitted_until = (result[0]['admitted_until'] >= now)
if result[0]['blocked_until'] is not None:
blocked_until = (result[0]['blocked_until'] >= now)
response['enrolled'] = (admitted_until and not blocked_until)
except:
# no results to fetch: user is not authenticated
pass
# check if super user
superUser = self._check_user_privileges(username, superuser=True)
if superUser:
response['enrolled'] = True
response['isAdmin'] = True
if return_all:
return response
else:
if admin:
return response['isAdmin']
else:
return response['enrolled']
# if admin:
# queryStr = sql.SQL('''SELECT COUNT(*) AS cnt FROM aide_admin.authentication
# WHERE project = %s AND username = %s AND isAdmin = %s''')
# queryVals = (project,username,admin,)
# else:
# queryStr = sql.SQL('''SELECT COUNT(*) AS cnt FROM aide_admin.authentication
# WHERE project = %s AND username = %s
# AND (
# (admitted_until IS NULL OR admitted_until >= now())
# AND
# (blocked_until IS NULL OR blocked_until < now())
# )''')
# queryVals = (project,username,)
# result = self.dbConnector.execute(queryStr, queryVals, 1)
# return result[0]['cnt'] == 1
    def checkDemoMode(self, project):
        """Return whether *project* is configured to run in demo mode."""
        return checkDemoMode(project, self.dbConnector)
def decryptSessionToken(self, username, request):
try:
userdata = self._get_user_data(username)
return request.get_cookie('session_token', secret=userdata['secret_token'])
except:
return None
    def encryptSessionToken(self, username, response):
        """Set the 'session_token' cookie on *response*, signed with the
        user's per-account secret token."""
        userdata = self._get_user_data(username)
        response.set_cookie('session_token', userdata['session_token'],
                            httponly=True, path='/', secret=userdata['secret_token'])
def _check_user_privileges(self, username, superuser=False, canCreateProjects=False, return_all=False):
response = {
'superuser': False,
'can_create_projects': False
}
result = self.dbConnector.execute('''SELECT isSuperUser, canCreateProjects
FROM aide_admin.user WHERE name = %s;''',
(username,),
1)
if len(result):
response['superuser'] = result[0]['issuperuser']
response['can_create_projects'] = result[0]['cancreateprojects']
if return_all:
return response
else:
if superuser and not result[0]['issuperuser']:
return False
if canCreateProjects and not (
result[0]['cancreateprojects'] or result[0]['issuperuser']):
return False
return True
def isAuthenticated(self, username, sessionToken, project=None, admin=False, superuser=False, canCreateProjects=False, extend_session=False, return_all=False):
'''
Checks if the user is authenticated to access a service.
Returns False if one or more of the following conditions holds:
- user is not logged in
- 'project' (shortname) is provided, project is configured to be private and user is not in the
authenticated users list
- 'admin' is True, 'project' (shortname) is provided and user is not an admin of the project
- 'superuser' is True and user is not a super user
- 'canCreateProjects' is True and user is not authenticated to create (or remove) projects
If 'extend_session' is True, the user's session will automatically be prolonged by the max login time
specified in the configuration file.
If 'return_all' is True, all individual flags (instead of just a single bool) is returned.
'''
demoMode = checkDemoMode(project, self.dbConnector)
if return_all:
returnVals = {}
returnVals['logged_in'] = self._check_logged_in(username, sessionToken)
if not returnVals['logged_in']:
username = None
if project is not None:
returnVals['project'] = self._check_authorized(project, username, admin, return_all=True)
returnVals['project']['demoMode'] = demoMode
returnVals['privileges'] = self._check_user_privileges(username, superuser, | |
this statement to submit a prize.'
)
return value
    def clean(self):
        """No cross-field validation required; return the cleaned data as-is."""
        return self.cleaned_data
def save(self, event, handler=None):
provider = ''
if handler and handler.username != handler.email:
provider = handler.username
prize = models.Prize.objects.create(
event=event,
name=self.cleaned_data['name'],
description=self.cleaned_data['description'],
maxwinners=self.cleaned_data['maxwinners'],
extrainfo=self.cleaned_data['extrainfo'],
estimatedvalue=self.cleaned_data['estimatedvalue'],
minimumbid=5,
maximumbid=5,
image=self.cleaned_data['imageurl'],
handler=handler,
provider=provider,
creator=self.cleaned_data['creatorname'],
creatoremail=self.cleaned_data['creatoremail'],
creatorwebsite=self.cleaned_data['creatorwebsite'],
)
prize.save()
return prize
class AutomailPrizeContributorsForm(forms.Form):
    """Admin form for bulk-mailing prize contributors about the state of
    their submitted prizes."""

    def __init__(self, prizes, *args, **kwargs):
        super(AutomailPrizeContributorsForm, self).__init__(*args, **kwargs)
        self.choices = []
        # Only prizes with a handler can be e-mailed.
        prizes = [prize for prize in prizes if prize.handler]
        # All listed prizes are assumed to belong to the same event.
        event = prizes[0].event if len(prizes) > 0 else None
        for prize in prizes:
            # Checkbox label: admin link, current state, and a mailto link
            # for the prize handler.
            self.choices.append(
                (
                    prize.id,
                    mark_safe(
                        format_html(
                            '<a href="{0}">{1}</a> State: {2} (<a href="mailto:{3}">{3}</a>)',
                            viewutil.admin_url(prize),
                            prize,
                            prize.get_state_display(),
                            prize.handler.email,
                        )
                    ),
                )
            )
        self.fields['fromaddress'] = forms.EmailField(
            max_length=256,
            initial=prizemail.get_event_default_sender_email(event),
            required=True,
            label='From Address',
            help_text='Specify the e-mail you would like to identify as the sender',
        )
        self.fields['replyaddress'] = forms.EmailField(
            max_length=256,
            required=False,
            label='Reply Address',
            help_text='If left blank this will be the same as the from address',
        )
        self.fields['emailtemplate'] = forms.ModelChoiceField(
            queryset=post_office.models.EmailTemplate.objects.all(),
            empty_label='Pick a template...',
            required=True,
            label='Email Template',
            help_text='Select an email template to use.',
        )
        self.fields['prizes'] = forms.TypedMultipleChoiceField(
            choices=self.choices,
            initial=[prize.id for prize in prizes],
            label='Prizes',
            empty_value='',
            widget=forms.widgets.CheckboxSelectMultiple,
        )

    def clean(self):
        """Default the reply address to the sender and resolve the selected
        prize ids back into Prize instances."""
        if not self.cleaned_data['replyaddress']:
            self.cleaned_data['replyaddress'] = self.cleaned_data['fromaddress']
        self.cleaned_data['prizes'] = [
            models.Prize.objects.get(id=x) for x in self.cleaned_data['prizes']
        ]
        return self.cleaned_data
class DrawPrizeWinnersForm(forms.Form):
    """Admin form for choosing which prizes to run the winner drawing on."""

    def __init__(self, prizes, *args, **kwargs):
        super(DrawPrizeWinnersForm, self).__init__(*args, **kwargs)
        # One checkbox per prize, labelled with a link to its admin page.
        self.choices = [
            (
                p.id,
                mark_safe(
                    format_html(
                        '<a href="{0}">{1}</a>', viewutil.admin_url(p), p
                    )
                ),
            )
            for p in prizes
        ]
        self.fields['prizes'] = forms.TypedMultipleChoiceField(
            choices=self.choices,
            initial=[p.id for p in prizes],
            coerce=int,
            label='Prizes',
            empty_value='',
            widget=forms.widgets.CheckboxSelectMultiple,
        )
        self.fields['seed'] = forms.IntegerField(
            required=False,
            label='Random Seed',
            help_text="Completely optional, if you don't know what this is, don't worry about it",
        )

    def clean(self):
        """Resolve the selected ids back into Prize model instances."""
        self.cleaned_data['prizes'] = [
            models.Prize.objects.get(id=pk) for pk in self.cleaned_data['prizes']
        ]
        return self.cleaned_data
class AutomailPrizeWinnersForm(forms.Form):
    """Admin form for emailing win notifications to prize winners.

    Args:
        prizewinners: iterable of PrizeWinner model instances; all are
            assumed to belong to the same event (the first one is used to
            derive event defaults).
    """

    def __init__(self, prizewinners, *args, **kwargs):
        super(AutomailPrizeWinnersForm, self).__init__(*args, **kwargs)
        # Derive the event from the first winner, if any, for field defaults.
        event = prizewinners[0].prize.event if len(prizewinners) > 0 else None
        self.fields['fromaddress'] = forms.EmailField(
            max_length=256,
            initial=prizemail.get_event_default_sender_email(event),
            required=True,
            label='From Address',
            help_text='Specify the e-mail you would like to identify as the sender',
        )
        self.fields['replyaddress'] = forms.EmailField(
            max_length=256,
            required=False,
            label='Reply Address',
            help_text='If left blank this will be the same as the from address',
        )
        self.fields['emailtemplate'] = forms.ModelChoiceField(
            queryset=post_office.models.EmailTemplate.objects.all(),
            initial=event.prizewinneremailtemplate if event else None,
            empty_label='Pick a template...',
            required=True,
            label='Email Template',
            help_text='Select an email template to use. Can be overridden by the prize itself.',
        )
        self.fields['acceptdeadline'] = forms.DateField(
            initial=timezone.now() + datetime.timedelta(weeks=2)
        )
        # One checkbox per winner: prize link, winner link, and a mail preview.
        self.choices = []
        for prizewinner in prizewinners:
            winner = prizewinner.winner
            prize = prizewinner.prize
            self.choices.append(
                (
                    prizewinner.id,
                    mark_safe(
                        format_html(
                            '<a href="{0}">{1}</a>: <a href="{2}">{3}</a> <a href="{4}">Preview</a>',
                            viewutil.admin_url(prize),
                            prize,
                            viewutil.admin_url(winner),
                            winner,
                            reverse(
                                'admin:preview_prize_winner_mail',
                                args=(prizewinner.id,),
                            ),
                        )
                    ),
                )
            )
        self.fields['prizewinners'] = forms.TypedMultipleChoiceField(
            choices=self.choices,
            initial=[prizewinner.id for prizewinner in prizewinners],
            coerce=int,  # int itself is the coercion; no lambda wrapper needed
            label='Prize Winners',
            empty_value='',
            widget=forms.widgets.CheckboxSelectMultiple,
        )

    def clean(self):
        # Default the reply address to the from address when left blank.
        if not self.cleaned_data.get('replyaddress', ''):
            self.cleaned_data['replyaddress'] = self.cleaned_data['fromaddress']
        self.cleaned_data['prizewinners'] = [
            models.PrizeWinner.objects.get(id=pw)
            for pw in self.cleaned_data.get('prizewinners', [])
        ]
        return self.cleaned_data
class AutomailPrizeAcceptNotifyForm(forms.Form):
    """Admin form for emailing prize handlers that a winner has accepted.

    Args:
        prizewinners: iterable of PrizeWinner model instances; the first
            one's event supplies the default sender address.
    """

    def __init__(self, prizewinners, *args, **kwargs):
        super(AutomailPrizeAcceptNotifyForm, self).__init__(*args, **kwargs)
        event = prizewinners[0].prize.event if len(prizewinners) > 0 else None
        self.fields['fromaddress'] = forms.EmailField(
            max_length=256,
            initial=prizemail.get_event_default_sender_email(event),
            required=True,
            label='From Address',
            help_text='Specify the e-mail you would like to identify as the sender',
        )
        self.fields['replyaddress'] = forms.EmailField(
            max_length=256,
            required=False,
            label='Reply Address',
            help_text='If left blank this will be the same as the from address',
        )
        self.fields['emailtemplate'] = forms.ModelChoiceField(
            queryset=post_office.models.EmailTemplate.objects.all(),
            initial=None,
            empty_label='Pick a template...',
            required=True,
            label='Email Template',
            help_text='Select an email template to use.',
        )
        # One checkbox per winner, linking prize and winner admin pages.
        self.choices = []
        for prizewinner in prizewinners:
            winner = prizewinner.winner
            prize = prizewinner.prize
            self.choices.append(
                (
                    prizewinner.id,
                    mark_safe(
                        format_html(
                            '<a href="{0}">{1}</a>: <a href="{2}">{3}</a>',
                            viewutil.admin_url(prize),
                            prize,
                            viewutil.admin_url(winner),
                            winner,
                        )
                    ),
                )
            )
        self.fields['prizewinners'] = forms.TypedMultipleChoiceField(
            choices=self.choices,
            initial=[prizewinner.id for prizewinner in prizewinners],
            coerce=int,  # int itself is the coercion; no lambda wrapper needed
            label='Prize Winners',
            empty_value='',
            widget=forms.widgets.CheckboxSelectMultiple,
        )

    def clean(self):
        # .get avoids KeyError when a field failed validation; mirrors
        # AutomailPrizeWinnersForm.clean.
        if not self.cleaned_data.get('replyaddress', ''):
            self.cleaned_data['replyaddress'] = self.cleaned_data['fromaddress']
        self.cleaned_data['prizewinners'] = [
            models.PrizeWinner.objects.get(id=x)
            for x in self.cleaned_data.get('prizewinners', [])
        ]
        return self.cleaned_data
class AutomailPrizeShippingNotifyForm(forms.Form):
    """Admin form for emailing winners that their prize has shipped.

    Args:
        prizewinners: iterable of PrizeWinner model instances; the first
            one's event supplies the default sender address.
    """

    def __init__(self, prizewinners, *args, **kwargs):
        super(AutomailPrizeShippingNotifyForm, self).__init__(*args, **kwargs)
        event = prizewinners[0].prize.event if len(prizewinners) > 0 else None
        self.fields['fromaddress'] = forms.EmailField(
            max_length=256,
            initial=prizemail.get_event_default_sender_email(event),
            required=True,
            label='From Address',
            help_text='Specify the e-mail you would like to identify as the sender',
        )
        self.fields['replyaddress'] = forms.EmailField(
            max_length=256,
            required=False,
            label='Reply Address',
            help_text='If left blank this will be the same as the from address',
        )
        self.fields['emailtemplate'] = forms.ModelChoiceField(
            queryset=post_office.models.EmailTemplate.objects.all(),
            initial=None,
            empty_label='Pick a template...',
            required=True,
            label='Email Template',
            help_text='Select an email template to use.',
        )
        # One checkbox per winner, linking prize and winner admin pages.
        self.choices = []
        for prizewinner in prizewinners:
            winner = prizewinner.winner
            prize = prizewinner.prize
            self.choices.append(
                (
                    prizewinner.id,
                    mark_safe(
                        format_html(
                            '<a href="{0}">{1}</a>: <a href="{2}">{3}</a>',
                            viewutil.admin_url(prize),
                            prize,
                            viewutil.admin_url(winner),
                            winner,
                        )
                    ),
                )
            )
        self.fields['prizewinners'] = forms.TypedMultipleChoiceField(
            choices=self.choices,
            initial=[prizewinner.id for prizewinner in prizewinners],
            coerce=int,  # int itself is the coercion; no lambda wrapper needed
            label='Prize Winners',
            empty_value='',
            widget=forms.widgets.CheckboxSelectMultiple,
        )

    def clean(self):
        # .get avoids KeyError when a field failed validation; mirrors
        # AutomailPrizeWinnersForm.clean.
        if not self.cleaned_data.get('replyaddress', ''):
            self.cleaned_data['replyaddress'] = self.cleaned_data['fromaddress']
        self.cleaned_data['prizewinners'] = [
            models.PrizeWinner.objects.get(id=x)
            for x in self.cleaned_data.get('prizewinners', [])
        ]
        return self.cleaned_data
class RegistrationForm(forms.Form):
    """First registration step: collect an email address, create an
    inactive user for it, and send a confirmation mail with a token link.
    """

    email = forms.EmailField(label='Email', max_length=254, required=True)

    def clean_email(self):
        """Reject emails already attached to an *active* account."""
        user = self.get_existing_user()
        if user is not None and user.is_active:
            raise forms.ValidationError(
                'This email is already registered. Please log in, (or reset your password if you forgot it).'
            )
        return self.cleaned_data['email']

    def save(
        self,
        email_template=None,
        token_generator=default_token_generator,
        from_email=None,
        request=None,
        **kwargs,
    ):
        """Create (or reuse) the inactive user and send the registration
        mail.

        Returns whatever auth.send_registration_mail returns.
        Raises forms.ValidationError if a unique username cannot be
        generated after several attempts.
        """
        if not email_template:
            email_template = auth.default_registration_template()
        user = self.get_existing_user()
        if user is None:
            email = self.cleaned_data['email']
            # Usernames are capped at 30 characters; slicing is a no-op for
            # shorter emails, so no explicit length check is needed.
            username = email[:30]
            AuthUser = get_user_model()
            tries = 0
            # On username collision, retry with randomized digits injected.
            while user is None and tries < 5:
                try:
                    user = AuthUser.objects.create(
                        username=username, email=email, is_active=False
                    )
                except django.db.utils.IntegrityError:
                    tries += 1
                    username = tracker.util.random_num_replace(
                        username, 8, max_length=30
                    )
            if tries >= 5:
                raise forms.ValidationError(
                    'Something horrible happened, please try again'
                )
        return auth.send_registration_mail(
            request,
            user,
            template=email_template,
            sender=from_email,
            token_generator=token_generator,
        )

    def get_existing_user(self):
        """Return the single user matching the cleaned email
        (case-insensitive), or None; raise if the email is ambiguous."""
        AuthUser = get_user_model()
        email = self.cleaned_data['email']
        matching_users = AuthUser.objects.filter(email__iexact=email)
        if matching_users.count() > 1:
            raise forms.ValidationError(
                'More than one user has the e-mail {0}. Ideally this would be a db constraint, but django is stupid. Contact SMK to get this sorted out.'.format(
                    email
                )
            )
        if matching_users.exists():
            return matching_users[0]
        else:
            return None
class RegistrationConfirmationForm(forms.Form):
    """Second registration step: after following the emailed token link,
    the user chooses a username and password to activate the account."""

    username = forms.CharField(
        label='<NAME>',
        max_length=30,
        required=True,
        validators=[
            validators.RegexValidator(
                r'^[\w.@+-]+$',
                'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.',
                'invalid',
            )
        ],
    )
    password = forms.CharField(
        label='Password', widget=forms.PasswordInput(), required=True
    )
    passwordconfirm = forms.CharField(
        label='Confirm Password', widget=forms.PasswordInput(), required=True
    )

    def __init__(
        self, user, token, token_generator=default_token_generator, *args, **kwargs
    ):
        super(RegistrationConfirmationForm, self).__init__(*args, **kwargs)
        self.user = user
        self.token = token
        self.token_generator = token_generator
        # With an invalid token the form is rendered field-less (unusable).
        if not self.check_token():
            self.fields = {}

    def check_token(self):
        """Return True only for a valid token on a still-inactive user."""
        if not (self.user and self.token and self.token_generator):
            return False
        if self.user.is_active:
            return False
        return self.token_generator.check_token(self.user, self.token)

    def clean_username(self):
        """Normalize the username and reject case-insensitive clashes with
        any other account."""
        AuthUser = get_user_model()
        cleaned = AuthUser.normalize_username(self.cleaned_data['username'])
        clashes = AuthUser.objects.filter(username__iexact=cleaned).exclude(
            pk=self.user.pk
        )
        if clashes.exists():
            raise forms.ValidationError(f'Username {cleaned} is already taken')
        return cleaned

    def clean_password(self):
        """Disallow blank passwords."""
        if not self.cleaned_data['password']:
            raise forms.ValidationError('Password must not be blank.')
        return self.cleaned_data['password']

    def clean(self):
        """Re-validate the token and check the two password fields match."""
        if not self.check_token():
            raise forms.ValidationError('User token pair is not valid.')
        both_present = (
            'password' in self.cleaned_data
            and 'passwordconfirm' in self.cleaned_data
        )
        if both_present and (
            self.cleaned_data['password'] != self.cleaned_data['passwordconfirm']
        ):
            raise forms.ValidationError('Passwords must match.')
        return self.cleaned_data

    def save(self, commit=True):
        """Apply the chosen credentials and activate the user."""
        if not self.user:
            raise forms.ValidationError('Could not save user.')
        self.user.username = self.cleaned_data['username']
        self.user.set_password(self.cleaned_data['password'])
        self.user.is_active = True
        if commit is True:
            self.user.save()
        return self.user
class PrizeAcceptanceForm(forms.ModelForm):
    class Meta:
        # Bind the form to PrizeWinner but expose no model fields directly;
        # all visible fields are added dynamically in __init__.
        model = models.PrizeWinner
        fields = []
def __init__(self, *args, **kwargs):
super(PrizeAcceptanceForm, self).__init__(*args, **kwargs)
self.accepted = None
data = kwargs.get('data', {})
if 'accept' in data:
self.accepted = True
elif 'decline' in data:
self.accepted = False
self.fields['count'] = forms.ChoiceField(
initial=self.instance.pendingcount,
choices=[(x, x) for x in range(1, self.instance.pendingcount + 1)],
label='Count',
help_text='You were selected to win more than one copy of this prize, please select how many you would like to take, or press Deny All if you do not want any of them.',
)
if self.instance.pendingcount == 1:
self.fields['count'].widget = forms.HiddenInput()
self.fields['total'] = forms.IntegerField(
initial=self.instance.pendingcount,
validators=[positive],
widget=forms.HiddenInput(),
)
self.fields['comments'] = forms.CharField(
max_length=512,
label='Notes',
required=False,
help_text='Please put any additional notes here (such as if you have the option of customizing your prize before it is shipped, or additional delivery information).',
widget=forms.Textarea(attrs=dict(cols=40, rows=2)),
)
def clean_total(self):
if self.instance.pendingcount != self.cleaned_data['total']:
raise forms.ValidationError(
'It seems something changed in your status since you loaded the page. Please review and try again.'
)
return self.instance.pendingcount
def clean_count(self):
count = int(self.cleaned_data['count'])
if count > self.instance.pendingcount:
raise forms.ValidationError('Error, count cannot exceed total')
return count
def clean(self):
if self.accepted is False:
self.cleaned_data['count'] = 0
self.cleaned_data['accept'] = False
elif self.accepted is None:
raise forms.ValidationError(
| |
@validate_api_v2
@request_error_handler
def bulk_delete_hosts_tag(self, tag, host_ids):
"""
Delete a tag in bulk on multiple hosts. Only one tag can be deleted at a time
:param host_ids: IDs of the hosts on which to delete the tag
"""
if not isinstance(host_ids, list):
raise TypeError('Host IDs must be of type list')
payload = {
'objectIds': host_ids,
'tag': tag
}
return requests.delete('{url}/tagging/host'.format(url=self.url), headers=self.headers, json=payload,
verify=False)
@validate_api_v2
@request_error_handler
def get_host_note(self, host_id=None):
"""
Get host notes
:param host_id:
For consistency we return a requests.models.Response object
As we do not want to return the complete host body, we alter the response content
"""
if not host_id:
raise ValueError('Host id required')
host = requests.get('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, verify=self.verify)
if host.status_code == 200:
host_note = host.json()['note']
# API endpoint return HTML escaped characters
host_note = html.unescape(host_note) if host_note else ''
json_dict = {'status': 'success', 'host_id': str(host_id), 'note': host_note}
host._content = json.dumps(json_dict).encode('utf-8')
return host
@validate_api_v2
@request_error_handler
def set_host_note(self, host_id=None, note='', append=False):
"""
Set host note
:param host_id:
:param note: content of the note to set
:param append: overwrites existing note if set to False, appends if set to True
Set to empty note string to clear host note
"""
if not host_id:
raise ValueError('Host id required')
if append and isinstance(note, str):
current_note = self.get_host_note(host_id=host_id).json()['note']
if current_note:
if len(note) > 0:
payload = {
"note": '{}{}{}'.format(current_note, '\n', note)
}
else:
payload = {
"note": current_note
}
else:
payload = {
"note": note
}
elif isinstance(note, str):
payload = {
"note": note
}
else:
raise TypeError('Note must be of type str')
return requests.patch('{url}/hosts/{id}'.format(url=self.url, id=host_id), headers=self.headers, data=json.dumps(payload),
verify=self.verify)
    @request_error_handler
    def get_detections(self, **kwargs):
        """
        Query all detections - all parameters are optional
        :param c_score: certainty score (int) - will be removed with deprecation of v1 of api
        :param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
        :param category: detection category - will be removed with deprecation of v1 of api
        :param certainty: certainty score (int)
        :param certainty_gte: certainty score greater than or equal to (int)
        :param detection: detection type
        :param detection_type: detection type
        :param detection_category: detection category
        :param description:
        :param fields: comma separated string of fields to be filtered and returned
            possible values are: id, url, detection_url, category, detection, detection_category,
            detection_type, custom_detection, description, src_ip, state, t_score, c_score,
            certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
            is_targeting_key_asset, src_account, src_host, note, note_modified_by,
            note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
            assigned_date, groups, is_marked_custom, is_custom_model
        :param host_id: host id (int) to filter by
        :param is_targeting_key_asset: detection is targeting key asset (bool)
        :param is_triaged: detection is triaged
        :param last_timestamp: timestamp of last activity on detection (datetime)
        :param max_id: maximum ID of detection returned
        :param min_id: minimum ID of detection returned
        :param ordering: field used to sort response
        :param page: page number to return (int)
        :param page_size: number of object to return in response (int)
        :param src_ip: source ip address of host attributed to detection
        :param state: state of detection (active/inactive)
        :param t_score: threat score (int) - will be removed with deprecation of v1 of api
        :param t_score_gte: threat score is greater than or equal to (int) - will be removed with deprecation of v1 of api
        :param tags: tags assigned to detection
        :param targets_key_asset: detection targets key asset (bool) - will be removed with deprecation of v1 of api
        :param threat: threat score (int)
        :param threat_gte: threat score is greater than or equal to (int)
        :param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
        :returns: requests.Response with the (possibly paginated) detection list
        """
        # v2 authenticates with token headers; v1 falls back to basic auth.
        if self.version == 2:
            return requests.get('{url}/detections'.format(url=self.url), headers=self.headers,
                params=self._generate_detection_params(kwargs), verify=self.verify)
        else:
            return requests.get('{url}/detections'.format(url=self.url), auth=self.auth,
                params=self._generate_detection_params(kwargs), verify=self.verify)
def get_all_detections(self, **kwargs):
"""
Generator to retrieve all detections - all parameters are optional
:param c_score: certainty score (int) - will be removed with deprecation of v1 of api
:param c_score_gte: certainty score greater than or equal to (int) - will be removed with deprecation of v1 of api
:param category: detection category - will be removed with deprecation of v1 of api
:param certainty: certainty score (int)
:param certainty_gte: certainty score greater than or equal to (int)
:param detection: detection type
:param detection_type: detection type
:param detection_category: detection category
:param description:
:param fields: comma separated string of fields to be filtered and returned
possible values are: id, url, detection_url, category, detection, detection_category,
detection_type, custom_detection, description, src_ip, state, t_score, c_score,
certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
is_targeting_key_asset, src_account, src_host, note, note_modified_by,
note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
assigned_date, groups, is_marked_custom, is_custom_model
:param host_id: detection id (int)
:param is_targeting_key_asset: detection is targeting key asset (bool)
:param is_triaged: detection is triaged
:param last_timestamp: timestamp of last activity on detection (datetime)
:param max_id: maximum ID of detection returned
:param min_id: minimum ID of detection returned
:param ordering: field used to sort response
:param page: page number to return (int)
:param page_size: number of object to return in repsonse (int)
:param src_ip: source ip address of host attributed to detection
:param state: state of detection (active/inactive)
:param t_score: threat score (int) - will be removed with deprecation of v1 of api
:param t_score_gte: threat score is greater than or equal to (int) - will be removed with deprecation of v1 of api
:param tags: tags assigned to detection
:param targets_key_asset: detection targets key asset (bool) - will be removed with deprecation of v1 of api
:param threat: threat score (int)
:param threat_gte threat score is greater than or equal to (int)
:param note_modified_timestamp_gte: note last modified timestamp greater than or equal to (datetime)
"""
resp = requests.get('{url}/detections'.format(url=self.url), headers=self.headers,
params=self._generate_detection_params(kwargs), verify=self.verify)
yield resp
while resp.json()['next']:
resp = self._get_request(url = resp.json()['next'])
yield resp
    @request_error_handler
    def get_detection_by_id(self, detection_id=None, **kwargs):
        """
        Get detection by id
        :param detection_id: detection id - required
        :param fields: comma separated string of fields to be filtered and returned - optional
            possible values are: id, url, detection_url, category, detection, detection_category,
            detection_type, custom_detection, description, src_ip, state, t_score, c_score,
            certainty, threat, first_timestamp, last_timestamp, targets_key_asset,
            is_targeting_key_asset, src_account, src_host, note, note_modified_by,
            note_modified_timestamp, sensor, sensor_name, tags, triage_rule_id, assigned_to,
            assigned_date, groups, is_marked_custom, is_custom_model
        :raises ValueError: if detection_id is not provided
        :returns: requests.Response for the single detection
        """
        if not detection_id:
            raise ValueError('Detection id required')
        # v2 authenticates with token headers; v1 falls back to basic auth.
        if self.version == 2:
            return requests.get('{url}/detections/{id}'.format(url=self.url, id=detection_id), headers=self.headers,
                params=self._generate_detection_params(kwargs), verify=self.verify)
        else:
            return requests.get('{url}/detections/{id}'.format(url=self.url, id=detection_id), auth=self.auth,
                params=self._generate_detection_params(kwargs), verify=self.verify)
@validate_api_v2
@request_error_handler
def mark_detections_fixed(self, detection_ids=None):
"""
Mark detections as fixed
:param detection_ids: list of detections to mark as fixed
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to mark as fixed')
return self._toggle_detections_fixed(detection_ids, fixed=True)
@validate_api_v2
@request_error_handler
def unmark_detections_fixed(self, detection_ids=None):
"""
Unmark detections as fixed
:param detection_ids: list of detections to unmark as fixed
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to unmark as fixed')
return self._toggle_detections_fixed(detection_ids, fixed=False)
def _toggle_detections_fixed(self, detection_ids, fixed):
"""
Internal function to mark/unmark detections as fixed
"""
payload = {
'detectionIdList': detection_ids,
'mark_as_fixed': str(fixed)
}
return requests.patch('{url}/detections'.format(url=self.url), json=payload, headers=self.headers,
verify=self.verify)
@validate_api_v2
@request_error_handler
def mark_detections_custom(self, detection_ids=[], triage_category=None):
"""
Mark detections as custom
:param detection_ids: list of detection IDs to mark as custom
:param triage_category: custom name to give detection
:rtype: requests.Response
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to mark as custom')
payload = {
"triage_category": triage_category,
"detectionIdList": detection_ids
}
return requests.post('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
@validate_api_v2
@request_error_handler
def unmark_detections_custom(self, detection_ids=[]):
"""
Unmark detection as custom
:param detection_ids: list of detection IDs to unmark as custom
:rtype: requests.Response
"""
if not isinstance(detection_ids, list):
raise ValueError('Must provide a list of detection IDs to unmark as custom')
payload = {
"detectionIdList": detection_ids
}
response = requests.delete('{url}/rules'.format(url=self.url), headers=self.headers, json=payload,
verify=self.verify)
# DELETE returns an empty response, but we populate the response for consistency with the mark_as_fixed() function
json_dict = {'_meta': {'message': 'Successfully unmarked detections', 'level': 'Success'}}
response._content = json.dumps(json_dict).encode('utf-8')
return response
@validate_api_v2
@request_error_handler
def get_detection_tags(self, detection_id=None):
"""
Get detection tags
:param detection_id:
"""
return requests.get('{url}/tagging/detection/{id}'.format(url=self.url, id=detection_id), headers=self.headers,
verify=False)
@validate_api_v2
@request_error_handler
def set_detection_tags(self, detection_id=None, tags=[], append=False):
"""
Set | |
= self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn), self.cooker.data)
done.add(self.rqdata.runq_fnid[task])
bb.parse.siggen.dump_sigs(self.rqdata.dataCache, options)
return
    def print_diffscenetasks(self):
        """Report where a rebuild diverges from cached (setscene) results.

        Runs the configured hash-validation hook over every executable task,
        prints the root tasks whose cached results cannot be reused, and
        returns that set (invalid tasks that have no invalid dependency).
        """
        valid = []
        sq_hash = []
        sq_hashfn = []
        sq_fn = []
        sq_taskname = []
        sq_task = []
        noexec = []
        # NOTE(review): stamppresent appears unused in this method.
        stamppresent = []
        valid_new = set()
        # Gather parallel fn/hash/taskname arrays for every executable task;
        # noexec tasks are recorded separately and skipped.
        for task in xrange(len(self.rqdata.runq_fnid)):
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task]
            taskdep = self.rqdata.dataCache.task_deps[fn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(task)
                continue
            sq_fn.append(fn)
            sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
            sq_hash.append(self.rqdata.runq_hash[task])
            sq_taskname.append(taskname)
            sq_task.append(task)
        # Ask the metadata's hash validation function which entries are
        # still valid; it returns indexes into the parallel arrays.
        call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
        locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
        valid = bb.utils.better_eval(call, locs)
        for v in valid:
            valid_new.add(sq_task[v])
        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for task in noexec:
            if task not in self.rqdata.runq_setscene:
                continue
            for dep in self.rqdata.runq_depends[task]:
                hasnoexecparents = True
                for dep2 in self.rqdata.runq_revdeps[dep]:
                    if dep2 in self.rqdata.runq_setscene and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)
        # Anything executable and not validated must be rebuilt.
        invalidtasks = set()
        for task in xrange(len(self.rqdata.runq_fnid)):
            if task not in valid_new and task not in noexec:
                invalidtasks.add(task)
        # 'found' collects invalid tasks that (transitively) depend on another
        # invalid task — only the roots of invalidity get reported.
        found = set()
        processed = set()
        for task in invalidtasks:
            toprocess = set([task])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runq_depends[t]:
                        if dep in invalidtasks:
                            found.add(task)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                # Once this task is known non-root, stop traversing.
                if task in found:
                    toprocess = set()
        tasklist = []
        for task in invalidtasks.difference(found):
            tasklist.append(self.rqdata.get_user_idstring(task))
        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
        return invalidtasks.difference(found)
    def write_diffscenetasks(self, invalidtasks):
        """For each invalid task, find the closest previously-written
        signature file and print a human-readable diff explaining why the
        cached result could not be used."""
        # Define recursion callback
        def recursecb(key, hash1, hash2):
            # Recursively diff dependent sigdata when both files are found;
            # otherwise report the missing pair.
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list(' ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
            return recout
        for task in invalidtasks:
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            pn = self.rqdata.dataCache.pkg_fn[fn]
            taskname = self.rqdata.runq_task[task]
            h = self.rqdata.runq_hash[task]
            # Locate the sigfile we just wrote for the current hash.
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Compare against the most recent sigfile with a different hash.
            matches = {k : v for k, v in matches.iteritems() if h not in k}
            if matches:
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_md5__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, closest matching task was %s\n  " % (pn, taskname, h, prevh) + '\n  '.join(output))
class RunQueueExecute:
    """Base class driving task execution for a RunQueue.

    Tracks per-task buildable/running/complete state in parallel arrays
    indexed by task id and communicates with the worker processes through
    the runqueue's worker pipes. Subclasses provide `self.stats` and the
    concrete execution strategy.
    """
    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata
        # Parallelism and scheduling policy come from the configuration.
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS", True) or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER", True) or "speed"
        # Per-task state arrays, indexed by task id.
        self.runq_buildable = []
        self.runq_running = []
        self.runq_complete = []
        # Stamp bookkeeping for in-flight tasks.
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_fnids = []
        self.stampcache = {}
        # Register ourselves so worker pipe events reach this executor.
        rq.workerpipe.setrunqueueexec(self)
        if rq.fakeworkerpipe:
            rq.fakeworkerpipe.setrunqueueexec(self)
    def runqueue_process_waitpid(self, task, status):
        """Handle a worker exit for `task` with exit code `status`."""
        # self.build_stamps[pid] may not exist when use shared work directory.
        if task in self.build_stamps:
            self.build_stamps2.remove(self.build_stamps[task])
            del self.build_stamps[task]
        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True
    def finish_now(self):
        """Tell the workers to stop immediately and set the final state."""
        for worker in [self.rq.worker, self.rq.fakeworker]:
            if not worker:
                continue
            try:
                worker.stdin.write("<finishnow></finishnow>")
                worker.stdin.flush()
            except IOError:
                # worker must have died?
                pass
        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return
        self.rq.state = runQueueComplete
        return
    def finish(self):
        """Begin a graceful shutdown: keep pumping worker pipes while tasks
        are still active, then settle on failed/complete state."""
        self.rq.state = runQueueCleanUp
        if self.stats.active > 0:
            # Tasks still running: report, service the pipes, and let the
            # caller re-invoke finish() until nothing is active.
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return self.rq.active_fds()
        if len(self.failed_fnids) != 0:
            self.rq.state = runQueueFailed
            return True
        self.rq.state = runQueueComplete
        return True
    def check_dependencies(self, task, taskdeps, setscene = False):
        """Ask the metadata's dependency-validation hook whether `task`'s
        dependencies are really needed; returns the hook's verdict, or
        False when no hook is configured."""
        if not self.rq.depvalidate:
            return False
        taskdata = {}
        # Include the task itself alongside its dependencies.
        taskdeps.add(task)
        for dep in taskdeps:
            # In setscene mode the dep indexes the setscene list.
            if setscene:
                depid = self.rqdata.runq_setscene[dep]
            else:
                depid = dep
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[depid]]
            pn = self.rqdata.dataCache.pkg_fn[fn]
            taskname = self.rqdata.runq_task[depid]
            taskdata[dep] = [pn, taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
        valid = bb.utils.better_eval(call, locs)
        return valid
class RunQueueExecuteDummy(RunQueueExecute):
    """No-op executor used when there is nothing to run."""
    def __init__(self, rq):
        # Deliberately skips RunQueueExecute.__init__: no workers or
        # per-task state are needed for a dummy run.
        self.stats = RunQueueStats(0)
        self.rq = rq
    def finish(self):
        """Immediately mark the queue complete."""
        self.rq.state = runQueueComplete
        return
class RunQueueExecuteTasks(RunQueueExecute):
    def __init__(self, rq):
        """Set up initial buildable state and reconcile the setscene
        'covered' set (tasks satisfied from cache) before execution starts."""
        RunQueueExecute.__init__(self, rq)
        self.stats = RunQueueStats(len(self.rqdata.runq_fnid))
        self.stampcache = {}
        initial_covered = self.rq.scenequeue_covered.copy()
        # Mark initial buildable tasks
        for task in xrange(self.stats.total):
            self.runq_running.append(0)
            self.runq_complete.append(0)
            if len(self.rqdata.runq_depends[task]) == 0:
                self.runq_buildable.append(1)
            else:
                self.runq_buildable.append(0)
            # A task all of whose reverse dependencies are covered is itself
            # covered (unless explicitly marked not covered).
            if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered) and task not in self.rq.scenequeue_notcovered:
                self.rq.scenequeue_covered.add(task)
        # Iterate to a fixed point, since adding one covered task can make
        # others coverable.
        found = True
        while found:
            found = False
            for task in xrange(self.stats.total):
                if task in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s (%s): %s' % (task, self.rqdata.get_user_idstring(task), str(self.rqdata.runq_revdeps[task])))
                if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered) and task not in self.rq.scenequeue_notcovered:
                    found = True
                    self.rq.scenequeue_covered.add(task)
        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))
        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            # Build the list of tasks whose stamps are not current so the
            # verification hook can take them into account.
            invalidtasks = []
            for task in xrange(len(self.rqdata.runq_task)):
                fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
                taskname = self.rqdata.runq_task[task]
                taskdep = self.rqdata.dataCache.task_deps[fn]
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(task, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                if self.rq.check_stamp_task(task, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
                    continue
                invalidtasks.append(task)
            call = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d, invalidtasks=invalidtasks)"
            call2 = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : self.rqdata.runq_task, "fnids" : self.rqdata.runq_fnid, "fns" : self.rqdata.taskData.fn_index, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
            # Backwards compatibility with older versions without invalidtasks
            try:
                covered_remove = bb.utils.better_eval(call, locs)
            except TypeError:
                covered_remove = bb.utils.better_eval(call2, locs)
        def removecoveredtask(task):
            # Drop the setscene stamp and remove the task from the skip set.
            fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
            taskname = self.rqdata.runq_task[task] + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCache, fn)
            self.rq.scenequeue_covered.remove(task)
        toremove = covered_remove
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        # Un-covering a task also un-covers anything that depended on it
        # being covered; propagate until stable (the initial covered set is
        # exempt from this propagation).
        while toremove:
            covered_remove = []
            for task in toremove:
                removecoveredtask(task)
                for deptask in self.rqdata.runq_depends[task]:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove
        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)
        event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData)
        # Instantiate the configured scheduler; abort if its name is unknown.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'.  Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))
def get_schedulers(self):
schedulers = set(obj for obj in globals().values()
if type(obj) is type and
issubclass(obj, RunQueueScheduler))
user_schedulers = self.cfgData.getVar("BB_SCHEDULERS", True)
if user_schedulers:
for sched in user_schedulers.split():
if not "." in sched:
bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
continue
modname, name = sched.rsplit(".", 1)
try:
module = __import__(modname, fromlist=(name,))
except ImportError as exc:
logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
raise SystemExit(1)
else:
schedulers.add(getattr(module, name))
return schedulers
    def setbuildable(self, task):
        # Flag the task as ready to run and notify the active scheduler
        # so it can take the newly buildable task into account.
        self.runq_buildable[task] = 1
        # NOTE(review): "newbuilable" looks like a typo for "newbuildable",
        # but it must match the method name on the scheduler class (not
        # visible here) -- confirm before renaming either side.
        self.sched.newbuilable(task)
def task_completeoutright(self, task):
"""
Mark a task as completed
Look at the reverse dependencies and mark any task with
completed dependencies as buildable
"""
self.runq_complete[task] = 1
for revdep in self.rqdata.runq_revdeps[task]:
if self.runq_running[revdep] == 1:
continue
if self.runq_buildable[revdep] == 1:
continue
alldeps = 1
for dep in self.rqdata.runq_depends[revdep]:
if self.runq_complete[dep] != 1:
alldeps = 0
if alldeps == 1:
self.setbuildable(revdep)
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[revdep]]
taskname = self.rqdata.runq_task[revdep]
logger.debug(1, "Marking task %s (%s, %s) as buildable", revdep, fn, taskname)
    def task_complete(self, task):
        # Update statistics, broadcast the completion event, then propagate
        # the completion so dependent tasks can become buildable.
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        # Remember which recipe (fnid) failed so it can be reported later.
        fnid = self.rqdata.runq_fnid[task]
        self.failed_fnids.append(fnid)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # When abort-on-failure is configured, push the queue into cleanup.
        if self.rqdata.taskData.abort:
            self.rq.state = runQueueCleanUp
    def task_skip(self, task, reason):
        # Treat the skipped task as if it ran and completed, so that its
        # dependents can proceed, then record the skip in the statistics.
        self.runq_running[task] = 1
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
def execute(self):
"""
Run the tasks in a queue prepared by rqdata.prepare()
"""
self.rq.read_workers()
if self.stats.total == 0:
# nothing to do
self.rq.state = runQueueCleanUp
task = self.sched.next()
if task is not | |
# fastatomography/operators/default_ops.py
# coding=utf-8
# Copyright 2014-2019 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Default operators defined on any (reasonable) space."""
from __future__ import print_function, division, absolute_import
from copy import copy
import numpy as np
from odl.operator.operator import Operator
from odl.set import LinearSpace, Field, RealNumbers, ComplexNumbers
from odl.set.space import LinearSpaceElement
from odl.space import ProductSpace
__all__ = ('ScalingOperator', 'ZeroOperator', 'IdentityOperator',
'LinCombOperator', 'MultiplyOperator', 'PowerOperator',
'InnerProductOperator', 'NormOperator', 'DistOperator',
'ConstantOperator', 'RealPart', 'ImagPart', 'ComplexEmbedding',
'ComplexModulus', 'ComplexModulusSquared')
class ScalingOperator(Operator):

    """Operator of multiplication with a scalar.

    Implements::

        ScalingOperator(s)(x) == s * x
    """

    def __init__(self, domain, scalar):
        """Create a new scaling operator.

        Parameters
        ----------
        domain : `LinearSpace` or `Field`
            Set of elements on which this operator acts.
        scalar : ``domain.field`` element
            Fixed scaling factor of this operator.

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> vec = r3.element([1, 2, 3])
        >>> op = ScalingOperator(r3, 2.0)
        >>> op(vec)
        rn(3).element([ 2.,  4.,  6.])
        """
        if not isinstance(domain, (LinearSpace, Field)):
            raise TypeError('`domain` {!r} not a `LinearSpace` or `Field` '
                            'instance'.format(domain))
        super(ScalingOperator, self).__init__(domain, domain, linear=True)
        self.__scalar = domain.field.element(scalar)

    @property
    def scalar(self):
        """Fixed scaling factor of this operator."""
        return self.__scalar

    def _call(self, x, out=None):
        """Scale ``x``, writing into ``out`` in place when given."""
        if out is not None:
            out.lincomb(self.scalar, x)
        else:
            out = self.scalar * x
        return out

    @property
    def inverse(self):
        """Inverse operator, i.e. scaling with ``1 / scalar``.

        Raises
        ------
        ZeroDivisionError
            If ``scalar`` is zero.
        """
        if self.scalar == 0.0:
            raise ZeroDivisionError('scaling operator not invertible for '
                                    'scalar==0')
        return ScalingOperator(self.domain, 1.0 / self.scalar)

    @property
    def adjoint(self):
        """Adjoint: scaling with the complex conjugate of ``scalar``.

        Returns
        -------
        adjoint : `ScalingOperator`
            ``self`` if `scalar` is real, else scaling with the
            conjugated scalar.
        """
        if complex(self.scalar).imag == 0.0:
            return self
        return ScalingOperator(self.domain, self.scalar.conjugate())

    def norm(self, estimate=False, **kwargs):
        """Return the operator norm, the absolute value of `scalar`.

        Parameters
        ----------
        estimate, kwargs : bool
            Ignored. Present to conform with base-class interface.
        """
        return np.abs(self.scalar)

    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r}, {!r})'.format(self.__class__.__name__,
                                       self.domain, self.scalar)

    def __str__(self):
        """Return ``str(self)``."""
        return '{} * I'.format(self.scalar)
class IdentityOperator(ScalingOperator):
    """Operator mapping each element to itself.
    Implements::
        IdentityOperator()(x) == x
    """
    def __init__(self, space):
        """Initialize a new instance.
        Parameters
        ----------
        space : `LinearSpace`
            Space of elements which the operator is acting on.
        """
        # The identity is simply scaling by one.
        super(IdentityOperator, self).__init__(space, 1)
    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r})'.format(self.__class__.__name__, self.domain)
    def __str__(self):
        """Return ``str(self)``."""
        return "I"
class LinCombOperator(Operator):

    """Operator mapping two space elements to a linear combination.

    Implements::

        LinCombOperator(a, b)([x, y]) == a * x + b * y
    """

    def __init__(self, space, a, b):
        """Create a new linear-combination operator.

        Parameters
        ----------
        space : `LinearSpace`
            Space of elements which the operator is acting on.
        a, b : ``space.field`` elements
            Scalars to multiply ``x[0]`` and ``x[1]`` with, respectively.
        """
        # The operator maps a pair of elements to a single element.
        domain = ProductSpace(space, space)
        super(LinCombOperator, self).__init__(domain, space, linear=True)
        self.a = a
        self.b = b

    def _call(self, x, out=None):
        """Linearly combine the components of ``x``, into ``out`` if given."""
        target = out if out is not None else self.range.element()
        target.lincomb(self.a, x[0], self.b, x[1])
        return target

    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r}, {!r}, {!r})'.format(self.__class__.__name__,
                                             self.range, self.a, self.b)

    def __str__(self):
        """Return ``str(self)``."""
        return "{}*x + {}*y".format(self.a, self.b)
class MultiplyOperator(Operator):

    """Operator multiplying by a fixed space or field element.

    Implements::

        MultiplyOperator(y)(x) == x * y

    Here, ``y`` is a `LinearSpaceElement` or `Field` element and
    ``x`` is a `LinearSpaceElement`.
    Hence, this operator can be defined either on a `LinearSpace` or on
    a `Field`. In the first case it is the pointwise multiplication,
    in the second the scalar multiplication.
    """

    def __init__(self, multiplicand, domain=None, range=None):
        """Initialize a new instance.

        Parameters
        ----------
        multiplicand : `LinearSpaceElement` or scalar
            Value to multiply by.
        domain : `LinearSpace` or `Field`, optional
            Set to which the operator can be applied.
            Default: ``multiplicand.space``.
        range : `LinearSpace` or `Field`, optional
            Set to which the operator maps. Default: ``multiplicand.space``.
        """
        if domain is None:
            domain = multiplicand.space
        if range is None:
            range = multiplicand.space
        super(MultiplyOperator, self).__init__(domain, range, linear=True)
        self.__multiplicand = multiplicand
        self.__domain_is_field = isinstance(domain, Field)
        self.__range_is_field = isinstance(range, Field)

    @property
    def multiplicand(self):
        """Value to multiply by."""
        return self.__multiplicand

    def _call(self, x, out=None):
        """Multiply ``x`` and write to ``out`` if given.

        Raises
        ------
        ValueError
            If ``out`` is given but the range is a `Field` (there is no
            mutable element to write into).
        """
        if out is None:
            return x * self.multiplicand
        elif not self.__range_is_field:
            if self.__domain_is_field:
                # ``x`` is a scalar here: out <- x * multiplicand.
                out.lincomb(x, self.multiplicand)
            else:
                out.assign(self.multiplicand * x)
        else:
            raise ValueError('can only use `out` with `LinearSpace` range')

    @property
    def adjoint(self):
        """Adjoint of this operator.

        Returns
        -------
        adjoint : `InnerProductOperator` or `MultiplyOperator`
            If the domain of this operator is the scalar field of a
            `LinearSpace` the adjoint is the inner product with ``y``,
            else it is the multiplication with ``y`` (conjugated when
            the domain is complex).

        Raises
        ------
        NotImplementedError
            If the domain is a `Field` that is neither `RealNumbers`
            nor `ComplexNumbers`.
        """
        if self.__domain_is_field:
            if isinstance(self.domain, RealNumbers):
                return InnerProductOperator(self.multiplicand)
            elif isinstance(self.domain, ComplexNumbers):
                return InnerProductOperator(self.multiplicand.conjugate())
            else:
                raise NotImplementedError(
                    'adjoint not implemented for domain{!r}'
                    ''.format(self.domain))
        elif self.domain.is_complex:
            return MultiplyOperator(np.conj(self.multiplicand),
                                    domain=self.range, range=self.domain)
        else:
            return MultiplyOperator(self.multiplicand,
                                    domain=self.range, range=self.domain)

    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r})'.format(self.__class__.__name__, self.multiplicand)

    def __str__(self):
        """Return ``str(self)``."""
        # BUG FIX: previously referenced the non-existent attribute
        # ``self.y``, which raised AttributeError on str(); the stored
        # multiplicand is the value actually multiplied by.
        return "x * {}".format(self.multiplicand)
class PowerOperator(Operator):
"""Operator taking a fixed power of a space or field element.
Implements::
PowerOperator(p)(x) == x ** p
Here, ``x`` is a `LinearSpaceElement` or `Field` element and ``p`` is
a number. Hence, this operator can be defined either on a
`LinearSpace` or on a `Field`.
"""
def __init__(self, domain, exponent):
"""Initialize a new instance.
Parameters
----------
domain : `LinearSpace` or `Field`
Set of elements on which the operator can be applied.
exponent : float
Exponent parameter of the power function applied to an element.
Examples
--------
Use with vectors
>>> op = PowerOperator(odl.rn(3), exponent=2)
>>> op([1, 2, 3])
rn(3).element([ 1., 4., 9.])
or scalars
>>> op = PowerOperator(odl.RealNumbers(), exponent=2)
>>> op(2.0)
4.0
"""
super(PowerOperator, self).__init__(
domain, domain, linear=(exponent == 1))
self.__exponent = float(exponent)
self.__domain_is_field = isinstance(domain, Field)
@property
def exponent(self):
"""Power of the input element to take."""
return | |
# Repository: mikeireland/chronostar
"""
A module containing all the required functions to evaluate the Bayesian
posterior of a Component model given the data.
The entry point function lnprob_func yields a score (higher is better)
for a given model for the data. This function can be given to an
emcee Sampler object for the model's parameter space to be explored.
Bayes' Theorem states that the posterior of a model given the data
(goodness of the fit) is proportional to the product of the prior
belief on model parameters with the likelihood of the seeing the data
given the model. For convenience this equation is calculated in
log form. That is:
ln P(M|D) \propto ln P(M) + ln P(D|M)
In this module, lnprob_func calculates P(M|D)
lnlike calculates ln P(D|M)
lnprior calculates ln P(M)
A simple example to consider is finding the posterior probabilty
of a proposed normal distribution given some N data points
D distributed over X. The chance we see one individual data point x
given the model M is P(d|M), which we can find by evaluating the
normal distribution at x.
To find the combined probability of seeing every data point in D,
given the model of M, we take the product of the model evaluated at each
data point:
P(D|M) = P(x_1|M) * P(x_2|M) * .. * P(x_N|M) = \prod_i^N P(x_i|M)
"""
import numpy as np
from chronostar.component import SphereComponent
#~ from chronostar import component
#~ SphereComponent = component.SphereComponent
#~ from . import component
USE_C_IMPLEMENTATION = True
try:
from ._overlap import get_lnoverlaps as c_get_lnoverlaps
except ImportError:
print("C IMPLEMENTATION OF GET_OVERLAP NOT IMPORTED")
USE_C_IMPLEMENTATION = False
def slow_get_lnoverlaps(g_cov, g_mn, st_covs, st_mns, dummy=None):
    """Pure-python calculation of the log overlap integrals.

    Kept as a fallback in case the swigged _overlap module is
    unavailable.

    Parameters
    ---------
    g_cov: ([6,6] float array)
        Covariance matrix of the group
    g_mn: ([6] float array)
        mean of the group
    st_covs: ([nstars, 6, 6] float array)
        covariance matrices of the stars
    st_mns: ([nstars, 6], float array)
        means of the stars
    dummy: {None}
        Unused; present only so this function's signature matches the C
        implementation, which requires an explicit `nstars`.

    Returns
    -------
    ln_ols: ([nstars] float array)
        an array of the logarithm of the overlaps
    """
    def _single_lnoverlap(st_cov, st_mn):
        # Log of the 6D Gaussian overlap integral:
        # -0.5 * (6 ln(2 pi) + ln det(C) + d^T C^-1 d), with C the sum
        # of the two covariance matrices and d the mean difference.
        comb_cov = st_cov + g_cov
        diff = st_mn - g_mn
        maha_dist = np.dot(diff.T, np.dot(np.linalg.inv(comb_cov), diff))
        total = 6 * np.log(2 * np.pi)
        total += np.log(np.linalg.det(comb_cov))
        total += maha_dist
        return -0.5 * total

    return np.array([_single_lnoverlap(cov, mn)
                     for cov, mn in zip(st_covs, st_mns)])
def calc_alpha(dx, dv, nstars):
    """Calculate the virial ratio alpha of a component.

    Assuming we have identified 100% of star mass with a 1 M_sun missing
    mass offset, and that average star mass is 1 M_sun.

    alpha > 1: gravitationally unbound, it is expanding.
    alpha < 1: gravitationally bound, it is collapsing.

    Parameters
    ----------
    dx: float
        spatial standard deviation [pc]
    dv: float
        velocity standard deviation [km/s]
    nstars: float
        (expected) number of member stars

    Returns
    -------
    float
        The (unitless) virial ratio alpha
    """
    # Gravitational constant in pc (km/s)^2 / Msun.
    # FIX: a second assignment previously shadowed an earlier, slightly
    # different value of G_const; only the final value was ever used, so
    # the dead assignment has been removed and this value kept.
    G_const = 0.004300917270069976  # pc (km/s)^2 / Msun
    M_sol = 1.  # Msun
    return (dv**2 * dx) / (G_const * (nstars+1) * M_sol)
def lnlognormal(x, mu=2.1, sig=1.0):
    """Evaluate the natural log of a lognormal PDF at `x` (x > 0)."""
    norm_term = -np.log(x * sig * np.sqrt(2 * np.pi))
    exp_term = (np.log(x) - mu)**2 / (2 * sig**2)
    return norm_term - exp_term
def ln_alpha_prior(comp, memb_probs, sig=1.0):
    """A very approximate, gentle prior preferring super-virial components.

    Since alpha is strictly positive, a lognormal prior is used; the log
    of the result is returned so it can be added to the log likelihood.
    With `sig` = 1 the mode sits at alpha = 3, corresponding to a FWHM
    of 1 dex.

    Parameters
    ----------
    comp: Component object
        An object from an implementation of the AbstractComponent class.
        Encapsulates the parameters describing a component fit.
    memb_probs: [nstars] float array
        membership array

    Returns
    -------
    float
        Log of the prior evaluated at the component's alpha
    """
    virial_ratio = calc_alpha(comp.get_sphere_dx(),
                              comp.get_sphere_dv(),
                              np.sum(memb_probs))
    return lnlognormal(virial_ratio, mu=2.1, sig=sig)
def lnprior(comp, memb_probs):
    """Computes the prior of the group models constraining parameter space

    Rejects (with -inf) means implausibly far from the origin, degenerate
    or enormous covariance matrices, and ages outside [0, 500] Myr; a
    valid model receives the alpha prior.

    Parameters
    ----------
    comp: Component object
        Component object encapsulating the component model
    memb_probs: [nstars] float array
        array of weights [0.0 - 1.0] for each star, describing probabilty
        of each star being a member of component beign fitted.

    Returns
    -------
    lnprior
        The logarithm of the prior on the model parameters
    """
    MAX_AGE = 500  # maximum allowed age [Myr]
    covmatrix = comp.get_covmatrix()
    stds = np.linalg.eigvalsh(covmatrix)
    # Means must stay within a (very generous) box around the origin.
    if np.min(comp.get_mean()) < -100000 or np.max(comp.get_mean()) > 100000:
        return -np.inf
    # Covariance must be non-degenerate; components can be quite large,
    # so only truly enormous eigenvalues are rejected.
    if np.min(stds) <= 0.0 or np.max(stds) > 1e+6:
        return -np.inf
    if comp.get_age() < 0.0 or comp.get_age() > MAX_AGE:
        return -np.inf
    # Covariance matrix must equal its own transpose (symmetry) ...
    if not np.allclose(covmatrix, covmatrix.T):
        return -np.inf
    # ... and be positive definite.
    if not np.all(np.linalg.eigvals(covmatrix) > 0):
        return -np.inf
    return ln_alpha_prior(comp, memb_probs, sig=1.0)
def get_lnoverlaps(comp, data, star_mask=None):
    """Calculate the log overlaps of stars with a component.

    Dispatches to the swigged C implementation when available (about a
    100x speed up for these 6x6 matrix operations), otherwise falls back
    to the pure-python version.

    Parameters
    ----------
    comp: Component object
        The component whose current-day projection is compared against
        the stars.
    data: dict
        stellar cartesian data being fitted to, stored as a dict:
        'means': [nstars,6] float array
            the central estimates of each star in XYZUVW space
        'covs': [nstars,6,6] float array
            the covariance of each star in XYZUVW space
    star_mask: [len(data)] indices
        A mask that excludes stars that have negliglbe membership
        probablities (and thus have their log overlaps scaled to tiny
        numbers).

    Returns
    -------
    [nstars] float array
        Log overlaps of the (masked) stars with the component.
    """
    # Select the (possibly masked) star arrays.
    if star_mask is None:
        star_means = data['means']
        star_covs = data['covs']
    else:
        star_means = data['means'][star_mask]
        star_covs = data['covs'][star_mask]

    # Project the component to the current day.
    mean_now, cov_now = comp.get_currentday_projection()

    if USE_C_IMPLEMENTATION:
        return c_get_lnoverlaps(cov_now, mean_now, star_covs, star_means,
                                len(star_means))
    return slow_get_lnoverlaps(cov_now, mean_now, star_covs, star_means)
def lnlike(comp, data, memb_probs, memb_threshold=1e-5,
           minimum_exp_starcount=10.):
    """Computes the log-likelihood for a fit to a group.

    The component's kinematics are projected forward to its current age
    and compared with the stars' XYZUVW values (and uncertainties):

        P(D|G) = prod_i[P(d_i|G)^{z_i}]
        ln P(D|G) = sum_i z_i*ln P(d_i|G)

    Parameters
    ----------
    comp: Component object
        the component model being fitted
    data: dict
        traceback data being fitted to, stored as a dict:
        'means': [nstars,6] float array
            the central estimates of each star in XYZUVW space
        'covs': [nstars,6,6] float array
            the covariance of each star in XYZUVW space
    memb_probs: [nstars] float array
        array of weights [0.0 - 1.0] for each star, describing how likely
        they are members of group to be fitted.
    memb_threshold: float
        stars with membership below this contribute nothing
    minimum_exp_starcount: float
        memberships are scaled up so the expected star count never drops
        below this value

    Returns
    -------
    lnlike
        the logarithm of the likelihood of the fit
    """
    # Boost expected star count up to the minimum threshold; this is a
    # bit of a hack to prevent component amplitudes dwindling to nothing.
    # TODO: Check if this effect is ever actually triggered...
    total_memb = np.sum(memb_probs)
    if total_memb < minimum_exp_starcount:
        memb_probs = np.copy(memb_probs) * (minimum_exp_starcount / total_memb)

    # Potentially negligible optimisation: only stars above the
    # membership threshold contribute to the sum.
    contributing = np.where(memb_probs > memb_threshold)
    lnols = np.zeros(len(memb_probs))
    lnols[contributing] = get_lnoverlaps(comp, data, star_mask=contributing)

    # Weight each star's contribution by its membership probability.
    return np.sum(lnols * memb_probs)
def lnprob_func(pars, data, memb_probs=None,
trace_orbit_func=None, optimisation_method='emcee',
Component=SphereComponent, **kwargs):
"""Computes the log-probability for a fit to a group.
Parameters
----------
pars
Parameters describing the group model being fitted
e.g. for SphereComponent:
0,1,2,3,4,5, 6, 7, 8
X,Y,Z,U,V,W,lndX,lndV,age
data
data: dict
'means': [nstars,6] float array_like
the central estimates of star phase-space properties
'covs': [nstars,6,6] float array_like
the phase-space covariance matrices of stars
'bg_lnols': [nstars] float array_like (opt.)
the log overlaps of stars with whatever pdf describes
the background distribution of stars.
memb_probs
array of weights [0.0 - 1.0] for each star, describing how likely
they are members of group to be fitted.
Component: Class implmentation | |
* (pt.y - outPt2.pt.y)
/ (outPt2.prevOp.pt.y - outPt2.pt.y)
+ outPt2.pt.x
):
result = not result
outPt2 = outPt2.nextOp
if outPt2 == outPt:
break
def _Poly2ContainsPoly1(outPt1, outPt2):
    # Walk poly1 looking for a vertex strictly off poly2's boundary.
    # If every vertex of poly1 lies on poly2's boundary, poly2 contains
    # poly1; otherwise the first off-boundary vertex decides via a
    # point-in-polygon test.
    probe = outPt1
    if not _PointOnPolygon(probe.pt, outPt2):
        return _PointInPolygon(probe.pt, outPt2)
    probe = probe.nextOp
    while probe != outPt1:
        if not _PointOnPolygon(probe.pt, outPt2):
            return _PointInPolygon(probe.pt, outPt2)
        probe = probe.nextOp
    return True
def _EdgesAdjacent(inode):
return (inode.e1.nextInSEL == inode.e2) or (inode.e1.prevInSEL == inode.e2)
def _UpdateOutPtIdxs(outrec):
op = outrec.pts
while True:
op.idx = outrec.idx
op = op.prevOp
if op == outrec.pts:
break
class Clipper(ClipperBase):
    def __init__(self):
        """Create a clipper with default settings (EvenOdd fill, Intersection)."""
        ClipperBase.__init__(self)
        self.ReverseSolution = False
        self.ForceSimple = False
        self._PolyOutList = []
        self._ClipType = ClipType.Intersection
        self._Scanbeam = None        # sorted list of y coords still to process
        self._ActiveEdges = None     # AEL: edges crossing the current scanbeam
        self._SortedEdges = None     # SEL: working list for horizontals/intersections
        self._IntersectNodes = None
        self._ClipFillType = PolyFillType.EvenOdd
        self._SubjFillType = PolyFillType.EvenOdd
        self._ExecuteLocked = False
        self._UsingPolyTree = False
        self._JoinList = None
        self._HorzJoins = None
    def _Reset(self):
        # Clear base-class edge state, then rebuild the scanbeam list from
        # the y coordinate of every local minimum so execution can restart.
        ClipperBase._Reset(self)
        self._Scanbeam = None
        self._PolyOutList = []
        lm = self._LocalMinList
        while lm is not None:
            self._InsertScanbeam(lm.y)
            lm = lm.nextLm
    def Clear(self):
        # Drop any accumulated output polygons along with the base-class state.
        self._PolyOutList = []
        ClipperBase.Clear(self)
def _InsertScanbeam(self, y):
if self._Scanbeam is None:
self._Scanbeam = Scanbeam(y)
elif y > self._Scanbeam.y:
self._Scanbeam = Scanbeam(y, self._Scanbeam)
else:
sb = self._Scanbeam
while sb.nextSb is not None and y <= sb.nextSb.y:
sb = sb.nextSb
if y == sb.y:
return
newSb = Scanbeam(y, sb.nextSb)
sb.nextSb = newSb
def _PopScanbeam(self):
result = self._Scanbeam.y
self._Scanbeam = self._Scanbeam.nextSb
return result
    def _SetWindingCount(self, edge):
        """Compute edge.windCnt / windCnt2 from its neighbours in the AEL.

        windCnt tracks windings of the edge's own polygon type; windCnt2
        accumulates windings of the opposite type between the nearest
        same-type edge and this edge.
        """
        # Find the nearest edge of the same polygon type to the left.
        e = edge.prevInAEL
        while e is not None and e.PolyType != edge.PolyType:
            e = e.prevInAEL
        if e is None:
            # No same-type edge to the left: start a fresh winding count.
            edge.windCnt = edge.windDelta
            edge.windCnt2 = 0
            e = self._ActiveEdges
        elif self._IsEvenOddFillType(edge):
            # EvenOdd filling: the winding count is always 1.
            edge.windCnt = 1
            edge.windCnt2 = e.windCnt2
            e = e.nextInAEL
        else:
            # NonZero / Positive / Negative filling.
            if e.windCnt * e.windDelta < 0:
                if abs(e.windCnt) > 1:
                    if e.windDelta * edge.windDelta < 0:
                        edge.windCnt = e.windCnt
                    else:
                        edge.windCnt = e.windCnt + edge.windDelta
                else:
                    edge.windCnt = e.windCnt + e.windDelta + edge.windDelta
            elif (abs(e.windCnt) > 1) and (e.windDelta * edge.windDelta < 0):
                edge.windCnt = e.windCnt
            elif e.windCnt + edge.windDelta == 0:
                edge.windCnt = e.windCnt
            else:
                edge.windCnt = e.windCnt + edge.windDelta
            edge.windCnt2 = e.windCnt2
            e = e.nextInAEL
        # Update windCnt2 by walking the edges between e and edge.
        if self._IsEvenOddAltFillType(edge):
            while e != edge:
                if edge.windCnt2 == 0:
                    edge.windCnt2 = 1
                else:
                    edge.windCnt2 = 0
                e = e.nextInAEL
        else:
            while e != edge:
                edge.windCnt2 += e.windDelta
                e = e.nextInAEL
def _IsEvenOddFillType(self, edge):
if edge.PolyType == PolyType.Subject:
return self._SubjFillType == PolyFillType.EvenOdd
else:
return self._ClipFillType == PolyFillType.EvenOdd
def _IsEvenOddAltFillType(self, edge):
if edge.PolyType == PolyType.Subject:
return self._ClipFillType == PolyFillType.EvenOdd
else:
return self._SubjFillType == PolyFillType.EvenOdd
def _IsContributing(self, edge):
if edge.PolyType == PolyType.Subject:
pft = self._SubjFillType
pft2 = self._ClipFillType
else:
pft = self._ClipFillType
pft2 = self._SubjFillType
if pft == PolyFillType.EvenOdd or pft == PolyFillType.NonZero:
if abs(edge.windCnt) != 1:
return False
elif pft == PolyFillType.Positive:
if edge.windCnt != 1:
return False
elif pft == PolyFillType.Negative:
if edge.windCnt != -1:
return False
if self._ClipType == ClipType.Intersection: ###########
if pft2 == PolyFillType.EvenOdd or pft2 == PolyFillType.NonZero:
return edge.windCnt2 != 0
elif pft2 == PolyFillType.Positive:
return edge.windCnt2 > 0
else:
return edge.windCnt2 < 0 # Negative
elif self._ClipType == ClipType.Union: ###########
if pft2 == PolyFillType.EvenOdd or pft2 == PolyFillType.NonZero:
return edge.windCnt2 == 0
elif pft2 == PolyFillType.Positive:
return edge.windCnt2 <= 0
else:
return edge.windCnt2 >= 0 # Negative
elif self._ClipType == ClipType.Difference: ###########
if edge.PolyType == PolyType.Subject:
if pft2 == PolyFillType.EvenOdd or pft2 == PolyFillType.NonZero:
return edge.windCnt2 == 0
elif edge.PolyType == PolyFillType.Positive:
return edge.windCnt2 <= 0
else:
return edge.windCnt2 >= 0
else:
if pft2 == PolyFillType.EvenOdd or pft2 == PolyFillType.NonZero:
return edge.windCnt2 != 0
elif pft2 == PolyFillType.Positive:
return edge.windCnt2 > 0
else:
return edge.windCnt2 < 0
else: # self._ClipType == ClipType.XOR: ###########
return True
def _AddEdgeToSEL(self, edge):
if self._SortedEdges is None:
self._SortedEdges = edge
edge.prevInSEL = None
edge.nextInSEL = None
else:
# add edge to front of list ...
edge.nextInSEL = self._SortedEdges
edge.prevInSEL = None
self._SortedEdges.prevInSEL = edge
self._SortedEdges = edge
def _CopyAELToSEL(self):
e = self._ActiveEdges
self._SortedEdges = e
while e is not None:
e.prevInSEL = e.prevInAEL
e.nextInSEL = e.nextInAEL
e = e.nextInAEL
def _InsertEdgeIntoAEL(self, edge):
edge.prevInAEL = None
edge.nextInAEL = None
if self._ActiveEdges is None:
self._ActiveEdges = edge
elif _E2InsertsBeforeE1(self._ActiveEdges, edge):
edge.nextInAEL = self._ActiveEdges
self._ActiveEdges.prevInAEL = edge
self._ActiveEdges = edge
else:
e = self._ActiveEdges
while e.nextInAEL is not None and not _E2InsertsBeforeE1(e.nextInAEL, edge):
e = e.nextInAEL
edge.nextInAEL = e.nextInAEL
if e.nextInAEL is not None:
e.nextInAEL.prevInAEL = edge
edge.prevInAEL = e
e.nextInAEL = edge
    def _InsertLocalMinimaIntoAEL(self, botY):
        """Activate every local minimum whose y equals botY.

        Each minimum contributes a left and a right bound edge; both are
        inserted into the AEL, winding counts are initialised, scanbeams
        are queued for the bound tops, and output polygons / joins are
        started where the bounds contribute to the solution.
        """
        while self._CurrentLocMin is not None and self._CurrentLocMin.y == botY:
            lb = self._CurrentLocMin.leftBound
            rb = self._CurrentLocMin.rightBound
            self._InsertEdgeIntoAEL(lb)
            self._InsertScanbeam(lb.Top.y)
            self._InsertEdgeIntoAEL(rb)
            # Winding deltas: EvenOdd treats both bounds as +1, otherwise
            # the right bound winds opposite to the left.
            if self._IsEvenOddFillType(lb):
                lb.windDelta = 1
                rb.windDelta = 1
            else:
                rb.windDelta = -lb.windDelta
            self._SetWindingCount(lb)
            # The right bound shares the left bound's winding counts.
            rb.windCnt = lb.windCnt
            rb.windCnt2 = lb.windCnt2
            if rb.dx == horizontal:
                # Horizontal right bound is handled through the SEL.
                self._AddEdgeToSEL(rb)
                self._InsertScanbeam(rb.nextInLML.Top.y)
            else:
                self._InsertScanbeam(rb.Top.y)
            if self._IsContributing(lb):
                self._AddLocalMinPoly(lb, rb, Point(lb.Curr.x, self._CurrentLocMin.y))
            # A contributing horizontal right bound may join with queued
            # horizontal joins that overlap it.
            if rb.outIdx >= 0 and rb.dx == horizontal and self._HorzJoins is not None:
                hj = self._HorzJoins
                while True:
                    dummy1, dummy2, overlap = _GetOverlapSegment(
                        hj.edge.Bot, hj.edge.Top, rb.Bot, rb.Top
                    )
                    if overlap:
                        self._AddJoin(hj.edge, rb, hj.savedIdx)
                    hj = hj.nextHj
                    if hj == self._HorzJoins:
                        break
            if lb.nextInAEL != rb:
                if (
                    rb.outIdx >= 0
                    and rb.prevInAEL.outIdx >= 0
                    and _SlopesEqual2(rb.prevInAEL, rb)
                ):
                    self._AddJoin(rb, rb.prevInAEL)
                # Intersect the new bounds with any edges lying between them.
                e = lb.nextInAEL
                pt = lb.Curr
                while e != rb:
                    self._IntersectEdges(rb, e, pt)
                    e = e.nextInAEL
            self._PopLocalMinima()
    def _SwapPositionsInAEL(self, e1, e2):
        """Swap the positions of e1 and e2 within the active edge list.

        The two adjacent cases (e1 directly before or after e2) are
        handled separately from the general case; finally the list head
        is repaired if either edge ended up first.
        """
        if e1.nextInAEL == e2:
            # e1 immediately precedes e2.
            nextE = e2.nextInAEL
            if nextE is not None:
                nextE.prevInAEL = e1
            prevE = e1.prevInAEL
            if prevE is not None:
                prevE.nextInAEL = e2
            e2.prevInAEL = prevE
            e2.nextInAEL = e1
            e1.prevInAEL = e2
            e1.nextInAEL = nextE
        elif e2.nextInAEL == e1:
            # e2 immediately precedes e1.
            nextE = e1.nextInAEL
            if nextE is not None:
                nextE.prevInAEL = e2
            prevE = e2.prevInAEL
            if prevE is not None:
                prevE.nextInAEL = e1
            e1.prevInAEL = prevE
            e1.nextInAEL = e2
            e2.prevInAEL = e1
            e2.nextInAEL = nextE
        else:
            # Non-adjacent: exchange all four link fields.
            nextE = e1.nextInAEL
            prevE = e1.prevInAEL
            e1.nextInAEL = e2.nextInAEL
            if e1.nextInAEL is not None:
                e1.nextInAEL.prevInAEL = e1
            e1.prevInAEL = e2.prevInAEL
            if e1.prevInAEL is not None:
                e1.prevInAEL.nextInAEL = e1
            e2.nextInAEL = nextE
            if e2.nextInAEL is not None:
                e2.nextInAEL.prevInAEL = e2
            e2.prevInAEL = prevE
            if e2.prevInAEL is not None:
                e2.prevInAEL.nextInAEL = e2
        # Repair the list head if either edge is now first.
        if e1.prevInAEL is None:
            self._ActiveEdges = e1
        elif e2.prevInAEL is None:
            self._ActiveEdges = e2
def _SwapPositionsInSEL(self, e1, e2):
if e1.nextInSEL == e2:
nextE = e2.nextInSEL
if nextE is not None:
nextE.prevInSEL = e1
prevE = e1.prevInSEL
if prevE is not None:
prevE.nextInSEL = e2
e2.prevInSEL = prevE
e2.nextInSEL = e1
e1.prevInSEL = e2
e1.nextInSEL = nextE
elif e2.nextInSEL == e1:
nextE = e1.nextInSEL
if nextE is not None:
nextE.prevInSEL = e2
prevE = e2.prevInSEL
if prevE is not None:
prevE.nextInSEL = e1
e1.prevInSEL = prevE
e1.nextInSEL = e2
e2.prevInSEL = e1
e2.nextInSEL = nextE
else:
nextE = e1.nextInSEL
prevE = e1.prevInSEL
e1.nextInSEL = e2.nextInSEL
e1.nextInSEL = e2.nextInSEL
if e1.nextInSEL is not None:
e1.nextInSEL.prevInSEL = e1
e1.prevInSEL = e2.prevInSEL
if e1.prevInSEL is not None:
e1.prevInSEL.nextInSEL = e1
e2.nextInSEL = nextE
if e2.nextInSEL is not None:
e2.nextInSEL.prevInSEL = e2
e2.prevInSEL = prevE
if e2.prevInSEL is not None:
e2.prevInSEL.nextInSEL = e2
if e1.prevInSEL is None:
self._SortedEdges = e1
elif e2.prevInSEL is None:
self._SortedEdges = e2
def _IsTopHorz(self, xPos):
e = self._SortedEdges
while e is not None:
if (xPos >= min(e.Curr.x, e.Top.x)) and (xPos <= max(e.Curr.x, e.Top.x)):
return False
e = e.nextInSEL
return True
def _ProcessHorizontal(self, horzEdge):
if horzEdge.Curr.x < horzEdge.Top.x:
horzLeft = horzEdge.Curr.x
horzRight = horzEdge.Top.x
direction = Direction.LeftToRight
else:
horzLeft = horzEdge.Top.x
horzRight = horzEdge.Curr.x
direction = Direction.RightToLeft
eMaxPair = None
if horzEdge.nextInLML is None:
eMaxPair = _GetMaximaPair(horzEdge)
e = _GetnextInAEL(horzEdge, direction)
while e is not None:
if (e.Curr.x == horzEdge.Top.x) and eMaxPair is None:
if _SlopesEqual2(e, horzEdge.nextInLML):
if horzEdge.outIdx >= 0 and e.outIdx >= 0:
self._AddJoin(horzEdge.nextInLML, e, horzEdge.outIdx)
break
elif e.dx < horzEdge.nextInLML.dx:
break
eNext = _GetnextInAEL(e, direction)
if (
eMaxPair is not None
or ((direction == Direction.LeftToRight) and (e.Curr.x < horzRight))
or ((direction == Direction.RightToLeft) and (e.Curr.x > horzLeft))
):
if e == eMaxPair:
if direction == Direction.LeftToRight:
self._IntersectEdges(
horzEdge, e, Point(e.Curr.x, horzEdge.Curr.y)
)
else:
self._IntersectEdges(
e, horzEdge, Point(e.Curr.x, horzEdge.Curr.y)
)
return
elif e.dx == horizontal and not _IsMinima(e) and e.Curr.x <= e.Top.x:
if direction == Direction.LeftToRight:
|
# NOTE: trailing non-code residue (dataset-viewer boilerplate) removed.