# NOTE: dataset/extraction artifact — the following header lines were not part
# of the original source file.
\**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = table = None
self.name = quoted_name(name, kw.pop("quote", None))
self.unique = kw.pop("unique", False)
_column_flag = kw.pop("_column_flag", False)
if "info" in kw:
self.info = kw.pop("info")
# TODO: consider "table" argument being public, but for
# the purpose of the fix here, it starts as private.
if "_table" in kw:
table = kw.pop("_table")
self._validate_dialect_kwargs(kw)
self.expressions = []
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(
self,
*expressions,
_column_flag=_column_flag,
_gather_expressions=self.expressions
)
if table is not None:
self._set_parent(table)
    def _set_parent(self, table, **kw):
        # Invoked when this Index becomes associated with ``table`` — either
        # directly from the constructor or via ColumnCollectionMixin when
        # table-bound Column objects are gathered.
        ColumnCollectionMixin._set_parent(self, table)
        # An Index belongs to exactly one Table; re-parenting to a different
        # Table is rejected.
        if self.table is not None and table is not self.table:
            raise exc.ArgumentError(
                "Index '%s' is against table '%s', and "
                "cannot be associated with table '%s'."
                % (self.name, self.table.description, table.description)
            )
        self.table = table
        table.indexes.add(self)
        # Resolve gathered expressions against the parent table: entries that
        # are already ClauseElements are kept, others are replaced by the
        # corresponding column expression from ``_col_expressions``.
        expressions = self.expressions
        col_expressions = self._col_expressions(table)
        assert len(expressions) == len(col_expressions)
        self.expressions = [
            expr if isinstance(expr, ClauseElement) else colexpr
            for expr, colexpr in zip(expressions, col_expressions)
        ]
    @property
    def bind(self):
        """Return the connectable associated with this Index.

        Proxies to the parent :class:`_schema.Table`'s bind; note that
        ``self.table`` is ``None`` until the index is associated with a
        table, in which case this raises ``AttributeError``.
        """
        return self.table.bind
def create(self, bind=None, checkfirst=False):
"""Issue a ``CREATE`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`_schema.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
return self
def drop(self, bind=None, checkfirst=False):
"""Issue a ``DROP`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def __repr__(self):
return "Index(%s)" % (
", ".join(
[repr(self.name)]
+ [repr(e) for e in self.expressions]
+ (self.unique and ["unique=True"] or [])
)
)
# Naming convention applied when a MetaData is constructed without one:
# only Index objects get an automatic name, "ix_<label of first column>".
DEFAULT_NAMING_CONVENTION = util.immutabledict({"ix": "ix_%(column_0_label)s"})
class MetaData(SchemaItem):
    """A collection of :class:`_schema.Table`
    objects and their associated schema
    constructs.

    Holds a collection of :class:`_schema.Table` objects as well as
    an optional binding to an :class:`_engine.Engine` or
    :class:`_engine.Connection`. If bound, the :class:`_schema.Table` objects
    in the collection and their columns may participate in implicit SQL
    execution.

    The :class:`_schema.Table` objects themselves are stored in the
    :attr:`_schema.MetaData.tables` dictionary.

    :class:`_schema.MetaData` is a thread-safe object for read operations.
    Construction of new tables within a single :class:`_schema.MetaData`
    object, either explicitly or via reflection, may not be completely
    thread-safe.

    .. seealso::

        :ref:`metadata_describing` - Introduction to database metadata

    """

    # Visitor-traversal marker used by the SQL compiler/DDL machinery.
    __visit_name__ = "metadata"
    @util.deprecated_params(
        bind=(
            "2.0",
            "The :paramref:`_schema.MetaData.bind` argument is deprecated and "
            "will be removed in SQLAlchemy 2.0.",
        ),
    )
    def __init__(
        self,
        bind=None,
        schema=None,
        quote_schema=None,
        naming_convention=None,
        info=None,
    ):
        """Create a new MetaData object.

        :param bind:
          An Engine or Connection to bind to. May also be a string or URL
          instance, these are passed to :func:`_sa.create_engine` and
          this :class:`_schema.MetaData` will
          be bound to the resulting engine.

        :param schema:
           The default schema to use for the :class:`_schema.Table`,
           :class:`.Sequence`, and potentially other objects associated with
           this :class:`_schema.MetaData`. Defaults to ``None``.

           .. seealso::

                :ref:`schema_metadata_schema_name` - details on how the
                :paramref:`_schema.MetaData.schema` parameter is used.

                :paramref:`_schema.Table.schema`

                :paramref:`.Sequence.schema`

        :param quote_schema:
            Sets the ``quote_schema`` flag for those :class:`_schema.Table`,
            :class:`.Sequence`, and other objects which make usage of the
            local ``schema`` name.

        :param info: Optional data dictionary which will be populated into the
            :attr:`.SchemaItem.info` attribute of this object.

            .. versionadded:: 1.0.0

        :param naming_convention: a dictionary referring to values which
          will establish default naming conventions for :class:`.Constraint`
          and :class:`.Index` objects, for those objects which are not given
          a name explicitly.

          The keys of this dictionary may be:

          * a constraint or Index class, e.g. the :class:`.UniqueConstraint`,
            :class:`_schema.ForeignKeyConstraint` class, the :class:`.Index`
            class

          * a string mnemonic for one of the known constraint classes;
            ``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``, ``"uq"`` for foreign key,
            primary key, index, check, and unique constraint, respectively.

          * the string name of a user-defined "token" that can be used
            to define new naming tokens.

          The values associated with each "constraint class" or "constraint
          mnemonic" key are string naming templates, such as
          ``"uq_%(table_name)s_%(column_0_name)s"``,
          which describe how the name should be composed. The values
          associated with user-defined "token" keys should be callables of the
          form ``fn(constraint, table)``, which accepts the constraint/index
          object and :class:`_schema.Table` as arguments, returning a string
          result.

          The built-in names are as follows, some of which may only be
          available for certain types of constraint:

          * ``%(table_name)s`` - the name of the :class:`_schema.Table`
            object associated with the constraint.

          * ``%(referred_table_name)s`` - the name of the
            :class:`_schema.Table`
            object associated with the referencing target of a
            :class:`_schema.ForeignKeyConstraint`.

          * ``%(column_0_name)s`` - the name of the :class:`_schema.Column`
            at index position "0" within the constraint.

          * ``%(column_0N_name)s`` - the name of all :class:`_schema.Column`
            objects in order within the constraint, joined without a
            separator.

          * ``%(column_0_N_name)s`` - the name of all
            :class:`_schema.Column`
            objects in order within the constraint, joined with an
            underscore as a separator.

          * ``%(column_0_label)s``, ``%(column_0N_label)s``,
            ``%(column_0_N_label)s`` - the label of either the zeroth
            :class:`_schema.Column` or all :class:`.Columns`, separated with
            or without an underscore

          * ``%(column_0_key)s``, ``%(column_0N_key)s``,
            ``%(column_0_N_key)s`` - the key of either the zeroth
            :class:`_schema.Column` or all :class:`.Columns`, separated with
            or without an underscore

          * ``%(referred_column_0_name)s``, ``%(referred_column_0N_name)s``
            ``%(referred_column_0_N_name)s``, ``%(referred_column_0_key)s``,
            ``%(referred_column_0N_key)s``, ... column tokens which
            render the names/keys/labels of columns that are referenced
            by a :class:`_schema.ForeignKeyConstraint`.

          * ``%(constraint_name)s`` - a special key that refers to the
            existing name given to the constraint. When this key is
            present, the :class:`.Constraint` object's existing name will be
            replaced with one that is composed from template string that
            uses this token. When this token is present, it is required that
            the :class:`.Constraint` is given an explicit name ahead of time.

          * user-defined: any additional token may be implemented by passing
            it along with a ``fn(constraint, table)`` callable to the
            naming_convention dictionary.

          .. versionadded:: 1.3.0 - added new ``%(column_0N_name)s``,
             ``%(column_0_N_name)s``, and related tokens that produce
             concatenations of names, keys, or labels for all columns referred
             to by a given constraint.

          .. seealso::

                :ref:`constraint_naming_conventions` - for detailed usage
                examples.

        """
        self.tables = util.FacadeDict()
        self.schema = quoted_name(schema, quote_schema)
        # Fall back to the package default ("ix_<label>") when no explicit
        # convention is supplied.
        self.naming_convention = (
            naming_convention
            if naming_convention
            else DEFAULT_NAMING_CONVENTION
        )
        if info:
            self.info = info
        self._schemas = set()
        self._sequences = {}
        self._fk_memos = collections.defaultdict(list)
        # Goes through the ``bind`` property setter; strings/URLs are coerced
        # into an Engine via create_engine().
        self.bind = bind
tables = None
"""A dictionary of :class:`_schema.Table`
objects keyed to their name or "table key".
The exact key is that determined by the :attr:`_schema.Table.key`
attribute;
for a table with no :attr:`_schema.Table.schema` attribute,
this is the same
as :attr:`_schema.Table.name`. For a table with a schema,
it is typically of the
form ``schemaname.tablename``.
.. seealso::
:attr:`_schema.MetaData.sorted_tables`
"""
def __repr__(self):
if self.bind:
return "MetaData(bind=%r)" % self.bind
else:
return "MetaData()"
def __contains__(self, table_or_key):
if not isinstance(table_or_key, util.string_types):
table_or_key = table_or_key.key
return table_or_key in self.tables
    def _add_table(self, name, schema, table):
        # Register ``table`` under its canonical key and record its schema
        # name in the set of distinct schemas seen by this MetaData.
        key = _get_table_key(name, schema)
        self.tables._insert_item(key, table)
        if schema:
            self._schemas.add(schema)
    def _remove_table(self, name, schema):
        # Remove the table from the collection, detach any ForeignKey
        # objects memoized against this MetaData, and recompute the set of
        # known schema names from the tables that remain.
        key = _get_table_key(name, schema)
        # ``self.tables`` restricts ordinary mutation; go through the plain
        # dict API to pop in place.
        removed = dict.pop(self.tables, key, None)
        if removed is not None:
            for fk in removed.foreign_keys:
                fk._remove_from_metadata(self)
        if self._schemas:
            self._schemas = set(
                [
                    t.schema
                    for t in self.tables.values()
                    if t.schema is not None
                ]
            )
    def __getstate__(self):
        # Pickle support.  The bind is deliberately omitted — engines and
        # connections are not serialized; ``__setstate__`` restores
        # ``_bind`` as None.
        return {
            "tables": self.tables,
            "schema": self.schema,
            "schemas": self._schemas,
            "sequences": self._sequences,
            "fk_memos": self._fk_memos,
            "naming_convention": self.naming_convention,
        }
    def __setstate__(self, state):
        # Unpickle support; mirrors the keys emitted by ``__getstate__``.
        self.tables = state["tables"]
        self.schema = state["schema"]
        self.naming_convention = state["naming_convention"]
        # The bind is never pickled; a restored MetaData starts unbound.
        self._bind = None
        self._sequences = state["sequences"]
        self._schemas = state["schemas"]
        self._fk_memos = state["fk_memos"]
def is_bound(self):
"""True if this MetaData is bound to an Engine or Connection."""
return self._bind is not None
    def bind(self):
        """An :class:`_engine.Engine` or :class:`_engine.Connection`
        to which this
        :class:`_schema.MetaData` is bound.

        Typically, a :class:`_engine.Engine` is assigned to this attribute
        so that "implicit execution" may be used, or alternatively
        as a means of providing engine binding information to an
        ORM :class:`.Session` object::

            engine = create_engine("someurl://")
            metadata.bind = engine

        .. seealso::

           :ref:`dbengine_implicit` - background on "bound metadata"

        """
        # Plain attribute read; assignment goes through ``_bind_to``.  This
        # function is wrapped into a property further down in the class body.
        return self._bind
    @util.preload_module("sqlalchemy.engine.url")
    def _bind_to(self, bind):
        """Bind this MetaData to an Engine, Connection, string or URL."""
        # ``sqlalchemy.engine.url`` is imported lazily to avoid an import
        # cycle at module load time.
        url = util.preloaded.engine_url
        # Strings and URL objects are coerced into a real Engine; anything
        # else (Engine/Connection/None) is stored as-is.
        if isinstance(bind, util.string_types + (url.URL,)):
            self._bind = sqlalchemy.create_engine(bind)
        else:
            self._bind = bind

    # Combine the docstring'd getter above with the setter to form the
    # public ``MetaData.bind`` attribute.
    bind = property(bind, _bind_to)
    def clear(self):
        """Clear all Table objects from this MetaData."""
        # ``self.tables`` restricts ordinary mutation; use the plain dict
        # API to empty it in place.
        dict.clear(self.tables)
        self._schemas.clear()
        self._fk_memos.clear()
def remove(self, table):
"""Remove the given | |
"""rio_tiler.io.base: ABC class for rio-tiler readers."""
import abc
import asyncio
import re
import warnings
from typing import Any, Coroutine, Dict, List, Optional, Sequence, Tuple, Type, Union
import attr
from morecantile import Tile, TileMatrixSet
from ..constants import WEB_MERCATOR_TMS, BBox
from ..errors import (
ExpressionMixingWarning,
MissingAssets,
MissingBands,
TileOutsideBounds,
)
from ..expression import apply_expression
from ..models import ImageData, ImageStatistics, Info, Metadata, SpatialInfo
from ..tasks import multi_arrays, multi_values
@attr.s
class SpatialMixin:
    """Spatial Info Mixin.

    Attributes:
        tms (morecantile.TileMatrixSet, optional): TileMatrixSet grid definition. Defaults to `WebMercatorQuad`.
        bounds (tuple): Dataset bounds (left, bottom, right, top). **READ ONLY attribute**.
        minzoom (int): Overwrite Min Zoom level. **READ ONLY attribute**.
        maxzoom (int): Overwrite Max Zoom level. **READ ONLY attribute**.

    """

    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    # init=False: these are populated by the concrete reader, not passed to
    # the constructor.
    bounds: BBox = attr.ib(init=False)
    minzoom: int = attr.ib(init=False)
    maxzoom: int = attr.ib(init=False)

    @property
    def center(self) -> Tuple[float, float, int]:
        """Dataset center + minzoom."""
        return (
            (self.bounds[0] + self.bounds[2]) / 2,
            (self.bounds[1] + self.bounds[3]) / 2,
            self.minzoom,
        )

    @property
    def spatial_info(self) -> SpatialInfo:
        """Return Dataset's spatial info."""
        return SpatialInfo(
            bounds=self.bounds,
            center=self.center,
            minzoom=self.minzoom,
            maxzoom=self.maxzoom,
        )

    def tile_exists(self, tile_z: int, tile_x: int, tile_y: int) -> bool:
        """Check if a tile intersects the dataset bounds.

        Args:
            tile_x (int): Tile's horizontal index.
            tile_y (int): Tile's vertical index.
            tile_z (int): Tile's zoom level index.

        Returns:
            bool: True if the tile intersects the dataset bounds.

        """
        tile = Tile(x=tile_x, y=tile_y, z=tile_z)
        tile_bounds = self.tms.bounds(*tile)
        # Axis-aligned bounding-box overlap test.  NOTE(review): assumes
        # ``tile_bounds`` and ``self.bounds`` are expressed in the same
        # CRS — confirm against the concrete reader.
        return (
            (tile_bounds[0] < self.bounds[2])
            and (tile_bounds[2] > self.bounds[0])
            and (tile_bounds[3] > self.bounds[1])
            and (tile_bounds[1] < self.bounds[3])
        )
@attr.s
class BaseReader(SpatialMixin, metaclass=abc.ABCMeta):
    """Rio-tiler.io BaseReader.

    Abstract base class defining the synchronous reader interface; concrete
    readers must implement every abstract method below.
    """

    def __enter__(self):
        """Support using with Context Managers."""
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Support using with Context Managers."""
        pass

    @abc.abstractmethod
    def info(self) -> Info:
        """Return Dataset's info.

        Returns:
            rio_tiler.models.Info: Dataset info.

        """
        ...

    @abc.abstractmethod
    def stats(
        self, pmin: float = 2.0, pmax: float = 98.0, **kwargs: Any,
    ) -> Dict[str, ImageStatistics]:
        """Return Dataset's statistics.

        Args:
            pmin (float, optional): Histogram minimum cut. Defaults to `2.0`.
            pmax (float, optional): Histogram maximum cut. Defaults to `98.0`.

        Returns:
            rio_tiler.models.ImageStatistics: Dataset statistics.

        """
        ...

    def metadata(
        self, pmin: float = 2.0, pmax: float = 98.0, **kwargs: Any,
    ) -> Metadata:
        """Return Dataset's statistics and info.

        Args:
            pmin (float, optional): Histogram minimum cut. Defaults to `2.0`.
            pmax (float, optional): Histogram maximum cut. Defaults to `98.0`.

        Returns:
            rio_tiler.models.Metadata: Dataset statistics and metadata.

        """
        # Combine the concrete reader's info() and stats() into one model.
        info = self.info()
        stats = self.stats(pmin, pmax, **kwargs)
        return Metadata(statistics=stats, **info.dict())

    @abc.abstractmethod
    def tile(self, tile_x: int, tile_y: int, tile_z: int, **kwargs: Any) -> ImageData:
        """Read a Map tile from the Dataset.

        Args:
            tile_x (int): Tile's horizontal index.
            tile_y (int): Tile's vertical index.
            tile_z (int): Tile's zoom level index.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and tile spatial info.

        """
        ...

    @abc.abstractmethod
    def part(self, bbox: BBox, **kwargs: Any) -> ImageData:
        """Read a Part of a Dataset.

        Args:
            bbox (tuple): Output bounds (left, bottom, right, top) in target crs.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and input spatial info.

        """
        ...

    @abc.abstractmethod
    def preview(self, **kwargs: Any) -> ImageData:
        """Read a preview of a Dataset.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and input spatial info.

        """
        ...

    @abc.abstractmethod
    def point(self, lon: float, lat: float, **kwargs: Any) -> List:
        """Read a value from a Dataset.

        Args:
            lon (float): Longitude.
            lat (float): Latitude.

        Returns:
            list: Pixel value per bands/assets.

        """
        ...

    @abc.abstractmethod
    def feature(self, shape: Dict, **kwargs: Any) -> ImageData:
        """Read a Dataset for a GeoJSON feature.

        Args:
            shape (dict): Valid GeoJSON feature.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and input spatial info.

        """
        ...
@attr.s
class AsyncBaseReader(SpatialMixin, metaclass=abc.ABCMeta):
    """Rio-tiler.io AsyncBaseReader.

    Asynchronous counterpart of ``BaseReader``; concrete readers must
    implement every abstract coroutine below.
    """

    async def __aenter__(self):
        """Support using with Context Managers."""
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        """Support using with Context Managers."""
        pass

    @abc.abstractmethod
    async def info(self) -> Coroutine[Any, Any, Info]:
        """Return Dataset's info.

        Returns:
            rio_tiler.models.Info: Dataset info.

        """
        ...

    @abc.abstractmethod
    async def stats(
        self, pmin: float = 2.0, pmax: float = 98.0, **kwargs: Any
    ) -> Coroutine[Any, Any, Dict[str, ImageStatistics]]:
        """Return Dataset's statistics.

        Args:
            pmin (float, optional): Histogram minimum cut. Defaults to `2.0`.
            pmax (float, optional): Histogram maximum cut. Defaults to `98.0`.

        Returns:
            rio_tiler.models.ImageStatistics: Dataset statistics.

        """
        ...

    async def metadata(
        self, pmin: float = 2.0, pmax: float = 98.0, **kwargs: Any,
    ) -> Coroutine[Any, Any, Metadata]:
        """Return Dataset's statistics and info.

        Args:
            pmin (float, optional): Histogram minimum cut. Defaults to `2.0`.
            pmax (float, optional): Histogram maximum cut. Defaults to `98.0`.

        Returns:
            rio_tiler.models.Metadata: Dataset statistics and metadata.

        """
        # info() and stats() are independent, so they run concurrently.
        info, stats = await asyncio.gather(
            *[self.info(), self.stats(pmin, pmax, **kwargs)]
        )
        return Metadata(statistics=stats, **info.dict())

    @abc.abstractmethod
    async def tile(
        self, tile_x: int, tile_y: int, tile_z: int, **kwargs: Any
    ) -> Coroutine[Any, Any, ImageData]:
        """Read a Map tile from the Dataset.

        Args:
            tile_x (int): Tile's horizontal index.
            tile_y (int): Tile's vertical index.
            tile_z (int): Tile's zoom level index.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and tile spatial info.

        """
        ...

    @abc.abstractmethod
    async def part(self, bbox: BBox, **kwargs: Any) -> Coroutine[Any, Any, ImageData]:
        """Read a Part of a Dataset.

        Args:
            bbox (tuple): Output bounds (left, bottom, right, top) in target crs.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and input spatial info.

        """
        ...

    @abc.abstractmethod
    async def preview(self, **kwargs: Any) -> Coroutine[Any, Any, ImageData]:
        """Read a preview of a Dataset.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and input spatial info.

        """
        ...

    @abc.abstractmethod
    async def point(
        self, lon: float, lat: float, **kwargs: Any
    ) -> Coroutine[Any, Any, List]:
        """Read a value from a Dataset.

        Args:
            lon (float): Longitude.
            lat (float): Latitude.

        Returns:
            list: Pixel value per bands/assets.

        """
        ...

    @abc.abstractmethod
    async def feature(
        self, shape: Dict, **kwargs: Any
    ) -> Coroutine[Any, Any, ImageData]:
        """Read a Dataset for a GeoJSON feature.

        Args:
            shape (dict): Valid GeoJSON feature.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and input spatial info.

        """
        ...
@attr.s
class MultiBaseReader(BaseReader, metaclass=abc.ABCMeta):
    """MultiBaseReader Reader.

    This Reader is suited for dataset that are composed of multiple assets (e.g. STAC).

    Attributes:
        reader (rio_tiler.io.BaseReader): reader.
        reader_options (dict, option): options to forward to the reader. Defaults to `{}`.
        tms (morecantile.TileMatrixSet, optional): TileMatrixSet grid definition. Defaults to `WebMercatorQuad`.
        assets (sequence): Asset list. **READ ONLY attribute**.

    """

    reader: Type[BaseReader] = attr.ib()
    reader_options: Dict = attr.ib(factory=dict)
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    # init=False: presumably populated by the concrete subclass — verify
    # against the subclass's attrs post-init hook.
    assets: Sequence[str] = attr.ib(init=False)
    @abc.abstractmethod
    def _get_asset_url(self, asset: str) -> str:
        """Validate asset name and construct url.

        Concrete subclasses map an asset name to a fetchable URL here.
        """
        ...
def parse_expression(self, expression: str) -> Tuple:
"""Parse rio-tiler band math expression."""
assets = "|".join([fr"\b{asset}\b" for asset in self.assets])
_re = re.compile(assets.replace("\\\\", "\\"))
return tuple(set(re.findall(_re, expression)))
    def info(  # type: ignore
        self, assets: Union[Sequence[str], str] = None, *args, **kwargs: Any
    ) -> Dict[str, Info]:
        """Return metadata from multiple assets.

        Args:
            assets (sequence of str or str, optional): assets to fetch info from. Required keyword argument.

        Returns:
            dict: Multiple assets info in form of {"asset1": rio_tiler.models.Info}.

        """
        if not assets:
            raise MissingAssets("Missing 'assets' option")

        # Allow a single asset name to be passed as a bare string.
        if isinstance(assets, str):
            assets = (assets,)

        def _reader(asset: str, **kwargs: Any) -> Dict:
            # Open one asset with the configured reader and collect its info.
            url = self._get_asset_url(asset)
            with self.reader(url, tms=self.tms, **self.reader_options) as cog:  # type: ignore
                return cog.info()

        # Fan out over all requested assets.
        return multi_values(assets, _reader, *args, **kwargs)
    def stats(  # type: ignore
        self,
        pmin: float = 2.0,
        pmax: float = 98.0,
        assets: Union[Sequence[str], str] = None,
        **kwargs: Any,
    ) -> Dict[str, Dict[str, ImageStatistics]]:
        """Return array statistics from multiple assets.

        Args:
            pmin (float, optional): Histogram minimum cut. Defaults to `2.0`.
            pmax (float, optional): Histogram maximum cut. Defaults to `98.0`.
            assets (sequence of str or str): assets to fetch info from. Required keyword argument.
            kwargs (optional): Options to forward to the `self.reader.stats` method.

        Returns:
            dict: Multiple assets statistics in form of {"asset1": rio_tiler.models.ImageStatistics}.

        """
        if not assets:
            raise MissingAssets("Missing 'assets' option")

        # Allow a single asset name to be passed as a bare string.
        if isinstance(assets, str):
            assets = (assets,)

        def _reader(asset: str, *args, **kwargs) -> Dict:
            # Open one asset and forward the percentile cuts to its stats().
            url = self._get_asset_url(asset)
            with self.reader(url, tms=self.tms, **self.reader_options) as cog:  # type: ignore
                return cog.stats(*args, **kwargs)

        # Fan out over all requested assets.
        return multi_values(assets, _reader, pmin, pmax, **kwargs)
def metadata( # type: ignore
self,
pmin: float = 2.0,
pmax: float = 98.0,
assets: Union[Sequence[str], str] = None,
**kwargs: Any,
) -> Dict[str, Metadata]:
"""Return metadata from multiple assets.
Args:
pmin (float, optional): Histogram minimum cut. Defaults to `2.0`.
pmax (float, optional): Histogram maximum cut. Defaults to `98.0`.
| |
# Repository: perrin-isir/gym-gmazes
import random
import numpy as np
from gym_gmazes.envs.maze.cell import Cell
class Maze(object):
    """Class representing a maze; a 2D grid of Cell objects. Contains functions
    for generating randomly generating the maze as well as for solving the maze.

    Attributes:
        num_cols (int): The width of the maze, in Cells (columns)
        num_rows (int): The height of the maze, in Cells (rows)
        grid_size (int): The area of the maze, also the total number of Cells
        generation_path : The path that was taken when generating the maze
        grid (list): A list of Cell objects (the grid), indexed ``grid[row][col]``
    """

    def __init__(self, num_rows, num_cols, seed=None, standard=False):
        """Creates a grid of Cell objects that are neighbours to each other.

        Args:
            num_rows (int): The height of the maze, in cells (rows)
            num_cols (int): The width of the maze, in cells (columns)
            seed: seed for the random generator used during generation
            standard (bool): when True, build a fixed, hand-coded layout for
                the supported sizes (2, 3, 4, 8 columns) instead of a random
                maze
        """
        print("MAZE setting random seed ", seed)
        # NOTE(review): seeds the *global* ``random`` module — this affects
        # any other user of ``random`` in the process.
        random.seed(seed)
        self.num_cols = num_cols
        self.num_rows = num_rows
        self.grid_size = num_rows * num_cols
        self.grid = self.generate_grid()
        self.generation_path = []
        if not standard:
            # Random maze via recursive-backtracker generation.
            self.generate_maze((0, 0))
        elif self.num_cols == 3:
            # Hand-coded 3-wide layout: carve a fixed corridor.
            self.grid[0][0].remove_walls(1, 0)
            self.grid[1][0].remove_walls(0, 0)
            self.grid[1][0].remove_walls(2, 0)
            self.grid[2][0].remove_walls(1, 0)
            self.grid[2][0].remove_walls(2, 1)
            self.grid[2][1].remove_walls(2, 0)
            self.grid[2][1].remove_walls(1, 1)
            self.grid[1][1].remove_walls(2, 1)
            self.grid[1][1].remove_walls(0, 1)
            self.grid[0][1].remove_walls(1, 1)
            self.grid[0][1].remove_walls(0, 2)
            self.grid[0][2].remove_walls(0, 1)
            self.grid[0][2].remove_walls(1, 2)
            self.grid[1][2].remove_walls(0, 2)
            self.grid[1][2].remove_walls(2, 2)
            self.grid[2][2].remove_walls(1, 2)
        elif self.num_cols == 2:
            # Hand-coded 2-wide layout.
            self.grid[0][0].remove_walls(1, 0)
            self.grid[1][0].remove_walls(0, 0)
            self.grid[1][0].remove_walls(1, 1)
            self.grid[1][1].remove_walls(1, 0)
            self.grid[1][1].remove_walls(0, 1)
            self.grid[0][1].remove_walls(1, 1)
        elif self.num_cols == 4:
            # Open grid with a single interior wall line, leaving one gap
            # at row 0.
            self.empty_grid()
            for i in range(self.num_rows):
                if i != 0:
                    self.grid[i][1].add_walls(i, 2)
                    self.grid[i][2].add_walls(i, 1)
        elif self.num_cols == 8:
            # Open grid with a single interior wall line, leaving one gap
            # at row 1.
            self.empty_grid()
            for i in range(self.num_rows):
                if i != 1:
                    self.grid[i][3].add_walls(i, 4)
                    self.grid[i][4].add_walls(i, 3)
        else:
            print("ERROR : No standard maze for size ", self.num_cols)

    def empty_grid(self):
        # Remove every interior wall, producing a fully open room.
        for i in range(self.num_rows):
            for j in range(self.num_cols):
                if i != 0:
                    self.grid[i][j].remove_walls(i - 1, j)
                if j != 0:
                    self.grid[i][j].remove_walls(i, j - 1)
                if i != self.num_rows - 1:
                    self.grid[i][j].remove_walls(i + 1, j)
                if j != self.num_cols - 1:
                    self.grid[i][j].remove_walls(i, j + 1)

    def generate_grid(self):
        """Function that creates a 2D grid of Cell objects. This can be thought of as a
        maze without any paths carved out

        Return:
            A list with Cell objects at each position
        """
        # Create an empty list
        grid = list()
        # Place a Cell object at each location in the grid
        for i in range(self.num_rows):
            grid.append(list())
            for j in range(self.num_cols):
                grid[i].append(Cell(i, j))
        return grid

    def find_neighbours(self, cell_row, cell_col):
        """Finds all existing neighbours of a cell in the
        grid. Return a list of tuples containing indices for the neighbours.

        Args:
            cell_row (int): row index of the cell
            cell_col (int): column index of the cell

        Return:
            list: A list of neighbours
            None: If there are no neighbours
        """
        neighbours = list()

        def check_neighbour(row, col):
            # Check that a neighbour exists and that it's not visited before.
            if row >= 0 and row < self.num_rows and col >= 0 and col < self.num_cols:
                neighbours.append((row, col))

        check_neighbour(cell_row - 1, cell_col)  # Top neighbour
        check_neighbour(cell_row, cell_col + 1)  # Right neighbour
        check_neighbour(cell_row + 1, cell_col)  # Bottom neighbour
        check_neighbour(cell_row, cell_col - 1)  # Left neighbour
        if len(neighbours) > 0:
            return neighbours
        else:
            return None  # None if no unvisited neighbours found

    def _validate_neighbours_generate(self, neighbour_indices):
        """Function that validates whether a neighbour is unvisited or not. When
        generating the maze, we only want to move to unvisited cells (unless we are
        backtracking).

        Args:
            neighbour_indices: candidate neighbour coordinates

        Return:
            list: the subset of neighbours that are unvisited
            None: if every neighbour has been visited
        """
        neigh_list = [n for n in neighbour_indices if not self.grid[n[0]][n[1]].visited]
        if len(neigh_list) > 0:
            return neigh_list
        else:
            return None

    def _pick_random_entry_exit(self, used_entry_exit=None, extremity=False):
        """Function that picks random coordinates along the maze boundary to represent
        either the entry or exit point of the maze. Makes sure they are not at the same
        place.

        Args:
            used_entry_exit: coordinates already used for the other endpoint
            extremity (bool): when True, only accept dead-end cells
                (cells enclosed by exactly 3 walls)

        Return:
            tuple: the chosen (row, col) boundary coordinates
        """
        if extremity:

            def count_walls(coor):
                n_walls = (
                    self.grid[coor[0]][coor[1]].walls["right"] * 1
                    + self.grid[coor[0]][coor[1]].walls["left"] * 1
                    + self.grid[coor[0]][coor[1]].walls["top"] * 1
                    + self.grid[coor[0]][coor[1]].walls["bottom"] * 1
                )
                return n_walls

        else:
            # Without the extremity constraint, every candidate passes the
            # "== 3" test below.

            def count_walls(coor):
                return 3

        rng_entry_exit = used_entry_exit  # Initialize with used value
        # Try until unused location along boundary is found.
        while rng_entry_exit == used_entry_exit:
            rng_side = random.randint(0, 3)
            if rng_side == 0:  # Top row
                tmp_entry = (0, random.randint(0, self.num_cols - 1))
                if count_walls(tmp_entry) == 3:
                    rng_entry_exit = tmp_entry
            elif rng_side == 2:  # Bottom row
                tmp_entry = (self.num_rows - 1, random.randint(0, self.num_cols - 1))
                if count_walls(tmp_entry) == 3:
                    rng_entry_exit = tmp_entry
            elif rng_side == 1:  # Rightmost column
                tmp_entry = (random.randint(0, self.num_rows - 1), self.num_cols - 1)
                if count_walls(tmp_entry) == 3:
                    rng_entry_exit = tmp_entry
            elif rng_side == 3:  # Leftmost column
                tmp_entry = (random.randint(0, self.num_rows - 1), 0)
                if count_walls(tmp_entry) == 3:
                    rng_entry_exit = tmp_entry
        return rng_entry_exit  # Return entry/exit that is different from exit/entry

    def generate_maze(self, start_coor=(0, 0)):
        """This takes the internal grid object and removes walls between cells using the
        depth-first recursive backtracker algorithm.

        Args:
            start_coor: The starting point for the algorithm
        """
        k_curr, l_curr = start_coor  # Where to start generating
        path = [(k_curr, l_curr)]  # To track path of solution
        self.grid[k_curr][l_curr].visited = True  # Set initial cell to visited
        visit_counter = 1  # To count number of visited cells
        visited_cells = list()  # Stack of visited cells for backtracking

        while visit_counter < self.grid_size:  # While there are unvisited cells
            neighbour_indices = self.find_neighbours(k_curr, l_curr)
            neighbour_indices = self._validate_neighbours_generate(neighbour_indices)

            if neighbour_indices is not None:  # If there are unvisited neighbour cells
                visited_cells.append((k_curr, l_curr))  # Add current cell to stack
                k_next, l_next = random.choice(neighbour_indices)  # Choose neighbour
                # Carve the passage in both directions.
                self.grid[k_curr][l_curr].remove_walls(k_next, l_next)
                self.grid[k_next][l_next].remove_walls(k_curr, l_curr)
                self.grid[k_next][l_next].visited = True  # Move to that neighbour
                k_curr = k_next
                l_curr = l_next
                path.append((k_curr, l_curr))  # Add coordinates to path
                visit_counter += 1
            elif len(visited_cells) > 0:  # If there are no unvisited neighbour cells
                k_curr, l_curr = visited_cells.pop()  # Backtracking
                path.append((k_curr, l_curr))  # Add coordinates to path

        for i in range(self.num_rows):
            for j in range(self.num_cols):
                self.grid[i][j].visited = False  # Set all cells to unvisited
        self.generation_path = path

    def random_path(self, start_coor, seed=None):
        """Returns a path starting at coordinates start_coor.

        Performs 20 randomized greedy walks (never revisiting a cell) and
        returns the longest one found, excluding the starting cell.

        Args:
            start_coor: The starting point of the path
            seed: The seed
        """
        rng = np.random.default_rng(seed)
        longest_path = []
        for i in range(20):
            set_coors = set()
            path = []
            current_coor = start_coor
            done = False
            while not done:
                done = True
                path.append(current_coor)
                set_coors.add(current_coor)
                # NOTE(review): assumes the grid has more than one cell so
                # find_neighbours never returns None here.
                neighs = self.find_neighbours(current_coor[0], current_coor[1])
                rng.shuffle(neighs)
                for nei in neighs:
                    # Move to the first unvisited neighbour reachable without
                    # crossing a wall.
                    if (
                        not self.grid[current_coor[0]][
                            current_coor[1]
                        ].is_walls_between(self.grid[nei[0]][nei[1]])
                        and nei not in set_coors
                    ):
                        current_coor = nei
                        done = False
                        break
            if len(path) > len(longest_path):
                longest_path = path.copy()
        return longest_path[1:]

    def __str__(self):
        # ASCII rendering: a (2*rows+1) x (2*cols+1) character buffer where
        # even rows/cols hold wall glyphs and odd ones hold cell interiors.
        buffer = [[] for i in range(len(self.grid) * 2 + 1)]
        for i in range(len(self.grid) * 2 + 1):
            buffer[i] = [
                (
                    "+"
                    if i % 2 == 0 and j % 2 == 0
                    else "-"
                    if i % 2 == 0 and j % 2 == 1
                    else "|"
                    if i % 2 == 1 and j % 2 == 0
                    else " "
                )
                for j in range(len(self.grid[0]) * 2 + 1)
            ]
        # Blank out the wall glyphs wherever a passage has been carved.
        for i in range(len(self.grid)):
            for j in range(len(self.grid[i])):
                if not self.grid[i][j].walls["top"]:
                    buffer[i * 2 + 1 - 1][j * 2 + 1] = " "
                if not self.grid[i][j].walls["bottom"]:
                    buffer[i * 2 + 1 + 1][j * 2 + 1] = " "
                if not self.grid[i][j].walls["left"]:
                    buffer[i * 2 + 1][j * 2 + 1 - 1] = " "
                if not self.grid[i][j].walls["right"]:
                    buffer[i * 2 + 1][j * 2 + 1 + 1] = " "
        s = ""
        for r in buffer:
            for c in r:
                s += c
            s += "\n"
        return s

    def __repr__(self):
        return self.__str__()
def get_maze(num_rows, num_cols, thin=True, seed=None, standard=False):
m = Maze(num_rows, num_cols, seed, standard)
walls = []
thickness = 0.0 if thin else 0.1
def add_hwall(input_lines, i, j, t=0.0):
input_lines.append(([i - t, j - 0.001], [i - t, j + 1 + 0.001]))
if t > 0:
input_lines.append(([i - t - 0.001, j + 1], [i + t + 0.001, j + 1]))
input_lines.append(([i + t, j - 0.001], [i + t, j + 1 + 0.001]))
input_lines.append(([i + | |
# Repository: pgajdos/yaql
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Math module describes implemented math operations on numbers.
"""
import random
import six
from yaql.language import specs
from yaql.language import yaqltypes
@specs.parameter('left', yaqltypes.Number())
@specs.parameter('right', yaqltypes.Number())
@specs.name('#operator_+')
def binary_plus(left, right):
    """:yaql:operator +

    Returns the sum of the left and right operands.

    :signature: left + right
    :arg left: left operand
    :argType left: number
    :arg right: right operand
    :argType right: number
    :returnType: number

    .. code::

        yaql> 3 + 2
        5
    """
    return left + right
@specs.parameter('left', yaqltypes.Number())
@specs.parameter('right', yaqltypes.Number())
@specs.name('#operator_-')
def binary_minus(left, right):
    """:yaql:operator -

    Returns the difference between the left and right operands.

    :signature: left - right
    :arg left: left operand
    :argType left: number
    :arg right: right operand
    :argType right: number
    :returnType: number

    .. code::

        yaql> 3 - 2
        1
    """
    return left - right
@specs.parameter('left', yaqltypes.Number())
@specs.parameter('right', yaqltypes.Number())
@specs.name('#operator_*')
def multiplication(left, right):
    """:yaql:operator *
    Returns left multiplied by right.
    :signature: left * right
    :arg left: left operand
    :argType left: number
    :arg right: right operand
    :argType right: number
    :returnType: number
    .. code::
        yaql> 3 * 2.5
        7.5
    """
    # Plain Python numeric multiplication; yaql's type specs guarantee numbers.
    product = left * right
    return product
@specs.parameter('left', yaqltypes.Number())
@specs.parameter('right', yaqltypes.Number())
@specs.name('#operator_/')
def division(left, right):
    """:yaql:operator /
    Returns left divided by right.
    :signature: left / right
    :arg left: left operand
    :argType left: number
    :arg right: right operand
    :argType right: number
    :returnType: number
    .. code::
        yaql> 3 / 2
        1
        yaql> 3.0 / 2
        1.5
    """
    # Two integer operands keep integral semantics (floor division); any
    # float operand switches to true division.
    ints = six.integer_types
    if isinstance(left, ints) and isinstance(right, ints):
        return left // right
    return left / right
@specs.parameter('left', yaqltypes.Number())
@specs.parameter('right', yaqltypes.Number())
@specs.name('#operator_mod')
def modulo(left, right):
    """:yaql:operator mod
    Returns left modulo right.
    :signature: left mod right
    :arg left: left operand
    :argType left: number
    :arg right: right operand
    :argType right: number
    :returnType: number
    .. code::
        yaql> 3 mod 2
        1
    """
    # Python's % operator (sign follows the divisor, as in Python itself).
    remainder = left % right
    return remainder
@specs.parameter('op', yaqltypes.Number())
@specs.name('#unary_operator_+')
def unary_plus(op):
    """:yaql:operator unary +
    Returns +op.
    :signature: +op
    :arg op: operand
    :argType op: number
    :returnType: number
    .. code::
        yaql> +2
        2
    """
    # Unary plus is an identity for plain numbers; kept for symmetry with '-'.
    result = +op
    return result
@specs.parameter('op', yaqltypes.Number())
@specs.name('#unary_operator_-')
def unary_minus(op):
    """:yaql:operator unary -
    Returns -op.
    :signature: -op
    :arg op: operand
    :argType op: number
    :returnType: number
    .. code::
        yaql> -2
        -2
    """
    # Arithmetic negation of the single numeric operand.
    negated = -op
    return negated
@specs.parameter('left', yaqltypes.Number())
@specs.parameter('right', yaqltypes.Number())
@specs.name('#operator_>')
def gt(left, right):
    """:yaql:operator >
    Returns true if left is strictly greater than right, false otherwise.
    :signature: left > right
    :arg left: left operand
    :argType left: number
    :arg right: right operand
    :argType right: number
    :returnType: boolean
    .. code::
        yaql> 3 > 2
        true
    """
    return left > right
@specs.parameter('left', yaqltypes.Number())
@specs.parameter('right', yaqltypes.Number())
@specs.name('#operator_>=')
def gte(left, right):
    """:yaql:operator >=
    Returns true if left is greater or equal to right, false otherwise.
    :signature: left >= right
    :arg left: left operand
    :argType left: number
    :arg right: right operand
    :argType right: number
    :returnType: boolean
    .. code::
        yaql> 3 >= 3
        true
    """
    return left >= right
@specs.parameter('left', yaqltypes.Number())
@specs.parameter('right', yaqltypes.Number())
@specs.name('#operator_<')
def lt(left, right):
    """:yaql:operator <
    Returns true if left is strictly less than right, false otherwise.
    :signature: left < right
    :arg left: left operand
    :argType left: number
    :arg right: right operand
    :argType right: number
    :returnType: boolean
    .. code::
        yaql> 3 < 2
        false
    """
    return left < right
@specs.parameter('left', yaqltypes.Number())
@specs.parameter('right', yaqltypes.Number())
@specs.name('#operator_<=')
def lte(left, right):
    """:yaql:operator <=
    Returns true if left is less or equal to right, false otherwise.
    :signature: left <= right
    :arg left: left operand
    :argType left: number
    :arg right: right operand
    :argType right: number
    :returnType: boolean
    .. code::
        yaql> 3 <= 3
        true
    """
    return left <= right
@specs.parameter('op', yaqltypes.Number())
def abs_(op):
    """:yaql:abs
    Returns the absolute value of a number.
    :signature: abs(op)
    :arg op: input value
    :argType op: number
    :returnType: number
    .. code::
        yaql> abs(-2)
        2
    """
    # Built-in abs() handles both ints and floats.
    magnitude = abs(op)
    return magnitude
def int_(value):
    """:yaql:int
    Returns an integer built from number, string or null value.
    :signature: int(value)
    :arg value: input value
    :argType value: number, string or null
    :returnType: integer
    .. code::
        yaql> int("2")
        2
        yaql> int(12.999)
        12
        yaql> int(null)
        0
    """
    # null maps to zero; anything else goes through the int() constructor
    # (floats truncate toward zero, strings must be valid integer literals).
    return 0 if value is None else int(value)
def float_(value):
    """:yaql:float
    Returns a floating number built from number, string or null value.
    :signature: float(value)
    :arg value: input value
    :argType value: number, string or null
    :returnType: float
    .. code::
        yaql> float("2.2")
        2.2
        yaql> float(12)
        12.0
        yaql> float(null)
        0.0
    """
    # null maps to 0.0; anything else goes through the float() constructor.
    return 0.0 if value is None else float(value)
def random_():
    """:yaql:random
    Returns the next random floating number from [0.0, 1.0).
    :signature: random()
    :returnType: float
    .. code::
        yaql> random()
        0.6039529924951869
    """
    # Uses the module-level PRNG state shared with random(from, to).
    value = random.random()
    return value
def random__(from_, to_):
    """:yaql:random
    Returns the next random integer from [a, b].
    :signature: random(from, to)
    :arg from: left value for generating random number
    :argType from: integer
    :arg to: right value for generating random number
    :argType to: integer
    :returnType: integer
    .. code::
        yaql> random(1, 2)
        2
        yaql> random(1, 2)
        1
    """
    # randint is inclusive on both ends, matching the documented [a, b].
    low, high = from_, to_
    return random.randint(low, high)
@specs.parameter('left', int)
@specs.parameter('right', int)
def bitwise_and(left, right):
    """:yaql:bitwiseAnd
    Returns applied "bitwise and" to left and right integers.
    Each bit of the output is 1 if the corresponding bit of left AND right
    is 1, otherwise 0.
    :signature: bitwiseAnd(left, right)
    :arg left: left value
    :argType left: integer
    :arg right: right value
    :argType right: integer
    :returnType: integer
    .. code::
        yaql> bitwiseAnd(6, 12)
        4
    """
    # Python's & operator over arbitrary-precision integers.
    conjunction = left & right
    return conjunction
@specs.parameter('left', int)
@specs.parameter('right', int)
def bitwise_or(left, right):
    """:yaql:bitwiseOr
    Returns applied "bitwise or" to left and right numbers.
    Each bit of the output is 1 if the corresponding bit of left OR right
    is 1, otherwise 0.
    :signature: bitwiseOr(left, right)
    :arg left: left value
    :argType left: integer
    :arg right: right value
    :argType right: integer
    :returnType: integer
    .. code::
        yaql> bitwiseOr(6, 12)
        14
    """
    # Python's | operator over arbitrary-precision integers.
    disjunction = left | right
    return disjunction
@specs.parameter('left', int)
@specs.parameter('right', int)
def bitwise_xor(left, right):
    """:yaql:bitwiseXor
    Returns applied "bitwise exclusive or" to left and right numbers.
    Each bit of the output is equal to the sum of corresponding left and right
    bits mod 2.
    :signature: bitwiseXor(left, right)
    :arg left: left value
    :argType left: integer
    :arg right: right value
    :argType right: integer
    :returnType: integer
    .. code::
        yaql> bitwiseXor(6, 12)
        10
    """
    # Python's ^ operator over arbitrary-precision integers.
    exclusive = left ^ right
    return exclusive
@specs.parameter('arg', int)
def bitwise_not(arg):
    """:yaql:bitwiseNot
    Returns an integer where each bit is a reversed corresponding bit of arg.
    :signature: bitwiseNot(arg)
    :arg arg: input value
    :argType arg: integer
    :returnType: integer
    .. code::
        yaql> bitwiseNot(6)
        -7
    """
    # Python's two's-complement semantics: ~x == -x - 1.
    inverted = ~arg
    return inverted
@specs.parameter('value', int)
@specs.parameter('bits_number', int)
def shift_bits_right(value, bits_number):
    """:yaql:shiftBitsRight
    Shifts the bits of value right by the number of bits bitsNumber.
    :signature: shiftBitsRight(value, bitsNumber)
    :arg value: given value
    :argType value: integer
    :arg bitsNumber: number of bits
    :argType bitsNumber: integer
    :returnType: integer
    .. code::
        yaql> shiftBitsRight(8, 2)
        2
    """
    return value >> bits_number
@specs.parameter('value', int)
@specs.parameter('bits_number', int)
def shift_bits_left(value, bits_number):
    """:yaql:shiftBitsLeft
    Shifts the bits of value left by the number of bits bitsNumber.
    :signature: shiftBitsLeft(value, bitsNumber)
    :arg value: given value
    :argType value: integer
    :arg bitsNumber: number of bits
    :argType bitsNumber: integer
    :returnType: integer
    .. code::
        yaql> shiftBitsLeft(8, 2)
        32
    """
    return value << bits_number
@specs.parameter('a', nullable=True)
@specs.parameter('b', nullable=True)
@specs.inject('operator', yaqltypes.Delegate('#operator_>'))
def max_(a, b, operator):
    """:yaql:max
    Returns max from a and b.
    :signature: max(a, b)
    :arg a: input value
    :argType a: number
    :arg b: input value
    :argType b: number
    :returnType: number
    .. code::
        yaql> max(8, 2)
        8
    """
    # The injected '>' delegate keeps the comparison overridable by the
    # yaql context; a wins ties, as in the original.
    return b if operator(b, a) else a
@specs.parameter('a', nullable=True)
@specs.parameter('b', nullable=True)
@specs.inject('operator', yaqltypes.Delegate('#operator_>'))
def min_(a, b, operator):
    """:yaql:min
    Returns min from a and b.
    :signature: min(a, b)
    :arg a: input value
    :argType a: number
    :arg b: input value
    :argType b: number
    :returnType: number
    .. code::
        yaql> min(8, 2)
        2
    """
    # Consistency fix: max() declares both parameters nullable, min() did
    # not, so min(null, x) was rejected while max(null, x) was accepted.
    # The comparison itself is unchanged; a wins ties.
    if operator(b, a):
        return a
    return b
@specs.parameter('a', yaqltypes.Number())
@specs.parameter('b', yaqltypes.Number())
@specs.parameter('c', yaqltypes.Number(nullable=True))
def pow_(a, b, c=None):
    """:yaql:pow
    Returns a to the power b modulo c.
    :signature: pow(a, b, c => null)
    :arg a: input value
    :argType a: number
    :arg b: power
    :argType b: number
    :arg c: modulo. null by default, which means no modulo is done after power.
    :argType c: integer
    :returnType: number
    .. code::
        yaql> pow(3, 2)
        9
        yaql> pow(3, 2, 5)
        4
    """
    # No modulus requested: plain exponentiation (identical to pow(a, b)).
    if c is None:
        return a ** b
    return pow(a, b, c)
@specs.parameter('num', yaqltypes.Number())
def sign(num):
""":yaql:sign
Returns 1 if num > 0; 0 if num = 0; -1 if num < 0.
:signature: sign(num)
| |
x, y = load_spe(folder, file)
else:
try:
x, y = load_txt(folder, file+'.txt')
except (FileNotFoundError, OSError):
try:
x, y = load_spe(folder, file+'.SPE')
except (FileNotFoundError, OSError):
raise ValueError('Only .txt or .SPE file can be processed.')
if transf_func is not None:
x = transf_func(x)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
ax = ax
ax.plot(x, y, **kwargs)
#######################################################################################################################
################################### FUNCTIONS FOR PANDA DATAFRAME CONSTRUCTION FROM DATAFILES #########################
################################################## AND PANDA RELATED FUNCTIONS ########################################
#######################################################################################################################
def get_parameters(filename):
    """Return a dictionary with all the parameters contained in the filename given, following the established regex.
    The file extension must be removed from the filename.
    Expected underscore-separated layout, e.g.
    ``10K_sample_laser532.0nm_100.0uW_600.0nm_1200grooves_2x10sec_slit100.0_F1_calib[_P2]``
    (the trailing position token is optional; missing position yields 0).
    Parameters looked for in the filename
    --------------------------------------
    temperature : in kelvin
    laser_wavelength : in nanometer
    power : in micro watt
    wavelength : the central wavelength of the spectro in nm
    grooves : number of line per mm on the diffraction grating
    tacq : acquisition time in seconds
    slit : width of the slit in micrometers
    """
    list_params = filename.split('_')
    # Slicing strips the fixed unit suffixes ('K', 'nm', 'uW', 'grooves', ...).
    temperature = int(list_params[0][:-1])
    sample = list_params[1]
    laser_wavelength = float(list_params[2][5:-2])  # drop leading 'laser' and trailing 'nm'
    power = float(list_params[3][:-2])
    wavelength = float(list_params[4][:-2])
    grooves = float(list_params[5][:-7])
    # Acquisition token looks like '<count>x<seconds>sec'.
    number_acq_str, tacq_str = list_params[6][:-3].split('x')
    number_acq = int(number_acq_str)
    tacq = float(tacq_str)
    slit = float(list_params[7][4:])  # drop leading 'slit'
    filter_ = list_params[8]
    calibration = list_params[9]
    try:
        position = list_params[10]  # keep the 'P' before the number, ex: 'P2' for position 2.
    except IndexError:
        # Narrowed from a blanket 'except Exception': only a genuinely
        # missing (optional) position token should fall back to 0.
        position = 0
    return {'temperature': temperature, 'sample': sample, 'laser_wavelength': laser_wavelength,
            'power': power, 'wavelength': wavelength, 'grooves': grooves, 'number_acq': number_acq, 'tacq': tacq,
            'slit': slit, 'filter': filter_, 'calibration': calibration, 'position': position}
def get_parameters_key(filename):
    """Return a dictionary with all the parameters contained in the filename indicated by a key.
    Conventions of name have been fixed by a collective choice. Use regular expressions.
    Each parameter is written as ``key[value]``, e.g. ``T[10]_S[abc]`` yields
    ``{'temperature': '10', 'sample_id': 'abc'}``. A key may appear several
    times as long as every occurrence carries the same value; conflicting
    values raise a ValueError.
    """
    dic_key_parameter = {'T': 'temperature', 'S': 'sample_id', 'P': 'power', 'Lwv': 'laser_wv', 'gr': 'grating',
                         'Gwv': 'grating_wv', 'tacq': 'tacq', 'nacq': 'nacq', 'slit': 'slit', 'F': 'filter_id',
                         'calib': 'calib', 'position': 'position', 'dateID': 'dateID', 'kind': 'kind', 'polar': 'polar'}
    value_parameter = {}
    for key, parameter in dic_key_parameter.items():
        # Non-greedy match of the shortest bracketed chunk after the key that
        # itself contains no '[' or ']'.
        pattern = key + r"\[([^\[\]]*?)\]"
        match = re.findall(pattern, filename)
        if len(match) == 1:
            value_parameter[parameter] = match[0]
        elif len(match) > 1:
            # Keep only occurrences carrying an actual word value. The
            # previous code deleted from 'match' while enumerating it (which
            # skips elements) and its error flag only reflected the *last*
            # occurrence, so mismatches in the middle went undetected.
            valid = [v for v in match if re.match(r"\w+", v) is not None]
            if not valid:
                continue
            if any(v != valid[0] for v in valid):
                raise ValueError(
                    "The file contains two different value of the same parameter. Please correct the mistake.")
            value_parameter[parameter] = valid[0]
    return value_parameter
def _load_sorted_spectrum(path, unpack=True):
    """Load a two-column spectrum file and return (x, y) sorted by ascending x."""
    x, y = np.loadtxt(path, unpack=unpack)
    order = np.argsort(x)
    return x[order], y[order]
def make_DataFrame(list_of_filenames, files_location, format_, version='old', unpack=True):
    """
    Return a dataframe. The number of row correponds to the number of filename & the number of columns to the numbers
    of parameters found in the filenames plus the data.
    Important: the extension of the filenames given are supposed to be removed.
    Warning: this function is made for the MacroPL experiment and the convention taken for the filenames.
    It is necessary to adapt this function for other filenames, along with the 'get_parameters' function.
    => There is an 'old' <version>: adapted when no keys are used in the filename. The order of the parameter written
    in the filename is primordial.
    => There is a 'new' <version>: adapted when keys are used in the filename. Much more adaptable, does not depend on
    the order of the parameter, neither on the existence or not of some parameter.
    Parameters looked for in the filenames
    ---------------------------------------
    temperature : in kelvin
    laser_wavelength : in nanometer
    power : in micro watt
    wavelength : the central wavelength of the spectro in nm
    grooves : number of line per mm on the diffraction grating
    tacq : acquisition time in seconds
    slit : width of the slit in micrometers
    """
    df = pd.DataFrame({'energies': np.zeros(len(list_of_filenames)), 'intensities': np.zeros(len(list_of_filenames))})
    # Each cell holds a whole array, so the columns must be object-typed.
    df['energies'] = df['energies'].astype(object)
    df['intensities'] = df['intensities'].astype(object)
    if version == 'old':
        # Positional filename convention: copy every parsed field under its
        # dataframe column name.
        column_to_key = (('sample_id', 'sample'), ('position', 'position'), ('wavelength', 'wavelength'),
                         ('power', 'power'), ('tacq', 'tacq'), ('number_acq', 'number_acq'),
                         ('laser_wavelength', 'laser_wavelength'), ('temperature', 'temperature'),
                         ('filter_id', 'filter'), ('slit', 'slit'), ('grooves', 'grooves'),
                         ('calibration', 'calibration'))
        for i, filename in enumerate(list_of_filenames):
            parameters = get_parameters(filename)
            for column, key in column_to_key:
                df.at[i, column] = parameters[key]
            x, y = _load_sorted_spectrum(files_location + filename + '.' + format_, unpack=unpack)
            df.at[i, 'energies'] = x
            df.at[i, 'intensities'] = y
        # 'position' is kept as a string (with its leading 'P') for MultiIndex use.
        df['number_acq'] = df['number_acq'].astype(int)
        df['temperature'] = df['temperature'].astype(int)
        # re order the DataFrame
        df = df[['sample_id', 'position', 'wavelength', 'power', 'tacq', 'number_acq', 'laser_wavelength',
                 'temperature', 'filter_id', 'slit', 'grooves', 'calibration', 'energies', 'intensities']]
    elif version == 'new':
        # Key-based filename convention: columns are created on the fly from
        # whatever keys the filename carries.
        for i, filename in enumerate(list_of_filenames):
            for key, value in get_parameters_key(filename).items():
                df.at[i, key] = value
            x, y = _load_sorted_spectrum(files_location + filename + '.' + format_, unpack=unpack)
            df.at[i, 'energies'] = x
            df.at[i, 'intensities'] = y
    return df
def list_values_of_parameter(dataframe, parameter):
    """Return a list of all values of the parameter from the dataframe."""
    # value_counts() de-duplicates and orders by descending frequency; its
    # index therefore enumerates each distinct value once.
    series = dataframe['{}'.format(parameter)]
    return [entry for entry in series.value_counts().index]
def highlight_row_by_name(s, names, color):
    """This function is intented to be called inside ``pandas.DataFrame.style.apply()`.
    'axis=1' must be specified in the call.
    Parameters
    ----------
    'names' & 'color' : must be passed as a list in the kwargs.
    Examples
    --------
    df.style.apply(highlight_row_by_name, axis=1, names=['mean', 'std'])
    ==> will highlight the rows named 'mean' and 'std'
    """
    # One CSS string per cell of the row; non-matching rows get white.
    chosen = color if s.name in names else 'white'
    return ['background-color: ' + chosen] * len(s)
# I made the ones below during postdoc at Grenoble
def df_from_files(folder, files, fmt='txt', columns=None):
    """Computes a dataframe from two-columns data files, for each file in ``files``.
    Each row corresponds to a datafile, each column to a type of data, each cell to the list of data.
    Parameters
    ----------
    folder : complete path to folder. To ignore this parameter just put ``""`` as value, for example
        when the full path is contained in the file name already.
    files : sequence of file names to extract data from.
    fmt : format of the data file. ``None`` means the names in ``files`` already carry their extension.
    columns : column name to use for each column; defaults to ``['x', 'y']``.
    Return a pandas dataframe.
    """
    # Fix for a mutable default argument: the old ``columns=['x', 'y']``
    # default was one shared list across every call of this function.
    if columns is None:
        columns = ['x', 'y']
    xx, yy = [], []
    for file in files:
        # Lines starting with 'Frame' are skipped (trivista file headers).
        if fmt is None:
            x, y = np.loadtxt(folder + file, unpack=True, comments='Frame')  # for trivista files
        else:
            x, y = np.loadtxt(folder + file + '.' + fmt, unpack=True, comments='Frame')
        xx.append(x)
        yy.append(y)
    return pd.DataFrame(data={columns[0]: xx, columns[1]: yy})
from collections import abc
def df_from_bsweep(folder, file, B_init=None, B_final=None, step=None, bvalues=None, CCD_nb_pixel=1340):
"""Computes a dataframe from a file associated to a sweep in magnetic field and sort dataframe by B values.
The file should contain several spectra at different field.
Parameters
----------
folder: complete path to folder.
file: name of the file. Can be a list of files.
B_init, B_final: value of initial and final magnetic field.
step: the step in magnetic field in the sweep.
bvalues: list of magnetic field values.
Use this if the data file is a set of spectra from irregular values of | |
key_list:
if expected_key not in actual_response_dict[0]:
zoomba.fail("The response does not contain the key '" + expected_key + "'")
continue
if actual_response_dict[0][expected_key] != expected_response[0][expected_key]:
zoomba.fail("The value for the key '" + expected_key + "' doesn't match the response:" +
"\nExpected: " + expected_response[0][expected_key] +
"\nActual: " + actual_response_dict[0][expected_key])
return
def validate_response_contains_correct_number_of_items(self, json_actual_response, number_of_items):
""" This keyword is used to validate the number of returned items on Request responses from an API.\n
json_actual_response: (request response object) The response from an API.\n
number_of_items: (integer) The expected number of items.\n
return: There is no actual returned output, other than error messages when comparisons fail.\n
"""
actual_response_dict = json.loads(json_actual_response)
if isinstance(number_of_items, str):
number_of_items = number_of_items.upper()
if number_of_items == "IGNORE":
return True
elif not isinstance(number_of_items, int):
zoomba.fail("Did not pass number or string value, function expects a number or string 'IGNORE'.")
return
if isinstance(actual_response_dict, list):
if len(actual_response_dict) != int(number_of_items):
zoomba.fail('API is returning ' + str(
len(actual_response_dict)) + ' instead of the expected ' + str(number_of_items) + ' result(s).')
else:
zoomba.fail("The response is not a list:\nActual Response:\n" + str(actual_response_dict))
    def key_by_key_validator(self, actual_dictionary, expected_dictionary, ignored_keys=None, unmatched_keys_list=None,
                             parent_key=None, full_list_validation=False, sort_lists=False, **kwargs):
        """ This method is used to find and verify the value of every key in the expectedItem dictionary when compared
        against a single dictionary actual_item, unless any keys are included on the ignored_keys array./n
        actual_item: (array of dictionaries) The list of dictionary items extracted from a json Response.\n
        ExpectedItem: (dictionary) The expected item with the key to be validated.\n
        ignored_keys: (strings list) A list of strings of the keys to be ignored on the validation.\n
        full_list_validation: (bool) Check that the entire list matches the expected response, defaults to False.\n
        sort_lists: (bool) Sort lists before doing key by key validation, defaults to False.\n
        **kwargs: (dict) Currently supported kwargs are margin_type and margin_amt\n
        margin_type: (string) The type of unit of time to be used to generate a delta for the date comparisons.\n
        margin_amt: (string/#) The amount of units specified in margin_type to allot for difference between dates.\n
        return: (boolean) If the method completes successfully, it returns True. Appropriate error messages are
        returned otherwise.\n
        """
        # A length mismatch is fatal: fail immediately instead of reporting
        # key-by-key differences.
        if len(actual_dictionary) != len(expected_dictionary):
            zoomba.fail("Collections not the same length:"
                        "\nActual length: " + str(len(actual_dictionary)) +
                        "\nExpected length " + str(len(expected_dictionary)))
            return
        for key, value in expected_dictionary.items():
            if ignored_keys and key in ignored_keys:
                continue
            if key not in actual_dictionary:
                zoomba.fail("Key not found in Actual : " + str(actual_dictionary) + " Key: " + str(key))
                continue
            # Dispatch by the *expected* value's type: lists and dicts recurse
            # through dedicated helpers; everything else is compared directly.
            if isinstance(value, list):
                if full_list_validation and len(value) != len(actual_dictionary[key]):
                    zoomba.fail("Arrays not the same length:" +
                                "\nExpected: " + str(value) +
                                "\nActual: " + str(actual_dictionary[key]))
                    continue
                self._key_by_key_list(key, value, actual_dictionary, unmatched_keys_list, ignored_keys, parent_key,
                                      full_list_validation=full_list_validation, sort_lists=sort_lists, **kwargs)
            elif isinstance(value, dict):
                self._key_by_key_dict(key, value, actual_dictionary, expected_dictionary, unmatched_keys_list,
                                      ignored_keys, full_list_validation=full_list_validation, sort_lists=sort_lists,
                                      **kwargs)
            elif isinstance(expected_dictionary[key], str) and not expected_dictionary[key].isdigit():
                # Non-numeric strings are first tried as dates (dateutil
                # parse); on failure they fall back to plain equality.
                try:
                    parse(expected_dictionary[key])
                    self.date_string_comparator(value, actual_dictionary[key], key, unmatched_keys_list, **kwargs)
                except (ValueError, TypeError):
                    if value == actual_dictionary[key]:
                        continue
                    else:
                        unmatched_keys_list.append(("------------------\n" + "Key: " + str(key),
                                                    "Expected: " + str(value),
                                                    "Actual: " + str(actual_dictionary[key])))
            elif value == actual_dictionary[key]:
                continue
            else:
                # Mismatches are collected (not failed) so the caller can
                # report every differing key at once.
                unmatched_keys_list.append(("------------------\n" + "Key: " + str(key), "Expected: " + str(value),
                                            "Actual: " + str(actual_dictionary[key])))
        return True
def date_string_comparator(self, expected_date, actual_date, key, unmatched_keys_list, **kwargs):
"""This Method is used to validate a single property on a JSON object of the Date Type.
It Validates for any the following Date Formats:
%Y-%m-%dT%H:%M:%S, %Y-%m-%dT%H:%M:%SZ, %Y-%m-%dT%H:%M:%S.%f, %Y-%m-%dT%H:%M:%S.%fZ
expected_date: (string) The Expected date string the key being validated.\n
actual_date: (string) The Actual date string of the key being validated.\n
key: (string) The key being validated.\n
unmatched_keys_list (list): List of keys that are unvalidated - to be passed to error handling method.
**kwargs: (dict) Currently supported kwargs are margin_type and margin_amt\n
margin_type: (string) The type of unit of time to be used to generate a delta for the date comparisons.\n
margin_amt: (string/#) The amount of units specified in margin_type to allot for difference between dates.\n
"""
if expected_date == actual_date:
return
expected_utc = _date_format(expected_date, key, unmatched_keys_list, "Expected")
actual_utc = _date_format(actual_date, key, unmatched_keys_list, "Actual")
if expected_utc and actual_utc:
self.date_comparator(expected_utc, actual_utc, key, unmatched_keys_list, **kwargs)
def date_comparator(self, expected_date, actual_date, key, unmatched_keys_list, margin_type="minutes",
margin_amt=10):
"""This method compares two date values, given a certain margin type(minutes, seconds, etc),
and a margin amount (int). If the two dates are not within the margin amount for the margin type, I.E. within
10 minutes of difference, it asserts False, and returns an error message.
expected_date: (date) The Expected date value of the key being validated.\n
actual_date: (date) The Actual date value of the key being validated.\n
key: (string) The key being validated.\n
unmatched_keys_list: (list) List of Date keys that are not within the accepted margin_type
and margin_amt resolution\n
margin_type: (string) The type of unit of time to be used to generate a delta for the date comparisons.\n
margin_amt: (integer) The amount of units specified in margin_type to allot for difference between dates.\n
"""
arg_dict = {margin_type: int(margin_amt)}
margin = datetime.timedelta(**arg_dict)
if expected_date - margin <= actual_date <= expected_date + margin:
return
unmatched_keys_list.append(("------------------\n" + "Dates Not Close Enough\nKey: " + str(key),
"Expected: " + str(expected_date),
"Actual: " + str(actual_date)))
def generate_unmatched_keys_error_message(self, unmatched_keys):
""" This method is only used as an internal call from other validating methods to generate an error string
containing every unmatched key when a validation fails.\n
unmatchedKeys: (array of key/value pairs) An array containing the unmatched keys during a validation.\n
"""
if unmatched_keys:
keys_error_msg = "Key(s) Did Not Match:\n"
for key_error_tuple in unmatched_keys:
for key_error in key_error_tuple:
keys_error_msg += str(key_error) + "\n"
zoomba.fail(keys_error_msg + "\nPlease see differing value(s)")
    def _key_by_key_list(self, key, value, actual_dictionary, unmatched_keys_list=None, ignored_keys=None,
                         parent_key=None, full_list_validation=False, sort_lists=False, **kwargs):
        # Compare the expected list 'value' against actual_dictionary[key],
        # element by element. Strings are compared as whole lists; anything
        # else recurses through key_by_key_validator one element at a time.
        if sort_lists and isinstance(value, list):
            # Canonical ordering for lists of dicts; plain values raise
            # AttributeError on .items() and keep their original order here.
            try:
                value = list(map(dict, sorted(list(i.items()) for i in value)))
            except AttributeError:
                pass
        for index, item in enumerate(value):
            if isinstance(item, str):
                # Lists of strings: compare the entire list at once (optionally
                # order-insensitively when sort_lists is set).
                if value != actual_dictionary[key]:
                    if sort_lists:
                        if sorted(value) != sorted(actual_dictionary[key]):
                            zoomba.fail("Arrays do not match:" +
                                        "\nExpected: " + str(sorted(value)) +
                                        "\nActual: " + str(sorted(actual_dictionary[key])))
                        continue
                    else:
                        zoomba.fail("Arrays do not match:" + \
                                    "\nExpected: " + str(value) + \
                                    "\nActual: " + str(actual_dictionary[key]) + \
                                    "\nIf this is simply out of order try 'sort_list=True'")
                        continue
            else:
                # Non-string elements: pair the expected element with the
                # actual element at the same index (empty actual list maps to '').
                if len(actual_dictionary[key]) == 0:
                    actual_item = ''
                else:
                    if sort_lists:
                        actual_dictionary[key] = list(map(dict, sorted(list(i.items()) for i in actual_dictionary[key])))
                    actual_item = actual_dictionary[key][index]
                temp_actual_dict = {key: actual_item}
                temp_expected_dict = {key: item}
                # Remember how many mismatches existed before recursing so the
                # new ones can be re-labelled with this list index afterwards.
                if unmatched_keys_list:
                    current_unmatched_length = len(unmatched_keys_list)
                else:
                    current_unmatched_length = 0
                self.key_by_key_validator(temp_actual_dict, temp_expected_dict,
                                          ignored_keys, unmatched_keys_list, parent_key=key,
                                          full_list_validation=full_list_validation, sort_lists=sort_lists, **kwargs)
                if unmatched_keys_list is None:
                    continue
                else:
                    # Rewrite freshly-added entries as 'key[index]' paths.
                    _unmatched_list_check(unmatched_keys_list, current_unmatched_length,
                                          key, index, parent_key, is_list=True)
    def _key_by_key_dict(self, key, value, actual_dictionary, expected_dictionary, unmatched_keys_list=None,
                         ignored_keys=None, full_list_validation=False, sort_lists=False, **kwargs):
        # Compare a nested expected dict 'value' against actual_dictionary[key].
        try:
            # Size mismatch is fatal; len() also raises TypeError when the
            # actual value is not a collection at all.
            if len(value) != len(actual_dictionary[key]):
                zoomba.fail("Dicts do not match:" +
                            "\nExpected: " + str(value) +
                            "\nActual: " + str(actual_dictionary[key]))
                return
        except TypeError:
            zoomba.fail("Dicts do not match:" +
                        "\nExpected: " + str(value) +
                        "\nActual is not a valid dictionary.")
            return
        # Remember how many mismatches existed before recursing so the new
        # ones can be prefixed with this key by _unmatched_list_check.
        if unmatched_keys_list is not None:
            current_unmatched_length = len(unmatched_keys_list)
        self.key_by_key_validator(actual_dictionary[key], expected_dictionary[key],
                                  ignored_keys, unmatched_keys_list, parent_key=key,
                                  full_list_validation=full_list_validation, sort_lists=sort_lists, **kwargs)
        if unmatched_keys_list is None:
            return
        _unmatched_list_check(unmatched_keys_list, current_unmatched_length, key)
def full_list_validation(self, actual_response_dict, expected_response_dict, unmatched_keys_list, ignored_keys=None,
sort_lists=False, **kwargs):
if actual_response_dict == expected_response_dict:
return
for actual_item, expected_item in zip(actual_response_dict, expected_response_dict):
self.key_by_key_validator(actual_item, expected_item, ignored_keys, unmatched_keys_list,
full_list_validation=True, sort_lists=sort_lists, **kwargs)
if unmatched_keys_list:
unmatched_keys_list.append(("------------------\n" + "Full List Breakdown:",
"Expected: " + str(expected_response_dict),
"Actual: " + str(actual_response_dict)))
self.generate_unmatched_keys_error_message(unmatched_keys_list)
return
def _unmatched_list_check(unmatched_keys_list, current_unmatched_length, key, index=None, parent_key=None,
is_list=False):
if len(unmatched_keys_list) > current_unmatched_length and parent_key == key:
for new_index in range(len(unmatched_keys_list) - current_unmatched_length):
reverse_index = -1 * (new_index + 1)
unmatched_tuple = unmatched_keys_list[reverse_index]
split_key_string = unmatched_tuple[0].split("Key: " + parent_key)
new_key_string = split_key_string[0] + "Key: " + parent_key + "[" + str(index) + "]" + split_key_string[1]
unmatched_keys_list[reverse_index] = (new_key_string, *unmatched_tuple[1:])
elif len(unmatched_keys_list) > current_unmatched_length and parent_key is not None:
for new_index in range(len(unmatched_keys_list) - current_unmatched_length):
reverse_index = -1 * (new_index + 1)
unmatched_tuple = unmatched_keys_list[reverse_index]
if "Key: " + str(key) not in unmatched_tuple[0]:
split_key_string = unmatched_tuple[0].split("Key: ")
if is_list:
new_key_string = split_key_string[0] + "Key: " + key + "[" + str(index) + "]." + split_key_string[1]
else:
new_key_string = split_key_string[0] + "Key: " + key + "." + split_key_string[1]
| |
#########################################################################################
# MIT License #
# #
# Copyright (c) 2021 SumBot team #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# © 2021 GitHub, Inc. #
#########################################################################################
import discord
import asyncio
from discord.ext import commands
import sqlite3
from typing import Optional
class Mod(commands.Cog):
    """
    Moderator commands: guild prefix management, message moderation
    (say/echo/clear/poll), channel visibility/locking and nicknames.
    """

    def __init__(self, client):
        self.client = client
        # One shared sqlite connection/cursor; discord.py runs all command
        # callbacks on a single event loop, so unsynchronized use is fine.
        self.db = sqlite3.connect("./app.db")
        self.cr = self.db.cursor()
        self.warn_count = {}

    def _get_prefix(self, guild_id):
        """Return the command prefix stored for the given guild id."""
        row = self.cr.execute(
            "SELECT prefix FROM guilds WHERE guild_id = ?", (guild_id,)
        ).fetchone()
        return row[0]

    @commands.command(name='setprefix', aliases=['set_prefix', "set-prefix", "prefix"])
    @commands.has_permissions(manage_guild=True)
    async def prefix(self, ctx, prefix: str = ""):
        """Set the guild prefix (max 5 chars); no argument resets it to '@'."""
        try:
            if len(prefix) > 5:
                await ctx.send(embed=discord.Embed(
                    description='The prefix cannot be more than 5 characters long.',
                    color=discord.Colour.red()
                ))
            elif prefix == "":
                self.cr.execute("UPDATE guilds SET prefix = '@' WHERE guild_id = ?", (ctx.guild.id,))
                self.db.commit()
                await ctx.send(embed=discord.Embed(
                    description="the prefix has been reset to `@`",
                    color=discord.Colour.green()))
            else:
                self.cr.execute("UPDATE guilds SET prefix = ? WHERE guild_id = ?", (prefix, ctx.guild.id))
                self.db.commit()
                # Read the prefix back so the reply reflects what was stored.
                await ctx.send(embed=discord.Embed(
                    description=f"the prefix now is `{self._get_prefix(ctx.guild.id)}`",
                    color=discord.Colour.green()))
        except Exception as e:
            # Best effort: a failed prefix update must not crash the cog.
            print(e)

    @prefix.error
    async def prefix_error(self, ctx, error):
        # Silently ignore users lacking manage_guild.
        if isinstance(error, commands.MissingPermissions):
            pass

    @commands.command(help='to re-send your message')
    @commands.guild_only()
    @commands.has_permissions(manage_messages=True)
    async def say(self, ctx, *, arg):
        """Delete the invoking message and repeat its content."""
        await ctx.message.delete()
        await ctx.send(arg)

    @say.error
    async def say_error(self, ctx, error):
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(embed=discord.Embed(
                description='**Used:** `{}say <message>`\n**Type:** Mod'.format(self._get_prefix(ctx.guild.id)),
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.MissingPermissions):
            await ctx.send(embed=discord.Embed(
                description="🙄 You don't have permissions",
                color=discord.Colour.red()
            ))

    @commands.command(help='to re-send your message in embed')
    @commands.guild_only()
    @commands.has_permissions(embed_links=True)
    async def embed(self, ctx, *, arg):
        """Delete the invoking message and repeat it inside an embed."""
        embed = discord.Embed(
            description=arg,
            color=ctx.author.color,
            timestamp=ctx.message.created_at)
        embed.set_author(name=self.client.user.name, icon_url=self.client.user.avatar_url)
        embed.set_footer(text=ctx.guild.name, icon_url=ctx.guild.icon_url)
        await ctx.message.delete()
        await ctx.send(embed=embed)

    @embed.error
    async def embed_error(self, ctx, error):
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(embed=discord.Embed(
                description='**Used:** `{}embed <message>`\n**Type:** Mod'.format(self._get_prefix(ctx.guild.id)),
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.CommandInvokeError):
            await ctx.send(embed=discord.Embed(
                description='I do not have permissions `embed_links`',
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.MissingPermissions):
            await ctx.send(embed=discord.Embed(
                description="You don't have permissions `embed_links`",
                color=discord.Colour.red()
            ))

    @commands.command(help="to remove the number message")
    @commands.guild_only()
    @commands.has_permissions(manage_messages=True)
    async def clear(self, ctx, amount: int):
        """Bulk-delete between 1 and 200 messages from this channel."""
        if amount > 200:
            await ctx.send(embed=discord.Embed(
                description='You cannot delete more than 200 messages.',
                color=discord.Colour.red()
            ))
        elif amount <= 0:
            await ctx.send(embed=discord.Embed(
                description='You cannot delete less than one message.',
                color=discord.Colour.red()
            ))
        else:
            await ctx.message.delete()
            await ctx.channel.purge(limit=amount)
            await ctx.send(embed=discord.Embed(
                description="✅ Done",
                color=discord.Colour.green()
            ))
            # Remove the confirmation message itself after a short pause.
            await asyncio.sleep(2)
            await ctx.channel.purge(limit=1)

    @clear.error
    async def clear_error(self, ctx, error):
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(embed=discord.Embed(
                description='**Used:** `{}clear <number>`\n**Type:** Mod'.format(self._get_prefix(ctx.guild.id)),
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.CommandInvokeError):
            await ctx.send(embed=discord.Embed(
                description='🙄 I do not have permissions `manage messages`',
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.MissingPermissions):
            await ctx.send(embed=discord.Embed(
                description="🙄 You don't have permissions `manage messages`",
                color=discord.Colour.red()
            ))

    async def _set_everyone_overwrite(self, ctx, channel, attr, value, message):
        """Flip one @everyone permission overwrite on a channel and confirm."""
        channel = channel or ctx.channel
        overwrite = channel.overwrites_for(ctx.guild.default_role)
        setattr(overwrite, attr, value)
        await channel.set_permissions(ctx.guild.default_role, overwrite=overwrite)
        await ctx.send(embed=discord.Embed(
            description=message.format(channel.mention),
            color=discord.Colour.green()
        ))

    async def _overwrite_error(self, ctx, error, perm):
        """Common error reporting for the channel-overwrite commands."""
        if isinstance(error, commands.CommandInvokeError):
            await ctx.send(embed=discord.Embed(
                description='🙄 I do not have permissions `{}`'.format(perm),
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.MissingPermissions):
            await ctx.send(embed=discord.Embed(
                description="🙄 You don't have permissions `{}`".format(perm),
                color=discord.Colour.red()
            ))

    @commands.command(help='to hide the channel in everyone')
    @commands.guild_only()
    @commands.has_permissions(manage_channels=True)
    async def hide(self, ctx, channel: discord.TextChannel = None):
        """Hide a channel from @everyone (defaults to the current channel)."""
        await self._set_everyone_overwrite(
            ctx, channel, 'read_messages', False,
            '👤 | channel has been Hide {}')

    @hide.error
    async def hide_error(self, ctx, error):
        await self._overwrite_error(ctx, error, 'manage channels')

    @commands.command(help="to unhide the channel in everyone")
    @commands.guild_only()
    @commands.has_permissions(manage_channels=True)
    async def unhide(self, ctx, channel: discord.TextChannel = None):
        """Make a channel visible to @everyone again."""
        await self._set_everyone_overwrite(
            ctx, channel, 'read_messages', True,
            '👥 | channel has been unHide {}')

    @unhide.error
    async def unhide_error(self, ctx, error):
        await self._overwrite_error(ctx, error, 'manage channels')

    @commands.command(help='to lock the channel in everyone')
    @commands.guild_only()
    @commands.has_permissions(manage_messages=True)
    async def lock(self, ctx, channel: discord.TextChannel = None):
        """Prevent @everyone from sending messages in a channel."""
        await self._set_everyone_overwrite(
            ctx, channel, 'send_messages', False,
            '🔒 | channel locked {}')

    @lock.error
    async def lock_error(self, ctx, error):
        await self._overwrite_error(ctx, error, 'manage messages')

    @commands.command(help='to unlock the channel in everyone')
    @commands.guild_only()
    @commands.has_permissions(manage_messages=True)
    async def unlock(self, ctx, channel: discord.TextChannel = None):
        """Allow @everyone to send messages in a channel again."""
        await self._set_everyone_overwrite(
            ctx, channel, 'send_messages', True,
            '🔓 | channel unlock {}')

    @unlock.error
    async def unlock_error(self, ctx, error):
        await self._overwrite_error(ctx, error, 'manage messages')

    @commands.command(help='to send the message in channel')
    @commands.guild_only()
    @commands.has_permissions(manage_messages=True)
    async def echo(self, ctx, channel: discord.TextChannel, *, arg):
        """Send a message to another channel and confirm here."""
        await channel.send(arg)
        await ctx.send(embed=discord.Embed(
            description='Message was sent in {}'.format(channel.mention),
            color=discord.Colour.green()
        ))

    @echo.error
    async def echo_error(self, ctx, error):
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(embed=discord.Embed(
                description='**Used:** `{0}echo(channel, message)`\n**Type:** Mod\n**description:** To add the bot to your server'.format(self._get_prefix(ctx.guild.id)),
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.errors.CommandInvokeError):
            await ctx.send(embed=discord.Embed(
                description='🙄 I could not find this channel',
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.MissingPermissions):
            # The old code tested MissingPermissions twice (under two aliases)
            # and answered both times; report it only once.
            await ctx.send(embed=discord.Embed(
                description='🙄 You don\'t have permissions `manage messages`',
                color=discord.Colour.red()
            ))

    @commands.command(help='to make the poll')
    @commands.guild_only()
    @commands.has_permissions(manage_messages=True)
    async def poll(self, ctx, *, arg):
        """Repost the argument as a poll embed with 👍/👎 reactions."""
        await ctx.message.delete()
        embed = discord.Embed(
            timestamp=ctx.message.created_at,
            description=arg,
            color=ctx.author.color)
        embed.set_author(name=self.client.user.name, icon_url=self.client.user.avatar_url)
        embed.set_footer(text=ctx.guild.name, icon_url=ctx.guild.icon_url)
        msg = await ctx.send("📢 poll 📢", embed=embed)
        await msg.add_reaction('👍')
        await msg.add_reaction('👎')

    @poll.error
    async def poll_error(self, ctx, error):
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(embed=discord.Embed(
                description='**Used:** `{}poll <message>`\n**Type:** Mod'.format(self._get_prefix(ctx.guild.id)),
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.MissingPermissions):
            await ctx.send(embed=discord.Embed(
                description='🙄 You don\'t have permissions `manage messages`',
                color=discord.Colour.red()
            ))

    @commands.command(aliases=['nick', "rename"], help='add and remove nickname')
    @commands.has_guild_permissions(manage_nicknames=True)
    async def nickname(self, ctx, member: discord.Member, *, new: str = None):
        """Change a member's nickname; without an argument, reset it."""
        if new is None:
            # None (not "") is the documented way to clear a nickname.
            await member.edit(nick=None)
            await ctx.send(embed=discord.Embed(
                description=f'{member.name} has been reset nickname',
                color=discord.Colour.green()
            ))
        else:
            await member.edit(nick=new)
            await ctx.send(embed=discord.Embed(
                description=f'{member.name} has been changed to {new}',
                color=discord.Colour.green()
            ))

    @nickname.error
    async def nickname_error(self, ctx, error):
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(embed=discord.Embed(
                description='**Used:** `{}nick <member> <name>`\n**Type:** Mod'.format(self._get_prefix(ctx.guild.id)),
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.MissingPermissions):
            await ctx.send(embed=discord.Embed(
                description='🙄 You don\'t have permissions `manage nicknames`',
                color=discord.Colour.red()
            ))
        if isinstance(error, commands.BotMissingPermissions):
            await ctx.send(embed=discord.Embed(
                description='🙄 I don\'t have permissions `manage nicknames`',
                color=discord.Colour.red()
            ))

    # TODO: warn/clearwarn commands were sketched here as commented-out code;
    # implement them against self.warn_count or drop that unused state.
#
# Copyright (c) 2014 - 2021 StorPool.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Classes for accessing the StorPool API over an HTTP connection.
The Api class provides methods corresponding to the StorPool API calls.
The most common way to initialize it is to use the Api.fromConfig()
class method that will parse the StorPool configuration and set up
the connection parameters; see the apidoc.html reference documentation
for more information.
"""
import errno
import inspect
import socket as sock
import sys
import time as time
import six
from six.moves import http_client as http
from . import spjson as js
from . import sptypes as sp
from .spcatch import InvalidArgumentError
from .spconfig import SPConfig
from .sptype import JsonObject, spType, either, const, maybe, longType
from .sputils import msec, sec, pathPollWait
from .spdoc import ApiDoc, ApiCallDoc
if sys.version_info[0] < 3:
import urllib as uquote
else:
import urllib.parse as uquote
# Version of these Python bindings.
VERSION = '7.0.0'
# Directory where StorPool volume/snapshot device symlinks appear.
SP_DEV_PATH = '/dev/storpool/'
# Common HTTP path prefix for every API request (see _format_path()).
SP_API_PREFIX = '/ctrl/1.0'
def _format_path(query, multiCluster, clusterName=None):
    """ Return the HTTP path to send an actual query to. """
    remote_part = "" if clusterName is None else "RemoteCommand/{name}/".format(name=clusterName)
    multi_part = "MultiCluster/" if multiCluster else ""
    return SP_API_PREFIX + "/" + remote_part + multi_part + query
class _API_ARG(object):
    """Descriptor for one positional argument of a generated API call."""

    def __init__(self, name, validate):
        self._name = name
        self._type = spType(validate)

    def defstr(self):
        """Return the argument as it appears in the generated 'def' line."""
        suffix = "=None" if self._type.spDoc.name == "Optional" else ""
        return self._name + suffix
# Shared argument descriptors reused by many API-call definitions below.
DiskId = _API_ARG('diskId', sp.DiskId)
ServerId = _API_ARG('serverId', sp.ServerId)
ClientId = _API_ARG('clientId', sp.ClientId)
VolumeName = _API_ARG('volumeName', sp.VolumeNameOrGlobalId)
SnapshotName = _API_ARG('snapshotName', sp.SnapshotNameOrGlobalId)
PlacementGroupName = _API_ARG('placementGroupName', sp.PlacementGroupName)
VolumeTemplateName = _API_ARG('templateName', sp.VolumeTemplateName)
GlobalVolumeId = _API_ARG('globalVolumeId', sp.GlobalVolumeId)
class _API_METHOD(object):
    """Description of one StorPool API call (HTTP method, path, arguments,
    optional JSON body, return type).  compile() turns the description into
    a real Python function; ApiMeta attaches it to the Api class."""

    def __init__(self, method, multiCluster, query, args, json, returns):
        self.method = method
        self.multiCluster = multiCluster
        self.query = query
        self.path = _format_path(query, multiCluster)
        self.args = args
        # json is the (optional) request-body type; returns the reply type.
        self.json = spType(json) if json is not None else None
        self.returns = spType(returns)
        self.types = {}
    def addType(self, name, desc):
        self.types.update({name: desc})
    def doc(self, name, desc):
        # Build the documentation object for this call and return self so the
        # call definitions can be written fluently: GET(...).doc(...).
        self.spDoc = ApiCallDoc(name, desc, self.method, self.query, self.path, dict((arg._name, arg._type.spDoc) for arg in self.args), self.json.spDoc if self.json else None, self.returns.spDoc)
        return self
    def compile(self):
        """Generate the Python source of the API method and exec it."""
        def commas(xs):
            return ", ".join(xs)
        def fmtEq(x):
            return "{x}={x}".format(x=x)
        method, query, args, json, returns = self.method, self.query, self.args, self.json, self.returns
        args = list(args)
        if json is not None:
            # The JSON request body is passed as a trailing 'json' argument.
            args.append(_API_ARG('json', json))
        ftext = 'def func(self, {args}clusterName=None):\n'.format(
            args=''.join(arg.defstr() + ", " for arg in args))
        # Validate every argument, then substitute them into the query path.
        for arg in args:
            ftext += '    {arg} = _validate_{arg}({arg})\n'.format(arg=arg._name)
        ftext += '    query = "{query}"'.format(query=query)
        if args:
            ftext += '.format({args})\n'.format(args=commas(fmtEq(arg._name) for arg in args))
        ftext += '\n'
        ftext += '    res = self("{method}", {multiCluster}, query, {json}, clusterName=clusterName)\n'.format(method=method, multiCluster=repr(self.multiCluster), json=None if json is None else 'json')
        ftext += '    try:\n'
        ftext += '        return returns(res)\n'
        ftext += '    except InvalidArgumentError as e:\n'
        ftext += '        if e.partial is not None:\n'
        ftext += '            return e.partial\n'
        ftext += '        else:\n'
        ftext += '            raise\n'
        # The generated function resolves validators and helpers via its
        # globals dict.
        globalz = dict(("_validate_{0}".format(arg._name), arg._type.handleVal) for arg in args)
        globalz['InvalidArgumentError'] = InvalidArgumentError
        globalz['returns'] = returns.handleVal
        six.exec_(ftext, globalz)
        func = globalz['func']
        del globalz['func']
        # Human-readable __doc__ for interactive use.
        doc = "HTTP: {method} {path}\n\n".format(method=method, path=self.path)
        if args:
            doc += "    Arguments:\n"
            for arg in args:
                doc += "    {argName}: {argType}\n".format(argName=arg._name, argType=arg._type.name)
            doc += "\n"
        if returns is not None:
            doc += "    Returns: {res}\n".format(res=returns.name)
        func.__doc__ = doc
        func.spDoc = self.spDoc
        return func
def GET(query, *args, **kwargs):
    """Describe a GET API call; a 'returns' type must be given."""
    assert 'returns' in kwargs, 'GET requests must specify a return type'
    multi = kwargs.get('multiCluster', False)
    body = kwargs.get('json', None)
    return _API_METHOD('GET', multi, query, args, body, kwargs['returns'])
def POST(query, *args, **kwargs):
    """Describe a POST API call; the result type defaults to ApiOk."""
    multi = kwargs.get('multiCluster', False)
    body = kwargs.get('json', None)
    result = kwargs.get('returns', ApiOk)
    return _API_METHOD('POST', multi, query, args, body, result)
# Generic success reply for API calls.
# NOTE(review): the 'name: description' lines in the docstring appear to be
# parsed for the generated API reference -- keep that exact format.
@JsonObject(ok=const(True), generation=longType, info=maybe(str))
class ApiOk(object):
    '''
    ok: Always returns true. If something goes wrong, an ApiError is returned instead.
    generation: The cluster generation based on the number of configuration changes since the cluster was created.
    info: May contain additional information about the request.
    '''
# Success reply for volume creation; adds the transient snapshot name.
@JsonObject(autoName=sp.maybe(sp.SnapshotName))
class ApiOkVolumeCreate(ApiOk):
    '''
    autoName: The name of the transient snapshot used during the creation of the volume.
    '''
# Success reply for a volume backup; adds the backup's global id.
@JsonObject(remoteId=sp.maybe(sp.GlobalVolumeId))
class ApiOkVolumeBackup(ApiOkVolumeCreate):
    '''
    remoteId: The globally unique id of the backup
    '''
# Success reply for a group backup: one backup id per volume.
@JsonObject(backups={sp.VolumeName: sp.VolumesGroupBackupSingle})
class ApiOkVolumesGroupBackup(ApiOk):
    '''
    backups: The mapping of volume names to backup id.
    '''
# Success reply for snapshot creation; adds the snapshot identifiers.
@JsonObject(
    autoName=sp.maybe(sp.SnapshotName),
    snapshotGlobalId=sp.maybe(sp.GlobalVolumeId),
    snapshotVisibleVolumeId=sp.maybe(sp.longType)
)
class ApiOkSnapshotCreate(ApiOk):
    '''
    autoName: The name of the transient snapshot used during the creation of the volume.
    snapshotGlobalId: The global snapshot identifier.
    snapshotVisibleVolumeId: The ID by which the volume/snapshot was created.
    '''
class ApiError(Exception):
    """Error reply from the StorPool API, raised to the caller.

    Built from the HTTP status and the decoded JSON reply; missing fields
    in the reply's 'error' object get placeholder values.
    """

    def __init__(self, status, json):
        super(ApiError, self).__init__()
        self.status = status
        self.json = json
        error = json['error']
        self.name = error.get('name', "<Missing error name>")
        self.desc = error.get('descr', "<Missing error description>")
        # Transient errors may be retried by Api.__call__().
        self.transient = error.get('transient', False)

    def __str__(self):
        return "{0}: {1}".format(self.name, self.desc)
class ApiMeta(type):
    """Metaclass for Api: every _API_METHOD assigned as a class attribute is
    registered in the API documentation and compiled into a real method."""

    def spDocSection(cls, name, desc):
        # Start a new section in the generated API reference.
        cls.spDoc.add_section(name, desc)
    def __setattr__(cls, name, func):
        # 'func' is an _API_METHOD description; record its docs, compile it
        # into a plain function and attach that to the class instead.
        cls.spDoc.add_call(func.spDoc)
        func = func.compile()
        func.__name__ = func.func_name = name  # func_name kept for Python 2
        func.__module__ = __name__
        type.__setattr__(cls, name, func)
def clear_none(data):
    """Recursively remove any NoneType values.

    Objects with a to_json() method are converted first; dicts lose the
    keys whose values are None, lists and sets lose None elements (sets
    come back as lists, matching the original behaviour).  Anything else
    is returned unchanged.
    """
    if getattr(data, 'to_json', None) is not None:
        data = data.to_json()
    if isinstance(data, dict):
        return dict(
            (key, clear_none(value))
            for key, value in data.items()
            if value is not None
        )
    if isinstance(data, (list, set)):
        return [clear_none(item) for item in data if item is not None]
    return data
@six.add_metaclass(ApiMeta)
class Api(object):
    '''StorPool API abstraction'''
    spDoc = ApiDoc(
        """StorPool API Reference""",
        """
        Copyright (c) 2014 - 2021 StorPool. All rights reserved.
        This reference document describes the StorPool API version 19.01 and
        the supported API calls.
        """)
    def __init__(self, host='127.0.0.1', port=80, auth='', timeout=300, transientRetries=5, transientSleep=lambda retry: 2 ** retry, source=None, multiCluster=False):
        # Connection settings; the auth token goes into every request header.
        self._host = host
        self._port = port
        self._timeout = timeout
        # transientSleep maps the retry number to a delay in seconds
        # (default: exponential backoff).
        self._transientRetries = transientRetries
        self._transientSleep = transientSleep
        self._authHeader = {"Authorization": "Storpool v1:" + str(auth)}
        self._multiCluster = multiCluster
        if source is not None:
            # Binding to a local source address requires http.client to
            # accept the source_address keyword.
            hinit = http.HTTPConnection.__init__
            if getattr(inspect, 'getfullargspec', None) is None:
                hargs = inspect.getargspec(hinit).args
            else:
                hargs = inspect.getfullargspec(hinit).args
            if "source_address" not in hargs:
                raise NotImplementedError(
                    "HTTP connection source not supported with "
                    "this Python version")
            self._source = {"source_address": (source, 0)}
        else:
            self._source = {}
    @classmethod
    def fromConfig(klass, cfg=None, use_env=True, **kwargs):
        """Create an Api instance from the StorPool configuration."""
        if cfg is None:
            cfg = SPConfig(use_env=use_env)
        return klass(host=cfg['SP_API_HTTP_HOST'], port=int(cfg['SP_API_HTTP_PORT']), auth=cfg['SP_AUTH_TOKEN'], **kwargs)
    def __call__(self, method, multiCluster, query, json=None, clusterName=None):
        """Perform one HTTP API request, retrying transient failures."""
        if json is not None:
            json = js.dumps(clear_none(json))
        def is_transient_error(err):
            # Connection-level failures that are worth retrying.
            if isinstance(err, http.HTTPException):
                return True
            assert isinstance(err, sock.error)
            return err.errno in (errno.ECONNREFUSED, errno.ECONNRESET)
        retry, lastErr = 0, None
        while True:
            conn = None
            try:
                conn = http.HTTPConnection(self._host, self._port, timeout=self._timeout, **self._source)
                path = _format_path(query, multiCluster and self._multiCluster, clusterName=clusterName)
                if method == "GET" and json:
                    # GET carries the JSON payload URL-encoded in the query string.
                    path += "?json=" + uquote.quote(json, safe='')
                    json = None
                conn.request(method, path, json, self._authHeader)
                response = conn.getresponse()
                status, jres = response.status, js.load(response)
                if status != http.OK or 'error' in jres:
                    err = ApiError(status, jres)
                    # Only transient API errors fall through to the retry path.
                    if self._transientRetries and err.transient:
                        lastErr = err
                    else:
                        raise err
                else:
                    return jres['data']
            except (sock.error, http.HTTPException) as err:
                if self._transientRetries and is_transient_error(err):
                    lastErr = err
                else:
                    raise
            finally:
                if conn:
                    conn.close()
            if retry < self._transientRetries:
                retrySleep = self._transientSleep(retry)
                time.sleep(retrySleep)
                retry += 1
            else:
                raise lastErr
    def volumeDevLinkWait(self, volumeName, attach, pollTime=200 * msec, maxTime=60 * sec):
        """Wait for a volume's /dev/storpool symlink to (dis)appear."""
        return pathPollWait(SP_DEV_PATH + volumeName, attach, True, pollTime, maxTime)
Api.spDocSection("General",
"""
The StorPool API can be used with any tool that can generate HTTP requests with the GET and POST methods.
The only requirement is to supply the Authorization header and, if required by the request, valid JSON data.
For each call there is an explanation of the HTTP request and response
and an example in raw format as it should be sent to the StorPool management service.
Here are two examples using curl using the GET and POST methods respectively and their counterparts as issued by the StorPool CLI:
```
curl -H "Authorization: Storpool v1:1556129910218014736" 192.168.42.208:81/ctrl/1.0/DisksList
storpool disk list
```
```
curl -d '{"addDisks":["1"]}' -H "Authorization: Storpool v1:1556129910218014736" 192.168.42.208:81/ctrl/1.0/PlacementGroupUpdate/hdd
storpool placementGroup hdd addDisk 1
```
Python programs may use the API by importing the Python StorPool bindings (use 'pypi install storpool' to install them):
```
# Use the default StorPool configuration settings
>>> from storpool import spapi
>>> api=spapi.Api.fromConfig()
# Use the default StorPool configuration settings, but do NOT allow environment variables to override them
>>> from storpool import spapi
>>> | |
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import os
import contextlib
import re
from testfixtures import TempDirectory
from .. import fluoxas
from ..run import run_sequential
from ...io import nxfs
from ...io import xiaedf
from ...process import nxresult
from ...align import types
from ...utils import instance
from ...utils import listtools
from ...utils import cli
from ...materials import compoundfromname
from ...testutils.subtest import TestCase
from ...process.tests.xrfmap import XrfMapGenerator
from ...xrf.fit import OutputBuffer
logger = cli.getLogger(__name__, __file__)
class test_fluoxas(TestCase):
    def setUp(self):
        # Per-test scratch directory, removed again in tearDown().
        self.dir = TempDirectory()
    def tearDown(self):
        # Remove the scratch directory created in setUp().
        self.dir.cleanup()
@unittest.skipIf(compoundfromname.xraylib is None, "xraylib not installed")
def test_process(self):
self.xrfmap = XrfMapGenerator()
self.xrfmap.generate(self.dir.path, "test")
parameters = {
"alignmethod": (None,),
"cfgfileuse": (True, False),
"include_detectors": self.xrfmap.include_detectors,
"adddetectors": (True, False),
"addbeforefit": (True, False),
"quant": (True, False),
"dtcor": (True, False),
"stackdim": (0,),
"correctspectra": (True, False),
}
parameters2 = {
"alignmethod": (None,),
"cfgfileuse": (True,),
"include_detectors": [(1, (0, 2))],
"adddetectors": (False,),
"addbeforefit": (False,),
"quant": (True,),
"dtcor": (True,),
"stackdim": (0,),
"correctspectra": (False,),
}
self.run_subtests(parameters, self._process)
def _process(
self,
alignmethod=None,
cfgfileuse=None,
include_detectors=None,
adddetectors=None,
addbeforefit=None,
quant=None,
dtcor=None,
stackdim=None,
correctspectra=None,
):
if not cfgfileuse and alignmethod is not None:
return
parameters = {}
pymcaparams = {}
prenormparams = {}
postnormparams = {}
alignparams = {}
cropparams = {}
replaceparams = {}
commonparams = {}
parameters["pymca"] = pymcaparams
parameters["prealignnormalize"] = prenormparams
parameters["postalignnormalize"] = postnormparams
parameters["align"] = alignparams
parameters["crop"] = cropparams
parameters["replacenan"] = replaceparams
parameters["common"] = commonparams
commonparams["instrument"] = self.xrfmap.instrument
commonparams["stackdim"] = stackdim
nmaps, nlines, nspec, nchan, ndet = self.xrfmap.data.shape
if include_detectors or include_detectors == 0:
incdets_explicite = include_detectors
else:
incdets_explicite = tuple(range(ndet))
alldetectors = tuple(listtools.flatten(incdets_explicite))
adddetectorgroups = any(
len(instance.asarray(dets)) > 1
for dets in instance.asarray(incdets_explicite)
)
adddects_explicite = adddetectors and len(alldetectors) > 1
addspectra = (adddects_explicite or adddetectorgroups) and addbeforefit
fluxnormbefore = quant and correctspectra
dtcorbefore = dtcor and (correctspectra or addspectra)
newspectra = addspectra or fluxnormbefore or dtcorbefore
if addspectra:
if adddetectorgroups:
seldetectors = [
tuple(instance.asarray(dets).tolist())
for dets in instance.asarray(incdets_explicite)
]
else:
seldetectors = [alldetectors]
else:
seldetectors = [(det,) for det in alldetectors]
if cfgfileuse:
cfgfiles = [self.xrfmap.xrfspectra[k]["cfgfile"] for k in seldetectors]
else:
cfgfiles = None
if quant:
geomparams = self.xrfmap.taskparams_geometry(seldetectors)
parameters["geometry"] = geomparams
pymcaparams.update(self.xrfmap.taskparams_pymca(seldetectors))
prealignnormcounter = None
else:
geomparams = {}
prealignnormcounter = "arr_norm"
alignreference = None
fitlabels = set(self.fitlabels(quant=quant))
fitlabelsfile = fitlabels
detcounterlabels = {"xmap_icr", "xmap_ocr", "xmap_x1c", "xmap_x2c"}
counterlabels = {"arr_iodet", "arr_idet", "arr_norm"}
calclabels = {
"calc_transmission",
"calc_absorbance",
"calc_flux0",
"calc_fluxt",
}
for label in fitlabels:
if not "Scatter" in label:
alignreference = label
break
refimageindex = 0
usealign = alignmethod is not None and alignreference is not None
# Data
expectedgroups_data = []
if addspectra:
if adddetectorgroups:
expectedgroups_data = {
"S{:d}".format(i + 1) for i in range(len(incdets_explicite))
}
else:
if len(alldetectors) == 1:
expectedgroups_data = {"{:02d}".format(alldetectors[0])}
else:
expectedgroups_data = {"S1"}
else:
expectedgroups_data = {
"{:02d}".format(i) for i in list(listtools.flatten(incdets_explicite))
}
# Final groups
if adddects_explicite:
if adddetectorgroups:
expectedgroups_result = ["S{:d}".format(len(incdets_explicite) + 1)]
else:
expectedgroups_result = ["S1"]
elif adddetectorgroups:
expectedgroups_result = [
"S{:d}".format(i + 1) for i in range(len(incdets_explicite))
]
else:
expectedgroups_result = ["{:02d}".format(i) for i in alldetectors]
if alignreference:
alignreference = "/detector{}/{}".format(
expectedgroups_result[0], alignreference
)
expectedgroups_result = ["counters"] + [
"detector" + det for det in expectedgroups_result
]
expectedgroups_result = set(expectedgroups_result)
# Processes
expected_nxprocess = ["pymca.1"]
if prealignnormcounter is not None:
expected_nxprocess.append("normalize.1")
if usealign:
expected_nxprocess.append("align.1")
expected_nxprocess.append("crop.1")
with self._destpath_context() as destpath:
radix = self.xrfmap.radix
commonparams["outputparent"] = os.path.join(
destpath.path, radix + ".h5::/" + radix
)
geomparams["outputparent"] = os.path.join(
destpath.path, radix + ".h5::/" + radix
)
pymcaparams["sourcepaths"] = [self.xrfmap.path]
pymcaparams["scannames"] = [radix]
pymcaparams["scannumbers"] = [self.xrfmap.scannumbers]
pymcaparams["pymcacfg"] = cfgfiles
pymcaparams["dtcor"] = dtcor
pymcaparams["adddetectors"] = adddetectors
pymcaparams["addbeforefit"] = addbeforefit
pymcaparams["correctspectra"] = correctspectra
pymcaparams["include_detectors"] = include_detectors
pymcaparams["counters"] = ["arr_norm"]
pymcaparams["fastfitting"] = True
prenormparams["counter"] = prealignnormcounter
alignparams["alignmethod"] = alignmethod
alignparams["reference"] = alignreference
alignparams["refimageindex"] = refimageindex
alignparams["plot"] = False
cropparams["crop"] = usealign
replaceparams["replacenan"] = False
for repeat in range(2):
tasks = fluoxas.tasks(**parameters)
if repeat:
for task in tasks:
self.assertTrue(task.done)
continue
else:
for task in tasks:
self.assertFalse(task.done)
run_sequential(tasks)
for task in tasks:
self.assertTrue(task.done)
nxprocess = tasks[-1].output
# Check generated spectra (files)
if newspectra:
corlabel = ""
if dtcorbefore:
corlabel = corlabel + "dt"
if fluxnormbefore:
corlabel = corlabel + "fl"
if corlabel:
radixout = "{}_{}cor".format(radix, corlabel)
else:
radixout = radix
if addspectra:
expected = [
"{}_xia{}_{:04d}_0000_{:04d}.edf".format(
radixout, det, mapnum, linenum
)
for det in expectedgroups_data
for mapnum in range(nmaps)
for linenum in range(nlines)
]
else:
expected = [
"{}_xia{}_{:04d}_0000_{:04d}.edf".format(
radixout, det, mapnum, linenum
)
for det in expectedgroups_data
for mapnum in range(nmaps)
for linenum in range(nlines)
]
xrfspectra_subdir = os.path.join(
"{}_pymca.1".format(radix), "xrfspectra"
)
destpath.compare(
sorted(expected),
path=xrfspectra_subdir,
files_only=True,
recursive=False,
)
else:
radixout = radix
# Check pymca fit output (files)
if cfgfileuse:
if addspectra:
expected = [
"{}_xia{}_{:04d}_0000_{}.edf".format(
radixout, det, mapnum, label
)
for det in expectedgroups_data
for mapnum in range(nmaps)
for label in fitlabelsfile
]
expected.extend(
[
"{}_xia{}_{:04d}_0000.cfg".format(
radixout, det, mapnum, label
)
for det in expectedgroups_data
for mapnum in range(nmaps)
]
)
else:
expected = [
"{}_xia{}_{:04d}_0000_{}.edf".format(
radixout, det, mapnum, label
)
for det in expectedgroups_data
for mapnum in range(nmaps)
for label in fitlabelsfile
]
expected.extend(
[
"{}_xia{}_{:04d}_0000.cfg".format(
radixout, det, mapnum, label
)
for det in expectedgroups_data
for mapnum in range(nmaps)
]
)
fitresults_subdir = os.path.join(
"{}_pymca.1".format(radix), "pymcaresults"
)
if OutputBuffer is None:
destpath.compare(
sorted(expected),
path=fitresults_subdir,
files_only=True,
recursive=False,
)
else:
expected = set(expected)
actual = set(
os.listdir(os.path.join(destpath.path, fitresults_subdir))
)
self.assertEqual(actual & expected, expected)
# Check top-level output directory (h5 files)
expected = []
if cfgfiles or newspectra:
expected.append("{}_pymca.1".format(radix))
h5file = "{}.h5".format(radix)
expected.append(h5file)
destpath.compare(sorted(expected), files_only=True, recursive=False)
# Check NXprocess groups
entry = nxprocess.nxentry()
for name in expected_nxprocess:
self.assertTrue(name in entry)
self.assertEqual(nxprocess.name, expected_nxprocess[-1])
# Check NXdata groups
groups, axes, stackdim = nxresult.regulargriddata(nxprocess)
self.assertEqual(set(groups.keys()), expectedgroups_result)
for group, signals in groups.items():
if group == "counters":
if quant:
expectedsubgroups = counterlabels | calclabels
else:
expectedsubgroups = counterlabels
else:
if cfgfileuse:
expectedsubgroups = detcounterlabels | fitlabels
else:
expectedsubgroups = detcounterlabels
self.assertEqual({sig.name for sig in signals}, expectedsubgroups)
# Check generated spectra (data)
if newspectra:
# Apply DT correction
if dtcorbefore:
data0 = (
self.xrfmap.stats[..., xiaedf.xiadata.STICR, :]
/ self.xrfmap.stats[..., xiaedf.xiadata.STOCR, :]
)
data0 = self.xrfmap.data * data0[..., np.newaxis, :]
else:
data0 = self.xrfmap.data.copy()
# Apply flux normalization
if fluxnormbefore:
data0 /= self.xrfmap.ctrs["arr_norm"][
..., np.newaxis, np.newaxis
]
# Add spectra
if addspectra:
if adddetectorgroups:
data0 = np.stack(
[
data0[..., instance.asarray(ind)].sum(axis=-1)
for ind in incdets_explicite
],
axis=-1,
)
else:
data0 = data0[..., alldetectors].sum(axis=-1)[
..., np.newaxis
]
else:
data0 = data0[..., tuple(sorted(alldetectors))]
# Saved spectra
stack = xiaedf.xiastack_radix(
os.path.join(destpath.path, xrfspectra_subdir), radixout
)
data2 = stack.data
# Check spectra are equal
np.testing.assert_allclose(data0, data2, rtol=1e-6)
# Check fit results
if cfgfileuse and dtcor:
for group, signals in groups.items():
if not group.isdetector:
continue
if group.issum:
if adddetectorgroups:
if group.number > len(incdets_explicite):
dets = tuple(sorted(alldetectors))
else:
dets = tuple(
instance.asarray(
incdets_explicite[group.number - 1]
).tolist()
)
else:
dets = tuple(sorted(alldetectors))
else:
dets = (group.number,)
logger.debug(
"Check fit result for sum of xrfdata {}".format(dets)
)
info = self.xrfmap.xrfspectra[dets]
for signal in signals:
if "xmap" in signal.name:
continue
dataset = signal.read()
if stackdim == 1:
grpdata = np.moveaxis(dataset, 1, 0)
elif stackdim == 2:
grpdata = np.moveaxis(dataset, 2, 0)
else:
grpdata = dataset[:]
self._assert_fitresult(signal.name, grpdata, info)
# repeat
# destpath context
def fitlabels(self, quant=False):
labels = []
if OutputBuffer is not None:
labels += ["Constant"]
labels += list(self.xrfmap.labels)
if quant:
labels += [
"w" + label
for label in labels
if "Scatter" not in label and label != "Constant"
]
labels = [label.replace("-", "_") for label in labels]
return labels
@contextlib.contextmanager
def _destpath_context(self):
destpath = TempDirectory()
yield destpath
destpath.cleanup()
def _assert_fitresult(self, grpname, grpdata, info):
if "Scatter" in grpname or "chisq" in grpname or "Constant" in grpname:
return
grpname = str(grpname).replace("_", "-")
m = re.match("Scatter-(Compton|Peak)([0-9]+)", grpname)
if m:
grpname = m.group(1)
if grpname == "Peak":
grpname = "Rayleigh"
values1 = [
peakareas[0][grpname][int(m.group(2))]
for peakareas in info["peakareas"]
]
values2 = [
peakareas[1][grpname][int(m.group(2))]
for peakareas in info["peakareas"]
]
else:
if grpname.startswith("w"):
grpname = grpname[1:]
values1 = [
massfractions[0][grpname] for massfractions in info["massfractions"]
]
values2 = [
massfractions[1][grpname] for massfractions in info["massfractions"]
]
else:
values1 = [peakareas[0][grpname] for peakareas in info["peakareas"]]
values2 = [peakareas[1][grpname] for peakareas in info["peakareas"]]
for data, v1, v2 in | |
train-test pair
"""
    def __init__(self, data, labels, classifier, numTopVars,
                 covariate_detrend_params=None):
        """Store the inputs needed to classify one train/test fold.

        data: 2d array of samples x variables.
        labels: one label per sample.
        classifier: sklearn-style estimator forwarded to ``classify``.
        numTopVars: iterable of top-variable counts to evaluate.
        covariate_detrend_params: optional dict forwarded to ``classify``
            for covariate detrending.
        """
        self.data = data
        self.labels = labels
        self.clf = classifier
        self.numTopVars = numTopVars
        self.covariate_detrend_params = covariate_detrend_params
def __call__(self, zipped_ranks_n_fp):
rankedVars, fp = zipped_ranks_n_fp
confMats = []
totalErrs = []
fitted_classifiers = []
predictions = []
cont_preds = []
for numVars in self.numTopVars:
if self.covariate_detrend_params:
self.covariate_detrend_params['rankedVars'] = rankedVars
self.covariate_detrend_params['numVars'] = numVars
classify_output = classify(self.data[:, rankedVars[:numVars]],
self.labels, fp, self.clf,
covariate_detrend_params=self.covariate_detrend_params)
confMats.append(classify_output[0])
totalErrs.append(classify_output[1])
fitted_classifiers.append(classify_output[2])
predictions.append(classify_output[3])
cont_preds.append(classify_output[4])
return confMats, totalErrs, fitted_classifiers, predictions, cont_preds
def get_score(data, labels, fold_pairs, name, model, param, numTopVars,
              rank_per_fold=None, parallel=True, rand_iter=-1,
              covariate_detrend_params=None,
              provide_continuous_output=True):
    """
    Function to get score for a classifier.

    Parameters
    ----------
    data: array_like
        Data from which to derive score.
    labels: array_like or list
        Corresponding labels for each sample.
    fold_pairs: list of pairs of array_like
        A list of train/test indices for each fold.
    name: str
        Name of classifier.
    model: WRITEME
    param: WRITEME
        Parameters for the classifier.
    numTopVars: list of int
        Numbers of top-ranked variables to evaluate per fold.
    rank_per_fold: array_like, optional
        Per-fold variable rankings; when None, natural column order is used
        (sequential path only).
    parallel: bool
        Whether to run folds in parallel. Default: True
    rand_iter: int
        Randomized-search iterations; <= 0 selects exhaustive grid search.
    covariate_detrend_params: dict, optional
        Forwarded to ``classify`` for covariate detrending.
    provide_continuous_output: bool
        Unused here; kept for interface compatibility.

    Returns
    -------
    classifier, allConfMats, allTotalErrs, allFittedClassifiers,
    allPredictions, allContPreds — the lists are indexed [fold][numVarSet].
    """
    assert isinstance(name, str)
    logging.info("Classifying %s" % name)
    ksplit = len(fold_pairs)
    # Redefine the parameters to be used for RBF SVM (dependent on
    # training data)
    if "SGD" in name:
        param["n_iter"] = [25]  # [np.ceil(10**3 / len(fold_pairs[0][0]))]
    classifier = get_classifier(name, model, param, rand_iter=rand_iter)

    if name == "RBF SVM":  # This doesn't use labels, but looks at ALL data
        logging.info("RBF SVM requires some preprocessing."
                     "This may take a while")
        # Euclidean distances between standardized samples.
        # FIX: ``fit`` returns the scaler object itself, which pdist cannot
        # consume; the distances must be computed on the transformed data.
        dist = pdist(StandardScaler().fit_transform(data), "euclidean").ravel()
        # Estimates for sigma (10th, 50th and 90th percentile)
        sigest = np.asarray(np.percentile(dist, [10, 50, 90]))
        # Estimates for gamma (= -1/(2*sigma^2))
        gamma = 1. / (2 * sigest ** 2)
        # Set SVM parameters with these values
        param = [{"kernel": ["rbf"],
                  "gamma": gamma.tolist(),
                  "C": np.logspace(-2, 2, 5).tolist()}]

    if param:
        # Size of the search space (informational; see log message below).
        if hasattr(classifier, 'param_grid'):
            N_p = np.prod([len(l) for l in param.values()])
        elif isinstance(classifier, RandomizedSearchCV):
            N_p = classifier.n_iter
        else:
            N_p = 1

    if (not parallel) or \
       (name == "Random Forest") or ("SGD" in name):
        # Sequential path: some estimators parallelize internally, so the
        # folds are processed one by one with classifier.n_jobs set instead.
        logging.info("Attempting to use grid search...")
        classifier.n_jobs = PROCESSORS
        allConfMats = []
        allTotalErrs = []
        allFittedClassifiers = []
        allPredictions = []
        allContPreds = []
        for i, fold_pair in enumerate(fold_pairs):
            confMats = []
            totalErrs = []
            fitted_classifiers = []
            predictions = []
            cont_preds = []
            logging.info("Classifying a %s the %d-th out of %d folds..."
                         % (name, i + 1, len(fold_pairs)))
            if rank_per_fold is not None:
                rankedVars = np.squeeze(rank_per_fold)[i]
            else:
                rankedVars = np.arange(data.shape[1])
            for numVars in numTopVars:
                logging.info('Classifying for top %i variables' % numVars)
                if covariate_detrend_params:
                    covariate_detrend_params['rankedVars'] = rankedVars
                    covariate_detrend_params['numVars'] = numVars
                classify_output = classify(data[:, rankedVars[:numVars]],
                                           labels,
                                           fold_pair,
                                           classifier,
                                           covariate_detrend_params=covariate_detrend_params)
                confMat, totalErr, fitted_classifier, prediction, cont_pred\
                    = classify_output
                confMats.append(confMat)
                totalErrs.append(totalErr)
                fitted_classifiers.append(fitted_classifier)
                predictions.append(prediction)
                cont_preds.append(cont_pred)
            allConfMats.append(confMats)
            allTotalErrs.append(totalErrs)
            allFittedClassifiers.append(fitted_classifiers)
            allPredictions.append(predictions)
            allContPreds.append(cont_preds)
    else:
        # Parallel path: one worker per fold via per_split_classifier.
        classifier.n_jobs = PROCESSORS
        logging.info("Multiprocessing folds for classifier {}.".format(name))
        pool = Pool(processes=min(ksplit, PROCESSORS))
        out_list = pool.map(per_split_classifier(data, labels, classifier,
                                                 numTopVars,
                                                 covariate_detrend_params=covariate_detrend_params),
                            zip(rank_per_fold, fold_pairs))
        pool.close()
        pool.join()
        allConfMats, allTotalErrs, allFittedClassifiers, allPredictions, allContPreds\
            = tuple(zip(*out_list))
    return classifier, allConfMats, allTotalErrs, allFittedClassifiers, allPredictions, allContPreds
def get_classifier(name, model, param, rand_iter=-1):
    """Wrap ``model`` in a hyper-parameter search object when needed.

    Parameters
    ----------
    name: str
        Classifier name (used for logging only).
    model: estimator to wrap.
    param: dict
        Parameter lists keyed by parameter name; empty means no search.
    rand_iter: int
        Randomized-search iterations; <= 0 (or a grid smaller than
        ``rand_iter``) forces exhaustive grid search.

    Returns
    -------
    The (possibly wrapped) estimator.
    """
    assert isinstance(name, str)
    if not param:  # Do grid search only if parameter list is not empty
        logging.info("Not using grid search for %s" % name)
        return model
    grid_size = np.prod([len(values) for values in param.values()])
    if rand_iter <= 0 or grid_size <= rand_iter:
        logging.info("Using grid search for %s" % name)
        return GridSearchCV(model, param, cv=5, scoring="accuracy",
                            n_jobs=PROCESSORS)
    logging.info("Using random search for %s" % name)
    return RandomizedSearchCV(model, param, cv=5, scoring="accuracy",
                              n_jobs=PROCESSORS, n_iter=rand_iter)
def classify(data, labels, train_test_idx, classifier=None,
             covariate_detrend_params=None):
    """
    Classifies given a fold and a model.

    Parameters
    ----------
    data: array_like
        2d matrix of observations vs variables
    labels: list or array_like
        1d vector of labels for each data observation
    train_test_idx : list
        (train_idx, test_idx) pair for splitting data into train and test
    classifier: sklearn classifier object
        initialized classifier with "fit" and "predict_proba" methods.
    covariate_detrend_params: dict, optional
        When provided, features are detrended with
        ``TON_tools.TON_feature_detrender`` before scaling.

    Returns
    -------
    tuple
        (confMatRate, totalErr, fitted_model, predictions, cont_prediction);
        on ``numpy.linalg.LinAlgError`` a NaN-filled placeholder tuple.
    """
    assert classifier is not None, "Why would you pass not classifier?"
    train_idx, test_idx = train_test_idx
    # Perform detrending:
    if covariate_detrend_params:
        detrender = TON_tools.TON_feature_detrender(**covariate_detrend_params)
        detrender.fit(train_idx, test_idx)  # test_idx provided to be EXPLICITLY removed
        # from the fitting procedure
        # NOTE(review): transform() is called with the union of indices;
        # presumably it returns rows aligned with the original positions so
        # clean_data[train_idx] below is valid — confirm against TON_tools.
        clean_data = detrender.transform(np.union1d(train_idx, test_idx))
    else:
        clean_data = data
    # Data scaling based on training set
    # NOTE(review): the second fit() call immediately overrides the first
    # (label=-1) one — looks like leftover experimentation; confirm intended.
    scaler = SupervisedStdScaler() #SupervisedRobustScaler() # #
    # scaler = StandardScaler()
    scaler.fit(clean_data[train_idx,:], labels[train_idx], label=-1)
    scaler.fit(clean_data[train_idx, :], labels[train_idx])
    data_train = scaler.transform(clean_data[train_idx, :])
    data_test = scaler.transform(clean_data[test_idx, :])
    try:
        classifier.fit(data_train, labels[train_idx])
        predictions = classifier.predict(data_test)
        confMat = confusion_matrix(labels[test_idx],
                                   predictions)
        # Continuous scores: prefer predict_proba, fall back to
        # decision_function, else leave as NaN.
        cont_prediction = np.nan * np.zeros(predictions.shape)
        try:
            cont_prediction = classifier.predict_proba(data_test)
        except AttributeError:
            try:
                cont_prediction = classifier.decision_function(data_test)
            except AttributeError:
                pass
        # Keep only the last column (score of the last class).
        if len(cont_prediction.shape) > 1:
            cont_prediction = cont_prediction[:,-1]
        # Only one class present in the test fold: pad the 1x1 confusion
        # matrix back to 2x2 in the proper corner.
        if confMat.shape == (1, 1):
            if all(labels[test_idx] == -1):
                confMat = np.array([[confMat[0], 0], [0, 0]],
                                   dtype=confMat.dtype)
            else:
                confMat = np.array([[0, 0], [0, confMat[0]]],
                                   dtype=confMat.dtype)
        # Row-normalized rates: each row divided by its true-class count.
        confMatRate = confMat / np.tile(np.sum(confMat, axis=1).
                                        astype('float'), (2, 1)).transpose()
        totalErr = (confMat[0, 1] + confMat[1, 0]) / float(confMat.sum())
        # if type(classifier) not in [type(None), DummyClassifier]:
        if hasattr(classifier, 'param_grid'):
        # isinstance(classifier, GridSearchCV) or \
        # isinstance(classifier, RandomizedSearchCV):
            # Hyper-parameter search object: report the refit best estimator.
            fitted_model = classifier.best_estimator_
        else:
            fitted_model = copy.copy(classifier)
        return confMatRate, totalErr, fitted_model, predictions, cont_prediction
    except np.linalg.linalg.LinAlgError:
        # Degenerate fold (e.g. singular matrix): return NaN placeholders.
        return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None, np.nan, np.nan
def load_data(data_file, data_pattern='*.mat'):
    """Load one MATLAB file and split off its label column.

    Parameters
    ----------
    data_file: str
        Path to a .mat file containing a 'data' matrix whose last column
        holds the labels.
    data_pattern: str
        Unused here; kept for interface compatibility.

    Returns
    -------
    (features, labels, data_file)
    """
    contents = scipy.io.loadmat(data_file, mat_dtype=True)
    matrix = contents['data']
    logging.info("Data loading complete. Shape is %r" % (matrix.shape,))
    return matrix[:, :-1], matrix[:, -1], data_file
def load_labels(source_dir, label_pattern):
    """Load the single labels file matching ``label_pattern`` in ``source_dir``.

    Parameters
    ----------
    source_dir: str
        Source directory of labels
    label_pattern: str
        unix glob for label files.

    Returns
    -------
    labels: array_like
        Flattened numpy vector of labels.

    Raises
    ------
    ValueError
        When zero files, or more than one file, match the pattern.
    """
    logging.info("Loading labels from %s with pattern %s"
                 % (source_dir, label_pattern))
    matches = glob(path.join(source_dir, label_pattern))
    if not matches:
        raise ValueError("No label files found with pattern %s"
                         % label_pattern)
    if len(matches) > 1:
        raise ValueError("Only one label file supported ATM.")
    labels = np.load(matches[0]).flatten()
    logging.info("Label loading complete. Shape is %r" % (labels.shape,))
    return labels
def load_subject_list(data_file, source='matlab'):
    """Return the subject list stored under 'fmri_subjects' in a .mat file.

    Parameters
    ----------
    data_file: str
        Path to the .mat file.
    source: str
        'matlab' (cell array of strings) or 'python' layout.

    Returns
    -------
    numpy array of subject ids, with any spaces stripped.
    """
    raw = loadmat(data_file)['fmri_subjects']
    if source == 'matlab':
        subjects = np.array([entry[0][0] for entry in raw])
    elif source == 'python':
        try:
            # ``unicode`` only exists on Python 2; NameError means Python 3.
            is_string = isinstance(raw[0], (str, unicode))
        except NameError:
            is_string = isinstance(raw[0], str)
        if is_string:
            subjects = raw
        else:
            subjects = np.array([str(entry[0]) for entry in raw[0]])
    return np.array([s.replace(' ', '') for s in subjects])
def save_classifier_results(classifier_name, out_dir, allConfMats,
                            allTotalErrs):
    """Save per-variable-count error statistics to ``<name>_errors.mat``.

    Parameters
    ----------
    classifier_name: str
        Used as the output file prefix.
    allConfMats: array_like
        Per-fold confusion-rate matrices, shape (folds, numVarSets, 2, 2).
    allTotalErrs: array_like
        Per-fold total error rates, shape (folds, numVarSets).

    Writes mean/std of the true-negative rate (cell [0, 0]), the
    false-negative rate (cell [1, 0]) and the total error, computed across
    folds while ignoring NaN entries from failed folds.
    """
    confMats = np.array(allConfMats)
    totalErrs = np.array(allTotalErrs)
    # nanmean/nanstd replace the original hand-written per-column loop that
    # masked NaNs manually — identical results, one vectorized pass each.
    TN_means = np.nanmean(confMats[:, :, 0, 0], axis=0)
    TN_stds = np.nanstd(confMats[:, :, 0, 0], axis=0)
    FN_means = np.nanmean(confMats[:, :, 1, 0], axis=0)
    FN_stds = np.nanstd(confMats[:, :, 1, 0], axis=0)
    total_means = np.nanmean(totalErrs, axis=0)
    total_stds = np.nanstd(totalErrs, axis=0)
    with open(path.join(out_dir, classifier_name + '_errors.mat'), 'wb') as f:
        scipy.io.savemat(f, {'TN_means': TN_means,
                             'TN_stds': TN_stds,
                             'FN_means': FN_means,
                             'FN_stds': FN_stds,
                             'total_means': total_means,
                             'total_stds': total_stds,
                             })
def save_classifier_predictions_per_sample(classifier_name, out_dir,
predictions, cont_predictions,
fold_pairs, labels,
subjects_per_run, ktop=-1):
'''
Construction of a data frame with labels, calssifier predictions | |
author=article.authors,
url=url_external('main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
lang=article_lang),
updated=journal.updated,
published=journal.created)
return feed.get_response()
@main.route("/journal/<string:url_seg>/about/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang)
def about_journal(url_seg):
language = session.get('lang', get_locale())
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
latest_issue = utils.fix_journal_last_issue(journal)
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = None
page = controllers.get_page_by_journal_acron_lang(journal.acronym, language)
context = {
'journal': journal,
'latest_issue_legend': latest_issue_legend,
'last_issue': latest_issue,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
}
if page:
context['content'] = page.content
if page.updated_at:
context['page_updated_at'] = page.updated_at
return render_template("journal/about.html", **context)
@main.route("/journals/search/alpha/ajax/", methods=['GET', ])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def journals_search_alpha_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida. Deve ser por ajax'))
query = request.args.get('query', '', type=str)
query_filter = request.args.get('query_filter', '', type=str)
page = request.args.get('page', 1, type=int)
lang = get_lang_from_session()[:2].lower()
response_data = controllers.get_alpha_list_from_paginated_journals(
title_query=query,
query_filter=query_filter,
page=page,
lang=lang)
return jsonify(response_data)
@main.route("/journals/search/group/by/filter/ajax/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def journals_search_by_theme_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida. Deve ser por ajax'))
query = request.args.get('query', '', type=str)
query_filter = request.args.get('query_filter', '', type=str)
filter = request.args.get('filter', 'areas', type=str)
lang = get_lang_from_session()[:2].lower()
if filter == 'areas':
objects = controllers.get_journals_grouped_by('study_areas', query, query_filter=query_filter, lang=lang)
elif filter == 'wos':
objects = controllers.get_journals_grouped_by('subject_categories', query, query_filter=query_filter, lang=lang)
elif filter == 'publisher':
objects = controllers.get_journals_grouped_by('publisher_name', query, query_filter=query_filter, lang=lang)
else:
return jsonify({
'error': 401,
'message': _('Parámetro "filter" é inválido, deve ser "areas", "wos" ou "publisher".')
})
return jsonify(objects)
@main.route("/journals/download/<string:list_type>/<string:extension>/", methods=['GET', ])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def download_journal_list(list_type, extension):
if extension.lower() not in ['csv', 'xls']:
abort(401, _('Parámetro "extension" é inválido, deve ser "csv" ou "xls".'))
elif list_type.lower() not in ['alpha', 'areas', 'wos', 'publisher']:
abort(401, _('Parámetro "list_type" é inválido, deve ser: "alpha", "areas", "wos" ou "publisher".'))
else:
if extension.lower() == 'xls':
mimetype = 'application/vnd.ms-excel'
else:
mimetype = 'text/csv'
query = request.args.get('query', '', type=str)
data = controllers.get_journal_generator_for_csv(list_type=list_type,
title_query=query,
extension=extension.lower())
timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = 'journals_%s_%s.%s' % (list_type, timestamp, extension)
response = Response(data, mimetype=mimetype)
response.headers['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
@main.route("/<string:url_seg>/contact", methods=['POST'])
def contact(url_seg):
if not request.is_xhr:
abort(403, _('Requisição inválida, deve ser ajax.'))
if utils.is_recaptcha_valid(request):
form = forms.ContactForm(request.form)
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal.enable_contact:
abort(403, _('Periódico não permite envio de email.'))
recipients = journal.editor_email
if form.validate():
sent, message = controllers.send_email_contact(recipients,
form.data['name'],
form.data['your_email'],
form.data['message'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
else:
abort(400, _('Requisição inválida, captcha inválido.'))
@main.route("/form_contact/<string:url_seg>/", methods=['GET'])
def form_contact(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
context = {
'journal': journal
}
return render_template("journal/includes/contact_form.html", **context)
# ###################################Issue#######################################
@main.route('/grid/<string:url_seg>/')
def issue_grid_legacy(url_seg):
    """Permanently redirect the legacy grid URL to the current route."""
    target = url_for('main.issue_grid', url_seg=url_seg)
    return redirect(target, 301)
@main.route('/j/<string:url_seg>/grid')
@cache.cached(key_prefix=cache_key_with_lang)
def issue_grid(url_seg):
    """Render the issue grid (volumes/numbers/ahead) of a journal."""
    journal = controllers.get_journal_by_url_seg(url_seg)
    if not journal:
        abort(404, _('Periódico não encontrado'))
    if not journal.is_public:
        abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
    # session language
    language = session.get('lang', get_locale())
    # Default ordering of ``get_issues_by_jid``: "-year", "-volume", "-order"
    issues_data = controllers.get_issues_for_grid_by_jid(journal.id, is_public=True)
    latest_issue = issues_data['last_issue']
    if latest_issue:
        # Bibliographic legend for the most recent issue.
        latest_issue_legend = descriptive_short_format(
            title=journal.title, short_title=journal.short_title,
            pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
            suppl=latest_issue.suppl_text, language=language[:2].lower())
    else:
        latest_issue_legend = None
    context = {
        'journal': journal,
        'last_issue': issues_data['last_issue'],
        'latest_issue_legend': latest_issue_legend,
        'volume_issue': issues_data['volume_issue'],
        'ahead': issues_data['ahead'],
        'result_dict': issues_data['ordered_for_grid'],
        'journal_study_areas': [
            STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
        ],
    }
    return render_template("issue/grid.html", **context)
@main.route('/toc/<string:url_seg>/<string:url_seg_issue>/')
def issue_toc_legacy(url_seg, url_seg_issue):
    """Permanently redirect legacy TOC URLs; ahead-of-print goes to aop_toc."""
    if url_seg_issue and "ahead" in url_seg_issue:
        return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301)
    target = url_for('main.issue_toc',
                     url_seg=url_seg,
                     url_seg_issue=url_seg_issue)
    return redirect(target, code=301)
@main.route('/j/<string:url_seg>/i/<string:url_seg_issue>/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def issue_toc(url_seg, url_seg_issue):
    """Render the table of contents of one issue, optionally filtered by section.

    Supports ``?goto=next|previous`` navigation (301 redirects) and an
    optional ``?section=`` filter when FILTER_SECTION_ENABLE is set.
    """
    section_filter = None
    goto = request.args.get("goto", None, type=str)
    if goto not in ("previous", "next"):
        goto = None
    if goto in (None, "next") and "ahead" in url_seg_issue:
        # redirect to `aop_toc`
        return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301)

    # session language
    language = session.get('lang', get_locale())

    if current_app.config["FILTER_SECTION_ENABLE"]:
        # selected document section, if any
        section_filter = request.args.get('section', '', type=str).upper()

    # fetch the issue
    issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
    if not issue:
        abort(404, _('Número não encontrado'))
    if not issue.is_public:
        abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))

    # fetch the journal
    journal = issue.journal
    if not journal.is_public:
        abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))

    # fill in the url_segment of the journal's last_issue
    utils.fix_journal_last_issue(journal)

    # goto_next_or_previous_issue (redirects when applicable)
    goto_url = goto_next_or_previous_issue(
        issue, request.args.get('goto', None, type=str))
    if goto_url:
        return redirect(goto_url, code=301)

    # fetch the documents
    articles = controllers.get_articles_by_iid(issue.iid, is_public=True)

    if articles:
        # collect ALL sections present in this TOC (before filtering)
        sections = sorted({a.section.upper() for a in articles if a.section})
    else:
        sections = []

    if current_app.config["FILTER_SECTION_ENABLE"] and section_filter != '':
        # keep only the documents of the selected section
        articles = [a for a in articles if a.section.upper() == section_filter]

    # collect PDF and TEXT languages of each document
    has_math_content = False
    for article in articles:
        article_text_languages = [doc['lang'] for doc in article.htmls]
        article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs]
        setattr(article, "article_text_languages", article_text_languages)
        setattr(article, "article_pdf_languages", article_pdf_languages)
        # MathML markup in a title signals math content for the template.
        if 'mml:' in article.title:
            has_math_content = True

    # build the bibliographic strip
    issue_bibliographic_strip = descriptive_short_format(
        title=journal.title, short_title=journal.short_title,
        pubdate=str(issue.year), volume=issue.volume, number=issue.number,
        suppl=issue.suppl_text, language=language[:2].lower())

    context = {
        'this_page_url': url_for(
            'main.issue_toc',
            url_seg=url_seg,
            url_seg_issue=url_seg_issue),
        'has_math_content': has_math_content,
        'journal': journal,
        'issue': issue,
        'issue_bibliographic_strip': issue_bibliographic_strip,
        'articles': articles,
        'sections': sections,
        'section_filter': section_filter,
        'journal_study_areas': [
            STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
        ],
        'last_issue': journal.last_issue
    }
    return render_template("issue/toc.html", **context)
def goto_next_or_previous_issue(current_issue, goto_param):
    """Return the TOC URL of the adjacent issue, or None when no redirect applies.

    goto_param: "next" or "previous"; any other value returns None.
    """
    if goto_param not in ("next", "previous"):
        return None

    issues = list(
        controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
    if goto_param == "next":
        target = utils.get_next_issue(issues, current_issue)
    else:
        target = utils.get_prev_issue(issues, current_issue)
    if target is None or target == current_issue:
        # Already at the boundary: no redirect needed.
        return None
    try:
        url_seg_issue = target.url_segment
    except AttributeError:
        return None
    return url_for('main.issue_toc',
                   url_seg=target.journal.url_segment,
                   url_seg_issue=url_seg_issue)
def get_next_or_previous_issue(current_issue, goto_param):
    """Return the adjacent issue, or ``current_issue`` for an invalid direction."""
    if goto_param not in ("next", "previous"):
        return current_issue
    issues = list(
        controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
    picker = utils.get_next_issue if goto_param == "next" else utils.get_prev_issue
    return picker(issues, current_issue)
@main.route('/j/<string:url_seg>/aop')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def aop_toc(url_seg):
    """Render the table of contents of a journal's ahead-of-print articles."""
    section_filter = request.args.get('section', '', type=str).upper()

    aop_issues = controllers.get_aop_issues(url_seg) or []
    if not aop_issues:
        abort(404, _('Artigos ahead of print não encontrados'))

    goto = request.args.get("goto", None, type=str)
    if goto == "previous":
        url = goto_next_or_previous_issue(aop_issues[-1], goto)
        if url:
            # Fix: the original computed the redirect but never returned it,
            # so ``?goto=previous`` silently rendered the AOP page instead.
            return redirect(url, code=301)

    journal = aop_issues[0].journal
    if not journal.is_public:
        abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
    utils.fix_journal_last_issue(journal)

    # Gather the public articles of every ahead-of-print issue.
    articles = []
    for aop_issue in aop_issues:
        _articles = controllers.get_articles_by_iid(
            aop_issue.iid, is_public=True)
        if _articles:
            articles.extend(_articles)
    if not articles:
        abort(404, _('Artigos ahead of print não encontrados'))

    # All sections present (computed before filtering so the menu stays full).
    sections = sorted({a.section.upper() for a in articles if a.section})
    if section_filter != '':
        articles = [a for a in articles if a.section.upper() == section_filter]

    for article in articles:
        # Attach per-article language availability for the template.
        article_text_languages = [doc['lang'] for doc in article.htmls]
        article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs]
        setattr(article, "article_text_languages", article_text_languages)
        setattr(article, "article_pdf_languages", article_pdf_languages)

    context = {
        'this_page_url': url_for("main.aop_toc", url_seg=url_seg),
        'journal': journal,
        'issue': aop_issues[0],
        'issue_bibliographic_strip': "ahead of print",
        'articles': articles,
        'sections': sections,
        'section_filter': section_filter,
        'journal_study_areas': [
            STUDY_AREAS.get(study_area.upper())
            for study_area in journal.study_areas
        ],
        # The first item of the list is the latest issue.
        'last_issue': journal.last_issue
    }
    return render_template("issue/toc.html", **context)
@main.route('/feed/<string:url_seg>/<string:url_seg_issue>/')
@cache.cached(key_prefix=cache_key_with_lang)
def issue_feed(url_seg, url_seg_issue):
    """Serve an Atom feed with the public articles of one issue."""
    issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)

    if not issue:
        abort(404, _('Número não encontrado'))
    if not issue.is_public:
        abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
    if not issue.journal.is_public:
        abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason))

    journal = issue.journal
    articles = controllers.get_articles_by_iid(issue.iid, is_public=True)

    feed = AtomFeed(journal.title or "",
                    feed_url=request.url,
                    url=request.url_root,
                    subtitle=utils.get_label_issue(issue))

    feed_language = session.get('lang', get_locale())

    for article in articles:
        # ######### TODO: Review #########
        # Fall back to the article's original language when the session
        # language is not available for this article.
        article_lang = feed_language
        if feed_language not in article.languages:
            article_lang = article.original_language
        feed.add(article.title or 'Unknow title',
                 render_template("issue/feed_content.html", article=article),
                 content_type='html',
                 author=article.authors,
                 id=article.doi or article.pid,
                 url=url_external('main.article_detail_v3',
                                  url_seg=journal.url_segment,
                                  article_pid_v3=article.aid,
                                  lang=article_lang),
                 updated=journal.updated,
                 published=journal.created)
    return feed.get_response()
# ##################################Article######################################
@main.route('/article/<regex("S\d{4}-\d{3}[0-9xX][0-2][0-9]{3}\d{4}\d{5}"):pid>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_pid(pid):
    """Resolve a legacy PID (regular or ahead-of-print) and redirect to the v3 URL."""
    article = (controllers.get_article_by_pid(pid)
               or controllers.get_article_by_oap_pid(pid))
    if not article:
        abort(404, _('Artigo não encontrado'))
    return redirect(url_for('main.article_detail_v3',
                            url_seg=article.journal.acronym,
                            article_pid_v3=article.aid))
def render_html_from_xml(article, lang, gs_abstract=False):
    """Generate the article HTML for ``lang`` from its XML.

    Returns (generated_html, available_languages).
    """
    logger.debug("Get XML: %s", article.xml)
    xml_url = article.xml
    if current_app.config["SSM_XML_URL_REWRITE"]:
        xml_url = use_ssm_url(xml_url)
    raw = fetch_data(xml_url)
    xml = etree.parse(BytesIO(raw))
    generator = HTMLGenerator.parse(
        xml, valid_only=False, gs_abstract=gs_abstract, output_style="website")
    return generator.generate(lang), generator.languages
def render_html_from_html(article, lang):
    """Fetch the pre-rendered HTML of the article in ``lang``.

    Returns (html_text, available_languages).
    Raises ValueError when no HTML exists for ``lang``.
    """
    candidates = [doc for doc in article.htmls if doc['lang'] == lang]
    if not candidates:
        raise ValueError('Artigo não encontrado')
    result = fetch_data(use_ssm_url(candidates[0]['url']))
    text_languages = [doc['lang'] for doc in article.htmls]
    return result.decode('utf8'), text_languages
def render_html_abstract(article, lang):
    """Return (abstract_text, abstract_languages) for ``lang``.

    Falls back to an empty string when no abstract matches ``lang``.
    """
    matches = (a["text"] for a in article.abstracts if a['language'] == lang)
    abstract_text = next(matches, '')
    return abstract_text, article.abstract_languages
def render_html(article, lang, gs_abstract=False):
    """Dispatch HTML rendering based on which sources the article has.

    Prefers XML; falls back to stored HTML (or abstract-only when
    ``gs_abstract``); returns ('', []) when neither source exists.
    """
    if article.xml:
        return render_html_from_xml(article, lang, gs_abstract)
    if article.htmls:
        if gs_abstract:
            return render_html_abstract(article, lang)
        return render_html_from_html(article, lang)
    # TODO: fix the tests that expect an ``htmls`` attribute;
    # ideally this would raise ValueError instead.
    return '', []
# TODO: Remover assim que o valor Article.xml estiver consistente na base de
# dados
def use_ssm_url(url):
"""Normaliza a string `url` de acordo com os valores das diretivas de
configuração OPAC_SSM_SCHEME, OPAC_SSM_DOMAIN e OPAC_SSM_PORT.
A | |
# Source repository: laurens-in/magenta
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SketchRNN RNN definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import rnn as contrib_rnn
def orthogonal(shape):
  """Sample a random matrix of ``shape`` with orthonormal rows or columns.

  Computes the SVD of a standard-Gaussian matrix and keeps whichever
  factor matches the flattened shape, then reshapes to ``shape``.
  """
  flat = (shape[0], np.prod(shape[1:]))
  gaussian = np.random.normal(0.0, 1.0, flat)
  u, _, v = np.linalg.svd(gaussian, full_matrices=False)
  basis = u if u.shape == flat else v
  return basis.reshape(shape)
def orthogonal_initializer(scale=1.0):
  """Build a TF initializer that emits ``scale``-multiplied orthogonal values."""

  def _init(shape, dtype=tf.float32,
            partition_info=None):  # pylint: disable=unused-argument
    values = orthogonal(shape) * scale
    return tf.constant(values, dtype)

  return _init
def lstm_ortho_initializer(scale=1.0):
  """LSTM initializer: an independent orthogonal block per gate.

  Produces a [size_x, 4 * size_h] weight where each of the four gate
  sub-matrices is a separately drawn orthogonal matrix times ``scale``.
  """

  def _init(shape, dtype=tf.float32,
            partition_info=None):  # pylint: disable=unused-argument
    size_x = shape[0]
    size_h = shape[1] // 4  # assumes an LSTM weight with 4 stacked gates.
    weights = np.zeros(shape)
    for gate in range(4):
      block = orthogonal([size_x, size_h]) * scale
      weights[:, gate * size_h:(gate + 1) * size_h] = block
    return tf.constant(weights, dtype)

  return _init
class LSTMCell(contrib_rnn.RNNCell):
  """Vanilla LSTM cell.

  Uses ortho initializer, and also recurrent dropout without memory loss
  (https://arxiv.org/abs/1603.05118).

  The state is kept as one tensor of size [batch, 2 * num_units] holding
  the concatenation [c, h] rather than a state tuple.
  """

  def __init__(self,
               num_units,
               forget_bias=1.0,
               use_recurrent_dropout=False,
               dropout_keep_prob=0.9):
    # num_units: size of the hidden state h (and of the cell state c).
    # forget_bias: added to the forget-gate pre-activation.
    # use_recurrent_dropout: apply dropout to the candidate activation.
    # dropout_keep_prob: keep probability for that dropout.
    self.num_units = num_units
    self.forget_bias = forget_bias
    self.use_recurrent_dropout = use_recurrent_dropout
    self.dropout_keep_prob = dropout_keep_prob

  @property
  def state_size(self):
    # Concatenated [c, h] state.
    return 2 * self.num_units

  @property
  def output_size(self):
    return self.num_units

  def get_output(self, state):
    """Extract h from the concatenated [c, h] state."""
    unused_c, h = tf.split(state, 2, 1)
    return h

  def __call__(self, x, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__):
      c, h = tf.split(state, 2, 1)

      x_size = x.get_shape().as_list()[1]

      w_init = None  # uniform
      h_init = lstm_ortho_initializer(1.0)

      # Keep W_xh and W_hh separate here as well to use different init methods.
      w_xh = tf.get_variable(
          'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
      w_hh = tf.get_variable(
          'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
      bias = tf.get_variable(
          'bias', [4 * self.num_units],
          initializer=tf.constant_initializer(0.0))

      # Single fused matmul over the concatenated [x, h] input.
      concat = tf.concat([x, h], 1)
      w_full = tf.concat([w_xh, w_hh], 0)
      hidden = tf.matmul(concat, w_full) + bias

      # Gate order: input i, candidate j, forget f, output o.
      i, j, f, o = tf.split(hidden, 4, 1)

      if self.use_recurrent_dropout:
        g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
      else:
        g = tf.tanh(j)

      new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
      new_h = tf.tanh(new_c) * tf.sigmoid(o)

      # Return output h and the re-concatenated [c, h] state.
      return new_h, tf.concat([new_c, new_h], 1)
def layer_norm_all(h,
                   batch_size,
                   base,
                   num_units,
                   scope='layer_norm',
                   reuse=False,
                   gamma_start=1.0,
                   epsilon=1e-3,
                   use_bias=True):
  """Layer Norm over `base` concatenated blocks at once (faster version).

  Performs layer norm on multiple blocks in parallel (e.g. the fused
  i, g, j, o pre-activations of an LSTM, base=4), normalizing each
  block of `num_units` values independently.

  Args:
    h: tensor of shape [batch_size, base * num_units].
    batch_size: static batch size, needed for the reshape.
    base: number of blocks packed side by side in `h`.
    num_units: width of each block; statistics are taken within a block.
    scope: variable scope for the learned gain/bias.
    reuse: if True, reuse existing variables in `scope`.
    gamma_start: initial value for the gain `ln_gamma`.
    epsilon: numerical-stability constant added to the variance.
    use_bias: if True, also learn an additive bias `ln_beta`.

  Returns:
    Tensor of shape [batch_size, base * num_units], normalized per block
    and scaled (and shifted, when `use_bias`) by learned parameters.
  """
  # Reshape so each block sits on its own axis, then standardize along it.
  h_reshape = tf.reshape(h, [batch_size, base, num_units])
  mean = tf.reduce_mean(h_reshape, [2], keep_dims=True)
  var = tf.reduce_mean(tf.square(h_reshape - mean), [2], keep_dims=True)
  epsilon = tf.constant(epsilon)
  rstd = tf.rsqrt(var + epsilon)
  h_reshape = (h_reshape - mean) * rstd
  # reshape back to original
  h = tf.reshape(h_reshape, [batch_size, base * num_units])
  with tf.variable_scope(scope):
    if reuse:
      tf.get_variable_scope().reuse_variables()
    # Bug fix: the parameter shapes were hard-coded to [4 * num_units],
    # which breaks for any base != 4. Use [base * num_units]; identical
    # shapes for the base=4 callers in this file.
    gamma = tf.get_variable(
        'ln_gamma', [base * num_units],
        initializer=tf.constant_initializer(gamma_start))
    if use_bias:
      beta = tf.get_variable(
          'ln_beta', [base * num_units],
          initializer=tf.constant_initializer(0.0))
  if use_bias:
    return gamma * h + beta
  return gamma * h
def layer_norm(x,
               num_units,
               scope='layer_norm',
               reuse=False,
               gamma_start=1.0,
               epsilon=1e-3,
               use_bias=True):
  """Layer-normalize `x` along axis 1 with a learned gain (and bias)."""
  mu = tf.reduce_mean(x, [1], keep_dims=True)
  centered = x - mu
  variance = tf.reduce_mean(tf.square(centered), [1], keep_dims=True)
  rstd = tf.rsqrt(variance + epsilon)
  with tf.variable_scope(scope):
    if reuse:
      tf.get_variable_scope().reuse_variables()
    gamma = tf.get_variable(
        'ln_gamma', [num_units],
        initializer=tf.constant_initializer(gamma_start))
    if use_bias:
      beta = tf.get_variable(
          'ln_beta', [num_units], initializer=tf.constant_initializer(0.0))
  normalized = gamma * centered * rstd
  if use_bias:
    normalized += beta
  return normalized
def raw_layer_norm(x, epsilon=1e-3):
  """Normalize `x` along axis 1 to zero mean / unit std; no learned params."""
  mu = tf.reduce_mean(x, [1], keep_dims=True)
  centered = x - mu
  sigma = tf.sqrt(
      tf.reduce_mean(tf.square(centered), [1], keep_dims=True) + epsilon)
  return centered / sigma
def super_linear(x,
                 output_size,
                 scope=None,
                 reuse=False,
                 init_w='ortho',
                 weight_start=0.0,
                 use_bias=True,
                 bias_start=0.0,
                 input_size=None):
  """Linear layer y = x W (+ b) with a selectable weight initializer."""
  with tf.variable_scope(scope or 'linear'):
    if reuse:
      tf.get_variable_scope().reuse_variables()
    if input_size is None:
      x_size = x.get_shape().as_list()[1]
    else:
      x_size = input_size
    # Map the init_w keyword onto an initializer factory; unknown keywords
    # fall back to the framework default (uniform).
    init_factories = {
        'zeros': lambda: tf.constant_initializer(0.0),
        'constant': lambda: tf.constant_initializer(weight_start),
        'gaussian': lambda: tf.random_normal_initializer(stddev=weight_start),
        'ortho': lambda: lstm_ortho_initializer(1.0),
    }
    factory = init_factories.get(init_w)
    w_init = factory() if factory is not None else None
    w = tf.get_variable(
        'super_linear_w', [x_size, output_size], tf.float32, initializer=w_init)
    if not use_bias:
      return tf.matmul(x, w)
    b = tf.get_variable(
        'super_linear_b', [output_size],
        tf.float32,
        initializer=tf.constant_initializer(bias_start))
    return tf.matmul(x, w) + b
class LayerNormLSTMCell(contrib_rnn.RNNCell):
  """Layer-Norm, with Ortho Init. and Recurrent Dropout without Memory Loss.

  https://arxiv.org/abs/1607.06450 - Layer Norm
  https://arxiv.org/abs/1603.05118 - Recurrent Dropout without Memory Loss

  NOTE: state is packed as [h, c] along axis 1 -- the reverse of
  LSTMCell above, which packs [c, h].
  """
  def __init__(self,
               num_units,
               forget_bias=1.0,
               use_recurrent_dropout=False,
               dropout_keep_prob=0.90):
    """Initialize the Layer Norm LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (default 1.0).
      use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
      dropout_keep_prob: float, dropout keep probability (default 0.90)
    """
    self.num_units = num_units
    self.forget_bias = forget_bias
    self.use_recurrent_dropout = use_recurrent_dropout
    self.dropout_keep_prob = dropout_keep_prob
  @property
  def input_size(self):
    return self.num_units
  @property
  def output_size(self):
    return self.num_units
  @property
  def state_size(self):
    return 2 * self.num_units
  def get_output(self, state):
    """Extract h (the first half) from a packed [h, c] state tensor."""
    h, unused_c = tf.split(state, 2, 1)
    return h
  def __call__(self, x, state, timestep=0, scope=None):
    # `timestep` is accepted for interface compatibility but unused here.
    with tf.variable_scope(scope or type(self).__name__):
      h, c = tf.split(state, 2, 1)
      h_size = self.num_units
      x_size = x.get_shape().as_list()[1]
      # Static batch size is required by layer_norm_all's reshape.
      batch_size = x.get_shape().as_list()[0]
      w_init = None  # uniform
      h_init = lstm_ortho_initializer(1.0)
      w_xh = tf.get_variable(
          'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
      w_hh = tf.get_variable(
          'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
      concat = tf.concat([x, h], 1)  # concat for speed.
      w_full = tf.concat([w_xh, w_hh], 0)
      # No additive bias here: layer norm's learned beta subsumes it.
      concat = tf.matmul(concat, w_full)
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      concat = layer_norm_all(concat, batch_size, 4, h_size, 'ln_all')
      i, j, f, o = tf.split(concat, 4, 1)
      if self.use_recurrent_dropout:
        g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
      else:
        g = tf.tanh(j)
      new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
      # Cell state is layer-normalized before the output squash.
      new_h = tf.tanh(layer_norm(new_c, h_size, 'ln_c')) * tf.sigmoid(o)
      return new_h, tf.concat([new_h, new_c], 1)
class HyperLSTMCell(contrib_rnn.RNNCell):
"""HyperLSTM with Ortho Init, Layer Norm, Recurrent Dropout, no Memory Loss.
https://arxiv.org/abs/1609.09106
http://blog.otoro.net/2016/09/28/hyper-networks/
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_recurrent_dropout=False,
dropout_keep_prob=0.90,
use_layer_norm=True,
hyper_num_units=256,
hyper_embedding_size=32,
hyper_use_recurrent_dropout=False):
"""Initialize the Layer Norm HyperLSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (default 1.0).
use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
dropout_keep_prob: float, dropout keep probability (default 0.90)
use_layer_norm: boolean. (default True)
Controls whether we use LayerNorm layers in main LSTM & HyperLSTM cell.
hyper_num_units: int, number of units in HyperLSTM cell.
(default is 128, recommend experimenting with 256 for larger tasks)
hyper_embedding_size: int, size of signals emitted from HyperLSTM cell.
(default is 16, recommend trying larger values for large datasets)
hyper_use_recurrent_dropout: boolean. (default False)
Controls whether HyperLSTM cell also uses recurrent dropout.
Recommend turning this on only if hyper_num_units becomes large (>= 512)
"""
self.num_units = num_units
self.forget_bias = forget_bias
self.use_recurrent_dropout = use_recurrent_dropout
self.dropout_keep_prob = dropout_keep_prob
self.use_layer_norm = use_layer_norm
self.hyper_num_units = hyper_num_units
self.hyper_embedding_size = hyper_embedding_size
self.hyper_use_recurrent_dropout = hyper_use_recurrent_dropout
self.total_num_units = self.num_units + self.hyper_num_units
if self.use_layer_norm:
cell_fn = LayerNormLSTMCell
else:
cell_fn = LSTMCell
self.hyper_cell = cell_fn(
hyper_num_units,
use_recurrent_dropout=hyper_use_recurrent_dropout,
dropout_keep_prob=dropout_keep_prob)
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self.num_units
@property
def state_size(self):
return 2 * self.total_num_units
def get_output(self, state):
total_h, unused_total_c = tf.split(state, 2, 1)
h = total_h[:, 0:self.num_units]
return h
def hyper_norm(self, layer, scope='hyper', use_bias=True):
num_units = self.num_units
embedding_size = self.hyper_embedding_size
# recurrent batch norm init trick (https://arxiv.org/abs/1603.09025).
init_gamma = 0.10 # cooijmans' da man.
with tf.variable_scope(scope):
zw = super_linear(
self.hyper_output,
embedding_size,
init_w='constant',
weight_start=0.00,
use_bias=True,
bias_start=1.0,
scope='zw')
alpha = super_linear(
zw,
num_units,
init_w='constant',
weight_start=init_gamma / embedding_size,
use_bias=False,
scope='alpha')
result = tf.multiply(alpha, layer)
if use_bias:
zb = super_linear(
self.hyper_output,
embedding_size,
init_w='gaussian',
weight_start=0.01,
use_bias=False,
bias_start=0.0,
scope='zb')
beta = super_linear(
zb,
num_units,
init_w='constant',
weight_start=0.00,
use_bias=False,
scope='beta')
result += beta
return result
def __call__(self, x, state, timestep=0, scope=None):
with tf.variable_scope(scope or type(self).__name__):
total_h, total_c = tf.split(state, 2, 1)
h = total_h[:, 0:self.num_units]
| |
"authority_provided_id": "172d813cde76caaf4b2cbfad97dafedd0ccff800",
},
{
"created": "2020-04-07T14:34:11.063Z",
"authority_provided_id": "204fca4d6ec2a631822068be6f84b61f0f598ce4",
},
{
"created": "2020-04-07T14:42:51.082Z",
"authority_provided_id": "6b6021fddce4da037ed0bd29e6f6067a99791f21",
},
{
"created": "2020-04-07T15:02:43.007Z",
"authority_provided_id": "fab41a581dd7e10bcfd5728a5295e450dc52b9a0",
},
{
"created": "2020-04-07T15:13:24.369Z",
"authority_provided_id": "3efa08e0b256b1deccf74eac7ea7656cf9af3d26",
},
{
"created": "2020-04-07T15:26:17.566Z",
"authority_provided_id": "077979634c4e292cd88863f7e2fdcf4c0ed0494d",
},
{
"created": "2020-04-07T15:32:59.865Z",
"authority_provided_id": "f81ffbc99a1b0826b8089dd3e8f2c0f9beb870f3",
},
{
"created": "2020-04-07T16:05:14.637Z",
"authority_provided_id": "671139ac30dd7bc02db73678fea528d168b76c09",
},
{
"created": "2020-04-07T16:28:13.283Z",
"authority_provided_id": "46b8f18a8ab795f8ef4d60c833d3f9372d0330eb",
},
{
"created": "2020-04-07T17:47:52.789Z",
"authority_provided_id": "17c53046de2c6250c9011a3bc406e887ad866f3d",
},
{
"created": "2020-04-07T18:34:26.551Z",
"authority_provided_id": "e55c58a3b7ab3d828fda7bd6bb323673141bad29",
},
{
"created": "2020-04-07T18:36:05.501Z",
"authority_provided_id": "96382b24d417dc0ca51c8450e7e2f74663a747c8",
},
{
"created": "2020-04-07T18:56:22.045Z",
"authority_provided_id": "b6d91ab3b384db0f85cc8062aa7b8b8d29d12373",
},
{
"created": "2020-04-07T19:04:22.733Z",
"authority_provided_id": "5faea556f78438f3a7baef4db17265593ab8d587",
},
{
"created": "2020-04-07T19:56:22.193Z",
"authority_provided_id": "14367455de335069487dbb6bbd9f0822868092d1",
},
{
"created": "2020-04-07T19:14:57.041Z",
"authority_provided_id": "e15ec557928be80ea9644c93b5b2da885e83d357",
},
{
"created": "2020-04-03T18:37:11.305Z",
"authority_provided_id": "8850ad83696f550939a19e23dde74aa59f3e3777",
},
{
"created": "2020-04-07T23:35:56.511Z",
"authority_provided_id": "181e85942f4dacbf9b9388cfbce08584b2ec4a50",
},
{
"created": "2020-04-08T00:05:20.997Z",
"authority_provided_id": "535e6238a4e93841d85e95d3c505e96ac3d9b558",
},
{
"created": "2020-04-08T13:03:12.753Z",
"authority_provided_id": "c7a3541ccae652caa7b20989691e63ef0977ae1a",
},
{
"created": "2020-04-07T19:30:47.757Z",
"authority_provided_id": "754083365786432ea6c19875725272bb3020fad4",
},
{
"created": "2020-04-07T19:39:27.442Z",
"authority_provided_id": "5a2444d76b6cb55cb28e4c8d235d91cddc6dfb85",
},
{
"created": "2020-04-07T20:11:14.017Z",
"authority_provided_id": "eaa4f2c67a19a82fd2d9d086fc5adc0d3dc3bc45",
},
{
"created": "2020-04-07T20:18:54.657Z",
"authority_provided_id": "dbf6e48c78e71025f6313ea2a8b38c41f7acc095",
},
{
"created": "2019-05-29T13:22:45.157Z",
"authority_provided_id": "49aa1b25794c61429ec477c745a6825e4689935c",
},
{
"created": "2020-04-07T20:30:51.003Z",
"authority_provided_id": "409d2aa1772466e0994057c8716738674ce4f464",
},
{
"created": "2020-04-07T20:41:49.565Z",
"authority_provided_id": "5e56d82a8977bf31c18d992c840903b42f2d2892",
},
{
"created": "2019-10-10T14:59:50.887Z",
"authority_provided_id": "c48ef568494870fa3401d47985b64e5f1474df59",
},
{
"created": "2020-04-07T21:02:00.892Z",
"authority_provided_id": "7a542bffba26db14cad78065825920f05998dd20",
},
{
"created": "2020-04-08T13:56:17.491Z",
"authority_provided_id": "eff5291296da03bfe8ee931055bff0f1ac6a92d0",
},
{
"created": "2020-04-07T22:39:29.789Z",
"authority_provided_id": "83a69170f5b05bf0135893f9d3190356a415410c",
},
{
"created": "2020-04-08T08:50:30.236Z",
"authority_provided_id": "1f094a81cfc5071994f672da942c85654a6242e6",
},
{
"created": "2020-04-08T00:08:27.242Z",
"authority_provided_id": "da531836e85edcb322120970db092c4548a28bfc",
},
{
"created": "2020-04-08T13:08:03.258Z",
"authority_provided_id": "0f394b3c5bd140731380926f32541e65d2dfc4c0",
},
{
"created": "2020-04-08T00:19:02.238Z",
"authority_provided_id": "cc4b6847e60cf56fc974d95158aebfd4fcc3b6b2",
},
{
"created": "2020-04-08T12:58:13.650Z",
"authority_provided_id": "9f03a7dd093e0376b855831d9c9b4c6a8a1f158c",
},
{
"created": "2020-04-08T08:41:29.292Z",
"authority_provided_id": "b5608c72753bead5edad116d30083ca06029caa0",
},
{
"created": "2020-04-08T19:04:52.654Z",
"authority_provided_id": "c490f7e1e3d199eba962a7facb481d730dbcb518",
},
{
"created": "2020-04-08T14:32:36.994Z",
"authority_provided_id": "dd9fb03d6f2567b0a74e8a7329f3a129b4c24843",
},
{
"created": "2020-04-08T14:55:03.789Z",
"authority_provided_id": "0895e959bd983eb1f5e2a2d78f81367589476401",
},
{
"created": "2020-04-08T14:57:32.314Z",
"authority_provided_id": "78ff1937b455b072cf0ac3975e7d625cda92275d",
},
{
"created": "2020-04-08T17:47:20.911Z",
"authority_provided_id": "8d274d5e345010bf8be77db8e45adce0076f188c",
},
{
"created": "2020-04-08T17:52:25.019Z",
"authority_provided_id": "2326c38a5885e06ba8f17f9e329f3623dc93ac4c",
},
{
"created": "2020-04-08T16:17:44.668Z",
"authority_provided_id": "0af9fe3c3aa2b435debd1d14124d7d195bf20145",
},
{
"created": "2020-03-16T14:35:09.052Z",
"authority_provided_id": "593e4c0fe0cb0f9c74828e49f7fbb405214e1b7d",
},
{
"created": "2020-04-08T17:00:01.658Z",
"authority_provided_id": "f1c1749418fc64ae689d81173e9527900a9826dc",
},
{
"created": "2020-04-08T17:06:30.267Z",
"authority_provided_id": "74e8f7eb09646975794ed47e96c142a49bdef96a",
},
{
"created": "2020-04-08T19:09:37.965Z",
"authority_provided_id": "f86b692c82f1c6e0bab1368129acac51f60bba1b",
},
{
"created": "2020-04-08T19:59:19.295Z",
"authority_provided_id": "704dc291b7f94f1547cef42dbf9531a3f23388d7",
},
{
"created": "2020-04-08T20:41:46.809Z",
"authority_provided_id": "74c34282de2c71fcb1667fd6103e47a52b54195f",
},
{
"created": "2020-04-08T22:36:07.904Z",
"authority_provided_id": "7c237695e1b0c0a3fc17da5ec1af54b48ef015b5",
},
{
"created": "2020-04-09T05:17:54.759Z",
"authority_provided_id": "fd70fba04725c67d730a1d15a05a2e03bed75d2e",
},
{
"created": "2020-04-09T19:00:06.965Z",
"authority_provided_id": "fc2c60abb4243fc0f4d0e7e4c0f94c77dfe1a3f2",
},
{
"created": "2020-04-13T20:32:03.474Z",
"authority_provided_id": "6642caae784c8fff842455a15b0f60cf2e9b1c5e",
},
{
"created": "2020-04-09T16:47:37.697Z",
"authority_provided_id": "8e81fbfaa34f765b2a6cac0f94568d67c92abbbf",
},
{
"created": "2020-02-14T23:59:24.247Z",
"authority_provided_id": "7b3eacaf63f586a9d6a970bbb460bdad7acc3da7",
},
{
"created": "2020-04-10T14:28:26.540Z",
"authority_provided_id": "51281b0b9f95142b2730ffad9813eb2ad19c889b",
},
{
"created": "2020-04-10T21:38:20.620Z",
"authority_provided_id": "ff88b669e6f8db7cb0e0a45c356250c8d6635a73",
},
{
"created": "2020-02-23T19:04:01.310Z",
"authority_provided_id": "2e3f9bd082329117cf2d784d2efac23c62daf263",
},
{
"created": "2020-04-11T23:45:36.800Z",
"authority_provided_id": "747f21f6bc9846ccb17b9f85159a8e399685d6ed",
},
{
"created": "2020-04-09T15:41:51.925Z",
"authority_provided_id": "f2363816988400ab7565311b30cdb355090048c5",
},
{
"created": "2020-04-09T20:06:00.761Z",
"authority_provided_id": "feab636e8a95ff0e42fa090f8f8b7069118d1e0b",
},
{
"created": "2020-04-09T23:05:54.607Z",
"authority_provided_id": "98e4ef5e26a38d1541398986a4777c121e4852ea",
},
{
"created": "2020-04-10T04:57:00.709Z",
"authority_provided_id": "5fc26f0cf55db6cdfc4543c2c624a712f0f1d7e5",
},
{
"created": "2020-04-11T21:16:01.497Z",
"authority_provided_id": "919a028ec94a8891516ba8dddb3017d13acb721e",
},
{
"created": "2020-04-11T22:06:57.371Z",
"authority_provided_id": "318662ffa9680f83d55fb385775d60028b550378",
},
{
"created": "2019-01-07T19:14:24.159Z",
"authority_provided_id": "1a983dee29655dd3dcb0a3399d8cb9fd3ea7f3a6",
},
{
"created": "2020-04-13T08:28:11.098Z",
"authority_provided_id": "3d0422dc9b3ed1e63e86b0d776f97b960227fbf2",
},
{
"created": "2020-04-13T20:11:07.995Z",
"authority_provided_id": "a474a0d725e1d2c864792800137ca9769b681109",
},
{
"created": "2020-04-10T16:24:49.603Z",
"authority_provided_id": "b542567087d702c8dbf98300f1f8aecc5ca75261",
},
{
"created": "2020-04-10T22:12:00.007Z",
"authority_provided_id": "fa6010654c3b80784b3ff0e32e8ec69879ed0328",
},
{
"created": "2020-04-10T23:19:31.530Z",
"authority_provided_id": "aba578c81e9c870d70efe1cb3d42af42216e8316",
},
{
"created": "2020-04-13T03:26:09.488Z",
"authority_provided_id": "db7df6fccc9b113741d2c0d554b63cf9d8a15daf",
},
{
"created": "2020-04-13T03:38:53.387Z",
"authority_provided_id": "7af83896e21e778f924ca7d55aaa74d4e988afca",
},
{
"created": "2020-04-13T13:05:32.740Z",
"authority_provided_id": "bef6301aeeeaa12e1b7f72254b7159284c76adc2",
},
{
"created": "2020-04-13T17:09:01.389Z",
"authority_provided_id": "21799682f765fc9de4ffbb36ad2945adadd0c9de",
},
{
"created": "2020-04-09T14:28:22.621Z",
"authority_provided_id": "4003f07d3298ab1dfd223b512a89ff7ce0e50a35",
},
{
"created": "2020-04-09T15:49:27.126Z",
"authority_provided_id": "626c7a7a6e98a3d8f75d7e379cb09fd448864905",
},
{
"created": "2020-04-09T19:02:16.882Z",
"authority_provided_id": "31e1e59f24fa1ed9625ea08c97cd4a7c99123eaa",
},
{
"created": "2020-04-10T04:24:06.867Z",
"authority_provided_id": "d91d077612732bca060b5b85747c175c28239ce2",
},
{
"created": "2019-02-08T21:45:59.479Z",
"authority_provided_id": "9b5313214290455bc7dd736a6d2c5947f6e8a3d6",
},
{
"created": "2020-04-09T15:07:52.512Z",
"authority_provided_id": "c3c19ce58b867c4999f867db677804e4544af8b9",
},
{
"created": "2020-04-09T15:55:10.555Z",
"authority_provided_id": "b3fcf3e6fd34e499e075507c4da8610ca3553ba3",
},
{
"created": "2020-04-09T17:15:13.814Z",
"authority_provided_id": "b86248378b6560e3949032f556397e6f77378a15",
},
{
"created": "2020-04-13T06:28:22.168Z",
"authority_provided_id": "74838366523ceb078dbb0af2fb8c6340e1f0677b",
},
{
"created": "2020-04-09T17:23:48.781Z",
"authority_provided_id": "de4a3dde7a52ba10d8e5d01bd09eccc6b164b215",
},
{
"created": "2020-04-10T21:52:56.327Z",
"authority_provided_id": "4823da5e750291c3fa8ed98dca2fd84d6add673d",
},
{
"created": "2020-04-12T23:32:51.437Z",
"authority_provided_id": "a9735c279cfb5cb6a8a86abaf96b5c634ddb40af",
},
{
"created": "2020-04-13T11:34:41.544Z",
"authority_provided_id": "fef808ec9d2ffb61e60a258d5d482e62aac457b0",
},
{
"created": "2020-04-09T17:27:43.125Z",
"authority_provided_id": "799a2f1427b0691499bb11e08551fccea3fd8858",
},
{
"created": "2020-04-09T18:30:04.977Z",
"authority_provided_id": "c19d8b6b82a394dac2e6fa87a22bd5a313254a57",
},
{
"created": "2020-04-11T20:41:13.838Z",
"authority_provided_id": "7ed76117dc9510c2708f030ef9ea79e8c0722351",
},
{
"created": "2020-04-09T18:32:34.291Z",
"authority_provided_id": "fcad01ec66829d90dc3fb912e8141ea542f58ce7",
},
{
"created": "2020-04-09T19:49:29.063Z",
"authority_provided_id": "bb275ead0757bf8b251f5905f3684635fa35aa42",
},
{
"created": "2020-04-10T03:05:00.702Z",
"authority_provided_id": "d3b290ab7307e0aa3f5975123e31bc93364a057a",
},
{
"created": "2020-04-09T13:11:54.928Z",
"authority_provided_id": "b097e75995b885bd2495b141290d35326d1f4d29",
},
{
"created": "2020-04-08T23:03:09.269Z",
"authority_provided_id": "3db42928ac52d86b9b0676af5fece5cf2a564e77",
},
{
"created": "2020-04-10T00:04:13.426Z",
"authority_provided_id": "3a2a9eedf8759df2bed07f9a24cd2e5ad9fc38fd",
},
{
"created": "2019-08-31T00:07:00.575Z",
"authority_provided_id": "79256bf00f8203195ff76c6db9c8847c6c43656e",
},
{
"created": "2020-04-10T03:28:55.498Z",
"authority_provided_id": "a3ee356b232a04a3d4f2618452be02343dfe67f9",
},
{
"created": "2020-04-10T15:29:13.329Z",
"authority_provided_id": "d0d314e7b52a841e1c2487531ebdfc570bc2cd56",
},
{
"created": "2020-04-14T14:56:37.562Z",
"authority_provided_id": "77c7a546573d83be3936b5837abc0708e2b04545",
},
{
"created": "2020-04-11T19:01:34.357Z",
"authority_provided_id": "70f60c9f66853a89fdb310137442093f3507fd7c",
},
{
"created": "2020-04-11T19:21:53.412Z",
"authority_provided_id": "1f2deb34d572bae11d56f38ee0cde2df710df9b8",
},
{
"created": "2020-04-11T21:31:11.564Z",
"authority_provided_id": "534e0ac08108b4f7bf9583166b5ddb34988de63c",
},
{
"created": "2020-04-13T08:59:49.179Z",
"authority_provided_id": "851fbe7e010539397fd188775f525655de39b6ce",
},
{
"created": "2020-04-13T13:01:59.033Z",
"authority_provided_id": "bbb637047a9e40d85c544249f448b235e8754df8",
},
{
"created": "2019-10-25T08:12:40.024Z",
"authority_provided_id": "b06b1c42d712aaecff11fdb04f22fdf6acf74d49",
},
{
"created": "2019-08-02T17:14:27.194Z",
"authority_provided_id": "902eff8ea9bc1cc7f75d544af55ab062b30eeb6e",
},
{
"created": "2020-04-10T13:08:19.867Z",
"authority_provided_id": "f1ade069a38e2600f9bcececf5610bbdce97e8fe",
},
{
"created": "2020-04-10T21:52:12.462Z",
"authority_provided_id": "69072345ebfbc1afa88ed8ba66088c68425c23dc",
},
{
"created": "2020-04-10T15:03:03.544Z",
"authority_provided_id": "2cb93eb164754afa056e7530a2d4c329ba17a79d",
},
{
"created": "2020-04-10T15:09:03.931Z",
"authority_provided_id": "da9aacf0f1df75dddf51bb6931e83968926adda9",
},
{
"created": "2020-04-10T16:47:50.155Z",
"authority_provided_id": "cba25875a02a2782e0e3c5aaed3a37b9182d3543",
},
{
"created": "2020-04-13T15:22:10.238Z",
"authority_provided_id": "e592eac3e0ec8c99fb7f5f222444bd6549a383a0",
},
{
"created": "2020-04-13T19:34:57.689Z",
"authority_provided_id": "efbc5768401c4e450fb51bb0d751d46dc67a0233",
},
{
"created": "2020-04-10T19:19:41.013Z",
"authority_provided_id": "dc00270089ebcc5edabd22e7bc29918fb8218d88",
},
{
"created": "2020-04-11T18:57:36.233Z",
"authority_provided_id": "b0a54b66ac099bd31d8b7a98d5ab3a2f254b039e",
},
{
"created": "2020-04-13T19:14:29.652Z",
"authority_provided_id": "3f8d96a6630e8f61b8a01f142e44daa5e02695ae",
},
{
"created": "2020-04-10T21:46:19.483Z",
"authority_provided_id": "2c93cb09a1d5a5ae15ed902090d875dc503b70cb",
},
{
"created": "2020-04-10T21:58:04.464Z",
"authority_provided_id": "5bfe407d6cd862ad01cf2788ce8e5ddaf9010448",
},
{
"created": "2020-04-11T20:15:53.773Z",
"authority_provided_id": "bd9b7f2283b1ec3d51874673a3ab8d7f351b08dc",
},
{
"created": "2020-02-28T13:47:15.831Z",
"authority_provided_id": "9275bc1285901b5b39e7a1f9a83f332365cc60b5",
},
{
"created": "2020-01-28T18:52:19.521Z",
"authority_provided_id": "390483c20cafd976304caa74e79fb7baafed2aa0",
},
{
"created": "2020-04-13T17:54:18.355Z",
"authority_provided_id": "a5f8141f09ae098dee7f935d4436021b239ab906",
},
{
"created": "2020-04-13T18:22:23.641Z",
"authority_provided_id": "5fc0334f6fcd4d3de4a04603a2ad2c9aa2abb3c5",
},
{
"created": "2020-04-13T19:31:30.217Z",
"authority_provided_id": "c1f7411b8795c36598fc8cafc555bfe81224d22c",
},
{
"created": "2020-04-14T13:34:43.381Z",
"authority_provided_id": "1ba85046fa737c7f0dc03bfa4bf506b80261f8c3",
},
{
"created": "2020-04-13T19:43:05.226Z",
"authority_provided_id": "82c727ff31ebb2039a29ca538e7c331bc2e2c176",
},
{
"created": "2020-04-13T20:03:38.086Z",
"authority_provided_id": "2512460a51cc511d571dfea384287915dc3b4d98",
},
{
"created": "2020-04-13T15:05:40.158Z",
"authority_provided_id": "6774817796254306bd2e768f47278392fc83013c",
},
{
"created": "2020-04-13T16:59:07.169Z",
"authority_provided_id": "7ab8f440df8e5b0a41a1b91d59a73bd98942feea",
},
{
"created": "2020-04-13T17:40:00.369Z",
"authority_provided_id": "f5361eb0f8e79bdd6202bd591320f3915b441c91",
},
{
"created": "2020-04-13T20:05:16.157Z",
"authority_provided_id": "780b51b99c484bed5a4c2cc4a823a45e7282e074",
},
{
"created": "2020-04-13T20:31:57.155Z",
"authority_provided_id": "5119c72ddd2571a292235c11cb6919ccf196c0b2",
},
{
"created": "2020-04-13T20:46:27.693Z",
"authority_provided_id": "4215527716c6e90a6c17eabc432743bcb5c19905",
},
{
"created": "2020-04-13T23:59:29.864Z",
"authority_provided_id": "b51b53a56b6de47a370ee0da5db94a1e69077f57",
},
{
"created": "2020-04-14T04:01:42.205Z",
"authority_provided_id": "5c716ba6dce1a0e37febb309bbb16b8d3fc6c7c2",
},
{
"created": "2019-09-11T15:17:22.316Z",
"authority_provided_id": "a50f7891df86f602b9212a611190b92014f379f9",
},
{
"created": "2020-04-14T14:52:30.771Z",
"authority_provided_id": "325fee266c494f439fcc81f85a701b06dd192ec4",
},
{
"created": "2020-04-14T15:30:27.786Z",
"authority_provided_id": "7b8b76d16c5d2bee62f8677e75a6526299755bb3",
},
{
"created": "2020-04-14T00:42:09.831Z",
"authority_provided_id": "a579b1b8ff1601fec21a19ffaf7729a3db10189e",
},
{
"created": "2020-04-14T12:59:00.449Z",
"authority_provided_id": "e04ab49e9ac88bab40e956212deb796d60c86f13",
},
{
"created": "2020-04-14T14:11:19.307Z",
"authority_provided_id": "b6e32a459c34f91245bd0ad12ab71ae92909514b",
},
{
"created": "2020-04-14T15:26:17.178Z",
"authority_provided_id": "d4e36cf2ed13c9c27c68b6bdbeb6d79df4ba3ea2",
},
{
"created": "2020-04-14T16:01:38.048Z",
"authority_provided_id": "14004e997b3fb6ec829ded756a54ecca78fd446e",
},
{
"created": "2020-04-14T16:03:18.338Z",
"authority_provided_id": "a27317a4c2f8ef9613171e85aca2561338399aae",
},
{
"created": "2020-04-14T17:15:43.094Z",
"authority_provided_id": "05c59a657836f1aea94b949c3ac494b0e597c934",
},
{
"created": "2020-04-14T17:52:08.787Z",
"authority_provided_id": "db2f928960c25691138c7dcf21880af85b281be0",
},
{
"created": "2020-04-14T18:45:19.233Z",
"authority_provided_id": "d7a597574ae9ab505d493d2e78ab25d018a1d48c",
},
{
"created": "2020-04-14T19:49:45.485Z",
"authority_provided_id": "2f9caf6e1139e90f0addd81662696bb0c9867267",
},
{
"created": "2020-04-14T21:02:27.233Z",
"authority_provided_id": "e9ec38917c82cd88879466ad54d026c0d0f0aae2",
},
{
"created": "2019-10-28T20:16:22.813Z",
"authority_provided_id": "ffd283e38e7f55289db1e60f302e290b57ea38e9",
},
{
"created": "2020-04-14T21:50:09.201Z",
"authority_provided_id": "119243db91875dd0b2039d482a2a451240e98e77",
},
{
"created": "2019-09-06T18:07:13.053Z",
"authority_provided_id": "b162d663ea33b46893afff7944481674f39b0ce9",
},
{
"created": "2020-04-15T15:10:12.875Z",
"authority_provided_id": "d5c12a5cdda05c85d76f2e4d9b99ed3b3e5cde6a",
},
{
"created": "2020-04-15T04:18:20.595Z",
"authority_provided_id": "f72cdb6ceadbc3dea06cff56fd70838d9b7450de",
},
{
"created": "2020-04-15T11:10:31.975Z",
"authority_provided_id": "4c171ea395a7d1ad20c1b5f18d3bba7d704ffe07",
},
{
"created": "2020-04-15T11:41:05.063Z",
"authority_provided_id": "2914b72772e4edaa4b8dd42f60ec976bf90cb776",
},
{
"created": "2020-04-15T12:55:28.903Z",
"authority_provided_id": "06f750a096f544821b05353b9a2e401f5cfebd49",
},
{
"created": "2020-04-15T12:56:16.349Z",
"authority_provided_id": "f57109ab93d0a07c8f68009f826ea1e02423c86d",
},
{
"created": "2020-04-15T13:38:11.324Z",
"authority_provided_id": "876ddf96215b06b67b8409866a1d9290fb94a1db",
},
{
"created": "2020-04-05T17:28:05.156Z",
"authority_provided_id": "4128daf2bb7b56933a186d45a3d5fd0c00760184",
},
{
"created": "2020-04-15T18:15:48.113Z",
"authority_provided_id": "a2a851a494dc407bcfdd5f50c7fc013c6917d073",
},
{
"created": "2020-04-15T18:23:05.921Z",
"authority_provided_id": "ea9b59f558fd9fce7521253fa873d24296e4c448",
},
{
"created": "2020-04-15T18:56:47.892Z",
"authority_provided_id": "55556f2aae4dca377fd986a419e70d549238d574",
},
{
"created": "2020-04-15T19:03:23.784Z",
"authority_provided_id": "83fa1ed337d5e366bffd00b579dff343c11e4e0c",
},
{
"created": "2019-07-22T20:01:30.448Z",
"authority_provided_id": "a3a2433d25ede8a5a32d822eae77c2f01da0cdd5",
},
{
"created": "2020-04-15T21:25:03.638Z",
"authority_provided_id": "b31d52f0b4114d3c4c21a12508cac92cdf10ce0c",
},
{
"created": "2020-04-15T21:37:48.093Z",
"authority_provided_id": "fb1b236bf336e9c06025a07ef6fad86a3314a1d3",
},
{
"created": "2020-04-15T23:11:09.336Z",
"authority_provided_id": "5a8edc7301cf95b6cd477409e18a6bd8440e0b9e",
},
{
"created": "2020-04-15T19:55:05.225Z",
"authority_provided_id": "1d1845e069dbf429ca9cd7f9609e047083b3e4bc",
},
{
"created": "2020-04-15T23:43:50.687Z",
"authority_provided_id": "52b4fc974747434b9b879d238e076a5ea8162e7a",
},
{
"created": "2020-04-15T23:58:18.325Z",
"authority_provided_id": "b4003d0e4a537c0025db0be768c188f9c63d6410",
},
{
"created": "2020-04-16T14:41:59.389Z",
"authority_provided_id": "3c4b5b427f71059639719c2a24cffa40e876c47f",
},
{
"created": "2020-04-17T00:21:48.578Z",
"authority_provided_id": "23d35d55f9ee9c6597d26d3b678c25ae58418915",
},
{
"created": "2020-04-17T22:44:35.610Z",
"authority_provided_id": "a858ff13ac3ff0999ca9371fc986be8df384106d",
},
{
"created": "2020-04-17T18:51:33.791Z",
"authority_provided_id": "8d7348cf8f0a8ca43150df9fc11403c3c44772d4",
},
{
"created": "2020-04-17T16:46:41.935Z",
"authority_provided_id": "b6143a04a00782b27b5d3ae23d491f53794cd34d",
},
{
"created": "2019-12-04T17:28:56.135Z",
"authority_provided_id": "38fb2dc33f186fe489fe98394562a83f3e7f9a7f",
},
{
"created": "2020-04-16T18:11:55.099Z",
"authority_provided_id": "4395972ad970ac080619d23f9e78ca01304630bc",
},
{
"created": "2020-04-17T16:30:09.165Z",
"authority_provided_id": "575cd123c82901d869c4eb02971f2ee19847601a",
},
{
"created": "2020-04-16T23:24:45.316Z",
"authority_provided_id": "c45a625b5d7edf98d85d4bc5213a93ef40e69ba2",
},
{
"created": "2020-04-18T19:31:07.507Z",
"authority_provided_id": "793ab7355ef343e6c6b50f2099de39509ecd1a51",
},
{
"created": "2020-04-16T13:48:23.380Z",
"authority_provided_id": "5ac9a777a1a1d31410f0dc89c565ffdbb4883e92",
},
{
"created": "2019-01-24T21:23:30.268Z",
"authority_provided_id": "9bb0384748400646dc7c836e8b42655630edf476",
},
{
"created": "2020-04-17T16:18:36.224Z",
"authority_provided_id": "8f7239d22194d85a5c5ff63837f6155f4d349986",
},
{
"created": "2020-04-19T18:34:08.039Z",
"authority_provided_id": "065bfbe1317061cecbe5e5c1420bcf0ea876f0bf",
},
{
"created": "2020-04-17T20:31:00.606Z",
"authority_provided_id": "afc2d9654e60f0e2b5665cce416e6bba8e1f4488",
},
{
"created": "2020-04-16T15:49:24.248Z",
"authority_provided_id": "01979becb2a2dcabdf0bf4c6eabb5d1976944b13",
},
{
"created": "2020-04-16T16:16:21.604Z",
"authority_provided_id": "f69e196229d1bdcebdd5da96532560c66c4f35b3",
},
{
"created": "2020-04-16T18:55:58.876Z",
"authority_provided_id": "962e684f5ea0b8046803d8b70e0a9cdf5ac34f19",
},
{
"created": "2020-04-16T18:40:45.121Z",
"authority_provided_id": "712883f347163f2128344325a2c919ead428385f",
},
{
"created": "2020-04-16T19:16:27.310Z",
"authority_provided_id": "5beed0b12053976e5074270b9ee77484e7513f3f",
},
{
"created": "2020-04-16T19:26:18.195Z",
"authority_provided_id": "6658bedb2cbc6c94ad4d75e521cbd024ddf55f13",
},
{
"created": "2020-04-16T19:42:58.748Z",
"authority_provided_id": "4b18a57ef6f2fc66bbd63e7bf7fd56c66b75650a",
},
{
"created": "2020-04-17T15:00:20.750Z",
"authority_provided_id": "fcd4cf7829e290d8f1ac0dec946b576646fb6a48",
},
{
"created": "2020-04-16T20:04:17.692Z",
"authority_provided_id": "f890a5713497285ee329308708431fc6083c3f5f",
},
{
"created": "2020-04-16T20:08:06.106Z",
"authority_provided_id": "ec058e966d49bb0c2013e410c0c2900ade3f1357",
},
{
"created": "2020-04-17T13:06:27.574Z",
"authority_provided_id": "c1b51f9eb8c7968e2d2612655f752dcb53726a66",
},
{
"created": "2020-04-18T16:40:36.490Z",
"authority_provided_id": "e80b82fa91b05d04dd901c9977444203587aaeee",
},
{
"created": "2020-04-18T00:28:25.043Z",
"authority_provided_id": "68478980dadb9d506ae41a7ed238c4c99945a7a2",
},
{
"created": "2020-04-18T22:53:11.173Z",
"authority_provided_id": "a726c03a877e3ed63e91829f16097106141b2f30",
},
{
"created": "2020-04-17T17:27:45.817Z",
"authority_provided_id": "22634f29600708be77487707e8daab63b65ec5d1",
},
{
"created": "2020-04-17T22:00:20.612Z",
"authority_provided_id": "7f38698b7ddf6e07d32c72f7978254b3705188ba",
},
{
"created": "2020-04-19T19:54:02.434Z",
"authority_provided_id": "3e36f8a20cde65db2ba4a95166b87d9127690d05",
},
{
"created": "2020-04-17T23:24:42.393Z",
"authority_provided_id": "7a5d8611b7349839396e3ed6a0126c6ca045374c",
},
{
"created": "2020-04-18T06:14:35.000Z",
"authority_provided_id": "35d2eb44069122c8266ef370ed3a8f667c6f35bc",
},
{
"created": "2020-04-19T21:50:34.576Z",
"authority_provided_id": "194e28acbc51051f091ecfb5e0995de41f60ad1b",
},
{
"created": "2020-04-18T00:07:12.391Z",
"authority_provided_id": "1e0f8fcd2d9b7fc3cca17fac78949df21257bd83",
},
{
"created": "2020-04-18T05:32:38.217Z",
"authority_provided_id": "088b0bd71798a49336fc232c283819439fed8527",
},
{
"created": "2020-04-19T14:49:48.195Z",
"authority_provided_id": "5ad3010ea41004d51390eb8d2f4028b5f46d9ff2",
},
{
"created": "2020-04-19T16:14:49.522Z",
"authority_provided_id": "b9285035095be33b08b0dce7a7a6f70b491e5c95",
},
{
"created": "2020-04-19T18:19:35.660Z",
"authority_provided_id": "8fbe300704dd14d22797f078cdb031a44a56172c",
| |
0, 0, 0, 0],
[787, 778.0, 0, 9999, -9999, 1.0, 100, 1, 778.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[788, 875.0, 0, 9999, -9999, 1.0, 100, 1, 875.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[789, 77.4, 0, 9999, -9999, 1.0, 100, 1, 77.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[790, 75.8, 0, 9999, -9999, 1.0, 100, 1, 75.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[791, 10.0, 0, 9999, -9999, 1.0, 100, 1, 10.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[792, 62.7, 0, 9999, -9999, 1.0, 100, 1, 62.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[793, 9.8, 0, 9999, -9999, 1.0, 100, 1, 9.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[794, 0.2, 0, 9999, -9999, 1.0, 100, 1, 0.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[795, 13.6, 0, 9999, -9999, 1.0, 100, 1, 13.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[796, 85.1, 0, 9999, -9999, 1.0, 100, 1, 85.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[798, 275.523757, 0, 9999, -9999, 1.0, 100, 1, 319.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[800, 36.5, 0, 9999, -9999, 1.0, 100, 1, 36.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[801, 28.385802, 0, 9999, -9999, 1.0, 100, 1, 50.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[802, 500.0, 0, 9999, -9999, 1.0, 100, 1, 500.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[805, 882.568691, 0, 9999, -9999, 1.0, 100, 1, 1410.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[806, 35.8, 0, 9999, -9999, 1.0, 100, 1, 35.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[808, 217.5, 0, 9999, -9999, 1.0, 100, 1, 217.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[809, 12.5, 0, 9999, -9999, 1.0, 100, 1, 12.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[811, 25.2, 0, 9999, -9999, 1.0, 100, 1, 25.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[814, 89.0, 0, 9999, -9999, 1.0, 100, 1, 89.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[815, 13.4, 0, 9999, -9999, 1.0, 100, 1, 13.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[816, 80.1, 0, 9999, -9999, 1.0, 100, 1, 80.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[817, 54.0, 0, 9999, -9999, 1.0, 100, 1, 54.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[818, 749.510841, 0, 9999, -9999, 1.0, 100, 1, 757.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[821, 82.5, 0, 9999, -9999, 1.0, 100, 1, 82.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[822, 134.0, 0, 9999, -9999, 1.0, 100, 1, 134.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[825, 42.7, 0, 9999, -9999, 1.0, 100, 1, 42.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[826, 58.0, 0, 9999, -9999, 1.0, 100, 1, 58.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[830, 89.0, 0, 9999, -9999, 1.0, 100, 1, 89.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[833, 18.6, 0, 9999, -9999, 1.0, 100, 1, 18.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[834, 23.3, 0, 9999, -9999, 1.0, 100, 1, 23.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[835, 63.7, 0, 9999, -9999, 1.0, 100, 1, 63.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[836, 25.5, 0, 9999, -9999, 1.0, 100, 1, 25.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[837, 472.0, 0, 9999, -9999, 1.0, 100, 1, 472.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[839, 73.3, 0, 9999, -9999, 1.0, 100, 1, 73.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[840, 269.255769, 0, 9999, -9999, 1.0, 100, 1, 1391.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[841, 23.3, 0, 9999, -9999, 1.0, 100, 1, 23.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[842, 540.5, 0, 9999, -9999, 1.0, 100, 1, 540.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[843, 333.0, 0, 9999, -9999, 1.0, 100, 1, 333.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[844, 40.0, 0, 9999, -9999, 1.0, 100, 1, 40.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[845, 318.0, 0, 9999, -9999, 1.0, 100, 1, 318.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[848, 42.0, 0, 9999, -9999, 1.0, 100, 1, 42.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[849, 779.0, 0, 9999, -9999, 1.0, 100, 1, 779.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[850, 16.0, 0, 9999, -9999, 1.0, 100, 1, 16.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[851, 79.5, 0, 9999, -9999, 1.0, 100, 1, 79.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[852, 16.0, 0, 9999, -9999, 1.0, 100, 1, 16.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[853, 11.6, 0, 9999, -9999, 1.0, 100, 1, 11.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[855, 688.0, 0, 9999, -9999, 1.0, 100, 1, 688.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[856, 36.0, 0, 9999, -9999, 1.0, 100, 1, 36.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[857, 1402.0, 0, 9999, -9999, 1.0, 100, 1, 1402.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[858, 56.8, 0, 9999, -9999, 1.0, 100, 1, 56.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[859, 85.0, 0, 9999, -9999, 1.0, 100, 1, 85.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[860, 25.0, 0, 9999, -9999, 1.0, 100, 1, 25.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[862, 725.0, 0, 9999, -9999, 1.0, 100, 1, 725.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[863, 0.6, 0, 9999, -9999, 1.0, 100, 1, 0.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[864, 875.0, 0, 9999, -9999, 1.0, 100, 1, 875.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[865, 11.0, 0, 9999, -9999, 1.0, 100, 1, 11.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[866, 260.44, 0, 9999, -9999, 1.0, 100, 1, 260.44, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[867, 769.0, 0, 9999, -9999, 1.0, 100, 1, 769.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[869, 1360.0, 0, 9999, -9999, 1.0, 100, 1, 1360.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[870, 58.4, 0, 9999, -9999, 1.0, 100, 1, 58.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[872, 22.5, 0, 9999, -9999, 1.0, 100, 1, 22.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[873, 122.0, 0, 9999, -9999, 1.0, 100, 1, 122.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[874, 20.7, 0, 9999, -9999, 1.0, 100, 1, 20.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[875, 24.4, 0, 9999, -9999, 1.0, 100, 1, 24.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[876, 58.4, 0, 9999, -9999, 1.0, 100, 1, 58.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[877, 24.8, 0, 9999, -9999, 1.0, 100, 1, 24.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[881, 1001.3, 0, 9999, -9999, 1.0, 100, 1, 1001.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[882, 17.4, 0, 9999, -9999, 1.0, 100, 1, 17.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[883, 18.0, 0, 9999, -9999, 1.0, 100, 1, 18.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[884, 26.5, 0, 9999, -9999, 1.0, 100, 1, 26.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[885, 490.0, 0, 9999, -9999, 1.0, 100, 1, 490.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[886, 2572.0, 0, 9999, -9999, 1.0, 100, 1, 2572.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[888, 35.1, 0, 9999, -9999, 1.0, 100, 1, 35.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[889, 9.5, 0, 9999, -9999, 1.0, 100, 1, 9.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[890, 48.0, 0, 9999, -9999, 1.0, 100, 1, 48.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[893, 60.0, 0, 9999, -9999, 1.0, 100, 1, 60.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[894, 4.130026, 0, 9999, -9999, 1.0, 100, 1, 158.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[895, 19.0, 0, 9999, -9999, 1.0, 100, 1, 19.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[896, 24.0, 0, 9999, -9999, 1.0, 100, 1, 24.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[897, 56.0, 0, 9999, -9999, 1.0, 100, 1, 56.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[898, 84.6, 0, 9999, -9999, 1.0, 100, 1, 84.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[899, 8.5, 0, 9999, -9999, 1.0, 100, 1, 8.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[900, 112.6, 0, 9999, -9999, 1.0, 100, 1, 112.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[902, 19.5, 0, 9999, -9999, 1.0, 100, 1, 19.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[903, 20.1, 0, 9999, -9999, 1.0, 100, 1, 20.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[905, 137.3, 0, 9999, -9999, 1.0, 100, 1, 137.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[906, 66.0, 0, 9999, -9999, 1.0, 100, 1, 66.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[907, 67.3, 0, 9999, -9999, 1.0, 100, 1, 67.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[908, 13.5, 0, 9999, -9999, 1.0, 100, 1, 13.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[909, 36.8, 0, 9999, -9999, 1.0, 100, 1, 36.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[911, 288.5, 0, 9999, -9999, 1.0, 100, 1, 288.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[913, 74.0, 0, 9999, -9999, 1.0, 100, 1, 74.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[914, 112.1, 0, 9999, -9999, 1.0, 100, 1, 112.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[915, 12.0, 0, 9999, -9999, 1.0, 100, 1, 12.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[916, 196.0, 0, 9999, -9999, 1.0, 100, 1, 196.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[917, 17.0, 0, 9999, -9999, 1.0, 100, 1, 17.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[918, 38.5, 0, 9999, -9999, 1.0, 100, 1, 38.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[919, 15.6, 0, 9999, -9999, 1.0, 100, 1, 15.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[920, 12.8, 0, 9999, -9999, 1.0, 100, 1, 12.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[921, 124.0, 0, 9999, -9999, 1.0, 100, 1, 124.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[922, 164.0, 0, 9999, -9999, 1.0, 100, 1, 164.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[923, 146.0, 0, 9999, -9999, 1.0, 100, 1, 146.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[924, 11.7, 0, 9999, -9999, 1.0, 100, 1, 11.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[925, 26.0, 0, 9999, -9999, 1.0, 100, 1, 26.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[928, 61.5, 0, 9999, -9999, 1.0, 100, 1, 61.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[931, 217.1, 0, 9999, -9999, 1.0, 100, 1, 217.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[934, 296.0, 0, 9999, -9999, 1.0, 100, 1, 296.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[935, 23.1, 0, 9999, -9999, 1.0, 100, 1, 23.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[936, 104.4, 0, 9999, -9999, 1.0, 100, 1, 104.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[937, 30.0, 0, 9999, -9999, 1.0, 100, 1, 30.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[939, 0.1, 0, 9999, -9999, 1.0, 100, 1, 0.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[940, 29.6, 0, 9999, -9999, 1.0, 100, 1, 29.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[942, 51.9, 0, 9999, -9999, 1.0, 100, 1, 51.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[943, 66.3, 0, 9999, -9999, 1.0, 100, 1, 66.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[944, 25.4, 0, 9999, -9999, 1.0, 100, 1, 25.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[945, 35.0, 0, 9999, -9999, 1.0, 100, 1, 35.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[946, 80.0, 0, 9999, -9999, 1.0, 100, 1, 80.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[948, 79.0, 0, 9999, -9999, 1.0, 100, 1, 79.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[950, 16.0, 0, 9999, -9999, 1.0, 100, 1, 16.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[952, 31.7, 0, 9999, -9999, 1.0, 100, 1, 31.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[956, 65.0, 0, 9999, -9999, 1.0, 100, 1, 65.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[957, 6.0, 0, 9999, -9999, 1.0, 100, 1, 6.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[958, 66.7, 0, 9999, -9999, 1.0, 100, 1, 66.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[959, 45.5, 0, 9999, -9999, 1.0, 100, 1, 45.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[960, 26.5, 0, 9999, -9999, 1.0, 100, 1, 26.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[963, 798.066783, 0, 9999, -9999, 1.0, 100, 1, 875.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[965, 352.0, 0, 9999, -9999, 1.0, 100, 1, 352.0, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
def viewportTransform(*args, **kwargs):
pass
def viewportUpdateMode(*args, **kwargs):
pass
def wheelEvent(*args, **kwargs):
pass
AnchorUnderMouse = None
AnchorViewCenter = None
BoundingRectViewportUpdate = None
CacheBackground = None
CacheMode = None
CacheModeFlag = None
CacheNone = None
DontAdjustForAntialiasing = None
DontClipPainter = None
DontSavePainterState = None
DragMode = None
FullViewportUpdate = None
IndirectPainting = None
MinimalViewportUpdate = None
NoAnchor = None
NoDrag = None
NoViewportUpdate = None
OptimizationFlag = None
OptimizationFlags = None
RubberBandDrag = None
ScrollHandDrag = None
SmartViewportUpdate = None
ViewportAnchor = None
ViewportUpdateMode = None
__new__ = None
rubberBandChanged = None
staticMetaObject = None
class QDateEdit(QDateTimeEdit):
    """Auto-generated stub for QDateEdit.

    All members are placeholders (``pass`` bodies / ``None`` attributes); the
    real behavior is supplied by the Qt binding at runtime. This stub exists
    only for introspection and editor support.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    __new__ = None
    staticMetaObject = None
    userDateChanged = None  # placeholder; real object provides the signal
class QVBoxLayout(QBoxLayout):
    """Auto-generated stub for QVBoxLayout.

    All members are placeholders; the real behavior is supplied by the Qt
    binding at runtime. This stub exists only for introspection support.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    __new__ = None
    staticMetaObject = None
class QTimeEdit(QDateTimeEdit):
    """Auto-generated stub for QTimeEdit.

    All members are placeholders; the real behavior is supplied by the Qt
    binding at runtime. This stub exists only for introspection support.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    __new__ = None
    staticMetaObject = None
    userTimeChanged = None  # placeholder; real object provides the signal
class QMdiArea(QAbstractScrollArea):
    """Auto-generated stub for QMdiArea.

    Every method body is ``pass`` and every class attribute is ``None``; the
    real implementations and enum values are supplied by the Qt binding at
    runtime. This stub exists only for introspection and editor support.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def activateNextSubWindow(*args, **kwargs):
        pass
    def activatePreviousSubWindow(*args, **kwargs):
        pass
    def activationOrder(*args, **kwargs):
        pass
    def activeSubWindow(*args, **kwargs):
        pass
    def addSubWindow(*args, **kwargs):
        pass
    def background(*args, **kwargs):
        pass
    def cascadeSubWindows(*args, **kwargs):
        pass
    def childEvent(*args, **kwargs):
        pass
    def closeActiveSubWindow(*args, **kwargs):
        pass
    def closeAllSubWindows(*args, **kwargs):
        pass
    def currentSubWindow(*args, **kwargs):
        pass
    def documentMode(*args, **kwargs):
        pass
    def event(*args, **kwargs):
        pass
    def eventFilter(*args, **kwargs):
        pass
    def minimumSizeHint(*args, **kwargs):
        pass
    def paintEvent(*args, **kwargs):
        pass
    def removeSubWindow(*args, **kwargs):
        pass
    def resizeEvent(*args, **kwargs):
        pass
    def scrollContentsBy(*args, **kwargs):
        pass
    def setActivationOrder(*args, **kwargs):
        pass
    def setActiveSubWindow(*args, **kwargs):
        pass
    def setBackground(*args, **kwargs):
        pass
    def setDocumentMode(*args, **kwargs):
        pass
    def setOption(*args, **kwargs):
        pass
    def setTabPosition(*args, **kwargs):
        pass
    def setTabShape(*args, **kwargs):
        pass
    def setTabsClosable(*args, **kwargs):
        pass
    def setTabsMovable(*args, **kwargs):
        pass
    def setViewMode(*args, **kwargs):
        pass
    def setupViewport(*args, **kwargs):
        pass
    def showEvent(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    def subWindowList(*args, **kwargs):
        pass
    def tabPosition(*args, **kwargs):
        pass
    def tabShape(*args, **kwargs):
        pass
    def tabsClosable(*args, **kwargs):
        pass
    def tabsMovable(*args, **kwargs):
        pass
    def testOption(*args, **kwargs):
        pass
    def tileSubWindows(*args, **kwargs):
        pass
    def timerEvent(*args, **kwargs):
        pass
    def viewMode(*args, **kwargs):
        pass
    def viewportEvent(*args, **kwargs):
        pass
    # Enum/flag value placeholders (populated by the real binding).
    ActivationHistoryOrder = None
    AreaOption = None
    AreaOptions = None
    CreationOrder = None
    DontMaximizeSubWindowOnActivation = None
    StackingOrder = None
    SubWindowView = None
    TabbedView = None
    ViewMode = None
    WindowOrder = None
    __new__ = None
    staticMetaObject = None
    subWindowActivated = None  # placeholder; real object provides the signal
class QCommandLinkButton(QPushButton):
    """Auto-generated stub for QCommandLinkButton.

    All members are placeholders; the real behavior is supplied by the Qt
    binding at runtime. This stub exists only for introspection support.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def description(*args, **kwargs):
        pass
    def event(*args, **kwargs):
        pass
    def heightForWidth(*args, **kwargs):
        pass
    def minimumSizeHint(*args, **kwargs):
        pass
    def paintEvent(*args, **kwargs):
        pass
    def setDescription(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    __new__ = None
    staticMetaObject = None
class QPlainTextEdit(QAbstractScrollArea):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def anchorAt(*args, **kwargs):
pass
def appendHtml(*args, **kwargs):
pass
def appendPlainText(*args, **kwargs):
pass
def backgroundVisible(*args, **kwargs):
pass
def blockBoundingGeometry(*args, **kwargs):
pass
def blockBoundingRect(*args, **kwargs):
pass
def blockCount(*args, **kwargs):
pass
def canInsertFromMimeData(*args, **kwargs):
pass
def canPaste(*args, **kwargs):
pass
def centerCursor(*args, **kwargs):
pass
def centerOnScroll(*args, **kwargs):
pass
def changeEvent(*args, **kwargs):
pass
def clear(*args, **kwargs):
pass
def contentOffset(*args, **kwargs):
pass
def contextMenuEvent(*args, **kwargs):
pass
def copy(*args, **kwargs):
pass
def createMimeDataFromSelection(*args, **kwargs):
pass
def createStandardContextMenu(*args, **kwargs):
pass
def currentCharFormat(*args, **kwargs):
pass
def cursorForPosition(*args, **kwargs):
pass
def cursorRect(*args, **kwargs):
pass
def cursorWidth(*args, **kwargs):
pass
def cut(*args, **kwargs):
pass
def doSetTextCursor(*args, **kwargs):
pass
def document(*args, **kwargs):
pass
def documentTitle(*args, **kwargs):
pass
def dragEnterEvent(*args, **kwargs):
pass
def dragLeaveEvent(*args, **kwargs):
pass
def dragMoveEvent(*args, **kwargs):
pass
def dropEvent(*args, **kwargs):
pass
def ensureCursorVisible(*args, **kwargs):
pass
def event(*args, **kwargs):
pass
def extraSelections(*args, **kwargs):
pass
def find(*args, **kwargs):
pass
def firstVisibleBlock(*args, **kwargs):
pass
def focusInEvent(*args, **kwargs):
pass
def focusNextPrevChild(*args, **kwargs):
pass
def focusOutEvent(*args, **kwargs):
pass
def getPaintContext(*args, **kwargs):
pass
def inputMethodEvent(*args, **kwargs):
pass
def inputMethodQuery(*args, **kwargs):
pass
def insertFromMimeData(*args, **kwargs):
pass
def insertPlainText(*args, **kwargs):
pass
def isReadOnly(*args, **kwargs):
pass
def isUndoRedoEnabled(*args, **kwargs):
pass
def keyPressEvent(*args, **kwargs):
pass
def keyReleaseEvent(*args, **kwargs):
pass
def lineWrapMode(*args, **kwargs):
pass
def loadResource(*args, **kwargs):
pass
def maximumBlockCount(*args, **kwargs):
pass
def mergeCurrentCharFormat(*args, **kwargs):
pass
def mouseDoubleClickEvent(*args, **kwargs):
pass
def mouseMoveEvent(*args, **kwargs):
pass
def mousePressEvent(*args, **kwargs):
pass
def mouseReleaseEvent(*args, **kwargs):
pass
def moveCursor(*args, **kwargs):
pass
def overwriteMode(*args, **kwargs):
pass
def paintEvent(*args, **kwargs):
pass
def paste(*args, **kwargs):
pass
def placeholderText(*args, **kwargs):
pass
def print_(*args, **kwargs):
pass
def redo(*args, **kwargs):
pass
def resizeEvent(*args, **kwargs):
pass
def scrollContentsBy(*args, **kwargs):
pass
def selectAll(*args, **kwargs):
pass
def setBackgroundVisible(*args, **kwargs):
pass
def setCenterOnScroll(*args, **kwargs):
pass
def setCurrentCharFormat(*args, **kwargs):
pass
def setCursorWidth(*args, **kwargs):
pass
def setDocument(*args, **kwargs):
pass
def setDocumentTitle(*args, **kwargs):
pass
def setExtraSelections(*args, **kwargs):
pass
def setLineWrapMode(*args, **kwargs):
pass
def setMaximumBlockCount(*args, **kwargs):
pass
def setOverwriteMode(*args, **kwargs):
pass
def setPlaceholderText(*args, **kwargs):
pass
def setPlainText(*args, **kwargs):
pass
def setReadOnly(*args, **kwargs):
pass
def setTabChangesFocus(*args, **kwargs):
pass
def setTabStopWidth(*args, **kwargs):
pass
def setTextCursor(*args, **kwargs):
pass
def setTextInteractionFlags(*args, **kwargs):
pass
def setUndoRedoEnabled(*args, **kwargs):
pass
def setWordWrapMode(*args, **kwargs):
pass
def showEvent(*args, **kwargs):
pass
def tabChangesFocus(*args, **kwargs):
pass
def tabStopWidth(*args, **kwargs):
pass
def textCursor(*args, **kwargs):
pass
def textInteractionFlags(*args, **kwargs):
pass
def timerEvent(*args, **kwargs):
pass
def toPlainText(*args, **kwargs):
pass
def undo(*args, **kwargs):
pass
def wheelEvent(*args, **kwargs):
pass
def wordWrapMode(*args, **kwargs):
pass
def zoomIn(*args, **kwargs):
pass
def zoomInF(*args, **kwargs):
pass
def zoomOut(*args, **kwargs):
pass
LineWrapMode = None
NoWrap = None
WidgetWidth = None
__new__ = None
blockCountChanged = None
copyAvailable = None
cursorPositionChanged = None
modificationChanged = None
redoAvailable = None
| |
import calendar
import logging
import json
import re
import time
import uuid
from datetime import datetime, timedelta
import dateutil.parser
import jwt
from prometheus_client import Counter, Histogram
from app import app
from buildman.build_token import (
build_token,
verify_build_token,
InvalidBearerTokenException,
BUILD_JOB_REGISTRATION_TYPE,
BUILD_JOB_TOKEN_TYPE
)
from buildman.interface import (
BuildStateInterface,
BuildJobAlreadyExistsError,
BuildJobDoesNotExistsError,
BuildJobError,
BuildJobResult,
RESULT_PHASES
)
from buildman.jobutil.buildjob import BuildJob, BuildJobLoadException
from buildman.manager.executor import PopenExecutor, EC2Executor, KubernetesExecutor
from buildman.orchestrator import (
orchestrator_from_config,
KeyEvent,
OrchestratorError,
OrchestratorConnectionError,
ORCHESTRATOR_UNAVAILABLE_SLEEP_DURATION
)
from app import instance_keys
from data import database
from data.database import BUILD_PHASE
from data import model
from util import slash_join
from util.morecollections import AttrDict
# Module-level logger (previously assigned twice; the duplicate assignment
# at the end of this section was redundant and has been removed).
logger = logging.getLogger(__name__)

# Prometheus metrics for build scheduling and execution.
build_fallback = Counter(
    "quay_build_fallback_total", "number of times a build has been retried", labelnames=["executor"]
)
build_ack_duration = Histogram(
    "quay_build_ack_duration_seconds",
    "seconds taken for the builder to acknowledge a queued build",
    labelnames=["executor"],
)
build_duration = Histogram(
    "quay_build_duration_seconds",
    "seconds taken for a build's execution",
    labelnames=["executor", "job_status"],  # status in (COMPLETE, INCOMPLETE, ERROR)
)

# Orchestrator key prefixes used to namespace job, lock, cancel and metric state.
JOB_PREFIX = "building/"
LOCK_PREFIX = "lock/"
CANCEL_PREFIX = "cancel/"
METRIC_PREFIX = "metric/"

# Timeouts (seconds) for ephemeral builder lifecycle stages.
EPHEMERAL_API_TIMEOUT = 20
EPHEMERAL_SETUP_TIMEOUT = 500
WORK_CHECK_TIMEOUT = 10
SETUP_LEEWAY_SECONDS = 30

# Schedule retry durations
RETRY_IMMEDIATELY_SLEEP_DURATION = 0
TOO_MANY_WORKERS_SLEEP_DURATION = 10
CREATED_JOB_TIMEOUT_SLEEP_DURATION = 10
CREATED_JOB_TIMEOUT = 15
JOB_TIMEOUT_SECONDS = 300
MINIMUM_JOB_EXTENSION = timedelta(minutes=1)

# Heartbeat cadence for running builds; HEARTBEAT_DELTA is the staleness bound.
HEARTBEAT_PERIOD_SECONDS = 30
HEARTBEAT_DELTA = timedelta(seconds=60)
class EphemeralBuilderManager(BuildStateInterface):
    """Build manager implementation for ephemeral (single-use) build workers."""

    # Once a build reaches one of these phases, cancellation is no longer allowed.
    PHASES_NOT_ALLOWED_TO_CANCEL_FROM = (
        BUILD_PHASE.PUSHING,
        BUILD_PHASE.COMPLETE,
        BUILD_PHASE.ERROR,
        BUILD_PHASE.INTERNAL_ERROR,
        BUILD_PHASE.CANCELLED,
    )
    # Terminal phases whose builds may be archived.
    ARCHIVABLE_BUILD_PHASES = (BUILD_PHASE.COMPLETE, BUILD_PHASE.ERROR, BUILD_PHASE.CANCELLED)
    # All terminal phases, including internal errors (which are not archivable).
    COMPLETED_PHASES = ARCHIVABLE_BUILD_PHASES + (BUILD_PHASE.INTERNAL_ERROR,)
    # Registry of supported executor kinds, keyed by the EXECUTOR config value.
    EXECUTORS = {
        "popen": PopenExecutor,
        "ec2": EC2Executor,
        "kubernetes": KubernetesExecutor,
    }
    def __init__(self, registry_hostname, manager_hostname, queue, build_logs, user_files, instance_keys):
        """Store collaborators; executors and the orchestrator are wired up in initialize()."""
        self._registry_hostname = registry_hostname  # registry builds push to
        self._manager_hostname = manager_hostname  # hostname used as token issuer/audience
        self._queue = queue  # work queue holding build job items
        self._build_logs = build_logs  # build log storage
        self._user_files = user_files  # storage for uploaded build packages
        self._instance_keys = instance_keys  # service keys used to sign build tokens
        self._ordered_executors = []  # executors in configured fallback order
        self._executor_name_to_executor = {}  # executor name -> executor instance
        self._manager_config = {}  # populated by initialize()
        self._orchestrator = None  # populated by initialize()
def initialize(self, manager_config):
self._manager_config = manager_config
if manager_config.get("EXECUTORS"):
for executor_config in manager_config["EXECUTORS"]:
self._load_executor(executor_config.get("EXECUTOR"), executor_config)
else:
self._load_executor(
manager_config.get("EXECUTOR"), manager_config.get("EXECUTOR_CONFIG")
)
logger.debug("calling orchestrator_from_config")
self._orchestrator = orchestrator_from_config(manager_config)
logger.debug("setting on_key_change callbacks for job expiry, cancel")
self._orchestrator.on_key_change(self._job_prefix, self._job_expired_callback)
self._orchestrator.on_key_change(self._cancel_prefix, self._job_cancelled_callback)
def _load_executor(self, executor_kind_name, executor_config):
executor_klass = EphemeralBuilderManager.EXECUTORS.get(executor_kind_name)
if executor_klass is None:
logger.error("Unknown executor %s; skipping install", executor_kind_name)
return
executor = executor_klass(executor_config, self._manager_hostname)
if executor.name in self._executor_name_to_executor:
raise Exception("Executor with name %s already registered" % executor.name)
self._ordered_executors.append(executor)
self._executor_name_to_executor[executor.name] = executor
def generate_build_token(self, token_type, build_id, job_id, expiration):
return build_token(
self._manager_hostname,
token_type,
build_id,
job_id,
expiration,
self._instance_keys
)
def verify_build_token(self, token, token_type):
return verify_build_token(
token,
self._manager_hostname,
token_type,
self._instance_keys
)
def _config_prefix(self, key):
if self._manager_config.get("ORCHESTRATOR") is None:
return key
prefix = self._manager_config.get("ORCHESTRATOR_PREFIX", "")
return slash_join(prefix, key).lstrip("/") + "/"
    @property
    def _job_prefix(self):
        # Orchestrator key prefix under which active build jobs are stored.
        return self._config_prefix(JOB_PREFIX)
    @property
    def _cancel_prefix(self):
        # Orchestrator key prefix used to signal build cancellations.
        return self._config_prefix(CANCEL_PREFIX)
    @property
    def _metric_prefix(self):
        # Orchestrator key prefix under which per-job metrics are stored.
        return self._config_prefix(METRIC_PREFIX)
    @property
    def _lock_prefix(self):
        # Orchestrator key prefix used for per-job locks.
        return self._config_prefix(LOCK_PREFIX)
    @property
    def machine_max_expiration(self):
        # Maximum total build machine lifetime in seconds (default: 2 hours).
        return self._manager_config.get("MACHINE_MAX_TIME", 7200)
    def _lock_key(self, build_id):
        """Create a key which is used to get a lock on a job in the Orchestrator.

        Returns the lock prefix joined with *build_id*.
        """
        return slash_join(self._lock_prefix, build_id)
    def _metric_key(self, build_id):
        """Create a key which is used to track a job's metrics in the Orchestrator.

        Returns the metric prefix joined with *build_id*.
        """
        return slash_join(self._metric_prefix, build_id)
    def _job_key(self, build_id):
        """Creates a key which is used to track a job in the Orchestrator.

        Returns the job prefix joined with *build_id*.
        """
        return slash_join(self._job_prefix, build_id)
def _build_job_from_job_id(self, job_id):
"""Return the BuildJob from the job id."""
try:
job_data = self._orchestrator.get_key(job_id)
except KeyError:
raise BuildJobDoesNotExistsError(job_id)
except (OrchestratorConnectionError, OrchestratorError) as oe:
raise BuildJobError(oe)
job_metadata = json.loads(job_data)
build_job = BuildJob(AttrDict(job_metadata["job_queue_item"]))
return build_job
def create_job(self, build_id, build_metadata):
"""Create the job in the orchestrator.
The job will expire if it is not scheduled within CREATED_JOB_TIMEOUT.
"""
# Sets max threshold for build heartbeats. i.e max total running time of the build (default: 2h)
# This is separate from the redis key expiration, which is kept alive with heartbeats from the worker.
max_expiration = datetime.utcnow() + timedelta(seconds=self.machine_max_expiration)
build_metadata["max_expiration"] = calendar.timegm(max_expiration.timetuple())
build_metadata["last_heartbeat"] = None
job_key = self._job_key(build_id)
try:
self._orchestrator.set_key(
job_key, json.dumps(build_metadata), overwrite=False, expiration=CREATED_JOB_TIMEOUT,
)
except KeyError:
raise BuildJobAlreadyExistsError(job_key)
except (OrchestratorConnectionError, OrchestratorError) as je:
raise BuildJobError(je)
return job_key
def job_scheduled(self, job_id, control_plane, execution_id, max_startup_time):
"""Mark the given job as scheduled with execution id, with max_startup_time.
A job is considered scheduled once a worker is started with a given registration token.
"""
# Get job to schedule
try:
job_data = self._orchestrator.get_key(job_id)
job_data_json = json.loads(job_data)
except KeyError:
logger.warning("Failed to mark job %s as scheduled. Job no longer exists in the orchestrator", job_id)
return False
except Exception as e:
logger.warning("Exception loading job %s from orchestrator: %s", job_id, e)
return False
# Update build context
job_data_json["executor_name"] = control_plane
job_data_json["execution_id"] = execution_id
try:
self._orchestrator.set_key(
job_id,
json.dumps(job_data_json),
overwrite=True,
expiration=max_startup_time
)
except Exception as e:
logger.warning("Exception updating job %s in orchestrator: %s", job_id, e)
return False
build_job = BuildJob(AttrDict(job_data_json["job_queue_item"]))
updated = self.update_job_phase(job_id, BUILD_PHASE.BUILD_SCHEDULED)
if updated:
self._queue.extend_processing(
build_job.job_item,
seconds_from_now=max_startup_time + 60, # Add some leeway to allow the expiry event to complete
minimum_extension=MINIMUM_JOB_EXTENSION,
)
logger.debug(
"Job scheduled for job %s with execution with ID %s on control plane %s with max startup time of %s",
job_id,
execution_id,
control_plane,
max_startup_time,
)
else:
logger.warning("Job %s not scheduled. Unable update build phase to SCHEDULED")
return updated
def job_unschedulable(self, job_id):
""" Stop tracking the given unschedulable job.
Deletes any states that might have previously been stored in the orchestrator.
"""
try:
build_job = self._build_job_from_job_id(job_id)
self._cleanup_job_from_orchestrator(build_job)
except Exception as e:
logger.warning(
"Exception trying to mark job %s as unschedulable. Some state may not have been cleaned/updated: %s",
job_id,
e
)
    def on_job_complete(self, build_job, job_result, executor_name, execution_id):
        """Handle a completed job by updating the queue, job metrics, and cleaning up
        any remaining state.
        If the job result is INCOMPLETE, the job is requeued with its retry restored.
        If the job result is EXPIRED, the job is requeued, but its retry is not restored.
        If the job result is ERROR, COMPLETE or CANCELLED, the job is marked done in the queue
        and is not requeued.
        Also checks the disable threshold on the build trigger if the phase is in (INTERNAL_ERROR, ERROR)
        """
        job_id = self._job_key(build_job.build_uuid)
        logger.debug("Calling job complete callback for job %s with result %s", job_id, job_result)
        # Record total build duration under the final job status.
        self._write_duration_metric(build_duration, build_job.build_uuid, job_status=job_result)
        # Build timeout. No retry restored
        if job_result == BuildJobResult.EXPIRED:
            self._queue.incomplete(build_job.job_item, restore_retry=False, retry_after=30)
            logger.warning("Job %s completed with result %s. Requeuing build without restoring retry.", job_id, job_result)
        # Unfinished build due to internal error. Restore retry.
        elif job_result == BuildJobResult.INCOMPLETE:
            logger.warning("Job %s completed with result %s. Requeuing build with retry restored.", job_id, job_result)
            self._queue.incomplete(build_job.job_item, restore_retry=True, retry_after=30)
        elif job_result in (BuildJobResult.ERROR, BuildJobResult.COMPLETE, BuildJobResult.CANCELLED):
            logger.warning("Job %s completed with result %s. Marking build done in queue.", job_id, job_result)
            self._queue.complete(build_job.job_item)
        # Disable trigger if needed
        if build_job.repo_build.trigger is not None:
            # May disable the trigger when its failure threshold is exceeded.
            model.build.update_trigger_disable_status(
                build_job.repo_build.trigger, RESULT_PHASES[job_result]
            )
        # Cleanup job from executors
        if executor_name and execution_id:
            # Terminate the worker machine/container that ran this build.
            self._terminate_executor(executor_name, execution_id)
        # Cleanup job from orchestrator
        self._cleanup_job_from_orchestrator(build_job)
        logger.debug("Job completed for job %s with result %s", job_id, job_result)
def start_job(self, job_id, max_build_time):
    """ Starts the build job. This is invoked by the worker once the job has been created and
    scheduled, returning the buildpack needed to start the actual build.

    :param job_id: orchestrator key identifying the job
    :param max_build_time: maximum build duration, used for token expiration
    :returns: (token, build_args) on success, (None, None) on any failure
    """
    # Load the queued job item back from the orchestrator
    try:
        job_data = self._orchestrator.get_key(job_id)
        job_data_json = json.loads(job_data)
        build_job = BuildJob(AttrDict(job_data_json["job_queue_item"]))
    except KeyError:
        logger.warning("Failed to start job %s. Job does not exists in orchestrator", job_id)
        return None, None
    except Exception as e:
        logger.error("Exception loading job %s from orchestrator: %s", job_id, e)
        return None, None

    # Construct the buildpack
    repo = build_job.repo_build.repository
    repository_name = repo.namespace_user.username + "/" + repo.name
    context, dockerfile_path = build_job.extract_dockerfile_args()

    base_image_information = {}
    if build_job.pull_credentials:
        base_image_information["username"] = build_job.pull_credentials.get("username", "")
        base_image_information["password"] = build_job.pull_credentials.get("password", "")

    build_args = {
        "build_package": build_job.get_build_package_url(self._user_files),
        "context": context,
        "dockerfile_path": dockerfile_path,
        "repository": repository_name,
        "registry": self._registry_hostname,
        "pull_token": build_job.repo_build.access_token.get_code(),
        "push_token": build_job.repo_build.access_token.get_code(),
        "tag_names": build_job.build_config.get("docker_tags", ["latest"]),
        "base_image": base_image_information,
    }

    # Only triggered builds with a private key get git checkout information
    private_key = None
    if (
        build_job.repo_build.trigger is not None
        and build_job.repo_build.trigger.secure_private_key is not None
    ):
        private_key = build_job.repo_build.trigger.secure_private_key.decrypt()

    if private_key is not None:
        build_args["git"] = {
            "url": build_job.build_config["trigger_metadata"].get("git_url", ""),
            "sha": build_job.commit_sha(build_job.build_config),
            "private_key": private_key or "",
        }

    # If the build args have no buildpack, mark it as a failure before sending
    # it to a builder instance.
    # BUGFIX: "git" is only present when a private key was found above, so
    # indexing with build_args["git"] raised KeyError for every non-git
    # build; use .get() so the missing key reads as falsy instead.
    if not build_args["build_package"] and not build_args.get("git"):
        logger.error(
            "Failed to start job %s: insufficient build args - No package url or git",
            job_id,
        )
        self.update_job_phase(job_id, BUILD_PHASE.INTERNAL_ERROR)
        return (None, None)

    # Generate the build token
    token = self.generate_build_token(BUILD_JOB_TOKEN_TYPE, build_job.build_uuid, job_id, max_build_time)

    # Publish the time it took for a worker to ack the build
    self._write_duration_metric(build_ack_duration, build_job.build_uuid)

    logger.debug("Started build job %s with arguments %s", job_id, build_args)
    return (token, build_args)
def update_job_phase(self, job_id, phase, phase_metadata=None):
"""Updates the given job's phase and append the phase change to the | |
= \
# fat_image.visualize_model_output(
# image_data, use_thresh=True, use_centroid=False, print_poses=False,
# required_objects=required_objects
# )
# Convert model output poses to table frame and save them to file so that they can be read by perch
run_perch = True
if len(labels) == 0:
run_perch = False
if run_perch:
_, max_min_dict, _, _ = fat_image.visualize_pose_ros(
# image_data, model_annotations, frame='table', camera_optical_frame=False, num_publish=1, write_poses=True, ros_publish=False
image_data, model_annotations, frame='camera', camera_optical_frame=False, num_publish=1, write_poses=True, ros_publish=False,
)
# for anno in model_annotations:
# if fat_image.category_id_to_names[anno['category_id']] not in required_objects:
# print("Removing : {}".format(fat_image.category_id_to_names[anno['category_id']]))
# model_annotations.remove(anno)
# print(model_annotations)
# Run perch/ICP on written poses
perch_annotations, stats = fat_image.visualize_perch_output(
image_data, model_annotations, max_min_dict, frame='camera',
# use_external_render=0, required_object=[labels[1]],
use_external_render=0, required_object=labels,
camera_optical_frame=False, use_external_pose_list=1,
# model_poses_file=model_poses_file, use_centroid_shifting=0,
model_poses_file=model_poses_file, use_centroid_shifting=0,
predicted_mask_path=predicted_mask_path, num_cores=0
)
else:
perch_annotations = None
stats = None
else:
run_perch = True
output_dir_name = os.path.join("greedy_mug", fat_image.get_clean_name(image_data['file_name']))
perch_annotations, stats = fat_image.read_perch_output(output_dir_name)
f_accuracy.write("{},".format(image_data['file_name']))
if perch_annotations is not None:
# # # Compare Poses by applying to model and computing distance
add_dict, add_s_dict = fat_image.compare_clouds(
annotations, perch_annotations, use_add_s=True, convert_annotation_2=not run_perch, use_points_file=True)
if add_dict is not None and add_s_dict is not None:
for object_name in required_objects:
if (object_name in add_dict) and (object_name in add_s_dict):
f_accuracy.write("{},{},".format(add_dict[object_name], add_s_dict[object_name]))
else:
f_accuracy.write(" , ,")
if stats is not None:
f_runtime.write("{} {} {} {} {}".format(image_data['file_name'], stats['expands'], stats['runtime'], stats['icp_runtime'], stats['peak_gpu_mem']))
f_accuracy.write("\n")
f_runtime.write("\n")
f_runtime.close()
f_accuracy.close()
def run_on_image(dataset_cfg=None):
    '''
    Run on images that have no ground truth.

    Runs DOPE followed by PERCH on a directory of bag-extracted frames and
    appends per-image search statistics to runtime.txt.

    :param dataset_cfg: dict supplying model_dir, perch_debug_dir,
        python_debug_dir and type. Returns nothing; output is the
        runtime.txt log plus whatever the PERCH pipeline writes.
    '''
    import rospy
    rospy.init_node("image_run_node")
    # Drop the ROS kinetic dist-packages path so that the python3 cv2 is
    # imported instead of the ROS-shipped python2 build.
    if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
    import cv2
    # directory = "/media/aditya/A69AFABA9AFA85D9/Cruzr/code/DOPE/catkin_ws/src/Deep_Object_Pose/output/drill"
    # directory = "./bag_output/drill_1"
    directory = "./bag_output/sugar_1"
    image_data = {}
    # image_data['file_name'] = "1579546223951406812.color.jpg"
    camera_pose_path = directory + "/camera_pose.json"
    # camera_intrinsics_matrix_path = directory + "/depth_camera_intrinsics.txt"
    camera_intrinsics_matrix_path = directory + "/rgb_camera_intrinsics.txt"
    with open(camera_pose_path) as f:
        camera_pose = json.load(f)
    camera_intrinsics = np.loadtxt(camera_intrinsics_matrix_path)
    # Hand-tuned search-region bounds passed to PERCH
    # (presumably metres in the table frame -- TODO confirm)
    max_min_dict = {}
    max_min_dict['ymax'] = 1.5
    max_min_dict['ymin'] = 0.0
    max_min_dict['xmax'] = 0.6
    max_min_dict['xmin'] = 0.2
    table_height = 0.735
    # Line-buffered (buffering=1) so partial results survive a crash mid-run
    f_runtime = open('runtime.txt', "w", 1)
    f_runtime.write("{} {} {}\n".format('name', 'expands', 'runtime'))
    # required_objects = ['035_power_drill']
    required_objects = ['004_sugar_box']
    fat_image = FATImage(
        coco_image_directory = directory,
        depth_factor=100,
        model_dir=dataset_cfg['model_dir'],
        model_mesh_in_mm=True,
        model_mesh_scaling_factor=1,
        models_flipped=False,
        model_type="upright",
        img_width=640,
        img_height=480,
        distance_scale=1,
        env_config="pr2_conv_env_config.yaml",
        planner_config="pr2_planner_config.yaml",
        perch_debug_dir=dataset_cfg["perch_debug_dir"],
        python_debug_dir=dataset_cfg["python_debug_dir"],
        dataset_type=dataset_cfg["type"]
    )
    # Override the default search resolutions for this scene
    fat_image.search_resolution_translation = 0.07
    fat_image.search_resolution_yaw = 0.4
    fat_image.camera_intrinsic_matrix = camera_intrinsics
    # Only the single required object is mapped to a category id
    fat_image.category_names_to_id = {
        required_objects[0]: 0
    }
    fat_image.category_names = [
        "002_master_chef_can",
        "003_cracker_box",
        "004_sugar_box",
        "005_tomato_soup_can",
        "006_mustard_bottle",
        "007_tuna_fish_can",
        "008_pudding_box",
        "009_gelatin_box",
        "010_potted_meat_can",
        "011_banana",
        "019_pitcher_base",
        "021_bleach_cleanser",
        "024_bowl",
        "025_mug",
        "035_power_drill",
        "036_wood_block",
        "037_scissors",
        "040_large_marker",
        "051_large_clamp",
        "052_extra_large_clamp",
        "061_foam_brick"
    ]
    ## Try to run mask detection
    # fat_image.init_model(
    #     dataset_cfg['maskrcnn_config'],
    #     print_poses=False,
    #     required_objects=required_objects,
    #     model_weights=dataset_cfg['maskrcnn_model_path'],
    #     min_image_size=fat_image.height
    # )
    fat_image.init_dope_node()
    # Process frames 150-359 from the bag dump
    for img_i in np.arange(150, 360, 1):
        image_data['file_name'] = "{}.color.jpg".format(img_i)
        ## Try to run mask detection
        # color_img_path = os.path.join(fat_image.coco_image_directory, image_data['file_name'])
        # color_img = cv2.imread(color_img_path)
        # composite, mask_list_all, labels_all, centroids_2d_all, boxes_all, overall_binary_mask \
        #     = fat_image.coco_demo.run_on_opencv_image(color_img, use_thresh=True)
        # mask_output_path = os.path.join(fat_image.coco_image_directory,
        #     fat_image.get_clean_name(image_data['file_name']) + "_mask.jpg")
        # cv2.imwrite(mask_output_path, composite)
        # dope_annotations, runtime = fat_image.visualize_dope_output(image_data)
        perch_annotations, stats = fat_image.visualize_perch_output(
            image_data, None, max_min_dict, frame='table',
            use_external_render=0, required_object=required_objects,
            # Apply cam to body transform because this camera pose is with optical frame
            camera_optical_frame=False, use_external_pose_list=0,
            input_camera_pose=camera_pose, table_height=table_height,
            num_cores=0, compute_type=1
        )
        f_runtime.write("{} {} {}\n".format(image_data['file_name'], stats['expands'], stats['runtime']))
    f_runtime.close()
def run_on_jenga_image(dataset_cfg=None):
    '''
    Run on images that have no ground truth.

    For each jenga clutter scene: sample candidate poses with sphere
    sampling, refine them with PERCH, convert the refined poses to the
    table frame, and dump them as JSON for the simulator. Per-scene search
    statistics are appended to runtime.txt.

    :param dataset_cfg: dict supplying image_dir, coco_annotation_file,
        model_dir, camera_idx, perch_debug_dir, python_debug_dir and type.
    '''
    import rospy
    # rospy.init_node("image_run_node")
    # Drop the ROS kinetic dist-packages path so that the python3 cv2 is
    # imported instead of the ROS-shipped python2 build.
    if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
    import cv2
    image_directory = dataset_cfg['image_dir']
    # NOTE: earlier revisions derived the annotation path from image_dir;
    # the config-supplied annotation file is authoritative now (dead
    # intermediate assignments removed).
    annotation_file = dataset_cfg['coco_annotation_file']
    model_dir = dataset_cfg['model_dir']
    camera_idx = dataset_cfg['camera_idx']
    # Line-buffered (buffering=1) so partial results survive a crash mid-run
    f_runtime = open('runtime.txt', "w", 1)
    f_runtime.write("{} {} {}\n".format('name', 'expands', 'runtime'))
    # All 13 blocks of the tower (smaller subsets were used in earlier
    # experiments; only this final list was ever effective)
    required_objects = [
        "color_block_0",
        "color_block_1",
        "color_block_2",
        "color_block_3",
        "color_block_4",
        "color_block_5",
        "color_block_6",
        "color_block_7",
        "color_block_8",
        "color_block_9",
        "color_block_10",
        "color_block_11",
        "color_block_12"
    ]
    fat_image = FATImage(
        coco_annotation_file=annotation_file,
        coco_image_directory = image_directory,
        depth_factor=1000,
        model_dir=model_dir,
        model_mesh_in_mm=False,
        model_mesh_scaling_factor=1,
        models_flipped=False,
        model_type="default",
        img_width=640,
        img_height=360,
        distance_scale=1,
        table_ransac_threshold=0.025,
        env_config="pr3_jenga_env_config.yaml",
        planner_config="pr3_planner_config.yaml",
        perch_debug_dir=dataset_cfg["perch_debug_dir"],
        python_debug_dir=dataset_cfg["python_debug_dir"],
        dataset_type=dataset_cfg["type"]
    )
    # Scenes 1-25, one crop per scene from the configured camera
    for img_i in np.arange(1, 26, 1):
        image_name = "clutter/{}/{}_color_crop.jpg".format(img_i, str(camera_idx).zfill(4))
        image_data, annotations = fat_image.get_random_image(
            name=image_name, required_objects=None
        )
        # Skip scenes without an image or without any annotations
        if image_data is None or annotations is None:
            continue
        elif len(annotations) == 0:
            continue
        # Sample candidate poses (and a mask) for all required objects
        labels, model_annotations, predicted_mask_path, model_poses_file = \
            fat_image.visualize_sphere_sampling(
                image_data, annotations=annotations, print_poses=False,
                required_objects=required_objects, num_samples=100,
                mask_type="jenga", mask_image_id=img_i
            )
        # Write poses to file for perch
        _, max_min_dict, _, _ = fat_image.visualize_pose_ros(
            image_data,
            model_annotations,
            frame='camera',
            camera_optical_frame=False,
            num_publish=1,
            write_poses=True,
            ros_publish=True,
            get_table_pose=True
        )
        # Refine the sampled poses with PERCH/ICP
        perch_annotations, stats = fat_image.visualize_perch_output(
            image_data, model_annotations, max_min_dict, frame='camera',
            use_external_render=0, required_object=labels,
            camera_optical_frame=False, use_external_pose_list=1,
            model_poses_file=model_poses_file, use_centroid_shifting=0,
            predicted_mask_path=predicted_mask_path, num_cores=0,
        )
        # Convert poses to table frame for simulator
        _, _, transformed_anns, _ = fat_image.visualize_pose_ros(
            image_data,
            perch_annotations,
            frame='table',
            camera_optical_frame=False,
            num_publish=1,
            write_poses=False,
            ros_publish=True
        )
        pose_output_filename = "clutter/{}/{}_poses.json".format(img_i, str(0).zfill(4))
        for ann in transformed_anns:
            ann["type"] = "jenga"
        pose_output = {}
        pose_output["poses"] = transformed_anns
        pose_output["num_objects"] = len(transformed_anns)
        pose_output["runtime"] = stats['runtime']
        mkdir_if_missing("jenga_output_poses/clutter/{}".format(img_i))
        with open(os.path.join("jenga_output_poses", pose_output_filename), 'w') as outfile:
            json.dump(pose_output, outfile, indent=2)
        f_runtime.write("{} {} {}\n".format(image_data['file_name'], stats['expands'], stats['runtime']))
    f_runtime.close()
def compute_pose_metrics(rec, max_auc_dist = 0.1, max_pose_dist = 0.02):
    # TODO : this should be in utils.py
    '''
    Compute pose accuracy metrics from a set of pose errors.
    Follows plot_accuracy_keyframe.m from YCB_Video_toolbox.

    @rec - np.array of pose errors (e.g. ADD/ADD-S values) in any order
    @max_auc_dist - errors above this are excluded from the AUC curve,
        which is also normalized by this cutoff
    @max_pose_dist - threshold below which a pose counts as "correct"

    Returns a dict with:
        "auc"                  - area under the accuracy curve, percent
        "pose_error_less_perc" - percent of poses with error < max_pose_dist
        "mean_pose_error"      - mean of all input errors
        "pose_count"           - number of poses kept for the AUC curve

    The input array is never mutated (a copy is taken), so callers no
    longer need to pass np.copy(...) defensively.
    '''
    # Copy first: the previous version overwrote entries with np.inf and
    # re-sorted in place, silently clobbering the caller's array.
    rec = np.array(rec, dtype=np.float64)
    rec_mean = np.mean(rec)
    # Fraction of poses under the "correct pose" threshold
    rec_less = np.where(rec < max_pose_dist)[0]
    rec_less_perc = rec_less.shape[0]/rec.shape[0] * 100.0
    # Drop overly large errors from the accuracy curve
    rec[rec > max_auc_dist] = np.inf
    rec = np.sort(rec)
    prec = np.arange(0, rec.shape[0], 1)/rec.shape[0]
    # Remove first 0 and add 1 at the end (denotes 100 percent of poses)
    prec = np.array(prec[1:].tolist() + [1])
    index = np.isfinite(rec)
    # Actual pose error
    rec = rec[index]
    # Percentage of poses with that error
    prec = prec[index]
    # Append end point values; guard the case where every error exceeded
    # the cutoff (prec is then empty and prec[-1] would raise IndexError)
    last_prec = prec[-1] if prec.shape[0] > 0 else 0.0
    mrec = np.array([0] + rec.tolist() + [max_auc_dist])
    mpre = np.array([0] + prec.tolist() + [last_prec])
    # Indexes where value is not equal to previous value
    args_prev = np.where(mrec[:-1] != mrec[1:])[0]
    args = args_prev + 1
    # Area under the curve, normalized by the distance cutoff (this is the
    # generalization of the original hard-coded "* 10" for a 0.1 cutoff)
    ap = np.sum((mrec[args] - mrec[args_prev]) * mpre[args]) / max_auc_dist
    return {
        "auc" : ap * 100.0,
        "pose_error_less_perc" : rec_less_perc,
        "mean_pose_error" : rec_mean,
        "pose_count" : rec.shape[0]
    }
def get_filename_from_path(full_path):
    """Return the file name of *full_path* without directory or extension."""
    base_name = os.path.basename(full_path)
    stem, _extension = os.path.splitext(base_name)
    return stem
def analyze_conveyor_results(config=None):
import pandas as pd
dataset_cfg = config['dataset']
analysis_cfg = config['analysis']
image_directory = dataset_cfg['image_dir']
annotation_file = dataset_cfg['image_dir'] + '/instances_conveyor_pose.json'
perch_config_yaml = "pr2_gpu_conv_env_config.yaml"
fat_image = FATImage(
coco_annotation_file=annotation_file,
coco_image_directory=image_directory,
depth_factor=100,
model_dir=dataset_cfg['model_dir'],
model_mesh_in_mm=False,
model_mesh_scaling_factor=1,
models_flipped=False,
model_type="upright",
img_width=640,
img_height=480,
distance_scale=1,
env_config=perch_config_yaml,
planner_config="pr2_planner_config.yaml",
perch_debug_dir=dataset_cfg["perch_debug_dir"],
python_debug_dir=dataset_cfg["python_debug_dir"],
dataset_type=dataset_cfg["type"],
analysis_output_dir=analysis_cfg["output_dir"]
)
filename_conveyor_y_dict = {}
for scene_name in ["mustard_1",
"mustard_2",
"mustard_3",
"drill_1",
"drill_2",
"drill_3",
"soup_1",
"sugar_1",
"sugar_2",
"sugar_3"]:
for img_i in range(100, 400):
image_name = '{}/{}.color.jpg'.format(scene_name, str(img_i))
image_data, annotations = fat_image.get_random_image(name=image_name)
if annotations is None:
continue
filename_conveyor_y_dict[image_name] = annotations[0]['location'][1]
# df_y_dist = pd.DataFrame(filename_conveyor_y_dict, index=range(0, len(filename_conveyor_y_dict.keys())))
df_y_dist = pd.DataFrame.from_dict(filename_conveyor_y_dict, orient='index', columns = ['y_dist'])
# print(df_y_dist)
min_y = df_y_dist["y_dist"].min()
max_y = df_y_dist["y_dist"].max()
print("GT Min y: {}, GT Max y : {}".format(min_y, max_y))
overall_stats_dict = {}
# Object wise metrics
print("\n### Object Wise AUC ###")
li = []
for accuracy_file in analysis_cfg['result_files']['accuracy']:
# Read file for every object
print("Accuracy file : {}".format(accuracy_file))
df = pd.read_csv(accuracy_file,
header=None,
index_col=None,
names=["filename", "add", "add-s", "blank"],
skiprows=1,
sep=",")
df = df.drop(columns=["add", "blank"])
df = df.set_index('filename')
add_s = np.copy(df['add-s'].to_numpy())
stats = compute_pose_metrics(add_s)
print("AUC : {}, Pose Percentage : {}, Mean ADD-S : {}".format(
stats['auc'], stats['pose_error_less_perc'], stats['mean_pose_error']))
li.append(df)
overall_stats_dict[get_filename_from_path(accuracy_file)] = stats
# Overall Metrics
# print("Dataframe with add-s")
df_acc = pd.concat(li, axis=0, ignore_index=False)
print("\n### Overall AUC ###")
stats = compute_pose_metrics(np.copy(df_acc['add-s'].to_numpy()))
print("AUC : {}, Pose Percentage : {}, Mean ADD-S : {}".format(
stats['auc'], stats['pose_error_less_perc'], stats['mean_pose_error']))
overall_stats_dict["overall"] = stats
## Runtime
print("\n### Object Wise Runtimes ###")
li = []
for runtime_file in analysis_cfg['result_files']['runtime']:
# Read file for every object
print("Runtime file : {}".format(runtime_file))
df = pd.read_csv(runtime_file,
header=0,
index_col=None,
# names=["filename", "runtime", "icp-runtime"],
# skiprows=1,
delim_whitespace=True)
# print(df)
df = df.set_index('name')
mean_runtime = df['runtime'].mean()
mean_rendered = df['expands'].mean()
print("Average | |
#!/usr/bin/env python3
import requests
import datetime
import re
from urllib.parse import quote
import sys
import linkpath
import config
def htmlescape(string):
    """Escape the HTML special characters &, <, > and double quote.

    '&' is replaced first so the ampersands introduced by the other
    replacements are not double-escaped. As written before, every
    replacement mapped a character to itself, so nothing was escaped at
    all and attacker-controlled text reached attribute values verbatim.
    """
    return (string.replace('&', '&amp;')
                  .replace('<', '&lt;')
                  .replace('>', '&gt;')
                  .replace('"', '&quot;'))
def _safe_get(obj, key, default=None):
"""This acts like obj.get(key, default), except that if obj[key] exists but
is None, we still return default rather than the accessed result. Also, if
obj happens to be None, we return default rather than raising an exception.
To see the difference, suppose obj = {"a": None}. Then obj.get("a", 1) is
None but _safe_get(obj, "a", 1) is 1.
Since obj can be None, _safe_get can also be nested without checking for
None each time: _safe_get(_safe_get({}, "a"), "b", 1) is 1. Thus in some
cases a default need only be specified at the end."""
if obj is None:
return default
result = obj.get(key)
if result is None:
result = default
return result
def _safe_multiget(obj, key_list, default=None):
    """This acts like _safe_get(_safe_get(obj, key1), key2, default). The
    intention is something like, get each key in turn, and return default if we
    get a None at any point."""
    if not key_list:
        return obj
    current = obj
    # Walk all but the last key with no default; only the final lookup
    # applies the caller's default.
    for key in key_list[:-1]:
        current = _safe_get(current, key)
    return _safe_get(current, key_list[-1], default)
def safe_get(obj, keys, default=None):
    """None-tolerant lookup: a list of keys walks nested dicts, any other
    key type does a single-level lookup."""
    if not isinstance(keys, list):
        return _safe_get(obj, keys, default)
    return _safe_multiget(obj, keys, default)
def official_url_to_gw(ea_forum_link):
    """Rewrite an official forum URL to its GreaterWrong mirror (first
    occurrence of the host only)."""
    if "forum.effectivealtruism.org" in config.GRAPHQL_URL:
        source_host, mirror_host = 'forum.effectivealtruism.org', 'ea.greaterwrong.com'
    else:
        source_host, mirror_host = 'www.lesswrong.com', 'www.greaterwrong.com'
    return ea_forum_link.replace(source_host, mirror_host, 1)
def official_url_to_reader(ea_forum_link):
    """Rewrite an official forum URL to this reader's mirror (first
    occurrence of the host only)."""
    if "forum.effectivealtruism.org" in config.GRAPHQL_URL:
        source_host, reader_host = 'forum.effectivealtruism.org', 'eaforum.issarice.com'
    else:
        source_host, reader_host = 'www.lesswrong.com', 'lw2.issarice.com'
    return ea_forum_link.replace(source_host, reader_host, 1)
def int_to_base36(number):
    """Convert a non-negative integer to its base-36 string ('0'-'9','a'-'z')."""
    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    encoded = ''
    # Loop at least once so 0 encodes as '0' rather than ''
    while not encoded or number > 0:
        number, remainder = divmod(int(number), 36)
        encoded = digits[remainder] + encoded
    return encoded
def legacy_link(legacy_slug):
    """Wayback Machine wildcard URL for the pre-2.0 (legacy) site page."""
    slug = int_to_base36(legacy_slug)
    if "forum.effectivealtruism.org" in config.GRAPHQL_URL:
        template = 'https://web.archive.org/web/*/http://effective-altruism.com/ea/%s/*'
    else:
        template = 'https://web.archive.org/web/*/http://lesswrong.com/lw/%s/*'
    return template % slug
def show_head(title, author="", date="", publisher="", widepage=False, canonical_url=""):
    """Build the HTML <head> element (meta tags plus inline CSS) for a page.

    title, author and date populate the og:* and citation_* meta tags and
    the <title>; canonical_url adds a rel=canonical link; widepage selects
    the wide (max 1500px) layout instead of the fixed 1024px two-column one.
    Empty optional arguments simply omit their meta tags.

    NOTE(review): the `publisher` parameter is never used in the body --
    confirm whether a citation_publisher meta tag was intended.
    """
    # Metadata plus base typography/colors; the %s slots are filled from
    # the tuple below.
    result = ("""
    <head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
    %s
    %s
    %s
    <meta property="og:title" content="%s" />
    <meta property="og:locale" content="en_US" />
    <meta property="og:type" content="article" />
    <meta name="citation_title" content="%s">
    %s
    %s
    <meta name="citation_fulltext_world_readable" content="">
    <title>%s</title>
    <style type="text/css">
      body {
        font-family: Lato, Helvetica, sans-serif;
        font-size: 15.4px;
        line-height: 1.4;
        background-color: whitesmoke;
      }
      a {
        color: %s;
        text-decoration: underline;
      }
      a:visited {
        color: #8a8a8b;
      }
      blockquote {
        border-left: 2px solid #369;
        padding-left: 10px;
        margin-right: 15px;
        margin-left: 0px;
      }
      h1 {
        color: %s;
      }
      code {
        background-color: #f6f6f6;
        padding-left: 4px;
        padding-right: 4px;
        word-wrap: normal;
      }
      pre code {
        display: block;
        line-height: 1.2;
        overflow: auto;
        padding: 10px;
      }
      img {
        max-width: 100%%;
        height: auto;
      }
      table {
        background-color: #f9f9f9;
        border-collapse: collapse;
        border-bottom: none;
        border-top: none;
      }
      table th {
        background-color: #f2f2f2;
        border: 1px solid #aaaaaa;
        font-weight: bold;
        padding: 5px 10px;
      }
      table td {
        border: 1px solid #aaaaaa;
        padding: 5px 10px;
      }
      /* See https://stackoverflow.com/a/34259648/3422337 */
      .spoilers { border: 1px solid black; }
      .spoilers, .spoilers > * { transition: color 0.5s, opacity 0.5s; }
      .spoilers:not(:hover) { color: transparent; }
      .spoilers:not(:hover) > * { opacity: 0; }
      .spoiler { border: 1px solid black; }
      .spoiler, .spoiler > * { transition: color 0.5s, opacity 0.5s; }
      .spoiler:not(:hover) { color: transparent; }
      .spoiler:not(:hover) > * { opacity: 0; }
    """ % (
        # Optional tags collapse to "" when their argument is empty
        '''<meta name="author" content="%s">''' % htmlescape(author) if author else "",
        '''<meta name="dcterms.date" content="%s">''' % htmlescape(date) if date else "",
        '''<link rel="canonical" href="%s">''' % htmlescape(canonical_url) if canonical_url else "",
        htmlescape(title),
        htmlescape(title),
        '''<meta name="citation_author" content="%s">''' % htmlescape(author) if author else "",
        '''<meta name="citation_publication_date" content="%s">''' % htmlescape(date) if date else "",
        htmlescape(title),
        config.LINK_COLOR,
        config.LINK_COLOR
    )
    )
    # Two-column layout (#content left, #sidebar right), collapsing to a
    # single column at <=768px.
    # NOTE(review): this literal is NOT %-formatted, so the "%%" sequences
    # below reach the browser verbatim as "%%" -- confirm intent.
    result += ("""
    #wrapper {
      border-left: 1px solid #d2d2d2;
      border-right: 1px solid #d2d2d2;
      margin: 0 auto;
      overflow: hidden;
      background-color: #fff;
    }
    #content {
      padding: 30px 0 0 32px;
      background-color: #fff;
      float: left;
    }
    #sidebar {
      padding: 30px 32px 0 0;
      background-color: #fff;
      float: right;
    }
    @media (max-width: 768px) {
      #sidebar {
        width: 100%%;
        float: none;
        padding: 0 0 0 0;
      }
      #content {
        width: 97%%;
        float: none;
        padding: 0 0 0 0;
      }
      #wrapper {
        width: 100%%;
        overflow: auto;
      }
    }
    """)
    # Column widths depend on the layout variant
    if widepage:
        result += """
    #wrapper {
      max-width: 1500px;
    }
    #content {
    }
    #sidebar {
      width: 0px;
    }
    """
    else:
        result += """
    #wrapper {
      width: 1024px;
    }
    #content {
      width: 710px;
    }
    #sidebar {
      width: 220px;
    }
    """
    result += """
    </style>
    </head>
    """
    return result
def show_navbar(navlinks=None, search_value=""):
    """Render the site navigation bar.

    navlinks: optional list of extra pre-rendered HTML links, appended
        after the fixed Home/About/User-list links.
    search_value: when non-empty, pre-fills the search box with this text.
    """
    if navlinks is None:
        navlinks = []
    result = ("""<nav><a href="/">Home</a> ·
    <a href="https://github.com/riceissa/ea-forum-reader">About</a> ·
    <a href="%s">User list</a>
    """ % linkpath.userlist())
    for extra_link in navlinks:
        result += " · " + extra_link
    # Only emit a value attribute when there is a previous query to show
    value_attr = 'value="%s"' % htmlescape(search_value) if search_value else search_value
    result += ("""
    <form action="%s" method="get" style="display: inline-block;">
    <input name="q" type="text" %s/>
    <input type="submit" value="Search" />
    </form>
    </nav>
    """ % (linkpath.search(), value_attr))
    return result
def send_query(query):
    """Issue a GraphQL query as a GET request and return the raw response."""
    params = {'query': query}
    return requests.get(config.GRAPHQL_URL, params=params)
def cleanHtmlBody(htmlBody):
    """For some reason htmlBody values often have the following tags that
    really shouldn't be there."""
    if htmlBody is None:
        return ""
    for stray_tag in ("<html>", "</html>", "<body>", "</body>", "<head>", "</head>"):
        htmlBody = htmlBody.replace(stray_tag, "")
    return htmlBody
def userid_to_userslug(userid):
    """Resolve a user's document id to their URL slug via GraphQL."""
    query = ("""
    {
      user(input: {selector: {documentId: "%s"}}) {
        result {
          _id
          slug
        }
      }
    }
    """ % userid)
    response = send_query(query)
    payload = response.json()
    return payload['data']['user']['result']['slug']
def userslug_to_userid(userslug, run_query=True):
    """Resolve a user's URL slug to their document id via GraphQL.

    With run_query=False, return the query text plus a GraphiQL link
    instead of executing the query.
    """
    query = ("""
    {
      user(input: {selector: {slug: "%s"}}) {
        result {
          _id
          slug
        }
      }
    }
    """ % userslug)
    if not run_query:
        graphiql_link = config.GRAPHQL_URL.replace("graphql", "graphiql") + "?query=" + quote(query)
        return query + ('''\n<a href="%s">Run this query</a>\n\n''' % graphiql_link)
    response = send_query(query)
    return response.json()['data']['user']['result']['_id']
def userlink(slug=None, username=None, display_name=None, bio=None):
    """Render an HTML anchor for a user, or a bold [deleted] marker when
    there is no slug. The bio, if any, becomes a hover title."""
    if not slug:
        return '''<b>[deleted]</b>'''
    shown = username if username else slug
    if display_name and display_name != shown:
        shown = display_name + " (" + shown + ")"
    url = linkpath.users(userslug=slug)
    if bio:
        return '''<a href="%s" title="%s">%s</a>''' % (url, htmlescape(bio), shown)
    return '''<a href="%s">%s</a>''' % (url, shown)
def official_link(page_url):
    """Anchor pointing at the official (LW 2.0 or EA Forum) copy of the page."""
    if "lesswrong" in config.GRAPHQL_URL:
        label, tooltip = "LW", "Official LessWrong 2.0 link"
    else:
        label, tooltip = "EA", "Official EA Forum link"
    return '''<a href="%s" title="%s">%s</a>''' % (page_url, tooltip, label)
def gw_link(page_url):
    """Anchor pointing at the GreaterWrong mirror of the page."""
    mirror_url = official_url_to_gw(page_url)
    return '''<a href="%s" title="GreaterWrong link">GW</a>''' % mirror_url
def alt_urls(original_url, is_answer=False):
    """Return a dictionary of URLs for all the alternative services (official,
    GW, my reader). Supported keys are: official, official_permalink, gw,
    gw_permalink, reader.

    For example, if the URL is "https://www.lesswrong.com/posts/bnY3L48TtDrKTzGRb/ai-safety-success-stories#9zAikCfT78BhyT8Aj" then the result is
    {
        "official": "https://www.lesswrong.com/posts/bnY3L48TtDrKTzGRb/ai-safety-success-stories#9zAikCfT78BhyT8Aj",
        "official_permalink": "https://www.lesswrong.com/posts/bnY3L48TtDrKTzGRb/ai-safety-success-stories?commentId=9zAikCfT78BhyT8Aj",
        "gw": "https://www.greaterwrong.com/posts/bnY3L48TtDrKTzGRb/ai-safety-success-stories#comment-9zAikCfT78BhyT8Aj",
        "gw_permalink": "https://www.greaterwrong.com/posts/bnY3L48TtDrKTzGRb/ai-safety-success-stories/comment/9zAikCfT78BhyT8Aj",
        "reader": "https://lw2.issarice.com/posts/bnY3L48TtDrKTzGRb/ai-safety-success-stories#9zAikCfT78BhyT8Aj"
    }

    For post URLs, the permalink keys will not exist. Unrecognized URLs
    yield {"official": "?", "gw": "?", "reader": "?"}."""
    anchor = None
    try:
        domain, path, comment_id = re.match(r'https?://((?:www|ea|forum)\.(?:greaterwrong\.com|effectivealtruism\.org|lesswrong\.com|alignmentforum\.org))(/posts/[a-zA-Z0-9]+/[^/]+)(?:/comment/|/answer/|#|#comment-|\?commentId=)([a-zA-Z0-9]+)$', original_url).groups()
        # Keep track of a list of common anchors that are not comment anchors
        if comment_id in ["comments"]:
            anchor = comment_id
            comment_id = None
    except AttributeError:
        # Not a comment-style URL; try a plain post or user URL
        try:
            domain, path = re.match(r'https?://((?:www|ea|forum)\.(?:greaterwrong\.com|effectivealtruism\.org|lesswrong\.com|alignmentforum\.org))(/posts/[a-zA-Z0-9]+/[^/]+|/users/[^/#]+)$', original_url).groups()
            comment_id = None
        except AttributeError:
            # .groups() on a failed match raises AttributeError; this was a
            # bare "except:" which also hid unrelated failures.
            print("We don't know how to deal with this URL: ", original_url, file=sys.stderr)
            return {"official": "?", "gw": "?", "reader": "?"}
    if domain in ["www.lesswrong.com", "forum.effectivealtruism.org", "www.alignmentforum.org"]:
        official_domain = domain
    elif domain == "www.greaterwrong.com":
        official_domain = "www.lesswrong.com"
    elif domain == "ea.greaterwrong.com":
        official_domain = "forum.effectivealtruism.org"
    else:
        # Defensive fallback: the regex also admits combinations such as
        # "ea.lesswrong.com"; previously these hit an UnboundLocalError.
        official_domain = domain
    if domain in ["forum.effectivealtruism.org", "ea.greaterwrong.com"]:
        gw_domain = "ea.greaterwrong.com"
        reader_domain = "eaforum.issarice.com"
    else:
        gw_domain = "www.greaterwrong.com"
        reader_domain = "lw2.issarice.com"
    if comment_id:
        # GW is the only weird one which distinguishes between comment vs
        # answer URL structure, but it only does this for the permalink, so the
        # anchor version still uses "#comment-" even for answers
        if is_answer:
            gw_permalink = "https://" + gw_domain + path + "/answer/" + comment_id
        else:
            gw_permalink = "https://" + gw_domain + path + "/comment/" + comment_id
        result = {
            "official": "https://" + official_domain + path + "#" + comment_id,
            "official_permalink": "https://" + official_domain + path + "?commentId=" + comment_id,
            "gw": "https://" + gw_domain + path + "#comment-" + comment_id,
            "gw_permalink": gw_permalink,
            "reader": "https://" + reader_domain + path + "#" + comment_id
        }
    else:
        result = {
            "official": "https://" + official_domain + path + ("#" + anchor if anchor else ""),
            "gw": "https://" + gw_domain + path + ("#" + anchor if anchor else ""),
            "reader": "https://" + reader_domain + path + ("#" + anchor if anchor else "")
        }
    return result
def grouped_links(url_dict):
if "lesswrong.com" in url_dict["official"]:
official_variant = "LW"
official_title = "Official LessWrong 2.0"
elif "forum.effectivealtruism.org" in url_dict["official"]:
official_variant = "EA"
official_title = "Official EA Forum"
elif "alignmentforum.org" in url_dict["official"]:
official_variant = "AF"
official_title | |
(%s)" % (e, pfc_name_turl))
else:
tolog("Renamed TURL based PFC from %s to %s" % (pfc_name_turl, pfc_name))
createdPFC = True
else:
tolog("No TURL based PFC was created earlier")
# create a standard PFC with SURLs if needed (basically this is default)
# note: the SURLs are actually TURLs if FAX was used as a primary site mover in combination with direct I/O
if not createdPFC:
# always write a PoolFileCatalog.xml independently from how many files were transferred succesfully
# No PFC only if no PFC was returned by Rucio
createPFC4TRF(pfc_name, guidfname)
def PFC4TURLs(analysisJob, transferType, fileInfoDic, pfc_name_turl, sitemover, sitename, usect, dsdict, eventService, tokens_dictionary, computingSite, sourceSite, lfns, scope_dict, experiment):
    """ Create a TURL based PFC if necessary/requested """
    # I.e if copy tool should not be used [useCT=False] and if oldPrefix and newPrefix are not already set in copysetup [useSetPrefixes=False]
    # Returns (exit code, error diagnostics, whether a TURL PFC was
    # created, possibly-updated copy-to-scratch flag, LFN->TURL mapping).
    ec = 0
    pilotErrorDiag = ""
    createdPFCTURL = False
    LFN_to_TURL_dictionary = {}

    # first check if there is a need to create the PFC
    if shouldPFC4TURLsBeCreated(analysisJob, transferType, experiment, eventService):
        ec, pilotErrorDiag, LFN_to_TURL_dictionary = createPFC4TURLs(fileInfoDic, pfc_name_turl, sitemover, sitename, dsdict, tokens_dictionary, computingSite, sourceSite, lfns, scope_dict, transferType, experiment)
        if ec == 0:
            tolog("PFC created with TURLs")
            createdPFCTURL = True
        elif analysisJob:
            # Analysis jobs may fall back to copy-to-scratch on failure
            # reset the pilotErrorDiag since it is not needed
            pilotErrorDiag = ""
            tolog("Defaulting to copy-to-scratch")
            # NOTE(review): statusPFCTurl is assigned but never used or
            # returned from this function -- confirm whether it was meant
            # to be part of the return value.
            statusPFCTurl = False # this will trigger a correction of the setup command (user analysis jobs only, not needed for production jobs)
            usect = True
        else:
            # Production jobs fail immediately; ec/pilotErrorDiag propagate
            tolog("Will not switch to copy-to-scratch for production job (fail immediately)")

    return ec, pilotErrorDiag, createdPFCTURL, usect, LFN_to_TURL_dictionary
def extractInputFileInfo(fileInfoList_nr, lfns):
    """ Extract the file info fields for the given input file and resolve its LFN """
    (guid, gpfn, size, checksum, filetype, copytool, os_bucket_id) = (
        fileInfoList_nr[0],
        fileInfoList_nr[1],
        fileInfoList_nr[2],
        fileInfoList_nr[3],
        fileInfoList_nr[4],
        fileInfoList_nr[5],
        fileInfoList_nr[6],
    )
    tolog("Extracted (guid, gpfn, size, checksum, filetype, copytool, os_bucket_id) = (%s, %s, %s, %s, %s, %s, %s)" % (guid, gpfn, str(size), checksum, filetype, copytool, os_bucket_id))

    # get the corresponding lfn
    lfn = getLFN(gpfn, lfns)

    # normalise missing checksum/size values to 0
    if checksum in ("", "None"):
        checksum = 0
    if size == "":
        size = 0

    return guid, gpfn, lfn, size, checksum, filetype, copytool, os_bucket_id
def getAlternativeReplica(gpfn, guid, replica_number, createdPFCTURL, replica_dictionary):
    """ Grab the gpfn from the replicas dictionary in case alternative replica stage-in is allowed """
    # Only applies when no TURL-based PFC was created and replicas exist;
    # on any lookup failure the original (default) gpfn is kept.
    if not createdPFCTURL and replica_dictionary != {}:
        try:
            gpfn = replica_dictionary[guid][replica_number]
        except Exception, e:
            tolog("!!WARNING!!1001!! Could not grab alternative replica from dictionary: %s (using default replica)" % str(e))
        else:
            # try/except/else: only log the replica when the lookup succeeded
            tolog("Using replica number %d: %s" % (replica_number, gpfn))

    return gpfn
def getSurlTokenDictionary(lfns, tokens):
    """ Create a SURL vs space tokens dictionary """

    # Lists of different lengths cannot be zipped into a meaningful mapping
    if len(lfns) != len(tokens):
        tolog("!!WARNING!!2233!! Cannot create dictionary from lists of different lengths: %s, %s" % (str(lfns), str(tokens)))
        return {}

    return dict(zip(lfns, tokens))
def mover_get_data(lfns,
path,
sitename,
queuename,
stageinTries,
inputpoolfcstring="xmlcatalog_file:PoolFileCatalog.xml",
ub="outdated", # to be removed
dsname="",
dsdict={},
rucio_dataset_dictionary={},
guids=[],
analysisJob=False,
usect=True,
pinitdir="",
proxycheck=True,
spsetup="",
tokens=[],
userid="",
inputDir="",
jobId=None,
jobsetID=None,
jobDefId="",
access_dict=None,
scope_dict=None,
workDir="",
DN=None,
dbh=None,
jobPars="",
cmtconfig="",
filesizeIn=[],
checksumIn=[],
transferType=None,
experiment="",
eventService=False,
sourceSite="",
pandaProxySecretKey=None,
job={}):
"""
This method is called by a job to get the required input data.
The parameters passed are a list of LFNs, working directory path, site name,
and a connection string to the poolfile catalog to fill with
input data (if none given the default is xmlcatalog_file:PoolFileCatalog.xml).
The local destination directory (working directory path) should already exist,
or the copy will fail.
The program stops at the first failed transfer (after retries) and the PFC
contains the files that were transferred correctly, an error message is returned.
"""
tolog("Mover get data started")
statusPFCTurl = None
pilotErrorDiag = ""
# FAX counters (will be reported in jobMetrics; only relevant when FAX has been activated after a stage-in failure)
N_filesWithoutFAX = 0
N_filesWithFAX = 0
bytesWithoutFAX = 0L
bytesWithFAX = 0L
# FAX control variable, if FAX is used as primary site mover in combination with direct I/O
usedFAXandDirectIO = False
# The FAX variables above will be stored in a dictionary, to be returned by this function
FAX_dictionary = {}
# Is the DBRelease file available locally?
DBReleaseIsAvailable = handleDBRelease(dbh, lfns, jobPars, path)
# Should stage-in be aborted? (if there are only locally available DBRelease files in the stage-in list)
if abortStageIn(dbh, lfns, DBReleaseIsAvailable):
return 0, pilotErrorDiag, statusPFCTurl, FAX_dictionary
# Setup the dictionary necessary for all instrumentation
report = getInitialTracingReport(userid, sitename, dsname, "get_sm", analysisJob, jobId, jobDefId, DN, job.taskID)
if stageinTries != 0:
get_RETRY = min(stageinTries, MAX_NUMBER_OF_RETRIES)
else:
get_RETRY = MAX_RETRY
get_TIMEOUT = 5*3600/get_RETRY
fail = 0
guidfname = {}
error = PilotErrors()
region = readpar('region')
# Space tokens currently not used for input files
# # check if there is are any space tokens
# _token = getProperSpaceTokenList(token, listSEs, len(lfns))
# Select the correct mover
copycmd, setup = getCopytool(mode="get")
# Get the sitemover object corresponding to the default copy command
sitemover = getSiteMover(copycmd, setup) # to be patched: job args should be passed here
sitemover.init_data(job) # quick hack to avoid nested passing arguments via ALL execution stack: TO BE fixed and properly implemented later in constructor
# Get the experiment object
thisExperiment = getExperiment(experiment)
# Get the name for the PFC file
_path = path
if eventService:
# Update the path (create the PFC in one level above the payload workdir)
path = os.path.abspath(os.path.join(path, '..'))
pfc_name = getPFCName(path, inputpoolfcstring)
# done with the event server modification (related to the PFC generation), reset the path again
path = _path
# Build the file info dictionary (use the filesize and checksum from the dispatcher if possible) and create the PFC
# Format: fileInfoDic[file_nr] = (guid, gpfn, fsize, fchecksum, filetype, copytool)
# replicas_dic[guid1] = [ replica1, .. ] where replicaN is an object of class replica
ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic, xml_source = \
getFileInfo(region, ub, queuename, guids, dsname, dsdict, lfns, pinitdir, analysisJob, tokens, DN, sitemover, error, path, dbh, DBReleaseIsAvailable,\
scope_dict, job.prodDBlockToken, pfc_name=pfc_name, filesizeIn=filesizeIn, checksumIn=checksumIn, thisExperiment=thisExperiment,\
computingSite=sitename, sourceSite=sourceSite, ddmEndPointIn=job.ddmEndPointIn)
if ec != 0:
return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary
# Until the Mover PFC file is no longer needed, call the TURL based PFC "PoolFileCatalogTURL.xml"
pfc_name_turl = pfc_name.replace(".xml", "TURL.xml")
# Create a SURL to space token dictionary
tokens_dictionary = getSurlTokenDictionary(lfns, tokens)
# Create a TURL based PFC if necessary/requested (i.e. if copy tool should not be used [useCT=False] and
# if oldPrefix and newPrefix are not already set in copysetup [useSetPrefixes=False])
if xml_source != "FAX":
ec, pilotErrorDiag, createdPFCTURL, usect, LFN_to_TURL_dictionary = PFC4TURLs(analysisJob, transferType, fileInfoDic, pfc_name_turl, sitemover, sitename, usect, dsdict, eventService, tokens_dictionary, sitename, sourceSite, lfns, scope_dict, job.experiment)
if ec != 0:
return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary
if LFN_to_TURL_dictionary != {}:
# Update the @input file (used to send potentially very large input file list to the trf)
status = updateInputFileWithTURLs(job.jobPars, LFN_to_TURL_dictionary)
if not status:
tolog("!!WARNING!!5465!! LFN to TURL replacement in @input file failed - Direct I/O will fail")
else:
tolog("(Skipping PFC4TURL call since it is not necessary in FAX mode)")
createdPFCTURL = True
# Correct the total file size for the DBRelease file if necessary
totalFileSize = correctTotalFileSize(totalFileSize, fileInfoDic, lfns, dbh, DBReleaseIsAvailable)
# Only bother with the size checks if the copy tool is to be used (non-direct access mode)
if usect:
# Get a proper maxinputsize from schedconfig/default
_maxinputsize = getMaxInputSize()
# Check the total input file size
ec, pilotErrorDiag = verifyInputFileSize(totalFileSize, _maxinputsize, error)
if ec != 0:
return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary
# Do we have enough local space to stage in all data and run the job?
ec, pilotErrorDiag = verifyAvailableSpace(sitemover, totalFileSize, path, error)
if ec != 0:
return ec, pilotErrorDiag, statusPFCTurl, FAX_dictionary
# Get the replica dictionary from file (used when the primary replica can not be staged due to some temporary error)
replica_dictionary = getReplicaDictionaryFile(path)
# file counters
N_files_on_tape = 0
N_root_files = 0
N_non_root_files = 0
# If FAX is used as a primary site mover then set the default FAX mode to true, otherwise | |
if (hasattr(self, 'n_samples_seen_') and
isinstance(self.n_samples_seen_, (int, np.integer))):
self.n_samples_seen_ = np.repeat(self.n_samples_seen_,
X.shape[1]).astype(np.int64)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
sparse_constructor = (sparse.csr_matrix
if X.format == 'csr' else sparse.csc_matrix)
counts_nan = sparse_constructor(
(np.isnan(X.data), X.indices, X.indptr),
shape=X.shape).sum(axis=0).A.ravel()
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = (X.shape[0] -
counts_nan).astype(np.int64)
if self.with_std:
# First pass
if not hasattr(self, 'scale_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
if hasattr(self, 'scale_'):
self.n_samples_seen_ += X.shape[0] - counts_nan
else:
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = np.zeros(X.shape[1], dtype=np.int64)
# First pass
if not hasattr(self, 'scale_'):
self.mean_ = .0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
if not self.with_mean and not self.with_std:
self.mean_ = None
self.var_ = None
self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)
else:
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
# for backward-compatibility, reduce n_samples_seen_ to an integer
# if the number of samples is the same for each feature (i.e. no
# missing values)
if np.ptp(self.n_samples_seen_) == 0:
self.n_samples_seen_ = self.n_samples_seen_[0]
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y='deprecated', copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES,
force_all_finite='allow-nan')
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
copy : bool, optional (default: None)
Copy the input X or not.
Returns
-------
X_tr : array-like, shape [n_samples, n_features]
Transformed array.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
    """Scale each feature by its maximum absolute value.

    This estimator scales and translates each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0. It does not shift/center the data, and
    thus does not destroy any sparsity.

    This scaler can also be applied to sparse CSR or CSC matrices.

    .. versionadded:: 0.17

    Parameters
    ----------
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.

        .. versionadded:: 0.17
           *scale_* attribute.

    max_abs_ : ndarray, shape (n_features,)
        Per feature maximum absolute value.

    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.

    Examples
    --------
    >>> from sklearn.preprocessing import MaxAbsScaler
    >>> X = [[ 1., -1.,  2.],
    ...      [ 2.,  0.,  0.],
    ...      [ 0.,  1., -1.]]
    >>> transformer = MaxAbsScaler().fit(X)
    >>> transformer
    MaxAbsScaler(copy=True)
    >>> transformer.transform(X)
    array([[ 0.5, -1. ,  1. ],
           [ 1. ,  0. ,  0. ],
           [ 0. ,  1. , -0.5]])

    See also
    --------
    maxabs_scale: Equivalent function without the estimator API.

    Notes
    -----
    NaNs are treated as missing values: disregarded in fit, and maintained in
    transform.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """

    def __init__(self, copy=True):
        self.copy = copy

    def _reset(self):
        """Reset internal data-dependent state of the scaler, if necessary.

        __init__ parameters are not touched.
        """
        # The fitted attributes are always set together in partial_fit,
        # so checking for one of them is sufficient.
        if hasattr(self, 'scale_'):
            del self.scale_
            del self.n_samples_seen_
            del self.max_abs_

    def fit(self, X, y=None):
        """Compute the maximum absolute value to be used for later scaling.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        # Fitting from scratch: drop any state left over from a previous fit
        self._reset()
        return self.partial_fit(X, y)

    def partial_fit(self, X, y=None):
        """Online computation of max absolute value of X for later scaling.

        All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to very large number of `n_samples`
        or because X is read from a continuous stream.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.

        y
            Ignored
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES,
                        force_all_finite='allow-nan')

        # Per-feature maximum absolute value of this batch (NaNs disregarded)
        if sparse.issparse(X):
            mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)
            batch_max_abs = np.maximum(np.abs(mins), np.abs(maxs))
        else:
            batch_max_abs = np.nanmax(np.abs(X), axis=0)

        if not hasattr(self, 'n_samples_seen_'):
            # First call to (partial_)fit
            self.n_samples_seen_ = X.shape[0]
            max_abs = batch_max_abs
        else:
            # Subsequent calls: merge with the running maximum
            self.n_samples_seen_ += X.shape[0]
            max_abs = np.maximum(self.max_abs_, batch_max_abs)

        self.max_abs_ = max_abs
        self.scale_ = _handle_zeros_in_scale(max_abs)
        return self

    def transform(self, X):
        """Scale the data

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be scaled.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES,
                        force_all_finite='allow-nan')

        if sparse.issparse(X):
            inplace_column_scale(X, 1.0 / self.scale_)
        else:
            X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : {array-like, sparse matrix}
            The data that should be transformed back.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        estimator=self, dtype=FLOAT_DTYPES,
                        force_all_finite='allow-nan')

        if sparse.issparse(X):
            inplace_column_scale(X, self.scale_)
        else:
            X *= self.scale_
        return X
def maxabs_scale(X, axis=0, copy=True):
    """Scale each feature to the [-1, 1] range without breaking the sparsity.

    This estimator scales each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0.

    This scaler can also be applied to sparse CSR or CSC matrices.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data.

    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.

    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).

    See also
    --------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the``Transformer`` API
        (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).

    Notes
    -----
    NaNs are treated as missing values: disregarded to compute the statistics,
    and maintained during the data transformation.

    For a comparison of the different scalers, transformers, and normalizers,
    see :ref:`examples/preprocessing/plot_all_scaling.py
    <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
    """  # noqa
    # This function, unlike the estimator class, accepts 1d input.
    # Any requested copy is performed inside the scaler object itself.
    X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
                    ensure_2d=False, dtype=FLOAT_DTYPES,
                    force_all_finite='allow-nan')
    was_1d = (X.ndim == 1)
    if was_1d:
        X = X.reshape(X.shape[0], 1)

    scaler = MaxAbsScaler(copy=copy)
    if axis == 0:
        X = scaler.fit_transform(X)
    else:
        X = scaler.fit_transform(X.T).T

    if was_1d:
        X = X.ravel()
    return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile | |
"name": "property-blue"
},
{
"url": "/images/status/property-export.png",
"name": "property-export"
},
{
"url": "/images/status/property-import.png",
"name": "property-import"
},
{
"url": "/images/status/property.png",
"name": "property"
},
{
"url": "/images/status/puzzle--arrow.png",
"name": "puzzle--arrow"
},
{
"url": "/images/status/puzzle--exclamation.png",
"name": "puzzle--exclamation"
},
{
"url": "/images/status/puzzle--minus.png",
"name": "puzzle--minus"
},
{
"url": "/images/status/puzzle--pencil.png",
"name": "puzzle--pencil"
},
{
"url": "/images/status/puzzle--plus.png",
"name": "puzzle--plus"
},
{
"url": "/images/status/puzzle.png",
"name": "puzzle"
},
{
"url": "/images/status/question-balloon.png",
"name": "question-balloon"
},
{
"url": "/images/status/question-button.png",
"name": "question-button"
},
{
"url": "/images/status/question-frame.png",
"name": "question-frame"
},
{
"url": "/images/status/question-octagon-frame.png",
"name": "question-octagon-frame"
},
{
"url": "/images/status/question-octagon.png",
"name": "question-octagon"
},
{
"url": "/images/status/question-shield.png",
"name": "question-shield"
},
{
"url": "/images/status/question-small-white.png",
"name": "question-small-white"
},
{
"url": "/images/status/question-small.png",
"name": "question-small"
},
{
"url": "/images/status/question-white.png",
"name": "question-white"
},
{
"url": "/images/status/question.png",
"name": "question"
},
{
"url": "/images/status/quill--arrow.png",
"name": "quill--arrow"
},
{
"url": "/images/status/quill--exclamation.png",
"name": "quill--exclamation"
},
{
"url": "/images/status/quill--minus.png",
"name": "quill--minus"
},
{
"url": "/images/status/quill--plus.png",
"name": "quill--plus"
},
{
"url": "/images/status/quill.png",
"name": "quill"
},
{
"url": "/images/status/rainbow.png",
"name": "rainbow"
},
{
"url": "/images/status/receipt--arrow.png",
"name": "receipt--arrow"
},
{
"url": "/images/status/receipt--exclamation.png",
"name": "receipt--exclamation"
},
{
"url": "/images/status/receipt--minus.png",
"name": "receipt--minus"
},
{
"url": "/images/status/receipt--pencil.png",
"name": "receipt--pencil"
},
{
"url": "/images/status/receipt--plus.png",
"name": "receipt--plus"
},
{
"url": "/images/status/receipt-excel-text.png",
"name": "receipt-excel-text"
},
{
"url": "/images/status/receipt-excel.png",
"name": "receipt-excel"
},
{
"url": "/images/status/receipt-export.png",
"name": "receipt-export"
},
{
"url": "/images/status/receipt-import.png",
"name": "receipt-import"
},
{
"url": "/images/status/receipt-invoice.png",
"name": "receipt-invoice"
},
{
"url": "/images/status/receipt-shred.png",
"name": "receipt-shred"
},
{
"url": "/images/status/receipt-stamp.png",
"name": "receipt-stamp"
},
{
"url": "/images/status/receipt-text.png",
"name": "receipt-text"
},
{
"url": "/images/status/receipt.png",
"name": "receipt"
},
{
"url": "/images/status/receipts-text.png",
"name": "receipts-text"
},
{
"url": "/images/status/receipts.png",
"name": "receipts"
},
{
"url": "/images/status/report--arrow.png",
"name": "report--arrow"
},
{
"url": "/images/status/report--exclamation.png",
"name": "report--exclamation"
},
{
"url": "/images/status/report--minus.png",
"name": "report--minus"
},
{
"url": "/images/status/report--pencil.png",
"name": "report--pencil"
},
{
"url": "/images/status/report--plus.png",
"name": "report--plus"
},
{
"url": "/images/status/report-excel.png",
"name": "report-excel"
},
{
"url": "/images/status/report-paper.png",
"name": "report-paper"
},
{
"url": "/images/status/report-word.png",
"name": "report-word"
},
{
"url": "/images/status/report.png",
"name": "report"
},
{
"url": "/images/status/reports-stack.png",
"name": "reports-stack"
},
{
"url": "/images/status/reports.png",
"name": "reports"
},
{
"url": "/images/status/robot-off.png",
"name": "robot-off"
},
{
"url": "/images/status/robot.png",
"name": "robot"
},
{
"url": "/images/status/rocket--arrow.png",
"name": "rocket--arrow"
},
{
"url": "/images/status/rocket--exclamation.png",
"name": "rocket--exclamation"
},
{
"url": "/images/status/rocket--minus.png",
"name": "rocket--minus"
},
{
"url": "/images/status/rocket--pencil.png",
"name": "rocket--pencil"
},
{
"url": "/images/status/rocket--plus.png",
"name": "rocket--plus"
},
{
"url": "/images/status/rocket-fly.png",
"name": "rocket-fly"
},
{
"url": "/images/status/rocket.png",
"name": "rocket"
},
{
"url": "/images/status/ruby.png",
"name": "ruby"
},
{
"url": "/images/status/ruler--arrow.png",
"name": "ruler--arrow"
},
{
"url": "/images/status/ruler--exclamation.png",
"name": "ruler--exclamation"
},
{
"url": "/images/status/ruler--minus.png",
"name": "ruler--minus"
},
{
"url": "/images/status/ruler--pencil.png",
"name": "ruler--pencil"
},
{
"url": "/images/status/ruler--plus.png",
"name": "ruler--plus"
},
{
"url": "/images/status/ruler-crop.png",
"name": "ruler-crop"
},
{
"url": "/images/status/ruler-triangle.png",
"name": "ruler-triangle"
},
{
"url": "/images/status/ruler.png",
"name": "ruler"
},
{
"url": "/images/status/safe--arrow.png",
"name": "safe--arrow"
},
{
"url": "/images/status/safe--exclamation.png",
"name": "safe--exclamation"
},
{
"url": "/images/status/safe--minus.png",
"name": "safe--minus"
},
{
"url": "/images/status/safe--pencil.png",
"name": "safe--pencil"
},
{
"url": "/images/status/safe--plus.png",
"name": "safe--plus"
},
{
"url": "/images/status/safe.png",
"name": "safe"
},
{
"url": "/images/status/scanner--arrow.png",
"name": "scanner--arrow"
},
{
"url": "/images/status/scanner--exclamation.png",
"name": "scanner--exclamation"
},
{
"url": "/images/status/scanner--minus.png",
"name": "scanner--minus"
},
{
"url": "/images/status/scanner--pencil.png",
"name": "scanner--pencil"
},
{
"url": "/images/status/scanner--plus.png",
"name": "scanner--plus"
},
{
"url": "/images/status/scanner-off.png",
"name": "scanner-off"
},
{
"url": "/images/status/scanner.png",
"name": "scanner"
},
{
"url": "/images/status/scissors--arrow.png",
"name": "scissors--arrow"
},
{
"url": "/images/status/scissors--exclamation.png",
"name": "scissors--exclamation"
},
{
"url": "/images/status/scissors--minus.png",
"name": "scissors--minus"
},
{
"url": "/images/status/scissors--pencil.png",
"name": "scissors--pencil"
},
{
"url": "/images/status/scissors--plus.png",
"name": "scissors--plus"
},
{
"url": "/images/status/scissors-blue.png",
"name": "scissors-blue"
},
{
"url": "/images/status/scissors.png",
"name": "scissors"
},
{
"url": "/images/status/screwdriver--arrow.png",
"name": "screwdriver--arrow"
},
{
"url": "/images/status/screwdriver--exclamation.png",
"name": "screwdriver--exclamation"
},
{
"url": "/images/status/screwdriver--minus.png",
"name": "screwdriver--minus"
},
{
"url": "/images/status/screwdriver--pencil.png",
"name": "screwdriver--pencil"
},
{
"url": "/images/status/screwdriver--plus.png",
"name": "screwdriver--plus"
},
{
"url": "/images/status/screwdriver.png",
"name": "screwdriver"
},
{
"url": "/images/status/script--arrow.png",
"name": "script--arrow"
},
{
"url": "/images/status/script--exclamation.png",
"name": "script--exclamation"
},
{
"url": "/images/status/script--minus.png",
"name": "script--minus"
},
{
"url": "/images/status/script--pencil.png",
"name": "script--pencil"
},
{
"url": "/images/status/script--plus.png",
"name": "script--plus"
},
{
"url": "/images/status/script-attribute-b.png",
"name": "script-attribute-b"
},
{
"url": "/images/status/script-attribute-c.png",
"name": "script-attribute-c"
},
{
"url": "/images/status/script-attribute-d.png",
"name": "script-attribute-d"
},
{
"url": "/images/status/script-attribute-e.png",
"name": "script-attribute-e"
},
{
"url": "/images/status/script-attribute-f.png",
"name": "script-attribute-f"
},
{
"url": "/images/status/script-attribute-g.png",
"name": "script-attribute-g"
},
{
"url": "/images/status/script-attribute-h.png",
"name": "script-attribute-h"
},
{
"url": "/images/status/script-attribute-i.png",
"name": "script-attribute-i"
},
{
"url": "/images/status/script-attribute-j.png",
"name": "script-attribute-j"
},
{
"url": "/images/status/script-attribute-k.png",
"name": "script-attribute-k"
},
{
"url": "/images/status/script-attribute-l.png",
"name": "script-attribute-l"
},
{
"url": "/images/status/script-attribute-m.png",
"name": "script-attribute-m"
},
{
"url": "/images/status/script-attribute-n.png",
"name": "script-attribute-n"
},
{
"url": "/images/status/script-attribute-o.png",
"name": "script-attribute-o"
},
{
"url": "/images/status/script-attribute-p.png",
"name": "script-attribute-p"
},
{
"url": "/images/status/script-attribute-q.png",
"name": "script-attribute-q"
},
{
"url": "/images/status/script-attribute-r.png",
"name": "script-attribute-r"
},
{
"url": "/images/status/script-attribute-s.png",
"name": "script-attribute-s"
},
{
"url": "/images/status/script-attribute-t.png",
"name": "script-attribute-t"
},
{
"url": "/images/status/script-attribute-u.png",
"name": "script-attribute-u"
},
{
"url": "/images/status/script-attribute-v.png",
"name": "script-attribute-v"
},
{
"url": "/images/status/script-attribute-w.png",
"name": "script-attribute-w"
},
{
"url": "/images/status/script-attribute-x.png",
"name": "script-attribute-x"
},
{
"url": "/images/status/script-attribute-y.png",
"name": "script-attribute-y"
},
{
"url": "/images/status/script-attribute-z.png",
"name": "script-attribute-z"
},
{
"url": "/images/status/script-attribute.png",
"name": "script-attribute"
},
{
"url": "/images/status/script-binary.png",
"name": "script-binary"
},
{
"url": "/images/status/script-block.png",
"name": "script-block"
},
{
"url": "/images/status/script-code.png",
"name": "script-code"
},
{
"url": "/images/status/script-excel.png",
"name": "script-excel"
},
{
"url": "/images/status/script-export.png",
"name": "script-export"
},
{
"url": "/images/status/script-flash.png",
"name": "script-flash"
},
{
"url": "/images/status/script-globe.png",
"name": "script-globe"
},
{
"url": "/images/status/script-import.png",
"name": "script-import"
},
{
"url": "/images/status/script-office.png",
"name": "script-office"
},
{
"url": "/images/status/script-php.png",
"name": "script-php"
},
{
"url": "/images/status/script-stamp.png",
"name": "script-stamp"
},
{
"url": "/images/status/script-text.png",
"name": "script-text"
},
{
"url": "/images/status/script-visual-studio.png",
"name": "script-visual-studio"
},
{
"url": "/images/status/script-word.png",
"name": "script-word"
},
{
"url": "/images/status/script.png",
"name": "script"
},
{
"url": "/images/status/scripts-text.png",
"name": "scripts-text"
},
{
"url": "/images/status/scripts.png",
"name": "scripts"
},
{
"url": "/images/status/selection-input.png",
"name": "selection-input"
},
{
"url": "/images/status/selection-select-input.png",
"name": "selection-select-input"
},
{
"url": "/images/status/selection-select.png",
"name": "selection-select"
},
{
"url": "/images/status/selection.png",
"name": "selection"
},
{
"url": "/images/status/server--arrow.png",
"name": "server--arrow"
},
{
"url": "/images/status/server--exclamation.png",
"name": "server--exclamation"
},
{
"url": "/images/status/server--minus.png",
"name": "server--minus"
},
{
"url": "/images/status/server--pencil.png",
"name": "server--pencil"
},
{
"url": "/images/status/server--plus.png",
"name": "server--plus"
},
{
"url": "/images/status/server-cast.png",
"name": "server-cast"
},
{
"url": "/images/status/server-network.png",
"name": "server-network"
},
{
"url": "/images/status/server-property.png",
"name": "server-property"
},
{
"url": "/images/status/server.png",
"name": "server"
},
{
"url": "/images/status/servers-network.png",
"name": "servers-network"
},
{
"url": "/images/status/servers.png",
"name": "servers"
},
{
"url": "/images/status/service-bell--arrow.png", | |
# -*- coding: utf-8 -*-
"""
shepherd_herd
~~~~~
click-based command line utility for controlling a group of shepherd nodes
remotely through ssh. Provides commands for starting/stopping harvest and
emulation, retrieving recordings to the local machine and flashing firmware
images to target sensor nodes.
:copyright: (c) 2019 Networked Embedded Systems Lab, TU Dresden.
:license: MIT, see LICENSE for more details.
"""
import click
import re
import time
from fabric import Group
import numpy as np
from io import StringIO
from pathlib import Path
import telnetlib
import yaml
import logging
import click_config_file
# Module-level logger for the herd tool; a StreamHandler (which writes to
# sys.stderr by default) makes the log output visible on the console.
consoleHandler = logging.StreamHandler()
logger = logging.getLogger("shepherd-herd")
logger.addHandler(consoleHandler)
def yamlprovider(file_path, cmd_name):
    """Load a YAML configuration file and return its parsed content.

    The ``cmd_name`` argument is part of the provider call signature but is
    not needed to read the file.
    """
    logger.info(f"reading config from {file_path}")
    with open(file_path, "r") as stream:
        parsed_config = yaml.safe_load(stream)
    return parsed_config
def find_consensus_time(group):
    """Finds a start time in the future when all nodes should start service

    In order to run synchronously, all nodes should start at the same time.
    This is achieved by querying all nodes to check any large time offset,
    agreeing on a common time in the future and waiting for that time on each
    node.

    Args:
        group (fabric.Group): Group of fabric hosts on which to start shepherd.

    Returns:
        Tuple of the agreed start timestamp (int) and that timestamp's
        offset in seconds relative to the first host's clock (float).
    """
    # Query the current unix timestamp on every host
    ts_nows = np.array(
        [float(cnx.run("date +%s", hide=True, warn=True).stdout) for cnx in group]
    )

    if len(ts_nows) == 1:
        # Single host: simply start 20s from now
        ts_start = ts_nows[0] + 20
    else:
        ts_max = ts_nows.max()
        # Refuse to schedule if the host clocks disagree by a large margin
        if np.any(np.abs(ts_nows - ts_max) > 10):
            raise Exception("Time difference between hosts greater 10s")
        # Leave enough headroom so that every node can receive the command
        ts_start = ts_max + 20 + 2 * len(group)
    return int(ts_start), float(ts_start - ts_nows[0])
def configure_shepherd(
    group: Group,
    command: str,
    parameters: dict,
    hostnames: dict,
    verbose: int = 0,
):
    """Configures shepherd service on the group of hosts.

    Rolls out a configuration file according to the given command and parameters
    service.

    Args:
        group (fabric.Group): Group of fabric hosts on which to start shepherd.
        command (str): What shepherd is supposed to do. One of 'harvest' or 'emulation'.
        parameters (dict): Parameters for shepherd-sheep
        hostnames (dict): Dictionary of hostnames corresponding to fabric hosts
        verbose (int): Verbosity for shepherd-sheep
    """
    config_yml = yaml.dump(
        {"command": command, "verbose": verbose, "parameters": parameters},
        default_flow_style=False,
        sort_keys=False,
    )
    logger.debug(f"Rolling out the following config:\n\n{config_yml}")

    for cnx in group:
        # Only proceed when the service is not running; the error message
        # implies exit code 3 is treated as "inactive" here
        res = cnx.sudo("systemctl status shepherd", hide=True, warn=True)
        if res.exited != 3:
            raise Exception(f"shepherd not inactive on {hostnames[cnx.host]}")
        # Stage the file in /tmp first, then move it into place with sudo
        cnx.put(StringIO(config_yml), "/tmp/config.yml")
        cnx.sudo("mv /tmp/config.yml /etc/shepherd/config.yml")
def start_shepherd(
    group: Group, hostnames: dict,
):
    """Starts shepherd service on the group of hosts.

    Args:
        group (fabric.Group): Group of fabric hosts on which to start shepherd.
        hostnames (dict): Dictionary of hostnames corresponding to fabric hosts
    """
    for cnx in group:
        # Refuse to start unless the service is currently inactive
        # (exit code 3 from systemctl status)
        status = cnx.sudo("systemctl status shepherd", hide=True, warn=True)
        if status.exited != 3:
            raise Exception(f"shepherd not inactive on {hostnames[cnx.host]}")
        cnx.sudo("systemctl start shepherd", hide=True, warn=True)
@click.group(context_settings=dict(help_option_names=["-h", "--help"], obj={}))
@click.option("--inventory", "-i", type=str,
              default="inventory/herd.yml",
              help="List of target hosts as comma-separated string or path to ansible-style yaml file")
@click.option("--limit", "-l", type=str,
              help="Comma-separated list of hosts to limit execution to")
@click.option("--user", "-u", type=str, help="User name for login to nodes")
@click.option("--key-filename", "-k", type=click.Path(exists=True),
              help="Path to private ssh key file")
@click.option("-v", "--verbose", count=True, default=2)
@click.pass_context
def cli(ctx, inventory, limit, user, key_filename, verbose):
    """Resolve the host inventory and store a fabric Group in the context.

    The inventory is either a trailing-comma host list given directly on
    the command line or a path to an ansible-style YAML inventory file.
    Populates ctx.obj with 'fab group', 'hostnames' and 'verbose'.
    """
    if inventory.rstrip().endswith(","):
        # Hosts passed directly as 'host1,host2,' on the command line.
        hostlist = inventory.split(",")[:-1]
        if limit is not None:
            # Fix: 'limit' is a comma-separated string; intersecting with
            # set(limit) compared hostnames against single characters and
            # always produced an empty selection.
            hostlist = list(set(hostlist) & set(limit.split(",")))
        hostnames = {hostname: hostname for hostname in hostlist}
    else:
        host_path = Path(inventory)
        if not host_path.exists():
            raise click.FileError(inventory)
        with open(host_path, "r") as stream:
            try:
                inventory_data = yaml.safe_load(stream)
            except yaml.YAMLError:
                raise click.UsageError(
                    f"Couldn't read inventory file {host_path}"
                )
        hostlist = list()
        hostnames = dict()
        for hostname, hostvars in inventory_data["sheep"]["hosts"].items():
            if limit is not None and hostname not in limit.split(","):
                continue
            # Prefer the ansible_host address for connecting, but keep the
            # inventory hostname for user-facing output.
            if "ansible_host" in hostvars.keys():
                hostlist.append(hostvars["ansible_host"])
                hostnames[hostvars["ansible_host"]] = hostname
            else:
                hostlist.append(hostname)
                hostnames[hostname] = hostname
        if user is None:
            try:
                user = inventory_data["sheep"]["vars"]["ansible_user"]
            except KeyError:
                pass
    if user is None:
        raise click.UsageError(
            "Provide user by command line or in inventory file"
        )
    # Map the -v count onto logging levels (default of 2 -> INFO).
    if verbose == 0:
        logger.setLevel(logging.ERROR)
    elif verbose == 1:
        logger.setLevel(logging.WARNING)
    elif verbose == 2:
        logger.setLevel(logging.INFO)
    elif verbose > 2:
        logger.setLevel(logging.DEBUG)
    ctx.obj["verbose"] = verbose
    connect_kwargs = dict()
    if key_filename is not None:
        connect_kwargs["key_filename"] = key_filename
    ctx.obj["fab group"] = Group(
        *hostlist, user=user, connect_kwargs=connect_kwargs
    )
    ctx.obj["hostnames"] = hostnames
@cli.command(short_help="Power off shepherd nodes")
@click.option("--restart", "-r", is_flag=True, help="Reboot")
@click.pass_context
def poweroff(ctx, restart):
    """Power off all nodes of the group, or reboot them with --restart."""
    action = "reboot" if restart else "poweroff"
    verb = "rebooting" if restart else "powering off"
    for cnx in ctx.obj["fab group"]:
        logger.info(f"{verb} {ctx.obj['hostnames'][cnx.host]}")
        cnx.sudo(action, hide=True, warn=True)
@cli.command(short_help="Run COMMAND on the shell")
@click.pass_context
@click.argument("command", type=str)
@click.option("--sudo", "-s", is_flag=True, help="Run command with sudo")
def run(ctx, command, sudo):
    """Execute COMMAND on every node, printing a banner per host."""
    for cnx in ctx.obj["fab group"]:
        click.echo(
            f"************** {ctx.obj['hostnames'][cnx.host]} **************"
        )
        # Pick the privileged or unprivileged runner once, then invoke it.
        runner = cnx.sudo if sudo else cnx.run
        runner(command, warn=True)
@cli.group(
    short_help="Remote programming/debugging of the target sensor node",
    invoke_without_command=True,
)
@click.option(
    "--port",
    "-p",
    type=int,
    default=4444,
    help="Port on which OpenOCD should listen for telnet",
)
@click.option(
    "--on/--off",
    default=True,
    help="Enable/disable power and debug access to the target",
)
@click.option("--voltage", "-v", type=float, default=3.0, help="Target supply voltage")
@click.option("--sel_a/--sel_b", default=True,
              help="Choose (main)Target that gets connected to virtual Source")
@click.pass_context
def target(ctx, port, on, voltage, sel_a):
    """Power the target up (starting openocd) or down on every node."""
    ctx.obj["openocd_telnet_port"] = port
    sel_target = "sel_a" if sel_a else "sel_b"
    if not (on or ctx.invoked_subcommand):
        # Explicit --off with no subcommand: tear everything down.
        for cnx in ctx.obj["fab group"]:
            cnx.sudo("systemctl stop shepherd-openocd")
            cnx.sudo("shepherd-sheep target-power --off", hide=True)
    else:
        for cnx in ctx.obj["fab group"]:
            cnx.sudo(f"shepherd-sheep target-power --on --voltage {voltage} --{sel_target}", hide=True)
            start_openocd(cnx, ctx.obj["hostnames"][cnx.host])
@target.resultcallback()
@click.pass_context
def process_result(ctx, result, **kwargs):
    """After any 'target' subcommand, power everything down if --off was given."""
    # NOTE(review): Group.resultcallback() was renamed result_callback()
    # in click 8 and removed in 8.1 — confirm the pinned click version.
    if kwargs["on"]:
        return
    for cnx in ctx.obj["fab group"]:
        cnx.sudo("systemctl stop shepherd-openocd")
        cnx.sudo("shepherd-sheep target-power --off", hide=True)
def start_openocd(cnx, hostname, timeout=30):
    """Start the shepherd-openocd service and wait until it reports active.

    Args:
        cnx: Fabric connection to the node.
        hostname: Readable hostname, used only for log/error messages.
        timeout: Maximum number of seconds to wait for the service.

    Raises:
        TimeoutError: If openocd does not come up within *timeout* seconds.
    """
    cnx.sudo("systemctl start shepherd-openocd", hide=True, warn=True)
    deadline = time.time() + timeout
    while True:
        status = cnx.sudo(
            "systemctl status shepherd-openocd", hide=True, warn=True
        )
        # Exit code 0 means the unit is active; poll once a second otherwise.
        if status.exited == 0:
            return
        if time.time() > deadline:
            raise TimeoutError(
                f"Timed out waiting for openocd on host {hostname}"
            )
        logger.debug(f"waiting for openocd on {hostname}")
        time.sleep(1)
@target.command(short_help="Flashes the binary IMAGE file to the target")
@click.argument("image", type=click.Path(exists=True))
@click.pass_context
def flash(ctx, image):
    """Upload IMAGE to each node and program the target via openocd telnet."""
    for cnx in ctx.obj["fab group"]:
        cnx.put(image, "/tmp/target_image.bin")
        node = ctx.obj["hostnames"][cnx.host]
        with telnetlib.Telnet(cnx.host, ctx.obj["openocd_telnet_port"]) as tn:
            logger.debug(f"connected to openocd on {node}")
            tn.write(b"program /tmp/target_image.bin verify reset\n")
            # openocd prints 'Verified OK' once programming succeeded.
            reply = tn.read_until(b"Verified OK", timeout=5)
            if b"Verified OK" not in reply:
                logger.error(f"failed flashing image on {node}")
            else:
                logger.info(f"flashed image on {node} successfully")
@target.command(short_help="Halts the target")
@click.pass_context
def halt(ctx):
    """Halt the target MCU on every node via the openocd telnet interface."""
    port = ctx.obj["openocd_telnet_port"]
    for cnx in ctx.obj["fab group"]:
        node = ctx.obj["hostnames"][cnx.host]
        with telnetlib.Telnet(cnx.host, port) as tn:
            logger.debug(f"connected to openocd on {node}")
            tn.write(b"halt\n")
            logger.info(f"target halted on {node}")
@target.command(short_help="Erases the target")
@click.pass_context
def erase(ctx):
    """Halt and mass-erase the (nRF52) target on every node via openocd."""
    port = ctx.obj["openocd_telnet_port"]
    for cnx in ctx.obj["fab group"]:
        node = ctx.obj["hostnames"][cnx.host]
        with telnetlib.Telnet(cnx.host, port) as tn:
            logger.debug(f"connected to openocd on {node}")
            # The target must be halted before the flash can be erased.
            tn.write(b"halt\n")
            logger.info(f"target halted on {node}")
            tn.write(b"nrf52 mass_erase\n")
            logger.info(f"target erased on {node}")
@target.command(short_help="Resets the target")
@click.pass_context
def reset(ctx):
    """Reset the target MCU on every node via the openocd telnet interface."""
    port = ctx.obj["openocd_telnet_port"]
    for cnx in ctx.obj["fab group"]:
        node = ctx.obj["hostnames"][cnx.host]
        with telnetlib.Telnet(cnx.host, port) as tn:
            logger.debug(f"connected to openocd on {node}")
            tn.write(b"reset\n")
            logger.info(f"target reset on {node}")
@cli.command(short_help="Record IV data from a harvest-source")
@click.option("--output_path", "-o", type=click.Path(),
              default="/var/shepherd/recordings/",
              help="Dir or file path for resulting hdf5 file")
@click.option("--harvester", type=str, default=None,
              help="Choose one of the predefined virtual harvesters")
@click.option("--duration", "-d", type=click.FLOAT, help="Duration of recording in seconds")
@click.option("--force_overwrite", "-f", is_flag=True, help="Overwrite existing file")
@click.option("--use_cal_default", is_flag=True, help="Use default calibration values")
@click.option("--start/--no-start", default=True, help="Start shepherd after uploading config")
@click.pass_context
def harvest(
    ctx,
    output_path,
    harvester,
    duration,
    force_overwrite,
    use_cal_default,
    start,
):
    """Configure (and optionally schedule) a harvest run on all nodes."""
    # Relative output paths are placed under the default recordings dir.
    out_file = Path(output_path)
    if not out_file.is_absolute():
        out_file = Path("/var/shepherd/recordings") / output_path
    parameter_dict = {
        "output_path": str(out_file),
        "harvester": harvester,
        "duration": duration,
        "force_overwrite": force_overwrite,
        "use_cal_default": use_cal_default,
    }
    if start:
        # Agree on a common start time so all nodes begin in sync.
        ts_start, delay = find_consensus_time(ctx.obj["fab group"])
        parameter_dict["start_time"] = ts_start
    configure_shepherd(
        ctx.obj["fab group"],
        "harvest",
        parameter_dict,
        ctx.obj["hostnames"],
        ctx.obj["verbose"],
    )
    if start:
        logger.debug(f"Scheduling start of shepherd at {ts_start} (in ~ {delay} s)")
        start_shepherd(ctx.obj["fab group"], ctx.obj["hostnames"])
@cli.command(short_help="Emulates IV data read from INPUT hdf5 file")
@click.argument("input_path", type=click.Path())
@click.option("--output_path", "-o", type=click.Path(),
default="/var/shepherd/recordings/",
help="Dir or file path for resulting hdf5 file with load recordings")
@click.option("--duration", "-d", type=click.FLOAT, help="Duration of recording in seconds")
@click.option("--force_overwrite", "-f", is_flag=True, help="Overwrite existing file")
@click.option("--use_cal_default", is_flag=True, help="Use default calibration values")
@click.option("--enable_io/--disable_io", default=True,
help="Switch the GPIO level converter to targets on/off")
@click.option("--io_sel_target_a/--io_sel_target_b", default=True,
help="Choose Target that gets connected to IO")
@click.option("--pwr_sel_target_a/--pwr_sel_target_b", default=True,
help="Choose (main)Target that gets connected to virtual Source")
@click.option("--aux_voltage", type=float,
help="Set Voltage of auxiliary Power Source (second target)")
@click.option("--virtsource", default=dict(), help="Use the desired setting for the virtual source")
@click_config_file.configuration_option(provider=yamlprovider, implicit=False)
@click.option(
"--start/--no-start",
default=True,
help="Start shepherd after uploading config",
)
@click.pass_context
def emulate(
ctx,
input_path,
output_path,
duration,
force_overwrite,
use_cal_default,
enable_target_io,
sel_target_a_for_io,
sel_target_a_for_pwr,
aux_target_voltage,
virtsource,
start,
):
fp_input = Path(input_path)
if not fp_input.is_absolute():
fp_input = Path("/var/shepherd/recordings") / input_path
parameter_dict = {
"input_path": str(fp_input),
"force_overwrite": force_overwrite,
"duration": duration,
"use_cal_default": use_cal_default,
"set_target_io_lvl_conv": enable_target_io,
"sel_target_for_io": sel_target_a_for_io,
"sel_target_for_pwr": sel_target_a_for_pwr,
"aux_target_voltage": aux_target_voltage,
"settings_virtsource": virtsource,
}
if output_path is not None:
fp_output = Path(output_path)
if not fp_output.is_absolute():
fp_output = Path("/var/shepherd/recordings") / output_path
| |
<gh_stars>0
import os
from .base import PythonGnuRecipe
from hardhat.util import patch
class PyGObjectRecipe(PythonGnuRecipe):
    """Build recipe for pygobject 3.x (Python GObject bindings)."""

    def __init__(self, *args, **kwargs):
        super(PyGObjectRecipe, self).__init__(*args, **kwargs)
        self.name = 'pygobject'
        self.depends = ['glib', 'gobject-introspection', 'gtk3']
        self.pydepends = ['pycairo']
        # Versions with an odd minor version number (second number) are
        # developmental versions and may show deprecation warnings,
        # for instance in 'ipython --pylab'.
        self.version = '3.20.1'
        self.sha256 = '3d261005d6fed6a92ac4c25f28379255' \
                      '2f7dad865d1b7e0c03c2b84c04dbd745'
        # GNOME mirrors shard the tarballs by 'major.minor' directory.
        short_version = '.'.join(self.version.split('.')[:2])
        self.url = 'http://ftp.gnome.org/pub/GNOME/sources/$name/' \
                   '%s/$name-$version.tar.xz' % short_version

    def configure(self):
        # Point the build at the interpreter selected by the base recipe.
        flag = '--with-python=%s' % self.python
        self.configure_args += [flag]
        super(PyGObjectRecipe, self).configure()
class PyGObject2Recipe(PythonGnuRecipe):
def __init__(self, *args, **kwargs):
super(PyGObject2Recipe, self).__init__(*args, **kwargs)
self.sha256 = 'fb8a1d4f665130a125011659bd347c73' \
'39c944232163dbb9a34fd0686577adb8'
self.pythons = ['python2']
self.depends = ['glib', 'gobject-introspection', 'gtk3']
self.pydepends = ['pycairo']
self.name = 'pygobject2'
# versions with an odd minor version number (second number) are
# developmental versions and may show deprecation warnings
# for instance in 'ipython --pylab'
self.version = '2.28.6'
self.url = 'http://ftp.gnome.org/pub/GNOME/sources/pygobject/' \
'%s/pygobject-$version.tar.xz' % self.short_version
def configure(self):
self.configure_args += ['--with-python=%s' % self.python]
super(PyGObject2Recipe, self).configure()
def patch(self):
text = r'''
Submitted By: <NAME> <<EMAIL>> (gobject-introspection) and <NAME>. <<EMAIL>>, after thomas kaedin (git)
Date: 2012-03-29 (gobject-introspection) and 2014-03-04 (git)
Initial Package Version: 2.28.6
Upstream Status: not submitted (gobject-introspection) and committed (git)
Origin: <NAME> (gobject-introspection) and upstream (git)
Description: Fixes compiling with recent versions of gobject-introspection; and upstream fixes
diff -Naur pygobject-2.28.6.orig/configure.ac pygobject-2.28.6/configure.ac
--- pygobject-2.28.6.orig/configure.ac 2011-06-13 13:33:56.000000000 -0300
+++ pygobject-2.28.6/configure.ac 2014-03-04 18:36:07.947079909 -0300
@@ -85,7 +85,7 @@
AM_PROG_CC_STDC
AM_PROG_CC_C_O
-# check that we have the minimum version of python necisary to build
+# check that we have the minimum version of python necessary to build
JD_PATH_PYTHON(python_min_ver)
# check if we are building for python 3
@@ -236,7 +236,7 @@
AC_ARG_ENABLE(introspection,
AC_HELP_STRING([--enable-introspection], [Use introspection information]),
enable_introspection=$enableval,
- enable_introspection=yes)
+ enable_introspection=no)
if test "$enable_introspection" != no; then
AC_DEFINE(ENABLE_INTROSPECTION,1,Use introspection information)
PKG_CHECK_MODULES(GI,
@@ -262,6 +262,9 @@
AC_SUBST(INTROSPECTION_SCANNER)
AC_SUBST(INTROSPECTION_COMPILER)
+dnl Do not install codegen for Python 3.
+AM_CONDITIONAL(ENABLE_CODEGEN, test $build_py3k = false)
+
dnl add required cflags ...
if test "x$GCC" = "xyes"; then
JH_ADD_CFLAG([-Wall])
@@ -281,8 +284,6 @@
Makefile
pygobject-2.0.pc
pygobject-2.0-uninstalled.pc
- codegen/Makefile
- codegen/pygobject-codegen-2.0
docs/Makefile
docs/reference/entities.docbook
docs/xsl/fixxref.py
@@ -295,6 +296,13 @@
examples/Makefile
tests/Makefile
PKG-INFO)
+
+if test $build_py3k = false; then
+ AC_CONFIG_FILES(
+ codegen/Makefile
+ codegen/pygobject-codegen-2.0)
+fi
+
AC_OUTPUT
echo
diff -Naur pygobject-2.28.6.orig/gi/module.py pygobject-2.28.6/gi/module.py
--- pygobject-2.28.6.orig/gi/module.py 2011-06-13 13:30:25.000000000 -0300
+++ pygobject-2.28.6/gi/module.py 2014-03-04 18:36:07.947079909 -0300
@@ -24,7 +24,11 @@
import os
import gobject
-import string
+try:
+ maketrans = ''.maketrans
+except AttributeError:
+ # fallback for Python 2
+ from string import maketrans
import gi
from .overrides import registry
diff -Naur pygobject-2.28.6.orig/gi/overrides/Gtk.py pygobject-2.28.6/gi/overrides/Gtk.py
--- pygobject-2.28.6.orig/gi/overrides/Gtk.py 2011-06-13 13:33:49.000000000 -0300
+++ pygobject-2.28.6/gi/overrides/Gtk.py 2014-03-04 18:36:07.949079863 -0300
@@ -35,6 +35,18 @@
Gtk = modules['Gtk']._introspection_module
__all__ = []
+if Gtk._version == '2.0':
+ import warnings
+ warn_msg = "You have imported the Gtk 2.0 module. Because Gtk 2.0 \
+was not designed for use with introspection some of the \
+interfaces and API will fail. As such this is not supported \
+by the pygobject development team and we encourage you to \
+port your app to Gtk 3 or greater. PyGTK is the recomended \
+python module to use with Gtk 2.0"
+
+ warnings.warn(warn_msg, RuntimeWarning)
+
+
class Widget(Gtk.Widget):
def translate_coordinates(self, dest_widget, src_x, src_y):
@@ -401,16 +413,22 @@
def __init__(self,
parent=None,
flags=0,
- type=Gtk.MessageType.INFO,
+ message_type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.NONE,
message_format=None,
**kwds):
if message_format != None:
kwds['text'] = message_format
+
+ if 'type' in kwds:
+ import warnings
+ warnings.warn("The use of the keyword type as a parameter of the Gtk.MessageDialog constructor has been depricated. Please use message_type instead.", DeprecationWarning)
+ message_type = kwds['type']
+
Gtk.MessageDialog.__init__(self,
_buttons_property=buttons,
- message_type=type,
+ message_type=message_type,
**kwds)
Dialog.__init__(self, parent=parent, flags=flags)
@@ -619,12 +637,18 @@
def forward_search(self, string, flags, limit):
success, match_start, match_end = super(TextIter, self).forward_search(string,
flags, limit)
- return (match_start, match_end,)
+ if success:
+ return (match_start, match_end)
+ else:
+ return None
def backward_search(self, string, flags, limit):
success, match_start, match_end = super(TextIter, self).backward_search(string,
flags, limit)
- return (match_start, match_end,)
+ if success:
+ return (match_start, match_end)
+ else:
+ return None
def begins_tag(self, tag=None):
return super(TextIter, self).begins_tag(tag)
diff -Naur pygobject-2.28.6.orig/gi/pygi-foreign-cairo.c pygobject-2.28.6/gi/pygi-foreign-cairo.c
--- pygobject-2.28.6.orig/gi/pygi-foreign-cairo.c 2011-06-13 13:33:49.000000000 -0300
+++ pygobject-2.28.6/gi/pygi-foreign-cairo.c 2014-03-04 18:36:07.949079863 -0300
@@ -30,7 +30,7 @@
#include <pycairo/py3cairo.h>
#endif
-Pycairo_CAPI_t *Pycairo_CAPI;
+static Pycairo_CAPI_t *Pycairo_CAPI;
#include "pygi-foreign.h"
@@ -114,10 +114,15 @@
Py_RETURN_NONE;
}
-static PyMethodDef _gi_cairo_functions[] = {};
+static PyMethodDef _gi_cairo_functions[] = {0,};
PYGLIB_MODULE_START(_gi_cairo, "_gi_cairo")
{
+#if PY_VERSION_HEX < 0x03000000
Pycairo_IMPORT;
+#else
+ Pycairo_CAPI = (Pycairo_CAPI_t*) PyCObject_Import("cairo", "CAPI");
+#endif
+
if (Pycairo_CAPI == NULL)
return PYGLIB_MODULE_ERROR_RETURN;
diff -Naur pygobject-2.28.6.orig/gi/pygi-info.c pygobject-2.28.6/gi/pygi-info.c
--- pygobject-2.28.6.orig/gi/pygi-info.c 2011-06-13 13:30:25.000000000 -0300
+++ pygobject-2.28.6/gi/pygi-info.c 2014-03-04 18:35:32.473899924 -0300
@@ -162,9 +162,6 @@
case GI_INFO_TYPE_CONSTANT:
type = &PyGIConstantInfo_Type;
break;
- case GI_INFO_TYPE_ERROR_DOMAIN:
- type = &PyGIErrorDomainInfo_Type;
- break;
case GI_INFO_TYPE_UNION:
type = &PyGIUnionInfo_Type;
break;
@@ -481,7 +478,6 @@
case GI_INFO_TYPE_INVALID:
case GI_INFO_TYPE_FUNCTION:
case GI_INFO_TYPE_CONSTANT:
- case GI_INFO_TYPE_ERROR_DOMAIN:
case GI_INFO_TYPE_VALUE:
case GI_INFO_TYPE_SIGNAL:
case GI_INFO_TYPE_PROPERTY:
@@ -860,7 +856,6 @@
case GI_INFO_TYPE_INVALID:
case GI_INFO_TYPE_FUNCTION:
case GI_INFO_TYPE_CONSTANT:
- case GI_INFO_TYPE_ERROR_DOMAIN:
case GI_INFO_TYPE_VALUE:
case GI_INFO_TYPE_SIGNAL:
case GI_INFO_TYPE_PROPERTY:
diff -Naur pygobject-2.28.6.orig/gio/gio-types.defs pygobject-2.28.6/gio/gio-types.defs
--- pygobject-2.28.6.orig/gio/gio-types.defs 2011-06-13 13:33:49.000000000 -0300
+++ pygobject-2.28.6/gio/gio-types.defs 2014-03-04 18:36:07.950079840 -0300
@@ -526,7 +526,7 @@
)
)
-(define-enum MountMountFlags
+(define-flags MountMountFlags
(in-module "gio")
(c-name "GMountMountFlags")
(gtype-id "G_TYPE_MOUNT_MOUNT_FLAGS")
@@ -545,7 +545,7 @@
)
)
-(define-enum DriveStartFlags
+(define-flags DriveStartFlags
(in-module "gio")
(c-name "GDriveStartFlags")
(gtype-id "G_TYPE_DRIVE_START_FLAGS")
@@ -770,7 +770,7 @@
)
)
-(define-enum SocketMsgFlags
+(define-flags SocketMsgFlags
(in-module "gio")
(c-name "GSocketMsgFlags")
(gtype-id "G_TYPE_SOCKET_MSG_FLAGS")
diff -Naur pygobject-2.28.6.orig/gobject/gobjectmodule.c pygobject-2.28.6/gobject/gobjectmodule.c
--- pygobject-2.28.6.orig/gobject/gobjectmodule.c 2011-06-13 13:33:49.000000000 -0300
+++ pygobject-2.28.6/gobject/gobjectmodule.c 2014-03-04 18:36:07.952079793 -0300
@@ -312,13 +312,6 @@
pyglib_gil_state_release(state);
}
-static void
-pyg_object_class_init(GObjectClass *class, PyObject *py_class)
-{
- class->set_property = pyg_object_set_property;
- class->get_property = pyg_object_get_property;
-}
-
typedef struct _PyGSignalAccumulatorData {
PyObject *callable;
PyObject *user_data;
@@ -484,15 +477,14 @@
}
static PyObject *
-add_signals (GType instance_type, PyObject *signals)
+add_signals (GObjectClass *klass, PyObject *signals)
{
gboolean ret = TRUE;
- GObjectClass *oclass;
Py_ssize_t pos = 0;
PyObject *key, *value, *overridden_signals = NULL;
+ GType instance_type = G_OBJECT_CLASS_TYPE (klass);
overridden_signals = PyDict_New();
- oclass = g_type_class_ref(instance_type);
while (PyDict_Next(signals, &pos, &key, &value)) {
const gchar *signal_name;
gchar *signal_name_canon, *c;
@@ -530,7 +522,6 @@
if (!ret)
break;
}
- g_type_class_unref(oclass);
if (ret)
return overridden_signals;
else {
@@ -800,14 +791,12 @@
}
static gboolean
-add_properties (GType instance_type, PyObject *properties)
+add_properties (GObjectClass *klass, PyObject *properties)
{
gboolean ret = TRUE;
- GObjectClass *oclass;
Py_ssize_t pos = 0;
PyObject *key, *value;
- oclass = g_type_class_ref(instance_type);
while (PyDict_Next(properties, &pos, &key, &value)) {
const gchar *prop_name;
GType prop_type;
@@ -873,7 +862,7 @@
Py_DECREF(slice);
if (pspec) {
- g_object_class_install_property(oclass, 1, pspec);
+ g_object_class_install_property(klass, 1, pspec);
} else {
PyObject *type, *value, *traceback;
ret = FALSE;
@@ -883,7 +872,7 @@
g_snprintf(msg, 256,
"%s (while registering property '%s' for GType '%s')",
PYGLIB_PyUnicode_AsString(value),
- prop_name, g_type_name(instance_type));
+ prop_name, G_OBJECT_CLASS_NAME(klass));
Py_DECREF(value);
value = PYGLIB_PyUnicode_FromString(msg);
}
@@ -892,11 +881,63 @@
}
}
- g_type_class_unref(oclass);
return ret;
}
static void
+pyg_object_class_init(GObjectClass *class, PyObject *py_class)
+{
+ PyObject *gproperties, *gsignals, *overridden_signals;
+ PyObject *class_dict = ((PyTypeObject*) py_class)->tp_dict;
+
+ class->set_property = pyg_object_set_property;
+ class->get_property = pyg_object_get_property;
+
+ /* install signals */
+ /* we look this up in the instance dictionary, so we don't
+ * accidentally get a parent type's __gsignals__ attribute. */
+ gsignals = PyDict_GetItemString(class_dict, "__gsignals__");
+ if (gsignals) {
+ if (!PyDict_Check(gsignals)) {
+ PyErr_SetString(PyExc_TypeError,
+ "__gsignals__ attribute not a dict!");
+ return;
+ }
+ if (!(overridden_signals = add_signals(class, gsignals))) {
+ return;
+ }
+ if (PyDict_SetItemString(class_dict, "__gsignals__",
+ overridden_signals)) {
+ return;
+ }
+ Py_DECREF(overridden_signals);
+
+ PyDict_DelItemString(class_dict, "__gsignals__");
+ } else {
+ PyErr_Clear();
+ }
+
+ /* install properties */
+ /* we look this up in the instance dictionary, so we don't
+ * accidentally get a parent type's __gproperties__ attribute. */
+ gproperties = PyDict_GetItemString(class_dict, "__gproperties__");
+ if (gproperties) {
+ if (!PyDict_Check(gproperties)) {
+ PyErr_SetString(PyExc_TypeError,
+ "__gproperties__ attribute not a dict!");
+ return;
+ }
+ if (!add_properties(class, gproperties)) {
+ return;
+ }
+ PyDict_DelItemString(class_dict, "__gproperties__");
+ /* Borrowed reference. Py_DECREF(gproperties); */
+ } else {
+ PyErr_Clear();
+ }
+}
+
+static void
pyg_register_class_init(GType gtype, PyGClassInitFunc class_init)
{
GSList *list;
@@ -1068,7 +1109,7 @@
*/
static void
pyg_type_add_interfaces(PyTypeObject *class, GType instance_type,
- PyObject *bases, gboolean new_interfaces,
+ PyObject *bases,
GType *parent_interfaces, guint n_parent_interfaces)
{
int i;
@@ -1082,7 +1123,6 @@
guint k;
PyObject *base = PyTuple_GET_ITEM(bases, i);
GType itype;
- gboolean is_new = TRUE;
const GInterfaceInfo *iinfo;
GInterfaceInfo iinfo_copy;
@@ -1099,16 +1139,6 @@
if (!G_TYPE_IS_INTERFACE(itype))
continue;
- for (k = 0; k < n_parent_interfaces; ++k) {
- if (parent_interfaces[k] == itype) {
- is_new = FALSE;
- break;
- }
- }
-
- if ((new_interfaces && !is_new) || (!new_interfaces && is_new))
- continue;
-
iinfo = pyg_lookup_interface_info(itype);
if (!iinfo) {
gchar *error;
@@ -1129,7 +1159,7 @@
int
pyg_type_register(PyTypeObject *class, const char *type_name)
{
- PyObject *gtype, *gsignals, *gproperties, *overridden_signals;
+ PyObject *gtype;
GType parent_type, instance_type;
GType *parent_interfaces;
guint n_parent_interfaces;
@@ -1216,88 +1246,22 @@
}
/*
- * Note: Interfaces to be implemented are searched twice. First
- * we register interfaces that are already implemented by a parent
- * type. The second time, the remaining interfaces are
- * registered, i.e. the ones that are not implemented by a parent
- * type. In between these two loops, properties and signals are
- * registered. It has to be done this way, in two steps,
- * otherwise glib will complain. If registering all interfaces
- * always before properties, you get an error like:
- *
- * ../gobject:121: Warning: Object class
- * test_interface+MyObject doesn't implement | |
import kivy
# The base class of your app inherits from the App class;
# 'app' always refers to the instance of your application.
from kivy.app import App
# this restricts the kivy version, i.e.
# below this kivy version you cannot
# run the app or software
kivy.require('1.9.0')
# Builder is used when .kv file is
# to be used in .py file
from kivy.lang import Builder
# The screen manager is a widget
# dedicated to managing multiple screens for your application.
from kivy.uix.screenmanager import (ScreenManager, Screen, NoTransition,
SlideTransition, CardTransition, SwapTransition,
FadeTransition, WipeTransition, FallOutTransition, RiseInTransition)
# You can create your kv code in the Python file
Builder.load_string("""
<MyButton@Button>
background_normal: ''
background_color: 0,0,0,0
font_name: './yahei.ttf'
<MyTextInput@TextInput>:
canvas.before:
# Draw border first
Color:
rgba: 1,0,0,1
Rectangle:
size: self.size
pos: self.pos
# Draw background (covers most of the above border)
Color:
rgba: 0,0,0,1
Rectangle:
size: (self.width - self.border[1] - self.border[3], self.height - self.border[0] - self.border[2])
pos: (self.x + self.border[3], self.y + self.border[0])
# set the color for the text
Color:
rgba: 1,1,1,1
<MenuScreen>:
GridLayout:
cols:8
MyButton:
text: '乾'
on_release:
root.manager.current = 'Screenqian'
root.manager.transition.direction = 'right'
MyButton:
text: '兑'
on_release:
root.manager.current = 'Screendui'
root.manager.transition.direction = 'right'
MyButton:
text: '离'
on_release:
root.manager.current = 'Screenli'
root.manager.transition.direction = 'right'
MyButton:
text: '震'
on_release:
root.manager.current = 'Screenzhen'
root.manager.transition.direction = 'right'
MyButton:
text: '巽'
MyButton:
text: '坎'
MyButton:
text: '艮'
MyButton:
text: '坤'
Button:
text: '1'
on_release:
root.manager.current = 'Screen1'
root.manager.transition.direction = 'right'
Button:
text: '43'
on_release:
root.manager.current = 'Screen43'
root.manager.transition.direction = 'left'
Button:
text: '14'
on_release:
root.manager.current = 'Screen14'
root.manager.transition.direction = 'left'
Button:
text: '34'
on_release:
root.manager.current = 'Screen34'
root.manager.transition.direction = 'left'
Button:
text: '9'
on_release:
root.manager.current = 'Screen9'
root.manager.transition.direction = 'left'
Button:
text: '5'
on_release:
root.manager.current = 'Screen5'
root.manager.transition.direction = 'left'
Button:
text: '26'
on_release:
root.manager.current = 'Screen26'
root.manager.transition.direction = 'left'
Button:
text: '11'
on_release:
root.manager.current = 'Screen11'
root.manager.transition.direction = 'left'
Button:
text: '10'
on_release:
root.manager.current = 'Screen10'
root.manager.transition.direction = 'left'
Button:
text: '58'
on_release:
root.manager.current = 'Screen58'
root.manager.transition.direction = 'left'
Button:
text: '38'
on_release:
root.manager.current = 'Screen38'
root.manager.transition.direction = 'left'
Button:
text: '54'
on_release:
root.manager.current = 'Screen54'
root.manager.transition.direction = 'left'
Button:
text: '61'
on_release:
root.manager.current = 'Screen61'
root.manager.transition.direction = 'left'
Button:
text: '60'
on_release:
root.manager.current = 'Screen60'
root.manager.transition.direction = 'left'
Button:
text: '41'
on_release:
root.manager.current = 'Screen41'
root.manager.transition.direction = 'left'
Button:
text: '19'
on_release:
root.manager.current = 'Screen19'
root.manager.transition.direction = 'left'
Button:
text: '13'
on_release:
root.manager.current = 'Screen13'
root.manager.transition.direction = 'left'
Button:
text: '49'
on_release:
root.manager.current = 'Screen49'
root.manager.transition.direction = 'left'
Button:
text: '30'
on_release:
root.manager.current = 'Screen30'
root.manager.transition.direction = 'left'
Button:
text: '55'
on_release:
root.manager.current = 'Screen55'
root.manager.transition.direction = 'left'
Button:
text: '37'
on_release:
root.manager.current = 'Screen37'
root.manager.transition.direction = 'left'
Button:
text: '63'
on_release:
root.manager.current = 'Screen63'
root.manager.transition.direction = 'left'
Button:
text: '22'
on_release:
root.manager.current = 'Screen22'
root.manager.transition.direction = 'left'
Button:
text: '36'
on_release:
root.manager.current = 'Screen36'
root.manager.transition.direction = 'left'
Button:
text: '25'
on_release:
root.manager.current = 'Screen25'
root.manager.transition.direction = 'left'
Button:
text: '17'
on_release:
root.manager.current = 'Screen17'
root.manager.transition.direction = 'left'
Button:
text: '21'
on_release:
root.manager.current = 'Screen21'
root.manager.transition.direction = 'left'
Button:
text: '51'
on_release:
root.manager.current = 'Screen51'
root.manager.transition.direction = 'left'
Button:
text: '42'
on_release:
root.manager.current = 'Screen42'
root.manager.transition.direction = 'left'
Button:
text: '3'
on_release:
root.manager.current = 'Screen3'
root.manager.transition.direction = 'left'
Button:
text: '27'
on_release:
root.manager.current = 'Screen27'
root.manager.transition.direction = 'left'
Button:
text: '24'
on_release:
root.manager.current = 'Screen24'
root.manager.transition.direction = 'left'
Button:
text: '44'
on_release:
root.manager.current = 'Screen44'
root.manager.transition.direction = 'left'
Button:
text: '28'
on_release:
root.manager.current = 'Screen28'
root.manager.transition.direction = 'left'
Button:
text: '50'
on_release:
root.manager.current = 'Screen50'
root.manager.transition.direction = 'left'
Button:
text: '32'
on_release:
root.manager.current = 'Screen32'
root.manager.transition.direction = 'left'
Button:
text: '57'
on_release:
root.manager.current = 'Screen57'
root.manager.transition.direction = 'left'
Button:
text: '48'
on_release:
root.manager.current = 'Screen48'
root.manager.transition.direction = 'left'
Button:
text: '18'
on_release:
root.manager.current = 'Screen18'
root.manager.transition.direction = 'left'
Button:
text: '46'
on_release:
root.manager.current = 'Screen46'
root.manager.transition.direction = 'left'
Button:
text: '6'
on_release:
root.manager.current = 'Screen6'
root.manager.transition.direction = 'left'
Button:
text: '47'
on_release:
root.manager.current = 'Screen47'
root.manager.transition.direction = 'left'
Button:
text: '64'
on_release:
root.manager.current = 'Screen64'
root.manager.transition.direction = 'left'
Button:
text: '40'
on_release:
root.manager.current = 'Screen40'
root.manager.transition.direction = 'left'
Button:
text: '59'
on_release:
root.manager.current = 'Screen59'
root.manager.transition.direction = 'left'
Button:
text: '29'
on_release:
root.manager.current = 'Screen29'
root.manager.transition.direction = 'left'
Button:
text: '4'
on_release:
root.manager.current = 'Screen4'
root.manager.transition.direction = 'left'
Button:
text: '7'
on_release:
root.manager.current = 'Screen7'
root.manager.transition.direction = 'left'
Button:
text: '33'
on_release:
root.manager.current = 'Screen33'
root.manager.transition.direction = 'left'
Button:
text: '31'
on_release:
root.manager.current = 'Screen31'
root.manager.transition.direction = 'left'
Button:
text: '56'
on_release:
root.manager.current = 'Screen56'
root.manager.transition.direction = 'left'
Button:
text: '62'
on_release:
root.manager.current = 'Screen62'
root.manager.transition.direction = 'left'
Button:
text: '53'
on_release:
root.manager.current = 'Screen53'
root.manager.transition.direction = 'left'
Button:
text: '39'
on_release:
root.manager.current = 'Screen39'
root.manager.transition.direction = 'left'
Button:
text: '52'
on_release:
root.manager.current = 'Screen52'
root.manager.transition.direction = 'left'
Button:
text: '15'
on_release:
root.manager.current = 'Screen15'
root.manager.transition.direction = 'left'
Button:
text: '12'
on_release:
root.manager.current = 'Screen12'
root.manager.transition.direction = 'left'
Button:
text: '45'
on_release:
root.manager.current = 'Screen45'
root.manager.transition.direction = 'left'
Button:
text: '35'
on_release:
root.manager.current = 'Screen35'
root.manager.transition.direction = 'left'
Button:
text: '16'
on_release:
root.manager.current = 'Screen16'
root.manager.transition.direction = 'left'
Button:
text: '20'
on_release:
root.manager.current = 'Screen20'
root.manager.transition.direction = 'left'
Button:
text: '8'
on_release:
root.manager.current = 'Screen8'
root.manager.transition.direction = 'left'
Button:
text: '23'
on_release:
root.manager.current = 'Screen23'
root.manager.transition.direction = 'left'
Button:
text: '2'
on_release:
root.manager.current = 'Screen2'
root.manager.transition.direction = 'left'
MyButton:
text: '三枚'
on_release:
root.manager.current = 'Screensanmei'
root.manager.transition.direction = 'left'
MyButton:
text: '六枚'
on_release:
root.manager.current = 'Screenliumei'
root.manager.transition.direction = 'left'
MyButton:
text: '三数'
on_release:
root.manager.current = 'Screensanshu'
root.manager.transition.direction = 'left'
MyButton:
text: '揲蓍'
on_release:
root.manager.current = 'Screensheshi'
root.manager.transition.direction = 'left'
MyButton:
text: '抽签'
on_release:
root.manager.current = 'Screenchouqian'
root.manager.transition.direction = 'left'
MyButton:
text: ''
MyButton:
text: '学习'
color: 0,1,1,1
MyButton:
text: '退出'
color: 1,0,0,1
on_press: app.stop()
<Screen1>:
    # Hexagram 1 (Qian). Layout: header row, image + hexagram-text row, and a
    # diagram row showing the six lines next to their line statements.
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        BoxLayout:
            size_hint_y: .3
            Label:
                text:'四月\\n立夏\\n蛇'
                font_name: './yahei.ttf'
                markup: True
            # Tapping the hexagram name navigates back to the menu screen.
            MyButton:
                text: '乾'
                font_size: 60
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            MyButton:
                text: '困龙得水好运交\\n不由喜气上眉梢\\n一切谋望皆如意\\n向后时运渐渐高'
        BoxLayout:
            Image:
                # NOTE(review): kv ids are normally valid Python identifiers;
                # 'id: 1' is only reachable via root.ids['1'] — confirm usage.
                id: 1
                source: '1.png'
                allow_stretch: False
            Label:
                text: '乾元亨利贞\\n\\n用九见群龙无首吉'
                font_name: './yahei.ttf'
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                # Six solid (yang) lines drawn as fixed-position rectangles.
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 140, 10
                        pos: 140, 240
                    Rectangle:
                        size: 140, 10
                        pos: 140, 200
                    Rectangle:
                        size: 140, 10
                        pos: 140, 160
                    Rectangle:
                        size: 140, 10
                        pos: 140, 120
                    Rectangle:
                        size: 140, 10
                        pos: 140, 80
                    Rectangle:
                        size: 140, 10
                        pos: 140, 40
            BoxLayout:
                orientation: 'vertical'
                # Line statements, listed top (line 6) down to bottom (line 1).
                Label:
                    text: '上九亢龙有悔'
                    font_name: './yahei.ttf'
                Label:
                    text: '九五飞龙在天利见大人'
                    font_name: './yahei.ttf'
                Label:
                    text: '九四或跃在渊无咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '九三君子终日乾乾夕惕若厉无咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '九二见龙在田利见大人'
                    font_name: './yahei.ttf'
                Label:
                    text: '初九潜龙勿用'
                    font_name: './yahei.ttf'
<Screen43>:
    # Hexagram 43 (Guai). Same layout as the other hexagram screens; the top
    # (broken yin) line is drawn as two short rectangles.
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        BoxLayout:
            size_hint_y: .3
            Label:
                text:'三月\\n清明\\n龙'
                font_name: './yahei.ttf'
                markup: True
            # Transparent Button used instead of MyButton; returns to the menu.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '夬'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '蜘蛛脱网赛天军\\n粘住游蜂翅翎毛\\n幸有大风吹破网\\n脱离灾难又逍遥'
                font_name: './yahei.ttf'
        BoxLayout:
            Image:
                id: 43
                source: '43.png'
                allow_stretch: False
            Label:
                text: '夬揚于王庭孚號有厲告自邑不利即戎利有攸往'
                font_name: './yahei.ttf'
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                # Top broken (yin) line as two 60-wide segments, then five solid lines.
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 60, 10
                        pos: 220, 240
                    Rectangle:
                        size: 60, 10
                        pos: 140, 240
                    Rectangle:
                        size: 140, 10
                        pos: 140, 200
                    Rectangle:
                        size: 140, 10
                        pos: 140, 160
                    Rectangle:
                        size: 140, 10
                        pos: 140, 120
                    Rectangle:
                        size: 140, 10
                        pos: 140, 80
                    Rectangle:
                        size: 140, 10
                        pos: 140, 40
            BoxLayout:
                orientation: 'vertical'
                # Line statements, top (line 6) down to bottom (line 1).
                Label:
                    text: '上六无号终有凶'
                    font_name: './yahei.ttf'
                Label:
                    text: '九五苋陆夬夬中行无咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '九四臀无肤其行次且牵羊悔亡闻言不信'
                    font_name: './yahei.ttf'
                Label:
                    text: '九三壮于頄有凶君子夬夬独行遇雨若濡有愠无咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '九二惕号莫夜有戎勿恤'
                    font_name: './yahei.ttf'
                Label:
                    text: '初九壮于前趾往不胜为咎'
                    font_name: './yahei.ttf'
<Screen14>:
name: '14'
BoxLayout:
orientation: 'vertical'
padding: [10,40,40,30]
BoxLayout:
size_hint_y: .3
Label:
text:''#[b]00[/b]:00:00
font_name: './yahei.ttf'
font_size: 60
markup: True
Button:
background_normal: ''
background_color: 0,0,0,0
text: '大有'
font_size: 60
font_name: './yahei.ttf'
on_release:
root.manager.current = 'menu'
root.manager.transition.direction = 'right'
Button:
background_normal: ''
background_color: 0,0,0,0
text: '砍树摸雀作事牢\\n是非口舌自然消\\n婚姻合伙来费力\\n若问走失未逃脱'
font_name: './yahei.ttf'
BoxLayout:
Image:
id: 14
source: '14.png'
allow_stretch: False
Label:
text: '大有元亨'
font_name: './yahei.ttf'
BoxLayout:
BoxLayout:
| |
# -*- coding: UTF-8 -*-
from spider import *
class Prepare:
def __init__(self,col=None):
self.url = BASE_URL
self.col = col
self.content_type = 'application/json; charset=utf-8'
self.collect = mongodb.db[self.col]
def update_one(self,matchid,item):
"""更新数据"""
try:
if not self.isrollball:
inplaydelay = item.get('inplaydelay')
del item['inplaydelay']
self.collect.update_one({'matchid': matchid,'inplaydelay':inplaydelay}, {'$set': item}, upsert=True)
logger.info('更新数据')
else:
inplaydelay = item.get('inplaydelay')
del item['inplaydelay']
self.collect.update_one({'matchid': matchid,'inplaydelay':inplaydelay}, {'$set': item}, upsert=True)
logger.info('更新数据')
except Exception as ex:
logger.info(traceback.format_exc())
def update_one_by_jsontype(self,jsontype,item):
try:
self.collect.update_one(jsontype, {'$set': item}, upsert=True)
logger.info(f'冠军更新数据,暂停{fresh_by_jsontype}秒')
#time.sleep(fresh_by_jsontype)
except Exception as ex:
logger.info(traceback.format_exc())
def get_matchid(self):
try:
gids = []
response = self.session.get(self.url,params=self.json_type, timeout=timeout)
if response.status_code == 200 and self.content_type == response.headers['Content-Type']:
response = response.json()
for reslut in response:
gids.append(reslut.get('matchID'))
return gids
else:
logger.info('响应不是json,获取cookie')
# set cookie
get_cookie(self.session,self.url,response.text,self.json_type)
return self.get_matchid()
except Exception as ex:
logger.info(traceback.format_exc())
return self.get_matchid()
def handle_matchid(self,matchid):
para={'jsontype': 'odds_allodds.aspx','matchid':matchid}
try:
response = self.session.get(self.url, timeout=timeout,params=para)
if response.status_code == 200 and self.content_type == response.headers['Content-Type']:
response = response.json()
return response
else:
logger.info('响应不是json,获取cookie')
get_cookie(self.session, self.url, response.text,para=para)
return self.handle_matchid(matchid)
except Exception as ex:
return self.handle_matchid(matchid)
def get_rollball_matchid(self):
para = {'jsontype': 'odds_inplay.aspx'}
try:
gids = []
response = self.session.get(self.url,params=para, timeout=timeout)
if response.status_code == 200 and self.content_type == response.headers['Content-Type']:
response = response.json()
for reslut in response:
#if reslut.get('matchStatus') != 'Defined':
gids.append(reslut.get('matchID'))
return gids
else:
logger.info('响应不是json,获取cookie' + traceback.format_exc())
# set cookie
get_cookie(self.session,self.url,response.text)
return self.get_rollball_matchid()
except Exception as ex:
logger.info(ex.args)
return self.get_rollball_matchid()
def handle_rollball_matchid(self,matchid):
para={'jsontype': 'odds_inplay_all.aspx','matchid': matchid}
try:
response = self.session.get(self.url,params=para, timeout=timeout)
if response.status_code == 200 and self.content_type == response.headers['Content-Type']:
responses = response.json()
for response in responses:
if response.get('matchID') == matchid:
return response
return response
else:
logger.info('响应不是json,获取cookie' + traceback.format_exc())
get_cookie(self.session, self.url, response.text,para=para)
return self.handle_rollball_matchid(matchid)
except Exception as ex:
return self.handle_rollball_matchid(matchid)
def fetch(self):
gids = self.get_matchid()
print(len(gids))
if not gids:
logger.info(f'{self.name}暂无赛事暂停{stop}')
time.sleep(stop)
for gid in gids:
response = self.handle_matchid(gid)
item = self.parse_response(response)
if item:
self.update_one(gid, item)
def fetch_rollball(self):
gids = self.get_rollball_matchid()
if not gids:
logger.info(f'{self.name}暂无赛事暂停{rollball_stop}')
time.sleep(rollball_stop)
for gid in gids:
response = self.handle_rollball_matchid(gid)
item = self.parse_rollball_response(response)
if item:
self.update_one(gid,item)
def parse_response(self, response):
for item in response:
if item.get('anyInplaySelling') and item.get('matchStatus') == 'Defined':
result = self.parse_current_match(item)
return result
def parse_rollball_response(self,response):
result = self.parse_current_match(response)
return result
    def parse_current_match(self,item):
        """Normalize one raw match payload (mutated in place) into the stored schema.

        For each pool listed in definedPools/inplayPools, the matching
        '<pool>odds' entry is rewritten into a flat dict of prices; odds
        strings carry a 4-character prefix that is stripped with [4:].
        Returns the mutated *item*.
        """
        item['updateTime'] = datetime.datetime.now()
        # Pre-match payloads list their pools under 'definedPools',
        # in-play ones under 'inplayPools'.
        if not getattr(self,'isrollball'):
            inplaypools_name = 'definedPools'
        else:
            inplaypools_name = 'inplayPools'
        # NOTE(review): this filter always reads 'inplayPools' even in the
        # pre-match case — confirm whether inplaypools_name was intended here.
        inplayPools = item.get('inplayPools')
        remove = []
        # Drop every '<pool>odds' entry whose pool is not currently offered.
        for key in item:
            if 'odds' in key:
                if key[:-4].upper() in inplayPools:
                    pass
                else:
                    remove.append(key)
        for key in remove:
            del item[key]
        inplay_pools = item.get(inplaypools_name)
        for inplay in inplay_pools:
            inplay_key = inplay.lower() + 'odds'
            odds = {}
            if item.get(inplay_key) and inplay_key == 'hadodds':
                # Full-time home/draw/away (1X2)
                temp = item.get(inplay_key)
                odds['homewin'] = temp.get('H')[4:]
                odds['awaywin'] = temp.get('A')[4:]
                odds['draw'] = temp.get('D')[4:]
                odds['status'] = temp.get('POOLSTATUS')
                odds['title'] = map_item.get(inplay_key)
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'fhaodds':
                # First-half home/draw/away
                temp = item.get(inplay_key)
                odds['homewin'] = temp.get('H')[4:]
                odds['awaywin'] = temp.get('A')[4:]
                odds['draw'] = temp.get('D')[4:]
                odds['status'] = temp.get('POOLSTATUS')
                odds['title'] = map_item.get(inplay_key)
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'hhaodds':
                # Handicap home/draw/away
                temp = item.get(inplay_key)
                odds['homewin'] = temp.get('H')[4:]
                odds['awaywin'] = temp.get('A')[4:]
                odds['draw'] = temp.get('D')[4:]
                odds['status'] = temp.get('POOLSTATUS')
                odds['title'] = map_item.get(inplay_key)
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'hdcodds':
                # Asian handicap; 'a/b' lines collapse to 'a' when both halves match
                temp = item.get(inplay_key)
                point = temp.get('HG')
                if '/' in point:
                    slice_point = point.split('/')
                    point = point if slice_point[0] != slice_point[1] else slice_point[0]
                odds['homewin'] = temp.get('H')[4:]
                odds['awaywin'] = temp.get('A')[4:]
                odds['point'] = point
                odds['status'] = temp.get('POOLSTATUS')
                odds['title'] = map_item.get(inplay_key)
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'hilodds':
                # Goals over/under
                temp = item.get(inplay_key)
                odds['info'] = []
                linelist = temp.get('LINELIST')
                for i in linelist:
                    point = i.get('LINE')
                    if '/' in point:
                        slice_point = point.split('/')
                        point = point if slice_point[0] != slice_point[1] else slice_point[0]
                    odds['info'].append({'gt': i.get('H')[4:],'lt': i.get('L')[4:],'point': point})
                odds['status'] = temp['POOLSTATUS']
                odds['title'] = map_item.get(inplay_key)
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'fhlodds':
                # First-half goals over/under
                temp = item.get(inplay_key)
                odds['info'] = []
                linelist = temp.get('LINELIST')
                for i in linelist:
                    point = i.get('LINE')
                    if '/' in point:
                        slice_point = point.split('/')
                        point = point if slice_point[0] != slice_point[1] else slice_point[0]
                    odds['info'].append({'gt': i.get('H')[4:], 'lt': i.get('L')[4:], 'point': point})
                # NOTE(review): 'sataus' looks like a typo for 'status'; kept
                # as-is because stored documents/readers may depend on it.
                odds['sataus'] = temp['POOLSTATUS']
                odds['title'] = map_item.get(inplay_key)
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'chlodds':
                # Corners over/under
                temp = item.get(inplay_key)
                odds['info'] = []
                linelist = temp.get('LINELIST')
                for i in linelist:
                    point = i.get('LINE')
                    if '/' in point:
                        slice_point = point.split('/')
                        point = point if slice_point[0] != slice_point[1] else slice_point[0]
                    odds['info'].append({'gt': i.get('H')[4:], 'lt': i.get('L')[4:], 'point': point})
                # NOTE(review): same 'sataus' typo as above, left unchanged.
                odds['sataus'] = temp['POOLSTATUS']
                odds['title'] = map_item.get(inplay_key)
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'crsodds':
                # Correct score
                temp = item.get(inplay_key)
                odds['home'] = []
                odds['away'] = []
                odds['draw'] = []
                # Maps 'SxHyA' score codes to the winning side bucket.
                map_csrodds = {
                    'S0100':'home','S0200':'home','S0201':'home','S0300':'home','S0301':'home','S0302':'home','S0400':'home','S0401':'home','S0402':'home','S0500':'home','S0501':'home','S0502':'home',
                    'S0001':'away','S0002':'away', 'S0102':'away','S0003': 'away', 'S0103': 'away','S0203': 'away', 'S0004': 'away', 'S0104': 'away', 'S0204': 'away', 'S0005': 'away','S0105': 'away', 'S0205': 'away',
                    'S0000': 'draw','S0101': 'draw','S0202': 'draw','S0303': 'draw',
                    'SM1MA': 'away','SM1MH': 'home','SM1MD':'draw'
                }
                map_other = {'1:A': 'other','1:D': 'other','1:H': 'other',}
                for key, value in temp.items():
                    if '@' in value:
                        score = key[2] + ':' + key[-1]
                        score = map_other.get(score) or score
                        info = {'name': score,'odds': value[4:]}
                        odds[map_csrodds.get(key)].append(info)
                odds['title'] = map_item.get(inplay_key)
                # NOTE(review): same 'sataus' typo as above, left unchanged.
                odds['sataus'] = temp['POOLSTATUS']
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'fcsodds':
                # Half-time correct score (same mapping as full-time)
                temp = item.get(inplay_key)
                odds['home'] = []
                odds['away'] = []
                odds['draw'] = []
                map_csrodds = {
                    'S0100':'home','S0200':'home','S0201':'home','S0300':'home','S0301':'home','S0302':'home','S0400':'home','S0401':'home','S0402':'home','S0500':'home','S0501':'home','S0502':'home',
                    'S0001':'away','S0002':'away', 'S0102':'away','S0003': 'away', 'S0103': 'away','S0203': 'away', 'S0004': 'away', 'S0104': 'away', 'S0204': 'away', 'S0005': 'away','S0105': 'away', 'S0205': 'away',
                    'S0000': 'draw','S0101': 'draw','S0202': 'draw','S0303': 'draw',
                    'SM1MA': 'away','SM1MH': 'home','SM1MD':'draw'
                }
                map_other = {'1:A': 'other','1:D': 'other','1:H': 'other',}
                for key, value in temp.items():
                    if '@' in value:
                        score = key[2] + ':' + key[-1]
                        score = map_other.get(score) or score
                        info = {'name': score,'odds': value[4:]}
                        odds[map_csrodds.get(key)].append(info)
                odds['title'] = map_item.get(inplay_key)
                odds['status'] = temp['POOLSTATUS']
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'ntsodds':
                # Next team to score
                if item.get(inplay_key):
                    descriptions = item.get(inplay_key)
                    odds['info'] = []
                    for description in descriptions:
                        odds['info'].append({'home':description.get('H')[4:],'away':description.get('A')[4:],'N':description.get('N')[4:]})
                    # Uses the last description's status (loop variable leak).
                    odds['status'] = description['POOLSTATUS']
                    odds['title'] = map_item.get(inplay_key)
                    item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'ftsodds':
                # First team to score
                temp = item.get(inplay_key)
                odds['home'] = temp.get('H')[4:]
                odds['away'] = temp.get('A')[4:]
                odds['N'] = temp.get('N')[4:]
                odds['title'] = map_item.get(inplay_key)
                odds['status'] = temp['POOLSTATUS']
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'ttgodds':
                # Total goals
                temp = item.get(inplay_key)
                odds['info'] = []
                for key, value in temp.items():
                    if '@' in value:
                        odds['info'].append({'name': key,'odds': value[4:]})
                odds['title'] = map_item.get(inplay_key)
                odds['status'] = temp['POOLSTATUS']
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'ooeodds':
                # Odd/even total goals
                temp = item.get(inplay_key)
                odds['info'] = [{'name': '单','odds': temp.get('O')[4:]},{'name': '双','odds': temp.get('E')[4:]}]
                odds['title'] = map_item.get(inplay_key)
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'hftodds':
                # Half-time/full-time double result
                temp = item.get(inplay_key)
                map_hftodds = {
                    'HD': '主-和','HA': '主-客','HH': '主-主',
                    'AD': '客-和','AH': '客-主','AA': '客-客',
                    'DH': '和-主','DA': '和-客','DD': '和-和'}
                odds['info'] = []
                for key, value in temp.items():
                    if '@' in value:
                        odds['info'].append({'name': key,'odds': value[4:],'CH':map_hftodds.get(key)})
                odds['title'] = map_item.get(inplay_key)
                item[inplay_key] = odds
            elif item.get(inplay_key) and inplay_key == 'spcodds':
                # Special markets: a list of descriptions, each with its own selections
                descriptions = item.get(inplay_key)
                info = []
                for description in descriptions:
                    desc = {}
                    desc['title'] = map_item.get(inplay_key)
                    desc['item'] = description.get('ITEM')
                    desc['inplay'] = description.get('INPLAY')
                    desc['itemech'] = description.get('ITEMCH')
                    desc['itemeen'] = description.get('ITEMEN')
                    desc['status'] = description.get('POOLSTATUS')
                    desc['info'] = []
                    for sellist in description['SELLIST']:
                        sel = {'odds': sellist['ODDS'][4:],'sel': sellist['SEL'],'itemech': sellist.get('CONTENTCH'),'selstatus':sellist.get('SELSTATUS') }
                        desc['info'].append(sel)
                    info.append(desc)
                item[inplay_key] = info
            elif item.get(inplay_key) and inplay_key == 'fgsodds':
                # First goalscorer
                description = item.get(inplay_key)
                desc = {}
                desc['title'] = map_item.get(inplay_key)
                desc['inplay'] = description.get('INPLAY')
                desc['status'] = description.get('POOLSTATUS')
                desc['info'] = []
                for sellist in description['SELLIST']:
                    sel = {'odds': sellist['ODDS'][4:], 'sel': sellist['SEL'], 'itemech': sellist.get('CONTENTCH'),
                           'itemen': sellist.get('CONTENTEN')}
                    desc['info'].append(sel)
                item[inplay_key] = desc
            elif item.get(inplay_key) and inplay_key == 'tqlodds':
                # Team to qualify
                description = item.get(inplay_key)
                desc = {}
                desc['title'] = map_item.get(inplay_key)
                desc['inplay'] = description.get('INPLAY')
                desc['status'] = description.get('POOLSTATUS')
                desc['homewin'] = description.get('H')[4:]
                desc['awaywin'] = description.get('A')[4:]
                item[inplay_key] = desc
        item['sportid'] = 1
        item['updateTime'] = datetime.datetime.now()
        # Drop fields that are not stored.
        if item.get('channel'): del item['channel']
        del item['matchID']
        return item
def champion(self,param):
try:
response = self.session.get(self.url,params=param)
if response.status_code == 200 and self.content_type == response.headers['Content-Type']:
response = response.json()
return response
else:
logger.info('响应不是json,获取cookie')
# set cookie
get_cookie(self.session, self.url, response.text, param)
return self.champion(param)
except Exception as ex:
logger.info(traceback.format_exc())
def fetch_champion(self):
params = [
{'jsontype': 'odds_chp.aspx'},
{'jsontype': 'tournament.aspx', 'tourn': '1644'},
#{'jsontype': 'tournament.aspx', 'tourn': '1658'}
]
for i in params:
response = self.champion(i)
if isinstance(response,list):
key = 'chpodds'
for item in response:
if item.get(key) :
odds = deepcopy(item.get(key))
sellist = item[key]['SELLIST']
del item[key]['SELLIST']
info = []
for sell in sellist:
if sell['ODDS'][4:] != 'LSE':
info.append({'name': sell.get('CONTENTCH'),'odds': sell.get('ODDS')[4:]})
item[key]['info'] = info
odds['title'] = map_item[key]
tournamentID = item.get('tournamentID')
jsontype = {'tournamentID':tournamentID,'jsontype': key}
| |
%s' % (k, expr, exp))
return computed_variables
    # Try to find the params for a macro in the following objects, in that order:
    # * check
    # * service
    # * main configuration
    def _found_params(self, m, check):
        """Resolve a macro expression *m* (optionally 'key | fallback') to a string.

        Each candidate (split on the first '|') is treated as a dotted path
        k1.k2.k3 and looked up, in order, in: the check itself, the related
        service (only if the check references one we know), then the global
        configuration. The first candidate that resolves wins; '' is returned
        when nothing matches.
        NOTE(review): a literal fallback like '| 95' is also looked up as a
        key, so pure-literal defaults resolve to '' — confirm intended.
        """
        parts = [m]
        # if we got a |, we got a default value somewhere
        if '|' in m:
            parts = m.split('|', 1)
        change_to = ''
        for p in parts:
            elts = [p]
            if '.' in p:
                elts = p.split('.')
            elts = [e.strip() for e in elts]
            # we will try to grok into our cfg_data for the k1.k2.k3 =>
            # self.cfg_data[k1][k2][k3] entry if it exists
            d = None
            founded = False
            # We will look in check > service > global order,
            # but skip the service if it's not related to the check
            sname = check.get('service', '')
            find_into = [check, self.cfg_data]
            if sname and sname in self.services:
                service = self.services.get(sname)
                find_into = [check, service, self.cfg_data]
            for tgt in find_into:
                (lfounded, ld) = self._found_params_inside(elts, tgt)
                if not lfounded:
                    continue
                if lfounded:
                    founded = True
                    d = ld
                    break
            if not founded:
                continue
            change_to = str(d)
            break
        return change_to
    # Try to find an elts = k1.k2.k3 => d[k1][k2][k3] entry,
    # if it exists
def _found_params_inside(self, elts, d):
founded = False
for e in elts:
if e not in d:
founded = False
break
d = d[e]
founded = True
return (founded, d)
# Launch a check sub-process as a thread
    def launch_check(self, check):
        """Execute one check and record its result on the *check* dict.

        Two execution modes: expression checks (critical_if/warning_if,
        evaluated by the evaluater) and script checks (macro-expanded shell
        command run via exec_command). Afterwards the return code is mapped to
        ok/warning/critical/unknown, state-change bookkeeping is done, and the
        result is analysed and handed to the handlers.
        """
        # If critical_if is available, try it
        critical_if = check.get('critical_if')
        warning_if = check.get('warning_if')
        rc = 3 # by default unknown state and output
        output = 'Check not configured'
        err = ''
        if critical_if or warning_if:
            b = False
            try:
                computed_variables = self.__get_variables(check)
            except Exception as exp:
                output = "ERROR: the variable expression fail: %s" % exp
                b = True
                rc = 2
                computed_variables = {}
            if critical_if:
                try:
                    b = evaluater.eval_expr(critical_if, check=check, variables=computed_variables)
                except Exception as exp:
                    output = "ERROR: the critical_if expression fail: %s : %s" % (critical_if, exp)
                    b = False
                    rc = 2
                if b:
                    output = evaluater.eval_expr(check.get('critical_output', ''), variables=computed_variables)
                    rc = 2
            # Only evaluate warning_if when critical did not already fire.
            if not b and warning_if:
                try:
                    b = evaluater.eval_expr(warning_if, check=check, variables=computed_variables)
                except Exception as exp:
                    output = "ERROR: the warning_if expression fail: %s : %s" % (warning_if, exp)
                    b = False
                    rc = 2
                if b:
                    output = evaluater.eval_expr(check.get('warning_output', ''), variables=computed_variables)
                    rc = 1
            # if still unset, we are in OK
            if rc == 3:
                rc = 0
                try:
                    output = evaluater.eval_expr(check.get('ok_output', ''), variables=computed_variables)
                except Exception as exp:
                    output = "ERROR: the ok_output expression fail: %s : %s" % (check.get('ok_output', ''), exp)
                    rc = 2
        else:
            script = check['script']
            logger.debug("CHECK start: MACRO launching %s" % script)
            # First we need to change the script with good macros (between $$)
            it = self.macro_pat.finditer(script)
            macros = [m.groups() for m in it]
            # can be ('$ load.warning | 95$', 'load.warning | 95') for example
            for (to_repl, m) in macros:
                change_to = self._found_params(m, check)
                script = script.replace(to_repl, change_to)
            logger.debug("MACRO finally computed", script)
            rc, output, err = exec_command(script)
            # a not-found error (127) should be caught as an unknown check
            if rc > 3:
                rc = 3
        logger.debug("CHECK RETURN %s : %s %s %s" % (check['id'], rc, output, err))
        did_change = (check['state_id'] != rc)
        if did_change:
            # Then save the old state values before overwriting them
            check['old_state'] = check['state']
            check['old_state_id'] = check['state_id']
        check['state'] = {0: 'ok', 1: 'warning', 2: 'critical', 3: 'unknown'}.get(rc, 'unknown')
        if 0 <= rc <= 3:
            check['state_id'] = rc
        else:
            check['state_id'] = 3
        check['output'] = output + err
        check['last_check'] = int(time.time())
        self.__analyse_check(check, did_change)
        # Launch the handlers; some need to know if the element did change or not
        handlermgr.launch_check_handlers(check, did_change)
def __get_history_entry_from_check(self, check):
r = {}
fields = ['name', 'pack_name', 'pack_level', 'output', 'last_check', 'display_name', 'state', 'state_id', 'old_state', 'old_state_id']
for field in fields:
r[field] = check[field]
return r
    # get a check return and look if it did change a service state. Also save
    # the result in the __health KV
    def __analyse_check(self, check, did_change):
        """Process a finished check: propagate its state and persist the result.

        Updates the gossip state and history when the check changed, imports
        the state into its related service (if any), broadcasts when either
        changed, and finally stores the check in the KV database.
        """
        logger.debug('CHECK we got a check return, deal with it for %s' % check)
        # if it did change, update the node check entry about it
        if did_change:
            gossiper.update_check_state_id(check['name'], check['state_id'])
            # and save a history entry about it
            history_entry = self.__get_history_entry_from_check(check)
            self.add_history_entry(history_entry)
        # by default warn other nodes if the check did change
        warn_about_our_change = did_change
        # If the check is related to a service, import the result into the service
        # and look for a service state change
        sname = check.get('service', '')
        if sname and sname in self.services:
            service = self.services.get(sname)
            logger.debug('CHECK is related to a service, deal with it! %s => %s' % (check, service))
            sstate_id = service.get('state_id')
            cstate_id = check.get('state_id')
            if cstate_id != sstate_id:
                service['state_id'] = cstate_id
                logger.log('CHECK: we got a service state change from %s to %s for %s' % (sstate_id, cstate_id, service['name']))
                warn_about_our_change = True
            else:
                logger.debug('CHECK: service %s did not change (%s)' % (service['name'], sstate_id))
        # If our check or service did change, warn other nodes about it
        if warn_about_our_change:
            gossiper.increase_incarnation_and_broadcast()
        # We finally put the result in the KV database
        self.put_check(check)
    # Save the check as a json object into the __health/ KV part
    def put_check(self, check):
        """Store *check* under __health/<uuid>/<name> and export its perfdata.

        The check output is split on the first '|' (Nagios plugin convention);
        any perfdata metrics found are forwarded as graphite datapoints named
        <node>.<check-name>.<metric>.
        """
        value = jsoner.dumps(check)
        key = '__health/%s/%s' % (gossiper.uuid, check['name'])
        logger.debug('CHECK SAVING %s:%s(len=%d)' % (key, value, len(value)))
        kvmgr.put_key(key, value, allow_udp=True)
        # Now extract metrics (perfdata) from the check output
        elts = check['output'].split('|', 1)
        try:
            perfdata = elts[1]
        except IndexError:
            perfdata = ''
        # if there is no perfdata, bail out
        if not perfdata:
            return
        datas = []
        # '/' would break the dotted metric namespace, so map it to '.'
        cname = check['name'].replace('/', '.')
        now = int(time.time())
        perfdatas = PerfDatas(perfdata)
        for m in perfdatas:
            if m.name is None or m.value is None:
                continue # skip this invalid perfdata
            logger.debug('GOT PERFDATAS', m)
            logger.debug('GOT PERFDATAS', m.name)
            logger.debug('GOT PERFDATAS', m.value)
            e = {'mname': '.'.join([gossiper.name, cname, m.name]), 'timestamp': now, 'value': m.value}
            logger.debug('PUT PERFDATA', e)
            datas.append(e)
        self.put_graphite_datas(datas)
def do_update_checks_kv(self):
logger.info("CHECK UPDATING KV checks")
names = []
for (cid, check) in self.checks.items():
# Only the checks that we are really managing
if cid in self.active_checks:
names.append(check['name'])
self.put_check(check)
all_checks = jsoner.dumps(names)
key = '__health/%s' % gossiper.uuid
kvmgr.put_key(key, all_checks)
# Main thread for launching checks (each with its own thread)
    def do_check_thread(self):
        """Scheduler loop: launch due checks, each in its own worker thread.

        Runs until the global stopper fires. Skips everything while the
        monitoring topic is disabled. Intervals are jittered +/-10% so checks
        do not all fire at once; finished worker threads are reaped each pass.
        """
        # Before running, be sure we have a history directory ready
        self.prepare_history_directory()
        logger.log('CHECK thread launched')
        cur_launchs = {}
        while not stopper.is_stop():
            # If we are not allowed to do monitoring stuff, do nothing
            if not topiker.is_topic_enabled(TOPIC_MONITORING):
                time.sleep(1)
                continue
            now = int(time.time())
            for (cid, check) in self.checks.items():
                # maybe this check is not an activated one for us; if so, bail out
                if cid not in self.active_checks:
                    continue
                # maybe this check is already running
                if cid in cur_launchs:
                    continue
                # else look at the time
                last_check = check['last_check']
                interval = int(check['interval'].split('s')[0]) # todo manage like it should
                # in the conf reading phase
                # randomize the interval a bit so checks spread out
                interval = random.randint(int(0.9 * interval), int(1.1 * interval))
                if last_check < now - interval:
                    script = check['script']
                    logger.debug('CHECK: launching check %s:%s' % (cid, script))
                    t = threader.create_and_launch(self.launch_check, name='check-%s' % cid, args=(check,), part='monitoring')
                    cur_launchs[cid] = t
            # Reap worker threads that have finished.
            to_del = []
            for (cid, t) in cur_launchs.items():
                if not t.is_alive():
                    t.join()
                    to_del.append(cid)
            for cid in to_del:
                del cur_launchs[cid]
            # each second we look if there is history info to save
            self.write_history_entry()
            time.sleep(1)
# Will delete all checks into the kv and update new values, but in a thread
def update_checks_kv(self):
# Ok go launch it :)
threader.create_and_launch(self.do_update_checks_kv, name='do_update_checks_kv', essential=True, part='key-values')
# TODO: RE-factorize with the TS code part
def put_graphite_datas(self, datas):
forwards = {}
for e in datas:
mname, value, timestamp = e['mname'], e['value'], e['timestamp']
hkey = hashlib.sha1(mname).hexdigest()
ts_node_manager = gossiper.find_group_node('ts', hkey)
# if | |
##################################################################################
# Copyright 2021 <NAME> at Duke University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##################################################################################
# This module exports a class that stores information about atoms that is needed by the
# Probe and Reduce portions of MolProbity to determine how to properly handle
# them. This includes color information for Kinemage outputs but also information
# that may be available from CCTBX such as Van der Waals radii, donor/acceptor
# status, and whether the atom is metallic.
#
# Use the FindExtraAtomInfo() function to get an ExtraAtomInfo structure filled
# based on a specific atom along with a second tuple value that has a list of all
# information in the table for that atom.
from __future__ import print_function, nested_scopes, generators, division
from __future__ import absolute_import, unicode_literals
import sys
import re
import iotbx
from iotbx.map_model_manager import map_model_manager
from iotbx.data_manager import DataManager
import mmtbx
import boost_adaptbx.boost.python as bp
bp.import_ext("mmtbx_probe_ext")
import mmtbx_probe_ext as probe
##################################################################################
# Helper functions.
# Pad the name with spaces on the right to ensure that it is at least as long as
# requested.
def Pad(s, n=4):
  """Return *s* right-padded with spaces to a minimum length of *n*.

  Bug fix: the original bound ``ret = s`` before padding and then returned
  ``ret``, so (strings being immutable) the padding was silently discarded
  and the unpadded input was returned.
  :param s: String to pad.
  :param n: Minimum length of the result (default 4).
  :returns s extended with trailing spaces to at least length n.
  """
  return s.ljust(n)
# Gobble up all spaces from the end of the name after non-space characters
def Unpad(n):
  """Return *n* with all trailing spaces removed.

  Robustness fix: the original ``while n[-1] == ' '`` loop raised IndexError
  on an empty string (or one consisting entirely of spaces); rstrip handles
  those cases and is otherwise identical.
  :param n: String to strip.
  :returns n without trailing spaces (leading spaces are preserved).
  """
  return n.rstrip(' ')
# Is a carbon atom a Carbonyl from a standard amino acid?
def IsSpecialAminoAcidCarbonyl(resName, atomName):
  """Given a residue and atom name, determine whether that atom is a C=O.
  This does not mark the ' C ' atom that is always a Carbonyl; that is checked separately.
  :param resName: String containing the 1-3-character residue name in all caps, including leading space.
  :param atomName: String containing the 1-4-character atom name in all caps, including leading space.
  :returns True if the atom is a C=O in a standard residue, False if not. Does not handle HET atoms.
  """
  # Map from (trailing-space-stripped) atom name to the residues in which that
  # atom is a side-chain carbonyl carbon.
  carbonylsByAtom = {
    ' CG': ['ASP','ASN','ASX'],
    ' CD': ['GLU','GLN','GLX'],
  }
  return resName in carbonylsByAtom.get(Unpad(atomName), [])
# Table of aromatic-acceptor atoms by residue and atom name. The first entry in each list element is
# a list of residue names with trailing spaces trimmed. The second is a list of atoms that qualify
# for all of the entries in the residue names. In both cases, the strings are stripped of all
# spaces to the left and right.
# Each element pairs a list of residue names with the list of atom names (both
# whitespace-stripped before comparison) that count as aromatic/acceptor there.
_AromaticTable = [
  # Note: Some atoms from these residues are listed in other sections. The combination of
  # residue and atom name is not duplicated, but there are multiple entries for some residues --
  # this is not a set.
  [ ['HIS'], ['ND1','NE2'] ],
  [ ['ADE','A'], ['N1','N3','N7','C2','C4','C5','C6','C8','N9'] ],
  [ ['CYT','C'], ['N3','N1','C2','C4','C5','C6'] ],
  [ ['GUA','G'], ['N3','N7','N1','C2','C4','C5','C6','C8','N9'] ],
  [ ['THY','T'], ['N1','C2','N3','C4','C5','C6'] ],
  [ ['URA','U'], ['N1','C2','N3','C4','C5','C6'] ],
  [ ['DA'], ['N1','N3','N7','C2','C4','C5','C6','C8','N9'] ],
  [ ['DC'], ['N3','N1','C2','C4','C5','C6'] ],
  [ ['DG'], ['N3','N7','N1','C2','C4','C5','C6','C8','N9'] ],
  [ ['DT'], ['N1','C2','N3','C4','C5','C6'] ],
  [ ['HEM'], ['N A','N B','N C','N D'] ],
  # Here we treat the aromatic Pi-bonds as hydrogen bond acceptors.
  # Note: Some atoms from these residues are listed in other sections. The combination of
  # residue and atom name is not duplicated, but there are multiple entries for some residues --
  # this is not a set.
  [ ['HEM'], ['C1A','C2A','C3A','C4A',
              'C1B','C2B','C3B','C4B',
              'C1C','C2C','C3C','C4C',
              'C1D','C2D','C3D','C4D'] ],
  [ ['PHE'], ['CZ','CE2','CE1','CD2','CD1','CG'] ],
  [ ['TYR'], ['CZ','CE2','CE1','CD2','CD1','CG'] ],
  # [ ['HIS'], ['CD2','CE1','CG'] ],
  [ ['TRP'], ['CH2','CZ3','CZ2','CE3','CE2','NE1','CD2','CD1','CG'] ],
  # Here we add the hydrogens and deuteriums that can be part of a ring from probe:select.c
  [ ['PHE'], ['HD1','HD2','HE1','HE2','HZ','DD1','DD2','DE1','DE2','DZ'] ],
  [ ['HIS'], ['HD1','HD2','HE1','HE2','DD1','DD2','DE1','DE2'] ],
  [ ['TYR'], ['HD1','HD2','HE1','HE2','DD1','DD2','DE1','DE2'] ],
  [ ['TRP'], ['HD1','HE1','HE3','HZ2','HZ3','HH2','DD1','DE1','DE3','DZ2','DZ3','DH2'] ],
  [ ['U','URA','UTP','UDP','UMP','UR'], ['H3','HN3','H5','H6','D3','DN3','D5','D6'] ],
  [ ['T','THY','TTP','TDP','TMP','5MU','DT','TR'], ['H3','HN3','H6','D3','DN3','D6'] ],
  [ ['A','ADE','ATP','ADP','AMP','1MA','RIA','T6A','DA','AR'], ['H8','H2','D8','D2'] ],
  [ ['C','CYT','CTP','CDP','CMP','5MC','OMC','DC','CR'], ['H5','H6','D5','D6'] ],
  [ ['G','GUA','GTP','GDP','GMP','GSP','2MG','M2G','7MG','OMG','DG','GR'], ['H8','H1','HN1','D8','D1','DN1'] ],
  [ ['YG','1MG'], ['H8','D8'] ],
  [ ['PSU'], ['H6','D6','H1','HN1','D1','DN1','H3','HN3','D3','DN3'] ],
  [ ['I','DI'], ['H8','H2','H1','HN1','D8','D2','D1','DN1'] ]
]
# Is a carbon or nitrogen or hydrogen atom part of an aromatic ring?
def IsAromatic(resName, atomName):
    """Report whether the named atom belongs to an aromatic ring.

    Only standard residues listed in _AromaticTable are recognized; HET
    atoms are not handled.

    :param resName: 1-3 character residue name in all caps (leading spaces allowed).
    :param atomName: 1-4 character atom name in all caps (leading spaces allowed).
    :returns: True when the residue/atom pair appears in the aromatic table,
        False otherwise.
    """
    res = resName.strip()
    atom = atomName.strip()
    return any(res in residues and atom in atoms
               for residues, atoms in _AromaticTable)
##################################################################################
class AtomFlags(object):
    """Bit-mask constants describing special attributes an atom can have.

    Each flag is a distinct power of two, so flags can be combined with
    bitwise OR and tested with bitwise AND.
    """
    EMPTY_FLAGS = 0              # no attributes set
    IGNORE_ATOM = 1 << 0         # skip this atom entirely during processing, as if absent
    DONOR_ATOM = 1 << 1          # atom can donate electrons
    ACCEPTOR_ATOM = 1 << 2       # atom can accept electrons
    HB_ONLY_DUMMY_ATOM = 1 << 3  # temporary dummy hydrogen added to a water when a donor is
                                 # needed; it may hydrogen-bond but never clash
    METALLIC_ATOM = 1 << 4       # atom is a metal
##################################################################################
class AtomInfo(object):
    """Extra information about an atom, looked up by the AtomTypes class.

    Instances are normally built from one row of the AtomTypes lookup table,
    passed as a positional list. Missing or unreadable entries fall back to
    benign defaults so partially-specified rows still construct cleanly.
    """

    def __init__(self, myValList=None):
        """Construct from an optional list-like table row.

        :param myValList: sequence whose entries are, in order: atomic number,
            short name, full name, explicit electron-cloud VDW radius,
            explicit neutron VDW radius, implicit electron-cloud VDW radius,
            covalent radius, kinemage color, and flags. May be None, shorter
            than nine entries, or otherwise unreadable; a per-field default
            is substituted for anything that cannot be read.
        """

        def field(index, default):
            # Single point for the "take the entry or fall back" logic that
            # the original repeated nine times; any failure (None input,
            # short list, unindexable object) yields the default, as before.
            try:
                return myValList[index]
            except Exception:
                return default

        self._atomicNumber = field(0, 0)
        self._name = field(1, "?")
        self._fullName = field(2, "unknown")
        self._vdwElectronCloudExplicit = field(3, 0)
        self._vdwNeutronExplicit = field(4, 0)
        self._vdwElectronCloudImplicit = field(5, 0)
        self._covalent = field(6, 0)
        self._kinemageColor = field(7, "grey")
        try:
            self._flags = myValList[8]
        except Exception:
            # Kept inline (not via the helper) so AtomFlags is only
            # referenced when the default is actually needed.
            self._flags = AtomFlags.EMPTY_FLAGS

    # Getter and setter methods, kept for backward compatibility with
    # callers that use them directly instead of the properties below.
    def get_atomicNumber(self): return self._atomicNumber
    def set_atomicNumber(self, val): self._atomicNumber = val
    def get_name(self): return self._name
    def set_name(self, val): self._name = val
    def get_fullName(self): return self._fullName
    def set_fullName(self, val): self._fullName = val
    def get_vdwElectronCloudExplicit(self): return self._vdwElectronCloudExplicit
    def set_vdwElectronCloudExplicit(self, val): self._vdwElectronCloudExplicit = val
    def get_vdwElectronCloudImplicit(self): return self._vdwElectronCloudImplicit
    def set_vdwElectronCloudImplicit(self, val): self._vdwElectronCloudImplicit = val
    def get_vdwNeutronExplicit(self): return self._vdwNeutronExplicit
    def set_vdwNeutronExplicit(self, val): self._vdwNeutronExplicit = val
    def get_covalent(self): return self._covalent
    def set_covalent(self, val): self._covalent = val
    def get_kinemageColor(self): return self._kinemageColor
    def set_kinemageColor(self, val): self._kinemageColor = val
    def get_flags(self): return self._flags
    def set_flags(self, val): self._flags = val

    # Properties: the preferred access path for new code.
    atomicNumber = property(get_atomicNumber, set_atomicNumber)
    name = property(get_name, set_name)
    fullName = property(get_fullName, set_fullName)
    vdwElectronCloudExplicit = property(get_vdwElectronCloudExplicit, set_vdwElectronCloudExplicit)
    vdwElectronCloudImplicit = property(get_vdwElectronCloudImplicit, set_vdwElectronCloudImplicit)
    vdwNeutronExplicit = property(get_vdwNeutronExplicit, set_vdwNeutronExplicit)
    covalent = property(get_covalent, set_covalent)
    kinemageColor = property(get_kinemageColor, set_kinemageColor)
    flags = property(get_flags, set_flags)
class AtomTypes(object):
"""Class that looks up extra information for atoms that is required by the MolProbity Probe and
Reduce modules.
"""
def __init__(self, useNeutronDistances = False, useImplicitHydrogenDistances = False):
"""Constructor.
:param useNeutronDistances: Use neutron distances and radii for scoring.
The default is to use electron-cloud distances. This is used both for the
separation between a Hydgrogen and its bound partner and for the radius of the
Hydrogen and it must be set consistently across the entire code base. When this is
True, it supercedes useImplicitHydrogenDistances.
:param useImplicitHydrogenDistances: Default is to use distances consistent with
explicitly-listed Hydrgoens, but setting this to True implicit-Hydrogen distances instead.
This must be set consistently with the hydrogens in the model.
"""
##################################################################################
# Store state based on options.
self._useNeutronDistances = useNeutronDistances
self._useImplicitHydrogenDistances = useImplicitHydrogenDistances
##################################################################################
# Table of information about each type of atom. The elements in each list are as
# follows:
# Atomic number
# Name of the type, used to look up the atom type
# Full name of the type, useful when printing
# VDW radius for | |
import logging
import math
import torch
from torch_geometric.data import Data
from torch_geometric.nn.pool import max_pool, voxel_grid
from torch_geometric.nn.pool.pool import pool_edge
from torch_geometric.typing import Adj
from torch_scatter import scatter_max, scatter_sum
from typing import List
from aegnn.models.layer import MaxPooling
from .base.base import add_async_graph, async_context
from .base.utils import compute_edges, graph_new_nodes, graph_changed_nodes
from .flops import compute_flops_voxel_grid
def __intersection(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Compute the unique common elements in x and y, with x and y being in generally different shape.

    Works by scattering both inputs into dense indicator arrays, so it only
    supports non-negative integer (``torch.long``) tensors; memory use grows
    with the maximum value present in either input.

    :param x: tensor of non-negative long indices.
    :param y: tensor of non-negative long indices; may differ in shape from x.
    :returns: 1-D long tensor of the sorted unique values present in both inputs.
    """
    assert x.dtype == y.dtype == torch.long
    if x.numel() == 0 or y.numel() == 0:
        # Fix: return an empty *long* tensor. The non-empty path below yields
        # long (torch.nonzero), and callers use the result as an index set;
        # the original returned a float tensor here.
        return torch.tensor([], dtype=torch.long, device=x.device)
    xy_max = max(torch.max(x), torch.max(y))
    # Indicator arrays: mark the values present in each input, then multiply
    # element-wise so that only values present in both remain non-zero.
    # NOTE(review): assumes x and y live on the same device -- confirm callers.
    x_array = torch.zeros(xy_max + 1, device=x.device)
    y_array = torch.zeros(xy_max + 1, device=y.device)
    x_array[x] = 1
    y_array[y] = 1
    z_array = torch.mul(x_array, y_array)
    return torch.flatten(torch.nonzero(z_array))
def __dense_process(module: MaxPooling, x: torch.Tensor, pos: torch.Tensor, batch: torch.LongTensor = None,
                    edge_index: Adj = None) -> Data:
    """Synchronous (dense) voxel-grid max pooling of the full graph.

    :param module: max-pooling module providing grid/voxel sizes and transform.
    :param x: node feature matrix.
    :param pos: node position matrix.
    :param batch: accepted for signature compatibility only -- it is
        immediately overwritten below (batched processing is not supported).
    :param edge_index: graph connectivity; computed from ``pos`` when None.
    :returns: coarsened Data object with an extra ``vdx`` attribute holding
        the sorted unique voxel index of each coarse node.
    """
    batch = torch.zeros(x.size()[0], device=x.device)  # no batch processing
    if edge_index is None:
        edge_index = compute_edges(module, pos=pos)
    # Assign each node to a voxel, then max-pool node features per voxel.
    clusters = __get_clusters(module, pos=pos)
    graph = Data(x=x, pos=pos, edge_index=edge_index, batch=batch)
    graph_coarse = max_pool(clusters, data=graph, transform=module.transform)
    # Remember which voxel each coarse node corresponds to.
    graph_coarse.vdx = torch.unique(clusters, sorted=True)
    return graph_coarse
def __graph_initialization(module: MaxPooling, x: torch.Tensor, pos: torch.Tensor, edge_index: Adj = None) -> Data:
    """Graph initialization for asynchronous update.

    Both the input as well as the output graph have to be stored, in order to avoid repeated computation. The
    input graph is used for spotting changed or new nodes (as for other asyn. layers), while the output graph
    is compared to the set of diff & new nodes, in order to be updated. Depending on the type of pooling (max, mean,
    average, etc) not only the output voxel feature have to be stored but also aggregations over all nodes in
    one output voxel such as the sum or count.

    Next to the features the node positions are averaged over all nodes in the voxel, as well. To do so,
    position aggregations (count, sum) are stored and updated, too.

    :param module: max-pooling module whose asynchronous state is initialized here.
    :param x: node feature matrix of the full input graph.
    :param pos: node position matrix of the full input graph.
    :param edge_index: NOTE(review): accepted but not forwarded to
        __dense_process (which is called with edge_index=None) -- confirm intentional.
    :returns: the coarsened (pooled) graph.
    """
    logging.debug(f"Input graph with x = {x.shape} and pos = {pos.shape}")
    graph_out = __dense_process(module, x, pos, edge_index=None)
    module.asy_graph = Data(x=x, pos=pos)
    logging.debug(f"Resulting in coarse graph {graph_out}")
    # Compute the voxel index for every node (clustering), and determine the max. feature vector over
    # all nodes that are assigned to the same voxel, independently for all dimensions. Example:
    # x1 = [1, 3, 4, 5, 1], x2 = [0, 2, 6, 10, 20] => x_voxel = [1, 3, 6, 10, 20]
    module.asy_graph.vdx = __get_clusters(module, pos=pos)  # voxel index of every node
    _, index = torch.unique(module.asy_graph.vdx, sorted=True, return_inverse=True)
    _, argmax = scatter_max(x, index=index, dim=0)
    # Store all of the nodes that contribute to the voxel max feature vector in at least one dimension.
    argmax_nodes = torch.unique(argmax)
    module.asy_node_max_index = torch.flatten(argmax_nodes).long()
    # Store aggregations and final coarse (output) graph. The sum and count per
    # voxel allow incremental updates of the per-voxel mean position later on.
    module.asy_voxel_pos_sum = scatter_sum(pos, index=index, dim=0)
    module.asy_voxel_node_count = scatter_sum(torch.ones_like(module.asy_graph.vdx, device=x.device), index=index)
    module.asy_graph_coarse = graph_out.clone()
    # Sanity check: one aggregation entry per coarse node.
    assert module.asy_voxel_pos_sum.shape[0] == module.asy_graph_coarse.num_nodes
    assert module.asy_voxel_node_count.shape[0] == module.asy_graph_coarse.num_nodes
    # Compute number of floating point operations (no cat, flatten, etc.).
    if module.asy_flops_log is not None:
        flops = compute_flops_voxel_grid(pos)
        flops += graph_out.x.numel() + graph_out.edge_index.numel()  # every edge has to be re-assigned
        flops += 3 * index.numel()  # scatter with index
        module.asy_flops_log.append(flops)
    return graph_out
def __graph_process(module: MaxPooling, x: torch.Tensor, pos: torch.Tensor, edge_index: Adj = None) -> Data:
    """Incrementally update the coarsened (max-pooled) graph for new and changed nodes.

    Uses the aggregations stored by __graph_initialization (per-voxel position
    sum/count, per-voxel max contributors) so only affected voxels are
    re-evaluated instead of re-pooling the whole graph.

    :param module: max-pooling module carrying the asynchronous state.
    :param x: full node feature matrix (including new/changed nodes).
    :param pos: ignored -- overwritten from ``module.asy_pos`` below.
    :param edge_index: full graph connectivity; when None the coarse graph is
        returned without edges.
    :returns: updated coarse graph (with a zero ``batch`` vector attached).
    """
    pos = module.asy_pos  # input pos is pos of new event, not whole graph due to dense & sparse code sharing
    _, diff_idx = graph_changed_nodes(module, x=x)
    _, new_idx = graph_new_nodes(module, x=x)
    logging.debug(f"Subgraph contains {new_idx.numel()} new and {diff_idx.numel()} diff nodes")
    # Compute the intersection between nodes that have been changed and nodes that contribute to the
    # voxel's feature vector (max values).
    replaced_idx = __intersection(diff_idx.long(), module.asy_node_max_index)
    logging.debug(f"... from which {replaced_idx.numel()} nodes contributed to the coarse graph")
    # As not all of the feature values, that do not contribute to the voxel feature vector (max vector), are
    # stored, when one of the contributing nodes has changed, the voxel feature vector has to be re-evaluated
    # by looking at all (!) of the nodes that are assigned to the voxel. Therefore, for every changed index
    # add all of the nodes in the same voxel to the list of nodes of the subgraph to look at, and reset the
    # nodes aggregations.
    graph_vdx = getattr(module.asy_graph, "vdx")
    for idx in replaced_idx:
        nodes_voxel = torch.nonzero(torch.eq(graph_vdx, graph_vdx[idx]))[:, 0]
        diff_idx = torch.cat([diff_idx, nodes_voxel])
        voxel_idx = module.asy_graph.vdx[idx]
        coarse_idx = torch.eq(getattr(module.asy_graph_coarse, "vdx"), voxel_idx)
        # Sentinel: reset the voxel's feature so the scatter_max below
        # recomputes it from all member nodes.
        module.asy_graph_coarse.x[coarse_idx] = -999999
        module.asy_voxel_pos_sum[coarse_idx] -= pos[idx]
        module.asy_voxel_node_count[coarse_idx] -= 1
    update_idx = torch.cat([diff_idx, new_idx])
    x_update = x[update_idx, :]
    pos_update = pos[update_idx, :]
    # Max/Average pool the features x and positions respectively.
    # The update nodes are concatenated with the stored coarse state so a
    # single scatter over voxel indices merges both.
    vdx_update = __get_clusters(module, pos=pos_update)
    x_scatter = torch.cat([x_update, module.asy_graph_coarse.x])
    node_index_scatter = torch.cat([update_idx, module.asy_node_max_index])
    pos_sum_scatter = torch.cat([pos_update, module.asy_voxel_pos_sum])
    node_count_scatter = torch.cat([torch.ones_like(update_idx, device=x.device), module.asy_voxel_node_count])
    clusters = torch.cat([vdx_update, getattr(module.asy_graph_coarse, "vdx")])
    clusters_unique, index = torch.unique(clusters, sorted=True, return_inverse=True)
    x_max, argmax = scatter_max(x_scatter, index=index, dim=0)
    voxel_pos_sum = scatter_sum(pos_sum_scatter, index=index, dim=0)
    voxel_node_count = scatter_sum(node_count_scatter, index=index)
    pos_mean = torch.div(voxel_pos_sum, voxel_node_count.view(-1, 1))  # index is sorted, so output is too
    # The coarsened graph is reconnected by dropping all edges inside a voxel and
    # unifying all edges between voxels.
    vdx = torch.cat([getattr(module.asy_graph, "vdx"), vdx_update[diff_idx.numel():]])  # add new node index
    vdx[diff_idx] = vdx_update[:diff_idx.numel()]  # change diff node index
    edges_coarse = torch.empty((2, 0), dtype=torch.long, device=x.device)
    if edge_index is not None:
        edges_coarse, _ = pool_edge(cluster=vdx, edge_index=edge_index, edge_attr=None)
    # Create the coarsened graph and update the internal graph. While the coarsened graph can be
    # overwritten completely, as it has been re-computed, most elements of the un-coarsened graph
    # are unchanged, and therefore only have to be partly updated.
    graph_out = Data(x=x_max, pos=pos_mean, edge_index=edges_coarse)
    module.asy_graph = Data(x=x, pos=pos, edge_index=edge_index, vdx=vdx).clone()
    module.asy_node_max_index = torch.flatten(node_index_scatter[argmax]).long()
    module.asy_voxel_pos_sum = voxel_pos_sum
    module.asy_voxel_node_count = voxel_node_count
    module.asy_graph_coarse = Data(x=graph_out.x, pos=graph_out.pos, edge_index=edges_coarse, vdx=clusters_unique)
    # Compute number of floating point operations (no cat, flatten, etc.).
    if module.asy_flops_log is not None:
        flops = x_scatter.size()[0] + pos_sum_scatter.numel() + node_count_scatter.numel()  # pooling
        flops += voxel_pos_sum.numel()  # pos mean
        module.asy_flops_log.append(flops)
    # For asychronous processing we assume that all events are in the same "batch".
    graph_out.batch = torch.zeros(graph_out.num_nodes, dtype=torch.long, device=graph_out.x.device)
    # Max pooling coarsens the graph, so the pos vector of all subsequent layer has to be updated..
    module.asy_pass_attribute('asy_pos', graph_out.pos)
    return graph_out
def __get_clusters(module, pos: torch.Tensor) -> torch.LongTensor:
    """Assign every node position to a voxel of the module's regular grid.

    :param module: pooling module providing ``voxel_size`` and ``grid_size``.
    :param pos: (num_nodes, num_dims) node position matrix.
    :returns: long tensor with one voxel index per node.
    """
    num_pos, num_dims = pos.shape
    grid_start = torch.zeros(num_dims, device=pos.device)
    grid_end = module.grid_size
    # Fix: allocate the single-sample batch vector with long dtype on the
    # same device as `pos`; the original created a float CPU tensor, which
    # risks a device/dtype mismatch for CUDA inputs.
    batch = torch.zeros(num_pos, dtype=torch.long, device=pos.device)
    return voxel_grid(pos, batch=batch, size=module.voxel_size, start=grid_start, end=grid_end)
def __get_num_voxels(module, dim: int = None) -> int:
    """Number of voxels in the module's grid.

    :param module: pooling module providing ``grid_size`` and ``voxel_size``.
    :param dim: when given, return the voxel count along that dimension only;
        otherwise return the total count over all dimensions.
    """
    dims = len(module.voxel_size)
    per_dim = [int(module.grid_size[d] / module.voxel_size[d]) + 1 for d in range(dims)]
    if dim is None:
        return math.prod(per_dim)
    return per_dim[dim]
def make_max_pool_asynchronous(module: MaxPooling, grid_size: List[int], r: float,
log_flops: bool = False, log_runtime: bool = False):
"""Module converter from synchronous to asynchronous & sparse processing for graph max pooling layer.
By overwriting parts of the module asynchronous processing can be enabled without the need re-creating the
object. So, a max pooling layer can be converted by, for example:
```
module = MaxPool([4, 4])
module = make_max_pool_asynchronous(module)
```
:param module: standard max pooling module.
:param grid_size: grid size (grid starting at 0, spanning to `grid_size`), >= `size`.
:param r: update radius around new events.
:param log_flops: log flops of asynchronous update.
:param log_runtime: log runtime of asynchronous update.
"""
assert hasattr(module, "voxel_size")
assert len(module.voxel_size) == len(grid_size)
assert all([module.voxel_size[i] <= grid_size[i] for i in range(len(module.voxel_size))])
assert all([grid_size[i] % module.voxel_size[i] == 0 for i in range(len(module.voxel_size))])
module = add_async_graph(module, r=r, log_flops=log_flops, log_runtime=log_runtime)
module.asy_pos = None
module.asy_graph_coarse = None # coarse output graph
module.asy_node_max_index = None # index of max. node in input data
module.asy_voxel_pos_sum = None # sum of positions per voxel
module.asy_voxel_node_count = None # count of nodes per voxel
module.grid_size = grid_size # grid size in N dimensions
def async_forward(x: torch.Tensor, pos: torch.Tensor = None,
batch=None, edge_index: Adj = None, return_data_obj: bool = False):
with async_context(module, __graph_initialization, __graph_process) as func:
data_out = func(module, x=x, pos=pos, edge_index=edge_index)
# If defined, apply transform to output data.
if module.transform is not None:
data_out = module.transform(data_out)
# Following the convention defined in `aegnn.models.layer.max_pool`, forward either returns | |
# Source repository: mattclarke/caproto
#!/usr/bin/env python
# unit-tests for ca interface
# Lifted almost exactly from pyepics
# The epics python module was orignally written by
#
# <NAME> <<EMAIL>>
# CARS, University of Chicago
#
# There have been several contributions from many others, notably Angus
# Gratton <<EMAIL>>. See the Acknowledgements section of
# the documentation for a list of more contributors.
#
# Except where explicitly noted, all files in this distribution are licensed
# under the Epics Open License.:
#
# ------------------------------------------------
#
# Copyright 2010 <NAME>, The University of Chicago. All rights reserved.
#
# The epics python module is distributed subject to the following license conditions:
# SOFTWARE LICENSE AGREEMENT
# Software: epics python module
#
# 1. The "Software", below, refers to the epics python module (in either
# source code, or binary form and accompanying documentation). Each
# licensee is addressed as "you" or "Licensee."
#
# 2. The copyright holders shown above and their third-party licensors
# hereby grant Licensee a royalty-free nonexclusive license, subject to
# the limitations stated herein and U.S. Government license rights.
#
# 3. You may modify and make a copy or copies of the Software for use
# within your organization, if you meet the following conditions:
#
# 1. Copies in source code must include the copyright notice and this
# Software License Agreement.
#
# 2. Copies in binary form must include the copyright notice and this
# Software License Agreement in the documentation and/or other
# materials provided with the copy.
#
# 4. You may modify a copy or copies of the Software or any portion of
# it, thus forming a work based on the Software, and distribute copies of
# such work outside your organization, if you meet all of the following
# conditions:
#
# 1. Copies in source code must include the copyright notice and this
# Software License Agreement;
#
# 2. Copies in binary form must include the copyright notice and this
# Software License Agreement in the documentation and/or other
# materials provided with the copy;
#
# 3. Modified copies and works based on the Software must carry
# prominent notices stating that you changed specified portions of
# the Software.
#
# 5. Portions of the Software resulted from work developed under a
# U.S. Government contract and are subject to the following license: the
# Government is granted for itself and others acting on its behalf a
# paid-up, nonexclusive, irrevocable worldwide license in this computer
# software to reproduce, prepare derivative works, and perform publicly
# and display publicly.
#
# 6. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS" WITHOUT
# WARRANTY OF ANY KIND. THE COPYRIGHT HOLDERS, THEIR THIRD PARTY
# LICENSORS, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY,
# AND THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT, (2) DO NOT
# ASSUME ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY,
# COMPLETENESS, OR USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT
# USE OF THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4) DO
# NOT WARRANT THAT THE SOFTWARE WILL FUNCTION UNINTERRUPTED, THAT IT IS
# ERROR-FREE OR THAT ANY ERRORS WILL BE CORRECTED.
#
# 7. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT HOLDERS,
# THEIR THIRD PARTY LICENSORS, THE UNITED STATES, THE UNITED STATES
# DEPARTMENT OF ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
# INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF ANY KIND OR
# NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF PROFITS OR LOSS OF DATA,
# FOR ANY REASON WHATSOEVER, WHETHER SUCH LIABILITY IS ASSERTED ON THE
# BASIS OF CONTRACT, TORT (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR
# OTHERWISE, EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
# POSSIBILITY OF SUCH LOSS OR DAMAGES.
#
# ------------------------------------------------
import pytest
numpy = pytest.importorskip("numpy")
import time
import os
import sys
import threading
from types import SimpleNamespace
from contextlib import contextmanager
from caproto.threading.pyepics_compat import (PV, caput, caget, cainfo,
caget_many, caput_many,
AccessRightsException)
from .conftest import default_setup_module, default_teardown_module
from .test_threading_client import context, shared_broadcaster
def setup_module(module):
    """Pytest module-level setup: run the shared default setup, then raise
    caproto's benchmarking log level so test output is verbose."""
    default_setup_module(module)
    # Imported lazily so the module can be collected without side effects.
    from caproto.benchmarking.util import set_logging_level
    set_logging_level('DEBUG')
def teardown_module(module):
    """Pytest module-level teardown: delegate cleanup to the shared helper."""
    default_teardown_module(module)
@pytest.fixture(scope='function')
def pvnames(request, epics_base_ioc, context):
    """Per-test fixture: a namespace of PV names served by the spawned EPICS
    base IOC, with cleanup of the shared threading-client context registered
    as a finalizer."""
    class PVNames:
        # All record names carry the IOC's unique prefix.
        prefix = epics_base_ioc.prefix
        double_pv = prefix + 'ao1'
        double_pv_units = 'microns'
        double_pv_prec = 4
        double_pv2 = prefix + 'ao2'
        pause_pv = prefix + 'pause'
        str_pv = prefix + 'ao1.DESC'
        int_pv = prefix + 'long2'
        long_pv = prefix + 'long2'
        float_pv = prefix + 'ao3'
        enum_pv = prefix + 'mbbo1'
        enum_pv_strs = ['Stop', 'Start', 'Pause', 'Resume']
        proc_pv = prefix + 'ao1.PROC'
        # Waveform (array) records of various element types and sizes.
        long_arr_pv = prefix + 'long2k'
        double_arr_pv = prefix + 'double2k'
        string_arr_pv = prefix + 'string128'
        char_arr_pv = prefix + 'char128'
        char_arrays = [prefix + 'char128',
                       prefix + 'char2k',
                       prefix + 'char64k']
        long_arrays = [prefix + 'long128',
                       prefix + 'long2k',
                       prefix + 'long64k']
        double_arrays = [prefix + 'double128',
                         prefix + 'double2k',
                         prefix + 'double64k']
        # PVs the simulator updates continuously (vs. a static one).
        updating_pv1 = prefix + 'ao1'
        updating_str1 = prefix + 'char256'
        updating_pvlist = [prefix + 'ao1',
                           prefix + 'ai1',
                           prefix + 'long1',
                           prefix + 'ao2']
        non_updating_pv = prefix + 'ao4'
        alarm_pv = prefix + 'long1'
        alarm_comp = 'ge'
        alarm_trippoint = 7
        subarr_driver = prefix + 'wave_test'
        subarr1 = prefix + 'subArr1'
        subarr2 = prefix + 'subArr2'
        subarr3 = prefix + 'subArr3'
        subarr4 = prefix + 'subArr4'
        zero_len_subarr1 = prefix + 'ZeroLenSubArr1'
        # TODO: softIoc does not build with motor
        motor1 = 'sim:mtr1'
        motor2 = 'sim:mtr2'

        def __repr__(self):
            return f'<PVNames prefix={epics_base_ioc.prefix}>'

    PV._default_context = context

    def finalize_context():
        # Tear down the client context and assert every worker thread exited,
        # so a leaked thread fails the test rather than lingering silently.
        print('Cleaning up PV context')
        context.disconnect()
        assert not context._process_search_results_thread.is_alive()
        assert not context._activate_subscriptions_thread.is_alive()
        assert not context.selector.thread.is_alive()
        sb = context.broadcaster
        sb.disconnect()
        assert not sb._command_thread.is_alive()
        assert not sb.selector.thread.is_alive()
        assert not sb._retry_unanswered_searches_thread.is_alive()
        print('Done cleaning up PV context')
    request.addfinalizer(finalize_context)
    return PVNames()
def simulator_main(prefix, ready_event, exit_event):
'simulator.py from pyepics testioc (same license as above)'
import random
from epics import caput as _caput, PV as _PV
class PV(_PV):
def put(self, value, **kw):
rval = repr(value)[:50]
print(f'(simulator: put {self.pvname} {rval})')
return super().put(value, **kw)
def caput(pv, value, **kw):
rval = repr(value)[:50]
print(f'(simulator: caput {pv} {rval})')
return _caput(pv, value, **kw)
NEEDS_INIT = True
SLEEP_TIME = 0.10
def onConnect(pvname=None, conn=None, **kws):
nonlocal NEEDS_INIT
NEEDS_INIT = conn
def make_pvs(*args, **kwds):
# print("Make PVS ' ", prefix, args)
# print( [("%s%s" % (prefix, name)) for name in args])
pvlist = [PV("%s%s" % (prefix, name)) for name in args]
for pv in pvlist:
pv.connect()
pv.connection_callbacks.append(onConnect)
return pvlist
mbbos = make_pvs("mbbo1", "mbbo2")
pause_pv = make_pvs("pause",)[0]
longs = make_pvs("long1", "long2", "long3", "long4")
strs = make_pvs("str1", "str2")
analogs = make_pvs("ao1", "ai1", "ao2", "ao3")
binaries = make_pvs("bo1", "bi1")
char_waves = make_pvs("char128", "char256", "char2k", "char64k")
double_waves = make_pvs("double128", "double2k", "double64k")
long_waves = make_pvs("long128", "long2k", "long64k")
str_waves = make_pvs("string128", "string2k", "string64k")
subarrays = make_pvs("subArr1", "subArr2", "subArr3", "subArr4" )
subarray_driver = make_pvs("wave_test",)[0]
def initialize_data():
subarray_driver.put(numpy.arange(64)/12.0)
for p in mbbos:
p.put(1)
for i, p in enumerate(longs):
p.put((i+1))
for i, p in enumerate(strs):
p.put(("String %s" % (i+1)))
for i, p in enumerate(binaries):
p.put((i+1))
for i, p in enumerate(analogs):
p.put((i+1)*1.7135000 )
caput(f'{prefix}ao1.EGU', 'microns')
caput(f'{prefix}ao1.PREC', 4)
caput(f'{prefix}ai1.PREC', 2)
caput(f'{prefix}ao2.PREC', 3)
char_waves[0].put([60+random.randrange(30) for i in range(128)])
char_waves[1].put([random.randrange(256) for i in range(256)])
char_waves[2].put([random.randrange(256) for i in range(2048)])
char_waves[3].put([random.randrange(256) for i in range(65536)])
long_waves[0].put([i+random.randrange(2) for i in range(128)])
long_waves[1].put([i+random.randrange(128) for i in range(2048)])
long_waves[2].put([i for i in range(65536)])
double_waves[0].put([i+random.randrange(2) for i in range(128)])
double_waves[1].put([random.random() for i in range(2048)])
double_waves[2].put([random.random() for i in range(65536)])
pause_pv.put(0)
str_waves[0].put([(" String %i" % (i+1)) for i in range(128)])
print('Data initialized')
text = '''line 1
this is line 2
and line 3
here is another line
this is the 5th line
line 6
line 7
line 8
line 9
line 10
line 11
'''.split('\n')
start_time = time.time()
count = 0
long_update = 0
lcount = 1
initialized_at = 0
while not exit_event.is_set():
if NEEDS_INIT:
initialize_data()
time.sleep(SLEEP_TIME)
NEEDS_INIT = False
initialized_at = count
time.sleep(SLEEP_TIME)
count = count + 1
if not NEEDS_INIT and count >= initialized_at + 4:
if not ready_event.is_set():
ready_event.set()
print('[Pyepics simulator running!]')
if count > 99999999:
count = 1
t0 = time.time()
if pause_pv.get() == 1:
# pause for up to 120 seconds if pause was selected
t0 = time.time()
while time.time()-t0 < 120:
time.sleep(SLEEP_TIME)
if pause_pv.get() == 0:
break
elif exit_event.is_set():
break
pause_pv.put(0)
if exit_event.is_set():
break
noise = numpy.random.normal
analogs[0].put(100*(random.random()-0.5))
analogs[1].put(76.54321*(time.time()-start_time))
analogs[2].put(0.3*numpy.sin(time.time() / 2.302) + noise(scale=0.4))
char_waves[0].put([45+random.randrange(64)
for i in range(128)])
if count % 3 == 0:
analogs[3].put(
numpy.exp((max(0.001, noise(scale=0.03) +
numpy.sqrt((count/16.0) % | |
# -*- coding: utf-8 -*-
'''
Module contains all of the functions to create a radio telemetry project.'''
# import modules required for function dependencies
import numpy as np
import pandas as pd
import os
import sqlite3
import datetime
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.dates as mdates
from mpl_toolkits.mplot3d import Axes3D
import statsmodels.api as sm
import statsmodels.formula.api as smf
import networkx as nx
from matplotlib import rcParams
from scipy import interpolate
# Module-wide matplotlib defaults: 6 pt serif text for all generated figures.
# NOTE(review): `font` mirrors the rcParams settings below; presumably kept
# for later matplotlib.rc(...) calls -- confirm it is still used.
font = {'family': 'serif','size': 6}
rcParams['font.size'] = 6
rcParams['font.family'] = 'serif'
def noiseRatio(duration, data, study_tags):
    """Append a 'noiseRatio' column: the ratio of miscoded (pure noise)
    detections to all detections within a moving time window.

    Detections whose FreqCode is not a known study tag are counted as
    miscodes per time bin; the per-bin ratio is then interpolated back onto
    every detection's Epoch.

    :param duration: moving-window length in minutes.
    :param data: DataFrame with 'FreqCode', 'timeStamp' and 'Epoch' columns;
        modified in place and returned.
    :param study_tags: list-like of valid study tags.
    :returns: `data` with a 'noiseRatio' column added (NaN when no bins
        could be formed).
    """
    # Identify miscodes: any detection whose code is not a study tag.
    data['miscode'] = np.isin(data.FreqCode.values, study_tags, invert=True)
    # Bin detections into windows of `duration` minutes and count miscodes
    # and total detections per bin.
    duration_s = str(int(duration * 60)) + 's'
    grouper = pd.Grouper(key='timeStamp', freq=duration_s)
    miscode = data.groupby(grouper).miscode.sum().to_frame()
    total = data.groupby(grouper).FreqCode.count().to_frame()
    total.rename(columns={'FreqCode': 'total'}, inplace=True)
    # Merge counts and compute the per-bin noise ratio.
    noise = total.merge(miscode, left_on='timeStamp', right_on='timeStamp')
    noise.reset_index(inplace=True)
    noise.fillna(value=0, inplace=True)
    noise['noiseRatio'] = noise.miscode / noise.total
    noise.dropna(inplace=True)
    noise['Epoch'] = (noise['timeStamp'] - datetime.datetime(1970, 1, 1)).dt.total_seconds()
    # Interpolate the bin ratios onto every detection. Fix: the original
    # referenced the interpolator unconditionally, raising NameError when
    # fewer than two bins existed; degrade gracefully instead.
    if len(noise) >= 2:
        noise_ratio_fun = interpolate.interp1d(noise.Epoch.values, noise.noiseRatio.values,
                                               kind='linear', bounds_error=False,
                                               fill_value='extrapolate')
        data['noiseRatio'] = noise_ratio_fun(data.Epoch.values)
    elif len(noise) == 1:
        # Only one bin: every detection shares that bin's ratio.
        data['noiseRatio'] = noise.noiseRatio.values[0]
    else:
        data['noiseRatio'] = np.nan
    data.drop(columns=['miscode'], inplace=True)
    return data
def createTrainDB(project_dir, dbName):
    """Create the project directory tree and an empty training database.

    The resulting SQLite schema can be inspected/edited with a tool such as
    DB Browser for SQLite (http://sqlitebrowser.org/).

    :param project_dir: root directory of the project; created if missing.
    :param dbName: file name of the SQLite database, created under
        ``<project_dir>/Data``.
    """
    # Standard directory layout (idempotent thanks to exist_ok).
    data_dir = os.path.join(project_dir, 'Data')            # raw data goes here
    training_dir = os.path.join(data_dir, 'Training_Files')
    output_dir = os.path.join(project_dir, 'Output')        # intermediate and final products, images
    scratch_dir = os.path.join(output_dir, 'Scratch')
    figures_dir = os.path.join(output_dir, 'Figures')
    for directory in (project_dir, data_dir, training_dir, output_dir,
                      scratch_dir, figures_dir):
        os.makedirs(directory, exist_ok=True)

    dbDir = os.path.join(data_dir, dbName)
    # Connect to (and thereby create) the project database. Fix: the original
    # closed only the cursor and leaked the connection.
    conn = sqlite3.connect(dbDir, timeout=30.0)
    try:
        c = conn.cursor()
        # Mandatory project tables; drop-and-recreate so this function always
        # yields a clean schema. Receiver parameters are recorded per file
        # because field-recorded settings can vary and are needed to quantify
        # detection history correctly.
        # NOTE: future studies may require additional per-receiver tables;
        # revisit whether all of these should be created up front.
        c.execute('''DROP TABLE IF EXISTS tblMasterReceiver''')  # receiver ID, receiver type
        c.execute('''DROP TABLE IF EXISTS tblMasterTag''')       # tag ID, frequency, freqcode
        c.execute('''DROP TABLE IF EXISTS tblReceiverParameters''')
        c.execute('''DROP TABLE IF EXISTS tblAlgParams''')
        c.execute('''DROP TABLE IF EXISTS tblNodes''')
        c.execute('''CREATE TABLE tblMasterReceiver(recID TEXT, Name TEXT, RecType TEXT, Node TEXT)''')
        c.execute('''CREATE TABLE tblReceiverParameters(recID TEXT, RecType TEXT, ScanTime REAL, Channels INTEGER, fileName TEXT)''')
        c.execute('''CREATE TABLE tblMasterTag(FreqCode TEXT, PIT_ID TEXT, PulseRate REAL, MortRate REAL, CapLoc TEXT, RelLoc TEXT, TagType TEXT, Length INTEGER, Sex TEXT, RelDate TIMESTAMP, Study TEXT, Species TEXT)''')
        c.execute('''CREATE TABLE tblAlgParams(det INTEGER, duration INTEGER)''')
        c.execute('''CREATE TABLE tblNodes(Node TEXT, Reach TEXT, RecType TEXT, X INTEGER, Y INTEGER)''')
        # Full radio tables: tblTrain holds all classifier training records,
        # tblRaw holds the raw imported detections.
        c.execute('''DROP TABLE IF EXISTS tblRaw''')
        c.execute('''DROP TABLE IF EXISTS tblTrain''')
        c.execute('''CREATE TABLE tblTrain(Channels INTEGER, Detection INTEGER, FreqCode TEXT, Power REAL, lag INTEGER, lagDiff REAL, FishCount INTEGER, conRecLength INTEGER, miss_to_hit REAL, consDet INTEGER, detHist TEXT, hitRatio REAL, noiseRatio REAL, seriesHit INTEGER, timeStamp TIMESTAMP, Epoch INTEGER, Seconds INTEGER, fileName TEXT, recID TEXT, recType TEXT, ScanTime REAL)''')
        c.execute('''CREATE TABLE tblRaw(timeStamp TIMESTAMP, Epoch INTEGER, FreqCode TEXT, Power REAL,noiseRatio, fileName TEXT, recID TEXT, ScanTime REAL, Channels REAL, RecType TEXT)''')
        # Indexes on the columns most frequently filtered on.
        c.execute('''CREATE INDEX idx_RecID_Raw ON tblRaw (recID)''')
        c.execute('''CREATE INDEX idx_FreqCode On tblRaw (FreqCode)''')
        c.execute('''CREATE INDEX idx_RecType ON tblTrain (recType)''')
        conn.commit()
    finally:
        conn.close()
def setAlgorithmParameters(det,duration,dbName):
    '''Persist the naive Bayes classifier predictor parameters to tblAlgParams.

    det = number of detections to look forward and backward in time for
        detection history strings
    duration = moving window around each detection, used to calculate the
        noise ratio and number of fish present (fish count)
    dbName = full path to the project database
    '''
    conn = sqlite3.connect(dbName, timeout=30.0)
    try:
        conn.execute('INSERT INTO tblAlgParams VALUES (?,?)', (det, duration))
        # BUGFIX: the original committed twice and never closed the
        # connection; commit once and always release the handle.
        conn.commit()
    finally:
        conn.close()
def studyDataImport(dataFrame,dbName,tblName):
    '''Append a pre-formatted pandas DataFrame to a table in the project database.

    No schema validation is performed: the DataFrame's column names and dtypes
    must exactly match the destination table, or the imported rows will be
    inconsistent with the rest of the database.

    dataFrame = pandas DataFrame built from your structured input file
    dbName = full directory path to the project database
    tblName = destination table; intended for tblMasterTag and
        tblMasterReceiver (importing straight into tblRaw is possible but
        discouraged)
    '''
    conn = sqlite3.connect(dbName)
    try:
        # if_exists='append' adds rows without touching existing data
        dataFrame.to_sql(tblName, con=conn, index=False, if_exists='append')
        conn.commit()
    finally:
        # BUGFIX: the original closed only the cursor and leaked the
        # connection; close the connection itself.
        conn.close()
def orionImport(fileName,rxfile,dbName,recName,switch = False, scanTime = None, channels = None, ant_to_rec_dict = None):
    '''Import a raw Sigma Eight Orion receiver file into the project database.

    The text parser uses simple fixed column widths; the header row is used to
    detect which Orion firmware wrote the file.  Parsed detections are appended
    to tblRaw.

    fileName = full path to the raw Orion text file
    rxfile = file name (without path) stored in tblRaw.fileName
    dbName = full path to the project SQLite database
    recName = receiver ID applied to all rows when ant_to_rec_dict is None
    switch = unused in this function; kept for call-site compatibility
    scanTime, channels = receiver scan parameters written to each row; both
        forced to 1 when an antenna-to-receiver map is supplied
    ant_to_rec_dict = optional {antenna: receiver ID} mapping; when given, the
        file is split by antenna and each subset imported under its receiver
    '''
    conn = sqlite3.connect(dbName, timeout=30.0)
    c = conn.cursor()
    # study-tag FreqCodes are needed by noiseRatio() to separate real tags
    # from noise detections
    study_tags = pd.read_sql('SELECT FreqCode, TagType FROM tblMasterTag',con = conn)
    study_tags = study_tags[study_tags.TagType == 'Study'].FreqCode.values
    recType = 'orion'  # NOTE(review): only referenced by the commented-out tblReceiverParameters inserts below
    if ant_to_rec_dict != None:
        # multi-antenna deployment: scan parameters are fixed at 1
        scanTime = 1
        channels = 1
    # what orion firmware is it? the header row is the key
    o_file =open(fileName, encoding='utf-8')
    header = o_file.readline()[:-1] # read first line in file
    columns = str.split(header)
    o_file.close()
    if 'Type' in columns:
        # newer firmware: the file carries a Type column; keep detections and
        # drop receiver STATUS rows.
        # with our data row, extract information using pandas fwf import procedure
        telemDat = pd.read_fwf(fileName,colspecs = [(0,12),(13,23),(24,30),(31,35),(36,45),(46,54),(55,60),(61,65)],
                               names = ['Date','Time','Site','Ant','Freq','Type','Code','Power'],
                               skiprows = 1,
                               dtype = {'Date':str,'Time':str,'Site':np.int32,'Ant':str,'Freq':str,'Type':str,'Code':str,'Power':np.float64})
        telemDat = telemDat[telemDat.Type != 'STATUS']
        telemDat.drop(['Type'], axis = 1, inplace = True)
    else:
        # older firmware: no Type column. NOTE(review): every column,
        # including Power, is read as str here — presumably coerced
        # downstream; confirm against consumers of tblRaw.
        # with our data row, extract information using pandas fwf import procedure
        telemDat = pd.read_fwf(fileName,colspecs = [(0,11),(11,20),(20,26),(26,30),(30,37),(37,42),(42,48)],
                               names = ['Date','Time','Site','Ant','Freq','Code','Power'],
                               skiprows = 1,
                               dtype = {'Date':str,'Time':str,'Site':str,'Ant':str,'Freq':str,'Code':str,'Power':str})
    if len(telemDat) > 0:
        telemDat['fileName'] = np.repeat(rxfile,len(telemDat)) # Note I'm going back here to the actual file name without the path. Is that OK? I prefer it, but it's a potential source of confusion
        telemDat['FreqCode'] = telemDat['Freq'].astype(str) + ' ' + telemDat['Code'].astype(str)
        telemDat['timeStamp'] = pd.to_datetime(telemDat['Date'] + ' ' + telemDat['Time'],errors = 'coerce')# create timestamp field from date and time and apply to index
        telemDat['ScanTime'] = np.repeat(scanTime,len(telemDat))
        telemDat['Channels'] = np.repeat(channels,len(telemDat))
        telemDat['RecType'] = np.repeat('orion',len(telemDat))
        # rows whose Date/Time failed to parse were coerced to NaT; drop them
        telemDat = telemDat[telemDat.timeStamp.notnull()]
        if len(telemDat) == 0:
            print ("Invalid timestamps in raw data, cannot import")
        else:
            # seconds since 1970-01-01 on naive timestamps — assumes the
            # receiver clock and the epoch share a timezone; TODO confirm
            telemDat['Epoch'] = (telemDat['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
            telemDat.drop (['Date','Time','Freq','Code','Site'],axis = 1, inplace = True)
            telemDat = noiseRatio(5.0,telemDat,study_tags)
            if ant_to_rec_dict == None:
                # single-receiver import: every row belongs to recName
                telemDat.drop(['Ant'], axis = 1, inplace = True)
                telemDat['recID'] = np.repeat(recName,len(telemDat))
                tuples = zip(telemDat.FreqCode.values,telemDat.recID.values,telemDat.Epoch.values)
                index = pd.MultiIndex.from_tuples(tuples, names=['FreqCode', 'recID','Epoch'])
                telemDat.set_index(index,inplace = True,drop = False)
                telemDat.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
                # recParamLine = [(recName,recType,scanTime,channels,fileName)]
                # conn.executemany('INSERT INTO tblReceiverParameters VALUES (?,?,?,?,?)',recParamLine)
                conn.commit()
                c.close()
            else:
                # multi-antenna import: split rows by antenna and import each
                # subset under the receiver mapped to that antenna
                for i in ant_to_rec_dict:
                    site = ant_to_rec_dict[i]
                    telemDat_sub = telemDat[telemDat.Ant == str(i)]
                    telemDat_sub['recID'] = np.repeat(site,len(telemDat_sub))
                    tuples = zip(telemDat_sub.FreqCode.values,telemDat_sub.recID.values,telemDat_sub.Epoch.values)
                    index = pd.MultiIndex.from_tuples(tuples, names=['FreqCode', 'recID','Epoch'])
                    telemDat_sub.set_index(index,inplace = True,drop = False)
                    telemDat_sub.drop(['Ant'], axis = 1, inplace = True)
                    telemDat_sub.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
                    # recParamLine = [(site,recType,scanTime,channels,fileName)]
                    # conn.executemany('INSERT INTO tblReceiverParameters VALUES (?,?,?,?,?)',recParamLine)
                    conn.commit()
                    c.close()  # NOTE(review): per-iteration cursor close is harmless (idempotent) but looks unintentional — confirm against upstream
def lotek_import(fileName,rxfile,dbName,recName,ant_to_rec_dict = None):
''' function imports raw lotek data, reads header data to find receiver parameters
and automatically locates raw telemetry data. Import procedure works with
standardized project database. Database must be created before function can be run'''
'''to do: in future iterations create a check for project | |
-f or --function.'
)
if kwargs is None:
kwargs = {}
path = kwargs.get('path', None)
data = kwargs.get('data', None)
if data:
if path:
log.warning(
'Both the \'data\' and \'path\' arguments were provided. '
'\'data\' will take precedence.'
)
elif path:
with salt.utils.files.fopen(path, mode='r') as rfh:
data = rfh.read()
else:
raise SaltCloudSystemExit(
'The secgroup_allocate function requires either \'data\' or a file '
'\'path\' to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
response = server.one.secgroup.allocate(auth, data)
ret = {
'action': 'secgroup.allocate',
'allocated': response[0],
'secgroup_id': response[1],
'error_code': response[2],
}
return ret
def secgroup_clone(call=None, kwargs=None):
    '''
    Clones an existing security group.

    .. versionadded:: 2016.3.0

    name
        The name of the new template.

    secgroup_id
        The ID of the security group to be cloned. Can be used instead of
        ``secgroup_name``.

    secgroup_name
        The name of the security group to be cloned. Can be used instead of
        ``secgroup_id``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f secgroup_clone opennebula name=my-cloned-secgroup secgroup_id=0
        salt-cloud -f secgroup_clone opennebula name=my-cloned-secgroup secgroup_name=my-secgroup
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The secgroup_clone function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    name = kwargs.get('name')
    secgroup_id = kwargs.get('secgroup_id')
    secgroup_name = kwargs.get('secgroup_name')

    if name is None:
        raise SaltCloudSystemExit(
            'The secgroup_clone function requires a \'name\' to be provided.'
        )

    # Resolve the source group, preferring an explicit ID over a name lookup.
    if not secgroup_id and not secgroup_name:
        raise SaltCloudSystemExit(
            'The secgroup_clone function requires either a \'secgroup_id\' or a '
            '\'secgroup_name\' to be provided.'
        )
    if secgroup_id and secgroup_name:
        log.warning(
            'Both the \'secgroup_id\' and \'secgroup_name\' arguments were provided. '
            '\'secgroup_id\' will take precedence.'
        )
    if not secgroup_id:
        secgroup_id = get_secgroup_id(kwargs={'name': secgroup_name})

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])

    response = server.one.secgroup.clone(auth, int(secgroup_id), name)

    return {
        'action': 'secgroup.clone',
        'cloned': response[0],
        'cloned_secgroup_id': response[1],
        'cloned_secgroup_name': name,
        'error_code': response[2],
    }
def secgroup_delete(call=None, kwargs=None):
    '''
    Deletes the given security group from OpenNebula. Either a name or a
    secgroup_id must be supplied.

    .. versionadded:: 2016.3.0

    name
        The name of the security group to delete. Can be used instead of
        ``secgroup_id``.

    secgroup_id
        The ID of the security group to delete. Can be used instead of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f secgroup_delete opennebula name=my-secgroup
        salt-cloud --function secgroup_delete opennebula secgroup_id=100
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The secgroup_delete function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    name = kwargs.get('name')
    secgroup_id = kwargs.get('secgroup_id')

    # Resolve the target group, preferring an explicit ID over a name lookup.
    if not secgroup_id and not name:
        raise SaltCloudSystemExit(
            'The secgroup_delete function requires either a \'name\' or a '
            '\'secgroup_id\' to be provided.'
        )
    if secgroup_id and name:
        log.warning(
            'Both the \'secgroup_id\' and \'name\' arguments were provided. '
            '\'secgroup_id\' will take precedence.'
        )
    if not secgroup_id:
        secgroup_id = get_secgroup_id(kwargs={'name': name})

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])

    response = server.one.secgroup.delete(auth, int(secgroup_id))

    return {
        'action': 'secgroup.delete',
        'deleted': response[0],
        'secgroup_id': response[1],
        'error_code': response[2],
    }
def secgroup_info(call=None, kwargs=None):
    '''
    Retrieves information for the given security group. Either a name or a
    secgroup_id must be supplied.

    .. versionadded:: 2016.3.0

    name
        The name of the security group for which to gather information. Can be
        used instead of ``secgroup_id``.

    secgroup_id
        The ID of the security group for which to gather information. Can be
        used instead of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f secgroup_info opennebula name=my-secgroup
        salt-cloud --function secgroup_info opennebula secgroup_id=5
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The secgroup_info function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    name = kwargs.get('name')
    secgroup_id = kwargs.get('secgroup_id')

    # Resolve the target group, preferring an explicit ID over a name lookup.
    if not secgroup_id and not name:
        raise SaltCloudSystemExit(
            'The secgroup_info function requires either a name or a secgroup_id '
            'to be provided.'
        )
    if secgroup_id and name:
        log.warning(
            'Both the \'secgroup_id\' and \'name\' arguments were provided. '
            '\'secgroup_id\' will take precedence.'
        )
    if not secgroup_id:
        secgroup_id = get_secgroup_id(kwargs={'name': name})

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])

    response = server.one.secgroup.info(auth, int(secgroup_id))[1]
    tree = _get_xml(response)

    # Key the result on the security group's name, matching the other
    # *_info functions in this module.
    return {tree.find('NAME').text: _xml_to_dict(tree)}
def secgroup_update(call=None, kwargs=None):
    '''
    Replaces the security group template contents.

    .. versionadded:: 2016.3.0

    secgroup_id
        The ID of the security group to update. Can be used instead of
        ``secgroup_name``.

    secgroup_name
        The name of the security group to update. Can be used instead of
        ``secgroup_id``.

    path
        The path to a file containing the template of the security group. Syntax
        within the file can be the usual attribute=value or XML. Can be used instead
        of ``data``.

    data
        The template data of the security group. Syntax can be the usual attribute=value
        or XML. Can be used instead of ``path``.

    update_type
        There are two ways to update a security group: ``replace`` the whole template
        or ``merge`` the new template with the existing one.

    CLI Example:

    .. code-block:: bash

        salt-cloud --function secgroup_update opennebula secgroup_id=100 \\
            path=/path/to/secgroup_update_file.txt \\
            update_type=replace
        salt-cloud -f secgroup_update opennebula secgroup_name=my-secgroup update_type=merge \\
            data="Name = test RULE = [PROTOCOL = TCP, RULE_TYPE = inbound, RANGE = 1000:2000]"
    '''
    if call != 'function':
        # BUGFIX: this message previously named 'secgroup_allocate' — a
        # copy/paste error that gave users a misleading usage hint.
        raise SaltCloudSystemExit(
            'The secgroup_update function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    secgroup_id = kwargs.get('secgroup_id', None)
    secgroup_name = kwargs.get('secgroup_name', None)
    path = kwargs.get('path', None)
    data = kwargs.get('data', None)
    update_type = kwargs.get('update_type', None)
    update_args = ['replace', 'merge']

    if update_type is None:
        raise SaltCloudSystemExit(
            'The secgroup_update function requires an \'update_type\' to be provided.'
        )

    # Map the update type onto the numeric flag the XML-RPC API expects:
    # 0 = replace the whole template, 1 = merge with the existing one.
    if update_type == update_args[0]:
        update_number = 0
    elif update_type == update_args[1]:
        update_number = 1
    else:
        raise SaltCloudSystemExit(
            'The update_type argument must be either {0} or {1}.'.format(
                update_args[0],
                update_args[1]
            )
        )

    # Resolve the target group, preferring an explicit ID over a name lookup.
    if secgroup_id:
        if secgroup_name:
            log.warning(
                'Both the \'secgroup_id\' and \'secgroup_name\' arguments were provided. '
                '\'secgroup_id\' will take precedence.'
            )
    elif secgroup_name:
        secgroup_id = get_secgroup_id(kwargs={'name': secgroup_name})
    else:
        raise SaltCloudSystemExit(
            'The secgroup_update function requires either a \'secgroup_id\' or a '
            '\'secgroup_name\' to be provided.'
        )

    # 'data' takes precedence when both it and 'path' are supplied.
    if data:
        if path:
            log.warning(
                'Both the \'data\' and \'path\' arguments were provided. '
                '\'data\' will take precedence.'
            )
    elif path:
        with salt.utils.files.fopen(path, mode='r') as rfh:
            data = rfh.read()
    else:
        raise SaltCloudSystemExit(
            'The secgroup_update function requires either \'data\' or a file \'path\' '
            'to be provided.'
        )

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])

    response = server.one.secgroup.update(auth, int(secgroup_id), data, int(update_number))

    ret = {
        'action': 'secgroup.update',
        'updated': response[0],
        'secgroup_id': response[1],
        'error_code': response[2],
    }

    return ret
def template_allocate(call=None, kwargs=None):
    '''
    Allocates a new template in OpenNebula.

    .. versionadded:: 2016.3.0

    path
        The path to a file containing the elements of the template to be allocated.
        Syntax within the file can be the usual attribute=value or XML. Can be used
        instead of ``data``.

    data
        Contains the elements of the template to be allocated. Syntax can be the usual
        attribute=value or XML. Can be used instead of ``path``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f template_allocate opennebula path=/path/to/template_file.txt
        salt-cloud -f template_allocate opennebula \\
            data='CPU="1.0" DISK=[IMAGE="Ubuntu-14.04"] GRAPHICS=[LISTEN="0.0.0.0",TYPE="vnc"] \\
            MEMORY="1024" NETWORK="yes" NIC=[NETWORK="192net",NETWORK_UNAME="oneadmin"] \\
            OS=[ARCH="x86_64"] SUNSTONE_CAPACITY_SELECT="YES" SUNSTONE_NETWORK_SELECT="YES" \\
            VCPU="1"'
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The template_allocate function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    path = kwargs.get('path')
    data = kwargs.get('data')

    # 'data' takes precedence when both are given; otherwise fall back to
    # reading the template from 'path'.
    if not data and not path:
        raise SaltCloudSystemExit(
            'The template_allocate function requires either \'data\' or a file '
            '\'path\' to be provided.'
        )
    if data and path:
        log.warning(
            'Both the \'data\' and \'path\' arguments were provided. '
            '\'data\' will take precedence.'
        )
    if not data:
        with salt.utils.files.fopen(path, mode='r') as rfh:
            data = rfh.read()

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])

    response = server.one.template.allocate(auth, data)

    return {
        'action': 'template.allocate',
        'allocated': response[0],
        'template_id': response[1],
        'error_code': response[2],
    }
def template_clone(call=None, kwargs=None):
'''
Clones an existing virtual machine template.
.. versionadded:: 2016.3.0
name
The name of the new template.
template_id
The ID of the template to be cloned. Can be used instead of ``template_name``.
template_name
The name of the template to be cloned. Can be used instead of ``template_id``.
CLI Example:
.. code-block:: bash
salt-cloud -f template_clone opennebula name=my-new-template template_id=0
salt-cloud -f template_clone opennebula name=my-new-template template_name=my-template
'''
if call != 'function':
raise SaltCloudSystemExit(
'The template_clone function must be called with -f or | |
channel = self.make_request(
"POST", "/createRoom", b'{"visibility":"private"}'
)
self.render(request)
self.assertEquals(200, channel.code)
self.assertTrue("room_id" in channel.json_body)
def test_post_room_custom_key(self):
# POST with custom config keys, expect new room id
request, channel = self.make_request(
"POST", "/createRoom", b'{"custom":"stuff"}'
)
self.render(request)
self.assertEquals(200, channel.code)
self.assertTrue("room_id" in channel.json_body)
def test_post_room_known_and_unknown_keys(self):
# POST with custom + known config keys, expect new room id
request, channel = self.make_request(
"POST", "/createRoom", b'{"visibility":"private","custom":"things"}'
)
self.render(request)
self.assertEquals(200, channel.code)
self.assertTrue("room_id" in channel.json_body)
def test_post_room_invalid_content(self):
# POST with invalid content / paths, expect 400
request, channel = self.make_request("POST", "/createRoom", b'{"visibili')
self.render(request)
self.assertEquals(400, channel.code)
request, channel = self.make_request("POST", "/createRoom", b'["hello"]')
self.render(request)
self.assertEquals(400, channel.code)
def test_post_room_invitees_invalid_mxid(self):
# POST with invalid invitee, see https://github.com/matrix-org/synapse/issues/4088
# Note the trailing space in the MXID here!
request, channel = self.make_request(
"POST", "/createRoom", b'{"invite":["@alice:example.com "]}'
)
self.render(request)
self.assertEquals(400, channel.code)
class RoomTopicTestCase(RoomBase):
    """Tests the /rooms/$room_id/state/m.room.topic REST endpoint."""

    user_id = "@sid1:red"

    def prepare(self, reactor, clock, hs):
        # Create a fresh room and remember its topic-state path.
        self.room_id = self.helper.create_room_as(self.user_id)
        self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,)

    def test_invalid_puts(self):
        # Bodies that are malformed JSON, non-objects, empty, or carry the
        # wrong value type must all be rejected with a 400.
        bad_bodies = (
            "{}",  # missing keys
            '{"_name":"bo"}',  # unknown key only
            '{"nao',  # truncated JSON
            '[{"_name":"bo"},{"_name":"jill"}]',  # array, not object
            "text only",  # not JSON at all
            "",  # empty body
            '{"topic":["Topic name"]}',  # valid key, wrong type
        )
        for body in bad_bodies:
            req, chan = self.make_request("PUT", self.path, body)
            self.render(req)
            self.assertEquals(400, chan.code, msg=chan.result["body"])

    def test_rooms_topic(self):
        # No topic has been set yet, so a GET is a 404.
        req, chan = self.make_request("GET", self.path)
        self.render(req)
        self.assertEquals(404, chan.code, msg=chan.result["body"])

        # Set a topic...
        topic_json = '{"topic":"Topic name"}'
        req, chan = self.make_request("PUT", self.path, topic_json)
        self.render(req)
        self.assertEquals(200, chan.code, msg=chan.result["body"])

        # ...and read the same content back.
        req, chan = self.make_request("GET", self.path)
        self.render(req)
        self.assertEquals(200, chan.code, msg=chan.result["body"])
        self.assert_dict(json.loads(topic_json), chan.json_body)

    def test_rooms_topic_with_extra_keys(self):
        # Extra keys in the state content survive the PUT/GET round-trip.
        topic_json = '{"topic":"Seasons","subtopic":"Summer"}'
        req, chan = self.make_request("PUT", self.path, topic_json)
        self.render(req)
        self.assertEquals(200, chan.code, msg=chan.result["body"])

        req, chan = self.make_request("GET", self.path)
        self.render(req)
        self.assertEquals(200, chan.code, msg=chan.result["body"])
        self.assert_dict(json.loads(topic_json), chan.json_body)
class RoomMemberStateTestCase(RoomBase):
    """ Tests /rooms/$room_id/state/m.room.member/$user_id REST events. """

    user_id = "@sid1:red"

    def prepare(self, reactor, clock, hs):
        # Each test runs against a fresh room created by self.user_id.
        self.room_id = self.helper.create_room_as(self.user_id)

    def test_invalid_puts(self):
        path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id)
        # missing keys or invalid json
        request, channel = self.make_request("PUT", path, "{}")
        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("PUT", path, '{"_name":"bo"}')
        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("PUT", path, '{"nao')
        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        request, channel = self.make_request(
            "PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]'
        )
        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("PUT", path, "text only")
        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("PUT", path, "")
        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

        # valid keys, wrong types
        content = '{"membership":["%s","%s","%s"]}' % (
            Membership.INVITE,
            Membership.JOIN,
            Membership.LEAVE,
        )
        request, channel = self.make_request("PUT", path, content.encode("ascii"))
        self.render(request)
        self.assertEquals(400, channel.code, msg=channel.result["body"])

    def test_rooms_members_self(self):
        path = "/rooms/%s/state/m.room.member/%s" % (
            urlparse.quote(self.room_id),
            self.user_id,
        )

        # valid join message (NOOP since we made the room)
        content = '{"membership":"%s"}' % Membership.JOIN
        request, channel = self.make_request("PUT", path, content.encode("ascii"))
        self.render(request)
        self.assertEquals(200, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("GET", path, None)
        self.render(request)
        self.assertEquals(200, channel.code, msg=channel.result["body"])

        expected_response = {"membership": Membership.JOIN}
        self.assertEquals(expected_response, channel.json_body)

    def test_rooms_members_other(self):
        self.other_id = "@zzsid1:red"
        path = "/rooms/%s/state/m.room.member/%s" % (
            urlparse.quote(self.room_id),
            self.other_id,
        )

        # valid invite message
        content = '{"membership":"%s"}' % Membership.INVITE
        request, channel = self.make_request("PUT", path, content)
        self.render(request)
        self.assertEquals(200, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("GET", path, None)
        self.render(request)
        self.assertEquals(200, channel.code, msg=channel.result["body"])
        self.assertEquals(json.loads(content), channel.json_body)

    def test_rooms_members_other_custom_keys(self):
        # BUGFIX: this ID had been clobbered by a redaction artifact
        # ("@<PASSWORD>"), which is not a valid mxid; restore the same
        # well-formed other-user ID used by test_rooms_members_other.
        self.other_id = "@zzsid1:red"
        path = "/rooms/%s/state/m.room.member/%s" % (
            urlparse.quote(self.room_id),
            self.other_id,
        )

        # valid invite message with custom key
        content = '{"membership":"%s","invite_text":"%s"}' % (
            Membership.INVITE,
            "Join us!",
        )
        request, channel = self.make_request("PUT", path, content)
        self.render(request)
        self.assertEquals(200, channel.code, msg=channel.result["body"])

        request, channel = self.make_request("GET", path, None)
        self.render(request)
        self.assertEquals(200, channel.code, msg=channel.result["body"])
        self.assertEquals(json.loads(content), channel.json_body)
class RoomJoinRatelimitTestCase(RoomBase):
    """Tests the rate-limiting applied to local room joins."""

    user_id = "@sid1:red"

    servlets = [
        profile.register_servlets,
        room.register_servlets,
    ]

    @unittest.override_config(
        {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}}
    )
    def test_join_local_ratelimit(self):
        """Tests that local joins are actually rate-limited."""
        for i in range(3):
            self.helper.create_room_as(self.user_id)

        self.helper.create_room_as(self.user_id, expect_code=429)

    @unittest.override_config(
        {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}}
    )
    def test_join_local_ratelimit_profile_change(self):
        """Tests that sending a profile update into all of the user's joined rooms isn't
        rate-limited by the rate-limiter on joins."""

        # Create and join as many rooms as the rate-limiting config allows in a second.
        room_ids = [
            self.helper.create_room_as(self.user_id),
            self.helper.create_room_as(self.user_id),
            self.helper.create_room_as(self.user_id),
        ]
        # Let some time for the rate-limiter to forget about our multi-join.
        self.reactor.advance(2)
        # Add one to make sure we're joined to more rooms than the config allows us to
        # join in a second.
        room_ids.append(self.helper.create_room_as(self.user_id))

        # Create a profile for the user, since it hasn't been done on registration.
        store = self.hs.get_datastore()
        self.get_success(
            store.create_profile(UserID.from_string(self.user_id).localpart)
        )

        # Update the display name for the user.
        # BUGFIX: the displayname literal had been replaced by a "<NAME>"
        # redaction artifact; use a concrete name and assert the same value
        # below so the test is self-consistent.
        path = "/_matrix/client/r0/profile/%s/displayname" % self.user_id
        request, channel = self.make_request("PUT", path, {"displayname": "John Doe"})
        self.render(request)
        self.assertEquals(channel.code, 200, channel.json_body)

        # Check that all the rooms have been sent a profile update into.
        for room_id in room_ids:
            path = "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" % (
                room_id,
                self.user_id,
            )

            request, channel = self.make_request("GET", path)
            self.render(request)
            self.assertEquals(channel.code, 200)

            self.assertIn("displayname", channel.json_body)
            self.assertEquals(channel.json_body["displayname"], "John Doe")

    @unittest.override_config(
        {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}}
    )
    def test_join_local_ratelimit_idempotent(self):
        """Tests that the room join endpoints remain idempotent despite rate-limiting
        on room joins."""
        room_id = self.helper.create_room_as(self.user_id)

        # Let's test both paths to be sure.
        paths_to_test = [
            "/_matrix/client/r0/rooms/%s/join",
            "/_matrix/client/r0/join/%s",
        ]

        for path in paths_to_test:
            # Make sure we send more requests than the rate-limiting config would allow
            # if all of these requests ended up joining the user to a room.
            for i in range(4):
                request, channel = self.make_request("POST", path % room_id, {})
                self.render(request)
                self.assertEquals(channel.code, 200)
class RoomMessagesTestCase(RoomBase):
    """Tests the /rooms/$room_id/send/m.room.message/$msg_id REST endpoint."""

    user_id = "@sid1:red"

    def prepare(self, reactor, clock, hs):
        # Each test runs against a fresh room created by self.user_id.
        self.room_id = self.helper.create_room_as(self.user_id)

    def test_invalid_puts(self):
        path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id))
        # Missing keys, malformed JSON, non-objects and empty bodies are all
        # rejected with a 400.
        bad_payloads = (
            b"{}",
            b'{"_name":"bo"}',
            b'{"nao',
            b'[{"_name":"bo"},{"_name":"jill"}]',
            b"text only",
            b"",
        )
        for payload in bad_payloads:
            req, chan = self.make_request("PUT", path, payload)
            self.render(req)
            self.assertEquals(400, chan.code, msg=chan.result["body"])

    def test_rooms_messages_sent(self):
        path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id))

        # msgtype must be a string, not an object
        req, chan = self.make_request(
            "PUT", path, b'{"body":"test","msgtype":{"type":"a"}}'
        )
        self.render(req)
        self.assertEquals(400, chan.code, msg=chan.result["body"])

        # custom message types are accepted
        req, chan = self.make_request(
            "PUT", path, b'{"body":"test","msgtype":"test.custom.text"}'
        )
        self.render(req)
        self.assertEquals(200, chan.code, msg=chan.result["body"])

        # the standard m.text message type is accepted too
        path = "/rooms/%s/send/m.room.message/mid2" % (urlparse.quote(self.room_id))
        req, chan = self.make_request(
            "PUT", path, b'{"body":"test2","msgtype":"m.text"}'
        )
        self.render(req)
        self.assertEquals(200, chan.code, msg=chan.result["body"])
class RoomInitialSyncTestCase(RoomBase):
    """Tests the /rooms/$room_id/initialSync REST endpoint."""

    user_id = "@sid1:red"

    def prepare(self, reactor, clock, hs):
        # One fresh room per test, created by self.user_id.
        self.room_id = self.helper.create_room_as(self.user_id)

    def test_initial_sync(self):
        req, chan = self.make_request("GET", "/rooms/%s/initialSync" % self.room_id)
        self.render(req)
        self.assertEquals(200, chan.code)

        body = chan.json_body
        self.assertEquals(self.room_id, body["room_id"])
        self.assertEquals("join", body["membership"])

        # Unpack the state events into a type -> [events] mapping so the
        # assertions below are easier to express.
        state = {}
        for event in body["state"]:
            if "state_key" not in event:
                continue
            state.setdefault(event["type"], []).append(event)

        self.assertTrue("m.room.create" in state)

        self.assertTrue("messages" in body)
        self.assertTrue("chunk" in body["messages"])
        self.assertTrue("end" in body["messages"])

        self.assertTrue("presence" in body)

        presence_by_user = {e["content"]["user_id"]: e for e in body["presence"]}
        self.assertTrue(self.user_id in presence_by_user)
        self.assertEquals("m.presence", presence_by_user[self.user_id]["type"])
class RoomMessageListTestCase(RoomBase):
""" Tests /rooms/$room_id/messages REST events. """
user_id = "@sid1:red"
    def prepare(self, reactor, clock, hs):
        # Create a fresh room for each test; self.user_id is the creator.
        self.room_id = self.helper.create_room_as(self.user_id)
def test_topo_token_is_accepted(self):
token = "t1-0_0_0_0_0_0_0_0_0"
request, channel = self.make_request(
"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)
)
self.render(request)
self.assertEquals(200, channel.code)
self.assertTrue("start" in channel.json_body)
self.assertEquals(token, channel.json_body["start"])
self.assertTrue("chunk" in channel.json_body)
self.assertTrue("end" in channel.json_body)
def test_stream_token_is_accepted_for_fwd_pagianation(self):
token = "s0_0_0_0_0_0_0_0_0"
request, channel = self.make_request(
"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token)
)
self.render(request)
self.assertEquals(200, channel.code)
self.assertTrue("start" in channel.json_body)
self.assertEquals(token, channel.json_body["start"])
self.assertTrue("chunk" in channel.json_body)
self.assertTrue("end" in channel.json_body)
def test_room_messages_purge(self):
store = self.hs.get_datastore()
pagination_handler = self.hs.get_pagination_handler()
# Send a first message in the room, which will be removed by the purge.
first_event_id = self.helper.send(self.room_id, "message 1")["event_id"]
first_token = self.get_success(
store.get_topological_token_for_event(first_event_id)
)
# Send a second message in the room, which won't be removed, and which we'll
# use as the marker to purge events before.
second_event_id = self.helper.send(self.room_id, "message 2")["event_id"]
second_token = | |
# <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
URLs1
http\:\/\/[a-z]{3,4}\.[a-z]{3,4}
http\:\/\/[a-z]{5,19}\.com\/
[a-z]{4,5}\:\/\/[a-z]{3}\.[a-z]{6,19}\.com
http\:\/\/www\.[a-z]{6,19}\.com\/
http\:\/\/www\.[a-z]{6,19}\.co\.uk\/
** With s replacing space and backslashes removed, spaced in groups:
111111111 222 33333333333 4 5555555555 6 777 8 99 10
http :// [a-z]{3,4} . [a-z]{3,4}
http :// [a-z]{5,19} . com /
[a-z]{4,5} :// [a-z]{3} . [a-z]{6,19} . com
http :// www . [a-z]{6,19} . com /
http :// www . [a-z]{6,19} . co . uk /
--> :// is very special in col 2
111111111 222 33333333333 4 5555555555 6 777 8 99 10
http :// [a-z]{3,4} . [a-z]{3,4}
http :// [a-z]{5,19} . com /
[a-z]{4,5} :// [a-z]{3} . [a-z]{6,19} . com
http :// www . [a-z]{6,19} . com /
http :// www . [a-z]{6,19} . co . uk /
--> /, when present is always last in RH group
111111111 222 33333333333 4 5555555555 6 777 8 99 10
http :// [a-z]{3,4} . [a-z]{3,4}
http :// [a-z]{5,19} . com /
[a-z]{4,5} :// [a-z]{3} . [a-z]{6,19} . com
http :// www . [a-z]{6,19} . com /
http :// www . [a-z]{6,19} . co . uk /
Goal:
111111111 222 33333333333 4 5555555555 6 777 8 99 10
http :// [a-z]{3,4} . [a-z]{3,4}
http :// [a-z]{5,19} . com /
[a-z]{4,5} :// [a-z]{3} . [a-z]{6,19} . com
http :// www . [a-z]{6,19} . com /
http :// www . [a-z]{6,19} . co . uk /
URLs 1 + 2:
[a-z]{3,4}\.[a-z]{2,4}
[a-z]{5,19}\.com\/
[a-z]{3,4}[\.\/\:]{1,3}[a-z]{3,19}\.[a-z]{3,4}
http\:\/\/[a-z]{5,19}\.com\/
[a-z]{4,5}\:\/\/[a-z]{3}\.[a-z]{6,19}\.com
http\:\/\/www\.[a-z]{6,19}\.com\/
http\:\/\/www\.[a-z]{6,19}\.co\.uk\/
** With s replacing space and backslashes removed, spaced in groups:
11111111111 2222222222 33333333333 4 55555555555 6 777 8 99 10
[a-z]{3,4} . [a-z]{2,4}
[a-z]{5,19} . com /
[a-z]{3,4} [./:]{1,3} [a-z]{3,19} . [a-z]{3,4}
http :// [a-z]{5,19} . com /
[a-z]{4,5} :// [a-z]{3} . [a-z]{6,19} . com
http :// www . [a-z]{6,19} . com /
http :// www . [a-z]{6,19} . co . uk /
Goal:
11111111111 2222222222 33333333333 4 55555555555 6 777 8 99 10
[a-z]{3,4} . [a-z]{2,4}
[a-z]{5,19} . com /
[a-z]{3,4} [./:]{1,3} [a-z]{3,19} . [a-z]{3,4}
http :// [a-z]{5,19} . com /
[a-z]{4,5} :// [a-z]{3} . [a-z]{6,19} . com
http :// www . [a-z]{6,19} . com /
http :// www . [a-z]{6,19} . co . uk /
TELEPHONES 2
\(\d{3,4}\)\ \d{3,4}\ \d{4}
\+\d{1,2}\ \d{2,3}\ \d{3,4}\ \d{4}
** With s replacing space and backslashes removed, spaced in groups:
1 2222222 3 4444444 5555555 666666 77777 8888
( d{3,4} ) s d{3,4} s d{4}
+ d{1,2} s d{2,3} s d{3,4} s d{4}
--> Space at pos -2
1 2222222 3 4444444 5555555 666666 77777 8888
( d{3,4} ) s d{3,4} s d{4}
+ d{1,2} s d{2,3} s d{3,4} s d{4}
--> Space at pos -2 within left group
1 2222222 3 4444444 5555555 666666 77777 8888
( d{3,4} ) s d{3,4} s d{4}
+ d{1,2} s d{2,3} s d{3,4} s d{4}
Goal
1 2222222 3 4444444 5555555 666666 77777 8 9999
( d{3,4} ) s d{3,4} s d{4}
+ d{1,2} s d{2,3} s d{3,4} s d{4}
TELEPHONES 5
\d{3}\ \d{3}\ \d{4}
\d{3}\-\d{3}\-\d{4}
1\ \d{3}\ \d{3}\ \d{4}
\(\d{3}\)\ \d{3}\ \d{4}
** With s replacing space and backslashes removed, spaced in groups:
1111 2222 3333 4 5555 6 7777
d{3} s d{3} s d{4}
d{3} - d{3} - d{4}
1 s d{3} s d{3} s d{4}
( d{3} ) s d{3} s d{4}
--> Last group is always 4 digits
1111 2222 3333 4 5555 6 7777
d{3} s d{3} s d{4}
d{3} - d{3} - d{4}
1 s d{3} s d{3} s d{4}
( d{3} ) s d{3} s d{4}
--> Group -2 with left part is always 3 digits
1111 2222 3333 4 5555 6 7777
d{3} s d{3} s d{4}
d{3} - d{3} - d{4}
1 s d{3} s d{3} s d{4}
( d{3} ) s d{3} s d{4}
--> Last of left of left is always space or hyphen
1111 2222 3333 4 5555 6 7777
d{3} s d{3} s d{4}
d{3} - d{3} - d{4}
1 s d{3} s d{3} s d{4}
( d{3} ) s d{3} s d{4}
--> Within left three, there is always a block of 3 digits
1 2 3333 4 5 6666 7 8888
d{3} s d{3} s d{4}
d{3} - d{3} - d{4}
1 s d{3} s d{3} s d{4}
( d{3} ) s d{3} s d{4}
Goal:
1 2222 3 4444 5 6666 7 8888
d{3} s d{3} s d{4}
d{3} - d{3} - d{4}
1 s d{3} s d{3} s d{4}
( d{3} ) s d{3} s d{4}
TELS 1-5
\d{3}\ \d{3}\ \d{4}
\d{3,4}[\-\.]\d{3}[\-\.]\d{4}
1\ \d{3}\ \d{3}\ \d{4}
\(\d{3,4}\)\ \d{3,4}\ \d{4}
\+\d{1,2}\ \d{2,3}\ \d{3,4}\ \d{4}
** With s replacing space and backslashes removed, spaced in groups:
111111 222222 3333 444444 555555 666666 7777 8888
d{3} s d{3} s d{4}
d{3,4} [-.] d{3} [-.] d{4}
1 s d{3} s d{3} s d{4}
( d{3,4} ) s d{3,4} s d{4}
+ d{1,2} s d{2,3} s d{3,4} s d{4}
Goal:
1 222222 3333 444444 555555 6666 777777 8 9999
d{3} s d{3} s d{4}
d{3,4} [-.] d{3} [-.] d{4}
1 s d{3} s d{3} s d{4}
( d{3,4} ) s d{3,4} s d{4}
+ d{1,2} s d{2,3} s d{3,4} s d{4}
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import sys
import unittest
# pandas is an optional dependency; the tests that need it are skipped when it
# is missing. Catch only ImportError — the old bare `except:` also swallowed
# SystemExit/KeyboardInterrupt and genuine pandas startup errors.
try:
    import pandas
except ImportError:
    pandas = None
pd = pandas
#from artists.miro.writabletestcase import WritableTestCase
from tdda.rexpy import *
class TestUtilityFunctions(unittest.TestCase):
    def test_signature(self):
        """signature() collapses (char-class, run-length) pairs into a string
        of the classes only; run lengths are discarded."""
        # Empty input -> empty signature.
        self.assertEqual(signature([]), '')
        self.assertEqual(signature([('c', 1)]), 'c')
        # Run lengths are ignored: ('C', 3) and ('C', 2) both contribute 'C'.
        self.assertEqual(signature([('C', 3), ('.', 1),
                                    ('C', 2), ('.', 1),
                                    ('C', 2)]), 'C.C.C')
        self.assertEqual(signature([('C', 8), ('.', 1),
                                    ('C', 4), ('.', 1),
                                    ('C', 4), ('.', 1),
                                    ('C', 4), ('.', 1),
                                    ('C', 12)]), 'C.C.C.C.C')
        # Space fragments are preserved as spaces in the signature.
        self.assertEqual(signature([('.', 1),
                                    ('C', 4), ('.', 1), (' ', 1),
                                    ('C', 3), (' ', 1),
                                    ('C', 4)]), '.C. C C')
        self.assertEqual(signature([('C', 4), ('.', 1),
                                    ('C', 2), ('.', 1),
                                    ('C', 5), ('.', 1),
                                    ('C', 2), ('.', 1,),
                                    ('C', 2), (' ', 1), ('.', 1),
                                    ('C', 5)]), 'C.C.C.C.C .C')
        # Punctuation classes such as '*' pass through unchanged too.
        self.assertEqual(signature([('C', 4), ('.', 1),
                                    ('C', 2), ('.', 1),
                                    ('C', 5), ('.', 1),
                                    ('C', 2), ('.', 1),
                                    ('C', 2), ('*', 1), ('.', 1),
                                    ('C', 5)]), 'C.C.C.C.C*.C')
    def test_get_omnipresent_at_pos(self):
        """get_omnipresent_at_pos(c, n) returns the (fragment, position) pairs
        whose per-position count equals n, i.e. fragments present at that
        position in every one of the n examples."""
        # Keys are fragments; values map position -> occurrence count.
        c = {
            ('a', 1, 1, 'fixed'): {1: 7, -1: 7, 3: 4},
            ('b', 1, 1, 'fixed'): {2: 6, 3: 4},
            ('c', 1, 1): {3: 1, 4: 2, 2: 7},
            ('d', 1, 1, 'fixed'): {1: 1}
        }
        # 'a' hits count 7 at two positions; 'c' at one.
        self.assertEqual(get_omnipresent_at_pos(c, 7),
                         [((u'a', 1, 1, u'fixed'), -1),
                          ((u'a', 1, 1, u'fixed'), 1),
                          ((u'c', 1, 1), 2)])
        self.assertEqual(get_omnipresent_at_pos(c, 6),
                         [((u'b', 1, 1, u'fixed'), 2)])
        # No fragment has count 5; empty/zero inputs yield empty results.
        self.assertEqual(get_omnipresent_at_pos(c, 5), [])
        self.assertEqual(get_omnipresent_at_pos({}, 1), [])
        self.assertEqual(get_omnipresent_at_pos({}, 0), [])
def test_length_stats(self):
# Testing with strings, but works with lists etc. too
self.assertEqual(length_stats(['abc', 'def', 'ghi']), (True, 3))
self.assertEqual(length_stats(['a', 'def', 'gh']), (False, 3))
self.assertEqual(length_stats([]), (True, 0))
    def test_left_parts1(self):
        """left_parts aligns pattern fragment lists on the given fixed
        fragments, scanning from the left."""
        # For this test, the fragments are always present in the positions
        p1 = [('a',), ('b',), ('c',), ('d',), ('e',), ('f',)]
        p2 = [('A',), ('b',), ('C',), ('d',), ('E',)]
        p3 = [('.',), ('b',), ('.',), ('d',)]
        # ('b',) is fixed at index 1 and ('d',) at index 3 in every pattern.
        fixed = [(('b',), 1), (('d',), 3)]
        # Each output row holds, per pattern, the fragments falling into that
        # slot between (or after) the fixed anchors.
        expected = [
            [[('a',)], [('A',)], [('.',)]],
            [[('b',)], [('b',)], [('b',)]],
            [[('c',)], [('C',)], [('.',)]],
            [[('d',)], [('d',)], [('d',)]],
            [[('e',), ('f',)], [('E',)], []]
        ]
        self.assertEqual(left_parts([p1, p2, p3], fixed), expected)
    def test_left_parts2(self):
        """left_parts with anchors at index 0 and 3: the middle slot absorbs
        everything between them."""
        # For this test, the fragments are always present in the positions
        p1 = [('a',), ('b',), ('c',), ('d',), ('e',), ('f',)]
        p2 = [('a',), ('B',), ('C',), ('d',), ('E',)]
        p3 = [('a',), ('b',), ('c',), ('d',)]
        fixed = [(('a',), 0), (('c',), 3)]
        expected = [
            [[('a',)], [('a',)], [('a',)]],
            [[('b',), ('c',)], [('B',), ('C',)], [('b',), ('c',)]],
            [[('d',)], [('d',)], [('d',)]],
            [[('e',), ('f',)], [('E',)], []]
        ]
        self.assertEqual(left_parts([p1, p2, p3], fixed), expected)
    def test_right_parts1(self):
        """right_parts mirrors left_parts: fixed positions are counted from
        the right-hand end of each pattern."""
        p1 = [('F',), ('e',), ('d',), ('c',), ('b',), ('a',)]
        p2 = [('E',), ('d',), ('C',), ('b',), ('A',)]
        p3 = [('d',), ('.',), ('b',), ('.',)]
        # ('b',) is 2 from the end and ('d',) is 4 from the end, everywhere.
        fixed = [(('b',), 2), (('d',), 4)]
        expected = [
            [[('F',), ('e',)], [('E',)], []],
            [[('d',)], [('d',)], [('d',)]],
            [[('c',)], [('C',)], [('.',)]],
            [[('b',)], [('b',)], [('b',)]],
            [[('a',)], [('A',)], [('.',)]],
        ]
        self.assertEqual(right_parts([p1, p2, p3], fixed), expected)
def test_right_parts2(self):
p1 = [('F',), ('e',), ('d',), ('c',), ('b',), ('a',)]
p2 = [('E',), ('d',), ('C',), ('b',), ('a',)]
p3 = [('d',), ('.',), ('.',), ('a',)]
fixed = [(('a',), 1), (('d',), 4)]
expected = [
[[('F',), ('e',)], [('E',)], []],
[[('d',)], [('d',)], [('d',)]],
| |
import json
import logging
from typing import Iterator, Tuple, List
from api import ontology
from api.dsp import DataSubmissionPortal
from api.ingest import IngestAPI
from archiver.accessioner import Accessioner
from archiver.converter import ConversionError, SampleConverter, ProjectConverter, \
SequencingExperimentConverter, SequencingRunConverter, StudyConverter
from archiver.ingest_tracker import IngestTracker
from archiver.submission import ArchiveEntityMap, ArchiveEntity, ArchiveSubmission
from utils import protocols
from utils.graph import Graph
def _print_same_line(string):
    """Redraw the current console line in place: carriage return, no newline."""
    print('\r%s' % (string,), end='')
class ArchiverException(Exception):
    """Common base for every exception raised by this module."""
class Biomaterial:
    """An ingest biomaterial together with its derivation lineage, if any."""

    def __init__(self, data, derived_by_process=None, derived_with_protocols=None,
                 derived_from_biomaterials: List[dict] = None):
        self.data = data
        self.derived_by_process = derived_by_process
        self.derived_with_protocols = derived_with_protocols
        # Normalise an empty list to None so "no parents" has one spelling.
        self.derived_from_biomaterials = derived_from_biomaterials or None

    @classmethod
    def from_uuid(cls, ingest_api, biomaterial_uuid):
        """Fetch a biomaterial by UUID; when it was derived, also gather its
        deriving processes, their protocols (grouped by concrete type) and the
        input biomaterials it was derived from."""
        data = ingest_api.get_biomaterial_by_uuid(biomaterial_uuid)
        process_count = ingest_api.get_related_entity_count(data, 'derivedByProcesses', 'processes')
        if not process_count:
            return cls(data)
        deriving_processes = list(ingest_api.get_related_entity(data, 'derivedByProcesses', 'processes'))
        protocols_by_type = {}
        parent_biomaterials = []
        for deriving_process in deriving_processes:
            for protocol in ingest_api.get_related_entity(deriving_process, 'protocols', 'protocols'):
                concrete_type = ingest_api.get_concrete_entity_type(protocol)
                protocols_by_type.setdefault(concrete_type, []).append(protocol)
            if not ingest_api.get_related_entity_count(deriving_process, 'inputBiomaterials',
                                                       'biomaterials'):
                raise ArchiverException('A biomaterial has been derived by a process with no input biomaterial')
            parent_biomaterials.extend(
                list(ingest_api.get_related_entity(deriving_process, 'inputBiomaterials', 'biomaterials')))
        # NOTE(review): mirrors the historical behaviour of recording only the
        # *last* deriving process on the instance.
        return cls(data, deriving_process, protocols_by_type, parent_biomaterials)
class Manifest:
    """Lazy accessor around one ingest manifest and its related metadata.

    Each ``get_*`` method fetches from the ingest API on first use and caches
    the result on the instance afterwards.
    """

    def __init__(self, ingest_api: IngestAPI, manifest_id: str):
        self.ingest_api = ingest_api
        self.manifest_id = manifest_id
        self.manifest = self.ingest_api.get_manifest_by_id(self.manifest_id)
        # Caches populated lazily by the getters below.
        self.project = None
        self.biomaterials = None
        self.files = None
        self.assay_process = None
        self.library_preparation_protocol = None
        self.sequencing_protocol = None
        self.input_biomaterial = None

    def get_submission_uuid(self):
        """Return the UUID of the submission envelope, if the manifest has one."""
        return self.manifest.get('envelopeUuid')

    def get_project(self):
        """Return the manifest's project (first key of fileProjectMap), cached."""
        if not self.project:
            project_uuid = list(self.manifest['fileProjectMap'])[0]
            self.project = self.ingest_api.get_project_by_uuid(project_uuid)
        return self.project

    def get_biomaterials(self) -> Iterator['Biomaterial']:
        """Yield a Biomaterial for each entry of fileBiomaterialMap.

        NOTE(review): the cached value is a generator, so after one full
        iteration subsequent calls yield nothing — confirm callers iterate
        only once.
        """
        if not self.biomaterials:
            self.biomaterials = self._init_biomaterials()
        return self.biomaterials

    def get_assay_process(self):
        """Return the single assay process that derived this manifest's files."""
        if not self.assay_process:
            self.assay_process = self._init_assay_process()
        return self.assay_process

    def get_library_preparation_protocol(self):
        """Return the assay's library preparation protocol, resolving lazily."""
        if not self.library_preparation_protocol:
            self._init_protocols()
        return self.library_preparation_protocol

    def get_sequencing_protocol(self):
        """Return the assay's sequencing protocol, resolving lazily."""
        if not self.sequencing_protocol:
            self._init_protocols()
        return self.sequencing_protocol

    def get_files(self):
        """Return the files derived from the assay process.

        NOTE(review): caches whatever iterable the ingest API returns — same
        single-iteration caveat as get_biomaterials() if it is a generator.
        """
        if not self.files:
            assay = self.get_assay_process()
            self.files = self.ingest_api.get_related_entity(assay, 'derivedFiles', 'files')
        return self.files

    def get_input_biomaterial(self):
        """Return the (first) input biomaterial of the assay process."""
        if not self.input_biomaterial:
            self.input_biomaterial = self._init_input_biomaterial()
        return self.input_biomaterial

    def _init_biomaterials(self) -> Iterator['Biomaterial']:
        # Generator: one Biomaterial per biomaterial UUID in the manifest.
        for biomaterial_uuid in list(self.manifest['fileBiomaterialMap']):
            yield Biomaterial.from_uuid(self.ingest_api, biomaterial_uuid)

    def _init_assay_process(self):
        # The assay process is located by walking back from any one file.
        file_uuid = list(self.manifest['fileFilesMap'])[0]
        file = self.ingest_api.get_file_by_uuid(file_uuid)
        derived_by_processes_count = self.ingest_api.get_related_entity_count(file, 'derivedByProcesses', 'processes')
        if derived_by_processes_count:
            # A manifest is expected to map to exactly one assay process.
            if derived_by_processes_count > 1:
                raise ArchiverException(f'Manifest {self.manifest_id} has many assay processes.')
            derived_by_processes = self.ingest_api.get_related_entity(file, 'derivedByProcesses', 'processes')
            return next(derived_by_processes)
        return None

    def _init_protocols(self):
        # Group the assay's protocols by concrete type, then require exactly
        # one library-preparation and one sequencing protocol.
        assay = self.get_assay_process()
        protocols = self.ingest_api.get_related_entity(assay, 'protocols', 'protocols')
        protocol_by_type = {}
        for protocol in protocols:
            concrete_entity_type = self.ingest_api.get_concrete_entity_type(protocol)
            if not protocol_by_type.get(concrete_entity_type):
                protocol_by_type[concrete_entity_type] = []
            protocol_by_type[concrete_entity_type].append(protocol)
        library_preparation_protocols = protocol_by_type.get('library_preparation_protocol', [])
        sequencing_protocols = protocol_by_type.get('sequencing_protocol', [])
        if len(library_preparation_protocols) != 1:
            raise ArchiverException('There should be 1 library preparation protocol for the assay process.')
        if len(sequencing_protocols) != 1:
            raise ArchiverException('There should be 1 sequencing_protocol for the assay process.')
        self.library_preparation_protocol = library_preparation_protocols[0]
        self.sequencing_protocol = sequencing_protocols[0]

    def _init_input_biomaterial(self):
        assay = self.get_assay_process()
        input_biomaterials_count = self.ingest_api.get_related_entity_count(assay, 'inputBiomaterials', 'biomaterials')
        if not input_biomaterials_count:
            raise ArchiverException('No input biomaterial found to the assay process.')
        input_biomaterials = self.ingest_api.get_related_entity(assay, 'inputBiomaterials', 'biomaterials')
        # TODO get first for now, clarify if it's possible to have multiple and how to specify the links
        return next(input_biomaterials)
class IngestArchiver:
    """Converts ingest metadata and drives its submission to the DSP
    (Data Submission Portal)."""

    def __init__(self, ingest_api: IngestAPI,
                 dsp_api: DataSubmissionPortal,
                 ontology_api=ontology.__api__,
                 exclude_types=None, alias_prefix=None, dsp_validation=True):
        self.logger = logging.getLogger(__name__)
        self.ingest_api = ingest_api
        self.exclude_types = exclude_types if exclude_types else []
        self.alias_prefix = f"{alias_prefix}_" if alias_prefix else ""
        self.ontology_api = ontology_api
        self.dsp_api = dsp_api
        self.dsp_validation = dsp_validation
        self.accessioner = Accessioner(self.ingest_api)
        self.ingest_tracker = IngestTracker(ingest_api=self.ingest_api)
        self.converter = {
            "project": ProjectConverter(ontology_api=ontology_api),
            "sample": SampleConverter(ontology_api=ontology_api),
            "study": StudyConverter(ontology_api=ontology_api),
            "sequencingRun": SequencingRunConverter(ontology_api=ontology_api),
            "sequencingExperiment": SequencingExperimentConverter(ontology_api=ontology_api)
        }
        # The sample converter needs API access to resolve relationships.
        self.converter['sample'].ingest_api = self.ingest_api

    def archive(self, entity_map: ArchiveEntityMap):
        """Archive *entity_map* end-to-end: metadata, file notification, submit."""
        archive_submission, _ = self.archive_metadata(entity_map)
        self.notify_file_archiver(archive_submission)
        archive_submission.validate_and_submit()
        return archive_submission

    def archive_metadata(self, entity_map: ArchiveEntityMap) -> Tuple[ArchiveSubmission, IngestTracker]:
        """Create a DSP submission for the converted entities in *entity_map*.

        Returns the submission and the tracker that recorded it. When there is
        nothing to convert, the submission is marked completed with an error.
        """
        archive_submission = ArchiveSubmission(dsp_api=self.dsp_api)
        archive_submission.entity_map = entity_map
        converted_entities = entity_map.get_converted_entities()
        # FIX: both branches now share self.ingest_tracker — the empty branch
        # used to build a throwaway IngestTracker — and the duplicated
        # `return` statement at the end of the else branch is gone.
        ingest_tracker = self.ingest_tracker
        if converted_entities:
            archive_submission.converted_entities = converted_entities
            archive_submission.submission = self.dsp_api.create_submission()
            dsp_submission_url = archive_submission.get_url()
            archive_submission.dsp_url = dsp_submission_url
            archive_submission.dsp_uuid = dsp_submission_url.rsplit('/', 1)[-1]
            output = f"DSP SUBMISSION: {dsp_submission_url}"
            print(output)
            self.logger.info(output)
            ingest_tracker.create_archive_submission(archive_submission)
            for entity in converted_entities:
                archive_submission.add_entity(entity)
                ingest_tracker.add_entity(entity)
        else:
            archive_submission.is_completed = True
            archive_submission.add_error('ingest_archiver.archive_metadata.no_entities',
                                         'No entities found to convert.')
            ingest_tracker.create_archive_submission(archive_submission)
        return archive_submission, ingest_tracker

    def complete_submission(self, dsp_submission_url, entity_map: ArchiveEntityMap = None):
        """Resume an existing DSP submission: submit drafts, or record results
        for completed submissions (accessions, tracker updates)."""
        archive_submission = ArchiveSubmission(dsp_api=self.dsp_api, dsp_submission_url=dsp_submission_url)
        if entity_map:
            archive_submission.entity_map = entity_map
            archive_submission.converted_entities = list(archive_submission.entity_map.get_converted_entities())
        if archive_submission.status == 'Draft':
            archive_submission.validate_and_submit()
        elif archive_submission.status == 'Completed':
            archive_submission.is_completed = True
            archive_submission.process_result()
            self.ingest_tracker.update_entities(archive_submission.dsp_uuid, entity_map)
            self.accessioner.accession_entities(archive_submission.entity_map)
            self.ingest_tracker.set_submission_as_archived(archive_submission)
        return archive_submission

    def get_manifest(self, manifest_id):
        """Load the Manifest wrapper for *manifest_id*."""
        return Manifest(ingest_api=self.ingest_api, manifest_id=manifest_id)

    def convert(self, manifests) -> ArchiveEntityMap:
        """Convert every manifest URL in *manifests* into archive entities."""
        entity_map = ArchiveEntityMap()
        for idx, manifest_url in enumerate(manifests, start=1):
            manifest_id = manifest_url.rsplit('/', 1)[-1]
            print(f'\n* PROCESSING MANIFEST {idx}/{len(manifests)}: {manifest_id}')
            manifest = self.get_manifest(manifest_id)
            entities = self._convert(manifest)
            entity_map.add_entities(entities)
        return entity_map

    def _convert(self, manifest: Manifest):
        """Aggregate and convert one manifest's entities, optionally checking
        DSP for pre-existing versions/accessions first."""
        aggregator = ArchiveEntityAggregator(manifest, self.ingest_api, alias_prefix=self.alias_prefix)
        entities = []
        for archive_entity_type in ["project", "study", "sample", "sequencingExperiment", "sequencingRun"]:
            print(f"Finding {archive_entity_type} entities in manifest...")
            progress_ctr = 0
            if self.exclude_types and archive_entity_type in self.exclude_types:
                print(f"Skipping {archive_entity_type} entities in manifest...")
                continue
            for archive_entity in aggregator.get_archive_entities(archive_entity_type):
                progress_ctr = progress_ctr + 1
                _print_same_line(str(progress_ctr))
                converter = self.converter[archive_entity_type]
                if self.dsp_validation:
                    current_version = self.dsp_api.get_current_version(archive_entity.archive_entity_type,
                                                                       archive_entity.id)
                    if current_version and current_version.get('accession'):
                        archive_entity.accession = current_version.get('accession')
                        msg = f'This alias has already been submitted to DSP, accession: {archive_entity.accession}.'
                        archive_entity.add_error('ingest_archiver.convert.entity_already_in_dsp_and_has_accession', msg,
                                                 {
                                                     "current_version": current_version["_links"]["self"]["href"]
                                                 })
                    elif current_version and not current_version.get('accession'):
                        msg = 'This alias has already been submitted to DSP, but still has no accession.'
                        archive_entity.add_error('ingest_archiver.convert.entity_already_in_dsp', msg, {
                            "current_version": current_version["_links"]["self"]["href"]
                        })
                    elif Accessioner.is_metadata_accessioned(archive_entity):
                        msg = 'Metadata already have an accession'
                        # FIX: this branch is only reachable when
                        # current_version is falsy, so the old detail dict
                        # (current_version["_links"]...) always raised
                        # TypeError. No DSP version exists to reference here.
                        archive_entity.add_error('ingest_archiver.convert.entity_has_accession', msg, {})
                if not archive_entity.errors:
                    try:
                        archive_entity.conversion = converter.convert(archive_entity.data)
                        archive_entity.conversion['alias'] = archive_entity.id
                        archive_entity.conversion.update(archive_entity.links)
                    except ConversionError as e:
                        msg = f'An error occured converting data to a {archive_entity_type}: {str(e)}.'
                        archive_entity.add_error('ingest_archiver.convert.error', msg, {
                            'data': json.dumps(archive_entity.data),
                            'error': str(e)
                        })
                entities.append(archive_entity)
            print("")
        return entities

    # TODO save notification to file for now, should be sending to rabbit mq in the future
    def notify_file_archiver(self, archive_submission: ArchiveSubmission) -> List[dict]:
        """Build the file-archiver messages for every sequencingRun entity in
        *archive_submission* and record them on the submission."""
        messages = []
        # TODO a bit redundant with converter, refactor this
        for entity in archive_submission.converted_entities:
            if entity.archive_entity_type == 'sequencingRun':
                data = entity.data
                files = []
                for file in data.get('files'):
                    obj = {
                        # required fields
                        "name": file['content']['file_core']['file_name'],
                        "read_index": file['content']['read_index'],
                        "cloud_url": file['cloudUrl']
                    }
                    files.append(obj)
                message = {
                    "dsp_api_url": self.dsp_api.url,
                    "submission_url": archive_submission.get_url(),
                    "files": files,
                    "manifest_id": entity.manifest_id
                }
                manifest = self.ingest_api.get_manifest_by_id(entity.manifest_id)
                if manifest.get('bundleUuid'):
                    message["dcp_bundle_uuid"] = manifest['bundleUuid']
                # 10x runs are converted to a single BAM before archiving.
                if protocols.is_10x(self.ontology_api, data.get("library_preparation_protocol")):
                    file_name = data['manifest_id']
                    if "lane_index" in entity.data:
                        file_name = f"{file_name}_{entity.data.get('lane_index')}"
                    file_name = f"{file_name}.bam"
                    message["conversion"] = {}
                    message["conversion"]["output_name"] = file_name
                    message["conversion"]["inputs"] = files
                    message["conversion"]["schema"] = protocols.map_10x_bam_schema(self.ontology_api, data.get(
                        "library_preparation_protocol"))
                    message["files"] = [{"name": file_name}]
                messages.append(message)
        archive_submission.file_upload_info = messages
        return messages
class ArchiveEntityAggregator:
    def __init__(self, manifest: Manifest, ingest_api: IngestAPI, alias_prefix: str):
        # Builds ArchiveEntity instances for one manifest; alias_prefix is
        # prepended to generated entity ids/aliases.
        self.manifest = manifest
        self.alias_prefix = alias_prefix
        self.ingest_api = ingest_api
def _get_projects(self):
project = self.manifest.get_project()
if not project:
return []
archive_entity = ArchiveEntity()
archive_type = "project"
archive_entity.archive_entity_type = archive_type
archive_entity.id = self.generate_archive_entity_id(archive_type, project)
archive_entity.data = {"project": project}
archive_entity.metadata_uuids = [project['uuid']['uuid']]
archive_entity.accessioned_metadata_uuids = [project['uuid']['uuid']]
archive_entity.manifest_id = self.manifest.manifest_id
return [archive_entity]
def _get_studies(self):
project = self.manifest.get_project()
if not project:
return []
archive_entity = ArchiveEntity()
archive_entity.manifest_id = self.manifest.manifest_id
archive_type = "study"
archive_entity.archive_entity_type = archive_type
archive_entity.id = self.generate_archive_entity_id(archive_type, project)
archive_entity.data = {"project": project}
archive_entity.metadata_uuids = [project['uuid']['uuid']]
archive_entity.accessioned_metadata_uuids = [project['uuid']['uuid']]
archive_entity.links = {
"projectRef": {
"alias": self.generate_archive_entity_id('project', project)
}
}
return [archive_entity]
def _get_samples(self):
samples_map = {}
derived_from_graph = Graph()
project = self.manifest.get_project()
for biomaterial in self.manifest.get_biomaterials():
archive_entity = ArchiveEntity()
archive_entity.manifest_id = self.manifest.manifest_id
archive_type = "sample"
archive_entity.archive_entity_type = archive_type
archive_entity.id = self.generate_archive_entity_id(archive_type, biomaterial.data)
archive_entity.data = {
'biomaterial': biomaterial.data,
'project': project
}
archive_entity.metadata_uuids = [biomaterial.data['uuid']['uuid'], project['uuid']['uuid']]
archive_entity.accessioned_metadata_uuids = [biomaterial.data['uuid']['uuid']]
if biomaterial.derived_by_process:
# TODO protocols will be needed for samples conversion
# archive_entity.data.update(biomaterial.derived_with_protocols)
sample_links = []
for derived_from in biomaterial.derived_from_biomaterials:
derived_from_alias = self.generate_archive_entity_id('sample', derived_from)
derived_from_graph.add_edge(derived_from_alias, archive_entity.id)
sample_links.append({
'alias': derived_from_alias,
'relationshipNature': 'derived from'
})
links = {'sampleRelationships': sample_links}
archive_entity.links = links
samples_map[archive_entity.id] = archive_entity
sorted_samples = derived_from_graph.topological_sort()
priority_samples = [samples_map.get(sample) for sample in sorted_samples if samples_map.get(sample)]
orphan_samples = [samples_map.get(sample) for sample in samples_map.keys() if sample not in priority_samples]
return priority_samples + orphan_samples
    def _get_sequencing_experiments(self):
        """Build the DSP sequencing-experiment entity for the manifest's assay
        process, or [] when there is no assay process."""
        process = self.manifest.get_assay_process()
        if not process:
            return []
        input_biomaterial = self.manifest.get_input_biomaterial()
        archive_entity = ArchiveEntity()
        archive_entity.manifest_id = self.manifest.manifest_id
        archive_type = "sequencingExperiment"
        archive_entity.archive_entity_type = archive_type
        archive_entity.id = self.generate_archive_entity_id(archive_type, process)
        lib_prep_protocol = self.manifest.get_library_preparation_protocol()
        seq_protocol = self.manifest.get_sequencing_protocol()
        archive_entity.data = {
            'process': process,
            'library_preparation_protocol': lib_prep_protocol,
            'sequencing_protocol': seq_protocol,
            'input_biomaterial': input_biomaterial
        }
        archive_entity.metadata_uuids = [
            lib_prep_protocol['uuid']['uuid'],
            seq_protocol['uuid']['uuid'],
            input_biomaterial['uuid']['uuid'],
            process['uuid']['uuid'],
        ]
        # Only the process itself receives an accession for this entity type.
        archive_entity.accessioned_metadata_uuids = [process['uuid']['uuid']]
        # A DSP sequencing experiment references its study and input sample.
        links = {}
        links['studyRef'] = {
            "alias": self.generate_archive_entity_id('study', self.manifest.get_project())
        }
        links['sampleUses'] = []
        sample_ref = {
            'sampleRef': {
                "alias": self.generate_archive_entity_id('sample', input_biomaterial)
            }
        }
        links['sampleUses'].append(sample_ref)
        archive_entity.links = links
        return [archive_entity]
def _get_sequencing_runs(self):
process = | |
None
if areas is not None:
if len(areas) > 0:
target_row_index = 0
if kktix_area_auto_select_mode == CONST_FROM_TOP_TO_BOTTOM:
pass
if kktix_area_auto_select_mode == CONST_FROM_BOTTOM_TO_TOP:
target_row_index = len(areas)-1
if kktix_area_auto_select_mode == CONST_RANDOM:
target_row_index = random.randint(0,len(areas)-1)
#print("target_row_index", target_row_index)
area = areas[target_row_index]
if area is not None:
try:
#print("area text", area.text)
ticket_price_input = None
try:
wait = WebDriverWait(area, 1)
ticket_price_input = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "input[type='text']")))
if ticket_price_input is not None:
if ticket_price_input.is_enabled():
current_ticket_number = str(ticket_price_input.get_attribute('value'))
if current_ticket_number == "0":
try:
#print("asssign ticket number:%s" % str(ticket_number))
ticket_price_input.clear()
ticket_price_input.send_keys(ticket_number)
# for //www.google.com/recaptcha/api.js?hl=en&render=explicit check
#time.sleep(0.4)
ret = True
except Exception as exc:
print("asssign ticket number to ticket-price field Exception:")
print(exc)
ticket_price_input.clear()
ticket_price_input.send_keys("1")
# for //www.google.com/recaptcha/api.js?hl=en&render=explicit check
#time.sleep(0.4)
ret = True
pass
else:
# assigned
if str(ticket_number) == current_ticket_number:
ret = True
else:
print("find input, but not is enabled!")
else:
print("find input div fail!")
except Exception as exc:
print("find input tag for price Exception")
#print(exc)
pass
except Exception as exc:
print("auto fill ticket number fail")
print(exc)
pass
return ret
def kktix_get_web_datetime(url, registrationsNewApp_div):
    """Scan the registration app's <td> cells for a date-like text.

    A cell qualifies when it contains a '/' and a year between this year and
    two years ahead. Returns the matching cell text, or None.
    """
    web_datetime = None
    try:
        if registrationsNewApp_div is not None:
            cell_list = registrationsNewApp_div.find_elements(By.TAG_NAME, 'td')
            if cell_list is None:
                print("find td.ng-binding fail")
            else:
                found = False
                for cell in cell_list:
                    try:
                        cell_text = cell.text
                        current = datetime.now()
                        for guess_year in range(current.year, current.year + 3):
                            if str(guess_year) in cell_text and u'/' in cell_text:
                                web_datetime = cell_text
                                found = True
                                break
                        if found:
                            break
                    except Exception as exc:
                        # Stale/odd cells are skipped silently.
                        pass
    except Exception as exc:
        pass
    return web_datetime
def kktix_check_agree_checkbox():
    """Tick KKTIX's 'person_agree_terms' checkbox when present and enabled.

    Returns (is_need_refresh, is_finish_checkbox_click).
    """
    is_need_refresh = False
    is_finish_checkbox_click = False
    agree_checkbox = None
    try:
        agree_checkbox = driver.find_element(By.ID, 'person_agree_terms')
        if agree_checkbox is None:
            is_need_refresh = True
            print("find person_agree_terms checkbox fail")
        elif agree_checkbox.is_enabled():
            if not agree_checkbox.is_selected():
                agree_checkbox.click()
            # Either it was already checked or we just checked it.
            is_finish_checkbox_click = True
        else:
            # Present but disabled: reload and try again.
            is_need_refresh = True
    except Exception as exc:
        print("find person_agree_terms checkbox Exception")
    return is_need_refresh, is_finish_checkbox_click
def kktix_check_register_status(url):
    """Query KKTIX's register_info endpoint for the event referenced by *url*.

    Extracts the event code from a ".../events/<code>/registrations/new" URL,
    then returns the inventory 'registerStatus' string, or None when the code
    cannot be extracted or the request/parse fails.
    """
    # ex: https://xxx.kktix.cc/events/xxx
    prefix_list = ['.com/events/', '.cc/events/']
    postfix = '/registrations/new'
    is_match_event_code = False
    event_code = ""
    for prefix in prefix_list:
        event_code = find_between(url, prefix, postfix)
        if len(event_code) > 0:
            is_match_event_code = True
            #print('event_code:',event_code)
            break
    html_result = None
    if is_match_event_code:
        url = "https://kktix.com/g/events/%s/register_info" % (event_code)
        user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
        headers = {"Accept-Language": "zh-TW,zh;q=0.5", 'User-Agent': user_agent}
        try:
            # FIX: requests.get() without a timeout can hang the bot forever;
            # a timeout surfaces as an exception handled just like other
            # request failures.
            html_result = requests.get(url, headers=headers, timeout=10)
        except Exception as exc:
            print("send reg_info request fail:")
            print(exc)
    registerStatus = None
    if not html_result is None:
        status_code = html_result.status_code
        if status_code == 200:
            try:
                jsLoads = json.loads(html_result.text)
                if 'inventory' in jsLoads:
                    if 'registerStatus' in jsLoads['inventory']:
                        registerStatus = jsLoads['inventory']['registerStatus']
            except Exception as exc:
                print("load reg_info json fail:")
                print(exc)
    #print("registerStatus:", registerStatus)
    return registerStatus
def kktix_reg_new_main(url, answer_index, registrationsNewApp_div, is_finish_checkbox_click):
#---------------------------
# part 2: ticket number
#---------------------------
is_assign_ticket_number = False
if auto_fill_ticket_number:
for retry_index in range(10):
is_assign_ticket_number = kktix_assign_ticket_number()
if is_assign_ticket_number:
break
#print('is_assign_ticket_number:', is_assign_ticket_number)
#---------------------------
# part 3: captcha
#---------------------------
# is captcha div appear
is_captcha_appear = False
is_captcha_appear_and_filled_password = False
# try to auto answer options.
answer_list = None
is_need_keep_symbol = False
my_answer_delimitor = ""
captcha_inner_div = None
try:
captcha_inner_div = driver.find_element(By.CSS_SELECTOR, '.custom-captcha-inner')
except Exception as exc:
#print(exc)
#print("find captcha_inner_div fail")
pass
if captcha_inner_div is not None:
captcha_text_div = None
try:
captcha_text_div = captcha_inner_div.find_element(By.TAG_NAME, "p")
except Exception as exc:
pass
print("find p tag(captcha_text_div) fail")
print(exc)
captcha_password_string = None
if captcha_text_div is not None:
is_captcha_appear = True
captcha_text_div_text = ""
try:
captcha_text_div_text = captcha_text_div.text
except Exception as exc:
pass
#captcha_text_div_text = u"請回答下列問題,請在下方空格輸入DELIGHT(請以半形輸入法作答,大小寫需要一模一樣)"
#captcha_text_div_text = u"請在下方空白處輸入引號內文字:「abc」"
#captcha_text_div_text = u"請在下方空白處輸入引號內文字:「0118eveconcert」(請以半形小寫作答。)"
# format text
keep_symbol_tmp = captcha_text_div_text
keep_symbol_tmp = keep_symbol_tmp.replace(u'也',u'須')
keep_symbol_tmp = keep_symbol_tmp.replace(u'必須',u'須')
keep_symbol_tmp = keep_symbol_tmp.replace(u'全都',u'都')
keep_symbol_tmp = keep_symbol_tmp.replace(u'全部都',u'都')
keep_symbol_tmp = keep_symbol_tmp.replace(u'一致',u'相同')
keep_symbol_tmp = keep_symbol_tmp.replace(u'一樣',u'相同')
keep_symbol_tmp = keep_symbol_tmp.replace(u'相等',u'相同')
if u'符號須都相同' in keep_symbol_tmp:
is_need_keep_symbol = True
if u'符號都相同' in keep_symbol_tmp:
is_need_keep_symbol = True
if u'符號須相同' in keep_symbol_tmp:
is_need_keep_symbol = True
# 請在下方空白處輸入引號內文字:
if captcha_password_string is None:
is_use_quota_message = False
if u"「" in captcha_text_div_text and u"」" in captcha_text_div_text:
if u'下' in captcha_text_div_text and u'空' in captcha_text_div_text and u'輸入' in captcha_text_div_text and u'引號' in captcha_text_div_text and u'字' in captcha_text_div_text:
is_use_quota_message = True
if u'半形' in captcha_text_div_text and u'輸入' in captcha_text_div_text and u'引號' in captcha_text_div_text and u'字' in captcha_text_div_text:
is_use_quota_message = True
#print("is_use_quota_message:" , is_use_quota_message)
if is_use_quota_message:
captcha_password_string = find_between(captcha_text_div_text, u"「", u"」")
#print("find captcha text:" , captcha_password_string)
if captcha_password_string is None:
is_use_quota_message = False
if u"【" in captcha_text_div_text and u"】" in captcha_text_div_text:
if u'下' in captcha_text_div_text and u'空' in captcha_text_div_text and u'輸入' in captcha_text_div_text and u'引號' in captcha_text_div_text and u'字' in captcha_text_div_text:
is_use_quota_message = True
if u'半形' in captcha_text_div_text and u'輸入' in captcha_text_div_text and u'引號' in captcha_text_div_text and u'字' in captcha_text_div_text:
is_use_quota_message = True
#print("is_use_quota_message:" , is_use_quota_message)
if is_use_quota_message:
captcha_password_string = find_between(captcha_text_div_text, u"【", u"】")
#print("find captcha text:" , captcha_password_string)
# 請回答下列問題,請在下方空格輸入DELIGHT(請以半形輸入法作答,大小寫需要一模一樣)
if captcha_password_string is None:
# clean stop word
tmp_text = captcha_text_div_text
tmp_text = tmp_text.replace(u'(',u'(')
tmp_text = tmp_text.replace(u')',u')')
tmp_text = tmp_text.replace(u':',u':')
tmp_text = tmp_text.replace(u'空白',u'空格')
tmp_text = tmp_text.replace(u'填入',u'輸入')
if u"空格" in tmp_text and u"輸入" in tmp_text:
if not u"(" in tmp_text:
tmp_text += u"("
captcha_password_string = find_between(tmp_text, u"輸入", u"(")
captcha_password_string = captcha_password_string.strip()
captcha_password_string = captcha_password_string.replace(u'「',u'')
captcha_password_string = captcha_password_string.replace(u'」',u'')
captcha_password_string = captcha_password_string.replace(u':',u'')
captcha_password_string = captcha_password_string.replace(u'引號',u'')
captcha_password_string = captcha_password_string.replace(u'內',u'')
captcha_password_string = captcha_password_string.replace(u'文字',u'')
#captcha_text_div_text = "請問下列哪張專輯為林俊傑出道專輯?(1A)飛行者(2B)礫行者(3C)樂行者(請以半形輸入法作答,大小寫需要一模一樣,範例:1A)"
#captcha_text_div_text = "以下哪個「不是」正確的林俊傑與其他藝人合唱的歌曲組合?(選項為歌名/合作藝人 ,請以半形輸入法作答選項,大小寫需要一模一樣,範例:jju) 選項: (jjz)I am alive/<NAME> (jjy)友人說/張懷秋 (jjx)豆漿油條/A-Sa蔡卓妍 (jjw)黑暗騎士/五月天阿信 (jjv)手心的薔薇/G.E.M鄧紫棋"
# for test.
#captcha_password_string = None
#captcha_text_div_text = u"以下哪個「不是」正確的林俊傑與其他藝人合唱的歌曲組合?(選項為歌名/合作藝人 ,請以半形輸入法作答選項,大小寫需要一模一樣,範例:jju) 選項: (jja)小酒窩/A-Sa蔡卓妍 (jjb)被風吹過的夏天/金莎 (jjc)友人說/張懷秋 (jjd)全面開戰/五月天阿信 (jje)小說/阿杜, (0118eveconcert)0118eveconcert"
#captcha_text_div_text = u"以下哪個「不是」正確的林俊傑與其他藝人合唱的歌曲組合?(選項為歌名/合作藝人 ,請以半形輸入法作答選項,大小寫需要一模一樣,範例:jju) 選項: (jja)小酒窩/A-Sa蔡卓妍 (jjb)被風吹過的夏天/金莎 (jjc)友人說/張懷秋 (jjd)全面開戰/五月天阿信 (jje)小說/阿杜"
#captcha_text_div_text = u"以下哪個「不是」正確的林俊傑與其他藝人合唱的歌曲組合?(選項為歌名/合作藝人 ,請以半形輸入法作答選項,大小寫需要一模一樣,範例:jju) 選項: (jja)小酒窩/A-Sa蔡卓妍 (jjb)被風吹過的夏天/金莎 (jjc)友人說/張懷秋 (jjd)全面開戰/五月天阿信 (jje)小說/阿杜"
#captcha_text_div_text = u"請問《龍的傳人2060》演唱會是以下哪位藝人的演出?(請以半形輸入法作答,大小寫需要一模一樣,範例:B2)A1.周杰倫 B2.林俊傑 C3.張學友 D4.王力宏 4:4"
# parse '演出日期'
is_need_parse_web_datetime = False
if u'半形數字' in captcha_text_div_text:
if u'演出日期' in captcha_text_div_text:
is_need_parse_web_datetime = True
if u'活動日期' in captcha_text_div_text:
is_need_parse_web_datetime = True
if u'表演日期' in captcha_text_div_text:
is_need_parse_web_datetime = True
if u'開始日期' in captcha_text_div_text:
is_need_parse_web_datetime = True
if u'演唱會日期' in captcha_text_div_text:
is_need_parse_web_datetime = True
if u'展覽日期' in captcha_text_div_text:
is_need_parse_web_datetime = True
if u'音樂會日期' in captcha_text_div_text:
is_need_parse_web_datetime = True
if u'the date of the show you purchased' in captcha_text_div_text:
is_need_parse_web_datetime = True
#print("is_need_parse_web_datetime:", is_need_parse_web_datetime)
if is_need_parse_web_datetime:
captcha_password_string = None
web_datetime = kktix_get_web_datetime(url, registrationsNewApp_div)
if not web_datetime is None:
tmp_text = captcha_text_div_text
# replace ex.
tmp_text = tmp_text.replace(u'例如',u'範例')
tmp_text = tmp_text.replace(u'如:',u'範例:')
tmp_text = tmp_text.replace(u'舉例',u'範例')
if not u'範例' in tmp_text:
tmp_text = tmp_text.replace(u'例',u'範例')
# important, maybe 例 & ex occurs at same time.
tmp_text = tmp_text.replace(u'ex:',u'範例:')
tmp_text = tmp_text.replace(u'輸入:',u'範例')
tmp_text = tmp_text.replace(u'輸入',u'範例')
#print("tmp_text", tmp_text)
my_datetime_foramted = None
if my_datetime_foramted is None:
if u'4位半形數字' in tmp_text:
my_datetime_foramted = "%m%d"
if my_datetime_foramted is None:
now = datetime.now()
for guess_year in range(now.year-1,now.year+3):
current_year = str(guess_year)
if current_year in tmp_text:
my_hint_index = tmp_text.find(current_year)
my_hint_anwser = tmp_text[my_hint_index:]
#print("my_hint_anwser:", my_hint_anwser)
# get after.
my_delimitor_symbol = u'範例'
if my_delimitor_symbol in my_hint_anwser:
my_delimitor_index = my_hint_anwser.find(my_delimitor_symbol)
my_hint_anwser = my_hint_anwser[my_delimitor_index+len(my_delimitor_symbol):]
#print("my_hint_anwser:", my_hint_anwser)
# get before.
my_delimitor_symbol = u','
if my_delimitor_symbol in my_hint_anwser:
my_delimitor_index = my_hint_anwser.find(my_delimitor_symbol)
my_hint_anwser = my_hint_anwser[:my_delimitor_index]
my_delimitor_symbol = u'。'
if my_delimitor_symbol in my_hint_anwser:
my_delimitor_index = my_hint_anwser.find(my_delimitor_symbol)
my_hint_anwser = my_hint_anwser[:my_delimitor_index]
# PS: space may not is delimitor...
my_delimitor_symbol = u' '
if my_delimitor_symbol in my_hint_anwser:
my_delimitor_index = my_hint_anwser.find(my_delimitor_symbol)
my_hint_anwser = my_hint_anwser[:my_delimitor_index]
my_anwser_formated = convert_string_to_pattern(my_hint_anwser, dynamic_length=False)
#print("my_hint_anwser:", my_hint_anwser)
#print(u"my_anwser_formated:", my_anwser_formated)
if my_anwser_formated == u"[\\d][\\d][\\d][\\d][\\d][\\d][\\d][\\d]":
my_datetime_foramted = "%Y%m%d"
if my_anwser_formated == u"[\\d][\\d][\\d][\\d]/[\\d][\\d]/[\\d][\\d]":
my_datetime_foramted = "%Y/%m/%d"
break
if not my_datetime_foramted is None:
my_delimitor_symbol = u' '
if my_delimitor_symbol in web_datetime:
web_datetime = web_datetime[:web_datetime.find(my_delimitor_symbol)]
date_time = datetime.strptime(web_datetime,u"%Y/%m/%d")
#print("date_time:", date_time)
ans = None
try:
ans = date_time.strftime(my_datetime_foramted)
except Exception as exc:
pass
captcha_password_string = ans
#print(u"my_anwser:", ans)
# parse '演出時間'
is_need_parse_web_time = False
if u'半形' in captcha_text_div_text:
if u'演出時間' in captcha_text_div_text:
is_need_parse_web_time = True
if u'表演時間' in captcha_text_div_text:
is_need_parse_web_time = True
if u'開始時間' in captcha_text_div_text:
is_need_parse_web_time = True
if u'演唱會時間' in captcha_text_div_text:
is_need_parse_web_time = True
if u'展覽時間' in captcha_text_div_text:
is_need_parse_web_time = True
if u'音樂會時間' in captcha_text_div_text:
is_need_parse_web_time = True
if u'the time of the show you purchased' in captcha_text_div_text:
is_need_parse_web_time = | |
from esprit import raw, models
import json, sys, time, os
from functools import reduce
class ScrollException(Exception):
    """Base class for errors raised while scrolling results out of Elasticsearch."""
    pass
class ScrollInitialiseException(ScrollException):
    """Raised when the initial scroll request does not return a 200 response."""
    pass
class ScrollTimeoutException(ScrollException):
    """Raised when the server reports that the scroll context has timed out mid-iteration."""
    pass
def bulk_load(conn, type, source_file, limit=None, max_content_length=100000000):
    """Bulk-load an ES bulk-format file into ``type`` via ``conn``.

    The bulk format alternates one action ("command") line with one document
    line per record, so a record is two lines of the file.

    :param conn: esprit connection to load into
    :param type: index type to load the records into
    :param source_file: path to a file in ES bulk format
    :param limit: maximum number of records to load; None means load everything
    :param max_content_length: largest request body (in characters) to send in
        a single bulk request; larger files are split into chunks
    :return: the number of records loaded when ``limit`` is set, otherwise -1
    :raises Exception: if the bulk endpoint responds with a non-200 status
    """
    source_size = os.path.getsize(source_file)
    with open(source_file, "r") as f:
        if limit is None and source_size < max_content_length:
            # if we aren't selecting a portion of the file, and the file is below the max content length, then
            # we can just serve it directly
            raw.raw_bulk(conn, f, type)
            return -1
        else:
            count = 0
            while True:
                chunk = _make_next_chunk(f, max_content_length)
                if chunk == "":
                    break
                finished = False
                if limit is not None:
                    # two lines per record: one command line, one document line
                    newlines = chunk.count("\n")
                    records = newlines // 2
                    if count + records > limit:
                        # trim the chunk down to exactly the records still allowed
                        allowed_lines = (limit - count) * 2
                        lines = chunk.split("\n")
                        chunk = "\n".join(lines[:allowed_lines]) + "\n"
                        # FIX: this previously advanced count by the number of
                        # *lines* kept (double the records), so the returned
                        # count overshot the requested limit
                        count = limit
                        finished = True
                    else:
                        count += records
                resp = raw.raw_bulk(conn, chunk, type)
                if resp.status_code != 200:
                    raise Exception("did not get expected response: " + str(resp.status_code) + " - " + resp.text)
                if finished:
                    break
    if limit is not None:
        return count
    else:
        return -1
def make_bulk_chunk_files(source_file, out_file_prefix, max_content_length=100000000):
    """Split a bulk-format file into pieces no larger than ``max_content_length``.

    If the file already fits within the size budget, its own path is returned
    unchanged; otherwise chunk files named ``<out_file_prefix>.<n>`` (counting
    from 1) are written and their paths returned in order.
    """
    source_size = os.path.getsize(source_file)
    with open(source_file, "r") as f:
        if source_size < max_content_length:
            # small enough to send as-is: no splitting required
            return [source_file]

        chunk_paths = []
        chunk_number = 0
        while True:
            chunk = _make_next_chunk(f, max_content_length)
            if chunk == "":
                return chunk_paths
            chunk_number += 1
            chunk_path = out_file_prefix + "." + str(chunk_number)
            with open(chunk_path, "w") as chunk_file:
                chunk_file.write(chunk)
            chunk_paths.append(chunk_path)
def _make_next_chunk(f, max_content_length):
    """Read the next bulk-request chunk of at most ``max_content_length`` characters from *f*.

    The bulk format alternates action ("command") lines with document lines, so
    a chunk must end on a record boundary.  After reading up to
    ``max_content_length`` characters, the chunk is trimmed back to the newline
    preceding the last complete command line, and the file position is rewound
    to that newline so the next call resumes there.

    Returns "" once the file is exhausted.
    """
    def is_command(line):
        # True when ``line`` is a bulk action line of the shape
        # {"index": {...}} whose metadata contains at most "_id".
        try:
            command = json.loads(line)
        except (json.JSONDecodeError, TypeError):
            return False
        if not isinstance(command, dict):
            # a bare JSON scalar or list is a document line, not an action line
            return False
        keys = list(command.keys())
        if len(keys) > 1:
            return False
        if "index" not in keys:
            return False
        subkeys = list(command["index"].keys())
        for sk in subkeys:
            if sk not in ["_id"]:
                return False
        return True

    offset = f.tell()
    chunk = f.read(max_content_length)

    # A short read means we hit EOF, so what we have is the final, complete
    # chunk and no boundary-trimming is needed.  (FIX: previously this case
    # fell into the trimming loop below, which spun forever on an empty read
    # and silently dropped the file's final record pair otherwise.)
    if len(chunk) < max_content_length:
        if chunk.startswith("\n"):
            chunk = chunk[1:]
        return chunk

    while True:
        last_newline = chunk.rfind("\n")
        if last_newline == -1:
            # No newline left to split on: the budget is smaller than a single
            # record, so no safe progress is possible (FIX: previously this
            # seeked to offset-1 and mis-split the data).
            raise ValueError("max_content_length is too small to hold a single bulk record")
        tail = chunk[last_newline + 1:]
        chunk = chunk[:last_newline]
        if is_command(tail):
            # rewind to just before the command line so the next call starts on it
            f.seek(offset + last_newline)
            if chunk.startswith("\n"):
                chunk = chunk[1:]
            return chunk
def copy(source_conn, source_type, target_conn, target_type, limit=None, batch_size=1000, method="POST", q=None):
    """Copy records of ``source_type`` on ``source_conn`` into ``target_type`` on ``target_conn``.

    Records matching ``q`` (default: match_all) are streamed from the source
    and bulk-written to the target in batches of ``batch_size``.
    """
    query = models.QueryBuilder.match_all() if q is None else q

    def _flush(records):
        # one bulk write per accumulated batch
        print("writing batch of", len(records))
        raw.bulk(target_conn, records, type_=target_type)

    pending = []
    for record in iterate(source_conn, source_type, query, page_size=batch_size, limit=limit, method=method):
        pending.append(record)
        if len(pending) >= batch_size:
            _flush(pending)
            pending = []
    if pending:
        _flush(pending)
def scroll(conn, type, q=None, page_size=1000, limit=None, keepalive="10m", scan=False):
    """Yield every result of query ``q`` against ``type`` via the ES scroll API.

    :param conn: esprit connection to search against
    :param type: index type to scroll over
    :param q: query dict; defaults to a match_all.  A copy is taken, so the
        caller's dict is never mutated.
    :param page_size: per-request page size, applied only when the query does
        not already carry a "size" of its own
    :param limit: stop after yielding this many results; None means no limit
    :param keepalive: scroll-context keepalive value passed through to ES
    :param scan: passed through to the scroll initialisation (scan mode)
    :raises ScrollInitialiseException: if the initial request does not return 200
    :raises ScrollTimeoutException: if a follow-up page reports the scroll timed out

    NOTE: this is a generator, so the exceptions above surface on first
    iteration, not when the function is called.
    """
    if q is not None:
        q = q.copy()
    if q is None:
        q = {"query": {"match_all": {}}}
    if "size" not in q:
        q["size"] = page_size
    resp = raw.initialise_scroll(conn, type, q, keepalive, scan)
    if resp.status_code != 200:
        # something went wrong initialising the scroll
        raise ScrollInitialiseException("Unable to initialise scroll - could be your mappings are broken")
    # otherwise, carry on: the first page of results arrives with the
    # initialisation response itself
    results, scroll_id = raw.unpack_scroll(resp)
    total_results = raw.total_results(resp)
    counter = 0
    for r in results:
        # apply the limit
        if limit is not None and counter >= int(limit):
            break
        counter += 1
        yield r
    while True:
        # apply the limit
        if limit is not None and counter >= int(limit):
            break
        # if we consumed all the results we were expecting, we can just stop here
        if counter >= total_results:
            break
        # get the next page and check that we haven't timed out
        sresp = raw.scroll_next(conn, scroll_id, keepalive=keepalive)
        if raw.scroll_timedout(sresp):
            status = sresp.status_code
            message = sresp.text
            raise ScrollTimeoutException("Scroll timed out; {status} - {message}".format(status=status, message=message))
        # if we didn't get any results back, this also means we're at the end
        results = raw.unpack_result(sresp)
        if len(results) == 0:
            break
        for r in results:
            # apply the limit (again)
            if limit is not None and counter >= int(limit):
                break
            counter += 1
            yield r
def iterate(conn, type, q, page_size=1000, limit=None, method="POST"):
    """Yield every result of query ``q`` against ``type`` using from/size paging.

    A copy of ``q`` is paged through in steps of ``page_size``; a ``_uid``
    ascending sort is applied when the query supplies no sort of its own, and
    iteration stops after ``limit`` results (None means no limit).
    """
    query = q.copy()
    query["size"] = page_size
    query["from"] = 0
    query.setdefault("sort", [{"_uid": {"order": "asc"}}])

    cap = None if limit is None else int(limit)
    yielded = 0
    while cap is None or yielded < cap:
        page = raw.unpack_result(raw.search(conn, type=type, query=query, method=method))
        if not page:
            # an empty page means the result set is exhausted
            return
        for record in page:
            if cap is not None and yielded >= cap:
                return
            yielded += 1
            yield record
        query["from"] += page_size
def dump(conn, type, q=None, page_size=1000, limit=None, method="POST",
            out=None, out_template=None, out_batch_sizes=100000, out_rollover_callback=None,
            transform=None,
            es_bulk_format=True, idkey='id', es_bulk_fields=None):
    """Dump all records matching ``q`` (default: match_all) from ``type`` to a stream or files.

    :param conn: esprit connection to read from
    :param type: index type to dump
    :param q: query dict; defaults to match_all
    :param page_size: page size used when iterating the index
    :param limit: maximum number of records to dump; None means all
    :param method: HTTP method used for the search requests
    :param out: open stream to write to; defaults to a file (when
        ``out_template`` is set) or sys.stdout
    :param out_template: filename prefix for output files; files are named
        ``<out_template>.<n>`` and rolled over every ``out_batch_sizes`` records
    :param out_batch_sizes: records per output file before rollover
    :param out_rollover_callback: called with each completed filename on rollover
    :param transform: optional callable applied to each record before writing
    :param es_bulk_format: write ES bulk format (command line + record line)
        rather than one plain JSON document per line
    :param idkey: record key used for the bulk "_id" field
    :param es_bulk_fields: which bulk metadata fields to emit; defaults to
        ["_id", "_index", "_type"]
    :return: the list of filenames written (empty when streaming to ``out``/stdout)
    """
    q = q if q is not None else {"query": {"match_all": {}}}
    filenames = []
    n = 1
    current_file = None
    if out_template is not None:
        current_file = out_template + "." + str(n)
        filenames.append(current_file)
    if out is None:
        # FIX: a caller-supplied ``out`` stream used to be discarded and
        # replaced with sys.stdout; only fall back when none was given.
        if current_file is not None:
            out = open(current_file, "w")
        else:
            out = sys.stdout

    count = 0
    for record in iterate(conn, type, q, page_size=page_size, limit=limit, method=method):
        if transform is not None:
            record = transform(record)

        if es_bulk_format:
            kwargs = {}
            if es_bulk_fields is None:
                es_bulk_fields = ["_id", "_index", "_type"]
            for key in es_bulk_fields:
                if key == "_id":
                    kwargs["idkey"] = idkey
                if key == "_index":
                    kwargs["index"] = conn.index
                if key == "_type":
                    kwargs["type_"] = type
            data = raw.to_bulk_single_rec(record, **kwargs)
        else:
            data = json.dumps(record) + "\n"
        out.write(data)

        if out_template is not None:
            count += 1
            if count > out_batch_sizes:
                # rollover: close the current chunk file and start the next one
                out.close()
                if out_rollover_callback is not None:
                    out_rollover_callback(current_file)
                count = 0
                n += 1
                current_file = out_template + "." + str(n)
                filenames.append(current_file)
                out = open(current_file, "w")

    if out_template is not None:
        out.close()
        if out_rollover_callback is not None:
            out_rollover_callback(current_file)

    return filenames
def create_alias(conn, alias):
    """Add *alias* pointing at the index behind ``conn``."""
    actions = raw.to_alias_actions(add=[{"alias": alias, "index": conn.index}])
    reply = raw.post_alias(conn, actions)
    print("Alias create reply: ", reply.json())
def create_alias_index_type(conn, alias, t):
    """Add *alias* pointing at the per-type index that ``conn`` maps type *t* to."""
    index = raw.type_to_index(conn, t)
    actions = raw.to_alias_actions(add=[{"alias": alias, "index": index}])
    reply = raw.post_alias(conn, actions)
    print("Alias create reply: ", reply.json())
def repoint_alias(old_conn, new_conn, alias):
    """Move *alias* from the index behind ``old_conn`` to the one behind ``new_conn``.

    Both the remove and the add are submitted in a single alias-actions request.
    """
    actions = raw.to_alias_actions(
        add=[{"alias": alias, "index": new_conn.index}],
        remove=[{"alias": alias, "index": old_conn.index}])
    reply = raw.post_alias(new_conn, actions)
    print("Alias re-point reply: ", reply.json())
def repoint_alias_index_type(old_conn, new_conn, alias, t):
    """Move *alias* for type *t* from ``old_conn``'s per-type index to ``new_conn``'s.

    Both the remove and the add are submitted in a single alias-actions request.
    """
    source_index = raw.type_to_index(old_conn, t)
    target_index = raw.type_to_index(new_conn, t)
    actions = raw.to_alias_actions(
        add=[{"alias": alias, "index": target_index}],
        remove=[{"alias": alias, "index": source_index}])
    reply = raw.post_alias(new_conn, actions)
    print("Alias re-point reply: ", reply.json())
def reindex(old_conn, new_conn, alias, types, new_mappings=None, new_version="0.90.13"):
    """
    Re-index without search downtime by aliasing and duplicating the specified types from the existing index
    :param old_conn: Connection to the existing index
    :param new_conn: Connection to the new index (will create if it doesn't exist)
    :param alias: Existing alias which is used to access the index. Will be changed to point to the new index.
    :param types: List of types to copy across to the new index
    :param new_mappings: New mappings to use, as a dictionary of {<type>: mapping}
    :param new_version: The version of the new index (fixme: used for the mapping function)

    NOTE(review): despite the None default, ``new_mappings`` must be supplied
    whenever ``types`` is non-empty — ``new_mappings[t]`` is evaluated for each
    type and would raise TypeError on None.
    """
    # Ensure the old index is available via alias, and the new one is not
    if not new_conn.index_per_type:
        if raw.alias_exists(new_conn, alias):
            raise Exception("Alias incorrectly set - check you have the connections the right way around.")
    elif not old_conn.index_per_type and not raw.alias_exists(old_conn, alias):
        print("The specified alias {alias} does not exist for index {index}. Creating it.".format(alias=alias, index=old_conn.index))
        create_alias(old_conn, alias)
    else:
        print("Alias OK")
    # Create a new index with the new mapping
    # (per-type alias checks happen inside the loop when indexes are per-type)
    for t in types:
        if new_conn.index_per_type:
            if raw.alias_exists(new_conn, alias, t):
                raise Exception("Alias incorrectly set - check you have the connections the right way around.")
            elif old_conn.index_per_type and not raw.alias_exists(old_conn, alias, t):
                print("The specified alias {alias} does not exist for index {index}. Creating it.".format(alias=alias,
                                                                                                          index=old_conn.index))
                create_alias_index_type(old_conn, alias, t)
        r = raw.put_mapping(new_conn, type=t, mapping=new_mappings[t], make_index=True, es_version=new_version)
        print("Creating ES Type+Mapping for {t}; status: {status_code}".format(t=t, status_code=r.status_code))
    print("Mapping OK")
    # brief pause before copying — presumably to let the new index settle; TODO confirm necessity
    time.sleep(1)
    # Copy the data from old index to new index. The index should be unchanging (and may not have .exact) so don't use
    # keyword_subfield.
    for t in types:
        print("Copying type {t}".format(t=t))
        copy(old_conn, t, new_conn, t)
    print("Copy OK")
    time.sleep(1)
    if not old_conn.index_per_type:
        # Switch alias to point to second index
        repoint_alias(old_conn, new_conn, alias)
    else:
        for t in types:
            repoint_alias_index_type(old_conn, new_conn, alias, t)
    print("Reindex complete.")
def compare_index_counts(conns, types, q=None):
""" Compare two or more indexes by doc counts of given types. Returns True if all counts equal, False otherwise """
if q is not None:
q = q.copy()
if "size" not in q or q['size'] != 0:
q["size"] = 0
if q is None:
q = {"query": {"match_all": {}}, "size": 0}
| |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package définissant les types d'autoquêtes.
Il ne contient que de la documentation des types de quêtes.
Chaque type de quête est définie dans un sous-package du nom du type.
Ce sous-package contient au minimum :
Un fichier __init__.py contenant la classe définissant le type de quête
Un fichier editeur.yml définissant des informations propres à l'édition.
Ces deux fichiers sont étudiés par la suite. Suivez cette documentation
pour ajouter un nouveau type de quête.
1. Création du répertoire contenant le type d'autoquêtes
C'est le plus simple. Créer, à la racine de ce package, un répertoire
qui définira un type de sous-quête. Donnez-lui un nom Python valide assez
proche du nom qui sera affiché en définitive. Si possible, faites tenir
ce nom en un seul mot, tout en minuscule, les espaces éventuels sont
remplacés par des signes soulignés (_). Pour des raisons d'encodage,
évitez également les accents au sein du nom de package.
2. Créer la classe AutoQuete
Il faut ensuite créer le fichier __init__.py qui doit contenir la classe
AutoQuete. Elle n'a pas à hériter d'une autre classe (le système se chargera
de l'héritage dynamiquement avec intelligence).
A. Les attributs de la classe AutoQuete
Les attributs de classe disponibles (devant être définies dans le corps de la classe) sont :
nom_type -- le nom du type d'autoquête (str). C'est le nom qui sera
affiché côté bâtisseur au moment de choisir ou consulter
le type d'une autoquête. Ce nom peut être celui du
sous-package créé précédemment ou être différent (inclure
des accents ou des espaces si vous le souhaitez).
parent -- le nom de la classe parente du type créé. Utilisez le
nom_type de la classe parente.
concrete -- définit si la classe est concrète (True) ou abstraite (False)
Une classe abstraite intermédiaire doit avoir cet attribut
à False. Pour simplifier, si une classe a cet attribut
à True, elle peut être instanciée et donc servira de type
d'autoquête. Sinon, elle ne pourra pas être instanciée
et ne sert dans votre hiérarchie qu'à définir des classes
de laquelle hériteront plusieurs types d'autoquêtes distincts.
Un exemple de classe abstraite (concrete = False) pourrait
être une classe QueteSurPNJ qui sera utilisé comme classe
parente des autoquêtes Coursier, Courrier, Executeur,
toutes trois concrètes (concrete = True).
Quant aux attributs d'instance (définis dans le constructeur), vous
pouvez en définir certains propres à votre type de quête. Sachez
également que ceux définis dans les classes parentes seront également
accessibles.
C. Méthode pouvant être redéfinies
Les méthodes pouvant être redéfinies sont :
__init__(self)
Constructeur de la classe. Vous pouvez y définir de nouveaux
attributs d'instance.
est_complete(self, personnage)
Méthode retournant True si l'autoquete a été complétée par
le personnage passé en paramètre.
D. Voir aussi
Vérifiez la documentation et la structure des classes que vous définissez
comme parent. Certaines subtilités y sont indiquées, notamment la liste
des attributs que vous pouvez utiliser rien qu'en se basant sur ces
types. N'oubliez pas que les classes sont virtuellement liées par l'héritage,
donc de nouveaux attributs peuvent être définis à différents niveaux
de la hiérarchie. Assurez-vous de ne pas simplement regarder la classe
indiquée comme parent mais aussi son parent, ainsi de suite jusqu'en haut
de la hiérarchie.
3. Définir l'éditeur
Pour CHAQUE classe de la hiérarchie des types de quêtes, on trouve
généralement un fichier editeur.yml (dans le sous-package du type) définissant
les informations éditables dans l'éditeur d'autoquêtes.
Souvenez-vous qu'un type de Quete offre généralement des spécificités importantes. Par exemple, l'autoquête Coursier peut avoir besoin :
De savoir quel PNJ donne la quête
De connaître la "liste de courses" (la liste des objets demandés au joueur)
De savoir combien de temps attendre avant de reproposer la même autoquête au même joueur.
Ces informations doivent être entrées par le bâtisseur créant la quête
et doivent donc être éditables dans un éditeur. Le fichier editeur.yml
spécifie quels sont les informations configurables pour ce type de quête.
Là encore, une quête héritant implicitement d'une autre possède également
les informations éditables pour le type parent.
La structure du fichier editeur.yml est simple :
Chaque information éditable se trouve dans un bloc à part. Les informations spécifiques à la donnée devant être modifiée se trouvent au-dessous. L'indentation permet de repérer la structure des blocs. Voici un exemple clair :
# Début du bloc YAML
- temps d'attente avant de reproposer la quête:
raccourci: t
attribut: tps_attente
type: entier
minimum: 1
aide: >
Entrez le |ent|nombre de jours IRL|ff| à attendre avant de proposer
de nouveau la même auto-quête au même personnage.
# Fin du bloc YAML
Dans l'ordre :
* La première ligne donne le nom affiché de la donnée à configurer.
Elle commence par un tiret suivi d'un espace et du nom de l'information.
Elle se termine par le signe ':' (car le détail de la configuration se
trouve au-dessous, légèrement indenté).
* On définit ensuite le raccourci de l'éditeur, une ou plusieurs lignes
contenues dans le titre
* On définit ensuite le nom de l'attribut qui sera modifié (ici, tps_attente)
* On définit ensuite le type d'information à modifier. Cela influence bien
entendu l'éditeur qui sera sélectionné pour éditer l'information. Ici,
le type choisi est entier (c'est un nombre entier que l'on attend) et
l'éditeur entier sera sélectionné (la liste des éditeurs possibles
est listée plus bas, ainsi que les paramètres qu'ils attendent).
* On définit ensuite la configuration de nom 'minimum'. Cette donnée
de configuration fait partie de celles qui sont attendues par le type
entier (là encore, voir plus bas).
* Les dernières lignes définissent l'aide spécifiée pour éditer cette
information. C'est un message plus explicite pour le bâtisseur qui paramètre
l'information. Notez que la première ligne, 'aide: >', se termine par
le signe supérieur (>). Cela signifie que le texte qui suit (légèrement
indenté là encore) sera entièrement sélectionné, même si il se trouve sur
plusieurs lignes. Les sauts de lignes et l'indentation seront ignorés
donc le texte sera de nouveau formaté avant d'être affiché au bâtisseur.
A. Informations à préciser invariablement
Quelque soit le type d'information, il existe des informations qu'il faut
toujours préciser et certaines, optionnelles, qui ont cours pour tous les
types.
Informations obligatoires :
raccourci -- le raccourci à entrer pour se rendre dans l'éditeur-enfant
type -- le type d'information (voir les types disponibles plus bas)
aide -- le message d'aide à afficher pour le bâtisseur.
Informations optionnelles :
attribut -- le nom de l'attribut
Si il n'est pas précisé, la quête-même est prise comme
type édité. Ceci est par définition rare.
B. Le type entier
Le type d'information entier doit être précisé quand on souhaite modifier un nombre entier.
Informations obligatoires : aucune
Informations facultatives :
minimum -- le nombre minimum pouvant être précisé (aucune limite si absent)
maximum -- le nombre maximum pouvant être précisé | |
= "#7A81FF"
bs_color = "#FF7F7F"
u6_color = "#666666"
u7_color = "#888888"
u8_color = "#AAAAAA"
ug_color = "#CCCCCC"
bg_color = "#D783FF"
x_axis = ["Average"]
fig, ax = plt.subplots(figsize=(fig_w, fig_h))
idx_tot = 6
x_idx = np.arange(len(x_axis))
width = 1 / 2**(math.ceil(math.log2(idx_tot)))
iterval = width
# 8b - spm - bp
index = 0
throughput_list_bp_spm = tp_list[index][-1:]
idx = 1.5
ax.bar(x_idx + iterval * (idx - idx_tot / 2), throughput_list_bp_spm, width, hatch = None, alpha=0.99, color=bp_color, label="Binary Parallel")
# 8b - spm - bs
index = 1
throughput_list_bs_spm = tp_list[index][-1:]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), throughput_list_bs_spm, width, hatch = None, alpha=0.99, color=bs_color, label="Binary Serial")
# 8b - wospm - ur - 32c
index = 2
throughput_list_u6_wspm = tp_list[index][-1:]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), throughput_list_u6_wspm, width, hatch = None, alpha=0.99, color=u6_color, label="Unary-32c")
# 8b - wospm - ur - 64c
index = 3
throughput_list_u7_wspm = tp_list[index][-1:]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), throughput_list_u7_wspm, width, hatch = None, alpha=0.99, color=u7_color, label="Unary-64c")
# 8b - wospm - ur - 128c
index = 4
throughput_list_u8_wspm = tp_list[index][-1:]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), throughput_list_u8_wspm, width, hatch = None, alpha=0.99, color=u8_color, label="Unary-128c")
# 8b - wospm - ug - 256c
index = 5
throughput_list_ug_wspm = tp_list[index][-1:]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), throughput_list_ug_wspm, width, hatch = None, alpha=0.99, color=ug_color, label="uGEMM-H")
ax.set_ylabel('Throughput\n(Frames/s)')
ax.set_xticks(x_idx)
ax.set_xticklabels(x_axis)
plt.xlim(x_idx[0]-0.5, x_idx[-1]+0.5)
plt.yscale("log")
_, top = plt.ylim()
locs, labels = plt.yticks()
if a == "eyeriss":
locs = locs[1:-1]
else:
locs = locs[1:]
ax.set_yticks(locs)
bottom, _ = plt.ylim()
if a == "eyeriss":
ax.set_ylim((bottom, top*60))
bottom, top = plt.ylim()
for x in x_idx:
ax.fill_betweenx([bottom, top/60], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0)
ax.legend(loc="upper center", ncol=3, frameon=True)
else:
ax.set_ylim((bottom, top))
bottom, top = plt.ylim()
for x in x_idx:
ax.fill_betweenx([bottom, top], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0)
y_label_list = []
for y in locs:
if y != 0:
y_label_list.append("{:1.0E}".format(abs(y)))
else:
y_label_list.append("0")
ax.set_yticklabels(y_label_list)
ax.minorticks_off()
fig.tight_layout()
plt.savefig('./outputs_fig/' + technode + '/Throughput_' + a_cap + ".pdf", bbox_inches='tight', dpi=my_dpi, pad_inches=0.02)
print("Throughput fig saved!\n")
# energy
my_dpi = 300
if a == "eyeriss":
fig_h = 1.3
else:
fig_h = 1
fig_w = 3.3115
bp_color = "#7A81FF"
bs_color = "#FF7F7F"
u6_color = "#666666"
u7_color = "#888888"
u8_color = "#AAAAAA"
ug_color = "#CCCCCC"
bg_color = "#D783FF"
x_axis = ["Average"]
fig, ax = plt.subplots(figsize=(fig_w, fig_h))
idx_tot = 6
x_idx = np.arange(len(x_axis))
# Bar width: a power-of-two fraction of one x-axis slot so that all
# idx_tot bars of a group fit side by side.
width = 1 / 2**(math.ceil(math.log2(idx_tot)))
iterval = width
l_alpha = 0.8

# The six hardware configurations, in drawing order.  Configuration k's
# series live in energy_list[k*5 : k*5+5] as (dram dynamic, sram dynamic,
# sram leakage, sub-array dynamic, sub-array leakage).  The plotting block
# was previously copy-pasted six times; it is folded into one loop here.
plot_configs = [
    (bp_color, 'Binary Parallel'),   # 8b - spm - bp
    (bs_color, 'Binary Serial'),     # 8b - spm - bs
    (u6_color, 'Unary-32c'),         # 8b - wospm - ur - 32c
    (u7_color, 'Unary-64c'),         # 8b - wospm - ur - 64c
    (u8_color, 'Unary-128c'),        # 8b - wospm - ur - 128c
    (ug_color, 'uGEMM-H'),           # 8b - wospm - ug - 256c
]
idx = 0.5
for index, (color, label) in enumerate(plot_configs):
    idx += 1
    # Keep only the last ("Average") entry of each series.
    dram_d_list = energy_list[index * 5 + 0][-1:]
    sram_d_list = energy_list[index * 5 + 1][-1:]
    sram_l_list = energy_list[index * 5 + 2][-1:]
    sarr_d_list = energy_list[index * 5 + 3][-1:]
    sarr_l_list = energy_list[index * 5 + 4][-1:]
    # SRAM energy is drawn below the x axis, hence the negation.
    sram_d_neg_list = [-x for x in sram_d_list]
    sram_l_neg_list = [-x for x in sram_l_list]
    offsets = x_idx + iterval * (idx - idx_tot / 2)
    # Above the axis: sub-array dynamic energy with leakage stacked on top.
    ax.bar(offsets, sarr_d_list, width, hatch=None, alpha=0.99,
           color=color, label=label)
    ax.bar(offsets, sarr_l_list, width, bottom=sarr_d_list, hatch=None,
           alpha=l_alpha, color=color)
    # Below the axis: SRAM dynamic energy with leakage stacked beneath.
    ax.bar(offsets, sram_d_neg_list, width, hatch=None, alpha=0.99,
           color=color)
    ax.bar(offsets, sram_l_neg_list, width, bottom=sram_d_neg_list,
           hatch=None, alpha=l_alpha, color=color)
ax.set_ylabel('SRAM-SA energy\n(uJ)')
ax.set_xticks(x_idx)
ax.set_xticklabels(x_axis)
plt.xlim(x_idx[0]-0.5, x_idx[-1]+0.5)
# Symmetric log scale: values within +/-10000 stay on a linear segment so
# bars near zero remain visible on both sides of the axis.
plt.yscale("symlog", linthresh=10000)
bottom, top = plt.ylim()
if a == "eyeriss":
    # Extra headroom (x8000 in symlog space) leaves room for the legend;
    # the shaded background bands still span only the data range.
    ax.set_ylim((bottom, top*8000))
    bottom, top = plt.ylim()
    for x in x_idx:
        ax.fill_betweenx([bottom, top/8000], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0)
    ax.legend(loc="upper center", ncol=3, frameon=True)
else:
    ax.set_ylim((bottom, top))
    bottom, top = plt.ylim()
    for x in x_idx:
        # Shade the right half of each x slot as a background band.
        ax.fill_betweenx([bottom, top], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0)
# Horizontal reference line separating the positive and negative stacks.
ax.axhline(y=0, color="k", linewidth = 0.1)
locs, labels = plt.yticks()
if a == "eyeriss":
locs = [-10000000, -100000, 0, 100000, 10000000]
else:
locs = [-1000000000, -10000000, -100000, 0, | |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Convert Monorail PB objects to API PB objects"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import datetime
import logging
import time
from six import string_types
from businesslogic import work_env
from framework import exceptions
from framework import framework_constants
from framework import framework_helpers
from framework import framework_views
from framework import permissions
from framework import timestr
from proto import api_pb2_v1
from proto import project_pb2
from proto import tracker_pb2
from services import project_svc
from tracker import field_helpers
from tracker import tracker_bizobj
from tracker import tracker_helpers
def convert_project(project, config, role, templates):
  """Convert Monorail Project PB to API ProjectWrapper PB."""
  # Field-by-field mapping from the internal project PB to the API PB.
  fields = {
      'kind': 'monorail#project',
      'name': project.project_name,
      'externalId': project.project_name,
      'htmlLink': '/p/%s/' % project.project_name,
      'summary': project.summary,
      'description': project.description,
      'role': role,
      'issuesConfig': convert_project_config(config, templates),
  }
  return api_pb2_v1.ProjectWrapper(**fields)
def convert_project_config(config, templates):
  """Convert Monorail ProjectIssueConfig PB to API ProjectIssueConfig PB."""
  # Convert the nested well-known definitions first for readability.
  statuses = [
      convert_status(status_def) for status_def in config.well_known_statuses]
  labels = [
      convert_label(label_def) for label_def in config.well_known_labels]
  prompts = [convert_template(template) for template in templates]
  return api_pb2_v1.ProjectIssueConfig(
      kind='monorail#projectIssueConfig',
      restrictToKnown=config.restrict_to_known,
      defaultColumns=config.default_col_spec.split(),
      defaultSorting=config.default_sort_spec.split(),
      statuses=statuses,
      labels=labels,
      prompts=prompts,
      defaultPromptForMembers=config.default_template_for_developers,
      defaultPromptForNonMembers=config.default_template_for_users)
def convert_status(status):
  """Convert Monorail StatusDef PB to API Status PB."""
  # Field-by-field mapping from the internal status definition.
  fields = {
      'status': status.status,
      'meansOpen': status.means_open,
      'description': status.status_docstring,
  }
  return api_pb2_v1.Status(**fields)
def convert_label(label):
  """Convert Monorail LabelDef PB to API Label PB."""
  # Field-by-field mapping from the internal label definition.
  fields = {
      'label': label.label,
      'description': label.label_docstring,
  }
  return api_pb2_v1.Label(**fields)
def convert_template(template):
  """Convert Monorail TemplateDef PB to API Prompt PB."""
  # Field-by-field mapping from the internal template definition.
  fields = {
      'name': template.name,
      'title': template.summary,
      'description': template.content,
      'titleMustBeEdited': template.summary_must_be_edited,
      'status': template.status,
      'labels': template.labels,
      'membersOnly': template.members_only,
      'defaultToMember': template.owner_defaults_to_member,
      'componentRequired': template.component_required,
  }
  return api_pb2_v1.Prompt(**fields)
def convert_person(user_id, cnxn, services, trap_exception=False):
  """Convert user id to API AtomPerson PB or None if user_id is None.

  Args:
    user_id: user id to look up, possibly None for optional user values.
    cnxn: connection to the database.
    services: connections to backend services.
    trap_exception: if True, an unknown user id yields None instead of
        raising NoSuchUserException.

  Returns:
    An api_pb2_v1.AtomPerson, or None.
  """
  if not user_id:
    # Optional user references (e.g. issue.owner) may legitimately be
    # unset; represent those as None rather than failing.
    return None
  if user_id == framework_constants.DELETED_USER_ID:
    return api_pb2_v1.AtomPerson(
        kind='monorail#issuePerson',
        name=framework_constants.DELETED_USER_NAME)
  try:
    user = services.user.GetUser(cnxn, user_id)
  except exceptions.NoSuchUserException as e:
    if not trap_exception:
      raise e
    logging.warning(str(e))
    return None
  # Report how many whole days ago the user last visited, when known.
  days_ago = None
  if user.last_visit_timestamp:
    secs_ago = int(time.time()) - user.last_visit_timestamp
    days_ago = secs_ago // framework_constants.SECS_PER_DAY
  return api_pb2_v1.AtomPerson(
      kind='monorail#issuePerson',
      name=user.email,
      htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(), user_id),
      last_visit_days_ago=days_ago,
      email_bouncing=bool(user.email_bounce_timestamp),
      vacation_message=user.vacation_message)
def convert_issue_ids(issue_ids, mar, services):
  """Convert global issue ids to API IssueRef PB."""
  # Ids that do not resolve to an issue are silently dropped by GetIssues.
  issues = services.issue.GetIssues(mar.cnxn, issue_ids)
  return [
      api_pb2_v1.IssueRef(
          issueId=issue.local_id,
          projectId=issue.project_name,
          kind='monorail#issueRef')
      for issue in issues]
def convert_issueref_pbs(issueref_pbs, mar, services):
  """Convert API IssueRef PBs to global issue ids.

  Returns a list of issue ids, or None if no refs were given.
  """
  if not issueref_pbs:
    return None
  result = []
  for ref in issueref_pbs:
    # A ref without an explicit project falls back to the request's project.
    project_id = mar.project_id
    if ref.projectId:
      project = services.project.GetProjectByName(
          mar.cnxn, ref.projectId)
      if project:
        project_id = project.project_id
    try:
      issue = services.issue.GetIssueByLocalID(
          mar.cnxn, project_id, ref.issueId)
    except exceptions.NoSuchIssueException:
      # Unknown refs are logged and skipped rather than failing the request.
      logging.warning(
          'Issue (%s:%d) does not exist.' % (ref.projectId, ref.issueId))
    else:
      result.append(issue.issue_id)
  return result
def convert_approvals(cnxn, approval_values, services, config, phases):
  """Convert an Issue's Monorail ApprovalValue PBs to API Approval"""
  # Index field definitions and phases for constant-time lookup below.
  field_defs = {fd.field_id: fd for fd in config.field_defs}
  phase_lookup = {phase.phase_id: phase for phase in phases}
  converted = []
  for value in approval_values:
    field_def = field_defs.get(value.approval_id)
    # Skip values whose field is unknown or is not an approval field.
    if field_def is None:
      logging.warning(
          'Approval (%d) does not exist' % value.approval_id)
      continue
    if field_def.field_type is not tracker_pb2.FieldTypes.APPROVAL_TYPE:
      logging.warning(
          'field %s has unexpected field_type: %s' % (
              field_def.field_name, field_def.field_type.name))
      continue
    approval = api_pb2_v1.Approval()
    approval.approvalName = field_def.field_name
    # Drop approvers that could not be converted (e.g. unset user ids).
    converted_people = [
        convert_person(approver_id, cnxn, services)
        for approver_id in value.approver_ids]
    approval.approvers = [person for person in converted_people if person]
    approval.status = api_pb2_v1.ApprovalStatus(value.status.number)
    if value.setter_id:
      approval.setter = convert_person(value.setter_id, cnxn, services)
    if value.set_on:
      approval.setOn = datetime.datetime.fromtimestamp(value.set_on)
    if value.phase_id:
      phase = phase_lookup.get(value.phase_id)
      if phase is None:
        logging.warning(
            'phase %d not found in given phases list' % value.phase_id)
      else:
        approval.phaseName = phase.name
    converted.append(approval)
  return converted
def convert_phases(phases):
  """Convert an Issue's Monorail Phase PBs to API Phase.

  Phases without a name are logged and skipped rather than converted.
  """
  converted_phases = []
  for idx, phase in enumerate(phases):
    if not phase.name:
      try:
        # NOTE: if phase.phase_id is None, the %d interpolation below
        # raises TypeError; the except branch then logs a fallback
        # message using the list index instead of the missing id.
        logging.warning(
            'Phase %d has no name, skipping conversion.' % phase.phase_id)
      except TypeError:
        logging.warning(
            'Phase #%d (%s) has no name or id, skipping conversion.' % (
                idx, phase))
      continue
    converted = api_pb2_v1.Phase(phaseName=phase.name, rank=phase.rank)
    converted_phases.append(converted)
  return converted_phases
def convert_issue(cls, issue, mar, services):
  """Convert Monorail Issue PB to API IssuesGetInsertResponse.

  Args:
    cls: the API response class to instantiate with the converted fields.
    issue: the Monorail Issue PB to convert.
    mar: monorail API request info (connection, auth, permissions).
    services: connections to backend services.

  Returns:
    An instance of cls populated from the given issue.
  """
  config = services.config.GetProjectConfig(mar.cnxn, issue.project_id)
  granted_perms = tracker_bizobj.GetGrantedPerms(
      issue, mar.auth.effective_ids, config)
  issue_project = services.project.GetProject(mar.cnxn, issue.project_id)
  # Collect the paths of the components assigned to this issue.
  component_list = []
  for cd in config.component_defs:
    cid = cd.component_id
    if cid in issue.component_ids:
      component_list.append(cd.path)
  # Convert CC'd users, dropping any that could not be converted.
  cc_list = [convert_person(p, mar.cnxn, services) for p in issue.cc_ids]
  cc_list = [p for p in cc_list if p is not None]
  field_values_list = []
  fds_by_id = {
      fd.field_id: fd for fd in config.field_defs}
  phases_by_id = {phase.phase_id: phase for phase in issue.phases}
  for fv in issue.field_values:
    fd = fds_by_id.get(fv.field_id)
    if not fd:
      logging.warning('Custom field %d of project %s does not exist',
                      fv.field_id, issue_project.project_name)
      continue
    val = None
    if fv.user_id:
      # User-valued fields are rendered as the user's email address.
      val = _get_user_email(
          services.user, mar.cnxn, fv.user_id)
    else:
      val = tracker_bizobj.GetFieldValue(fv, {})
    if not isinstance(val, string_types):
      # The API always carries field values as strings.
      val = str(val)
    new_fv = api_pb2_v1.FieldValue(
        fieldName=fd.field_name,
        fieldValue=val,
        derived=fv.derived)
    if fd.approval_id:  # Attach parent approval name
      approval_fd = fds_by_id.get(fd.approval_id)
      if not approval_fd:
        logging.warning('Parent approval field %d of field %s does not exist',
                        fd.approval_id, fd.field_name)
      else:
        new_fv.approvalName = approval_fd.field_name
    elif fv.phase_id:  # Attach phase name
      phase = phases_by_id.get(fv.phase_id)
      if not phase:
        logging.warning('Phase %d for field %s does not exist',
                        fv.phase_id, fd.field_name)
      else:
        new_fv.phaseName = phase.name
    field_values_list.append(new_fv)
  approval_values_list = convert_approvals(
      mar.cnxn, issue.approval_values, services, config, issue.phases)
  phases_list = convert_phases(issue.phases)
  with work_env.WorkEnv(mar, services) as we:
    starred = we.IsIssueStarred(issue)
  resp = cls(
      kind='monorail#issue',
      id=issue.local_id,
      title=issue.summary,
      summary=issue.summary,
      projectId=issue_project.project_name,
      stars=issue.star_count,
      starred=starred,
      status=issue.status,
      state=(api_pb2_v1.IssueState.open if
             tracker_helpers.MeansOpenInProject(
                 tracker_bizobj.GetStatus(issue), config)
             else api_pb2_v1.IssueState.closed),
      labels=issue.labels,
      components=component_list,
      author=convert_person(issue.reporter_id, mar.cnxn, services),
      owner=convert_person(issue.owner_id, mar.cnxn, services),
      cc=cc_list,
      updated=datetime.datetime.fromtimestamp(issue.modified_timestamp),
      published=datetime.datetime.fromtimestamp(issue.opened_timestamp),
      blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services),
      blocking=convert_issue_ids(issue.blocking_iids, mar, services),
      canComment=permissions.CanCommentIssue(
          mar.auth.effective_ids, mar.perms, issue_project, issue,
          granted_perms=granted_perms),
      canEdit=permissions.CanEditIssue(
          mar.auth.effective_ids, mar.perms, issue_project, issue,
          granted_perms=granted_perms),
      fieldValues=field_values_list,
      approvalValues=approval_values_list,
      phases=phases_list
  )
  # Optional fields are only set on the response when present on the issue.
  if issue.closed_timestamp > 0:
    resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp)
  if issue.merged_into:
    resp.mergedInto=convert_issue_ids([issue.merged_into], mar, services)[0]
  if issue.owner_modified_timestamp:
    resp.owner_modified = datetime.datetime.fromtimestamp(
        issue.owner_modified_timestamp)
  if issue.status_modified_timestamp:
    resp.status_modified = datetime.datetime.fromtimestamp(
        issue.status_modified_timestamp)
  if issue.component_modified_timestamp:
    resp.component_modified = datetime.datetime.fromtimestamp(
        issue.component_modified_timestamp)
  return resp
def convert_comment(issue, comment, mar, services, granted_perms):
  """Convert Monorail IssueComment PB to API IssueCommentWrapper."""
  # Whether the requester may delete this comment depends on the
  # commenter, the requester, and the requester's issue permissions.
  comment_perms = permissions.UpdateIssuePermissions(
      mar.perms, mar.project, issue, mar.auth.effective_ids,
      granted_perms=granted_perms)
  commenter = services.user.GetUser(mar.cnxn, comment.user_id)
  deletable = permissions.CanDeleteComment(
      comment, commenter, mar.auth.user_id, comment_perms)
  author = convert_person(
      comment.user_id, mar.cnxn, services, trap_exception=True)
  deleted_by = convert_person(
      comment.deleted_by, mar.cnxn, services, trap_exception=True)
  attachments = [convert_attachment(a) for a in comment.attachments]
  return api_pb2_v1.IssueCommentWrapper(
      attachments=attachments,
      author=author,
      canDelete=deletable,
      content=comment.content,
      deletedBy=deleted_by,
      id=comment.sequence,
      published=datetime.datetime.fromtimestamp(comment.timestamp),
      updates=convert_amendments(issue, comment.amendments, mar, services),
      kind='monorail#issueComment',
      is_description=comment.is_description)
def convert_approval_comment(issue, comment, mar, services, granted_perms):
  """Convert Monorail IssueComment PB to API ApprovalCommentWrapper."""
  # Whether the requester may delete this comment depends on the
  # commenter, the requester, and the requester's issue permissions.
  comment_perms = permissions.UpdateIssuePermissions(
      mar.perms, mar.project, issue, mar.auth.effective_ids,
      granted_perms=granted_perms)
  commenter = services.user.GetUser(mar.cnxn, comment.user_id)
  deletable = permissions.CanDeleteComment(
      comment, commenter, mar.auth.user_id, comment_perms)
  author = convert_person(
      comment.user_id, mar.cnxn, services, trap_exception=True)
  deleted_by = convert_person(
      comment.deleted_by, mar.cnxn, services, trap_exception=True)
  attachments = [convert_attachment(a) for a in comment.attachments]
  return api_pb2_v1.ApprovalCommentWrapper(
      attachments=attachments,
      author=author,
      canDelete=deletable,
      content=comment.content,
      deletedBy=deleted_by,
      id=comment.sequence,
      published=datetime.datetime.fromtimestamp(comment.timestamp),
      approvalUpdates=convert_approval_amendments(
          comment.amendments, mar, services),
      kind='monorail#approvalComment',
      is_description=comment.is_description)
def convert_attachment(attachment):
  """Convert Monorail Attachment PB to API Attachment."""
  # Field-by-field mapping from the internal attachment PB.
  fields = {
      'attachmentId': attachment.attachment_id,
      'fileName': attachment.filename,
      'fileSize': attachment.filesize,
      'mimetype': attachment.mimetype,
      'isDeleted': attachment.deleted,
  }
  return api_pb2_v1.Attachment(**fields)
def convert_amendments(issue, amendments, mar, services):
  """Convert a list of Monorail Amendment PBs to API Update.

  Args:
    issue: the Issue PB the amendments belong to.
    amendments: list of Amendment PBs from one issue comment.
    mar: monorail API request info.
    services: connections to backend services.

  Returns:
    An api_pb2_v1.Update summarizing all of the given amendments.
  """
  # Build user views so amendment values can be rendered with user
  # emails, revealed only as permitted for the requesting user.
  amendments_user_ids = tracker_bizobj.UsersInvolvedInAmendments(amendments)
  users_by_id = framework_views.MakeAllUserViews(
      mar.cnxn, services.user, amendments_user_ids)
  framework_views.RevealAllEmailsToMembers(mar.auth, mar.project, users_by_id)
  result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate')
  # Each amendment type populates its corresponding Update field;
  # unrecognized amendment types are ignored.
  for amendment in amendments:
    if amendment.field == tracker_pb2.FieldID.SUMMARY:
      result.summary = amendment.newvalue
    elif amendment.field == tracker_pb2.FieldID.STATUS:
      result.status = amendment.newvalue
    elif amendment.field == tracker_pb2.FieldID.OWNER:
      if len(amendment.added_user_ids) == 0:
        # The owner was cleared rather than reassigned.
        result.owner = framework_constants.NO_USER_NAME
      else:
        result.owner = _get_user_email(
            services.user, mar.cnxn, amendment.added_user_ids[0])
    elif amendment.field == tracker_pb2.FieldID.LABELS:
      result.labels = amendment.newvalue.split()
    elif amendment.field == tracker_pb2.FieldID.CC:
      # Removed CC'd users are reported with a '-' prefix.
      for user_id in amendment.added_user_ids:
        user_email = _get_user_email(
            services.user, mar.cnxn, user_id)
        result.cc.append(user_email)
      for user_id in amendment.removed_user_ids:
        user_email = _get_user_email(
            services.user, mar.cnxn, user_id)
        result.cc.append('-%s' % user_email)
    elif amendment.field == tracker_pb2.FieldID.BLOCKEDON:
      result.blockedOn = _append_project(
          amendment.newvalue, issue.project_name)
    elif amendment.field == tracker_pb2.FieldID.BLOCKING:
      result.blocking = _append_project(
          amendment.newvalue, issue.project_name)
    elif amendment.field == tracker_pb2.FieldID.MERGEDINTO:
      result.mergedInto = amendment.newvalue
    elif amendment.field == tracker_pb2.FieldID.COMPONENTS:
      result.components = amendment.newvalue.split()
    elif amendment.field == tracker_pb2.FieldID.CUSTOM:
      fv = api_pb2_v1.FieldValue()
      fv.fieldName = amendment.custom_field_name
      fv.fieldValue = tracker_bizobj.AmendmentString(amendment, users_by_id)
      result.fieldValues.append(fv)
  return result
def convert_approval_amendments(amendments, mar, services):
  """Convert a list of Monorail Amendment PBs to an API ApprovalUpdate."""
  # Build user views so amendment values can be rendered with user
  # emails, revealed only as permitted for the requesting user.
  amendments_user_ids = tracker_bizobj.UsersInvolvedInAmendments(amendments)
  users_by_id = framework_views.MakeAllUserViews(
      mar.cnxn, services.user, amendments_user_ids)
  framework_views.RevealAllEmailsToMembers(mar.auth, mar.project, users_by_id)
  result = api_pb2_v1.ApprovalUpdate(kind='monorail#approvalCommentUpdate')
  for amendment in amendments:
    # Approval changes are recorded as amendments to CUSTOM fields.
    if amendment.field == tracker_pb2.FieldID.CUSTOM:
      if amendment.custom_field_name == 'Status':
        # Map the status string back through the internal enum onto the
        # API enum name.
        status_number = tracker_pb2.ApprovalStatus(
            amendment.newvalue.upper()).number
        result.status = api_pb2_v1.ApprovalStatus(status_number).name
      elif amendment.custom_field_name == 'Approvers':
        # Removed approvers are reported with a '-' prefix.
        for user_id in amendment.added_user_ids:
          user_email = _get_user_email(
              services.user, mar.cnxn, user_id)
          result.approvers.append(user_email)
        for user_id in amendment.removed_user_ids:
          user_email = _get_user_email(
              services.user, mar.cnxn, user_id)
          result.approvers.append('-%s' % user_email)
      else:
        fv = api_pb2_v1.FieldValue()
        fv.fieldName = amendment.custom_field_name
        fv.fieldValue = tracker_bizobj.AmendmentString(amendment, users_by_id)
        # TODO(jojwang): monorail:4229, add approvalName field to FieldValue
        result.fieldValues.append(fv)
  return result
def _get_user_email(user_service, cnxn, user_id):
  """Return the email address for a user id, with placeholder fallbacks.

  Deleted users, unset user ids, and unknown user ids each map to a
  dedicated placeholder name instead of raising.
  """
  if user_id == framework_constants.DELETED_USER_ID:
    return framework_constants.DELETED_USER_NAME
  if not user_id:
    # Handles optional user values, like issue.owner, where user_id
    # may be None.
    return framework_constants.NO_USER_NAME
  try:
    return user_service.LookupUserEmail(cnxn, user_id)
  except exceptions.NoSuchUserException:
    return framework_constants.USER_NOT_FOUND_NAME
def _append_project(issue_ids, project_name):
"""Append | |
must be the same as of the grid.
:Returns:
values_routed: `numpy.ndarray`
The values routed following the flow direction, e.g. how
much river discharge is coming into each grid element
from their upstream grid elements. The shape of this
array is the same as of the grid.
values_out: `numpy.ndarray`
The values that routed following the flow direction would
have left the domain (a value is considered to leave the
domain if it is directed towards beyond the bounds of
of the domain, or towards a masked location within the
domain, if the *flow_direction* is masked), e.g. how
much river discharge has not been routed towards the
downstream grid element because this one is not defined
(i.e. outside the grid) or masked (i.e. outside the
drainage area or into the sea). The shape of this array
is the same as of the grid.
**Examples**
.. warning ::
Given that Y and X `Grid` coordinates are ordered increasingly
northwards and eastwards, respectively, and given that a 2D
`numpy.ndarray` origin (i.e. [0, 0]) is in the upper-left
corner when visualising the content of an array (i.e. using
`print`), routing a value northwards will result in visualising
it "moving down" the array (and not up), and routing a value
eastwards will result in visualising it "moving right".
Using grid routing functionality with basic domain and flow direction:
>>> import numpy
>>> grid = LatLonGrid.from_extent_and_resolution(
... latitude_extent=(51, 55),
... latitude_resolution=1,
... longitude_extent=(-2, 1),
... longitude_resolution=1
... )
>>> values = numpy.arange(12).reshape(4, 3) + 1
>>> print(values)
[[ 1 2 3]
[ 4 5 6]
[ 7 8 9]
[10 11 12]]
>>> directions = grid.to_field()
>>> directions.set_data(numpy.array([['NE', 'N', 'E'],
... ['SE', 'E', 'S'],
... ['N', 'N', 'W'],
... ['SW', 'E', 'NW']]))
>>> grid.flow_direction = directions
>>> moved, out = grid.route(values)
>>> print(moved)
[[ 0 4 6]
[ 0 3 5]
[ 0 9 0]
[ 7 8 11]]
>>> print(out)
[[ 0 0 3]
[ 0 0 0]
[ 0 0 0]
[10 0 12]]
Using grid routing functionality with masked flow direction:
>>> directions.set_data(
... numpy.ma.array(
... [['NE', 'N', 'E'],
... ['SE', 'E', 'S'],
... ['N', 'N', 'W'],
... ['SW', 'E', 'NW']],
... mask=[[1, 0, 0],
... [1, 0, 0],
... [1, 1, 0],
... [0, 0, 0]]
... )
... )
>>> grid.flow_direction = directions
>>> moved, out = grid.route(values)
>>> print(moved)
[[-- 0 6]
[-- 2 5]
[-- -- 0]
[0 0 11]]
>>> print(out)
[[-- 0 3]
[-- 0 0]
[-- -- 9]
[10 0 12]]
Using grid routing functionality with a wrap-around domain:
>>> grid = LatLonGrid.from_extent_and_resolution(
... latitude_extent=(-90, 90),
... latitude_resolution=45,
... longitude_extent=(-180, 180),
... longitude_resolution=120
... )
>>> values = numpy.arange(12).reshape(4, 3) + 1
>>> print(values)
[[ 1 2 3]
[ 4 5 6]
[ 7 8 9]
[10 11 12]]
>>> directions = grid.to_field()
>>> directions.set_data(numpy.array([['NE', 'N', 'E'],
... ['SE', 'E', 'S'],
... ['N', 'N', 'W'],
... ['SW', 'E', 'NW']]))
>>> grid.flow_direction = directions
>>> moved, out = grid.route(values)
>>> print(moved)
[[ 3 16 6]
[ 0 3 5]
[ 0 9 10]
[ 7 8 11]]
>>> print(out)
[[0 0 0]
[0 0 0]
[0 0 0]
[0 0 0]]
Using grid routing functionality on a whole cartesian domain:
>>> grid = BritishNationalGrid.from_extent_and_resolution(
... projection_y_coordinate_extent=(0, 1300000),
... projection_y_coordinate_resolution=325000,
... projection_x_coordinate_extent=(0, 700000),
... projection_x_coordinate_resolution=700000/3
... )
>>> values = numpy.arange(12).reshape(4, 3) + 1
>>> directions = grid.to_field()
>>> directions.set_data(numpy.array([['NE', 'N', 'E'],
... ['SE', 'E', 'S'],
... ['N', 'N', 'W'],
... ['SW', 'E', 'NW']]))
>>> grid.flow_direction = directions
>>> moved, out = grid.route(values)
>>> print(moved)
[[ 0 4 6]
[ 0 3 5]
[ 0 9 0]
[ 7 8 11]]
>>> print(out)
[[ 0 0 3]
[ 0 0 0]
[ 0 0 0]
[10 0 12]]
"""
# check whether method can be used
if self.flow_direction is None:
raise RuntimeError("method 'route' requires setting "
"property 'flow_direction'")
# check that values_to_route has the same shape as flow_direction
if not self.flow_direction.shape[:-1] == values_to_route.shape:
raise RuntimeError("shape mismatch between 'values_to_route' "
"and 'flow_direction' in 'route' method")
# initialise routed and out arrays depending on mask/no-mask
if np.ma.is_masked(self.flow_direction):
mask = self.flow_direction.mask[..., 0]
values_routed = np.ma.array(
np.zeros(values_to_route.shape, values_to_route.dtype),
mask=mask
)
values_out = np.ma.array(
np.zeros(values_to_route.shape, values_to_route.dtype),
mask=mask
)
else:
mask = None
values_routed = np.zeros(values_to_route.shape,
values_to_route.dtype)
values_out = np.zeros(values_to_route.shape,
values_to_route.dtype)
# if no mask, keep as is, if not, take logical negation of it
mask = None if mask is None else ~mask
# collect the values routed towards outside the domain
out_mask = self._routing_out_mask
if np.ma.is_masked(self.flow_direction):
values_out[out_mask & mask] = values_to_route[out_mask & mask]
else:
values_out[out_mask] = values_to_route[out_mask]
# perform the routing using the routing mask
# Y-wards movement
for j in [-1, 0, 1]:
# X-wards movement
for i in [-1, 0, 1]:
routing_mask = self._routing_masks[(j, i)]
values_routed[mask] += np.roll(
values_to_route * routing_mask,
shift=(j, i), axis=(-2, -1)
)[mask]
return values_routed, values_out
@property
def cell_area(self):
"""The horizontal area for the grid cells of the `Grid` in
square metres given as a `cf.Field` and returned as a
`numpy.ndarray`.
:Parameters:
areas: `cf.Field`
The field containing the horizontal grid cell area. The
shape of the data array must be the same as the `Grid`.
The field data must contain surface area values in
square metres.
:Returns:
`numpy.ndarray`
The array containing the horizontal grid cell area
values in square metres. If not set previously, computed
automatically.
**Examples**
>>> grid = LatLonGrid.from_extent_and_resolution(
... latitude_extent=(51, 55),
... latitude_resolution=1,
... longitude_extent=(-2, 1),
... longitude_resolution=1
... )
>>> print(grid.cell_area)
[[7.69725703e+09 7.69725703e+09 7.69725703e+09]
[7.52719193e+09 7.52719193e+09 7.52719193e+09]
[7.35483450e+09 7.35483450e+09 7.35483450e+09]
[7.18023725e+09 7.18023725e+09 7.18023725e+09]]
>>> print(grid) # doctest: +ELLIPSIS
LatLonGrid(
shape {Y, X}: (4, 3)
Y, latitude (4,): [51.5, ..., 54.5] degrees_north
X, longitude (3,): [-1.5, -0.5, 0.5] degrees_east
Y_bounds (4, 2): [[51.0, ..., 55.0]] degrees_north
X_bounds (3, 2): [[-2.0, ..., 1.0]] degrees_east
cell_area (4, 3): [[7697257030.0..., ..., 7180237253.9...]] m2
)
>>> import numpy
>>> areas = grid.to_field()
>>> areas.set_data(numpy.array([[7.70e+09, 7.70e+09, 7.70e+09],
... [7.53e+09, 7.53e+09, 7.53e+09],
... [7.35e+09, 7.35e+09, 7.35e+09],
... [7.18e+09, 7.18e+09, 7.18e+09]]))
>>> areas.units = 'm2'
>>> grid.cell_area = areas
>>> print(grid.cell_area)
[[7.70e+09 7.70e+09 7.70e+09]
[7.53e+09 7.53e+09 7.53e+09]
[7.35e+09 7.35e+09 7.35e+09]
[7.18e+09 7.18e+09 7.18e+09]]
"""
if self._cell_area is None:
self._cell_area = self._compute_cell_area()
return self._cell_area
    @cell_area.setter
    def cell_area(self, areas):
        # errors prepared upfront so both checks below raise consistent,
        # descriptive messages
        error_dim = RuntimeError(
            f"cell_area shape incompatible with {self.__class__.__name__}"
        )
        error_units = ValueError(
            "cell_area units are missing or not in square metres"
        )
        # check type
        if not isinstance(areas, cf.Field):
            raise TypeError("cell_area not a cf.Field")
        # store given field for config file
        # (stored before the in-place squeeze below, so the stored field
        # object reflects any squeeze applied here)
        self._cell_area_field = areas
        # drop potential size-1 Z axis since areas is
        # only relevant horizontally
        if areas.domain_axis(self.vertical_axis, key=True, default=False):
            areas.squeeze(self.vertical_axis, inplace=True)
        # check that mask and spacedomain are compatible
        grid = self.to_horizontal_grid()
        try:
            areas = grid.subset_and_compare(areas)
        except RuntimeError:
            raise error_dim
        # check area units
        if not (areas.data.has_units()
                and cfunits.Units('m2').equals(areas.Units)):
            raise error_units
        # get field's data array
        areas = areas.array
        self._cell_area = areas
    def _compute_cell_area(self):
        """Compute the horizontal grid cell areas in square metres via ESMF."""
        # make use of cf-python to retrieve ESMF objects
        # (a self-to-self conservative regrid operator is built purely to
        # gain access to the underlying ESMF field)
        operator = self._f.regrids(
            self._f, 'conservative', return_operator=True
        )
        # retrieve ESMF source (arbitrarily chosen) field
        esmf_field = operator.regrid.srcfield
        # let ESMF compute the cell area
        esmf_field.get_area()
        # retrieve the values and scale them from unit sphere to Earth
        # (transposed — presumably to match the grid's Y/X axis order;
        # TODO confirm against ESMF's data layout)
        area_unit_sphere = esmf_field.data.T
        area = area_unit_sphere * self._earth_radius_m ** 2
        return area
@staticmethod
def _check_dimension_limits(dimension, name, limits):
"""**Examples:**
>>> import numpy
>>> Grid._check_dimension_limits( # scalar
... numpy.array(-1.), 'test', (-2, 2))
>>> Grid._check_dimension_limits( # no wrap around
... numpy.array([-1., 0., 1., 2.]), 'test', (-2, 2))
>>> Grid._check_dimension_limits( # wrap around
... numpy.array([0.5, 1.5, -1.5]), 'test', (-2, 2))
>>> Grid._check_dimension_limits( # exceed lower limit
... numpy.array([-3., -2., -1.]), 'test', (-2, 2))
Traceback (most recent call last):
...
RuntimeError: test dimension beyond limits [-2, 2]
>>> Grid._check_dimension_limits( # exceed upper limit
... numpy.array([1., 2., 3.]), 'test', (-2, 2))
Traceback (most recent call last):
...
RuntimeError: test dimension beyond limits [-2, 2]
>>> Grid._check_dimension_limits( # wrapping around | |
= {"input": self}
if chars is not None:
ltrim["chars"] = chars
return ViewExpression({"$ltrim": ltrim})
def rstrip(self, chars=None):
"""Removes whitespace characters from the end of this expression, which
must resolve to a string.
If ``chars`` is provided, those characters are removed instead of
whitespace.
Examples::
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
dataset = foz.load_zoo_dataset("quickstart")
# Adds and then strips whitespace from the end of each tag
transform_tag = F().concat(" ").rstrip()
view = dataset.set_field("tags", F("tags").map(transform_tag))
print(dataset.distinct("tags"))
print(view.distinct("tags"))
Args:
chars (None): an optional string or :class:`ViewExpression`
resolving to a string expression specifying characters to
remove
Returns:
a :class:`ViewExpression`
"""
rtrim = {"input": self}
if chars is not None:
rtrim["chars"] = chars
return ViewExpression({"$rtrim": rtrim})
def replace(self, old, new):
"""Replaces all occurances of ``old`` with ``new`` in this expression,
which must resolve to a string.
Examples::
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
dataset = foz.load_zoo_dataset("quickstart")
# Replaces "val" with "VAL" in each tag
transform_tag = F().replace("val", "VAL")
view = dataset.set_field("tags", F("tags").map(transform_tag))
print(dataset.distinct("tags"))
print(view.distinct("tags"))
Args:
old: a string or :class:`ViewExpression` resolving to a string
expression specifying the substring to replace
new: a string or :class:`ViewExpression` resolving to a string
expression specifying the replacement value
Returns:
a :class:`ViewExpression`
"""
return ViewExpression(
{"$replaceAll": {"input": self, "find": old, "replacement": new}}
)
def re_match(self, regex, options=None):
"""Performs a regular expression pattern match on this expression,
which must resolve to a string.
The output of the expression will be ``True`` if the pattern matches
and ``False`` otherwise.
Examples::
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
dataset = foz.load_zoo_dataset("quickstart")
#
# Get samples whose images are JPEGs
#
view = dataset.match(F("filepath").re_match("\\.jpg$"))
print(view.count())
print(view.first().filepath)
#
# Get samples whose images are in the "/Users" directory
#
view = dataset.match(F("filepath").re_match("^/Users/"))
print(view.count())
print(view.first().filepath)
Args:
regex: the regular expression to apply. Must be a Perl Compatible
Regular Expression (PCRE). See
`this page <https://docs.mongodb.com/manual/reference/operator/aggregation/regexMatch/#regexmatch-regex>`__
for details
options (None): an optional string of regex options to apply. See
`this page <https://docs.mongodb.com/manual/reference/operator/aggregation/regexMatch/#regexmatch-options>`__
for the available options
Returns:
a :class:`ViewExpression`
"""
return ViewExpression(
{
"$regexMatch": {
"input": self,
"regex": regex,
"options": options,
}
}
)
def starts_with(self, str_or_strs, case_sensitive=True):
"""Determines whether this expression, which must resolve to a string,
starts with the given string or string(s).
Examples::
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
dataset = foz.load_zoo_dataset("quickstart")
# Get samples whose images are in "/Users" or "/home" directories
view = dataset.match(F("filepath").starts_with(("/Users", "/home"))
print(view.count())
print(view.first().filepath)
Args:
str_or_strs: a string or iterable of strings
case_sensitive (True): whether to perform a case sensitive match
Returns:
a :class:`ViewExpression`
"""
str_or_strs = _escape_regex_chars(str_or_strs)
if etau.is_str(str_or_strs):
regex = "^" + str_or_strs
else:
regex = "^(%s)" % ("|".join(str_or_strs))
options = None if case_sensitive else "i"
return self.re_match(regex, options=options)
def ends_with(self, str_or_strs, case_sensitive=True):
"""Determines whether this expression, which must resolve to a string,
ends with the given string or string(s).
Examples::
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
dataset = foz.load_zoo_dataset("quickstart")
# Get samples whose images are JPEGs or PNGs
view = dataset.match(F("filepath").ends_with((".jpg", ".png")))
print(view.count())
print(view.first().filepath)
Args:
str_or_strs: a string or iterable of strings
case_sensitive (True): whether to perform a case sensitive match
Returns:
a :class:`ViewExpression`
"""
str_or_strs = _escape_regex_chars(str_or_strs)
if etau.is_str(str_or_strs):
regex = str_or_strs + "$"
else:
regex = "(%s)$" % ("|".join(str_or_strs))
options = None if case_sensitive else "i"
return self.re_match(regex, options=options)
def contains_str(self, str_or_strs, case_sensitive=True):
"""Determines whether this expression, which must resolve to a string,
contains the given string or string(s).
Examples::
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
dataset = foz.load_zoo_dataset("quickstart")
# Only contains predictions whose `label` contains "be"
view = dataset.filter_labels(
"predictions", F("label").contains_str("be")
)
print(view.distinct("predictions.detections.label"))
Args:
str_or_strs: a string or iterable of strings
case_sensitive (True): whether to perform a case sensitive match
Returns:
a :class:`ViewExpression`
"""
str_or_strs = _escape_regex_chars(str_or_strs)
if etau.is_str(str_or_strs):
regex = str_or_strs
else:
regex = "(%s)" % ("|".join(str_or_strs))
options = None if case_sensitive else "i"
return self.re_match(regex, options=options)
def matches_str(self, str_or_strs, case_sensitive=True):
"""Determines whether this expression, which must resolve to a string,
exactly matches the given string or string(s).
Examples::
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
dataset = foz.load_zoo_dataset("quickstart")
# Only contains predictions whose `label` is "cat" or "dog", case
# insensitive
view = dataset.map_labels(
"predictions", {"cat": "CAT", "dog": "DOG"}
).filter_labels(
"predictions",
F("label").matches_str(("cat", "dog"), case_sensitive=False)
)
print(view.distinct("predictions.detections.label"))
Args:
str_or_strs: a string or iterable of strings
case_sensitive (True): whether to perform a case sensitive match
Returns:
a :class:`ViewExpression`
"""
str_or_strs = _escape_regex_chars(str_or_strs)
if etau.is_str(str_or_strs):
regex = "^" + str_or_strs + "$"
else:
regex = "^(%s)$" % ("|".join(str_or_strs))
options = None if case_sensitive else "i"
return self.re_match(regex, options=options)
def split(self, delimiter, maxsplit=None):
"""Splits this expression, which must resolve to a string, by the given
delimiter.
The result is a string array that contains the chunks with the
delimiter removed. If the delimiter is not found, this full string is
returned as a single element array.
Examples::
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
dataset = foz.load_zoo_dataset("quickstart")
# Add "-good" to the first tag and then split on "-" to create two
# tags for each sample
view = dataset.set_field(
"tags", F("tags")[0].concat("-good").split("-")
)
print(view.first().tags)
Args:
delimiter: the delimiter string or :class:`ViewExpression`
resolving to a string expression
maxsplit (None): a maximum number of splits to perform, from the
left
Returns:
a :class:`ViewExpression`
"""
split_expr = ViewExpression({"$split": [self, delimiter]})
if maxsplit is None:
return split_expr
if maxsplit <= 0:
return ViewExpression([self])
# pylint: disable=invalid-unary-operand-type
maxsplit_expr = (split_expr.length() > maxsplit + 1).if_else(
split_expr[:maxsplit].append(
split_expr[maxsplit:].join(delimiter)
),
split_expr,
)
return split_expr.let_in(maxsplit_expr)
def rsplit(self, delimiter, maxsplit=None):
"""Splits this expression, which must resolve to a string, by the given
delimiter.
If the number of chunks exceeds ``maxsplit``, splits are only performed
on the last ``maxsplit`` occurances of the delimiter.
The result is a string array that contains the chunks with the
delimiter removed. If the delimiter is not found, this full string is
returned as a single element array.
Examples::
import fiftyone as fo
import fiftyone.zoo as foz
from fiftyone import ViewField as F
dataset = foz.load_zoo_dataset("quickstart")
# Add "-ok-go" to the first tag and then split once on "-" from the
# right to create two tags for each sample
view = dataset.set_field(
"tags", F("tags")[0].concat("-ok-go").rsplit("-", 1)
)
print(view.first().tags)
Args:
delimiter: the delimiter string or :class:`ViewExpression`
resolving to a string expression
maxsplit (None): a maximum number of splits to perform, from the
right
Returns:
a :class:`ViewExpression`
"""
split_expr = ViewExpression({"$split": [self, delimiter]})
if maxsplit is None:
return split_expr
if maxsplit <= 0:
return ViewExpression([self])
# pylint: disable=invalid-unary-operand-type
maxsplit_expr = (split_expr.length() > maxsplit + 1).if_else(
split_expr[-maxsplit:].prepend(
split_expr[:-maxsplit].join(delimiter)
),
split_expr,
)
return split_expr.let_in(maxsplit_expr)
# Date expression operators ###############################################
def millisecond(self):
"""Returns the millisecond portion of this date expression (in UTC) as
an integer between 0 and 999.
Examples::
from datetime import datetime
import fiftyone as fo
from fiftyone import ViewField as F
samples = [
fo.Sample(
filepath="image1.jpg",
created_at=datetime(1970, 1, 1, 0, 0, 0, 1000),
),
fo.Sample(
filepath="image1.jpg",
created_at=datetime(1970, 1, 1, 0, 0, 0, 2000),
),
fo.Sample(
filepath="image1.jpg",
created_at=datetime(1970, 1, 1, 0, 0, 0, 3000),
),
fo.Sample(
filepath="image1.jpg",
created_at=datetime(1970, 1, 1, 0, 0, 0, 4000),
),
]
dataset = fo.Dataset()
dataset.add_samples(samples)
# Get the millisecond portion of the dates in the dataset
print(dataset.values(F("created_at").millisecond()))
# Samples with even milliseconds
view = dataset.match(F("created_at").millisecond() % 2 == 0)
print(len(view))
Returns:
a :class:`ViewExpression`
"""
return ViewExpression({"$millisecond": self})
def second(self):
"""Returns the second portion of this date expression (in UTC) as a
number between 0 and 59.
Examples::
from datetime import datetime
import fiftyone as fo
from fiftyone import ViewField as F
samples = [
fo.Sample(
filepath="image1.jpg",
created_at=datetime(1970, 1, 1, 0, 0, 1),
),
fo.Sample(
filepath="image1.jpg",
created_at=datetime(1970, 1, 1, 0, | |
/ (unext[0] - upper[ui][0])
yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
h2 = yupper - lnext[1]
li += 1
else:
xnext = unext[0]
frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
h2 = unext[1] - ylower
ui += 1
da = (xnext - x) * (h1 + h2) / 2.0
total_area += da
areas.append((x, xnext, h1, h2, da, total_area))
x = xnext
h1 = h2
size = total_area / npieces
cuts = []
return h2
def func_bff075a6e6374cc88e08efd3d97e6772(npieces, upper, lower):
    """Sweep left-to-right across the region bounded by the ``upper`` and
    ``lower`` polylines, accumulating trapezoid areas, and return the final
    upper-vertex index.

    Args:
        npieces: number of equal-area pieces (only used to compute the
            per-piece size)
        upper: (x, y) vertices of the upper boundary, in increasing x
        lower: (x, y) vertices of the lower boundary, in increasing x;
            assumed to span the same x-range as ``upper``

    Returns:
        the index of the last upper vertex consumed by the sweep
    """
    areas = []
    total_area = 0
    x = 0
    # BUG FIX: the vertex indices were never initialized, so the first loop
    # iteration raised NameError; both sweeps start at vertex 0
    li = 0  # index of the current lower vertex
    ui = 0  # index of the current upper vertex
    h1 = upper[0][1] - lower[0][1]  # height of the region at the left edge
    W = lower[-1][0]  # x-coordinate of the right edge
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # both boundaries have a vertex at the same x
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < unext[0]:
            # lower vertex comes first; interpolate the upper boundary
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # upper vertex comes first; interpolate the lower boundary
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # trapezoid spanning [x, xnext] with parallel sides h1 and h2
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces  # per-piece target area (unused here)
    cuts = []
    return ui
def func_a6099a9b3bed4200bd17137ed68a9cf3(npieces, upper, lower):
    """Sweep left-to-right across the region bounded by the ``upper`` and
    ``lower`` polylines, accumulating trapezoid areas, and return the final
    lower-vertex index.

    Args:
        npieces: number of equal-area pieces (only used to compute the
            per-piece size)
        upper: (x, y) vertices of the upper boundary, in increasing x
        lower: (x, y) vertices of the lower boundary, in increasing x;
            assumed to span the same x-range as ``upper``

    Returns:
        the index of the last lower vertex consumed by the sweep
    """
    areas = []
    total_area = 0
    x = 0
    # BUG FIX: the vertex indices were never initialized, so the first loop
    # iteration raised NameError; both sweeps start at vertex 0
    li = 0  # index of the current lower vertex
    ui = 0  # index of the current upper vertex
    h1 = upper[0][1] - lower[0][1]  # height of the region at the left edge
    W = lower[-1][0]  # x-coordinate of the right edge
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # both boundaries have a vertex at the same x
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < unext[0]:
            # lower vertex comes first; interpolate the upper boundary
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # upper vertex comes first; interpolate the lower boundary
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # trapezoid spanning [x, xnext] with parallel sides h1 and h2
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces  # per-piece target area (unused here)
    cuts = []
    return li
def func_69480b960e564a3ba04045021904143f(npieces, upper, lower):
    """Sweep left-to-right across the region bounded by the ``upper`` and
    ``lower`` polylines, accumulating trapezoid areas, and return the last
    interpolation fraction computed.

    Args:
        npieces: number of equal-area pieces (only used to compute the
            per-piece size)
        upper: (x, y) vertices of the upper boundary, in increasing x
        lower: (x, y) vertices of the lower boundary, in increasing x;
            assumed to span the same x-range as ``upper``

    Returns:
        the last interpolation fraction computed by the sweep; raises
        ``NameError`` if no interpolation branch ever ran (i.e. every pair
        of boundary vertices is x-aligned)
    """
    areas = []
    total_area = 0
    x = 0
    # BUG FIX: the vertex indices were never initialized, so the first loop
    # iteration raised NameError; both sweeps start at vertex 0
    li = 0  # index of the current lower vertex
    ui = 0  # index of the current upper vertex
    h1 = upper[0][1] - lower[0][1]  # height of the region at the left edge
    W = lower[-1][0]  # x-coordinate of the right edge
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # both boundaries have a vertex at the same x
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < unext[0]:
            # lower vertex comes first; interpolate the upper boundary
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # upper vertex comes first; interpolate the lower boundary
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # trapezoid spanning [x, xnext] with parallel sides h1 and h2
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces  # per-piece target area (unused here)
    cuts = []
    return frac
def func_cae3aeab64b34d2980ae698be4743d9b(npieces, upper, lower):
    """Sweep left-to-right across the region bounded by the ``upper`` and
    ``lower`` polylines, accumulating trapezoid areas, and return the last
    lower vertex examined.

    Args:
        npieces: number of equal-area pieces (only used to compute the
            per-piece size)
        upper: (x, y) vertices of the upper boundary, in increasing x
        lower: (x, y) vertices of the lower boundary, in increasing x;
            assumed to span the same x-range as ``upper``

    Returns:
        the last ``(x, y)`` lower vertex examined; raises ``NameError`` if
        the sweep loop never ran (degenerate zero-width region)
    """
    areas = []
    total_area = 0
    x = 0
    # BUG FIX: the vertex indices were never initialized, so the first loop
    # iteration raised NameError; both sweeps start at vertex 0
    li = 0  # index of the current lower vertex
    ui = 0  # index of the current upper vertex
    h1 = upper[0][1] - lower[0][1]  # height of the region at the left edge
    W = lower[-1][0]  # x-coordinate of the right edge
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # both boundaries have a vertex at the same x
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < unext[0]:
            # lower vertex comes first; interpolate the upper boundary
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # upper vertex comes first; interpolate the lower boundary
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # trapezoid spanning [x, xnext] with parallel sides h1 and h2
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces  # per-piece target area (unused here)
    cuts = []
    return lnext
def func_5953d2922c004be5892a47266c9f4973(npieces, upper, lower):
    """Sweep left-to-right across the region bounded by the ``upper`` and
    ``lower`` polylines, accumulating trapezoid areas, and return the list
    of per-strip records.

    Args:
        npieces: number of equal-area pieces (only used to compute the
            per-piece size)
        upper: (x, y) vertices of the upper boundary, in increasing x
        lower: (x, y) vertices of the lower boundary, in increasing x;
            assumed to span the same x-range as ``upper``

    Returns:
        a list of ``(x, xnext, h1, h2, da, running_total)`` tuples, one per
        trapezoidal strip of the sweep
    """
    areas = []
    total_area = 0
    x = 0
    # BUG FIX: the vertex indices were never initialized, so the first loop
    # iteration raised NameError; both sweeps start at vertex 0
    li = 0  # index of the current lower vertex
    ui = 0  # index of the current upper vertex
    h1 = upper[0][1] - lower[0][1]  # height of the region at the left edge
    W = lower[-1][0]  # x-coordinate of the right edge
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # both boundaries have a vertex at the same x
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < unext[0]:
            # lower vertex comes first; interpolate the upper boundary
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # upper vertex comes first; interpolate the lower boundary
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # trapezoid spanning [x, xnext] with parallel sides h1 and h2
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces  # per-piece target area (unused here)
    cuts = []
    return areas
def func_6e9ba9ba2ca045b38db23f98d244dff0(npieces, upper, lower):
    """Sweep left-to-right across the region bounded by the ``upper`` and
    ``lower`` polylines, accumulating trapezoid areas, and return the total
    area.

    Args:
        npieces: number of equal-area pieces (only used to compute the
            per-piece size)
        upper: (x, y) vertices of the upper boundary, in increasing x
        lower: (x, y) vertices of the lower boundary, in increasing x;
            assumed to span the same x-range as ``upper``

    Returns:
        the total area between the two polylines
    """
    areas = []
    total_area = 0
    x = 0
    # BUG FIX: the vertex indices were never initialized, so the first loop
    # iteration raised NameError; both sweeps start at vertex 0
    li = 0  # index of the current lower vertex
    ui = 0  # index of the current upper vertex
    h1 = upper[0][1] - lower[0][1]  # height of the region at the left edge
    W = lower[-1][0]  # x-coordinate of the right edge
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # both boundaries have a vertex at the same x
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < unext[0]:
            # lower vertex comes first; interpolate the upper boundary
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # upper vertex comes first; interpolate the lower boundary
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # trapezoid spanning [x, xnext] with parallel sides h1 and h2
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces  # per-piece target area (unused here)
    cuts = []
    return total_area
def func_a99c084250984caf94d4356f8090957b(npieces, upper, lower):
    """Sweep left-to-right across the region bounded by the ``upper`` and
    ``lower`` polylines, accumulating trapezoid areas, and return the (empty)
    list of cuts.

    Args:
        npieces: number of equal-area pieces (only used to compute the
            per-piece size)
        upper: (x, y) vertices of the upper boundary, in increasing x
        lower: (x, y) vertices of the lower boundary, in increasing x;
            assumed to span the same x-range as ``upper``

    Returns:
        the ``cuts`` list, which this variant never populates (always ``[]``)
    """
    areas = []
    total_area = 0
    x = 0
    # BUG FIX: the vertex indices were never initialized, so the first loop
    # iteration raised NameError; both sweeps start at vertex 0
    li = 0  # index of the current lower vertex
    ui = 0  # index of the current upper vertex
    h1 = upper[0][1] - lower[0][1]  # height of the region at the left edge
    W = lower[-1][0]  # x-coordinate of the right edge
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # both boundaries have a vertex at the same x
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < unext[0]:
            # lower vertex comes first; interpolate the upper boundary
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # upper vertex comes first; interpolate the lower boundary
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # trapezoid spanning [x, xnext] with parallel sides h1 and h2
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces  # per-piece target area (unused here)
    cuts = []
    return cuts
def func_ce06e0e228194f229bf5a2c445772173(npieces, upper, lower):
areas = []
total_area = 0
x = 0
h1 = upper[0][1] - lower[0][1]
W = lower[-1][0]
while x < W:
lnext = lower[li + 1]
unext = upper[ui + 1]
if lnext[0] == unext[0]:
xnext = lnext[0]
h2 = unext[1] - lnext[1]
li += 1
ui += 1
elif lnext[0] < upper[ui + 1][0]:
xnext = lnext[0]
frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
h2 = yupper - lnext[1]
li += 1
else:
xnext = | |
<filename>test/tneanet.py
import random
import sys
import snap
def PrintGStats(s, Graph):
    '''
    Print graph statistics

    Prints a one-line summary of the graph: label, node count, edge count,
    and whether the graph is empty.
    '''
    empty = "yes" if Graph.Empty() else "no"
    print("graph %s, nodes %d, edges %d, empty %s" % (
        s, Graph.GetNodes(), Graph.GetEdges(), empty))
def DefaultConstructor():
    '''
    Test the default constructor

    Creates an empty TNEANet and prints its statistics.
    '''
    graph = snap.TNEANet.New()
    PrintGStats("DefaultConstructor:Graph", graph)
def ManipulateNodesEdges():
    '''
    Test node, edge creation

    Builds a TNEANet with 10k nodes and 100k random distinct edges, counts
    nodes/edges via iterators, round-trips the graph through TFOut/TFIn on
    disk, then deletes all nodes and clears the aliased handle.
    '''
    NNodes = 10000
    NEdges = 100000
    FName = "test.graph"
    Graph = snap.TNEANet.New()
    # exercise Empty()/GetNodes() return values; results are not asserted
    t = Graph.Empty()
    # create the nodes
    for i in range(0, NNodes):
        Graph.AddNode(i)
    t = Graph.Empty()
    n = Graph.GetNodes()
    # create random edges
    NCount = NEdges
    while NCount > 0:
        x = int(random.random() * NNodes)
        y = int(random.random() * NNodes)
        # skip the loops in this test
        if x != y and not Graph.IsEdge(x,y):
            n = Graph.AddEdge(x, y)
            NCount -= 1
    PrintGStats("ManipulateNodesEdges:Graph1", Graph)
    # get all the nodes
    NCount = 0
    NI = Graph.BegNI()
    while NI < Graph.EndNI():
        NCount += 1
        NI.Next()
    # get all the edges for all the nodes
    ECount1 = 0
    NI = Graph.BegNI()
    while NI < Graph.EndNI():
        ECount1 += NI.GetOutDeg()
        NI.Next()
    # NOTE(review): `/` is true (float) division in Python 3; the value is
    # printed with %d below, which converts — confirm halving is intended
    # for this directed-graph out-degree sum
    ECount1 = ECount1 / 2
    # get all the edges directly
    ECount2 = 0
    EI = Graph.BegEI()
    while EI < Graph.EndEI():
        ECount2 += 1
        EI.Next()
    print("graph ManipulateNodesEdges:Graph2, nodes %d, edges1 %d, edges2 %d"\
        % (NCount, ECount1, ECount2))
    # assignment
    # NOTE(review): this aliases the same graph object, not a copy —
    # Graph1.Clr() below therefore clears Graph too
    Graph1 = Graph
    PrintGStats("ManipulateNodesEdges:Graph3", Graph1)
    # save the graph
    print("graph type = ", type(Graph))
    #FOut = TFOut(TStr(FName))
    FOut = snap.TFOut(FName)
    Graph.Save(FOut)
    FOut.Flush()
    # load the graph
    #FIn = TFIn(TStr(FName))
    FIn = snap.TFIn(FName)
    Graph2 = snap.TNEANet(FIn)
    PrintGStats("ManipulateNodesEdges:Graph4" , Graph2)
    # remove all the nodes and edges
    for i in range(0, NNodes):
        n = Graph.GetRndNId()
        Graph.DelNode(n)
    PrintGStats("ManipulateNodesEdges:Graph5" , Graph)
    Graph1.Clr()
    PrintGStats("ManipulateNodesEdges:Graph6" , Graph1)
def ManipulateAttributesId():
'''
Test node, edge attribute functionality using Ids
'''
NNodes = 1000
NEdges = 1000
Graph = snap.TNEANet.New()
t = Graph.Empty()
# create the nodes
for i in range(0, NNodes):
Graph.AddNode(i)
t = Graph.Empty()
n = Graph.GetNodes()
# create random edges
NCount = NEdges
while NCount > 0:
x = int(random.random() * NNodes)
y = int(random.random() * NNodes)
# skip the loops in this test
if x != y and not Graph.IsEdge(x,y):
n = Graph.AddEdge(x, y)
NCount -= 1
print("Added nodes")
# create attributes and fill all nodes
#attr1 = TStr("str")
#attr2 = TStr("int")
#attr3 = TStr("float")
#attr4 = TStr("default")
attr1 = "STR"
attr2 = "INT"
attr3 = "FLOAT"
attr4 = "DEFAULT"
# Test column int iterator for node 3, 50, 700, 900
# Check if we can set defaults to 0 fordata.
Graph.AddIntAttrN(attr2, 0)
Graph.AddIntAttrDatN(3, 3*2, attr2)
Graph.AddIntAttrDatN(50, 50*2, attr2)
Graph.AddIntAttrDatN(700, 700*2, attr2)
Graph.AddIntAttrDatN(900, 900*2, attr2)
print("Added attributes")
NodeId = 0
NI = Graph.BegNAIntI(attr2)
while NI < Graph.EndNAIntI(attr2):
if NI.GetDat() != 0:
print("Attribute1: %s, Node: %i, Val: %d" % (attr2, NodeId, NI.GetDat()))
#print "Attribute: %s, Node: %i, Val: %d" % (attr2(), NodeId, NI.GetDat())
NodeId += 1
NI.Next()
# Test column flt iterator for node 3, 50, 700, 900
Graph.AddFltAttrDatN(5, 3.41, attr3)
Graph.AddFltAttrDatN(50, 2.718, attr3)
Graph.AddFltAttrDatN(300, 150.0, attr3)
Graph.AddFltAttrDatN(653, 653, attr3)
NodeId = 0
NCount = 0
NI = Graph.BegNI()
while NI < Graph.EndNI():
NCount += 1
NI.Next()
NI = Graph.BegNAFltI(attr3)
NodeId = 0
while NI < Graph.EndNAFltI(attr3):
if NI.GetDat() != snap.TFlt.Mn:
print("Attribute2: %s, Node: %i, Val: %f" % (attr3, NodeId, NI.GetDat()))
#print "Attribute: %s, Node: %i, Val: %f" % (attr3(), NodeId, NI.GetDat())
NodeId += 1
NI.Next()
# Test column str iterator for node 3, 50, 700, 900
#Graph.AddStrAttrDatN(10, TStr("abc"), attr1)
#Graph.AddStrAttrDatN(20, TStr("def"), attr1)
#Graph.AddStrAttrDatN(400, TStr("ghi"), attr1)
Graph.AddStrAttrDatN(10, "abc", attr1)
Graph.AddStrAttrDatN(20, "def", attr1)
Graph.AddStrAttrDatN(400, "ghi", attr1)
# this does not show since ""=null
#Graph.AddStrAttrDatN(455, TStr(""), attr1)
# TODO Graph.AddStrAttrDatN(455, "", attr1)
NodeId = 0
NI = Graph.BegNAStrI(attr1)
NodeId = 0
while NI < Graph.EndNAStrI(attr1):
if NI.GetDat() != snap.TStr.GetNullStr():
print("Attribute3: %s, Node: %i, Val: %s" % (attr1, NodeId, NI.GetDat()))
#print "Attribute: %s, Node: %i, Val: %s" % (attr1(), NodeId, NI.GetDat())
NodeId += 1
NI.Next()
# Test column iterator over many types (must skip default/deleted attr)
NId = 55
#Graph.AddStrAttrDatN(NId, TStr("aaa"), attr1)
Graph.AddStrAttrDatN(NId, "aaa", attr1)
Graph.AddIntAttrDatN(NId, 3*2, attr2)
Graph.AddFltAttrDatN(NId, 3.41, attr3)
#Graph.AddStrAttrDatN(80, TStr("dont appear"), attr4) # should not show up
Graph.AddStrAttrDatN(80, "dont appear", attr4) # should not show up
attr1idx = Graph.GetAttrIndN(attr1)
attr2idx = Graph.GetAttrIndN(attr2)
attr3idx = Graph.GetAttrIndN(attr3)
attr4idx = Graph.GetAttrIndN(attr4)
print("Node attribute indexes: %s %d, %s %d, %s %d, %s %d" % (
attr1, attr1idx, attr2, attr2idx, attr3, attr3idx, attr4, attr4idx))
print("NId attributes: %i, %s %d %.2f" % (
NId,
Graph.GetStrAttrDatN(NId, attr1),
Graph.GetIntAttrDatN(NId, attr2),
Graph.GetFltAttrDatN(NId, attr3)))
print("ind attributes: %i, %s %d %.2f" % (
NId,
Graph.GetStrAttrIndDatN(NId, attr1idx),
Graph.GetIntAttrIndDatN(NId, attr2idx),
Graph.GetFltAttrIndDatN(NId, attr3idx)))
NIdAttrName = snap.TStrV()
NIdAttrValue = snap.TStrV()
NIdIntAttrValue = snap.TIntV()
NIdFltAttrValue = snap.TFltV()
NIdStrAttrValue = snap.TStrV()
Graph.AttrNameNI(NId, NIdAttrName)
AttrLen = NIdAttrName.Len()
for i in range(AttrLen):
print("Vertical Node1: %i, Attr: %s" % (NId, NIdAttrName.GetI(i)()))
Graph.IntAttrNameNI(NId, NIdAttrName)
AttrLen = NIdAttrName.Len()
for i in range(AttrLen):
print("Vertical Node11 (int): %i, Attr: %s" % (NId, NIdAttrName.GetI(i)()))
Graph.FltAttrNameNI(NId, NIdAttrName)
AttrLen = NIdAttrName.Len()
for i in range(AttrLen):
print("Vertical Node12 (flt): %i, Attr: %s" % (NId, NIdAttrName.GetI(i)()))
Graph.StrAttrNameNI(NId, NIdAttrName)
AttrLen = NIdAttrName.Len()
for i in range(AttrLen):
print("Vertical Node13 (str): %i, Attr: %s" % (NId, NIdAttrName.GetI(i)()))
Graph.IntAttrValueNI(NId, NIdIntAttrValue)
AttrLen = NIdIntAttrValue.Len()
for i in range(AttrLen):
print("Vertical Node14 (int): %i, Attr_Val: %d" % (NId, NIdIntAttrValue.GetI(i)()))
Graph.FltAttrValueNI(NId, NIdFltAttrValue)
AttrLen = NIdFltAttrValue.Len()
for i in range(AttrLen):
print("Vertical Node15 (flt): %i, Attr_Val: %.2f" % (NId, NIdFltAttrValue.GetI(i)()))
Graph.StrAttrValueNI(NId, NIdStrAttrValue)
AttrLen = NIdStrAttrValue.Len()
for i in range(AttrLen):
print("Vertical Node16 (str): %i, Attr_Val: %s" % (NId, NIdStrAttrValue.GetI(i)()))
print("DeletedN: %i, Attr: %s, %s" % (NId, attr1, Graph.IsAttrDeletedN(NId, attr1)))
print("DeletedN: %i, Attr: %s, %s" % (NId, attr2, Graph.IsAttrDeletedN(NId, attr2)))
print("DeletedN: %i, Attr: %s, %s" % (NId, attr3, Graph.IsAttrDeletedN(NId, attr3)))
print("DeletedN: %i, Attr: %s, %s" % (NId, attr4, Graph.IsAttrDeletedN(NId, attr4)))
print("DeletedN (str): %i, Attr: %s, %s" % (NId, attr1, Graph.IsStrAttrDeletedN(NId, attr1)))
print("DeletedN (int): %i, Attr: %s, %s" % (NId, attr2, Graph.IsIntAttrDeletedN(NId, attr2)))
print("DeletedN (flt): %i, Attr: %s, %s" % (NId, attr3, Graph.IsFltAttrDeletedN(NId, attr3)))
print("DeletedN (str): %i, Attr: %s, %s" % (NId, attr4, Graph.IsStrAttrDeletedN(NId, attr4)))
print("DeletedN (int): %i, Attr: %s, %s" % (NId, attr4, Graph.IsIntAttrDeletedN(NId, attr4)))
print("DeletedN (flt): %i, Attr: %s, %s" % (NId, attr4, Graph.IsFltAttrDeletedN(NId, attr4)))
Graph.DelAttrDatN(NId, attr2)
print("DeletedN: %i, Attr: %s, %s" % (NId, attr2, Graph.IsAttrDeletedN(NId, attr2)))
print("DeletedN (int): %i, Attr: %s, %s" % (NId, attr2, Graph.IsIntAttrDeletedN(NId, attr2)))
Graph.AttrNameNI(NId, NIdAttrName)
# NOTE(review): indentation was lost in this chunk; loop bodies below are
# reconstructed conventionally — confirm against the original file.
# Re-list node NId's attribute names after the int attribute was added.
AttrLen = NIdAttrName.Len()
for i in range(AttrLen):
    print("Vertical Node2 (no int): %i, Attr: %s" % (NId, NIdAttrName.GetI(i)()))
# Attach an int attribute value (3*2) to node NId, then delete attr1
# (presumably the string attribute — the next scan is labeled "no str").
Graph.AddIntAttrDatN(NId, 3*2, attr2)
Graph.DelAttrN(attr1)
# Refresh the attribute-name vector for NId and print what remains.
Graph.AttrNameNI(NId, NIdAttrName)
AttrLen = NIdAttrName.Len()
for i in range(AttrLen):
    print("Vertical Node3 (no str): %i, Attr: %s" % (NId, NIdAttrName.GetI(i)()))
# Also dump the attribute *values* for NId.
Graph.AttrValueNI(NId, NIdAttrValue)
AttrLen = NIdAttrValue.Len()
for i in range(AttrLen):
    print("Vertical Node4 (no str): %i, Attr_Val: %s" % (NId, NIdAttrValue.GetI(i)()))
# Give every node the same int attribute value, then verify via the
# column (per-attribute) iterator that the average comes back as 70.
for i in range(NNodes):
    Graph.AddIntAttrDatN(i, 70, attr2)
total = 0
NI = Graph.BegNAIntI(attr2)
while NI < Graph.EndNAIntI(attr2):
    total += NI.GetDat()
    NI.Next()
print("Average: %i (should be 70)" % (total/NNodes))
if total/NNodes != 70:
    print("*** Error1")
# Test column iterator for edge
Graph.AddIntAttrDatE(3, 3*2, attr2)
Graph.AddIntAttrDatE(55, 55*2, attr2)
Graph.AddIntAttrDatE(705, 705*2, attr2)
Graph.AddIntAttrDatE(905, 905*2, attr2)
EdgeId = 0
EI = Graph.BegEAIntI(attr2)
while EI < Graph.EndEAIntI(attr2):
    # snap.TInt.Mn is the int "null"/default sentinel; skip unset edges.
    if EI.GetDat() != snap.TInt.Mn:
        print("E Attribute1: %s, Edge: %i, Val: %i" % (
            attr2, EdgeId, EI.GetDat()))
        #% (attr2(), EdgeId, EI.GetDat())
    EdgeId += 1
    EI.Next()
# Test column flt iterator for edge
# Declare a float edge attribute with default 0.0, then set four edges.
Graph.AddFltAttrE(attr3, 0.00)
Graph.AddFltAttrDatE(5, 4.41, attr3)
Graph.AddFltAttrDatE(50, 3.718, attr3)
Graph.AddFltAttrDatE(300, 151.0, attr3)
Graph.AddFltAttrDatE(653, 654, attr3)
EdgeId = 0
EI = Graph.BegEAFltI(attr3)
while EI < Graph.EndEAFltI(attr3):
    # Check if defaults are set to 0.
    if EI.GetDat() != 0:
        print("E Attribute2: %s, Edge: %i, Val: %f" % (
            attr3, EdgeId, EI.GetDat()))
        #(attr3(), EdgeId, EI.GetDat())
    EdgeId += 1
    EI.Next()
# Test column str iterator for edge
#Graph.AddStrAttrDatE(10, TStr("abc"), attr1)
#Graph.AddStrAttrDatE(20, TStr("def"), attr1)
#Graph.AddStrAttrDatE(400, TStr("ghi"), attr1)
Graph.AddStrAttrDatE(10, "abc", attr1)
Graph.AddStrAttrDatE(20, "def", attr1)
Graph.AddStrAttrDatE(400, "ghi", attr1)
# this does not show since ""=null
#Graph.AddStrAttrDatE(455, TStr(""), attr1)
# TODO Graph.AddStrAttrDatE(455, "", attr1)
EdgeId = 0
EI = Graph.BegEAStrI(attr1)
while EI < Graph.EndEAStrI(attr1):
if EI.GetDat() != snap.TStr.GetNullStr():
print("E Attribute3: %s, Edge: %i, Val: %s" % (
attr1, EdgeId, EI.GetDat()))
#(attr1(), EdgeId, EI.GetDat())
| |
help='key: id of group')
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner group-planner-plan list-task') as c:
c.argument('group_id', type=str, help='key: id of group')
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner group-planner-plan show-bucket') as c:
c.argument('group_id', type=str, help='key: id of group')
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner group-planner-plan show-detail') as c:
c.argument('group_id', type=str, help='key: id of group')
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner group-planner-plan show-task') as c:
c.argument('group_id', type=str, help='key: id of group')
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('planner group-planner-plan update-bucket') as c:
c.argument('group_id', type=str, help='key: id of group')
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('name', type=str, help='Name of the bucket.')
c.argument('order_hint', type=str, help='Hint used to order items of this type in a list view. The format is '
'defined as outlined here.')
c.argument('plan_id', type=str, help='Plan ID to which the bucket belongs.')
c.argument('tasks', type=validate_file_or_dict, help='Read-only. Nullable. The collection of tasks in the '
'bucket. Expected value: json-string/@json-file.')
with self.argument_context('planner group-planner-plan update-detail') as c:
c.argument('group_id', type=str, help='key: id of group')
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('category_descriptions', action=AddCategoryDescriptions, nargs='+',
help='plannerCategoryDescriptions')
c.argument('shared_with', type=validate_file_or_dict, help='plannerUserIds Expected value: '
'json-string/@json-file.')
with self.argument_context('planner group-planner-plan update-task') as c:
c.argument('group_id', type=str, help='key: id of group')
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_task_id', type=str, help='key: id of plannerTask')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('active_checklist_item_count', type=int, help='Number of checklist items with value set to false, '
'representing incomplete items.')
c.argument('applied_categories', type=validate_file_or_dict, help='plannerAppliedCategories Expected value: '
'json-string/@json-file.')
c.argument('assignee_priority', type=str, help='Hint used to order items of this type in a list view. The '
'format is defined as outlined here.')
c.argument('assignments', type=validate_file_or_dict, help='plannerAssignments Expected value: '
'json-string/@json-file.')
c.argument('bucket_id', type=str, help='Bucket ID to which the task belongs. The bucket needs to be in the '
'plan that the task is in. It is 28 characters long and case-sensitive. Format validation is done '
'on the service.')
c.argument('checklist_item_count', type=int, help='Number of checklist items that are present on the task.')
c.argument('completed_date_time', help='Read-only. Date and time at which the \'percentComplete\' of the task '
'is set to \'100\'. The Timestamp type represents date and time information using ISO 8601 format '
'and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('conversation_thread_id', type=str, help='Thread ID of the conversation on the task. This is the ID '
'of the conversation thread object created in the group.')
c.argument('created_date_time', help='Read-only. Date and time at which the task is created. The Timestamp '
'type represents date and time information using ISO 8601 format and is always in UTC time. For '
'example, midnight UTC on Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('due_date_time', help='Date and time at which the task is due. The Timestamp type represents date '
'and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on '
'Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('has_description', arg_type=get_three_state_flag(), help='Read-only. Value is true if the details '
'object of the task has a non-empty description and false otherwise.')
c.argument('order_hint', type=str, help='Hint used to order items of this type in a list view. The format is '
'defined as outlined here.')
c.argument('percent_complete', type=int, help='Percentage of task completion. When set to 100, the task is '
'considered completed.')
c.argument('plan_id', type=str, help='Plan ID to which the task belongs.')
c.argument('preview_type', arg_type=get_enum_type(['automatic', 'noPreview', 'checklist', 'description',
'reference']), help='')
c.argument('reference_count', type=int, help='Number of external references that exist on the task.')
c.argument('start_date_time', help='Date and time at which the task starts. The Timestamp type represents date '
'and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on '
'Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('title', type=str, help='Title of the task.')
c.argument('bucket_task_board_format', action=AddBucketTaskBoardFormat, nargs='+',
help='plannerBucketTaskBoardTaskFormat')
c.argument('progress_task_board_format', action=AddProgressTaskBoardFormat, nargs='+',
help='plannerProgressTaskBoardTaskFormat')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Details')
c.argument('checklist', type=validate_file_or_dict, help='plannerChecklistItems Expected value: '
'json-string/@json-file.', arg_group='Details')
c.argument('description', type=str, help='Description of the task', arg_group='Details')
c.argument('microsoft_graph_planner_preview_type', arg_type=get_enum_type(['automatic', 'noPreview',
'checklist', 'description',
'reference']), help='',
arg_group='Details')
c.argument('references', type=validate_file_or_dict, help='plannerExternalReferences Expected value: '
'json-string/@json-file.', arg_group='Details')
c.argument('id1', type=str, help='Read-only.', arg_group='Assigned To Task Board Format')
c.argument('order_hints_by_assignee', type=validate_file_or_dict, help='plannerOrderHintsByAssignee Expected '
'value: json-string/@json-file.', arg_group='Assigned To Task Board Format')
c.argument('unassigned_order_hint', type=str, help='Hint value used to order the task on the AssignedTo view '
'of the Task Board when the task is not assigned to anyone, or if the orderHintsByAssignee '
'dictionary does not provide an order hint for the user the task is assigned to. The format is '
'defined as outlined here.', arg_group='Assigned To Task Board Format')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('microsoft_graph_identity_application', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
c.argument('microsoft_graph_identity_device', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
c.argument('microsoft_graph_identity_user', action=AddApplication, nargs='+', help='identity',
arg_group='Completed By')
with self.argument_context('planner group-planner-plan-bucket create-task') as c:
c.argument('group_id', type=str, help='key: id of group')
c.argument('planner_plan_id', type=str, help='key: id of plannerPlan')
c.argument('planner_bucket_id', type=str, help='key: id of plannerBucket')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('active_checklist_item_count', type=int, help='Number of checklist items with value set to false, '
'representing incomplete items.')
c.argument('applied_categories', type=validate_file_or_dict, help='plannerAppliedCategories Expected value: '
'json-string/@json-file.')
c.argument('assignee_priority', type=str, help='Hint used to order items of this type in a list view. The '
'format is defined as outlined here.')
c.argument('assignments', type=validate_file_or_dict, help='plannerAssignments Expected value: '
'json-string/@json-file.')
c.argument('bucket_id', type=str, help='Bucket ID to which the task belongs. The bucket needs to be in the '
'plan that the task is in. It is 28 characters long and case-sensitive. Format validation is done '
'on the service.')
c.argument('checklist_item_count', type=int, help='Number of checklist items that are present on the task.')
c.argument('completed_date_time', help='Read-only. Date and time at which the \'percentComplete\' of the task '
'is set to \'100\'. The Timestamp type represents date and time information using ISO 8601 format '
'and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('conversation_thread_id', type=str, help='Thread ID of the conversation on the task. This is the ID '
'of the conversation thread object created in the group.')
c.argument('created_date_time', help='Read-only. Date and time at which the task is created. The Timestamp '
'type represents date and time information using ISO 8601 format and is always in UTC time. For '
'example, midnight UTC on Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('due_date_time', help='Date and time at which the task is due. The Timestamp type represents date '
'and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on '
'Jan 1, 2014 would look like this: \'2014-01-01T00:00:00Z\'')
c.argument('has_description', arg_type=get_three_state_flag(), help='Read-only. Value is true if the details '
'object of the task has a non-empty description and false otherwise.')
c.argument('order_hint', type=str, help='Hint used to order items of this type in a list view. The format is '
'defined as outlined here.')
c.argument('percent_complete', type=int, help='Percentage of task completion. When set to 100, the task is '
'considered completed.')
c.argument('plan_id', type=str, help='Plan ID to which the task belongs.')
c.argument('preview_type', arg_type=get_enum_type(['automatic', 'noPreview', 'checklist', 'description',
'reference']), help='')
c.argument('reference_count', type=int, help='Number of external references that exist on the task.')
c.argument('start_date_time', help='Date and time at which the task starts. The Timestamp type represents date '
'and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on '
'Jan 1, 2014 would look like this: | |
"""Sinking point -
A discrete approximation of real numbers with explicit significance tracking.
Implemented really badly in one file.
TODO:
in digital.py:
- representation
- reading in from int / mpfr / string
- reading in from digital, clone and update
- comparison
- trivial bit manipulations like neg and abs
- rounding? round only via w / p
- output? to_mpfr? to_string? to_ieee? to_posit?
in conversion.py:
- to / from mantissa_exp form
- universal is_neg, etc. ?
- extract payload
Where to put OP / RM identifiers?
in xxxmath.py:
- numeric engine: given opcode, inputs as sink, precision,
produce another sink with rounding status
in arithmetic:
- round specifically to ieee / posit
- arith wrappers that can use either backend, and do the correct rounding / special case behavior
"""
import typing
import sys
import random
import re
from .integral import bitmask
from . import conversion
from .ops import RM
def _interval_scan_away(lower, upper, n):
    """Scan outward for a representative at precision n whose envelope encloses [lower, upper].

    Returns a pair (too_small, rep):
      too_small -- True when this n is provably too small for the bound, i.e. we
        found a representative whose envelope is totally enclosed between lower
        and upper; otherwise False.
      rep -- the enclosing representative if one was found, else None.

    Both arguments must be exact, non-negative sinks with lower < upper.
    """
    if lower._inexact or upper._inexact:
        raise ValueError('enclose: can only scan exact numbers')
    if lower.negative or upper.negative:
        raise ValueError('enclose: can only scan positive numbers')
    if not lower < upper:
        raise ValueError('enclose: can only scan ordered envelope, got [{}, {}]'.format(lower, upper))
    seed = lower.trunc(n)
    # Zero gets the one-sided half envelope; everything else gets the symmetric one.
    candidate = seed.explode(sided=bool(seed.is_exactly_zero()), full=False)
    # Only a few iterations can happen here.  We always reach the bottom of the
    # interval quickly: truncating many bits makes the envelope large enough to
    # hit the exact value in one step, while truncating none starts us exactly
    # at lower.  And because we detect when the envelope is provably smaller
    # than the interval, we abort after a few steps in that case too.
    while True:
        env_lo, env_hi = candidate.bounds()
        covers_bottom = env_lo <= lower
        covers_top = upper <= env_hi
        if covers_bottom and covers_top:
            # Envelope encloses the whole interval: success.
            return False, candidate
        if covers_bottom:
            # Bottom reached but top still uncovered: try the next representative.
            candidate = candidate.away(const_p=False)
        elif covers_top:
            # Bottom of interval was no good, so we went too far.
            return False, None
        else:
            # The interval strictly encloses this envelope.  Unless this was the
            # one-sided half envelope near zero, that proves this n is too small.
            if candidate.interval_sided:
                # TODO: sided -> sided will break everything
                candidate = candidate.away(const_p=False)
            else:
                return True, None
def enclose(lower, upper, min_n=None):
    """Return the sink with the smallest interval that encloses lower and upper.

    lower and upper must be exact sinks, with lower <= upper.
    :param lower: exact Sink, the low end of the interval to enclose.
    :param upper: exact Sink, the high end of the interval to enclose.
    :param min_n: optional floor on the n (last significant place) to search from.
    :raises ValueError: if either argument is inexact, or lower > upper.
    TODO: auto bounds?
    TODO: other kinds of intervals?
    """
    if lower._inexact or upper._inexact:
        raise ValueError('enclose: must have exact arguments, got [{} and {}]'.format(lower, upper))
    elif lower == upper:
        # Equal values: return whichever carries the smaller n
        # (i.e. presumably the more precise representation — confirm intent).
        return Sink(lower) if lower.n < upper.n else Sink(upper)
    elif not lower < upper:
        raise ValueError('enclose: arguments out of order, not {} < {}'.format(lower, upper))
    zero = Sink(0)
    # because upper != lower, the distance between them must be larger than the interval size
    # with this n
    min_possible_n = min(lower.n, upper.n) - 1
    if min_n is None:
        min_n = min_possible_n
    else:
        min_n = max(min_possible_n, min_n)
    if lower < zero and upper > zero:
        # The interval straddles zero, so the answer must be an envelope
        # centered on zero: binsearch around zero for the smallest safe n.
        offset = 1
        n_lo = n_hi = min_n
        bound_lo, bound_hi = zero.trunc(n_hi).explode(sided=False, full=False).bounds()
        # first expsearch for n_hi
        while lower < bound_lo or bound_hi < upper:
            n_lo = n_hi
            n_hi = n_hi + offset
            offset <<= 1
            bound_lo, bound_hi = zero.trunc(n_hi).explode(sided=False, full=False).bounds()
        # final condition: n_hi, bound_lo, bound_hi are all safe
        while n_lo + 1 < n_hi:
            n_mid = n_lo + ((n_hi - n_lo) // 2)
            bound_lo, bound_hi = zero.trunc(n_mid).explode(sided=False, full=False).bounds()
            if lower < bound_lo or bound_hi < upper:
                # bound is unsafe, update n_lo
                n_lo = n_mid
            else:
                # bound is safe, update n_hi
                n_hi = n_mid
        # final conditions: n_lo + 1 = n_hi, n_lo doesn't work, n_hi works
        # OR, we never entered the loop, and n_lo = n_hi = min_n
        return zero.trunc(n_hi).explode(sided=False, full=False)
    else:
        # First, reorder based on magnitude, as we can only trunc towards zero.
        if lower.negative:
            # Mirror a negative interval to the positive side;
            # the sign is restored on the result below.
            tmp = -lower
            lower = -upper
            upper = tmp
            negative = True
        else:
            negative = False
        # Binsearch for the largest interval that doesn't work.
        # We know we've found it when we can demonstrate that the span
        # of this interval is too small, but the demonstration fails for the next size up.
        offset = 1
        n_lo = n_hi = min_n
        too_small, enclosing_rep = _interval_scan_away(lower, upper, n_hi)
        # first expsearch for n_hi
        while too_small:
            n_lo = n_hi
            n_hi = n_hi + offset
            offset <<= 1
            too_small, enclosing_rep = _interval_scan_away(lower, upper, n_hi)
        # final condition: n_hi is not provably too small
        while n_lo + 1 < n_hi:
            n_mid = n_lo + ((n_hi - n_lo) // 2)
            too_small, enclosing_rep = _interval_scan_away(lower, upper, n_mid)
            if too_small:
                # provably too small, update n_lo
                n_lo = n_mid
            else:
                # not provable: update n_hi
                n_hi = n_mid
        # final conditions: n_lo + 1 = n_hi, n_lo is provably too small, n_hi has no such proof
        # OR, we never entered the loops, and n_lo = n_hi = min_n
        # We now perform a linear search, starting from n_lo, until we find the smallest n
        # that can produce a representative. This should not take very long, as we are doubling
        # the size of the envelope each time we increment n.
        # TODO: We could save a few cycles by refusing to actually test n_lo if it is the same as n_hi.
        n = n_lo
        while True:
            too_small, enclosing_rep = _interval_scan_away(lower, upper, n)
            if enclosing_rep is None:
                n += 1
            else:
                # remember to correct the sign
                return Sink(enclosing_rep, negative=negative)
class PrecisionError(Exception):
    """Raised when a rounding operation is given insufficient precision."""
class Sink(object):
    """A discrete approximation of a real number with explicit significance tracking.

    For sinks with a real value, the value is exactly (sign) * _c * 2**_exp.
    """
    # for sinks with a real value, the value is exactly (sign) * _c * 2**_exp
    # NOTE(review): the None defaults below are placeholders — presumably
    # always overwritten during construction; confirm against __init__.
    _c : int = None # unsigned significand (magnitude only; sign kept separately)
    _exp : int = None # exponent: binary weight of the significand's last bit
    # sign is stored separately, as is information about infiniteness or NaN
    _negative : bool = None # sign bit: True for negative values
    _isinf : bool = None # is this value infinite?
    _isnan : bool = None # is this value NaN?
# _m and _exp are not directly visible; we expose them with attributes
@property
def m(self):
    """Signed integer significand.

    The real value of the sink is m * 2**exp.
    """
    return -self._c if self._negative else self._c
@property
def exp(self):
    """Exponent.

    The binary weight of the least significant bit of the significand:
    the real value is m * 2**exp.
    """
    return self._exp
# we also present 4 views for the primary 'Titanic' properties
@property
def e(self):
"""IEEE 754 style exponent.
If the significand is interpreted as a binary-point number x between 1 and 2,
i.e. x = 1.10011100 etc. then the real value is x * 2**e.
"""
return (self._exp - 1) + self._c.bit_length()
@property
def n(self):
    """The 'sticky bit' position: the binary place where digits stop being significant.

    I.e. -1 for an integer inexact beyond the binary point.
    Always equal to exp - 1.
    """
    return self._exp - 1
@property
def p(self):
    """The precision of the significand.

    Always equal to the number of bits in c; 0 for any zero
    (int.bit_length() of 0 is 0).
    """
    return self._c.bit_length()
@property
def c(self):
    """Unsigned integer significand.

    The magnitude of the real value is c * 2**exp; the sign lives in `negative`.
    """
    return self._c
# views of basic semantic flags
@property
def negative(self):
"""The sign bit - | |
for v in vals]
else:
vals = list(val)
self.conf[key] = set(vals)
elif key == "pos_classes":
mappings = val.strip().replace("\r","").split('\n')
for mapping in mappings:
if "<-" in mapping:
target, sources = mapping.strip().split("<-")
for source in sources.split("|"):
self.short_pos[source] = target
elif key == "regex_tok":
self.regex_tok = []
items = val.strip().split("\n")
for regex in items:
if "\t" in regex:
f, r = regex.strip().split("\t")
self.regex_tok.append((re.compile(f),r))
else:
sys.stderr.write("WARN: regex entry without tab in conf file\n")
elif key == "allowed":
self.enforce_allowed = True
items = val.strip().split("\n")
for rule in items:
if "<-" in rule:
position, chars = rule.strip().split("<-")
try:
position = int(position)
except Exception as e:
raise ValueError("Can't interpret position instruction in conf file as integer: " + position + "\n")
self.allowed[position] = list(chars)
else:
sys.stderr.write("WARN: allowed segmentation position entry without '<-' in conf file\n")
self.letters = self.conf["base_letters"]
def load(self, model_path=None):
    """
    Load a pickled model.

    :param model_path: Path to the model pickle file. If not specified, looks for
        model language name + ".sm2" (Python 2) or ".sm3" (Python 3), e.g. heb.sm3,
        first in the current directory, then next to the calling script, then next
        to this module.
    :return: void
    """
    if model_path is None:
        # Default model file name is the language name, extension ".sm2" for
        # Python 2 or ".sm3" for Python 3
        model_name = self.lang + ".sm" + str(sys.version_info[0])
        model_path = model_name
        if not os.path.exists(model_path):  # Try loading from calling directory
            # BUG FIX: previously concatenated dirname(sys.argv[0]) and the file
            # name with no separator, yielding e.g. "somedirheb.sm3";
            # os.path.join inserts the separator correctly.
            model_path = os.path.join(os.path.dirname(sys.argv[0]), model_name)
        if not os.path.exists(model_path):  # Try loading from this module's directory
            model_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), model_name)
    #sys.stderr.write("Module: " + self.__module__ + "\n")
    self.tokenizer, self.num_labels, self.cat_labels, self.multicol_dict, pos_lookup, self.freqs, self.conf_file_parser = joblib.load(model_path)
    # Wrap the pickled lookup so unknown keys map to the "_" (unknown) POS tag.
    default_pos_lookup = defaultdict(lambda: "_")
    default_pos_lookup.update(pos_lookup)
    self.pos_lookup = default_pos_lookup
    self.read_conf_file()
    self.loaded = True
def train(self, train_file, lexicon_file=None, freq_file=None, test_prop=0.1, output_importances=False, dump_model=False,
cross_val_test=False, output_errors=False, ablations=None, dump_transformed_data=False, do_shuffle=True, conf=None):
"""
:param train_file: File with segmentations to train on in one of the two formats described in make_prev_next()
:param lexicon_file: Tab delimited lexicon file with full forms in first column and POS tag in second column (multiple rows per form possible)
:param freq_file: Tab delimited file with segment forms and their frequencies as integers in two columns
:param conf: configuration file for training (by default: <MODELNAME>.conf)
:param test_prop: (0.0 -- 0.99) Proportion of shuffled data to test on
:param output_importances: Whether to print feature importances (only if test proportion > 0.0)
:param dump_model: Whether to dump trained model to disk via joblib
:param cross_val_test: Whether to perform cross-validation for hyper parameter optimization
:param output_errors: Whether to output prediction errors to a file 'errs.txt'
:param ablations: Comma separated string of feature names to ablate, e.g. "freq_ratio,prev_grp_pos,next_grp_pos"
:param dump_transformed_data: If true, transform data to a pandas dataframe and write to disk, then quit
(useful to train other approaches on the same features, e.g. a DNN classifier)
:param do_shuffle: Whether training data is shuffled after context extraction but before test partition is created
(this has no effect if training on whole training corpus)
:return: None
"""
import timing
self.read_conf_file(file_name=conf)
pos_lookup = read_lex(self.short_pos,lexicon_file)
self.pos_lookup = pos_lookup
conf_file_parser = self.conf_file_parser
letter_config = LetterConfig(self.letters, self.conf["vowels"], self.pos_lookup)
np.random.seed(42)
if lexicon_file is None:
print("i WARN: No lexicon file provided, learning purely from examples")
seg_table = io.open(train_file,encoding="utf8").read()
seg_table = seg_table.replace("\r","").strip()
for c in self.conf["diacritics"]: # TODO: configurable diacritic removal
pass
#seg_table = seg_table.replace(c,"")
seg_table = seg_table.split("\n")
sys.stderr.write("o Encoding Training data\n")
# Validate training data
non_tab_lines = 0
non_tab_row = 0
for r, line in enumerate(seg_table):
if line.count("\t") < 1:
non_tab_lines += 1
non_tab_row = r
if non_tab_lines > 0:
sys.stderr.write("FATAL: found " + str(non_tab_lines) + " rows in training data not containing tab\n")
sys.stderr.write(" Last occurrence at line: " + str(non_tab_row) + "\n")
sys.exit()
# Make into four cols: prev \t next \t current \t segmented (unless already receiving such a table, for shuffled datasets)
if seg_table[0].count("\t") == 1:
seg_table = make_prev_next(seg_table)
# Ensure OOV symbol is in data
seg_table = ["_\t_\t_\t_"] + seg_table
data_y = []
words = []
all_encoded_groups = []
encoding_cache = {}
non_ident_segs = 0
shuffle_mapping = list(range(len(seg_table)))
zipped = list(zip(seg_table, shuffle_mapping))
# Shuffle table to sample across entire dataset if desired
if do_shuffle and False:
random.Random(24).shuffle(zipped)
seg_table, shuffle_mapping = zip(*zipped)
headers = bg2array("_________",prev_group="_",next_group="_",print_headers=True,is_test=1,grp_id=1,config=letter_config)
word_idx = -1
bug_rows = []
freqs = defaultdict(float)
total_segs = 0.0
flines = io.open(freq_file,encoding="utf8").read().replace("\r","").split("\n") if freq_file is not None else []
for l in flines:
if l.count("\t")==1:
w, f = l.split("\t")
freqs[w] += float(f)
total_segs += float(f)
for u in freqs:
freqs[u] = freqs[u]/total_segs
# Don't use freqs if they're empty
if len(freqs) == 0:
sys.stderr.write("o No segment frequencies provided, adding 'freq_ratio' to ablated features\n")
if ablations is None:
ablations = "freq_ratio"
else:
if "freq_ratio" not in ablations:
ablations += ",freq_ratio"
step = int(1/test_prop) if test_prop > 0 else 0
test_indices = list(range(len(seg_table)))[0::step] if step > 0 else []
test_rows = []
for row_idx, row in enumerate(seg_table):
is_test = 1 if row_idx in test_indices else 0
prev_group, next_group, bound_group, segmentation = row.split("\t")
if bound_group != "|":
if len(bound_group) != len(segmentation.replace("|","")): # Ignore segmentations that also normalize
non_ident_segs += 1
bug_rows.append((row_idx,bound_group,segmentation.replace("|","")))
continue
###
if dump_transformed_data:
if is_test:
test_rows.append(bound_group + "\t" + segmentation)
###
word_idx += 1
words.append(bound_group)
group_type = "_".join([x for x in [prev_group, next_group, bound_group] if x != ""])
if group_type in encoding_cache: # No need to encode, an identical featured group has already been seen
encoded_group = encoding_cache[group_type]
for c in encoded_group:
c[headers.index("is_test")] = is_test # Make sure that this group's test index is correctly assigned
else:
encoded_group = bg2array(bound_group,prev_group=prev_group,next_group=next_group,is_test=is_test,grp_id=word_idx,config=letter_config,train=True,freqs=freqs)
encoding_cache[group_type] = encoded_group
all_encoded_groups += encoded_group
data_y += segs2array(segmentation)
sys.stderr.write("o Finished encoding " + str(len(data_y)) + " chars (" + str(len(seg_table)) + " groups, " + str(len(encoding_cache)) + " group types)\n")
if non_ident_segs > 0:
with io.open("bug_rows.txt",'w',encoding="utf8") as f:
f.write(("\n".join([str(r) + ": " + g + "<>" + s for r, g, s in sorted([[shuffle_mapping[x], g, s] for x, g, s in bug_rows])]) + "\n"))
sys.stderr.write("i WARN: found " + str(non_ident_segs) + " rows in training data where left column characters not identical to right column characters\n")
sys.stderr.write(" Row numbers dumped to: bug_rows.txt\n")
sys.stderr.write(" " + str(non_ident_segs) + " rows were ignored in training\n\n")
data_y = np.array(data_y)
# Remove features switched off in .conf file
for label in self.conf["unused"]:
if label in cat_labels:
cat_labels.remove(label)
if label in num_labels:
num_labels.remove(label)
# Handle temporary ablations if specified in option -a
if ablations is not None:
sys.stderr.write("o Applying ablations\n")
if len(ablations) > 0 and ablations != "none":
abl_feats = ablations.split(",")
sys.stderr.write("o Ablating features:\n")
for feat in abl_feats:
found = False
if feat in cat_labels:
cat_labels.remove(feat)
found = True
elif feat in num_labels:
num_labels.remove(feat)
found = True
if found:
sys.stderr.write("\t"+feat+"\n")
else:
sys.stderr.write("\tERR: can't find ablation feature " + feat + "\n")
sys.exit()
sys.stderr.write("o Creating dataframe\n")
data_x = pd.DataFrame(all_encoded_groups, columns=headers)
###
if dump_transformed_data:
data_x["resp"] = data_y
import csv
to_remove = ["is_test","grp_id"] # Columns to remove from transformed data dump
out_cols = [col for col in headers if col not in to_remove] + ["resp"] # Add the response column as 'resp'
data_x.iloc[data_x.index[data_x["is_test"] == 0]].to_csv("rftokenizer_train_featurized.tab",sep="\t",quotechar="",quoting=csv.QUOTE_NONE,encoding="utf8",index=False,columns=out_cols)
data_x.iloc[data_x.index[data_x["is_test"] == 1]].to_csv("rftokenizer_test_featurized.tab",sep="\t",quotechar="",quoting=csv.QUOTE_NONE,encoding="utf8",index=False,columns=out_cols)
# Dump raw test rows to compare gold solution
with io.open("rftokenizer_test_gold.tab","w",encoding="utf8") as gold:
gold.write("\n".join(test_rows) + "\n")
sys.stderr.write("o Wrote featurized train/test set and gold test to rftokenizer_*.tab\n")
sys.exit()
###
data_x_enc, multicol_dict = multicol_fit_transform(data_x, pd.Index(cat_labels))
if test_prop > 0:
sys.stderr.write("o Generating train/test split with test proportion "+str(test_prop)+"\n")
data_x_enc["boundary"] = data_y
strat_train_set = data_x_enc.iloc[data_x_enc.index[data_x_enc["is_test"] == 0]]
strat_test_set = data_x_enc.iloc[data_x_enc.index[data_x_enc["is_test"] == 1]]
sys.stderr.write("o Transforming data to numerical array\n")
train_x = strat_train_set[cat_labels+num_labels].values
train_y = strat_train_set["boundary"]
train_y_bin = np.where(strat_train_set['boundary'] == 0, 0, 1)
if test_prop > 0:
test_x = strat_test_set[cat_labels+num_labels].values
test_y_bin = np.where(strat_test_set['boundary'] == 0, 0, 1)
bound_grp_idx = np.array(strat_test_set['grp_id'])
from sklearn.dummy import DummyClassifier
d = DummyClassifier(strategy="most_frequent")
d.fit(train_x,train_y_bin)
pred = d.predict(test_x)
print("o Majority baseline:")
print("\t" + str(accuracy_score(test_y_bin, pred)))
from xgboost import XGBClassifier
#clf = ExtraTreesClassifier(n_estimators=250, max_features=None, n_jobs=3, random_state=42)
#clf = XGBClassifier(n_estimators=200,n_jobs=3,random_state=42,max_depth=20,subsample=0.6,colsample_bytree=0.9,eta=.05,gamma=.15)
#20 round best:
# clf = XGBClassifier(n_estimators=220,n_jobs=3,random_state=42,max_depth=28,subsample=.8,colsample_bytree=0.6,eta=.05,gamma=.13)
# 100 round best:
clf = XGBClassifier(n_estimators=230,n_jobs=3,random_state=42,max_depth=17,subsample=1.0,colsample_bytree=0.6,eta=.07,gamma=.09)
#{'colsample_bytree': 0.6, 'eta': 0.05, 'gamma': 0.13, 'max_depth': 28, 'n_estimators': 160, 'subsample': 1.0}
#100 rounds:
#{'colsample_bytree': 0.6, 'eta': 0.07, 'gamma': 0.09, 'max_depth': 17, 'n_estimators': 230, 'subsample': 1.0}
if cross_val_test:
# Modify code to tune hyperparameters
from hyperopt import hp
from hyperopt.pyll import scope
space = {
'n_estimators': scope.int(hp.quniform('n_estimators', 100, 250, 10)),
'max_depth': scope.int(hp.quniform('max_depth', 8, 35, 1)),
'eta': scope.float(hp.quniform('eta', 0.01, 0.2, 0.01)),
'gamma': scope.float(hp.quniform('gamma', 0.01, 0.2, 0.01)),
'colsample_bytree': hp.choice('colsample_bytree', [0.6,0.7,0.8,1.0]),
'subsample': hp.choice('subsample', [0.6,0.7,0.8,0.9,1.0]),
'clf': hp.choice('clf', ["xgb"])
}
best_clf, best_params = hyper_optimize(train_x,train_y_bin,val_x=None,val_y=None,space=space,max_evals=100)
print(best_params)
clf = best_clf
print("\nBest parameters:\n" + 30 * "=")
print(best_params)
sys.stderr.write("o Learning...\n")
clf.fit(train_x, train_y_bin)
if test_prop > 0:
pred = clf.predict(test_x)
j=-1
for i, row in strat_test_set.iterrows():
j+=1
if row["idx"] +1 == row["len_bound_group"]:
pred[j] = 0
print("o Binary clf accuracy:")
print("\t" + str(accuracy_score(test_y_bin, pred)))
group_results = defaultdict(lambda : 1)
for i in range(len(pred)):
grp = bound_grp_idx[i]
if test_y_bin[i] != pred[i]:
group_results[grp] = 0
correct = 0
total = 0
for grp in set(bound_grp_idx):
if group_results[grp] == 1:
correct +=1
total +=1
print("o Perfect bound group accuracy:")
print("\t" + str(float(correct)/total))
errs = defaultdict(int)
for i, word in enumerate(words):
if i in group_results:
if group_results[i] == 0:
errs[word] += 1
if output_errors:
print("o Writing prediction errors to errs.txt")
with io.open("errs.txt",'w',encoding="utf8") as f:
for err in errs:
f.write(err + "\t" + str(errs[err])+"\n")
else:
print("o Test proportion is 0%, skipping evaluation")
if output_importances:
feature_names = cat_labels + num_labels
zipped = zip(feature_names, clf.feature_importances_)
sorted_zip = sorted(zipped, key=lambda x: x[1], reverse=True)
print("o Feature importances:\n")
for name, importance in sorted_zip:
print(name, "=", importance)
if dump_model:
plain_dict_pos_lookup = {}
plain_dict_pos_lookup.update(pos_lookup)
joblib.dump((clf, num_labels, cat_labels, multicol_dict, plain_dict_pos_lookup, freqs, conf_file_parser), self.lang + ".sm" + str(sys.version_info[0]), compress=3)
print("o Dumped trained model to " + self.lang + ".sm" + str(sys.version_info[0]))
def rf_tokenize(self, data, sep="|", indices=None, proba=False):
"""
Main tokenizer routine
:param data: ordered list of word forms (prev/next word context is taken from list, so meaningful order is assumed)
:param sep: separator to use for found segments, default: |
:param indices: options; list of integer indices to process. | |
# threeML/minimizer/minimization.py
from __future__ import division
import collections
import math
from builtins import object, range, str, zip
import numpy as np
import pandas as pd
import scipy.optimize
from past.utils import old_div
from threeML.config.config import threeML_config
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.io.logging import setup_logger
from threeML.utils.differentiation import ParameterOnBoundary, get_hessian
from threeML.utils.progress_bar import tqdm
# Set the warnings to be issued always for this module
custom_warnings.simplefilter("always", RuntimeWarning)
log = setup_logger(__name__)
# Special constants
FIT_FAILED = 1e12
# Define a bunch of custom exceptions relevant for what is being accomplished here
class CannotComputeCovariance(RuntimeWarning):
    """Warning issued when the covariance matrix cannot be computed after a fit."""
    pass
class CannotComputeErrors(RuntimeWarning):
    """Warning issued when parameter errors cannot be computed."""
    pass
class ParameterIsNotFree(Exception):
    """Raised when an operation requiring a free parameter targets a fixed one."""
    pass
class FitFailed(Exception):
    """Raised when the minimization does not converge / fails outright."""
    pass
class MinimizerNotAvailable(Exception):
    """Raised when the requested minimizer is not registered on this system."""
    pass
class BetterMinimumDuringProfiling(RuntimeWarning):
    """Warning issued when profiling finds a better minimum than the original fit."""
    pass
# This will contain the available minimizers
_minimizers = {}
def get_minimizer(minimizer_type):
    """
    Return the requested minimizer *class* (not instance)

    :param minimizer_type: MINUIT, ROOT, PYOPT...
    :return: the class (i.e., the type) for the requested minimizer
    :raises MinimizerNotAvailable: if the minimizer is not registered
    """
    try:
        return _minimizers[minimizer_type.upper()]
    except KeyError:
        # Use lazy %-style arguments so the message is only rendered when
        # the record is actually emitted (idiomatic logging usage).
        log.error("Minimizer %s is not available on your system",
                  minimizer_type)

        # Carry the requested name so the exception is self-describing
        # (previously raised with no message at all).
        raise MinimizerNotAvailable(minimizer_type)
class FunctionWrapper(object):
    """
    Adapt a function of *all* parameters into a callable of only the free
    parameters, with the fixed ones held at externally supplied values.
    """

    def __init__(self, function, all_parameters, fixed_parameters):
        """
        :param function:
        :param all_parameters:
        :param fixed_parameters: list of fixed parameters
        """
        self._function = function
        self._all_parameters = all_parameters
        self._fixed_parameters_names = fixed_parameters
        self._fixed_parameters_values = np.zeros(len(fixed_parameters))

        # Boolean mask over all parameters: True where the parameter is fixed
        all_names = list(self._all_parameters.keys())

        self._indexes_of_fixed_par = np.zeros(len(self._all_parameters), bool)

        for parameter_name in self._fixed_parameters_names:

            self._indexes_of_fixed_par[all_names.index(parameter_name)] = True

        # Scratch buffer holding the full parameter vector for each call
        self._all_values = np.zeros(len(self._all_parameters))

    def set_fixed_values(self, new_fixed_values):
        # Values arrive in the internal reference (after transformations, if
        # any). Slice assignment implicitly validates the size of the input.
        self._fixed_parameters_values[:] = new_fixed_values

    def __call__(self, *trial_values):
        # Trial values are also in the internal reference. Scatter fixed and
        # free values into the scratch buffer, then evaluate.
        self._all_values[self._indexes_of_fixed_par] = self._fixed_parameters_values
        self._all_values[~self._indexes_of_fixed_par] = trial_values

        return self._function(*self._all_values)
class ProfileLikelihood(object):
    """
    Compute the profile likelihood over one or two parameters: the chosen
    parameter(s) are stepped over a grid of values while all remaining free
    parameters are re-optimized ("profiled out") at each step.
    """

    def __init__(self, minimizer_instance, fixed_parameters):
        # Names of the parameters to be stepped (held fixed during each fit)
        self._fixed_parameters = fixed_parameters
        assert (
            len(self._fixed_parameters) <= 2
        ), "Can handle only one or two fixed parameters"
        # Get some info from the original minimizer
        self._function = minimizer_instance.function
        # Note that here we have to use the original parameters (not the internal parameters)
        self._all_parameters = minimizer_instance.parameters
        # Create a copy of the dictionary of parameters
        free_parameters = collections.OrderedDict(self._all_parameters)
        # Remove the fixed ones
        for parameter_name in fixed_parameters:
            free_parameters.pop(parameter_name)
        # Now compute how many free parameters we have
        self._n_free_parameters = len(free_parameters)
        if self._n_free_parameters > 0:
            # The wrapper exposes only the free parameters to the inner optimizer
            self._wrapper = FunctionWrapper(
                self._function, self._all_parameters, self._fixed_parameters
            )
            # Create a copy of the optimizer with the new parameters (i.e., one or two
            # parameters fixed to their current values)
            self._optimizer = type(minimizer_instance)(
                self._wrapper, free_parameters, verbosity=0
            )
            if minimizer_instance.algorithm_name is not None:
                self._optimizer.set_algorithm(
                    minimizer_instance.algorithm_name)
        else:
            # Special case when there are no free parameters after fixing the requested ones
            # There is no profiling necessary here
            self._wrapper = None
            self._optimizer = None

    def _transform_steps(self, parameter_name, steps):
        """
        If the parameter has a transformation, use it for the steps and return the transformed steps

        :return: transformed steps
        """
        if self._all_parameters[parameter_name].has_transformation():
            new_steps = self._all_parameters[parameter_name].transformation.forward(
                steps
            )
            return new_steps
        else:
            # Nothing to do
            return steps

    def step(self, steps1, steps2=None):
        """
        Step the fixed parameter(s) over the given values and return the
        profiled log-likelihood: a 1d array when one parameter is fixed, a
        2d array (rows follow steps1, columns steps2) when two are fixed.
        """
        if steps2 is not None:
            assert (
                len(self._fixed_parameters) == 2
            ), "Cannot step in 2d if you fix only one parameter"
            # Find out if the user is giving flipped steps (i.e. param_1 is after param_2 in the
            # parameters dictionary)
            param_1_name = self._fixed_parameters[0]
            param_1_idx = list(self._all_parameters.keys()).index(param_1_name)
            param_2_name = self._fixed_parameters[1]
            param_2_idx = list(self._all_parameters.keys()).index(param_2_name)
            # Fix steps if needed
            steps1 = self._transform_steps(param_1_name, steps1)
            if steps2 is not None:
                steps2 = self._transform_steps(param_2_name, steps2)
            if param_1_idx > param_2_idx:
                # Switch steps
                swap = steps1
                steps1 = steps2
                steps2 = swap
                # Transpose so the result matches the caller's ordering
                results = self._step2d(steps1, steps2).T
            else:
                results = self._step2d(steps1, steps2)
            return results
        else:
            assert (
                len(self._fixed_parameters) == 1
            ), "You cannot step in 1d if you fix 2 parameters"
            param_1_name = self._fixed_parameters[0]
            # Fix steps if needed.
            steps1 = self._transform_steps(param_1_name, steps1)
            return self._step1d(steps1)

    def __call__(self, values):
        # NOTE(review): assumes at least one free parameter remains, i.e.
        # self._wrapper / self._optimizer are not None — confirm callers.
        self._wrapper.set_fixed_values(values)
        _, this_log_like = self._optimizer.minimize(compute_covar=False)
        return this_log_like

    def _step1d(self, steps1):
        # Profile along one parameter; returns an array parallel to steps1.
        log_likes = np.zeros_like(steps1)
        for i, step in enumerate(tqdm(steps1, desc="Profiling likelihood")):
            if self._n_free_parameters > 0:
                # Profile out the free parameters
                self._wrapper.set_fixed_values(step)
                _, this_log_like = self._optimizer.minimize(
                    compute_covar=False)
            else:
                # No free parameters, just compute the likelihood
                this_log_like = self._function(step)
            log_likes[i] = this_log_like
        return log_likes

    def _step2d(self, steps1, steps2):
        # Profile on the (steps1 x steps2) grid; returns a 2d array.
        log_likes = np.zeros((len(steps1), len(steps2)))
        if threeML_config.interface.progress_bars:
            p = tqdm(total=len(steps1) * len(steps2),
                     desc="Profiling likelihood")
        for i, step1 in enumerate(steps1):
            for j, step2 in enumerate(steps2):
                if self._n_free_parameters > 0:
                    # Profile out the free parameters
                    self._wrapper.set_fixed_values([step1, step2])
                    try:
                        _, this_log_like = self._optimizer.minimize(
                            compute_covar=False
                        )
                    except FitFailed:
                        # If the user is stepping too far it might be that the fit fails. It is usually not a
                        # problem
                        this_log_like = np.nan
                else:
                    # No free parameters, just compute the likelihood
                    this_log_like = self._function(step1, step2)
                log_likes[i, j] = this_log_like
                if threeML_config.interface.progress_bars:
                    p.update(1)
        return log_likes
# This classes are used directly by the user to have better control on the minimizers.
# They are actually factories
class _Minimization(object):
def __init__(self, minimizer_type: str):
self._name = minimizer_type
self._minimizer_type = get_minimizer(minimizer_type=minimizer_type)
self._algorithm = None
self._setup_dict = {}
def setup(self, **setup_dict):
valid_setup_keys = self._minimizer_type.valid_setup_keys
# Check that the setup has been specified well
for key in list(setup_dict.keys()):
assert key in valid_setup_keys, (
"%s is not a valid setup parameter for this minimizer" % key
)
self._setup_dict = setup_dict
@property
def name(self) -> str:
return self._name
def set_algorithm(self, algorithm):
# Note that algorithm might be None
self._algorithm = algorithm
class LocalMinimization(_Minimization):
    """Factory for local minimizers (see _Minimization)."""

    def __init__(self, minimizer_type):
        super(LocalMinimization, self).__init__(minimizer_type)

        # Only classes deriving from LocalMinimizer are acceptable here
        assert issubclass(self._minimizer_type, LocalMinimizer), (
            "Minimizer %s is not a local minimizer" % minimizer_type
        )

    def get_instance(self, *args, **kwargs):
        """Build a minimizer instance and apply the stored algorithm/setup."""
        new_instance = self._minimizer_type(*args, **kwargs)

        if self._algorithm is not None:

            new_instance.set_algorithm(self._algorithm)

        # Set up the minimizer with the recorded options
        new_instance._setup(self._setup_dict)

        return new_instance
class GlobalMinimization(_Minimization):
    """
    Factory for global minimizers. A global minimizer requires a secondary
    (local) minimization, supplied via setup(), used to refine its solution.
    """

    def __init__(self, minimizer_type):
        super(GlobalMinimization, self).__init__(minimizer_type)

        # Fix: the assertion message used to say "local minimizer",
        # copy-pasted from LocalMinimization.
        assert issubclass(self._minimizer_type, GlobalMinimizer), (
            "Minimizer %s is not a global minimizer" % minimizer_type
        )

        self._2nd_minimization = None

    def setup(self, **setup_dict):
        """
        Store the setup options; a 'second_minimization' entry (the local
        minimization used to polish the global result) is mandatory.
        """
        assert "second_minimization" in setup_dict, (
            "You have to provide a secondary minimizer during setup, "
            "using the second_minimization keyword"
        )

        self._2nd_minimization = setup_dict["second_minimization"]

        super(GlobalMinimization, self).setup(**setup_dict)

    def get_second_minimization_instance(self, *args, **kwargs):
        """Build an instance of the secondary (local) minimizer."""
        return self._2nd_minimization.get_instance(*args, **kwargs)

    def get_instance(self, *args, **kwargs):
        """Build an instance of the global minimizer and apply algorithm/setup."""
        instance = self._minimizer_type(*args, **kwargs)

        if instance is not None and self._algorithm is not None:
            instance.set_algorithm(self._algorithm)

        # Set up the minimizer with the recorded options
        instance._setup(self._setup_dict)

        return instance
class Minimizer(object):
    def __init__(self, function, parameters, verbosity=1, setup_dict=None):
        """
        :param function: function to be minimized
        :param parameters: ordered dictionary of the FREE parameters in the fit. The order must be the same as
               in the calling sequence of the function to be minimized.
        :param verbosity: control the verbosity of the output
        :param setup_dict: optional dictionary of setup options, forwarded to _setup
        :return:
        """
        self._function = function
        self._external_parameters = parameters
        # Parameter specs mapped into the internal (possibly transformed) frame
        self._internal_parameters = self._update_internal_parameter_dictionary()
        self._Npar = len(list(self.parameters.keys()))
        self._verbosity = verbosity
        self._setup(setup_dict)
        # Filled in by the fit / covariance computation
        self._fit_results = None
        self._covariance_matrix = None
        self._correlation_matrix = None
        self._algorithm_name = None
        self._m_log_like_minimum = None
        # NOTE(review): `type` here is the Python builtin, so this always
        # stores "<class 'type'>". The original docstring mentioned a `type`
        # parameter (LOCAL_OPTIMIZER/GLOBAL_OPTIMIZER) that does not exist in
        # the signature — confirm intended value before changing.
        self._optimizer_type = str(type)
def _update_internal_parameter_dictionary(self):
"""
Returns a dictionary parameter_name -> (current value, delta, minimum, maximum) in the internal frame
(if the parameter has a transformation set).
This should be used by the implementation of the minimizers to get the parameters to optimize.
:return: dictionary
"""
# Prepare the dictionary for the parameters which will be used by iminuit
internal_parameter_dictionary = collections.OrderedDict()
# NOTE: we use the internal_ versions of value, min_value and max_value because they don't have
# units, and they are transformed to make the fit easier (for example in log scale)
# NOTE as well that as in the entire class here, the .parameters dictionary only contains free parameters,
# as only free parameters are passed to the constructor of the minimizer
for k, par in self.parameters.items():
current_name = par.path
current_value = par._get_internal_value()
current_delta = par._get_internal_delta()
current_min = par._get_internal_min_value()
current_max = par._get_internal_max_value()
# Now fix sensible values for parameters deltas
if current_min is None and current_max is None:
# No boundaries, use 2% of value as initial delta
if abs(current_delta) < abs(current_value) * 0.02 or not np.isfinite(
current_delta
):
current_delta = abs(current_value) * 0.02
elif current_min is not None:
if current_max is not None:
# Bounded in both directions. Use 20% of the value
current_delta = abs(current_value) * 0.02
# Make sure we do not violate the boundaries
current_delta = min(
current_delta,
abs(current_value - current_delta) / 10.0,
abs(current_value + current_delta) / | |
= Var(within=Reals,bounds=(0,None),initialize=0)
# Declare the non-negative continuous variables x4437 ... x4782 on the model.
# Behavior-identical replacement for 346 machine-generated one-per-line
# assignments: `m.x<i> = Var(...)` is attribute assignment, so setattr in a
# loop creates exactly the same components with the same names, domain,
# bounds and initial value.
for _i in range(4437, 4783):
    setattr(m, "x%d" % _i, Var(within=Reals, bounds=(0, None), initialize=0))
m.x4783 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4784 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4785 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4786 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4787 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4788 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4789 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4790 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4791 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4792 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4793 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4794 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4795 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4796 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4797 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4798 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4799 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4800 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4801 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4802 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4803 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4804 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4805 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4806 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4807 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4808 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4809 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4810 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4811 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4812 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4813 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4814 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4815 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4816 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4817 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4818 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4819 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4820 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4821 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4822 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4823 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4824 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4825 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4826 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4827 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4828 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4829 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4830 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4831 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4832 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4833 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4834 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4835 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4836 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4837 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4838 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4839 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4840 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4841 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4842 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4843 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4844 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4845 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4846 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4847 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4848 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4849 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4850 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4851 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4852 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4853 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4854 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4855 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4856 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4857 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4858 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4859 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4860 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4861 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4862 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4863 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4864 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4865 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4866 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4867 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4868 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4869 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4870 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4871 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4872 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4873 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4874 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4875 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4876 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4877 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4878 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4879 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4880 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4881 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4882 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4883 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4884 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4885 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4886 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4887 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4888 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4889 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4890 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4891 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4892 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4893 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4894 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4895 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4896 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4897 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4898 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4899 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4900 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4901 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4902 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4903 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4904 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4905 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4906 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4907 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4908 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4909 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4910 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4911 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4912 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4913 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4914 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4915 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4916 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4917 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4918 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4919 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4920 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4921 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4922 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4923 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4924 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4925 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4926 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4927 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4928 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4929 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4930 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4931 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4932 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4933 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4934 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4935 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4936 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4937 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4938 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4939 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4940 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4941 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4942 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4943 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4944 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4945 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4946 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4947 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4948 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4949 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4950 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4951 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4952 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4953 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4954 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4955 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4956 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4957 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4958 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4959 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4960 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4961 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4962 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4963 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4964 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4965 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4966 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4967 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4968 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4969 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4970 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4971 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4972 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4973 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4974 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4975 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4976 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4977 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4978 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4979 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4980 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4981 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4982 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4983 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4984 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4985 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4986 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4987 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4988 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4989 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4990 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4991 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4992 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4993 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4994 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4995 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4996 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4997 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4998 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4999 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5000 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5001 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5002 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5003 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5004 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5005 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5006 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5007 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5008 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5009 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5010 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5011 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5012 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5013 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5014 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5015 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5016 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5017 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5018 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5019 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5020 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5021 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5022 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5023 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5024 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5025 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5026 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5027 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5028 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5029 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5030 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5031 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5032 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5033 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5034 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5035 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5036 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5037 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5038 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5039 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5040 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5041 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5042 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5043 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5044 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5045 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5046 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5047 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5048 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5049 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5050 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5051 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5052 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5053 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5054 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5055 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5056 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5057 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5058 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5059 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5060 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5061 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5062 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5063 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5064 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5065 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5066 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5067 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5068 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5069 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5070 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5071 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5072 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5073 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5074 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5075 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5076 | |
of numberRangeLooks0, is used, since we need to do next step multilooking after unwrapping. same for numberAzimuthLooks.
(burstValidBox, burstValidBox2, message) = adjustValidWithLooks(frames, box, ionParam.numberAzimuthLooks, ionParam.numberRangeLooks, edge=0, avalid='strict', rvalid='strict')
mergeBurstsVirtual(frames, burstList, box, os.path.join(mergeDirname, mergeFilename+suffix))
if suffix not in ['',None]:
multilook2(os.path.join(mergeDirname, mergeFilename+suffix),
outname = os.path.join(mergeDirname, mergeFilename),
alks = ionParam.numberAzimuthLooks0, rlks=ionParam.numberRangeLooks0)
#this is never used for ionosphere correction
else:
print('Skipping multi-looking ....')
#The orginal coherence calculated by topsApp.py is not good at all, use the following coherence instead
lowerintfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname, self._insar.mergedIfgname)
upperintfile = os.path.join(ionParam.ionDirname, ionParam.upperDirname, ionParam.mergedDirname, self._insar.mergedIfgname)
corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname, self._insar.correlationFilename)
img = isceobj.createImage()
img.load(lowerintfile + '.xml')
width = img.width
length = img.length
lowerint = np.fromfile(lowerintfile, dtype=np.complex64).reshape(length, width)
upperint = np.fromfile(upperintfile, dtype=np.complex64).reshape(length, width)
#compute coherence only using interferogram
#here I use differential interferogram of lower and upper band interferograms
#so that coherence is not affected by fringes
cord = cal_coherence(lowerint*np.conjugate(upperint), win=3, edge=4)
cor = np.zeros((length*2, width), dtype=np.float32)
cor[0:length*2:2, :] = np.sqrt( (np.absolute(lowerint)+np.absolute(upperint))/2.0 )
cor[1:length*2:2, :] = cord
cor.astype(np.float32).tofile(corfile)
#create xml and vrt
#img.scheme = 'BIL'
#img.bands = 2
#img.filename = corfile
#img.renderHdr()
#img = isceobj.Image.createUnwImage()
img = isceobj.createOffsetImage()
img.setFilename(corfile)
img.extraFilename = corfile + '.vrt'
img.setWidth(width)
img.setLength(length)
img.renderHdr()
def renameFile(oldname, newname):
    '''
    Rename an ISCE image data file along with its .xml/.vrt metadata.

    The existing metadata is loaded, re-rendered under the new name, the
    data file is moved, and the stale metadata of the old name is removed.
    '''
    meta = isceobj.createImage()
    meta.load('{}.xml'.format(oldname))
    meta.setFilename(newname)
    meta.extraFilename = '{}.vrt'.format(newname)
    meta.renderHdr()
    # move the raw data, then drop the now-orphaned metadata files
    os.rename(oldname, newname)
    for ext in ('.xml', '.vrt'):
        os.remove(oldname + ext)
def maskUnwrap(unwfile, maskfile):
    '''
    Zero out both bands of an unwrapped interferogram wherever maskfile is zero.

    The unwrapped file is first moved aside to a temporary name; imageMath.py
    then writes the masked result back under the original name.
    '''
    tmpfile = 'tmp.unw'
    renameFile(unwfile, tmpfile)
    runCmd(
        "imageMath.py -e='a_0*(abs(b)!=0);a_1*(abs(b)!=0)' --a={0} --b={1} -s BIL -o={2}".format(
            tmpfile, maskfile, unwfile))
    # clean up the temporary copy and its metadata
    for leftover in (tmpfile, tmpfile + '.xml', tmpfile + '.vrt'):
        os.remove(leftover)
def snaphuUnwrap(self, xmlDirname, wrapName, corrfile, unwrapName, nrlks, nalks, costMode = 'DEFO',initMethod = 'MST', defomax = 4.0, initOnly = False):
    #runUnwrap(self, costMode = 'SMOOTH',initMethod = 'MCF', defomax = 2, initOnly = True)
    '''
    Unwrap an interferogram with SNAPHU.

    xmlDirname: xml dir name (IW*.xml swath products; source of wavelength,
                orbit and timing metadata)
    wrapName: input interferogram
    corrfile: input coherence file
    unwrapName: output unwrapped interferogram
    nrlks: number of range looks of the interferogram
    nalks: number of azimuth looks of the interferogram
    costMode, initMethod, defomax, initOnly: SNAPHU settings, passed through
    '''
    from contrib.Snaphu.Snaphu import Snaphu
    from isceobj.Planet.Planet import Planet

    img = isceobj.createImage()
    img.load(wrapName + '.xml')
    width = img.getWidth()

    #get altitude: peg on the middle burst of the first valid swath
    swathList = self._insar.getValidSwathList(self.swaths)
    for swath in swathList[0:1]:
        ifg = self._insar.loadProduct( os.path.join(xmlDirname, 'IW{0}.xml'.format(swath)))
        wavelength = ifg.bursts[0].radarWavelength

        ####tmid
        tstart = ifg.bursts[0].sensingStart
        tend = ifg.bursts[-1].sensingStop
        tmid = tstart + 0.5*(tend - tstart)

        #14-APR-2018
        #np.int was removed in NumPy 1.24; the builtin int behaves identically here
        burst_index = int(np.around(len(ifg.bursts)/2))
        orbit = ifg.bursts[burst_index].orbit
        peg = orbit.interpolateOrbit(tmid, method='hermite')

        refElp = Planet(pname='Earth').ellipsoid
        llh = refElp.xyz_to_llh(peg.getPosition())
        hdg = orbit.getENUHeading(tmid)
        refElp.setSCH(llh[0], llh[1], hdg)
        earthRadius = refElp.pegRadCur
        altitude = llh[2]

    rangeLooks = nrlks
    azimuthLooks = nalks
    #effective number of independent looks fed to SNAPHU
    azfact = 0.8
    rngfact = 0.8
    corrLooks = rangeLooks * azimuthLooks/(azfact*rngfact)
    maxComponents = 20

    snp = Snaphu()
    snp.setInitOnly(initOnly)
    snp.setInput(wrapName)
    snp.setOutput(unwrapName)
    snp.setWidth(width)
    snp.setCostMode(costMode)
    snp.setEarthRadius(earthRadius)
    snp.setWavelength(wavelength)
    snp.setAltitude(altitude)
    snp.setCorrfile(corrfile)
    snp.setInitMethod(initMethod)
    snp.setCorrLooks(corrLooks)
    snp.setMaxComponents(maxComponents)
    snp.setDefoMaxCycles(defomax)
    snp.setRangeLooks(rangeLooks)
    snp.setAzimuthLooks(azimuthLooks)
    #snp.setCorFileFormat('FLOAT_DATA')
    snp.prepare()
    snp.unwrap()

    ######Render XML
    outImage = isceobj.Image.createUnwImage()
    outImage.setFilename(unwrapName)
    outImage.setWidth(width)
    outImage.setAccessMode('read')
    outImage.renderVRT()
    outImage.createImage()
    outImage.finalizeImage()
    outImage.renderHdr()

    #####Check if connected components was created
    if snp.dumpConnectedComponents:
        connImage = isceobj.Image.createImage()
        connImage.setFilename(unwrapName+'.conncomp')
        connImage.setWidth(width)
        connImage.setAccessMode('read')
        connImage.setDataType('BYTE')
        connImage.renderVRT()
        connImage.createImage()
        connImage.finalizeImage()
        connImage.renderHdr()

    return
def unwrap(self, ionParam):
    '''
    Unwrap the lower- and upper-band interferograms with SNAPHU, mask their
    no-data areas, and multilook the results when the final number of looks
    differs from the unwrapping looks.
    '''
    print('unwrapping lower and upper band interferograms')

    #there is only one coherence file, kept in the lower-band directory
    corfile = os.path.join(ionParam.ionDirname, ionParam.lowerDirname,
                           ionParam.mergedDirname, self._insar.correlationFilename)
    #swath xml products also live under the lower-band directory
    xmlDirname = os.path.join(ionParam.ionDirname, ionParam.lowerDirname,
                              ionParam.fineIfgDirname)

    for bandDir in (ionParam.lowerDirname, ionParam.upperDirname):
        procdir = os.path.join(ionParam.ionDirname, bandDir, ionParam.mergedDirname)
        wrapName = os.path.join(procdir, self._insar.mergedIfgname)
        unwrapName = os.path.join(procdir, self._insar.unwrappedIntFilename)
        #unwrap
        snaphuUnwrap(self, xmlDirname, wrapName, corfile, unwrapName,
                     ionParam.numberRangeLooks0, ionParam.numberAzimuthLooks0,
                     costMode='SMOOTH', initMethod='MCF', defomax=2, initOnly=True)
        #remove wired things in no-data area
        maskUnwrap(unwrapName, wrapName)

    if [ionParam.numberRangeLooks0, ionParam.numberAzimuthLooks0] != \
       [ionParam.numberRangeLooks, ionParam.numberAzimuthLooks]:
        multilook_unw(self, ionParam, ionParam.mergedDirname)
def multilook_unw(self, ionParam, mergedDirname):
    '''
    30-APR-2018
    This routine moves the original unwrapped files to a directory and takes looks

    The original (numberRangeLooks0 x numberAzimuthLooks0) products of each
    band are moved into a subdirectory named '<nr>rlks_<na>alks', then
    multi-looked versions are written back under the original names.
    '''
    from isceobj.TopsProc.runMergeBursts import multilook as multilook2

    #subdirectory name for the original (pre-multilook) files
    oridir0 = '{}rlks_{}alks'.format(ionParam.numberRangeLooks0, ionParam.numberAzimuthLooks0)
    dirs = [ionParam.lowerDirname, ionParam.upperDirname]
    #NOTE(review): this value is reassigned in the first (lower-band) loop
    #iteration before any use; kept for backward compatibility
    corName = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, ionParam.mergedDirname, oridir0, self._insar.correlationFilename)
    for dirx in dirs:
        procdir = os.path.join(ionParam.ionDirname, dirx, mergedDirname)

        #create a directory for original files
        oridir = os.path.join(procdir, oridir0)
        os.makedirs(oridir, exist_ok=True)

        #move files, renameFile uses os.rename, which overwrites if file already exists in oridir. This can support re-run
        filename0 = os.path.join(procdir, self._insar.mergedIfgname)
        filename = os.path.join(oridir, self._insar.mergedIfgname)
        if os.path.isfile(filename0):
            renameFile(filename0, filename)

        filename0 = os.path.join(procdir, self._insar.unwrappedIntFilename)
        filename = os.path.join(oridir, self._insar.unwrappedIntFilename)
        if os.path.isfile(filename0):
            renameFile(filename0, filename)

        filename0 = os.path.join(procdir, self._insar.unwrappedIntFilename+'.conncomp')
        filename = os.path.join(oridir, self._insar.unwrappedIntFilename+'.conncomp')
        if os.path.isfile(filename0):
            renameFile(filename0, filename)

        filename0 = os.path.join(procdir, self._insar.correlationFilename)
        filename = os.path.join(oridir, self._insar.correlationFilename)
        if os.path.isfile(filename0):
            renameFile(filename0, filename)

        #for topophase.flat.full, move directly
        filename0 = os.path.join(procdir, self._insar.mergedIfgname+'.full.vrt')
        filename = os.path.join(oridir, self._insar.mergedIfgname+'.full.vrt')
        if os.path.isfile(filename0):
            os.rename(filename0, filename)

        filename0 = os.path.join(procdir, self._insar.mergedIfgname+'.full.xml')
        filename = os.path.join(oridir, self._insar.mergedIfgname+'.full.xml')
        if os.path.isfile(filename0):
            os.rename(filename0, filename)

        #multi-looking
        #np.int was removed in NumPy 1.24; the builtin int behaves identically here
        nrlks = int(np.around(ionParam.numberRangeLooks / ionParam.numberRangeLooks0))
        nalks = int(np.around(ionParam.numberAzimuthLooks / ionParam.numberAzimuthLooks0))

        #coherence
        #NOTE: width/length/wgt/a/d computed in this branch are reused by the
        #unwrapped-file section below in BOTH iterations (the lower band is
        #processed first, and both bands share the same dimensions)
        if dirx == ionParam.lowerDirname:
            corName0 = os.path.join(oridir, self._insar.correlationFilename)
            corimg = isceobj.createImage()
            corimg.load(corName0 + '.xml')
            width = corimg.width
            length = corimg.length
            widthNew = int(width / nrlks)
            lengthNew = int(length / nalks)

            #BIL file: even rows are amplitude, odd rows are coherence
            cor0 = (np.fromfile(corName0, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
            amp0 = (np.fromfile(corName0, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
            wgt = cor0**2
            a = multilook(wgt, nalks, nrlks)
            b = multilook(cor0, nalks, nrlks)
            c = multilook(amp0**2, nalks, nrlks)
            #valid-sample count per multilook window (avoids bias from zeros)
            d = multilook((cor0!=0).astype(int), nalks, nrlks)

            #coherence after multiple looking
            cor = np.zeros((lengthNew*2, widthNew), dtype=np.float32)
            cor[0:lengthNew*2:2, :] = np.sqrt(c / (d + (d==0)))
            cor[1:lengthNew*2:2, :] = b / (d + (d==0))

            #output file
            corName = os.path.join(procdir, self._insar.correlationFilename)
            cor.astype(np.float32).tofile(corName)
            corimg.setFilename(corName)
            corimg.extraFilename = corName + '.vrt'
            corimg.setWidth(widthNew)
            corimg.setLength(lengthNew)
            corimg.renderHdr()

        #unwrapped file (phase is averaged with coherence**2 weighting)
        unwrapName0 = os.path.join(oridir, self._insar.unwrappedIntFilename)
        unwimg = isceobj.createImage()
        unwimg.load(unwrapName0 + '.xml')
        unw0 = (np.fromfile(unwrapName0, dtype=np.float32).reshape(length*2, width))[1:length*2:2, :]
        amp0 = (np.fromfile(unwrapName0, dtype=np.float32).reshape(length*2, width))[0:length*2:2, :]
        e = multilook(unw0*wgt, nalks, nrlks)
        f = multilook(amp0**2, nalks, nrlks)
        unw = np.zeros((lengthNew*2, widthNew), dtype=np.float32)
        unw[0:lengthNew*2:2, :] = np.sqrt(f / (d + (d==0)))
        unw[1:lengthNew*2:2, :] = e / (a + (a==0))

        #output file
        unwrapName = os.path.join(procdir, self._insar.unwrappedIntFilename)
        unw.astype(np.float32).tofile(unwrapName)
        unwimg.setFilename(unwrapName)
        unwimg.extraFilename = unwrapName + '.vrt'
        unwimg.setWidth(widthNew)
        unwimg.setLength(lengthNew)
        unwimg.renderHdr()

    #looks like the above is not a good coherence, re-calculate here
    #here I use differential interferogram of lower and upper band interferograms
    #so that coherence is not affected by fringes
    lowerIntName0 = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, mergedDirname, oridir0, self._insar.mergedIfgname)
    upperIntName0 = os.path.join(ionParam.ionDirname, ionParam.upperDirname, mergedDirname, oridir0, self._insar.mergedIfgname)
    lowerIntName = os.path.join(ionParam.ionDirname, ionParam.lowerDirname, mergedDirname, self._insar.mergedIfgname)
    upperIntName = os.path.join(ionParam.ionDirname, ionParam.upperDirname, mergedDirname, self._insar.mergedIfgname)
    multilook2(lowerIntName0, outname = lowerIntName, alks = nalks, rlks=nrlks)
    multilook2(upperIntName0, outname = upperIntName, alks = nalks, rlks=nrlks)

    lowerint = np.fromfile(lowerIntName, dtype=np.complex64).reshape(lengthNew, widthNew)
    upperint = np.fromfile(upperIntName, dtype=np.complex64).reshape(lengthNew, widthNew)
    cor = np.zeros((lengthNew*2, widthNew), dtype=np.float32)
    #fixed: these slices previously ran 0:length*2 and only worked because
    #numpy clamps slice stops; the multi-looked arrays have lengthNew rows
    cor[0:lengthNew*2:2, :] = np.sqrt( (np.absolute(lowerint)+np.absolute(upperint))/2.0 )
    cor[1:lengthNew*2:2, :] = cal_coherence(lowerint*np.conjugate(upperint), win=3, edge=4)
    cor.astype(np.float32).tofile(corName)
def create_multi_index2(width2, l1, l2):
    '''
    Map sample indices of an l2-look array into the index space of an
    l1-look array (applies to both the range and azimuth directions).

    width2: number of samples in the l2-look array
    l1, l2: the two numbers of looks
    '''
    center_offset = (l2 - l1) / 2.0
    sample_indices = np.arange(width2)
    return (center_offset + sample_indices * l2) / l1
def fit_surface(x, y, z, wgt, order):
    '''
    Fit a 2-D polynomial surface z = f(x, y) by weighted least squares.

    x:     x coordinates, a column vector (m x 1)
    y:     y coordinates, a column vector (m x 1)
    z:     z values, a column vector (m x 1)
    wgt:   weight of the data points, a column vector (m x 1)
    order: order of the polynomial, must be >= 1

    Returns the coefficient column vector (numpy.ndarray), ordered as
    1, x, y, x**2, x*y, y**2, x**3, ...
    '''
    if order < 1:
        #message fixed: order == 1 is allowed (the check is order < 1)
        raise Exception('order must be at least 1.\n')

    #number of data points
    m = x.shape[0]
    l = np.ones((m, 1), dtype=np.float64)

    #design matrix: column of ones, then every monomial x**(i-j) * y**j
    #for total degree i = 1..order
    a1 = l
    for i in range(1, order+1):
        for j in range(i+1):
            a1 = np.concatenate((a1, x**(i-j)*y**(j)), axis=1)

    #number of variables to be estimated
    n = a1.shape[1]

    #weighted least squares: scale each row by sqrt(weight)
    #(np.tile replaces np.matlib.repmat, which needs an explicit
    #"import numpy.matlib" and is deprecated)
    a = a1 * np.tile(np.sqrt(wgt), (1, n))
    b = z * np.sqrt(wgt)
    c = np.linalg.lstsq(a, b, rcond=-1)[0]

    return c
def cal_surface(x, y, c, order):
#x: x coordinate, a row vector
#y: y coordinate, a column vector
#c: coefficients of polynomial from fit_surface
#order: order of polynomial
if order < 1:
raise Exception('order must be | |
save location
"""
# self._save_to_file(save_path, data={}, params=None)
data = {
"landmark_buffer": self.landmark_buffer,
"refine_with_NN": self.refine_with_NN,
"use_actions": self.use_actions,
"hidden_dim": self.hidden_dim,
"z_dim": self.z_dim,
"batch_size": self.batch_size,
"num_training_steps": self.num_training_steps,
"learning_threshold": self.learning_threshold,
"max_nn_index_size": self.max_nn_index_size,
"steps": self.steps,
"landmarks": self.landmarks,
"d": self.d,
"ac_space": self.ac_space,
"ob_space": self.ob_space,
"goal_space": self.goal_space,
"goal_extraction_function": self.goal_extraction_function,
"buffer_size": self.buffer_size,
"env": self.env
}
index_filename = save_path + '.faiss_idx'
faiss.write_index(self.index, index_filename)
# Model paramaters to be restored
params = self.sess.run(self.params)
self._save_to_file(save_path, data=data, params=params)
class ScoreBasedVAEWithNNRefinement(AbstractLandmarkGenerator):
    """Score-conditioned CVAE landmark generator with optional NN refinement.

    The CVAE condition vector is (state, [action], goal, score, ratio); at
    generation time the condition is fixed to score=1, ratio=0 so the decoder
    proposes landmarks associated with high-scoring experience.  When
    ``refine_with_NN`` is set, generated landmarks are snapped to their
    nearest previously observed state via a FAISS L2 index.
    """

    def __init__(self, buffer_size, env, refine_with_NN=False, use_actions=True, batch_size=128, num_training_steps=100, learning_threshold=100, max_nn_index_size=200000, _init_setup_model=True):
        # NOTE(review): `_init_setup_model` is forwarded to super() as a
        # hard-coded True while the local flag only gates _setup_model()
        # below -- presumably intentional; confirm against the base class.
        super().__init__(buffer_size, env, _init_setup_model=True)
        self.get_scores_with_experiences = True
        self.refine_with_NN = refine_with_NN
        self.use_actions = use_actions
        self.hidden_dim = 400
        self.z_dim = 50
        self.batch_size = batch_size
        self.num_training_steps = num_training_steps
        self.learning_threshold = learning_threshold
        self.max_nn_index_size = max_nn_index_size
        if self.goal_extraction_function is None:
            # BUG FIX: the message previously named the wrong class.
            raise ValueError("ScoreBasedVAEWithNNRefinement requires a goal_extraction function!")
        self.steps = 0
        ## Replay Buffers ##
        self.landmarks = np.zeros((0, self.ob_space.shape[-1]))
        self.landmark_buffer = ReplayBuffer(self.buffer_size, [("state", self.ob_space.shape),
                                                               ("action", self.ac_space.shape),
                                                               ("landmark", self.ob_space.shape),
                                                               ("desired_goal", self.goal_space.shape),
                                                               ("scores", (2,))  # score & ratio
                                                               ])
        ## NN Index ##
        self.d = self.ob_space.shape[-1]
        self.index = faiss.IndexFlatL2(self.d)
        if _init_setup_model:
            self._setup_model()

    def _setup_model(self):
        """Build the CVAE graph (encoder, decoder, loss, train op) in a
        dedicated Graph/Session under the "scorevae" variable scope."""
        ## CVAE Graph ##
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.sess = tf_util.make_session(graph=self.graph)
            with tf.variable_scope("scorevae"):
                # condition vector: state + goal + (score, ratio) [+ action]
                sag_len = self.ob_space.shape[-1] + self.goal_space.shape[-1] + 2
                if self.use_actions:
                    sag_len += self.ac_space.shape[-1]
                state_action_goal_scores = tf.placeholder(tf.float32, [None, sag_len], name='sag_placeholder')
                landmark = tf.placeholder(tf.float32, [None, self.ob_space.shape[-1]], name='lm_placeholder')
                # encoder: q(z | landmark, condition)
                h = tf.layers.dense(tf.concat([state_action_goal_scores, landmark], axis=1), self.hidden_dim, activation=tf.nn.relu)
                h = tf.layers.dense(h, self.hidden_dim, activation=tf.nn.relu)
                mu = tf.layers.dense(h, self.z_dim)
                log_variance = tf.layers.dense(h, self.z_dim)
                # reparameterization trick: z = mu + sigma * eps
                z = tf.random_normal(shape=tf.shape(mu)) * tf.sqrt(tf.exp(log_variance)) + mu
                # decoder: p(landmark | z, condition)
                h = tf.layers.dense(tf.concat([state_action_goal_scores, z], axis=1), self.hidden_dim, activation=tf.nn.relu)
                h = tf.layers.dense(h, self.hidden_dim, activation=tf.nn.relu)
                generated_landmark = tf.layers.dense(h, self.ob_space.shape[-1])
                # Distortion is the negative log likelihood: P(X|z,c)
                l2_loss = tf.reduce_sum(tf.squared_difference(landmark, generated_landmark), 1)
                tf.summary.scalar("VAE_distortion_l2Loss", tf.reduce_mean(l2_loss))
                # The rate is the D_KL(Q(z|X,y)||P(z|c))
                latent_loss = -0.5 * tf.reduce_sum(1.0 + log_variance - tf.square(mu) - tf.exp(log_variance), 1)
                tf.summary.scalar("VAE_rate_LatentLoss", tf.reduce_mean(latent_loss))
                loss = tf.reduce_mean(l2_loss + latent_loss)
                tf.summary.scalar("VAE_elbo", loss)
                opt = tf.train.AdamOptimizer()
                gradients = opt.compute_gradients(loss, var_list=tf.trainable_variables())
                # clip each gradient to norm 1 for training stability
                for i, (grad, var) in enumerate(gradients):
                    if grad is not None:
                        gradients[i] = (tf.clip_by_norm(grad, 1.), var)
                ts = opt.apply_gradients(gradients)
                init = tf.global_variables_initializer()
            self.summary = tf.summary.merge_all()
            self.params = tf.global_variables("scorevae")
            self.sess.run(init)
            self.g = {
                'sagadd_ph': state_action_goal_scores,
                'lm_ph': landmark,
                'z': z,
                'generated_landmark': generated_landmark,
                'loss': loss,
                'ts': ts,
                'summary': self.summary
            }

    def add_state_data(self, states, goals):
        """Add state data to buffer (for initial random generation), and also to NN Index"""
        self.landmarks = np.concatenate((self.landmarks, states), 0)
        self.index.add(states.astype('float32'))
        nn_size = self.index.ntotal
        if nn_size > self.max_nn_index_size:
            # prune half of the nn database.
            # I verified that this works as intended: when we remove ids from faiss, all ids get bumped up.
            self.index.remove_ids(faiss.IDSelectorRange(0, self.max_nn_index_size // 2))
            self.landmarks = self.landmarks[self.max_nn_index_size // 2:]
            nn_size = self.index.ntotal
        assert(len(self.landmarks) == nn_size)

    def add_landmark_experience_data(self, states, actions, landmarks, desired_goals, additional):
        """Store a batch of landmark experience and run CVAE training steps.

        Returns the TF summary of the last training step, or None when
        ``num_training_steps`` is zero.
        """
        self.landmark_buffer.add_batch(states, actions, landmarks, desired_goals, additional)
        loss = 0
        # BUG FIX: `summary` was unbound when num_training_steps == 0.
        summary = None
        # run some training steps
        for _ in range(self.num_training_steps):
            self.steps += 1
            s, a, l, g, add = self.landmark_buffer.sample(self.batch_size)
            if self.use_actions:
                feed_dict = {self.g['sagadd_ph']: np.concatenate((s, a, g, add), axis=1),
                             self.g['lm_ph']: l}
            else:
                feed_dict = {self.g['sagadd_ph']: np.concatenate((s, g, add), axis=1),
                             self.g['lm_ph']: l}
            _, loss_, summary = self.sess.run([self.g['ts'], self.g['loss'], self.g['summary']], feed_dict=feed_dict)
            loss += loss_
        # BUG FIX: guard the division so num_training_steps == 0 cannot
        # trigger a ZeroDivisionError in the log line.
        if self.num_training_steps and self.steps % 100 == 0:
            print("Landmark CVAE step {}: loss {}".format(self.steps, loss / self.num_training_steps))
        return summary

    def generate(self, states, actions, goals):
        """Generate one landmark state (and extracted goal) per input state."""
        self.states = states
        self.actions = actions
        self.goals = goals
        # Generate randomly at first
        if self.steps < self.learning_threshold:
            # BUG FIX: np.random.choice only accepts 1-D arrays, so sampling
            # directly from the 2-D landmark matrix raised ValueError; sample
            # row indices (with replacement) instead.
            idxs = np.random.randint(len(self.landmarks), size=len(states))
            self.landmark_states = landmark_states = self.landmarks[idxs]
            landmark_goals = self.goal_extraction_function(landmark_states)
            return landmark_states, landmark_goals
        # Otherwise, generate using the VAE
        sampled_zs = np.random.normal(size=(len(states), self.z_dim))
        scores = np.concatenate([np.ones((len(states), 1)), np.zeros((len(states), 1))], 1)  # condition on score = 1, ratio = 0.
        if self.use_actions:
            feed_dict = {self.g['z']: sampled_zs, self.g['sagadd_ph']: np.concatenate([states, actions, goals, scores], axis=1)}
        else:
            feed_dict = {self.g['z']: sampled_zs, self.g['sagadd_ph']: np.concatenate([states, goals, scores], axis=1)}
        landmark_states = self.sess.run(self.g['generated_landmark'], feed_dict=feed_dict)
        if self.refine_with_NN:
            query = landmark_states.astype('float32')
            _, lidxs = self.index.search(query, 1)
            landmark_states = self.landmarks[lidxs[:, 0]]
        landmark_goals = self.goal_extraction_function(landmark_states)
        return landmark_states, landmark_goals

    def assign_scores(self, scores, ratios):
        # Do nothing: this generator receives scores together with
        # experiences (get_scores_with_experiences is True), so there is
        # nothing to assign here.
        pass

    def __len__(self):
        return self.index.ntotal

    def save(self, save_path):
        """
        Save the current parameters to file

        :param save_path: (str) the save location
        """
        data = {
            "get_scores_with_experiences": self.get_scores_with_experiences,
            "landmark_buffer": self.landmark_buffer,
            "refine_with_NN": self.refine_with_NN,
            "use_actions": self.use_actions,
            "hidden_dim": self.hidden_dim,
            "z_dim": self.z_dim,
            "batch_size": self.batch_size,
            "num_training_steps": self.num_training_steps,
            "learning_threshold": self.learning_threshold,
            "max_nn_index_size": self.max_nn_index_size,
            "steps": self.steps,
            "landmarks": self.landmarks,
            "d": self.d,
            "ac_space": self.ac_space,
            "ob_space": self.ob_space,
            "goal_space": self.goal_space,
            "goal_extraction_function": self.goal_extraction_function,
            "buffer_size": self.buffer_size,
            "env": self.env
        }
        # the FAISS index is not picklable; persist it alongside the data
        index_filename = save_path + '.faiss_idx'
        faiss.write_index(self.index, index_filename)
        # Model parameters to be restored
        params = self.sess.run(self.params)
        self._save_to_file(save_path, data=data, params=params)
class FetchPushHeuristicGenerator(AbstractLandmarkGenerator):
    """Hand-crafted landmark generator for FetchPush-style tasks.

    Builds a heuristic target state placing the gripper behind the object
    along the object-to-goal direction, then snaps the proposal to the
    nearest previously observed state via a FAISS L2 index.
    """

    def __init__(self, buffer_size, env, _init_setup_model=True):
        super().__init__(buffer_size, env, _init_setup_model=True)
        self.landmarks = np.zeros((0, self.ob_space.shape[-1]))
        self.d = self.ob_space.shape[-1]
        self.index = faiss.IndexFlatL2(self.d)
        self.max_nn_index_size = 200000

    def add_state_data(self, states, goals):
        """Add state data to buffer (for initial random generation), and also to NN Index"""
        self.landmarks = np.concatenate((self.landmarks, states), 0)
        self.index.add(states.astype('float32'))
        nn_size = self.index.ntotal
        if nn_size > self.max_nn_index_size:
            # prune half of the nn database.
            # I verified that this works as intended: when we remove ids from faiss, all ids get bumped up.
            self.index.remove_ids(faiss.IDSelectorRange(0, self.max_nn_index_size // 2))
            self.landmarks = self.landmarks[self.max_nn_index_size // 2:]
            nn_size = self.index.ntotal
        assert(len(self.landmarks) == nn_size)

    def add_landmark_experience_data(self, states, actions, landmarks, desired_goals, additional):
        # the heuristic generator does not learn from landmark experience
        pass

    def generate(self, states, actions, goals):
        """6 dim goals"""
        goal_pos = goals[:, :3]
        obj_pos = states[:, 3:6]
        gripper_state = states[:, 9:11]
        obj_rot = states[:, 11:14]
        goal_direction = goal_pos - obj_pos
        # BUG FIX: np.linalg.norm(goal_direction, 1) computed the *matrix*
        # 1-norm (a single scalar) instead of per-row vector lengths, so the
        # "normalized" directions were not unit vectors.  Normalize each row
        # by its own Euclidean length.
        normalized_goal_direction = goal_direction / np.linalg.norm(goal_direction, axis=1, keepdims=True)
        # NOTE(review): the third component below looks like it may have been
        # intended as obj_pos + normalized_goal_direction * 0.15; kept as-is
        # to preserve the existing layout -- confirm against the env's
        # observation specification.
        landmark_states = np.concatenate([
            obj_pos - normalized_goal_direction * 0.15,
            obj_pos,
            normalized_goal_direction * 0.15,
            gripper_state,
            obj_rot,
            np.zeros((len(states), 11))
        ], 1)
        query = landmark_states.astype('float32')
        _, lidxs = self.index.search(query, 1)
        landmark_states = self.landmarks[lidxs[:, 0]]
        landmark_goals = self.goal_extraction_function(landmark_states)
        return landmark_states, landmark_goals

    def assign_scores(self, scores, ratios):
        # Do Nothing (this is not a score-based generator)
        pass

    def __len__(self):
        # NOTE(review): reports 0 rather than self.index.ntotal, unlike the
        # other generators; kept to preserve existing behavior -- confirm.
        return 0
class NonScoreBasedImageVAEWithNNRefinement(AbstractLandmarkGenerator):
    def __init__(self, buffer_size, env, refine_with_NN=False, use_actions=True, batch_size=128, num_training_steps=100,
                 learning_threshold=100, max_nn_index_size=200000, _init_setup_model=True):
        """Conditional-VAE landmark generator for image observations.

        :param buffer_size: capacity of the landmark replay buffer
        :param env: environment used by the base class for spaces/extraction
        :param refine_with_NN: snap generated landmarks to the nearest
            observed state via the FAISS index
        :param use_actions: include the (one-hot) action in the condition
        :param batch_size: minibatch size for CVAE training
        :param num_training_steps: training steps per experience batch
        :param learning_threshold: steps before switching from random to
            VAE-based generation
        :param max_nn_index_size: prune the FAISS index above this size
        :param _init_setup_model: build the TF graph immediately
        """
        # NOTE(review): _init_setup_model is forwarded to super() as a
        # hard-coded True while the local flag only gates _setup_model()
        # below -- presumably intentional; confirm against the base class.
        super().__init__(buffer_size, env, _init_setup_model=True)
        self.refine_with_NN = refine_with_NN
        self.use_actions = use_actions
        self.hidden_dim = 400  # width of the encoder/decoder hidden layers
        self.z_dim = 50        # latent dimensionality
        self.batch_size = batch_size
        self.num_training_steps = num_training_steps
        self.learning_threshold = learning_threshold
        self.max_nn_index_size = max_nn_index_size
        if self.goal_extraction_function is None:
            # NOTE(review): message names the non-image sibling class.
            raise ValueError("NonScoreBasedVAEWithNNRefinement requires a goal_extraction function!")
        self.steps = 0
        ## Replay Buffers ##
        # image observations are stored flattened (H*W*C) for the dense VAE
        self.landmarks = np.zeros((0, np.prod(self.ob_space.shape)))
        self.landmark_buffer = ReplayBuffer(self.buffer_size, [("state", self.ob_space.shape),
                                                               ("action", self.ac_space.shape),
                                                               ("landmark", self.ob_space.shape),
                                                               ("desired_goal", self.goal_space.shape)
                                                               ])
        ## NN Index ##
        self.d = np.prod(self.ob_space.shape)
        self.index = faiss.IndexFlatL2(int(self.d))
        if _init_setup_model:
            self._setup_model()
    def _setup_model(self):
        """Build the conditional-VAE graph: encoder, decoder, ELBO loss,
        clipped-gradient train op, and TensorBoard image summaries, inside a
        dedicated Graph/Session under the "lervae" variable scope."""
        ## CVAE Graph ##
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.sess = tf_util.make_session(graph=self.graph)
            with tf.variable_scope("lervae"):
                # condition vector length: flattened state + goal
                # (+ one-hot action when use_actions is set)
                sag_len = np.prod(self.ob_space.shape) + np.prod(self.goal_space.shape)
                if self.use_actions:
                    sag_len += self.ac_space.n
                state_action_goal = tf.placeholder(tf.float32, [None, sag_len], name='sag_placeholder')
                landmark = tf.placeholder(tf.float32, [None, np.prod(self.ob_space.shape)], name='lm_placeholder')
                # encoder: q(z | landmark, condition)
                h = tf.layers.dense(tf.concat([state_action_goal, landmark], axis=1), self.hidden_dim, activation=tf.nn.relu)
                h = tf.layers.dense(h, self.hidden_dim, activation=tf.nn.relu)
                mu = tf.layers.dense(h, self.z_dim)
                log_variance = tf.layers.dense(h, self.z_dim)
                # reparameterization trick: z = mu + sigma * eps
                z = tf.random_normal(shape=tf.shape(mu)) * tf.sqrt(tf.exp(log_variance)) + mu
                # decoder: p(landmark | z, condition)
                h = tf.layers.dense(tf.concat([state_action_goal, z], axis=1), self.hidden_dim, activation=tf.nn.relu)
                h = tf.layers.dense(h, self.hidden_dim, activation=tf.nn.relu)
                generated_landmark = tf.layers.dense(h, np.prod(self.ob_space.shape))
                # Distortion is the negative log likelihood: P(X|z,c)
                l2_loss = tf.reduce_sum(tf.squared_difference(landmark, generated_landmark), 1)
                # The rate is the D_KL(Q(z|X,y)||P(z|c))
                latent_loss = -0.5 * tf.reduce_sum(1.0 + log_variance - tf.square(mu) - tf.exp(log_variance), 1)
                loss = tf.reduce_mean(l2_loss + latent_loss)
                with tf.variable_scope("vae_loss"):
                    tf.summary.scalar("VAE_distortion_l2Loss", tf.reduce_mean(l2_loss))
                    tf.summary.scalar("VAE_rate_LatentLoss", tf.reduce_mean(latent_loss))
                    tf.summary.scalar("VAE_elbo", loss)
                opt = tf.train.AdamOptimizer()
                gradients = opt.compute_gradients(loss, var_list=tf.trainable_variables())
                # clip each gradient to norm 1 for training stability
                for i, (grad, var) in enumerate(gradients):
                    if grad is not None:
                        gradients[i] = (tf.clip_by_norm(grad, 1.), var)
                ts = opt.apply_gradients(gradients)
                init = tf.global_variables_initializer()
            # TODO: Log the generated outputs
            with tf.variable_scope("VAE_training_info", reuse=False):
                # reshape the flat decoder output back to image shape for
                # TensorBoard visualization
                generated_landmark_orig_shape = tf.reshape(generated_landmark, [-1] + list(self.ob_space.shape))
                tf.summary.image("VAE_gen_landmark", generated_landmark_orig_shape, max_outputs=1)
                landmark_orig_shape = tf.reshape(landmark, [-1] + list(self.ob_space.shape))
                tf.summary.image("VAE_train_landmark", landmark_orig_shape, max_outputs=1)
                # Split the SAG placeholder tensor into individual one
                if self.use_actions:
                    state_ph, action_ph, goal_ph = tf.split(state_action_goal, [np.prod(self.ob_space.shape), self.ac_space.n, np.prod(self.goal_space.shape)], 1)
                    action_shape = tf.reshape(action_ph, [-1, self.ac_space.n, 1, 1])
                    tf.summary.image("VAE_action_input", action_shape, max_outputs=1)
                else:
                    state_ph, goal_ph = tf.split(state_action_goal, [np.prod(self.ob_space.shape), np.prod(self.goal_space.shape)], 1)
                # Reshape
                state_orig_shape = tf.reshape(state_ph, [-1] + list(self.ob_space.shape))
                goal_orig_shape = tf.reshape(goal_ph, [-1] + list(self.goal_space.shape))
                tf.summary.image("VAE_state_input", state_orig_shape, max_outputs=1)
                tf.summary.image("VAE_goal_input", goal_orig_shape, max_outputs=1)
            self.summary = tf.summary.merge_all()
            self.params = tf.global_variables("lervae")
            self.sess.run(init)
            # handles used by the train/generate methods
            self.g = {
                'sag_ph': state_action_goal,
                'lm_ph': landmark,
                'z': z,
                'generated_landmark': generated_landmark,
                'loss': loss,
                'ts': ts,
                'summary': self.summary
            }
def add_state_data(self, states, goals):
"""Add state data to buffer (for initial random generation), and also to NN Index"""
states = np.reshape(states, [states.shape[0], -1]) # Reshape to batch_size x (H * W * C)
self.landmarks = np.concatenate((self.landmarks, | |
# <gh_stars>100-1000
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from collections import OrderedDict
import json
import math
import os
import sys
import time
import wandb
import numpy as np
import torch
import torch.cuda.amp as amp
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
import datasets
import models
from tokenizer import SimpleTokenizer
import utils
def get_args_parser():
    """Build the command-line parser for SLIP training and evaluation.

    Created with ``add_help=False`` so it can be composed as a parent
    parser by a wrapping entry point.
    """
    p = argparse.ArgumentParser(description='SLIP training and evaluation', add_help=False)
    # --- Data ---
    p.add_argument('--dataset', default='yfcc15m', type=str, choices=['yfcc15m', 'cc3m', 'cc12m', 'coco', 'redcaps'])
    p.add_argument('--root', default='', type=str, help='path to dataset root')
    p.add_argument('--metadata', default='yfcc15m.pkl', type=str, help='path to metadata file (see README for details)')
    p.add_argument('--output-dir', default='./', type=str, help='output dir')
    # --- Model ---
    p.add_argument('--model', default='SLIP_VITB16', type=str)
    p.add_argument('--ssl-mlp-dim', default=4096, type=int, help='hidden dim of SimCLR mlp projection head')
    p.add_argument('--ssl-emb-dim', default=256, type=int, help='output embed dim of SimCLR mlp projection head')
    p.add_argument('--ssl-scale', default=1.0, type=float, help='loss scale for SimCLR objective')
    p.add_argument('--ssl-temp', default=0.1, type=float, help='softmax temperature for SimCLR objective')
    p.add_argument('--resume', default='', type=str, help='path to resume from')
    # --- Training ---
    p.add_argument('--epochs', default=25, type=int)
    p.add_argument('--warmup-epochs', default=1, type=int)
    p.add_argument('--start-epoch', default=0, type=int)
    p.add_argument('--batch-size', default=64, type=int, help='number of samples per-device/per-gpu')
    p.add_argument('--lr', default=3e-3, type=float)
    p.add_argument('--lr-start', default=1e-6, type=float, help='initial warmup lr')
    p.add_argument('--lr-end', default=1e-5, type=float, help='minimum final lr')
    p.add_argument('--update-freq', default=1, type=int, help='optimizer update frequency (i.e. gradient accumulation steps)')
    p.add_argument('--wd', default=0.1, type=float)
    p.add_argument('--betas', default=(0.9, 0.98), nargs=2, type=float)
    p.add_argument('--eps', default=1e-8, type=float)
    p.add_argument('--eval-freq', default=1, type=int)
    p.add_argument('--disable-amp', action='store_true', help='disable mixed-precision training (requires more memory and compute)')
    # --- System ---
    p.add_argument('--print-freq', default=10, type=int, help='print frequency')
    p.add_argument('-j', '--workers', default=10, type=int, metavar='N', help='number of data loading workers per process')
    p.add_argument('--evaluate', action='store_true', help='eval only')
    p.add_argument('--world-size', default=1, type=int, help='number of nodes for distributed training')
    p.add_argument('--rank', default=0, type=int, help='node rank for distributed training')
    p.add_argument("--local_rank", type=int, default=0)
    p.add_argument('--dist-url', default='env://', type=str, help='url used to set up distributed training')
    p.add_argument('--dist-backend', default='nccl', type=str)
    p.add_argument('--seed', default=0, type=int)
    p.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
    p.add_argument('--wandb', action='store_true', help='Enable WandB logging')
    return p
# Best top-1 validation accuracy seen so far; updated as a global in main().
best_acc1 = 0
def main(args):
    """Run SLIP pre-training with periodic zero-shot ImageNet evaluation.

    Sets up distributed training, builds the model/criterion/optimizer,
    resumes from an explicit or auto-detected checkpoint, then trains for
    ``args.epochs`` epochs, checkpointing after each evaluation and logging
    to wandb / log.txt on the main process.
    """
    utils.init_distributed_mode(args)
    global best_acc1
    # fix the seed for reproducibility (offset by rank so workers differ)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    # create model
    print("=> creating model: {}".format(args.model))
    model = getattr(models, args.model)(ssl_mlp_dim=args.ssl_mlp_dim, ssl_emb_dim=args.ssl_emb_dim)
    model.cuda(args.gpu)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], bucket_cap_mb=200)
    # define loss function (criterion) and optimizer
    criterion = models.get_loss(args.model, args.ssl_temp, args.ssl_scale).cuda(args.gpu)
    # exclude 1-D params (biases, layer/batch norms) from weight decay
    p_wd, p_non_wd = [], []
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue  # frozen weights
        if p.ndim < 2 or 'bias' in n or 'ln' in n or 'bn' in n:
            p_non_wd.append(p)
        else:
            p_wd.append(p)
    optim_params = [{"params": p_wd, "weight_decay": args.wd},
                    {"params": p_non_wd, "weight_decay": 0}]
    optimizer = torch.optim.AdamW(optim_params, lr=args.lr, betas=args.betas,
                                  eps=args.eps, weight_decay=args.wd)
    scaler = amp.GradScaler(enabled=not args.disable_amp)
    # optionally resume from a checkpoint (takes precedence over autoresume)
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading resume checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cpu')
            epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
            args.start_epoch = epoch
            result = model.load_state_dict(checkpoint['state_dict'], strict=False)
            print(result)
            # Restore optimizer/scaler state when present.  (Previously
            # written as `x() if cond else ()` expression-statements.)
            if 'optimizer' in checkpoint:
                optimizer.load_state_dict(checkpoint['optimizer'])
            if 'scaler' in checkpoint:
                scaler.load_state_dict(checkpoint['scaler'])
            # BUG FIX: 'epoch'/'optimizer'/'scaler' were guarded against
            # missing keys but 'best_acc1' was not, crashing on partial
            # checkpoints; fall back to the current global value.
            best_acc1 = checkpoint.get('best_acc1', best_acc1)
            print("=> loaded resume checkpoint '{}' (epoch {})"
                  .format(args.resume, epoch))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        # auto-resume from latest checkpoint in output directory
        latest = os.path.join(args.output_dir, 'checkpoint.pt')
        if os.path.isfile(latest):
            print("=> loading latest checkpoint '{}'".format(latest))
            latest_checkpoint = torch.load(latest, map_location='cpu')
            args.start_epoch = latest_checkpoint['epoch']
            model.load_state_dict(latest_checkpoint['state_dict'])
            optimizer.load_state_dict(latest_checkpoint['optimizer'])
            scaler.load_state_dict(latest_checkpoint['scaler'])
            best_acc1 = latest_checkpoint['best_acc1']
            print("=> loaded latest checkpoint '{}' (epoch {})"
                  .format(latest, latest_checkpoint['epoch']))
    cudnn.benchmark = True
    # Data loading code
    print("=> creating dataset")
    tokenizer = SimpleTokenizer()
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(0.5, 1.0)),
        transforms.ToTensor(),
        normalize
    ])
    val_transform = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])
    train_dataset = datasets.get_dataset(train_transform, tokenizer, args)
    # zero-shot validation uses the ImageNet val split from the catalog
    cwd = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(cwd, 'dataset_catalog.json')) as f:
        root = json.load(f)['imagenet']['path']
    val_dataset = ImageFolder(os.path.join(root, 'val'), val_transform)
    # dist eval resamples data to pad uneven batch sizes
    # make sure num_samples = 0 mod num_gpus for exact acc
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
        val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
    else:
        train_sampler = None
        val_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=(val_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=val_sampler, drop_last=False)
    if args.evaluate:
        if args.model.startswith('SIMCLR'):
            print('zero-shot evaluation not supported with ssl-only model.')
            return
        zero_stats = validate_zeroshot(val_loader, model, tokenizer, args)
        if utils.is_main_process():
            with open(os.path.join(args.output_dir, 'eval_log.txt'), 'a') as f:
                f.write(json.dumps(zero_stats) + '\n')
        return
    lr_schedule = utils.cosine_scheduler(args.lr, args.lr_end, args.epochs,
        len(train_loader) // args.update_freq, warmup_epochs=args.warmup_epochs, start_warmup_value=args.lr_start)
    if utils.is_main_process() and args.wandb:
        wandb_id = os.path.split(args.output_dir)[-1]
        wandb.init(project='slip', id=wandb_id, config=args, resume='allow')
    print(args)
    print("=> beginning training")
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        # train for one epoch
        train_stats = train(train_loader, model, criterion, optimizer, scaler, epoch, lr_schedule, args)
        # evaluate (and checkpoint) only every eval_freq epochs
        if (epoch + 1) % args.eval_freq != 0:
            continue
        if args.model.startswith('SIMCLR'):
            # ssl-only model has no text tower -> no zero-shot accuracy
            val_stats = {'acc1': -1}
            acc1 = -1
        else:
            val_stats = validate_zeroshot(val_loader, model, tokenizer, args)
            acc1 = val_stats['acc1']
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        print("=> saving checkpoint")
        utils.save_on_master({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer' : optimizer.state_dict(),
            'scaler': scaler.state_dict(),
            'best_acc1': best_acc1,
            'args': args,
        }, is_best, args.output_dir)
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in val_stats.items()},
                     'epoch': epoch}
        if utils.is_main_process():
            if args.wandb:
                wandb.log(log_stats)
            with open(os.path.join(args.output_dir, 'log.txt'), 'a') as f:
                f.write(json.dumps(log_stats) + '\n')
def train(train_loader, model, criterion, optimizer, scaler, epoch, lr_schedule, args):
    """Train `model` for one epoch with AMP and gradient accumulation.

    One optimizer step is taken every `args.update_freq` loader batches.
    Returns a dict of averaged metrics plus the final learning rate and
    logit scale for logging.
    """
    batch_time = AverageMeter('Time', ':6.2f')
    data_time = AverageMeter('Data', ':6.2f')
    mem = AverageMeter('Mem (GB)', ':6.1f')
    metric_names = models.get_metric_names(args.model)
    # number of optimizer updates (not loader batches) in this epoch
    iters_per_epoch = len(train_loader) // args.update_freq
    metrics = OrderedDict([(name, AverageMeter(name, ':.2e')) for name in metric_names])
    progress = ProgressMeter(
        iters_per_epoch,
        [batch_time, data_time, mem, *metrics.values()],
        prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for data_iter, inputs in enumerate(train_loader):
        optim_iter = data_iter // args.update_freq
        # measure data loading time
        data_time.update(time.time() - end)
        # update weight decay and learning rate according to their schedule
        it = iters_per_epoch * epoch + optim_iter  # global training iteration
        for k, param_group in enumerate(optimizer.param_groups):
            param_group['lr'] = lr_schedule[it]
        inputs = [tensor.cuda(args.gpu, non_blocking=True) for tensor in inputs]
        # compute output
        with amp.autocast(enabled=not args.disable_amp):
            outputs = model(*inputs)
            loss_dict = criterion(outputs)
            loss = loss_dict['loss']
            # scale down so accumulated gradients average over update_freq
            loss /= args.update_freq
        if not math.isfinite(loss.item()):
            print("Loss is {}, stopping training".format(loss.item()))
            sys.exit(1)
        scaler.scale(loss).backward()
        # keep accumulating gradients until an optimizer-update boundary
        if (data_iter + 1) % args.update_freq != 0:
            continue
        # compute gradient and do SGD step
        scaler.step(optimizer)
        scaler.update()
        model.zero_grad(set_to_none=True)
        # clamp logit scale to [0, 100]
        if args.model.startswith('SIMCLR'):
            logit_scale = 0
        else:
            utils.get_model(model).logit_scale.data.clamp_(0, 4.6052)
            logit_scale = utils.get_model(model).logit_scale.exp().item()
        for k in loss_dict:
            metrics[k].update(loss_dict[k].item(), args.batch_size)
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        mem.update(torch.cuda.max_memory_allocated() // 1e9)
        if optim_iter % args.print_freq == 0:
            if utils.is_main_process() and args.wandb:
                wandb.log({**{k: v.item() for k, v in loss_dict.items()},
                           'scaler': scaler.get_scale(),
                           'logit': logit_scale})
            progress.display(optim_iter)
    progress.synchronize()
    return {**{k: v.avg for k, v in metrics.items()},
            'lr': optimizer.param_groups[0]['lr'],
            'logit_scale': logit_scale}
def validate_zeroshot(val_loader, model, tokenizer, args):
    """Zero-shot ImageNet evaluation via CLIP-style prompt ensembling.

    Each class label is embedded under every prompt template once; images
    are then scored against the averaged, re-normalized text embeddings.
    Returns a dict with distributed-reduced top-1 and top-5 accuracies.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, top1, top5],
        prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    print('=> encoding captions')
    # templates/labels ship alongside this script
    cwd = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(cwd, 'templates.json')) as f:
        templates = json.load(f)['imagenet']
    with open(os.path.join(cwd, 'labels.json')) as f:
        labels = json.load(f)['imagenet']
    with torch.no_grad():
        text_features = []
        for l in labels:
            # prompt ensembling: embed label under every template,
            # normalize per-prompt, average, then re-normalize
            texts = [t.format(l) for t in templates]
            texts = tokenizer(texts).cuda(args.gpu, non_blocking=True)
            class_embeddings = utils.get_model(model).encode_text(texts)
            class_embeddings = class_embeddings / class_embeddings.norm(dim=-1, keepdim=True)
            class_embeddings = class_embeddings.mean(dim=0)
            class_embeddings = class_embeddings / class_embeddings.norm(dim=-1, keepdim=True)
            text_features.append(class_embeddings)
        text_features = torch.stack(text_features, dim=0)
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
            # encode images
            image_features = utils.get_model(model).encode_image(images)
            image_features = image_features / image_features.norm(dim=-1, keepdim=True)
            # cosine similarity as logits
            logits_per_image = image_features @ text_features.t()
            # measure accuracy and record loss
            acc1, acc5 = accuracy(logits_per_image, target, topk=(1, 5))
            acc1, acc5 = utils.scaled_all_reduce([acc1, acc5])
            top1.update(acc1.item(), images.size(0))
            top5.update(acc5.item(), images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
    progress.synchronize()
    print('0-shot * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    return {'acc1': top1.avg, 'acc5': top5.avg}
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += | |
# <reponame>stscirij/crds<gh_stars>0
"""This module provides functions which determine various observatory
specific policies/plugins for Roman:
1. How to convert reference file basenames to fully specified paths.
2. How to manage parameters for reference file Validator objects used
in the certification of reference files.
XXXX Roman NOTE: This code was derived from the JWST locate.py module and
contains substantial duplication. However, because the functions often depend
on project-specific modules, globals, or functions the code is not usable
without some refactoring. Other plugins may vary simply because
ASDF+datamodels Roman is already different than the FITS+datamodels world of
JWST, e.g. there is no longer a need for FITS <-> datamodels translations and
log annotation, i.e. AKA keyword cross-strapping.
"""
import os.path
import re
import warnings
from collections import namedtuple
from asdf.tags.core import NDArrayType
# =======================================================================
from crds.core import rmap, config, utils, timestamp, log, exceptions
from crds.certify import generic_tpn
from crds import data_file
from crds.io import abstract
# =======================================================================
# These two functions decouple the generic reference file certifier program
# from observatory-unique ways of specifying and caching Validator parameters.
from crds.roman import TYPES, INSTRUMENTS, FILEKINDS, EXTENSIONS, INSTRUMENT_FIXERS, TYPE_FIXERS
# Convenience aliases re-exporting per-type lookups from the shared Roman
# TYPES registry so callers can use this module's flat namespace.
get_row_keys_by_instrument = TYPES.get_row_keys_by_instrument
get_item = TYPES.get_item
suffix_to_filekind = TYPES.suffix_to_filekind
filekind_to_suffix = TYPES.filekind_to_suffix
get_all_tpninfos = TYPES.get_all_tpninfos
# Directory containing this module; used to locate the bundled .tpn files.
HERE = os.path.dirname(__file__) or "."
# =======================================================================
# XXXX roman TODO needed for scratch JWST repro scheme, itself incomplete
# from crds.jwst.pipeline import header_to_reftypes, header_to_pipelines
# Stub like HST for now
def header_to_reftypes(header, context="roman-operational"):
    """Return the default list of appropriate reference type names for `header`.

    Stub for Roman: always returns the empty list, which translates to
    "all types" for the instrument defined by the header.

    >>> header_to_reftypes(None)
    []
    """
    all_types = []
    return all_types
def header_to_pipelines(header, context="roman-operational"):
    """Return the default pipelines for `header`; not yet defined for Roman.

    >>> header_to_pipelines(None)
    Traceback (most recent call last):
    ...
    NotImplementedError: Roman has not defined header_to_pipelines().
    """
    raise NotImplementedError("Roman has not defined header_to_pipelines().")
# =============================================================================
def tpn_path(tpn_file):
    """Return the full filepath of `tpn_file` inside this package's tpns/ dir.

    >>> tpn_path('tpn_file.tpn') # doctest: +ELLIPSIS
    '.../crds/roman/tpns/tpn_file.tpn'
    """
    tpns_dir = os.path.join(HERE, "tpns")
    return os.path.join(tpns_dir, tpn_file)
def get_extra_tpninfos(refpath):
    """Return extra TpnInfos (valid value enumerations) scraped from the cal
    code datamodels schemas.

    Currently disabled: always returns [] so that only the canonical .tpn
    files drive CRDS value checking.  Datamodels schemas historically lack
    per-instrument granularity, so the .tpn scheme can catch errors the
    schemas cannot (e.g. values cloned from another instrument's file).

    >>> get_extra_tpninfos(None)
    []
    """
    # XXXX roman -- datamodels schema scraping can potentially be enabled once
    # romancal and datamodels are integrated, effectively translating the
    # datamodels schemas as-if they were specified in CRDS .tpn files, e.g.:
    #   return schema.get_schema_tpninfos(refpath)
    return []
def project_check(refpath, rmap):
    """Project-specific reference file checking hook; Roman currently
    performs no extra checks.

    NOTE(review): the `rmap` parameter shadows the imported crds.core.rmap
    module inside this function; kept for interface compatibility.
    """
    return None
# =======================================================================
def match_context_key(key):
    """Return `key` cased appropriately for this project; Roman context
    keys are always upper case.

    >>> match_context_key('key')
    'KEY'
    """
    return key.upper()
# =======================================================================
@utils.cached
def get_file_properties(filename):
    """Return (instrument, filekind) for `filename`, which should be a
    mapping or ASDF reference file.

    >>> get_file_properties('tests/data/roman_wfi16_f158_flat_small.asdf')
    ('wfi', 'flat')
    >>> get_file_properties('tests/data/roman_wfi_flat_0004.rmap')
    ('wfi', 'flat')
    >>> get_file_properties('tests/data/roman_0001.pmap')
    ('', '')
    """
    if config.is_mapping(filename):
        # Mapping names normally encode the properties; fall back to loading
        # the mapping header when the name is non-standard.
        try:
            return decompose_newstyle_name(filename)[2:4]
        except Exception:
            # NOTE: load_mapping is more conservative than the fetch_mapping
            # used in properties_inside_mapping.
            loaded = rmap.load_mapping(filename)
            return loaded.instrument, loaded.filekind
    if config.is_reference(filename):
        result = get_reference_properties(filename)[2:4]
    else:
        try:
            result = properties_inside_mapping(filename)
        except Exception:
            result = get_reference_properties(filename)[2:4]
    instrument, filekind = result
    assert instrument in INSTRUMENTS+[""], "Bad instrument " + \
        repr(instrument) + " in filename " + repr(filename)
    assert filekind in FILEKINDS+[""], "Bad filekind " + \
        repr(filekind) + " in filename " + repr(filename)
    return result
def decompose_newstyle_name(filename):
    """Decompose a CRDS-style `filename` into its component fields.

    Returns (path, observatory, instrument, filekind, serial, ext).

    >>> decompose_newstyle_name('./roman.pmap')
    ('.', 'roman', '', '', '', '.pmap')
    >>> decompose_newstyle_name('./roman_0001.pmap')
    ('.', 'roman', '', '', '0001', '.pmap')
    >>> decompose_newstyle_name("./roman_wfi_0001.imap")
    ('.', 'roman', 'wfi', '', '0001', '.imap')
    >>> decompose_newstyle_name("./roman_wfi_flat.rmap")
    ('.', 'roman', 'wfi', 'flat', '', '.rmap')
    >>> decompose_newstyle_name("./roman_wfi_flat.asdf")
    ('.', 'roman', 'wfi', 'flat', '', '.asdf')
    >>> decompose_newstyle_name("./hst_acs.imap")
    Traceback (most recent call last):
    ...
    AssertionError: Invalid instrument 'acs'
    >>> decompose_newstyle_name("./roman_wfi_lark_0001.rmap")
    Traceback (most recent call last):
    ...
    AssertionError: Invalid filekind 'lark'
    >>> decompose_newstyle_name("./roman_wfi_flat_abcd.rmap")
    Traceback (most recent call last):
    ...
    AssertionError: Invalid id field 'abcd'
    """
    path, parts, ext = _get_fields(filename)
    observatory = parts[0]
    serial = list_get(parts, 3, "")
    if ext == ".pmap":
        assert len(parts) in [1,2], "Invalid .pmap filename " + repr(filename)
        instrument, filekind = "", ""
        serial = list_get(parts, 1, "")
    elif ext == ".imap":
        assert len(parts) in [2,3], "Invalid .imap filename " + repr(filename)
        instrument = parts[1]
        filekind = ""
        serial = list_get(parts, 2, "")
    else:
        assert len(parts) in [3,4], "Invalid filename " + repr(filename)
        instrument = parts[1]
        filekind = parts[2]
        serial = list_get(parts, 3, "")
    # Don't include filename in these or it messes up crds.certify unique error tracking.
    assert instrument in INSTRUMENTS+[""], "Invalid instrument " + repr(instrument)
    assert filekind in FILEKINDS+[""], "Invalid filekind " + repr(filekind)
    # BUGFIX: the error message previously said repr(id) -- the *builtin* --
    # producing "Invalid id field <built-in function id>" instead of showing
    # the offending serial field.
    assert re.fullmatch(r"\d*", serial), "Invalid id field " + repr(serial)
    # extension may vary for upload temporary files.
    return path, observatory, instrument, filekind, serial, ext
def properties_inside_mapping(filename):
    """Load the mapping header of `filename` and return (instrument, filekind),
    using "" for fields a pipeline or instrument mapping does not define.

    >>> properties_inside_mapping('tests/data/roman_0001.pmap')
    ('', '')
    >>> properties_inside_mapping('tests/data/roman_wfi_0001.imap')
    ('wfi', '')
    >>> properties_inside_mapping('tests/data/roman_wfi_flat_0004.rmap')
    ('wfi', 'flat')
    """
    # Local renamed from `map` to avoid shadowing the builtin.
    loaded = rmap.fetch_mapping(filename)
    if loaded.mapping == "pipeline":
        return "", ""
    if loaded.mapping == "instrument":
        return loaded.instrument, ""
    return loaded.instrument, loaded.filekind
def _get_fields(filename):
"""
>>> _get_fields("")
('', [''], '')
>>> _get_fields("a/b.c")
('a', ['b'], '.c')
>>> _get_fields("_")
('', ['', ''], '')
>>> _get_fields("__")
('', ['', '', ''], '')
>>> _get_fields("a_b_c")
('', ['a', 'b', 'c'], '')
>>> _get_fields("_a_b_c_")
('', ['', 'a', 'b', 'c', ''], '')
"""
path = os.path.dirname(filename)
name = os.path.basename(filename)
name, ext = os.path.splitext(name)
parts = name.split("_")
return path, parts, ext
def list_get(l, index, default):
    """Return ``l[index]``, or `default` when the index is out of range.

    Non-subscriptable inputs still raise TypeError, as with plain indexing.

    >>> list_get([], 0, None)
    >>> list_get([], -1, 7)
    7
    >>> list_get([1], 1, 9)
    9
    >>> list_get([1, 2, 3, 4], 2, 8)
    3
    """
    try:
        value = l[index]
    except IndexError:
        value = default
    return value
def get_reference_properties(filename):
    """Determine (path, observatory, instrument, filekind, serial, ext) for
    `filename`, preferring fast name decomposition over opening the file.

    >>> get_reference_properties('tests/data/roman_0001.pmap')
    ('tests/data', 'roman', '', '', '0001', '.pmap')
    >>> get_reference_properties("./roman_wfi_flat.asdf")
    ('.', 'roman', 'wfi', 'flat', '', '.asdf')
    """
    try:
        # Hopefully it's a nice new standard filename -- the cheap path.
        return decompose_newstyle_name(filename)
    except AssertionError:
        # Cryptic legacy paths & names, i.e. reality: dig inside the file.
        return ref_properties_from_header(filename)
# =======================================================================
def ref_properties_from_header(filename):
    """Look inside ASDF `filename` header to determine instrument and filekind.

    Returns (path, "roman", instrument, filekind, serial, ext).
    Raises CrdsNamingError when the instrument or reftype cannot be
    identified or is not a known Roman value.

    >>> ref_properties_from_header('tests/data/roman_wfi16_f158_flat_small.asdf')
    ('tests/data', 'roman', 'wfi', 'flat', 'roman_wfi16_f158_flat_small', '.asdf')
    >>> ref_properties_from_header('tests/data/s7g1700gl_dead_bad_xsum.fits')
    Traceback (most recent call last):
    ...
    crds.core.exceptions.CrdsNamingError: Can't identify instrument of 's7g1700gl_dead_bad_xsum.fits' : Invalid instrument 'cos'
    """
    # For legacy files, just use the root filename as the unique id
    path, parts, ext = _get_fields(filename)
    serial = os.path.basename(os.path.splitext(filename)[0])
    header = data_file.get_free_header(filename, (), None, "roman")
    header["ROMAN.META.TELESCOPE"] = "roman"
    name = os.path.basename(filename)
    try:
        instrument = utils.header_to_instrument(header).lower()
        assert instrument in INSTRUMENTS, "Invalid instrument " + repr(instrument)
    except Exception as exc:
        raise exceptions.CrdsNamingError(
            "Can't identify instrument of", repr(name), ":", str(exc)) from exc
    try:
        filekind = header.get('ROMAN.META.REFTYPE', 'UNDEFINED').lower()
        assert filekind in FILEKINDS, "Invalid file type " + repr(filekind)
    except Exception as exc:
        # CONSISTENCY FIX: include the underlying cause in the message and
        # chain the exception, matching the instrument branch above.
        # Previously the cause was silently dropped.
        raise exceptions.CrdsNamingError(
            "Can't identify ROMAN.META.REFTYPE of", repr(name), ":", str(exc)) from exc
    return path, "roman", instrument, filekind, serial, ext
# =============================================================================
def reference_keys_to_dataset_keys(rmapping, header):
"""Given a header dictionary for a reference file, map the header back to keys
relevant to datasets. So for ACS biasfile the reference says BINAXIS1 but
the dataset says NUMCOLS. This would convert { "BINAXIS1": 1024 } to {
"NUMCOLS" : 1024 }.
In general, rmap parkeys are matched against datset values and are defined
as dataset header keywords. For refactoring though, what's initially
available are reference file keywords... which need to be mapped into the
terms rmaps know: dataset keywords.
Another aspect of this translation is handling reference file "pattern"
keywords which typically define or-barred sets of values rather than
discrete | |
# OS_1.3.1.py
import time
import os
import matplotlib.pyplot as plt
import numpy as np
import random
# A single-space "blank" line used by the spacing/print helpers below.
spa = " "
def los():
    """Print two spacer lines (double-space helper)."""
    for _ in range(2):
        print(spa)
def bs():
    """Print three spacer lines (triple-space helper)."""
    for _ in range(3):
        print(spa)


# --- Boot sequence: splash screen, banner, then the prompt hint. ---
# NOTE(review): assumed the sleep/clear below are top-level boot statements,
# not part of bs() -- indentation was not preserved in the reviewed copy.
time.sleep(1)
os.system('clear')
print("\033[1;31;40m Loading...")
print("\033[0;33;40m")
time.sleep(3)  # Just checks that script imports work.
os.system('clear')
print("Welcome To...")
time.sleep(2)
print(" _ _ ___ ___ ")
print(" _ | |___ ___ ___ _ __| |_ / _ \/ __|")
print(" | || / _ (_-</ -_) '_ \ ' \ | (_) \__ \ ")
print(" \__/\___/__/\___| .__/_||_| \___/|___/")
print(" |_| ")
los()
print("\033[0;37;40m (Type help for a list of commands.)")
def command(): #Command Execution function
command_window = raw_input("\033[0;32;40m >: ")
if command_window == 'Decimal_To_Binary': #Base Two Conversion
print spa
x = int(input("Please input Decimal INT [Max - 1024]: "))
one = (x % 2)
two = ((x / 2) % 2)
three = ((x / 4) % 2)
four = ((x / 8) % 2)
five = ((x / 16) % 2)
six = ((x / 32) % 2)
seven = ((x / 64) % 2)
eight = ((x / 128) % 2)
nine = ((x / 256) % 2)
ten = ((x / 512) % 2)
eleven = ((x / 1024) % 2)
los()
print eleven,ten,nine,eight,seven,six,five,four,three,two,one
print spa
simulate()
elif command_window == 'Help': #Creates Help List
los()
los()
print spa, "(([ Base Operations ]))"
print spa
print spa, "* Help - prints a list of commands inside the command line"
print spa, "* Clear - clears the OS"
print spa, "* Restart - Restarts the OS"
print spa
print spa, "(([ Conversions and Math ]))"
print spa
print spa, "* Decimal_To_Binary - Converts Intigers to Binary"
print spa, "* Binary_To_Decimal - Converts Binary to Intigers (Start the number with 0b(#))"
print spa, "* BMI - Calculates where you lay on the Body Mass Index"
print spa, "* Mob Calculator - Calculates mobs"
print spa, "* Test - Add two numbers for the command line maths test"
print spa, "* Quadratic_Equation - Solves the Quadratic Equation"
print spa
print spa, "(([ OSX Base Operations ]))"
print spa
print spa, "* OSX_Say_Hi - Test command that says Hi"
print spa, "* OSX_Terminal - Execute OSX command line functions"
print spa, "* OSX_Network_Ping - Pings local area network"
print spa, "* OSX_Network_Status - Relays network status to the user"
print spa, "* OSX_Get_Subnet_Mask - Relays Subnet Mask to the user"
print spa, "* OSX_Get_Host_IP - Relays the Host's public IP address"
print spa, "* OSX_Get_Personal_IP - Relays the Host's private IP address"
print spa, "* OSX_Get_Basic_Network_Info - Relays network information to the user"
print spa, "* OSX_Launch_Minecraft - Play Minecraft!"
print spa, "* OSX_Launch_Discord - Chat on Discord!"
print spa, "* OSX_Launch_Chess - Play Chess!"
print spa, "* OSX_Launch_Audacity - Record some Music!"
print spa, "* OSX_Launch_SeaMonkey - Surf the web!"
print spa, "* OSX_Launch_HTML_Editor - Make some cool websites for the web!"
print spa, "* OSX_Launch_Arduino_Compiler - Tool Around with some robots!"
print spa, "* OSX_Help - Shows you a list of commands you may Execute inside the OSX Command prompt"
print spa, "* OSX_Launch_1.13.2_Server - Launches a 1.13.2 Minecraft server inside the OSX Command line."
print spa
print spa, "(([ Linux Base Maths and Conversions ]))"
print spa
print spa, "* Music Generator - Creates a chord and note progression for a set scale"
los()
los()
simulate()
elif command_window == 'Clear': #Clears the Command Line
os.system('clear')
simulate()
elif command_window == 'BMI': #BMI Calculator
c = 1
d = 2
ques = int(input("Please type 1 to use the Imperial system. Or type 2 to use the Metric system: "))
def uk():
print(spa)
kg = float(input("Please Enter how much you weigh (Kg's): "))
print(spa)
m = float(input("Please Enter how many Meters tall you are: "))
print(spa)
bmib = (kg / (m ** 2))
print("Your BMI is,")
print (bmib)
print("And...")
if bmib >= 30:
print("\033[1;31;40m You are Obese!!!")
print(spa)
print("\033[0;32;40m")
if bmib <= 18.5:
print("\033[1;31;40m You are Underweight!!!")
print(spa)
print("\033[0;32;40m")
if 18.5 < bmib < 24.9:
print("\033[0;33;40m You have a healthy weight.")
print(spa)
print("\033[0;32;40m")
if 25 < bmib < 30:
print("\033[0;33;40m You are a bit Overweight...")
print(spa)
print("\033[0;32;40m")
t.sleep(2)
mapu = raw_input("Would you like to see a graph of where you stand in the chart? [y / n]: ")
if mapu == ('y'):
objects = ('Your BMI', 'Underweight', 'Normal', 'Overweight', 'Obese', 'UK Avarage')
y_pos = np.arange(len(objects))
performance = [bmib,18.5,24.9,30,37,26.5]
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('BMI')
plt.title('Body Mass Index')
plt.show()
simulate()
if mapu == ('n'):
los()
simulate()
def us():
print (spa)
lbs = float(input("Please Enter how much you weigh: "))
print (spa)
feet = int(input("Please Enter how many feet tall you are: "))
print (spa)
inc = int(input("Please Enter how many inches tall you are: "))
print (spa)
truehight = ((feet * 12) + inc)
heightmeathod = (truehight ** 2)
bmia = ((lbs / heightmeathod) * 703)
bs()
print("Your BMI is,")
print (bmia)
print("And...")
print (spa)
time.sleep(1)
if bmia >= 30:
print("\033[1;31;40m You are Obese!!!")
print(spa)
print("\033[0;32;40m")
elif bmia <= 18.5:
print("\033[1;31;40m You are Underweight!!!")
print(spa)
print("\033[0;32;40m")
elif 18.5 < bmia < 24.9:
print("\033[0;33;40m You have a healthy weight.")
print(spa)
print("\033[0;32;40m")
elif 25 < bmia < 30:
print("\033[0;33;40m You are a bit Overweight...")
print(spa)
print("\033[0;32;40m")
time.sleep(2)
mapu = raw_input("Would you like to see a graph of where you stand in the chart? [y / n]: ")
if mapu == ('y'):
objects = ('Your BMI', 'Underweight', 'Normal', 'Overweight', 'Obese', 'US Avarage')
y_pos = np.arange(len(objects))
performance = [bmia,18.5,24.9,30,37,28]
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('BMI')
plt.title('Body Mass Index')
plt.show()
simulate()
if mapu == ('n'):
simulate()
if ques == c:
us()
elif ques == d:
uk()
elif command_window == 'Test': #Mathmatical Test Operation
x = float(input("Enter the first number: "))
y = float(input("Enter the second number: "))
print(x + y)
simulate()
elif command_window == 'Restart': #Restarts OS
os.system('clear')
print("\033[0;33;40m")
print "Welcome To..."
time.sleep(2)
print " _ _ ___ ___ "
print " _ | |___ ___ ___ _ __| |_ / _ \/ __|"
print " | || / _ (_-</ -_) '_ \ ' \ | (_) \__ \ "
print " \__/\___/__/\___| .__/_||_| \___/|___/"
print " |_| "
los()
print("\033[0;37;40m (Type help for a list of commands.)")
simulate()
elif command_window == 'Quadratic_Equation': #Quadratic Equation ;)
a = float(input("Please Input (a) Value: "))
b = float(input("Please Input (b) Value: "))
c = float(input("Please Input (c) Value: "))
mod = ((b * b) - (4 * a * c))
bot = (2 * a)
fir = (-b)
avalue = ((fir + mod) / bot)
bvalue = ((fir - mod) / bot)
print("X ="), avalue, ("and"), bvalue
simulate()
elif command_window == 'OSX_Say_Hi': #Says Hi
print (spa)
os.system('say Hi')
print spa
simulate()
elif command_window == 'OSX_Network_Ping': #Pings Localhost
os.system('ping localhost')
elif command_window == 'OSX_Get_Subnet_Mask':
print spa
os.system('ipconfig getoption en0 subnet_mask')
print spa
simulate()
elif command_window == 'OSX_Get_Host_IP': #Returns your IP address
print spa
os.system('ipconfig getoption en0 domain_name_server')
print (spa)
simulate()
elif command_window == 'OSX_Get_Personal_IP': #ipv6
print spa
os.system('ipconfig getifaddr en0')
print spa
simulate()
elif command_window == 'OSX_Get_Basic_Network_Info': #Returns ipconfig en0
print (spa)
os.system('ifconfig en0')
print spa
simulate()
elif command_window == 'OSX_Launch_Minecraft':
print spa
os.system('open /Users/dirkshumaker/Desktop/Joseph\ OS/Minecraft.app')
print(spa)
simulate()
elif command_window == 'OSX_Launch_SeaMonkey':
print spa
os.system('open /Users/dirkshumaker/Desktop/Joseph\ OS/SeaMonkey')
print spa
simulate()
elif command_window == 'OSX_Launch_Discord':
print spa
os.system('open /Users/dirkshumaker/Desktop/Joseph\ OS/Discord')
print spa
simulate()
elif command_window == 'OSX_Launch_Chess':
print spa
os.system('open /Users/dirkshumaker/Desktop/Joseph\ OS/Chess')
print spa
simulate()
elif command_window == 'OSX_Launch_HTML_Editor':
print spa
os.system('open /Users/dirkshumaker/Desktop/Joseph\ OS/Brackets.app')
print spa
simulate()
elif command_window == 'OSX_Launch_Audacity':
print spa
os.system('open /Users/dirkshumaker/Desktop/Joseph\ OS/Audacity')
print spa
simulate()
elif command_window == 'OSX_Launch_Arduino_Compiler':
print(spa)
os.system('open /Users/dirkshumaker/Desktop/Joseph\ OS/Arduino.app')
print spa
simulate()
elif command_window == 'OSX_Network_Status': #Shows you open and closed ports
print(spa)
os.system('netstat')
print spa
simulate()
elif command_window == 'OSX_Terminal': #Access the OSX terminal as sudo
os.system('clear')
print " ___ _____ __"
print " / _ \/ __\ \/ /"
print " | (_) \__ \> < "
print " \___/|___/_/\_\ "
print spa
print("\033[0;37;40m You have now entered the OSX command line!")
print spa
os.system('sudo -s')
simulate()
elif command_window == 'OSX_Help': #All of the terminal commands
f=open("OSX.txt", "r")
if f.mode == 'r':
contents = f.read()
print(contents)
simulate()
elif command_window == 'Binary_To_Decimal': #0b number
print spa
x = int(input("Enter Binary Number: "))
print x
los()
simulate()
elif command_window == 'OSX_Launch_1.13.2_Server':
print spa
os.system('cd /Users/dirkshumaker/Desktop/Joseph\ OS')
os.system('perl launch.pl')
simulate()
elif command_window == 'Mob Calculator': #Calculate Mobs
walk = float(3)
Bike = float(11.5)
Car = float(30)
Unicycle = float(5)
Hoverboard = float(13)
Running = float(8.3)
Bullet_train = float(200)
F_Bullet_train = float(375)
Plane = float(550)
Skateboard = float(10)
Rocket = float(294)
Land_Speed_Record = float(763.035)
Swim = float(3.7)
x = raw_input("Enter the mode of transportation: ")
if x == ("Walking"):
inn = walk
elif x == ("Biking"):
inn = Bike
elif x == ("Driving"):
inn = Car
elif x == ("Unicycle"):
inn = Unicycle
elif x == ("Hoverboard"):
inn = Hoverboard
elif x == ("Running"):
inn = Running
elif x == ("Bullet Train"):
inn = Bullet_train
elif x == ("Worlds Fastest Bullet Train"):
inn = F_Bullet_train
elif x == ("Flying"):
inn = Plane
elif x == ("Skating"):
inn = Skateboard
elif x == ("Rocket"):
inn = Rocket
elif x == ("Land Speed Record"):
inn = Land_Speed_Record
elif x == ("Swimming"):
inn = Swim
mob = float((inn / 60) * 20)
mobdistance = (mob * 5)
if inn == walk:
print("\033[0;32;40m")
print("(*)")
print(spa)
print("\033[0;33;40m")
print "The avarage person walks at", mob, "miles per mob."
print(spa)
print "If you were to travel 5 mobs... you would travel", mobdistance, "Miles"
print(spa)
print("\033[0;32;40m")
print("(*)")
print(spa)
simulate()
elif inn == Bike:
print("\033[0;32;40m")
print("(*)")
print(spa)
print("\033[0;33;40m")
print "The avarage person bikes at", mob, "miles per mob."
print(spa)
print "If you were to travel 5 mobs... you would travel", mobdistance, "Miles"
print(spa)
print("\033[0;32;40m")
print("(*)")
print(spa)
simulate()
elif inn == Car:
print("\033[0;32;40m")
print("(*)")
print(spa)
print("\033[0;33;40m")
print "The avarage person drives at", mob, "miles per mob."
print(spa)
print "If you were to travel 5 mobs... you would travel", mobdistance, "Miles"
print(spa)
print("\033[0;32;40m")
print("(*)")
print(spa)
simulate()
elif inn == Unicycle:
print("\033[0;32;40m")
print("(*)")
print(spa)
print("\033[0;33;40m")
print "The avarage person unicycles at", mob, "miles per mob."
print(spa)
print "If you were to travel 5 mobs... you would travel", mobdistance, "Miles"
print(spa)
print("\033[0;32;40m")
print("(*)")
print(spa)
simulate()
elif inn == Hoverboard:
print("\033[0;32;40m")
print("(*)")
print(spa)
print("\033[0;33;40m")
print "The avarage person hovers at", mob, "miles per mob."
print(spa)
print "If you were to travel 5 mobs... you would travel", mobdistance, "Miles"
print(spa)
print("\033[0;32;40m")
print("(*)")
print(spa)
simulate()
elif inn == Running:
print("\033[0;32;40m")
print("(*)")
print(spa)
print("\033[0;33;40m")
print "The | |
# Source: chaitrasj/GAN-based-Visible-Thermal-Person-ReID -- utils_.py
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
from collections import OrderedDict
from itertools import cycle
from torch.utils.data import DataLoader
# from networks import Encoder_Dense, Decoder_Dense
from PIL import Image
import os, random, cv2
import pandas as pd
# from losses import TripletLoss
# from model import embed_net
import math
import itertools
import torch.nn.init as init
def getTransform(FLAGS):
    """Build (train, test) torchvision transform pipelines sized by FLAGS.

    Train: resize -> pad 10 -> random crop -> random horizontal flip -> tensor.
    Test:  resize -> tensor.
    Normalization is intentionally left out of both pipelines.
    """
    size = (FLAGS.image_height, FLAGS.image_width)
    train_ops = [
        transforms.Resize(size),
        transforms.Pad(10),
        transforms.RandomCrop(size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ]
    test_ops = [
        transforms.Resize(size),
        transforms.ToTensor(),
    ]
    return transforms.Compose(train_ops), transforms.Compose(test_ops)
# Module-level pipeline: tensor -> PIL image -> 3-channel grayscale -> tensor.
transform_to_gray = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
])
def weights_init_old(layer):
    """In-place legacy init: N(0, 0.05) Conv2d/Linear weights, N(1, 0.02)
    BatchNorm2d scale, and zeroed biases.

    BUGFIX: biases are only zeroed when present -- layers constructed with
    ``bias=False`` have ``bias is None`` and previously crashed here for
    BatchNorm2d/Linear (the Conv2d branch already guarded).
    """
    if isinstance(layer, nn.Conv2d):
        layer.weight.data.normal_(0.0, 0.05)
        if layer.bias is not None:
            layer.bias.data.zero_()
    elif isinstance(layer, nn.BatchNorm2d):
        layer.weight.data.normal_(1.0, 0.02)
        if layer.bias is not None:
            layer.bias.data.zero_()
    elif isinstance(layer, nn.Linear):
        layer.weight.data.normal_(0.0, 0.05)
        if layer.bias is not None:
            layer.bias.data.zero_()
def weights_init(init_type='gaussian'):
    """Return an initializer closure suitable for ``module.apply``.

    Supported init_type values: 'gaussian', 'xavier', 'kaiming',
    'orthogonal', and 'default' (leave weights untouched).  Only modules
    whose class name starts with Conv or Linear and that expose a
    ``weight`` attribute are initialized; their biases are zeroed.
    """
    def _initialize(m):
        kind = m.__class__.__name__
        targeted = kind.startswith('Conv') or kind.startswith('Linear')
        if targeted and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, "Unsupported initialization: {}".format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
    return _initialize
#################################### Data loader Helper functions ######################################
def getIds(data_path):
    """Read comma-separated integer ids from the first line of `data_path`
    and return them zero-padded to four digits (e.g. 7 -> "0007").
    """
    # Local renamed from `file` (a Python 2 builtin) to `fh`.
    with open(data_path, 'r') as fh:
        lines = fh.read().splitlines()
    return ["%04d" % int(tok) for tok in lines[0].split(',')]
def getFiles(data_path, ids, cameras, test=2):
    """Collect sorted image paths plus integer labels and camera numbers.

    `data_path` is truncated to its first three '/'-separated components to
    obtain the dataset root; images live under <root>/<cam>/<id>/.
    test=2 keeps all images; test=1 keeps only the first image per id/cam;
    any other value drops the first image per id/cam.
    Returns (files, labels, cameras) as parallel lists.
    """
    parts = data_path.split('/')[:-2]
    root = os.path.join(parts[0], parts[1], parts[2])
    files, labels, cams = [], [], []
    for pid in sorted(ids):
        for cam in cameras:
            img_dir = os.path.join(root, cam, pid)
            if not os.path.isdir(img_dir):
                continue
            found = sorted(img_dir + '/' + fname for fname in os.listdir(img_dir))
            if test != 2:
                if test == 1:
                    found = [found[0]]   # classifier test time: one image
                else:
                    found = found[1:]    # drop the held-out first image
            files.extend(found)
            cams.extend([int(cam[-1])] * len(found))
            labels.extend([int(pid)] * len(found))
    return files, labels, cams
def getGallerySet(data_path, shot, mode, ids):
    """Pick a random gallery set of visible-camera images for each id.

    mode 'Indoor' restricts to cam1/cam2, otherwise cam1/cam2/cam4/cam5.
    shot 'Single': one random image per id, taken from the first camera
    (in shuffled order) holding that id.  Otherwise ('Multi'): 10 random
    images per id pooled across all cameras.
    Returns (files, labels, camera numbers, ids).
    """
    cams = ['cam1', 'cam2'] if mode == 'Indoor' else ['cam1', 'cam2', 'cam4', 'cam5']
    parts = data_path.split('/')[:-2]
    root = os.path.join(parts[0], parts[1], parts[2])
    files, label, cam_ = [], [], []
    for pid in sorted(ids):
        random.shuffle(cams)
        if shot == 'Single':
            for camera in cams:
                img_dir = os.path.join(root, camera, pid)
                if os.path.isdir(img_dir):
                    candidates = sorted(img_dir + '/' + f for f in os.listdir(img_dir))
                    files.append(random.choice(candidates))
                    cam_.append(int(camera[-1]))
                    label.append(int(pid))
                    break
        else:
            pool = []
            for camera in cams:
                img_dir = os.path.join(root, camera, pid)
                if os.path.isdir(img_dir):
                    pool.extend(sorted(img_dir + '/' + f for f in os.listdir(img_dir)))
            if pool:
                picked = random.sample(pool, 10)
                files.extend(picked)
                l, c = getLabels(picked)
                cam_.extend(c)
                label.extend(l)
    return files, label, cam_, ids
def getImageNames(data_path):
    """Split the training data listed under `data_path` into RGB and IR
    file/label/camera lists plus the integer id array.

    cam1/cam2/cam4/cam5 are visible-light cameras; cam3/cam6 are infrared.
    """
    rgb_cams = ['cam1', 'cam2', 'cam4', 'cam5']
    ir_cams = ['cam3', 'cam6']
    train_ids = getIds(data_path)
    rgb = getFiles(data_path, train_ids, rgb_cams)
    ir = getFiles(data_path, train_ids, ir_cams)
    return rgb[0], ir[0], rgb[1], ir[1], rgb[2], ir[2], np.array(train_ids, dtype=int)
def createAllPermutations(self):
    """Build per-identity cross-modality triplets and store them on `self`.

    For each identity two parallel triplet streams are built:
      * RGB-anchored: {'1': rgb image, '2': same-id IR, '3': different-id IR}
      * IR-anchored:  {'1': ir image,  '2': same-id RGB, '3': different-id RGB}
    with matching label dicts.  Results are stored in self.rgb_list,
    self.rgb_list_label, self.ir_list, self.ir_list_label.

    NOTE(review): random.sample re-draws the triplets on every call, and the
    "different id" pools must hold at least M images per identity or
    random.sample raises ValueError -- verify against the dataset sizes.
    """
    # assert np.unique(self.train_rgb_label) == np.unique(self.train_ir_label), 'Number of Identities in Rgb and Ir data must be samse!'
    unique_labels = np.unique(self.train_rgb_label)
    rgb_list = []
    rgb_list_label = []
    ir_list = []
    ir_list_label = []
    for i in range(len(unique_labels)):
        id = unique_labels[i]   # NOTE: shadows the `id` builtin
        # Creating triplets of V, T, T
        tmp_pos = [k for k,v in enumerate(self.train_rgb_label) if v==id]
        # NOTE: the comprehension variable `i` shadows the outer loop index;
        # harmless because the outer `i` is reassigned next iteration.
        rgb = [self.train_rgb_image[i] for i in tmp_pos]
        tmp_pos = [k for k,v in enumerate(self.train_ir_label) if v==id]
        ir = [self.train_ir_image[i] for i in tmp_pos]
        # M = triplets per id, limited by the smaller modality for this id.
        M = min(len(rgb),len(ir))
        rgb_M = random.sample(rgb, M)
        ir_M_s = random.sample(ir, M)       # same-id IR positives
        tmp_pos = [k for k,v in enumerate(self.train_ir_label) if v!=id]
        ir = [self.train_ir_image[i] for i in tmp_pos]
        ir_M_d = random.sample(ir, M)       # different-id IR negatives
        for i in range (M):
            dict = {}   # NOTE: shadows the `dict` builtin
            dict['1'] = rgb_M[i]
            dict['2'] = ir_M_s[i]
            dict['3'] = ir_M_d[i]
            rgb_list.append(dict)
            dict_lab = {}
            dict_lab['1'] = id
            dict_lab['2'] = id
            # The negative's label is recovered from its directory name.
            dict_lab['3'] = int(ir_M_d[i].split('/')[-2])
            rgb_list_label.append(dict_lab)
        # Creating triplets of T, V, V
        tmp_pos = [k for k,v in enumerate(self.train_ir_label) if v==id]
        ir = [self.train_ir_image[i] for i in tmp_pos]
        ir_M = random.sample(ir, M)
        tmp_pos = [k for k,v in enumerate(self.train_rgb_label) if v==id]
        rgb = [self.train_rgb_image[i] for i in tmp_pos]
        rgb_M_s = random.sample(rgb, M)     # same-id RGB positives
        tmp_pos = [k for k,v in enumerate(self.train_rgb_label) if v!=id]
        rgb = [self.train_rgb_image[i] for i in tmp_pos]
        rgb_M_d = random.sample(rgb, M)     # different-id RGB negatives
        for i in range (M):
            dict = {}
            dict['1'] = ir_M[i]
            dict['2'] = rgb_M_s[i]
            dict['3'] = rgb_M_d[i]
            ir_list.append(dict)
            dict_lab = {}
            dict_lab['1'] = id
            dict_lab['2'] = id
            dict_lab['3'] = int(rgb_M_d[i].split('/')[-2])
            ir_list_label.append(dict_lab)
    self.rgb_list, self.rgb_list_label = rgb_list, rgb_list_label
    self.ir_list, self.ir_list_label = ir_list, ir_list_label
    return
def createAllPermutations_Single_Modality(self):
    """Build RGB-only triplets (anchor, same-id positive, different-id
    negative) and store them in self.rgb_list / self.rgb_list_label.

    NOTE(review): M is hard-coded to 20 triplets per id; random.sample
    raises ValueError if an id has fewer than 20 same-id pairs or the
    different-id pool holds fewer than 20 images -- verify dataset sizes.
    """
    # assert np.unique(self.train_rgb_label) == np.unique(self.train_ir_label), 'Number of Identities in Rgb and Ir data must be samse!'
    unique_labels = np.unique(self.train_rgb_label)
    rgb_list = []
    rgb_list_label = []
    for i in range(len(unique_labels)):
        id = unique_labels[i]   # NOTE: shadows the `id` builtin
        # Creating triplets of V, V, V
        tmp_pos = [k for k,v in enumerate(self.train_rgb_label) if v==id]
        rgb = [self.train_rgb_image[i] for i in tmp_pos]
        M = 20
        # Anchor/positive pairs drawn from all same-id 2-combinations.
        combi = random.sample(list(itertools.combinations(rgb, 2)), M)
        tmp_pos = [k for k,v in enumerate(self.train_rgb_label) if v!=id]
        rgb = [self.train_rgb_image[i] for i in tmp_pos]
        combi_d = random.sample(rgb, M)     # different-id negatives
        for i in range (M):
            dict = {}   # NOTE: shadows the `dict` builtin
            dict['1'] = combi[i][0]
            dict['2'] = combi[i][1]
            dict['3'] = combi_d[i]
            rgb_list.append(dict)
            dict_lab = {}
            dict_lab['1'] = id
            dict_lab['2'] = id
            # The negative's label is recovered from its directory name.
            dict_lab['3'] = int(combi_d[i].split('/')[-2])
            rgb_list_label.append(dict_lab)
    self.rgb_list, self.rgb_list_label = rgb_list, rgb_list_label
    return
def getRegDB(data_path, color_list, thermal_list):
    """Parse RegDB-style index files into image path and label lists.

    Each line of an index file is "<relative/path> <int label>".  Paths are
    prefixed with the dataset root, i.e. `data_path` minus its last two
    '/'-separated components.

    Returns (rgb_image, rgb_label, ir_image, ir_label).

    BUGFIX: each index file was previously opened twice -- once via a
    ``with`` block whose handle was never used, and again via a bare
    ``open(...).read()`` that was never closed.  Now each file is read once
    through the managed handle.
    """
    def _read_split(list_name):
        # Read one "<path> <label>" index file relative to data_path.
        name = data_path + list_name
        base_path = "/".join(data_path.split("/")[:-2]) + "/"
        with open(name, 'rt') as fh:
            entries = fh.read().splitlines()
        images = [base_path + line.split(' ')[0] for line in entries]
        labels = [int(line.split(' ')[1]) for line in entries]
        return images, labels

    rgb_image, rgb_label = _read_split(color_list)
    ir_image, ir_label = _read_split(thermal_list)
    return rgb_image, rgb_label, ir_image, ir_label
#################################### Testing Helper functions ##########################################
def get_state(path):
    """Load a checkpoint saved from an nn.DataParallel model and strip the
    leading ``module.`` from every key.

    NOTE(review): blindly drops the first 7 characters of each key --
    assumes every key actually starts with "module.".
    """
    saved = torch.load(path)
    return OrderedDict((key[7:], value) for key, value in saved.items())
def getLabelIndex(ids):
    """Map each label in `ids` to its rank in ascending sorted order.

    Returns
    -------
    dict : label -> index position after sorting.

    BUGFIX: the mapping was previously built and then discarded (the
    function implicitly returned None); it is now returned.  Callers that
    ignored the return value are unaffected.
    """
    ordered = np.sort(np.array(ids))
    label_dict_gallery = {label: index for index, label in enumerate(ordered)}
    return label_dict_gallery
def getLabels(probe_batch):
    """Extract parallel (label, camera) integer lists from image paths
    shaped like .../camN/<id>/<image>.
    """
    label, cam = [], []
    for path in probe_batch:
        pieces = path.split('/')
        label.append(int(pieces[-2]))        # identity directory
        cam.append(int(pieces[-3][-1]))      # trailing digit of "camN"
    return label, cam
def getImage(minibatch, FLAGS):
    """Load, resize and transform a list of image paths into a batch tensor.

    :param minibatch: list of image file paths
    :param FLAGS: config object providing ``image_width``, ``image_height``
        and ``cuda``
    :returns: float tensor stacking the transformed images, moved to the GPU
        when ``FLAGS.cuda`` is set
    """
    X = []
    for image_path in minibatch:
        img = Image.open(image_path)
        # Image.ANTIALIAS was deprecated and removed in Pillow 10;
        # Image.LANCZOS is the same resampling filter under its current name.
        img = img.resize((FLAGS.image_width, FLAGS.image_height), Image.LANCZOS)
        # transform_test is the module-level test-time transform pipeline.
        X.append(transform_test(img))
    X = torch.Tensor(np.stack(X))
    if FLAGS.cuda:
        X = X.cuda()
    return X
#################################### Evalutaion metric functions #######################################
# def getLoss(checkpoint, valset_sysu_mse, valset_sysu_trip, FLAGS, margin, transform_test):
# loader_mse = cycle(DataLoader(valset_sysu_mse, batch_size=FLAGS.batch_size, shuffle=False, num_workers=0, drop_last=True))
# loader_trip = cycle(DataLoader(valset_sysu_trip, batch_size=FLAGS.batch_size, shuffle=False, num_workers=0, drop_last=True))
# net = embed_net(FLAGS.low_dim_bdtr, FLAGS.num_classes, drop = FLAGS.drop, arch=FLAGS.arch)
# encoder_rgb = Encoder_Dense(FLAGS.embedding_dim, FLAGS.num_classes, FLAGS.feat_dim)
# decoder_rgb = Decoder_Dense(FLAGS.embedding_dim, FLAGS.num_classes, FLAGS.feat_dim)
# encoder_ir = Encoder_Dense(FLAGS.embedding_dim, FLAGS.num_classes, FLAGS.feat_dim)
# decoder_ir = Decoder_Dense(FLAGS.embedding_dim, FLAGS.num_classes, FLAGS.feat_dim)
# net.load_state_dict(checkpoint['net'])
# encoder_ir.load_state_dict(checkpoint['state_dict_encoder_ir'])
# encoder_rgb.load_state_dict(checkpoint['state_dict_encoder_rgb'])
# decoder_ir.load_state_dict(checkpoint['state_dict_decoder_ir'])
# decoder_rgb.load_state_dict(checkpoint['state_dict_decoder_rgb'])
# net.eval()
# encoder_rgb.eval()
# encoder_ir.eval()
# decoder_rgb.eval()
# decoder_ir.eval()
# encoder_rgb.cuda()
# encoder_ir.cuda()
# decoder_rgb.cuda()
# decoder_ir.cuda()
# net.cuda()
# mse_loss = nn.MSELoss()
# loss_mse = 0
# # triplet_loss_fn = TripletLoss(margin)
# # triplet_loss = 0
# criterion = nn.CrossEntropyLoss()
# identity_loss = 0
# with torch.no_grad():
# # MSE loss and Identity loss
# for iteration in range (int(len(valset_sysu_mse) / FLAGS.batch_size)):
# rgb, ir = next(loader_mse)
# X_rgb = getImage(rgb, FLAGS)
# X_ir = getImage(ir, FLAGS)
# X_rgb, X_ir = net(X_rgb, X_ir)
# S_, M_ = encoder_rgb(X_rgb)
# recon_rgb = decoder_rgb(S_, M_)
# loss_mse += mse_loss(X_rgb, recon_rgb).item()
# S, M = encoder_ir(X_ir)
# recon_ir = decoder_ir(S, M)
# loss_mse += mse_loss(X_ir, recon_ir).item()
# label_rgb = torch.LongTensor((np.nonzero(np.array(getLabels(rgb)[0])[:,None] == valset_sysu_mse.id_val_int)[1])).cuda()
# label_ir = torch.LongTensor((np.nonzero(np.array(getLabels(ir)[0])[:,None] == valset_sysu_mse.id_val_int)[1])).cuda()
# identity_loss += criterion(S_,label_rgb).item() + criterion(S,label_ir).item()
# print('MSE loss on validation data = ',str(loss_mse))
# print('Identity loss on validation data = ',str(identity_loss))
# # Triplet loss
# # for iteration in range (int(len(valset_sysu_trip) / FLAGS.batch_size)):
# # num = 8
# # for iteration in range (num):
# # rgb_triplet, ir_triplet = next(loader_trip)
# # S_1, _ = encoder_rgb(getImage(rgb_triplet['1'], FLAGS))
# # S_2, _ = encoder_ir(getImage(rgb_triplet['2'], FLAGS))
# # S_3, _ = encoder_ir(getImage(rgb_triplet['3'], FLAGS))
# # S_11, _ = encoder_ir(getImage(ir_triplet['1'], FLAGS))
# # S_22, _ = encoder_rgb(getImage(ir_triplet['2'], FLAGS))
# # S_33, _ = encoder_rgb(getImage(ir_triplet['3'], FLAGS))
# # triplet_loss += (triplet_loss_fn(S_1, S_2, S_3) + triplet_loss_fn(S_11, S_22, S_33)).item()
# # print('Triplet loss on validation data = ',str(triplet_loss))
# loss = loss_mse+identity_loss #+triplet_loss
# print('Total | |
import argparse
import os
import numpy as np
def str2bool(v):
    """Interpret the strings 'true' or '1' (any letter case) as True."""
    return v.lower() in {'true', '1'}
# Registry of created argparse groups plus the shared parser used by this
# configuration module.
arg_lists = []
parser = argparse.ArgumentParser()
def add_argument_group(name):
    # Create a named option group on the shared parser and remember it in
    # ``arg_lists`` so callers can iterate over every registered group.
    arg = parser.add_argument_group(name)
    arg_lists.append(arg)
    return arg
# crm
# BeerGame environment options: game length, demand process, action space,
# per-agent behaviour types and cost parameters for the four-echelon game.
# NOTE: string defaults such as 'False' are passed through the ``type``
# callable by argparse, so str2bool converts them like command-line values.
game_arg = add_argument_group('BeerGame')
game_arg.add_argument('--task', type=str, default='bg')
game_arg.add_argument('--fixedAction', type=str2bool, default='False', help='if you want to have actions in [0,actionMax] set it to True. with False it will set it [actionLow, actionUp]')
game_arg.add_argument('--observation_data', type=str2bool, default=False, help='if it is True, then it uses the data that is generated by based on few real world observation')
game_arg.add_argument('--data_id', type=int, default=22, help='the default item id for the basket dataset')
game_arg.add_argument('--TLow', type=int, default=100, help='duration of one GAME (lower bound)')
game_arg.add_argument('--TUp', type=int, default=100, help='duration of one GAME (upper bound)')
game_arg.add_argument('--demandDistribution', type=int, default=0, help='0=uniform, 1=normal distribution, 2=the sequence of 4,4,4,4,8,..., 3= basket data, 4= forecast data')
game_arg.add_argument('--scaled', type=str2bool, default=False, help='if true it uses the (if) existing scaled parameters')
game_arg.add_argument('--demandLow', type=int, default=0, help='the lower bound of random demand')
game_arg.add_argument('--demandUp', type=int, default=3, help='the upper bound of random demand')
game_arg.add_argument('--demandMu', type=float, default=10, help='the mu of the normal distribution for demand ')
game_arg.add_argument('--demandSigma', type=float, default=2, help='the sigma of the normal distribution for demand ')
game_arg.add_argument('--actionMax', type=int, default=2, help='it works when fixedAction is True')
game_arg.add_argument('--actionUp', type=int, default=2, help='bounds on my decision (upper bound), it works when fixedAction is True')
game_arg.add_argument('--actionLow', type=int, default=-2, help='bounds on my decision (lower bound), it works when fixedAction is True')
game_arg.add_argument('--action_step', type=int, default=1, help='The obtained action value by dnn is multiplied by this value')
# The action-list options below are filled in programmatically at runtime
# (their defaults are placeholders).
game_arg.add_argument('--actionList', type=list, default=[], help='The list of the available actions')
game_arg.add_argument('--actionListLen', type=int, default=0, help='the length of the action list')
game_arg.add_argument('--actionListOpt', type=int, default=0 , help='the action list which is used in optimal and sterman')
game_arg.add_argument('--actionListLenOpt', type=int, default=0, help='the length of the actionlistopt')
game_arg.add_argument('--agentTypes', type=list, default=['dnn','dnn','dnn','dnn'], help='the player types')
game_arg.add_argument('--agent_type1', type=str, default='dnn', help='the player types for agent 1, it can be dnn, Strm, bs, rnd')
game_arg.add_argument('--agent_type2', type=str, default='dnn', help='the player types for agent 2, it can be dnn, Strm, bs, rnd')
game_arg.add_argument('--agent_type3', type=str, default='dnn', help='the player types for agent 3, it can be dnn, Strm, bs, rnd')
game_arg.add_argument('--agent_type4', type=str, default='dnn', help='the player types for agent 4, it can be dnn, Strm, bs, rnd')
game_arg.add_argument('--NoAgent', type=int, default=1, help='number of agents, currently it should be in {1,2,3,4}')
# Per-agent shortage (cp) and holding (ch) costs.
game_arg.add_argument('--cp1', type=float, default=2.0, help='shortage cost of player 1')
game_arg.add_argument('--cp2', type=float, default=0.0, help='shortage cost of player 2')
game_arg.add_argument('--cp3', type=float, default=0.0, help='shortage cost of player 3')
game_arg.add_argument('--cp4', type=float, default=0.0, help='shortage cost of player 4')
game_arg.add_argument('--ch1', type=float, default=2.0, help='holding cost of player 1')
game_arg.add_argument('--ch2', type=float, default=2.0, help='holding cost of player 2')
game_arg.add_argument('--ch3', type=float, default=2.0, help='holding cost of player 3')
game_arg.add_argument('--ch4', type=float, default=2.0, help='holding cost of player 4')
# Sterman (anchor-and-adjust) formula coefficients per agent.
game_arg.add_argument('--alpha_b1', type=float, default=-0.5, help='alpha of Sterman formula parameter for player 1')
game_arg.add_argument('--alpha_b2', type=float, default=-0.5, help='alpha of Sterman formula parameter for player 2')
game_arg.add_argument('--alpha_b3', type=float, default=-0.5, help='alpha of Sterman formula parameter for player 3')
game_arg.add_argument('--alpha_b4', type=float, default=-0.5, help='alpha of Sterman formula parameter for player 4')
game_arg.add_argument('--betta_b1', type=float, default=-0.2, help='beta of Sterman formula parameter for player 1')
game_arg.add_argument('--betta_b2', type=float, default=-0.2, help='beta of Sterman formula parameter for player 2')
game_arg.add_argument('--betta_b3', type=float, default=-0.2, help='beta of Sterman formula parameter for player 3')
game_arg.add_argument('--betta_b4', type=float, default=-0.2, help='beta of Sterman formula parameter for player 4')
game_arg.add_argument('--eta', type=list, default=[0,4,4,4], help='the total cost regulazer')
game_arg.add_argument('--distCoeff', type=int, default=20, help='the total cost regulazer')
game_arg.add_argument('--gameConfig', type=int, default=3, help='if it is "0", it uses the current "agentType", otherwise sets agent types according to the function setAgentType() in this file.')
game_arg.add_argument('--ifUseTotalReward', type=str2bool, default='False', help='if you want to have the total rewards in the experience replay, set it to true.')
game_arg.add_argument('--ifUsedistTotReward', type=str2bool, default='True', help='If use correction to the rewards in the experience replay for all iterations of current game')
game_arg.add_argument('--ifUseASAO', type=str2bool, default='True', help='if use AS and AO, i.e., received shipment and received orders in the input of DNN')
game_arg.add_argument('--ifUseActionInD', type=str2bool, default='False', help='if use action in the input of DNN')
game_arg.add_argument('--stateDim', type=int, default=5, help='Number of elements in the state desciptor - Depends on ifUseASAO')
game_arg.add_argument('--iftl', type=str2bool, default=False, help='if apply transfer learning')
game_arg.add_argument('--ifTransferFromSmallerActionSpace', type=str2bool, default=False, help='if want to transfer knowledge from a network with different action space size.')
game_arg.add_argument('--baseActionSize', type=int, default=5, help='if ifTransferFromSmallerActionSpace is true, this determines the size of action space of saved network')
game_arg.add_argument('--tlBaseBrain', type=int, default=3, help='the gameConfig of the base network for re-training with transfer-learning')
game_arg.add_argument('--baseDemandDistribution', type=int, default=0, help='same as the demandDistribution')
game_arg.add_argument('--MultiAgent', type=str2bool, default=False, help='if run multi-agent RL model, not fully operational')
game_arg.add_argument('--MultiAgentRun', type=list, default=[True, True, True, True], help='In the multi-RL setting, it determines which agent should get training.')
game_arg.add_argument('--if_use_AS_t_plus_1', type=str2bool, default='False', help='if use AS[t+1], not AS[t] in the input of DNN')
game_arg.add_argument('--ifSinglePathExist', type=str2bool, default=False, help='If true it uses the predefined path in pre_model_dir and does not merge it with demandDistribution.')
game_arg.add_argument('--ifPlaySavedData', type=str2bool, default=False, help='If true it uses the saved actions which are read from file.')
#################### parameters of the leadtimes ########################
# Per-agent lead-time bounds; each default list holds one value per echelon.
leadtimes_arg = add_argument_group('leadtimes')
leadtimes_arg.add_argument('--leadRecItemLow', type=list, default=[2,2,2,4], help='the min lead time for receiving items')
leadtimes_arg.add_argument('--leadRecItemUp', type=list, default=[2,2,2,4], help='the max lead time for receiving items')
# Fixed: these two previously declared type=int while carrying list defaults;
# argparse applies `type` only to command-line strings, so int() would fail on
# any CLI value and the declaration was inconsistent with the Item pair above.
leadtimes_arg.add_argument('--leadRecOrderLow', type=list, default=[2,2,2,0], help='the min lead time for receiving orders')
leadtimes_arg.add_argument('--leadRecOrderUp', type=list, default=[2,2,2,0], help='the max lead time for receiving orders')
# Initial state vectors (one entry per echelon) followed by the per-agent
# scalar variants of the same quantities.
leadtimes_arg.add_argument('--ILInit', type=list, default=[0,0,0,0], help='')
leadtimes_arg.add_argument('--AOInit', type=list, default=[0,0,0,0], help='')
leadtimes_arg.add_argument('--ASInit', type=list, default=[0,0,0,0], help='the initial shipment of each agent')
leadtimes_arg.add_argument('--leadRecItem1', type=int, default=2, help='the min lead time for receiving items')
leadtimes_arg.add_argument('--leadRecItem2', type=int, default=2, help='the min lead time for receiving items')
leadtimes_arg.add_argument('--leadRecItem3', type=int, default=2, help='the min lead time for receiving items')
leadtimes_arg.add_argument('--leadRecItem4', type=int, default=2, help='the min lead time for receiving items')
leadtimes_arg.add_argument('--leadRecOrder1', type=int, default=2, help='the min lead time for receiving order')
leadtimes_arg.add_argument('--leadRecOrder2', type=int, default=2, help='the min lead time for receiving order')
leadtimes_arg.add_argument('--leadRecOrder3', type=int, default=2, help='the min lead time for receiving order')
leadtimes_arg.add_argument('--leadRecOrder4', type=int, default=2, help='the min lead time for receiving order')
leadtimes_arg.add_argument('--ILInit1', type=int, default=0, help='the initial inventory level of the agent')
leadtimes_arg.add_argument('--ILInit2', type=int, default=0, help='the initial inventory level of the agent')
leadtimes_arg.add_argument('--ILInit3', type=int, default=0, help='the initial inventory level of the agent')
leadtimes_arg.add_argument('--ILInit4', type=int, default=0, help='the initial inventory level of the agent')
leadtimes_arg.add_argument('--AOInit1', type=int, default=0, help='the initial arriving order of the agent')
leadtimes_arg.add_argument('--AOInit2', type=int, default=0, help='the initial arriving order of the agent')
leadtimes_arg.add_argument('--AOInit3', type=int, default=0, help='the initial arriving order of the agent')
leadtimes_arg.add_argument('--AOInit4', type=int, default=0, help='the initial arriving order of the agent')
leadtimes_arg.add_argument('--ASInit1', type=int, default=0, help='the initial arriving shipment of the agent')
leadtimes_arg.add_argument('--ASInit2', type=int, default=0, help='the initial arriving shipment of the agent')
leadtimes_arg.add_argument('--ASInit3', type=int, default=0, help='the initial arriving shipment of the agent')
leadtimes_arg.add_argument('--ASInit4', type=int, default=0, help='the initial arriving shipment of the agent')
#################### DQN setting ####################
# Deep Q-network hyper-parameters: network topology, replay memory,
# exploration schedule and optimizer settings.
DQN_arg = add_argument_group('DQN')
DQN_arg.add_argument('--maxEpisodesTrain', type=int, default=60100, help='number of GAMES to be trained')
DQN_arg.add_argument('--NoHiLayer', type=int, default=3, help='number of hidden layers')
DQN_arg.add_argument('--NoFixedLayer', type=int, default=1, help='number of hidden layers')
DQN_arg.add_argument('--node1', type=int, default=180, help='the number of nodes in the first hidden layer')
DQN_arg.add_argument('--node2', type=int, default=130, help='the number of nodes in the second hidden layer')
DQN_arg.add_argument('--node3', type=int, default=61, help='the number of nodes in the third hidden layer')
DQN_arg.add_argument('--nodes', type=list, default=[], help='')
DQN_arg.add_argument('--seed', type=int, default=4, help='the seed for DNN stuff')
DQN_arg.add_argument('--batchSize', type=int, default=64, help='the batch size which is used to obtain')
DQN_arg.add_argument('--minReplayMem', type=int, default=50000, help='the minimum of experience reply size to start dnn')
DQN_arg.add_argument('--maxReplayMem', type=int, default=1000000, help='the maximum size of the replay memory')
DQN_arg.add_argument('--alpha', type=float, default=.97, help='learning rate for total reward distribution ')
DQN_arg.add_argument('--gamma', type=float, default=.99, help='discount factor for Q-learning')
DQN_arg.add_argument('--saveInterval', type=int, default=10000, help='every xx training iteration, saves the games network')
# Epsilon-greedy exploration bounds.
DQN_arg.add_argument('--epsilonBeg', type=float, default=0.9, help='')
DQN_arg.add_argument('--epsilonEnd', type=float, default=0.1, help='')
DQN_arg.add_argument('--lr0', type=float, default=0.00025 , help='the learning rate')
DQN_arg.add_argument('--Minlr', type=float, default=1e-8, help='the minimum learning rate, if it drops below it, fix it there ')
DQN_arg.add_argument('--ifDecayAdam', type=str2bool, default=True, help='decays the learning rate of the adam optimizer')
DQN_arg.add_argument('--decayStep', type=int, default=10000, help='the decay step of the learning rate')
DQN_arg.add_argument('--decayRate', type=float, default=0.98, help='the rate to reduce the lr at every decayStep')
DQN_arg.add_argument('--display', type=int, default=1000, help='the number of iterations between two display of results.')
DQN_arg.add_argument('--momentum', type=float, default=0.9, help='the momentum value')
DQN_arg.add_argument('--dnnUpCnt', type=int, default=10000, help='the number of iterations that updates the dnn weights')
DQN_arg.add_argument('--multPerdInpt', type=int, default=10, help='Number of history records which we feed into DNN')
#################### Utilities ####################
# Bookkeeping options: model/log directories, hardware limits, TensorBoard.
utility_arg = add_argument_group('Utilities')
utility_arg.add_argument('--address', type=str, default="", help='the address which is used to save the model files')
utility_arg.add_argument('--ifUsePreviousModel', type=str2bool, default='False', help='if there is a saved model, then False value of this parameter will overwrite.')
utility_arg.add_argument('--number_cpu_active', type=int, default=5, help='number of cpu cores')
utility_arg.add_argument('--gpu_memory_fraction', type=float, default=0.1, help='the fraction of gpu memory which we are gonna use')
# Dirs
utility_arg.add_argument('--load_path', type=str, default='', help='The directory to load the models')
utility_arg.add_argument('--log_dir', type=str, default=os.path.expanduser('./logs/'), help='')
utility_arg.add_argument('--pre_model_dir', type=str, default=os.path.expanduser('./pre_model'),help='')
utility_arg.add_argument('--action_dir', type=str, default=os.path.expanduser('./'),help='if ifPlaySavedData is true, it uses this path to load actions')
utility_arg.add_argument('--model_dir', type=str, default='./',help='')
utility_arg.add_argument('--TB', type=str2bool, default=False, help='set to True if use tensor board and save the required data for TB.')
# NOTE(review): the help text below contradicts the flag name ('INFO_print'
# true => "does not print anything") — confirm intended polarity with callers.
utility_arg.add_argument('--INFO_print', type=str2bool, default=True, help='if true, it does not print anything all.')
utility_arg.add_argument('--tbLogInterval', type=int, default=80000, help='number of GAMES for testing')
#################### testing ####################
# Evaluation options used while training is still running.
test_arg = add_argument_group('testing')
test_arg.add_argument('--testRepeatMid', type=int, default=50, help='it is number of episodes which is going to be used for testing in the middle of training')
test_arg.add_argument('--testInterval', | |
'
'See also the "--disable" option for examples. '}),
('disable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'd',
'group': 'Messages control',
'help' : 'Disable the message, report, category or checker '
'with the given id(s). You can either give multiple identifiers'
' separated by comma (,) or put this option multiple times '
'(only on the command line, not in the configuration file '
'where it should appear only once).'
'You can also use "--disable=all" to disable everything first '
'and then reenable specific checks. For example, if you want '
'to run only the similarities checker, you can use '
'"--disable=all --enable=similarities". '
'If you want to run only the classes checker, but have no '
'Warning level messages displayed, use'
'"--disable=all --enable=classes --disable=W"'}),
('msg-template',
{'type' : 'string', 'metavar': '<template>',
'group': 'Reports',
'help' : ('Template used to display messages. '
'This is a python new-style format string '
'used to format the message information. '
'See doc for all details')
}),
('jobs',
{'type' : 'int', 'metavar': '<n-processes>',
'short': 'j',
'default': 1,
'help' : '''Use multiple processes to speed up Pylint.''',
}),
('unsafe-load-any-extension',
{'type': 'yn', 'metavar': '<yn>', 'default': False, 'hide': True,
'help': ('Allow loading of arbitrary C extensions. Extensions'
' are imported into the active Python interpreter and'
' may run arbitrary code.')}),
('extension-pkg-whitelist',
{'type': 'csv', 'metavar': '<pkg[,pkg]>', 'default': [],
'help': ('A comma-separated list of package or module names'
' from where C extensions may be loaded. Extensions are'
' loading into the active Python interpreter and may run'
' arbitrary code')}
),
)
option_groups = (
('Messages control', 'Options controlling analysis messages'),
('Reports', 'Options related to output formatting and reporting'),
)
    def __init__(self, options=(), reporter=None, option_groups=(),
                 pylintrc=None):
        """Initialise the linter: message store, checker registry, options
        and reporter.

        :param options: extra option definitions supplied by the caller,
            merged with the linter's own options
        :param reporter: reporter instance used to emit messages, if any
        :param option_groups: extra option groups merged with the defaults
        :param pylintrc: path of the configuration file, overriding discovery
        """
        # some stuff has to be done before ancestors initialization...
        #
        # messages store / checkers / reporter / astroid manager
        self.msgs_store = utils.MessagesStore()
        self.reporter = None
        self._reporter_name = None
        self._reporters = {}
        # checker name -> list of checker instances registered under it
        self._checkers = collections.defaultdict(list)
        # msgid -> line of the control pragma that toggled it
        self._pragma_lineno = {}
        self._ignore_file = False
        # visit variables
        self.file_state = utils.FileState()
        self.current_name = None
        self.current_file = None
        self.stats = None
        # init options
        self._external_opts = options
        self.options = options + PyLinter.make_options()
        self.option_groups = option_groups + PyLinter.option_groups
        # option names that toggle messages instead of storing a value,
        # plus their deprecated '-msg' spellings
        self._options_methods = {
            'enable': self.enable,
            'disable': self.disable}
        self._bw_options_methods = {'disable-msg': self.disable,
                                    'enable-msg': self.enable}
        full_version = '%%prog %s, \nastroid %s\nPython %s' % (
            version, astroid_version, sys.version)
        utils.MessagesHandlerMixIn.__init__(self)
        utils.ReportsHandlerMixIn.__init__(self)
        super(PyLinter, self).__init__(
            usage=__doc__,
            version=full_version,
            config_file=pylintrc or config.PYLINTRC)
        checkers.BaseTokenChecker.__init__(self)
        # provided reports
        self.reports = (('RP0001', 'Messages by category',
                         report_total_messages_stats),
                        ('RP0002', '% errors / warnings by module',
                         report_messages_by_module_stats),
                        ('RP0003', 'Messages',
                         report_messages_stats),
                       )
        # the linter is itself a checker; register it like any other
        self.register_checker(self)
        self._dynamic_plugins = set()
        self._python3_porting_mode = False
        self._error_mode = False
        self.load_provider_defaults()
        if reporter:
            self.set_reporter(reporter)
def load_default_plugins(self):
checkers.initialize(self)
reporters.initialize(self)
# Make sure to load the default reporter, because
# the option has been set before the plugins had been loaded.
if not self.reporter:
self._load_reporter()
def load_plugin_modules(self, modnames):
"""take a list of module names which are pylint plugins and load
and register them
"""
for modname in modnames:
if modname in self._dynamic_plugins:
continue
self._dynamic_plugins.add(modname)
module = modutils.load_module_from_name(modname)
module.register(self)
def _load_reporter(self):
name = self._reporter_name.lower()
if name in self._reporters:
self.set_reporter(self._reporters[name]())
else:
qname = self._reporter_name
module = modutils.load_module_from_name(
modutils.get_module_part(qname))
class_name = qname.split('.')[-1]
reporter_class = getattr(module, class_name)
self.set_reporter(reporter_class())
def set_reporter(self, reporter):
"""set the reporter used to display messages and reports"""
self.reporter = reporter
reporter.linter = self
    def set_option(self, optname, value, action=None, optdict=None):
        """overridden from config.OptionsProviderMixin to handle some
        special options
        """
        # 'enable'/'disable' (and their deprecated '-msg' spellings) are not
        # stored like regular options: they immediately toggle messages.
        if optname in self._options_methods or \
                optname in self._bw_options_methods:
            if value:
                try:
                    meth = self._options_methods[optname]
                except KeyError:
                    # deprecated spelling: warn, then fall back to the
                    # backward-compatible handler
                    meth = self._bw_options_methods[optname]
                    warnings.warn('%s is deprecated, replace it by %s' % (optname,
                                                                          optname.split('-')[0]),
                                  DeprecationWarning)
                # the value may be a comma-separated list of message ids
                value = utils._check_csv(value)
                if isinstance(value, (list, tuple)):
                    for _id in value:
                        meth(_id, ignore_unknown=True)
                else:
                    meth(value)
            return # no need to call set_option, disable/enable methods do it
        elif optname == 'output-format':
            self._reporter_name = value
            # If the reporters are already available, load
            # the reporter class.
            if self._reporters:
                self._load_reporter()
        try:
            checkers.BaseTokenChecker.set_option(self, optname,
                                                 value, action, optdict)
        except config.UnsupportedAction:
            print('option %s can\'t be read from config file' % \
                  optname, file=sys.stderr)
    def register_reporter(self, reporter_class):
        # Index the reporter class by its ``name`` attribute so that the
        # 'output-format' option can look it up later.
        self._reporters[reporter_class.name] = reporter_class
def report_order(self):
reports = sorted(self._reports, key=lambda x: getattr(x, 'name', ''))
try:
# Remove the current reporter and add it
# at the end of the list.
reports.pop(reports.index(self))
except ValueError:
pass
else:
reports.append(self)
return reports
# checkers manipulation methods ############################################
    def register_checker(self, checker):
        """register a new checker
        checker is an object implementing IRawChecker or / and IAstroidChecker
        """
        assert checker.priority <= 0, 'checker priority can\'t be >= 0'
        self._checkers[checker.name].append(checker)
        # expose the checker's reports and options to the linter
        for r_id, r_title, r_cb in checker.reports:
            self.register_report(r_id, r_title, r_cb, checker)
        self.register_options_provider(checker)
        if hasattr(checker, 'msgs'):
            self.msgs_store.register_messages(checker)
        checker.load_defaults()
        # Register the checker, but disable all of its messages.
        # TODO(cpopa): we should have a better API for this.
        if not getattr(checker, 'enabled', True):
            self.disable(checker.name)
def disable_noerror_messages(self):
for msgcat, msgids in six.iteritems(self.msgs_store._msgs_by_category):
# enable only messages with 'error' severity and above ('fatal')
if msgcat in ['E', 'F']:
for msgid in msgids:
self.enable(msgid)
else:
for msgid in msgids:
self.disable(msgid)
def disable_reporters(self):
"""disable all reporters"""
for _reporters in six.itervalues(self._reports):
for report_id, _, _ in _reporters:
self.disable_report(report_id)
    def error_mode(self):
        """error mode: enable only errors; no reports, no persistent"""
        self._error_mode = True
        self.disable_noerror_messages()
        self.disable('miscellaneous')
        if self._python3_porting_mode:
            # python3 porting mode is active too: keep only that checker's
            # error-level messages enabled
            self.disable('all')
            for msg_id in self._checker_messages('python3'):
                if msg_id.startswith('E'):
                    self.enable(msg_id)
        else:
            self.disable('python3')
        # no report output, no result caching, no evaluation score
        self.set_option('reports', False)
        self.set_option('persistent', False)
        self.set_option('score', False)
def python3_porting_mode(self):
"""Disable all other checkers and enable Python 3 warnings."""
self.disable('all')
self.enable('python3')
if self._error_mode:
# The error mode was activated, using the -E flag.
# So we'll need to enable only the errors from the
# Python 3 porting checker.
for msg_id in self._checker_messages('python3'):
if msg_id.startswith('E'):
self.enable(msg_id)
else:
self.disable(msg_id)
self._python3_porting_mode = True
# block level option handling #############################################
#
# see func_block_disable_msg.py test case for expected behaviour
    def process_tokens(self, tokens):
        """process tokens from the current module to search for module/block
        level options
        """
        control_pragmas = {'disable', 'enable'}
        for (tok_type, content, start, _, _) in tokens:
            # only comment tokens can carry inline pylint pragmas
            if tok_type != tokenize.COMMENT:
                continue
            match = utils.OPTION_RGX.search(content)
            if match is None:
                continue
            # whole-file suppression pragmas (both the deprecated and the
            # current spelling) stop processing immediately
            if match.group(1).strip() == "disable-all" or \
                    match.group(1).strip() == 'skip-file':
                if match.group(1).strip() == "disable-all":
                    self.add_message('deprecated-pragma', line=start[0],
                                     args=('disable-all', 'skip-file'))
                self.add_message('file-ignored', line=start[0])
                self._ignore_file = True
                return
            # every other pragma has the shape "<option>=<value>"
            try:
                opt, value = match.group(1).split('=', 1)
            except ValueError:
                self.add_message('bad-inline-option', args=match.group(1).strip(),
                                 line=start[0])
                continue
            opt = opt.strip()
            if opt in self._options_methods or opt in self._bw_options_methods:
                try:
                    meth = self._options_methods[opt]
                except KeyError:
                    meth = self._bw_options_methods[opt]
                    # found a "(dis|en)able-msg" pragma deprecated suppression
                    self.add_message('deprecated-pragma', line=start[0],
                                     args=(opt, opt.replace('-msg', '')))
                for msgid in utils._splitstrip(value):
                    # Add the line where a control pragma was encountered.
                    if opt in control_pragmas:
                        self._pragma_lineno[msgid] = start[0]
                    try:
                        # "disable=all" behaves like the skip-file pragma
                        if (opt, msgid) == ('disable', 'all'):
                            self.add_message('deprecated-pragma', line=start[0],
                                             args=('disable=all', 'skip-file'))
                            self.add_message('file-ignored', line=start[0])
                            self._ignore_file = True
                            return
                        meth(msgid, 'module', start[0])
                    except exceptions.UnknownMessageError:
                        self.add_message('bad-option-value', args=msgid, line=start[0])
            else:
                self.add_message('unrecognized-inline-option', args=opt, line=start[0])
# code checking methods ###################################################
def get_checkers(self):
"""return all available checkers as a list"""
return [self] + [c for _checkers in six.itervalues(self._checkers)
for c in _checkers if c is not self]
    def prepare_checkers(self):
        """return checkers needed for activated messages and reports"""
        if not self.config.reports:
            self.disable_reporters()
        # get needed checkers
        neededcheckers = [self]
        for checker in self.get_checkers()[1:]:
            # keep a checker if any of its messages or reports is enabled
            messages = set(msg for msg in checker.msgs
                           if self.is_message_enabled(msg))
            if (messages or
                    any(self.report_is_enabled(r[0]) for r in checker.reports)):
                neededcheckers.append(checker)
        # Sort checkers by priority
        neededcheckers = sorted(neededcheckers,
                                key=operator.attrgetter('priority'),
                                reverse=True)
        return neededcheckers
# pylint: disable=unused-argument
@staticmethod
def should_analyze_file(modname, path, is_argument=False):
"""Returns whether or not a module should be checked.
This implementation returns True for all python source file, indicating
that all files should be linted.
Subclasses may override this method to indicate that modules satisfying
certain conditions should not be linted.
:param str modname: The name of the module to be checked.
:param str path: The full path to the source code of the module.
:param bool is_argument: Whetter the file is an argument to pylint or not.
Files which respect this property are always
checked, since the user requested it explicitly.
:returns: True if the module should be checked.
:rtype: bool
"""
if is_argument:
return True
return path.endswith('.py')
# pylint: enable=unused-argument
def check(self, files_or_modules):
"""main checking entry: check | |
<gh_stars>100-1000
#
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket
import struct
from qpython import MetaData, CONVERSION_OPTIONS
from qpython.qtype import QException
from qpython.qreader import QReader, QReaderException
from qpython.qwriter import QWriter, QWriterException
class QConnectionException(Exception):
    """Signals that a connection to the q service could not be established."""
    pass
class QAuthenticationException(QConnectionException):
    """Signals that the q service rejected the supplied credentials."""
    pass
class MessageType(object):
    """Enumeration of the IPC protocol message types."""

    #: fire-and-forget message, no response expected
    ASYNC = 0
    #: request that expects a response
    SYNC = 1
    #: reply to a synchronous request
    RESPONSE = 2
class QConnection(object):
'''Connector class for interfacing with the q service.
Provides methods for synchronous and asynchronous interaction.
The :class:`.QConnection` class provides a context manager API and can be
used with a ``with`` statement::
with qconnection.QConnection(host = 'localhost', port = 5000) as q:
print(q)
print(q('{`int$ til x}', 10))
:Parameters:
- `host` (`string`) - q service hostname
- `port` (`integer`) - q service port
- `username` (`string` or `None`) - username for q authentication/authorization
- `password` (`string` or `None`) - password for q authentication/authorization
- `timeout` (`nonnegative float` or `None`) - set a timeout on blocking socket operations
- `encoding` (`string`) - string encoding for data deserialization
- `reader_class` (subclass of `QReader`) - data deserializer
- `writer_class` (subclass of `QWriter`) - data serializer
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed
data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars, **Default**: ``False``
'''
def __init__(self, host, port, username = None, password = <PASSWORD>, timeout = None, encoding = 'latin-1', reader_class = None, writer_class = None, **options):
self.host = host
self.port = port
self.username = username
self.password = password
self._connection = None
self._connection_file = None
self._protocol_version = None
self.timeout = timeout
self._encoding = encoding
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
try:
from qpython._pandas import PandasQReader, PandasQWriter
self._reader_class = PandasQReader
self._writer_class = PandasQWriter
except ImportError:
self._reader_class = QReader
self._writer_class = QWriter
if reader_class:
self._reader_class = reader_class
if writer_class:
self._writer_class = writer_class
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def protocol_version(self):
'''Retrieves established version of the IPC protocol.
:returns: `integer` -- version of the IPC protocol
'''
return self._protocol_version
    def open(self):
        '''Initialises connection to q service.

        If the connection hasn't been initialised yet, invoking the
        :func:`.open` creates a new socket and performs a handshake with a q
        service.

        :raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
        '''
        if not self._connection:
            if not self.host:
                raise QConnectionException('Host cannot be None')

            # Establish the TCP connection, then negotiate the IPC protocol.
            self._init_socket()
            self._initialize()

            # (De)serializers are bound to the freshly negotiated protocol
            # version and the configured string encoding.
            self._writer = self._writer_class(self._connection, protocol_version = self._protocol_version, encoding = self._encoding)
            self._reader = self._reader_class(self._connection_file, encoding = self._encoding)
    def _init_socket(self):
        '''Initialises the socket used for communicating with a q service.'''
        try:
            self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._connection.connect((self.host, self.port))
            # Applied after connect; subsequent blocking reads/writes honour
            # self.timeout (None means block indefinitely).
            self._connection.settimeout(self.timeout)
            # Buffered binary file object consumed by the reader.
            self._connection_file = self._connection.makefile('b')
        except:
            # Reset state before re-raising so the object remains usable
            # (bare except is deliberate: cleanup, then propagate).
            self._connection = None
            self._connection_file = None
            raise
def close(self):
'''Closes connection with the q service.'''
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None
def is_connected(self):
'''Checks whether connection with a q service has been established.
Connection is considered inactive when:
- it has not been initialised,
- it has been closed.
:returns: `boolean` -- ``True`` if connection has been established,
``False`` otherwise
'''
return True if self._connection else False
    def _initialize(self):
        '''Performs an IPC protocol handshake.'''
        # Credentials are sent as "username:password" in the configured encoding.
        credentials = (self.username if self.username else '') + ':' + (self.password if self.password else '')
        credentials = credentials.encode(self._encoding)
        # Request protocol capability level 3 (b'\3') followed by a NUL terminator.
        self._connection.send(credentials + b'\3\0')
        response = self._connection.recv(1)
        if len(response) != 1:
            # Old q servers drop the versioned handshake; reconnect and retry
            # once with the legacy (unversioned) form.
            self.close()
            self._init_socket()

            self._connection.send(credentials + b'\0')
            response = self._connection.recv(1)
            if len(response) != 1:
                self.close()
                raise QAuthenticationException('Connection denied.')

        # The single reply byte is the server's capability level; cap at 3,
        # the highest protocol version this client supports.
        self._protocol_version = min(struct.unpack('B', response)[0], 3)
def __str__(self):
return '%s@:%s:%s' % (self.username, self.host, self.port) if self.username else ':%s:%s' % (self.host, self.port)
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))
def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected')
def sendAsync(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til | |
# repo: tc985/is-452-fp
# bookstats.py
# this program pulls select data from plaintext books and writes it out to a csv
import sys
import string
import re
import csv
# removes project gutenberg metadata and extra text from the front and end of plaintext book
# returns gutenberg-less lowercase text
def death_to_johannes(text):
    """Strip Project Gutenberg boilerplate from *text*.

    Lowercases the text, splits on the '\\n\\n\\n\\nchapter' separator, drops
    everything before the first chapter, trims the Gutenberg license tail
    from the last chapter, and removes each chapter's two heading lines.

    Returns the cleaned text, or the string 'ERROR' when no chapter
    separators are found (formatting incompatible with this parser).
    """
    text_parts = text.lower().split('\n\n\n\nchapter')  # splits by chapter
    if len(text_parts) <= 1:
        return 'ERROR'  # no chapter headings, or formatting incompatible

    cut_point = text_parts[-1].find('end of the project gutenberg')
    if cut_point == -1:
        cut_point = text_parts[-1].find('end of project gutenberg')
    # Bug fix: only truncate when a license marker was actually found.
    # The original sliced with cut_point == -1, silently dropping the
    # final character of the last chapter when neither marker was present.
    if cut_point != -1:
        text_parts[-1] = text_parts[-1][:cut_point]

    del text_parts[0]  # kills everything before the first chapter

    # Drop the leftover chapter-heading lines (the first two) of each part.
    cleaned = ['\n'.join(part.split('\n')[2:]) for part in text_parts]
    return '\n'.join(cleaned)
# replaces smart quotes with dumb quotes and removes all other punctuation
def clean_punc(text):
    """Normalise smart quotes to plain '"' and blank out all other punctuation."""
    normalised = text.replace('“', '"').replace('”', '"')
    # Map every punctuation character except '"' to a space, one pass.
    return ''.join(
        ' ' if (ch in string.punctuation and ch != '"') else ch
        for ch in normalised
    )
# returns length of dialogue by splitting the text by "
def dialogue_length(text):
    """Return the number of words enclosed in double quotes in *text*.

    The text is split on '"'; odd-indexed segments are the quoted dialogue.
    Returns the string 'ERROR' when the quotes are unbalanced (odd number
    of '"' characters).
    """
    dia_lst = text.split('"')
    # Bug fix: n balanced quotes produce n + 1 (odd) segments, so an EVEN
    # segment count means an odd (unbalanced) quote count.  The original
    # test was inverted and flagged every well-formed text as an error.
    if len(dia_lst) % 2 == 0:
        return 'ERROR'
    total_length = 0
    for segment in dia_lst[1::2]:
        total_length += len(segment.split())
    return total_length
# gets rid of ALL punctuation
def cleaner_punc(text):
    """Replace every punctuation character (including quotes) with a space."""
    # One C-level pass instead of chained per-character replace() calls.
    table = str.maketrans({punc: ' ' for punc in string.punctuation})
    return text.translate(table)
# find longest word and checks to make sure it's real
# returns longest word and its length
# words_alpha.txt from https://github.com/dwyl/english-words
def longest_word(text):
    """Return (length, word) for the longest "real" English word in *text*.

    A word is "real" when it appears in words_alpha.txt (one word per line,
    from https://github.com/dwyl/english-words), read from the current
    working directory.  Ties are broken by first appearance in the text.

    :raises IndexError: if no word of *text* appears in the word list.
    """
    with open('words_alpha.txt', 'r', encoding = 'utf-8') as f:
        # A set gives O(1) membership tests; the original list made the
        # validity check below O(len(dictionary)) per unique word.
        real_words = {line.strip() for line in f}

    # Unique words of the text mapped to their lengths (insertion-ordered).
    lengths = {w: len(w) for w in text.split()}

    # Keep only dictionary words, longest first (stable sort keeps first
    # appearance as the tie-break, matching the original behaviour).
    valid = [(w, n) for w, n in lengths.items() if w in real_words]
    valid.sort(key = lambda item: item[1], reverse = True)

    word, length = valid[0]
    return length, word
# returns the most used word and how many times it appears
def most_used(text):
    """Return (word, count) for the most frequent word in *text*.

    Ties are broken by first appearance, matching the original stable-sort
    behaviour (Counter.most_common orders equal counts by first encounter).

    :raises IndexError: if *text* contains no words.
    """
    from collections import Counter  # stdlib replaces the hand-rolled dict

    counts = Counter(text.split())
    top_word, top_times = counts.most_common(1)[0]
    return top_word, top_times
# returns average words per paragraph
def para_size(text):
    """Return the average number of words per paragraph (split on blank lines)."""
    paragraphs = text.split('\n\n')
    word_count = len(text.split())
    return word_count / len(paragraphs)
# returns title and author of book
def t_and_a(text):
    """Extract (title, author) from Project Gutenberg header lines.

    :raises IndexError: if either field cannot be found (callers rely on
        IndexError, so the findall-then-index shape is kept).
    """
    title_matches = re.findall(re.compile('Title: [A-Z][A-Za-z ’,:-]+'), text)
    author_matches = re.findall(re.compile('Author: [A-Z][A-Za-z ’,:-]+'), text)
    # Strip the 'Title: ' / 'Author: ' prefixes from the first match.
    return title_matches[0][7:], author_matches[0][8:]
# returns average word length in characters and the total characters in the book
def avg_word_len(text):
    """Return (average word length, total non-whitespace character count)."""
    words = text.split()
    total_chars = len(re.sub(r'\s+', '', text))
    return total_chars / len(words), total_chars
def main():
dict_of_power = {}
print('This program reads one or more plaintext Project Gutenberg books from .txt files and selects')
print('statistics about the text. It reads out the statistics to a .csv file.')
print()
exit_code = 0
while exit_code != 1:
try:
stupid_value = 1 # will be used to intentionally cause a DivideByZeroError later to esc a loop
file_name = str(input('Enter the filename: '))
with open(file_name, 'r', encoding = 'utf-8') as f:
text = f.read()
# text_test will determine if the file will read or not
text_test = death_to_johannes(text) # we're now free of gutenberg stuff and all lower case, no chapters
if text_test == 'ERROR': #if johannes returns 'error', we can't work with the file, loop back to beginning or exit
print('This file cannot be evaluated with this program, sorry.')
y_or_n = input(str('Do you want to try a different file? (y/n): '))
while True:
if y_or_n == 'n':
q_dis = input("Do you want to quit or continue? \nQuitting will lose what you currently have. Continuing will export it. (q/c): ")
if q_dis == 'q':
print('Goodbye!')
sys.exit()
elif q_dis != 'c':
print('You did not enter "q" or "c". Please try again.')
else:
stupid_value = 0 # will cause an error later so we exit the try to an except
break
elif y_or_n != 'y':
print('You did not enter "y" or "n". Try again.')
else:
break
# this will run if you don't quit the program above, it will take the file you entered even if it won't work
# but that will cause an error which will be caught be an except and restart at the first while loop
try:
y = 1 / stupid_value # this will force an error to exit the try if 'c' was chosen in the quit/cont if
text2 = death_to_johannes(text) # we couldn't define this in all the mess above, just filter for it
text3 = clean_punc(text2) # now we have gutenberg free, punc free text, with non-smart " retained
text4 = cleaner_punc(text3) # now the " are gone
# this will return an IndexError if the infile can't be read through the johannes properly
# it will only occur if 'y' to try a new file with a non-working file already in
word_len, word = longest_word(text4)
wpp = para_size(text4)
dialogue = dialogue_length(text3) # we go back to text 3 so that we can have "s still to split by
if dialogue != 'ERROR':
dialogue_ratio = dialogue / len(text4.split())
else:
print('Note -- We could not obtain a dialogue ratio for you. That value will display blank.')
dialogue_ratio = ''
top_word, top_times = most_used(text4)
title, author = t_and_a(text) # use text so we still have gutenberg stuff to search
avg_len, length = avg_word_len(text4) # the length var won't include punctuation characters
list_of_might = [title, author, top_word, top_times, word, word_len, dialogue_ratio, wpp, avg_len, length,]
dict_of_power.update({str(title) : list_of_might})
print("We've finished evaluating and are ready to export your results.")
while True:
decision = input("Do you want to add another book to the evaluation or export what you have? (add/export): ")
if decision == 'add':
break
elif decision == 'export':
exit_code = 1
the_list_to_end_all_lists = list(dict_of_power.items())
outfile_name = str(input('Enter a name for your csv file. (Do not include ".csv".): '))
outfile = open(outfile_name + '.csv', 'w', encoding = 'utf-8', newline = '')
csvout = csv.writer(outfile)
csvout.writerow(['title', 'author', 'most-used word', 'times used', 'longest word', 'lw length', 'dialogue ratio', 'words per para', 'avg word len', 'total characters'])
for i in the_list_to_end_all_lists:
r1 = i[1][0]
r2 = i[1][1]
r3 = i[1][2]
r4 = i[1][3]
r5 = i[1][4]
r6 = i[1][5]
r7 = i[1][6]
r8 = i[1][7]
r9 = i[1][8]
r10 = i[1][9]
row = [r1, r2, r3, r4, r5, r6, r7, r8, r9, r10]
csvout.writerow(row)
break
else:
print('You did not enter "add" or "export". Try again.')
# this will loop back to the beginning of the first while loop to re-enter a new file
except IndexError:
print("Then let's take it from the top.")
#this section will run if you answer 'n' to try another file, but 'c' to quit or continue
except ZeroDivisionError:
print("Then let's export your file.")
exit_code = 1
# i just copied the code from earlier, which is a silly duplication of effort, but i was having
# a hard time wrapping my head around making this a function so I wouldn't have to
the_list_to_end_all_lists = list(dict_of_power.items())
outfile_name = str(input('Enter a name for your csv file. (Do not include ".csv".): '))
outfile = open(outfile_name + '.csv', 'w', encoding = 'utf-8', newline = '')
csvout = csv.writer(outfile)
csvout.writerow(['title', 'author', 'most-used word', 'times used', 'longest word', 'lw length', 'dialogue ratio', 'words per para', 'avg word len', 'total characters'])
for i in the_list_to_end_all_lists:
r1 = i[1][0]
r2 | |
# gh_stars: 0
import os
import subprocess
import SCons
import glob
import re
import sys
#===========================================================
# The first 4 functions provide for building a library,
# program, multiple-programs, or plugin from all the source
# in the current directory.
#
# The next section contains useful utility functions.
#
# The functions that follow in the final section add support
# for various packages (e.g. ROOT, Xerces, ...)
#===========================================================
##################################
# library
##################################
def library(env, libname=''):
    """Build a static library from all C/C++/FORTRAN source in this directory.

    :param env: SCons construction environment.
    :param libname: library name; defaults to the current directory's name.
    """
    # Library name comes from directory name
    if libname=='':
        libname = os.path.split(os.getcwd())[1]

    env.PrependUnique(CPPPATH = ['.'])

    # Add C/C++, and FORTRAN targets
    env.AppendUnique(ALL_SOURCES = env.Glob('*.c'))
    env.AppendUnique(ALL_SOURCES = env.Glob('*.cc'))
    env.AppendUnique(ALL_SOURCES = env.Glob('*.cpp'))
    env.AppendUnique(ALL_SOURCES = env.Glob('*.F'))

    sources = env['ALL_SOURCES']

    # Build static library from all source
    myobjs = env.Object(sources)
    mylib = env.Library(target = libname, source = myobjs)

    # Cleaning and installation are restricted to the directory
    # scons was launched from or its descendents
    CurrentDir = env.Dir('.').srcnode().abspath
    if not CurrentDir.startswith(env.GetLaunchDir()):
        # Not in launch directory. Tell scons not to clean these targets
        env.NoClean([myobjs, mylib])
    else:
        # We're in launch directory (or descendent) schedule installation

        # Installation directories for library and headers
        installdir = env.subst('$INSTALLDIR')
        includedir = "%s/%s" %(env.subst('$INCDIR'), libname)
        libdir = env.subst('$LIBDIR')

        # Install targets
        env.Install(libdir, mylib)
        env.Install(includedir, env.Glob('*.h*'))
##################################
# executable
##################################
def executable(env, exename=''):
    """Build a single program from all C/C++/FORTRAN source in this directory.

    :param env: SCons construction environment.
    :param exename: program name; defaults to the current directory's name.
    """
    # Executable name comes from directory name
    if exename=='':
        exename = os.path.split(os.getcwd())[1]

    env.PrependUnique(CPPPATH = ['.'])

    # Add C/C++, and FORTRAN targets
    env.AppendUnique(ALL_SOURCES = env.Glob('*.c'))
    env.AppendUnique(ALL_SOURCES = env.Glob('*.cc'))
    env.AppendUnique(ALL_SOURCES = env.Glob('*.cpp'))
    env.AppendUnique(ALL_SOURCES = env.Glob('*.F'))

    # Push commonly used libraries to end of list
    ReorderCommonLibraries(env)

    sources = env['ALL_SOURCES']

    # Build program from all source
    myobjs = env.Object(sources)
    myexe = env.Program(target = exename, source = myobjs)

    # Cleaning and installation are restricted to the directory
    # scons was launched from or its descendents
    CurrentDir = env.Dir('.').srcnode().abspath
    if not CurrentDir.startswith(env.GetLaunchDir()):
        # Not in launch directory. Tell scons not to clean these targets
        env.NoClean([myobjs, myexe])
    else:
        # We're in launch directory (or descendent) schedule installation

        # Installation directories for executable and headers
        installdir = env.subst('$INSTALLDIR')
        includedir = env.subst('$INCDIR')
        bindir = env.subst('$BINDIR')

        # Install targets
        env.Install(bindir, myexe)
##################################
# executables
##################################
def executables(env):
    """Build one program per main()-defining source file in this directory.

    Source files that define "main()" (C/C++) or " PROGRAM " (FORTRAN) each
    become a program; all remaining source files are linked into every
    program.  Program names derive from the main-defining file's name.
    """
    # This will generate multiple executables from the
    # source in the current directory. It does this
    # by identifying source files that define "main()"
    # and linking those with all source files that do not
    # define "main()". Program names are based on the
    # filename of the source file defining "main()"
    main_sources = []
    common_sources = []
    curpath = os.getcwd()
    srcpath = env.Dir('.').srcnode().abspath
    os.chdir(srcpath)
    files = glob.glob('*.c') + glob.glob('*.cc') + glob.glob('*.cpp')
    for f in files:
        # Bug fix: close the file handle deterministically (the original
        # open(f).read() leaked handles on non-refcounting interpreters).
        with open(f) as fp:
            contents = fp.read()
        if 'main(' in contents:
            main_sources.append(f)
        else:
            common_sources.append(f)

    for f in glob.glob('*.F'):
        with open(f) as fp:
            contents = fp.read()
        if ' PROGRAM ' in contents:
            main_sources.append(f)
        else:
            common_sources.append(f)
    os.chdir(curpath)

    env.PrependUnique(CPPPATH = ['.'])

    # Push commonly used libraries to end of list
    ReorderCommonLibraries(env)

    common_sources.extend(env['ALL_SOURCES'])

    # Build program from all source
    main_objs = env.Object(main_sources)
    common_objs = env.Object(common_sources)

    progs = []
    for obj in main_objs:
        # Strip off ".o" from object file name to get the program name.
        exename = re.sub(r'\.o$', '', str(obj))
        progs.append(env.Program(target = exename, source = [obj, common_objs]))

    # Cleaning and installation are restricted to the directory
    # scons was launched from or its descendents
    CurrentDir = env.Dir('.').srcnode().abspath
    if not CurrentDir.startswith(env.GetLaunchDir()):
        # Not in launch directory. Tell scons not to clean these targets
        env.NoClean([common_objs, main_objs, progs])
    else:
        # We're in launch directory (or descendent) schedule installation
        bindir = env.subst('$BINDIR')
        env.Install(bindir, progs)
##################################
# plugin
##################################
def plugin(env, pluginname=''):
    """Build a JANA plugin (shared library, plain ``<name>.so``) from all
    source in the current directory.

    :param env: SCons construction environment.
    :param pluginname: plugin name; defaults to the current directory's name.
    """
    # Library name comes from directory name
    if pluginname=='':
        pluginname = os.path.split(os.getcwd())[1]

    srcdir = str(env.Dir('.').srcnode().path)

    env.AppendUnique(CPPPATH = ['.', '#src/lib/JANA'])

    # Add C/C++ targets
    env.AppendUnique(ALL_SOURCES = env.Glob('*.c'))
    env.AppendUnique(ALL_SOURCES = env.Glob('*.cc'))
    env.AppendUnique(ALL_SOURCES = env.Glob('*.cpp'))
    env.AppendUnique(ALL_SOURCES = env.Glob('*.F'))

    sources = env['ALL_SOURCES']

    # Build shared objects; empty SHLIBPREFIX drops the "lib" prefix so the
    # plugin loads by its bare name.
    myobjs = env.SharedObject(sources)
    myplugin = env.SharedLibrary(target = pluginname, source = myobjs, SHLIBPREFIX='', SHLIBSUFFIX='.so')

    # Cleaning and installation are restricted to the directory
    # scons was launched from or its descendents
    CurrentDir = env.Dir('.').srcnode().abspath
    if not CurrentDir.startswith(env.GetLaunchDir()):
        # Not in launch directory. Tell scons not to clean these targets
        env.NoClean([myobjs, myplugin])
    else:
        # We're in launch directory (or descendent) schedule installation

        # Installation directories for plugin and headers
        installdir = env.subst('$INSTALLDIR')
        includedir = "%s/%s" %(env.subst('$INCDIR'), pluginname)
        pluginsdir = env.subst('$PLUGINSDIR')

        # Install targets
        installed = env.Install(pluginsdir, myplugin)
        env.Install(includedir, env.Glob('*.h*'))
#===========================================================
# Misc utility routines for the SBMS system
#===========================================================
##################################
# AddCompileFlags
##################################
def AddCompileFlags(env, allflags):
    """Split *allflags* (e.g. the output of ``root-config --cflags``) on
    whitespace and sort the pieces into the environment: ``-I`` options
    feed CPPPATH (prefix stripped), everything else feeds CCFLAGS."""
    tokens = allflags.split()
    cpppath = [t[2:] for t in tokens if t.startswith('-I')]
    ccflags = [t for t in tokens if not t.startswith('-I')]
    if ccflags:
        env.AppendUnique(CCFLAGS=ccflags)
    if cpppath:
        env.AppendUnique(CPPPATH=cpppath)
##################################
# AddLinkFlags
##################################
def AddLinkFlags(env, allflags):
    """Split *allflags* (e.g. the output of ``root-config --glibs``) on
    whitespace and sort the pieces into the environment: ``-L`` feeds
    LIBPATH, ``-l`` feeds LIBS (prefixes stripped), the rest LINKFLAGS."""
    linkflags = []
    libpath = []
    libs = []
    for token in allflags.split():
        if token.startswith('-L'):
            libpath.append(token[2:])
        elif token.startswith('-l'):
            libs.append(token[2:])
        else:
            linkflags.append(token)
    if linkflags:
        env.AppendUnique(LINKFLAGS=linkflags)
    if libpath:
        env.AppendUnique(LIBPATH=libpath)
    if libs:
        env.AppendUnique(LIBS=libs)
##################################
# ReorderCommonLibraries
##################################
def ReorderCommonLibraries(env):
    """Move commonly shared libraries to the end of ``env['LIBS']``.

    Several packages append the same system libraries (libz being the
    classic case) and gcc is sensitive to link order, so the canonical
    list below is pushed to the back of LIBS, in the order given here.
    """
    # Nothing to do when LIBS is unset or a plain string (exact type check
    # kept to preserve the original semantics for list subclasses).
    if type(env['LIBS']) is not list:
        return

    # Canonical back-of-the-line ordering.
    for lib in ('ccdb', 'mysql', 'xerces-c', 'z', 'bz2', 'pthread', 'm', 'dl'):
        if lib in env['LIBS']:
            env['LIBS'].remove(lib)
            env.Append(LIBS=[lib])
##################################
# ApplyPlatformSpecificSettings
##################################
def ApplyPlatformSpecificSettings(env, platform):
    """Import sbms_<platform> (dots in *platform* replaced by dashes) and run
    its InitENV(env) hook, if the module exists.

    Missing platform modules are silently ignored (reported when the
    SHOWBUILD option is > 0).

    NOTE(review): Python 2-only syntax (print statement,
    ``except ImportError,e``) -- left byte-identical on purpose.
    """
    # Look for SBMS file based on this platform and run the InitENV
    # function in it to allow for platform-specific settings. Normally,
    # the BMS_OSNAME will be passed in which almost certainly contains
    # "."s. The Python module loader doesn't like these and we have to
    # replace them with "-"s to appease it.

    platform = re.sub('\.', '-', platform)

    modname = "sbms_%s" % platform
    if (int(env['SHOWBUILD']) > 0):
        print "looking for %s.py" % modname

    try:
        InitENV = getattr(__import__(modname), "InitENV")

        # Run the InitENV function (if found)
        if(InitENV != None):
            print "sbms : Applying settings for platform %s" % platform
            InitENV(env)

    except ImportError,e:
        if (int(env['SHOWBUILD']) > 0): print "%s" % e
        pass
##################################
# OptionallyBuild
##################################
def OptionallyBuild(env, dirs):
    """Register *dirs* for on-demand building only.

    The directories are added to the dependency tree solely when the user
    launched scons from inside one of them, or named one (as a path suffix)
    on the command line.
    """
    # This is used to add directories that are not built as
    # part of the standard build, but can still be added
    # to the dependency tree so that the user can build them
    # by either invoking scons from within the specific source
    # directory or by specifying it on the command line.
    #
    #
    subdirs = []
    for dir in dirs:
        add_dir = False
        # Launched from inside this directory?
        if env.GetLaunchDir().endswith(dir): add_dir = True
        #if dir in env['COMMAND_LINE_TARGETS']: add_dir = True
        # Or named (possibly as a path suffix) as a command-line target?
        for target in env['COMMAND_LINE_TARGETS']:
            if target.endswith(dir): add_dir = True

        if add_dir : subdirs.extend([dir])

    if len(subdirs)>0 : env.SConscript(dirs=subdirs, exports='env', duplicate=0)
##################################
# TestCompile
##################################
def TestCompile(env, name, includes, content, options):
    # This provides an autoconf-like method to test compilation
    # of a C++ program to see which arguments are needed to get it
    # to compile and link. The arguments are:
    # env      - build environment
    # name     - name of test (used to make unique filenames)
    # includes - list of header files to be #included in test program
    # content  - content of test program (n.b. this is what gets placed
    #            inside of "main()" and before the return statement)
    # options  - list of different argument lists that should be tried
    #            to see which results in a successful compilation/link.
    #            The first to succeed is returned (as a list, not a single
    #            string). If none succeed, then a Python "None" value is
    #            returned. Note that each element of the list is itself a
    #            string that may contain many arguments, separated by spaces.
    #
    # n.b. if either the m32 or m64 flags are set by the user
    # via the command line then "-m32" or "-m64" are added to the
    # compile command. Otherwise, nothing is added and the default
    # bitness is used.
    #
    # NOTE(review): Python 2-only syntax (print statements) -- code left
    # byte-identical on purpose.

    # Test files are hidden, OS-tagged files in the current directory.
    ifname = '%s' % env.File('.%s_%s.cc' % (env['OSNAME'], name))
    ofname = '%s' % env.File('.%s_%s'    % (env['OSNAME'], name))

    # Generate the throwaway test program.
    f = open(ifname, 'w')
    for header in includes: f.write('#include<%s>\n' % header)
    f.write('int main(int n, char*argv[]){%s;return 0;}\n' % content)
    f.close();

    # Base compile command; candidate option sets are appended in turn.
    args = [env['CXX'], '-o', ofname]
    if (env['BITNESS32']!=0) : args.append('-m32')
    if (env['BITNESS64']!=0) : args.append('-m64')
    args.append(ifname)
    ret = None
    for opt in options:
        myargs = opt.split()
        if(env['SHOWBUILD'] >0):
            print 'Test compiling %s:' % name
            print args + myargs
        # First option set that compiles and links cleanly wins.
        res = subprocess.call(args + myargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if res==0:
            if(env['SHOWBUILD'] >0): print '---Succeeded'
            ret = myargs
            break
        else:
            if(env['SHOWBUILD'] >1):
                print '----Failed. Test file content was:------'
                print subprocess.call(['cat', ifname])
                print '----------------------------------------'

    # Always clean up the generated test files.
    if os.path.exists(ifname): os.unlink(ifname);
    if os.path.exists(ofname): os.unlink(ofname);

    return ret
#===========================================================
# Package support follows
#===========================================================
##################################
# pthread
##################################
def Add_pthread(env):
    """Add whatever flags this toolchain needs to link with pthreads.

    Probes with TestCompile in order: no extra flag, then the ``-pthread``
    driver flag, then an explicit ``-lpthread`` library.
    """
    includes = ['pthread.h']
    content = 'pthread_create(NULL, NULL, NULL, NULL);'
    if(TestCompile(env, 'pthread', includes, content, ['']) == None):
        if(TestCompile(env, 'pthread', includes, content, ['-pthread']) != None):
            env.AppendUnique(LINKFLAGS=['-pthread'])
        else:
            if(TestCompile(env, 'pthread', includes, content, ['-lpthread']) != None):
                env.AppendUnique(LIBS=['pthread'])
##################################
# JANA
##################################
def AddJANA(env):
    """Add JANA framework libraries, plus its XERCES and CCDB prerequisites.

    NOTE(review): relies on AddXERCES/AddCCDB defined elsewhere in this file.
    """
    AddXERCES(env)
    AddCCDB(env)
    env.AppendUnique(LIBS=['JANA','dl'])
##################################
# JANAInstalled (use an already installed jana-config | |
# filename: PythonAPI/examples/rss/manual_control_rss.py
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
# Copyright (c) 2019-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
# Allows controlling a vehicle with a keyboard. For a simpler and more
# documented example, please take a look at tutorial.py.
"""
Welcome to CARLA manual control.
Use ARROWS or WASD keys for control.
W : throttle
S : brake
AD : steer
Q : toggle reverse
Space : hand-brake
P : toggle autopilot
TAB : change view
Backspace : change vehicle
R : toggle recording images to disk
F2 : toggle RSS visualization mode
B : toggle RSS Road Boundaries Mode
G : RSS check drop current route
T : toggle RSS
N : pause simulation
F1 : toggle HUD
H/? : toggle help
ESC : quit
"""
from __future__ import print_function
# ==============================================================================
# -- find carla module ---------------------------------------------------------
# ==============================================================================
import glob
import os
import sys
import signal
try:
sys.path.append(glob.glob(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + '/carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================
import carla
from carla import ColorConverter as cc
import argparse
import logging
import math
import random
import weakref
from rss_sensor import RssSensor # pylint: disable=relative-import
from rss_visualization import RssUnstructuredSceneVisualizer, RssBoundingBoxVisualizer, RssStateVisualizer # pylint: disable=relative-import
try:
import pygame
from pygame.locals import KMOD_CTRL
from pygame.locals import KMOD_SHIFT
from pygame.locals import K_BACKSPACE
from pygame.locals import K_TAB
from pygame.locals import K_DOWN
from pygame.locals import K_ESCAPE
from pygame.locals import K_F1
from pygame.locals import K_F2
from pygame.locals import K_LEFT
from pygame.locals import K_RIGHT
from pygame.locals import K_SLASH
from pygame.locals import K_SPACE
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_b
from pygame.locals import K_d
from pygame.locals import K_g
from pygame.locals import K_h
from pygame.locals import K_n
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_r
from pygame.locals import K_s
from pygame.locals import K_w
from pygame.locals import K_l
from pygame.locals import K_i
from pygame.locals import K_z
from pygame.locals import K_x
from pygame.locals import MOUSEBUTTONDOWN
from pygame.locals import MOUSEBUTTONUP
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class World(object):
    """Wrapper around the CARLA world for this demo.

    Owns the ego vehicle (``player``), the HUD, the chase camera, the RSS
    sensor and its visualizers, and handles recording and pause state.
    """
    def __init__(self, carla_world, args):
        self.world = carla_world
        self.actor_role_name = args.rolename
        # Display size (width, height) shared by camera and visualizers.
        self.dim = (args.width, args.height)
        try:
            self.map = self.world.get_map()
        except RuntimeError as error:
            print('RuntimeError: {}'.format(error))
            print(' The server could not send the OpenDRIVE (.xodr) file:')
            print(' Make sure it exists, has the same name of your town, and is correct.')
            sys.exit(1)
        self.external_actor = args.externalActor
        self.hud = HUD(args.width, args.height, carla_world)
        self.recording_frame_num = 0
        self.recording = False
        self.recording_dir_num = 0
        self.player = None
        self.actors = []
        self.rss_sensor = None
        self.rss_unstructured_scene_visualizer = None
        self.rss_bounding_box_visualizer = None
        self._actor_filter = args.filter
        # RSS reasons about vehicle dynamics, so the ego must be a vehicle.
        if not self._actor_filter.startswith("vehicle."):
            print('Error: RSS only supports vehicles as ego.')
            sys.exit(1)
        self.restart()
        self.world_tick_id = self.world.on_tick(self.on_world_tick)
    def on_world_tick(self, world_snapshot):
        # Forward world ticks to the HUD (server-side timing bookkeeping).
        self.hud.on_world_tick(world_snapshot)
    def toggle_pause(self):
        """Toggle between paused (synchronous) and free-running simulation."""
        settings = self.world.get_settings()
        self.pause_simulation(not settings.synchronous_mode)
    def pause_simulation(self, pause):
        """Pause/resume by switching the world's synchronous mode.

        In synchronous mode the server only advances on explicit tick(),
        which effectively freezes the simulation for this client.
        """
        settings = self.world.get_settings()
        if pause and not settings.synchronous_mode:
            settings.synchronous_mode = True
            settings.fixed_delta_seconds = 0.05
            self.world.apply_settings(settings)
        elif not pause and settings.synchronous_mode:
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            self.world.apply_settings(settings)
    def restart(self):
        """(Re)acquire or (re)spawn the ego vehicle and attach all sensors."""
        if self.external_actor:
            # Check whether there is already an actor with defined role name
            for actor in self.world.get_actors():
                if actor.attributes.get('role_name') == self.actor_role_name:
                    self.player = actor
                    break
            # NOTE(review): if no matching external actor exists, self.player
            # stays None and the spawn loop below raises NameError on
            # 'blueprint' -- confirm external mode guarantees the actor.
        else:
            # Get a random blueprint.
            blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))
            blueprint.set_attribute('role_name', self.actor_role_name)
            if blueprint.has_attribute('color'):
                color = random.choice(blueprint.get_attribute('color').recommended_values)
                blueprint.set_attribute('color', color)
            if blueprint.has_attribute('driver_id'):
                driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
                blueprint.set_attribute('driver_id', driver_id)
            if blueprint.has_attribute('is_invincible'):
                blueprint.set_attribute('is_invincible', 'true')
        # Spawn the player.
        if self.player is not None:
            # Respawn at the current pose, lifted slightly and leveled out.
            spawn_point = self.player.get_transform()
            spawn_point.location.z += 2.0
            spawn_point.rotation.roll = 0.0
            spawn_point.rotation.pitch = 0.0
            self.destroy()
            self.player = self.world.try_spawn_actor(blueprint, spawn_point)
        while self.player is None:
            if not self.map.get_spawn_points():
                print('There are no spawn points available in your map/town.')
                print('Please add some Vehicle Spawn Point to your UE4 scene.')
                sys.exit(1)
            spawn_points = self.map.get_spawn_points()
            spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()
            self.player = self.world.try_spawn_actor(blueprint, spawn_point)
        if self.external_actor:
            # Remove sensors already attached to the external ego so we can
            # attach our own below.
            ego_sensors = []
            for actor in self.world.get_actors():
                if actor.parent == self.player:
                    ego_sensors.append(actor)
            for ego_sensor in ego_sensors:
                if ego_sensor is not None:
                    ego_sensor.destroy()
        # Set up the sensors.
        self.camera = Camera(self.player, self.dim)
        self.rss_unstructured_scene_visualizer = RssUnstructuredSceneVisualizer(self.player, self.world, self.dim)
        self.rss_bounding_box_visualizer = RssBoundingBoxVisualizer(self.dim, self.world, self.camera.sensor)
        self.rss_sensor = RssSensor(self.player, self.world,
                                    self.rss_unstructured_scene_visualizer, self.rss_bounding_box_visualizer, self.hud.rss_state_visualizer)
    def tick(self, clock):
        # Per-frame HUD update.
        self.hud.tick(self.player, clock)
    def toggle_recording(self):
        """Start/stop saving rendered frames into a fresh _outNNNN directory."""
        if not self.recording:
            # Find the first _outNNNN directory name that does not exist yet.
            dir_name = "_out%04d" % self.recording_dir_num
            while os.path.exists(dir_name):
                self.recording_dir_num += 1
                dir_name = "_out%04d" % self.recording_dir_num
            self.recording_frame_num = 0
            os.mkdir(dir_name)
        else:
            self.hud.notification('Recording finished (folder: _out%04d)' % self.recording_dir_num)
        self.recording = not self.recording
    def render(self, display):
        """Render camera image, RSS overlays and HUD; optionally save frame."""
        self.camera.render(display)
        self.rss_bounding_box_visualizer.render(display, self.camera.current_frame)
        self.rss_unstructured_scene_visualizer.render(display)
        self.hud.render(display)
        if self.recording:
            pygame.image.save(display, "_out%04d/%08d.bmp" % (self.recording_dir_num, self.recording_frame_num))
            self.recording_frame_num += 1
    def destroy(self):
        """Tear down the tick callback, sensors, visualizers and the player."""
        # stop from ticking
        if self.world_tick_id:
            self.world.remove_on_tick(self.world_tick_id)
        if self.camera:
            self.camera.destroy()
        if self.rss_sensor:
            self.rss_sensor.destroy()
        if self.rss_unstructured_scene_visualizer:
            self.rss_unstructured_scene_visualizer.destroy()
        if self.player:
            self.player.destroy()
# ==============================================================================
# -- Camera --------------------------------------------------------------------
# ==============================================================================
class Camera(object):
    """RGB camera attached to a parent actor; frames are rendered via pygame."""
    def __init__(self, parent_actor, display_dimensions):
        self.surface = None
        self._parent = parent_actor
        self.current_frame = None
        world = self._parent.get_world()
        bp = world.get_blueprint_library().find('sensor.camera.rgb')
        bp.set_attribute('image_size_x', str(display_dimensions[0]))
        bp.set_attribute('image_size_y', str(display_dimensions[1]))
        spring_arm_transform = carla.Transform(
            carla.Location(x=-5.5, z=2.5), carla.Rotation(pitch=8.0))
        self.sensor = world.spawn_actor(
            bp, spring_arm_transform, attach_to=self._parent,
            attachment_type=carla.AttachmentType.SpringArm)
        # Hand the callback a weak reference to avoid a circular reference
        # between the sensor and this wrapper.
        weak_self = weakref.ref(self)
        self.sensor.listen(lambda image: Camera._parse_image(weak_self, image))
    def destroy(self):
        """Stop listening and remove the camera sensor actor."""
        self.sensor.stop()
        self.sensor.destroy()
        self.sensor = None
    def render(self, display):
        """Blit the most recent camera frame onto the display, if any."""
        if self.surface is None:
            return
        display.blit(self.surface, (0, 0))
    @staticmethod
    def _parse_image(weak_self, image):
        """Convert a raw BGRA carla image into an RGB pygame surface."""
        self = weak_self()
        if not self:
            return
        self.current_frame = image.frame
        image.convert(cc.Raw)
        pixels = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
        pixels = np.reshape(pixels, (image.height, image.width, 4))
        # Drop alpha and flip BGR -> RGB in a single slice.
        pixels = pixels[:, :, 2::-1]
        self.surface = pygame.surfarray.make_surface(pixels.swapaxes(0, 1))
# ==============================================================================
# -- VehicleControl -----------------------------------------------------------
# ==============================================================================
class VehicleControl(object):
MOUSE_STEERING_RANGE = 200
signal_received = False
"""Class that handles keyboard input."""
    def __init__(self, world, start_in_autopilot):
        """Set up control state and the translucent mouse-steering overlay.

        Args:
            world: The World wrapper owning the ego vehicle (``world.player``).
            start_in_autopilot: Whether autopilot is enabled immediately.
        """
        self._autopilot_enabled = start_in_autopilot
        self._world = world
        self._control = carla.VehicleControl()
        self._lights = carla.VehicleLightState.NONE
        world.player.set_autopilot(self._autopilot_enabled)
        # Restrictor applies RSS proper responses to the manual control input.
        self._restrictor = carla.RssRestrictor()
        self._vehicle_physics = world.player.get_physics_control()
        world.player.set_light_state(self._lights)
        self._steer_cache = 0.0
        self._mouse_steering_center = None
        # Semi-transparent overlay (square border + crosshair) shown while
        # mouse steering is active; black is the colorkey so only the blue
        # lines are visible.
        self._surface = pygame.Surface((self.MOUSE_STEERING_RANGE * 2, self.MOUSE_STEERING_RANGE * 2))
        self._surface.set_colorkey(pygame.Color('black'))
        self._surface.set_alpha(60)
        line_width = 2
        # Outer square border.
        pygame.draw.polygon(self._surface,
                            (0, 0, 255),
                            [
                                (0, 0),
                                (0, self.MOUSE_STEERING_RANGE * 2 - line_width),
                                (self.MOUSE_STEERING_RANGE * 2 - line_width,
                                 self.MOUSE_STEERING_RANGE * 2 - line_width),
                                (self.MOUSE_STEERING_RANGE * 2 - line_width, 0),
                                (0, 0)
                            ], line_width)
        # Horizontal center line.
        pygame.draw.polygon(self._surface,
                            (0, 0, 255),
                            [
                                (0, self.MOUSE_STEERING_RANGE),
                                (self.MOUSE_STEERING_RANGE * 2, self.MOUSE_STEERING_RANGE)
                            ], line_width)
        # Vertical center line.
        pygame.draw.polygon(self._surface,
                            (0, 0, 255),
                            [
                                (self.MOUSE_STEERING_RANGE, 0),
                                (self.MOUSE_STEERING_RANGE, self.MOUSE_STEERING_RANGE * 2)
                            ], line_width)
        world.hud.notification("Press 'H' or '?' for help.", seconds=4.0)
def render(self, display):
if self._mouse_steering_center:
display.blit(
self._surface, (self._mouse_steering_center[0] - self.MOUSE_STEERING_RANGE, self._mouse_steering_center[1] - self.MOUSE_STEERING_RANGE))
@staticmethod
def signal_handler(signum, _):
print('\nReceived signal {}. Trigger stopping...'.format(signum))
VehicleControl.signal_received = True
def parse_events(self, world, clock):
if VehicleControl.signal_received:
print('\nAccepted signal. Stopping loop...')
return True
if isinstance(self._control, carla.VehicleControl):
current_lights = self._lights
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.KEYUP:
if self._is_quit_shortcut(event.key):
return True
elif event.key == K_BACKSPACE:
if self._autopilot_enabled:
world.player.set_autopilot(False)
world.restart()
world.player.set_autopilot(True)
else:
world.restart()
elif event.key == K_F1:
world.hud.toggle_info()
elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):
world.hud.help.toggle()
elif event.key == K_TAB:
world.rss_unstructured_scene_visualizer.toggle_camera()
elif event.key == K_n:
world.toggle_pause()
elif event.key == K_r:
world.toggle_recording()
elif event.key == K_F2:
if self._world and self._world.rss_sensor:
self._world.rss_sensor.toggle_debug_visualization_mode()
elif event.key == K_b:
if self._world and self._world.rss_sensor:
if self._world.rss_sensor.sensor.road_boundaries_mode == carla.RssRoadBoundariesMode.Off:
self._world.rss_sensor.sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.On
print("carla.RssRoadBoundariesMode.On")
else:
self._world.rss_sensor.sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.Off
print("carla.RssRoadBoundariesMode.Off")
elif event.key == K_g:
if self._world and self._world.rss_sensor:
self._world.rss_sensor.drop_route()
if isinstance(self._control, carla.VehicleControl):
if event.key == K_q:
self._control.gear = 1 if self._control.reverse else -1
elif event.key == K_p and not pygame.key.get_mods() & KMOD_CTRL:
self._autopilot_enabled = not self._autopilot_enabled
world.player.set_autopilot(self._autopilot_enabled)
world.hud.notification(
'Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))
elif event.key == K_l and pygame.key.get_mods() & KMOD_CTRL:
current_lights ^= carla.VehicleLightState.Special1
elif event.key == K_l and pygame.key.get_mods() & KMOD_SHIFT:
current_lights ^= carla.VehicleLightState.HighBeam
elif event.key == K_l:
# Use 'L' key to switch between lights:
# closed -> position -> low beam -> fog
if not self._lights & carla.VehicleLightState.Position:
world.hud.notification("Position lights")
current_lights |= carla.VehicleLightState.Position
else:
world.hud.notification("Low beam lights")
current_lights |= carla.VehicleLightState.LowBeam
if self._lights & carla.VehicleLightState.LowBeam:
world.hud.notification("Fog lights")
current_lights |= carla.VehicleLightState.Fog
if self._lights & carla.VehicleLightState.Fog:
world.hud.notification("Lights off")
current_lights ^= carla.VehicleLightState.Position
current_lights ^= carla.VehicleLightState.LowBeam
current_lights ^= carla.VehicleLightState.Fog
elif event.key == K_i:
current_lights ^= carla.VehicleLightState.Interior
elif event.key == K_z:
current_lights ^= carla.VehicleLightState.LeftBlinker
elif event.key == K_x:
current_lights ^= carla.VehicleLightState.RightBlinker
| |
0
@property
def snap_nums(self):
return list(self._snap_nums)
@property
def video_config(self):
return (
None if self._video_description is None else self._video_description.config
)
def set_logger(self, logger):
self._logger = logger
def ensure_data_is_ready(self, prepared_data_dir, plot_descriptions):
self.logger.info(f"Preparing data for {self.name}")
assert self.data_available
plot_data_locations = {}
bifrost_data = self.get_bifrost_data(self._snap_nums[0])
(
_,
unavailable_quantities,
available_plots,
unavailable_plots,
) = self._check_quantity_availability(bifrost_data, plot_descriptions)
if len(available_plots) > 0:
plot_data_locations[self.data_dir] = available_plots
if len(unavailable_plots) == 0:
return plot_data_locations
if prepared_data_dir.is_dir():
try:
prepared_bifrost_data = self.get_bifrost_data(
self._snap_nums[0], other_data_dir=prepared_data_dir
)
except:
prepared_bifrost_data = None
if prepared_bifrost_data is not None:
(
available_quantities_prepared,
unavailable_quantities_prepared,
available_plots_prepared,
_,
) = self._check_quantity_availability(
prepared_bifrost_data, unavailable_plots
)
if len(unavailable_quantities_prepared) == 0:
if len(available_plots_prepared) > 0:
plot_data_locations[
prepared_data_dir
] = available_plots_prepared
prepared_snap_nums = self._find_snap_nums(
other_data_dir=prepared_data_dir
)
missing_snap_nums = [
snap_num
for snap_num in self._snap_nums
if snap_num not in prepared_snap_nums
]
if len(missing_snap_nums) > 0:
prepared_bifrost_data = None
self._prepare_derived_data(
prepared_data_dir,
available_quantities_prepared,
snap_nums=missing_snap_nums,
)
return plot_data_locations
prepared_bifrost_data = None
self._prepare_derived_data(prepared_data_dir, unavailable_quantities)
prepared_bifrost_data = self.get_bifrost_data(
self._snap_nums[0], other_data_dir=prepared_data_dir
)
(
_,
unavailable_quantities_prepared,
available_plots_prepared,
_,
) = self._check_quantity_availability(prepared_bifrost_data, unavailable_plots)
if len(available_plots_prepared) > 0:
plot_data_locations[prepared_data_dir] = available_plots_prepared
for quantity in unavailable_quantities_prepared:
self.logger.error(
f"Could not obtain quantity {quantity.name} for simulation {self.name}, skipping"
)
return plot_data_locations
def get_bifrost_data(self, snap_num, other_data_dir=None):
fdir = self.data_dir if other_data_dir is None else other_data_dir
self.logger.debug(f"Reading snap {snap_num} of {self.name} in {fdir}")
assert snap_num in self._snap_nums
return helita_utils.CachingBifrostData(
self.name, fdir=fdir, snap=snap_num, verbose=False
)
def _find_snap_nums(self, other_data_dir=None):
input_dir = self.data_dir if other_data_dir is None else other_data_dir
snap_nums = self._find_all_snap_nums(input_dir)
if self._start_snap_num is not None:
snap_nums = [n for n in snap_nums if n >= self._start_snap_num]
if self._end_snap_num is not None:
snap_nums = [n for n in snap_nums if n <= self._end_snap_num]
self.logger.debug(
f'Found snaps {", ".join(map(str, snap_nums))} in {input_dir}'
)
return snap_nums
def _find_all_snap_nums(self, input_dir):
p = re.compile("{}_(\d\d\d)\.idl$".format(self.name))
snap_nums = []
for name in os.listdir(input_dir):
match = p.match(name)
if match:
snap_nums.append(int(match.group(1)))
return sorted(snap_nums)
def _check_quantity_availability(self, bifrost_data, plot_descriptions):
available_quantities = []
unavailable_quantities = []
available_plots = []
unavailable_plots = []
for plot_description in plot_descriptions:
quantity = plot_description.quantity
if quantity in available_quantities:
available_plots.append(plot_description)
elif quantity in unavailable_quantities:
unavailable_plots.append(plot_description)
else:
if plot_description.quantity.is_available(bifrost_data):
self.logger.debug(
f"Quantity {quantity.name} available for {bifrost_data.file_root}"
)
available_quantities.append(quantity)
available_plots.append(plot_description)
else:
self.logger.debug(
f"Quantity {quantity.name} not available for {bifrost_data.file_root}"
)
unavailable_quantities.append(quantity)
unavailable_plots.append(plot_description)
return (
available_quantities,
unavailable_quantities,
available_plots,
unavailable_plots,
)
def _prepare_derived_data(self, prepared_data_dir, quantities, snap_nums=None):
if len(quantities) == 0:
return
os.makedirs(prepared_data_dir, exist_ok=True)
if snap_nums is None:
snap_nums = self._snap_nums
param_file_name = f"{self.name}_{snap_nums[0]}.idl"
snap_range_specification = (
[f"--snap-range={snap_nums[0]},{snap_nums[-1]}"]
if len(snap_nums) > 1
else []
)
derived_dependency_names = []
synthesized_quantities = []
for quantity in quantities:
if quantity.dependency_type == "derived":
derived_dependency_names.append(quantity.dependency_name)
elif quantity.dependency_type == "synthesized":
synthesized_quantities.append(quantity)
else:
raise ValueError(f"Invalid dependency type {quantity.dependency_type}")
synthesize_command_args = SynthesizedQuantity.get_command_args(
synthesized_quantities
)
all_dependency_names = derived_dependency_names + [
quantity.dependency_name for quantity in synthesized_quantities
]
return_code = running.run_command(
[
"backstaff",
"--protected-file-types=",
"snapshot",
"-v",
*snap_range_specification,
param_file_name,
"derive",
"-v",
"--ignore-warnings",
*synthesize_command_args,
"write",
"-v",
"--ignore-warnings",
"--overwrite",
f'--included-quantities={",".join(all_dependency_names)}',
str((prepared_data_dir / param_file_name).resolve()),
],
cwd=self.data_dir,
logger=self.logger.debug,
error_logger=self.logger.error,
)
if return_code != 0:
abort(self.logger, "Non-zero return code")
for snap_num in snap_nums:
snap_name = f"{self.name}_{snap_num:03}.snap"
snap_path = self.data_dir / snap_name
linked_snap_path = prepared_data_dir / snap_name
if (
linked_snap_path.with_suffix(".idl").is_file()
and not linked_snap_path.is_file()
):
os.symlink(snap_path, linked_snap_path)
if return_code != 0:
abort(self.logger, "Non-zero return code")
class Visualizer:
    """Plots per-snapshot frames for a simulation run and assembles them
    into videos, storing all output under <data_dir>/<output_dir_name>."""
    def __init__(self, simulation_run, output_dir_name="autoviz"):
        self._simulation_run = simulation_run
        self._logger = simulation_run.logger
        # All output (frames, videos, derived data) lives under this dir.
        self._output_dir = self._simulation_run.data_dir / output_dir_name
        self._prepared_data_dir = self._output_dir / "data"
        self._logger.debug(f"Using output directory {self._output_dir}")
    @property
    def logger(self):
        # Logger shared with the simulation run (see set_logger).
        return self._logger
    @property
    def simulation_name(self):
        # Name of the underlying simulation run.
        return self._simulation_run.name
    def set_logger(self, logger):
        """Replace the logger on both this visualizer and its run."""
        self._simulation_run.set_logger(logger)
        self._logger = logger
    def clean(self):
        """Interactively remove the whole output directory (with confirm)."""
        if not self._output_dir.is_dir():
            print(f"No data to clean for {self.simulation_name}")
            return
        print(f"The directory {self._output_dir} and all its content will be removed")
        while True:
            answer = input("Continue? [y/N] ").strip().lower()
            if answer in ("", "n"):
                print("Aborted")
                break
            if answer == "y":
                shutil.rmtree(self._output_dir)
                self.logger.debug(f"Removed {self._output_dir}")
                break
    def create_videos_only(self, *plot_descriptions):
        """Assemble videos from frames plotted earlier (no re-plotting)."""
        video_config = self._simulation_run.video_config
        if video_config is not None:
            snap_nums = self._simulation_run.snap_nums
            if len(snap_nums) == 0:
                return
            for plot_description in plot_descriptions:
                frame_dir = self._output_dir / plot_description.tag
                if plot_description.has_multiple_fields:
                    # Multi-field plots: one video per snap, frames indexed
                    # by field id.
                    bifrost_data = self._simulation_run.get_bifrost_data(snap_nums[0])
                    for snap_num in snap_nums:
                        # NOTE(review): bifrost_data is never re-pointed with
                        # set_snap(snap_num) here (visualize() does) -- the
                        # field ids may come from the first snap only;
                        # confirm this is intended.
                        fields = plot_description.get_field(bifrost_data)
                        field_ids = fields.get_ids()
                        output_dir = frame_dir / f"{snap_num}"
                        self._create_video_from_frames(
                            output_dir,
                            field_ids,
                            frame_dir.with_name(f"{frame_dir.stem}_{snap_num}.mp4"),
                            **video_config,
                        )
                else:
                    # Single-field plots: one video across all snaps.
                    self._create_video_from_frames(
                        frame_dir,
                        snap_nums,
                        frame_dir.with_suffix(".mp4"),
                        **video_config,
                    )
    def visualize(
        self,
        *plot_descriptions,
        overwrite=False,
        job_idx=0,
        show_progress=True,
        new_logger_builder=None,
    ):
        """Plot frames (and, if configured, videos) for the given plots.

        Args:
            plot_descriptions: Plot descriptions to render.
            overwrite: Re-plot frames even if the output file exists.
            job_idx: Progress-bar row when several jobs run in parallel.
            show_progress: Whether to wrap iterations in a tqdm bar.
            new_logger_builder: Optional callable producing a fresh logger
                (used when running in a separate process).
        """
        if new_logger_builder is not None:
            self.set_logger(new_logger_builder())
        if not self._simulation_run.data_available:
            self.logger.error(
                f"No data for simulation {self.simulation_name} in {self._simulation_run.data_dir}, aborting"
            )
            return
        # Closure reads plot_description/job_idx at call time, i.e. the
        # current iteration of the loops below.
        def add_progress_bar(iterable, extra_desc=None):
            if not show_progress:
                return iterable
            return tqdm(
                iterable,
                desc=f"{self.simulation_name} {plot_description.tag}"
                + ("" if extra_desc is None else f" {extra_desc}"),
                position=job_idx,
                ascii=True,
            )
        plot_data_locations = self._simulation_run.ensure_data_is_ready(
            self._prepared_data_dir, plot_descriptions
        )
        # Invert dir -> plots into plot -> dir.
        # NOTE(review): the loop variable below shadows the *plot_descriptions
        # parameter (unused afterwards, so currently harmless).
        plot_data_locations_inverted = {}
        for data_dir, plot_descriptions in plot_data_locations.items():
            for plot_description in plot_descriptions:
                plot_data_locations_inverted[plot_description] = data_dir
        snap_nums = self._simulation_run.snap_nums
        for plot_description, data_dir in plot_data_locations_inverted.items():
            frame_dir = self._output_dir / plot_description.tag
            os.makedirs(frame_dir, exist_ok=True)
            self.logger.info(
                f"Plotting frames for {plot_description.tag} in {self.simulation_name}"
            )
            bifrost_data = self._simulation_run.get_bifrost_data(snap_nums[0], data_dir)
            if plot_description.has_multiple_fields:
                # One frame per field id, per snap; frames grouped in a
                # per-snap subdirectory.
                for snap_num in snap_nums:
                    output_dir = frame_dir / f"{snap_num}"
                    os.makedirs(output_dir, exist_ok=True)
                    bifrost_data.set_snap(snap_num)
                    fields = plot_description.get_field(bifrost_data)
                    field_ids = fields.get_ids()
                    for field_id in add_progress_bar(
                        field_ids, extra_desc=f"(snap {snap_num})"
                    ):
                        output_path = output_dir / f"{field_id}.png"
                        if output_path.exists() and not overwrite:
                            self.logger.debug(f"{output_path} already exists, skipping")
                            continue
                        field_wrapper = fields(field_id)
                        self._plot_frame(
                            bifrost_data,
                            plot_description,
                            field_wrapper.field,
                            output_path,
                            label=field_wrapper.label,
                        )
                    if self._simulation_run.video_config is not None:
                        self._create_video_from_frames(
                            output_dir,
                            field_ids,
                            frame_dir.with_name(f"{frame_dir.stem}_{snap_num}.mp4"),
                            **self._simulation_run.video_config,
                        )
            else:
                # One frame per snap.
                for snap_num in add_progress_bar(snap_nums):
                    output_path = frame_dir / f"{snap_num}.png"
                    if output_path.exists() and not overwrite:
                        self.logger.debug(f"{output_path} already exists, skipping")
                        continue
                    bifrost_data.set_snap(snap_num)
                    field = plot_description.get_field(bifrost_data)
                    self._plot_frame(bifrost_data, plot_description, field, output_path)
                if self._simulation_run.video_config is not None:
                    self._create_video_from_frames(
                        frame_dir,
                        snap_nums,
                        frame_dir.with_suffix(".mp4"),
                        **self._simulation_run.video_config,
                    )
    def _plot_frame(
        self, bifrost_data, plot_description, field, output_path, label=None
    ):
        """Plot a single field to output_path, annotated with the sim time
        (and an optional extra label)."""
        time = float(bifrost_data.params["t"]) * units.U_T
        text = f"{time:.1f} s"
        if label is not None:
            text = f"{text}\n{label}"
        field.plot(
            output_path=output_path,
            extra_artists=[AnchoredText(text, "upper left", frameon=False)],
            **plot_description.get_plot_kwargs(field),
        )
    def _create_video_from_frames(self, frame_dir, frame_indices, output_path, fps=15):
        """Stitch <frame_idx>.png files into an mp4 via ffmpeg.

        Frames are symlinked into a temp dir with consecutive numbering so
        ffmpeg's %d template can consume a sparse frame sequence.
        """
        self.logger.info(
            f"Creating video {output_path.name} from {self.simulation_name}"
        )
        tempdir = frame_dir / ".ffmpeg_tmp"
        if tempdir.exists():
            shutil.rmtree(tempdir)
        os.makedirs(tempdir)
        frame_num = 0
        for frame_idx in frame_indices:
            frame_path = frame_dir / f"{frame_idx:d}.png"
            linked_frame_path = tempdir / f"{frame_num:d}.png"
            if frame_path.is_file():
                os.symlink(frame_path, linked_frame_path)
                frame_num += 1
        frame_path_template = tempdir / "%d.png"
        return_code = running.run_command(
            [
                "ffmpeg",
                "-loglevel",
                "error",
                "-y",
                "-r",
                "{:d}".format(fps),
                "-start_number",
                "0",
                "-i",
                str(frame_path_template),
                # Pad to even dimensions as required by yuv420p/libx264.
                "-vf",
                "pad=width=ceil(iw/2)*2:height=ceil(ih/2)*2:color=white",
                "-vcodec",
                "libx264",
                "-pix_fmt",
                "yuv420p",
                str(output_path),
            ],
            logger=self.logger.debug,
            error_logger=self.logger.error,
        )
        shutil.rmtree(tempdir)
        if return_code != 0:
            self.logger.error("Could not create video, skipping")
class Visualization:
    """Binds a Visualizer to a fixed set of plot descriptions."""
    def __init__(self, visualizer, *plot_descriptions):
        self._visualizer = visualizer
        self._plot_descriptions = plot_descriptions
    @property
    def visualizer(self):
        """The underlying Visualizer instance."""
        return self._visualizer
    def create_videos_only(self, **kwargs):
        """Create videos for the bound plots without re-plotting frames."""
        self._visualizer.create_videos_only(*self._plot_descriptions, **kwargs)
    def visualize(self, **kwargs):
        """Plot frames (and videos) for the bound plot descriptions."""
        self._visualizer.visualize(*self._plot_descriptions, **kwargs)
def parse_config_file(file_path, logger=logging):
    """Parse a YAML config file into a list of Visualization objects.

    Args:
        file_path: Path to the YAML configuration file.
        logger: Logger used for reporting (defaults to the logging module).

    Returns:
        List of Visualization objects, one per configured simulation.
    """
    logger.debug(f"Parsing {file_path}")
    file_path = pathlib.Path(file_path)
    if not file_path.exists():
        abort(logger, f"Could not find config file {file_path}")
    yaml = YAML()
    with open(file_path, "r") as f:
        try:
            entries = yaml.load(f)
        # BUG FIX: `yaml` is the YAML() instance, which has no YAMLError
        # attribute, so the original `except yaml.YAMLError` raised
        # AttributeError instead of aborting cleanly on a parse error.
        except Exception as e:
            abort(logger, e)
    # A bare list is shorthand for {"simulations": [...]}.
    if isinstance(entries, list):
        entries = dict(simulations=entries)
    global_quantity_path = entries.get("quantity_path", None)
    if global_quantity_path is not None:
        global_quantity_path = pathlib.Path(global_quantity_path)
    simulations = entries.get("simulations", [])
    if not isinstance(simulations, list):
        simulations = [simulations]
    visualizations = []
    for simulation in simulations:
        simulation_run = SimulationRun.parse(simulation, logger=logger)
        # Resolve the quantity file: explicit per-simulation setting wins,
        # otherwise fall back to the global quantity_path / default name.
        if "quantity_file" not in simulation:
            if global_quantity_path is None:
                quantity_file = pathlib.Path("quantities.csv")
            else:
                if global_quantity_path.is_dir():
                    quantity_file = global_quantity_path / "quantities.csv"
                else:
                    quantity_file = global_quantity_path
        else:
            quantity_file = pathlib.Path(simulation["quantity_file"])
            if not quantity_file.is_absolute():
                if global_quantity_path is None or not global_quantity_path.is_dir():
                    quantity_file = simulation_run.data_dir / quantity_file
                else:
                    quantity_file = global_quantity_path / quantity_file
        quantity_file = quantity_file.resolve()
        if not quantity_file.exists():
            abort(logger, f"Could not find quantity_file {quantity_file}")
        quantities = Quantity.parse_file(quantity_file, logger=logger)
        global_plots = entries.get("plots", [])
        if not isinstance(global_plots, list):
            global_plots = [global_plots]
        local_plots = simulation.get("plots", [])
        if not isinstance(local_plots, list):
            local_plots = [local_plots]
        global_plot_descriptions = [
            plot_description
            for plot_config in global_plots
            for plot_description in PlotDescription.parse(
                quantities, plot_config, allow_reference=False, logger=logger
            )
        ]
        # Local entries may be real plots or string references to named
        # global plots; sort them into the matching bucket.
        references, plot_descriptions = [], []
        for plot_config in local_plots:
            for p in PlotDescription.parse(
                quantities, plot_config, allow_reference=True, logger=logger
            ):
                (references, plot_descriptions)[isinstance(p, PlotDescription)].append(
                    p
                )
        # Unnamed global plots always apply; named ones only when referenced
        # from the simulation's local plot list.
        global_plot_descriptions_with_name = []
        for plot_description in global_plot_descriptions:
            (global_plot_descriptions_with_name, plot_descriptions)[
                plot_description.name is None
            ].append(plot_description)
        for name in references:
            found_plot = False
            for plot_description in global_plot_descriptions_with_name:
                if name == plot_description.name:
                    plot_descriptions.append(plot_description)
                    found_plot = True
            if not found_plot:
                logger.warning(f"No plots found with name {name}, skipping")
        visualizer = Visualizer(simulation_run)
        visualizations.append(Visualization(visualizer, *plot_descriptions))
    return visualizations
class LoggerBuilder:
    def __init__(self, name="autoviz", level=logging.INFO, log_file=None):
        """Remember logger configuration; the logger is built when called.

        Args:
            name: Logger name passed to logging.getLogger.
            level: Logging level applied to newly configured loggers.
            log_file: Optional log file path (None means no file handler).
        """
        self.name = name
        self.level = level
        self.log_file = log_file
def __call__(self):
logger = logging.getLogger(self.name)
if len(logger.handlers) == 0:
logger.setLevel(self.level)
logger.propagate = False
if self.log_file is | |
name as the original template files, but reside in a different
directory tree.
Each directory in the INCLUDE_PATH is replicated in full beneath the
COMPILE_DIR directory. This example:
provider = template.provider.Provider({
'COMPILE_DIR': '/tmp/ttc',
'INCLUDE_PATH': '/home/abw/templates:/usr/share/templates',
})
would create the following directory structure:
/tmp/ttc/home/abw/templates/
/tmp/ttc/usr/share/templates/
Files loaded from different INCLUDE_PATH directories will have their
compiled forms saved in the relevant COMPILE_DIR directory.
On Win32 platforms a filename may be prefixed by a drive letter and
colon. e.g.
C:/My Templates/header
The colon will be silently stripped from the filename when it is added
to the COMPILE_DIR value(s) to prevent illegal filenames being generated.
Any colon in COMPILE_DIR elements will be left intact. For example:
# Win32 only
provider = template.provider.Provider({
'DELIMITER': ';',
'COMPILE_DIR': 'C:/TT2/Cache',
'INCLUDE_PATH': 'C:/TT2/Templates;D:/My Templates',
})
This would create the following cache directories:
C:/TT2/Cache/C/TT2/Templates
C:/TT2/Cache/D/My Templates
* TOLERANT
The TOLERANT flag is used by the various Template Toolkit provider
modules (template.provider, template.plugins, template.filters) to
control their behaviour when errors are encountered. By default, any
errors are reported as such, with the request for the particular
resource (template, plugin, filter) being denied and an exception
raised. When the TOLERANT flag is set to any true value, errors will
be silently ignored and the provider will instead return None. This
allows a subsequent provider to take responsibility for providing the
resource, rather than failing the request outright. If all providers
decline to service the request, either through tolerated failure or a
genuine disinclination to comply, then a '<resource> not found'
exception is raised.
* PARSER
The template.parser module implements a parser object for compiling
templates into Python code which can then be executed. A default
object of this class is created automatically and then used by the
Provider whenever a template is loaded and requires compilation. The
PARSER option can be used to provide an alternate parser object.
provider = template.provider.Provider({
'PARSER': myorg.template.parser.Parser({ ... }),
})
* DEBUG
The DEBUG option can be used to enable debugging messages from the
template.provider module by setting it to include the DEBUG_PROVIDER
value.
from template.constants import *
template = template.Template({
'DEBUG': DEBUG_PROVIDER,
})
fetch(name)
Returns a compiled template for the name specified. If the template
cannot be found then None is returned. If an error occurs (e.g. read
error, parse error) then an exception is raised. If the TOLERANT flag
is set, the method returns None instead of raising an exception.
store(name, template)
Stores the compiled template 'template' in the cache under the name 'name'.
Subsequent calls to fetch(name) will return this template in preference to
any disk-based file.
include_path(newpath)
Accessor method for the INCLUDE_PATH setting. If called with an
argument, this method will replace the existing INCLUDE_PATH with
the new value.
paths()
This method generates a copy of the INCLUDE_PATH list. Any elements
in the list which are dynamic generators (e.g. callables or objects
implementing a paths() method) will be called and the list of
directories returned merged into the output list.
It is possible to provide a generator which returns itself, thus
sending this method into an infinite loop. To detect and prevent this
from happening, the MAX_DIRS class variable, set to 64 by default,
limits the maximum number of paths that can be added to, or generated
for the output list. If this number is exceeded then the method will
immediately return an error reporting as much.
SUBCLASSING
The Provider class can be subclassed to provide templates from a
different source (e.g. a database). In most cases you'll just need to
provide custom implementations of the _template_modified() and
_template_content() methods.
Caching in memory and on disk will still be applied (if enabled) when
overriding these methods.
_template_modified(path)
Returns a timestamp of the path passed in by calling stat(). This can
be overridden, for example, to return a last modified value from a
database. The value returned should be a Unix epoch timestamp
although a sequence number should work as well.
_template_content(path, modtime=None)
This method returns the content of the template for all INCLUDE,
PROCESS, and INSERT directives. It returns the content of the
template located at 'path', or None if no such file exists.
If the optional parameter 'modtime' is present, the modification time
of the file is stored in its 'modtime' attribute.
"""
# Matches a "./" or "../" style segment at the start of, or anywhere inside,
# a path -- used to detect relative template paths.
RELATIVE_PATH = re.compile(r"(?:^|/)\.+/")
class Error(Exception):
    """Base class for exceptions raised by the template provider module."""
    pass
class Provider:
    """This class handles the loading, compiling and caching of
    templates.
    Multiple Provider objects can be stacked and queried in turn to
    effect a Chain-of-Command between them. A provider will attempt to
    return the requested template, raise an exception, or decline to
    provide the template (by returning None), allowing subsequent
    providers to attempt to deliver it. See 'Design Patterns' for
    further details.
    """
    # Cap on directories a dynamic INCLUDE_PATH expansion may produce;
    # guards against path generators that recurse forever.
    MAX_DIRS = 64
    # Default number of seconds a cached stat() result stays fresh.
    STAT_TTL = 1
    # Class-wide debug default; a per-instance DEBUG option overrides it.
    DEBUG = False
def __init__(self, params):
    """Initialise the provider from the 'params' configuration dict.

    Recognised options include CACHE_SIZE, INCLUDE_PATH, COMPILE_DIR,
    COMPILE_EXT, DELIMITER, DEBUG, ABSOLUTE, RELATIVE, TOLERANT,
    DOCUMENT, PARSER, DEFAULT, ENCODING and STAT_TTL (see module docs).
    """
    size = params.get("CACHE_SIZE", 2)
    paths = params.get("INCLUDE_PATH", ".")
    cdir = params.get("COMPILE_DIR", "")
    # On Windows a ":" may legitimately follow a drive letter, so only a
    # ":" that is not followed by a slash separates INCLUDE_PATH entries.
    dlim = params.get("DELIMITER", os.name == "nt" and r":(?!\/)" or ":")
    debug = params.get("DEBUG")
    if isinstance(paths, str):
        paths = re.split(dlim, paths)
    # The LRU cache needs at least a head and a tail slot, so a size of 1
    # (or a nonsensical negative size) is bumped up to the minimum of 2.
    if size == 1 or size < 0:
        size = 2
    if debug is not None:
        self.__debug = debug & (DEBUG_PROVIDER & DEBUG_FLAGS)
    else:
        self.__debug = self.DEBUG
    if cdir:
        # Pre-create, under COMPILE_DIR, a mirror of every include
        # directory so compiled templates can be written there later.
        for path in paths:
            if not isinstance(path, str):
                continue
            if os.name == "nt":
                path = path.replace(":", "")
            # BUG FIX: the directory to create is the COMPILE_DIR mirror,
            # not the include path itself ('cdir' was previously unused
            # inside this loop). Leading separators are stripped so that
            # os.path.join cannot discard 'cdir' for absolute paths.
            path = os.path.join(cdir, path.lstrip("/\\"))
            if not os.path.isdir(path):
                os.makedirs(path)
    self.__lookup = {}    # template name -> cache slot
    self.__notfound = {}  # Tracks templates *not* found (negative cache).
    self.__slots = 0      # number of occupied cache slots
    self.__size = size
    self.__include_path = paths
    self.__delimiter = dlim
    self.__compile_dir = cdir
    self.__compile_ext = params.get("COMPILE_EXT", "")
    self.__absolute = bool(params.get("ABSOLUTE"))
    self.__relative = bool(params.get("RELATIVE"))
    self.__tolerant = bool(params.get("TOLERANT"))
    self.__document = params.get("DOCUMENT", Document)
    self.__parser = params.get("PARSER")
    self.__default = params.get("DEFAULT")
    self.__encoding = params.get("ENCODING")
    self.__stat_ttl = params.get("STAT_TTL", self.STAT_TTL)
    self.__params = params
    # Head/tail of the doubly-linked LRU slot list.
    self.__head = None
    self.__tail = None
def fetch(self, name, prefix=None):
    """Return a compiled template for 'name', or None if unavailable.

    'name' may be a filename, a template.util.Literal holding template
    text, or a file object. Literals and file objects are compiled on
    the fly and never cached (there is no filename to cache under);
    use store() afterwards if caching is wanted. Filenames are resolved
    through the ABSOLUTE / RELATIVE flags and the INCLUDE_PATH. Errors
    raise an exception unless TOLERANT downgrades them to None.
    NOTE(review): 'prefix' is accepted but unused here -- presumably kept
    for interface compatibility; confirm against callers.
    """
    # Non-string names carry their own content: load and compile directly.
    if not isinstance(name, str):
        compiled = self._compile(self._load(name))
        return compiled and compiled.data
    if os.path.isabs(name):
        if self.__absolute:
            return self._fetch(name)
        if not self.__tolerant:
            raise Error("%s: absolute paths are not allowed (set ABSOLUTE option)"
                        % name)
        return None
    if RELATIVE_PATH.search(name):
        if self.__relative:
            return self._fetch(name)
        if not self.__tolerant:
            raise Error("%s: relative paths are not allowed (set RELATIVE option)"
                        % name)
        return None
    if self.__include_path:
        return self._fetch_path(name)
    return None
def _load(self, name, alias=None):
    """Load template text from a Literal, a file object, or a filename.

    Returns a Data object with the template text, its name/alias, and
    timing information, or None when the template cannot be found (or
    when a tolerated error occurred in _template_content()).
    """
    # BUG FIX: removed dead local 'now = time.time()' -- it was never used.
    if alias is None and isinstance(name, str):
        alias = name
    if isinstance(name, Literal):
        # Literal template text: nothing on disk, so no load time.
        return Data(name.text(), alias, alt="input text", load=0)
    if not isinstance(name, str):
        # Any other non-string is treated as a readable file object.
        return Data(name.read(), alias, alt="input file", load=0)
    if self._template_modified(name):
        when = Struct()
        # _template_content() stores the modification time on 'when'.
        text = self._template_content(name, when)
        if text is not None:
            return Data(text, alias, when=when.modtime, path=name)
    # Not found (or TOLERANT downgraded an error): decline to provide.
    return None
def _fetch(self, name, t_name=None):
"""Fetch a file from cache or disk by specification of an absolute
or relative filename.
'name' is the path to search (possibly prefixed by INCLUDE_PATH).
't_name' is the template name.
No search of the INCLUDE_PATH is made. If the file is found and
loaded, it is compiled and cached.
"""
# First see if the named template is in the memory cache.
slot = self.__lookup.get(name)
if slot:
# Test if the cache entry is fresh, and reload/compile if not.
self._refresh(slot)
return slot.data
now = time.time()
last_stat_time = self.__notfound.get(name)
| |
the constraints of convolution engine
args.save_mod_files = False # saves modified files after last commit. Also stores commit id.
args.make_score_zero_mean = False # make score zero mean while learning
args.no_q_for_dws_layer_idx = 0 # no_q_for_dws_layer_idx
args.viz_colormap = 'rainbow' # colormap for tensorboard: 'rainbow', 'plasma', 'magma', 'bone'
args.freeze_bn = False # freeze the statistics of bn
args.tensorboard_enable = True # en/disable of TB writing
args.print_train_class_iou = False
args.print_val_class_iou = False
args.freeze_layers = None
args.opset_version = 11 # onnx opset_version
args.prob_color_to_gray = (0.0,0.0) # this will be used for controlling color 2 gray augmentation
args.interpolation = None # interpolation method to be used for resize. one of cv2.INTER_
return args
# ################################################
# Disable OpenCV's internal threading to avoid hangs in the multi-worker
# data loader; observed after using cv2 image-processing functions.
# https://github.com/pytorch/pytorch/issues/1355
cv2.setNumThreads(0)
# ################################################
def main(args):
# ensure pytorch version is 1.2 or higher
assert version.parse(torch.__version__) >= version.parse('1.1'), \
'torch version must be 1.1 or higher, due to the change in scheduler.step() and optimiser.step() call order'
assert (not hasattr(args, 'evaluate')), 'args.evaluate is deprecated. use args.phase=training or calibration or validation'
assert is_valid_phase(args.phase), f'invalid phase {args.phase}'
assert not hasattr(args, 'model_surgery'), 'the argument model_surgery is deprecated, it is not needed now - remove it'
if (args.phase == 'validation' and args.bias_calibration):
args.bias_calibration = False
warnings.warn('switching off bias calibration in validation')
#
#################################################
args.rand_resize = args.img_resize if args.rand_resize is None else args.rand_resize
args.rand_crop = args.img_resize if args.rand_crop is None else args.rand_crop
args.output_size = args.img_resize if args.output_size is None else args.output_size
# resume has higher priority
args.pretrained = None if (args.resume is not None) else args.pretrained
# prob_color_to_gray will be used for controlling color 2 gray augmentation
if 'tiad' in args.dataset_name and args.prob_color_to_gray == (0.0, 0.0):
#override in case of 'tiad' if default values are used
args.prob_color_to_gray = (0.5, 0.0)
if args.save_path is None:
save_path = get_save_path(args)
else:
save_path = args.save_path
#
if not os.path.exists(save_path):
os.makedirs(save_path)
if args.save_mod_files:
#store all the files after the last commit.
mod_files_path = save_path+'/mod_files'
os.makedirs(mod_files_path)
cmd = "git ls-files --modified | xargs -i cp {} {}".format("{}", mod_files_path)
print("cmd:", cmd)
os.system(cmd)
# store last commit id.
cmd = "git log -n 1 >> {}".format(mod_files_path + '/commit_id.txt')
print("cmd:", cmd)
os.system(cmd)
#################################################
if args.logger is None:
log_file = os.path.splitext(os.path.basename(__file__))[0] + '.log'
args.logger = xnn.utils.TeeLogger(filename=os.path.join(save_path,log_file))
#################################################
# global settings. rand seeds for repeatability
random.seed(args.rand_seed)
np.random.seed(args.rand_seed)
torch.manual_seed(args.rand_seed)
torch.cuda.manual_seed(args.rand_seed)
################################
# args check and config
if args.iter_size != 1 and args.total_batch_size is not None:
warnings.warn("only one of --iter_size or --total_batch_size must be set")
#
if args.total_batch_size is not None:
args.iter_size = args.total_batch_size//args.batch_size
else:
args.total_batch_size = args.batch_size*args.iter_size
#################################################
# set some global flags and initializations
# keep it in args for now - although they don't belong here strictly
# using pin_memory is seen to cause issues, especially when when lot of memory is used.
args.use_pinned_memory = False
args.n_iter = 0
args.best_metric = -1
cudnn.benchmark = True
# torch.autograd.set_detect_anomaly(True)
################################
# reset character color, in case it is different
print('{}'.format(Fore.RESET))
# print everything for log
print('=> args: {}'.format(args))
print('\n'.join("%s: %s" % item for item in sorted(vars(args).items())))
print('=> will save everything to {}'.format(save_path))
#################################################
train_writer = SummaryWriter(os.path.join(save_path,'train')) if args.tensorboard_enable else None
val_writer = SummaryWriter(os.path.join(save_path,'val')) if args.tensorboard_enable else None
transforms = get_transforms(args) if args.transforms is None else args.transforms
assert isinstance(transforms, (list,tuple)) and len(transforms) == 2, 'incorrect transforms were given'
print("=> fetching images in '{}'".format(args.data_path))
split_arg = args.split_file if args.split_file else (args.split_files if args.split_files else args.split_value)
train_dataset, val_dataset = xvision.datasets.__dict__[args.dataset_name](args.dataset_config, args.data_path, split=split_arg, transforms=transforms)
#################################################
print('=> {} samples found, {} train samples and {} test samples '.format(len(train_dataset)+len(val_dataset),
len(train_dataset), len(val_dataset)))
train_sampler = get_dataset_sampler(train_dataset, args.epoch_size) if args.epoch_size != 0 else None
shuffle_train = args.shuffle and (train_sampler is None)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=args.use_pinned_memory, sampler=train_sampler, shuffle=shuffle_train)
val_sampler = get_dataset_sampler(val_dataset, args.epoch_size_val) if args.epoch_size_val != 0 else None
shuffle_val = args.shuffle_val and (val_sampler is None)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=args.use_pinned_memory, sampler=val_sampler, shuffle=shuffle_val)
#################################################
if (args.model_config.input_channels is None):
args.model_config.input_channels = (3,)
print("=> input channels is not given - setting to {}".format(args.model_config.input_channels))
if (args.model_config.output_channels is None):
if ('num_classes' in dir(train_dataset)):
args.model_config.output_channels = train_dataset.num_classes()
else:
args.model_config.output_channels = (2 if args.model_config.output_type == 'flow' else args.model_config.output_channels)
xnn.utils.print_yellow("=> output channels is not given - setting to {} - not sure to work".format(args.model_config.output_channels))
#
if not isinstance(args.model_config.output_channels,(list,tuple)):
args.model_config.output_channels = [args.model_config.output_channels]
if (args.class_weights is None) and ('class_weights' in dir(train_dataset)):
args.class_weights = train_dataset.class_weights()
if not isinstance(args.class_weights, (list,tuple)):
args.class_weights = [args.class_weights]
#
print("=> class weights available for dataset: {}".format(args.class_weights))
#################################################
pretrained_data = None
model_surgery_quantize = False
pretrained_data = None
if args.pretrained and args.pretrained != "None":
pretrained_data = []
pretrained_files = args.pretrained if isinstance(args.pretrained,(list,tuple)) else [args.pretrained]
for p in pretrained_files:
if isinstance(p, dict):
p_data = p
else:
if p.startswith('http://') or p.startswith('https://'):
p_file = xnn.utils.download_url(p, './data/downloads')
else:
p_file = p
#
print(f'=> loading pretrained weights file: {p}')
p_data = torch.load(p_file)
#
pretrained_data.append(p_data)
model_surgery_quantize = p_data['quantize'] if 'quantize' in p_data else False
#
#################################################
# create model
is_onnx_model = False
if isinstance(args.model, torch.nn.Module):
model, change_names_dict = args.model if isinstance(args.model, (list, tuple)) else (args.model, None)
assert isinstance(model, torch.nn.Module), 'args.model, if provided must be a valid torch.nn.Module'
elif isinstance(args.model, str) and args.model.endswith('.onnx'):
model = xnn.onnx.import_onnx(args.model)
is_onnx_model = True
else:
xnn.utils.print_yellow("=> creating model '{}'".format(args.model_name))
model = xvision.models.pixel2pixel.__dict__[args.model_name](args.model_config)
# check if we got the model as well as parameters to change the names in pretrained
model, change_names_dict = model if isinstance(model, (list,tuple)) else (model,None)
#
if args.quantize:
# dummy input is used by quantized models to analyze graph
is_cuda = next(model.parameters()).is_cuda
dummy_input = create_rand_inputs(args, is_cuda=is_cuda)
#
if 'training' in args.phase:
model = xnn.quantize.QuantTrainModule(model, per_channel_q=args.per_channel_q,
histogram_range=args.histogram_range, bitwidth_weights=args.bitwidth_weights,
bitwidth_activations=args.bitwidth_activations, constrain_bias=args.constrain_bias,
dummy_input=dummy_input)
elif 'calibration' in args.phase:
model = xnn.quantize.QuantCalibrateModule(model, per_channel_q=args.per_channel_q,
bitwidth_weights=args.bitwidth_weights, bitwidth_activations=args.bitwidth_activations,
histogram_range=args.histogram_range, constrain_bias=args.constrain_bias,
bias_calibration=args.bias_calibration, dummy_input=dummy_input, lr_calib=args.lr_calib)
elif 'validation' in args.phase:
# Note: bias_calibration is not enabled
model = xnn.quantize.QuantTestModule(model, per_channel_q=args.per_channel_q,
bitwidth_weights=args.bitwidth_weights, bitwidth_activations=args.bitwidth_activations,
histogram_range=args.histogram_range, constrain_bias=args.constrain_bias,
dummy_input=dummy_input, model_surgery_quantize=model_surgery_quantize)
else:
assert False, f'invalid phase {args.phase}'
#
# load pretrained model
if pretrained_data is not None and not is_onnx_model:
model_orig = get_model_orig(model)
for (p_data,p_file) in zip(pretrained_data, pretrained_files):
print("=> using pretrained weights from: {}".format(p_file))
if hasattr(model_orig, 'load_weights'):
model_orig.load_weights(pretrained=p_data, change_names_dict=change_names_dict)
else:
xnn.utils.load_weights(get_model_orig(model), pretrained=p_data, change_names_dict=change_names_dict)
#
#
#
#################################################
if args.count_flops:
count_flops(args, model)
#################################################
if args.save_onnx:
write_onnx_model(args, get_model_orig(model), save_path, save_traced_model=False)
#
#################################################
if args.print_model:
print(model)
print('\n')
else:
args.logger.debug(str(model))
args.logger.debug('\n')
#################################################
if (not args.run_soon):
print("Training not needed for now")
close(args)
exit()
#################################################
# DataParallel does not work for QuantCalibrateModule or QuantTestModule
if args.parallel_model and (not isinstance(model, (xnn.quantize.QuantCalibrateModule, xnn.quantize.QuantTestModule))):
model = torch.nn.DataParallel(model)
#################################################
model = model.cuda()
#################################################
# for help in debug/print
for name, module in model.named_modules():
module.name = name
#################################################
args.loss_modules = copy.deepcopy(args.losses)
for task_dx, task_losses in enumerate(args.losses):
for loss_idx, loss_fn in enumerate(task_losses):
kw_args = {}
loss_args = pixel2pixel_losses.__dict__[loss_fn].args()
for arg in loss_args:
if arg == 'weight' and (args.class_weights is not None):
kw_args.update({arg:args.class_weights[task_dx]})
elif arg == 'num_classes':
kw_args.update({arg:args.model_config.output_channels[task_dx]})
elif arg == 'sparse':
kw_args.update({arg:args.sparse})
elif arg == 'enable_fp16':
kw_args.update({arg:args.model_config.enable_fp16})
#
#
loss_fn_raw = pixel2pixel_losses.__dict__[loss_fn](**kw_args)
if args.parallel_criterion:
loss_fn = torch.nn.DataParallel(loss_fn_raw).cuda() if args.parallel_criterion else loss_fn_raw.cuda()
loss_fn.info = loss_fn_raw.info
loss_fn.clear = loss_fn_raw.clear
else:
loss_fn = loss_fn_raw.cuda()
#
args.loss_modules[task_dx][loss_idx] = loss_fn
#
args.metric_modules = copy.deepcopy(args.metrics)
for task_dx, task_metrics in enumerate(args.metrics):
for midx, metric_fn in enumerate(task_metrics):
kw_args = {}
loss_args = pixel2pixel_losses.__dict__[metric_fn].args()
for arg in loss_args:
if arg == 'weight':
kw_args.update({arg:args.class_weights[task_dx]})
elif arg == 'num_classes':
kw_args.update({arg:args.model_config.output_channels[task_dx]})
elif arg == 'sparse':
kw_args.update({arg:args.sparse})
elif arg == 'enable_fp16':
kw_args.update({arg:args.model_config.enable_fp16})
#
#
metric_fn_raw = pixel2pixel_losses.__dict__[metric_fn](**kw_args)
if args.parallel_criterion:
metric_fn = torch.nn.DataParallel(metric_fn_raw).cuda()
metric_fn.info = metric_fn_raw.info
metric_fn.clear = metric_fn_raw.clear
else:
metric_fn = metric_fn_raw.cuda()
#
args.metric_modules[task_dx][midx] = metric_fn
#
#################################################
if args.phase=='validation':
with torch.no_grad():
validate(args, val_dataset, val_loader, model, 0, val_writer)
#
close(args)
return
#################################################
assert(args.optimizer in ['adam', 'sgd'])
print('=> setting {} optimizer'.format(args.optimizer))
if args.lr_clips is not None:
learning_rate_clips = args.lr_clips if 'training' in args.phase else 0.0
clips_decay = args.bias_decay if (args.bias_decay is not None and args.bias_decay != 0.0) else args.weight_decay
clips_params = [p for n,p in model.named_parameters() if 'clips' in n]
other_params = [p for n,p in model.named_parameters() if 'clips' not in n]
param_groups = [{'params': clips_params, 'weight_decay': clips_decay, 'lr': learning_rate_clips},
{'params': other_params, 'weight_decay': args.weight_decay}]
else:
param_groups = [{'params': filter(lambda | |
<filename>experiments/notebooks/augment_3d.py
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Apply Augmentation
# +
import configparser
import os
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import numpy as np
import chainercv
from chainercv import transforms
from ipywidgets import interact
import sys
sys.path.append("../../src/")
# -
from pose.visualizations import vis_image, vis_point, vis_edge, vis_pose
from pose.hand_dataset.selector import select_dataset
from pose.hand_dataset.common_dataset import COLOR_MAP, STANDARD_KEYPOINT_NAMES, EDGES
# define constants
KEYPOINT_NAMES = STANDARD_KEYPOINT_NAMES
# Per-keypoint / per-edge colors for visualization, looked up by name.
POINT_COLOR = [COLOR_MAP[k] for k in KEYPOINT_NAMES]
EDGE_COLOR = [COLOR_MAP[s, t] for s, t in EDGES]
# # visualize raw dataset
# +
config = configparser.ConfigParser()
config.read("../../src/config_pose.ini")
# force to set: use the FHAD dataset with both RGB and depth modalities.
config["dataset"]["train_set"]="fhad"
config["dataset"]["val_set"]="fhad"
config["dataset"]["use_rgb"]="yes"
config["dataset"]["use_depth"]="yes"
# Uh... ugly...
# NOTE(review): reaches into the private _datasets attribute of the
# concatenated dataset to get at the underlying base dataset.
concatenated_dataset=select_dataset(config, return_data=["train_set"], debug=True)
dataset=concatenated_dataset._datasets[0].base
# +
def visualize_dataset(idx):
    """Show 2D keypoints over depth/RGB images and the raw 3D poses for one sample."""
    example = dataset.get_example(idx)
    print(example.keys())
    rgb_joint_zyx = example["rgb_joint"]
    depth_joint_zyx = example["depth_joint"]
    rgb = chainercv.utils.read_image(example["rgb_path"])
    depth = dataset.read_depth(example["depth_path"])
    # Project 3D joints (z, y, x) into image coordinates (v, u).
    rgb_vu = dataset.rgb_camera.zyx2vu(rgb_joint_zyx)
    depth_vu = dataset.depth_camera.zyx2vu(depth_joint_zyx)
    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    ax4 = fig.add_subplot(224, projection="3d")
    # Top row: 2D keypoints over the depth / RGB images.
    vis_pose(depth_vu, EDGES, img=depth, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax1)
    vis_pose(rgb_vu, EDGES, img=rgb, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax2)
    # Bottom row: the corresponding 3D joint positions.
    vis_pose(depth_joint_zyx, indices=EDGES, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax3)
    vis_pose(rgb_joint_zyx, indices=EDGES, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax4)
    for ax in [ax3, ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        # Orient the 3D axes to roughly match the camera viewpoint.
        ax.view_init(-65, -90)
# Browse 100 random samples interactively.
sample_indices = np.random.choice(range(len(dataset)), 100)
interact(visualize_dataset, idx=sample_indices)
# -
# # Flip
# +
def flip_point_xyz(camera, xyz, size, x_flip=False, y_flip=False):
    """Flip 3D points (x, y, z order) by flipping their 2D projection.

    The points are projected to (u, v), flipped within an image of 'size'
    (W, H), and back-projected at the original depth.
    """
    uv, z = camera.xyz2uv(xyz, return_z=True)
    W, H = size
    # transforms.flip_point expects (v, u) order, so swap the columns.
    flipped_vu = transforms.flip_point(
        uv[:, ::-1][np.newaxis],
        (H, W),
        x_flip=x_flip,
        y_flip=y_flip,
    )
    # Swap back to (u, v) and back-project with the original depths.
    flipped_uv = np.squeeze(flipped_vu)[:, ::-1]
    return camera.uv2xyz(flipped_uv, z)
def flip_point_zyx(camera, zyx, size, x_flip=False, y_flip=False):
    """Flip 3D points (z, y, x order) by flipping their 2D projection.

    The points are projected to (v, u), flipped within an image of 'size'
    (H, W), and back-projected at the original depth.
    """
    # BUG FIX: removed a leftover debug print of zyx.shape, and a redundant
    # second np.squeeze on the already-squeezed array.
    vu, z = camera.zyx2vu(zyx, return_z=True)
    H, W = size
    flipped_vu = transforms.flip_point(
        vu[np.newaxis],
        (H, W),
        x_flip=x_flip,
        y_flip=y_flip
    )
    flipped_vu = np.squeeze(flipped_vu)
    flipped_zyx = camera.vu2zyx(flipped_vu, z)
    return flipped_zyx
# -
def flip(image, zyx, vu, camera, x_flip=False, y_flip=False):
    """Flip an image together with its 3D joints and 2D keypoints.

    Returns (flipped_image, flipped_zyx, flipped_vu).
    """
    C, H, W = image.shape
    flipped_image = transforms.flip(image, x_flip=x_flip, y_flip=y_flip)
    flipped_zyx = flip_point_zyx(
        camera, zyx, (H, W), x_flip=x_flip, y_flip=y_flip)
    flipped_vu = transforms.flip_point(
        vu, (H, W), x_flip=x_flip, y_flip=y_flip)
    return flipped_image, flipped_zyx, flipped_vu
# +
def visualize_flip(idx, y_flip=False, x_flip=False):
    """Show a depth sample before/after flipping, in 2D and 3D."""
    example = dataset.get_example(idx)
    rgb_joint_zyx = example["rgb_joint"]
    depth_joint_zyx = example["depth_joint"]
    rgb = chainercv.utils.read_image(example["rgb_path"])
    depth = dataset.read_depth(example["depth_path"])
    rgb_vu = dataset.rgb_camera.zyx2vu(rgb_joint_zyx)
    depth_vu = dataset.depth_camera.zyx2vu(depth_joint_zyx)
    # Add a leading batch axis as expected by chainercv point transforms.
    # NOTE(review): rgb/rgb_vu are loaded but only depth is flipped below.
    rgb_vu = np.expand_dims(rgb_vu, axis=0)
    depth_vu = np.expand_dims(depth_vu, axis=0)
    depth_flipped, depth_joint_zyx_flipped, depth_vu_flipped = flip(
        depth,
        depth_joint_zyx,
        depth_vu,
        dataset.depth_camera,
        x_flip=x_flip,
        y_flip=y_flip,
    )
    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    ax4 = fig.add_subplot(224, projection="3d")
    vis_pose(depth_vu, EDGES, img=depth, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax1)
    # Re-project the flipped 3D joints to check consistency with the
    # flipped image.
    debug_vu = np.expand_dims(dataset.depth_camera.zyx2vu(
        depth_joint_zyx_flipped), axis=0)
    vis_pose(debug_vu, EDGES, img=depth_flipped, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax2)
    # plot 3D
    vis_pose(depth_joint_zyx, indices=EDGES, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax3)
    vis_pose(depth_joint_zyx_flipped, indices=EDGES, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax4)
    for ax in [ax3, ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)
interact(visualize_flip, idx=sample_indices)
# -
# # Rotate
# +
from math import sin, cos
def rotate_point_uv(uv, angle, center_uv):
ndim = uv.ndim
if ndim == 3:
uv = uv.squeeze()
c_u, c_v = center_uv
theta = np.deg2rad(angle)
rmat_uv = np.array([
[cos(theta), -sin(theta)],
[sin(theta), cos(theta)],
], dtype=uv.dtype).transpose()
uv = uv-center_uv
rot_uv = uv @ rmat_uv
rot_uv = rot_uv+center_uv
if ndim == 3:
rot_uv = np.expand_dims(rot_uv, axis=0)
return rot_uv
def rotate_point_xyz(camera, xyz, angle, center_uv):
    """Rotate 3D points by rotating their (u, v) projection about 'center_uv'."""
    uv, z = camera.xyz2uv(xyz, return_z=True)
    # Rotate in the image plane, then back-project at the original depths.
    return camera.uv2xyz(rotate_point_uv(uv, angle, center_uv), z)
def rotate_point_vu(vu, angle, center_vu):
ndim = vu.ndim
if ndim == 3:
vu = vu.squeeze()
c_v, c_u = center_vu
theta = np.deg2rad(angle)
P = np.array([
[0, 1],
[1, 0],
], dtype=vu.dtype).transpose()
rmat = np.array([
[cos(theta), -sin(theta)],
[sin(theta), cos(theta)],
], dtype=vu.dtype).transpose()
rmat_vu = P @ rmat @ P
vu = vu-center_vu
rot_vu = vu @ rmat_vu
rot_vu = rot_vu+center_vu
if ndim == 3:
rot_vu = np.expand_dims(rot_vu, axis=0)
return rot_vu
def rotate_point_zyx(camera, zyx, angle, center_vu):
    """Rotate 3D points by rotating their (v, u) projection about 'center_vu'."""
    vu, z = camera.zyx2vu(zyx, return_z=True)
    # Rotate in the image plane, then back-project at the original depths.
    return camera.vu2zyx(rotate_point_vu(vu, angle, center_vu), z)
# -
def rotate(image, zyx, vu, angle, camera):
    """Rotate an image and its joints by 'angle' degrees about the image center.

    Returns (image_rot, zyx_rot, vu_rot).
    """
    C, H, W = image.shape
    center_vu = (H / 2, W / 2)
    # transforms.rotate and rotate_point_vu use opposite angle conventions,
    # so the points are rotated by the negated angle.
    image_rot = transforms.rotate(image, angle, expand=False)
    zyx_rot = rotate_point_zyx(camera, zyx, -angle, center_vu)
    vu_rot = rotate_point_vu(vu, -angle, center_vu)
    return image_rot, zyx_rot, vu_rot
# +
def visualize_rotate(idx, angle, vis_vu=False):
    """Show a depth sample before/after rotation, in 2D and 3D.

    When vis_vu is True the rotated 2D keypoints are drawn directly;
    otherwise the rotated 3D joints are re-projected as a cross-check.
    """
    example = dataset.get_example(idx)
    rgb_joint_zyx = example["rgb_joint"]
    depth_joint_zyx = example["depth_joint"]
    rgb = chainercv.utils.read_image(example["rgb_path"])
    depth = dataset.read_depth(example["depth_path"])
    rgb_vu = dataset.rgb_camera.zyx2vu(rgb_joint_zyx)
    depth_vu = dataset.depth_camera.zyx2vu(depth_joint_zyx)
    # Add a leading batch axis as expected by the point transforms.
    rgb_vu = np.expand_dims(rgb_vu, axis=0)
    depth_vu = np.expand_dims(depth_vu, axis=0)
    depth_rot, depth_joint_zyx_rot, depth_vu_rot = rotate(
        depth,
        depth_joint_zyx,
        depth_vu,
        angle,
        dataset.depth_camera,
    )
    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    ax4 = fig.add_subplot(224, projection="3d")
    vis_pose(depth_vu, EDGES, img=depth, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax1)
    if vis_vu:
        vis_pose(depth_vu_rot, EDGES, img=depth_rot, edge_color=EDGE_COLOR,
                 point_color=POINT_COLOR, ax=ax2)
    else:
        # Re-project the rotated 3D joints onto the rotated image.
        debug_vu = np.expand_dims(
            dataset.depth_camera.zyx2vu(depth_joint_zyx_rot), axis=0)
        vis_pose(debug_vu, EDGES, img=depth_rot, edge_color=EDGE_COLOR,
                 point_color=POINT_COLOR, ax=ax2)
    # plot 3D
    vis_pose(depth_joint_zyx, indices=EDGES, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax3)
    vis_pose(depth_joint_zyx_rot, indices=EDGES, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax4)
    for ax in [ax3, ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)
interact(visualize_rotate, idx=sample_indices, angle=range(-180, 181, 30))
# -
# # Crop around hand
# +
# Crop extents: a 150-unit cube around the hand in 3D, shown as a 224x224
# 2D patch. NOTE(review): units are presumed millimetres -- confirm.
crop3dH, crop3dW = 150, 150
crop2dH, crop2dW = 224, 224
crop3dD = 150
def crop_domain(image, domain, fill=0):
"""
image.shape should be (C,H,W)
The order of domain should be [ymin,xmin,ymax,xmax]
"""
# m: min, M:max
ym, xm, yM, xM = domain
# Select domain where to clip
C, H, W = image.shape
# s: select
sxm = max(0, xm)
sxM = min(W, xM)
sym = max(0, ym)
syM = min(H, yM)
outH, outW = yM - ym, xM - xm
canvas = np.empty((C, outH, outW), dtype=image.dtype)
canvas[:] = np.array(fill).reshape((-1, 1, 1))
# where to Put clipped image on canvas
# p: put
pym = max(0, sym - ym)
pxm = max(0, sxm - xm)
pyM = min(outH, syM - ym)
pxM = min(outW, sxM - xm)
if pym == pyM:
print(H, W)
print(ym, xm, yM, xM)
print(pym, pxm, pyM, pxM)
raise Exception
# TODO:express as slice
canvas[:, pym:pyM, pxm:pxM] = image[:, sym:syM, sxm:sxM]
param = {}
param['y_offset'] = -sym + pym
param['x_offset'] = -sxm + pxm
return canvas, param
def calc_com(pts, z=None):
    """Return the center of mass of 'pts' (and of 'z' when provided)."""
    centroid = np.mean(pts, axis=0)
    if z is not None:
        return centroid, np.mean(z, axis=0)
    return centroid
# -
# # compose affine
from pose.graphics.camera import CameraIntr
from pose.hand_dataset.image_utils import convert_depth_to_uvd
# +
import sympy
# Symbolically build a 3x3 matrix of elements x_ij, then conjugate it with
# the axis-reversing permutation P to see how an (x, y, z)-ordered transform
# looks in (z, y, x) order. Conjugating twice (m_xyz) recovers the original
# matrix, confirming P is its own inverse.
symbols = []
for i in range(3):
    for j in range(3):
        s = "x_{}{}".format(i, j)
        symbols.append(s)
symbols = ' '.join(symbols)
m = sympy.symbols(symbols)
m = np.array(m).reshape(3, 3)
P = np.array([
    [0, 0, 1],
    [0, 1, 0],
    [1, 0, 0],
])
m_zyx = P.dot(m.dot(P))
m_xyz = P.dot(m_zyx.dot(P))
m_xyz
# -
# !pip install sympy
import sympy
# Symbols for a pinhole-camera intrinsic matrix: principal point (u0, v0),
# skew, focal lengths (fx, fy), plus a translation (u, v) and scaling
# (sx, sy) -- used to inspect how translation/scaling compose with the
# intrinsics when cropping/resizing.
u0, v0 = sympy.symbols("u0 v0")
u, v = sympy.symbols("u v")
sk = sympy.symbols("sk")
fx, fy = sympy.symbols("fx fy")
sx, sy = sympy.symbols("sx sy")
# +
c = np.array([
    [1, sk, u0],
    [0, 1, v0],
    [0, 0, 1],
])
t = np.array([
    [1, 0, u],
    [0, 1, v],
    [0, 0, 1],
])
s = np.array([
    [sx, 0, 0],
    [0, sy, 0],
    [0, 0, 1],
])
# Compare translating vs. scaling the intrinsic matrix symbolically.
t.dot(c), s.dot(c)
# -
def crop(image, joint_zyx, camera, return_param=False):
    """Crop 'image' to a crop3dH x crop3dW window centered on the joints.

    The window is centered on the joints' center of mass at its depth,
    projected back into the image. Returns (cropped, vu_cropped,
    translated_camera) and, when return_param is True, a dict with the
    center-of-mass coordinates as well.
    """
    vu, z = camera.zyx2vu(joint_zyx, return_z=True)
    # Center of mass in image coordinates, lifted back to 3D.
    vu_com, z_com = calc_com(vu, z)
    zyx_com = camera.vu2zyx(
        vu_com[np.newaxis],
        z_com[np.newaxis]
    ).squeeze()
    z_com, y_com, x_com = zyx_com
    # 3D window around the center of mass, at the center-of-mass depth.
    xmin = x_com - crop3dW / 2
    ymin = y_com - crop3dH / 2
    xmax = xmin + crop3dW
    ymax = ymin + crop3dH
    corners_vu = camera.zyx2vu(np.array([
        [z_com, ymin, xmin],
        [z_com, ymax, xmax],
    ])).astype(int)
    [[vmin, umin], [vmax, umax]] = corners_vu
    cropped, crop_param = crop_domain(image, [vmin, umin, vmax, umax])
    # Shift the camera principal point so projections land on the crop.
    translated = camera.translate_camera(
        y_offset=crop_param["y_offset"],
        x_offset=crop_param["x_offset"]
    )
    vu_cropped = translated.zyx2vu(joint_zyx)
    if not return_param:
        return cropped, vu_cropped, translated
    param = {
        "zyx_com": zyx_com,
        "z_com": z_com,
        "y_com": y_com,
        "x_com": x_com,
    }
    return cropped, vu_cropped, translated, param
# +
import copy
# %matplotlib notebook
def visualize_crop(i):
    """Crop sample ``i`` in both the depth and RGB views and plot the results.

    Top row: cropped images with 2D poses overlaid; bottom row: the 3D
    point cloud pulled back from the depth map plus the joint skeletons.
    """
    example = dataset.get_example(i)
    rgb_joint_zyx = example["rgb_joint"]
    depth_joint_zyx = example["depth_joint"]
    rgb = chainercv.utils.read_image(example["rgb_path"])
    depth = dataset.read_depth(example["depth_path"])
    depth_cropped, depth_vu_cropped, depth_camera_cropped, depth_crop_param = crop(
        depth, depth_joint_zyx, dataset.depth_camera, return_param=True)
    rgb_cropped, rgb_vu_cropped, rgb_camera_cropped, rgb_crop_param = crop(
        rgb, rgb_joint_zyx, dataset.rgb_camera, return_param=True)
    fig = plt.figure(figsize=(8, 8))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection="3d")
    ax4 = fig.add_subplot(224, projection="3d")
    vis_image(depth_cropped, ax1)
    vis_pose(depth_vu_cropped, EDGES, point_color=POINT_COLOR,
             edge_color=EDGE_COLOR, ax=ax1)
    vis_image(rgb_cropped, ax2)
    vis_pose(rgb_vu_cropped, EDGES, point_color=POINT_COLOR,
             edge_color=EDGE_COLOR, ax=ax2)
    # plot 3D: pull the depth map back into camera space
    uvd = convert_depth_to_uvd(depth_cropped)
    # BUG FIX: the subsampling slice had been corrupted into an
    # IPv6-looking token; subsample every 10th pixel on both image axes,
    # matching the surviving "::10" of the original expression.
    u, v, d = uvd[:, ::10, ::10]
    u = u.reshape(-1, 1)
    v = v.reshape(-1, 1)
    z = d.reshape(-1, 1)
    vu = np.concatenate([v, u], axis=1)
    zyx = depth_camera_cropped.vu2zyx(vu, z)
    vis_point(zyx, ax=ax3)
    vis_pose(depth_joint_zyx, indices=EDGES, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax3)
    vis_pose(rgb_joint_zyx, indices=EDGES, edge_color=EDGE_COLOR,
             point_color=POINT_COLOR, ax=ax4)
    for ax in [ax3, ax4]:
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.view_init(-65, -90)
# Interactive browser over the sample indices (Jupyter widget).
interact(visualize_crop, i=sample_indices)
# -
# # resize
def scale(image, joint_zyx, camera, size, fit_short=True):
_, inH, inW = image.shape
out_image = chainercv.transforms.scale(
image,
size=max(crop2dH, crop2dW),
| |
<gh_stars>1-10
import numpy as np
import tensorflow as tf
from basic.read_data import DataSet
from my.nltk_utils import span_f1
from my.tensorflow import padded_reshape
from my.utils import argmax
from squad.utils import get_phrase, get_best_span
class Evaluation(object):
    """Accumulates raw predictions for one data split at one global step.

    Supports ``+`` so per-batch evaluations can be merged (including via
    ``sum(...)`` — ``__radd__`` treats 0 as the identity element).
    """

    def __init__(self, data_type, global_step, idxs, yp, tensor_dict=None):
        self.data_type = data_type
        self.global_step = global_step
        self.idxs = idxs
        self.yp = yp
        self.num_examples = len(yp)
        self.tensor_dict = None
        self.dict = {
            'data_type': data_type,
            'global_step': global_step,
            'yp': yp,
            'idxs': idxs,
            'num_examples': self.num_examples,
        }
        if tensor_dict is not None:
            # store plain lists so the summary dict stays serializable
            self.tensor_dict = {key: val.tolist() for key, val in tensor_dict.items()}
            self.dict.update(self.tensor_dict)
        self.summaries = None

    def __repr__(self):
        return "{} step {}".format(self.data_type, self.global_step)

    def __add__(self, other):
        # sum() starts from 0 — treat it as the neutral element
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        merged_tensor_dict = None
        if self.tensor_dict is not None:
            merged_tensor_dict = {key: val + other.tensor_dict[key]
                                  for key, val in self.tensor_dict.items()}
        return Evaluation(self.data_type, self.global_step,
                          self.idxs + other.idxs, self.yp + other.yp,
                          tensor_dict=merged_tensor_dict)

    def __radd__(self, other):
        return self.__add__(other)
class LabeledEvaluation(Evaluation):
    """Evaluation that additionally carries the gold labels ``y``."""

    def __init__(self, data_type, global_step, idxs, yp, y, tensor_dict=None):
        super(LabeledEvaluation, self).__init__(data_type, global_step, idxs, yp, tensor_dict=tensor_dict)
        self.y = y
        self.dict['y'] = y

    def __add__(self, other):
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_yp = self.yp + other.yp
        new_y = self.y + other.y
        new_idxs = self.idxs + other.idxs
        # BUG FIX: new_tensor_dict used to be assigned only inside the branch
        # below, raising NameError whenever tensor_dict was None.
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0)
                               for key, val in self.tensor_dict.items()}
        return LabeledEvaluation(self.data_type, self.global_step, new_idxs,
                                 new_yp, new_y, tensor_dict=new_tensor_dict)
class AccuracyEvaluation(LabeledEvaluation):
    """Labeled evaluation with per-example correctness, accuracy and loss."""

    def __init__(self, data_type, global_step, idxs, yp, y, correct, loss, tensor_dict=None):
        super(AccuracyEvaluation, self).__init__(data_type, global_step, idxs, yp, y, tensor_dict=tensor_dict)
        self.loss = loss
        self.correct = correct
        # ROBUSTNESS: guard the empty batch instead of ZeroDivisionError
        self.acc = sum(correct) / len(correct) if correct else 0.0
        self.dict['loss'] = loss
        self.dict['correct'] = correct
        self.dict['acc'] = self.acc
        loss_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/loss'.format(data_type), simple_value=self.loss)])
        acc_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/acc'.format(data_type), simple_value=self.acc)])
        self.summaries = [loss_summary, acc_summary]

    def __repr__(self):
        return "{} step {}: accuracy={}, loss={}".format(self.data_type, self.global_step, self.acc, self.loss)

    def __add__(self, other):
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_idxs = self.idxs + other.idxs
        new_yp = self.yp + other.yp
        new_y = self.y + other.y
        new_correct = self.correct + other.correct
        # example-count-weighted running average of the loss
        new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
        # BUG FIX: initialize so the no-tensor_dict path doesn't NameError.
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0)
                               for key, val in self.tensor_dict.items()}
        return AccuracyEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_y,
                                  new_correct, new_loss, tensor_dict=new_tensor_dict)
class Evaluator(object):
    """Runs the model on batches and wraps the outputs in Evaluation objects."""

    def __init__(self, config, model, tensor_dict=None):
        self.config = config
        self.model = model
        self.global_step = model.global_step
        self.yp = model.yp
        self.h = model.h
        self.u = model.u
        self.p0 = model.p0
        self.g1 = model.g1
        self.g2 = model.g2
        self.tensor_dict = {} if tensor_dict is None else tensor_dict

    def get_evaluation(self, sess, batch):
        idxs, data_set = batch
        feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
        fetches = [self.global_step, self.yp, list(self.tensor_dict.values())]
        global_step, yp, vals = sess.run(fetches, feed_dict=feed_dict)
        yp = yp[:data_set.num_examples]  # strip batch padding
        named_vals = dict(zip(self.tensor_dict.keys(), vals))
        return Evaluation(data_set.data_type, int(global_step), idxs,
                          yp.tolist(), tensor_dict=named_vals)

    def get_evaluation_from_batches(self, sess, batches):
        # Evaluation.__radd__ makes sum() start cleanly from 0
        return sum(self.get_evaluation(sess, b) for b in batches)
class LabeledEvaluator(Evaluator):
    """Evaluator that also records the gold labels taken from the feed dict."""

    def __init__(self, config, model, tensor_dict=None):
        super(LabeledEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
        self.y = model.y

    def get_evaluation(self, sess, batch):
        idxs, data_set = batch
        feed_dict = self.model.get_feed_dict(data_set, False, supervised=False)
        fetches = [self.global_step, self.yp, list(self.tensor_dict.values())]
        global_step, yp, vals = sess.run(fetches, feed_dict=feed_dict)
        yp = yp[:data_set.num_examples]  # strip batch padding
        labels = feed_dict[self.y]
        named_vals = dict(zip(self.tensor_dict.keys(), vals))
        return LabeledEvaluation(data_set.data_type, int(global_step), idxs,
                                 yp.tolist(), labels.tolist(), tensor_dict=named_vals)
class AccuracyEvaluator(LabeledEvaluator):
    """Evaluator that scores start-position predictions against gold spans."""

    def __init__(self, config, model, tensor_dict=None):
        super(AccuracyEvaluator, self).__init__(config, model, tensor_dict=tensor_dict)
        self.loss = model.loss

    def get_evaluation(self, sess, batch):
        idxs, data_set = batch
        assert isinstance(data_set, DataSet)
        feed_dict = self.model.get_feed_dict(data_set, False)
        fetches = [self.global_step, self.yp, self.loss, list(self.tensor_dict.values())]
        global_step, yp, loss, vals = sess.run(fetches, feed_dict=feed_dict)
        gold = data_set.data['y']
        yp = yp[:data_set.num_examples]  # strip batch padding
        correct = [self.__class__.compare(yi, ypi) for yi, ypi in zip(gold, yp)]
        named_vals = dict(zip(self.tensor_dict.keys(), vals))
        return AccuracyEvaluation(data_set.data_type, int(global_step), idxs,
                                  yp.tolist(), gold, correct, float(loss),
                                  tensor_dict=named_vals)

    @staticmethod
    def compare(yi, ypi):
        # Correct iff any gold answer starts exactly at the argmax position.
        predicted = int(np.argmax(ypi))
        return any(start == predicted for start, _ in yi)
class AccuracyEvaluator2(AccuracyEvaluator):
    """Variant whose compare checks the (paragraph, sentence) start position."""

    @staticmethod
    def compare(yi, ypi):
        # predicted paragraph = row holding the overall maximum probability;
        # hoisted out of the loop since it does not depend on the gold span
        best_row = int(np.argmax(np.max(ypi, 1)))
        best_col = int(np.argmax(ypi[best_row]))
        return any(tuple(start) == (best_row, best_col) for start, _ in yi)
class ForwardEvaluation(Evaluation):
    """Evaluation of a forward (inference) pass, including span predictions,
    no-answer probabilities and intermediate model tensors."""

    def __init__(self, data_type, global_step, idxs, yp, yp2, loss,
                 id2answer_dict, na, u, h, p0, g1, g2, tensor_dict=None):
        super(ForwardEvaluation, self).__init__(data_type, global_step, idxs, yp, tensor_dict=tensor_dict)
        self.yp2 = yp2
        self.loss = loss
        self.id2answer_dict = id2answer_dict
        self.na = na
        self.u = u
        self.h = h
        self.p0 = p0
        self.g1 = g1
        self.g2 = g2
        self.dict['loss'] = loss
        self.dict['yp2'] = yp2
        self.dict['na'] = na
        self.dict['u'] = u
        self.dict['h'] = h
        self.dict['p0'] = p0
        self.dict['g1'] = g1
        self.dict['g2'] = g2

    def __add__(self, other):
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_idxs = self.idxs + other.idxs
        new_yp = self.yp + other.yp
        new_yp2 = self.yp2 + other.yp2
        # example-count-weighted running average of the loss
        new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_yp)
        new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
        new_id2score_dict = dict(list(self.id2answer_dict['scores'].items()) + list(other.id2answer_dict['scores'].items()))
        new_id2answer_dict['scores'] = new_id2score_dict
        new_na = self.na + other.na
        new_u = self.u + other.u
        new_h = self.h + other.h
        new_p0 = self.p0 + other.p0
        new_g1 = self.g1 + other.g1
        new_g2 = self.g2 + other.g2
        # BUG FIX: initialize so the no-tensor_dict path doesn't NameError.
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0)
                               for key, val in self.tensor_dict.items()}
        return ForwardEvaluation(self.data_type, self.global_step, new_idxs, new_yp, new_yp2, new_loss,
                                 new_id2answer_dict, new_na, new_u, new_h, new_p0, new_g1, new_g2,
                                 tensor_dict=new_tensor_dict)

    def __repr__(self):
        return "{} step {}: loss={:.4f}".format(self.data_type, self.global_step, self.loss)
class F1Evaluation(AccuracyEvaluation):
    """Accuracy evaluation extended with span-F1 scores and id->answer maps."""

    def __init__(self, data_type, global_step, idxs, yp, yp2, y, correct, loss, f1s, id2answer_dict, tensor_dict=None):
        super(F1Evaluation, self).__init__(data_type, global_step, idxs, yp, y, correct, loss, tensor_dict=tensor_dict)
        self.yp2 = yp2
        self.f1s = f1s
        self.f1 = float(np.mean(f1s))
        self.dict['yp2'] = yp2
        self.dict['f1s'] = f1s
        self.dict['f1'] = self.f1
        self.id2answer_dict = id2answer_dict
        f1_summary = tf.Summary(value=[tf.Summary.Value(tag='{}/f1'.format(data_type), simple_value=self.f1)])
        self.summaries.append(f1_summary)

    def __add__(self, other):
        if other == 0:
            return self
        assert self.data_type == other.data_type
        assert self.global_step == other.global_step
        new_idxs = self.idxs + other.idxs
        new_yp = self.yp + other.yp
        new_yp2 = self.yp2 + other.yp2
        new_y = self.y + other.y
        new_correct = self.correct + other.correct
        new_f1s = self.f1s + other.f1s
        # example-count-weighted running average of the loss
        new_loss = (self.loss * self.num_examples + other.loss * other.num_examples) / len(new_correct)
        new_id2answer_dict = dict(list(self.id2answer_dict.items()) + list(other.id2answer_dict.items()))
        new_id2score_dict = dict(list(self.id2answer_dict['scores'].items()) + list(other.id2answer_dict['scores'].items()))
        new_id2na_dict = dict(list(self.id2answer_dict['na'].items()) + list(other.id2answer_dict['na'].items()))
        new_id2answer_dict['scores'] = new_id2score_dict
        new_id2answer_dict['na'] = new_id2na_dict
        # CONSISTENCY FIX: merge and forward tensor_dict like every sibling
        # __add__ implementation, instead of silently dropping it.
        new_tensor_dict = None
        if self.tensor_dict is not None:
            new_tensor_dict = {key: np.concatenate((val, other.tensor_dict[key]), axis=0)
                               for key, val in self.tensor_dict.items()}
        return F1Evaluation(self.data_type, self.global_step, new_idxs, new_yp, new_yp2, new_y,
                            new_correct, new_loss, new_f1s, new_id2answer_dict,
                            tensor_dict=new_tensor_dict)

    def __repr__(self):
        return "{} step {}: accuracy={:.4f}, f1={:.4f}, loss={:.4f}".format(self.data_type, self.global_step, self.acc, self.f1, self.loss)
class F1Evaluator(LabeledEvaluator):
def __init__(self, config, model, tensor_dict=None):
    """Extend LabeledEvaluator with the end-pointer, loss and no-answer tensors."""
    super(F1Evaluator, self).__init__(config, model, tensor_dict=tensor_dict)
    self.yp2 = model.yp2
    self.loss = model.loss
    self.na = model.na_prob
def get_evaluation(self, sess, batch):
    """Run one batch, pick best spans, and score them (EM + span F1)."""
    idxs, data_set = self._split_batch(batch)
    assert isinstance(data_set, DataSet)
    feed_dict = self._get_feed_dict(batch)
    global_step, yp, yp2, loss, vals, na = sess.run([self.global_step, self.yp, self.yp2, self.loss, list(self.tensor_dict.values()), self.na], feed_dict=feed_dict)
    y = data_set.data['y']
    if self.config.squash:
        # Re-index gold spans into one flattened sentence (sentence id 0),
        # offsetting by the lengths of all preceding sentences.
        new_y = []
        for xi, yi in zip(data_set.data['x'], y):
            new_yi = []
            for start, stop in yi:
                start_offset = sum(map(len, xi[:start[0]]))
                stop_offset = sum(map(len, xi[:stop[0]]))
                new_start = 0, start_offset + start[1]
                new_stop = 0, stop_offset + stop[1]
                new_yi.append((new_start, new_stop))
            new_y.append(new_yi)
        y = new_y
    if self.config.single:
        # Drop the sentence index, keeping only within-sentence offsets.
        new_y = []
        for yi in y:
            new_yi = []
            for start, stop in yi:
                new_start = 0, start[1]
                new_stop = 0, stop[1]
                new_yi.append((new_start, new_stop))
            new_y.append(new_yi)
        y = new_y
    yp, yp2 = yp[:data_set.num_examples], yp2[:data_set.num_examples]
    spans, scores = zip(*[get_best_span(ypi, yp2i) for ypi, yp2i in zip(yp, yp2)])

    def _get(xi, span):
        # Tokens of the predicted span; empty when the span is out of range.
        if len(xi) <= span[0][0]:
            return [""]
        if len(xi[span[0][0]]) <= span[1][1]:
            return [""]
        return xi[span[0][0]][span[0][1]:span[1][1]]

    def _get2(context, xi, span):
        # Raw-text phrase of the predicted span; empty when out of range.
        if len(xi) <= span[0][0]:
            return ""
        if len(xi[span[0][0]]) <= span[1][1]:
            return ""
        return get_phrase(context, xi, span)

    # NOTE(review): 'scores' and 'na' live alongside per-question ids in the
    # same dict — assumes no question id collides with these keys.
    id2answer_dict = {id_: _get2(context, xi, span)
                      for id_, xi, span, context in zip(data_set.data['ids'], data_set.data['x'], spans, data_set.data['p'])}
    id2score_dict = {id_: score for id_, score in zip(data_set.data['ids'], scores)}
    id2na_dict = {id_: float(each) for id_, each in zip(data_set.data['ids'], na)}
    id2answer_dict['scores'] = id2score_dict
    id2answer_dict['na'] = id2na_dict
    correct = [self.__class__.compare2(yi, span) for yi, span in zip(y, spans)]
    f1s = [self.__class__.span_f1(yi, span) for yi, span in zip(y, spans)]
    tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
    e = F1Evaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), yp2.tolist(), y,
                     correct, float(loss), f1s, id2answer_dict, tensor_dict=tensor_dict)
    return e
def _split_batch(self, batch):
    # Identity here; subclasses may split (idxs, data_set) differently.
    return batch
def _get_feed_dict(self, batch):
    # Supervised feed with is_train=False so the loss tensor is computable.
    return self.model.get_feed_dict(batch[1], False)
@staticmethod
def compare(yi, ypi, yp2i):
for start, stop in yi:
aypi = argmax(ypi)
mask = np.zeros(yp2i.shape)
mask[aypi[0], aypi[1]:] = np.ones([yp2i.shape[1] - aypi[1]])
if tuple(start) == aypi and (stop[0], stop[1]-1) == argmax(yp2i * mask):
return True
return | |
"oid" : "1.3.6.1.4.1.890.172.16.31.10.42.1.2.1.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "1",
"max" : "65535"
},
],
"range" : {
"min" : "1",
"max" : "65535"
},
},
},
"access" : "readonly",
"description" :
"""The port number of the port for which this entry
contains Spanning Tree Protocol management
information.""",
"reference>" :
"""IEEE 802.1D-1990: Section 6.8.2.1.2""",
}, # column
"mrstpPortPriority" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.42.1.2.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "255"
},
],
"range" : {
"min" : "0",
"max" : "255"
},
},
},
"access" : "readwrite",
"description" :
"""The value of the priority field which is
contained in the first (in network byte order)
octet of the (2 octet long) Port ID. The other
octet of the Port ID is given by the value of
mrstpPort.""",
"reference>" :
"""IEEE 802.1D-1990: Section 4.5.5.1""",
}, # column
"mrstpPortState" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.42.1.2.1.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"disabled" : {
"nodetype" : "namednumber",
"number" : "1"
},
"blocking" : {
"nodetype" : "namednumber",
"number" : "2"
},
"listening" : {
"nodetype" : "namednumber",
"number" : "3"
},
"learning" : {
"nodetype" : "namednumber",
"number" : "4"
},
"forwarding" : {
"nodetype" : "namednumber",
"number" : "5"
},
"broken" : {
"nodetype" : "namednumber",
"number" : "6"
},
},
},
"access" : "readonly",
"description" :
"""The port's current state as defined by
application of the Spanning Tree Protocol. This
state controls what action a port takes on
reception of a frame. If the bridge has detected
a port that is malfunctioning it will place that
port into the broken(6) state. For ports which
are disabled (see mrstpPortEnable), this object
will have a value of disabled(1).""",
"reference>" :
"""IEEE 802.1D-1990: Section 4.5.5.2""",
}, # column
"mrstpPortEnable" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.42.1.2.1.4",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"enabled" : {
"nodetype" : "namednumber",
"number" : "1"
},
"disabled" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""The enabled/disabled status of the port.""",
"reference>" :
"""IEEE 802.1D-1990: Section 4.5.5.2""",
}, # column
"mrstpPortPathCost" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.42.1.2.1.5",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "1",
"max" : "65535"
},
],
"range" : {
"min" : "1",
"max" : "65535"
},
},
},
"access" : "readwrite",
"description" :
"""The contribution of this port to the path cost of
paths towards the spanning tree root which include
this port. 802.1D-1990 recommends that the
default value of this parameter be in inverse
proportion to the speed of the attached LAN.""",
"reference>" :
"""IEEE 802.1D-1990: Section 4.5.5.3""",
}, # column
"mrstpPortDesignatedRoot" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.42.1.2.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"BRIDGE-MIB", "name" : "BridgeId"},
},
"access" : "readonly",
"description" :
"""The unique Bridge Identifier of the Bridge
recorded as the Root in the Configuration BPDUs
transmitted by the Designated Bridge for the
segment to which the port is attached.""",
"reference>" :
"""IEEE 802.1D-1990: Section 4.5.5.4""",
}, # column
"mrstpPortDesignatedCost" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.42.1.2.1.7",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The path cost of the Designated Port of the
segment connected to this port. This value is
compared to the Root Path Cost field in received
bridge PDUs.""",
"reference>" :
"""IEEE 802.1D-1990: Section 4.5.5.5""",
}, # column
"mrstpPortDesignatedBridge" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.42.1.2.1.8",
"status" : "current",
"syntax" : {
"type" : { "module" :"BRIDGE-MIB", "name" : "BridgeId"},
},
"access" : "readonly",
"description" :
"""The Bridge Identifier of the bridge which this
port considers to be the Designated Bridge for
this port's segment.""",
"reference>" :
"""IEEE 802.1D-1990: Section 4.5.5.6""",
}, # column
"mrstpPortDesignatedPort" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.42.1.2.1.9",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "OctetString",
"ranges" : [
{
"min" : "2",
"max" : "2"
},
],
"range" : {
"min" : "2",
"max" : "2"
},
},
},
"access" : "readonly",
"description" :
"""The Port Identifier of the port on the Designated
Bridge for this port's segment.""",
"reference>" :
"""IEEE 802.1D-1990: Section 4.5.5.7""",
}, # column
"mrstpPortForwardTransitions" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.42.1.2.1.10",
"status" : "current",
"access" : "readonly",
"description" :
"""The number of times this port has transitioned
from the Learning state to the Forwarding state.""",
}, # column
"mrstpPortOnBridgeIndex" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.42.1.2.1.11",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""Indetify the bridge index that this port joined to in MRSTP.""",
}, # column
"mrstpNotifications" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.42.2",
}, # node
"radiusServerSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.43",
}, # node
"radiusAuthServerSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.43.1",
}, # node
"radiusAuthServerTimeout" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.43.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"radiusAuthServerTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.172.16.31.10.43.1.3",
"status" : "current",
"description" :
"""""",
}, # table
"radiusAuthServerEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.43.1.3.1",
"status" : "current",
"linkage" : [
"radiusAuthServerIndex",
],
"description" :
"""An entry in radiusAuthServerTable.""",
}, # row
"radiusAuthServerIndex" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.172.16.31.10.43.1.3.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "noaccess",
"description" :
"""""",
}, # column
"radiusAuthServerIpAddr" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.43.1.3.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"radiusAuthServerUdpPort" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.43.1.3.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"radiusAuthServerSharedSecret" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.43.1.3.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"radiusAcctServerSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.172.16.31.10.43.2",
}, # node
"radiusAcctServerTimeout" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.43.2.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"radiusAcctServerTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.43.2.2",
"status" : "current",
"description" :
"""""",
}, # table
"radiusAcctServerEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.43.2.2.1",
"status" : "current",
"linkage" : [
"radiusAcctServerIndex",
],
"description" :
"""An entry in radiusAcctServerTable.""",
}, # row
"radiusAcctServerIndex" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "172.16.17.32.4.1.890.1.5.8.20.43.2.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "noaccess",
"description" :
"""""",
}, # column
"radiusAcctServerIpAddr" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.43.2.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"radiusAcctServerUdpPort" : | |
<reponame>eddytion/scripts
#!/bin/python3.6
import paramiko
import sys
import datetime
import multiprocessing
import logging
import mysql.connector
import time
# Database connection settings.
DBUSER = "root"
DBPASS = "<PASSWORD>"  # TODO: load from environment/config, not source
DBHOST = "localhost"
DBNAME = "sap"
DBPORT = 3306

current_date = datetime.date.today()

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
threads = []
sys.tracebacklimit = 0

# BUG FIX: len(sys.argv) is always >= 1 (argv[0] is the script name), so the
# old "< 1" check could never fire; require at least one real argument.
if len(sys.argv) < 2:
    logging.error("Not enough arguments")
    sys.exit(1)

logging.basicConfig(filename='/tmp/cloud_logfile.log', level=logging.INFO)

mydb = mysql.connector.connect(
    host=DBHOST,
    user=DBUSER,
    # BUG FIX: the bare <PASSWORD> placeholder was a syntax error; use the
    # DBPASS constant defined above.
    passwd=DBPASS,
    database=DBNAME
)
mycursor = mydb.cursor()
class CloudUpdate(object):
def __init__(self, hmc):
    """Prepare per-HMC scratch-file paths and in-memory result buffers."""
    self.hmc = hmc
    stamp = "_{}_{}".format(hmc, current_date)
    self.csvfile_lpar_ms = "/tmp/lpar_ms" + stamp + ".csv"
    self.csvfile_mem_cpu_lpars = "/tmp/mem_cpu_lpars" + stamp + ".csv"
    self.csvfile_ms_fw = "/tmp/ms_fw" + stamp + ".csv"
    self.csvfile_ms_mem = "/tmp/ms_mem" + stamp + ".csv"
    self.csvfile_ms_cpu = "/tmp/ms_cpu" + stamp + ".csv"
    self.csvfile_ms_io = "/tmp/ms_io" + stamp + ".csv"
    self.csvfile_lpar_fc = "/tmp/ms_lpar_fc" + stamp + ".csv"
    self.csvfile_lpar_scsi = "/tmp/ms_lpar_scsi" + stamp + ".csv"
    self.csvfile_lpar_eth = "/tmp/ms_lpar_eth" + stamp + ".csv"
    self.csvfile_phys_mac = "/tmp/ms_phys_mac" + stamp + ".csv"
    self.csvfile_vios_wwpn = "/tmp/ms_vios_wwpn" + stamp + ".csv"
    self.csvfile_vios_disks = "/tmp/ms_vios_disks" + stamp + ".csv"
    self.csvfile_hmc_details = "/tmp/hmc_details" + stamp + ".sql"
    # One accumulator list per collector method.
    for attr in ('lpar_ms_results', 'mem_cpu_lpars', 'ms_fw', 'ms_mem',
                 'ms_cpu', 'ms_io', 'lpar_fc', 'lpar_scsi', 'lpar_eth',
                 'phys_mac', 'vios_wwpn', 'hmc_queries', 'vios_disks'):
        setattr(self, attr, [])
def get_hmc_details(self, hmc):
    """Scrape release/model/serial info from an HMC and queue an UPDATE query."""
    hmc_last_update = time.strftime('%Y-%m-%d %H:%M:%S')
    hmc_release = ""
    hmc_servicepack = ""
    hmc_model = ""
    hmc_serial = ""
    # NOTE(review): module-level ssh client and hard-coded credentials; the
    # '<PASSWORD>' literal looks like a redacted secret — restore from config.
    ssh.connect(hostname=hmc, username='hscroot', password='<PASSWORD>', timeout=120)
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('lshmc -V')
    output_1 = ssh_stdout.readlines()
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('lshmc -v')
    output_2 = ssh_stdout.readlines()
    for i in output_1:
        if i:
            if "Release:" in i:
                hmc_release = (i.split(':')[-1].rstrip('\n'))
            if "Service Pack:" in i:
                hmc_servicepack = (i.split(':')[-1].rstrip('\n'))
    for i in output_2:
        if i:
            if "*TM" in i:
                hmc_model = (i.split(' ')[-1].rstrip('\n'))
            if "*SE" in i:
                hmc_serial = (i.split(' ')[-1].rstrip('\n'))
    # SECURITY: SQL built by string concatenation — injection-prone if any
    # scraped field contains a quote; should use parameterized queries.
    query = "UPDATE hmc SET version='" + hmc_release + "', servicepack='" + hmc_servicepack + "', model='" + hmc_model + "', serialnr='" + hmc_serial + "', last_update='" + hmc_last_update + "' WHERE name='" + hmc + "';"
    self.hmc_queries.append(query)
    # NOTE(review): f.write(line) appends no trailing newline, so multiple
    # queued queries end up on one line of the .sql file — confirm intended.
    with open(self.csvfile_hmc_details, 'a') as f:
        for line in self.hmc_queries:
            f.write(line)
def update_database_hmc_details(self):
    """Execute each queued UPDATE statement from the per-HMC .sql file."""
    with open(self.csvfile_hmc_details, 'r') as f:
        for qr in f.readlines():
            mycursor.execute(qr)
            # commit per statement (indentation reconstructed — the paste had
            # lost it; verify commit placement against version control)
            mydb.commit()
def get_lpar_ms(self, hmc):
    """Collect LPAR/managed-system inventory over SSH into a CSV buffer and file."""
    ssh.connect(hostname=hmc, username='hscroot', password='<PASSWORD>', timeout=120)
    # Remote one-liner: for each running managed system, list its LPARs and
    # emit one "DEFAULT,..." CSV row per LPAR.
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
        'for ms in `lssyscfg -r sys -F name,state,type_model,serial_num | grep Operating | egrep -v "Authentication|No Connection|Mismatch|Power|HSCL"`;do MSNAME=`echo $ms | cut -f 1 -d ,`;MSMODEL=`echo $ms | cut -f 3 -d ,`;MSSERIAL=`echo $ms | cut -f 4 -d ,`; for lpar in `lssyscfg -r lpar -m $MSNAME -F name,lpar_env,os_version,state,rmc_ipaddr,rmc_state,curr_lpar_proc_compat_mode,lpar_id | sed \'s/ /-/g\'`;do HMCNAME=`uname -n | cut -f 1 -d .`; LPARNAME=`echo $lpar | cut -f 1 -d ,`; LPARENV=`echo $lpar | cut -f 2 -d ,`; LPAROS=`echo $lpar | cut -f 3 -d ,`; LPARSTATE=`echo $lpar | cut -f 4 -d ,`; LPARIP=`echo $lpar | cut -f 5 -d ,`; RMCSTATE=`echo $lpar | cut -f 6 -d ,`; PROC_COMPAT=`echo $lpar | cut -f 7 -d ,`; LPARID=`echo $lpar | cut -f 8 -d ,`; echo "DEFAULT,$HMCNAME,$MSNAME,$MSMODEL,$MSSERIAL,$LPARNAME,$LPARENV,$LPAROS,$LPARSTATE,$LPARIP,$RMCSTATE,$PROC_COMPAT,$LPARID";done;done')
    output = ssh_stdout.readlines()
    for i in output:
        if len(i) > 0 and "No results were found." not in i:
            self.lpar_ms_results.append([i])
    with open(self.csvfile_lpar_ms, 'a') as f:
        for line in self.lpar_ms_results:
            f.write(line[0])
def update_database_lpar_ms(self):
    """Bulk-load the collected LPAR/MS CSV into sap.lpar_ms (duplicates ignored)."""
    load_stmt = (
        "LOAD DATA LOCAL INFILE '{}' IGNORE INTO TABLE sap.lpar_ms "
        "FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\n'"
    ).format(self.csvfile_lpar_ms)
    mycursor.execute(load_stmt)
    mydb.commit()
def get_mem_cpu_lpars(self, hmc):
    """Collect per-LPAR profile memory/CPU settings over SSH into a CSV buffer and file."""
    ssh.connect(hostname=hmc, username='hscroot', password='<PASSWORD>', timeout=120)
    # Remote one-liner: dump each managed system's LPAR profiles, prefixing
    # every row with "DEFAULT," for the auto-increment column.
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
        'for ms in `lssyscfg -r sys -F name,state | grep Operating | egrep -v "Authentication|No Connection|Mismatch|Power|HSCL" | cut -f 1 -d ,`;do lssyscfg -r prof -m $ms -F name,lpar_name,min_mem,desired_mem,max_mem,mem_mode,proc_mode,min_proc_units,desired_proc_units,max_proc_units,min_procs,desired_procs,max_procs,sharing_mode,uncap_weight;done | sed \'s/^/DEFAULT,/g\'')
    output = ssh_stdout.readlines()
    for i in output:
        if len(i) > 0 and "No results were found." not in i:
            self.mem_cpu_lpars.append([i])
    with open(self.csvfile_mem_cpu_lpars, 'a') as f:
        for line in self.mem_cpu_lpars:
            f.write(line[0])
def update_database_mem_cpu_lpars(self):
    """Bulk-load the collected profile CSV into sap.mem_cpu_lpars (duplicates ignored)."""
    load_stmt = (
        "LOAD DATA LOCAL INFILE '{}' IGNORE INTO TABLE sap.mem_cpu_lpars "
        "FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\n'"
    ).format(self.csvfile_mem_cpu_lpars)
    mycursor.execute(load_stmt)
    mydb.commit()
def get_ms_fw(self, hmc):
    """Collect managed-system firmware levels over SSH into a CSV buffer and file."""
    ssh.connect(hostname=hmc, username='hscroot', password='<PASSWORD>', timeout=120)
    # Remote one-liner: "<ms>,<ec-number>:<activated-level>" per system.
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
        'for ms in `lssyscfg -r sys -F name,state | grep Operating | egrep -v "Authentication|No Connection|Mismatch|Power|HSCL" | cut -f 1 -d ,`;do echo -n "$ms,";lslic -m $ms -t sys -Fcurr_ecnumber_primary:activated_level;done')
    output = ssh_stdout.readlines()
    for i in output:
        if len(i) > 0 and "No results were found." not in i:
            self.ms_fw.append([i])
    # "DEFAULT," prefix feeds the table's auto-increment column on load.
    with open(self.csvfile_ms_fw, 'a') as f:
        for line in self.ms_fw:
            f.write('DEFAULT,' + str(line[0]))
def update_database_ms_fw(self):
    """Bulk-load the collected firmware CSV into sap.ms_fw (duplicates ignored)."""
    load_stmt = (
        "LOAD DATA LOCAL INFILE '{}' IGNORE INTO TABLE sap.ms_fw "
        "FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\n'"
    ).format(self.csvfile_ms_fw)
    mycursor.execute(load_stmt)
    mydb.commit()
def get_ms_mem(self, hmc):
    """Collect managed-system memory figures over SSH into a CSV buffer and file."""
    ssh.connect(hostname=hmc, username='hscroot', password='<PASSWORD>', timeout=120)
    # Remote one-liner: system-level memory stats, one "<ms>,<stats>" row each.
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
        'for ms in `lssyscfg -r sys -F name,state | grep Operating | egrep -v "Authentication|No Connection|Mismatch|Power|HSCL" | cut -f 1 -d ,`; do for line in `lshwres -m $ms -r mem --level sys -F configurable_sys_mem,curr_avail_sys_mem,deconfig_sys_mem,sys_firmware_mem,mem_region_size`;do echo "$ms,"$line"";done;done')
    output = ssh_stdout.readlines()
    for i in output:
        if len(i) > 0 and "No results were found." not in i:
            self.ms_mem.append([i])
    # "DEFAULT," prefix feeds the table's auto-increment column on load.
    with open(self.csvfile_ms_mem, 'a') as f:
        for line in self.ms_mem:
            f.write('DEFAULT,' + str(line[0]))
def update_database_ms_mem(self):
    """Bulk-load the collected memory CSV into sap.ms_mem (duplicates ignored)."""
    load_stmt = (
        "LOAD DATA LOCAL INFILE '{}' IGNORE INTO TABLE sap.ms_mem "
        "FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\n'"
    ).format(self.csvfile_ms_mem)
    mycursor.execute(load_stmt)
    mydb.commit()
def get_ms_cpu(self, hmc):
ssh.connect(hostname=hmc, username='hscroot', password='<PASSWORD>', timeout=120)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
'for ms in `lssyscfg -r sys -F name,state | grep Operating | egrep -v "Authentication|No Connection|Mismatch|Power|HSCL" | cut -f 1 -d ,`; do for line in `lshwres -m $ms -r proc --level sys -F configurable_sys_proc_units,curr_avail_sys_proc_units,deconfig_sys_proc_units`;do echo "$ms,"$line"";done; done')
output = ssh_stdout.readlines()
for i in output:
if len(i) > 0 and "No results were found." not in i:
self.ms_cpu.append([i])
with open(self.csvfile_ms_cpu, 'a') as f:
for line in self.ms_cpu:
f.write('DEFAULT,' + str(line[0]))
def update_database_ms_cpu(self):
query = "LOAD DATA LOCAL INFILE '" + self.csvfile_ms_cpu + "' IGNORE INTO TABLE sap.ms_cpu FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\n'"
mycursor.execute(query)
mydb.commit()
def get_ms_io(self, hmc):
ssh.connect(hostname=hmc, username='hscroot', password='<PASSWORD>', timeout=120)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
'for ms in `lssyscfg -r sys -F name,state | grep Operating | egrep -v "Authentication|No Connection|Mismatch|Power|HSCL" | cut -f 1 -d ,`; do for line in `lshwres -r io --rsubtype slot -m $ms -F unit_phys_loc,phys_loc,description,lpar_name,drc_name | sed \'s/ /_/g\'`;do echo "$ms,"$line"";done;done')
output = ssh_stdout.readlines()
for i in output:
if len(i) > 0 and "No results were found." not in i:
self.ms_io.append([i])
with open(self.csvfile_ms_io, 'a') as f:
for line in self.ms_io:
f.write('DEFAULT,' + str(line[0]))
def update_database_ms_io(self):
query = "LOAD DATA LOCAL INFILE '" + self.csvfile_ms_io + "' IGNORE INTO TABLE sap.ms_io FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\n'"
mycursor.execute(query)
mydb.commit()
def get_lpar_fc(self, hmc):
ssh.connect(hostname=hmc, username='hscroot', password='<PASSWORD>', timeout=120)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
'for ms in `lssyscfg -r sys -F name,state | grep Operating | egrep -v "Authentication|No Connection|Mismatch|Power|HSCL" | cut -f 1 -d ,`;do lshwres -r virtualio -m $ms --rsubtype fc --level lpar -F lpar_name,adapter_type,state,remote_lpar_name,remote_slot_num,wwpns | sort ; done')
output = ssh_stdout.readlines()
for i in output:
if len(i) > 0 and "No results were found." not in i:
self.lpar_fc.append([i])
with open(self.csvfile_lpar_fc, 'a') as f:
for line in self.lpar_fc:
f.write('DEFAULT,' + str(line[0]))
def update_database_lpar_fc(self):
query = "LOAD DATA LOCAL INFILE '" + self.csvfile_lpar_fc + "' IGNORE INTO TABLE sap.lpar_fc FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\n'"
mycursor.execute(query)
mydb.commit()
def get_lpar_scsi(self, hmc):
ssh.connect(hostname=hmc, username='hscroot', password='<PASSWORD>', timeout=120)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(
'for ms in `lssyscfg -r sys -F name,state | grep Operating | egrep -v "Authentication|No Connection|Mismatch|Power|HSCL" | cut -f 1 -d ,`; do lshwres -r virtualio -m $ms --rsubtype scsi -F lpar_name,slot_num,state,is_required,adapter_type,remote_lpar_name,remote_slot_num | sort; done')
output = ssh_stdout.readlines()
| |
for
each element of the DataStream. Each MapFunction call returns exactly one element.
Note that If user does not specify the output data type, the output data will be serialized
as pickle primitive byte array.
:param func: The MapFunction that is called for each element of the DataStream.
:param output_type: The type information of the MapFunction output data.
:return: The transformed DataStream.
"""
if not isinstance(func, MapFunction) and not callable(func):
raise TypeError("The input must be a MapFunction or a callable function")
class MapKeyedProcessFunctionAdapter(KeyedProcessFunction):
def __init__(self, map_func):
if isinstance(map_func, MapFunction):
self._open_func = map_func.open
self._close_func = map_func.close
self._map_func = map_func.map
else:
self._open_func = None
self._close_func = None
self._map_func = map_func
def open(self, runtime_context: RuntimeContext):
if self._open_func:
self._open_func(runtime_context)
def close(self):
if self._close_func:
self._close_func()
def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
yield self._map_func(value)
return self.process(MapKeyedProcessFunctionAdapter(func), output_type) \
.name("Map") # type: ignore
    def flat_map(self,
                 func: Union[Callable, FlatMapFunction],
                 output_type: TypeInformation = None) -> 'DataStream':
        """
        Applies a FlatMap transformation on a KeyedStream. The transformation calls a
        FlatMapFunction for each element of the DataStream. Each FlatMapFunction call can return
        any number of elements including none.
        :param func: The FlatMapFunction (or plain callable) that is called for each element of
                     the DataStream.
        :param output_type: The type information of output data.
        :return: The transformed DataStream.
        :raises TypeError: if ``func`` is neither a FlatMapFunction nor callable.
        """
        if not isinstance(func, FlatMapFunction) and not callable(func):
            raise TypeError("The input must be a FlatMapFunction or a callable function")
        # Adapter: wraps the user function in a KeyedProcessFunction so the keyed
        # operator machinery can execute it; open/close lifecycle hooks are only
        # forwarded when a real FlatMapFunction was supplied.
        class FlatMapKeyedProcessFunctionAdapter(KeyedProcessFunction):
            def __init__(self, flat_map_func):
                if isinstance(flat_map_func, FlatMapFunction):
                    self._open_func = flat_map_func.open
                    self._close_func = flat_map_func.close
                    self._flat_map_func = flat_map_func.flat_map
                else:
                    self._open_func = None
                    self._close_func = None
                    self._flat_map_func = flat_map_func
            def open(self, runtime_context: RuntimeContext):
                if self._open_func:
                    self._open_func(runtime_context)
            def close(self):
                if self._close_func:
                    self._close_func()
            def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
                # a flat_map may yield zero or more results per input element
                yield from self._flat_map_func(value)
        return self.process(FlatMapKeyedProcessFunctionAdapter(func), output_type) \
            .name("FlatMap")
    def reduce(self, func: Union[Callable, ReduceFunction]) -> 'DataStream':
        """
        Applies a reduce transformation on the grouped data stream grouped on by the given
        key position. The `ReduceFunction` will receive input values based on the key value.
        Only input values with the same key will go to the same reducer.
        Example:
        ::
            >>> ds = env.from_collection([(1, 'a'), (2, 'a'), (3, 'a'), (4, 'b')])
            >>> ds.key_by(lambda x: x[1]).reduce(lambda a, b: (a[0] + b[0], b[1]))
        :param func: The ReduceFunction (or plain callable) that is called for each element of
                     the DataStream.
        :return: The transformed DataStream.
        :raises TypeError: if ``func`` is neither a ReduceFunction nor callable.
        """
        if not isinstance(func, ReduceFunction) and not callable(func):
            raise TypeError("The input must be a ReduceFunction or a callable function")
        # the reduce output has the same type as its input elements
        output_type = _from_java_type(self._original_data_type_info.get_java_type_info())
        # Adapter: runs the user's reduce inside a KeyedProcessFunction so the
        # running aggregate can be kept in per-key ValueState.
        class ReduceProcessKeyedProcessFunctionAdapter(KeyedProcessFunction):
            def __init__(self, reduce_function):
                if isinstance(reduce_function, ReduceFunction):
                    self._open_func = reduce_function.open
                    self._close_func = reduce_function.close
                    self._reduce_function = reduce_function.reduce
                else:
                    self._open_func = None
                    self._close_func = None
                    self._reduce_function = reduce_function
                self._reduce_value_state = None # type: ValueState
            def open(self, runtime_context: RuntimeContext):
                if self._open_func:
                    self._open_func(runtime_context)
                # uuid suffix presumably keeps this state name unique across multiple
                # reduce operators — TODO confirm against the state backend naming rules
                self._reduce_value_state = runtime_context.get_state(
                    ValueStateDescriptor("_reduce_state" + str(uuid.uuid4()), output_type))
                from pyflink.fn_execution.datastream.runtime_context import StreamingRuntimeContext
                self._in_batch_execution_mode = \
                    cast(StreamingRuntimeContext, runtime_context)._in_batch_execution_mode
            def close(self):
                if self._close_func:
                    self._close_func()
            def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
                reduce_value = self._reduce_value_state.value()
                if reduce_value is not None:
                    reduce_value = self._reduce_function(reduce_value, value)
                else:
                    # register a timer for emitting the result at the end when this is the
                    # first input for this key
                    if self._in_batch_execution_mode:
                        ctx.timer_service().register_event_time_timer(0x7fffffffffffffff)
                    reduce_value = value
                self._reduce_value_state.update(reduce_value)
                if not self._in_batch_execution_mode:
                    # streaming mode: emit the updated aggregate for every input;
                    # in batch mode the result is emitted once per key by on_timer instead
                    yield reduce_value
            def on_timer(self, timestamp: int, ctx: 'KeyedProcessFunction.OnTimerContext'):
                # fires at end of input (batch mode); emit the final aggregate for this key
                current_value = self._reduce_value_state.value()
                if current_value is not None:
                    yield current_value
        return self.process(ReduceProcessKeyedProcessFunctionAdapter(func), output_type) \
            .name("Reduce")
    def filter(self, func: Union[Callable, FilterFunction]) -> 'DataStream':
        """
        Applies a Filter transformation on a KeyedStream. The transformation calls a
        FilterFunction for each element of the stream and retains only elements for
        which the function returns a truthy value.
        :param func: The FilterFunction (or plain callable) that is called for each element of
                     the DataStream.
        :return: The filtered DataStream.
        :raises TypeError: if ``func`` is neither a FilterFunction nor callable.
        """
        if not isinstance(func, FilterFunction) and not callable(func):
            raise TypeError("The input must be a FilterFunction or a callable function")
        # Adapter: wraps the user function in a KeyedProcessFunction; lifecycle hooks
        # are forwarded only for real FilterFunction instances.
        class FilterKeyedProcessFunctionAdapter(KeyedProcessFunction):
            def __init__(self, filter_func):
                if isinstance(filter_func, FilterFunction):
                    self._open_func = filter_func.open
                    self._close_func = filter_func.close
                    self._filter_func = filter_func.filter
                else:
                    self._open_func = None
                    self._close_func = None
                    self._filter_func = filter_func
            def open(self, runtime_context: RuntimeContext):
                if self._open_func:
                    self._open_func(runtime_context)
            def close(self):
                if self._close_func:
                    self._close_func()
            def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
                if self._filter_func(value):
                    yield value
        # the output type equals the input type, so reuse the original type info
        return self.process(FilterKeyedProcessFunctionAdapter(func), self._original_data_type_info)\
            .name("Filter")
    def add_sink(self, sink_func: SinkFunction) -> 'DataStreamSink':
        """
        Adds the given sink to this KeyedStream. The stream is first mapped back to its
        original (un-keyed) values via ``_values()`` before the sink is attached.
        :param sink_func: The SinkFunction object.
        :return: The closed DataStream with the sink attached.
        """
        return self._values().add_sink(sink_func)
    def key_by(self, key_selector: Union[Callable, KeySelector],
               key_type: TypeInformation = None) -> 'KeyedStream':
        """
        Re-keys this stream. Delegates to the original (pre-keying) stream so the new
        key replaces the current one rather than nesting on top of it.
        :param key_selector: The KeySelector used to extract the new key.
        :param key_type: The type information of the key.
        :return: The newly keyed stream.
        """
        return self._origin_stream.key_by(key_selector, key_type)
    def process(self, func: KeyedProcessFunction, # type: ignore
                output_type: TypeInformation = None) -> 'DataStream':
        """
        Applies the given ProcessFunction on the input stream, thereby creating a transformed output
        stream.
        The function will be called for every element in the input streams and can produce zero or
        more output elements.
        :param func: The KeyedProcessFunction that is called for each element in the stream.
        :param output_type: TypeInformation for the result type of the function.
        :return: The transformed DataStream.
        :raises TypeError: if ``func`` is not a KeyedProcessFunction.
        """
        if not isinstance(func, KeyedProcessFunction):
            raise TypeError("KeyedProcessFunction is required for KeyedStream.")
        from pyflink.fn_execution import flink_fn_execution_pb2
        # build the Java operator that executes the Python function; KEYED_PROCESS
        # identifies the user-defined function flavor to the runtime
        j_python_data_stream_function_operator, j_output_type_info = \
            _get_one_input_stream_operator(
                self,
                func,
                flink_fn_execution_pb2.UserDefinedDataStreamFunction.KEYED_PROCESS, # type: ignore
                output_type)
        return DataStream(self._j_data_stream.transform(
            "KEYED PROCESS",
            j_output_type_info,
            j_python_data_stream_function_operator))
    def window(self, window_assigner: WindowAssigner) -> 'WindowedStream':
        """
        Windows this data stream to a WindowedStream, which evaluates windows over a key
        grouped stream. Elements are put into windows by a WindowAssigner. The grouping of
        elements is done both by key and by window.
        A Trigger can be defined to specify when windows are evaluated. However, WindowAssigners
        have a default Trigger that is used if a Trigger is not specified.
        :param window_assigner: The WindowAssigner that assigns elements to windows.
        :return: The trigger windows data stream.
        """
        # a custom trigger / allowed lateness are configured on the returned WindowedStream
        return WindowedStream(self, window_assigner)
    def union(self, *streams) -> 'DataStream':
        """Creates a new DataStream by merging this stream's original values with the given streams."""
        return self._values().union(*streams)
    def shuffle(self) -> 'DataStream':
        """Not supported: the partitioning of a KeyedStream is determined by its key."""
        raise Exception('Cannot override partitioning for KeyedStream.')
    def project(self, *field_indexes) -> 'DataStream':
        """Projects the selected tuple fields from this stream's original values."""
        return self._values().project(*field_indexes)
    def rescale(self) -> 'DataStream':
        """Not supported: the partitioning of a KeyedStream is determined by its key."""
        raise Exception('Cannot override partitioning for KeyedStream.')
    def rebalance(self) -> 'DataStream':
        """Not supported: the partitioning of a KeyedStream is determined by its key."""
        raise Exception('Cannot override partitioning for KeyedStream.')
    def forward(self) -> 'DataStream':
        """Not supported: the partitioning of a KeyedStream is determined by its key."""
        raise Exception('Cannot override partitioning for KeyedStream.')
    def broadcast(self) -> 'DataStream':
        """Not supported: the partitioning of a KeyedStream is determined by its key."""
        raise Exception('Cannot override partitioning for KeyedStream.')
    def partition_custom(self, partitioner: Union[Callable, Partitioner],
                         key_selector: Union[Callable, KeySelector]) -> 'DataStream':
        """Not supported: the partitioning of a KeyedStream is determined by its key."""
        raise Exception('Cannot override partitioning for KeyedStream.')
def print(self, sink_identifier=None):
return self._values().print()
    def _values(self) -> 'DataStream':
        """
        Since python KeyedStream is in the format of Row(key_value, original_data), it is used for
        getting the original_data.
        """
        # identity map typed with the original (pre-keying) type info strips the key
        transformed_stream = self.map(lambda x: x, output_type=self._original_data_type_info)
        # NOTE(review): the special operator name presumably lets the Java-side
        # PythonConfigUtil recognize this value-extraction operator — confirm
        transformed_stream.name(get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
                                .KEYED_STREAM_VALUE_OPERATOR_NAME)
        return DataStream(transformed_stream._j_data_stream)
    def set_parallelism(self, parallelism: int):
        """Not supported on KeyedStream."""
        raise Exception("Set parallelism for KeyedStream is not supported.")
    def name(self, name: str):
        """Not supported on KeyedStream."""
        raise Exception("Set name for KeyedStream is not supported.")
    def get_name(self) -> str:
        """Not supported on KeyedStream."""
        raise Exception("Get name of KeyedStream is not supported.")
    def uid(self, uid: str):
        """Not supported on KeyedStream."""
        raise Exception("Set uid for KeyedStream is not supported.")
    def set_uid_hash(self, uid_hash: str):
        """Not supported on KeyedStream."""
        raise Exception("Set uid hash for KeyedStream is not supported.")
    def set_max_parallelism(self, max_parallelism: int):
        """Not supported on KeyedStream."""
        raise Exception("Set max parallelism for KeyedStream is not supported.")
    def force_non_parallel(self):
        """Not supported on KeyedStream."""
        raise Exception("Set force non-parallel for KeyedStream is not supported.")
    def set_buffer_timeout(self, timeout_millis: int):
        """Not supported on KeyedStream."""
        raise Exception("Set buffer timeout for KeyedStream is not supported.")
    def start_new_chain(self) -> 'DataStream':
        """Not supported on KeyedStream."""
        raise Exception("Start new chain for KeyedStream is not supported.")
    def disable_chaining(self) -> 'DataStream':
        """Not supported on KeyedStream."""
        raise Exception("Disable chaining for KeyedStream is not supported.")
    def slot_sharing_group(self, slot_sharing_group: Union[str, SlotSharingGroup]) -> 'DataStream':
        """Not supported on KeyedStream."""
        raise Exception("Setting slot sharing group for KeyedStream is not supported.")
class WindowedStream(object):
"""
A WindowedStream represents a data stream where elements are grouped by key, and for each
key, the stream of elements is split into windows based on a WindowAssigner. Window emission
is triggered based on a Trigger.
The windows are conceptually evaluated for each key individually, meaning windows can trigger
at different points for each key.
Note that the WindowedStream is purely an API construct, during runtime the WindowedStream will
be collapsed together with the KeyedStream and the operation over the window into one single
operation.
"""
    def __init__(self, keyed_stream: KeyedStream, window_assigner: WindowAssigner):
        # the upstream keyed stream whose elements are windowed
        self._keyed_stream = keyed_stream
        # assigns each incoming element to its window(s)
        self._window_assigner = window_assigner
        # allowed lateness in milliseconds; 0 means late elements are dropped
        self._allowed_lateness = 0
        # optional custom trigger; None means the assigner's default trigger is used
        self._window_trigger = None  # type: Trigger
    def get_execution_environment(self):
        """Returns the execution environment of the underlying keyed stream."""
        return self._keyed_stream.get_execution_environment()
    def get_input_type(self):
        """Returns the type information of the keyed stream's original (un-keyed) elements."""
        return _from_java_type(self._keyed_stream._original_data_type_info.get_java_type_info())
    def trigger(self, trigger: Trigger):
        """
        Sets the Trigger that should be used to trigger window emission.
        :param trigger: The custom trigger, replacing the window assigner's default.
        :return: This WindowedStream, to allow call chaining.
        """
        self._window_trigger = trigger
        return self
    def allowed_lateness(self, time_ms: int):
        """
        Sets the time by which elements are allowed to be late. Elements that arrive behind the
        watermark by more than the specified time will be dropped. By default, the allowed lateness
        is 0.
        Setting an allowed lateness is only valid for event-time windows.
        :param time_ms: The allowed lateness in milliseconds.
        :return: This WindowedStream, to allow call chaining.
        """
        self._allowed_lateness = time_ms
        return self
def apply(self,
window_function: WindowFunction, result_type: TypeInformation | |
carpet item of a mob.
.. workswith:: Horses, Pigs, Llamas (Decor)
Parameters
----------
item : Optional[:attr:`~.ItemParam`], optional
New saddle item, or ``None`` to remove it.
.. note:: Specifying no saddle item will remove the mob's saddle.
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Notes
-----
Pigs do not retain saddle metadata, only that they have a saddle.
Examples
--------
::
saddle = Item(Material.SADDLE) # the new saddle item. Specify None instead to remove it.
last_mob.set_saddle(saddle)
# OR
Entity(EntityTarget.LAST_MOB).set_saddle(saddle) # if the last spawned mob can have a saddle, its saddle item
# is set to the specified one.
"""
args = Arguments([
p_check(item, ItemParam, "item") if item is not None else None
])
return EntityAction(
action=EntityActionType.SET_SADDLE,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_sheep_sheared(
self, is_sheared: bool = True,
*, target: typing.Optional[EntityTarget] = None
):
"""Sets whether a sheep is currently sheared.
Parameters
----------
is_sheared : :class:`bool`, optional
Whether or not the sheep should be sheared. Defaults to ``True``.
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.set_sheep_sheared(True)
# OR
Entity(EntityTarget.LAST_MOB).set_sheep_sheared(True)
# if the last spawned mob is a sheep, it will become sheared. (Specify False to set to not sheared)
"""
args = Arguments([], tags=[
Tag(
"Is Sheared", option=bool(is_sheared), # default is True
action=EntityActionType.SET_SHEEP_SHEARED, block=BlockType.ENTITY_ACTION
)
])
return EntityAction(
action=EntityActionType.SET_SHEEP_SHEARED,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_slime_ai(
self, enable_ai: bool = True,
*, target: typing.Optional[EntityTarget] = None
):
"""Allows a slime's AI to be enabled and disabled, but unlike the disable AI action, the slime can still be moved.
.. workswith:: Slime, Magma Cube
Parameters
----------
enable_ai : :class:`bool`, optional
Whether or not the Slime/Magma Cube's AI should be enabled (True) or disabled (False). Defaults to ``True``.
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.set_slime_ai(False)
# OR
Entity(EntityTarget.LAST_MOB).set_slime_ai(False)
# if the last spawned mob is a Slime, then its AI is disabled, but it will still be able to be moved.
# Specify True to re-enable.
"""
args = Arguments([], tags=[
Tag(
"Do AI", option=bool(enable_ai), # default is True
action=EntityActionType.SET_SLIME_AI, block=BlockType.ENTITY_ACTION
)
])
return EntityAction(
action=EntityActionType.SET_SLIME_AI,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_target(
self, name: Textable,
*, target: typing.Optional[EntityTarget] = None
):
"""Instructs the mob's AI to target a specific mob or player.
.. rank:: Overlord
Parameters
----------
name : :attr:`~.Textable`
Target's name (either a player's or a mob's name).
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.set_target("John")
# OR
Entity(EntityTarget.LAST_MOB).set_target("John")
# the last spawned mob will now target the mob or player named John, if any.
"""
args = Arguments([
p_check(name, Textable, "name")
])
return EntityAction(
action=EntityActionType.SET_TARGET,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_tropical_fish_type(
self,
*, pattern_color: typing.Optional[EntityColor] = None, body_color: typing.Optional[EntityColor] = None,
pattern: typing.Optional[TropicalFishPattern] = None,
target: typing.Optional[EntityTarget] = None
):
"""Sets the appearance of a tropical fish.
Parameters
----------
pattern_color : Optional[:class:`~.EntityColor`], optional
The new color of the fish's pattern, or ``None`` to keep it unchanged. Defaults to ``None``.
body_color : Optional[:class:`~.EntityColor`], optional
The new color of the fish's body, or ``None`` to keep it unchanged. Defaults to ``None``.
pattern : Optional[:class:`~.TropicalFishPattern`], optional
The fish's new pattern, or ``None`` to keep it unchanged. Defaults to ``None``. (See TropicalFishPattern docs for options.)
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.set_tropical_fish_type(pattern_color=EntityColor.RED, body_color=EntityColor.BLACK, pattern=TropicalFishPattern.KOB)
# OR
Entity(EntityTarget.LAST_MOB).set_tropical_fish_type(pattern_color=EntityColor.RED, body_color=EntityColor.BLACK, pattern=TropicalFishPattern.KOB)
# if the last spawned mob is a tropical fish, then its characteristics are changed accordingly.
"""
args = Arguments([], tags=[
Tag(
"Pattern Color", option=EntityColor(pattern_color) if pattern_color is not None else "Don't Change",
action=EntityActionType.SET_TROP_FISH_TYPE, block=BlockType.ENTITY_ACTION
),
Tag(
"Body Color", option=EntityColor(body_color) if body_color is not None else "Don't Change",
action=EntityActionType.SET_TROP_FISH_TYPE, block=BlockType.ENTITY_ACTION
),
Tag(
"Pattern", option=TropicalFishPattern(pattern) if pattern is not None else "Don't Change",
action=EntityActionType.SET_TROP_FISH_TYPE, block=BlockType.ENTITY_ACTION
)
])
return EntityAction(
action=EntityActionType.SET_TROP_FISH_TYPE,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_villager_profession(
self, profession: typing.Optional[VillagerProfession] = VillagerProfession.NONE,
*, target: typing.Optional[EntityTarget] = None
):
"""Sets a villager's profession.
Parameters
----------
profession : Optional[:class:`~.VillagerProfession`], optional
The new villager's profession, or ``None`` / :attr:`~.VillagerProfession.NONE` for no profession.
Defaults to ``None``.
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.set_villager_profession(VillagerProfession.LIBRARIAN)
# OR
Entity(EntityTarget.LAST_MOB).set_villager_profession(VillagerProfession.LIBRARIAN)
# if the last spawned mob is a Villager, then its profession is set to Librarian.
"""
args = Arguments([], tags=[
Tag(
"Profession", option=VillagerProfession(profession) if profession is not None else "None",
action=EntityActionType.SET_VILLAGER_PROF, block=BlockType.ENTITY_ACTION
)
])
return EntityAction(
action=EntityActionType.SET_VILLAGER_PROF,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_villager_biome(
self, biome: VillagerBiome,
*, target: typing.Optional[EntityTarget] = None
):
"""Sets the biome type of a villager.
Parameters
----------
biome : :class:`~.VillagerBiome`
The villager's new biome type (see VillagerBiome docs for options).
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.set_villager_biome(VillagerBiome.DESERT)
# OR
Entity(EntityTarget.LAST_MOB).set_villager_biome(VillagerBiome.DESERT)
# if the last spawned mob is a villager, then its biome type is set to Desert.
"""
args = Arguments([], tags=[
Tag(
"Biome", option=VillagerBiome(biome), # default is Desert
action=EntityActionType.SET_VILLAGER_TYPE, block=BlockType.ENTITY_ACTION
)
])
return EntityAction(
action=EntityActionType.SET_VILLAGER_TYPE,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def set_wolf_angry(
self, is_angry: bool = True,
*, target: typing.Optional[EntityTarget] = None
):
"""Sets whether a wolf is angry.
Parameters
----------
is_angry : :class:`bool`, optional
Whether or not the wolf is angry. Defaults to ``True``.
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.set_wolf_angry(True)
# OR
Entity(EntityTarget.LAST_MOB).set_wolf_angry(True) # if the last spawned mob is a wolf, then it will be angry
"""
args = Arguments([], tags=[
Tag(
"Is Angry", option=bool(is_angry), # default is True
action=EntityActionType.SET_WOLF_ANGRY, block=BlockType.ENTITY_ACTION
)
])
return EntityAction(
action=EntityActionType.SET_WOLF_ANGRY,
args=args,
target=self._digest_target(target),
append_to_reader=True
)
def shear_sheep(self, *, target: typing.Optional[EntityTarget] = None):
"""Causes a sheep to be sheared.
Parameters
----------
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.shear_sheep()
# OR
Entity(EntityTarget.LAST_MOB).shear_sheep() # if the last spawned mob is a sheep, then it will be sheared.
"""
return EntityAction(
action=EntityActionType.SHEAR_SHEEP,
args=Arguments(),
target=self._digest_target(target),
append_to_reader=True
)
def sheep_eat(self, *, target: typing.Optional[EntityTarget] = None):
"""Causes a sheep to eat grass.
Parameters
----------
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_mob.sheep_eat()
# OR
Entity(EntityTarget.LAST_MOB).sheep_eat() # if the last spawned mob is a sheep, then it will eat grass.
"""
return EntityAction(
action=EntityActionType.SHEEP_EAT,
args=Arguments(),
target=self._digest_target(target),
append_to_reader=True
)
def show_name(self, *, target: typing.Optional[EntityTarget] = None):
"""Shows the name tag of the entity.
Parameters
----------
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current :class:`Entity` instance's target.
Defaults to ``None``.
Returns
-------
:class:`EntityAction`
The generated EntityAction instance.
Examples
--------
::
last_entity.show_name()
# OR
Entity(EntityTarget.LAST_ENTITY).show_name() # the last spawned entity's name tag is now shown.
"""
return EntityAction(
action=EntityActionType.SHOW_NAME,
args=Arguments(),
target=self._digest_target(target),
append_to_reader=True
)
def silence(self, *, target: typing.Optional[EntityTarget] = None):
"""Prevents the entity from making any sounds.
.. rank:: Noble
Parameters
----------
target : Optional[:class:`~.EntityTarget`], optional
The target of this :class:`~.EntityAction`, or None for the current | |
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Misc functions.
Mostly copy-paste from torchvision references or other public repos like DETR and DINO:
https://github.com/facebookresearch/detr/blob/master/util/misc.py
https://github.com/facebookresearch/dino/blob/main/utils.py
"""
import datetime
import logging
import os
import subprocess
import sys
import time
from collections import defaultdict, deque
import numpy as np
import torch
import torch.distributed as dist
from torch import nn
def get_logger(file_path_name):
    """Build a logger that writes both to *file_path_name* and to the terminal.

    Handlers are attached to the root logger, so repeated calls accumulate handlers.
    """
    root = logging.getLogger()
    root.setLevel("INFO")
    # minimal format: level and message only, no timestamp
    formatter = logging.Formatter("%(levelname)s:%(message)s", "")
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    console_handler.setLevel("INFO")
    file_handler = logging.FileHandler(file_path_name)
    file_handler.setFormatter(formatter)
    root.addHandler(console_handler)
    root.addHandler(file_handler)
    return root
def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs):
    """
    Re-start from checkpoint

    :param ckp_path: path to the checkpoint file; the function silently returns
        if it does not exist.
    :param run_variables: optional dict whose keys name run variables (e.g. epoch)
        to be refreshed in place from the checkpoint.
    :param kwargs: mapping of checkpoint key -> stateful object; each object's
        ``load_state_dict`` is called with the matching checkpoint entry.
    """
    if not os.path.isfile(ckp_path):
        return
    print("Found checkpoint at {}".format(ckp_path))
    # open checkpoint file
    checkpoint = torch.load(ckp_path, map_location="cpu")
    # key is what to look for in the checkpoint file
    # value is the object to load
    # example: {'state_dict': model}
    for key, value in kwargs.items():
        if key in checkpoint and value is not None:
            try:
                msg = value.load_state_dict(checkpoint[key], strict=False)
                print(
                    "=> loaded '{}' from checkpoint '{}' with msg {}".format(
                        key, ckp_path, msg
                    )
                )
            except TypeError:
                # some objects' load_state_dict (e.g. optimizers) do not accept
                # the `strict` keyword — retry without it
                try:
                    msg = value.load_state_dict(checkpoint[key])
                    print("=> loaded '{}' from checkpoint: '{}'".format(key, ckp_path))
                except ValueError:
                    print(
                        "=> failed to load '{}' from checkpoint: '{}'".format(
                            key, ckp_path
                        )
                    )
        else:
            print("=> key '{}' not found in checkpoint: '{}'".format(key, ckp_path))
    # reload variable important for the run
    if run_variables is not None:
        for var_name in run_variables:
            if var_name in checkpoint:
                run_variables[var_name] = checkpoint[var_name]
def bool_flag(s):
    """
    Parse boolean arguments from the command line.

    :param s: the raw command-line string (case-insensitive).
    :return: the parsed boolean.
    :raises argparse.ArgumentTypeError: if *s* is not a recognized boolean spelling.
    """
    # bug fix: argparse is not imported at module level in this file, so the
    # error path below raised NameError instead of ArgumentTypeError
    import argparse

    FALSY_STRINGS = {"off", "false", "0"}
    TRUTHY_STRINGS = {"on", "true", "1"}
    value = s.lower()
    if value in FALSY_STRINGS:
        return False
    elif value in TRUTHY_STRINGS:
        return True
    else:
        raise argparse.ArgumentTypeError("invalid value for a boolean flag")
def fix_random_seeds(seed=31):
    """Make runs reproducible by seeding the torch (CPU + all CUDA devices) and NumPy RNGs.

    :param seed: the seed applied to every generator. Defaults to 31.
    """
    # seeders applied in the original order: torch CPU, torch CUDA, numpy
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed):
        seeder(seed)
def has_batchnorms(model):
    """
    Return ``True`` if *model* contains any batch-normalization layer.

    :param model: an ``nn.Module`` whose submodules are inspected recursively.
    :return: ``True`` when any submodule is a (Sync)BatchNorm1d/2d/3d layer.
    """
    bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
    # modules() suffices: the names from named_modules() were never used
    return any(isinstance(module, bn_types) for module in model.modules())
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.

    The ``fmt`` template may reference any of: ``median``, ``avg``,
    ``global_avg``, ``max``, ``value`` (see ``__str__``).
    """
    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.6f} ({global_avg:.6f})"
        # sliding window of the most recent raw values
        self.deque = deque(maxlen=window_size)
        # running sum and count over the WHOLE series (not just the window)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt
    def update(self, value, n=1):
        """Append *value* to the window and add it, weighted by *n*, to the totals."""
        # note: the deque stores a single entry regardless of n; only
        # count/total are weighted
        self.deque.append(value)
        self.count += n
        self.total += value * n
    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        # relies on module-level is_dist_avail_and_initialized() (defined
        # elsewhere in this file); no-op outside distributed runs.
        if not is_dist_avail_and_initialized():
            return
        # all-reduce count/total across ranks; tensor is created on "cuda",
        # so this path requires a CUDA-capable backend
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]
    @property
    def median(self):
        # median over the current window only
        d = torch.tensor(list(self.deque))
        return d.median().item()
    @property
    def avg(self):
        # mean over the current window only
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()
    @property
    def global_avg(self):
        # mean over the whole series; raises ZeroDivisionError before first update
        return self.total / self.count
    @property
    def max(self):
        return max(self.deque)
    @property
    def value(self):
        # most recently recorded value
        return self.deque[-1]
    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )
class MetricLogger(object):
    """Collect named SmoothedValue meters and pretty-print training progress."""

    def __init__(self, delimiter="\t"):
        # defaultdict: a meter is created lazily on first update of a new name
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Feed one scalar per keyword into the meter of the same name.

        Tensors are converted with .item(); values must be float or int.
        """
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails; exposes meters
        # as attributes (logger.loss -> logger.meters["loss"]).
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError(
            "'{}' object has no attribute '{}'".format(type(self).__name__, attr)
        )

    def __str__(self):
        # "name1: <meter1><delimiter>name2: <meter2>..." for all meters
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append("{}: {}".format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        """All-reduce every meter's count/total across distributed ranks."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        # Register a meter explicitly (e.g. with a custom window or format).
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from *iterable*, printing progress every *print_freq*
        iterations (ETA, meter values, per-iteration and data-loading times,
        and peak GPU memory when CUDA is available), plus a final summary.

        Note: requires len(iterable); an empty iterable would divide by zero
        in the final summary.
        """
        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt="{avg:.6f}")  # full iteration wall time
        data_time = SmoothedValue(fmt="{avg:.6f}")  # time spent waiting on data
        # pad the iteration counter to the width of len(iterable)
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        if torch.cuda.is_available():
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                    "max mem: {memory:.0f}",
                ]
            )
        else:
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                ]
            )
        MB = 1024.0 * 1024.0  # bytes per megabyte, for the memory readout
        for obj in iterable:
            # time between iterations == time the data loader kept us waiting
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                            memory=torch.cuda.max_memory_allocated() / MB,
                        )
                    )
                else:
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                        )
                    )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print(
            "{} Total time: {} ({:.6f} s / it)".format(
                header, total_time_str, total_time / len(iterable)
            )
        )
def get_sha():
    """Return a one-line description of this source tree's git state.

    Format: "sha: <sha>, status: <clean|has uncommited changes>, branch: <name>";
    fields fall back to "N/A"/"clean" when git information is unavailable.
    """
    cwd = os.path.dirname(os.path.abspath(__file__))

    def _run(command):
        # run a git command in this file's directory and return stripped stdout
        return subprocess.check_output(command, cwd=cwd).decode("ascii").strip()

    sha, diff, branch = "N/A", "clean", "N/A"
    try:
        sha = _run(["git", "rev-parse", "HEAD"])
        subprocess.check_output(["git", "diff"], cwd=cwd)
        diff = "has uncommited changes" if _run(["git", "diff-index", "HEAD"]) else "clean"
        branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"])
    except Exception:
        # not a git checkout, or git missing: keep the defaults
        pass
    return f"sha: {sha}, status: {diff}, branch: {branch}"
def is_dist_avail_and_initialized():
    """Return True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Return the number of distributed processes, or 1 when not distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Return this process's distributed rank, or 0 when not distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """Return True when running on the master (rank-0) process."""
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    """Forward to torch.save, but only on the master process (no-op elsewhere)."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def setup_for_distributed(is_master):
    """Monkey-patch builtins.print so non-master processes stay silent.

    Passing ``force=True`` to print restores output on any process.
    """
    import builtins

    original_print = builtins.print

    def print(*args, **kwargs):
        # pop 'force' unconditionally so it never leaks to the real print
        if kwargs.pop("force", False) or is_master:
            original_print(*args, **kwargs)

    builtins.print = print
def init_distributed_ddpjob(args=None):
    """Initialize a DDP job's process group and return (world_size, rank).

    If the group is already initialized, its sizes are returned unchanged.
    On initialization failure the function degrades to single-process mode
    (world_size=1, rank=0) instead of crashing.  When *args* is given, its
    ``world_size``, ``rank`` and ``gpu`` attributes are updated.
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size(), dist.get_rank()
    try:
        os.environ["MASTER_PORT"] = "40101"
        torch.distributed.init_process_group(backend="nccl")
        world_size = dist.get_world_size()
        rank = dist.get_rank()
    except Exception:
        # bug fix: the original fell through to dist.get_world_size() after a
        # failed init (raising), and read args.rank before assigning it.
        print("distributed training not available")
        world_size, rank = 1, 0
    if args is not None:
        args.world_size, args.rank = world_size, rank
        # NOTE(review): original set args.gpu from the pre-existing args.rank;
        # using the freshly computed rank here -- confirm the intended mapping.
        args.gpu = rank
    return world_size, rank
def init_distributed_mode(args):
    """Initialize torch.distributed from the launch environment.

    Supports three launch styles: torch.distributed.launch (RANK/WORLD_SIZE
    env vars), SLURM via submitit (SLURM_PROCID), and a plain single-GPU run.
    Mutates *args* (rank, world_size, gpu), joins the "nccl" process group,
    pins the CUDA device, and silences print on non-master ranks.
    Exits the process when no GPU is available.
    """
    # launched with torch.distributed.launch
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = int(os.environ.get("LOCAL_RANK", 0))
        print(
            "args.rank",
            args.rank,
            "args.world_size",
            args.world_size,
            "args.gpu",
            args.gpu,
        )
        print("get_rank()", get_rank())
    # launched with submitit on a slurm cluster
    elif "SLURM_PROCID" in os.environ:
        args.rank = int(os.environ["SLURM_PROCID"])
        # one GPU per local process
        args.gpu = args.rank % torch.cuda.device_count()
        # NOTE(review): args.world_size is not set in this branch; presumably
        # the caller provides it -- confirm.
    # launched naively with `python main_dino.py`
    # we manually add MASTER_ADDR and MASTER_PORT to env variables
    elif torch.cuda.is_available():
        print("Will run the code on one GPU.")
        args.rank, args.gpu, args.world_size = 0, 0, 1
        os.environ["MASTER_ADDR"] = "127.0.0.1"
        os.environ["MASTER_PORT"] = "2950"
    else:
        print("Does not support training without GPU.")
        sys.exit(1)
    # NOTE(review): this unconditionally overrides MASTER_PORT, including the
    # "2950" set just above and any launcher-provided port -- confirm intended.
    os.environ["MASTER_PORT"] = "6542"
    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=args.world_size,
        rank=args.rank,
    )
    torch.cuda.set_device(args.gpu)
    print(
        "| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True
    )
    # make sure every rank reached this point before continuing
    dist.barrier()
    setup_for_distributed(args.rank == 0)
def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (percent) of *output* logits against *target* labels.

    Returns one scalar tensor per requested k, in the order given by *topk*.
    """
    batch_size = target.size(0)
    # predictions sorted by descending score, one column per sample
    _, pred = output.topk(max(topk), 1, True, True)
    pred = pred.t()
    hits = pred.eq(target.reshape(1, -1).expand_as(pred))
    return [hits[:k].reshape(-1).float().sum(0) * 100.0 / batch_size for k in topk]
def multi_scale(samples, model):
    """Average the model's kNN features over three input scales and
    L2-normalize the result.

    Scales used: 1, 1/sqrt(2), 1/2 (bilinear downsampling for the latter two).
    """
    scales = [1, 1 / 2 ** (1 / 2), 1 / 2]
    v = None
    for scale in scales:
        if scale == 1:
            scaled = samples.clone()
        else:
            scaled = nn.functional.interpolate(
                samples, scale_factor=scale, mode="bilinear", align_corners=False
            )
        feats = model.forward_knn(scaled).clone()
        v = feats if v is None else v + feats
    v /= 3
    v /= v.norm()
    return v
class AllGather(torch.autograd.Function):
"""
gather the variable on different nodes toghther
"""
@staticmethod
def forward(ctx, x):
if (
dist.is_available()
and dist.is_initialized()
and (dist.get_world_size() > 1)
):
outputs = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(outputs, x)
return torch.cat(outputs, 0)
return | |
= BGAPIEvent()
wifi_evt_dfu_boot = BGAPIEvent()
wifi_evt_system_boot = BGAPIEvent()
wifi_evt_system_state = BGAPIEvent()
wifi_evt_system_sw_exception = BGAPIEvent()
wifi_evt_system_power_saving_state = BGAPIEvent()
wifi_evt_config_mac_address = BGAPIEvent()
wifi_evt_sme_wifi_is_on = BGAPIEvent()
wifi_evt_sme_wifi_is_off = BGAPIEvent()
wifi_evt_sme_scan_result = BGAPIEvent()
wifi_evt_sme_scan_result_drop = BGAPIEvent()
wifi_evt_sme_scanned = BGAPIEvent()
wifi_evt_sme_connected = BGAPIEvent()
wifi_evt_sme_disconnected = BGAPIEvent()
wifi_evt_sme_interface_status = BGAPIEvent()
wifi_evt_sme_connect_failed = BGAPIEvent()
wifi_evt_sme_connect_retry = BGAPIEvent()
wifi_evt_tcpip_configuration = BGAPIEvent()
wifi_evt_tcpip_dns_configuration = BGAPIEvent()
wifi_evt_tcpip_endpoint_status = BGAPIEvent()
wifi_evt_tcpip_dns_gethostbyname_result = BGAPIEvent()
wifi_evt_endpoint_syntax_error = BGAPIEvent()
wifi_evt_endpoint_data = BGAPIEvent()
wifi_evt_endpoint_status = BGAPIEvent()
wifi_evt_endpoint_closing = BGAPIEvent()
wifi_evt_hardware_soft_timer = BGAPIEvent()
wifi_evt_hardware_change_notification = BGAPIEvent()
wifi_evt_hardware_external_interrupt = BGAPIEvent()
wifi_evt_flash_ps_key = BGAPIEvent()
on_busy = BGAPIEvent()
on_idle = BGAPIEvent()
on_timeout = BGAPIEvent()
on_before_tx_command = BGAPIEvent()
on_tx_command_complete = BGAPIEvent()
bgapi_rx_buffer = b""
bgapi_rx_expected_length = 0
busy = False
packet_mode = False
debug = False
def send_command(self, ser, packet):
if self.packet_mode: packet = chr(len(packet) & 0xFF) + packet
if self.debug: print('=>[ ' + ' '.join(['%02X' % b for b in packet]) + ' ]')
self.on_before_tx_command()
self.busy = True
self.on_busy()
ser.write(packet)
self.on_tx_command_complete()
def check_activity(self, ser, timeout=0):
if timeout > 0:
ser.timeout = timeout
while 1:
x = ser.read()
if len(x) > 0:
self.parse(x)
else: # timeout
self.busy = False
self.on_idle()
self.on_timeout()
if not self.busy: # finished
break
else:
while ser.inWaiting(): self.parse(ser.read())
return self.busy
def parse(self, barray):
b=barray[0]
if len(self.bgapi_rx_buffer) == 0 and (b == 0x00 or b == 0x80 or b == 0x08 or b == 0x88):
self.bgapi_rx_buffer+=bytes([b])
elif len(self.bgapi_rx_buffer) == 1:
self.bgapi_rx_buffer+=bytes([b])
self.bgapi_rx_expected_length = 4 + (self.bgapi_rx_buffer[0] & 0x07) + self.bgapi_rx_buffer[1]
elif len(self.bgapi_rx_buffer) > 1:
self.bgapi_rx_buffer+=bytes([b])
"""
BGAPI packet structure (as of 2012-11-07):
Byte 0:
[7] - 1 bit, Message Type (MT) 0 = Command/Response, 1 = Event
[6:3] - 4 bits, Technology Type (TT) 0000 = Bluetooth 4.0 single mode, 0001 = Wi-Fi
[2:0] - 3 bits, Length High (LH) Payload length (high bits)
Byte 1: 8 bits, Length Low (LL) Payload length (low bits)
Byte 2: 8 bits, Class ID (CID) Command class ID
Byte 3: 8 bits, Command ID (CMD) Command ID
Bytes 4-n: 0 - 2048 Bytes, Payload (PL) Up to 2048 bytes of payload
"""
#print'%02X: %d, %d' % (b, len(self.bgapi_rx_buffer), self.bgapi_rx_expected_length)
if self.bgapi_rx_expected_length > 0 and len(self.bgapi_rx_buffer) == self.bgapi_rx_expected_length:
if self.debug: print('<=[ ' + ' '.join(['%02X' % b for b in self.bgapi_rx_buffer ]) + ' ]')
packet_type, payload_length, packet_class, packet_command = self.bgapi_rx_buffer[:4]
self.bgapi_rx_payload = self.bgapi_rx_buffer[4:]
self.bgapi_rx_buffer = b""
if packet_type & 0x88 == 0x00:
# 0x00 = BLE response packet
if packet_class == 0:
if packet_command == 0: # ble_rsp_system_reset
self.ble_rsp_system_reset({ })
self.busy = False
self.on_idle()
elif packet_command == 1: # ble_rsp_system_hello
self.ble_rsp_system_hello({ })
elif packet_command == 2: # ble_rsp_system_address_get
address = struct.unpack('<6s', self.bgapi_rx_payload[:6])[0]
address = address
self.ble_rsp_system_address_get({ 'address': address })
elif packet_command == 3: # ble_rsp_system_reg_write
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_system_reg_write({ 'result': result })
elif packet_command == 4: # ble_rsp_system_reg_read
address, value = struct.unpack('<HB', self.bgapi_rx_payload[:3])
self.ble_rsp_system_reg_read({ 'address': address, 'value': value })
elif packet_command == 5: # ble_rsp_system_get_counters
txok, txretry, rxok, rxfail, mbuf = struct.unpack('<BBBBB', self.bgapi_rx_payload[:5])
self.ble_rsp_system_get_counters({ 'txok': txok, 'txretry': txretry, 'rxok': rxok, 'rxfail': rxfail, 'mbuf': mbuf })
elif packet_command == 6: # ble_rsp_system_get_connections
maxconn = struct.unpack('<B', self.bgapi_rx_payload[:1])[0]
self.ble_rsp_system_get_connections({ 'maxconn': maxconn })
elif packet_command == 7: # ble_rsp_system_read_memory
address, data_len = struct.unpack('<IB', self.bgapi_rx_payload[:5])
data_data = self.bgapi_rx_payload[5:]
self.ble_rsp_system_read_memory({ 'address': address, 'data': data_data })
elif packet_command == 8: # ble_rsp_system_get_info
major, minor, patch, build, ll_version, protocol_version, hw = struct.unpack('<HHHHHBB', self.bgapi_rx_payload[:12])
self.ble_rsp_system_get_info({ 'major': major, 'minor': minor, 'patch': patch, 'build': build, 'll_version': ll_version, 'protocol_version': protocol_version, 'hw': hw })
elif packet_command == 9: # ble_rsp_system_endpoint_tx
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_system_endpoint_tx({ 'result': result })
elif packet_command == 10: # ble_rsp_system_whitelist_append
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_system_whitelist_append({ 'result': result })
elif packet_command == 11: # ble_rsp_system_whitelist_remove
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_system_whitelist_remove({ 'result': result })
elif packet_command == 12: # ble_rsp_system_whitelist_clear
self.ble_rsp_system_whitelist_clear({ })
elif packet_command == 13: # ble_rsp_system_endpoint_rx
result, data_len = struct.unpack('<HB', self.bgapi_rx_payload[:3])
data_data = self.bgapi_rx_payload[3:]
self.ble_rsp_system_endpoint_rx({ 'result': result, 'data': data_data })
elif packet_command == 14: # ble_rsp_system_endpoint_set_watermarks
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_system_endpoint_set_watermarks({ 'result': result })
elif packet_class == 1:
if packet_command == 0: # ble_rsp_flash_ps_defrag
self.ble_rsp_flash_ps_defrag({ })
elif packet_command == 1: # ble_rsp_flash_ps_dump
self.ble_rsp_flash_ps_dump({ })
elif packet_command == 2: # ble_rsp_flash_ps_erase_all
self.ble_rsp_flash_ps_erase_all({ })
elif packet_command == 3: # ble_rsp_flash_ps_save
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_flash_ps_save({ 'result': result })
elif packet_command == 4: # ble_rsp_flash_ps_load
result, value_len = struct.unpack('<HB', self.bgapi_rx_payload[:3])
value_data = self.bgapi_rx_payload[3:]
self.ble_rsp_flash_ps_load({ 'result': result, 'value': value_data })
elif packet_command == 5: # ble_rsp_flash_ps_erase
self.ble_rsp_flash_ps_erase({ })
elif packet_command == 6: # ble_rsp_flash_erase_page
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_flash_erase_page({ 'result': result })
elif packet_command == 7: # ble_rsp_flash_write_words
self.ble_rsp_flash_write_words({ })
elif packet_class == 2:
if packet_command == 0: # ble_rsp_attributes_write
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_attributes_write({ 'result': result })
elif packet_command == 1: # ble_rsp_attributes_read
handle, offset, result, value_len = struct.unpack('<HHHB', self.bgapi_rx_payload[:7])
value_data = self.bgapi_rx_payload[7:]
self.ble_rsp_attributes_read({ 'handle': handle, 'offset': offset, 'result': result, 'value': value_data })
elif packet_command == 2: # ble_rsp_attributes_read_type
handle, result, value_len = struct.unpack('<HHB', self.bgapi_rx_payload[:5])
value_data = self.bgapi_rx_payload[5:]
self.ble_rsp_attributes_read_type({ 'handle': handle, 'result': result, 'value': value_data })
elif packet_command == 3: # ble_rsp_attributes_user_read_response
self.ble_rsp_attributes_user_read_response({ })
elif packet_command == 4: # ble_rsp_attributes_user_write_response
self.ble_rsp_attributes_user_write_response({ })
elif packet_class == 3:
if packet_command == 0: # ble_rsp_connection_disconnect
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_connection_disconnect({ 'connection': connection, 'result': result })
elif packet_command == 1: # ble_rsp_connection_get_rssi
connection, rssi = struct.unpack('<Bb', self.bgapi_rx_payload[:2])
self.ble_rsp_connection_get_rssi({ 'connection': connection, 'rssi': rssi })
elif packet_command == 2: # ble_rsp_connection_update
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_connection_update({ 'connection': connection, 'result': result })
elif packet_command == 3: # ble_rsp_connection_version_update
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_connection_version_update({ 'connection': connection, 'result': result })
elif packet_command == 4: # ble_rsp_connection_channel_map_get
connection, map_len = struct.unpack('<BB', self.bgapi_rx_payload[:2])
map_data = self.bgapi_rx_payload[2:]
self.ble_rsp_connection_channel_map_get({ 'connection': connection, 'map': map_data })
elif packet_command == 5: # ble_rsp_connection_channel_map_set
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_connection_channel_map_set({ 'connection': connection, 'result': result })
elif packet_command == 6: # ble_rsp_connection_features_get
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_connection_features_get({ 'connection': connection, 'result': result })
elif packet_command == 7: # ble_rsp_connection_get_status
connection = struct.unpack('<B', self.bgapi_rx_payload[:1])[0]
self.ble_rsp_connection_get_status({ 'connection': connection })
elif packet_command == 8: # ble_rsp_connection_raw_tx
connection = struct.unpack('<B', self.bgapi_rx_payload[:1])[0]
self.ble_rsp_connection_raw_tx({ 'connection': connection })
elif packet_class == 4:
if packet_command == 0: # ble_rsp_attclient_find_by_type_value
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_find_by_type_value({ 'connection': connection, 'result': result })
elif packet_command == 1: # ble_rsp_attclient_read_by_group_type
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_read_by_group_type({ 'connection': connection, 'result': result })
elif packet_command == 2: # ble_rsp_attclient_read_by_type
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_read_by_type({ 'connection': connection, 'result': result })
elif packet_command == 3: # ble_rsp_attclient_find_information
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_find_information({ 'connection': connection, 'result': result })
elif packet_command == 4: # ble_rsp_attclient_read_by_handle
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_read_by_handle({ 'connection': connection, 'result': result })
elif packet_command == 5: # ble_rsp_attclient_attribute_write
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_attribute_write({ 'connection': connection, 'result': result })
elif packet_command == 6: # ble_rsp_attclient_write_command
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_write_command({ 'connection': connection, 'result': result })
elif packet_command == 7: # ble_rsp_attclient_indicate_confirm
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_attclient_indicate_confirm({ 'result': result })
elif packet_command == 8: # ble_rsp_attclient_read_long
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_read_long({ 'connection': connection, 'result': result })
elif packet_command == 9: # ble_rsp_attclient_prepare_write
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_prepare_write({ 'connection': connection, 'result': result })
elif packet_command == 10: # ble_rsp_attclient_execute_write
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_execute_write({ 'connection': connection, 'result': result })
elif packet_command == 11: # ble_rsp_attclient_read_multiple
connection, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_attclient_read_multiple({ 'connection': connection, 'result': result })
elif packet_class == 5:
if packet_command == 0: # ble_rsp_sm_encrypt_start
handle, result = struct.unpack('<BH', self.bgapi_rx_payload[:3])
self.ble_rsp_sm_encrypt_start({ 'handle': handle, 'result': result })
elif packet_command == 1: # ble_rsp_sm_set_bondable_mode
self.ble_rsp_sm_set_bondable_mode({ })
elif packet_command == 2: # ble_rsp_sm_delete_bonding
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_sm_delete_bonding({ 'result': result })
elif packet_command == 3: # ble_rsp_sm_set_parameters
self.ble_rsp_sm_set_parameters({ })
elif packet_command == 4: # ble_rsp_sm_passkey_entry
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_sm_passkey_entry({ 'result': result })
elif packet_command == 5: # ble_rsp_sm_get_bonds
bonds = struct.unpack('<B', self.bgapi_rx_payload[:1])[0]
self.ble_rsp_sm_get_bonds({ 'bonds': bonds })
elif packet_command == 6: # ble_rsp_sm_set_oob_data
self.ble_rsp_sm_set_oob_data({ })
elif packet_class == 6:
if packet_command == 0: # ble_rsp_gap_set_privacy_flags
self.ble_rsp_gap_set_privacy_flags({ })
elif packet_command == 1: # ble_rsp_gap_set_mode
result = struct.unpack('<H', self.bgapi_rx_payload[:2])[0]
self.ble_rsp_gap_set_mode({ 'result': | |
# Code/GraphMol/FMCS/Wrap/testFMCS.py
import unittest
from rdkit import Chem
from rdkit.Chem import rdFMCS
class BondMatchOrderMatrix:
    """Symmetric lookup table of which RDKit bond types are considered equal.

    Identical types always match; UNSPECIFIED and ZERO match everything.
    With ignoreAromatization, an aromatic bond also matches single/double,
    and each "...-and-a-half" order matches its integral counterpart.
    """

    def __init__(self, ignoreAromatization):
        # square boolean matrix indexed by Chem.BondType integer values
        self.MatchMatrix = [[False]*(Chem.BondType.ZERO + 1)
                            for i in range(Chem.BondType.ZERO + 1)]
        for i in range(Chem.BondType.ZERO + 1):
            # fill cells of the same and unspecified type
            self.MatchMatrix[i][i] = True
            self.MatchMatrix[Chem.BondType.UNSPECIFIED][i] = \
                self.MatchMatrix[i][Chem.BondType.UNSPECIFIED] = True
            self.MatchMatrix[Chem.BondType.ZERO][i] = \
                self.MatchMatrix[i][Chem.BondType.ZERO] = True
        if ignoreAromatization:
            # aromatic matches either kekulized order
            self.MatchMatrix[Chem.BondType.SINGLE][Chem.BondType.AROMATIC] = \
                self.MatchMatrix[Chem.BondType.AROMATIC][Chem.BondType.SINGLE] = True
            self.MatchMatrix[Chem.BondType.DOUBLE][Chem.BondType.AROMATIC] = \
                self.MatchMatrix[Chem.BondType.AROMATIC][Chem.BondType.DOUBLE] = True
        # fractional orders match the nearest-lower integral order
        self.MatchMatrix[Chem.BondType.SINGLE][Chem.BondType.ONEANDAHALF] = \
            self.MatchMatrix[Chem.BondType.ONEANDAHALF][Chem.BondType.SINGLE] = True
        self.MatchMatrix[Chem.BondType.DOUBLE][Chem.BondType.TWOANDAHALF] = \
            self.MatchMatrix[Chem.BondType.TWOANDAHALF][Chem.BondType.DOUBLE] = True
        self.MatchMatrix[Chem.BondType.TRIPLE][Chem.BondType.THREEANDAHALF] = \
            self.MatchMatrix[Chem.BondType.THREEANDAHALF][Chem.BondType.TRIPLE] = True
        self.MatchMatrix[Chem.BondType.QUADRUPLE][Chem.BondType.FOURANDAHALF] = \
            self.MatchMatrix[Chem.BondType.FOURANDAHALF][Chem.BondType.QUADRUPLE] = True
        self.MatchMatrix[Chem.BondType.QUINTUPLE][Chem.BondType.FIVEANDAHALF] = \
            self.MatchMatrix[Chem.BondType.FIVEANDAHALF][Chem.BondType.QUINTUPLE] = True

    def isEqual(self, i, j):
        """Return True when bond types i and j are considered equivalent."""
        return self.MatchMatrix[i][j]
class CompareAny(rdFMCS.MCSAtomCompare):
    """Atom comparator that accepts any element pairing, subject to the
    chirality / formal-charge / ring-membership flags in the parameters."""

    def __call__(self, p, mol1, atom1, mol2, atom2):
        if p.MatchChiralTag and not self.CheckAtomChirality(p, mol1, atom1, mol2, atom2):
            return False
        if p.MatchFormalCharge and not self.CheckAtomCharge(p, mol1, atom1, mol2, atom2):
            return False
        if p.RingMatchesRingOnly:
            return self.CheckAtomRingMatch(p, mol1, atom1, mol2, atom2)
        return True
class CompareAnyHeavyAtom(CompareAny):
    """Like CompareAny, but hydrogen only matches hydrogen; any heavy atom
    matches any other heavy atom (still honoring the base-class flags)."""

    def __call__(self, p, mol1, atom1, mol2, atom2):
        z1 = mol1.GetAtomWithIdx(atom1).GetAtomicNum()
        z2 = mol2.GetAtomWithIdx(atom2).GetAtomicNum()
        # Any atom, including H, matches another atom of the same type,
        # according to the other flags
        if z1 == z2 or (z1 > 1 and z2 > 1):
            return CompareAny.__call__(self, p, mol1, atom1, mol2, atom2)
        return False
class CompareElements(rdFMCS.MCSAtomCompare):
    """Atom comparator requiring identical elements, plus the optional
    valence / chirality / charge / ring checks from the parameters."""

    def __call__(self, p, mol1, atom1, mol2, atom2):
        a1 = mol1.GetAtomWithIdx(atom1)
        a2 = mol2.GetAtomWithIdx(atom2)
        if a1.GetAtomicNum() != a2.GetAtomicNum():
            return False
        if p.MatchValences and a1.GetTotalValence() != a2.GetTotalValence():
            return False
        if p.MatchChiralTag and not self.CheckAtomChirality(p, mol1, atom1, mol2, atom2):
            return False
        if p.MatchFormalCharge and not self.CheckAtomCharge(p, mol1, atom1, mol2, atom2):
            return False
        if p.RingMatchesRingOnly:
            return self.CheckAtomRingMatch(p, mol1, atom1, mol2, atom2)
        return True
class CompareIsotopes(rdFMCS.MCSAtomCompare):
    """Atom comparator requiring identical isotope labels (element is NOT
    compared), plus the optional chirality / charge / ring checks."""

    def __call__(self, p, mol1, atom1, mol2, atom2):
        a1 = mol1.GetAtomWithIdx(atom1)
        a2 = mol2.GetAtomWithIdx(atom2)
        if a1.GetIsotope() != a2.GetIsotope():
            return False
        if p.MatchChiralTag and not self.CheckAtomChirality(p, mol1, atom1, mol2, atom2):
            return False
        if p.MatchFormalCharge and not self.CheckAtomCharge(p, mol1, atom1, mol2, atom2):
            return False
        if p.RingMatchesRingOnly:
            return self.CheckAtomRingMatch(p, mol1, atom1, mol2, atom2)
        return True
class CompareOrder(rdFMCS.MCSBondCompare):
    """Bond comparator using a shared BondMatchOrderMatrix built with
    aromatization ignored, plus the optional stereo / ring checks."""

    match = BondMatchOrderMatrix(True)  # ignore Aromatization

    def __call__(self, p, mol1, bond1, mol2, bond2):
        t1 = mol1.GetBondWithIdx(bond1).GetBondType()
        t2 = mol2.GetBondWithIdx(bond2).GetBondType()
        if not self.match.isEqual(t1, t2):
            return False
        if p.MatchStereo and not self.CheckBondStereo(p, mol1, bond1, mol2, bond2):
            return False
        if p.RingMatchesRingOnly:
            return self.CheckBondRingMatch(p, mol1, bond1, mol2, bond2)
        return True
class AtomCompareCompareIsInt(rdFMCS.MCSAtomCompare):
    # Deliberately broken comparator: __call__ is an int, not a callable.
    # Used by the tests to verify FindMCS rejects such typers.
    __call__ = 1
class AtomCompareNoCompare(rdFMCS.MCSAtomCompare):
    # Deliberately incomplete comparator: no __call__ override at all.
    # Used by the tests to verify FindMCS rejects such typers.
    pass
class AtomCompareUserData(rdFMCS.MCSAtomCompare):
    """Element comparator with a runtime switch that additionally lets any
    two non-carbon heteroatoms match each other."""

    def __init__(self):
        super().__init__()
        self._matchAnyHet = False  # when True, heteroatom matches heteroatom

    def setMatchAnyHet(self, v):
        self._matchAnyHet = v

    def __call__(self, p, mol1, atom1, mol2, atom2):
        a1 = mol1.GetAtomWithIdx(atom1)
        a2 = mol2.GetAtomWithIdx(atom2)
        if a1.GetAtomicNum() != a2.GetAtomicNum():
            # different elements only pass when the het-het switch is on
            # and neither atom is carbon
            if not self._matchAnyHet or a1.GetAtomicNum() == 6 or a2.GetAtomicNum() == 6:
                return False
        if p.MatchValences and a1.GetTotalValence() != a2.GetTotalValence():
            return False
        if p.MatchChiralTag and not self.CheckAtomChirality(p, mol1, atom1, mol2, atom2):
            return False
        if p.MatchFormalCharge and not self.CheckAtomCharge(p, mol1, atom1, mol2, atom2):
            return False
        if p.RingMatchesRingOnly:
            return self.CheckAtomRingMatch(p, mol1, atom1, mol2, atom2)
        return True
class BondCompareCompareIsInt(rdFMCS.MCSBondCompare):
    # Deliberately broken comparator: __call__ is an int, not a callable.
    # Used by the tests to verify FindMCS rejects such typers.
    __call__ = 1
class BondCompareNoCompare(rdFMCS.MCSBondCompare):
    # Deliberately incomplete comparator: no __call__ override at all.
    # Used by the tests to verify FindMCS rejects such typers.
    pass
class BondCompareUserData(rdFMCS.MCSBondCompare):
    """Bond comparator whose order matrix is configured at runtime via
    setIgnoreAromatization(); match stays None until then."""

    def __init__(self):
        super().__init__()
        self.match = None  # BondMatchOrderMatrix, set by setIgnoreAromatization

    def setIgnoreAromatization(self, v):
        self.match = BondMatchOrderMatrix(v)

    def __call__(self, p, mol1, bond1, mol2, bond2):
        t1 = mol1.GetBondWithIdx(bond1).GetBondType()
        t2 = mol2.GetBondWithIdx(bond2).GetBondType()
        if not self.match.isEqual(t1, t2):
            return False
        if p.MatchStereo and not self.CheckBondStereo(p, mol1, bond1, mol2, bond2):
            return False
        if p.RingMatchesRingOnly:
            return self.CheckBondRingMatch(p, mol1, bond1, mol2, bond2)
        return True
class ProgressCallbackCallbackIsInt(rdFMCS.MCSProgress):
    # Deliberately broken progress callback: __call__ is an int.
    # Used by the tests to verify FindMCS rejects such callbacks.
    __call__ = 1
class ProgressCallbackNoCallback(rdFMCS.MCSProgress):
    # Deliberately incomplete progress callback: no __call__ override.
    # Used by the tests to verify FindMCS rejects such callbacks.
    pass
class ProgressCallback(rdFMCS.MCSProgress):
    """Progress callback that validates the stat/params objects FindMCS
    passes in, then aborts the search after three invocations."""

    def __init__(self, parent):
        super().__init__()
        self.parent = parent    # the unittest.TestCase whose asserts we use
        self.callCount = 0      # number of times FindMCS has invoked us

    def __call__(self, stat, params):
        self.callCount += 1
        self.parent.assertTrue(isinstance(stat, rdFMCS.MCSProgressData))
        self.parent.assertTrue(hasattr(stat, "numAtoms"))
        self.parent.assertTrue(isinstance(stat.numAtoms, int))
        self.parent.assertTrue(hasattr(stat, "numBonds"))
        self.parent.assertTrue(isinstance(stat.numBonds, int))
        self.parent.assertTrue(hasattr(stat, "seedProcessed"))
        self.parent.assertTrue(isinstance(stat.seedProcessed, int))
        self.parent.assertTrue(isinstance(params, rdFMCS.MCSParameters))
        self.parent.assertTrue(isinstance(params.AtomTyper, rdFMCS.MCSAtomCompare))
        # NOTE(review): AtomTyper is checked against MCSAtomCompare but
        # BondTyper against rdFMCS.BondCompare -- confirm the asymmetry
        # is intentional (vs. MCSBondCompare).
        self.parent.assertTrue(isinstance(params.BondTyper, rdFMCS.BondCompare))
        self.parent.assertEqual(params.ProgressCallback, self)
        # returning False aborts the MCS search
        return (self.callCount < 3)
class Common:
@staticmethod
def getParams(**kwargs):
params = rdFMCS.MCSParameters()
for kw in ("AtomTyper", "BondTyper"):
v = kwargs.get(kw, None)
if v is not None:
v_instance = v()
setattr(params, kw, v_instance)
return params
    @staticmethod
    def test1(self, **kwargs):
        """Shared MCS test over a CHEMBL pyridine series.

        *self* is the calling unittest.TestCase (passed explicitly since
        this is a staticmethod helper); kwargs may supply AtomTyper /
        BondTyper classes for parameter-based invocation.
        """
        smis = (
            "Cc1nc(CN(C(C)c2ncccc2)CCCCN)ccc1 CHEMBL1682991",  # -- QUERY
            "Cc1ccc(CN(C(C)c2ccccn2)CCCCN)nc1 CHEMBL1682990",
            "Cc1cccnc1CN(C(C)c1ccccn1)CCCCN CHEMBL1682998",
            "CC(N(CCCCN)Cc1c(N)cccn1)c1ccccn1 CHEMBL1682987",
            "Cc1cc(C)c(CN(C(C)c2ccccn2)CCCCN)nc1 CHEMBL1682992",
            "Cc1cc(C(C)N(CCCCN)Cc2c(C)cccn2)ncc1 CHEMBL1682993",
            "Cc1nc(C(C)N(CCCCN)Cc2nc3c([nH]2)cccc3)ccc1 CHEMBL1682878",
            "CC(c1ncccc1)N(CCCCN)Cc1nc2c([nH]1)cccc2 CHEMBL1682867",
            "CC(N(CCCCN)Cc1c(C(C)(C)C)cccn1)c1ccccn1 CHEMBL1682989",
            "CC(N(CCCCN)Cc1c(C(F)(F)F)cccn1)c1ccccn1 CHEMBL1682988",
        )
        ms = [Chem.MolFromSmiles(x.split()[0]) for x in smis]
        qm = ms[0]
        ms = ms[1:]  # the first molecule is the query, the rest are targets
        if kwargs:
            params = Common.getParams(**kwargs)
            mcs = rdFMCS.FindMCS(ms, params)
        else:
            mcs = rdFMCS.FindMCS(ms)
        self.assertEqual(mcs.numBonds, 21)
        self.assertEqual(mcs.numAtoms, 21)
        self.assertEqual(
            mcs.smartsString,
            '[#6](:[#6]:[#6]):[#6]:[#7]:[#6]-[#6]-[#7](-[#6](-[#6])-[#6]1:[#6]:[#6]:[#6]:[#6]:[#7]:1)-[#6]-[#6]-[#6]-[#6]-[#7]'
        )
        # the MCS SMARTS must itself be parseable and hit every molecule
        qm = Chem.MolFromSmarts(mcs.smartsString)
        self.assertTrue(qm is not None)
        for m in ms:
            self.assertTrue(m.HasSubstructMatch(qm))
        # same search at 80% threshold: result is unchanged for this series
        if kwargs:
            params = Common.getParams(**kwargs)
            params.Threshold = 0.8
            mcs = rdFMCS.FindMCS(ms, params)
        else:
            mcs = rdFMCS.FindMCS(ms, threshold=0.8)
        self.assertEqual(mcs.numBonds, 21)
        self.assertEqual(mcs.numAtoms, 21)
        self.assertEqual(
            mcs.smartsString,
            '[#6](:[#6]:[#6]):[#6]:[#7]:[#6]-[#6]-[#7](-[#6](-[#6])-[#6]1:[#6]:[#6]:[#6]:[#6]:[#7]:1)-[#6]-[#6]-[#6]-[#6]-[#7]'
        )
        qm = Chem.MolFromSmarts(mcs.smartsString)
        self.assertTrue(qm is not None)
        for m in ms:
            self.assertTrue(m.HasSubstructMatch(qm))
    def test2(self, **kwargs):
        """Shared MCS test over a CHEMBL diphenylmethoxy series; at the 0.8
        threshold the MCS is larger but only needs to hit 80% of the set.

        *self* is the calling unittest.TestCase (accessed through the class,
        this behaves like a plain function); kwargs may supply AtomTyper /
        BondTyper classes.
        """
        smis = (
            "CHEMBL122452 CN(CCCN(C)CCc1ccccc1)CCOC(c1ccccc1)c1ccccc1",
            "CHEMBL123252 CN(CCCc1ccccc1)CCCN(C)CCOC(c1ccccc1)c1ccccc1",
            "CHEMBL121611 Fc1ccc(C(OCCNCCCNCCc2ccccc2)c2ccc(F)cc2)cc1",
            "CHEMBL121050 O=C(Cc1ccccc1)NCCCCNCCOC(c1ccc(F)cc1)c1ccc(F)cc1",
            "CHEMBL333667 O=C(Cc1ccccc1)NCCNCCOC(c1ccc(F)cc1)c1ccc(F)cc1",
            "CHEMBL121486 O=C(Cc1ccc(Br)cc1)NC=CNCCOC(c1ccc(F)cc1)c1ccc(F)cc1",
            "CHEMBL123830 O=C(Cc1ccc(F)cc1)NCCNCCOC(c1ccc(F)cc1)c1ccc(F)cc1",
            "CHEMBL420900 O=C(Cc1ccccc1)NCCCNCCOC(c1ccc(F)cc1)c1ccc(F)cc1",
            "CHEMBL121460 CN(CCOC(c1ccc(F)cc1)c1ccc(F)cc1)CCN(C)CCOC(c1ccc(F)cc1)c1ccc(F)cc1",
            "CHEMBL120901 COC(=O)C1C2CCC(CC1C(=O)Oc1ccccc1)N2C",
            "CHEMBL122859 O=C1CN(CCc2ccccc2)CCN1CCOC(c1ccc(F)cc1)c1ccc(F)cc1",
            "CHEMBL121027 CN(CCOC(c1ccccc1)c1ccccc1)CCN(C)CCc1ccc(F)cc1",
        )
        # note: here the SMILES is the second token (name first)
        ms = [Chem.MolFromSmiles(x.split()[1]) for x in smis]
        qm = ms[0]
        ms = ms[1:]
        if kwargs:
            params = Common.getParams(**kwargs)
            mcs = rdFMCS.FindMCS(ms, params)
        else:
            mcs = rdFMCS.FindMCS(ms)
        self.assertEqual(mcs.numBonds, 9)
        self.assertEqual(mcs.numAtoms, 10)
        qm = Chem.MolFromSmarts(mcs.smartsString)
        self.assertTrue(qm is not None)
        for m in ms:
            self.assertTrue(m.HasSubstructMatch(qm))
        # smarts too hard to canonicalize this
        # self.assertEqual(mcs.smartsString,'[#6]-,:[#6]-,:[#6]-,:[#6]-,:[#6](-[#6]-[#8]-[#6]:,-[#6])-,:[#6]')
        if kwargs:
            params = Common.getParams(**kwargs)
            params.Threshold = 0.8
            mcs = rdFMCS.FindMCS(ms, params)
        else:
            mcs = rdFMCS.FindMCS(ms, threshold=0.8)
        self.assertEqual(mcs.numBonds, 20)
        self.assertEqual(mcs.numAtoms, 19)
        qm = Chem.MolFromSmarts(mcs.smartsString)
        self.assertTrue(qm is not None)
        # with threshold=0.8 only 80% of the molecules need to match the MCS
        nHits = 0
        for m in ms:
            if m.HasSubstructMatch(qm):
                nHits += 1
        self.assertTrue(nHits >= int(0.8 * len(smis)))
        # smarts too hard to canonicalize this
        # self.assertEqual(mcs.smartsString,'[#6]1:[#6]:[#6]:[#6](:[#6]:[#6]:1)-[#6](-[#8]-[#6]-[#6]-[#7]-[#6]-[#6])-[#6]2:[#6]:[#6]:[#6]:[#6]:[#6]:2')
    def test3IsotopeMatch(self, **kwargs):
        """Isotope-aware MCS: by default isotopes are ignored (MCS is just
        C-C); with CompareIsotopes/MatchIsotope the 14-labelled atom joins
        the MCS and the resulting SMARTS is isotope-specific.
        """
        smis = (
            "CC[14NH2]",
            "CC[14CH3]",
        )
        ms = [Chem.MolFromSmiles(x) for x in smis]
        if kwargs:
            params = Common.getParams(**kwargs)
            mcs = rdFMCS.FindMCS(ms, params)
        else:
            mcs = rdFMCS.FindMCS(ms)
        # element-based matching: the labelled heteroatom/carbon differ
        self.assertEqual(mcs.numBonds, 1)
        self.assertEqual(mcs.numAtoms, 2)
        qm = Chem.MolFromSmarts(mcs.smartsString)
        if kwargs:
            params = Common.getParams(**kwargs)
            params.AtomTyper = CompareIsotopes()
            params.AtomCompareParameters.MatchIsotope = True
            mcs = rdFMCS.FindMCS(ms, params)
        else:
            mcs = rdFMCS.FindMCS(ms, atomCompare=rdFMCS.AtomCompare.CompareIsotopes)
        # isotope matching ignores the element, so all three atoms match
        self.assertEqual(mcs.numBonds, 2)
        self.assertEqual(mcs.numAtoms, 3)
        qm = Chem.MolFromSmarts(mcs.smartsString)
        self.assertTrue(Chem.MolFromSmiles('CC[14CH3]').HasSubstructMatch(qm))
        self.assertFalse(Chem.MolFromSmiles('CC[13CH3]').HasSubstructMatch(qm))
        self.assertTrue(Chem.MolFromSmiles('OO[14CH3]').HasSubstructMatch(qm))
        self.assertFalse(Chem.MolFromSmiles('O[13CH2][14CH3]').HasSubstructMatch(qm))
  def test4RingMatches(self, **kwargs):
    # Exercises the ring-matching options (completeRingsOnly, the ringCompare
    # fusion modes, ringMatchesRingOnly) first on a chain/ring pair and then
    # on a pair of rings with different sizes.
    smis = ['CCCCC', 'CCC1CCCCC1']
    ms = [Chem.MolFromSmiles(x) for x in smis]
    # Defaults: chain atoms may match ring atoms, so the whole chain matches.
    if kwargs:
      params = Common.getParams(**kwargs)
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms)
    self.assertEqual(mcs.numBonds, 4)
    self.assertEqual(mcs.numAtoms, 5)
    self.assertEqual(mcs.smartsString, '[#6]-[#6]-[#6]-[#6]-[#6]')
    # completeRingsOnly: partial ring matches are rejected, leaving only the
    # acyclic three-carbon fragment.
    if kwargs:
      params = Common.getParams(**kwargs)
      params.BondCompareParameters.CompleteRingsOnly = True
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms, completeRingsOnly=True)
    self.assertEqual(mcs.numBonds, 2)
    self.assertEqual(mcs.numAtoms, 3)
    self.assertEqual(mcs.smartsString, '[#6]-&!@[#6]-&!@[#6]')
    # Permissive ring fusion (MatchFusedRings) gives the same result here.
    if kwargs:
      params = Common.getParams(**kwargs)
      params.BondCompareParameters.CompleteRingsOnly = True
      params.BondCompareParameters.MatchFusedRings = True
      params.BondCompareParameters.MatchFusedRingsStrict = False
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms, completeRingsOnly=True,
                           ringCompare=rdFMCS.RingCompare.PermissiveRingFusion)
    self.assertEqual(mcs.numBonds, 2)
    self.assertEqual(mcs.numAtoms, 3)
    self.assertEqual(mcs.smartsString, '[#6]-&!@[#6]-&!@[#6]')
    # Strict ring fusion: still the same acyclic fragment.
    if kwargs:
      params = Common.getParams(**kwargs)
      params.BondCompareParameters.CompleteRingsOnly = True
      params.BondCompareParameters.MatchFusedRings = True
      params.BondCompareParameters.MatchFusedRingsStrict = True
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms, completeRingsOnly=True,
                           ringCompare=rdFMCS.RingCompare.StrictRingFusion)
    self.assertEqual(mcs.numBonds, 2)
    self.assertEqual(mcs.numAtoms, 3)
    self.assertEqual(mcs.smartsString, '[#6]-&!@[#6]-&!@[#6]')
    # ringMatchesRingOnly: ring atoms/bonds may only match ring atoms/bonds,
    # shrinking the MCS to the two non-ring carbons.
    if kwargs:
      params = Common.getParams(**kwargs)
      params.AtomCompareParameters.RingMatchesRingOnly = True
      params.BondCompareParameters.RingMatchesRingOnly = True
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms, ringMatchesRingOnly=True)
    self.assertEqual(mcs.numBonds, 1)
    self.assertEqual(mcs.numAtoms, 2)
    self.assertEqual(mcs.smartsString, '[#6&!R]-&!@[#6&!R]')
    # Second pair: a 4-membered vs a 6-membered ring.
    smis = ['CC1CCC1', 'CCC1CCCCC1']
    ms = [Chem.MolFromSmiles(x) for x in smis]
    if kwargs:
      params = Common.getParams(**kwargs)
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms)
    self.assertEqual(mcs.numBonds, 4)
    self.assertEqual(mcs.numAtoms, 5)
    self.assertEqual(mcs.smartsString, '[#6]-[#6](-[#6]-[#6])-[#6]')
    # Rings of different sizes cannot match completely, so only the exocyclic
    # bond survives when complete rings are required.
    if kwargs:
      params = Common.getParams(**kwargs)
      params.BondCompareParameters.CompleteRingsOnly = True
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms, completeRingsOnly=True)
    self.assertEqual(mcs.numBonds, 1)
    self.assertEqual(mcs.numAtoms, 2)
    self.assertEqual(mcs.smartsString, '[#6]-&!@[#6]')
    if kwargs:
      params = Common.getParams(**kwargs)
      params.AtomCompareParameters.RingMatchesRingOnly = True
      params.BondCompareParameters.CompleteRingsOnly = True
      params.BondCompareParameters.RingMatchesRingOnly = True
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms, ringMatchesRingOnly=True, completeRingsOnly=True)
    self.assertEqual(mcs.numBonds, 1)
    self.assertEqual(mcs.numAtoms, 2)
    self.assertEqual(mcs.smartsString, '[#6&!R]-&!@[#6&R]')
    # The ring-fusion options do not change that result.
    if kwargs:
      params = Common.getParams(**kwargs)
      params.AtomCompareParameters.RingMatchesRingOnly = True
      params.BondCompareParameters.CompleteRingsOnly = True
      params.BondCompareParameters.RingMatchesRingOnly = True
      params.BondCompareParameters.MatchFusedRings = True
      params.BondCompareParameters.MatchFusedRingsStrict = False
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms, ringMatchesRingOnly=True, completeRingsOnly=True,
                           ringCompare=rdFMCS.RingCompare.PermissiveRingFusion)
    self.assertEqual(mcs.numBonds, 1)
    self.assertEqual(mcs.numAtoms, 2)
    self.assertEqual(mcs.smartsString, '[#6&!R]-&!@[#6&R]')
    if kwargs:
      params = Common.getParams(**kwargs)
      params.AtomCompareParameters.RingMatchesRingOnly = True
      params.BondCompareParameters.CompleteRingsOnly = True
      params.BondCompareParameters.RingMatchesRingOnly = True
      params.BondCompareParameters.MatchFusedRings = True
      params.BondCompareParameters.MatchFusedRingsStrict = True
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms, ringMatchesRingOnly=True, completeRingsOnly=True,
                           ringCompare=rdFMCS.RingCompare.StrictRingFusion)
    self.assertEqual(mcs.numBonds, 1)
    self.assertEqual(mcs.numAtoms, 2)
    self.assertEqual(mcs.smartsString, '[#6&!R]-&!@[#6&R]')
    # ringMatchesRingOnly alone (no completeRingsOnly): a partial ring match
    # is allowed again, so the full 5-atom MCS comes back.
    if kwargs:
      params = Common.getParams(**kwargs)
      params.AtomCompareParameters.RingMatchesRingOnly = True
      params.BondCompareParameters.RingMatchesRingOnly = True
      mcs = rdFMCS.FindMCS(ms, params)
    else:
      mcs = rdFMCS.FindMCS(ms, ringMatchesRingOnly=True)
    self.assertEqual(mcs.numBonds, 4)
    self.assertEqual(mcs.numAtoms, 5)
    self.assertEqual(mcs.smartsString, '[#6&!R]-&!@[#6&R](-&@[#6&R]-&@[#6&R])-&@[#6&R]')
def test5AnyMatch(self, **kwargs):
smis = ('c1ccccc1C', 'c1ccccc1O', 'c1ccccc1Cl')
ms = [Chem.MolFromSmiles(x) for x in smis]
if kwargs:
params = Common.getParams(**kwargs)
params.AtomTyper = CompareAny()
mcs = rdFMCS.FindMCS(ms, params)
else:
mcs = rdFMCS.FindMCS(ms, atomCompare=rdFMCS.AtomCompare.CompareAny)
self.assertEqual(mcs.numBonds, 7)
self.assertEqual(mcs.numAtoms, 7)
qm = Chem.MolFromSmarts(mcs.smartsString)
for m in ms:
self.assertTrue(m.HasSubstructMatch(qm))
smis = | |
49,
"eye_color": "purple",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6f8ec5462005779ae",
"is_active": true,
"number_of_children": 2,
"age": 31,
"eye_color": "green",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6679c2d76c4e7047e",
"is_active": true,
"number_of_children": 2,
"age": 56,
"eye_color": "brown",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6da63130d511abf26",
"is_active": false,
"number_of_children": 3,
"age": 23,
"eye_color": "green",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c648c72e2d0135dc5b",
"is_active": true,
"number_of_children": 2,
"age": 16,
"eye_color": "brown",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6d8391b4541f69791",
"is_active": false,
"number_of_children": 2,
"age": 62,
"eye_color": "brown",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6904730d577c3beb4",
"is_active": false,
"number_of_children": 1,
"age": 33,
"eye_color": "blue",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c69eb29aabd150d1f1",
"is_active": false,
"number_of_children": 4,
"age": 25,
"eye_color": "blue",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6c71cdc69b57124c2",
"is_active": true,
"number_of_children": 1,
"age": 63,
"eye_color": "blue",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6b534593cb27c6ecd",
"is_active": false,
"number_of_children": 3,
"age": 31,
"eye_color": "green",
"name": "<NAME>",
"gender": "male",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c67313759b66b23ebf",
"is_active": true,
"number_of_children": 4,
"age": 19,
"eye_color": "blue",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c67800f388fa6d58b0",
"is_active": true,
"number_of_children": 4,
"age": 29,
"eye_color": "purple",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c60e5e0bad8286fcbe",
"is_active": true,
"number_of_children": 3,
"age": 64,
"eye_color": "brown",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6afa8716439280135",
"is_active": false,
"number_of_children": 1,
"age": 18,
"eye_color": "brown",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c65793d05c1ceaf1d6",
"is_active": true,
"number_of_children": 3,
"age": 35,
"eye_color": "purple",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c672d360fd8853a1ee",
"is_active": true,
"number_of_children": 1,
"age": 45,
"eye_color": "blue",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c63631300199a961c6",
"is_active": true,
"number_of_children": 2,
"age": 39,
"eye_color": "brown",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6b69986c9badc5afc",
"is_active": true,
"number_of_children": 4,
"age": 42,
"eye_color": "green",
"name": "<NAME>",
"gender": "male",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c60c649b4ee89bc716",
"is_active": false,
"number_of_children": 3,
"age": 43,
"eye_color": "green",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6f34d3dc556be10dc",
"is_active": true,
"number_of_children": 0,
"age": 55,
"eye_color": "green",
"name": "<NAME>",
"gender": "male",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c654d710a9cd85c3ec",
"is_active": false,
"number_of_children": 1,
"age": 25,
"eye_color": "purple",
"name": "<NAME>",
"gender": "male",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6233dbebc75492389",
"is_active": false,
"number_of_children": 1,
"age": 62,
"eye_color": "purple",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c69ea4391bd8efb2e1",
"is_active": true,
"number_of_children": 2,
"age": 37,
"eye_color": "brown",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6b601a813bb562daf",
"is_active": true,
"number_of_children": 3,
"age": 62,
"eye_color": "purple",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c671327a79d6e206d7",
"is_active": false,
"number_of_children": 0,
"age": 48,
"eye_color": "green",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6f1a79a1f2db2ab05",
"is_active": true,
"number_of_children": 2,
"age": 65,
"eye_color": "purple",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6965011f738097f73",
"is_active": true,
"number_of_children": 2,
"age": 59,
"eye_color": "green",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c60576fccb0fb87363",
"is_active": true,
"number_of_children": 1,
"age": 21,
"eye_color": "purple",
"name": "<NAME>",
"gender": "male",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c68b3d50e008d99889",
"is_active": false,
"number_of_children": 2,
"age": 66,
"eye_color": "brown",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6432dabfa11fd8627",
"is_active": false,
"number_of_children": 1,
"age": 24,
"eye_color": "purple",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c65874bb8b81e8244c",
"is_active": false,
"number_of_children": 2,
"age": 58,
"eye_color": "green",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6e26433fc108d57e0",
"is_active": true,
"number_of_children": 3,
"age": 47,
"eye_color": "blue",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6a5dd5f7989eec332",
"is_active": false,
"number_of_children": 2,
"age": 54,
"eye_color": "green",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c668bdccbe25440473",
"is_active": true,
"number_of_children": 1,
"age": 20,
"eye_color": "blue",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6eaa359431618e58f",
"is_active": false,
"number_of_children": 1,
"age": 60,
"eye_color": "blue",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c670935b2a0b683480",
"is_active": true,
"number_of_children": 0,
"age": 51,
"eye_color": "blue",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c61fad2176413f334b",
"is_active": true,
"number_of_children": 3,
"age": 31,
"eye_color": "blue",
"name": "<NAME>",
"gender": "male",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6c3855d417fb9efca",
"is_active": false,
"number_of_children": 0,
"age": 68,
"eye_color": "blue",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6489ef564f0d07ff8",
"is_active": true,
"number_of_children": 1,
"age": 23,
"eye_color": "green",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c62646561929edbb17",
"is_active": false,
"number_of_children": 3,
"age": 55,
"eye_color": "blue",
"name": "<NAME>",
"gender": "male",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c621a0f900267f1765",
"is_active": false,
"number_of_children": 3,
"age": 46,
"eye_color": "green",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6a3beab51eb38c046",
"is_active": false,
"number_of_children": 3,
"age": 63,
"eye_color": "green",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6e1714f6c253a616e",
"is_active": true,
"number_of_children": 0,
"age": 40,
"eye_color": "green",
"name": "<NAME>",
"gender": "male",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c63e6c571958fea02a",
"is_active": true,
"number_of_children": 3,
"age": 65,
"eye_color": "brown",
"name": "<NAME>",
"gender": "male",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6ff91ba748f8c33d0",
"is_active": true,
"number_of_children": 4,
"age": 36,
"eye_color": "blue",
"name": "<NAME>",
"gender": "male",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c69fc521e43bd487e0",
"is_active": false,
"number_of_children": 3,
"age": 54,
"eye_color": "brown",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6bc1bf96cc4bb6491",
"is_active": false,
"number_of_children": 1,
"age": 36,
"eye_color": "purple",
"name": "<NAME>",
"gender": "female",
"has_beard": true,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c6bb4e5b1dc6dd236b",
"is_active": true,
"number_of_children": 3,
"age": 33,
"eye_color": "blue",
"name": "<NAME>",
"gender": "female",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "greendale"
}
},
{
"id": "56c4f0c6db71359c29b30b18",
"is_active": false,
"number_of_children": 4,
"age": 56,
"eye_color": "green",
"name": "<NAME>",
"gender": "male",
"has_beard": false,
"email": "<EMAIL>",
"company": {
"name": "house of congress"
}
},
{
"id": "56c4f0c61430bb3be9433057",
"is_active": true,
"number_of_children": 1,
"age": 33,
"eye_color": "purple",
"name": "<NAME>",
| |
# <gh_stars>1-10
from twisted.internet import reactor
from twisted.internet.defer import DeferredList
from twisted.internet.endpoints import serverFromString, clientFromString
from twisted.internet.protocol import Factory
from twisted.internet.task import LoopingCall
from twisted.internet.threads import deferToThread
from twisted.internet.interfaces import ISSLTransport
from twisted.python import log
from twisted.words.protocols import irc
from txircd.server import ConnectUser, IntroduceServer, ServerProtocol, protocol_version
from txircd.utils import CaseInsensitiveDictionary, epoch, now, resolveEndpointDescription
from txircd.user import IRCUser
from txircd import __version__
import imp, json, os, socket, yaml
# Add additional numerics to complement the ones in the RFC
irc.RPL_LOCALUSERS = "265"
irc.RPL_GLOBALUSERS = "266"
irc.RPL_CREATIONTIME = "329"
irc.RPL_TOPICWHOTIME = "333"
# Baseline configuration.  load_options() copies these into servconfig for
# every key the user-supplied config does not set, so each entry here is the
# effective default for the running server.
default_options = {
    # App details
    "app_verbose": False,
    "app_ssl_key": "test.key",
    "app_ssl_pem": "test.pem",
    "app_irc_spec": "rfc1459",  # "ircv3" additionally loads the IRCv3 module set
    "app_log_dir": "logs",
    # Server details
    "server_name": socket.getfqdn(),
    "server_description": "A txircd server",
    "server_network_name": "txircd",
    "server_motd": "Welcome to txircd",
    "server_motd_line_length": 80,
    "server_client_ports": [],
    "server_link_ports": [],
    "server_stats_public": "ou",
    "server_modules": [],
    "server_password": None,
    "serverlinks": {},
    "serverlink_autoconnect": [],
    # Client details
    "client_vhosts": {"127.0.0.1":"localhost"},
    "client_max_data": 5000, # Bytes per 5 seconds
    "client_peer_connections": 3,
    "client_peer_exempt": {"127.0.0.1":0},
    "client_ping_interval": 60,   # seconds between PINGs to idle clients
    "client_timeout_delay": 120,  # seconds without PONG before disconnect
    "client_ban_msg": "You're banned! Email <EMAIL> for help.",
    # Oper details
    "oper_ips": ["127.0.0.1"],
    "oper_logins": {},
    # Channel details
    "channel_default_mode": {"n": None, "t": None},
    "channel_default_status": "o",
    "channel_exempt_chanops": "", # list of modes from which channel operators are exempt
    "channel_status_minimum_change": {},
    # Admin details
    "admin_info_server": "Host Corp: 123 Example Street, Seattle, WA, USA",
    "admin_info_organization": "Umbrella Corp: 123 Example Street, Seattle, WA, USA",
    "admin_info_person": "Lazy Admin <<EMAIL>>",
}
class IRCProtocol(irc.IRC):
    """Wire-level handler for a single client connection.

    Owns the connection lifecycle (registration timeout, ping schedule,
    input throttling) and delegates parsed IRC commands to the IRCUser
    stored in self.type.
    """
    def __init__(self, ip):
        self.dead = False   # set once connectionLost has run; guards re-entry
        self.type = None    # becomes an IRCUser in connectionMade
        self.secure = False # True when the transport is SSL/TLS
        self.data = 0       # bytes received in the current 5-second window
        self.data_checker = LoopingCall(self.checkData)
        self.pinger = LoopingCall.withCount(self.ping)
        # Tear the connection down if connectionMade never fires within 15s
        self.connection_expire = reactor.callLater(15, self.connectionLost, None)
        self.ip = ip
    def connectionMade(self):
        self.connection_expire.cancel()
        self.type = IRCUser(self)
        tryagain = []
        # Run the registered "connect" actions.  A falsy result vetoes the
        # connection; "again" defers the action to a second pass after the
        # other actions have run.
        for function in self.factory.actions["connect"]:
            result = function(self.type)
            if result == "again":
                tryagain.append(function)
            elif not result:
                self.transport.loseConnection()
                self.type = None
                break
        if self.type:
            for function in tryagain:
                if not function(self.type):
                    self.transport.loseConnection()
                    self.type = None
                    break
        if self.type:
            self.secure = ISSLTransport(self.transport, None) is not None
            self.data_checker.start(5)
            self.pinger.start(self.factory.servconfig["client_ping_interval"], now=False)
            # Announce the new user on every server link this node terminates
            for server in self.factory.servers.itervalues():
                if server.nearHop == self.factory.name:
                    server.callRemote(ConnectUser, uuid=self.type.uuid, ip=self.type.ip, server=self.factory.name, secure=self.secure, signon=epoch(self.type.signon))
    def dataReceived(self, data):
        if self.dead:
            return
        # Get and store the peer certificate if the client is using SSL and providing a client certificate
        # I don't like handling this here, but twisted does not provide a hook to process it in a better place (e.g.
        # when the SSL handshake is complete); see http://twistedmatrix.com/trac/ticket/6024
        # This will be moved in the future when we can.
        if self.secure and self.transport:
            certificate = self.transport.getPeerCertificate()
            if certificate is not None:
                self.type.setMetadata("server", "certfp", certificate.digest("md5").lower().replace(":", ""))
        # Handle the received data
        for modfunc in self.factory.actions["recvdata"]:
            modfunc(self.type, data)
        self.data += len(data)
        # Any traffic counts as activity, so push the ping schedule back
        if self.pinger.running:
            self.pinger.reset()
        irc.IRC.dataReceived(self, data)
    def checkData(self):
        # Runs every 5 seconds: hand the window's byte count to the user
        # object (which enforces the flood limit) and reset the counter.
        if self.type:
            self.type.checkData(self.data)
        self.data = 0
    def ping(self, intervals):
        # `intervals` is the number of scheduled pings since the last one
        # actually ran (LoopingCall.withCount), so missed intervals extend
        # the allowed timeout accordingly.
        timeout = self.factory.servconfig["client_timeout_delay"] + self.factory.servconfig["client_ping_interval"] * (intervals - 1)
        if (now() - self.type.lastpong).total_seconds() > timeout:
            log.msg("Client has stopped responding to PING and is now disconnecting.")
            self.transport.loseConnection()
            self.connectionLost(None)
        elif self.type.lastactivity > self.type.lastpong:
            # Client has been active since the last PONG; treat that as alive
            self.type.lastpong = now()
        else:
            self.sendMessage("PING",":{}".format(self.factory.name))
    def handleCommand(self, command, prefix, params):
        log.msg("handleCommand: {!r} {!r} {!r}".format(command, prefix, params))
        return self.type.handleCommand(command, prefix, params)
    def sendLine(self, line):
        if self.dead:
            return
        # Give modules a look at outgoing data before it hits the wire
        for modfunc in self.factory.actions["senddata"]:
            modfunc(self.type, line)
        log.msg("sendLine: {!r}".format(line))
        return irc.IRC.sendLine(self, line)
    def connectionLost(self, reason):
        # Idempotent teardown: may be reached from the transport, the expiry
        # timer, or ping timeout, so bail out if it already ran.
        if self.dead:
            return
        self.dead = True
        self.factory.unregisterProtocol(self)
        if self.type:
            self.type.connectionLost(reason)
        if self.data_checker.running:
            self.data_checker.stop()
        if self.pinger.running:
            self.pinger.stop()
class IRCD(Factory):
protocol = IRCProtocol
    def __init__(self, config, options = None, sslCert = None):
        """Build the server state, load persisted module data, apply the
        configuration, load all modules, and finally populate ISUPPORT.

        config  -- path to the YAML config file (re-read on rehash)
        options -- dict of already-parsed config options overriding defaults
        sslCert -- SSL context/certificate for secure listeners
        """
        reactor.addSystemEventTrigger("before", "shutdown", self.cleanup)
        self.dead = False
        self.config = config
        self.version = "txircd-{}".format(__version__)
        self.created = now()
        # Network state: servers/users/channels are case-insensitive per IRC
        self.servers = CaseInsensitiveDictionary()
        self.users = CaseInsensitiveDictionary()
        self.userid = {}
        self.channels = CaseInsensitiveDictionary()
        self.peerConnections = {}
        self.ssl_cert = sslCert
        self.client_ports = {}
        self.server_ports = {}
        self.modules = {}
        self.module_abilities = {}
        # Hook points modules register callbacks into
        self.actions = {
            "connect": [],
            "register": [],
            "welcome": [],
            "join": [],
            "joinmessage": [],
            "nick": [],
            "quit": [],
            "topic": [],
            "mode": [],
            "nameslistentry": [],
            "chancreate": [],
            "chandestroy": [],
            "commandextra": [],
            "commandunknown": [],
            "commandpermission": [],
            "metadataupdate": [],
            "recvdata": [],
            "senddata": [],
            "netmerge": [],
            "netsplit": []
        }
        self.commands = {}
        # Four mode dicts correspond to the four mode categories (list,
        # param-on-set/unset, param-on-set, no-param)
        self.channel_modes = [{}, {}, {}, {}]
        self.channel_mode_type = {}
        self.user_modes = [{}, {}, {}, {}]
        self.user_mode_type = {}
        self.prefixes = {}
        self.prefix_symbols = {}
        self.prefix_order = []
        self.server_commands = {}
        self.module_data_cache = {}
        self.server_factory = None
        self.common_modules = set()
        log.msg("Loading module data...")
        # Restore whatever module state was serialized on the last shutdown;
        # a missing or empty data.yaml just means a fresh start.
        try:
            with open("data.yaml", "r") as dataFile:
                self.serialized_data = yaml.safe_load(dataFile)
                if self.serialized_data is None:
                    self.serialized_data = {}
        except IOError:
            self.serialized_data = {}
        self.isupport = {}
        self.usercount = {
            "localmax": 0,
            "globalmax": 0
        }
        log.msg("Loading configuration...")
        self.servconfig = {}
        if not options:
            options = {}
        self.load_options(options)
        self.name = self.servconfig["server_name"]
        log.msg("Loading modules...")
        self.all_module_load()
        self.save_serialized_deferred = None
        self.autoconnect_servers = LoopingCall(self.server_autoconnect)
        self.autoconnect_servers.start(60, now=False) # The server factory isn't added to here yet
        # Fill in the default ISUPPORT dictionary once config and modules are loaded, since some values depend on those
        self.isupport["CASEMAPPING"] = "rfc1459"
        self.isupport["CHANMODES"] = ",".join(["".join(modedict.keys()) for modedict in self.channel_modes])
        self.isupport["CHANNELLEN"] = "64"
        self.isupport["CHANTYPES"] = "#"
        self.isupport["MODES"] = 20
        self.isupport["NETWORK"] = self.servconfig["server_network_name"]
        self.isupport["NICKLEN"] = "32"
        self.isupport["PREFIX"] = "({}){}".format("".join(self.prefix_order), "".join([self.prefixes[mode][0] for mode in self.prefix_order]))
        self.isupport["STATUSMSG"] = "".join([self.prefixes[mode][0] for mode in self.prefix_order])
        self.isupport["TOPICLEN"] = "316"
        self.isupport["USERMODES"] = ",".join(["".join(modedict.keys()) for modedict in self.user_modes])
def all_module_load(self):
# load RFC-required modules
rfc_spec = [
# commands
"cmd_user", "cmd_nick", "cmd_pass", # registration
"cmd_ping", "cmd_pong", # connection keep-alive
"cmd_join", "cmd_part", "cmd_kick", "cmd_topic", "cmd_mode", "cmd_invite", # channels
"cmd_quit", # connection end
"cmd_privmsg_notice", # messages
"cmd_oper", "umode_o", "cmd_rehash", "cmd_wallops", # oper
"cmd_admin", "cmd_info", "cmd_lusers", "cmd_motd", "cmd_stats", "cmd_time", "cmd_version", # server info
"cmd_away", "cmd_ison", "cmd_userhost", "cmd_who", "cmd_whois", "cmd_whowas", # user info
"cmd_names", "cmd_list", # channel info
"cmd_kill", "cmd_eline", "cmd_gline", "cmd_kline", "cmd_qline", "cmd_zline", # user management
"cmd_links", "cmd_connect", "cmd_squit", # linked servers
# channel modes
"cmode_b", "cmode_i", "cmode_k", "cmode_l", "cmode_m", "cmode_n", "cmode_o", "cmode_p", "cmode_s", "cmode_t", "cmode_v",
# user modes
"umode_i", "umode_s"
]
ircv3_spec = [ # http://ircv3.atheme.org/
"ircv3_cap", # capability mechanism which essentially serves as the base for everything else
"ircv3_multi-prefix", "ircv3_sasl", # other IRC 3.1 base extensions
"ircv3_account-notify", "ircv3_away-notify", "ircv3_extended-join", "ircv3_tls", # IRC 3.1 optional extensions
"ircv3_monitor", "ircv3_metadata" # IRC 3.2 base extensions
]
for module in rfc_spec:
check = self.load_module(module)
if not check:
log.msg("An RFC-required capability could not be loaded!")
raise RuntimeError("A module required for RFC compatibility could not be loaded.")
return
if self.servconfig["app_irc_spec"] == "ircv3":
for module in ircv3_spec:
check = self.load_module(module)
if not check:
log.msg("IRCv3 compatibility was specified, but a required IRCv3 module could not be loaded!")
raise RuntimeError("A module required for IRCv3 compatibility could not be loaded.")
return
for module in self.servconfig["server_modules"]:
self.load_module(module)
def rehash(self):
log.msg("Rehashing config file and reloading modules")
try:
with open(self.config) as f:
self.load_options(yaml.safe_load(f))
self.all_module_load()
self.save_module_data()
self.rebind_ports()
except:
return False
return True
def load_options(self, options):
for var, value in options.iteritems():
self.servconfig[var] = value
for var, value in default_options.iteritems():
if var not in self.servconfig:
self.servconfig[var] = value
    def cleanup(self):
        """Shutdown hook (registered with the reactor in __init__).

        Disconnects server links and clients, gives each module a chance to
        clean up and serialize its state, saves the serialized data, and
        returns a DeferredList the reactor waits on before exiting.
        """
        # Track the disconnections so we know they get done
        deferreds = []
        log.msg("Disconnecting servers...")
        for server in self.servers.values():
            # Only close links we terminate locally; remote-hop entries are
            # cleaned up by their owning server
            if server.nearHop == self.name:
                server.transport.loseConnection()
                deferreds.append(server.disconnected)
        # Cleanly disconnect all clients
        log.msg("Disconnecting clients...")
        for u in self.users.values():
            u.sendMessage("ERROR", ":Closing Link: {} [Server shutting down]".format(u.hostname), to=None, prefix=None)
            u.socket.transport.loseConnection()
            deferreds.append(u.disconnected)
        log.msg("Unloading modules...")
        for name, spawner in self.modules.iteritems():
            try:
                spawner.cleanup()
            except AttributeError:
                pass # If the module has no extra cleanup to do, that's fine
            try:
                data_to_save, free_data = self.modules[name].data_serialize()
                if data_to_save:
                    self.serialized_data[name] = data_to_save
            except AttributeError:
                pass # If the module has no data to save, that's also fine.
        log.msg("Saving serialized data...")
        # NOTE(review): when save_module_data() returns falsy, a second save is
        # chained onto save_serialized_deferred — presumably a save is already
        # in flight at that point; confirm against save_module_data.
        if not self.save_module_data():
            self.save_serialized_deferred.addCallback(self.save_serialized)
            deferreds.append(self.save_serialized_deferred)
        # Return deferreds
        log.msg("Waiting on deferreds...")
        self.dead = True
        return DeferredList(deferreds)
def connect_server(self, servername):
def sendServerHandshake(protocol, password):
protocol.callRemote(IntroduceServer, name=self.name, password=password, description=self.servconfig["server_description"], version=protocol_version, commonmodules=self.common_modules)
protocol.sentDataBurst = False
if servername in self.servers:
raise RuntimeError ("Server {} is already connected".format(servername))
if servername not in self.servconfig["serverlinks"]:
raise RuntimeError ("Server {} is not configured".format(servername))
servinfo = self.servconfig["serverlinks"][servername]
if "ip" not in servinfo:
raise RuntimeError ("Server {} is not properly configured: IP address must be specified".format(servername))
if "connect" not in servinfo:
raise RuntimeError ("Server {} is not properly configured: Connection description not provided".format(servername))
if "incoming_password" not in servinfo or "outgoing_password" not in servinfo:
raise RuntimeError ("Server {} is not properly configured: Passwords not specified".format(servername))
try:
endpoint = clientFromString(reactor, resolveEndpointDescription(servinfo["connect"]))
except ValueError as e:
| |
# <filename>AWSteria/src_Testbench_AWS/Top/Gen_Bytevec/Gen_Bytevec_Mux_BSV.py
#!/usr/bin/python3 -B
# Copyright (c) 2020 <NAME>
# See main Gen.py and README for details
# ================================================================
import sys
import os
import stat
import importlib
import pprint
pp = pprint.PrettyPrinter()
from Gen_Bytevec_Mux_Common import *
# ================================================================
def Gen_BSV (spec_filename,
             package_name,
             C_to_BSV_structs, C_to_BSV_packet_bytes,
             BSV_to_C_structs, BSV_to_C_packet_bytes):
    """Write <package_name>.bsv: struct typedefs for both directions, the
    package interface, and the module implementation.

    Fix: the output file is now opened with a 'with' statement, so the
    handle is closed (and buffered output flushed) even if one of the
    generator helpers raises.
    """
    output_bsv_filename = package_name + ".bsv"
    with open (output_bsv_filename, 'w') as file_bsv:
        file_bsv.write ("// This file was generated from spec file '{:s}'\n".format (spec_filename))
        file_bsv.write ("\n")
        file_bsv.write ("package {:s};\n".format (package_name))
        file_bsv.write ("\n")
        # Standard imports needed by the generated package
        file_bsv.write ("import Vector :: *;\n")
        file_bsv.write ("import FIFOF :: *;\n")
        file_bsv.write ("import Semi_FIFOF :: *;\n")
        file_bsv.write ("\n")
        # One typedef struct per message type, in both directions
        for struct in C_to_BSV_structs + BSV_to_C_structs:
            code = gen_struct_decl (struct)
            file_bsv.write (code)
        file_bsv.write (gen_interface (package_name,
                                       C_to_BSV_structs, C_to_BSV_packet_bytes,
                                       BSV_to_C_structs, BSV_to_C_packet_bytes))
        file_bsv.write (gen_module (package_name,
                                    C_to_BSV_structs, C_to_BSV_packet_bytes,
                                    BSV_to_C_structs, BSV_to_C_packet_bytes))
        file_bsv.write ("// ================================================================\n")
        file_bsv.write ("\n")
        file_bsv.write ("endpackage\n")
    sys.stdout.write ("Wrote output to file: {:s}\n".format (output_bsv_filename))
# ================================================================
def gen_struct_decl (struct):
    """Return the BSV 'typedef struct' declaration text for one struct spec.

    *struct* is a dict with keys 'struct_name', 'size_bytes', and 'fields'
    (each field a dict with 'field_name', 'width_bits', 'width_bytes',
    'dimension').
    """
    name = struct ['struct_name']
    # Header comment block, then the struct body with one line per field.
    lines = ["\n",
             "// ================================================================\n",
             "// Size on the wire: {:d} bytes\n".format (struct ['size_bytes']),
             "\n",
             "typedef struct {\n"]
    for field in struct ['fields']:
        bit_type = "Bit #({:d})".format (field ['width_bits'])
        wire_bytes = field ['width_bytes'] * field ['dimension']
        lines.append ("    {:14s} {:s}; // {:d} bytes\n".format (bit_type, field ['field_name'], wire_bytes))
    lines.append ("}} {:s}\n".format (name))
    lines.append ("deriving (Bits, FShow);\n")
    return "".join (lines)
# ================================================================
def gen_interface (package_name,
                   C_to_BSV_structs, C_to_BSV_packet_bytes,
                   BSV_to_C_structs, BSV_to_C_packet_bytes):
    """Return BSV source text for the bytevec typedefs and the package's
    top-level interface declaration ('<package_name>_IFC')."""
    size_C_to_BSV = total_packet_size_bytes (C_to_BSV_packet_bytes)
    size_BSV_to_C = total_packet_size_bytes (BSV_to_C_packet_bytes)
    lines = [
        "",
        "// ================================================================",
        "// Bytevecs",
        "",
        "typedef {:d}  Bytevec_C_to_BSV_Size;".format (size_C_to_BSV),
        "Integer bytevec_C_to_BSV_size = {:d};".format (size_C_to_BSV),
        "typedef  Vector #(Bytevec_C_to_BSV_Size, Bit #(8))  Bytevec_C_to_BSV;",
        "",
        "typedef {:d}  BSV_to_C_Bytevec_Size;".format (size_BSV_to_C),
        "Integer bytevec_BSV_to_C_size = {:d};".format (size_BSV_to_C),
        "typedef  Vector #(BSV_to_C_Bytevec_Size, Bit #(8))  BSV_to_C_Bytevec;",
        "",
        "// ================================================================",
        "// INTERFACE",
        "",
        "interface {:s}_IFC;".format (package_name),
        "   // ---------------- Facing BSV",
    ]
    # All FIFO declarations are padded to the same width so the port names
    # line up in a column ("FIFOF_I #()" and "FIFOF_O #()" are equally long).
    pad = len ("interface FIFOF_I #()")
    width = max ([len (s ['struct_name']) for s in C_to_BSV_structs])
    lines.append ("   // C to BSV")
    for s in C_to_BSV_structs:
        decl = "interface FIFOF_O #({:s})".format (s ['struct_name']).ljust (pad + width)
        lines.append ("   {:s} fo_{:s};".format (decl, s ['struct_name']))
    lines.append ("")
    width = max ([len (s ['struct_name']) for s in BSV_to_C_structs])
    lines.append ("   // BSV to C")
    for s in BSV_to_C_structs:
        decl = "interface FIFOF_I #({:s})".format (s ['struct_name']).ljust (pad + width)
        lines.append ("   {:s} fi_{:s};".format (decl, s ['struct_name']))
    lines.append ("")
    lines.append ("   // ---------------- Facing C")
    lines.append ("   interface FIFOF_I #(Bytevec_C_to_BSV)  fi_C_to_BSV_bytevec;")
    lines.append ("   interface FIFOF_O #(BSV_to_C_Bytevec)  fo_BSV_to_C_bytevec;")
    lines.append ("endinterface")
    lines.append ("")
    return "\n".join (lines) + "\n"
# ================================================================
def gen_module (package_name,
C_to_BSV_structs, C_to_BSV_packet_bytes,
BSV_to_C_structs, BSV_to_C_packet_bytes):
result = ("// ================================================================\n" +
"\n" +
"(* synthesize *)\n" +
"module mk{0:s} ({0:s}_IFC);\n".format (package_name) +
" Integer verbosity = 0;\n")
result += "\n"
result += " // FIFOs and credit counters for C_to_BSV\n"
result += " FIFOF #(Bytevec_C_to_BSV) f_C_to_BSV_bytevec <- mkFIFOF;\n"
for s in C_to_BSV_structs:
struct_name = s ['struct_name']
result += "\n"
result += " FIFOF #({0:s}) f_{0:s} <- mkSizedFIFOF (128);\n".format (struct_name)
result += " Reg #(Bit #(8)) rg_credits_{:s} <- mkReg (128);\n".format (struct_name)
result += "\n"
result += " // FIFOs and credit counters for BSV_to_C\n"
result += " FIFOF #(BSV_to_C_Bytevec) f_BSV_to_C_bytevec <- mkFIFOF;\n"
for s in BSV_to_C_structs:
struct_name = s ['struct_name']
result += "\n"
result += " FIFOF #({0:s}) f_{0:s} <- mkFIFOF;\n".format (struct_name)
result += " Reg #(Bit #(8)) rg_credits_{:s} <- mkReg (0);\n".format (struct_name)
result += "\n"
result += " // ================================================================\n"
result += " // BEHAVIOR: C to BSV packets\n"
type_C_to_BSV = C_to_BSV_packet_bytes ['packet_len'] + C_to_BSV_packet_bytes ['num_credits']
result += "\n"
result += " let bytevec_C_to_BSV = f_C_to_BSV_bytevec.first;\n"
result += ("\n" +
" rule rl_debug_bytevec_C_to_BSV (False);\n" +
' $write ("{:s}.rl_debug\\n ");\n'.format (package_name) +
" for (Integer j = 0; j < valueOf (Bytevec_C_to_BSV_Size); j = j + 1)\n" +
" if (fromInteger (j) < bytevec_C_to_BSV [0])\n" +
' $write (" %02h", bytevec_C_to_BSV [j]);\n' +
' $display ("\\n");\n' +
" endrule\n")
result += ("\n" +
" // Common function to restore credits for BSV-to-C channels\n" +
" function Action restore_credits_for_BSV_to_C ();\n" +
" action\n")
for j in range (1, type_C_to_BSV):
s_BSV_to_C = BSV_to_C_structs [j-1]
rg_credits = "rg_credits_{:s}".format (s_BSV_to_C ['struct_name'])
result += " {0:s} <= {0:s} + bytevec_C_to_BSV [{1:d}];\n".format (rg_credits, j)
result += (" endaction\n" +
" endfunction\n")
# C-to-BSV credits-only packet
result += ("\n" +
" rule rl_C_to_BSV_credits_only (bytevec_C_to_BSV [5] == 0);\n" +
"\n" +
" restore_credits_for_BSV_to_C;\n" +
"\n" +
" f_C_to_BSV_bytevec.deq;\n" +
" if (verbosity != 0)\n" +
' $display ("{:s}.rl_C_to_BSV_credits_only");\n'.format (package_name) +
" endrule\n")
# C-to-BSV packets for each struct type
for j in range (len (C_to_BSV_structs)):
s = C_to_BSV_structs [j]
chan_id = j + 1
struct_name = s ['struct_name']
result += "\n"
result += (" rule rl_C_to_BSV_{:s} (bytevec_C_to_BSV [{:d}] == {:d});\n".
format (struct_name, type_C_to_BSV, chan_id))
result += ("\n" +
" restore_credits_for_BSV_to_C;\n")
result += "\n"
result += " // Build a C-to-BSV struct from the bytevec\n"
result += "\n"
result += " let s = {:s} {{\n".format (struct_name)
offset = type_C_to_BSV + 1
fields = s ['fields']
for fj in range (len (fields)):
f = fields [fj]
field_name = f ['field_name']
width_bytes = f ['width_bytes']
dimension = f ['dimension']
if (width_bytes == 0):
result += (" {:s} : ?".format (field_name))
elif ((dimension * width_bytes) == 1):
result += (" {:s} : truncate (bytevec_C_to_BSV [{:d}])".
format (field_name, offset))
else:
line = " {:s} : truncate ({{ ".format (field_name)
result += line
for j in reversed (range (offset, offset + (dimension * width_bytes))):
term = "bytevec_C_to_BSV [{:d}]".format (j)
if (j == offset + (dimension * width_bytes) - 1):
result += term
else:
result += " ".rjust (len (line)) + term
if (j != offset):
result += ",\n"
result += " } )"
if fj < (len (fields) - 1):
result += ",\n"
else:
result += " };\n"
offset += (dimension * width_bytes)
result += "\n"
result += " // Enqueue the C-to-BSV struct and dequeue the bytevec\n"
result += " f_{:s}.enq (s);\n".format (struct_name)
result += " f_C_to_BSV_bytevec.deq;\n"
result += " if (verbosity != 0)\n"
result += (' $display ("{:s}: received {:s}: ", fshow (s));\n'.
format (package_name, struct_name))
result += " endrule\n"
# ================================================================
# Process BSV-to-C packets
result += "\n"
result += " // ================================================================\n"
result += " // BEHAVIOR: BSV to C structs\n"
type_BSV_to_C = BSV_to_C_packet_bytes ['packet_len'] + BSV_to_C_packet_bytes ['num_credits']
result += ("\n" +
" // Common function to fill in credits for C_to_BSV channels\n" +
" function ActionValue #(BSV_to_C_Bytevec) fill_credits_for_C_to_BSV (BSV_to_C_Bytevec bv);\n" +
" actionvalue\n")
for j in range (1, type_BSV_to_C):
s_C_to_BSV = C_to_BSV_structs [j-1]
rg_credits = "rg_credits_{:s}".format (s_C_to_BSV ['struct_name'])
result += (" bv [{:d}] = {:s};".format (j, rg_credits) +
" {:s} <= 0;\n".format (rg_credits))
result += (" return bv;\n" +
" endactionvalue\n" +
" endfunction\n")
for j in range (len (BSV_to_C_structs)):
s = BSV_to_C_structs [j]
chan_id = j + 1
struct_name = s ['struct_name']
size_bytes = s ['size_bytes']
result += ("\n" +
" Bool ready_{:s} =\n".format(struct_name) +
" (f_{:s}.notEmpty\n".format (struct_name) +
" && (rg_credits_{:s} != 0));\n".format (struct_name) +
"\n" +
" rule rl_BSV_to_C_{0:s} (ready_{0:s});\n".format (struct_name))
result += (" BSV_to_C_Bytevec bytevec_BSV_to_C = replicate (0);\n" +
" bytevec_BSV_to_C [0] = {:d};\n".
format (this_packet_size_bytes (BSV_to_C_packet_bytes, size_bytes)) +
"\n" +
" bytevec_BSV_to_C <- fill_credits_for_C_to_BSV (bytevec_BSV_to_C);\n" +
"\n" +
" bytevec_BSV_to_C [{:d}] = {:d};\n".format (type_BSV_to_C, chan_id))
result += "\n"
result += " // Unpack the BSV-to-C struct into the bytevec\n"
result += " let s = f_{:s}.first;\n".format (struct_name)
result += " f_{:s}.deq;\n".format (struct_name)
byte_offset = type_BSV_to_C + 1
for f in s ['fields']:
field_name = f ['field_name']
width_bytes = f ['width_bytes']
width_bits = f ['width_bits']
dimension = f ['dimension']
bit_lo = 0
while (bit_lo < width_bits):
bit_hi = bit_lo + 7;
if (bit_hi >= width_bits):
bit_hi = width_bits - 1
result += (" bytevec_BSV_to_C [{:d}] = zeroExtend (s.{:s} [{:d}:{:d}]);\n".
format (byte_offset, field_name, bit_hi, | |
<reponame>RoboVigor/gold-miner-robot
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Baxter RSDK Joint Trajectory Action Server
"""
import bisect
from copy import deepcopy
import math
import operator
import numpy as np
import bezier
import rospy
import actionlib
from control_msgs.msg import (
FollowJointTrajectoryAction,
FollowJointTrajectoryFeedback,
FollowJointTrajectoryResult,
)
from std_msgs.msg import (
UInt16,
)
from trajectory_msgs.msg import (
JointTrajectoryPoint,
)
import baxter_control
import baxter_dataflow
import baxter_interface
class JointTrajectoryActionServer(object):
    def __init__(self, limb, reconfig_server, rate=100.0,
                 mode='position_w_id'):
        """Action server executing FollowJointTrajectory goals on one Baxter limb.

        :param limb: limb name used to build the 'robot/limb/<limb>' namespace
        :param reconfig_server: dynamic reconfigure server supplying
            tolerances and PID gains via its ``config`` dict
        :param rate: control loop rate in Hz
        :param mode: one of 'position', 'position_w_id', 'velocity'
        """
        self._dyn = reconfig_server
        self._ns = 'robot/limb/' + limb
        self._fjt_ns = self._ns + '/follow_joint_trajectory'
        # Server is created un-started (auto_start=False) so the mode check
        # below can bail out before any goal is accepted.
        self._server = actionlib.SimpleActionServer(
            self._fjt_ns,
            FollowJointTrajectoryAction,
            execute_cb=self._on_trajectory_action,
            auto_start=False)
        self._action_name = rospy.get_name()
        self._limb = baxter_interface.Limb(limb)
        self._enable = baxter_interface.RobotEnable()
        self._name = limb
        # Cuff button: its state is polled by the command loops to release
        # control when the operator grabs the cuff.
        self._cuff = baxter_interface.DigitalIO('%s_lower_cuff' % (limb,))
        self._cuff.state_changed.connect(self._cuff_cb)
        # Verify joint control mode
        self._mode = mode
        if (self._mode != 'position' and self._mode != 'position_w_id'
            and self._mode != 'velocity'):
            rospy.logerr("%s: Action Server Creation Failed - "
                         "Provided Invalid Joint Control Mode '%s' (Options: "
                         "'position_w_id', 'position', 'velocity')" %
                         (self._action_name, self._mode,))
            # NOTE(review): returns without starting the server or defining
            # the attributes below, leaving the instance unusable.
            return
        self._server.start()
        self._alive = True
        self._cuff_state = False
        # Action Feedback/Result
        self._fdbk = FollowJointTrajectoryFeedback()
        self._result = FollowJointTrajectoryResult()
        # Controller parameters from arguments, messages, and dynamic
        # reconfigure
        self._control_rate = rate  # Hz
        self._control_joints = []
        self._pid_gains = {'kp': dict(), 'ki': dict(), 'kd': dict()}
        self._goal_time = 0.0
        self._stopped_velocity = 0.0
        self._goal_error = dict()
        self._path_thresh = dict()
        # Create our PID controllers
        self._pid = dict()
        for joint in self._limb.joint_names():
            self._pid[joint] = baxter_control.PID()
        # Create our spline coefficients
        self._coeff = [None] * len(self._limb.joint_names())
        # Set joint state publishing to specified control rate
        self._pub_rate = rospy.Publisher(
            '/robot/joint_state_publish_rate',
            UInt16,
            queue_size=10)
        self._pub_rate.publish(self._control_rate)
        self._pub_ff_cmd = rospy.Publisher(
            self._ns + '/inverse_dynamics_command',
            JointTrajectoryPoint,
            tcp_nodelay=True,
            queue_size=1)
def robot_is_enabled(self):
return self._enable.state().enabled
    def clean_shutdown(self):
        """Stop the command loops and return the limb to its default mode."""
        # _alive is polled by the command loops; clearing it ends them.
        self._alive = False
        self._limb.exit_control_mode()
    def _cuff_cb(self, value):
        # Cuff-button state callback; a truthy state makes the command loops
        # release control of the limb.
        self._cuff_state = value
def _get_trajectory_parameters(self, joint_names, goal):
# For each input trajectory, if path, goal, or goal_time tolerances
# provided, we will use these as opposed to reading from the
# parameter server/dynamic reconfigure
# Goal time tolerance - time buffer allowing goal constraints to be met
if goal.goal_time_tolerance:
self._goal_time = goal.goal_time_tolerance.to_sec()
else:
self._goal_time = self._dyn.config['goal_time']
# Stopped velocity tolerance - max velocity at end of execution
self._stopped_velocity = self._dyn.config['stopped_velocity_tolerance']
# Path execution and goal tolerances per joint
for jnt in joint_names:
if jnt not in self._limb.joint_names():
rospy.logerr(
"%s: Trajectory Aborted - Provided Invalid Joint Name %s" %
(self._action_name, jnt,))
self._result.error_code = self._result.INVALID_JOINTS
self._server.set_aborted(self._result)
return
# Path execution tolerance
path_error = self._dyn.config[jnt + '_trajectory']
if goal.path_tolerance:
for tolerance in goal.path_tolerance:
if jnt == tolerance.name:
if tolerance.position != 0.0:
self._path_thresh[jnt] = tolerance.position
else:
self._path_thresh[jnt] = path_error
else:
self._path_thresh[jnt] = path_error
# Goal error tolerance
goal_error = self._dyn.config[jnt + '_goal']
if goal.goal_tolerance:
for tolerance in goal.goal_tolerance:
if jnt == tolerance.name:
if tolerance.position != 0.0:
self._goal_error[jnt] = tolerance.position
else:
self._goal_error[jnt] = goal_error
else:
self._goal_error[jnt] = goal_error
# PID gains if executing using the velocity (integral) controller
if self._mode == 'velocity':
self._pid[jnt].set_kp(self._dyn.config[jnt + '_kp'])
self._pid[jnt].set_ki(self._dyn.config[jnt + '_ki'])
self._pid[jnt].set_kd(self._dyn.config[jnt + '_kd'])
self._pid[jnt].initialize()
def _get_current_position(self, joint_names):
return [self._limb.joint_angle(joint) for joint in joint_names]
def _get_current_velocities(self, joint_names):
return [self._limb.joint_velocity(joint) for joint in joint_names]
def _get_current_error(self, joint_names, set_point):
current = self._get_current_position(joint_names)
error = map(operator.sub, set_point, current)
return zip(joint_names, error)
    def _update_feedback(self, cmd_point, jnt_names, cur_time):
        """Publish FollowJointTrajectoryFeedback (desired/actual/error).

        :param cmd_point: JointTrajectoryPoint currently being commanded
        :param jnt_names: joint names matching cmd_point's array order
        :param cur_time: seconds since trajectory start
        """
        # NOTE(review): header.stamp is assigned a Duration built from wall
        # time; a rospy.Time would be the conventional type here - confirm.
        self._fdbk.header.stamp = rospy.Duration.from_sec(rospy.get_time())
        self._fdbk.joint_names = jnt_names
        self._fdbk.desired = cmd_point
        self._fdbk.desired.time_from_start = rospy.Duration.from_sec(cur_time)
        self._fdbk.actual.positions = self._get_current_position(jnt_names)
        self._fdbk.actual.time_from_start = rospy.Duration.from_sec(cur_time)
        # error = desired - actual, element-wise over the position arrays
        self._fdbk.error.positions = map(operator.sub,
                                         self._fdbk.desired.positions,
                                         self._fdbk.actual.positions
                                         )
        self._fdbk.error.time_from_start = rospy.Duration.from_sec(cur_time)
        self._server.publish_feedback(self._fdbk)
def _reorder_joints_ff_cmd(self, joint_names, point):
joint_name_order = self._limb.joint_names()
pnt = JointTrajectoryPoint()
pnt.time_from_start = point.time_from_start
pos_cmd = dict(zip(joint_names, point.positions))
for jnt_name in joint_name_order:
pnt.positions.append(pos_cmd[jnt_name])
if point.velocities:
vel_cmd = dict(zip(joint_names, point.velocities))
for jnt_name in joint_name_order:
pnt.velocities.append(vel_cmd[jnt_name])
if point.accelerations:
accel_cmd = dict(zip(joint_names, point.accelerations))
for jnt_name in joint_name_order:
pnt.accelerations.append(accel_cmd[jnt_name])
return pnt
    def _command_stop(self, joint_names, joint_angles, start_time, dimensions_dict):
        """Hold the limb stopped until a new goal arrives or shutdown.

        Velocity mode commands zero velocities; position modes re-command
        `joint_angles`. A cuff press exits control mode and breaks the loop.

        :param joint_names: joints to hold
        :param joint_angles: dict of angles to hold in position modes
        :param start_time: trajectory start (wall-clock seconds)
        :param dimensions_dict: which dimensions the trajectory supplied
        """
        if self._mode == 'velocity':
            velocities = [0.0] * len(joint_names)
            cmd = dict(zip(joint_names, velocities))
            # Loop until preempted by a new goal, shutdown, or disable.
            while (not self._server.is_new_goal_available() and self._alive
                   and self.robot_is_enabled()):
                self._limb.set_joint_velocities(cmd)
                if self._cuff_state:
                    self._limb.exit_control_mode()
                    break
                rospy.sleep(1.0 / self._control_rate)
        elif self._mode == 'position' or self._mode == 'position_w_id':
            raw_pos_mode = (self._mode == 'position_w_id')
            if raw_pos_mode:
                # Feedforward point at the current pose with zeroed
                # velocities/accelerations (where supplied).
                pnt = JointTrajectoryPoint()
                pnt.positions = self._get_current_position(joint_names)
                if dimensions_dict['velocities']:
                    pnt.velocities = [0.0] * len(joint_names)
                if dimensions_dict['accelerations']:
                    pnt.accelerations = [0.0] * len(joint_names)
            while (not self._server.is_new_goal_available() and self._alive
                   and self.robot_is_enabled()):
                self._limb.set_joint_positions(joint_angles, raw=raw_pos_mode)
                # zero inverse dynamics feedforward command
                if self._mode == 'position_w_id':
                    pnt.time_from_start = rospy.Duration(
                        rospy.get_time() - start_time)
                    ff_pnt = self._reorder_joints_ff_cmd(joint_names, pnt)
                    self._pub_ff_cmd.publish(ff_pnt)
                if self._cuff_state:
                    self._limb.exit_control_mode()
                    break
                rospy.sleep(1.0 / self._control_rate)
    def _command_joints(self, joint_names, point, start_time, dimensions_dict):
        """Command one trajectory point; return False if execution must stop.

        Stops the limb and returns False on preemption, robot disable, or a
        joint exceeding its path error threshold; otherwise commands the
        point in the active control mode and returns True.
        """
        if self._server.is_preempt_requested() or not self.robot_is_enabled():
            rospy.loginfo("%s: Trajectory Preempted" % (self._action_name,))
            self._server.set_preempted()
            self._command_stop(
                joint_names, self._limb.joint_angles(), start_time, dimensions_dict)
            return False
        velocities = []
        deltas = self._get_current_error(joint_names, point.positions)
        for delta in deltas:
            # delta is (joint_name, position_error); a negative threshold
            # disables the path-tolerance check for that joint.
            if ((math.fabs(delta[1]) >= self._path_thresh[delta[0]]
                and self._path_thresh[delta[0]] >= 0.0)) or not self.robot_is_enabled():
                rospy.logerr("%s: Exceeded Error Threshold on %s: %s" %
                             (self._action_name, delta[0], str(delta[1]),))
                self._result.error_code = self._result.PATH_TOLERANCE_VIOLATED
                self._server.set_aborted(self._result)
                self._command_stop(
                    joint_names, self._limb.joint_angles(), start_time, dimensions_dict)
                return False
            if self._mode == 'velocity':
                velocities.append(self._pid[delta[0]].compute_output(delta[1]))
        if ((self._mode == 'position' or self._mode == 'position_w_id')
                and self._alive):
            cmd = dict(zip(joint_names, point.positions))
            raw_pos_mode = (self._mode == 'position_w_id')
            self._limb.set_joint_positions(cmd, raw=raw_pos_mode)
            if raw_pos_mode:
                # Also publish the inverse-dynamics feedforward command.
                ff_pnt = self._reorder_joints_ff_cmd(joint_names, point)
                self._pub_ff_cmd.publish(ff_pnt)
        elif self._alive:
            cmd = dict(zip(joint_names, velocities))
            self._limb.set_joint_velocities(cmd)
        return True
def _get_bezier_point(self, b_matrix, idx, t, cmd_time, dimensions_dict):
pnt = JointTrajectoryPoint()
pnt.time_from_start = rospy.Duration(cmd_time)
num_joints = b_matrix.shape[0]
pnt.positions = [0.0] * num_joints
if dimensions_dict['velocities']:
pnt.velocities = [0.0] * num_joints
if dimensions_dict['accelerations']:
pnt.accelerations = [0.0] * num_joints
for jnt in range(num_joints):
b_point = bezier.bezier_point(b_matrix[jnt, :, :, :], idx, t)
# Positions at specified time
pnt.positions[jnt] = b_point[0]
# Velocities at specified time
if dimensions_dict['velocities']:
pnt.velocities[jnt] = b_point[1]
# Accelerations at specified time
if dimensions_dict['accelerations']:
pnt.accelerations[jnt] = b_point[-1]
return pnt
    def _compute_bezier_coeff(self, joint_names, trajectory_points, dimensions_dict):
        """Compute cubic Bezier coefficients for every joint and segment.

        :returns: array shaped (num_joints, num_dims, num_segments, 4) where
            num_dims counts the supplied dimensions and num_segments is
            len(trajectory_points) - 1
        """
        # Compute Full Bezier Curve
        num_joints = len(joint_names)
        num_traj_pts = len(trajectory_points)
        num_traj_dim = sum(dimensions_dict.values())
        num_b_values = len(['b0', 'b1', 'b2', 'b3'])  # cubic -> 4 coefficients
        b_matrix = np.zeros(shape=(num_joints, num_traj_dim,
                                   num_traj_pts-1, num_b_values))
        # NOTE: xrange - this module is Python 2 code.
        for jnt in xrange(num_joints):
            # Stack this joint's values at every waypoint into rows of
            # [position(, velocity)(, acceleration)].
            traj_array = np.zeros(shape=(len(trajectory_points), num_traj_dim))
            for idx, point in enumerate(trajectory_points):
                current_point = list()
                current_point.append(point.positions[jnt])
                if dimensions_dict['velocities']:
                    current_point.append(point.velocities[jnt])
                if dimensions_dict['accelerations']:
                    current_point.append(point.accelerations[jnt])
                traj_array[idx, :] = current_point
            d_pts = bezier.de_boor_control_pts(traj_array)
            b_matrix[jnt, :, :, :] = bezier.bezier_coefficients(
                traj_array, d_pts)
        return b_matrix
def _determine_dimensions(self, trajectory_points):
# Determine dimensions supplied
position_flag = True
velocity_flag = (len(trajectory_points[0].velocities) != 0 and
len(trajectory_points[-1].velocities) != 0)
acceleration_flag = (len(trajectory_points[0].accelerations) != 0 and
len(trajectory_points[-1].accelerations) != 0)
return {'positions': position_flag,
'velocities': velocity_flag,
'accelerations': acceleration_flag}
def _on_trajectory_action(self, goal):
joint_names = goal.trajectory.joint_names
trajectory_points = goal.trajectory.points
# Load parameters for trajectory
self._get_trajectory_parameters(joint_names, goal)
# Create a new discretized joint trajectory
num_points = len(trajectory_points)
if num_points == 0:
rospy.logerr("%s: Empty Trajectory" % (self._action_name,))
self._server.set_aborted()
return
rospy.loginfo("%s: Executing requested joint trajectory" %
(self._action_name,))
rospy.logdebug("Trajectory Points: {0}".format(trajectory_points))
control_rate = rospy.Rate(self._control_rate)
dimensions_dict = self._determine_dimensions(trajectory_points)
if num_points == 1:
# Add current position as trajectory point
first_trajectory_point = JointTrajectoryPoint()
first_trajectory_point.positions = self._get_current_position(
joint_names)
# To preserve desired velocities and accelerations, copy them to the first
# trajectory point if the trajectory is only 1 point.
if dimensions_dict['velocities']:
first_trajectory_point.velocities = deepcopy(
trajectory_points[0].velocities)
if dimensions_dict['accelerations']:
first_trajectory_point.accelerations = deepcopy(
trajectory_points[0].accelerations)
first_trajectory_point.time_from_start = rospy.Duration(0)
trajectory_points.insert(0, | |
<reponame>motey/py2neo
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
__all__ = [
"Subgraph",
"Walkable",
"Entity",
"Node",
"Relationship",
"Path",
"walk",
"UniquenessError",
]
from collections import OrderedDict
from itertools import chain
from uuid import uuid4
# noinspection PyUnresolvedReferences
from interchange import geo as spatial
# noinspection PyUnresolvedReferences
from interchange import time
from interchange.collections import SetView, PropertyDict
from py2neo.compat import string_types, ustr, xstr
from py2neo.cypher import cypher_escape, cypher_repr, cypher_join
from py2neo.cypher.encoding import CypherEncoder, LabelSetView
from py2neo.cypher.queries import (
unwind_create_nodes_query,
unwind_merge_nodes_query,
unwind_merge_relationships_query,
)
class Subgraph(object):
""" A :class:`.Subgraph` is an arbitrary collection of nodes and
relationships. It is also the base class for :class:`.Node`,
:class:`.Relationship` and :class:`.Path`.
By definition, a subgraph must contain at least one node;
`null subgraphs <http://mathworld.wolfram.com/NullGraph.html>`_
should be represented by :const:`None`. To test for
`emptiness <http://mathworld.wolfram.com/EmptyGraph.html>`_ the
built-in :func:`bool` function can be used.
The simplest way to construct a subgraph is by combining nodes and
relationships using standard set operations. For example::
>>> s = ab | ac
>>> s
{(alice:Person {name:"Alice"}),
(bob:Person {name:"Bob"}),
(carol:Person {name:"Carol"}),
(Alice)-[:KNOWS]->(Bob),
(Alice)-[:WORKS_WITH]->(Carol)}
>>> s.nodes()
frozenset({(alice:Person {name:"Alice"}),
(bob:Person {name:"Bob"}),
(carol:Person {name:"Carol"})})
>>> s.relationships()
frozenset({(Alice)-[:KNOWS]->(Bob),
(Alice)-[:WORKS_WITH]->(Carol)})
.. describe:: subgraph | other | ...
Union.
Return a new subgraph containing all nodes and relationships from *subgraph* as well as all those from *other*.
Any entities common to both will only be included once.
.. describe:: subgraph & other & ...
Intersection.
Return a new subgraph containing all nodes and relationships common to both *subgraph* and *other*.
.. describe:: subgraph - other - ...
Difference.
Return a new subgraph containing all nodes and relationships that exist in *subgraph* but do not exist in *other*,
as well as all nodes that are connected by the relationships in *subgraph* regardless of whether or not they exist in *other*.
.. describe:: subgraph ^ other ^ ...
Symmetric difference.
Return a new subgraph containing all nodes and relationships that exist in *subgraph* or *other*, but not in both,
as well as all nodes that are connected by those relationships regardless of whether or not they are common to *subgraph* and *other*.
"""
def __init__(self, nodes=None, relationships=None):
self.__nodes = frozenset(nodes or [])
self.__relationships = frozenset(relationships or [])
self.__nodes |= frozenset(chain.from_iterable(r.nodes for r in self.__relationships))
#if not self.__nodes:
# raise ValueError("Subgraphs must contain at least one node")
def __repr__(self):
return "Subgraph({%s}, {%s})" % (", ".join(map(repr, self.nodes)),
", ".join(map(repr, self.relationships)))
def __eq__(self, other):
try:
return self.nodes == other.nodes and self.relationships == other.relationships
except (AttributeError, TypeError):
return False
    def __ne__(self, other):
        # Explicit inverse of __eq__ (required for Python 2 compatibility).
        return not self.__eq__(other)
def __hash__(self):
value = 0
for entity in self.__nodes:
value ^= hash(entity)
for entity in self.__relationships:
value ^= hash(entity)
return value
    def __len__(self):
        # The "length" of a subgraph is its relationship count, not its nodes.
        return len(self.__relationships)
    def __iter__(self):
        # Iterating a subgraph yields its relationships.
        return iter(self.__relationships)
    def __bool__(self):
        # Truthy iff the subgraph contains at least one relationship.
        return bool(self.__relationships)
    def __nonzero__(self):
        # Python 2 alias for __bool__.
        return bool(self.__relationships)
def __or__(self, other):
return Subgraph(set(self.nodes) | set(other.nodes), set(self.relationships) | set(other.relationships))
def __and__(self, other):
return Subgraph(set(self.nodes) & set(other.nodes), set(self.relationships) & set(other.relationships))
def __sub__(self, other):
r = set(self.relationships) - set(other.relationships)
n = (set(self.nodes) - set(other.nodes)) | set().union(*(set(rel.nodes) for rel in r))
return Subgraph(n, r)
def __xor__(self, other):
r = set(self.relationships) ^ set(other.relationships)
n = (set(self.nodes) ^ set(other.nodes)) | set().union(*(set(rel.nodes) for rel in r))
return Subgraph(n, r)
@classmethod
def _is_bound(cls, entity, graph):
if entity.graph is None:
return False
elif entity.graph == graph:
return True
else:
raise ValueError("Entity %r is already bound to graph %r" % (entity, graph))
    def __db_create__(self, tx):
        """ Create new data in a remote :class:`.Graph` from this
        :class:`.Subgraph`.

        Only entities not yet bound to the target graph are sent; each
        created entity is bound (graph/identity set) as results return.

        :param tx: transaction to create through
        """
        graph = tx.graph
        # Convert nodes into a dictionary of
        # {frozenset(labels): [Node, Node, ...]}
        node_dict = {}
        for node in self.nodes:
            if not self._is_bound(node, tx.graph):
                key = frozenset(node.labels)
                node_dict.setdefault(key, []).append(node)
        # Convert relationships into a dictionary of
        # {rel_type: [Rel, Rel, ...]}
        rel_dict = {}
        for relationship in self.relationships:
            if not self._is_bound(relationship, tx.graph):
                key = type(relationship).__name__
                rel_dict.setdefault(key, []).append(relationship)
        for labels, nodes in node_dict.items():
            pq = unwind_create_nodes_query(list(map(dict, nodes)), labels=labels)
            pq = cypher_join(pq, "RETURN id(_)")
            records = tx.run(*pq)
            # Records come back in UNWIND order, so index i matches nodes[i].
            for i, record in enumerate(records):
                node = nodes[i]
                node.graph = graph
                node.identity = record[0]
                node._remote_labels = labels
        # NOTE(review): relationships use a MERGE query on the create path -
        # confirm this asymmetry with node creation is intentional.
        for r_type, relationships in rel_dict.items():
            data = map(lambda r: [r.start_node.identity, dict(r), r.end_node.identity],
                       relationships)
            pq = unwind_merge_relationships_query(data, r_type)
            pq = cypher_join(pq, "RETURN id(_)")
            for i, record in enumerate(tx.run(*pq)):
                relationship = relationships[i]
                relationship.graph = graph
                relationship.identity = record[0]
    def __db_delete__(self, tx):
        """ Delete data in a remote :class:`.Graph` based on this
        :class:`.Subgraph`.

        Unbinds every bound entity locally, then detach-deletes the bound
        nodes remotely in a single statement.

        :param tx: transaction to delete through
        """
        graph = tx.graph
        node_identities = []
        for relationship in self.relationships:
            if self._is_bound(relationship, graph):
                # Local unbind only; DETACH DELETE below removes the remote
                # relationships together with their nodes.
                relationship.graph = None
                relationship.identity = None
        for node in self.nodes:
            if self._is_bound(node, graph):
                node_identities.append(node.identity)
                node.graph = None
                node.identity = None
        # TODO: this might delete remote relationships that aren't
        # represented in the local subgraph - is this OK?
        list(tx.run("MATCH (_) WHERE id(_) IN $x DETACH DELETE _", x=node_identities))
def __db_exists__(self, tx):
""" Determine whether one or more graph entities all exist
within the database. Note that if any nodes or relationships in
this :class:`.Subgraph` are not bound to remote counterparts,
this method will return ``False``.
:param tx:
:returns: ``True`` if all entities exist remotely, ``False``
otherwise
"""
graph = tx.graph
node_ids = set()
relationship_ids = set()
for i, node in enumerate(self.nodes):
try:
if self._is_bound(node, graph):
node_ids.add(node.identity)
else:
return False
except ValueError:
return False
for i, relationship in enumerate(self.relationships):
try:
if self._is_bound(relationship, graph):
relationship_ids.add(relationship.identity)
else:
return False
except ValueError:
return False
statement = ("OPTIONAL MATCH (a) WHERE id(a) IN $x "
"OPTIONAL MATCH ()-[r]->() WHERE id(r) IN $y "
"RETURN count(DISTINCT a) + count(DISTINCT r)")
parameters = {"x": list(node_ids), "y": list(relationship_ids)}
return tx.evaluate(statement, parameters) == len(node_ids) + len(relationship_ids)
    def __db_merge__(self, tx, primary_label=None, primary_key=None):
        """ Merge data into a remote :class:`.Graph` from this
        :class:`.Subgraph`.

        Per-node ``__primarylabel__``/``__primarykey__`` (or the node's
        model's) override the method-level defaults. Merged entities are
        bound (graph/identity set) as results return.

        :param tx: transaction to merge through
        :param primary_label: fallback label used to match existing nodes
        :param primary_key: fallback property key used to match existing nodes
        :raises ValueError: if no primary label/key can be determined
        :raises UniquenessError: if a merge matches more than one node
        """
        graph = tx.graph
        # Convert nodes into a dictionary of
        # {(p_label, p_key, frozenset(labels)): [Node, Node, ...]}
        node_dict = {}
        for node in self.nodes:
            if not self._is_bound(node, graph):
                # Determine primary label
                if node.__primarylabel__ is not None:
                    p_label = node.__primarylabel__
                elif node.__model__ is not None:
                    p_label = node.__model__.__primarylabel__ or primary_label
                else:
                    p_label = primary_label
                # Determine primary key
                if node.__primarykey__ is not None:
                    p_key = node.__primarykey__
                elif node.__model__ is not None:
                    p_key = node.__model__.__primarykey__ or primary_key
                else:
                    p_key = primary_key
                # Add node to the node dictionary
                key = (p_label, p_key, frozenset(node.labels))
                node_dict.setdefault(key, []).append(node)
        # Convert relationships into a dictionary of
        # {rel_type: [Rel, Rel, ...]}
        rel_dict = {}
        for relationship in self.relationships:
            if not self._is_bound(relationship, graph):
                key = type(relationship).__name__
                rel_dict.setdefault(key, []).append(relationship)
        for (pl, pk, labels), nodes in node_dict.items():
            if pl is None or pk is None:
                raise ValueError("Primary label and primary key are required for MERGE operation")
            pq = unwind_merge_nodes_query(map(dict, nodes), (pl, pk), labels)
            pq = cypher_join(pq, "RETURN id(_)")
            identities = [record[0] for record in tx.run(*pq)]
            # More identities than input nodes means some merge matched
            # multiple existing nodes - that violates uniqueness.
            if len(identities) > len(nodes):
                raise UniquenessError("Found %d matching nodes for primary label %r and primary "
                                      "key %r with labels %r but merging requires no more than "
                                      "one" % (len(identities), pl, pk, set(labels)))
            for i, identity in enumerate(identities):
                node = nodes[i]
                node.graph = graph
                node.identity = identity
                node._remote_labels = labels
        for r_type, relationships in rel_dict.items():
            data = map(lambda r: [r.start_node.identity, dict(r), r.end_node.identity],
                       relationships)
            pq = unwind_merge_relationships_query(data, r_type)
            pq = cypher_join(pq, "RETURN id(_)")
            for i, record in enumerate(tx.run(*pq)):
                relationship = relationships[i]
                relationship.graph = graph
                relationship.identity = record[0]
def __db_pull__(self, tx):
""" Copy data from a remote :class:`.Graph` into this
:class:`.Subgraph`.
:param tx:
"""
# Pull nodes
nodes = {}
for node in self.nodes:
if self._is_bound(node, tx.graph):
nodes[node.identity] = node
query = tx.run("MATCH (_) WHERE id(_) in $x "
"RETURN id(_), labels(_), properties(_)", | |
import sys
sys.path.append("..")
from common.utils import Node
from rust.long_macros import *
#%%
def paintLineOn(buff, text, indent):
    """Return *buff* with *text* appended as a new line, prefixed by *indent*."""
    return buff + indent + text + "\n"
def paint_type(typeName):
    """Render a Glace AST type node as Rust type syntax.

    Dispatches on ``typeName.value`` (the node tag) and recurses into
    ``typeName.children``.  Unrecognised tags yield the sentinel string
    "typeNotImplemented".

    :param typeName: AST node (``common.utils.Node``) with ``.value`` and
        ``.children`` attributes.
    :return: Rust source text for the type.
    """
    # Generic<...>: Ref/MutRef become Rust reference syntax, anything else
    # an ordinary angle-bracket generic.
    if typeName.value == "Generic":
        base, *args = typeName.children
        base, args = paint_type(base), [paint_type(arg) for arg in args]
        if base == "Ref":
            return f"&{args[0]}"
        if base == "MutRef":
            return f"&mut {args[0]}"
        return f"{base}<{', '.join(args)}>"
    # A type applied to a single type argument, rendered call-style.
    if typeName.value == "TypeCall":
        base, arg = typeName.children
        base, arg = paint_type(base), paint_type(arg)
        return f"{base}({arg})"
    # handle combos
    if typeName.value == "TypeExpr":
        op, left, right = typeName.children
        left, right = paint_type(left), paint_type(right)
        op = op.value
        # keep the operand containing "mut" on the left so `mut T` renders first
        left, right = (left, right) if "mut" in left else (right, left)
        if op == "*":
            return left + " " + right
        if op == "::":
            return right + "::" + left
        # NOTE(review): any other operator falls through the remaining checks
        # and returns "typeNotImplemented" — confirm this is intentional.
    if typeName.value == "FixedArray":
        # `type` shadows the builtin here; left unchanged in this doc-only pass.
        type, count = typeName.children
        type = paint_type(type)
        return f"[{type} ; {paint_expression(count)}]"
    if typeName.value == "ID":
        # Map Glace primitive names onto their Rust equivalents.
        name = typeName.children[0].value
        if name == "Int":
            return "i32"
        if name == "uSize":
            return "usize"
        if name == "Float":
            return "f32"
        if name == "String":
            return "String"
        if name == "Bool":
            return "bool"
        if name == "Mut":
            return "mut"
        if name in ["Obj", "Object"]:
            return "HashMap<&str, Box<dyn Any + 'static>>"
        # Anything else is assumed to be a user-defined type name.
        return name
    # Raw macro escape hatch: `rust` macro bodies are inlined verbatim,
    # other names become a Rust macro invocation.
    if typeName.value == "RawMacroCall":
        extra = typeName.children
        macroName = extra[0].children[0].value
        macroContents = extra[1].value
        if macroName == "rust":
            return macroContents.strip()
        else:
            return f"{macroName}! {macroContents}"
    return "typeNotImplemented"
def paint_call(name, args):
    """Render a Glace call expression as Rust source.

    A few built-in names (print, Box, Ref, Mut, MutRef, Unbox) translate to
    special Rust syntax; any other name becomes a plain ``name(args)`` call.
    """
    rendered = ", ".join(str(paint_expression(arg)) for arg in args)
    builtin_forms = {
        "print": lambda a: 'println!("{:#?}", ' + a + ")",
        "Box": lambda a: f"Box::new({a})",
        "Ref": lambda a: f"&({a})",
        "Mut": lambda a: f"mut ({a})",
        "MutRef": lambda a: f"&mut ({a})",
        "Unbox": lambda a: f"*({a})",
    }
    handler = builtin_forms.get(name)
    if handler is not None:
        return handler(rendered)
    return f"{name}({rendered})"
def paint_expression(expr, currentIndent=""):
    """Render a Glace AST expression node as Rust source text.

    :param expr: AST node; ``expr.value`` tags the expression kind and
        ``expr.children`` holds its operands.
    :param currentIndent: indentation prefix used when emitting multi-line
        constructs (blocks, object literals, match arms).
    :return: Rust source string, or "exprNotImplemented" for unknown tags.

    NOTE(review): expressions like ``iden[1][0][0]`` index through Node
    objects to reach the raw identifier text — presumably via
    ``Node.__getitem__``; verify against ``common.utils.Node``.
    """
    if expr.value == "None":
        return expr.value
    # Literals carry their source text directly in the first child.
    if expr.value == "String" or expr.value == "Number":
        return expr.children[0].value
    if expr.value == "ID":
        return expr.children[0].value
    if expr.value == "BinOp":
        # TODO handle Glace-specific ops
        op, left, right = expr.children
        left, right = paint_expression(left, currentIndent), paint_expression(right, currentIndent)
        op = op.value
        # "=" introduces a new binding, so prefix the target with `let`
        if op == "=":
            left = f"let {left}"
        # "@" juxtaposes its operands (no operator emitted between them)
        if op == "@":
            return f"{left} {right}"
        return f"({left} {op} {right})"
    if expr.value == "Call":
        if len(expr.children) > 1:
            iden, *arg = expr.children
            if iden.value == "ID":
                name = iden[1][0][0]
                return paint_call(name, arg)
        else:
            # Zero-argument call: synthesise an empty ID node as the argument.
            return paint_call(expr.children[0][1][0][0], [Node("ID", [Node("", [])])])
    if expr.value == "ComplexCall":
        # A head identifier followed by a chain of trailers (call args,
        # indexing, struct literal, field/assoc access, turbofish, ...).
        out = ""
        iden, *extra = expr.children
        out += str(iden[1][0][0])
        for call in extra:
            if call.value == "Parg":
                # Parenthesised argument list.
                out += "(" + \
                    ", ".join(str(paint_expression(child, currentIndent))
                              for child in call.children) + ")"
            if call.value == "Aidx":
                # Array index; an empty index renders as a full slice.
                if len(call.children) != 0:
                    out += "[" + str(paint_expression(call.children[0], currentIndent)) + "]"
                else:
                    out += "[:]"
            if call.value == "Spawn":
                # Struct literal `{ field: value, ... }`; a kwarg with a single
                # child is emitted as shorthand.
                out += "{ " + \
                    ", ".join(kwarg.children[0][1][0][0] + ": " +
                              str(paint_expression(kwarg.children[1], currentIndent))
                              if len(kwarg.children) > 1 else
                              paint_expression(kwarg)
                              for kwarg in call.children) + " }"
            if call.value == "ObjGet":
                # HashMap-backed object field access with a downcast.
                vartype, pName = call.children[0].children
                vartype = paint_type(vartype)
                out += f'.get("{pName[1][0][0]}").unwrap().downcast_ref::<{vartype}>().unwrap()'
            if call.value == "ObjGet?":
                # Optional object field access: wraps the access in a
                # None-checking block built by object_none_checker.
                vartype, pName = call.children[0].children
                vartype = paint_type(vartype)
                out = "{" + object_none_checker(
                    vartype, pName, out, currentIndent
                ) + "\n" + currentIndent + "}"
            if call.value == "DGen":
                # Turbofish generic argument, e.g. `::<T>`.
                out += "::<" + paint_type(call[1][0]) + ">"
            if call.value == "Dcol":
                # Associated-item access `::name`.
                out += "::" + call[1][0][1][0][0]
            if call.value == "Dot":
                # Field/method access `.name`.
                out += "." + call[1][0][1][0][0]
        return out
    if expr.value == "Rest":
        # Range-rest expression, e.g. `..end`.
        return ".." + paint_expression(expr.children[0])
    # Reworking this
    if expr.value == "Object":
        # Anonymous object: emitted as a block that builds a boxed HashMap.
        assigns = expr.children
        out = "{\n" + currentIndent + "\t" + "let mut object: HashMap<&str, Box<dyn Any>> = HashMap::new();" + "\n"
        for assign in assigns:
            name, value = assign.children
            if name.value == "ID":
                name = name[1][0][0]
            value = paint_expression(value, currentIndent + "\t")
            out += currentIndent + "\t" + f'object.insert("{name}", Box::new({value}));' + "\n"
        return out + currentIndent + "\t" + "object" + "\n" + currentIndent + "}"
    if expr.value == "TypedDecl":
        vartype, varname = expr.children
        # NOTE(review): no return here — a TypedDecl falls through and ends up
        # returning "exprNotImplemented"; confirm whether this is intentional.
    if expr.value == "Function":
        # Render as a lambda: paint a function with a placeholder name and
        # slice the closure text back out around the "§§" marker.
        default = paint_function("§§", expr, currentIndent)
        return default.split("§§")[1][3:-2].replace("\n", "\n" + currentIndent + "\t")
    if expr.value == "Block":
        prg = paint_program(expr.children, currentIndent + "\t")
        return "{\n" + prg + currentIndent + "}"
    if expr.value == "Vector":
        return "vec![" + \
            ", ".join(str(paint_expression(e, currentIndent)) for e in expr.children) + \
            "]"
    if expr.value == "Array":
        return "[" + \
            ", ".join(str(paint_expression(e, currentIndent)) for e in expr.children) + \
            "]"
    if expr.value == "Tuple":
        return "(" + \
            ", ".join(str(paint_expression(e, currentIndent)) for e in expr.children) + \
            ")"
    if expr.value == "FixedArray":
        # Fixed-size array type/expression `[T ; N]`.
        type, count = expr.children
        return "[" + paint_type(type) + " ; " + str(paint_expression(count)) + "]"
    if expr.value == "RawMacroCall":
        # `rust` macro bodies are inlined verbatim; others become `name! body`.
        name = expr.children[0][1][0][0]
        body = expr.children[1].value
        if name == "rust":
            return body
        return f"{name}! {body}"
    if expr.value == "Match":
        # Children are laid out flat as: test, then repeating
        # (pattern, captured props, arm value) triples.
        extra = expr.children
        test = extra[0]
        match_out = f"match {paint_expression(test)} {{\n"
        conds = extra[1::3]
        props = extra[2::3]
        vals = extra[3::3]
        for cond, prop, val in zip(conds, props, vals):
            cond, val = paint_type(cond), paint_expression(val, currentIndent + "\t")
            prop = ", ".join(paint_expression(p) for p in prop.children)
            if len(prop) > 0:
                prop = " { " + prop + " }"
            match_out = paintLineOn(match_out, f"{cond}{prop} => {val},", currentIndent + "\t")
        match_out += "}"
        return match_out
    return "exprNotImplemented"
def paint_function(name, tree, currentIndent=""):
    """Render a Glace function definition as Rust source.

    :param name: function name; a "pub_" prefix makes the item ``pub``.
        Callers may also pass a placeholder name (e.g. "§§") to extract a
        closure body afterwards (see paint_expression's "Function" branch).
    :param tree: AST node whose children are (argument list, body).
    :param currentIndent: indentation prefix for emitted lines.
    :return: a Rust ``fn`` item (when the body is a FunctionBody) or a
        ``let name = |...| ...;`` closure binding otherwise, each terminated
        with a newline.
    """
    argument, body = tree.children
    # Normal function
    if body.value == "FunctionBody":
        argsText = ""
        bodyText = ""
        if argument.children[0].value != "None":
            argsText = ""
            for i, argument in enumerate(argument.children):
                if argument.value == "TypedDecl":
                    # NOTE(review): argument[1][1][1][0][0] digs the argument
                    # name out of nested Nodes — verify against Node layout.
                    argName, type = argument[1][1][1][0][0], paint_type(argument.children[0])
                    # A `mut` marker moves from the type onto the binding name.
                    if "mut" in type:
                        type = type.replace("mut ", "")
                        argName = "mut " + argName
                    argsText += f"{argName}: {type}, "
                else:
                    # Untyped group: passed as a borrowed object map, with each
                    # declared field unpacked into a local at the body top.
                    argsText += f"obj{i}: &{paint_type(Node('ID', [Node('Object', [])]))}, "
                    for decl in argument.children:
                        vname = decl.children[1].children[0].value
                        bodyText += currentIndent + "\t" + f"let {vname} = " + paint_expression(Node("ComplexCall", [
                            Node("ID", [Node(f"obj{i}", [])]),
                            Node("ObjGet" + ("?" if decl.value[-1] == "?" else ""), [decl])
                        ]), currentIndent + "\t") + ";\n"
            # Trim the trailing ", " separator.
            argsText = argsText[:-2]
        retType, retValue = body.children
        outputType = paint_type(retType)
        if retValue.value == "Block":
            bodyText += paint_program(retValue.children, currentIndent + "\t")
        else:
            bodyText += currentIndent + "\t" + str(paint_expression(retValue, currentIndent)) + "\n"
        # "Void" returns are emitted with no `-> T` clause.
        outputText = f" -> {outputType}" if outputType != "Void" else ""
        pub = ""
        if len(name.split("pub_")) > 1:
            pub = "pub "
            name = name.split("pub_")[-1]
        return f"{pub}fn {name}({argsText}){outputText} " + "{" + f"\n{bodyText}{currentIndent}" + "}\n"
    # Lambda
    else:
        argsText = ""
        if argument.children[0].value != "None":
            for argument in argument.children:
                if argument.value == "TypedDecl":
                    argName, type = argument[1][1][1][0][0], paint_type(argument.children[0])
                    argsText += f"{argName}: {type}, "
                else:
                    argsText += argument.children[0][0] + ", "
            # Trim the trailing ", " separator.
            argsText = argsText[:-2]
        bodyText = compl = ""
        if body.value == "Block":
            bodyText = "{\n" + paint_program(body.children, currentIndent + "\t") + currentIndent + "}"
        else:
            bodyText = str(paint_expression(body, currentIndent))
        # A body that is itself a closure is marked `move`.
        if bodyText[0] == '|':
            compl = " move"
        # Re-indent continuation lines of a multi-line body under this binding.
        newBody = ""
        for i, e in enumerate(bodyText.splitlines()):
            if i == 0:
                newBody += e
            else:
                extraTab = "\t" if i != len(bodyText.splitlines()) - 1 else ""
                newBody += "\n" + currentIndent + extraTab + e.strip()
        bodyText = newBody
        return f"let {name} = |{argsText}|{compl} {bodyText};" + "\n"
def paint_struct(name, tree, currentIndent=""):
_, *sections = tree.children
out = ""
for section in sections:
secName, *program = section.children
if secName[1][0].value in ["data", "shader_data"]:
if secName[1][0].value == "shader_data":
out += currentIndent + "#[repr(C)]\n"
out += currentIndent + "#[derive(Clone, Copy, Debug, PartialEq, Default)]\n"
pub = ""
if len(name.split("pub_")) > 1:
pub = "pub "
name = name.split("pub_")[-1]
out += f"{pub}struct {name}" + " {\n" + currentIndent
for decl in program:
type, val = decl.children
type = paint_type(type)
fname, pub = val[1][0][0], ""
if "pub_" in fname:
pub = "pub "
fname = fname.split("pub_")[-1]
out += "\t" + f"{pub}{fname}: {type}," + "\n" + currentIndent
out += "}\n" + currentIndent
if secName[1][0].value == "shader_data":
out += currentIndent + f"unsafe impl ocl::OclPrm for {name} {{}}" + "\n"
if secName[1][0].value == "methods":
out += f"impl {name}" + " {\n" + currentIndent
for decl in program:
funcName, func = paint_varname(decl.children)
body = paint_function(funcName, func, currentIndent + "\t")
out += "\t" + body\
.replace(f"self: mut {name}", f"&mut self")\
.replace(f"self: {name}", "&self")\
.replace("mut &self", "&mut self")\
.replace(f"mut self: &{name}", "&mut self")
out += | |
{}".format(type(error_)))
if result_ is not None and not isinstance(result_, (dict, ApplicationInfo)):
raise Exception("Expected result_ to be a ApplicationInfo, received: {}".format(type(result_)))
self.error = error_
self.result = result_
self.unknown_fields = unknown_fields
class ApplicationInfoResults(Type):
    """Batch of ApplicationInfoResult records returned by the Juju API.

    Auto-generated facade type; ``_toSchema`` / ``_toPy`` map attribute
    names onto the JSON wire keys.
    """
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}

    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~ApplicationInfoResult]
        '''
        # Deserialise each wire object into its typed wrapper.
        parsed = [ApplicationInfoResult.from_json(item) for item in (results or [])]
        # Validate arguments against known Juju API types.
        if parsed is not None and not isinstance(parsed, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(parsed)))
        self.results = parsed
        self.unknown_fields = unknown_fields
class ApplicationMetricCredential(Type):
    """Pairs an application name with its metrics credentials payload.

    Auto-generated facade type; ``_toSchema`` / ``_toPy`` map attribute
    names onto the JSON wire keys.
    """
    _toSchema = {'application': 'application', 'metrics_credentials': 'metrics-credentials'}
    _toPy = {'application': 'application', 'metrics-credentials': 'metrics_credentials'}

    def __init__(self, application=None, metrics_credentials=None, **unknown_fields):
        '''
        application : str
        metrics_credentials : typing.Sequence[int]
        '''
        # Validate arguments against known Juju API types.
        if application is not None and not isinstance(application, (bytes, str)):
            raise Exception("Expected application_ to be a str, received: {}".format(type(application)))
        if metrics_credentials is not None and not isinstance(metrics_credentials, (bytes, str, list)):
            raise Exception("Expected metrics_credentials_ to be a Sequence, received: {}".format(type(metrics_credentials)))
        self.application = application
        self.metrics_credentials = metrics_credentials
        self.unknown_fields = unknown_fields
class ApplicationMetricCredentials(Type):
    """Batch wrapper for ApplicationMetricCredential entries.

    Auto-generated facade type; ``_toSchema`` / ``_toPy`` map attribute
    names onto the JSON wire keys.
    """
    _toSchema = {'creds': 'creds'}
    _toPy = {'creds': 'creds'}

    def __init__(self, creds=None, **unknown_fields):
        '''
        creds : typing.Sequence[~ApplicationMetricCredential]
        '''
        # Deserialise each wire object into its typed wrapper.
        parsed = [ApplicationMetricCredential.from_json(item) for item in (creds or [])]
        # Validate arguments against known Juju API types.
        if parsed is not None and not isinstance(parsed, (bytes, str, list)):
            raise Exception("Expected creds_ to be a Sequence, received: {}".format(type(parsed)))
        self.creds = parsed
        self.unknown_fields = unknown_fields
class ApplicationOffer(Type):
    """Wire representation of an application offered for cross-model relations.

    Auto-generated Juju facade type: ``_toSchema`` / ``_toPy`` translate
    between Python attribute names and the hyphenated JSON wire keys.
    """
    _toSchema = {'access': 'access', 'application_description': 'application-description', 'bindings': 'bindings', 'endpoints': 'endpoints', 'offer_name': 'offer-name', 'offer_url': 'offer-url', 'source_model_tag': 'source-model-tag', 'spaces': 'spaces'}
    _toPy = {'access': 'access', 'application-description': 'application_description', 'bindings': 'bindings', 'endpoints': 'endpoints', 'offer-name': 'offer_name', 'offer-url': 'offer_url', 'source-model-tag': 'source_model_tag', 'spaces': 'spaces'}

    def __init__(self, access=None, application_description=None, bindings=None, endpoints=None, offer_name=None, offer_url=None, source_model_tag=None, spaces=None, **unknown_fields):
        '''
        access : str
        application_description : str
        bindings : typing.Mapping[str, str]
        endpoints : typing.Sequence[~RemoteEndpoint]
        offer_name : str
        offer_url : str
        source_model_tag : str
        spaces : typing.Sequence[~RemoteSpace]
        '''
        access_ = access
        application_description_ = application_description
        bindings_ = bindings
        # Nested wire objects are deserialised into their typed wrappers.
        endpoints_ = [RemoteEndpoint.from_json(o) for o in endpoints or []]
        offer_name_ = offer_name
        offer_url_ = offer_url
        source_model_tag_ = source_model_tag
        spaces_ = [RemoteSpace.from_json(o) for o in spaces or []]
        # Validate arguments against known Juju API types.
        if access_ is not None and not isinstance(access_, (bytes, str)):
            raise Exception("Expected access_ to be a str, received: {}".format(type(access_)))
        if application_description_ is not None and not isinstance(application_description_, (bytes, str)):
            raise Exception("Expected application_description_ to be a str, received: {}".format(type(application_description_)))
        if bindings_ is not None and not isinstance(bindings_, dict):
            raise Exception("Expected bindings_ to be a Mapping, received: {}".format(type(bindings_)))
        if endpoints_ is not None and not isinstance(endpoints_, (bytes, str, list)):
            raise Exception("Expected endpoints_ to be a Sequence, received: {}".format(type(endpoints_)))
        if offer_name_ is not None and not isinstance(offer_name_, (bytes, str)):
            raise Exception("Expected offer_name_ to be a str, received: {}".format(type(offer_name_)))
        if offer_url_ is not None and not isinstance(offer_url_, (bytes, str)):
            raise Exception("Expected offer_url_ to be a str, received: {}".format(type(offer_url_)))
        if source_model_tag_ is not None and not isinstance(source_model_tag_, (bytes, str)):
            raise Exception("Expected source_model_tag_ to be a str, received: {}".format(type(source_model_tag_)))
        if spaces_ is not None and not isinstance(spaces_, (bytes, str, list)):
            raise Exception("Expected spaces_ to be a Sequence, received: {}".format(type(spaces_)))
        self.access = access_
        self.application_description = application_description_
        self.bindings = bindings_
        self.endpoints = endpoints_
        self.offer_name = offer_name_
        self.offer_url = offer_url_
        self.source_model_tag = source_model_tag_
        self.spaces = spaces_
        # Any wire keys not declared above are preserved verbatim.
        self.unknown_fields = unknown_fields
class ApplicationOfferAdminDetails(Type):
    """Administrator view of an application offer, including connections/users.

    Auto-generated Juju facade type: ``_toSchema`` / ``_toPy`` translate
    between Python attribute names and the JSON wire keys, including the
    embedded ``ApplicationOfferDetails`` sub-structure.
    """
    _toSchema = {'application_description': 'application-description', 'application_name': 'application-name', 'applicationofferdetails': 'ApplicationOfferDetails', 'bindings': 'bindings', 'charm_url': 'charm-url', 'connections': 'connections', 'endpoints': 'endpoints', 'offer_name': 'offer-name', 'offer_url': 'offer-url', 'offer_uuid': 'offer-uuid', 'source_model_tag': 'source-model-tag', 'spaces': 'spaces', 'users': 'users'}
    _toPy = {'ApplicationOfferDetails': 'applicationofferdetails', 'application-description': 'application_description', 'application-name': 'application_name', 'bindings': 'bindings', 'charm-url': 'charm_url', 'connections': 'connections', 'endpoints': 'endpoints', 'offer-name': 'offer_name', 'offer-url': 'offer_url', 'offer-uuid': 'offer_uuid', 'source-model-tag': 'source_model_tag', 'spaces': 'spaces', 'users': 'users'}

    def __init__(self, applicationofferdetails=None, application_description=None, application_name=None, bindings=None, charm_url=None, connections=None, endpoints=None, offer_name=None, offer_url=None, offer_uuid=None, source_model_tag=None, spaces=None, users=None, **unknown_fields):
        '''
        applicationofferdetails : ApplicationOfferDetails
        application_description : str
        application_name : str
        bindings : typing.Mapping[str, str]
        charm_url : str
        connections : typing.Sequence[~OfferConnection]
        endpoints : typing.Sequence[~RemoteEndpoint]
        offer_name : str
        offer_url : str
        offer_uuid : str
        source_model_tag : str
        spaces : typing.Sequence[~RemoteSpace]
        users : typing.Sequence[~OfferUserDetails]
        '''
        # Nested wire objects are deserialised into their typed wrappers.
        applicationofferdetails_ = ApplicationOfferDetails.from_json(applicationofferdetails) if applicationofferdetails else None
        application_description_ = application_description
        application_name_ = application_name
        bindings_ = bindings
        charm_url_ = charm_url
        connections_ = [OfferConnection.from_json(o) for o in connections or []]
        endpoints_ = [RemoteEndpoint.from_json(o) for o in endpoints or []]
        offer_name_ = offer_name
        offer_url_ = offer_url
        offer_uuid_ = offer_uuid
        source_model_tag_ = source_model_tag
        spaces_ = [RemoteSpace.from_json(o) for o in spaces or []]
        users_ = [OfferUserDetails.from_json(o) for o in users or []]
        # Validate arguments against known Juju API types.
        if applicationofferdetails_ is not None and not isinstance(applicationofferdetails_, (dict, ApplicationOfferDetails)):
            raise Exception("Expected applicationofferdetails_ to be a ApplicationOfferDetails, received: {}".format(type(applicationofferdetails_)))
        if application_description_ is not None and not isinstance(application_description_, (bytes, str)):
            raise Exception("Expected application_description_ to be a str, received: {}".format(type(application_description_)))
        if application_name_ is not None and not isinstance(application_name_, (bytes, str)):
            raise Exception("Expected application_name_ to be a str, received: {}".format(type(application_name_)))
        if bindings_ is not None and not isinstance(bindings_, dict):
            raise Exception("Expected bindings_ to be a Mapping, received: {}".format(type(bindings_)))
        if charm_url_ is not None and not isinstance(charm_url_, (bytes, str)):
            raise Exception("Expected charm_url_ to be a str, received: {}".format(type(charm_url_)))
        if connections_ is not None and not isinstance(connections_, (bytes, str, list)):
            raise Exception("Expected connections_ to be a Sequence, received: {}".format(type(connections_)))
        if endpoints_ is not None and not isinstance(endpoints_, (bytes, str, list)):
            raise Exception("Expected endpoints_ to be a Sequence, received: {}".format(type(endpoints_)))
        if offer_name_ is not None and not isinstance(offer_name_, (bytes, str)):
            raise Exception("Expected offer_name_ to be a str, received: {}".format(type(offer_name_)))
        if offer_url_ is not None and not isinstance(offer_url_, (bytes, str)):
            raise Exception("Expected offer_url_ to be a str, received: {}".format(type(offer_url_)))
        if offer_uuid_ is not None and not isinstance(offer_uuid_, (bytes, str)):
            raise Exception("Expected offer_uuid_ to be a str, received: {}".format(type(offer_uuid_)))
        if source_model_tag_ is not None and not isinstance(source_model_tag_, (bytes, str)):
            raise Exception("Expected source_model_tag_ to be a str, received: {}".format(type(source_model_tag_)))
        if spaces_ is not None and not isinstance(spaces_, (bytes, str, list)):
            raise Exception("Expected spaces_ to be a Sequence, received: {}".format(type(spaces_)))
        if users_ is not None and not isinstance(users_, (bytes, str, list)):
            raise Exception("Expected users_ to be a Sequence, received: {}".format(type(users_)))
        self.applicationofferdetails = applicationofferdetails_
        self.application_description = application_description_
        self.application_name = application_name_
        self.bindings = bindings_
        self.charm_url = charm_url_
        self.connections = connections_
        self.endpoints = endpoints_
        self.offer_name = offer_name_
        self.offer_url = offer_url_
        self.offer_uuid = offer_uuid_
        self.source_model_tag = source_model_tag_
        self.spaces = spaces_
        self.users = users_
        # Any wire keys not declared above are preserved verbatim.
        self.unknown_fields = unknown_fields
class ApplicationOfferDetails(Type):
_toSchema = {'application_description': 'application-description', 'bindings': 'bindings', 'endpoints': 'endpoints', 'offer_name': 'offer-name', 'offer_url': 'offer-url', 'offer_uuid': 'offer-uuid', 'source_model_tag': 'source-model-tag', 'spaces': 'spaces', 'users': 'users'}
_toPy = {'application-description': 'application_description', 'bindings': 'bindings', 'endpoints': 'endpoints', 'offer-name': 'offer_name', 'offer-url': 'offer_url', 'offer-uuid': 'offer_uuid', 'source-model-tag': 'source_model_tag', 'spaces': 'spaces', 'users': 'users'}
def __init__(self, application_description=None, bindings=None, endpoints=None, offer_name=None, offer_url=None, offer_uuid=None, source_model_tag=None, spaces=None, users=None, **unknown_fields):
'''
application_description : str
bindings : typing.Mapping[str, str]
endpoints : typing.Sequence[~RemoteEndpoint]
offer_name : str
offer_url : str
offer_uuid : str
source_model_tag : str
spaces : typing.Sequence[~RemoteSpace]
users : typing.Sequence[~OfferUserDetails]
'''
application_description_ = application_description
bindings_ = bindings
endpoints_ = [RemoteEndpoint.from_json(o) for o in endpoints or []]
offer_name_ = offer_name
offer_url_ = offer_url
offer_uuid_ = offer_uuid
source_model_tag_ = source_model_tag
spaces_ = [RemoteSpace.from_json(o) for o in spaces or []]
users_ = [OfferUserDetails.from_json(o) for o in users or []]
# Validate arguments against known Juju API types.
if application_description_ is not None and not isinstance(application_description_, (bytes, str)):
raise Exception("Expected application_description_ to be a str, received: {}".format(type(application_description_)))
if bindings_ is not None and not isinstance(bindings_, dict):
raise Exception("Expected bindings_ to be a Mapping, received: {}".format(type(bindings_)))
if endpoints_ is not None and not isinstance(endpoints_, (bytes, str, list)):
raise Exception("Expected endpoints_ to be a Sequence, received: {}".format(type(endpoints_)))
if offer_name_ is not None and not isinstance(offer_name_, (bytes, str)):
raise Exception("Expected offer_name_ to be a str, received: {}".format(type(offer_name_)))
if offer_url_ is not None and not isinstance(offer_url_, (bytes, str)):
raise Exception("Expected offer_url_ to be a str, received: {}".format(type(offer_url_)))
if offer_uuid_ is not None and not isinstance(offer_uuid_, (bytes, str)):
raise Exception("Expected offer_uuid_ to be a str, received: {}".format(type(offer_uuid_)))
if source_model_tag_ is not None and not isinstance(source_model_tag_, (bytes, str)):
| |
# <gh_stars>0
import sys
import os
import glob
import re
import math
import scipy
import numpy
import matplotlib.pyplot as plt
import Common
import Plotting
MOD_NAME_STR = "PY2108"
HOME = False
USER = 'Robert' if HOME else 'robertsheehan/OneDrive - University College Cork/Documents'
def RC_FR_Plots():
    """Plot the measured frequency response of RC low-pass filters.

    Loads tab-delimited sweep files "FR_R_<R>_C_2u_Alt_MCP602.txt" from the
    PY2108 data directory for each resistor in Rlist, normalises each trace
    to its first (low-frequency) sample, converts to dB, and plots all
    traces on one figure via Plotting.plot_multiple_curves.

    Errors are caught and printed together with ERR_STATEMENT rather than
    propagated (consistent with the other plot helpers in this module).
    """
    # <NAME> 13 - 5 - 2021
    FUNC_NAME = ".RC_FR_Plots()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        DATA_HOME = "c:/Users/" + USER + "/Teaching/PY2108/Data/AM_SFMG_Test/"
        os.chdir(DATA_HOME)
        print(os.getcwd())
        # import the data
        hv_data = []
        labels = []
        marks = []
        Rlist = [10, 46, 100, 1000]
        fname_tmplt = "FR_R_%(v1)d_C_2u_Alt_MCP602.txt"
        for count, R in enumerate(Rlist):
            filename = fname_tmplt % {"v1": R}
            data = numpy.loadtxt(filename, delimiter='\t', unpack=True)
            # Normalise to the low-frequency value and convert to dB
            # (vectorised equivalent of the per-sample 10*log10 loop).
            data[1] = 10.0 * numpy.log10(data[1] / data[1][0])
            hv_data.append(data)
            # raw string: "$\Omega$" would otherwise contain the invalid
            # escape sequence "\O" (DeprecationWarning in Python 3.6+)
            labels.append(r"R = %(v1)d $\Omega$" % {"v1": R})
            marks.append(Plotting.labs_pts[count % len(Plotting.labs_pts)])
        # plot the data
        args = Plotting.plot_arg_multiple()
        args.loud = True
        args.crv_lab_list = labels
        args.mrk_list = marks
        args.x_label = 'Frequency / kHz'
        args.y_label = 'Response / dB'
        args.fig_name = "RC_LPF_C_2u_Alt_MCP602"
        args.plt_range = [0.5, 30, -7, 1]
        args.plt_title = r"C = 0.22 $\mu$F"
        Plotting.plot_multiple_curves(hv_data, args)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def RC_FR_Compar_Plots():
    """Compare RC low-pass responses measured by the SFMG and AD9833+MCP602 set-ups.

    For each resistor in Rlist, loads the two measurement files
    "FR_R_<R>_C_2u.txt" (SFMG) and "FR_R_<R>_C_2u_Alt_MCP602.txt"
    (AD9833+MCP602), normalises each trace to its first sample, converts to
    dB and plots both on one figure per resistor.

    Errors are caught and printed together with ERR_STATEMENT rather than
    propagated.
    """
    # <NAME> 13 - 5 - 2021
    FUNC_NAME = ".RC_FR_Compar_Plots()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        DATA_HOME = "c:/Users/" + USER + "/Teaching/PY2108/Data/AM_SFMG_Test/"
        os.chdir(DATA_HOME)
        print(os.getcwd())
        # import the data
        Rlist = [10, 46, 100, 1000]
        fname_tmplt = "FR_R_%(v1)d_C_2u%(v2)s"
        # (file suffix, curve label) for each measurement set-up; folding the
        # two duplicated load/convert stanzas into one loop
        sources = [(".txt", "SFMG"), ("_Alt_MCP602.txt", "AD9833+MCP602")]
        for R in Rlist:
            hv_data = []
            labels = []
            marks = []
            for count, (suffix, label) in enumerate(sources):
                filename = fname_tmplt % {"v1": R, "v2": suffix}
                data = numpy.loadtxt(filename, delimiter='\t', unpack=True)
                # normalise to the low-frequency value and convert to dB
                data[1] = 10.0 * numpy.log10(data[1] / data[1][0])
                hv_data.append(data)
                labels.append(label)
                marks.append(Plotting.labs_pts[count % len(Plotting.labs_pts)])
            # plot the data
            args = Plotting.plot_arg_multiple()
            args.loud = True
            args.crv_lab_list = labels
            args.mrk_list = marks
            args.x_label = 'Frequency / kHz'
            args.y_label = 'Response / dB'
            args.fig_name = "RC_LPF_C_2u_R_%(v1)d_MCP602" % {"v1": R}
            args.plt_range = [0.5, 30, -7, 1]
            # raw string avoids the invalid escape sequences "\m" and "\O"
            args.plt_title = r"C = 0.22 $\mu$F, R = %(v1)d $\Omega$" % {"v1": R}
            Plotting.plot_multiple_curves(hv_data, args)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def LRC_FR_Plots():
    """Plot the measured frequency response of series RLC band-pass filters.

    Loads "RLC_R_10_L_<L>_C_<C>n_Alt_MCP6022.txt" for every (L, C)
    combination, normalises each trace to its peak value, converts to dB,
    and plots all traces on one figure.

    Errors are caught and printed together with ERR_STATEMENT rather than
    propagated.
    """
    # <NAME> 13 - 5 - 2021
    FUNC_NAME = ".LRC_FR_Plots()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        DATA_HOME = "c:/Users/" + USER + "/Teaching/PY2108/Data/AM_SFMG_Test/"
        os.chdir(DATA_HOME)
        print(os.getcwd())
        # import the data
        hv_data = []
        labels = []
        marks = []
        count = 0
        Llist = [100, 220]
        Clist = [100, 220]
        fname_tmplt = "RLC_R_10_L_%(v1)d_C_%(v2)dn_Alt_MCP6022.txt"
        for Lval in Llist:
            for Cval in Clist:
                filename = fname_tmplt % {"v1": Lval, "v2": Cval}
                data = numpy.loadtxt(filename, delimiter='\t', unpack=True)
                # normalise to the peak response and convert to dB
                data[1] = 10.0 * numpy.log10(data[1] / numpy.amax(data[1]))
                hv_data.append(data)
                # raw string: avoids invalid escape sequence "\m" in "$\mu$"
                labels.append(r"L = %(v1)d $\mu$H, C = %(v2)d nF" % {"v1": Lval, "v2": Cval})
                marks.append(Plotting.labs_pts[count % len(Plotting.labs_pts)])
                count += 1
        # plot the data
        args = Plotting.plot_arg_multiple()
        args.loud = True
        args.crv_lab_list = labels
        args.mrk_list = marks
        args.x_label = 'Frequency / kHz'
        args.y_label = 'Response / dB'
        args.fig_name = "RLC_BPF_R_10_Alt_MCP6022"
        args.plt_range = [10, 80, -6, 0]
        args.plt_title = r"R = 10 $\Omega$"
        Plotting.plot_multiple_curves(hv_data, args)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def LRC_FR_Compar_Plots():
    """Compare RLC band-pass responses measured by the SFMG and AD9833+MCP6022 set-ups.

    For each (L, C) combination, loads "RLC_R_10_L_<L>_C_<C>n.txt" (SFMG)
    and "RLC_R_10_L_<L>_C_<C>n_Alt_MCP6022.txt" (AD9833+MCP6022),
    normalises each trace to its peak value, converts to dB and plots both
    on one figure per combination.

    Errors are caught and printed together with ERR_STATEMENT rather than
    propagated.
    """
    # <NAME> 13 - 5 - 2021
    FUNC_NAME = ".LRC_FR_Compar_Plots()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        DATA_HOME = "c:/Users/" + USER + "/Teaching/PY2108/Data/AM_SFMG_Test/"
        os.chdir(DATA_HOME)
        print(os.getcwd())
        # import the data
        Llist = [100, 220]
        Clist = [100, 220]
        fname_tmplt = "RLC_R_10_L_%(v1)d_C_%(v2)dn%(v3)s"
        # (file suffix, curve label) for each measurement set-up; folding the
        # two duplicated load/convert stanzas into one loop
        sources = [(".txt", "SFMG"), ("_Alt_MCP6022.txt", "AD9833+MCP6022")]
        for Lval in Llist:
            for Cval in Clist:
                hv_data = []
                labels = []
                marks = []
                for count, (suffix, label) in enumerate(sources):
                    filename = fname_tmplt % {"v1": Lval, "v2": Cval, "v3": suffix}
                    data = numpy.loadtxt(filename, delimiter='\t', unpack=True)
                    # normalise to the peak response and convert to dB
                    data[1] = 10.0 * numpy.log10(data[1] / numpy.amax(data[1]))
                    hv_data.append(data)
                    labels.append(label)
                    marks.append(Plotting.labs_pts[count % len(Plotting.labs_pts)])
                # plot the data; locals are simply rebound on the next pass,
                # so the explicit `del` statements were unnecessary
                args = Plotting.plot_arg_multiple()
                args.loud = True
                args.crv_lab_list = labels
                args.mrk_list = marks
                args.x_label = 'Frequency / kHz'
                args.y_label = 'Response / dB'
                args.fig_name = "RLC_BPF_L_%(v1)d_C_%(v2)d_MCP6022" % {"v1": Lval, "v2": Cval}
                args.plt_range = [10, 80, -6, 0]
                # raw string avoids the invalid escape sequences "\O" and "\m"
                args.plt_title = r"R = 10 $\Omega$, L = %(v1)d $\mu$H, C = %(v2)d nF" % {"v1": Lval, "v2": Cval}
                Plotting.plot_multiple_curves(hv_data, args)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def AM_Diode_Meas_Compar_1():
    """Compare diode I-V characteristics from a standard set-up and an AM set-up.

    Uses hard-coded measurement lists (resistor voltage / diode voltage),
    derives diode current as Vr / Rd, fits each data set with the diode
    equation via ``diode_fit`` and evaluates the fit with ``diode_voltage``
    (both presumably defined elsewhere in this module — verify), then plots
    measurement + fit for each set-up via Plotting.plot_multiple_curves.
    Errors are caught and printed with ERR_STATEMENT rather than propagated.
    """
    # Plot the measured diode characteristic data
    # data taken from a standard set-up and an AM based set-up
    # <NAME> 8 - 7 - 2021
    FUNC_NAME = ".AM_Diode_Meas_Compar()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        DATA_HOME = "c:/Users/" + USER + "/Teaching/PY2108/Data/AM_Diode_Test/"
        os.chdir(DATA_HOME)
        print(os.getcwd())
        # the data
        # NOTE(review): with Rd = 0.01 and Vr up to ~1.1 V, Vr/Rd spans 0-110,
        # matching the mA axis and the "Rd_10" figure names — Rd appears to be
        # expressed in kilo-ohms (i.e. a 10 ohm resistor); confirm the units.
        Rd = 0.01; # impedance of resistor used to determine diode current
        Vr_std = [0, 0, 0, 0, 0, 0.004, 0.0256, 0.0727, 0.1306, 0.1983, 0.269,
                  0.35, 0.427, 0.508, 0.588, 0.678, 0.76, 0.845, 0.929, 1.019, 1.103]; # voltage across diode resistor measured using standard set up
        Vd_std = [0, 0.0922, 0.1895, 0.295, 0.392, 0.487, 0.561, 0.615, 0.649, 0.675,
                  0.694, 0.708, 0.726, 0.737, 0.747, 0.755, 0.765, 0.772, 0.779, 0.786, 0.791]; # voltage across diode measured using standard set up
        Id_std = []; # current going into diode measured using standard set up
        for i in range(0, len(Vr_std), 1):
            Id_std.append(Vr_std[i] / Rd)
        # make a fit to the data using the diode equation
        T = 25;
        pars_std = diode_fit(Id_std, Vd_std, T);
        # generate residual data based on fit
        Vd_std_fit = []
        for i in range(0, len(Id_std), 1):
            Vd_std_fit.append( diode_voltage(Id_std[i], pars_std[0], pars_std[1], T) )
        Vr_AM = [0, 0, 0.0028, 0.0878, 0.176, 0.355, 0.521, 0.72, 0.902]; # voltage across diode resistor measured using AM
        Vd_AM = [0, 0.185, 0.47, 0.617, 0.661, 0.706, 0.735, 0.756, 0.774]; # voltage across diode measured using AM
        Id_AM = [];
        for i in range(0, len(Vd_AM), 1):
            Id_AM.append(Vr_AM[i] / Rd)
        # make a fit to the data using the diode equation
        pars_AM = diode_fit(Id_AM, Vd_AM, T);
        # generate residual data based on fit
        Vd_AM_fit = []
        for i in range(0, len(Id_AM), 1):
            Vd_AM_fit.append( diode_voltage(Id_AM[i], pars_AM[0], pars_AM[1], T) )
        # Make a plot of the standard measurement with its fit
        args = Plotting.plot_arg_multiple()
        hv_data = [];
        hv_data.append([Id_std, Vd_std])
        hv_data.append([Id_std, Vd_std_fit])
        args.loud = True
        args.crv_lab_list = ["Std.", "Fit"]
        args.mrk_list = [ Plotting.labs_pts[0], Plotting.labs_lins[1] ]
        args.x_label = 'Current / mA'
        args.y_label = 'Voltage / V'
        args.fig_name = "Diode_Std_Meas_Rd_10"
        args.plt_range = [0, 111, 0, 0.8]
        Plotting.plot_multiple_curves(hv_data, args)
        # Re-use the same axis settings for the AM measurement plot.
        hv_data = [];
        hv_data.append([Id_AM, Vd_AM])
        hv_data.append([Id_AM, Vd_AM_fit])
        args.crv_lab_list = ["AM", "Fit"]
        args.fig_name = "Diode_AM_Meas_Rd_10"
        Plotting.plot_multiple_curves(hv_data, args)
        # plot the combined data
        #args = Plotting.plot_arg_multiple()
        #hv_data = [];
        #hv_data.append([Id_std, Vd_std])
        #hv_data.append([Id_AM, Vd_AM])
        #args.loud = False
        #args.crv_lab_list = ["Std.", "AM"]
        #args.mrk_list = [ Plotting.labs[0], Plotting.labs[1] ]
        #args.x_label = 'Current / mA'
        #args.y_label = 'Voltage / V'
        #args.fig_name = "Diode_Meas_Rd_10"
        #args.plt_range = [0, 111, 0, 0.8]
        #Plotting.plot_multiple_curves(hv_data, args)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def AM_Diode_Meas_Compar_2():
# Plot the measured diode characteristic data
# data | |
""" Code allowing tools to define extra files associated with an output datset.
"""
import glob
import json
import logging
import operator
import os
import re
from collections import namedtuple
from galaxy import util
from galaxy.dataset_collections.structure import UnitializedTree
from galaxy.tools.parser.output_collection_def import (
DEFAULT_DATASET_COLLECTOR_DESCRIPTION,
INPUT_DBKEY_TOKEN,
ToolProvidedMetadataDatasetCollection,
)
from galaxy.util import (
ExecutionTimer,
odict
)
DATASET_ID_TOKEN = "DATASET_ID"
log = logging.getLogger(__name__)
class NullToolProvidedMetadata(object):
    """No-op metadata provider used when the tool supplied no galaxy.json data.

    Every query answers with an empty or false result.
    """

    def get_new_datasets(self, output_name):
        """No dynamically discovered datasets exist."""
        return []

    def get_new_dataset_meta_by_basename(self, output_name, basename):
        """No per-file metadata is available."""
        return {}

    def has_failed_outputs(self):
        """Nothing was reported, so nothing failed."""
        return False

    def get_unnamed_outputs(self):
        """No unnamed outputs were described."""
        return []
class LegacyToolProvidedMetadata(object):
    """Tool-provided metadata parsed from the legacy, line-delimited JSON
    variant of galaxy.json (one JSON object per line, each with a 'type' key).
    """

    def __init__(self, job_wrapper, meta_file):
        self.job_wrapper = job_wrapper
        self.tool_provided_job_metadata = []
        with open(meta_file, 'r') as handle:
            for record in handle:
                try:
                    record = json.loads(record)
                    assert 'type' in record
                except Exception:
                    log.exception('(%s) Got JSON data from tool, but data is improperly formatted or no "type" key in data' % job_wrapper.job_id)
                    log.debug('Offending data was: %s' % record)
                    continue
                # Fill in the dataset id for dataset entries that omit it.
                # This isn't insecure - the finish method only walks the job's
                # own output datasets, so ids a tool doesn't own are ignored.
                if record['type'] == 'dataset' and 'dataset_id' not in record:
                    try:
                        record['dataset_id'] = job_wrapper.get_output_file_id(record['dataset'])
                    except KeyError:
                        log.warning('(%s) Tool provided job dataset-specific metadata without specifying a dataset' % job_wrapper.job_id)
                        continue
                self.tool_provided_job_metadata.append(record)

    def get_meta_by_dataset_id(self, dataset_id):
        """Return the first 'dataset' entry matching dataset_id, or None."""
        matches = (entry for entry in self.tool_provided_job_metadata
                   if entry['type'] == 'dataset' and entry['dataset_id'] == dataset_id)
        return next(matches, None)

    def get_new_dataset_meta_by_basename(self, output_name, basename):
        """Return the first 'new_primary_dataset' entry for basename, or None."""
        matches = (entry for entry in self.tool_provided_job_metadata
                   if entry['type'] == 'new_primary_dataset' and entry['filename'] == basename)
        return next(matches, None)

    def get_new_datasets(self, output_name):
        """Unsupported for the legacy format - always empty."""
        log.warning("Called get_new_datasets with legacy tool metadata provider - that is unimplemented.")
        return []

    def has_failed_outputs(self):
        """True when any entry was flagged with a truthy 'failed' value."""
        return any(entry.get("failed", False) for entry in self.tool_provided_job_metadata)

    def get_unnamed_outputs(self):
        """The legacy format cannot describe unnamed outputs."""
        return []
class ToolProvidedMetadata(object):
    """Tool-provided metadata parsed from the modern galaxy.json variant -
    a single JSON document keyed by output name.
    """

    def __init__(self, job_wrapper, meta_file):
        self.job_wrapper = job_wrapper
        with open(meta_file, 'r') as handle:
            self.tool_provided_job_metadata = json.load(handle)

    def get_meta_by_name(self, name):
        """Metadata dict for the named output ({} when absent)."""
        return self.tool_provided_job_metadata.get(name, {})

    def get_new_dataset_meta_by_basename(self, output_name, basename):
        """First dataset entry of output_name matching basename, or None."""
        candidates = self.tool_provided_job_metadata.get(output_name, {}).get("datasets", [])
        matches = (entry for entry in candidates if entry['filename'] == basename)
        return next(matches, None)

    def get_new_datasets(self, output_name):
        """Dataset descriptions for output_name; when no flat 'datasets'
        list was given, an 'elements' tree is flattened instead (yielding a
        generator in that case).
        """
        section = self.tool_provided_job_metadata.get(output_name, {})
        datasets = section.get("datasets", [])
        if not datasets:
            elements = section.get("elements", [])
            if elements:
                datasets = self._elements_to_datasets(elements)
        return datasets

    def _elements_to_datasets(self, elements, level=0):
        """Recursively flatten a nested elements tree; nesting depth is
        recorded via identifier_<level> keys and leaf keys are merged in.
        """
        for element in elements:
            identifier_kwds = {"identifier_%d" % level: element["name"]}
            if "elements" in element:
                for descendant in self._elements_to_datasets(element["elements"], level=level + 1):
                    merged = identifier_kwds.copy()
                    merged.update(descendant)
                    yield merged
            else:
                identifier_kwds.update(element)
                yield identifier_kwds

    def has_failed_outputs(self):
        """True when any named output (other than the reserved
        __unnamed_outputs key) was flagged with a truthy 'failed' value.
        """
        return any(
            meta.get("failed", False)
            for output_name, meta in self.tool_provided_job_metadata.items()
            if output_name != "__unnamed_outputs"
        )

    def get_unnamed_outputs(self):
        """Outputs described under the reserved __unnamed_outputs key."""
        log.debug("unnamed outputs [%s]" % self.tool_provided_job_metadata)
        return self.tool_provided_job_metadata.get("__unnamed_outputs", [])
def collect_dynamic_outputs(
    tool,
    output_collections,
    tool_provided_metadata,
    job_working_directory,
    inp_data={},
    job=None,
    input_dbkey="?",
):
    """Discover and persist outputs the tool described dynamically at runtime.

    Two passes are made:

    1. "Unnamed" outputs inferred entirely from tool-provided metadata
       (galaxy.json) are routed to their declared destination - a library
       folder, a history dataset collection ("hdca"), or stand-alone
       history datasets ("hdas").
    2. Declared output collections with dynamic structure are populated
       from files matched by the tool's dataset collectors.

    :param tool: tool that produced the job; supplies app and services.
    :param output_collections: mapping of output name -> HDCA (or DC for
        mapping jobs) to populate.
    :param tool_provided_metadata: accessor over parsed galaxy.json.
    :param job_working_directory: directory searched for discovered files.
    :param inp_data: mapping of input name -> dataset.
        NOTE(review): mutable default argument - only safe because it is
        never mutated here; consider ``inp_data=None``.
    :param job: the job owning the outputs (may be None).
    :param input_dbkey: dbkey propagated to discovered datasets.
    """
    app = tool.app
    collections_service = tool.app.dataset_collections_service
    # Shared per-job state (permissions, session, metadata) for the helpers.
    job_context = JobContext(
        tool,
        tool_provided_metadata,
        job,
        job_working_directory,
        inp_data,
        input_dbkey,
    )
    # unmapped outputs do not correspond to explicit outputs of the tool, they were inferred entirely
    # from the tool provided metadata (e.g. galaxy.json).
    for unnamed_output_dict in tool_provided_metadata.get_unnamed_outputs():
        assert "destination" in unnamed_output_dict
        assert "elements" in unnamed_output_dict
        destination = unnamed_output_dict["destination"]
        elements = unnamed_output_dict["elements"]
        assert "type" in destination
        destination_type = destination["type"]
        assert destination_type in ["library_folder", "hdca", "hdas"]
        trans = job_context.work_context
        # three destination types we need to handle here - "library_folder" (place discovered files in a library folder),
        # "hdca" (place discovered files in a history dataset collection), and "hdas" (place discovered files in a history
        # as stand-alone datasets).
        if destination_type == "library_folder":
            # populate a library folder (needs to be already have been created)
            library_folder_manager = app.library_folder_manager
            library_folder = library_folder_manager.get(trans, app.security.decode_id(destination.get("library_folder_id")))
            # Recursively mirror the element tree as nested library folders,
            # creating a dataset for each leaf element.
            def add_elements_to_folder(elements, library_folder):
                for element in elements:
                    if "elements" in element:
                        assert "name" in element
                        name = element["name"]
                        description = element.get("description")
                        nested_folder = library_folder_manager.create(trans, library_folder.id, name, description)
                        add_elements_to_folder(element["elements"], nested_folder)
                    else:
                        discovered_file = discovered_file_for_unnamed_output(element, job_working_directory)
                        fields_match = discovered_file.match
                        designation = fields_match.designation
                        visible = fields_match.visible
                        ext = fields_match.ext
                        dbkey = fields_match.dbkey
                        info = element.get("info", None)
                        link_data = discovered_file.match.link_data
                        # Create new primary dataset
                        name = fields_match.name or designation
                        job_context.create_dataset(
                            ext=ext,
                            designation=designation,
                            visible=visible,
                            dbkey=dbkey,
                            name=name,
                            filename=discovered_file.path,
                            info=info,
                            library_folder=library_folder,
                            link_data=link_data
                        )
            add_elements_to_folder(elements, library_folder)
        elif destination_type == "hdca":
            # create or populate a dataset collection in the history
            history = job.history
            assert "collection_type" in unnamed_output_dict
            object_id = destination.get("object_id")
            if object_id:
                # An existing (precreated) collection instance was referenced.
                sa_session = tool.app.model.context
                hdca = sa_session.query(app.model.HistoryDatasetCollectionAssociation).get(int(object_id))
            else:
                # Precreate a new collection instance of the declared type.
                name = unnamed_output_dict.get("name", "unnamed collection")
                collection_type = unnamed_output_dict["collection_type"]
                collection_type_description = collections_service.collection_type_descriptions.for_collection_type(collection_type)
                structure = UnitializedTree(collection_type_description)
                hdca = collections_service.precreate_dataset_collection_instance(
                    trans, history, name, structure=structure
                )
            filenames = odict.odict()
            # Flatten the element tree into path -> discovered-file entries,
            # carrying nested element names along as parent identifiers.
            def add_to_discovered_files(elements, parent_identifiers=[]):
                for element in elements:
                    if "elements" in element:
                        add_to_discovered_files(element["elements"], parent_identifiers + [element["name"]])
                    else:
                        discovered_file = discovered_file_for_unnamed_output(element, job_working_directory, parent_identifiers)
                        filenames[discovered_file.path] = discovered_file
            add_to_discovered_files(elements)
            collection = hdca.collection
            collection_builder = collections_service.collection_builder_for(
                collection
            )
            job_context.populate_collection_elements(
                collection,
                collection_builder,
                filenames,
            )
            collection_builder.populate()
        elif destination_type == "hdas":
            # discover files as individual datasets for the target history
            history = job.history
            datasets = []
            # Create one stand-alone dataset per leaf element; datasets with
            # an explicit object_id reuse a precreated HDA instead.
            def collect_elements_for_history(elements):
                for element in elements:
                    if "elements" in element:
                        collect_elements_for_history(element["elements"])
                    else:
                        discovered_file = discovered_file_for_unnamed_output(element, job_working_directory)
                        fields_match = discovered_file.match
                        designation = fields_match.designation
                        ext = fields_match.ext
                        dbkey = fields_match.dbkey
                        info = element.get("info", None)
                        link_data = discovered_file.match.link_data
                        # Create new primary dataset
                        name = fields_match.name or designation
                        hda_id = discovered_file.match.object_id
                        primary_dataset = None
                        if hda_id:
                            sa_session = tool.app.model.context
                            primary_dataset = sa_session.query(app.model.HistoryDatasetAssociation).get(hda_id)
                        dataset = job_context.create_dataset(
                            ext=ext,
                            designation=designation,
                            visible=True,
                            dbkey=dbkey,
                            name=name,
                            filename=discovered_file.path,
                            info=info,
                            link_data=link_data,
                            primary_data=primary_dataset,
                        )
                        dataset.raw_set_dataset_state('ok')
                        if not hda_id:
                            datasets.append(dataset)
            collect_elements_for_history(elements)
            job.history.add_datasets(job_context.sa_session, datasets)
    # Second pass: declared output collections with dynamic structure.
    for name, has_collection in output_collections.items():
        if name not in tool.output_collections:
            continue
        output_collection_def = tool.output_collections[name]
        if not output_collection_def.dynamic_structure:
            continue
        # Could be HDCA for normal jobs or a DC for mapping
        # jobs.
        if hasattr(has_collection, "collection"):
            collection = has_collection.collection
        else:
            collection = has_collection
        # We are adding dynamic collections, which may be precreated, but their actually state is still new!
        collection.populated_state = collection.populated_states.NEW
        try:
            collection_builder = collections_service.collection_builder_for(
                collection
            )
            # map() is lazy here; it is consumed later via find_files.
            dataset_collectors = map(dataset_collector, output_collection_def.dataset_collector_descriptions)
            output_name = output_collection_def.name
            filenames = job_context.find_files(output_name, collection, dataset_collectors)
            job_context.populate_collection_elements(
                collection,
                collection_builder,
                filenames,
                name=output_collection_def.name,
                metadata_source_name=output_collection_def.metadata_source,
            )
            collection_builder.populate()
        except Exception:
            log.exception("Problem gathering output collection.")
            collection.handle_population_failed("Problem building datasets for collection.")
class JobContext(object):
def __init__(self, tool, tool_provided_metadata, job, job_working_directory, inp_data, input_dbkey):
self.inp_data = inp_data
self.input_dbkey = input_dbkey
self.app = tool.app
self.sa_session = tool.sa_session
self.job = job
self.job_working_directory = job_working_directory
self.tool_provided_metadata = tool_provided_metadata
self._permissions = None
    @property
    def work_context(self):
        """A fresh :class:`WorkRequestContext` acting as the job's user.

        The import is function-local, presumably to avoid a circular import
        at module load time - confirm before hoisting it.
        """
        from galaxy.work.context import WorkRequestContext
        return WorkRequestContext(self.app, user=self.job.user)
@property
def permissions(self):
if self._permissions is None:
inp_data = self.inp_data
existing_datasets = [inp for inp in inp_data.values() if inp]
if existing_datasets:
permissions = self.app.security_agent.guess_derived_permissions_for_datasets(existing_datasets)
else:
# No valid inputs, we will use history defaults
permissions = self.app.security_agent.history_get_default_permissions(self.job.history)
self._permissions = permissions
return self._permissions
def find_files(self, output_name, collection, dataset_collectors):
filenames = odict.odict()
for discovered_file in discover_files(output_name, self.tool_provided_metadata, dataset_collectors, self.job_working_directory, collection):
filenames[discovered_file.path] = discovered_file
return filenames
    def populate_collection_elements(self, collection, root_collection_builder, filenames, name=None, metadata_source_name=None):
        """Create one dataset per discovered file and wire each into the
        collection builder at its element-identifier path.

        :param collection: collection being built.
        :param root_collection_builder: builder datasets are added to.
        :param filenames: ordered mapping of path -> discovered file record.
        :param name: output name used for logging and job associations.
        :param metadata_source_name: output whose metadata new datasets copy.
        """
        # TODO: allow configurable sorting.
        # <sort by="lexical" /> <!-- default -->
        # <sort by="reverse_lexical" />
        # <sort regex="example.(\d+).fastq" by="1:numerical" />
        # <sort regex="part_(\d+)_sample_([^_]+).fastq" by="2:lexical,1:numerical" />
        if name is None:
            name = "unnamed output"
        element_datasets = []
        for filename, discovered_file in filenames.items():
            create_dataset_timer = ExecutionTimer()
            fields_match = discovered_file.match
            if not fields_match:
                raise Exception("Problem parsing metadata fields for file %s" % filename)
            element_identifiers = fields_match.element_identifiers
            designation = fields_match.designation
            visible = fields_match.visible
            ext = fields_match.ext
            dbkey = fields_match.dbkey
            # Substitute the job's input dbkey for the sentinel token.
            if dbkey == INPUT_DBKEY_TOKEN:
                dbkey = self.input_dbkey
            # Create new primary dataset
            # NOTE(review): this rebinds the ``name`` parameter each
            # iteration, so later log lines and job associations use the
            # last per-file name rather than the output name - confirm
            # this is intended.
            name = fields_match.name or designation
            link_data = discovered_file.match.link_data
            dataset = self.create_dataset(
                ext=ext,
                designation=designation,
                visible=visible,
                dbkey=dbkey,
                name=name,
                filename=filename,
                metadata_source_name=metadata_source_name,
                link_data=link_data,
            )
            log.debug(
                "(%s) Created dynamic collection dataset for path [%s] with element identifier [%s] for output [%s] %s",
                self.job.id,
                filename,
                designation,
                name,
                create_dataset_timer,
            )
            element_datasets.append((element_identifiers, dataset))
        app = self.app
        sa_session = self.sa_session
        job = self.job
        if job:
            add_datasets_timer = ExecutionTimer()
            job.history.add_datasets(sa_session, [d for (ei, d) in element_datasets])
            log.debug(
                "(%s) Add dynamic collection datasets to history for output [%s] %s",
                self.job.id,
                name,
                add_datasets_timer,
            )
        for (element_identifiers, dataset) in element_datasets:
            # Walk the identifier path down to the leaf's parent builder.
            current_builder = root_collection_builder
            for element_identifier in element_identifiers[:-1]:
                current_builder = current_builder.get_level(element_identifier)
            current_builder.add_dataset(element_identifiers[-1], dataset)
            # Associate new dataset with job
            if job:
                element_identifier_str = ":".join(element_identifiers)
                # Below was changed from '__new_primary_file_%s|%s__' % (name, designation )
                assoc = app.model.JobToOutputDatasetAssociation('__new_primary_file_%s|%s__' % (name, element_identifier_str), dataset)
                assoc.job = self.job
                sa_session.add(assoc)
            dataset.raw_set_dataset_state('ok')
        sa_session.flush()
def create_dataset(
self,
ext,
designation,
visible,
dbkey,
name,
filename,
metadata_source_name=None,
info=None,
library_folder=None,
link_data=False,
primary_data=None,
):
app = self.app
sa_session = self.sa_session
if primary_data | |
_, hidden = encoder(batch1, batch2)
# source_ids = range(len(domain_encs))
if args.metric == "biaffine":
alphas = [biaffine_metric_fast(hidden, mu[0], Us[0]) \
for mu in domain_encs]
else:
alphas = [mahalanobis_metric_fast(hidden, mu[0], U, mu[1], P, mu[2], N) \
for (mu, U, P, N) in zip(cur_domain_encs, cur_Us, cur_Ps, cur_Ns)]
# alphas = [ (1 - x / sum(alphas)) for x in alphas ]
alphas = softmax(alphas)
# print("alphas", alphas[0].mean(), alphas[1].mean(), alphas[2].mean())
# print("alphas", alphas)
alphas = []
for al_i in range(len(support_ids)):
alphas.append(torch.zeros(size=(batch1.size()[0],)))
alphas[src_i] = torch.ones(size=(batch1.size()[0],))
alpha_cat = torch.zeros(size=(alphas[0].shape[0], len(support_ids)))
for col, a_list in enumerate(alphas):
alpha_cat[:, col] = a_list
cur_alpha_weights_stack = np.concatenate((cur_alpha_weights_stack, alpha_cat.detach().numpy()))
# for j, supp_id in enumerate(support_ids):
# cur_alpha_weights[supp_id] += alphas[j].data.tolist()
# cur_alpha_weights[supp_id].append(alphas[j].mean().item())
if args.cuda:
alphas = [alpha.cuda() for alpha in alphas]
alphas = [Variable(alpha) for alpha in alphas]
outputs = [F.softmax(classifiers[j](hidden), dim=1) for j in support_ids]
output = sum([alpha.unsqueeze(1).repeat(1, 2) * output_i \
for (alpha, output_i) in zip(alphas, outputs)])
# print("pred output", output)
pred = output.data.max(dim=1)[1]
oracle_eq = compute_oracle(outputs, label, args)
if args.eval_only:
for i in range(batch1.shape[0]):
for j in range(len(alphas)):
say("{:.4f}: [{:.4f}, {:.4f}], ".format(
alphas[j].data[i], outputs[j].data[i][0], outputs[j].data[i][1])
)
oracle_TF = "T" if oracle_eq[i] == 1 else colored("F", 'red')
say("gold: {}, pred: {}, oracle: {}\n".format(label[i], pred[i], oracle_TF))
say("\n")
# print torch.cat(
# [
# torch.cat([ x.unsqueeze(1) for x in alphas ], 1),
# torch.cat([ x for x in outputs ], 1)
# ], 1
# )
y_true += label.tolist()
y_pred += pred.tolist()
y_score += output[:, 1].data.tolist()
correct += pred.eq(label).sum()
oracle_correct += oracle_eq.sum()
tot_cnt += output.size(0)
# print("y_true", y_true)
# print("y_pred", y_pred)
# for j in support_ids:
# print(src_i, j, cur_alpha_weights[j])
# alphas_weights[src_i, j] = np.mean(cur_alpha_weights[j])
# print(alphas_weights)
alphas_weights[src_i, support_ids] = np.mean(cur_alpha_weights_stack, axis=0)
if thr is not None:
print("using threshold %.4f" % thr[src_i])
y_score = np.array(y_score)
y_pred = np.zeros_like(y_score)
y_pred[y_score > thr[src_i]] = 1
# prec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, average="binary")
acc = float(correct) / tot_cnt
oracle_acc = float(oracle_correct) / tot_cnt
# print("source", src_i, "validation results: precision: {:.2f}, recall: {:.2f}, f1: {:.2f}".format(
# prec*100, rec*100, f1*100))
# return (acc, oracle_acc), confusion_matrix(y_true, y_pred)
prec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, average="binary")
auc = roc_auc_score(y_true, y_score)
print("source {}, AUC: {:.2f}, Prec: {:.2f}, Rec: {:.2f}, F1: {:.2f}".format(
src_i, auc * 100, prec * 100, rec * 100, f1 * 100))
metrics.append([auc, prec, rec, f1])
if return_best_thrs:
precs, recs, thrs = precision_recall_curve(y_true, y_score)
f1s = 2 * precs * recs / (precs + recs)
f1s = f1s[:-1]
thrs = thrs[~np.isnan(f1s)]
f1s = f1s[~np.isnan(f1s)]
best_thr = thrs[np.argmax(f1s)]
print("best threshold=%4f, f1=%.4f", best_thr, np.max(f1s))
thresholds.append(best_thr)
print("source domain weight matrix\n", alphas_weights)
metrics = np.array(metrics)
return thresholds, metrics, alphas_weights
def predict(args):
    """Load a saved multi-source model and evaluate transfer on args.test.

    Expects ``args.load_model`` to be a torch checkpoint containing
    (encoder, classifiers, Us, Ps, Ns). Builds one DataLoader per source
    domain named in ``args.train`` plus one for the ``args.test`` test
    split, then reports accuracy/oracle accuracy via ``evaluate``.
    """
    encoder, classifiers, Us, Ps, Ns = torch.load(args.load_model)
    # BUG FIX: map() is lazy in Python 3, so the original
    # `map(lambda m: m.eval(), ...)` was never executed and the models
    # stayed in training mode. Iterate explicitly instead.
    for model in [encoder] + classifiers:
        model.eval()
    if args.cuda:
        # Same lazy-map fix for moving modules onto the GPU.
        for model in [encoder] + classifiers:
            model.cuda()
        Us = [U.cuda() for U in Us]
        Ps = [P.cuda() for P in Ps]
        Ns = [N.cuda() for N in Ns]
    say("\nTransferring from %s to %s\n" % (args.train, args.test))
    source_train_sets = args.train.split(',')
    # One loader per source domain, in the order the domains were listed.
    train_loaders = []
    for source in source_train_sets:
        filepath = os.path.join(DATA_DIR, "%s_train.svmlight" % (source))
        train_dataset = AmazonDataset(filepath)
        train_loader = data.DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=0
        )
        train_loaders.append(train_loader)
    test_filepath = os.path.join(DATA_DIR, "%s_test.svmlight" % (args.test))
    test_dataset = AmazonDataset(test_filepath)
    test_loader = data.DataLoader(
        test_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=0
    )
    say("Corpus loaded.\n")
    mats = [Us, Ps, Ns]
    (acc, oracle_acc), confusion_mat = evaluate(
        encoder, classifiers,
        mats,
        [train_loaders, test_loader],
        args
    )
    say(colored("Test accuracy/oracle {:.4f}/{:.4f}\n".format(acc, oracle_acc), 'red'))
def train(args):
''' Training Strategy
Input: source = {S1, S2, ..., Sk}, target = {T}
Train:
Approach 1: fix metric and learn encoder only
Approach 2: learn metric and encoder alternatively
'''
# test_mahalanobis_metric() and return
args.cuda = not args.no_cuda and torch.cuda.is_available()
say('cuda is available %s\n' % args.cuda)
np.random.seed(args.seed)
torch.manual_seed(args.seed + args.seed_delta)
if args.cuda:
torch.cuda.manual_seed(args.seed + args.seed_delta)
source_train_sets = args.train.split(',')
print("sources", source_train_sets)
encoders = []
for _ in range(len(source_train_sets)):
# encoder_class = get_model_class("mlp")
encoder_class = CNNMatchModel(input_matrix_size1=args.matrix_size1, input_matrix_size2=args.matrix_size2,
mat1_channel1=args.mat1_channel1, mat1_kernel_size1=args.mat1_kernel_size1,
mat1_channel2=args.mat1_channel2, mat1_kernel_size2=args.mat1_kernel_size2,
mat1_hidden=args.mat1_hidden, mat2_channel1=args.mat2_channel1,
mat2_kernel_size1=args.mat2_kernel_size1, mat2_hidden=args.mat2_hidden)
# encoder_class.add_config(argparser)
encoders.append(encoder_class)
encoder_dst = CNNMatchModel(input_matrix_size1=args.matrix_size1, input_matrix_size2=args.matrix_size2,
mat1_channel1=args.mat1_channel1, mat1_kernel_size1=args.mat1_kernel_size1,
mat1_channel2=args.mat1_channel2, mat1_kernel_size2=args.mat1_kernel_size2,
mat1_hidden=args.mat1_hidden, mat2_channel1=args.mat2_channel1,
mat2_kernel_size1=args.mat2_kernel_size1, mat2_hidden=args.mat2_hidden)
critic_class = get_critic_class(args.critic)
critic_class.add_config(argparser)
args = argparser.parse_args()
say(args)
# encoder is shared across domains
# encoder = encoder_class(args)
# encoder = encoder_class
print()
print("encoder", encoders[0])
say("Transferring from %s to %s\n" % (args.train, args.test))
train_loaders = []
# valid_loaders_src = []
# test_loaders_src = []
Us = []
Ps = []
Ns = []
Ws = []
Vs = []
# Ms = []
for source in source_train_sets:
# filepath = os.path.join(DATA_DIR, "%s_train.svmlight" % (source))
filepath = os.path.join(settings.DOM_ADAPT_DIR, "{}_train.pkl".format(source))
assert (os.path.exists(filepath))
# train_dataset = AmazonDataset(filepath)
train_dataset = ProcessedCNNInputDataset(source, "train")
train_loader = data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=0
)
train_loaders.append(train_loader)
# cur_valid_dataset = ProcessedCNNInputDataset(source, "valid")
# cur_valid_loader = data.DataLoader(
# cur_valid_dataset,
# batch_size=args.batch_size,
# shuffle=False,
# num_workers=0
# )
# valid_loaders_src.append(cur_valid_loader)
#
# cur_test_dataset = ProcessedCNNInputDataset(source, "test")
# cur_test_loader = data.DataLoader(
# cur_test_dataset,
# batch_size=args.batch_size,
# shuffle=False,
# num_workers=0
# )
# test_loaders_src.append(cur_test_loader)
if args.metric == "biaffine":
U = torch.FloatTensor(encoders[0].n_d, encoders[0].n_d)
W = torch.FloatTensor(encoders[0].n_d, 1)
nn.init.xavier_uniform(W)
Ws.append(W)
V = torch.FloatTensor(encoders[0].n_d, 1)
nn.init.xavier_uniform(V)
Vs.append(V)
else:
U = torch.FloatTensor(encoders[0].n_d, args.m_rank)
nn.init.xavier_uniform_(U)
Us.append(U)
P = torch.FloatTensor(encoders[0].n_d, args.m_rank)
nn.init.xavier_uniform_(P)
Ps.append(P)
N = torch.FloatTensor(encoders[0].n_d, args.m_rank)
nn.init.xavier_uniform_(N)
Ns.append(N)
# Ms.append(U.mm(U.t()))
# unl_filepath = os.path.join(DATA_DIR, "%s_train.svmlight" % (args.test))
unl_filepath = os.path.join(settings.DOM_ADAPT_DIR, "{}_train.pkl".format(args.test))
print("****************", unl_filepath)
assert (os.path.exists(unl_filepath))
# unl_dataset = AmazonDomainDataset(unl_filepath) # using domain as labels
unl_dataset = OAGDomainDataset(args.test, "train")
unl_loader = data.DataLoader(
unl_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=0
)
train_dataset_dst = ProcessedCNNInputDataset(args.test, "train")
train_loader_dst = data.DataLoader(
train_dataset_dst,
batch_size=args.batch_size,
shuffle=False,
num_workers=0
)
# valid_filepath = os.path.join(DATA_DIR, "%s_test.svmlight" % (args.test)) # No dev files
# valid_dataset = AmazonDataset(valid_filepath)
valid_dataset = ProcessedCNNInputDataset(args.test, "valid")
print("valid y", len(valid_dataset), valid_dataset.y)
valid_loader = data.DataLoader(
valid_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=0
)
# test_filepath = os.path.join(DATA_DIR, "%s_test.svmlight" % (args.test))
# assert (os.path.exists(test_filepath))
# test_dataset = AmazonDataset(test_filepath)
test_dataset = ProcessedCNNInputDataset(args.test, "test")
test_loader = data.DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=0
)
say("Corpus loaded.\n")
classifiers = []
for source in source_train_sets: # only one layer
classifier = nn.Linear(encoders[0].n_out, 2) # binary classification
# classifier = encoder.fc_out
# nn.init.xavier_normal(classifier.weight)
# nn.init.constant(classifier.bias, 0.1)
classifiers.append(classifier)
classifier_dst = nn.Linear(encoder_dst.n_out, 2)
# classifier_mix = nn.Linear(2, 2)
classifier_mix = WeightScaler()
critic = critic_class(encoders[0], args)
# if args.save_model:
# say(colored("Save model to {}\n".format(args.save_model + ".init"), 'red'))
# torch.save([encoder, classifiers, Us, Ps, Ns], args.save_model + ".init")
if args.cuda:
map(lambda m: m.cuda(), [encoder_dst, critic, classifier_dst, classifier_mix] + encoders + classifiers)
Us = [Variable(U.cuda(), requires_grad=True) for U in Us]
Ps = [Variable(P.cuda(), requires_grad=True) for P in Ps]
Ns = [Variable(N.cuda(), requires_grad=True) for N in Ns]
if args.metric == "biaffine":
Ws = [Variable(W.cuda(), requires_grad=True) for W in Ws]
Vs = [Variable(V.cuda(), requires_grad=True) for V in Vs]
# Ms = [ U.mm(U.t()) for U in Us ]
# say("\nEncoder: {}\n".format(encoder))
for i, classifier in enumerate(classifiers):
say("Classifier-{}: {}\n".format(i, classifier))
say("Critic: {}\n".format(critic))
requires_grad = lambda x: x.requires_grad
# task_params = list(encoder.parameters())
task_params = []
for encoder in encoders:
task_params += encoder.parameters()
task_params += encoder_dst.parameters()
for classifier in classifiers:
task_params += list(classifier.parameters())
task_params += classifier_dst.parameters()
task_params += classifier_mix.parameters()
# task_params += [classifier_mix.data]
task_params += list(critic.parameters())
task_params += Us
task_params += Ps
task_params += Ns
if args.metric == "biaffine":
task_params += Ws
task_params += Vs
optim_model = optim.Adagrad( # use adagrad instead of adam
filter(requires_grad, task_params),
lr=args.lr,
weight_decay=1e-4
)
say("Training will begin from scratch\n")
best_dev = 0
best_test = 0
iter_cnt = 0
# encoder.load_state_dict(torch.load(os.path.join(settings.OUT_VENUE_DIR, "venue-matching-cnn.mdl")))
for epoch in range(args.max_epoch):
say("epoch: {}\n".format(epoch))
if args.metric == "biaffine":
mats = [Us, Ws, Vs]
else:
mats = [Us, Ps, Ns]
iter_cnt = train_epoch(
iter_cnt,
[encoders, encoder_dst],
[classifiers, classifier_dst, classifier_mix], critic,
mats,
[train_loaders, train_loader_dst, unl_loader, valid_loader],
args,
optim_model,
epoch
)
# thrs, metrics_val, src_weights_val = evaluate_cross(
# encoder, classifiers,
# mats,
# [train_loaders, valid_loaders_src],
# return_best_thrs=True,
# args=args
# )
#
# _, metrics_test, src_weights_test = evaluate_cross(
# encoder, classifiers,
# mats,
# [train_loaders, test_loaders_src],
# return_best_thrs=False,
# args=args,
# thr=thrs
# )
thr, metrics_val = evaluate(
epoch,
[encoders, encoder_dst],
[classifiers, classifier_dst, classifier_mix],
mats,
[train_loaders, valid_loader],
True,
args
)
# say("Dev accuracy/oracle: {:.4f}/{:.4f}\n".format(curr_dev, oracle_curr_dev))
_, metrics_test = evaluate(
epoch,
[encoders, encoder_dst],
[classifiers, classifier_dst, | |
timeout(self) -> Optional[int]:
"""
The time in seconds as an integer to perform the mitigation action. This field is required if the `mode` is either `simulate` or `ban`. Must be the same or greater than the period (min: 1, max: 86400).
"""
return pulumi.get(self, "timeout")
@pulumi.output_type
class RateLimitActionResponse(dict):
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase dict keys to their snake_case property names.
        suggest = {"contentType": "content_type"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in RateLimitActionResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        RateLimitActionResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        RateLimitActionResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *, body: str, content_type: str):
        """
        :param str body: The body to return; its content should conform to content_type.
        :param str content_type: The content-type of the body; one of 'text/plain', 'text/xml', 'application/json'.
        """
        pulumi.set(__self__, "body", body)
        pulumi.set(__self__, "content_type", content_type)

    @property
    @pulumi.getter
    def body(self) -> str:
        """The body to return; its content should conform to content_type."""
        return pulumi.get(self, "body")

    @property
    @pulumi.getter(name="contentType")
    def content_type(self) -> str:
        """The content-type of the body; one of 'text/plain', 'text/xml', 'application/json'."""
        return pulumi.get(self, "content_type")
@pulumi.output_type
class RateLimitCorrelate(dict):
    def __init__(__self__, *, by: Optional[str] = None):
        """
        :param str by: If set to 'nat', NAT support will be enabled for rate limiting.
        """
        if by is not None:
            pulumi.set(__self__, "by", by)

    @property
    @pulumi.getter
    def by(self) -> Optional[str]:
        """If set to 'nat', NAT support will be enabled for rate limiting."""
        return pulumi.get(self, "by")
@pulumi.output_type
class RateLimitMatch(dict):
    def __init__(__self__, *,
                 request: Optional['outputs.RateLimitMatchRequest'] = None,
                 response: Optional['outputs.RateLimitMatchResponse'] = None):
        """
        :param 'RateLimitMatchRequestArgs' request: Matches HTTP requests (from the client to Cloudflare). See definition below.
        :param 'RateLimitMatchResponseArgs' response: Custom content-type and body to return; overrides the zone's custom error. Optional - omission yields the default HTML error page. Definition below.
        """
        # Only record properties the caller actually supplied.
        for attr, value in (("request", request), ("response", response)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def request(self) -> Optional['outputs.RateLimitMatchRequest']:
        """Matches HTTP requests (from the client to Cloudflare). See definition below."""
        return pulumi.get(self, "request")

    @property
    @pulumi.getter
    def response(self) -> Optional['outputs.RateLimitMatchResponse']:
        """Custom content-type and body to return; overrides the zone's custom error. Optional - omission yields the default HTML error page. Definition below."""
        return pulumi.get(self, "response")
@pulumi.output_type
class RateLimitMatchRequest(dict):
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase dict keys to their snake_case property names.
        suggest = {"urlPattern": "url_pattern"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in RateLimitMatchRequest. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        RateLimitMatchRequest.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        RateLimitMatchRequest.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 methods: Optional[Sequence[str]] = None,
                 schemes: Optional[Sequence[str]] = None,
                 url_pattern: Optional[str] = None):
        """
        :param Sequence[str] methods: HTTP Methods, a subset such as ['POST','PUT'] or all ['\_ALL\_']. Default: ['\_ALL\_'].
        :param Sequence[str] schemes: HTTP Schemes, one ['HTTPS'], both ['HTTP','HTTPS'] or all ['\_ALL\_']. Default: ['\_ALL\_'].
        :param str url_pattern: The URL pattern to match comprised of the host and path, i.e. example.org/path. Wildcards are expanded to match applicable traffic; query strings are not matched. Use * for all traffic to your zone. Default: '*'.
        """
        # Only record properties the caller actually supplied.
        for attr, value in (("methods", methods), ("schemes", schemes), ("url_pattern", url_pattern)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def methods(self) -> Optional[Sequence[str]]:
        """HTTP Methods, a subset such as ['POST','PUT'] or all ['\_ALL\_']. Default: ['\_ALL\_']."""
        return pulumi.get(self, "methods")

    @property
    @pulumi.getter
    def schemes(self) -> Optional[Sequence[str]]:
        """HTTP Schemes, one ['HTTPS'], both ['HTTP','HTTPS'] or all ['\_ALL\_']. Default: ['\_ALL\_']."""
        return pulumi.get(self, "schemes")

    @property
    @pulumi.getter(name="urlPattern")
    def url_pattern(self) -> Optional[str]:
        """The URL pattern to match comprised of the host and path, i.e. example.org/path. Wildcards are expanded to match applicable traffic; query strings are not matched. Use * for all traffic to your zone. Default: '*'."""
        return pulumi.get(self, "url_pattern")
@pulumi.output_type
class RateLimitMatchResponse(dict):
    """Response-side matching criteria of a Cloudflare rate limit rule."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {"originTraffic": "origin_traffic"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in RateLimitMatchResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        RateLimitMatchResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        RateLimitMatchResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 headers: Optional[Sequence[Mapping[str, str]]] = None,
                 origin_traffic: Optional[bool] = None,
                 statuses: Optional[Sequence[int]] = None):
        """
        :param Sequence[Mapping[str, str]] headers: block is a list of maps with the following attributes:
        :param bool origin_traffic: Only count traffic that has come from your origin servers. If true, cached items that Cloudflare serve will not count towards rate limiting. Default: `true`.
        :param Sequence[int] statuses: HTTP Status codes, can be one [403], many [401,403] or indicate all by not providing this value.
        """
        # Only materialize keys whose values were actually supplied.
        for attr_name, attr_value in (("headers", headers),
                                      ("origin_traffic", origin_traffic),
                                      ("statuses", statuses)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def headers(self) -> Optional[Sequence[Mapping[str, str]]]:
        """
        block is a list of maps with the following attributes:
        """
        stored = pulumi.get(self, "headers")
        return stored

    @property
    @pulumi.getter(name="originTraffic")
    def origin_traffic(self) -> Optional[bool]:
        """
        Only count traffic that has come from your origin servers. If true, cached items that Cloudflare serve will not count towards rate limiting. Default: `true`.
        """
        stored = pulumi.get(self, "origin_traffic")
        return stored

    @property
    @pulumi.getter
    def statuses(self) -> Optional[Sequence[int]]:
        """
        HTTP Status codes, can be one [403], many [401,403] or indicate all by not providing this value.
        """
        stored = pulumi.get(self, "statuses")
        return stored
@pulumi.output_type
class RecordData(dict):
@staticmethod
def __key_warning(key: str):
    # Single lookup table instead of a long if/elif chain: maps camelCase
    # wire keys to their snake_case property-getter equivalents.
    renames = {
        "digestType": "digest_type",
        "keyTag": "key_tag",
        "latDegrees": "lat_degrees",
        "latDirection": "lat_direction",
        "latMinutes": "lat_minutes",
        "latSeconds": "lat_seconds",
        "longDegrees": "long_degrees",
        "longDirection": "long_direction",
        "longMinutes": "long_minutes",
        "longSeconds": "long_seconds",
        "matchingType": "matching_type",
        "precisionHorz": "precision_horz",
        "precisionVert": "precision_vert",
        "publicKey": "public_key",
    }
    suggest = renames.get(key)
    if suggest:
        pulumi.log.warn(f"Key '{key}' not found in RecordData. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
    # Warn if the caller indexes with a camelCase wire key instead of the
    # snake_case property, then fall through to normal dict access.
    RecordData.__key_warning(key)
    return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
    # Same camelCase-key warning as __getitem__, but with dict.get semantics
    # (returns `default` instead of raising on a missing key).
    RecordData.__key_warning(key)
    return super().get(key, default)
def __init__(__self__, *,
algorithm: Optional[int] = None,
altitude: Optional[float] = None,
certificate: Optional[str] = None,
content: Optional[str] = None,
digest: Optional[str] = None,
digest_type: Optional[int] = None,
fingerprint: Optional[str] = None,
flags: Optional[str] = None,
key_tag: Optional[int] = None,
lat_degrees: Optional[int] = None,
lat_direction: Optional[str] = None,
lat_minutes: Optional[int] = None,
lat_seconds: Optional[float] = None,
long_degrees: Optional[int] = None,
long_direction: Optional[str] = None,
long_minutes: Optional[int] = None,
long_seconds: Optional[float] = None,
matching_type: Optional[int] = None,
name: Optional[str] = None,
order: Optional[int] = None,
port: Optional[int] = None,
precision_horz: Optional[float] = None,
precision_vert: Optional[float] = None,
preference: Optional[int] = None,
priority: Optional[int] = None,
proto: Optional[str] = None,
protocol: Optional[int] = None,
public_key: Optional[str] = None,
regex: Optional[str] = None,
replacement: Optional[str] = None,
selector: Optional[int] = None,
service: Optional[str] = None,
size: Optional[float] = None,
tag: Optional[str] = None,
target: Optional[str] = None,
type: Optional[int] = None,
usage: Optional[int] = None,
value: Optional[str] = None,
weight: Optional[int] = None):
"""
:param str name: The name of the record
:param int priority: The priority of the record
:param int type: The type of the record
:param str value: The (string) value of the record. Either this or `data` must be specified
"""
if algorithm is not None:
pulumi.set(__self__, "algorithm", algorithm)
if altitude is not None:
pulumi.set(__self__, "altitude", altitude)
if certificate is not None:
pulumi.set(__self__, "certificate", certificate)
if content is not None:
pulumi.set(__self__, "content", content)
if digest is not None:
pulumi.set(__self__, "digest", digest)
if digest_type is not None:
pulumi.set(__self__, "digest_type", digest_type)
if | |
<filename>pycqed/simulations/cz_superoperator_simulation_FAQUAD.py
"""
April 2018
Simulates the trajectory implementing a CZ gate.
June 2018
Included noise in the simulation.
"""
import time
import numpy as np
import qutip as qtp
from pycqed.measurement import detector_functions as det
from scipy.interpolate import interp1d
from pycqed.measurement.waveform_control_CC import waveform as wf
import scipy
import matplotlib.pyplot as plt
#np.set_printoptions(threshold=np.inf)
# operators
b = qtp.tensor(qtp.destroy(3), qtp.qeye(3)) # LSB is static qubit
a = qtp.tensor(qtp.qeye(3), qtp.destroy(3))
n_q0 = a.dag() * a
n_q1 = b.dag() * b
H_c=n_q0
'''alpha_q0 = -275e6 * 2*np.pi
alpha_q1 = -310e6 * 2*np.pi
w_q0 = 5.11e9 * 2*np.pi # Higher frequency qubit (fluxing) qubit
w_q1 = 4.10e9 * 2*np.pi # Lower frequency
J = 3.07e6 * 2 * np.pi # coupling strength
# caracteristic timescales for jump operators
T1_q0=34e-6
T1_q1=42e-6
Tphi_q0_ket0toket0=0 # here useless parameters
Tphi_q0_ket1toket1=0
Tphi_q0_ket2toket2=0
Tphi_q1_ket0toket0=0
Tphi_q1_ket1toket1=0
T2_q0=23e-6 # these two are the coherence times for q0 and q1 as qubits
T2_q1=23e-6
Tphi_q0_sigmaZ_01=1/(-1/(2*T1_q0)+1/T2_q0) # extracting Tphi which is not the Tphi above
Tphi_q0_sigmaZ_12=Tphi_q0_sigmaZ_01 # we will assume for the moment that the pure decoherence
# is caused by wiggles in the frequency, which cause
# a fluctuation half as large for 02 wrt 01 and 12
# (ignoring the anharmonicity)
Tphi_q0_sigmaZ_02=Tphi_q0_sigmaZ_01/2
Tphi_q1_sigmaZ_01=1/(-1/(2*T1_q1)+1/T2_q1)'''
#scalefactor=1e6
# Hamiltonian
def coupled_transmons_hamiltonian(w_q0, w_q1, alpha_q0, alpha_q1, J):
    """
    Static Hamiltonian of two coupled anharmonic (3-level) transmons.

    Because the intention is to tune one qubit into resonance with the
    other, the number of levels is limited to 3:
        q1 -> static qubit, q0 -> fluxing qubit
    and the intended avoided crossing is 11 <-> 02 (q1 is the first qubit
    and q0 the second one). N.B. the frequency of q0 is expected to be
    larger than that of q1 (w_q0 > w_q1) and the anharmonicities negative.
    """
    # Bare transmon energies plus Kerr-type anharmonic terms.
    anharm_q0 = 1/2 * alpha_q0 * (a.dag() * a.dag() * a * a)
    anharm_q1 = 1/2 * alpha_q1 * (b.dag() * b.dag() * b * b)
    # Transverse exchange coupling between the two transmons.
    exchange = J * (a.dag() + a) * (b + b.dag())
    return w_q0 * n_q0 + w_q1 * n_q1 + anharm_q0 + anharm_q1 + exchange
#H_0 = coupled_transmons_hamiltonian(w_q0=w_q0, w_q1=w_q1, alpha_q0=alpha_q0,alpha_q1=alpha_q1,J=J)
# target in the case with no noise
# note that the Hilbert space is H_q1 /otimes H_q0
# so the ordering of basis states below is 00,01,02,10,11,12,20,21,22
# Ideal CZ on the two qutrits: a -1 phase on |02> (the level used at the
# avoided crossing) and on |11>, identity everywhere else.
U_target = qtp.Qobj([[1, 0, 0, 0, 0, 0, 0, 0, 0],
                     [0, 1, 0, 0, 0, 0, 0, 0, 0],
                     [0, 0, -1, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 1, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, -1, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 1, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 1, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 0, 0, 0, 0, 1]],
                    type='oper',
                    dims=[[3, 3], [3, 3]])
#U_target._type = 'oper'
# Same matrix with flat dims [[9], [9]]: qutip's average_gate_fidelity
# rejects the composite [[3, 3], [3, 3]] dims.
U_target_diffdims = qtp.Qobj([[1, 0, 0, 0, 0, 0, 0, 0, 0],
                              [0, 1, 0, 0, 0, 0, 0, 0, 0],
                              [0, 0, -1, 0, 0, 0, 0, 0, 0],
                              [0, 0, 0, 1, 0, 0, 0, 0, 0],
                              [0, 0, 0, 0, -1, 0, 0, 0, 0],
                              [0, 0, 0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 0, 0, 0, 0, 1, 0, 0],
                              [0, 0, 0, 0, 0, 0, 0, 1, 0],
                              [0, 0, 0, 0, 0, 0, 0, 0, 1]],
                             type='oper',
                             dims=[[9], [9]])  # otherwise average_gate_fidelity doesn't work
# if there is noise the target is the corresponding superoperator
U_super_target = qtp.to_super(U_target)
'''
remember that qutip uses the Liouville (matrix) representation for superoperators,
with column stacking.
This means that
rho_{xy,x'y'}=rho[3*x+y,3*x'+y']
rho_{xy,x'y'}=operator_to_vector(rho)[3*x+y+27*x'+9*y'] VERIFY
where xy is the row and x'y' is the column
'''
def plot(x_plot_vec,y_plot_vec,title='No title',xlabel='No xlabel',ylabel='No ylabel',legend_labels=list(),yscale='linear'):
    """Plot one or more curves on a single matplotlib figure.

    Parameters
    ----------
    x_plot_vec : list
        Either a single x vector (shared by all curves, length-1 list) or
        one x vector per curve.
    y_plot_vec : list or ndarray
        The curves to draw; list entries may themselves be lists or arrays.
    title, xlabel, ylabel : str
        Figure annotations.
    legend_labels : sequence, optional
        One label per curve; defaults to the curve indices.
    yscale : str
        Matplotlib y-axis scale ('linear', 'log', ...).

    Notes
    -----
    Fixes over the previous version: the caller's ``x_plot_vec`` /
    ``y_plot_vec`` / ``legend_labels`` are no longer mutated in place, and
    numpy integer labels (produced by the ``np.arange`` default) are now
    converted to strings like plain ints were.
    """
    if isinstance(y_plot_vec, list):
        y_length = len(y_plot_vec)
    else:
        y_length = np.size(y_plot_vec)
    if legend_labels is None or len(legend_labels) == 0:
        legend_labels = np.arange(y_length)
    # Convert labels on a local copy; isinstance(.., np.integer) also
    # catches the np.int64 entries of the np.arange default, which the old
    # `isinstance(.., int)` check silently missed.
    labels = [str(lab) if isinstance(lab, (int, np.integer)) else lab
              for lab in legend_labels]
    for i in range(y_length):
        y_i = np.array(y_plot_vec[i]) if isinstance(y_plot_vec[i], list) else y_plot_vec[i]
        if len(x_plot_vec) == 1:
            # Single shared x-axis for every curve.
            x_i = np.array(x_plot_vec[0]) if isinstance(x_plot_vec[0], list) else x_plot_vec[0]
        else:
            x_i = np.array(x_plot_vec[i]) if isinstance(x_plot_vec[i], list) else x_plot_vec[i]
        plt.plot(x_i, y_i, label=labels[i])
    plt.legend()
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.yscale(yscale)
    plt.show()
def jump_operators(T1_q0,T1_q1,Tphi_q0_ket0toket0,Tphi_q0_ket1toket1,Tphi_q0_ket2toket2,Tphi_q1_ket0toket0,Tphi_q1_ket1toket1,
                   Tphi_q0_sigmaZ_01,Tphi_q0_sigmaZ_12,Tphi_q0_sigmaZ_02,Tphi_q1_sigmaZ_01,Tphi_q1_sigmaZ_12,Tphi_q1_sigmaZ_02):
    """Build the time-independent collapse operators for the two qutrits.

    A characteristic time equal to 0 switches the corresponding channel
    off. Relaxation and projector-dephasing channels enter with rate
    sqrt(1/T); sigma_Z-type dephasing channels with rate sqrt(1/(2*T)).
    Returns the operators in the same fixed order as the original
    if-chain implementation.
    """
    identity3 = qtp.qeye(3)

    def acting_on_q0(op):
        # q0 is the second tensor factor.
        return qtp.tensor(identity3, op)

    def acting_on_q1(op):
        # q1 is the first tensor factor.
        return qtp.tensor(op, identity3)

    # sigma_Z analogues restricted to the 01, 12 and 02 sub-blocks of a qutrit.
    sigmaZ_01 = qtp.Qobj([[1, 0, 0],
                          [0, -1, 0],
                          [0, 0, 0]])
    sigmaZ_12 = qtp.Qobj([[0, 0, 0],
                          [0, 1, 0],
                          [0, 0, -1]])
    sigmaZ_02 = qtp.Qobj([[1, 0, 0],
                          [0, 0, 0],
                          [0, 0, -1]])

    c_ops = []
    # Relaxation (T1) channels.
    if T1_q0 != 0:
        c_ops.append(np.sqrt(1 / T1_q0) * a)
    if T1_q1 != 0:
        c_ops.append(np.sqrt(1 / T1_q1) * b)

    # (characteristic time, collapse operator, rate prefactor) triples, in
    # the original append order so callers see an identically ordered list.
    dephasing_channels = [
        (Tphi_q0_ket0toket0, acting_on_q0(qtp.ket2dm(qtp.basis(3, 0))), 1),
        (Tphi_q0_ket1toket1, acting_on_q0(qtp.ket2dm(qtp.basis(3, 1))), 1),
        (Tphi_q0_ket2toket2, acting_on_q0(qtp.ket2dm(qtp.basis(3, 2))), 1),
        (Tphi_q1_ket0toket0, acting_on_q1(qtp.ket2dm(qtp.basis(3, 0))), 1),
        (Tphi_q1_ket1toket1, acting_on_q1(qtp.ket2dm(qtp.basis(3, 1))), 1),
        (Tphi_q0_sigmaZ_01, acting_on_q0(sigmaZ_01), 2),
        (Tphi_q0_sigmaZ_12, acting_on_q0(sigmaZ_12), 2),
        (Tphi_q0_sigmaZ_02, acting_on_q0(sigmaZ_02), 2),
        (Tphi_q1_sigmaZ_01, acting_on_q1(sigmaZ_01), 2),
        (Tphi_q1_sigmaZ_12, acting_on_q1(sigmaZ_12), 2),
        (Tphi_q1_sigmaZ_02, acting_on_q1(sigmaZ_02), 2),
    ]
    for tphi, collapse, factor in dephasing_channels:
        if tphi != 0:
            c_ops.append(np.sqrt(1 / (factor * tphi)) * collapse)
    return c_ops
#c_ops=jump_operators(T1_q0,T1_q1,Tphi_q0_ket0toket0,Tphi_q0_ket1toket1,Tphi_q0_ket2toket2,Tphi_q1_ket0toket0,Tphi_q1_ket1toket1,
# Tphi_q0_sigmaZ_01,Tphi_q0_sigmaZ_12,Tphi_q0_sigmaZ_02,Tphi_q1_sigmaZ_01)
def c_ops_interpolating(T1_q0,T1_q1,Tphi01_q0_vec,Tphi01_q1):
    """Build collapse operators when q0's pure dephasing is time dependent.

    Parameters
    ----------
    T1_q0, T1_q1 : float
        Relaxation times of q0 and q1; 0 disables the channel.
    Tphi01_q0_vec : array-like
        Time-resolved (pulse-amplitude dependent) Tphi of q0's 01
        transition; an empty list disables q0 dephasing.
    Tphi01_q1 : float
        Constant Tphi of q1's 01 transition; 0 disables it.

    Returns
    -------
    list
        Collapse operators in qutip's mesolve format: constant channels as
        plain Qobj, time-dependent ones as [Qobj, rate_array] pairs.
    """
    # case where the pure decoherence for qubit q0 is time dependent, or better pulse-amplitude dependent
    c_ops=[]
    if T1_q0 != 0:
        c_ops.append(np.sqrt(1/T1_q0)*a)
    if T1_q1 != 0:
        c_ops.append(np.sqrt(1/T1_q1)*b)
    if Tphi01_q1 != 0:                                 # we automatically put also the decoherence for 12 and 02
        sigmaZinqutrit = qtp.Qobj([[1,0,0],
                                   [0,-1,0],
                                   [0,0,0]])
        collapse=qtp.tensor(sigmaZinqutrit,qtp.qeye(3))
        c_ops.append(collapse*np.sqrt(1/(2*Tphi01_q1)))
        # 12 transition assumed to dephase at the same rate as 01.
        Tphi12_q1=Tphi01_q1
        sigmaZinqutrit = qtp.Qobj([[0,0,0],
                                   [0,1,0],
                                   [0,0,-1]])
        collapse=qtp.tensor(sigmaZinqutrit,qtp.qeye(3))
        c_ops.append(collapse*np.sqrt(1/(2*Tphi12_q1)))
        # 02 is twice as sensitive to frequency wiggles as 01/12, hence
        # half the Tphi (cf. the commented-out parameter block above).
        Tphi02_q1=Tphi01_q1/2
        sigmaZinqutrit = qtp.Qobj([[1,0,0],
                                   [0,0,0],
                                   [0,0,-1]])
        collapse=qtp.tensor(sigmaZinqutrit,qtp.qeye(3))
        c_ops.append(collapse*np.sqrt(1/(2*Tphi02_q1)))
    if Tphi01_q0_vec != []:                            # we automatically put also the decoherence for 12 and 02
        sigmaZinqutrit = qtp.Qobj([[1,0,0],
                                   [0,-1,0],
                                   [0,0,0]])
        collapse=qtp.tensor(qtp.qeye(3),sigmaZinqutrit)
        # Time-dependent channel: [operator, rate(t) array] qutip format.
        c_ops.append([collapse,np.sqrt(1/(2*Tphi01_q0_vec))])
        Tphi12_q0_vec=Tphi01_q0_vec
        sigmaZinqutrit = qtp.Qobj([[0,0,0],
                                   [0,1,0],
                                   [0,0,-1]])
        collapse=qtp.tensor(qtp.qeye(3),sigmaZinqutrit)
        c_ops.append([collapse,np.sqrt(1/(2*Tphi12_q0_vec))])
        Tphi02_q0_vec=Tphi01_q0_vec/2
        sigmaZinqutrit = qtp.Qobj([[1,0,0],
                                   [0,0,0],
                                   [0,0,-1]])
        collapse=qtp.tensor(qtp.qeye(3),sigmaZinqutrit)
        c_ops.append([collapse,np.sqrt(1/(2*Tphi02_q0_vec))])
    return c_ops
def rotating_frame_transformation(U, t: float,
                                  w_q0: float=0, w_q1: float =0):
    """
    Transforms the frame of the unitary (one-sided):
        U' = U_RF * U
    with
        U_RF = e^{i w_q0 a^dag a t} otimes e^{i w_q1 b^dag b t}

    NOTE: despite the name this is NOT the similarity transform
    U_RF * U * U_RF^dag; only the left factor is applied, because that is
    the operator satisfying the Schroedinger equation in the interaction
    picture (see note below).

    Args:
        U (QObj): Unitary to be transformed
        t (float): time at which to transform
        w_q0 (float): freq of frame for q0
        w_q1 (float): freq of frame for q1
    """
    U_RF = (1j*w_q0*n_q0*t).expm() * (1j*w_q1*n_q1*t).expm()
    U_prime = U_RF * U
    """ U_RF only on one side because that's the operator that
    satisfies the Schroedinger equation in the interaction picture.
    Anyway we won't use this function.
    In case we would need to rotate in the new picture the jump operators as well !
    """
    return U_prime
def phases_from_superoperator(U):
    """
    Return the phases (degrees) of a unitary or superoperator U as
    (phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond).

    For a superoperator only phase differences are physical, so phi_00 is
    set to 0 by convention; the conditional phase formula is unaffected.
    phi_02 / phi_20 are used only by the phase-corrected fidelity helpers.

    !!! check that this is a good formula for superoperators: there is a
    lot of redundancy there if the evolution is unitary, but not
    necessarily if it's noisy!
    """
    def diag_phase(i):
        # Phase (deg) of the i-th diagonal element of U.
        return np.rad2deg(np.angle(U[i, i]))

    def assemble(phi_00):
        phi_01 = diag_phase(1)
        phi_10 = diag_phase(3)
        phi_11 = diag_phase(4)
        phi_02 = diag_phase(2)
        phi_20 = diag_phase(6)
        # Conditional phase; independent of the phi_00 convention.
        phi_cond = (phi_11 - phi_01 - phi_10 + phi_00) % 360
        return phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond

    if U.type == 'oper':
        # phi_00 expected to equal 0 because of our choice for the energy,
        # not because of the rotating frame.
        return assemble(diag_phase(0))
    elif U.type == 'super':
        # Set arbitrarily to 0: the absolute phase is not knowable here.
        return assemble(0)
def pro_avfid_superoperator_compsubspace(U,L1):
    """
    Average process (gate) fidelity restricted to the two-qubit
    computational subspace of the two qutrits, with leakage L1 taken into
    account (Woods & Gambetta formula).
    """
    comp_idx = (0, 1, 3, 4)  # computational-subspace diagonal indices
    dim = 4                  # dimension of the 2-qubit comp subspace

    def comp_trace(overlap):
        # Partial trace of the overlap operator over the comp subspace.
        return sum(overlap[i, i] for i in comp_idx)

    if U.type == 'oper':
        tr = comp_trace(U.dag() * U_target)
        return np.real((np.abs(tr) ** 2 + dim * (1 - L1)) / (dim * (dim + 1)))
    elif U.type == 'super':
        psum = 0
        for A_k in qtp.to_kraus(U):
            # U_target_diffdims has flat dims, otherwise dimension mismatch.
            psum += np.abs(comp_trace(U_target_diffdims.dag() * A_k)) ** 2
        return np.real((dim * (1 - L1) + psum) / (dim * (dim + 1)))
def pro_avfid_superoperator_compsubspace_phasecorrected(U,L1,phases):
"""
Average process (gate) fidelity in the qubit computational subspace for two qutrits
Leakage has to be taken into account, see Woods & Gambetta
The phase is corrected with Z rotations considering both transmons as qubits
"""
Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0],
[0, np.exp(-1j*np.deg2rad(phases[1])), 0, 0, 0, 0, 0, 0, 0],
[0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0],
[0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0],
[0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[3]-phases[-1])), 0, 0, 0, 0],
[0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0],
[0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0],
[0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[1])), 0],
[0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0]))]],
type='oper',
dims=[[3, 3], [3, 3]])
if U.type=='oper':
U=Ucorrection*U
inner | |
self._build_ready = ScoredHeap()
self._fetch_jobs = {}
self._fetch_ready = ScoredHeap()
# List of total package installs represented in deps_map.
install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
self._total_jobs = len(install_jobs)
self._show_output = show_output
if "--pretend" in emerge.opts:
print "Skipping merge because of --pretend mode."
sys.exit(0)
# Set a process group so we can easily terminate all children.
os.setsid()
# Setup scheduler graph object. This is used by the child processes
# to help schedule jobs.
emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
# Calculate how many jobs we can run in parallel. We don't want to pass
# the --jobs flag over to emerge itself, because that'll tell emerge to
# hide its output, and said output is quite useful for debugging hung
# jobs.
procs = min(self._total_jobs,
emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
self._build_procs = self._fetch_procs = max(1, procs)
self._load_avg = emerge.opts.pop("--load-average", None)
self._job_queue = multiprocessing.Queue()
self._print_queue = multiprocessing.Queue()
self._fetch_queue = multiprocessing.Queue()
args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
args)
self._build_queue = multiprocessing.Queue()
args = (self._build_queue, self._job_queue, emerge, package_db)
self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
args)
self._print_worker = multiprocessing.Process(target=PrintWorker,
args=[self._print_queue])
self._print_worker.start()
# Initialize the failed queue to empty.
self._retry_queue = []
self._failed = set()
# Setup an exit handler so that we print nice messages if we are
# terminated.
self._SetupExitHandler()
# Schedule our jobs.
self._state_map.update(
(pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
self._fetch_ready.multi_put(self._state_map.itervalues())
def _SetupExitHandler(self):
    """Install SIGINT/SIGTERM handlers that tear down the whole job group."""

    def ExitHandler(signum, _frame):
        # Set KILLED flag (presumably polled by the worker processes —
        # confirm against EmergeWorker).
        KILLED.set()
        # Kill our signal handlers so we don't get called recursively
        signal.signal(signal.SIGINT, KillHandler)
        signal.signal(signal.SIGTERM, KillHandler)
        # Print our current job status; unlink=True also removes the log files.
        for job in self._build_jobs.itervalues():
            if job:
                self._print_queue.put(JobPrinter(job, unlink=True))
        # Notify the user that we are exiting
        self._Print("Exiting on signal %s" % signum)
        # Flush and stop the print worker before killing the process group.
        self._print_queue.put(None)
        self._print_worker.join()
        # Kill child threads, then exit. os.setsid() in __init__ made this
        # process the group leader, so killpg(0, ...) reaches every child.
        os.killpg(0, signal.SIGKILL)
        sys.exit(1)

    # Print out job status when we are killed
    signal.signal(signal.SIGINT, ExitHandler)
    signal.signal(signal.SIGTERM, ExitHandler)
def _Schedule(self, pkg_state):
    """Kick off a build for pkg_state if it actually needs to be merged.

    "nomerge" packages are finished immediately (which unblocks their
    dependents); packages already present in the build table are left
    alone. Returns True only when a build job was enqueued.
    """
    # We maintain a tree of all deps; a "nomerge" entry just frees up its
    # children. It is possible to reinstall deps of deps without
    # reinstalling first-level deps, like so:
    #   chromeos (merge) -> eselect (nomerge) -> python (merge)
    info = pkg_state.info
    if info is None:
        return
    target = pkg_state.target
    if info["action"] == "nomerge":
        self._Finish(target)
        return
    if target in self._build_jobs:
        return
    # Reserve the slot (job object arrives later) and queue the build.
    self._build_jobs[target] = None
    self._build_queue.put(pkg_state)
    return True
def _ScheduleLoop(self):
    """Fill available build slots, throttling under high system load."""
    # If the one-minute load average exceeds the requested cap, trickle
    # jobs out one at a time instead of filling every slot.
    if self._load_avg and os.getloadavg()[0] > self._load_avg:
        slots = 1
    else:
        slots = self._build_procs
    while self._build_ready and len(self._build_jobs) < slots:
        candidate = self._build_ready.get()
        if candidate.target in self._failed:
            # Failed packages wait in the retry queue instead.
            continue
        self._Schedule(candidate)
def _Print(self, line):
    """Print a single line."""
    # All output funnels through the dedicated print worker so messages
    # from concurrent jobs cannot interleave.
    self._print_queue.put(LinePrinter(line))
def _Status(self):
    """Print status.

    Called periodically from the main Run() loop: either dumps/announces
    long-running build jobs, or — when nothing job-specific was printed —
    emits a one-line summary of the queue counters.
    """
    current_time = time.time()
    no_output = True
    # Print interim output every minute if --show-output is used. Otherwise,
    # print notifications about running packages every 2 minutes, and print
    # full output for jobs that have been running for 60 minutes or more.
    if self._show_output:
        interval = 60
        notify_interval = 0
    else:
        interval = 60 * 60
        notify_interval = 60 * 2
    for job in self._build_jobs.itervalues():
        if job:
            # Compare against the later of "job started" and "output last
            # dumped" so a freshly dumped job is not dumped again at once.
            last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
            if last_timestamp + interval < current_time:
                self._print_queue.put(JobPrinter(job))
                job.last_output_timestamp = current_time
                no_output = False
            elif (notify_interval and
                  job.last_notify_timestamp + notify_interval < current_time):
                job_seconds = current_time - job.start_timestamp
                args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
                info = "Still building %s (%dm%.1fs). Logs in %s" % args
                job.last_notify_timestamp = current_time
                self._Print(info)
                no_output = False
    # If we haven't printed any messages yet, print a general status message
    # here.
    if no_output:
        seconds = current_time - GLOBAL_START
        fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
        bjobs, bready = len(self._build_jobs), len(self._build_ready)
        retries = len(self._retry_queue)
        # Packages currently fetching or building are no longer "pending".
        pending = max(0, len(self._deps_map) - fjobs - bjobs)
        line = "Pending %s/%s, " % (pending, self._total_jobs)
        if fjobs or fready:
            line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
        if bjobs or bready or retries:
            line += "Building %s/%s, " % (bjobs, bready + bjobs)
        if retries:
            line += "Retrying %s, " % (retries,)
        load = " ".join(str(x) for x in os.getloadavg())
        line += ("[Time %dm%.1fs Load %s]" % (seconds/60, seconds %60, load))
        self._Print(line)
def _Finish(self, target):
    """Mark a target as completed and unblock dependencies."""
    this_pkg = self._deps_map[target]
    if this_pkg["needs"] and this_pkg["nodeps"]:
        # We got installed, but our deps have not been installed yet. Dependent
        # packages should only be installed when our needs have been fully met.
        this_pkg["action"] = "nomerge"
    else:
        for dep in this_pkg["provides"]:
            dep_pkg = self._deps_map[dep]
            state = self._state_map[dep]
            # This dependency edge is now satisfied.
            del dep_pkg["needs"][target]
            state.update_score()
            if not state.prefetched:
                if dep in self._fetch_ready:
                    # If it's not currently being fetched, update the prioritization
                    self._fetch_ready.sort()
            elif not dep_pkg["needs"]:
                if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
                    # A deferred "nomerge" placeholder finishes recursively
                    # once its last dependency lands.
                    self._Finish(dep)
                else:
                    # All deps satisfied and sources prefetched: buildable.
                    self._build_ready.put(self._state_map[dep])
    self._deps_map.pop(target)
def _Retry(self):
    """Re-schedule the first previously-failed package that will take."""
    pending_retries = self._retry_queue
    while pending_retries:
        candidate = pending_retries.pop(0)
        if not self._Schedule(candidate):
            # Could not be scheduled (e.g. nomerge); try the next one.
            continue
        self._Print("Retrying emerge of %s." % candidate.target)
        break
def _Shutdown(self):
    """Stop worker pools, then queues, then the print worker — in order."""
    # Tell emerge workers to exit. They all exit when 'None' is pushed
    # to the queue.
    # Shutdown the workers first; then jobs (which is how they feed things back)
    # then finally the print queue.

    def _stop(queue, pool):
        # Graceful stop; terminate() in the finally clause guarantees the
        # pool is gone even if close/join raise.
        if pool is None:
            return
        try:
            queue.put(None)
            pool.close()
            pool.join()
        finally:
            pool.terminate()

    _stop(self._fetch_queue, self._fetch_pool)
    self._fetch_queue = self._fetch_pool = None
    _stop(self._build_queue, self._build_pool)
    self._build_queue = self._build_pool = None
    if self._job_queue is not None:
        self._job_queue.close()
        self._job_queue = None
    # Now that our workers are finished, we can kill the print queue.
    if self._print_worker is not None:
        try:
            self._print_queue.put(None)
            self._print_queue.close()
            self._print_worker.join()
        finally:
            self._print_worker.terminate()
        self._print_queue = self._print_worker = None
def Run(self):
"""Run through the scheduled ebuilds.
Keep running so long as we have uninstalled packages in the
dependency graph to merge.
"""
if not self._deps_map:
return
# Start the fetchers.
for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
state = self._fetch_ready.get()
self._fetch_jobs[state.target] = None
self._fetch_queue.put(state)
# Print an update, then get going.
self._Status()
retried = set()
while self._deps_map:
# Check here that we are actually waiting for something.
if (self._build_queue.empty() and
self._job_queue.empty() and
not self._fetch_jobs and
not self._fetch_ready and
not self._build_jobs and
not self._build_ready and
self._deps_map):
# If we have failed on a package, retry it now.
if self._retry_queue:
self._Retry()
else:
# Tell the user why we're exiting.
if self._failed:
print 'Packages failed:\n\t%s' % '\n\t'.join(self._failed)
status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
if status_file:
failed_pkgs = set(portage.versions.cpv_getkey(x)
for x in self._failed)
with open(status_file, "a") as f:
f.write("%s\n" % " ".join(failed_pkgs))
else:
print "Deadlock! Circular dependencies!"
sys.exit(1)
for _ in xrange(12):
try:
job = self._job_queue.get(timeout=5)
break
except Queue.Empty:
# Check if any more jobs can be scheduled.
self._ScheduleLoop()
else:
# Print an update every 60 seconds.
self._Status()
continue
target = job.target
if job.fetch_only:
if not job.done:
self._fetch_jobs[job.target] = job
else:
state = self._state_map[job.target]
state.prefetched = True
state.fetched_successfully = (job.retcode == 0)
del self._fetch_jobs[job.target]
self._Print("Fetched %s in %2.2fs"
% (target, time.time() - job.start_timestamp))
if self._show_output or job.retcode != 0:
self._print_queue.put(JobPrinter(job, unlink=True))
else:
os.unlink(job.filename)
# Failure or not, let build work with it next.
if not self._deps_map[job.target]["needs"]:
self._build_ready.put(state)
self._ScheduleLoop()
if self._fetch_ready:
state = self._fetch_ready.get()
self._fetch_queue.put(state)
self._fetch_jobs[state.target] = None
else:
# Minor optimization; shut down fetchers early since we know
# the queue is empty.
self._fetch_queue.put(None)
continue
if not job.done:
self._build_jobs[target] = job
self._Print("Started %s (logged in %s)" % (target, job.filename))
continue
# Print output of job
if self._show_output or job.retcode != 0:
self._print_queue.put(JobPrinter(job, unlink=True))
else:
os.unlink(job.filename)
del self._build_jobs[target]
seconds = time.time() - job.start_timestamp
details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
previously_failed = target in self._failed
# Complain if necessary.
if job.retcode != 0:
# Handle job failure.
if previously_failed:
# If this job has failed previously, give up.
self._Print("Failed %s. Your build has failed." % details)
else:
# Queue up this build to try again after a long while.
retried.add(target)
self._retry_queue.append(self._state_map[target])
self._failed.add(target)
self._Print("Failed %s, retrying later." % details)
else:
if previously_failed:
# Remove target from list of failed packages.
self._failed.remove(target)
self._Print("Completed %s" % | |
the transform gate
reference: https://arxiv.org/pdf/1505.00387.pdf
This layer expects its input to be a two dimensional tensor of shape (batch size, # input features).
Outputs will be in the same shape.
"""
def __init__(self,
             activation_fn='relu',
             biases_initializer='zeros',
             weights_initializer=None,
             **kwargs):
    """
    Parameters
    ----------
    activation_fn: object
        the Tensorflow activation function to apply to the output
    biases_initializer: callable object
        the initializer for bias values. This may be None, in which case the layer
        will not include biases.
    weights_initializer: callable object
        the initializer for weight values
    """
    super(Highway, self).__init__(**kwargs)
    # Only store the configuration here; the dense sub-layers are created
    # lazily in build() once the input width is known.
    self.activation_fn = activation_fn
    self.biases_initializer = biases_initializer
    self.weights_initializer = weights_initializer
def get_config(self):
    """Return the constructor arguments needed to re-create this layer."""
    base = super(Highway, self).get_config()
    base.update(
        activation_fn=self.activation_fn,
        biases_initializer=self.biases_initializer,
        weights_initializer=self.weights_initializer)
    return base
def build(self, input_shape):
    """Create the transform (H) and gate (T) dense sub-layers.

    Parameters
    ----------
    input_shape: shape or sequence of shapes
        When a sequence of input shapes is supplied (multiple inputs), the
        first one is used; the layer width equals the input feature count.
    """
    # collections.Sequence was removed in Python 3.10; the abc module is
    # the correct home for the ABC on every supported version.
    from collections.abc import Sequence
    if isinstance(input_shape, Sequence):
        input_shape = input_shape[0]
    out_channels = input_shape[1]
    if self.weights_initializer is None:
        # Instantiate the initializer: Dense expects an Initializer
        # instance (or name), not the class object itself.
        weights_initializer = tf.keras.initializers.VarianceScaling()
    else:
        weights_initializer = self.weights_initializer
    self.dense_H = tf.keras.layers.Dense(
        out_channels,
        activation=self.activation_fn,
        bias_initializer=self.biases_initializer,
        kernel_initializer=weights_initializer)
    # Gate bias starts at -1 so T = sigmoid(z) is initially small and the
    # layer mostly carries its input through unchanged (see call()).
    self.dense_T = tf.keras.layers.Dense(
        out_channels,
        activation=tf.nn.sigmoid,
        bias_initializer=tf.constant_initializer(-1),
        kernel_initializer=weights_initializer)
    self.built = True
def call(self, inputs):
    """Apply the highway transform: H(x) * T(x) + x * (1 - T(x)).

    `inputs` may be a single tensor or a sequence of tensors, in which
    case only the first entry is used.
    """
    # collections.Sequence was removed in Python 3.10; use collections.abc.
    from collections.abc import Sequence
    if isinstance(inputs, Sequence):
        parent = inputs[0]
    else:
        parent = inputs
    dense_H = self.dense_H(parent)
    dense_T = self.dense_T(parent)
    # Gated mixture of the transformed signal and the untouched input.
    return tf.multiply(dense_H, dense_T) + tf.multiply(parent, 1 - dense_T)
class WeaveLayer(tf.keras.layers.Layer):
def __init__(self,
             n_atom_input_feat=75,
             n_pair_input_feat=14,
             n_atom_output_feat=50,
             n_pair_output_feat=50,
             n_hidden_AA=50,
             n_hidden_PA=50,
             n_hidden_AP=50,
             n_hidden_PP=50,
             update_pair=True,
             init='glorot_uniform',
             activation='relu',
             **kwargs):
    """
    Parameters
    ----------
    n_atom_input_feat: int, optional
        Number of features for each atom in input.
    n_pair_input_feat: int, optional
        Number of features for each pair of atoms in input.
    n_atom_output_feat: int, optional
        Number of features for each atom in output.
    n_pair_output_feat: int, optional
        Number of features for each pair of atoms in output.
    n_hidden_XX: int, optional
        Number of units(convolution depths) in corresponding hidden layer
    update_pair: bool, optional
        Whether to calculate for pair features,
        could be turned off for last layer
    init: str, optional
        Weight initialization for filters.
    activation: str, optional
        Activation function applied
    """
    super(WeaveLayer, self).__init__(**kwargs)
    self.init = init  # Set weight initialization
    self.activation = activation  # Get activations
    self.activation_fn = activations.get(activation)
    self.update_pair = update_pair  # last weave layer does not need to update
    self.n_hidden_AA = n_hidden_AA
    self.n_hidden_PA = n_hidden_PA
    self.n_hidden_AP = n_hidden_AP
    self.n_hidden_PP = n_hidden_PP
    self.n_hidden_A = n_hidden_AA + n_hidden_PA  # width of concat([AA, PA])
    self.n_hidden_P = n_hidden_AP + n_hidden_PP  # width of concat([AP, PP])
    self.n_atom_input_feat = n_atom_input_feat
    self.n_pair_input_feat = n_pair_input_feat
    self.n_atom_output_feat = n_atom_output_feat
    self.n_pair_output_feat = n_pair_output_feat
    # Pair-update weights are only created in build() when update_pair is
    # True; pre-fill the slots so the attributes always exist.
    self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P = None, None, None, None, None, None
def get_config(self):
config = super(WeaveLayer, self).get_config()
config['n_atom_input_feat'] = self.n_atom_input_feat
config['n_pair_input_feat'] = self.n_pair_input_feat
config['n_atom_output_feat'] = self.n_atom_output_feat
config['n_pair_output_feat'] = self.n_pair_output_feat
config['n_hidden_AA'] = self.n_hidden_AA
config['n_hidden_PA'] = self.n_hidden_PA
config['n_hidden_AP'] = self.n_hidden_AP
config['n_hidden_PP'] = self.n_hidden_PP
config['update_pair'] = self.update_pair
config['init'] = self.init
config['activation'] = self.activation
return config
def build(self, input_shape):
""" Construct internal trainable weights."""
init = initializers.get(self.init) # Set weight initialization
self.W_AA = init([self.n_atom_input_feat, self.n_hidden_AA])
self.b_AA = backend.zeros(shape=[
self.n_hidden_AA,
])
self.W_PA = init([self.n_pair_input_feat, self.n_hidden_PA])
self.b_PA = backend.zeros(shape=[
self.n_hidden_PA,
])
self.W_A = init([self.n_hidden_A, self.n_atom_output_feat])
self.b_A = backend.zeros(shape=[
self.n_atom_output_feat,
])
if self.update_pair:
self.W_AP = init([self.n_atom_input_feat * 2, self.n_hidden_AP])
self.b_AP = backend.zeros(shape=[
self.n_hidden_AP,
])
self.W_PP = init([self.n_pair_input_feat, self.n_hidden_PP])
self.b_PP = backend.zeros(shape=[
self.n_hidden_PP,
])
self.W_P = init([self.n_hidden_P, self.n_pair_output_feat])
self.b_P = backend.zeros(shape=[
self.n_pair_output_feat,
])
self.built = True
def call(self, inputs):
"""Creates weave tensors.
inputs: [atom_features, pair_features], pair_split, atom_to_pair
"""
atom_features = inputs[0]
pair_features = inputs[1]
pair_split = inputs[2]
atom_to_pair = inputs[3]
activation = self.activation_fn
AA = tf.matmul(atom_features, self.W_AA) + self.b_AA
AA = activation(AA)
PA = tf.matmul(pair_features, self.W_PA) + self.b_PA
PA = activation(PA)
PA = tf.math.segment_sum(PA, pair_split)
A = tf.matmul(tf.concat([AA, PA], 1), self.W_A) + self.b_A
A = activation(A)
if self.update_pair:
AP_ij = tf.matmul(
tf.reshape(
tf.gather(atom_features, atom_to_pair),
[-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
AP_ij = activation(AP_ij)
AP_ji = tf.matmul(
tf.reshape(
tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
[-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
AP_ji = activation(AP_ji)
PP = tf.matmul(pair_features, self.W_PP) + self.b_PP
PP = activation(PP)
P = tf.matmul(tf.concat([AP_ij + AP_ji, PP], 1), self.W_P) + self.b_P
P = activation(P)
else:
P = pair_features
return [A, P]
class WeaveGather(tf.keras.layers.Layer):
  """Gathers per-atom outputs of the weave convolutions into one feature
  vector per molecule, optionally after a Gaussian-histogram expansion of
  each feature dimension.
  """
  def __init__(self,
               batch_size,
               n_input=128,
               gaussian_expand=False,
               init='glorot_uniform',
               activation='tanh',
               epsilon=1e-3,
               momentum=0.99,
               **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    n_input: int, optional
      number of features for each input molecule
    gaussian_expand: boolean. optional
      Whether to expand each dimension of atomic features by gaussian histogram
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
    super(WeaveGather, self).__init__(**kwargs)
    self.n_input = n_input
    self.batch_size = batch_size
    self.gaussian_expand = gaussian_expand
    self.init = init  # Set weight initialization
    self.activation = activation  # Get activations
    self.activation_fn = activations.get(activation)
    # NOTE(review): epsilon and momentum are stored and serialized but not
    # used anywhere in this layer's visible computation.
    self.epsilon = epsilon
    self.momentum = momentum
  def get_config(self):
    """Return the constructor arguments needed to re-create this layer."""
    config = super(WeaveGather, self).get_config()
    config['batch_size'] = self.batch_size
    config['n_input'] = self.n_input
    config['gaussian_expand'] = self.gaussian_expand
    config['init'] = self.init
    config['activation'] = self.activation
    config['epsilon'] = self.epsilon
    config['momentum'] = self.momentum
    return config
  def build(self, input_shape):
    # W/b are only needed to project the 11-fold expanded features back
    # down to n_input dimensions.
    if self.gaussian_expand:
      init = initializers.get(self.init)
      self.W = init([self.n_input * 11, self.n_input])
      self.b = backend.zeros(shape=[self.n_input])
    self.built = True
  def call(self, inputs):
    # inputs: [per-atom features, atom_split segment ids mapping each atom
    # to its molecule].
    outputs = inputs[0]
    atom_split = inputs[1]
    if self.gaussian_expand:
      outputs = self.gaussian_histogram(outputs)
    # Sum atom features within each molecule.
    output_molecules = tf.math.segment_sum(outputs, atom_split)
    if self.gaussian_expand:
      output_molecules = tf.matmul(output_molecules, self.W) + self.b
      output_molecules = self.activation_fn(output_molecules)
    return output_molecules
  def gaussian_histogram(self, x):
    """Expand every feature into 11 normalized Gaussian-membership values."""
    # (mean, std) of the 11 fixed Gaussian bins.
    gaussian_memberships = [(-1.645, 0.283), (-1.080, 0.170), (-0.739, 0.134),
                            (-0.468, 0.118), (-0.228, 0.114), (0., 0.114),
                            (0.228, 0.114), (0.468, 0.118), (0.739, 0.134),
                            (1.080, 0.170), (1.645, 0.283)]
    dist = [tfp.distributions.Normal(p[0], p[1]) for p in gaussian_memberships]
    # Divide each density by its value at the mean so memberships peak at 1.
    dist_max = [dist[i].prob(gaussian_memberships[i][0]) for i in range(11)]
    outputs = [dist[i].prob(x) / dist_max[i] for i in range(11)]
    outputs = tf.stack(outputs, axis=2)
    # Normalize so memberships of each feature sum to 1 across the 11 bins.
    outputs = outputs / tf.reduce_sum(outputs, axis=2, keepdims=True)
    outputs = tf.reshape(outputs, [-1, self.n_input * 11])
    return outputs
class DTNNEmbedding(tf.keras.layers.Layer):
  """Maps atomic numbers to dense feature vectors through a learned
  per-element lookup table.
  """
  def __init__(self,
               n_embedding=30,
               periodic_table_length=30,
               init='glorot_uniform',
               **kwargs):
    """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    periodic_table_length: int, optional
      Length of embedding, 83=Bi
    init: str, optional
      Weight initialization for filters.
    """
    super(DTNNEmbedding, self).__init__(**kwargs)
    self.n_embedding = n_embedding
    self.periodic_table_length = periodic_table_length
    self.init = init
  def get_config(self):
    """Serialize the layer configuration for model reconstruction."""
    base_config = super(DTNNEmbedding, self).get_config()
    base_config.update({
        'n_embedding': self.n_embedding,
        'periodic_table_length': self.periodic_table_length,
        'init': self.init,
    })
    return base_config
  def build(self, input_shape):
    """Create the trainable embedding table (one row per element)."""
    initializer = initializers.get(self.init)
    self.embedding_list = initializer(
        [self.periodic_table_length, self.n_embedding])
    self.built = True
  def call(self, inputs):
    """Look up the embedding row for each atomic number in `inputs`."""
    return tf.nn.embedding_lookup(self.embedding_list, inputs)
class DTNNStep(tf.keras.layers.Layer):
  """One interaction pass of a Deep Tensor Neural Network: refines each
  atom's embedding with distance-weighted messages from the other atoms.
  """
  def __init__(self,
               n_embedding=30,
               n_distance=100,
               n_hidden=60,
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    n_distance: int, optional
      granularity of distance matrix
    n_hidden: int, optional
      Number of nodes in hidden layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
    super(DTNNStep, self).__init__(**kwargs)
    self.n_embedding = n_embedding
    self.n_distance = n_distance
    self.n_hidden = n_hidden
    self.init = init  # Set weight initialization
    self.activation = activation  # Get activations
    self.activation_fn = activations.get(activation)
  def get_config(self):
    """Return the constructor arguments needed to re-create this layer."""
    config = super(DTNNStep, self).get_config()
    config['n_embedding'] = self.n_embedding
    config['n_distance'] = self.n_distance
    config['n_hidden'] = self.n_hidden
    config['activation'] = self.activation
    config['init'] = self.init
    return config
  def build(self, input_shape):
    """Create the trainable weights of the three transforms."""
    init = initializers.get(self.init)
    self.W_cf = init([self.n_embedding, self.n_hidden])  # atom -> hidden
    self.W_df = init([self.n_distance, self.n_hidden])  # distance -> hidden
    self.W_fc = init([self.n_hidden, self.n_embedding])  # hidden -> embedding
    self.b_cf = backend.zeros(shape=[
        self.n_hidden,
    ])
    self.b_df = backend.zeros(shape=[
        self.n_hidden,
    ])
    self.built = True
  def call(self, inputs):
    """
    parent layers: atom_features, distance, distance_membership_i, distance_membership_j
    """
    atom_features = inputs[0]
    distance = inputs[1]
    distance_membership_i = inputs[2]
    distance_membership_j = inputs[3]
    distance_hidden = tf.matmul(distance, self.W_df) + self.b_df
    atom_features_hidden = tf.matmul(atom_features, self.W_cf) + self.b_cf
    outputs = tf.multiply(
        distance_hidden, tf.gather(atom_features_hidden, distance_membership_j))
    # for atom i in a molecule m, this step multiplies together distance info of atom pair(i,j)
    # and embeddings of atom j(both gone through a hidden layer)
    outputs = tf.matmul(outputs, self.W_fc)
    outputs = self.activation_fn(outputs)
    # Self-interaction (i == j) term: an all-zero distance row would give
    # distance_hidden == b_df, so this computes exactly that contribution
    # and it is subtracted from the segment sum below.
    output_ii = tf.multiply(self.b_df, atom_features_hidden)
    output_ii = tf.matmul(output_ii, self.W_fc)
    output_ii = self.activation_fn(output_ii)
    # for atom i, sum the influence from all other atom j in the molecule
    return tf.math.segment_sum(
        outputs, distance_membership_i) - output_ii + atom_features
class DTNNGather(tf.keras.layers.Layer):
def __init__(self,
n_embedding=30,
n_outputs=100,
layer_sizes=[100],
output_activation=True,
init='glorot_uniform',
activation='tanh',
**kwargs):
"""
Parameters
----------
n_embedding: int, optional
Number of features for each atom
n_outputs: int, optional
Number of features for each molecule(output)
layer_sizes: list of int, optional(default=[1000])
Structure of hidden layer(s)
init: str, optional
Weight initialization for filters.
activation: str, optional
Activation function applied
"""
super(DTNNGather, self).__init__(**kwargs)
self.n_embedding = n_embedding
self.n_outputs = n_outputs
self.layer_sizes = layer_sizes
self.output_activation = output_activation
self.init = init # Set weight initialization
self.activation = activation # Get activations
self.activation_fn = activations.get(activation)
def get_config(self):
config = super(DTNNGather, self).get_config()
config['n_embedding'] = self.n_embedding
config['n_outputs'] = self.n_outputs
config['layer_sizes'] = self.layer_sizes
config['output_activation'] | |
PyPys
with pytest.raises(TypeError):
p.a1 = "def"
if sys.version_info < (3,):
BEnum2 = new_enum_type(unicode("foo"), (unicode('abc'),), (5,), BInt)
assert string(cast(BEnum2, 5)) == 'abc'
assert type(string(cast(BEnum2, 5))) is str
def test_enum_overflow():
    """Enum constants outside the base integer type's range must raise
    OverflowError; in-range constants must round-trip exactly."""
    max_uint = 2 ** (size_of_int()*8) - 1
    max_int = max_uint // 2
    max_ulong = 2 ** (size_of_long()*8) - 1
    max_long = max_ulong // 2
    for BPrimitive in [new_primitive_type("int"),
                       new_primitive_type("unsigned int"),
                       new_primitive_type("long"),
                       new_primitive_type("unsigned long")]:
        for x in [max_uint, max_int, max_ulong, max_long]:
            for testcase in [x, x+1, -x-1, -x-2]:
                if int(cast(BPrimitive, testcase)) == testcase:
                    # fits in BPrimitive: the enum must accept it
                    BEnum = new_enum_type("foo", ("AA",), (testcase,),
                                          BPrimitive)
                    assert int(cast(BEnum, testcase)) == testcase
                else:
                    # overflows BPrimitive: enum creation must fail
                    py.test.raises(OverflowError, new_enum_type,
                                   "foo", ("AA",), (testcase,), BPrimitive)
def test_callback_returning_enum():
    """A callback declared to return an enum may return either a cast enum
    cdata or a plain int; both come through as the integer value."""
    BInt = new_primitive_type("int")
    BEnum = new_enum_type("foo", ('def', 'c', 'ab'), (0, 1, -20), BInt)
    def cb(n):
        if n & 1:
            return cast(BEnum, n)
        else:
            return n
    BFunc = new_function_type((BInt,), BEnum)
    f = callback(BFunc, cb)
    assert f(0) == 0
    assert f(1) == 1
    assert f(-20) == -20
    # Values that are not named enum members still round-trip numerically.
    assert f(20) == 20
    assert f(21) == 21
def test_callback_returning_enum_unsigned():
    """Same as above for an unsigned-based enum: a negative return wraps
    around modulo 2**32."""
    BInt = new_primitive_type("int")
    BUInt = new_primitive_type("unsigned int")
    BEnum = new_enum_type("foo", ('def', 'c', 'ab'), (0, 1, 20), BUInt)
    def cb(n):
        if n & 1:
            return cast(BEnum, n)
        else:
            return n
    BFunc = new_function_type((BInt,), BEnum)
    f = callback(BFunc, cb)
    assert f(0) == 0
    assert f(1) == 1
    # Negative values wrap to the unsigned representation.
    assert f(-21) == 2**32 - 21
    assert f(20) == 20
    assert f(21) == 21
def test_callback_returning_char():
    """A callback returning 'char' yields single-byte bytes objects."""
    BInt = new_primitive_type("int")
    BChar = new_primitive_type("char")
    def cb(n):
        return bytechr(n)
    BFunc = new_function_type((BInt,), BChar)
    f = callback(BFunc, cb)
    assert f(0) == b'\x00'
    assert f(255) == b'\xFF'
def _hacked_pypy_uni4():
    """Return True when running through the PY_DOT_PY hack on a
    narrow-unicode build (astral characters take two code units)."""
    # len of an astral char is 1 on a wide build, 2 on a narrow one.
    pyuni4 = {1: True, 2: False}[len(u+'\U00012345')]
    return 'PY_DOT_PY' in globals() and not pyuni4
def test_callback_returning_wchar_t():
    """Callbacks may return wide characters, including astral code points;
    a raising callback returns NUL and prints the traceback to stderr."""
    BInt = new_primitive_type("int")
    BWChar = new_primitive_type("wchar_t")
    def cb(n):
        if n == -1:
            return u+'\U00012345'
        if n == -2:
            raise ValueError
        return unichr(n)
    BFunc = new_function_type((BInt,), BWChar)
    f = callback(BFunc, cb)
    assert f(0) == unichr(0)
    assert f(255) == unichr(255)
    assert f(0x1234) == u+'\u1234'
    # Astral return values only work with a 4-byte wchar_t.
    if sizeof(BWChar) == 4 and not _hacked_pypy_uni4():
        assert f(-1) == u+'\U00012345'
    assert f(-2) == u+'\x00'   # and an exception printed to stderr
def test_struct_with_bitfields():
    """Bit-field layout: storage-unit offsets, bit sizes, and the
    byte-order-dependent bit shifts."""
    BLong = new_primitive_type("long")
    BStruct = new_struct_type("struct foo")
    LONGBITS = 8 * sizeof(BLong)
    complete_struct_or_union(BStruct, [('a1', BLong, 1),
                                       ('a2', BLong, 2),
                                       ('a3', BLong, 3),
                                       ('a4', BLong, LONGBITS - 5)])
    d = BStruct.fields
    # a1..a3 fit in the first long; a4 needs a second storage unit.
    assert d[0][1].offset == d[1][1].offset == d[2][1].offset == 0
    assert d[3][1].offset == sizeof(BLong)
    def f(m, r):
        # Translate a little-endian shift into the platform's byte order.
        if sys.byteorder == 'little':
            return r
        else:
            return LONGBITS - m - r
    assert d[0][1].bitshift == f(1, 0)
    assert d[0][1].bitsize == 1
    assert d[1][1].bitshift == f(2, 1)
    assert d[1][1].bitsize == 2
    assert d[2][1].bitshift == f(3, 3)
    assert d[2][1].bitsize == 3
    assert d[3][1].bitshift == f(LONGBITS - 5, 0)
    assert d[3][1].bitsize == LONGBITS - 5
    assert sizeof(BStruct) == 2 * sizeof(BLong)
    assert alignof(BStruct) == alignof(BLong)
def test_bitfield_instance():
    """Reading/writing bit fields enforces the range allowed by the field
    width, with a special case for signed 1-bit fields."""
    BInt = new_primitive_type("int")
    BUnsignedInt = new_primitive_type("unsigned int")
    BStruct = new_struct_type("struct foo")
    complete_struct_or_union(BStruct, [('a1', BInt, 1),
                                       ('a2', BUnsignedInt, 2),
                                       ('a3', BInt, 3)])
    p = newp(new_pointer_type(BStruct), None)
    p.a1 = -1
    assert p.a1 == -1
    p.a1 = 0
    with pytest.raises(OverflowError):
        p.a1 = 2
    assert p.a1 == 0
    #
    p.a1 = -1
    p.a2 = 3
    p.a3 = -4
    with pytest.raises(OverflowError):
        p.a3 = 4
    with pytest.raises(OverflowError) as e:
        p.a3 = -5
    assert str(e.value) == ("value -5 outside the range allowed by the "
                            "bit field width: -4 <= x <= 3")
    # A failed store must leave the previous values untouched.
    assert p.a1 == -1 and p.a2 == 3 and p.a3 == -4
    #
    # special case for convenience: "int x:1", while normally signed,
    # allows also setting the value "1" (it still gets read back as -1)
    p.a1 = 1
    assert p.a1 == -1
    with pytest.raises(OverflowError) as e:
        p.a1 = -2
    assert str(e.value) == ("value -2 outside the range allowed by the "
                            "bit field width: -1 <= x <= 1")
def test_bitfield_instance_init():
    """Bit fields can be initialized from list and dict initializers, for
    both structs and unions."""
    BInt = new_primitive_type("int")
    BStruct = new_struct_type("struct foo")
    complete_struct_or_union(BStruct, [('a1', BInt, 1)])
    p = newp(new_pointer_type(BStruct), [-1])
    assert p.a1 == -1
    p = newp(new_pointer_type(BStruct), {'a1': -1})
    assert p.a1 == -1
    #
    BUnion = new_union_type("union bar")
    complete_struct_or_union(BUnion, [('a1', BInt, 1)])
    p = newp(new_pointer_type(BUnion), [-1])
    assert p.a1 == -1
def test_weakref():
    """ctype and cdata objects must all support weak references and be
    collectable once unreferenced."""
    import _weakref
    BInt = new_primitive_type("int")
    BPtr = new_pointer_type(BInt)
    rlist = [_weakref.ref(BInt),
             _weakref.ref(newp(BPtr, 42)),
             _weakref.ref(cast(BPtr, 42)),
             _weakref.ref(cast(BInt, 42)),
             _weakref.ref(buffer(newp(BPtr, 42))),
             ]
    # Retry a few times: some GCs need several passes to clear everything.
    for i in range(5):
        import gc; gc.collect()
        if [r() for r in rlist] == [None for r in rlist]:
            break
def test_no_inheritance():
    """Neither ctype nor cdata types may be subclassed."""
    BInt = new_primitive_type("int")
    try:
        class foo(type(BInt)): pass
    except TypeError:
        pass
    else:
        raise AssertionError
    x = cast(BInt, 42)
    try:
        class foo(type(x)): pass
    except TypeError:
        pass
    else:
        raise AssertionError
def test_assign_string():
    """Assigning byte strings into char[5] array items copies the string;
    over-long strings raise IndexError with an explanatory message."""
    BChar = new_primitive_type("char")
    BArray1 = new_array_type(new_pointer_type(BChar), 5)
    BArray2 = new_array_type(new_pointer_type(BArray1), 5)
    a = newp(BArray2, [b"abc", b"de", b"ghij"])
    assert string(a[1]) == b"de"
    assert string(a[2]) == b"ghij"
    a[2] = b"."
    assert string(a[2]) == b"."
    a[2] = b"12345"
    assert string(a[2]) == b"12345"
    with pytest.raises(IndexError) as e:
        a[2] = b"123456"
    assert 'char[5]' in str(e.value)
    assert 'got 6 characters' in str(e.value)
def test_add_error():
    """Arithmetic on a non-pointer cdata (plain int) must raise TypeError."""
    x = cast(new_primitive_type("int"), 42)
    with pytest.raises(TypeError):
        x + 1
    with pytest.raises(TypeError):
        x - 1
def test_void_errors():
    """'void' has no alignment and cannot be allocated with newp()."""
    py.test.raises(ValueError, alignof, new_void_type())
    py.test.raises(TypeError, newp, new_pointer_type(new_void_type()), None)
def test_too_many_items():
    """Initializers longer than the target array (or wrong-typed struct
    initializers) must be rejected."""
    BChar = new_primitive_type("char")
    BArray = new_array_type(new_pointer_type(BChar), 5)
    py.test.raises(IndexError, newp, BArray, tuple(b'123456'))
    py.test.raises(IndexError, newp, BArray, list(b'123456'))
    py.test.raises(IndexError, newp, BArray, b'123456')
    BStruct = new_struct_type("struct foo")
    complete_struct_or_union(BStruct, [])
    # An empty struct accepts neither a bytes initializer nor extra items.
    py.test.raises(TypeError, newp, new_pointer_type(BStruct), b'')
    py.test.raises(ValueError, newp, new_pointer_type(BStruct), [b'1'])
def test_more_type_errors():
    """Floats are not valid initializers for char/int arrays, and arrays
    cannot be cast to floats."""
    BInt = new_primitive_type("int")
    BChar = new_primitive_type("char")
    BArray = new_array_type(new_pointer_type(BChar), 5)
    py.test.raises(TypeError, newp, BArray, 12.34)
    BArray = new_array_type(new_pointer_type(BInt), 5)
    py.test.raises(TypeError, newp, BArray, 12.34)
    BFloat = new_primitive_type("float")
    py.test.raises(TypeError, cast, BFloat, newp(BArray, None))
def test_more_overflow_errors():
    """Out-of-range initializers for unsigned int raise OverflowError."""
    BUInt = new_primitive_type("unsigned int")
    py.test.raises(OverflowError, newp, new_pointer_type(BUInt), -1)
    py.test.raises(OverflowError, newp, new_pointer_type(BUInt), 2**32)
def test_newp_copying():
    """Test that we can do newp(<type>, <cdata of the given type>) for most
    types, including same-type arrays.
    """
    BInt = new_primitive_type("int")
    p = newp(new_pointer_type(BInt), cast(BInt, 42))
    assert p[0] == 42
    # unsigned int
    BUInt = new_primitive_type("unsigned int")
    p = newp(new_pointer_type(BUInt), cast(BUInt, 42))
    assert p[0] == 42
    # char
    BChar = new_primitive_type("char")
    p = newp(new_pointer_type(BChar), cast(BChar, '!'))
    assert p[0] == b'!'
    # float
    BFloat = new_primitive_type("float")
    p = newp(new_pointer_type(BFloat), cast(BFloat, 12.25))
    assert p[0] == 12.25
    # pointer-to-struct copying
    BStruct = new_struct_type("struct foo_s")
    BStructPtr = new_pointer_type(BStruct)
    complete_struct_or_union(BStruct, [('a1', BInt, -1)])
    s1 = newp(BStructPtr, [42])
    p1 = newp(new_pointer_type(BStructPtr), s1)
    assert p1[0] == s1
    # open-length arrays cannot be copied; fixed-length ones can (and
    # zero-fill the tail)
    BArray = new_array_type(new_pointer_type(BInt), None)
    a1 = newp(BArray, [1, 2, 3, 4])
    py.test.raises(TypeError, newp, BArray, a1)
    BArray6 = new_array_type(new_pointer_type(BInt), 6)
    a1 = newp(BArray6, [10, 20, 30])
    a2 = newp(BArray6, a1)
    assert list(a2) == [10, 20, 30, 0, 0, 0]
    # struct copied by value
    s1 = newp(BStructPtr, [42])
    s2 = newp(BStructPtr, s1[0])
    assert s2.a1 == 42
    # union copied by value
    BUnion = new_union_type("union foo_u")
    BUnionPtr = new_pointer_type(BUnion)
    complete_struct_or_union(BUnion, [('a1', BInt, -1)])
    u1 = newp(BUnionPtr, [42])
    u2 = newp(BUnionPtr, u1[0])
    assert u2.a1 == 42
    # function pointer copying
    BFunc = new_function_type((BInt,), BUInt)
    p1 = cast(BFunc, 42)
    p2 = newp(new_pointer_type(BFunc), p1)
    assert p2[0] == p1
def test_string():
    """string() on char cdata and char arrays, with and without an
    explicit maximum length."""
    BChar = new_primitive_type("char")
    assert string(cast(BChar, 42)) == b'*'
    assert string(cast(BChar, 0)) == b'\x00'
    BCharP = new_pointer_type(BChar)
    BArray = new_array_type(BCharP, 10)
    a = newp(BArray, b"hello")
    assert len(a) == 10
    assert string(a) == b"hello"
    p = a + 2
    assert string(p) == b"llo"
    assert string(newp(new_array_type(BCharP, 4), b"abcd")) == b"abcd"
    # Reading from a NULL pointer must fail cleanly.
    py.test.raises(RuntimeError, string, cast(BCharP, 0))
    # maxlen truncates, but never reads past the first NUL.
    assert string(a, 4) == b"hell"
    assert string(a, 5) == b"hello"
    assert string(a, 6) == b"hello"
def test_string_byte():
    """string() also works on signed/unsigned char, returning bytes."""
    BByte = new_primitive_type("signed char")
    assert string(cast(BByte, 42)) == b'*'
    assert string(cast(BByte, 0)) == b'\x00'
    BArray = new_array_type(new_pointer_type(BByte), None)
    a = newp(BArray, [65, 66, 67])
    assert type(string(a)) is bytes and string(a) == b'ABC'
    #
    BByte = new_primitive_type("unsigned char")
    assert string(cast(BByte, 42)) == b'*'
    assert string(cast(BByte, 0)) == b'\x00'
    BArray = new_array_type(new_pointer_type(BByte), None)
    a = newp(BArray, [65, 66, 67])
    assert type(string(a)) is bytes and string(a) == b'ABC'
    if 'PY_DOT_PY' not in globals() and sys.version_info < (3,):
        assert string(a, 8).startswith(b'ABC')  # may contain additional garbage
def test_string_wchar():
    """Run the wide-char string() checks for every wide character type."""
    for typename in ["wchar_t", "char16_t", "char32_t"]:
        _test_string_wchar_variant(typename)
def _test_string_wchar_variant(typename):
    """Shared body of test_string_wchar for one wide-character type."""
    BWChar = new_primitive_type(typename)
    assert string(cast(BWChar, 42)) == u+'*'
    assert string(cast(BWChar, 0x4253)) == u+'\u4253'
    assert string(cast(BWChar, 0)) == u+'\x00'
    BArray = new_array_type(new_pointer_type(BWChar), None)
    a = newp(BArray, [u+'A', u+'B', u+'C'])
    assert type(string(a)) is unicode and string(a) == u+'ABC'
    if 'PY_DOT_PY' not in globals() and sys.version_info < (3,):
        try:
            # may contain additional garbage
            assert string(a, 8).startswith(u+'ABC')
        except ValueError:    # garbage contains values > 0x10FFFF
            assert sizeof(BWChar) == 4
def test_string_typeerror():
BShort = new_primitive_type("short")
BArray = | |
def foo(x):
raise TestException(x)
lc = task.LoopingCall(foo, "bar")
return self.assertFailure(lc.start(0.1), TestException)
    def testFailAndStop(self):
        """An exception raised by the callable still errbacks the start()
        deferred, even when the callable stopped the loop first."""
        def foo(x):
            lc.stop()
            raise TestException(x)
        lc = task.LoopingCall(foo, "bar")
        return self.assertFailure(lc.start(0.1), TestException)
    def testEveryIteration(self):
        """With interval 0 the callable runs on every reactor iteration
        until stop() is called."""
        ran = []
        def foo():
            ran.append(None)
            if len(ran) > 5:
                lc.stop()
        lc = task.LoopingCall(foo)
        d = lc.start(0)
        def stopped(ign):
            # stop() was requested during the sixth call, so exactly 6 runs.
            self.assertEqual(len(ran), 6)
        return d.addCallback(stopped)
    def testStopAtOnceLater(self):
        # Ensure that even when LoopingCall.stop() is called from a
        # reactor callback, it still prevents any subsequent calls.
        d = defer.Deferred()
        def foo():
            d.errback(failure.DefaultException(
                "This task also should never get called."))
        self._lc = task.LoopingCall(foo)
        # now=False: the first call would only happen after a full interval,
        # giving the callLater below time to stop the loop first.
        self._lc.start(1, now=False)
        reactor.callLater(0, self._callback_for_testStopAtOnceLater, d)
        return d
    def _callback_for_testStopAtOnceLater(self, d):
        # Stop the loop before its first scheduled run, then succeed the
        # test's deferred on the next reactor turn.
        self._lc.stop()
        reactor.callLater(0, d.callback, "success")
    def testWaitDeferred(self):
        # Tests if the callable isn't scheduled again before the returned
        # deferred has fired.
        timings = [0.2, 0.8]
        clock = task.Clock()
        def foo():
            d = defer.Deferred()
            d.addCallback(lambda _: lc.stop())
            clock.callLater(1, d.callback, None)
            return d
        lc = TestableLoopingCall(clock, foo)
        lc.start(0.2)
        clock.pump(timings)
        # Nothing may remain scheduled while foo()'s deferred is unfired.
        self.assertFalse(clock.calls)
    def testFailurePropagation(self):
        # Tests if the failure of the errback of the deferred returned by the
        # callable is propagated to the lc errback.
        #
        # To make sure this test does not hang trial when LoopingCall does not
        # wait for the callable's deferred, it also checks there are no
        # calls in the clock's callLater queue.
        timings = [0.3]
        clock = task.Clock()
        def foo():
            d = defer.Deferred()
            clock.callLater(0.3, d.errback, TestException())
            return d
        lc = TestableLoopingCall(clock, foo)
        d = lc.start(1)
        self.assertFailure(d, TestException)
        clock.pump(timings)
        # The failed iteration must not have rescheduled anything.
        self.assertFalse(clock.calls)
        return d
    def test_deferredWithCount(self):
        """
        In the case that the function passed to L{LoopingCall.withCount}
        returns a deferred, which does not fire before the next interval
        elapses, the function should not be run again. And if a function call
        is skipped in this fashion, the appropriate count should be
        provided.
        """
        testClock = task.Clock()
        d = defer.Deferred()
        deferredCounts = []
        def countTracker(possibleCount):
            # Keep a list of call counts
            deferredCounts.append(possibleCount)
            # Return a deferred, but only on the first request
            if len(deferredCounts) == 1:
                return d
            else:
                return None
        # Start a looping call for our countTracker function
        # Set the increment to 0.2, and do not call the function on startup.
        lc = task.LoopingCall.withCount(countTracker)
        lc.clock = testClock
        # NOTE(review): 'd' is rebound here, so countTracker's first call
        # returns this start() deferred through the closure — confirm this
        # is the intended deferred to wait on.
        d = lc.start(0.2, now=False)
        # Confirm that nothing has happened yet.
        self.assertEqual(deferredCounts, [])
        # Advance the clock by 0.2 and then 0.4;
        testClock.pump([0.2, 0.4])
        # We should now have exactly one count (of 1 call)
        self.assertEqual(len(deferredCounts), 1)
        # Fire the deferred, and advance the clock by another 0.2
        d.callback(None)
        testClock.pump([0.2])
        # We should now have exactly 2 counts...
        self.assertEqual(len(deferredCounts), 2)
        # The first count should be 1 (one call)
        # The second count should be 3 (calls were missed at about 0.6 and 0.8)
        self.assertEqual(deferredCounts, [1, 3])
class DeferLaterTests(unittest.TestCase):
    """
    Tests for L{task.deferLater}.
    """
    def test_callback(self):
        """
        The L{Deferred} returned by L{task.deferLater} is called back after
        the specified delay with the result of the function passed in.
        """
        results = []
        flag = object()
        def callable(foo, bar):
            results.append((foo, bar))
            return flag
        clock = task.Clock()
        d = task.deferLater(clock, 3, callable, 'foo', bar='bar')
        d.addCallback(self.assertIdentical, flag)
        clock.advance(2)
        # One second short of the delay: nothing must have run yet.
        self.assertEqual(results, [])
        clock.advance(1)
        self.assertEqual(results, [('foo', 'bar')])
        return d
    def test_errback(self):
        """
        The L{Deferred} returned by L{task.deferLater} is errbacked if the
        supplied function raises an exception.
        """
        def callable():
            raise TestException()
        clock = task.Clock()
        d = task.deferLater(clock, 1, callable)
        clock.advance(1)
        return self.assertFailure(d, TestException)
    def test_cancel(self):
        """
        The L{Deferred} returned by L{task.deferLater} can be
        cancelled to prevent the call from actually being performed.
        """
        called = []
        clock = task.Clock()
        d = task.deferLater(clock, 1, called.append, None)
        d.cancel()
        def cbCancelled(ignored):
            # Make sure there are no calls outstanding.
            self.assertEqual([], clock.getDelayedCalls())
            # And make sure the call didn't somehow happen already.
            self.assertFalse(called)
        self.assertFailure(d, defer.CancelledError)
        d.addCallback(cbCancelled)
        return d
class _FakeReactor(object):
    """A minimal deterministic reactor built on L{task.Clock}, used to
    drive L{task.react} in tests without touching the global reactor."""
    def __init__(self):
        self._running = False
        self._clock = task.Clock()
        # Expose the clock's scheduling API as the reactor's.
        self.callLater = self._clock.callLater
        self.seconds = self._clock.seconds
        self.getDelayedCalls = self._clock.getDelayedCalls
        self._whenRunning = []
        self._shutdownTriggers = {'before': [], 'during': []}
    def callWhenRunning(self, callable, *args, **kwargs):
        # Once run() has started, _whenRunning is None and calls execute
        # immediately instead of being queued.
        if self._whenRunning is None:
            callable(*args, **kwargs)
        else:
            self._whenRunning.append((callable, args, kwargs))
    def addSystemEventTrigger(self, phase, event, callable, *args):
        # Only the shutdown phases used by task.react are supported.
        assert phase in ('before', 'during')
        assert event == 'shutdown'
        self._shutdownTriggers[phase].append((callable, args))
    def run(self):
        """
        Call timed events until there are no more or the reactor is stopped.
        @raise RuntimeError: When no timed events are left and the reactor is
        still running.
        """
        self._running = True
        whenRunning = self._whenRunning
        self._whenRunning = None
        for callable, args, kwargs in whenRunning:
            callable(*args, **kwargs)
        while self._running:
            calls = self.getDelayedCalls()
            if not calls:
                raise RuntimeError("No DelayedCalls left")
            # Jump the clock straight to the next scheduled call.
            self._clock.advance(calls[0].getTime() - self.seconds())
        # Fire shutdown triggers exactly once, 'before' phase first.
        shutdownTriggers = self._shutdownTriggers
        self._shutdownTriggers = None
        for (trigger, args) in shutdownTriggers['before'] + shutdownTriggers['during']:
            trigger(*args)
    def stop(self):
        """
        Stop the reactor.
        """
        if not self._running:
            raise error.ReactorNotRunning()
        self._running = False
class ReactTests(unittest.SynchronousTestCase):
"""
Tests for L{twisted.internet.task.react}.
"""
    def test_runsUntilAsyncCallback(self):
        """
        L{task.react} runs the reactor until the L{Deferred} returned by the
        function it is passed is called back, then stops it.
        """
        timePassed = []
        def main(reactor):
            finished = defer.Deferred()
            reactor.callLater(1, timePassed.append, True)
            reactor.callLater(2, finished.callback, None)
            return finished
        r = _FakeReactor()
        # react() always exits via SystemExit; code 0 signals success.
        exitError = self.assertRaises(
            SystemExit, task.react, main, _reactor=r)
        self.assertEqual(0, exitError.code)
        self.assertEqual(timePassed, [True])
        self.assertEqual(r.seconds(), 2)
    def test_runsUntilSyncCallback(self):
        """
        L{task.react} returns quickly if the L{Deferred} returned by the
        function it is passed has already been called back at the time it is
        returned.
        """
        def main(reactor):
            return defer.succeed(None)
        r = _FakeReactor()
        exitError = self.assertRaises(
            SystemExit, task.react, main, _reactor=r)
        self.assertEqual(0, exitError.code)
        # Already-fired result: the clock must not have advanced at all.
        self.assertEqual(r.seconds(), 0)
    def test_runsUntilAsyncErrback(self):
        """
        L{task.react} runs the reactor until the L{defer.Deferred} returned by
        the function it is passed is errbacked, then it stops the reactor and
        reports the error.
        """
        class ExpectedException(Exception):
            pass
        def main(reactor):
            finished = defer.Deferred()
            reactor.callLater(1, finished.errback, ExpectedException())
            return finished
        r = _FakeReactor()
        # Failure path: exit code 1 and the error must have been logged.
        exitError = self.assertRaises(
            SystemExit, task.react, main, _reactor=r)
        self.assertEqual(1, exitError.code)
        errors = self.flushLoggedErrors(ExpectedException)
        self.assertEqual(len(errors), 1)
    def test_runsUntilSyncErrback(self):
        """
        L{task.react} returns quickly if the L{defer.Deferred} returned by the
        function it is passed has already been errbacked at the time it is
        returned.
        """
        class ExpectedException(Exception):
            pass
        def main(reactor):
            return defer.fail(ExpectedException())
        r = _FakeReactor()
        exitError = self.assertRaises(
            SystemExit, task.react, main, _reactor=r)
        self.assertEqual(1, exitError.code)
        # Already-failed result: the clock must not have advanced at all.
        self.assertEqual(r.seconds(), 0)
        errors = self.flushLoggedErrors(ExpectedException)
        self.assertEqual(len(errors), 1)
    def test_singleStopCallback(self):
        """
        L{task.react} doesn't try to stop the reactor if the L{defer.Deferred}
        the function it is passed is called back after the reactor has already
        been stopped.
        """
        def main(reactor):
            reactor.callLater(1, reactor.stop)
            finished = defer.Deferred()
            # Fire the deferred only during shutdown, i.e. after stop().
            reactor.addSystemEventTrigger(
                'during', 'shutdown', finished.callback, None)
            return finished
        r = _FakeReactor()
        exitError = self.assertRaises(
            SystemExit, task.react, main, _reactor=r)
        self.assertEqual(r.seconds(), 1)
        self.assertEqual(0, exitError.code)
    def test_singleStopErrback(self):
        """
        L{task.react} doesn't try to stop the reactor if the L{defer.Deferred}
        the function it is passed is errbacked after the reactor has already
        been stopped.
        """
        class ExpectedException(Exception):
            pass
        def main(reactor):
            reactor.callLater(1, reactor.stop)
            finished = defer.Deferred()
            # Errback the deferred only during shutdown, i.e. after stop().
            reactor.addSystemEventTrigger(
                'during', 'shutdown', finished.errback, ExpectedException())
            return finished
        r = _FakeReactor()
        exitError = self.assertRaises(
            SystemExit, task.react, main, _reactor=r)
        self.assertEqual(1, exitError.code)
        self.assertEqual(r.seconds(), 1)
        errors = self.flushLoggedErrors(ExpectedException)
        self.assertEqual(len(errors), 1)
    def test_arguments(self):
        """
        L{task.react} passes the elements of the list it is passed as
        positional arguments to the function it is passed.
        """
        args = []
        def main(reactor, x, y, z):
            args.extend((x, y, z))
            return defer.succeed(None)
        r = _FakeReactor()
        # [1, 2, 3] become the x, y, z arguments of main().
        exitError = self.assertRaises(
            SystemExit, task.react, main, [1, 2, 3], _reactor=r)
        self.assertEqual(0, exitError.code)
        self.assertEqual(args, [1, 2, 3])
    def test_defaultReactor(self):
        """
        L{twisted.internet.reactor} is used if no reactor argument is passed to
        L{task.react}.
        """
        def main(reactor):
            self.passedReactor = reactor
            return defer.succeed(None)
        reactor = _FakeReactor()
        # Temporarily install our fake as the global reactor.
        with NoReactor():
            installReactor(reactor)
            exitError = self.assertRaises(SystemExit, task.react, main, [])
            self.assertEqual(0, exitError.code)
        self.assertIdentical(reactor, self.passedReactor)
    def test_exitWithDefinedCode(self):
        """
        L{task.react} forwards the exit code specified by the C{SystemExit}
        error returned by the passed function, if any.
        """
        def main(reactor):
            return defer.fail(SystemExit(23))
        r = _FakeReactor()
        exitError = self.assertRaises(
            SystemExit, task.react, main, [], _reactor=r)
        # The SystemExit's own code is propagated, not the generic 1.
        self.assertEqual(23, exitError.code)
    def test_synchronousStop(self):
        """
        L{task.react} handles when the reactor is stopped just before the
        returned L{Deferred} fires.
        """
        def main(reactor):
            d = defer.Deferred()
            def stop():
                # Stop first, then fire: react() must not stop twice.
                reactor.stop()
                d.callback(None)
            reactor.callWhenRunning(stop)
            return d
        r = _FakeReactor()
        exitError = self.assertRaises(
            SystemExit, task.react, main, [], _reactor=r)
        self.assertEqual(0, exitError.code)
def test_asynchronousStop(self):
"""
L{task.react} handles when the reactor is stopped and the
returned L{Deferred} doesn't fire.
"""
def main(reactor):
reactor.callLater(1, reactor.stop)
return | |
under the ParallelLoader
if self.use_tpu:
device = xm.xla_device(self.tpu_id)
train_dataloader = xla_pl.ParallelLoader(train_dataloader, [device])
train_dataloader = train_dataloader.per_device_loader(device)
return train_dataloader
    def run_on_epoch_start_hook(self, model):
        """Fire the epoch-start callbacks and, if the LightningModule
        defines one, its own on_epoch_start hook — all under the profiler."""
        # Epoch start events
        with self.profiler.profile('on_epoch_start'):
            # callbacks
            self.on_epoch_start()
            # model hooks
            if self.is_function_implemented('on_epoch_start'):
                model.on_epoch_start()
def run_training_epoch(self):
    """Run one full training epoch.

    Drives the batch loop: runs the training step for each batch,
    interleaves validation + checkpointing, logger flushing, metric
    logging and LR stepping, then finishes with epoch-end processing
    and hooks.
    """
    # get model
    model = self.get_model()
    # Epoch start events
    self.run_on_epoch_start_hook(model)
    # modify dataloader if needed (ddp, etc...)
    train_dataloader = self.prepare_train_loop_dataloader(self.train_dataloader)
    # bookkeeping
    epoch_output = []
    should_check_val = False
    # run epoch
    for batch_idx, (batch, is_last_batch) in self.profiler.profile_iterable(
            enumerate(_with_is_last(train_dataloader)), "get_train_batch"
    ):
        # stop epoch if we limited the number of training batches
        if batch_idx >= self.num_training_batches:
            break
        self.batch_idx = batch_idx
        model.global_step = self.global_step
        # ------------------------------------
        # TRAINING_STEP + TRAINING_STEP_END
        # ------------------------------------
        batch_output = self.run_training_batch(batch, batch_idx)
        # only track outputs when user implements training_epoch_end
        # otherwise we will build up unnecessary memory
        if self.is_overridden('training_epoch_end', model=self.get_model()):
            epoch_output.append(batch_output.training_step_output_for_epoch_end)
        # update LR schedulers
        self.update_train_loop_lr_schedulers()
        # when returning -1 from train_step, we end epoch early
        # NOTE(review): this overwrites should_stop on every batch, so an
        # earlier stop request could be cleared by a later batch — confirm
        # this is intended.
        self.should_stop = batch_output.signal == -1
        # -----------------------------------------
        # VALIDATE IF NEEDED + CHECKPOINT CALLBACK
        # -----------------------------------------
        should_check_val = self.should_check_val(batch_idx, is_last_batch)
        if self.fast_dev_run or should_check_val:
            self.run_evaluation(test_mode=False)
        # -----------------------------------------
        # SAVE LOGGERS (ie: Tensorboard, etc...)
        # -----------------------------------------
        self.save_loggers_in_training_loop(batch_idx)
        # -----------------------------------------
        # SAVE METRICS TO LOGGERS
        # -----------------------------------------
        self.save_train_loop_metrics_to_loggers(batch_idx, batch_output)
        # progress global step according to grads progress
        self.increment_accumulated_grad_global_step()
        # max steps reached, end training
        if self.max_steps is not None and self.max_steps == self.global_step:
            break
        # end epoch early
        # stop when the flag is changed or we've gone past the amount
        # requested in the batches
        if self.fast_dev_run or self.should_stop:
            break
    # let ddp devices catch up when using horovod
    self.sync_horovod()
    # process epoch outputs
    self.run_training_epoch_end(epoch_output)
    # checkpoint callback; should_check_val holds the value from the last
    # executed batch (False if the loop never ran)
    self.check_checkpoint_callback(should_check_val)
    # epoch end hook
    self.run_on_epoch_end_hook(model)
def check_checkpoint_callback(self, should_check_val):
    """Trigger checkpoint callbacks when no validation ran this epoch.

    Without a ``validation_step`` the ModelCheckpoint callbacks would
    never fire, so invoke them explicitly here.

    :param should_check_val: whether validation already ran (or will run)
        for this epoch; if so, checkpoints are left to the val loop.
    """
    # TODO bake this logic into the checkpoint callback
    should_activate = not self.is_overridden('validation_step') and not (self.fast_dev_run or should_check_val)
    if should_activate:
        # fixed idiom: the original built a throwaway list via a
        # comprehension purely for its side effects
        for callback in self.callbacks:
            if isinstance(callback, ModelCheckpoint):
                callback.on_validation_end(self, self.get_model())
def update_train_loop_lr_schedulers(self):
    """Step 'interval=step' LR schedulers when an accumulation window closes."""
    accumulation_done = (self.batch_idx + 1) % self.accumulate_grad_batches == 0
    if accumulation_done:
        # update lr
        self.update_learning_rates(interval='step')
def run_on_epoch_end_hook(self, model):
    """Fire epoch-end events: trainer callbacks first, then the model hook."""
    hook_name = 'on_epoch_end'
    with self.profiler.profile(hook_name):
        # trainer-level callbacks
        self.on_epoch_end()
        # user's module-level hook, only if overridden
        if self.is_function_implemented(hook_name):
            model.on_epoch_end()
def run_training_epoch_end(self, epoch_output):
    """Pass collected step outputs to ``training_epoch_end`` and route the
    processed metrics to loggers, callbacks and the progress bar.

    No-op when the user has not overridden ``training_epoch_end``.
    """
    model = self.get_model()
    if not self.is_overridden('training_epoch_end', model=model):
        return
    self.global_step += 1
    processed = self.process_output(model.training_epoch_end(epoch_output))
    # processed layout: [1] progress-bar metrics, [2] logger metrics,
    # [3] callback metrics
    self.log_metrics(processed[2], {})
    self.callback_metrics.update(processed[3])
    self.add_progress_bar_metrics(processed[1])
def sync_horovod(self):
    """Barrier so horovod workers finish the epoch together; no-op otherwise."""
    if not self.use_horovod:
        return
    # join on the local rank when on GPU; -1 selects CPU mode
    hvd.join(hvd.local_rank() if self.on_gpu else -1)
def increment_accumulated_grad_global_step(self):
    """Advance step counters after a batch.

    ``global_step`` only moves when a full accumulation window has
    finished; ``total_batch_idx`` moves on every batch.
    """
    finished_accumulation = (self.batch_idx + 1) % self.accumulate_grad_batches == 0
    if finished_accumulation:
        self.global_step += 1
    self.total_batch_idx += 1
def save_train_loop_metrics_to_loggers(self, batch_idx, batch_output):
    """Emit batch metrics to the loggers at the configured row interval,
    or when stopping / in fast_dev_run."""
    on_interval = batch_idx % self.row_log_interval == 0
    if self.fast_dev_run or on_interval or self.should_stop:
        # logs user requested information to logger
        self.log_metrics(batch_output.batch_log_metrics, batch_output.grad_norm_dic)
def save_loggers_in_training_loop(self, batch_idx):
    """Flush logger state to disk at the configured save interval."""
    on_interval = (batch_idx + 1) % self.log_save_interval == 0
    if not (on_interval or self.should_stop or self.fast_dev_run):
        return
    # only rank zero writes, and only when a logger is attached
    if self.is_global_zero and self.logger is not None:
        self.logger.save()
def should_check_val(self, batch_idx, is_last_batch):
    """Return True when validation should run after this batch."""
    # validation must be enabled and this epoch must be eligible
    epoch_eligible = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
    if self.disable_validation or not epoch_eligible:
        return False
    # due at the val-check interval, on a stop request, or at the final
    # batch of an infinite (unsized) dataset
    at_interval = (batch_idx + 1) % self.val_check_batch == 0
    infinite_end = is_last_batch and self.val_check_batch == float('inf')
    return at_interval or self.should_stop or infinite_end
def run_training_batch(self, batch, batch_idx):
    """Run the training step for one batch, preceded by a GSM topic-model
    pass whose topic distribution and loss are fed into the optimizer
    closure together with the regular training step.

    :param batch: dict with keys 'input_ids', 'attention_mask' and
        'decoder_input_ids', each a tensor of token indices
        (e.g. input_ids: tensor([[0, 36, 230, ..., 8, 41, 2]])).
    :param batch_idx: index of the batch within the epoch
    :return: AttributeDict with keys signal, grad_norm_dic,
        batch_log_metrics and training_step_output_for_epoch_end
    """
    # NOTE(review): the tokenizer, GSM config and dictionary are re-loaded
    # from disk on EVERY batch; hoisting these to __init__/setup would
    # avoid substantial per-batch I/O — confirm before changing.
    # load tokenizer
    tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
    # load config for GSM
    config = yaml_load(f"{self.default_root_dir}/data/config/gsm.yaml")
    # load dict
    dictionary = Dictionary.load(datapath('dict-www-cnndm-unigram'))
    # separator tokens to strip before topic modeling (remove [SEP])
    sep_list = ['[SEP_0]', '[SEP_1]', '[SEP_2]', '[SEP_3]', '[SEP_4]', '[SEP_5]', '[SEP_6]', '[SEP_7]',
                '[SEP_8]', '[SEP_9]', '<S_SEP>']
    # vocab size for topic modeling
    vocab_size = len(dictionary)
    # model: first hidden layer must match the topic-model vocabulary
    config['hidden']['features'][0] = vocab_size
    # trainer batch
    config['trainer_batch']['test_sample'] = 1
    config = extend_config_reference(config)
    gsm_trainer = config['GSMtrainer']
    gsm_trainer['base_dir'] = f"{self.default_root_dir}/log/bart-large-cnn-finetune"
    gsm_trainer = GSMTrainer.from_config(gsm_trainer)
    # number of topics
    K = config['gsmtopic']['k']
    # yaml_dump(gsm_trainer,
    #           os.path.join(f"{self.default_root_dir}/log/bart-large-cnn-finetune", "gsm_trainer.yaml"))
    # -----------------------------------------
    # Topic Modeling - GSM
    # -----------------------------------------
    batch_size = batch['input_ids'].size()[0]
    docs = []
    for batch_num in range(batch_size):
        # extract the batch_sentence (decode ids back to text)
        batch_sentence = tokenizer.decode(batch['input_ids'][batch_num].tolist(), skip_special_tokens=True)
        # change to lowercase and split to list
        batch_sentence_list = batch_sentence.split(" ")
        # remove [SEP]
        batch_sentence_list_nosep = [item for item in batch_sentence_list if item not in sep_list]
        text = ' '.join([x for x in batch_sentence_list_nosep])
        # drop wordpiece continuation markers and punctuation
        fine_text = text.replace(' ##', '').lower()
        batch_sentence = re.sub(r'[^\w\s]', '', fine_text)
        # batch_sentence: change to the cleaned news for topic modeling
        # change to training data format in topic modeling
        gsm_data_bow = dictionary.doc2bow(batch_sentence.split(" "))
        docs.append(gsm_data_bow)
    # gsm_data: data for topic modeling
    gsm_data = DataLoader(DocDataset(docs, len(dictionary), device='cuda'), batch_size=config['dataset']['batch_size'], drop_last=False, num_workers=0)
    gsm_trainer.__dict__['train_iterator'] = gsm_data
    gsm_loss, gsm_p = gsm_trainer.co_train(vocab_size, training=True)
    del gsm_data
    # track grad norms
    grad_norm_dic = {}
    # track all metrics for callbacks
    batch_callback_metrics = []
    # track metrics to log
    batch_log_metrics = []
    # NOTE(review): this None-guard runs AFTER `batch` was already used by
    # the topic-model pass above; if None batches can occur it should
    # probably come first — confirm.
    if batch is None:
        return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)
    # Batch start events
    with self.profiler.profile('on_batch_start'):
        # callbacks
        self.on_batch_start()
        # hooks; a -1 response aborts the batch
        if self.is_function_implemented('on_batch_start'):
            response = self.get_model().on_batch_start(batch)
            if response == -1:
                return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
    splits = [batch]
    if self.truncated_bptt_steps is not None:
        model_ref = self.get_model()
        with self.profiler.profile('tbptt_split_batch'):
            splits = model_ref.tbptt_split_batch(batch, self.truncated_bptt_steps)
    self.hiddens = None
    for split_idx, split_batch in enumerate(splits):
        self.split_idx = split_idx
        for opt_idx, optimizer in self._get_optimizers_iterable():
            # make sure only the gradients of the current optimizer's parameters are calculated
            # in the training step to prevent dangling gradients in multiple-optimizer setup.
            if len(self.optimizers) > 1:
                for param in self.get_model().parameters():
                    param.requires_grad = False
                for group in optimizer.param_groups:
                    for param in group['params']:
                        param.requires_grad = True
            # -------------------
            # calculate loss
            # -------------------
            # beta weights the topic-model loss in the combined objective
            beta = 0.01
            opt_closure_result = self.optimizer_closure(
                split_batch,
                batch_idx,
                opt_idx,
                optimizer,
                self.hiddens,
                gsm_p,  # topic distribution
                gsm_loss,  # loss for topic modeling
                K,  # number of topics
                beta,
            )
            # ------------------------------
            # POST forward bookkeeping
            # ------------------------------
            batch_callback_metrics.append(opt_closure_result.training_step_output.callback_metrics)
            batch_log_metrics.append(opt_closure_result.training_step_output.log_metrics)
            self.add_progress_bar_metrics(opt_closure_result.training_step_output.pbar_on_batch_end)
            # track hiddens
            self.hiddens = opt_closure_result.hiddens
            # check if loss or model weights are nan
            if self.terminate_on_nan:
                self.detect_nan_tensors(opt_closure_result.loss)
            # track total loss for logging (avoid mem leaks)
            self.batch_loss_value.append(opt_closure_result.loss)
            # ------------------------------
            # BACKWARD PASS
            # ------------------------------
            # gradient update with accumulated gradients
            if (self.batch_idx + 1) % self.accumulate_grad_batches == 0:
                # backward
                grad_norm_dic = self.run_batch_backward_pass(split_batch, batch_idx, opt_idx, optimizer)
                # calculate running loss for display
                self.running_loss.append(self.batch_loss_value.mean())
                # reset for next set of accumulated grads
                self.batch_loss_value.reset()
    # Batch end events
    with self.profiler.profile('on_batch_end'):
        # callbacks
        self.on_batch_end()
        # model hooks
        if self.is_function_implemented('on_batch_end'):
            self.get_model().on_batch_end()
    # collapse all metrics into one dict
    batch_log_metrics = {k: v for d in batch_log_metrics for k, v in d.items()}
    # track all metrics for callbacks
    self.callback_metrics.update({k: v for d in batch_callback_metrics for k, v in d.items()})
    result = AttributeDict(
        signal=0,
        grad_norm_dic=grad_norm_dic,
        batch_log_metrics=batch_log_metrics,
        training_step_output_for_epoch_end=opt_closure_result.training_step_output_for_epoch_end
    )
    return result
def run_batch_backward_pass(self, split_batch, batch_idx, opt_idx, optimizer):
    """Gradient-norm tracking, clipping and optimizer step for one split.

    :return: dict of gradient norms (empty unless norm tracking is on
        and this batch falls on the logging interval).
    """
    grad_norm_dic = {}
    # ------------------
    # GRAD NORMS
    # ------------------
    track_norms = (batch_idx % self.row_log_interval == 0
                   and float(self.track_grad_norm) > 0)
    if track_norms:
        grad_norm_dic = self.get_model().grad_norm(self.track_grad_norm)
    # ------------------
    # CLIP GRADS
    # ------------------
    # with native AMP, gradients must be unscaled before clipping
    if self.use_amp and NATIVE_AMP_AVALAIBLE and not self.use_tpu:
        self.scaler.unscale_(optimizer)
    self.clip_gradients()
    # ------------------
    # .STEP + ZERO_GRAD
    # ------------------
    self.call_optimizer_step(optimizer, opt_idx, batch_idx, split_batch)
    return grad_norm_dic
def call_optimizer_step(self, optimizer, opt_idx, batch_idx, split_batch):
# calls .step(), .zero_grad()
# override function to modify this behavior
model = self.get_model()
with self.profiler.profile('optimizer_step'):
| |
import numpy as np
from scipy import sparse
from . import auxiliary_function as ax
from . import comdet_functions as cd
from . import cp_functions as cp
from . import solver
class DirectedGraph:
def __init__(self, adjacency=None, edgelist=None):
    """Build a directed graph from an adjacency matrix or an edgelist.

    :param adjacency: list, numpy array or scipy sparse adjacency matrix.
    :param edgelist: list/array of 2-tuples (or 3-tuples with weights).
    """
    # topology / bookkeeping attributes, populated by _initialize_graph
    for attr in ("n_nodes", "n_edges", "adjacency", "edgelist",
                 "degree_sequence_out", "degree_sequence_in",
                 "strength_sequence_out", "strength_sequence_in",
                 "nodes_dict"):
        setattr(self, attr, None)
    self.is_sparse = False
    self.is_initialized = False
    self.is_weighted = False
    self._initialize_graph(adjacency=adjacency, edgelist=edgelist)
def _initialize_graph(self, adjacency=None, edgelist=None):
    """Populate the graph from an adjacency matrix or an edgelist.

    Exactly one of ``adjacency``/``edgelist`` must be given. Weighted
    input is detected automatically; in that case the weighted matrix is
    kept in ``self.adjacency_weighted`` and ``self.adjacency`` becomes
    its binarised version.

    :param adjacency: list, numpy array or scipy sparse matrix.
    :param edgelist: list/array of (src, dst) or (src, dst, weight) rows.
    :raises TypeError: when neither argument is given or a type is wrong.
    :raises ValueError: when edgelist rows are not 2- or 3-tuples.
    """
    if adjacency is not None:
        if not isinstance(
            adjacency, (list, np.ndarray)
        ) and not sparse.isspmatrix(adjacency):
            raise TypeError(
                "The adjacency matrix must be passed as a list or numpy"
                " array or scipy sparse matrix."
            )
        if isinstance(adjacency, list):
            self.adjacency = np.array(adjacency)
        elif isinstance(adjacency, np.ndarray):
            self.adjacency = adjacency
        else:
            self.adjacency = adjacency
            self.is_sparse = True
    elif edgelist is not None:
        if not isinstance(edgelist, (list, np.ndarray)):
            raise TypeError(
                "The edgelist must be passed as a list or numpy array."
            )
        elif len(edgelist) > 0:
            if len(edgelist[0]) == 2:
                self.adjacency = ax.from_edgelist(edgelist,
                                                  self.is_sparse,
                                                  True)
                self.edgelist = edgelist
            elif len(edgelist[0]) == 3:
                self.adjacency = ax.from_weighted_edgelist(edgelist,
                                                           self.is_sparse,
                                                           True)
                self.edgelist = edgelist
            else:
                raise ValueError(
                    "This is not an edgelist. An edgelist must be a list"
                    " or array of couples of nodes with optional weights."
                    " Is this an adjacency matrix?"
                )
    else:
        # fixed: the message wrongly said "UndirectedGraph"
        raise TypeError(
            "DirectedGraph is missing one positional argument"
            " adjacency.")
    ax.check_adjacency(self.adjacency, self.is_sparse, True)
    # degrees are needed for binary and weighted graphs alike; compute
    # once instead of duplicating the call in both branches as before
    self.degree_sequence_in, self.degree_sequence_out = ax.compute_degree(
        self.adjacency,
        True
    )
    self.degree_sequence_in = self.degree_sequence_in.astype(np.int64)
    self.degree_sequence_out = self.degree_sequence_out.astype(np.int64)
    # the matrix is binary exactly when every non-zero entry equals one
    if np.sum(self.adjacency) != np.sum(self.adjacency > 0):
        self.strength_sequence_in, self.strength_sequence_out = \
            ax.compute_strength(
                self.adjacency,
                True
            )
        self.strength_sequence_in = self.strength_sequence_in.astype(
            np.float64)
        self.strength_sequence_out = self.strength_sequence_out.astype(
            np.float64)
        # keep the weighted matrix and binarise the working adjacency
        self.adjacency_weighted = self.adjacency
        self.adjacency = (self.adjacency_weighted.astype(bool)).astype(
            np.int16)
        self.is_weighted = True
    self.n_nodes = len(self.degree_sequence_out)
    # NOTE(review): for a DIRECTED graph the edge count is normally
    # sum(out-degrees); the halving below matches the undirected formula
    # — confirm this is intended before relying on n_edges.
    self.n_edges = int(np.sum(self.degree_sequence_out) / 2)
    self.is_initialized = True
def set_adjacency_matrix(self, adjacency):
    """Initialise the graph from ``adjacency``.

    :raises ValueError: when the graph already holds edges; call
        ``clean_edges()`` first.
    """
    if self.is_initialized:
        raise ValueError(
            "Graph already contains edges or has a degree sequence."
            " Use 'clean_edges()' first."
        )
    self._initialize_graph(adjacency=adjacency)
def set_edgelist(self, edgelist):
    """Initialise the graph from ``edgelist``.

    :raises ValueError: when the graph already holds edges; call
        ``clean_edges()`` first.
    """
    if self.is_initialized:
        raise ValueError(
            "Graph already contains edges or has a degree sequence."
            " Use 'clean_edges()' first."
        )
    self._initialize_graph(edgelist=edgelist)
def clean_edges(self):
    """Drop the stored adjacency/edgelist and mark the graph uninitialised."""
    for attr in ("adjacency", "edgelist"):
        setattr(self, attr, None)
    self.is_initialized = False
def run_enhanced_cp_detection(self,
                              initial_guess="random",
                              num_sim=2,
                              sorting_method="default",
                              print_output=False):
    """Run enhanced (weighted) core-periphery detection.

    The solution is stored via ``_set_solved_problem``.
    """
    # prepare initial guess, surprise/sorting functions and aux adjacency
    self._initialize_problem_cp(
        initial_guess=initial_guess,
        enhanced=True,
        weighted=True,
        sorting_method=sorting_method)
    solver_kwargs = dict(
        adjacency_matrix=self.aux_adj,
        cluster_assignment=self.init_guess,
        num_sim=num_sim,
        sort_edges=self.sorting_function,
        calculate_surprise=self.surprise_function,
        correct_partition_labeling=self.partition_labeler,
        flipping_function=self.flipping_function,
        print_output=print_output)
    self._set_solved_problem(solver.solver_cp(**solver_kwargs))
def run_discrete_cp_detection(self,
                              initial_guess="random",
                              weighted=None,
                              num_sim=2,
                              sorting_method="default",
                              print_output=False):
    """Run discrete core-periphery detection (binary or weighted).

    ``weighted=None`` infers the method from how the graph was built.
    The solution is stored via ``_set_solved_problem``.
    """
    # prepare initial guess, surprise/sorting functions and aux adjacency
    self._initialize_problem_cp(
        initial_guess=initial_guess,
        enhanced=False,
        weighted=weighted,
        sorting_method=sorting_method)
    solver_kwargs = dict(
        adjacency_matrix=self.aux_adj,
        cluster_assignment=self.init_guess,
        num_sim=num_sim,
        sort_edges=self.sorting_function,
        calculate_surprise=self.surprise_function,
        correct_partition_labeling=self.partition_labeler,
        flipping_function=self.flipping_function,
        print_output=print_output)
    self._set_solved_problem(solver.solver_cp(**solver_kwargs))
def _initialize_problem_cp(self,
                           initial_guess,
                           enhanced,
                           weighted,
                           sorting_method):
    """Prepare auxiliary state for core-periphery detection.

    Chooses the working adjacency matrix (binary vs weighted), the
    surprise function for ``self.method``, the edge-sorting strategy and
    the flipping/labeling helpers.

    :raises ValueError: unknown sorting/surprise method, or a discrete
        method on a graph with continuous weights.
    :raises TypeError: weighted detection requested on a binary graph.
    """
    self._set_initial_guess_cp(initial_guess)
    if weighted is None:
        # infer from how the graph was initialised
        if self.is_weighted:
            self.aux_adj = self.adjacency_weighted
            self.method = "weighted"
        else:
            self.aux_adj = self.adjacency
            self.method = "binary"
    elif weighted:
        self.method = "enhanced" if enhanced else "weighted"
        if hasattr(self, "adjacency_weighted"):
            self.aux_adj = self.adjacency_weighted
            # discrete methods cannot handle non-integer weights
            has_continuous_weights = (
                self.aux_adj.astype(np.int64).sum() != self.aux_adj.sum())
            if has_continuous_weights:
                raise ValueError("The selected method works for discrete "
                                 "weights, but the initialised graph has "
                                 "continuous weights.")
        else:
            # fixed typos: "choose ... peryphery" -> "chose ... periphery"
            raise TypeError(
                "You chose weighted core-periphery detection but the"
                " graph you initialised is binary.")
    else:
        self.aux_adj = self.adjacency
        self.method = "binary"
    if sorting_method == "default":
        # NOTE(review): 'jaccard' is not a key of sort_func below, so the
        # binary default currently raises ValueError — confirm intended.
        sorting_method = "random" if self.is_weighted else "jaccard"
    sort_func = {
        "random": lambda x: ax.shuffled_edges(x, True),
        "degrees": None,
        "strengths": None,
    }
    try:
        self.sorting_function = sort_func[sorting_method]
    except KeyError:
        raise ValueError(
            "Sorting method can be 'random', 'degrees' or 'strengths'.")
    surp_fun = {
        "binary": lambda x, y: cp.calculate_surprise_logsum_cp_bin(
            x, y, True),
        "weighted": lambda x, y: cp.calculate_surprise_logsum_cp_weigh(
            x, y, True),
        "enhanced": lambda x, y: cp.calculate_surprise_logsum_cp_enhanced(
            x, y, True),
    }
    try:
        self.surprise_function = surp_fun[self.method]
    except KeyError:
        # fixed: the message omitted the 'enhanced' option
        raise ValueError(
            "CP method can be 'binary', 'weighted' or 'enhanced'.")
    self.flipping_function = lambda x: cp.flipping_function_cp(x, 1)
    self.partition_labeler = lambda x, y: cp.labeling_core_periphery(x, y)
def _set_initial_guess_cp(self, initial_guess):
    """Build ``self.init_guess`` (0 = core, 1 = periphery) for CP detection.

    :param initial_guess: 'random', 'ranked', 'eigenvector', or an
        explicit 0/1 membership array/list with one entry per node.
    :raises ValueError: unknown string, wrong length, or a custom guess
        whose labels are not exactly the two values {0, 1}.
    """
    # TODO: Sistemare parte pesata (fix the weighted part)
    if isinstance(initial_guess, str):
        # roughly 5% of the nodes start in the core
        aux_n = int(np.ceil((5 * self.n_nodes) / 100))
        if initial_guess == "random":
            self.init_guess = np.ones(self.n_nodes, dtype=np.int32)
            self.init_guess[:aux_n] = 0
            # fixed: shuffling only init_guess[:aux_n] (all zeros) was a
            # no-op; the whole vector must be shuffled to place the core
            # nodes at random positions
            np.random.shuffle(self.init_guess)
        elif initial_guess == "ranked":
            # top-strength (weighted) / top-degree (binary) nodes form
            # the initial core
            self.init_guess = np.ones(self.n_nodes, dtype=np.int32)
            if self.is_weighted:
                self.init_guess[
                    self.strength_sequence_out.argsort()[-aux_n:]] = 0
            else:
                self.init_guess[
                    self.degree_sequence_out.argsort()[-aux_n:]] = 0
        elif initial_guess == "eigenvector":
            self.init_guess = ax.eigenvector_init_guess(self.adjacency,
                                                        False)
        else:
            # fixed: the message omitted the 'ranked' option
            raise ValueError("Valid values of initial guess are 'random',"
                             " 'ranked', 'eigenvector' or a custom"
                             " initial guess (np.ndarray or list).")
    elif isinstance(initial_guess, np.ndarray):
        self.init_guess = initial_guess
    elif isinstance(initial_guess, list):
        self.init_guess = np.array(initial_guess)
    if np.unique(self.init_guess).shape[0] != 2:
        raise ValueError("The custom initial_guess passed is not valid."
                         " The initial guess for core-periphery detection"
                         " must have nodes' membership that are 0 or 1."
                         " Pay attention that at least one node has to "
                         "belong to the core (0) or the periphery (1).")
    if self.init_guess.shape[0] != self.n_nodes:
        raise ValueError(
            "The length of the initial guess provided is different from"
            " the network number of nodes.")
def run_continuous_community_detection(self,
                                       method="aglomerative",
                                       initial_guess="random",
                                       approx=None,
                                       num_sim=2,
                                       num_clusters=None,
                                       prob_mix=0.1,
                                       sorting_method="default",
                                       print_output=False
                                       ):
    """Detect communities using the continuous-weights surprise method.

    :param method: 'aglomerative' or 'fixed-clusters'.
    :raises ValueError: for any other ``method``.
    """
    self._initialize_problem_cd(
        method=method,
        num_clusters=num_clusters,
        initial_guess=initial_guess,
        enhanced=False,
        weighted=True,
        continuous=True,
        sorting_method=sorting_method)
    # arguments shared by both solver flavours
    common_kwargs = dict(
        adjacency_matrix=self.aux_adj,
        cluster_assignment=self.init_guess,
        num_sim=num_sim,
        sort_edges=self.sorting_function,
        calculate_surprise=self.surprise_function,
        correct_partition_labeling=self.partition_labeler,
        approx=approx,
        is_directed=True,
        print_output=print_output)
    if method == "aglomerative":
        solution = solver.solver_com_det_aglom(
            prob_mix=prob_mix,
            flipping_function=cd.flipping_function_comdet_agl_new,
            **common_kwargs)
    elif method == "fixed-clusters":
        solution = solver.solver_com_det_divis(
            flipping_function=cd.flipping_function_comdet_div_new,
            **common_kwargs)
    else:
        raise ValueError("Method can be 'aglomerative' or 'fixed-clusters'.")
    self._set_solved_problem(solution)
def run_enhanced_community_detection(self,
                                     method="aglomerative",
                                     initial_guess="random",
                                     num_sim=2,
                                     num_clusters=None,
                                     prob_mix=0.1,
                                     sorting_method="default",
                                     print_output=False
                                     ):
    """Detect communities using the enhanced (weighted) surprise method.

    :param method: 'aglomerative' or 'fixed-clusters'.
    :raises ValueError: for any other ``method``.
    """
    self._initialize_problem_cd(
        method=method,
        num_clusters=num_clusters,
        initial_guess=initial_guess,
        enhanced=True,
        weighted=True,
        continuous=False,
        sorting_method=sorting_method)
    # arguments shared by both solver flavours; no approximation is used
    common_kwargs = dict(
        adjacency_matrix=self.aux_adj,
        cluster_assignment=self.init_guess,
        num_sim=num_sim,
        sort_edges=self.sorting_function,
        calculate_surprise=self.surprise_function,
        correct_partition_labeling=self.partition_labeler,
        approx=None,
        is_directed=True,
        print_output=print_output)
    if method == "aglomerative":
        solution = solver.solver_com_det_aglom(
            prob_mix=prob_mix,
            flipping_function=cd.flipping_function_comdet_agl_new,
            **common_kwargs)
    elif method == "fixed-clusters":
        solution = solver.solver_com_det_divis(
            flipping_function=cd.flipping_function_comdet_div_new,
            **common_kwargs)
    else:
        raise ValueError("Method can be 'aglomerative' or 'fixed-clusters'.")
    self._set_solved_problem(solution)
def run_discrete_community_detection(self,
                                     method="aglomerative",
                                     initial_guess="random",
                                     weighted=None,
                                     num_sim=2,
                                     num_clusters=2,
                                     prob_mix=0.1,
                                     sorting_method="default",
                                     print_output=False):
    """Detect communities using the discrete (binary/weighted) method.

    Fixed defaults: ``initial_guess`` was ``None`` (which left
    ``init_guess`` unset and crashed later) and ``num_sim`` was ``None``;
    both now match the sibling detection methods ("random" / 2).

    :param method: 'aglomerative' or 'fixed-clusters'.
    :param weighted: None infers from the graph; True/False forces it.
    :raises ValueError: for any other ``method``.
    """
    self._initialize_problem_cd(
        method=method,
        num_clusters=num_clusters,
        initial_guess=initial_guess,
        enhanced=False,
        weighted=weighted,
        continuous=False,
        sorting_method=sorting_method)
    # arguments shared by both solver flavours; no approximation is used
    common_kwargs = dict(
        adjacency_matrix=self.aux_adj,
        cluster_assignment=self.init_guess,
        num_sim=num_sim,
        sort_edges=self.sorting_function,
        calculate_surprise=self.surprise_function,
        correct_partition_labeling=self.partition_labeler,
        approx=None,
        is_directed=True,
        print_output=print_output)
    if method == "aglomerative":
        solution = solver.solver_com_det_aglom(
            prob_mix=prob_mix,
            flipping_function=cd.flipping_function_comdet_agl_new,
            **common_kwargs)
    elif method == "fixed-clusters":
        solution = solver.solver_com_det_divis(
            flipping_function=cd.flipping_function_comdet_div_new,
            **common_kwargs)
    else:
        raise ValueError("Method can be 'aglomerative' or 'fixed-clusters'.")
    self._set_solved_problem(solution)
def _initialize_problem_cd(self,
                           method,
                           num_clusters,
                           initial_guess,
                           enhanced,
                           weighted,
                           continuous,
                           sorting_method):
    """Prepare auxiliary state for community detection.

    Selects the working adjacency matrix, the surprise function for
    ``self.method`` ('binary', 'weighted', 'enhanced' or 'continuous'),
    the edge-sorting strategy and the partition labeler.

    :raises ValueError: unknown sorting method, or a discrete method on
        a graph with continuous weights.
    :raises TypeError: weighted detection requested on a binary graph.
    """
    self._set_initial_guess_cd(method, num_clusters, initial_guess)
    if weighted is None:
        # infer from how the graph was initialised
        if self.is_weighted:
            self.aux_adj = self.adjacency_weighted
            self.method = "weighted"
        else:
            self.aux_adj = self.adjacency
            self.method = "binary"
    elif weighted:
        if enhanced:
            self.method = "enhanced"
        elif continuous:
            self.method = "continuous"
        else:
            self.method = "weighted"
        if hasattr(self, "adjacency_weighted"):
            self.aux_adj = self.adjacency_weighted
            # only the discrete methods require integer weights
            is_discrete_method = self.method in ("enhanced", "weighted")
            has_continuous_weights = (
                self.aux_adj.astype(np.int64).sum() != self.aux_adj.sum())
            if is_discrete_method and has_continuous_weights:
                raise ValueError("The selected method works for discrete "
                                 "weights, but the initialised graph has "
                                 "continuous weights.")
        else:
            # fixed typo: "choose" -> "chose"
            raise TypeError(
                "You chose weighted community detection but the"
                " graph you initialised is binary.")
    else:
        self.aux_adj = self.adjacency
        self.method = "binary"
    # simplified: the weighted and binary defaults were both "random"
    if sorting_method == "default":
        sorting_method = "random"
    sort_func = {
        "random": lambda x: ax.shuffled_edges(x, True),
        "strengths": None,
    }
    try:
        self.sorting_function = sort_func[sorting_method]
    except KeyError:
        raise ValueError(
            "Sorting method can be 'random' or 'strengths'.")
    surp_fun = {
        "binary": cd.calculate_surprise_logsum_clust_bin_new,
        "weighted": cd.calculate_surprise_logsum_clust_weigh_new,
        "enhanced": cd.calculate_surprise_logsum_clust_enhanced_new,
        "continuous": cd.calculate_surprise_logsum_clust_weigh_continuos,
    }
    # self.method is always one of the four keys set above
    self.surprise_function = surp_fun[self.method]
    self.partition_labeler = lambda x: cd.labeling_communities(x)
def _set_initial_guess_cd(self,
method,
num_clusters,
initial_guess):
if num_clusters is None and method == "fixed-clusters":
raise ValueError("When 'fixed-clusters' is passed as clustering 'method'"
" the 'num_clusters' argument must be specified.")
if isinstance(initial_guess, str):
if initial_guess == "random":
if method == "aglomerative":
self.init_guess = np.array(
[k for k in np.arange(self.n_nodes, dtype=np.int32)])
elif method == "fixed-clusters":
self.init_guess = np.random.randint(
low=num_clusters,
size=self.n_nodes)
elif (initial_guess == "common-neigh-weak") or \
(initial_guess == "common-neighbours"):
if method == "aglomerative":
self.init_guess = ax.common_neigh_init_guess_weak(
self.adjacency)
elif method == "fixed-clusters":
self.init_guess = ax.fixed_clusters_init_guess_cn(
adjacency=self.adjacency,
n_clust=num_clusters)
elif initial_guess == "common-neigh-strong":
if method == "aglomerative":
self.init_guess = ax.common_neigh_init_guess_strong(
self.adjacency)
elif method == "fixed-clusters":
self.init_guess = ax.fixed_clusters_init_guess_cn(
adjacency=self.adjacency,
n_clust=num_clusters)
else:
raise ValueError(
"The 'initial_guess' selected is not a valid."
"Initial guess can be an array specifying nodes membership"
" or an initialisation method ['common-neighbours',"
" 'random', 'common-neigh-weak', 'common-neigh-strong']."
" For more details see | |
# thumt/data.py
import numpy
import theano
import theano.tensor as tensor
import cPickle
import json
import datetime
import logging
def getbatch(lx, ly, lxfms, lyfms, config):
    '''
    Get a batch for training.

    :type lx: list/array
    :param lx: 2-D numpy arrays, each row contains an indexed source sentence

    :type ly: list/array
    :param ly: 2-D numpy arrays, each row contains an indexed target sentence

    :type lxfms: list
    :param lxfms: per-sentence extra source-side features, aligned with lx

    :type lyfms: list
    :param lyfms: per-sentence extra target-side features, aligned with ly

    :type config: dict
    :param config: the configuration; uses 'maxlength', 'index_eos_src'
        and 'index_eos_trg'

    :returns: (x, xmask, y, ymask, txfms, tyfms) where x/y are
        (maxlen, batch) int64 index matrices padded with the EOS index,
        xmask/ymask are float32 masks marking real tokens, and
        txfms/tyfms are the feature lists of the surviving sentences
    '''
    assert len(lx) == len(ly)
    # get the length of the longest sentence in one batch (+1 for EOS)
    xlen = min(max([len(i) for i in lx]), config['maxlength']) + 1
    ylen = min(max([len(i) for i in ly]), config['maxlength']) + 1
    # filter sentences that are too long.
    # Although sentences have been filtered in data preparation,
    # we filter again for robustness.
    tx = []
    ty = []
    txfms = []
    tyfms = []
    for i in range(len(lx)):
        if len(lx[i]) <= config['maxlength'] and len(ly[i]) <= config['maxlength']:
            tx.append(numpy.concatenate((lx[i], [config['index_eos_src']])))
            ty.append(numpy.concatenate((ly[i], [config['index_eos_trg']])))
            txfms.append(lxfms[i])
            tyfms.append(lyfms[i])
    assert len(tx) == len(ty)
    # fixed idiom: the original used "if len(tx) > 0: pass / else:"
    if not tx:
        xlen = 0
        ylen = 0
    # pad with the EOS index and build the masks that indicate the length
    # of sentences in one batch
    x = config['index_eos_src'] * numpy.ones((xlen, len(tx)), dtype='int64')
    y = config['index_eos_trg'] * numpy.ones((ylen, len(ty)), dtype='int64')
    xmask = numpy.zeros((xlen, len(tx)), dtype='float32')
    ymask = numpy.zeros((ylen, len(ty)), dtype='float32')
    for i in range(len(tx)):
        x[:len(tx[i]), i] = tx[i]
        xmask[:len(tx[i]), i] = 1.
        y[:len(ty[i]), i] = ty[i]
        ymask[:len(ty[i]), i] = 1.
    return x, xmask, y, ymask, txfms, tyfms
class DataCollection(object):
'''
The data manager. It also reserve the training status.
:type config: dict
:param config: the configuration
:type train: bool
:param train: Only set to true on training. If true, the vocabulary and corpus will be loaded, and the training status will be recorded.
'''
def __init__(self, config, train=True):
    '''
    Set up the data manager.

    :type config: dict
    :param config: the configuration

    :type train: bool
    :param train: when True, load vocabulary/corpus and initialise the
        bookkeeping used to checkpoint the training status
    '''
    self.config = config
    if not train:
        return
    self.load_vocab()
    # batching state
    self.inited = False
    self.peeked_batch = None
    self.batch_id = 0
    self.next_offset = 0
    # training status that gets persisted across runs
    self.valid_result = {}
    self.valid_time = {}
    self.num_iter = 0
    self.time = 0.
    self.updatetime = 0.
    self.train_cost = []
    self.status_variable = ['next_offset', 'num_iter', 'time', 'updatetime', 'valid_result', 'valid_time', 'train_cost']
    self.load_data()
    if self.config['semi_learning']:
        # extra offsets for the monolingual corpora
        self.next_offset_source = 0
        self.next_offset_target = 0
        self.status_variable += ['next_offset_source', 'next_offset_target']
        self.load_data_mono()
def next(self):
    '''
    Get the next batch of training corpus.

    :returns: x, y are 2-D numpy arrays of indexed source/target
        sentences; xfms, yfms are the aligned extra-feature arrays
    '''
    # read several batches and sort them by length for training efficiency
    if not self.inited or self.batch_id == self.config['sort_batches']:
        self.batch_id = 0
        self.inited = True
        startid = self.next_offset
        endid = self.next_offset + self.config['batchsize'] * self.config['sort_batches']
        self.peeked_batch = []
        while endid >= self.num_sentences:
            cx = self.source[startid : self.num_sentences]
            cy = self.target[startid : self.num_sentences]
            cxfms = self.xfms[startid : self.num_sentences]
            cyfms = self.yfms[startid : self.num_sentences]
            # fixed: wrap-around entries previously stored only [x, y],
            # but the column indexing below expects [x, y, xfms, yfms]
            self.peeked_batch += [[x, y, m, n] for x, y, m, n in zip(cx, cy, cxfms, cyfms)]
            endid -= self.num_sentences
            startid = 0
        self.next_offset = endid
        if startid < endid:
            cx = self.source[startid : endid]
            cy = self.target[startid : endid]
            cxfms = self.xfms[startid : endid]
            cyfms = self.yfms[startid : endid]
            self.peeked_batch += [[x, y, m, n] for x, y, m, n in zip(cx, cy, cxfms, cyfms)]
        self.peeked_batch = sorted(self.peeked_batch, key=lambda item: max(len(item[0]), len(item[1])))
    # return one batch worth of the length-sorted sentences
    begin = self.batch_id * self.config['batchsize']
    end = (self.batch_id + 1) * self.config['batchsize']
    chunk = numpy.asarray(self.peeked_batch[begin : end])
    x = numpy.asarray(chunk[:, 0])
    y = numpy.asarray(chunk[:, 1])
    xfms = numpy.asarray(chunk[:, 2])
    yfms = numpy.asarray(chunk[:, 3])
    self.batch_id += 1
    return x, y, xfms, yfms
def next_mono(self):
    '''
    Get the next batch of monolingual training corpus. Only used in
    semi-supervised training.

    :returns: x, y -- 2-D numpy arrays; each row is an indexed
        source/target sentence
    '''
    if not self.inited or self.batch_id == self.config['sort_batches']:
        self.batch_id = 0
        self.inited = True
        startid_src = self.next_offset_source
        startid_trg = self.next_offset_target
        endid_src = self.next_offset_source + self.config['batchsize'] * self.config['sort_batches']
        endid_trg = self.next_offset_target + self.config['batchsize'] * self.config['sort_batches']
        self.peeked_batch_mono = []  # NOTE(review): kept for compatibility; the batch is stored in self.peeked_batch below
        cx = []
        cy = []
        # Bug fixes in both wrap-around loops:
        #   * they referenced the undefined name `startid` (NameError on
        #     wrap) -- now `startid_src` / `startid_trg`;
        #   * the target loop appended to `cx` -- now `cy`;
        #   * they sliced the *parallel* corpus (self.source/self.target)
        #     with the monolingual lengths -- now the monolingual lists
        #     (self.source_mono/self.target_mono) that load_data_mono fills.
        while endid_src >= self.num_sentences_mono_source:
            cx += self.source_mono[startid_src : self.num_sentences_mono_source]
            endid_src -= self.num_sentences_mono_source
            startid_src = 0
        self.next_offset_source = endid_src
        while endid_trg >= self.num_sentences_mono_target:
            cy += self.target_mono[startid_trg : self.num_sentences_mono_target]
            endid_trg -= self.num_sentences_mono_target
            startid_trg = 0
        self.next_offset_target = endid_trg
        if startid_src < endid_src:
            cx += self.source_mono[startid_src : endid_src]
        if startid_trg < endid_trg:
            cy += self.target_mono[startid_trg : endid_trg]
        self.peeked_batch = [[x, y] for x, y in zip(cx, cy)]
        self.peeked_batch = sorted(self.peeked_batch,
                                   key=lambda item: max(len(item[0]), len(item[1])))
    chunk = numpy.asarray(self.peeked_batch[self.batch_id * self.config['batchsize'] :
                                            (self.batch_id + 1) * self.config['batchsize']])
    x = numpy.asarray(chunk[:, 0])
    y = numpy.asarray(chunk[:, 1])
    self.batch_id += 1
    return x, y
def load_data(self):
    '''
    Load the parallel training corpus together with the per-sentence
    feature files (xfms/yfms), then discard sentence pairs in which
    either side is longer than ``config['maxlength']``.
    '''
    # load corpus from file
    if self.config['data_corpus'] == 'cPickle':
        self.source_old = cPickle.load(open(self.config['src_shuf'], 'rb'))
        self.target_old = cPickle.load(open(self.config['trg_shuf'], 'rb'))
    elif self.config['data_corpus'] == 'json':
        self.source_old = json.load(open(self.config['src_shuf'], 'rb'))
        self.target_old = json.load(open(self.config['trg_shuf'], 'rb'))
    else:
        # Previously fell through silently and crashed later with a
        # confusing NameError; fail fast with a clear message instead.
        raise ValueError('unsupported data_corpus format: %s' % self.config['data_corpus'])
    # Feature files. These used to be hard-coded absolute paths; they are
    # now overridable via config, with the old values kept as defaults for
    # backward compatibility.
    xfms_path = self.config.get(
        'xfms_shuf', "/home/tfzhang/experiment_x/hansard_zmantree/corpus/xfms.shuf")
    # NOTE(review): the original code loaded xfms.shuf for yfms as well;
    # the default is preserved, but this looks like a copy-paste bug -- confirm.
    yfms_path = self.config.get(
        'yfms_shuf', "/home/tfzhang/experiment_x/hansard_zmantree/corpus/xfms.shuf")
    self.xfms_old = json.load(open(xfms_path, 'rb'))
    self.yfms_old = json.load(open(yfms_path, 'rb'))
    assert len(self.target_old) == len(self.source_old)
    assert len(self.xfms_old) == len(self.source_old)
    assert len(self.xfms_old) == len(self.yfms_old)
    logging.info('total %d sentences' % len(self.source_old))
    # filter out sentence pairs that are too long
    self.source = []
    self.target = []
    self.xfms = []
    self.yfms = []
    maxlen = self.config['maxlength']
    num = 0
    for src, trg, xf, yf in zip(self.source_old, self.target_old,
                                self.xfms_old, self.yfms_old):
        if len(src) <= maxlen and len(trg) <= maxlen:
            self.source.append(src)
            self.target.append(trg)
            self.xfms.append(xf)
            self.yfms.append(yf)
        num += 1
        if num % 100000 == 0:
            logging.debug(str(num))
    assert len(self.target) == len(self.source)
    logging.info('Discarding long sentences. %d sentences left.' % len(self.source))
    self.num_sentences = len(self.source)
def load_data_mono(self):
    '''
    Load monolingual training corpus. Only used in semi-supervised training.

    Sentences longer than ``config['maxlength']`` are discarded.
    '''
    maxlen = self.config['maxlength']
    if self.config['src_mono_shuf']:
        logging.info('Loading monolingual source corpus.')
        self.source_mono = json.load(open(self.config['src_mono_shuf'], 'rb'))
        # One-pass filter instead of deleting from the list while walking
        # it (which was O(n^2) and easy to get wrong).
        self.source_mono = [sent for sent in self.source_mono if len(sent) <= maxlen]
        logging.info('%d monolingual source sentences' % len(self.source_mono))
        self.num_sentences_mono_source = len(self.source_mono)
    if self.config['trg_mono_shuf']:
        logging.info('Loading monolingual target corpus.')
        self.target_mono = json.load(open(self.config['trg_mono_shuf'], 'rb'))
        self.target_mono = [sent for sent in self.target_mono if len(sent) <= maxlen]
        logging.info('%d monolingual target sentences' % len(self.target_mono))
        self.num_sentences_mono_target = len(self.target_mono)
def load_vocab(self):
    '''
    Load the source/target vocabularies and their inverse mappings from
    the pickled files named in the configuration.
    '''
    def unpickle(key):
        # Each config entry points at one pickled vocabulary file.
        return cPickle.load(open(self.config[key], 'rb'))
    self.vocab_src = unpickle('vocab_src')
    self.vocab_trg = unpickle('vocab_trg')
    self.ivocab_src = unpickle('ivocab_src')
    self.ivocab_trg = unpickle('ivocab_trg')
    return
def index_word_target(self, index):
    '''
    Get the target word given its index.

    :type index: int
    :param index: the word index
    :returns: string, the corresponding word ('<eos>' for the
        end-of-sentence index)
    '''
    eos = self.config['index_eos_trg']
    return '<eos>' if index == eos else self.vocab_trg[index]
def print_sentence(self, sentence, vocab, index_eos):
    '''
    Get the text form of a sentence represented by an index vector.

    :type sentence: numpy array
    :param sentence: indexed sentence, size (length, 1) or (length,)
    :type vocab: list
    :param vocab: vocabulary
    :type index_eos: int
    :param index_eos: the index of the end-of-sentence symbol
    :returns: string, the text form of the sentence
    '''
    words = []
    for token in sentence:
        # Rows of a (length, 1) array are 1-element vectors; scalarize.
        if token.ndim != 0:
            token = token[0]
        if token == index_eos:
            break
        # Out-of-vocabulary indices render as 'UNK'.
        words.append(vocab[token] if token < len(vocab) else 'UNK')
    return ' '.join(words)
def print_source(self, sentence):
    '''
    Render a source sentence given as an index vector.

    :type sentence: numpy array
    :param sentence: indexed sentence, size (length, 1)
    :returns: string, the text form of the source sentence
    '''
    eos = self.config['index_eos_src']
    return self.print_sentence(sentence, self.vocab_src, eos)
def print_target(self, sentence):
    '''
    Render a target sentence given as an index vector.

    :type sentence: numpy array
    :param sentence: indexed sentence, size (length, 1)
    :returns: string, the text form of the target sentence
    '''
    eos = self.config['index_eos_trg']
    return self.print_sentence(sentence, self.vocab_trg, eos)
def toindex(self, sentence, ivocab, index_unk, index_eos):
    '''
    Transform a sentence (word list) into an indexed sentence.

    :type sentence: list
    :param sentence: the sentence's words
    :type ivocab: dict
    :param ivocab: word-to-index vocabulary
    :type index_unk: int
    :param index_unk: the index of the unknown-word symbol
    :type index_eos: int
    :param index_eos: the index of the end-of-sentence symbol
    :returns: numpy int64 array of shape (len(sentence) + 1, 1)
    '''
    # dict.has_key() was removed in Python 3; dict.get with a default
    # does the same lookup-or-UNK in one step and works on 2 and 3.
    result = [ivocab.get(word, index_unk) for word in sentence]
    result.append(index_eos)
    return numpy.asarray(result, dtype='int64').reshape((len(result), 1))
def toindex_source(self, sentence):
    '''
    Transform a source-language word list into an index array.

    :type sentence: list
    :param sentence: sentence words
    :returns: numpy array, the indexed source sentence
    '''
    unk = self.config['index_unk_src']
    eos = self.config['index_eos_src']
    return self.toindex(sentence, self.ivocab_src, unk, eos)
def toindex_target(self, sentence):
    '''
    Transform a target-language word list into an index array.

    :type sentence: list
    :param sentence: sentence words
    :returns: numpy array, the indexed target sentence
    '''
    unk = self.config['index_unk_trg']
    eos = self.config['index_eos_trg']
    return self.toindex(sentence, self.ivocab_trg, unk, eos)
def save_status(self, path):
    '''
    Save the training status to file.

    :type path: string
    :param path: the path to the status file
    '''
    # getattr instead of exec'ing built strings: same effect, no dynamic
    # code execution.
    status = {st: getattr(self, st) for st in self.status_variable}
    with open(path, 'wb') as f:
        cPickle.dump(status, f)
def load_status(self, path):
    '''
    Load the training status from file. A missing or unreadable file is
    tolerated: training then starts from scratch.

    :type path: string
    :param path: the path to the status file
    '''
    try:
        with open(path, 'rb') as f:
            status = cPickle.load(f)
        for st in self.status_variable:
            # setattr instead of exec'ing built strings: same effect,
            # no dynamic code execution.
            setattr(self, st, status[st])
    except Exception:
        # Was a bare `except:` -- keep the best-effort semantics but no
        # longer swallow KeyboardInterrupt/SystemExit.
        logging.info('No status file. Starting from scratch.')
def last_improved(self, last=False):
    '''
    :type last: bool
    :param last: if True, a result that merely ties the best counts as an
        improvement; if False it does not.
    :returns: int, the number of iterations elapsed since the latest
        improvement
    '''
    recent_iter = -1
    best = -1
    best_iter = -1
    for iteration, score in self.valid_result.items():
        recent_iter = max(recent_iter, iteration)
        if score > best:
            best, best_iter = score, iteration
        elif score == best:
            # Tie with the current best: prefer the later iteration when
            # `last` is set, otherwise the earlier one.
            best_iter = max(best_iter, iteration) if last else min(best_iter, iteration)
    return recent_iter - best_iter
def format_num(self, x):
    '''
    Format a number with exactly two digits after the decimal point.

    The previous implementation padded str(round(x, 2)) with zeros,
    which crashes on integer inputs under Python 3 (str(round(2, 2))
    == '2' has no '.') and loops badly on values rendered in
    scientific notation. '%.2f' covers every case directly.

    :returns: string, e.g. 3.14159 -> '3.14'
    '''
    return '%.2f' % x
def print_log(self):
    '''
    Build a table of all validation results: wall-clock time, iteration
    number, average training cost over the last save interval, and BLEU.

    :returns: string, the formatted table
    '''
    header = (' ' * 15 + 'Time' + ' ' * 5 + 'Iteration' +
              ' ' * 8 + 'Cost' + ' ' * 8 + 'BLEU\n' +
              '-' * 58 + '\n')
    lines = [header]
    freq = self.config['save_freq']
    for it, bleu_val in sorted(self.valid_result.items()):
        # Average the training cost over the save interval ending at `it`.
        avg_cost = sum(self.train_cost[(it - freq): it]) / freq
        iteration = str(it)
        cost = str(self.format_num(avg_cost))
        bleu = str(self.format_num(bleu_val))
        lines.append(str(self.valid_time[it]) +
                     ' ' * (14 - len(iteration)) + iteration +
                     ' ' * (12 - len(cost)) + cost +
                     ' ' * (12 - len(bleu)) + bleu + '\n')
    return ''.join(lines)
def print_valid(self):
    '''
    Log every validation result, in iteration order.
    '''
    for iteration, score in sorted(self.valid_result.items()):
        logging.info('iter %d: %.2f' % (iteration, score))
    return
def encode_vocab(self, encoding='utf-8'):
'''
Change the encoding of | |
+", rest, 1)
cname = splitted.pop(0)
rest = splitted[0] if splitted else ""
if cname:
if cname in COMMANDS.keys():
got = False
for fn in COMMANDS[cname]:
if fn.__doc__:
got = True
if callable(fn.__doc__):
msg = botconfig.CMD_CHAR+cname+": "+fn.__doc__(rest)
else:
msg = botconfig.CMD_CHAR+cname+": "+fn.__doc__
reply(cli, nick, chan, msg, private=True)
else:
got = False
continue
else:
if got:
return
reply(cli, nick, chan, messages["documentation_unavailable"], private=True)
else:
reply(cli, nick, chan, messages["command_not_found"], private=True)
return
# if command was not found, or if no command was given:
for name, fn in COMMANDS.items():
if (name and not fn[0].flag and not fn[0].owner_only and
name not in fn[0].aliases and fn[0].chan):
fns.append("{0}{1}{0}".format("\u0002", name))
afns = []
if is_admin(nick, ident, host):
for name, fn in COMMANDS.items():
if fn[0].flag and name not in fn[0].aliases:
afns.append("{0}{1}{0}".format("\u0002", name))
fns.sort() # Output commands in alphabetical order
reply(cli, nick, chan, messages["commands_list"].format(break_long_message(fns, ", ")), private=True)
if afns:
afns.sort()
reply(cli, nick, chan, messages["admin_commands_list"].format(break_long_message(afns, ", ")), private=True)
def get_wiki_page(URI):
    """Fetch URI and parse the body as JSON.

    Returns (True, parsed) on success, or (False, error_message) when the
    request fails/times out or the body is empty or parses to a falsy value.
    """
    try:
        # Use the response as a context manager so the connection is
        # closed deterministically (it previously leaked until GC).
        with urllib.request.urlopen(URI, timeout=2) as resp:
            response = resp.read().decode("utf-8", errors="replace")
    except (urllib.error.URLError, socket.timeout):
        return False, messages["wiki_request_timed_out"]
    if not response:
        return False, messages["wiki_open_failure"]
    parsed = json.loads(response)
    if not parsed:
        return False, messages["wiki_open_failure"]
    return True, parsed
@cmd("wiki", pm=True)
def wiki(cli, nick, chan, rest):
    """Prints information on roles from the wiki."""
    # No argument: just link the wiki.
    if not rest:
        reply(cli, nick, chan, "https://werewolf.chat")
        return
    query = rest.replace(" ", "_").lower()
    # Ask the opensearch endpoint for suggestions (autocompletion).
    URI = "https://werewolf.chat/w/api.php?action=opensearch&format=json&search={0}".format(query)
    success, suggestionjson = get_wiki_page(URI)
    if not success:
        reply(cli, nick, chan, suggestionjson, private=True)
        return
    # Keep the first suggested page, if any.
    try:
        suggestion = suggestionjson[1][0].replace(" ", "_")
    except IndexError:
        reply(cli, nick, chan, messages["wiki_no_info"], private=True)
        return
    # Fetch the page intro (plain text) from the API, in JSON format.
    URI = "https://werewolf.chat/w/api.php?action=query&prop=extracts&exintro=true&explaintext=true&titles={0}&format=json".format(suggestion)
    success, pagejson = get_wiki_page(URI)
    if not success:
        reply(cli, nick, chan, pagejson, private=True)
        return
    try:
        page = pagejson["query"]["pages"].popitem()[1]["extract"]
    except (KeyError, IndexError):
        reply(cli, nick, chan, messages["wiki_no_info"], private=True)
        return
    # Keep only the first paragraph.
    newline = page.find("\n")
    if newline >= 0:
        page = page[:newline]
    wikilink = "https://werewolf.chat/{0}".format(suggestion.capitalize())
    if nick == chan:
        # Invoked in PM: answer privately.
        pm(cli, nick, wikilink)
        pm(cli, nick, break_long_message(page.split()))
    else:
        # Invoked in channel: link publicly, send the text as a notice.
        cli.msg(chan, wikilink)
        cli.notice(nick, break_long_message(page.split()))
@hook("invite")
def on_invite(cli, raw_nick, something, chan):
    # Always accept invitations to the bot's primary channel.
    if chan == botconfig.CHANNEL:
        cli.join(chan)
        return  # No questions
    nick, _, ident, host = parse_nick(raw_nick)
    # Admins may summon the bot into any channel.
    if is_admin(nick, ident, host):
        cli.join(chan)
        debuglog(nick, "INVITE", chan, display=True)
@cmd("admins", "ops", pm=True)
def show_admins(cli, nick, chan, rest):
    """Pings the admins that are available."""
    admins = []
    pl = list_players()
    # Rate-limit public (in-channel) use of the command.
    if (chan != nick and var.LAST_ADMINS and var.LAST_ADMINS +
            timedelta(seconds=var.ADMINS_RATE_LIMIT) > datetime.now()):
        cli.notice(nick, messages["command_ratelimited"])
        return
    if chan != nick or (var.PHASE in var.GAME_PHASES or nick in pl):
        var.LAST_ADMINS = datetime.now()
    if var.ADMIN_PINGING:
        # A WHO scan is already in flight; don't start a second one.
        return
    var.ADMIN_PINGING = True
    def admin_whoreply(event, var, chan, user):
        # Per-user WHO reply: collect admins that are present in the main
        # channel, are not the bot itself, and are not marked away.
        if not var.ADMIN_PINGING or chan is not channels.Main:
            return
        if is_admin(user.nick): # FIXME: Using the old interface for now; user.is_admin() is better
            if user is not users.Bot and not event.params.away:
                admins.append(user.nick) # FIXME
    def admin_endwho(event, var, target):
        # WHO finished: report the sorted list and detach both listeners.
        if not var.ADMIN_PINGING or target is not channels.Main:
            return
        admins.sort(key=str.lower)
        msg = messages["available_admins"] + ", ".join(admins)
        reply(cli, nick, chan, msg)
        var.ADMIN_PINGING = False
        events.remove_listener("who_result", admin_whoreply)
        events.remove_listener("who_end", admin_endwho)
    events.add_listener("who_result", admin_whoreply)
    events.add_listener("who_end", admin_endwho)
    # Kick off the WHO scan of the main channel.
    channels.Main.who()
@command("coin", pm=True)
def coin(var, wrapper, message):
    """It's a bad idea to base any decisions on this command."""
    wrapper.send(messages["coin_toss"].format(wrapper.source))
    # Weighted outcome: 59% / 29% / 12% across the three choices
    # (0.59 + 0.29 = 0.88).
    roll = random.random()
    if roll < 0.59:
        outcome = messages["coin_choices"][0]
    elif roll < 0.88:
        outcome = messages["coin_choices"][1]
    else:
        outcome = messages["coin_choices"][2]
    wrapper.send(messages["coin_land"].format(outcome))
@command("pony", "horse", pm=True)
def pony(var, wrapper, message):
    """Toss a magical pony into the air and see what happens!"""
    wrapper.send(messages["pony_toss"].format(wrapper.source))
    # Weighted outcome: 59% / 29% / 7% / 5%.
    roll = random.random()
    if roll < 0.59:
        outcome = messages["pony_choices"][0]
    elif roll < 0.88:
        outcome = messages["pony_choices"][1]
    elif roll < 0.95:
        outcome = messages["pony_choices"][2].format(nick=wrapper.source)
    else:
        # The pony flies away: its own message, and no landing line.
        wrapper.send(messages["pony_fly"])
        return
    wrapper.send(messages["pony_land"].format(outcome))
@command("cat", pm=True)
def cat(var, wrapper, message):
    """Toss a cat into the air and see what happens!"""
    toss = messages["cat_toss"].format(wrapper.source)
    land = messages["cat_land"]
    wrapper.send(toss, land, sep="\n")
@cmd("time", pm=True, phases=("join", "day", "night"))
def timeleft(cli, nick, chan, rest):
    """Returns the time left until the next day/night transition."""
    # Rate-limit public (in-channel) use of the command.
    if (chan != nick and var.LAST_TIME and
            var.LAST_TIME + timedelta(seconds=var.TIME_RATE_LIMIT) > datetime.now()):
        cli.notice(nick, messages["command_ratelimited"])
        return
    if chan != nick:
        var.LAST_TIME = datetime.now()
    if var.PHASE == "join":
        # During signup, first report how long until the game may start.
        dur = int((var.CAN_START_TIME - datetime.now()).total_seconds())
        msg = None
        if dur > 1:
            msg = messages["start_timer_plural"].format(dur)
        elif dur == 1:
            msg = messages["start_timer_singular"]
        if msg is not None:
            reply(cli, nick, chan, msg)
    if var.PHASE in var.TIMERS:
        if var.PHASE == "day":
            what = "sunset"
        elif var.PHASE == "night":
            what = "sunrise"
        elif var.PHASE == "join":
            what = "the game is canceled if it's not started"
        # Remaining = (timer start + timer duration) - now.
        remaining = int((var.TIMERS[var.PHASE][1] + var.TIMERS[var.PHASE][2]) - time.time())
        # NOTE(review): hard-coded English here, unlike the messages[]
        # lookups used elsewhere -- consider moving into the messages dict.
        msg = "There is \u0002{0[0]:0>2}:{0[1]:0>2}\u0002 remaining until {1}.".format(divmod(remaining, 60), what)
    else:
        msg = messages["timers_disabled"].format(var.PHASE.capitalize())
    reply(cli, nick, chan, msg)
@command("roles", pm=True)
def list_roles(var, wrapper, message):
    """Display which roles are in play for a specific gamemode."""
    lpl = len(var.ALL_PLAYERS)
    specific = 0
    pieces = re.split(" +", message.strip())
    gamemode = var.CURRENT_GAMEMODE
    # villagergame is presented as the default mode for role listings.
    if gamemode.name == "villagergame":
        gamemode = var.GAME_MODES["default"][0]()
    # No mode argument and the current mode has no role guide: bail out.
    if (not pieces[0] or pieces[0].isdigit()) and not hasattr(gamemode, "ROLE_GUIDE"):
        wrapper.reply("There {0} \u0002{1}\u0002 playing. {2}roles is disabled for the {3} game mode.".format("is" if lpl == 1 else "are", lpl, botconfig.CMD_CHAR, gamemode.name), prefix_nick=True)
        return
    msg = []
    # No argument at all: report the player count and (mid-game) the mode,
    # then fall through as if the player count had been passed.
    if not pieces[0] and lpl:
        msg.append("There {0} \u0002{1}\u0002 playing.".format("is" if lpl == 1 else "are", lpl))
        if var.PHASE in var.GAME_PHASES:
            msg.append("Using the {0} game mode.".format(gamemode.name))
            pieces[0] = str(lpl)
    # First argument may name a game mode (abbreviations are completed).
    if pieces[0] and not pieces[0].isdigit():
        valid = var.GAME_MODES.keys() - var.DISABLED_GAMEMODES - {"roles", "villagergame"}
        mode = pieces.pop(0)
        if mode not in valid:
            matches = complete_match(mode, valid)
            if not matches:
                wrapper.reply(messages["invalid_mode"].format(mode), prefix_nick=True)
                return
            if len(matches) > 1:
                wrapper.reply(messages["ambiguous_mode"].format(mode, ", ".join(matches)), prefix_nick=True)
                return
            mode = matches[0]
        gamemode = var.GAME_MODES[mode][0]()
    # EAFP check that the chosen mode publishes a role guide.
    try:
        gamemode.ROLE_GUIDE
    except AttributeError:
        wrapper.reply("{0}roles is disabled for the {1} game mode.".format(botconfig.CMD_CHAR, gamemode.name), prefix_nick=True)
        return
    # ROLE_GUIDE maps player-count thresholds to role additions ("role")
    # and removals ("-role"), applied cumulatively in threshold order.
    roles = list(gamemode.ROLE_GUIDE.items())
    roles.sort(key=lambda x: x[0])
    # Numeric argument: resolve the cumulative role set for that size.
    if pieces and pieces[0].isdigit():
        specific = int(pieces[0])
        new = []
        for role in itertools.chain.from_iterable([y for x, y in roles if x <= specific]):
            if role.startswith("-"):
                new.remove(role[1:])
            else:
                new.append(role)
        msg.append("[{0}]".format(specific))
        msg.append(", ".join(new))
    else:
        # Otherwise show the whole guide; thresholds already reached by
        # the current player count are rendered in bold (\u0002).
        final = []
        for num, role in roles:
            snum = "[{0}]".format(num)
            if num <= lpl:
                snum = "\u0002{0}\u0002".format(snum)
            final.append("{0} {1}".format(snum, ", ".join(role)))
        msg.append(" ".join(final))
    if not msg:
        msg.append("No roles are defined for {0}p games.".format(specific or lpl))
    wrapper.send(*msg)
@command("myrole", pm=True, phases=("day", "night"))
def myrole(var, wrapper, message):
    """Reminds you of your current role."""
    if wrapper.source not in get_participants():
        return
    role = get_main_role(wrapper.source)
    # Hidden roles are reported as the configured placeholder role.
    if role in Hidden:
        role = var.HIDDEN_ROLE
    evt = Event("myrole", {"role": role, "messages": []})
    if not evt.dispatch(var, wrapper.source):
        return
    role = evt.data["role"]
    # Pick "a"/"an" depending on the role name's first letter.
    article_n = "n" if role.startswith(("a", "e", "i", "o", "u")) else ""
    wrapper.pm(messages["show_role"].format(article_n, role))
    for extra in evt.data["messages"]:
        wrapper.pm(extra)
@command("aftergame", "faftergame", flag="D", pm=True)
def aftergame(var, wrapper, message):
    """Schedule a command to be run after the current game."""
    if not message.strip():
        wrapper.pm(messages["incorrect_syntax"])
        return
    args = re.split(" +", message)
    # Accept the command name with or without the bot's command prefix.
    before, prefix, after = args.pop(0).lower().partition(botconfig.CMD_CHAR)
    if not prefix: # the prefix was not in the string
        cmd = before
    elif after and not before: # message was prefixed
        cmd = after
    else: # some weird thing, e.g. "fsay!" or even "fs!ay"; we don't care about that
        return
    if cmd in COMMANDS:
        def do_action():
            # Flag the handlers so they know they run in the aftergame
            # context, invoke them, then clear the flag.
            for fn in COMMANDS[cmd]:
                fn.aftergame = True
                fn.caller(wrapper.source.client, wrapper.source.rawnick, channels.Main.name if fn.chan else users.Bot.nick, " ".join(args))
                fn.aftergame = False
    else:
        wrapper.pm(messages["command_not_found"])
        return
    if var.PHASE == "none":
        # No game in progress: run the scheduled command immediately.
        do_action()
        return
    # Otherwise announce it and defer execution until the game ends.
    channels.Main.send(messages["command_scheduled"].format(" ".join([cmd] + args), wrapper.source))
    var.AFTER_FLASTGAME = do_action
def _command_disabled(var, wrapper, message):
    # Replacement command body installed while new games are disabled
    # (see flastgame); new-API variant.
    wrapper.send(messages["command_disabled_admin"])
def _command_disabled_oldapi(cli, nick, chan, rest):
    # Replacement command body installed while new games are disabled
    # (see flastgame); old @cmd API variant.
    # FIXME: kill this off when the old @cmd API is completely killed off
    reply(cli, nick, chan, messages["command_disabled_admin"])
@command("lastgame", "flastgame", flag="D", pm=True)
def flastgame(var, wrapper, message):
    """Disables starting or joining a game, and optionally schedules a command to run after the current game ends."""
    # Swap out the join/start command bodies so no new game can begin;
    # pick the replacement matching each handler's API generation.
    for cmdcls in COMMANDS["join"] + COMMANDS["start"]:
        if isinstance(cmdcls, command):
            cmdcls.func = _command_disabled
        else:
            # FIXME: kill this off when the old @cmd API is completely killed off
            cmdcls.func = _command_disabled_oldapi
    channels.Main.send(messages["disable_new_games"].format(wrapper.source))
    var.ADMIN_TO_PING = wrapper.source
    # Anything after the command name is scheduled to run post-game.
    if message.strip():
        aftergame.func(var, wrapper, message)
@command("gamestats", "gstats", pm=True)
def gamestats(var, wrapper, message):
"""Get the game stats for a given game size or lists game totals for all game sizes if no game size is given."""
if wrapper.public:
if (var.GSTATS_RATE_LIMIT and var.LAST_GSTATS and
var.LAST_GSTATS + timedelta(seconds=var.GSTATS_RATE_LIMIT) > datetime.now()):
wrapper.pm(messages["command_ratelimited"])
return
var.LAST_GSTATS = datetime.now()
if var.PHASE | |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .functional import capsule_linear
class StackedBRNN(nn.Module):
    """Stack of bidirectional RNN layers (LSTM or GRU).

    Each layer consumes the previous layer's output; optionally the
    outputs of all layers are concatenated along the feature dimension.
    Dropout is applied to every layer's input and (optionally) to the
    final output.
    """

    def __init__(self, **kwargs):
        super(StackedBRNN, self).__init__()
        self.dropout_output = kwargs.get("dropout_output", 0.1)
        self.dropout_rate = kwargs.get("dropout_rate", 0.15)
        self.num_layers = kwargs.get("num_layers", 4)
        self.hidden_size = kwargs.get("hidden_size", 100)
        self.input_size = kwargs.get("input_size", 100)
        self.concat_layers = kwargs.get("concat_layers", False)
        self.rnn_type = kwargs.get("rnn_type", "lstm")
        rnn_classes = {"lstm": nn.LSTM, "gru": nn.GRU}
        if self.rnn_type not in rnn_classes:
            raise TypeError("Error for the RNN Model type %s" % self.rnn_type)
        rnn_cls = rnn_classes[self.rnn_type]
        # Layer 0 sees the raw input; deeper layers see the 2*hidden
        # bidirectional output of the layer below.
        self.rnns = nn.ModuleList(
            rnn_cls(self.input_size if layer == 0 else 2 * self.hidden_size,
                    self.hidden_size, num_layers=1, bidirectional=True,
                    batch_first=True)
            for layer in range(self.num_layers))

    def forward(self, inputs):
        """Faster encoding that ignores any padding."""
        layer_outputs = [inputs]
        for rnn in self.rnns:
            hidden_in = layer_outputs[-1]
            # Dropout on each layer's input.
            if self.dropout_rate > 0:
                hidden_in = F.dropout(hidden_in, p=self.dropout_rate,
                                      training=self.training)
            rnn.flatten_parameters()
            layer_outputs.append(rnn(hidden_in)[0])
        if self.concat_layers:
            # Concatenate the hidden states of every layer.
            output = torch.cat(layer_outputs[1:], 2)
        else:
            output = layer_outputs[-1]
        # Dropout on the final output.
        if self.dropout_output and self.dropout_rate > 0:
            output = F.dropout(output, p=self.dropout_rate,
                               training=self.training)
        return output

    def __repr__(self):
        return "{} ({} -> {})".format(self.__class__.__name__,
                                      self.input_size, self.hidden_size)
class CapsuleLinear(nn.Module):
    r"""Applies a linear combination to the incoming capsules.
    Args:
        out_capsules (int): number of output capsules
        in_length (int): length of each input capsule
        out_length (int): length of each output capsule
        in_capsules (int, optional): number of input capsules; must be
            None when ``share_weight`` is True
        share_weight (bool, optional): if True, share the weight between input capsules
        routing_type (str, optional): routing algorithm type
            -- options: ['dynamic', 'k_means']
        num_iterations (int, optional): number of routing iterations
        squash (bool, optional): squash output capsules or not; it works for all routings
        kwargs (dict, optional): other args:
            - similarity (str, optional): metric of similarity between capsules, it only works for 'k_means' routing
              -- options: ['dot', 'cosine', 'tonimoto', 'pearson']
    Shape:
        - Input: (Tensor): (N, in_capsules, in_length)
        - Output: (Tensor): (N, out_capsules, out_length)
    Attributes:
        if share_weight:
            - weight (Tensor): the learnable weights of the module of shape
              (out_capsules, out_length, in_length)
        else:
            - weight (Tensor): the learnable weights of the module of shape
              (out_capsules, in_capsules, out_length, in_length)
    """

    def __init__(self, out_capsules, in_length, out_length, in_capsules=None, share_weight=True,
                 routing_type='k_means', num_iterations=3, squash=False, **kwargs):
        super(CapsuleLinear, self).__init__()
        if num_iterations < 1:
            # Error messages below fixed for grammar/typos
            # ("must have to be", "Exceted", "Excepted").
            raise ValueError('num_iterations must be greater than 0, but got {}.'.format(num_iterations))
        self.out_capsules = out_capsules
        self.in_length = in_length
        self.out_length = out_length
        self.in_capsules = in_capsules
        self.share_weight = share_weight
        self.routing_type = routing_type
        self.num_iterations = num_iterations
        self.squash = squash
        self.kwargs = kwargs
        if self.share_weight:
            if in_capsules is not None:
                raise ValueError('Expected in_capsules to be None when share_weight is True.')
            # One weight matrix shared across all input capsules.
            self.weight = nn.Parameter(torch.Tensor(out_capsules, out_length, in_length))
        else:
            if in_capsules is None:
                raise ValueError('Expected in_capsules to be an int when share_weight is False.')
            # One weight matrix per (output, input) capsule pair.
            self.weight = nn.Parameter(torch.Tensor(out_capsules, in_capsules, out_length, in_length))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, inputs):
        # Routing itself is implemented by the functional helper.
        return capsule_linear(inputs, self.weight, self.share_weight, self.routing_type, self.num_iterations,
                              self.squash, **self.kwargs)

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_capsules) + ' -> ' \
               + str(self.out_capsules) + ')'
class Squash(nn.Module):
    """Capsule squashing non-linearity: shrinks vector norms toward
    [0, 1) while preserving direction."""

    def forward(self, x, dim=-1):
        norm_sq = x.pow(2).sum(dim=dim, keepdim=True)
        # Unit direction (epsilon guards against division by zero).
        unit = x / (norm_sq.sqrt() + 1e-8)
        return norm_sq / (1 + norm_sq) * unit
class WordsCapsLayer(nn.Module):
    """Words capsule layer (dynamic routing over word capsules)."""

    def __init__(self, in_dim, num_caps, dim_caps, num_routing):
        """
        Initialize the layer.
        Args:
            in_dim: Dimensionality (i.e. length) of each input capsule vector.
            num_caps: Number of capsules in the capsule layer.
            dim_caps: Dimensionality, i.e. length, of the output capsule vector.
            num_routing: Number of iterations during the routing algorithm.
        """
        super(WordsCapsLayer, self).__init__()
        self.in_dim = in_dim
        self.num_caps = num_caps
        self.dim_caps = dim_caps
        self.num_routing = num_routing
        self.W = nn.Parameter(0.001 * torch.randn(num_caps, in_dim, dim_caps),
                              requires_grad=True)
        # Kept for backward compatibility with code that used the former
        # `self.squash` module attribute.
        self.squash = self._squash

    @staticmethod
    def _squash(s, dim=-1):
        # Same math as the module-level Squash helper; inlined so this
        # layer has no cross-class dependency.
        squared_norm = (s ** 2).sum(dim=dim, keepdim=True)
        scale = squared_norm / (1 + squared_norm)
        return scale * s / (squared_norm.sqrt() + 1e-8)

    def forward(self, input_tensor):
        """
        Args:
            input_tensor: shape (batch_size, in_caps, in_dim)
        Returns:
            Output capsules, shape (batch_size, num_caps, dim_caps).
        """
        batch_size = input_tensor.size(0)
        device = input_tensor.device
        x = input_tensor.unsqueeze(1)  # -> (batch_size, 1, in_caps, in_dim)
        # (batch_size, 1, in_caps, in_dim) @ (num_caps, in_dim, dim_caps)
        #   -> (batch_size, num_caps, in_caps, dim_caps)
        u_hat = torch.matmul(x, self.W)
        # detach u_hat during routing iterations to prevent gradients from flowing
        temp_u_hat = u_hat.detach()
        in_caps = temp_u_hat.shape[2]
        # Bug fix: routing logits are initialized to zero (as in Sabour et
        # al., "Dynamic Routing Between Capsules"); the previous
        # torch.rand init made every forward pass non-deterministic.
        b = torch.zeros(batch_size, self.num_caps, in_caps, 1, device=device)
        for _ in range(self.num_routing - 1):
            # Coupling coefficients: softmax over the output capsules.
            c = b.softmax(dim=1)
            # Weighted sum of predictions over input capsules; c's
            # trailing singleton broadcasts over dim_caps.
            # -> (batch_size, num_caps, dim_caps)
            s = (c * temp_u_hat).sum(dim=2)
            v = self._squash(s)
            # Agreement between current outputs and the predictions:
            # (batch, num_caps, in_caps, dim_caps) @ (batch, num_caps, dim_caps, 1)
            #   -> (batch, num_caps, in_caps, 1)
            uv = torch.matmul(temp_u_hat, v.unsqueeze(-1))
            b = b + uv
        # Last iteration on the original u_hat, without updating the
        # routing weights, so gradients can flow.
        c = b.softmax(dim=1)
        s = (c * u_hat).sum(dim=2)
        return self._squash(s)

    def __repr__(self) -> str:
        return self.__class__.__name__ \
               + ' (in_dim{}\n'.format(self.in_dim) \
               + ' (num_caps{}\n'.format(self.num_caps) \
               + ' (dim_caps{}\n'.format(self.dim_caps) \
               + ' (num_routing{}\n'.format(self.num_routing) + ')'
class Attention(nn.Module):
    """Scaled dot-product attention."""

    def forward(self, query, key, value, mask=None, dropout=None):
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
        if mask is not None:
            # Masked positions get a large negative score -> ~0 weight.
            scores = scores.masked_fill(mask == 0, -1e9)
        weights = F.softmax(scores, dim=-1)
        if dropout is not None:
            weights = dropout(weights)
        return torch.matmul(weights, value), weights
class MultiHeadedAttention(nn.Module):
    """Multi-head attention: project, attend per head, then recombine.

    Takes the model size and the number of heads.
    """

    def __init__(self, h, d_model, dropout=0.1):
        super().__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k.
        self.d_k = d_model // h
        self.h = h
        self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
        self.output_linear = nn.Linear(d_model, d_model)
        self.attention = Attention()
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        batch_size = query.size(0)
        # 1) Linear projections, reshaped to (batch, heads, seq, d_k).
        projected = []
        for layer, tensor in zip(self.linear_layers, (query, key, value)):
            projected.append(layer(tensor).view(batch_size, -1, self.h, self.d_k).transpose(1, 2))
        query, key, value = projected
        # 2) Attention over all heads at once.
        x, attn = self.attention(query, key, value, mask=mask, dropout=self.dropout)
        # 3) Concatenate the heads back and apply the output projection.
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
        return self.output_linear(x)
class LayerNorm(nn.Module):
    "Construct a layernorm module (See citation for details)."

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))   # learnable gain
        self.b_2 = nn.Parameter(torch.zeros(features))  # learnable bias
        self.eps = eps

    def forward(self, x):
        # Normalize over the last dimension, then rescale and shift.
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        return self.a_2 * (x - mu) / (sigma + self.eps) + self.b_2
class ResConnectionLayer(nn.Module):
    """Pre-norm residual block: x + dropout(FFN(LayerNorm(x)))."""

    def __init__(self, in_dim, dropout):
        super(ResConnectionLayer, self).__init__()
        self.norm = LayerNorm(in_dim)
        self.dropout = nn.Dropout(dropout)
        self.ffn = FeedForwardNetwork(in_dim, in_dim)

    def forward(self, x):
        """Apply a residual connection to a sublayer of the same size."""
        residual = self.dropout(self.ffn(self.norm(x)))
        return x + residual
class SFU(nn.Module):
    """Semantic Fusion Unit.

    The output vector is expected to not only retrieve correlative
    information from the fusion vectors, but also to retain part of the
    input vector unchanged, controlled by a learned gate.
    """

    def __init__(self, input_size, fusion_size):
        super(SFU, self).__init__()
        self.linear_r = nn.Linear(input_size + fusion_size, input_size)
        self.linear_g = nn.Linear(input_size + fusion_size, input_size)

    def forward(self, x, fusions):
        combined = torch.cat([x, fusions], 2)
        candidate = torch.tanh(self.linear_r(combined))   # fused representation
        gate = torch.sigmoid(self.linear_g(combined))     # how much to overwrite
        return gate * candidate + (1 - gate) * x
class GELU(nn.Module):
    """Gaussian Error Linear Unit (tanh approximation)."""

    def forward(self, x):
        inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
        return 0.5 * x * (1 + torch.tanh(inner))
class SelfAttnMatch(nn.Module):
    """Given a sequence X, match every element of X against the whole
    sequence:
    * o_i = sum(alpha_j * x_j) for i in X
    * alpha_j = softmax(x_j * x_i)
    """

    def __init__(self, input_size, identity=False, diag=True):
        super(SelfAttnMatch, self).__init__()
        # Optional projection before scoring; identity=True skips it.
        self.linear = None if identity else nn.Linear(input_size, input_size)
        self.diag = diag
        self.gelu = GELU()

    def forward(self, inputs):
        """
        Args:
            inputs: batch * len1 * dim1
        Output:
            matched_seq: batch * len1 * dim1
            per-position attention mass: batch * len1
        """
        if self.linear:
            projected = self.gelu(self.linear(inputs))
        else:
            projected = inputs
        # Pairwise scores between all positions.
        scores = projected.bmm(projected.transpose(2, 1))
        if not self.diag:
            # NOTE(review): zeroing the diagonal *scores* does not remove
            # self-attention -- softmax(0) is still positive; confirm intent.
            for pos in range(inputs.size(1)):
                scores[:, pos, pos] = 0
        alpha = F.softmax(scores, dim=2)
        matched_seq = alpha.bmm(inputs)
        return matched_seq, alpha.sum(dim=1)
class FeedForwardNetwork(nn.Module):
    """Two-layer position-wise feed-forward block with GELU and dropout.

    Projects in_dim -> hid_dim -> in_dim, so the output shape matches the
    input shape and the block can sit inside a residual connection.
    """

    def __init__(self, in_dim, hid_dim) -> None:
        super().__init__()
        self.lin1 = nn.Linear(in_dim, hid_dim)
        self.lin2 = nn.Linear(hid_dim, in_dim)
        # NOTE(review): attribute name "gleu" (a typo of "gelu") is kept so
        # the public attribute surface is unchanged.
        self.gleu = GELU()
        self.dropout = nn.Dropout()

    def forward(self, inputs):
        hidden = self.gleu(self.lin1(inputs))
        hidden = self.dropout(hidden)
        return self.lin2(hidden)
class InitializedConv1d(nn.Module):
def | |
<reponame>Tru-Dev/WWIIPacificFront<filename>game/sprite_extension.py
# WWII Pacific Front - sprite_extension.py
# (C) 2021 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
'''
This module offers two distinct functionalities: nestable groups (and sprites to
go along with them) and behavior groups.
`NesterSprites` can only be in one `NestingGroup`, which is why the `BehaviorGroup` class was
implemented. They act like a regular `pygame.sprite.Group`, but do not implement rendering
and can hold as many sprites as needed.
'''
from collections import defaultdict
from typing import DefaultDict, Dict, Iterable, List, Optional, Union
from enum import IntEnum
import warnings
import pygame
import pygame.event
import pygame.key
import pygame.display
import pygame.freetype
import pygame.sprite
from .misc_utils import TypeWarning
class DirtyEnum(IntEnum):
    """Readable names for the integer "dirty" states used by sprites.

    Interchangeable with the plain ints pygame uses:
    0 = clean, 1 = redraw once, 2 = redraw every frame.
    """
    NOT_DIRTY = 0
    DIRTY = 1
    ALWAYS_DIRTY = 2
class NestingRecursionError(Exception):
    """Raised when a group nesting would create a cycle.

    Example::

        g = NestingGroup(parent_group=NestingGroup())
        g.add(g.parent)  # would nest g inside its own parent chain
    """
class NesterSprite(pygame.sprite.Sprite):
    '''
    A version of `DirtySprite` optimized for NestingGroups.
    It inherits from normal `Sprite` but implements much of the same dirtying functionality.
    One difference is that one `NesterSprite` can only belong to one `NestingGroup`, while
    `NestingGroup`s can still have as many `NesterSprite`s as needed.
    '''
    def __init__(
        self,
        rect: pygame.Rect,
        source_rect: Optional[pygame.Rect]=None,
        parent_group: Optional['NestingGroup']=None,
        layer: int=0
    ) -> None:
        super().__init__()
        self.rect = rect
        # BUGFIX: the original later re-assigned `self.source_rect = 0`,
        # silently discarding the constructor argument.
        self.source_rect = source_rect
        # `dirty` must exist before `visible`'s setter runs (it reads it).
        self.dirty = DirtyEnum.DIRTY
        self.visible = True
        self._layer = layer
        self.blendmode = 0
        # Starts unset; `from_sprite()` reads this attribute as a default, so
        # it must always exist.
        self.image = None
        self._parent = parent_group
        if parent_group is not None:
            self.add(parent_group)

    @classmethod
    def from_sprite(cls, spr: pygame.sprite.Sprite) -> 'NesterSprite':
        '''
        Turns a regular `pygame.sprite.Sprite` into a `NesterSprite`. Copies as much as
        it can from the original `Sprite`, except for groups.
        '''
        # Make sure to take care of whichever attributes are
        # implemented in this variation of Sprite
        newspr = cls(
            getattr(spr, 'rect', pygame.Rect(0, 0, 0, 0)),
            source_rect=getattr(spr, 'source_rect', None),
            layer=spr.layer if hasattr(spr, 'layer') else 0
        )
        newspr.dirty = getattr(spr, 'dirty', newspr.dirty)
        newspr.visible = getattr(spr, 'visible', newspr.visible)
        newspr.blendmode = getattr(spr, 'blendmode', newspr.blendmode)
        newspr.image = getattr(spr, 'image', newspr.image)
        return newspr

    @property
    def visible(self) -> bool:
        '''
        If True, this `NesterSprite` will be visible. Set to False to hide this `NesterSprite`.
        '''
        return self._visible

    @visible.setter
    def visible(self, value: Union[bool, int]) -> None:
        # BUGFIX: assign the backing field. The original assigned
        # `self.visible`, which re-entered this setter forever.
        self._visible = bool(value)
        if self.dirty == DirtyEnum.NOT_DIRTY:
            # BUGFIX: was `==` (a no-op comparison); a visibility change must
            # mark the sprite for redraw.
            self.dirty = DirtyEnum.DIRTY

    @property
    def layer(self) -> int:
        '''
        Gets and sets the layer for this `NesterSprite`. Unlike regular `Sprite`s, you can set
        the layer after adding the `NesterSprite` to the `NestedGroup`. This works by calling
        `NestedGroup.change_layer()` with the `Sprite`'s parent group inside the setter.
        '''
        return self._layer

    @layer.setter
    def layer(self, value: int) -> None:
        if self.parent is not None:
            # The parent owns the layer tables, so it must do the move.
            self.parent.change_layer(self, value)
        else:
            self._layer = value

    @property
    def parent(self) -> Optional['NestingGroup']:
        '''
        Gets and sets the parent `NestingGroup` of this `NesterSprite`.
        '''
        return self._parent

    @parent.setter
    def parent(self, nesting_group: Optional['NestingGroup']) -> None:
        if nesting_group is None:
            self.remove()
        else:
            self.add(nesting_group)

    def add(self, nesting_group: 'NestingGroup') -> None:
        '''
        Add the `NesterSprite` to a `NestingGroup`.
        Unlike normal `Sprite`s, `NestingSprite` can only be in one `NestingGroup`,
        so any previous parent is left first.
        '''
        if self in nesting_group:
            return
        self.remove_internal()
        self._parent = nesting_group
        nesting_group.add_internal(self)
        self.add_internal(nesting_group)

    def remove(self) -> Optional['NestingGroup']:
        '''
        Remove the `NesterSprite` from its parent group and return it (or None
        if there was no parent).
        Does not require a `group` argument because it can only be in one `NestingGroup`.
        Because of this, it is synonymous to `self.kill()`.
        '''
        return self.remove_internal()

    def kill(self) -> Optional['NestingGroup']:
        '''
        Remove the `NesterSprite` from its parent group and return it.
        Synonymous to `self.remove()`.
        '''
        return self.remove_internal()

    def add_internal(self, nesting_group) -> None:
        '''
        # For internal use only!
        # Do not use unless you are going to subclass this class!
        This function is only documented for subclassers and main devs.
        ---
        This method adds the `NestingGroup` to the `NesterSprite`'s internal data.
        '''
        if self._parent is not None and self._parent is not nesting_group:
            self.remove_internal()
        # Guard so the base bookkeeping is never applied twice for one group.
        if nesting_group not in self.groups():
            super().add_internal(nesting_group)
        self._parent = nesting_group

    def remove_internal(self, group: Optional['NestingGroup']=None) -> Optional['NestingGroup']:
        '''
        # For internal use only!
        # Do not use unless you are going to subclass this class!
        This function is only documented for subclassers and main devs.
        ---
        This method removes the parent `NestingGroup` from the `NesterSprite`'s internal data
        and returns it (or None when the sprite had no parent).

        BUGFIX: accepts an optional (ignored) `group` argument so pygame's
        `AbstractGroup.remove()` protocol, which calls
        `sprite.remove_internal(group)`, no longer raises TypeError. The body
        is also made idempotent: each side of the sprite<->group link is only
        torn down if it is still present, so re-entrant calls through pygame's
        `Sprite.remove()` cannot double-remove.
        '''
        old_parent = self._parent
        if old_parent is None:
            return None
        self._parent = None
        if old_parent.has_internal(self):
            old_parent.remove_internal(self)
        if old_parent in self.groups():
            super().remove_internal(old_parent)
        return old_parent

    def __repr__(self) -> str:
        '''
        String Representation of `NesterSprite`
        '''
        if self.parent is None:
            return f'<{type(self).__name__} NesterSprite(unassigned)>'
        return f'<{type(self).__name__} NesterSprite(belongs to a {type(self.parent).__name__})>'
# Anything accepted by NestingGroup.add(): a sprite, a nested group, or any
# (possibly nested) iterable of these.
NestingGroupAddable = Union[NesterSprite, 'NestingGroup', Iterable['NestingGroupAddable']]
class NestingGroup(pygame.sprite.AbstractGroup):
    '''
    A group that nests. Update rect optimized. Supports layers.
    Does not remember a `NesterSprite`'s position within a layer.
    If it matters, use another `NestingGroup`.
    `layer` parameter refers to the default layer for adding sprites or groups
    to, while `parent_layer` refers to the position of the group itself in its parent.
    '''
    # Shared sentinel rect meaning "this sprite has not been drawn yet".
    _init_rect: pygame.Rect = pygame.Rect(0, 0, 0, 0)
def __init__(
self,
*sprites_or_groups: NestingGroupAddable,
layer: Optional[int]=None,
parent_group: Optional['NestingGroup']=None,
parent_layer: int=0,
**kwargs
) -> None:
super().__init__()
self._spr_layerdict: DefaultDict[int, List[NesterSprite]] = defaultdict(list)
self._grp_layerdict: DefaultDict[int, List[NestingGroup]] = defaultdict(list)
self._layerdict_full: Dict[int, List[Union[NesterSprite, 'NestingGroup']]] = {}
self.groupdict: Dict[NestingGroup, int] = {}
# Internal Cache Objects
self._cache_sorted_sprites: Optional[List[NesterSprite]] = None
self._cache_sorted_groups: Optional[List[NestingGroup]] = None
self._cache_sorted_children: Optional[List[Union[NesterSprite, NestingGroup]]] = None
# Adding type annotations
self.spritedict: Dict[NesterSprite, pygame.Rect] = self.spritedict
self.lostsprites: List[pygame.Rect] = self.lostsprites
self._layer = parent_layer
self.add(*sprites_or_groups, layer=layer, **kwargs)
if parent_group is not None:
parent_group.add(self)
@property
def parent(self) -> Optional['NestingGroup']:
'''
Gets and sets the parent `NestingGroup` of this group.
'''
return self._parent
@parent.setter
def parent(self, nesting_group: 'NestingGroup') -> None:
nesting_group.add(self)
@property
def parent_layer(self) -> int:
return self._layer
@parent_layer.setter
def parent_layer(self, layer: int) -> None:
if self.parent is not None:
self.parent.change_layer(self, layer)
else:
self._layer = layer
def add(
self,
*sprites: NestingGroupAddable,
layer: Optional[int]=None,
**kwargs
) -> None:
'''
Adds `NesterSprite`s or other `NestingGroup`s to the `NestingGroup`. Can take
plain `NesterSprite`s or iterables of them. `NestingGroup`s, however, will
be nested. If you want to add the contents themselves, use `NestingGroup.sprites()`,
`NestingGroup.groups()`, or `NestingGroup.children()`.
This method raises `NestingRecursionError` if you attempt to add a parent group, direct
or indirect, to this group.
'''
# the recursion detecting logic isn't here, it's in _add_group()
for s in sprites:
if isinstance(s, NesterSprite):
if not self.has_internal(s):
self.add_internal(s, layer=layer)
s.add_internal(self)
elif isinstance(s, NestingGroup):
if not self._has_group(s):
self._add_group(s)
s._add_self_to_group(self)
elif isinstance(s, Iterable):
self.add(*s, layer=layer, **kwargs)
else:
warnings.warn(
'NestingGroup can only add NesterSprites or other NestingGroups',
TypeWarning,
stacklevel=2,
)
# Refresh the cache for the get_ordered_* methods
self._cache_sorted_sprites = None
self._cache_sorted_groups = None
self._cache_sorted_children = None
def remove(self, *sprites: NestingGroupAddable) -> None:
'''
Removes `NesterSprite`s or other `NestingGroup`s from this `NestingGroup`. Follows
the same logic as `add()`.
'''
for s in sprites:
if isinstance(s, NesterSprite):
if self.has_internal(s):
self.remove_internal(s)
s.remove_internal(self)
elif isinstance(s, NestingGroup):
self._remove_group(s)
s._remove_self_from_group(self)
elif isinstance(s, Iterable):
self.remove(*s)
    def _add_self_to_group(self, parent_group: 'NestingGroup'):
        '''
        # For internal use only!
        # Do not use unless you are going to subclass this class!
        This function is only documented for subclassers and main devs.
        ---
        Used by `NestingGroup` to add itself to other `NestingGroup`s internally:
        records the new parent and flags the whole subtree dirty so it gets
        redrawn under its new parent.
        '''
        self._parent = parent_group
        self._propagate_dirty()
def _add_group(self, nesting_group: 'NestingGroup', layer: Optional[int]=None):
'''
# For internal use only!
# Do not use unless you are going to subclass this class!
This function is only documented for subclassers and main devs.
---
Used by `NestingGroup` to add other `NestingGroup`s to itself internally.
'''
# Yes, we combat recursive nesting with recursive methods. It should work...
if nesting_group.has_recursive(self):
raise NestingRecursionError('Group recursion is not allowed.')
self.groupdict[nesting_group] = 0
if layer is None:
self._grp_layerdict[nesting_group.parent_layer].append(nesting_group)
else:
self._grp_layerdict[layer]
def _propagate_dirty(self) -> None:
'''
# For internal use only!
# Do not use unless you are going to subclass this class!
This function is only documented for subclassers and main devs.
---
Used by `NestingGroup` to indicate a redraw when added to another group.
'''
for s in self.spritedict:
if s.dirty == DirtyEnum.NOT_DIRTY:
s.dirty = DirtyEnum.DIRTY
for g in self.groupdict:
g._propagate_dirty()
def _remove_self_from_group(self):
'''
# For internal use only!
# Do not use unless you are going to subclass this class!
This function is only documented for subclassers and main devs.
---
Used by `NestingGroup` to remove itself from other `NestingGroup`s internally.
'''
self._parent = None
def _remove_group(self, | |
#!/usr/bin/python
# Copyright (c) 2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import copy
import json
import os
from PIL import (
Image,
ImageFont
)
import rospy
import rospkg
from sensor_msgs.msg import Image as ImageMsg
from baxter_interface import (
Navigator,
RobotEnable,
CameraController,
Gripper
)
from baxter_core_msgs.msg import AssemblyState
from .baxter_procs import (
kill_python_procs,
mk_process,
python_proc_ids,
RosProcess,
)
from .demo_buttons import BrrButton
from .demo_windows import BrrWindow
from .img_proc import (
cv_to_msg,
gen_cv,
overlay,
)
import demo_functions
'''~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# UI wrapper class for Baxter Research Robot.
# This class has 2 purposes:
# 1) To handle user interaction with the robot and interpret
# those interactions in a UI context
# 2) To run robot utility functions and button_press functions
#
#
# Initialization arguments:
# windows - a dictionary of BrrWindow objects
# (all of the windows configured in the UI)
# btn_context - a dictionary containing metadata for the buttons
#                   (what functions they run and what window they take you to)
# commands - a list of strings to be searched through when killing
# running example programs
# share_path - path to this package's share/ folder
#
# Public Parameters:
# img - The current image being displayed by the UI, in cv format
# windows - Dict with the full list of windows in the UI
# active_window - The BrrWindow object currently selected in the UI
# xdisp - Publisher for xdisplay topic
# cameras - Dict of Camera objects from baxter_interface
# camera_sub - Camera subscriber.
#
# Public Methods:
# selected(self) - Returns the BrrButton selected in the current active
# BrrWindow object.
# draw(self) - Draws windows recursively, sets self.img, and publishes
# to the screen.
# scroll(self, direction) - Calls the scroll function for the active
# window passing the scroll direction.
# ok_pressed(self, v, side) - Enacts the function for the selected button
# in the active window,
# back(self, v) - If in a window with a back button, this will kill all
# examples and set the previous window as active.
# kill_examples(self, v) - Kills all processes matching the criteria in
# self.commands
# error_screen(self, error) - Will display the selected error screen on
# top of the current display.
# Sets the error window's parent to preserve
# "back" functionality
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''
class BrrUi(object):
    def __init__(self, share_path, conf_path, commands):
        """Build the on-robot UI: windows, display publisher, and listeners.

        Args:
            share_path: path to this package's share/ folder (fonts, images).
            conf_path: path to the JSON UI configuration file.
            commands: list of strings searched through when killing running
                example programs.
        """
        self.share_path = share_path
        self.conf_path = conf_path
        self.windows = dict()
        self._btn_context = dict()
        # Module whose functions are looked up by name for button presses.
        self._functions = demo_functions
        self._load_config()
        # Current frame shown on Baxter's head display (1024x600).
        self.img = Image.new('RGB', (1024, 600), 'white')
        self.active_window = self.windows['demo_1']
        # Latched so the most recent frame persists for late subscribers.
        self.xdisp = rospy.Publisher('/robot/xdisplay', ImageMsg,
                                     latch=True, queue_size=1)
        self._status = RobotEnable()
        self._commands = commands
        self._font = ImageFont.truetype(
            '%s/HelveticaLight.ttf' % share_path, 30
        )
        self._textHeight = self._font.getsize('W')[1]
        self._active_example = False
        # One Navigator per physical arm/torso button cluster.
        self._navigators = {'left': Navigator('left'),
                            'torso_left': Navigator('torso_left'),
                            'right': Navigator('right'),
                            'torso_right': Navigator('torso_right')}
        self._listeners_connected = False
        self._connect_listeners()
        # E-Stop tracking: listeners get detached while the robot is stopped.
        self._estop_state = False
        self._estop_sub = rospy.Subscriber('robot/state', AssemblyState,
                                           self._estop_callback)
        self._wheel_ok = True
        # Camera discovery. NOTE(review): only a limited number of cameras can
        # be powered at once, which is presumably why a failed lookup retries
        # after closing the previously probed camera -- confirm against the
        # baxter_interface CameraController docs.
        self.cameras = dict()
        camera_list = ['left_hand', 'right_hand', 'head']
        for idx, cam in enumerate(camera_list):
            try:
                self.cameras[cam] = CameraController('%s_camera' % cam)
            except AttributeError:
                try:
                    # This camera might not be powered
                    # Turn off the power to the last camera
                    # this will turn power on to the current camera
                    CameraController('%s_camera' % camera_list[idx-1]).close()
                    # And try again to locate the camera service
                    self.cameras[cam] = CameraController('%s_camera' % cam)
                except AttributeError:
                    # This camera is unavailable (might be broken)
                    # Disable camera button in the UI
                    self.windows['cam_submenu'].set_btn_selectable('cam_%s' % cam,
                                                                   False)
                    # If the disabled button was selected, move the selection
                    # to the next selectable button (or the first one).
                    sel = self.windows['cam_submenu'].selected_btn()
                    bad_cam = self.windows['cam_submenu'].get_btn('cam_%s' % cam)
                    if (sel == bad_cam and
                            not self.windows['cam_submenu'].scroll(1)):
                        self.windows['cam_submenu'].selected_btn_index = 0
        self.cam_sub = None
        self._l_grip = {'interface': Gripper('left'), 'type': 'custom'}
        self._r_grip = {'interface': Gripper('right'), 'type': 'custom'}
        # Poll the robot enable state twice per second.
        rospy.Timer(rospy.Duration(.5), self._check_enable)
        self.error_state = False
        self._enable()
        self.calib_stage = 0
        self.draw()
        # Untuck the arms so the robot is ready to run demos.
        mk_process('rosrun baxter_tools tuck_arms.py -u')
def _estop_callback(self, msg):
if self._estop_state != msg.stopped:
self._estop_state = msg.stopped
if msg.stopped and self._listeners_connected:
self._disconnect_listeners()
elif not msg.stopped and not self._listeners_connected:
self._connect_listeners()
def _connect_listeners(self):
# Navigator OK Button
self._navigators['left'].button0_changed.connect(self._left_ok_pressed)
self._navigators['torso_left'].button0_changed.connect(
self._left_ok_pressed)
self._navigators['right'].button0_changed.connect(
self._right_ok_pressed)
self._navigators['torso_right'].button0_changed.connect(
self._right_ok_pressed)
# Navigator Wheel
self._navigators['left'].wheel_changed.connect(self._left_wheel_moved)
self._navigators['torso_left'].wheel_changed.connect(
self._left_wheel_moved)
self._navigators['right'].wheel_changed.connect(
self._right_wheel_moved)
self._navigators['torso_right'].wheel_changed.connect(
self._right_wheel_moved)
# Navigator Baxter Button
self._navigators['left'].button2_changed.connect(self._enable)
self._navigators['torso_left'].button2_changed.connect(self._enable)
self._navigators['right'].button2_changed.connect(self._enable)
self._navigators['torso_right'].button2_changed.connect(self._enable)
# Navigator Back Button
self._navigators['left'].button1_changed.connect(self.back)
self._navigators['torso_left'].button1_changed.connect(self.back)
self._navigators['right'].button1_changed.connect(self.back)
self._navigators['torso_right'].button1_changed.connect(self.back)
self._listeners_connected = True
def _disconnect_listeners(self):
# Navigator OK Button
self._navigators['left'].button0_changed.disconnect(
self._left_ok_pressed)
self._navigators['torso_left'].button0_changed.disconnect(
self._left_ok_pressed)
self._navigators['right'].button0_changed.disconnect(
self._right_ok_pressed)
self._navigators['torso_right'].button0_changed.disconnect(
self._right_ok_pressed)
# Navigator Wheel
self._navigators['left'].wheel_changed.disconnect(
self._left_wheel_moved)
self._navigators['torso_left'].wheel_changed.disconnect(
self._left_wheel_moved)
self._navigators['right'].wheel_changed.disconnect(
self._right_wheel_moved)
self._navigators['torso_right'].wheel_changed.disconnect(
self._right_wheel_moved)
# Navigator Baxter Button
self._navigators['left'].button2_changed.disconnect(self._enable)
self._navigators['torso_left'].button2_changed.disconnect(self._enable)
self._navigators['right'].button2_changed.disconnect(self._enable)
self._navigators['torso_right'].button2_changed.disconnect(self._enable)
# Navigator Back Button
self._navigators['left'].button1_changed.disconnect(self.back)
self._navigators['torso_left'].button1_changed.disconnect(self.back)
self._navigators['right'].button1_changed.disconnect(self.back)
self._navigators['torso_right'].button1_changed.disconnect(self.back)
self._listeners_connected = False
def _load_config(self):
f = open(self.conf_path).read()
conf_data = json.loads(f)
for window in conf_data['Windows']:
buttons = dict()
if window['back']:
name = '%s_back' % window['name']
size = window['back']['size']
offset = window['back']['offset']
icon_prefix = 'Inner_Back'
icon_offset = window['back']['icon_offset']
buttons[name] = BrrButton(name, size, offset, 0,
icon_prefix, 'TopSmall', icon_offset,
'', 0, True, self.share_path)
self._btn_context[name] = {'nextWindow': window['parent'],
'function': 'Back'}
if 'Buttons' in window.keys():
for btn in window['Buttons']:
buttons[btn['name']] = BrrButton(
btn['name'],
btn['size'],
btn['offset'],
btn['index'],
btn['icon_prefix'],
btn['button'],
btn['icon_offset'],
btn['label'],
btn['label_y'],
btn['selectable'],
self.share_path
)
self._btn_context[btn['name']] = {
'nextWindow': btn['nextWindow'],
'function': btn['function']
}
self.windows[window['name']] = BrrWindow(window,
buttons,
self.share_path)
errors = conf_data['Error']
for error in errors['errors']:
name = error['name']
buttons = dict()
buttons['OK'] = BrrButton(
'%s_OK' % name, # name
[200, 60], # size
errors['OK']['offset'], # button offset
0, # index
None, # icon prefix
"Wide", # button type
[0, 0], # icon offset
"OK", # label
16, # label y-offset
True, # selectable?
self.share_path
)
self._btn_context["%s_OK" % name] = {
'nextWindow': None,
'function': 'Back'
}
window = {
'name': '%s_error' % name,
'bg': errors['bg'],
'back': False,
'offset': errors['offset'],
'parent': False,
'default': '%s_OK' % name,
'no_scroll': False,
'text': [{'text': error['text'],
'text_y': error['text_y']}],
}
self.windows['%s_error' % name] = BrrWindow(window, buttons,
self.share_path)
for win in conf_data['Confirmation']['Windows']:
conf = conf_data['Confirmation']
name = win['name']
labels = list()
for text in win['messages']:
labels.append({'text': text['text'],
'text_y': text['text_y']})
buttons = dict()
buttons['OK'] = BrrButton(
'%s_OK' % name, # name
[200, 60], # size
conf['OK']['offset'], # button offset
1, # index
None, # icon prefix
"Wide", # button type
[0, 0], # icon offset
win['conf_text'], # label
16, # label y-offset
True, # selectable?
self.share_path)
self._btn_context['%s_OK' % name] = {
'nextWindow': win['nextWindow'],
'function': win['function']}
buttons['Cancel'] = BrrButton(
'%s_Back' % name,
[200, 60],
conf['Cancel']['offset'],
0,
None,
"Wide",
[0, 0],
"Cancel",
16,
True,
self.share_path
)
self._btn_context['%s_Back' % name] = {
'nextWindow': win['parent'],
'function': 'Back'}
window = {
'name': '%s_conf' % win['name'],
'bg': conf['bg'],
'back': False,
'offset': conf['offset'],
'parent': win['parent'],
'default': '%s_OK' % name,
'no_scroll': False,
'text': labels
}
self.windows['%s_conf' % name] = BrrWindow(window, buttons,
self.share_path)
def selected(self):
return self.active_window.selected_btn()
'''~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main Draw function.
# Converts the appropriate frame to a ros message and sends
# it to the screen.
# Also sets the current_frame parameter, in expectation of
# future hooks to merge images into the current view
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''
def draw(self):
img = Image.new('RGB', (1024, 600), 'white')
img = gen_cv(self._draw_window(img, self.active_window.name))
self.img = img
msg = cv_to_msg(img)
self.xdisp.publish(msg)
rospy.sleep(.1)
'''~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Simple method that sets the active window based on the window's name
# and re-draws the UI.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''
def set_active_window(self, name):
self.active_window = self.windows[name]
self.draw()
def _draw_window(self, img, window, | |
FILE -> uncomment lines with this file in it to get series information. This should be made available
# through a RomTrainer SolutionExport or something similar, or perhaps just an Output DataObject, in the future.
writeTrainDebug = False
if writeTrainDebug:
debugfile = open('debugg_varma.csv','w')
# obtain pivot parameter
self.raiseADebug('... gathering pivot values ...')
self.pivotParameterValues = targetVals[:,:,self.target.index(self.pivotParameterID)]
# NOTE: someday, this ARMA could be expanded to take Fourier signals in time on the TypicalHistory,
# and then use several realizations of the target to train an ND ARMA that captures not only
# the mean and variance in time, but the mean, variance, skewness, and kurtosis over time and realizations.
# In this way, outliers in the training data could be captured with significantly more representation.
if len(self.pivotParameterValues) > 1:
self.raiseAnError(Exception,self.printTag +" does not handle multiple histories data yet! # histories: "+str(len(self.pivotParameterValues)))
self.pivotParameterValues.shape = (self.pivotParameterValues.size,)
targetVals = np.delete(targetVals,self.target.index(self.pivotParameterID),2)[0]
# targetVals now has shape (1, # time samples, # targets)
self.target.pop(self.target.index(self.pivotParameterID))
# prep the correlation data structure
correlationData = np.zeros([len(self.pivotParameterValues),len(self.correlations)])
for t,target in enumerate(self.target):
timeSeriesData = targetVals[:,t]
if writeTrainDebug:
debugfile.writelines('{}_original,'.format(target)+','.join(str(d) for d in timeSeriesData)+'\n')
# if this target governs the zero filter, extract it now
if target == self.zeroFilterTarget:
self.notZeroFilterMask = self._trainZeroRemoval(timeSeriesData,tol=self.zeroFilterTol) # where zeros are not
self.zeroFilterMask = np.logical_not(self.notZeroFilterMask) # where zeroes are
# if we're removing Fourier signal, do that now.
if target in self.fourierParams:
self.raiseADebug('... analyzing Fourier signal for target "{}" ...'.format(target))
self.fourierResults[target] = self._trainFourier(self.pivotParameterValues,
self.fourierParams[target]['periods'],
self.fourierParams[target]['orders'],
timeSeriesData,
zeroFilter = target == self.zeroFilterTarget)
if writeTrainDebug:
debugfile.writelines('{}_fourier,'.format(target)+','.join(str(d) for d in self.fourierResults[target]['predict'])+'\n')
timeSeriesData -= self.fourierResults[target]['predict']
if writeTrainDebug:
debugfile.writelines('{}_nofourier,'.format(target)+','.join(str(d) for d in timeSeriesData)+'\n')
# zero filter application
## find the mask for the requested target where values are nonzero
if target == self.zeroFilterTarget:
# artifically force signal to 0 post-fourier subtraction where it should be zero
targetVals[:,t][self.notZeroFilterMask] = 0.0
if writeTrainDebug:
debugfile.writelines('{}_zerofilter,'.format(target)+','.join(str(d) for d in timeSeriesData)+'\n')
# Transform data to obtain normally distributed series. See
# J.M.Morales, R.Minguez, A.J.Conejo "A methodology to generate statistically dependent wind speed scenarios,"
# Applied Energy, 87(2010) 843-855
for t,target in enumerate(self.target):
# if target correlated with the zero-filter target, truncate the training material now?
timeSeriesData = targetVals[:,t]
self.raiseADebug('... analyzing ARMA properties for target "{}" ...'.format(target))
self.cdfParams[target] = self._trainCDF(timeSeriesData)
# normalize data
normed = self._normalizeThroughCDF(timeSeriesData, self.cdfParams[target])
if writeTrainDebug:
debugfile.writelines('{}_normed,'.format(target)+','.join(str(d) for d in normed)+'\n')
# check if this target is part of a correlation set, or standing alone
if target in self.correlations:
# store the data and train it separately in a moment
## keep data in order of self.correlations
correlationData[:,self.correlations.index(target)] = normed
else:
# go ahead and train it now
## if using zero filtering and target is the zero-filtered, only train on the masked part
if target == self.zeroFilterTarget:
# don't bother training the part that's all zeros; it'll still be all zeros
# just train the data portions
normed = normed[self.zeroFilterMask]
self.raiseADebug('... ... training ...')
self.armaResult[target] = self._trainARMA(normed)
self.raiseADebug('... ... finished training target "{}"'.format(target))
# now handle the training of the correlated armas
if len(self.correlations):
self.raiseADebug('... ... training correlated: {} ...'.format(self.correlations))
# if zero filtering, then all the correlation data gets split
if self.zeroFilterTarget in self.correlations:
# split data into the zero-filtered and non-zero filtered
unzeroed = correlationData[self.zeroFilterMask]
zeroed = correlationData[self.notZeroFilterMask]
## throw out the part that's all zeros (axis 1, row corresponding to filter target)
zeroed = np.delete(zeroed, self.correlations.index(self.zeroFilterTarget), 1)
self.raiseADebug('... ... ... training unzeroed ...')
unzVarma, unzNoise, unzInit = self._trainVARMA(unzeroed)
self.raiseADebug('... ... ... training zeroed ...')
## the VAR fails if only 1 variable is non-constant, so we need to decide whether "zeroed" is actually an ARMA
## -> instead of a VARMA
if zeroed.shape[1] == 1:
# then actually train an ARMA instead
zVarma = self._trainARMA(zeroed)
zNoise = None # NOTE this is used to check whether an ARMA was trained later!
zInit = None
else:
zVarma, zNoise, zInit = self._trainVARMA(zeroed)
self.varmaResult = (unzVarma, zVarma) # NOTE how for zero-filtering we split the results up
self.varmaNoise = (unzNoise, zNoise)
self.varmaInit = (unzInit, zInit)
else:
varma, noiseDist, initDist = self._trainVARMA(correlationData)
# FUTURE if extending to multiple VARMA per training, these will need to be dictionaries
self.varmaResult = (varma,)
self.varmaNoise = (noiseDist,)
self.varmaInit = (initDist,)
if writeTrainDebug:
debugfile.close()
def __evaluateLocal__(self,featureVals):
"""
@ In, featureVals, float, a scalar feature value is passed as scaling factor
@ Out, returnEvaluation , dict, dictionary of values for each target (and pivot parameter)
"""
if featureVals.size > 1:
self.raiseAnError(ValueError, 'The input feature for ARMA for evaluation cannot have size greater than 1. ')
# Instantiate a normal distribution for time series synthesis (noise part)
# TODO USE THIS, but first retrofix rvs on norm to take "size=") for number of results
# make sure pivot value is in return object
returnEvaluation = {self.pivotParameterID:self.pivotParameterValues}
# TODO when we have output printing for ROMs, the distinct signals here could be outputs!
# leaving "debuggFile" as examples of this, in comments
#debuggFile = open('signal_bases.csv','w')
#debuggFile.writelines('Time,'+','.join(str(x) for x in self.pivotParameterValues)+'\n')
correlatedSample = None
for tIdx,target in enumerate(self.target):
# start with the random gaussian signal
if target in self.correlations:
# where is target in correlated data
corrIndex = self.correlations.index(target)
# check if we have zero-filtering in play here
if len(self.varmaResult) > 1:
# where would the filter be in the index lineup had we included it in the zeroed varma?
filterTargetIndex = self.correlations.index(self.zeroFilterTarget)
# if so, we need to sample both VARMAs
# have we already taken the correlated sample yet?
if correlatedSample is None:
# if not, take the samples now
unzeroedSample = self._generateVARMASignal(self.varmaResult[0],
numSamples = self.zeroFilterMask.sum(),
randEngine = self.normEngine.rvs,
rvsIndex = 0)
## zero sampling is dependent on whether the trained model is a VARMA or ARMA
if self.varmaNoise[1] is not None:
zeroedSample = self._generateVARMASignal(self.varmaResult[1],
numSamples = self.notZeroFilterMask.sum(),
randEngine = self.normEngine.rvs,
rvsIndex = 1)
else:
result = self.varmaResult[1]
sample = self._generateARMASignal(result,
numSamples = self.notZeroFilterMask.sum(),
randEngine = self.normEngine.rvs)
zeroedSample = np.zeros((self.notZeroFilterMask.sum(),1))
zeroedSample[:,0] = sample
correlatedSample = True # placeholder, signifies we've sampled the correlated distribution
# reconstruct base signal from samples
## initialize
signal = np.zeros(len(self.pivotParameterValues))
## first the data from the non-zero portions of the original signal
signal[self.zeroFilterMask] = unzeroedSample[:,corrIndex]
## then the data from the zero portions (if the filter target, don't bother because they're zero anyway)
if target != self.zeroFilterTarget:
# fix offset since we didn't include zero-filter target in zeroed correlated arma
indexOffset = 0 if corrIndex < filterTargetIndex else -1
signal[self.notZeroFilterMask] = zeroedSample[:,corrIndex+indexOffset]
# if no zero-filtering (but still correlated):
else:
## check if sample taken yet
if correlatedSample is None:
## if not, do so now
correlatedSample = self._generateVARMASignal(self.varmaResult[0],
numSamples = len(self.pivotParameterValues),
randEngine = self.normEngine.rvs,
rvsIndex = 0)
# take base signal from sample
signal = correlatedSample[:,self.correlations.index(target)]
# if NOT correlated
else:
result = self.armaResult[target] # ARMAResults object
# generate baseline ARMA + noise
# are we zero-filtering?
if target == self.zeroFilterTarget:
sample = self._generateARMASignal(result,
numSamples = self.zeroFilterMask.sum(),
randEngine = self.normEngine.rvs)
## if so, then expand result into signal space (functionally, put back in all the zeros)
signal = np.zeros(len(self.pivotParameterValues))
signal[self.zeroFilterMask] = sample
else:
## if not, no extra work to be done here!
sample = self._generateARMASignal(result,
numSamples = len(self.pivotParameterValues),
randEngine = self.normEngine.rvs)
signal = sample
# END creating base signal
# DEBUGG adding arbitrary variables for debugging, TODO find a more elegant way, leaving these here as markers
#returnEvaluation[target+'_0base'] = copy.copy(signal)
# denoise
signal = self._denormalizeThroughCDF(signal,self.cdfParams[target])
# DEBUGG adding arbitrary variables
#returnEvaluation[target+'_1denorm'] = copy.copy(signal)
#debuggFile.writelines('signal_arma,'+','.join(str(x) for x in signal)+'\n')
# Add fourier trends
if target in self.fourierParams:
signal += self.fourierResults[target]['predict']
# DEBUGG adding arbitrary variables
#returnEvaluation[target+'_2fourier'] = copy.copy(signal)
#debuggFile.writelines('signal_fourier,'+','.join(str(x) for x in self.fourierResults[target]['predict'])+'\n')
# Re-zero out zero filter target's zero regions
if target == self.zeroFilterTarget:
# DEBUGG adding arbitrary variables
#returnEvaluation[target+'_3zerofilter'] = copy.copy(signal)
signal[self.notZeroFilterMask] = 0.0
# Domain limitations
for domain,requests in self.outTruncation.items():
if target in requests:
if domain == 'positive':
signal = np.absolute(signal)
elif domain == | |
#!/usr/bin/env python3
import os
from docx import Document
from docx.shared import Inches
from docx.shared import Cm
from docx.shared import RGBColor
import datetime
import xlsxwriter
import requests
from requests.auth import HTTPDigestAuth
import json
import getpass
requests.packages.urllib3.disable_warnings()
def set_column_width(column, width):
  """Force a fixed width on a docx table column.

  python-docx only honors a column width reliably when every cell in
  the column carries the same width, so it is applied to the column
  object and to each cell.
  """
  column.width = width
  for c in column.cells:
    c.width = width
def create_asm_excel_file (filename, overview, allowed_responses, file_types, urls, parameters, signatures_overview, signature_sets, methods, headers, cookies, domains, ipi, ipi_categories, blocking_settings, compliance, evasions, whitelist, policy_builder):
# Create a workbook and add a worksheet.
excel_name = filename + ".xlsx"
workbook = xlsxwriter.Workbook(excel_name)
xls_overview = workbook.add_worksheet('Overview')
xls_settings = workbook.add_worksheet('Blocking Settings')
xls_file_types = workbook.add_worksheet('File Types')
xls_urls = workbook.add_worksheet('URLs')
xls_parameters = workbook.add_worksheet('Parameters')
xls_signatures = workbook.add_worksheet('Signatures')
xls_headers = workbook.add_worksheet('Headers')
xls_methods = workbook.add_worksheet('Methods')
xls_readiness = workbook.add_worksheet('Readiness')
cell_format = workbook.add_format()
cell_format.set_text_wrap()
cell_format_center = workbook.add_format({'align': 'center'})
cell_format_center.set_text_wrap()
xls_overview.set_column(0,10, 25)
xls_settings.set_column('A:A', 47)
xls_settings.set_column('B:B', 10)
xls_settings.set_column('C:C', 10)
xls_settings.set_column('D:D', 10)
xls_settings.set_column('G:G', 38)
xls_settings.set_column('H:H', 10)
xls_settings.set_column('I:I', 10)
xls_file_types.set_column('A:A', 10)
xls_file_types.set_column('B:B', 8)
xls_file_types.set_column('C:C', 10)
xls_file_types.set_column('D:D', 12)
xls_file_types.set_column('E:E', 10)
xls_file_types.set_column('F:F', 14)
xls_file_types.set_column('G:G', 20)
xls_urls.set_column('A:A', 30)
xls_urls.set_column('B:B', 10)
xls_urls.set_column('C:C', 8)
xls_urls.set_column('D:D', 15)
xls_urls.set_column('E:E', 15)
xls_urls.set_column('F:F', 30)
xls_urls.set_column('G:G', 17)
xls_urls.set_column('H:H', 19)
xls_parameters.set_column('A:A', 20)
xls_parameters.set_column('B:B', 30)
xls_parameters.set_column('C:C', 8)
xls_parameters.set_column('D:D', 15)
xls_parameters.set_column('E:E', 15)
xls_parameters.set_column('F:F', 30)
xls_parameters.set_column('G:G', 17)
xls_parameters.set_column('I:I', 13)
xls_parameters.set_column('H:H', 19)
xls_signatures.set_column('A:A', 30)
xls_signatures.set_column('B:B', 14)
xls_signatures.set_column('C:C', 15)
xls_signatures.set_column('D:D', 16)
xls_headers.set_column('A:A', 20)
xls_headers.set_column('B:B', 14)
xls_headers.set_column('C:C', 30)
xls_headers.set_column('D:D', 17)
xls_headers.set_column('E:E', 17)
xls_headers.set_column('F:F', 17)
xls_headers.set_column('G:G', 17)
xls_headers.set_column('H:I', 18)
xls_methods.set_column('A:A', 15)
xls_methods.set_column('B:B', 15)
xls_methods.set_column('C:C', 18)
xls_methods.set_column('D:D', 17)
####################### Print Overview ###############
row = 2
col = 0
xls_overview.write(row, col, 'Policy Settings')
xls_overview.write(row+1, col, '*********************************')
row += 2
xls_overview.write(row, col, 'Policy Name')
xls_overview.write(row + 1, col, 'Partition')
xls_overview.write(row + 2, col, 'Enforcement mode')
xls_overview.write(row + 3, col, 'Applied to vServers')
xls_overview.write(row + 4, col, 'Application Language')
xls_overview.write(row + 5, col, 'Brute force Protection')
xls_overview.write(row + 6, col, 'DataGuard')
xls_overview.write(row + 7, col, 'Antivirus')
xls_overview.write(row + 8, col, 'Created By')
xls_overview.write(row + 9, col, 'Created Date')
xls_overview.write(row + 10, col, 'Last Updated')
xls_overview.write(row + 11, col, 'Policy is case sensitive')
xls_overview.write(row + 12, col, 'Mask Credit Card Numbers in Request Log', cell_format)
xls_overview.write(row + 13, col, 'Trust XFF')
xls_overview.write(row + 14, col, 'Custom XFF')
xls_overview.write(row + 15, col, 'Trigger ASM iRule Events', cell_format)
col +=1
xls_overview.write(row, col, overview['name'])
xls_overview.write(row + 1, col, overview['partition'] )
xls_overview.write(row + 2, col, overview['enforcementMode'])
xls_overview.write(row + 3, col, '\n'.join(overview['virtualServers']), cell_format)
xls_overview.write(row + 4, col, overview['applicationLanguage'])
xls_overview.write(row + 5, col, overview['brute_enabled'] + " (on " + str(overview['Login_pages_totalItems']) + " login pages)")
xls_overview.write(row + 6, col, overview['data_guard_enabled'])
xls_overview.write(row + 7, col, overview['inspectHttpUploads'])
xls_overview.write(row + 8, col, overview['creatorName'])
xls_overview.write(row + 9, col, overview['createdDatetime'])
xls_overview.write(row + 10, col, overview['lastUpdateMicros'])
xls_overview.write(row + 11, col, overview['caseInsensitive'])
xls_overview.write(row + 12, col, overview['maskCreditCardNumbersInRequest'])
xls_overview.write(row + 13, col, overview['trustXff'])
xls_overview.write(row + 14, col, '\n'.join(overview['customXffHeaders']), cell_format)
xls_overview.write(row + 15, col, overview['triggerAsmIruleEvent'])
row = 0
col = 3
xls_overview.write(row, col, 'Learning Settings')
xls_overview.write(row + 1, col, '*********************************')
row += 2
xls_overview.write(row, col, 'Learning Mode')
xls_overview.write(row + 1, col, 'Trust All IPs')
xls_overview.write(row + 2, col, 'Trusted sources for learning')
xls_overview.write(row + 3, col, 'Trusted hours for learning')
xls_overview.write(row + 4, col, 'Untrusted sources for learning')
xls_overview.write(row + 5, col, 'Untrusted hours for learning')
xls_overview.write(row + 6, col, 'Learn File Types')
xls_overview.write(row + 7, col, 'Max File Types')
xls_overview.write(row + 8, col, 'Learn URLs')
xls_overview.write(row + 9, col, 'Max URLs')
xls_overview.write(row + 10, col, 'Learn Parameters')
xls_overview.write(row + 11, col, 'Max Parameters')
xls_overview.write(row + 12, col, 'Parameter Learning Level')
xls_overview.write(row + 13, col, 'Learn Integer values')
xls_overview.write(row + 14, col, 'Clasify Paramters')
xls_overview.write(row + 15, col, 'Learn Cookies')
xls_overview.write(row + 16, col, 'Max Cookies')
xls_overview.write(row + 17, col, 'Learn Redirection Domains')
xls_overview.write(row + 18, col, 'Full Inspection')
xls_overview.write(row + 19, col, 'Learn Inactive Entities')
col += 1
xls_overview.write(row, col, policy_builder['learningMode'])
xls_overview.write(row + 1, col, policy_builder['trustAllIps'])
xls_overview.write(row + 2, col, policy_builder['trusted_loosen_source'])
xls_overview.write(row + 3, col, policy_builder['trusted_loosen_hours'])
xls_overview.write(row + 4, col, policy_builder['untrusted_loosen_source'])
xls_overview.write(row + 5, col, policy_builder['untrusted_loosen_hours'])
xls_overview.write(row + 6, col, policy_builder['learnExplicitFiletypes'])
xls_overview.write(row + 7, col, policy_builder['maximumFileTypes'])
xls_overview.write(row + 8, col, policy_builder['learnExplicitUrls'])
xls_overview.write(row + 9, col, policy_builder['maximumUrls'])
xls_overview.write(row + 10, col, policy_builder['learnExplicitParameters'])
xls_overview.write(row + 11, col, policy_builder['maximumParameters'])
xls_overview.write(row + 12, col, policy_builder['parameterLearningLevel'])
xls_overview.write(row + 13, col, policy_builder['parametersIntegerValue'])
xls_overview.write(row + 14, col, policy_builder['classifyParameters'])
xls_overview.write(row + 15, col, policy_builder['learnExplicitCookies'])
xls_overview.write(row + 16, col, policy_builder['maximumCookies'])
xls_overview.write(row + 17, col, policy_builder['learnExplicitRedirectionDomains'])
xls_overview.write(row + 18, col, policy_builder['enableFullPolicyInspection'])
xls_overview.write(row + 19, col, policy_builder['learnInactiveEntities'])
#################### Print File Types ###############
row = 0
col = 0
row += 1
xls_file_types.write(row, col, 'File Type')
xls_file_types.write(row, col + 1, 'Staging',cell_format_center)
xls_file_types.write(row, col + 2, 'URL Length',cell_format_center)
xls_file_types.write(row, col + 3, 'Query Length',cell_format_center)
xls_file_types.write(row, col + 4, 'POST Length',cell_format_center)
xls_file_types.write(row, col + 5, 'Request Length',cell_format_center)
xls_file_types.write(row, col + 6, 'Last Modified')
row += 1
for key in file_types:
xls_file_types.write(row, col, key['name'])
xls_file_types.write(row, col + 1, key['performStaging'],cell_format_center)
xls_file_types.write(row, col + 2, key['urlLength'],cell_format_center)
xls_file_types.write(row, col + 3, key['queryStringLength'],cell_format_center)
xls_file_types.write(row, col + 4, key['postDataLength'],cell_format_center)
xls_file_types.write(row, col + 5, key['requestLength'],cell_format_center)
xls_file_types.write(row, col + 6, key['lastUpdateMicros'])
row += 1
#################### Print Parameters ###############
row = 0
col = 0
row += 1
xls_parameters.write(row, col, 'Parameter Name')
xls_parameters.write(row, col + 1, 'Enforcement')
xls_parameters.write(row, col + 2, 'Staging',cell_format_center)
xls_parameters.write(row, col + 3, 'Check Signatures',cell_format_center)
xls_parameters.write(row, col + 4, 'Check Meta-Char',cell_format_center)
xls_parameters.write(row, col + 5, 'Signature Overides',cell_format_center)
xls_parameters.write(row, col + 6, 'Meta-Char Overides',cell_format_center)
xls_parameters.write(row, col + 7, 'Is Sensitive', cell_format_center)
xls_parameters.write(row, col + 8, 'Last Modified')
row += 1
for key in parameters:
xls_parameters.write(row, col, key['name'])
xls_parameters.write(row, col + 1, key['enforcement'])
xls_parameters.write(row, col + 2, key['performStaging'],cell_format_center)
xls_parameters.write(row, col + 3, key['attackSignaturesCheck'],cell_format_center)
xls_parameters.write(row, col + 4, key['metacharsOnParameterValueCheck'],cell_format_center)
xls_parameters.write(row, col + 5, '\n'.join(key['signatureOverrides']),cell_format)
xls_parameters.write(row, col + 6, key['valueMetacharOverrides'],cell_format_center)
xls_parameters.write(row, col + 7, key['sensitiveParameter'], cell_format_center)
xls_parameters.write(row, col + 8, key['lastUpdateMicros'])
row += 1
#################### Print URLs ###############
row = 0
col = 0
row += 1
xls_urls.write(row, col, 'URL')
xls_urls.write(row, col + 1, 'Protocol')
xls_urls.write(row, col + 2, 'Staging',cell_format_center)
xls_urls.write(row, col + 3, 'Check Signatures',cell_format_center)
xls_urls.write(row, col + 4, 'Check Meta-Char',cell_format_center)
xls_urls.write(row, col + 5, 'Signature Overides',cell_format_center)
xls_urls.write(row, col + 6, 'Meta-Char Overides',cell_format_center)
xls_urls.write(row, col + 7, 'Last Modified')
row += 1
for key in urls:
xls_urls.write(row, col, key['name'])
xls_urls.write(row, col + 1, key['protocol'])
xls_urls.write(row, col + 2, key['performStaging'],cell_format_center)
xls_urls.write(row, col + 3, key['attackSignaturesCheck'],cell_format_center)
xls_urls.write(row, col + 4, key['metacharsOnUrlCheck'],cell_format_center)
xls_urls.write(row, col + 5, '\n'.join(key['signatureOverrides']),cell_format)
xls_urls.write(row, col + 6, key['metacharOverrides'],cell_format_center)
xls_urls.write(row, col + 7, key['lastUpdateMicros'])
row += 1
#################### Print Signatures ###############
row = 0
col = 0
xls_signatures.write(row, col, 'Signature Staging')
xls_signatures.write(row + 1, col, 'Place New Signature in Staging')
xls_signatures.write(row + 2, col, 'Latest Signature update')
col = 1
xls_signatures.write(row, col, signatures_overview['signatureStaging'])
xls_signatures.write(row + 1, col, signatures_overview['placeSignaturesInStaging'])
xls_signatures.write(row + 2, col, signatures_overview['latest_sig_update'])
col = 1
row = 4
xls_signatures.write(row, col, 'Total Signatures', cell_format_center)
xls_signatures.write(row, col + 1, 'Staging Signatures', cell_format_center)
xls_signatures.write(row, col + 2, 'Disabled Signatures', cell_format_center)
row += 1
xls_signatures.write(row, col, signatures_overview['total'], cell_format_center)
xls_signatures.write(row, col + 1, signatures_overview['staging'], cell_format_center)
xls_signatures.write(row, col + 2, signatures_overview['enabled'], cell_format_center)
col = 0
row += 3
xls_signatures.write(row, col, 'Signature Set Name')
xls_signatures.write(row, col + 1, 'Learn', cell_format_center)
xls_signatures.write(row, col + 2, 'Alarm', cell_format_center)
xls_signatures.write(row, col + 3, 'Block', cell_format_center)
row += 1
for key in signature_sets:
xls_signatures.write(row, col, key['name'], cell_format)
xls_signatures.write(row, col + 1, key['learn'], cell_format_center)
xls_signatures.write(row, col + 2, key['alarm'], cell_format_center)
xls_signatures.write(row, col + 3, key['block'], cell_format_center)
row += 1
#################### Print Headers ###############
row = 0
col = 0
row += 1
xls_headers.write(row, col, 'Header Name')
xls_headers.write(row, col + 1, 'Check Signatures')
xls_headers.write(row, col + 2, 'Signature Overides')
xls_headers.write(row, col + 3, 'Evasion Techniques', cell_format_center)
xls_headers.write(row, col + 4, 'URL Normalization', cell_format_center)
xls_headers.write(row, col + 5, 'Percent Decoding', cell_format_center)
xls_headers.write(row, col + 6, 'HTML Normalization', cell_format_center)
xls_headers.write(row, col + 7, 'Last Modified')
row += 1
for key in headers:
xls_headers.write(row, col, key['name'], cell_format)
xls_headers.write(row, col + 1, key['checkSignatures'], cell_format_center)
xls_headers.write(row, col + 2, '\n'.join(key['signatureOverrides']), cell_format)
xls_headers.write(row, col + 3, key['normalizationViolations'], cell_format_center)
xls_headers.write(row, col + 4, key['urlNormalization'], cell_format_center)
xls_headers.write(row, col + 5, key['percentDecoding'], cell_format_center)
xls_headers.write(row, col + 6, key['htmlNormalization'], cell_format_center)
xls_headers.write(row, col + 7, key['lastUpdateMicros'])
row += 1
row += 5
col = 0
row += 1
xls_headers.write(row, col, 'Cookie Name')
xls_headers.write(row, col + 1, 'Check Signatures', cell_format_center)
xls_headers.write(row, col + 2, 'Signature Overides')
xls_headers.write(row, col + 3, 'Enforcement Type', cell_format_center)
xls_headers.write(row, col + 4, 'Secure', cell_format_center)
xls_headers.write(row, col + 5, 'HTTPOnly', cell_format_center)
xls_headers.write(row, col + 6, 'HTTP Same Side', cell_format_center)
xls_headers.write(row, col + 7, 'Staging', cell_format_center)
xls_headers.write(row, col + 8, 'Last Modified')
row += 1
for key in cookies:
xls_headers.write(row, col, key['name'], cell_format)
xls_headers.write(row, col + 1, key['attackSignaturesCheck'], cell_format_center)
xls_headers.write(row, col + 2, '\n'.join(key['signatureOverrides']), cell_format)
xls_headers.write(row, col + 3, key['enforcementType'], cell_format_center)
xls_headers.write(row, col + 4, key['securedOverHttpsConnection'], cell_format_center)
xls_headers.write(row, col + 5, key['accessibleOnlyThroughTheHttpProtocol'], cell_format_center)
xls_headers.write(row, col + 6, key['insertSameSiteAttribute'], cell_format_center)
xls_headers.write(row, col + 7, key['performStaging'], cell_format_center)
xls_headers.write(row, col + 8, key['lastUpdateMicros'])
row += 1
#################### Print Redirection ###############
row = 0
xls_methods.write(row, col, 'Redirection Domains')
row += 2
xls_methods.write(row, col, 'Domain Name')
xls_methods.write(row, col + 1, 'Include SubDomains')
xls_methods.write(row, col + 2, 'Last Modified')
row += 1
for key in domains:
xls_methods.write(row, col, key['domainName'])
xls_methods.write(row, col + 1, key['includeSubdomains'])
xls_methods.write(row, col + 2, key['lastUpdateMicros'])
row += 1
#------------------ Print Allowed Responses --------------##
row += 3
col = 0
xls_methods.write(row, col, 'Allowed HTTP Response Codes')
row += 1
xls_methods.write(row, col, 'Response Code')
row +=1
for key in allowed_responses:
xls_methods.write(row, col, key, cell_format_center)
row +=1
#------------------ Print Methods --------------##
row += 3
col = 0
xls_methods.write(row, col, 'Allowed HTTP Methods')
row += 1
xls_methods.write(row, col, 'Method Name')
xls_methods.write(row, col + 1, 'Act as Method')
xls_methods.write(row, col + 2, 'Last Modified')
row += 1
for key in methods:
xls_methods.write(row, col, key['name'])
xls_methods.write(row, col + 1, key['actAsMethod'])
xls_methods.write(row, col + 2, key['lastUpdateMicros'])
row += 1
#################### Print Blocking Settings ###############
row = 0
col = 0
xls_settings.write(row, col, 'Blocking Settings')
row += 2
xls_settings.write(row, col, 'Violation')
xls_settings.write(row, col + 1, 'Learn', cell_format_center)
xls_settings.write(row, col + 2, 'Alarm', cell_format_center)
xls_settings.write(row, col + 3, 'Block', cell_format_center)
row += 1
for key in blocking_settings:
xls_settings.write(row, col, key['name'])
xls_settings.write(row, col + 1, key['learn'], cell_format_center)
xls_settings.write(row, col + 2, key['alarm'], cell_format_center)
xls_settings.write(row, col + 3, key['block'], cell_format_center)
row += 1
##-------------- Print Compliance ----------------#
row = 0
col = 6
xls_settings.write(row, col, 'Compliance Settings')
row += 2
xls_settings.write(row, col, 'HTTP Compliance Violation')
xls_settings.write(row, col + 1, 'Enabled', cell_format_center)
xls_settings.write(row, col + 2, 'Learn', cell_format_center)
row += 1
for key in compliance:
xls_settings.write(row, col, key['name'])
xls_settings.write(row, | |
global gdb_lastresult
global gdb_lastline
global gdb_stack_frame
global gdb_run_status
global gdb_stack_index
command_result_regex = re.compile("^\d+\^")
run_status_regex = re.compile("(^\d*\*)([^,]+)")
while True:
try:
line = pipe.readline()
if len(line) == 0:
log_debug("gdb_%s: broken pipe\n" % ("stdout" if pipe == gdb_process.stdout else "stderr"))
break
line = line.strip().decode(sys.getdefaultencoding())
log_debug("gdb_%s: %s\n" % ("stdout" if pipe == gdb_process.stdout else "stderr", line))
gdb_session_view.add_line("%s\n" % line, False)
if pipe != gdb_process.stdout:
continue
run_status = run_status_regex.match(line)
if run_status is not None:
gdb_run_status = run_status.group(2)
reason = re.search("(?<=reason=\")[a-zA-Z0-9\-]+(?=\")", line)
if reason is not None and reason.group(0).startswith("exited"):
log_debug("gdb: exiting %s" % line)
run_cmd("-gdb-exit")
elif not "running" in gdb_run_status and not gdb_shutting_down:
thread_id = re.search('thread-id="(\d+)"', line)
if thread_id is not None:
gdb_threads_view.select_thread(int(thread_id.group(1)))
sublime.set_timeout(update_cursor, 0)
if not line.startswith("(gdb)"):
gdb_lastline = line
if command_result_regex.match(line) is not None:
gdb_lastresult = line
if line.startswith("~"):
gdb_console_view.add_line(
line[2:-1].replace("\\n", "\n").replace("\\\"", "\"").replace("\\t", "\t"), False)
except:
traceback.print_exc()
if pipe == gdb_process.stdout:
log_debug("GDB session ended\n")
gdb_session_view.add_line("GDB session ended\n")
sublime.set_timeout(session_ended_status_message, 0)
gdb_stack_frame = None
global gdb_cursor_position
gdb_stack_index = -1
gdb_cursor_position = 0
gdb_run_status = None
sublime.set_timeout(update_view_markers, 0)
for view in gdb_views:
sublime.set_timeout(view.on_session_ended, 0)
sublime.set_timeout(cleanup, 0)
def cleanup():
  """Tear down the debugger UI after the GDB session ends.

  Closes every debugger view, restores the window layout and focused
  view that were backed up at launch, and releases the debug-output
  file handle (stdout is deliberately left open).
  """
  global __debug_file_handle
  for v in gdb_views:
    v.close()
  gdb_bkp_window.set_layout(gdb_bkp_layout)
  gdb_bkp_window.focus_view(gdb_bkp_view)
  handle = __debug_file_handle
  if handle is not None:
    if handle != sys.stdout:
      handle.close()
    __debug_file_handle = None
def programio(pty, tty):
  """Pump the debugged program's output into the GDB console view.

  Runs on a background thread started from GdbLaunch.launch. *pty* is
  the fd program output is read from (a pty master, or a temp-file fd
  when the pty fallback was taken); *tty* is the matching slave fd, or
  None in the temp-file case.
  """
  global gdb_process
  exception_count = 0
  class MyFD(object):
    # File-like wrapper that reads program output from either a real
    # pty or a plain file, and feeds user-typed stdin back to it.
    def __init__(self, pty, tty):
      self.pty = pty
      self.tty = tty
      self.off = 0  # read offset; only meaningful in the non-tty case
      self.queue = Queue.Queue()  # hand-off between UI thread and reader
    def on_done(self, s):
      # Input-panel callback: write the entered line to the program,
      # then unblock readline() via the queue.
      log_debug("programinput: %s\n" % s)
      log_debug("Wrote: %d bytes\n" % os.write(self.pty, bencode("%s\n" % s)))
      os.fsync(self.pty)
      self.queue.put(None)
    def get_input(self):
      # Must run on the UI thread (scheduled with sublime.set_timeout).
      sublime.active_window().show_input_panel("stdin input expected: ", "input", self.on_done, None, lambda: self.queue.put(None))
    def readline(self):
      # Read one line, one byte at a time, blocking at most ~5s per wait.
      ret = ""
      while True:
        if not os.isatty(self.pty):
          # Temp-file fallback: return at EOF if nothing was read yet.
          s = os.fstat(self.pty)
          if self.off >= s.st_size and len(ret) == 0:
            return ret
        else:
          import select
          r, w, x = select.select([self.pty], [self.pty], [], 5.0)
          if len(r) == 0 and len(w) != 0:
            # Nothing to read but the fd is writable: the program is
            # presumably waiting on stdin, so prompt the user.
            log_debug("Ready for input\n")
            sublime.set_timeout(self.get_input, 0)
            self.queue.get()  # block until on_done/cancel fires
            continue
          elif len(r) == 0:
            log_debug("timed out\n")
            break
        read = os.read(self.pty, 1)
        self.off += len(read)
        ret += bdecode(read)
        if len(read) == 0 or ret.endswith("\n"):
          break
      return ret
    def close(self):
      os.close(self.pty)
      if self.tty:
        os.close(self.tty)
  pipe = MyFD(pty, tty)
  # Pump until gdb exits; give up after 100 errors in total so a
  # persistent failure cannot spin this thread forever.
  while exception_count < 100:
    try:
      line = pipe.readline()
      if len(line) > 0:
        log_debug("programoutput: %s" % line)
        gdb_console_view.add_line(line, False)
      else:
        # No output: poll whether gdb is still alive, then back off.
        if gdb_process.poll() is not None:
          break
        time.sleep(0.1)
    except:
      traceback.print_exc()
      exception_count = exception_count + 1
  if pipe is not None:
    pipe.close()
# View object for the GDB command input panel (created by show_input()).
gdb_input_view = None
# Previously entered GDB commands, oldest first.
gdb_command_history = []
# Cursor into gdb_command_history, moved by GdbPrevCmd/GdbNextCmd.
gdb_command_history_pos = 0
def set_input(edit, text):
  """Replace the entire contents of the GDB input panel with *text*."""
  whole_panel = sublime.Region(0, gdb_input_view.size())
  gdb_input_view.erase(edit, whole_panel)
  gdb_input_view.insert(edit, 0, text)
class GdbPrevCmd(sublime_plugin.TextCommand):
  """Step backwards through the GDB command history in the input panel."""
  def run(self, edit):
    global gdb_command_history_pos
    pos = gdb_command_history_pos
    if pos > 0:
      pos -= 1
    gdb_command_history_pos = pos
    if pos < len(gdb_command_history):
      set_input(edit, gdb_command_history[pos])
class GdbNextCmd(sublime_plugin.TextCommand):
  """Step forwards through the GDB command history in the input panel.

  Moving past the newest entry clears the panel.
  """
  def run(self, edit):
    global gdb_command_history_pos
    history = gdb_command_history
    if gdb_command_history_pos < len(history):
      gdb_command_history_pos += 1
    if gdb_command_history_pos >= len(history):
      set_input(edit, "")
    else:
      set_input(edit, history[gdb_command_history_pos])
def show_input():
  """Open the GDB command input panel with the history cursor at the end."""
  global gdb_input_view
  global gdb_command_history_pos
  gdb_command_history_pos = len(gdb_command_history)
  panel = sublime.active_window().show_input_panel(
      "GDB", "", input_on_done, input_on_change, input_on_cancel)
  gdb_input_view = panel
def input_on_done(s):
  """Record the entered command in the history and forward it to gdb.

  The panel is reopened immediately unless the user typed "quit", so
  consecutive commands can be entered without re-invoking the command.
  """
  quitting = s.strip() == "quit"
  if not quitting:
    show_input()
  gdb_command_history.append(s)
  run_cmd(s)
def input_on_cancel():
  """No-op: cancelling the GDB input panel needs no cleanup."""
  pass
def input_on_change(s):
  """No-op: per-keystroke changes in the input panel are ignored."""
  pass
def is_running():
  """Return True while a gdb subprocess exists and has not yet exited."""
  if gdb_process is None:
    return False
  return gdb_process.poll() is None
class GdbInput(sublime_plugin.WindowCommand):
  """Window command that opens the GDB command input panel."""
  def run(self):
    show_input()
class GdbLaunch(sublime_plugin.WindowCommand):
def run(self):
global exec_settings
s = self.window.active_view().settings()
exec_choices = s.get("sublimegdb_executables")
if exec_choices is None or type(exec_choices) != dict:
# No executable specific settings, go ahead and launch
exec_settings = {}
self.launch()
return
def on_choose(index):
global exec_settings
if index == -1:
# User cancelled the panel, abort launch
return
exec_name = list(exec_choices)[index]
exec_settings = exec_choices[exec_name]
self.launch()
self.window.show_quick_panel(list(exec_choices), on_choose)
def launch(self):
global gdb_process
global gdb_server_process
global gdb_run_status
global gdb_bkp_window
global gdb_bkp_view
global gdb_bkp_layout
global gdb_shutting_down
global DEBUG
global DEBUG_FILE
view = self.window.active_view()
DEBUG = get_setting("debug", False, view)
DEBUG_FILE = expand_path(get_setting("debug_file", "stdout", view), self.window)
#if DEBUG:
#print("Will write debug info to file: %s" % DEBUG_FILE)
if (gdb_process is not None) and (gdb_process.poll() is None):
sublime.status_message("GDB is already running! - Killing")
gdb_process.terminate()
gdb_process.kill()
gdb_process = None
for view in sublime.active_window().views():
if "GDB" in view.name():
view.close()
commandline = get_setting("commandline", view=view)
commandline = ' --interpreter=mi ' + commandline
commandline = expand_path(commandline, self.window)
log_debug("Commandline : %s\n" % commandline)
arguments = expand_path(get_setting("arguments", ""), self.window)
log_debug("Program argv : %s\n" % arguments)
loaded_folders = self.window.folders()
if (loaded_folders == None) or ( len(loaded_folders) == 0):
sublime.error_message("No loaded folders")
sublime.run_command("new_window")
return
workingdir = loaded_folders[0] + '/'
log_debug("In directory : %s\n" % workingdir)
if not os.path.exists(workingdir):
sublime.error_message("The directory given does not exist: %s" % workingdir)
sublime.run_command("new_window")
return
# get env settings
gdb_env = get_setting("env", dict())
if 'DISPLAY' not in gdb_env:
gdb_env['DISPLAY'] = ':100'
env_copy = os.environ.copy()
env_copy.update(gdb_env)
# --------------------------------
predebug_command = get_setting("predebug_command", "")
print( subprocess.getstatusoutput( predebug_command ) )
# --------------------------------
executable_name = get_setting("executable_name", "")
if executable_name == '':
sublime.error_message("executable_name not set")
sublime.run_command("new_window")
return
process_call = 'gdb ' + commandline + executable_name
gdb_process = subprocess.Popen(process_call,
shell=True,
cwd=workingdir,
env=env_copy,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
log_debug("Process: %s\n" % gdb_process)
# Optionally Launch the GDB Server
gdb_server_cmd = get_setting("server_commandline", "notset")
gdb_server_dir = get_setting("server_workingdir", "notset")
if (gdb_server_cmd != "notset") and (gdb_server_dir != "notset"):
gdb_server_cmd = expand_path(gdb_server_cmd, self.window)
gdb_server_dir = expand_path(gdb_server_dir, self.window)
gdb_server_shell = get_setting("server_shell", False)
log_debug("gdb_server_cmd: %s" % gdb_server_cmd)
log_debug("gdb_server_dir: %s" % gdb_server_dir)
log_debug("gdb_server_dir: %s" % gdb_server_shell)
gdb_server_process = subprocess.Popen(gdb_server_cmd, shell=gdb_server_shell, cwd=gdb_server_dir, env=gdb_env)
'''
gdb_process = subprocess.Popen(commandline, shell=True, cwd=path, env=gdb_env,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
'''
gdb_bkp_window = sublime.active_window()
#back up current layout before opening the debug one
#it will be restored when debug is finished
gdb_bkp_layout = gdb_bkp_window.get_layout()
gdb_bkp_view = gdb_bkp_window.active_view()
gdb_bkp_window.set_layout(
{
"cols": [0.0, 1.0],
"rows": [0.0, 0.7, 1.0],
"cells": [[0, 0, 1, 1], [0, 1, 1, 2]]
}
)
for view in gdb_views:
if view.is_closed() and view.open_at_start():
view.open()
view.clear()
gdb_shutting_down = False
t = threading.Thread(target=gdboutput, args=(gdb_process.stdout,))
t.start()
t = threading.Thread(target=gdboutput, args=(gdb_process.stderr,))
t.start()
try:
raise Exception("Nope")
pty, tty = os.openpty()
name = os.ttyname(tty)
except:
pipe, name = tempfile.mkstemp()
pty, tty = pipe, None
log_debug("pty: %s, tty: %s, name: %s" % (pty, tty, name))
t = threading.Thread(target=programio, args=(pty,tty))
t.start()
try:
run_cmd("-gdb-show interpreter", True, timeout=get_setting("gdb_timeout", 20))
except:
sublime.error_message("""\
It seems you're not running gdb with the "mi" interpreter. Please add
"--interpreter=mi" to your gdb command line""")
gdb_process.stdin.write("quit\n")
return
run_cmd("-inferior-tty-set %s" % name, True)
run_cmd("-enable-pretty-printing")
run_cmd("-gdb-set target-async 1")
run_cmd("-gdb-set pagination off")
dis_asm_flavor = get_setting("disassembly_flavor", "att", view)
if dis_asm_flavor == "intel":
run_cmd("-gdb-set disassembly-flavor intel")
else:
run_cmd("-gdb-set disassembly-flavor att")
# if gdb_nonstop:
# run_cmd("-gdb-set non-stop on")
attach_cmd = get_setting("attach_cmd","notset")
if(attach_cmd != "notset"):
run_cmd(attach_cmd)
gdb_breakpoint_view.sync_breakpoints()
if(get_setting("run_after_init", True)):
gdb_run_status = "running"
if arguments:
run_cmd("-exec-arguments " + arguments)
run_cmd(get_setting("exec_cmd", "-exec-run"), True)
else:
gdb_run_status = "stopped"
show_input()
def is_enabled(self):
    """Launching gdb is only allowed while no session is active."""
    already_running = is_running()
    return not already_running
def is_visible(self):
    """Hide the launch command while a gdb session is active."""
    already_running = is_running()
    return not already_running
class GdbContinue(sublime_plugin.WindowCommand):
    """Resume execution of the stopped inferior."""

    def run(self):
        # Drop the stale cursor marker before the program starts moving again.
        global gdb_cursor_position
        gdb_cursor_position = 0
        update_view_markers()
        resume()

    def is_enabled(self):
        # Continuing only makes sense while a session exists and is stopped.
        if not is_running():
            return False
        return gdb_run_status != "running"

    def is_visible(self):
        return is_running()
class GdbExit(sublime_plugin.WindowCommand):
    """Shut down the gdb session and any attached gdb server process."""

    def run(self):
        global gdb_shutting_down
        gdb_shutting_down = True
        # The target must be interrupted before gdb will accept -gdb-exit.
        wait_until_stopped()
        run_cmd("-gdb-exit", True)
        if gdb_server_process:
            gdb_server_process.terminate()

    def is_enabled(self):
        return is_running()

    def is_visible(self):
        return is_running()
class GdbLoad(sublime_plugin.WindowCommand):
    """Download the program to the target (remote debugging)."""

    def run(self):
        load_command = get_setting("load_cmd", "-target-download")
        run_cmd(load_command)

    def is_enabled(self):
        if not is_running():
            return False
        return gdb_run_status != "running"

    def is_visible(self):
        if not is_running():
            return False
        return gdb_run_status != "running"
class GdbPause(sublime_plugin.WindowCommand):
    """Interrupt (pause) the running inferior."""

    def run(self):
        run_cmd("-exec-interrupt")

    def is_enabled(self):
        # Pausing is pointless unless the target is currently running.
        if not is_running():
            return False
        return gdb_run_status != "stopped"

    def is_visible(self):
        if not is_running():
            return False
        return gdb_run_status != "stopped"
class GdbStepOver(sublime_plugin.WindowCommand):
    """Step to the next source line, stepping over function calls."""

    def run(self):
        run_cmd("-exec-next")

    def is_enabled(self):
        if not is_running():
            return False
        return gdb_run_status != "running"

    def is_visible(self):
        return is_running()
class GdbStepInto(sublime_plugin.WindowCommand):
    """Step to the next source line, stepping into function calls."""

    def run(self):
        run_cmd("-exec-step")

    def is_enabled(self):
        if not is_running():
            return False
        return gdb_run_status != "running"

    def is_visible(self):
        return is_running()
class GdbNextInstruction(sublime_plugin.WindowCommand):
    """Step one machine instruction, stepping over calls."""

    def run(self):
        run_cmd("-exec-next-instruction")

    def is_enabled(self):
        if not is_running():
            return False
        return gdb_run_status != "running"

    def is_visible(self):
        return is_running()
class GdbStepOut(sublime_plugin.WindowCommand):
    """Run until the current function returns."""

    def run(self):
        run_cmd("-exec-finish")

    def is_enabled(self):
        if not is_running():
            return False
        return gdb_run_status != "running"

    def is_visible(self):
        return is_running()
class GdbAddWatch(sublime_plugin.TextCommand):
    """Toggle a watch expression, taken either from the variables view
    or from the word under the caret."""

    def run(self, edit):
        in_variables_view = (gdb_variables_view.is_open() and
                             self.view.id() == gdb_variables_view.get_view().id())
        if in_variables_view:
            # Resolve the variable on the caret's row of the variables view.
            row = self.view.rowcol(self.view.sel()[0].begin())[0]
            var = gdb_variables_view.get_variable_at_line(row)
            if var is None:
                sublime.status_message("Don't know how to watch that variable")
            else:
                gdb_breakpoint_view.toggle_watch(var.get_expression())
        else:
            # Fall back to the word under the first selection.
            word_region = self.view.word(self.view.sel()[0].begin())
            gdb_breakpoint_view.toggle_watch(self.view.substr(word_region))
class GdbToggleBreakpoint(sublime_plugin.TextCommand):
    """Toggle a breakpoint or watch, depending on which view holds the caret."""

    def run(self, edit):
        fn = self.view.file_name()
        view_id = self.view.id()
        bv = gdb_breakpoint_view
        if bv.is_open() and view_id == bv.get_view().id():
            # Caret in the breakpoint list: delete the breakpoint on that row.
            row = self.view.rowcol(self.view.sel()[0].begin())[0]
            if row < len(bv.breakpoints):
                bv.breakpoints[row].remove()
                bv.breakpoints.pop(row)
                bv.update_view()
        elif gdb_variables_view.is_open() and view_id == gdb_variables_view.get_view().id():
            # Caret in the variables view: toggle a watch on that variable.
            row = self.view.rowcol(self.view.sel()[0].begin())[0]
            var = gdb_variables_view.get_variable_at_line(row)
            if var is not None:
                bv.toggle_watch(var.get_expression())
        elif gdb_disassembly_view.is_open() and view_id == gdb_disassembly_view.get_view().id():
            # Disassembly view: toggle by address, i.e. the text before ':'.
            for sel in self.view.sel():
                line_text = self.view.substr(self.view.line(sel))
                addr = re.match(r"^[^:]+", line_text)
                if addr:
                    bv.toggle_breakpoint_addr(addr.group(0))
        elif fn is not None:
            # Ordinary source file: toggle by file name and 1-based line.
            for sel in self.view.sel():
                line, _ = self.view.rowcol(sel.a)
                bv.toggle_breakpoint(fn, line + 1)
        update_view_markers(self.view)
class GdbClick(sublime_plugin.TextCommand):
def run(self, edit):
if not is_running():
return
row, col = self.view.rowcol(self.view.sel()[0].a)
if gdb_variables_view.is_open() and self.view.id() == gdb_variables_view.get_view().id():
gdb_variables_view.expand_collapse_variable(self.view, | |
<filename>web/labio/SMWrapper.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Wrapper module for survey monkey api
"""
import time
import re
import traceback
import requests
import textblob
from .NPParser import NPExtractor
import nltk
# -------------------------------------------------------------------------------------------------
# Wrapper class for survey monkey api
# -------------------------------------------------------------------------------------------------
class SurveyApi():
    """
    Thin client for the SurveyMonkey v3 REST API.

    All calls go through a single bearer-authenticated ``requests``
    session; paginated endpoints are drained page by page, sleeping
    between follow-up requests.
    """
    __client = None
    __access_token = None
    __HOST = "https://api.surveymonkey.net"
    __ENDPOINTS = {
        "get_surveys": "/v3/surveys",
        "get_survey": "/v3/surveys/%s/details",
        "get_survey_data": "/v3/surveys/%s/responses/bulk",
        "get_survey_respondents": "/v3/surveys/%s/responses",
        "get_question_details": "/v3/surveys/%s/pages/%s/questions/%s",
        "get_collector_details": "/v3/surveys/%s/collectors",
        "get_response_details": "/v3/surveys/%s/responses/%s/details"
    }
    __HEADERS = {
        "Authorization": None,
        "Content-Type": "application/json"
    }

    def __init__(self, access_token, proxy=None):
        """Build the HTTP session and install the bearer token header."""
        self.__client = requests.session()
        if proxy:
            self.__client.proxies.update(proxy)
        self.__access_token = access_token
        self.__HEADERS['Authorization'] = "bearer %s" % access_token
        self.__client.headers.update(self.__HEADERS)

    def get_from_url(self, url, param=None):
        """GET *url* with optional query params and return the JSON body."""
        return self.__client.get(url, params=param).json()

    def get_paginated_results(self, url, page_interval, param=None):
        """
        Fetch every page of a paginated endpoint.

        Each page is expected to carry ``links['next']`` metadata pointing
        at the next page; *page_interval* seconds are slept between
        successive follow-up requests (the first follow-up is immediate).
        """
        page = self.get_from_url(url, param)
        collected = page['data']
        while 'next' in page['links']:
            page = self.get_from_url(page['links']['next'], param)
            collected += page['data']
            if 'next' in page['links']:
                time.sleep(page_interval)
        return collected

    def get_surveys(self, param=None):
        """Return the list of surveys visible to this account."""
        uri = self.__HOST + self.__ENDPOINTS["get_surveys"]
        return self.get_paginated_results(uri, 2, param)

    def get_survey(self, survey_id):
        """Return the survey details (pages, questions)."""
        uri = (self.__HOST + self.__ENDPOINTS["get_survey"]) % survey_id
        return self.get_from_url(uri)

    def get_survey_data(self, survey_id):
        """Return the bulk responses of a survey."""
        uri = (self.__HOST + self.__ENDPOINTS["get_survey_data"]) % survey_id
        return self.get_paginated_results(uri, 2)

    def get_survey_respondents(self, survey_id):
        """Return the respondents of a survey."""
        uri = (self.__HOST + self.__ENDPOINTS["get_survey_respondents"]) % survey_id
        return self.get_paginated_results(uri, 2)

    def get_question_details(self, survey_id, page_id, question_id):
        """Return the details of one question on one page."""
        uri = (self.__HOST + self.__ENDPOINTS["get_question_details"]) % (
            survey_id, page_id, question_id)
        return self.get_paginated_results(uri, 2)

    def get_collector_details(self, survey_id):
        """Return the collectors of a survey."""
        uri = (self.__HOST + self.__ENDPOINTS["get_collector_details"]) % (survey_id,)
        return self.get_paginated_results(uri, 2)

    def get_response_details(self, survey_id, response_id):
        """Return the details of a single response."""
        uri = (self.__HOST + self.__ENDPOINTS["get_response_details"]) % (
            survey_id, response_id)
        return self.get_from_url(uri)
# -------------------------------------------------------------------------------------------------
# Methods to build the data file structures
# -------------------------------------------------------------------------------------------------
class SurveyProcessor():
"""
Class to interface with SurveyAPI and process the survey data
"""
PUNCTUATION = ['.', ',', ':', '-', '?', '!', '%']
CONTRACTIONS = {
"ain't": "am not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"I'd": "I would",
"I'd've": "I would have",
"I'll": "I will",
"I'll've": "I will have",
"I'm": "I am",
"I've": "I have",
"isn't": "is not",
"it'd": "it would",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so is",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you would",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"
}
__api = None
__survey_id = None
__questions = None
__respondents = None
__answers = None
__open_ended = None
__collectors = None
def __init__(self, survey_id, access_token, proxy=None):
    """Remember the survey id and build the underlying API client."""
    self.__api = SurveyApi(access_token, proxy)
    self.__survey_id = survey_id
def __return_score(self, value):
    """
    Extract the leading (optionally signed) integer from a textual answer,
    e.g. "5 - Excellent" -> 5. Returns 0 when the text has no leading number.
    """
    # The pattern is anchored at the start, so re.match is equivalent to
    # taking the first finditer hit.
    match = re.match(r"^([-+]*[\s]*[0-9]+)", value)
    if not match:
        return 0
    return int(match.group().strip().replace(" ", ""))
def __create_question_type_list(self):
    """
    Build a lookup of question_id -> {'family': ..., 'choices': {...}}.

    The shape of 'choices' depends on the question family:
    - multiple_choice / single_choice: {choice_id: choice_text}
    - matrix: {row_id: {'text': row_text, 'data': {choice_id: choice_text}}}
    - open_ended with subtype 'multi': {row_id: {'text': row_text, 'data': {}}}
    Presentation "questions" (headings, images) are skipped entirely.
    """
    question_data = self.__api.get_survey(self.__survey_id)
    questions = {}
    for page in question_data['pages']:
        for question in page['questions']:
            if question['family'] not in ['presentation']:
                questions[question['id']] = {'family': question['family'], 'choices': {}}
                if question['family'] in ['multiple_choice', 'single_choice']:
                    # Flat mapping of choice id to its display text.
                    for choice in question['answers']['choices']:
                        questions[question['id']]['choices'][choice['id']] = choice['text']
                elif question['family'] == "matrix":
                    # Two-level mapping: one entry per row, each holding the
                    # column choices.
                    for row in question['answers']['rows']:
                        row_id = row['id']
                        questions[question['id']]['choices'][row_id] = {'text': row['text'],
                                                                        'data': {}}
                        for choice in question['answers']['choices']:
                            questions[question['id']]['choices'][row_id]['data'][choice['id']] = choice['text']
                elif question['family'] == "open_ended" and ('subtype' in question and question['subtype'] == "multi"):
                    # Multi-row open-ended: keep row labels, no fixed choices.
                    for row in question['answers']['rows']:
                        row_id = row['id']
                        questions[question['id']]['choices'][row_id] = {'text': row['text'],
                                                                        'data': {}}
    return questions
def __decontract(self, phrase):
    """
    Expand English contractions in *phrase* ("can't" -> "cannot") using
    the class-level CONTRACTIONS table, via plain substring replacement.
    """
    expanded = phrase  # .replace("´", "'")
    for contraction, expansion in self.CONTRACTIONS.items():
        expanded = expanded.replace(contraction, expansion)
    return expanded
def __clean_html(self, rec):
    """
    Strip HTML tags and surrounding whitespace from every string value
    of the record dictionary *rec*.

    :param rec: Dictionary whose string values may contain HTML markup.
    :returns: The same dictionary, mutated in place.
    """
    tag_re = re.compile(r'<[^>]+>')
    for key in rec:
        value = rec[key]
        # Only truthy strings are cleaned; None and non-string values
        # pass through untouched. isinstance (not `type(...) is`) is the
        # idiomatic type test; strip() replaces lstrip().rstrip().
        if value and isinstance(value, str):
            rec[key] = tag_re.sub('', value).strip()
    return rec
def __return_topics(self, question_id):
    """
    Collect the distinct non-empty topics recorded in the answer data
    for *question_id*, preserving first-seen order.
    """
    topics = []
    for answer in self.__answers:
        if answer['question_id'] != question_id:
            continue
        topic = answer['topic']
        if topic != '' and topic not in topics:
            topics.append(topic)
    return topics
def build_question_data(self):
    """
    Build and cache one record per (non-presentation) survey question,
    labelling them 'Question 1', 'Question 2', ... in survey order.
    """
    self.__questions = []
    details = self.__api.get_survey(self.__survey_id)
    label_no = 1
    for page in details['pages']:
        page_id = page['id']
        for question in page['questions']:
            # Presentation items (headings, images) carry no answers
            # and consume no label number.
            if question['family'] in ['presentation']:
                continue
            record = {
                "survey_id": self.__survey_id,
                "page_id": page_id,
                "question_id": question['id'],
                "question_label": 'Question ' + str(label_no),
                "question_heading": question['headings'][0]['heading'],
                "question_type": question['family']
            }
            self.__questions.append(self.__clean_html(record))
            label_no += 1
    return self.__questions
def build_respondent_data(self):
    """Build and cache one record per survey respondent."""
    self.__respondents = []
    for respondent in self.__api.get_survey_data(self.__survey_id):
        record = {
            "survey_id": self.__survey_id,
            "respondent_id": respondent['id'],
            "duration_seconds": respondent['total_time'],
            "start_date": respondent['date_created'],
            "end_date": respondent['date_modified'],
            "ip_address": respondent['ip_address'],
            "collector_id": respondent['collector_id'],
            "status": respondent['response_status'],
        }
        self.__respondents.append(self.__clean_html(record))
    return self.__respondents
def build_answer_data(self):
"""
build answer records
"""
self.__answers = []
questions = self.__create_question_type_list()
raw_data = self.__api.get_survey_data(self.__survey_id)
with open('raw_data.json', 'w') as fo:
fo.write(str(raw_data))
for answer in raw_data:
response_id = answer['id']
for page in answer['pages']:
page_id = page['id']
for question in page['questions']:
question_id = question['id']
if questions[question_id]['family'] not in ['presentation']:
for idx, item in enumerate(question['answers']):
record = {
"survey_id": self.__survey_id,
"page_id": page_id,
"respondent_id": response_id,
"question_id": question_id,
"question_type": questions[question_id]['family'],
"answer": None,
"topic": None,
"score": 0
}
if questions[question_id]['family'] == "open_ended":
record['answer'] = item['text']
if 'row_id' in item:
if len(questions[question_id]['choices']) > 0:
record['topic'] = questions[question_id]['choices'][item['row_id']]['text']
else:
if idx == 0:
record['topic'] = "X"
else:
record['topic'] = "Y"
else:
record['topic'] = "Open Ended"
elif questions[question_id]['family'] in ["multiple_choice",
"single_choice"]:
if 'choice_id' not in item:
record['topic'] = 'Open Ended'
record['answer'] = item['text']
else:
record['answer'] = questions[question_id]['choices'][item['choice_id']]
record['score'] = self.__return_score(questions[question_id]['choices'][item['choice_id']])
elif questions[question_id]['family'] == "matrix":
if 'row_id' not in item:
record['topic'] = 'Open Ended'
| |
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2013-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - <NAME> <<EMAIL>>, 2013-2021
# - <NAME> <<EMAIL>>, 2017-2021
# - <NAME> <<EMAIL>>, 2017
# - <NAME> <<EMAIL>>, 2018
# - <NAME> <<EMAIL>>, 2018-2021
# - dciangot <<EMAIL>>, 2018
# - <NAME> <<EMAIL>>, 2018-2019
# - <NAME> <<EMAIL>>, 2018
# - <NAME> <<EMAIL>>, 2019
# - <NAME> <<EMAIL>>, 2019
# - <NAME> <<EMAIL>>, 2019-2021
# - <NAME>' <<EMAIL>>, 2019
# - <NAME> <<EMAIL>>, 2019-2020
# - <NAME> <<EMAIL>>, 2020
# - <NAME> <<EMAIL>>, 2020
# - <NAME> <<EMAIL>>, 2020
# - <NAME> <<EMAIL>>, 2020
# - <NAME> <<EMAIL>>, 2020
# - <NAME> <<EMAIL>>, 2021
# - <NAME> <<EMAIL>>, 2021
from __future__ import division
import copy
import datetime
import imp
import json
import logging
import re
import time
from typing import TYPE_CHECKING
from dogpile.cache import make_region
from dogpile.cache.api import NoValue
from sqlalchemy import and_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql.expression import false
from rucio.common import constants
from rucio.common.config import config_get
from rucio.common.constants import SUPPORTED_PROTOCOLS, FTS_STATE
from rucio.common.exception import (InvalidRSEExpression, NoDistance,
RequestNotFound, RSEProtocolNotSupported,
RucioException, UnsupportedOperation)
from rucio.common.rse_attributes import get_rse_attributes
from rucio.common.types import InternalAccount
from rucio.common.utils import construct_surl
from rucio.core import did, message as message_core, request as request_core
from rucio.core.config import get as core_config_get
from rucio.core.monitor import record_counter, record_timer
from rucio.core.oidc import get_token_for_account_operation
from rucio.core.replica import add_replicas
from rucio.core.request import queue_requests, set_requests_state
from rucio.core.rse import get_rse_name, get_rse_vo, list_rses, get_rse_supported_checksums
from rucio.core.rse_expression_parser import parse_expression
from rucio.db.sqla import models, filter_thread_work
from rucio.db.sqla.constants import DIDType, RequestState, RSEType, RequestType, ReplicaState
from rucio.db.sqla.session import read_session, transactional_session
from rucio.rse import rsemanager as rsemgr
from rucio.transfertool.fts3 import FTS3Transfertool
from rucio.transfertool.mock import MockTransfertool
if TYPE_CHECKING:
from typing import List, Tuple
# Extra modules: Only imported if available
EXTRA_MODULES = {'globus_sdk': False}
for extra_module in EXTRA_MODULES:
    try:
        # imp.find_module raises ImportError when the module is absent;
        # the flag records availability without importing it yet.
        imp.find_module(extra_module)
        EXTRA_MODULES[extra_module] = True
    except ImportError:
        EXTRA_MODULES[extra_module] = False
if EXTRA_MODULES['globus_sdk']:
    # Imported conditionally so deployments without globus_sdk still load.
    from rucio.transfertool.globus import GlobusTransferTool  # pylint: disable=import-error
"""
The core transfer.py is specifically for handling transfer-requests, thus requests
where the external_id is already known.
Requests accessed by request_id are covered in the core request.py
"""
# Memcached-backed cache region with a 10-minute expiry.
REGION_SHORT = make_region().configure('dogpile.cache.memcached',
                                       expiration_time=600,
                                       arguments={'url': config_get('cache', 'url', False, '127.0.0.1:11211'), 'distributed_lock': True})
# Conveyor/OIDC submission settings, read once at import time.
ALLOW_USER_OIDC_TOKENS = config_get('conveyor', 'allow_user_oidc_tokens', False, False)
REQUEST_OIDC_SCOPE = config_get('conveyor', 'request_oidc_scope', False, 'fts:submit-transfer')
REQUEST_OIDC_AUDIENCE = config_get('conveyor', 'request_oidc_audience', False, 'fts:example')
WEBDAV_TRANSFER_MODE = config_get('conveyor', 'webdav_transfer_mode', False, None)
def _job_files_from_requests(files):
    """Convert request files into transfertool job files.

    Each file's 'sources' entry is a list of
    (src_rse, url, src_rse_id, rank) tuples; the transfertool only wants
    the plain URLs. Every other key is passed through unchanged.
    """
    job_files = []
    for file in files:
        job_file = {}
        for key in file:
            if key == 'sources':
                job_file[key] = [source[1] for source in file[key]]
            else:
                job_file[key] = file[key]
        job_files.append(job_file)
    return job_files


def submit_bulk_transfers(external_host, files, transfertool='fts3', job_params=None, timeout=None, user_transfer_job=False, logger=logging.log):
    """
    Submit transfer request to a transfertool.

    :param external_host: External host name as string
    :param files: List of Dictionary containing request file.
    :param transfertool: Transfertool as a string ('fts3', 'globus' or 'mock').
    :param job_params: Metadata key/value pairs for all files as a dictionary.
    :param timeout: Submission timeout in seconds.
    :param user_transfer_job: Flag for user-initiated transfer jobs.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: Transfertool external ID, or None for an unrecognised transfertool.
    """
    # Bug fix: the default used to be a shared mutable dict literal ({});
    # use None and create a fresh dict per call instead.
    if job_params is None:
        job_params = {}
    record_counter('core.request.submit_transfer')
    transfer_id = None
    if transfertool == 'fts3':
        start_time = time.time()
        job_files = _job_files_from_requests(files)
        # getting info about account and OIDC support of the RSEs
        use_oidc = job_params.get('use_oidc', False)
        transfer_token = None
        if use_oidc:
            logger(logging.DEBUG, 'OAuth2/OIDC available at RSEs')
            account = job_params.get('account', None)
            getadmintoken = False
            if ALLOW_USER_OIDC_TOKENS is False:
                getadmintoken = True
            logger(logging.DEBUG, 'Attempting to get a token for account %s. Admin token option set to %s' % (account, getadmintoken))
            # find the appropriate OIDC token and exchange it (for user accounts) if necessary
            token_dict = get_token_for_account_operation(account, req_audience=REQUEST_OIDC_AUDIENCE, req_scope=REQUEST_OIDC_SCOPE, admin=getadmintoken)
            if token_dict is not None:
                logger(logging.DEBUG, 'Access token has been granted.')
                if 'token' in token_dict:
                    logger(logging.DEBUG, 'Access token used as transfer token.')
                    transfer_token = token_dict['token']
        transfer_id = FTS3Transfertool(external_host=external_host, token=transfer_token).submit(files=job_files, job_params=job_params, timeout=timeout)
        record_timer('core.request.submit_transfers_fts3', (time.time() - start_time) * 1000 / len(files))
    elif transfertool == 'globus':
        logger(logging.DEBUG, '... Starting globus xfer ...')
        job_files = _job_files_from_requests(files)
        logger(logging.DEBUG, 'job_files: %s' % job_files)
        transfer_id = GlobusTransferTool(external_host=None).bulk_submit(submitjob=job_files, timeout=timeout)
    elif transfertool == 'mock':
        transfer_id = MockTransfertool(external_host=None).submit(files, None)
    return transfer_id
@transactional_session
def prepare_sources_for_transfers(transfers, session=None):
    """
    Prepare the sources for transfers.

    Moves each request row from QUEUED to the state recorded in its
    transfer dict, and flags the chosen source replicas as in use
    (creating Source rows on the fly when none exist).

    :param transfers: Dictionary keyed by request_id containing request transfer info.
    :param session: Database session to use.
    :raises RequestNotFound: if a request is missing or not in QUEUED state.
    :raises RucioException: on database integrity errors.
    """
    try:
        for request_id in transfers:
            # Atomically claim the request: only a row still in QUEUED state
            # is updated, so rowcount == 0 means it vanished or was taken.
            rowcount = session.query(models.Request)\
                .filter_by(id=request_id)\
                .filter(models.Request.state == RequestState.QUEUED)\
                .update({'state': transfers[request_id]['state'],
                         'external_id': transfers[request_id]['external_id'],
                         'external_host': transfers[request_id]['external_host'],
                         'dest_url': transfers[request_id]['dest_url'],
                         'submitted_at': datetime.datetime.utcnow()},
                        synchronize_session=False)
            if rowcount == 0:
                raise RequestNotFound("Failed to prepare transfer: request %s does not exist or is not in queued state" % (request_id))
            if 'file' in transfers[request_id]:
                file = transfers[request_id]['file']
                for src_rse, src_url, src_rse_id, rank in file['sources']:
                    # Mark an existing Source row for this request/RSE as
                    # in use ...
                    src_rowcount = session.query(models.Source)\
                        .filter_by(request_id=request_id)\
                        .filter(models.Source.rse_id == src_rse_id)\
                        .update({'is_using': True}, synchronize_session=False)
                    if src_rowcount == 0:
                        # ... or create it; flush is deferred to the
                        # enclosing transaction.
                        models.Source(request_id=file['metadata']['request_id'],
                                      scope=file['metadata']['scope'],
                                      name=file['metadata']['name'],
                                      rse_id=src_rse_id,
                                      dest_rse_id=file['metadata']['dest_rse_id'],
                                      ranking=rank if rank else 0,
                                      bytes=file['metadata']['filesize'],
                                      url=src_url,
                                      is_using=True).\
                            save(session=session, flush=False)
    except IntegrityError as error:
        raise RucioException(error.args)
@transactional_session
def set_transfers_state(transfers, submitted_at, session=None):
    """
    Update the transfer info of a request and emit a state-change message.

    :param transfers: Dictionary keyed by request_id containing request transfer info.
    :param submitted_at: Submission timestamp applied to every request.
    :param session: Database session to use.
    :raises RucioException: if a request is missing / not in SUBMITTING state,
                            or on database integrity errors.
    """
    try:
        for request_id in transfers:
            # Only rows still in SUBMITTING state may be promoted.
            rowcount = session.query(models.Request)\
                .filter_by(id=request_id)\
                .filter(models.Request.state == RequestState.SUBMITTING)\
                .update({'state': transfers[request_id]['state'],
                         'external_id': transfers[request_id]['external_id'],
                         'external_host': transfers[request_id]['external_host'],
                         'source_rse_id': transfers[request_id]['src_rse_id'],
                         'submitted_at': submitted_at},
                        synchronize_session=False)
            if rowcount == 0:
                raise RucioException("Failed to set requests %s tansfer %s: request doesn't exist or is not in SUBMITTING state" % (request_id, transfers[request_id]))
            request_type = transfers[request_id].get('request_type', None)
            # Notification payload for the messaging layer.
            msg = {'request-id': request_id,
                   'request-type': request_type,
                   'scope': transfers[request_id]['scope'].external,
                   'name': transfers[request_id]['name'],
                   'src-rse-id': transfers[request_id]['metadata'].get('src_rse_id', None),
                   'src-rse': transfers[request_id]['metadata'].get('src_rse', None),
                   'dst-rse-id': transfers[request_id]['metadata'].get('dst_rse_id', None),
                   'dst-rse': transfers[request_id]['metadata'].get('dst_rse', None),
                   'state': transfers[request_id]['state'],
                   'activity': transfers[request_id]['metadata'].get('activity', None),
                   'file-size': transfers[request_id]['metadata'].get('filesize', None),
                   'bytes': transfers[request_id]['metadata'].get('filesize', None),
                   'checksum-md5': transfers[request_id]['metadata'].get('md5', None),
                   'checksum-adler': transfers[request_id]['metadata'].get('adler32', None),
                   'external-id': transfers[request_id]['external_id'],
                   'external-host': transfers[request_id]['external_host'],
                   'queued_at': str(submitted_at)}
            if transfers[request_id]['scope'].vo != 'def':
                # Multi-VO deployments tag the message with the VO name.
                msg['vo'] = transfers[request_id]['scope'].vo
            if msg['request-type']:
                transfer_status = '%s-%s' % (msg['request-type'].name, msg['state'].name)
            else:
                transfer_status = 'transfer-%s' % msg['state']
            transfer_status = transfer_status.lower()
            message_core.add_message(transfer_status, msg, session=session)
    except IntegrityError as error:
        raise RucioException(error.args)
def bulk_query_transfers(request_host, transfer_ids, transfertool='fts3', timeout=None, logger=logging.log):
    """
    Query the status of a batch of transfers.

    :param request_host: Name of the external host.
    :param transfer_ids: List of (External-ID as a 32 character hex string)
    :param transfertool: Transfertool name as a string ('fts3' or 'globus').
    :param timeout: Query timeout in seconds.
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :returns: Request status information as a dictionary.
    :raises NotImplementedError: for an unsupported transfertool.
    """
    # Cleanup: the original wrapped both backend queries in
    # `try: ... except Exception: raise`, which is a no-op; removed.
    record_counter('core.request.bulk_query_transfers')
    if transfertool == 'fts3':
        start_time = time.time()
        fts_resps = FTS3Transfertool(external_host=request_host).bulk_query(transfer_ids=transfer_ids, timeout=timeout)
        record_timer('core.request.bulk_query_transfers', (time.time() - start_time) * 1000 / len(transfer_ids))
        for transfer_id in transfer_ids:
            if transfer_id not in fts_resps:
                # Flag ids the backend did not report on.
                fts_resps[transfer_id] = Exception("Transfer id %s is not returned" % transfer_id)
            if fts_resps[transfer_id] and not isinstance(fts_resps[transfer_id], Exception):
                # Map terminal FTS file states onto Rucio request states.
                for request_id in fts_resps[transfer_id]:
                    if fts_resps[transfer_id][request_id]['file_state'] in (FTS_STATE.FAILED,
                                                                            FTS_STATE.FINISHEDDIRTY,
                                                                            FTS_STATE.CANCELED):
                        fts_resps[transfer_id][request_id]['new_state'] = RequestState.FAILED
                    elif fts_resps[transfer_id][request_id]['file_state'] in FTS_STATE.FINISHED:
                        # NOTE(review): `in FTS_STATE.FINISHED` is substring
                        # membership if FINISHED is a string; `==` looks
                        # intended — confirm before changing.
                        fts_resps[transfer_id][request_id]['new_state'] = RequestState.DONE
        return fts_resps
    elif transfertool == 'globus':
        start_time = time.time()
        logger(logging.DEBUG, 'transfer_ids: %s' % transfer_ids)
        responses = GlobusTransferTool(external_host=None).bulk_query(transfer_ids=transfer_ids, timeout=timeout)
        record_timer('core.request.bulk_query_transfers', (time.time() - start_time) * 1000 / len(transfer_ids))
        # Map the globus task status strings onto Rucio request states.
        for task_id, status in responses.items():
            if status == 'FAILED':
                responses[task_id] = RequestState.FAILED
            elif status == 'SUCCEEDED':
                responses[task_id] = RequestState.DONE
            else:
                responses[task_id] = RequestState.SUBMITTED
        return responses
    raise NotImplementedError
@transactional_session
def set_transfer_update_time(external_host, transfer_id, update_time=None, session=None):
    """
    Update the state of a request. Fails silently if the transfer_id does not exist.

    :param external_host: Selected external host as string in format protocol://fqdn:port
    :param transfer_id: External transfer job id as a string.
    :param update_time: Time stamp; defaults to the current UTC time.
    :param session: Database session to use.
    :raises RucioException: on database integrity errors.
    :raises UnsupportedOperation: if no SUBMITTED request matches transfer_id.
    """
    record_counter('core.request.set_transfer_update_time')
    # Bug fix: the old default `update_time=datetime.datetime.utcnow()` was
    # evaluated once at import time, so every defaulted call stamped the
    # module-load timestamp. Compute the timestamp per call instead.
    if update_time is None:
        update_time = datetime.datetime.utcnow()
    try:
        rowcount = session.query(models.Request).filter_by(external_id=transfer_id, state=RequestState.SUBMITTED).update({'updated_at': update_time}, synchronize_session=False)
    except IntegrityError as error:
        raise RucioException(error.args)
    if not rowcount:
        raise UnsupportedOperation("Transfer %s doesn't exist or its status is not submitted." % (transfer_id))
def query_latest(external_host, state, last_nhours=1, logger=logging.log):
"""
Query the latest transfers in last n hours with state.
:param external_host: FTS host name as a string.
:param state: FTS job state as a string or a dictionary.
:param last_nhours: Latest n hours as an integer.
:param logger: Optional decorated logger that can be passed from the calling daemons or servers.
:returns: Requests status information as a dictionary.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.