input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>python-src/fastpdb/__init__.py
__name__ = "fastpdb"
__author__ = "<NAME>"
__all__ = ["PDBFile"]
__version__ = "1.0.1"
import numpy as np
import biotite
import biotite.structure as struc
import biotite.structure.io.pdb as pdb
from .fastpdb import PDBFile as RustPDBFile
class PDBFile(biotite.TextFile):
r"""
This class represents a PDB file.
This class only provides support for reading/writing the pure atom
information (``ATOM``, ``HETATM``, ``MODEL`` and ``ENDMDL``
records).
``TER`` records cannot be written.
See also
--------
PDBxFile
Examples
--------
Load a ``\\*.pdb`` file, modify the structure and save the new
structure into a new file:
>>> import os.path
>>> file = PDBFile.read(os.path.join(path_to_structures, "1l2y.pdb"))
>>> array_stack = file.get_structure()
>>> array_stack_mod = rotate(array_stack, [1,2,3])
>>> file = PDBFile()
>>> file.set_structure(array_stack_mod)
>>> file.write(os.path.join(path_to_directory, "1l2y_mod.pdb"))
"""
def __init__(self):
    """Create an empty PDB file backed by a fresh Rust parser."""
    super().__init__()
    # Rust-backed parser; holds no lines until read() or set_structure()
    self._pdb_file = RustPDBFile([])
@classmethod
def read(cls, file):
    """
    Read a PDB file.

    Parameters
    ----------
    file : file path or file-like object
        The file to be read; passed through to the base class
        ``read()`` unchanged.

    Returns
    -------
    file_object : PDBFile
        The parsed file object.
    """
    file = super().read(file)
    # Mirror the text lines read by the base class into the Rust parser,
    # which performs all subsequent coordinate/annotation parsing
    file._pdb_file = RustPDBFile(file.lines)
    return file
def get_model_count(self):
    """
    Get the number of models contained in the PDB file.

    Returns
    -------
    model_count : int
        The number of models.
    """
    # Counting is delegated entirely to the Rust parser
    return self._pdb_file.get_model_count()
def get_coord(self, model=None):
    """
    Get only the coordinates of the PDB file.

    Parameters
    ----------
    model : int, optional
        If this parameter is given, the function will return a
        2D coordinate array from the atoms corresponding to the
        given model number (starting at 1).
        Negative values are used to index models starting from the
        last model instead of the first model.
        If this parameter is omitted, a 3D coordinate array
        containing all models will be returned, even if
        the structure contains only one model.

    Returns
    -------
    coord : ndarray, shape=(m,n,3) or shape=(n,3), dtype=float
        The coordinates read from the ``ATOM`` and ``HETATM``
        records of the file.

    Notes
    -----
    Note that :func:`get_coord()` may output more coordinates than
    the atom array (stack) from the corresponding
    :func:`get_structure()` call has.
    The reason for this is, that :func:`get_structure()` filters
    *altloc* IDs, while `get_coord()` does not.

    Examples
    --------
    Read an :class:`AtomArrayStack` from multiple PDB files, where
    each PDB file contains the same atoms but different positions.
    This is an efficient approach when a trajectory is spread into
    multiple PDB files, as done e.g. by the *Rosetta* modeling
    software.

    For the purpose of this example, the PDB files are created from
    an existing :class:`AtomArrayStack`.

    >>> import os.path
    >>> from tempfile import gettempdir
    >>> file_names = []
    >>> for i in range(atom_array_stack.stack_depth()):
    ...     pdb_file = PDBFile()
    ...     pdb_file.set_structure(atom_array_stack[i])
    ...     file_name = os.path.join(gettempdir(), f"model_{i+1}.pdb")
    ...     pdb_file.write(file_name)
    ...     file_names.append(file_name)
    >>> print(file_names)
    ['...model_1.pdb', '...model_2.pdb', ..., '...model_38.pdb']

    Now the PDB files are used to create an :class:`AtomArrayStack`,
    where each model represents a different model.

    Construct a new :class:`AtomArrayStack` with annotations taken
    from one of the created files used as template and coordinates
    from all of the PDB files.

    >>> template_file = PDBFile.read(file_names[0])
    >>> template = template_file.get_structure()
    >>> coord = []
    >>> for i, file_name in enumerate(file_names):
    ...     pdb_file = PDBFile.read(file_name)
    ...     coord.append(pdb_file.get_coord(model=1))
    >>> new_stack = from_template(template, np.array(coord))

    The newly created :class:`AtomArrayStack` should now be equal to
    the :class:`AtomArrayStack` the PDB files were created from.

    >>> print(np.allclose(new_stack.coord, atom_array_stack.coord))
    True
    """
    # Parsing is delegated to the Rust backend; unlike get_structure()
    # no altloc filtering is applied here
    if model is None:
        coord = self._pdb_file.parse_coord_multi_model()
    else:
        coord = self._pdb_file.parse_coord_single_model(model)
    return coord
def get_structure(self, model=None, altloc="first", extra_fields=None, include_bonds=False):
    """
    Get an :class:`AtomArray` or :class:`AtomArrayStack` from the PDB file.

    Parameters
    ----------
    model : int, optional
        If this parameter is given, the function will return an
        :class:`AtomArray` from the atoms corresponding to the given
        model number (starting at 1).
        Negative values are used to index models starting from the
        last model instead of the first model.
        If this parameter is omitted, an :class:`AtomArrayStack`
        containing all models will be returned, even if the
        structure contains only one model.
    altloc : {'first', 'occupancy', 'all'}
        This parameter defines how *altloc* IDs are handled:
            - ``'first'`` - Use atoms that have the first
              *altloc* ID appearing in a residue.
            - ``'occupancy'`` - Use atoms that have the *altloc* ID
              with the highest occupancy for a residue.
            - ``'all'`` - Use all atoms.
              Note that this leads to duplicate atoms.
              When this option is chosen, the ``altloc_id``
              annotation array is added to the returned structure.
    extra_fields : list of str, optional
        The strings in the list are optional annotation categories
        that should be stored in the output array or stack.
        These are valid values:
        ``'atom_id'``, ``'b_factor'``, ``'occupancy'`` and
        ``'charge'``.
    include_bonds : bool, optional
        If set to true, a :class:`BondList` will be created for the
        resulting :class:`AtomArray` containing the bond information
        from the file.
        All bonds have :attr:`BondType.ANY`, since the PDB format
        does not support bond orders.

    Returns
    -------
    array : AtomArray or AtomArrayStack
        The return type depends on the `model` parameter.
    """
    # Determine which optional annotation columns the Rust parser must emit
    if extra_fields is not None:
        include_atom_id = "atom_id" in extra_fields
        include_b_factor = "b_factor" in extra_fields
        include_occupancy = "occupancy" in extra_fields
        include_charge = "charge" in extra_fields
    else:
        include_atom_id = False
        include_b_factor = False
        include_occupancy = False
        include_charge = False
    if include_bonds:
        # Required for mapping the bonded atom IDs to atom indices
        include_atom_id = True
    if altloc == "occupancy":
        # Occupancy values are needed for the altloc selection below
        include_occupancy = True

    if model is None:
        coord = self._pdb_file.parse_coord_multi_model()
        # For a stack, the annotations are parsed from the first model
        annotations = self._pdb_file.parse_annotations(
            1,
            include_atom_id, include_b_factor,
            include_occupancy, include_charge
        )
    else:
        coord = self._pdb_file.parse_coord_single_model(model)
        annotations = self._pdb_file.parse_annotations(
            model,
            include_atom_id, include_b_factor,
            include_occupancy, include_charge
        )
    (
        chain_id, res_id, ins_code, res_name,
        hetero, atom_name, element, altloc_id,
        atom_id, b_factor, occupancy, charge
    ) = annotations
    # Interpret uint32 arrays as unicode arrays
    chain_id = np.frombuffer(chain_id, dtype="U4")
    ins_code = np.frombuffer(ins_code, dtype="U1")
    res_name = np.frombuffer(res_name, dtype="U3")
    atom_name = np.frombuffer(atom_name, dtype="U6")
    element = np.frombuffer(element, dtype="U2")
    altloc_id = np.frombuffer(altloc_id, dtype="U1")

    # A 3D coordinate array implies multiple models -> AtomArrayStack
    if coord.ndim == 3:
        atoms = struc.AtomArrayStack(coord.shape[0], coord.shape[1])
        atoms.coord = coord
    else:
        atoms = struc.AtomArray(coord.shape[0])
        atoms.coord = coord

    atoms.chain_id = chain_id
    atoms.res_id = res_id
    atoms.ins_code = ins_code
    atoms.res_name = res_name
    atoms.hetero = hetero
    atoms.atom_name = atom_name
    atoms.element = element

    for field in (extra_fields if extra_fields is not None else []):
        if field == "atom_id":
            # Copy is necessary to avoid double masking in
            # later altloc ID filtering
            atoms.set_annotation("atom_id", atom_id.copy())
        elif field == "charge":
            atoms.set_annotation("charge", charge)
        elif field == "occupancy":
            atoms.set_annotation("occupancy", occupancy)
        elif field == "b_factor":
            atoms.set_annotation("b_factor", b_factor)
        else:
            raise ValueError(f"Unknown extra field: {field}")

    # Transfer the unit cell ('CRYST1'), if the file provides one
    box = self._pdb_file.parse_box()
    if box is None:
        atoms.box = None
    else:
        len_a, len_b, len_c, alpha, beta, gamma = box
        box = struc.vectors_from_unitcell(
            len_a, len_b, len_c,
            np.deg2rad(alpha), np.deg2rad(beta), np.deg2rad(gamma)
        )
        if isinstance(atoms, struc.AtomArray):
            atoms.box = box
        else:
            # The same unit cell is replicated for every model in the stack
            atoms.box = np.repeat(
                box[np.newaxis, ...], atoms.stack_depth(), axis=0
            )

    # Filter altloc IDs
    # ('altloc_filter' instead of 'filter' to avoid shadowing the builtin)
    if altloc == "occupancy":
        altloc_filter = struc.filter_highest_occupancy_altloc(
            atoms, altloc_id, occupancy
        )
        atoms = atoms[..., altloc_filter]
        atom_id = atom_id[altloc_filter] if atom_id is not None else None
    elif altloc == "first":
        altloc_filter = struc.filter_first_altloc(atoms, altloc_id)
        atoms = atoms[..., altloc_filter]
        atom_id = atom_id[altloc_filter] if atom_id is not None else None
    elif altloc == "all":
        atoms.set_annotation("altloc_id", altloc_id)
    else:
        raise ValueError(f"'{altloc}' is not a valid 'altloc' option")

    if include_bonds:
        bond_list = struc.BondList(
            atoms.array_length(), self._pdb_file.parse_bonds(atom_id)
        )
        bond_list = bond_list.merge(struc.connect_via_residue_names(
            atoms,
            # The information for non-hetero residues and water
            # are not part of CONECT records
            (~atoms.hetero) | struc.filter_solvent(atoms)
        ))
        # Remove bond order from inter residue bonds for consistency
        bond_list.remove_bond_order()
        atoms.bonds = bond_list

    return atoms
def set_structure(self, atoms):
"""
Set the :class:`AtomArray` or :class:`AtomArrayStack` for the
file.
This makes also use of the optional annotation arrays
``'atom_id'``, ``'b_factor'``, ``'occupancy'`` and ``'charge'``.
If the atom array (stack) contains the annotation ``'atom_id'``,
these values will be used for atom numbering instead of
continuous numbering.
Parameters
----------
array : AtomArray or AtomArrayStack
The array or stack to be saved into this file. If a stack
is given, each array in the stack is saved as separate
model.
Notes
-----
If `array` has an associated :class:`BondList`, ``CONECT``
records are also written for all non-water hetero residues
and all inter-residue connections.
"""
# Reset lines of text
self._pdb_file = RustPDBFile([])
# Write 'CRYST1' record
if atoms.box is not None:
box = atoms.box
if box.ndim == 3:
box = box[0]
len_a, len_b, len_c, alpha, beta, gamma \
= struc.unitcell_from_vectors(box)
self._pdb_file.write_box(
| |
total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(list[EntityLinkingFeedback], status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_linking_feedback" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/linker/linkingfeedback/all', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[EntityLinkingFeedback]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_all_linking_feedback_with_features(self, **kwargs):  # noqa: E501
    """Returns all feedback submitted along with the features of pairwise entities  # noqa: E501

    Convenience wrapper around
    ``get_all_linking_feedback_with_features_with_http_info`` that returns
    only the deserialized payload instead of the full
    (data, status_code, headers) tuple.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_all_linking_feedback_with_features(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: list[EntityLinkingFeatures]
    """
    # Force payload-only return, overriding any caller-supplied value
    options = dict(kwargs, _return_http_data_only=True)
    return self.get_all_linking_feedback_with_features_with_http_info(**options)  # noqa: E501
def get_all_linking_feedback_with_features_with_http_info(self, **kwargs):  # noqa: E501
    """Returns all feedback submitted along with the features of pairwise entities  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_all_linking_feedback_with_features_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without HTTP status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(list[EntityLinkingFeatures], status_code(int), headers(HTTPHeaderDict))
    """
    # Snapshot locals first: this captures `self` and the raw `kwargs` dict
    # before any other local names are bound
    local_var_params = locals()

    # This endpoint takes no path/query parameters of its own
    all_params = [
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into local_var_params
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_all_linking_feedback_with_features" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    # NOTE(review): unlike the sibling endpoints this omits 'http_auth' —
    # presumably mirrors the API spec; confirm.
    auth_settings = ['openlattice_auth']  # noqa: E501

    return self.api_client.call_api(
        '/linker/linkingfeedback/features/all', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[EntityLinkingFeatures]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def get_linking_feedback_with_features(self, entity_key_pair, **kwargs):  # noqa: E501
    """Returns the feedback on the given entity pair along with their features  # noqa: E501

    Convenience wrapper around
    ``get_linking_feedback_with_features_with_http_info`` that returns
    only the deserialized payload instead of the full
    (data, status_code, headers) tuple.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_linking_feedback_with_features(entity_key_pair, async_req=True)
    >>> result = thread.get()

    :param entity_key_pair: (required)
    :type entity_key_pair: EntityKeyPair
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: EntityLinkingFeatures
    """
    # Force payload-only return, overriding any caller-supplied value
    options = dict(kwargs, _return_http_data_only=True)
    return self.get_linking_feedback_with_features_with_http_info(
        entity_key_pair, **options)  # noqa: E501
def get_linking_feedback_with_features_with_http_info(self, entity_key_pair, **kwargs):  # noqa: E501
    """Returns the feedback on the given entity pair along with their features  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_linking_feedback_with_features_with_http_info(entity_key_pair, async_req=True)
    >>> result = thread.get()

    :param entity_key_pair: (required)
    :type entity_key_pair: EntityKeyPair
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without HTTP status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(EntityLinkingFeatures, status_code(int), headers(HTTPHeaderDict))
    """
    # Snapshot locals first: captures `self`, `entity_key_pair` and the raw
    # `kwargs` dict before any other local names are bound
    local_var_params = locals()

    all_params = [
        'entity_key_pair'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into local_var_params
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_linking_feedback_with_features" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'entity_key_pair' is set
    if self.api_client.client_side_validation and ('entity_key_pair' not in local_var_params or  # noqa: E501
                                                   local_var_params['entity_key_pair'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `entity_key_pair` when calling `get_linking_feedback_with_features`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The entity pair is sent as the JSON request body
    body_params = None
    if 'entity_key_pair' in local_var_params:
        body_params = local_var_params['entity_key_pair']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['http_auth', 'openlattice_auth']  # noqa: E501

    return self.api_client.call_api(
        '/linker/linkingfeedback/features', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EntityLinkingFeatures',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def get_linking_feedbacks_on_entity(self, feedback_type, entity_data_key, **kwargs):  # noqa: E501
    """Returns positive/negative/all linking feedbacks on the given entity.  # noqa: E501

    Convenience wrapper around
    ``get_linking_feedbacks_on_entity_with_http_info`` that returns only
    the deserialized payload instead of the full
    (data, status_code, headers) tuple.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_linking_feedbacks_on_entity(feedback_type, entity_data_key, async_req=True)
    >>> result = thread.get()

    :param feedback_type: (required)
    :type feedback_type: str
    :param entity_data_key: (required)
    :type entity_data_key: EntityDataKey
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: list[EntityLinkingFeedback]
    """
    # Force payload-only return, overriding any caller-supplied value
    options = dict(kwargs, _return_http_data_only=True)
    return self.get_linking_feedbacks_on_entity_with_http_info(
        feedback_type, entity_data_key, **options)  # noqa: E501
def get_linking_feedbacks_on_entity_with_http_info(self, feedback_type, entity_data_key, **kwargs): # noqa: E501
"""Returns positive/negative/all linking feedbacks on the given entity. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_linking_feedbacks_on_entity_with_http_info(feedback_type, entity_data_key, async_req=True)
>>> result = thread.get()
:param feedback_type: (required)
| |
<reponame>Yard1/scikit-learn-intelex
#===============================================================================
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from abc import ABCMeta, abstractmethod
from enum import Enum
import sys
from numbers import Number
import numpy as np
from scipy import sparse as sp
from ..common import (
_validate_targets,
_check_X_y,
_check_array,
_check_is_fitted,
_column_or_1d,
_check_n_features
)
try:
from _onedal4py_dpc import (
PySvmParams,
PyRegressionSvmTrain,
PyRegressionSvmInfer,
PyClassificationSvmTrain,
PyClassificationSvmInfer,
PyNuRegressionSvmTrain,
PyNuRegressionSvmInfer,
PyNuClassificationSvmTrain,
PyNuClassificationSvmInfer
)
except ImportError:
from _onedal4py_host import (
PySvmParams,
PyRegressionSvmTrain,
PyRegressionSvmInfer,
PyClassificationSvmTrain,
PyClassificationSvmInfer,
PyNuRegressionSvmTrain,
PyNuRegressionSvmInfer,
PyNuClassificationSvmTrain,
PyNuClassificationSvmInfer
)
class SVMtype(Enum):
    """Enumerates the four supported SVM formulations.

    The numeric values are internal identifiers; the enum members are
    compared by identity elsewhere in this module (e.g. in
    ``BaseSVM._get_sample_weight``).
    """
    c_svc = 0         # C-parameterized classification
    epsilon_svr = 1   # epsilon-insensitive regression
    nu_svc = 2        # nu-parameterized classification
    nu_svr = 3        # nu-parameterized regression
class BaseSVM(BaseEstimator, metaclass=ABCMeta):
    """Abstract base for the oneDAL-backed SVM estimators.

    Subclasses (e.g. ``SVR``, ``SVC``) supply concrete hyperparameter
    defaults and the matching oneDAL train/infer binding classes
    (``Py*SvmTrain`` / ``Py*SvmInfer``), which are passed into ``_fit``,
    ``_predict`` and ``_decision_function`` as the ``Computer`` argument.
    """

    @abstractmethod
    def __init__(self, C, nu, epsilon, kernel='rbf', *, degree, gamma,
                 coef0, tol, shrinking, cache_size, max_iter, tau,
                 class_weight, decision_function_shape,
                 break_ties, algorithm, svm_type=None, **kwargs):
        # Store hyperparameters verbatim (scikit-learn convention: no
        # validation or transformation in __init__)
        self.C = C
        self.nu = nu
        self.epsilon = epsilon
        self.kernel = kernel
        self.degree = degree
        self.coef0 = coef0
        self.gamma = gamma
        self.tol = tol
        self.shrinking = shrinking
        self.cache_size = cache_size
        self.max_iter = max_iter
        self.tau = tau
        self.class_weight = class_weight
        self.decision_function_shape = decision_function_shape
        self.break_ties = break_ties
        self.algorithm = algorithm
        self.svm_type = svm_type

    def _compute_gamma_sigma(self, gamma, X):
        """Resolve 'scale'/'auto'/numeric gamma; also return
        sqrt(0.5 / gamma), the sigma form used by the oneDAL backend."""
        if isinstance(gamma, str):
            if gamma == 'scale':
                if sp.isspmatrix(X):
                    # var = E[X^2] - E[X]^2
                    X_sc = (X.multiply(X)).mean() - (X.mean())**2
                else:
                    X_sc = X.var()
                _gamma = 1.0 / (X.shape[1] * X_sc) if X_sc != 0 else 1.0
            elif gamma == 'auto':
                _gamma = 1.0 / X.shape[1]
            else:
                raise ValueError(
                    "When 'gamma' is a string, it should be either 'scale' or "
                    "'auto'. Got '{}' instead.".format(gamma)
                )
        else:
            _gamma = gamma
        return _gamma, np.sqrt(0.5 / _gamma)

    def _validate_targets(self, y, dtype):
        """Regression-style target validation; classifiers override this
        (they set ``classes_`` / ``class_weight_`` to real values)."""
        self.class_weight_ = None
        self.classes_ = None
        return _column_or_1d(y, warn=True).astype(dtype, copy=False)

    def _get_sample_weight(self, X, y, sample_weight):
        """Normalize/validate sample weights; returns None when no
        weighting is in effect, else a contiguous 1D weight array."""
        n_samples = X.shape[0]
        dtype = X.dtype
        if n_samples == 1:
            raise ValueError("n_samples=1")

        sample_weight = np.asarray([]
                                   if sample_weight is None
                                   else sample_weight, dtype=np.float64)
        sample_weight_count = sample_weight.shape[0]
        if sample_weight_count != 0 and sample_weight_count != n_samples:
            raise ValueError("sample_weight and X have incompatible shapes: "
                             "%r vs %r\n"
                             "Note: Sparse matrices cannot be indexed w/"
                             "boolean masks (use `indices=True` in CV)."
                             % (len(sample_weight), X.shape))

        ww = None
        if sample_weight_count == 0 and self.class_weight_ is None:
            # No per-sample weights and no class weights: nothing to do
            return ww
        elif sample_weight_count == 0:
            sample_weight = np.ones(n_samples, dtype=dtype)
        elif isinstance(sample_weight, Number):
            # NOTE(review): sample_weight was converted to ndarray above, so
            # this branch appears unreachable — confirm against upstream.
            sample_weight = np.full(n_samples, sample_weight, dtype=dtype)
        else:
            sample_weight = _check_array(
                sample_weight, accept_sparse=False, ensure_2d=False,
                dtype=dtype, order="C"
            )
            if sample_weight.ndim != 1:
                raise ValueError("Sample weights must be 1D array or scalar")
            if sample_weight.shape != (n_samples,):
                raise ValueError("sample_weight.shape == {}, expected {}!"
                                 .format(sample_weight.shape, (n_samples,)))

        if self.svm_type == SVMtype.nu_svc:
            # nu feasibility check: nu * (w_i + w_j) / 2 must not exceed
            # min(w_i, w_j) for every pair of classes
            weight_per_class = [np.sum(sample_weight[y == class_label])
                                for class_label in np.unique(y)]
            for i in range(len(weight_per_class)):
                for j in range(i + 1, len(weight_per_class)):
                    if self.nu * (weight_per_class[i] + weight_per_class[j]) / 2 > \
                            min(weight_per_class[i], weight_per_class[j]):
                        raise ValueError('specified nu is infeasible')

        if np.all(sample_weight <= 0):
            if self.svm_type == SVMtype.nu_svc:
                err_msg = 'negative dimensions are not allowed'
            else:
                err_msg = 'Invalid input - all samples have zero or negative weights.'
            raise ValueError(err_msg)
        elif np.any(sample_weight <= 0):
            if self.svm_type == SVMtype.c_svc and \
                    len(np.unique(y[sample_weight > 0])) != len(self.classes_):
                raise ValueError(
                    'Invalid input - all samples with positive weights '
                    'have the same label.')

        ww = sample_weight
        if self.class_weight_ is not None:
            # NOTE(review): assumes class labels in y are encoded as
            # 0..n_classes-1 at this point — confirm in the classifier's
            # _validate_targets.
            for i, v in enumerate(self.class_weight_):
                ww[y == i] *= v

        if not ww.flags.c_contiguous and not ww.flags.f_contiguous:
            ww = np.ascontiguousarray(ww, dtype)
        return ww

    def _get_onedal_params(self):
        """Bundle hyperparameters into the oneDAL parameter struct."""
        max_iter = 10000 if self.max_iter == -1 else self.max_iter
        class_count = 0 if self.classes_ is None else len(self.classes_)
        return PySvmParams(method=self.algorithm, kernel=self.kernel,
                           c=self.C, nu=self.nu, epsilon=self.epsilon,
                           class_count=class_count, accuracy_threshold=self.tol,
                           max_iteration_count=max_iter, cache_size=self.cache_size,
                           shrinking=self.shrinking,
                           scale=self._scale_, sigma=self._sigma_,
                           shift=self.coef0, degree=self.degree, tau=self.tau)

    def _reset_context(func):
        # Decorator (used as a plain function inside the class body, not a
        # method): if a daal4py GPU SYCL context is active, temporarily
        # switch to the host context for the wrapped call, then restore it.
        def wrapper(*args, **kwargs):
            if 'daal4py.oneapi' in sys.modules:
                import daal4py.oneapi as d4p_oneapi
                devname = d4p_oneapi._get_device_name_sycl_ctxt()
                if devname == 'gpu':
                    gpu_ctx = d4p_oneapi._get_sycl_ctxt()
                    host_ctx = d4p_oneapi.sycl_execution_context('host')
                    try:
                        host_ctx.apply()
                        res = func(*args, **kwargs)
                    finally:
                        # Restore the GPU context even if the call raised
                        del host_ctx
                        gpu_ctx.apply()
                    return res
                else:
                    return func(*args, **kwargs)
            else:
                return func(*args, **kwargs)
        return wrapper

    @_reset_context
    def _fit(self, X, y, sample_weight, Computer):
        """Validate inputs, train via the oneDAL `Computer` binding and
        populate the fitted attributes (dual_coef_, support_, ...)."""
        if hasattr(self, 'decision_function_shape'):
            if self.decision_function_shape not in ('ovr', 'ovo', None):
                raise ValueError(
                    f"decision_function_shape must be either 'ovr' or 'ovo', "
                    f"got {self.decision_function_shape}."
                )

        if y is None:
            if self._get_tags()['requires_y']:
                raise ValueError(
                    f"This {self.__class__.__name__} estimator "
                    f"requires y to be passed, but the target y is None."
                )
        X, y = _check_X_y(
            X, y, dtype=[np.float64, np.float32],
            force_all_finite=True, accept_sparse='csr')
        y = self._validate_targets(y, X.dtype)
        sample_weight = self._get_sample_weight(X, y, sample_weight)

        self._sparse = sp.isspmatrix(X)

        if self.kernel == 'linear':
            # Linear kernel ignores gamma/sigma/coef0
            self._scale_, self._sigma_ = 1.0, 1.0
            self.coef0 = 0.0
        else:
            self._scale_, self._sigma_ = self._compute_gamma_sigma(self.gamma, X)

        c_svm = Computer(self._get_onedal_params())
        c_svm.train(X, y, sample_weight)

        # Mirror the native results into scikit-learn style attributes,
        # keeping sparsity consistent with the input
        if self._sparse:
            self.dual_coef_ = sp.csr_matrix(c_svm.get_coeffs().T)
            self.support_vectors_ = sp.csr_matrix(c_svm.get_support_vectors())
        else:
            self.dual_coef_ = c_svm.get_coeffs().T
            self.support_vectors_ = c_svm.get_support_vectors()

        self.intercept_ = c_svm.get_biases().ravel()
        self.support_ = c_svm.get_support_indices().ravel().astype('int')
        self.n_features_in_ = X.shape[1]
        self.shape_fit_ = X.shape

        if getattr(self, 'classes_', None) is not None:
            # Number of support vectors per class (classification only)
            indices = y.take(self.support_, axis=0)
            self._n_support = np.array([
                np.sum(indices == i) for i, _ in enumerate(self.classes_)])
        self._gamma = self._scale_

        self._onedal_model = c_svm.get_model()
        return self

    @_reset_context
    def _predict(self, X, Computer):
        """Predict labels/targets via the oneDAL `Computer` binding."""
        _check_is_fitted(self)
        if self.break_ties and self.decision_function_shape == 'ovo':
            raise ValueError("break_ties must be False when "
                             "decision_function_shape is 'ovo'")

        if self.break_ties and self.decision_function_shape == 'ovr' and \
                len(self.classes_) > 2:
            # Tie-breaking path: argmax over the continuous decision values
            y = np.argmax(self.decision_function(X), axis=1)
        else:
            X = _check_array(X, dtype=[np.float64, np.float32],
                             force_all_finite=True, accept_sparse='csr')
            _check_n_features(self, X, False)

            if self._sparse and not sp.isspmatrix(X):
                X = sp.csr_matrix(X)
            if self._sparse:
                X.sort_indices()

            if sp.issparse(X) and not self._sparse and not callable(self.kernel):
                raise ValueError(
                    "cannot use sparse input in %r trained on dense data"
                    % type(self).__name__)

            c_svm = Computer(self._get_onedal_params())
            if hasattr(self, '_onedal_model'):
                c_svm.infer(X, self._onedal_model)
            else:
                # No cached native model: rebuild it from the fitted arrays
                c_svm.infer_builder(X, self.support_vectors_,
                                    self.dual_coef_.T, self.intercept_)
            y = c_svm.get_labels()
        return y

    def _ovr_decision_function(self, predictions, confidences, n_classes):
        """Fold pairwise (ovo) votes/confidences into per-class (ovr)
        scores; confidences are squashed so they only break vote ties."""
        n_samples = predictions.shape[0]
        votes = np.zeros((n_samples, n_classes))
        sum_of_confidences = np.zeros((n_samples, n_classes))

        k = 0
        for i in range(n_classes):
            for j in range(i + 1, n_classes):
                sum_of_confidences[:, i] -= confidences[:, k]
                sum_of_confidences[:, j] += confidences[:, k]
                votes[predictions[:, k] == 0, i] += 1
                votes[predictions[:, k] == 1, j] += 1
                k += 1

        # Monotone squash into (-1/3, 1/3) so confidences never outvote votes
        transformed_confidences = \
            sum_of_confidences / (3 * (np.abs(sum_of_confidences) + 1))
        return votes + transformed_confidences

    @_reset_context
    def _decision_function(self, X, Computer):
        """Compute raw decision values; converts ovo to ovr when requested."""
        _check_is_fitted(self)
        X = _check_array(X, dtype=[np.float64, np.float32],
                         force_all_finite=False, accept_sparse='csr')
        _check_n_features(self, X, False)

        if self._sparse and not sp.isspmatrix(X):
            X = sp.csr_matrix(X)
        if self._sparse:
            X.sort_indices()

        if sp.issparse(X) and not self._sparse and not callable(self.kernel):
            raise ValueError(
                "cannot use sparse input in %r trained on dense data"
                % type(self).__name__)

        c_svm = Computer(self._get_onedal_params())
        if hasattr(self, '_onedal_model'):
            c_svm.infer(X, self._onedal_model)
        else:
            # No cached native model: rebuild it from the fitted arrays
            c_svm.infer_builder(X, self.support_vectors_,
                                self.dual_coef_.T, self.intercept_)
        decision_function = c_svm.get_decision_function()
        if len(self.classes_) == 2:
            decision_function = decision_function.ravel()

        if self.decision_function_shape == 'ovr' and len(self.classes_) > 2:
            decision_function = self._ovr_decision_function(
                decision_function < 0, -decision_function, len(self.classes_))
        return decision_function
class SVR(RegressorMixin, BaseSVM):
    """
    Epsilon--Support Vector Regression.

    Thin configuration wrapper around BaseSVM that fixes the SVM type to
    epsilon-SVR and routes fit/predict to the oneDAL regression backends.
    """
    def __init__(self, C=1.0, epsilon=0.1, kernel='rbf', *, degree=3,
                 gamma='scale', coef0=0.0, tol=1e-3, shrinking=True,
                 cache_size=200.0, max_iter=-1, tau=1e-12,
                 algorithm='thunder', **kwargs):
        # nu is unused by epsilon-SVR; a fixed placeholder is forwarded.
        # Classification-only options are pinned to inert values.
        super().__init__(C=C, nu=0.5, epsilon=epsilon, kernel=kernel,
                         degree=degree, gamma=gamma,
                         coef0=coef0, tol=tol,
                         shrinking=shrinking, cache_size=cache_size,
                         max_iter=max_iter, tau=tau, class_weight=None,
                         decision_function_shape=None,
                         break_ties=False, algorithm=algorithm)
        self.svm_type = SVMtype.epsilon_svr
    def fit(self, X, y, sample_weight=None):
        """Fit the regressor with the oneDAL regression trainer."""
        return super()._fit(X, y, sample_weight, PyRegressionSvmTrain)
    def predict(self, X):
        """Predict targets for X; returns a flat 1-D array."""
        y = super()._predict(X, PyRegressionSvmInfer)
        return y.ravel()
class SVC(ClassifierMixin, BaseSVM):
    """
    C-Support Vector Classification.

    Thin configuration wrapper around BaseSVM that fixes the SVM type to
    C-SVC and routes fit/predict/decision_function to the oneDAL
    classification backends.
    """
    def __init__(self, C=1.0, kernel='rbf', *, degree=3, gamma='scale',
                 coef0=0.0, tol=1e-3, shrinking=True, cache_size=200.0,
                 max_iter=-1, tau=1e-12, class_weight=None,
                 decision_function_shape='ovr', break_ties=False,
                 algorithm='thunder', **kwargs):
        # nu/epsilon are unused by C-SVC; fixed placeholders are forwarded.
        super().__init__(C=C, nu=0.5, epsilon=0.0, kernel=kernel, degree=degree,
                         gamma=gamma, coef0=coef0, tol=tol,
                         shrinking=shrinking, cache_size=cache_size,
                         max_iter=max_iter, tau=tau, class_weight=class_weight,
                         decision_function_shape=decision_function_shape,
                         break_ties=break_ties, algorithm=algorithm)
        self.svm_type = SVMtype.c_svc
    def _validate_targets(self, y, dtype):
        # Resolve class labels and per-class weights; stores classes_ on self.
        y, self.class_weight_, self.classes_ = _validate_targets(
            y, self.class_weight, dtype)
        return y
    def fit(self, X, y, sample_weight=None):
        """Fit the classifier with the oneDAL classification trainer."""
        return super()._fit(X, y, sample_weight, PyClassificationSvmTrain)
    def predict(self, X):
        """Predict class labels, mapping backend indices back to classes_."""
        y = super()._predict(X, PyClassificationSvmInfer)
        if len(self.classes_) == 2:
            y = y.ravel()
        return self.classes_.take(np.asarray(y, dtype=np.intp)).ravel()
    def decision_function(self, X):
        """Return decision values (ovr-shaped for multiclass if configured)."""
        return super()._decision_function(X, PyClassificationSvmInfer)
class NuSVR(RegressorMixin, BaseSVM):
"""
Nu-Support Vector Regression.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', *, degree=3,
gamma='scale', coef0=0.0, tol=1e-3, shrinking=True,
cache_size=200.0, max_iter=-1, tau=1e-12,
algorithm='thunder', **kwargs):
super().__init__(C=C, nu=nu, epsilon=0.0, kernel=kernel,
degree=degree, gamma=gamma,
coef0=coef0, tol=tol,
shrinking=shrinking, cache_size=cache_size,
max_iter=max_iter, tau=tau, class_weight=None,
decision_function_shape=None,
break_ties=False, algorithm=algorithm)
self.svm_type = | |
<gh_stars>10-100
# For each channel, we assume all the pixel (x,y) dimensions are i.i.d., and solve a scalar quantization problem
import numpy as np
np.random.seed(0)
import tensorflow as tf
import utils
class ChannelwisePriorCDFQuantizer:
    """Per-channel scalar quantizer driven by a prior model's inverse CDF.

    Each channel's scalars are treated as i.i.d.; code points for an n-bit
    code are placed at inverse-CDF values of the n-bit dyadic fractions,
    so quantization levels concentrate where the prior puts mass.  Entropy
    models over code usage are fit from data in ``build_entropy_models``
    and later used to estimate code lengths.
    """
    def __init__(self, num_channels, max_bits_per_coord, float_type='float32', int_type='int32'):
        super().__init__()
        self.max_bits_per_coord = max_bits_per_coord
        self.num_channels = num_channels
        self.float_type = float_type
        self.int_type = int_type
        self.quantization_levels = 2 ** (max_bits_per_coord + 1) - 1 # 2^0 + 2^1 + 2^2 + ... + 2^max_bits_per_coord
        # Both populated by build_entropy_models(); None/empty means "not fit".
        self.raw_code_length_entropy_models = None
        self.entropy_models = None
    def build_code_points(self, prior_model, **kwargs):
        """Precompute code points and per-channel search grids.

        ``prior_model`` must expose an ``inverse_cdf`` method; all derived
        tensors are cached on self for use by the compression methods.
        """
        N = self.max_bits_per_coord
        num_channels = self.num_channels
        float_type = self.float_type
        all_bin_floats = np.hstack([utils.n_bit_binary_floats(n) for n in range(N + 1)]) # xi representatives
        all_bin_floats_rep = np.repeat(all_bin_floats[:, None],
                                       num_channels, axis=1) # quantization_levels x num_channels
        all_code_points = prior_model.inverse_cdf(all_bin_floats_rep, **kwargs) # quantization_levels x num_channels
        all_code_points = tf.cast(all_code_points, float_type)
        all_code_points = tf.transpose(all_code_points) # num_channels x quantization_levels
        self.all_code_points = all_code_points
        # Sorted copy is what tf.searchsorted-based lookups rely on.
        self.code_points_by_channel = tf.sort(all_code_points, axis=1) # MUST BE SORTED!!!
        # code_points_by_bits[c][b] gives a length-2^b array of quantization/code points for channel c when using b bits
        code_points_by_bits = []
        for c in range(num_channels):
            code_points = []
            for n in range(N + 1):
                code_points.append(all_code_points[c, 2 ** n - 1: 2 ** (n + 1) - 1]) # n-bit code points
            code_points_by_bits.append(code_points)
        self.code_points_by_bits = code_points_by_bits
        # build grids of sorted quantization points represented by increasing number of raw bits, to make it faster to
        # search for "best" n-bit quantization when encoding
        search_grids = []
        for c in range(num_channels):
            search_grid = []
            for n in range(N + 1):
                # Edge-pad every n-bit grid to the same width 2^N so all
                # bit-depths can be stacked into one rectangular tensor.
                if n == 0:
                    n_bit_grid = np.pad([code_points_by_bits[c][n][0]] * 2, (2 ** (N - 1) - 1,), 'edge')
                else:
                    n_bit_grid = np.pad(code_points_by_bits[c][n], (2 ** (N - 1) - 2 ** (n - 1),), 'edge')
                search_grid.append(n_bit_grid)
            search_grid = np.array(search_grid)
            search_grids.append(search_grid)
        search_grids = np.array(search_grids) # num_channels x (N+1) x 2^N
        _search_grids = tf.constant(search_grids, dtype=float_type)
        self._search_grids = _search_grids # C x (N+1) x 2^N
    def get_all_N_bit_intervals(self, Z):
        """
        For every bit depth, find the grid interval bracketing each latent.

        :param Z: batch of latent representations of shape B x C
        :return: two tensors of shape C x (N+1) x B
        """
        N = self.max_bits_per_coord
        # backend has to be tensorflow >= 1.15 for searchsorted to work below (numpy's version doesn't support matrices)
        Z_repeated = tf.repeat(tf.transpose(Z)[:, None, :], N + 1, axis=1) # C x (N+1) x B
        right_endpoint_idx = tf.searchsorted(self._search_grids, Z_repeated, side='left') # search inner-most dim
        right_endpoint_idx = tf.clip_by_value(right_endpoint_idx, 0, 2 ** N - 1) # C x (N+1) x B
        left_endpoint_idx = tf.clip_by_value(right_endpoint_idx - 1, 0, 2 ** N - 1)
        right_endpoints = tf.gather(self._search_grids, right_endpoint_idx, batch_dims=2)
        left_endpoints = tf.gather(self._search_grids, left_endpoint_idx, batch_dims=2)
        return left_endpoints, right_endpoints
    def build_entropy_models(self, X, vae, lambs, add_n_smoothing):
        """Fit code-length and code-point entropy models from data X.

        Two passes of compression are run: the first (raw code lengths)
        fits ``raw_code_length_entropy_models``; the second, which now
        accounts for that overhead, fits ``entropy_models`` over code
        points directly.  ``add_n_smoothing`` is additive smoothing on the
        empirical counts.
        """
        N = self.max_bits_per_coord
        float_type = self.float_type
        posterior_means, posterior_logvars = vae.encode(X)
        posterior_vars = tf.exp(posterior_logvars)
        C = posterior_vars.shape[-1] # number of latent channels
        assert C == self.num_channels
        batch_means, batch_vars = map(lambda r: tf.reshape(r, (-1, C)),
                                      [posterior_means, posterior_vars]) # num_samples x C
        batch_stds = batch_vars ** 0.5
        raw_code_length_entropy_models = dict()
        # first compress with raw number of bits (i.e., the number of binary decimal places of \hat xis)
        Z_hat_dict, raw_num_bits_dict = self.compress_batch_channel_latents(batch_means, batch_stds, lambs,
                                                                            return_np=False) # each dict entry is B x C
        for lamb in lambs:
            Z_hat = Z_hat_dict[lamb]
            raw_num_bits = raw_num_bits_dict[lamb]
            # build entropy model for raw number of bits
            raw_num_bits_counts_by_channel = np.array([np.bincount(raw_num_bits[:, c], minlength=N + 1)
                                                       for c in range(C)], dtype=float_type) # C x (N+1)
            raw_num_bits_counts_by_channel += add_n_smoothing
            raw_num_bits_freqs_by_channel = raw_num_bits_counts_by_channel / np.sum(raw_num_bits_counts_by_channel,
                                                                                    axis=1)[:, None]
            raw_code_length_entropy_model = -np.log2(raw_num_bits_freqs_by_channel)
            raw_code_length_entropy_models[lamb] = raw_code_length_entropy_model
        self.raw_code_length_entropy_models = raw_code_length_entropy_models
        # now run compression again to build entropy model for code points, but using the newly-built
        # raw_code_length_entropy_models that corrects for raw code length overhead when solving the encoding
        # optimization problems
        entropy_models = dict()
        Z_hat_dict, cl_corrected_num_bits_dict = self.compress_batch_channel_latents(batch_means, batch_stds, lambs,
                                                                                     return_np=False) # each dict entry is B x C
        for lamb in lambs:
            Z_hat = Z_hat_dict[lamb]
            # NOTE(review): raw_num_bits here still comes from the FIRST
            # pass (raw_num_bits_dict), not cl_corrected_num_bits_dict —
            # confirm that is intended; it is currently unused below.
            raw_num_bits = raw_num_bits_dict[lamb]
            #
            # # build entropy model for raw number of bits
            # raw_num_bits_counts_by_channel = np.array([np.bincount(raw_num_bits[:, c], minlength=N + 1)
            #                                            for c in range(C)], dtype=float_type)  # C x (N+1)
            # raw_num_bits_counts_by_channel += add_n_smoothing
            # raw_num_bits_freqs_by_channel = raw_num_bits_counts_by_channel / np.sum(raw_num_bits_counts_by_channel,
            #                                                                         axis=1)[:, None]
            # raw_code_length_entropy_model = -np.log2(raw_num_bits_freqs_by_channel)
            # raw_code_length_entropy_models[lamb] = raw_code_length_entropy_model
            # build entropy model for code points directly
            qidx = tf.searchsorted(self.code_points_by_channel, tf.transpose(Z_hat)) # C x B; requires TF
            assert tf.reduce_all(tf.equal(tf.gather(self.code_points_by_channel, qidx, batch_dims=1),
                                          tf.transpose(Z_hat))) # qidx is the tensor of quantization indices
            qidx_counts_by_channel = np.array(
                [np.bincount(qidx[c], minlength=self.quantization_levels) for c in range(C)],
                dtype=float_type) # num_latent_channels by quantization_levels
            qidx_counts_by_channel += add_n_smoothing
            qidx_freqs_by_channel = qidx_counts_by_channel / np.sum(qidx_counts_by_channel,
                                                                    axis=1)[:, None]
            entropy_model = -np.log2(qidx_freqs_by_channel)
            entropy_models[lamb] = entropy_model
        self.entropy_models = entropy_models
        return None
    @property
    def lambs(self):
        # Sorted list of lambda values the entropy models were fit for.
        return list(sorted(self.entropy_models.keys()))
    def compress_batch_channel_latents(self, batch_means, batch_stds, lambs, **kwargs):
        """Quantize flattened latents (B x C) for each lambda.

        Returns two dicts keyed by lambda: quantized latents and raw bit
        counts, each entry of shape B x C.  When code-length entropy
        models exist, the per-lambda overhead is folded into the costs.
        """
        Z = batch_means # B x C
        N = self.max_bits_per_coord
        B, C = Z.shape
        float_type = self.float_type
        int_type = self.int_type
        left_endpoints, right_endpoints = self.get_all_N_bit_intervals(Z) # C x (N+1) x B
        left_endpoints = tf.transpose(left_endpoints, [1, 2, 0])
        right_endpoints = tf.transpose(right_endpoints, [1, 2, 0]) # (N+1) x B x C
        if not self.raw_code_length_entropy_models: # use naive (raw) code lengths, i.e., the number of floating points
            code_lengths = tf.range(N + 1, dtype=int_type)[:, None, None] * tf.ones((N + 1, B, C),
                                                                                    dtype=int_type) # (N+1) x B x C
            code_lengths = tf.concat([code_lengths, code_lengths[1:]], axis=0) # (2N+1) x B x C
        else: # add code length overhead
            raw_code_lengths = tf.repeat(tf.range(N + 1, dtype=int_type)[:, None], C, axis=1) # (N+1) x C
            raw_code_lengths = tf.cast(raw_code_lengths, self.raw_code_length_entropy_models[lambs[0]].dtype)
            code_lengths = []
            for lamb in lambs:
                code_lengths_for_lamb = raw_code_lengths + tf.transpose(self.raw_code_length_entropy_models[lamb])
                code_lengths_for_lamb = tf.repeat(code_lengths_for_lamb[:, None, :], B, axis=1) # (N+1) x B x C
                code_lengths_for_lamb = tf.concat([code_lengths_for_lamb, code_lengths_for_lamb[1:]],
                                                  axis=0) # (2N+1) x B x C
                code_lengths.append(code_lengths_for_lamb)
            code_lengths = tf.stack(code_lengths)
        # only considering right_endpoints with >=1 bits b/c 0-bit endpoints coincide for both left and right end points
        code_points = tf.concat([left_endpoints, right_endpoints[1:]], axis=0) # (2N+1) x B x C
        fun = utils.curry_normal_logpdf(loc=batch_means, scale=batch_stds, ignore_const=True, backend=tf)
        Z_hat_dict, raw_num_bits_dict = utils.batch_quantize_indep_dims(Z.shape, code_points, code_lengths, fun,
                                                                        lambs=lambs, backend=tf, **kwargs)
        return Z_hat_dict, raw_num_bits_dict # each dict entry is B x C
    def compress_latents(self, posterior_means, posterior_logvars, lambs):
        """Quantize full latent tensors and estimate per-element bit costs.

        Returns a dict of dicts keyed by result name then lambda; each
        value has the same shape as ``posterior_means``.
        """
        float_type = self.float_type
        posterior_vars = tf.exp(posterior_logvars)
        C = posterior_vars.shape[-1] # number of latent channels
        assert C == self.num_channels
        batch_means, batch_vars = map(lambda r: tf.reshape(r, (-1, C)),
                                      [posterior_means, posterior_vars]) # num_samples x C
        batch_stds = batch_vars ** 0.5
        Z_hat_dict, raw_num_bits_dict = self.compress_batch_channel_latents(batch_means, batch_stds, lambs,
                                                                            return_np=False)
        out_keys = ('Z_hat', 'raw_num_bits', 'num_bits_cl', 'num_bits')
        output = {key: dict() for key in out_keys}
        for lamb in lambs:
            tmp_res_for_lamb = dict()
            Z_hat = Z_hat_dict[lamb]
            raw_num_bits = raw_num_bits_dict[lamb]
            tmp_res_for_lamb['Z_hat'] = Z_hat
            tmp_res_for_lamb['raw_num_bits'] = raw_num_bits
            # # calculate code length using raw num bits + codelength overhead approach
            # raw_code_length_entropy_model = self.raw_code_length_entropy_models[lamb]  # C x (N+1)
            # raw_code_length_overhead_num_bits = tf.gather(raw_code_length_entropy_model,
            #                                               tf.transpose(raw_num_bits), batch_dims=1)  # C x B
            # raw_code_length_overhead_num_bits = tf.transpose(raw_code_length_overhead_num_bits)  # B x C
            # num_bits_cl = tf.cast(raw_num_bits, float_type) + \
            #               tf.cast(raw_code_length_overhead_num_bits, float_type)  # B x C
            # calculate code length using direct entropy coding approach
            I = tf.searchsorted(self.code_points_by_channel, tf.transpose(Z_hat)) # C x B; requires TF
            # assert tf.reduce_all(tf.equal(tf.gather(self.code_points_by_channel, I, batch_dims=1),
            #                               tf.transpose(Z_hat)))
            entropy_model = self.entropy_models[lamb] # C x quantization_levels
            num_bits = tf.gather(entropy_model, I, batch_dims=1) # gather across innermost axis
            num_bits = tf.transpose(num_bits) # B x C
            # tmp_res_for_lamb['num_bits_cl'] = num_bits_cl
            # NOTE(review): if raw_code_length_entropy_models is unset,
            # 'num_bits_cl' is never assigned and the loop below raises
            # KeyError — confirm compress_latents is only reachable after
            # build_entropy_models().
            if self.raw_code_length_entropy_models:
                tmp_res_for_lamb['num_bits_cl'] = raw_num_bits
            tmp_res_for_lamb['num_bits'] = num_bits
            for key in out_keys:
                # tmp_res_for_lamb[key] = np.reshape(item, posterior_means.shape)
                output[key][lamb] = np.reshape(tmp_res_for_lamb[key],
                                               posterior_means.shape) # same shape as latents; np.reshape moves to CPU
        return output
    def compress(self, X, vae, lambs, clip=True):
        """Encode X with the VAE, quantize latents, and decode per lambda.

        Adds an 'X_hat' entry (reconstructions per lambda) to the dict
        returned by compress_latents.  ``clip`` clamps pixel floats to
        [0, 1].
        """
        posterior_means, posterior_logvars = vae.encode(X)
        output = self.compress_latents(posterior_means, posterior_logvars, lambs)
        Z_hat_dict = output['Z_hat']
        Z_hat_batch = tf.stack([Z_hat_dict[lamb] for lamb in lambs]) # len(lambs) by latent_shape
        Z_hat_batch_shape = Z_hat_batch.shape # len(lambs) by posterior_means.shape
        Z_hat_flat_batch = tf.reshape(Z_hat_batch, [-1, *posterior_means.shape[1:]])
        X_hat_batch = tf.reshape(vae.decode(Z_hat_flat_batch),
                                 [len(lambs), *X.shape]) # 0th dimension corresponds to different lamb used
        if clip: # pixel float values
            X_hat_batch = np.clip(X_hat_batch, 0, 1)
        X_hat_dict = {lamb: X_hat_batch[i] for i, lamb in enumerate(lambs)}
        output['X_hat'] = X_hat_dict
        return output
class UniformQuantizer:
def __init__(self, quantization_levels, int_type=np.int32):
super(UniformQuantizer, self).__init__()
self.quantization_levels = quantization_levels
self.int_type = int_type
def fit(self, samples, add_n_smoothing=1.):
min = np.min(samples)
max = np.max(samples)
N = self.quantization_levels
delta = (max - min) / N
offset = min | |
import pytest
from spacy import registry
from spacy.tokens import Doc, Span
from spacy.language import Language
from spacy.lang.en import English
from spacy.pipeline import EntityRuler, EntityRecognizer, merge_entities
from spacy.pipeline.ner import DEFAULT_NER_MODEL
from spacy.errors import MatchPatternError
from spacy.tests.util import make_tempdir
from thinc.api import NumpyOps, get_current_ops
@pytest.fixture
def nlp():
    """Fresh blank Language pipeline for each test."""
    return Language()
@pytest.fixture
@registry.misc("entity_ruler_patterns")
def patterns():
    """Shared entity-ruler patterns, also registered under the misc registry.

    NOTE(review): stacking @pytest.fixture over @registry.misc means the
    registry receives the plain function while pytest wraps it as a
    fixture — confirm both consumers resolve it as intended.
    """
    return [
        {"label": "HELLO", "pattern": "hello world"},
        {"label": "BYE", "pattern": [{"LOWER": "bye"}, {"LOWER": "bye"}]},
        {"label": "HELLO", "pattern": [{"ORTH": "HELLO"}]},
        {"label": "COMPLEX", "pattern": [{"ORTH": "foo", "OP": "*"}]},
        {"label": "TECH_ORG", "pattern": "Apple", "id": "a1"},
        {"label": "TECH_ORG", "pattern": "Microsoft", "id": "a2"},
    ]
@Language.component("add_ent")
def add_ent_component(doc):
    """Pipeline component that presets a single ORG entity over tokens 0-3."""
    doc.ents = [Span(doc, 0, 3, label="ORG")]
    return doc
@pytest.mark.issue(3345)
def test_issue3345():
    """Test case where preset entity crosses sentence boundary."""
    nlp = English()
    doc = Doc(nlp.vocab, words=["I", "live", "in", "New", "York"])
    doc[4].is_sent_start = True
    ruler = EntityRuler(nlp, patterns=[{"label": "GPE", "pattern": "New York"}])
    cfg = {"model": DEFAULT_NER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    ner = EntityRecognizer(doc.vocab, model)
    # Add the OUT action. I wouldn't have thought this would be necessary...
    ner.moves.add_action(5, "")
    ner.add_label("GPE")
    doc = ruler(doc)
    # Get into the state just before "New"
    # (three O transitions consume "I", "live", "in")
    state = ner.moves.init_batch([doc])[0]
    ner.moves.apply_transition(state, "O")
    ner.moves.apply_transition(state, "O")
    ner.moves.apply_transition(state, "O")
    # Check that B-GPE is valid.
    assert ner.moves.is_valid(state, "B-GPE")
@pytest.mark.issue(4849)
def test_issue4849():
    """Entity IDs from ruler patterns survive single- and multi-process pipe().

    Fix: ``get_current_ops`` is a function; the original passed the function
    object itself to ``isinstance``, which is always False, so the
    multi-process branch never executed.
    """
    nlp = English()
    patterns = [
        {"label": "PERSON", "pattern": "<NAME>", "id": "joe-biden"},
        {"label": "PERSON", "pattern": "<NAME>", "id": "bernie-sanders"},
    ]
    ruler = nlp.add_pipe("entity_ruler", config={"phrase_matcher_attr": "LOWER"})
    ruler.add_patterns(patterns)
    text = """
    The left is starting to take aim at Democratic front-runner <NAME>.
    Sen. <NAME> joined in her criticism: "There is no 'middle ground' when it comes to climate policy."
    """
    # USING 1 PROCESS
    count_ents = 0
    for doc in nlp.pipe([text], n_process=1):
        count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])
    assert count_ents == 2
    # USING 2 PROCESSES: multiprocessing is only exercised on the CPU backend
    if isinstance(get_current_ops(), NumpyOps):
        count_ents = 0
        for doc in nlp.pipe([text], n_process=2):
            count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])
        assert count_ents == 2
@pytest.mark.issue(5918)
def test_issue5918():
    # Test edge case when merging entities.
    """Merging adjacent ruler entities must keep all three ORG spans."""
    nlp = English()
    ruler = nlp.add_pipe("entity_ruler")
    patterns = [
        {"label": "ORG", "pattern": "Digicon Inc"},
        {"label": "ORG", "pattern": "Rotan Mosle Inc's"},
        {"label": "ORG", "pattern": "Rotan Mosle Technology Partners Ltd"},
    ]
    ruler.add_patterns(patterns)
    text = """
    Digicon Inc said it has completed the previously-announced disposition
    of its computer systems division to an investment group led by
    Rotan Mosle Inc's Rotan Mosle Technology Partners Ltd affiliate.
    """
    doc = nlp(text)
    assert len(doc.ents) == 3
    # make it so that the third span's head is within the entity (ent_iob=I)
    # bug #5918 would wrongly transfer that I to the full entity, resulting in 2 instead of 3 final ents.
    # TODO: test for logging here
    # with pytest.warns(UserWarning):
    #     doc[29].head = doc[33]
    doc = merge_entities(doc)
    assert len(doc.ents) == 3
@pytest.mark.issue(8168)
def test_issue8168():
    """Two patterns sharing an id map to a single entry in ruler._ent_ids."""
    nlp = English()
    ruler = nlp.add_pipe("entity_ruler")
    patterns = [
        {"label": "ORG", "pattern": "Apple"},
        {
            "label": "GPE",
            "pattern": [{"LOWER": "san"}, {"LOWER": "francisco"}],
            "id": "san-francisco",
        },
        {
            "label": "GPE",
            "pattern": [{"LOWER": "san"}, {"LOWER": "fran"}],
            "id": "san-francisco",
        },
    ]
    ruler.add_patterns(patterns)
    # NOTE(review): the integer key is the hash of "GPE||san-francisco" under
    # the default ent_id_sep — confirm it stays stable across spaCy versions.
    assert ruler._ent_ids == {8043148519967183733: ("GPE", "san-francisco")}
@pytest.mark.issue(8216)
def test_entity_ruler_fix8216(nlp, patterns):
    """Test that patterns don't get added excessively."""
    ruler = nlp.add_pipe("entity_ruler", config={"validate": True})
    ruler.add_patterns(patterns)
    # Count compiled token patterns held by the underlying Matcher.
    pattern_count = sum(len(mm) for mm in ruler.matcher._patterns.values())
    assert pattern_count > 0
    # Adding an empty list must not duplicate or re-add anything.
    ruler.add_patterns([])
    after_count = sum(len(mm) for mm in ruler.matcher._patterns.values())
    assert after_count == pattern_count
def test_entity_ruler_init(nlp, patterns):
    """Constructing with patterns populates labels and matches in a pipeline."""
    ruler = EntityRuler(nlp, patterns=patterns)
    assert len(ruler) == len(patterns)
    assert len(ruler.labels) == 4
    assert "HELLO" in ruler
    assert "BYE" in ruler
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    doc = nlp("hello world bye bye")
    assert len(doc.ents) == 2
    assert doc.ents[0].label_ == "HELLO"
    assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_no_patterns_warns(nlp):
    """An empty ruler warns at call time and sets no entities."""
    ruler = EntityRuler(nlp)
    assert len(ruler) == 0
    assert len(ruler.labels) == 0
    nlp.add_pipe("entity_ruler")
    assert nlp.pipe_names == ["entity_ruler"]
    with pytest.warns(UserWarning):
        doc = nlp("hello world bye bye")
    assert len(doc.ents) == 0
def test_entity_ruler_init_patterns(nlp, patterns):
    """Patterns can be supplied via initialize() directly or the misc registry."""
    # initialize with patterns
    ruler = nlp.add_pipe("entity_ruler")
    assert len(ruler.labels) == 0
    ruler.initialize(lambda: [], patterns=patterns)
    assert len(ruler.labels) == 4
    doc = nlp("hello world bye bye")
    assert doc.ents[0].label_ == "HELLO"
    assert doc.ents[1].label_ == "BYE"
    nlp.remove_pipe("entity_ruler")
    # initialize with patterns from misc registry
    nlp.config["initialize"]["components"]["entity_ruler"] = {
        "patterns": {"@misc": "entity_ruler_patterns"}
    }
    ruler = nlp.add_pipe("entity_ruler")
    assert len(ruler.labels) == 0
    nlp.initialize()
    assert len(ruler.labels) == 4
    doc = nlp("hello world bye bye")
    assert doc.ents[0].label_ == "HELLO"
    assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_init_clear(nlp, patterns):
    """Test that initialization clears patterns."""
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    assert len(ruler.labels) == 4
    # Re-initializing without patterns must drop the previously added ones.
    ruler.initialize(lambda: [])
    assert len(ruler.labels) == 0
def test_entity_ruler_clear(nlp, patterns):
    """Test that clear() removes all patterns and the ruler warns when empty."""
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    assert len(ruler.labels) == 4
    doc = nlp("hello world")
    assert len(doc.ents) == 1
    ruler.clear()
    assert len(ruler.labels) == 0
    # An emptied ruler behaves like a fresh one: warns and sets no entities.
    with pytest.warns(UserWarning):
        doc = nlp("hello world")
    assert len(doc.ents) == 0
def test_entity_ruler_existing(nlp, patterns):
    """By default the ruler does not overwrite entities set upstream."""
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    nlp.add_pipe("add_ent", before="entity_ruler")
    doc = nlp("OH HELLO WORLD bye bye")
    assert len(doc.ents) == 2
    # Tokens 0-3 keep the preset ORG; only the non-overlapping BYE is added.
    assert doc.ents[0].label_ == "ORG"
    assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_existing_overwrite(nlp, patterns):
    """With overwrite_ents=True, ruler matches replace preset entities."""
    ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
    ruler.add_patterns(patterns)
    nlp.add_pipe("add_ent", before="entity_ruler")
    doc = nlp("OH HELLO WORLD bye bye")
    assert len(doc.ents) == 2
    assert doc.ents[0].label_ == "HELLO"
    assert doc.ents[0].text == "HELLO"
    assert doc.ents[1].label_ == "BYE"
def test_entity_ruler_existing_complex(nlp, patterns):
    """Overwriting works with multi-token and operator ('OP': '*') patterns."""
    ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
    ruler.add_patterns(patterns)
    nlp.add_pipe("add_ent", before="entity_ruler")
    doc = nlp("foo foo bye bye")
    assert len(doc.ents) == 2
    assert doc.ents[0].label_ == "COMPLEX"
    assert doc.ents[1].label_ == "BYE"
    assert len(doc.ents[0]) == 2
    assert len(doc.ents[1]) == 2
def test_entity_ruler_entity_id(nlp, patterns):
    """The 'id' field of a pattern surfaces as ent_id_ on the matched span."""
    ruler = nlp.add_pipe("entity_ruler", config={"overwrite_ents": True})
    ruler.add_patterns(patterns)
    doc = nlp("Apple is a technology company")
    assert len(doc.ents) == 1
    assert doc.ents[0].label_ == "TECH_ORG"
    assert doc.ents[0].ent_id_ == "a1"
def test_entity_ruler_cfg_ent_id_sep(nlp, patterns):
    """A custom ent_id_sep is used in the internal label||id match keys."""
    config = {"overwrite_ents": True, "ent_id_sep": "**"}
    ruler = nlp.add_pipe("entity_ruler", config=config)
    ruler.add_patterns(patterns)
    assert "TECH_ORG**a1" in ruler.phrase_patterns
    doc = nlp("Apple is a technology company")
    assert len(doc.ents) == 1
    assert doc.ents[0].label_ == "TECH_ORG"
    assert doc.ents[0].ent_id_ == "a1"
def test_entity_ruler_serialize_bytes(nlp, patterns):
    """to_bytes/from_bytes round-trips all patterns and labels."""
    ruler = EntityRuler(nlp, patterns=patterns)
    assert len(ruler) == len(patterns)
    assert len(ruler.labels) == 4
    ruler_bytes = ruler.to_bytes()
    new_ruler = EntityRuler(nlp)
    assert len(new_ruler) == 0
    assert len(new_ruler.labels) == 0
    new_ruler = new_ruler.from_bytes(ruler_bytes)
    assert len(new_ruler) == len(patterns)
    assert len(new_ruler.labels) == 4
    assert len(new_ruler.patterns) == len(ruler.patterns)
    for pattern in ruler.patterns:
        assert pattern in new_ruler.patterns
    assert sorted(new_ruler.labels) == sorted(ruler.labels)
def test_entity_ruler_serialize_phrase_matcher_attr_bytes(nlp, patterns):
    """phrase_matcher_attr survives a to_bytes/from_bytes round trip."""
    ruler = EntityRuler(nlp, phrase_matcher_attr="LOWER", patterns=patterns)
    assert len(ruler) == len(patterns)
    assert len(ruler.labels) == 4
    ruler_bytes = ruler.to_bytes()
    new_ruler = EntityRuler(nlp)
    assert len(new_ruler) == 0
    assert len(new_ruler.labels) == 0
    assert new_ruler.phrase_matcher_attr is None
    new_ruler = new_ruler.from_bytes(ruler_bytes)
    assert len(new_ruler) == len(patterns)
    assert len(new_ruler.labels) == 4
    assert new_ruler.phrase_matcher_attr == "LOWER"
def test_entity_ruler_validate(nlp):
    """validate=True surfaces MatchPatternError instead of a generic ValueError."""
    ruler = EntityRuler(nlp)
    validated_ruler = EntityRuler(nlp, validate=True)
    valid_pattern = {"label": "HELLO", "pattern": [{"LOWER": "HELLO"}]}
    invalid_pattern = {"label": "HELLO", "pattern": [{"ASDF": "HELLO"}]}
    # invalid pattern raises error without validate
    with pytest.raises(ValueError):
        ruler.add_patterns([invalid_pattern])
    # valid pattern is added without errors with validate
    validated_ruler.add_patterns([valid_pattern])
    # invalid pattern raises error with validate
    with pytest.raises(MatchPatternError):
        validated_ruler.add_patterns([invalid_pattern])
def test_entity_ruler_properties(nlp, patterns):
    """labels and ent_ids properties reflect the added patterns."""
    ruler = EntityRuler(nlp, patterns=patterns, overwrite_ents=True)
    assert sorted(ruler.labels) == sorted(["HELLO", "BYE", "COMPLEX", "TECH_ORG"])
    assert sorted(ruler.ent_ids) == ["a1", "a2"]
def test_entity_ruler_overlapping_spans(nlp):
    """Of two overlapping matches, only the earlier-starting one is kept."""
    ruler = EntityRuler(nlp)
    patterns = [
        {"label": "FOOBAR", "pattern": "foo bar"},
        {"label": "BARBAZ", "pattern": "bar baz"},
    ]
    ruler.add_patterns(patterns)
    doc = ruler(nlp.make_doc("foo bar baz"))
    assert len(doc.ents) == 1
    assert doc.ents[0].label_ == "FOOBAR"
@pytest.mark.parametrize("n_process", [1, 2])
def test_entity_ruler_multiprocessing(nlp, n_process):
    """Pattern IDs are preserved when piping with 1 or 2 processes.

    Fixes: (1) ``get_current_ops`` is a function and must be called before
    the ``isinstance`` check — comparing the function object itself is
    always False, so the body never ran; (2) the parametrized ``n_process``
    was ignored because ``nlp.pipe`` hard-coded ``n_process=2``.
    """
    # Multi-process piping is only supported on the CPU (NumpyOps) backend.
    if isinstance(get_current_ops(), NumpyOps) or n_process < 2:
        texts = ["I enjoy eating Pizza Hut pizza."]
        patterns = [{"label": "FASTFOOD", "pattern": "Pizza Hut", "id": "1234"}]
        ruler = nlp.add_pipe("entity_ruler")
        ruler.add_patterns(patterns)
        for doc in nlp.pipe(texts, n_process=n_process):
            for ent in doc.ents:
                assert ent.ent_id_ == "1234"
def test_entity_ruler_serialize_jsonl(nlp, patterns):
    """to_disk/from_disk works with a single JSONL file path."""
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    with make_tempdir() as d:
        ruler.to_disk(d / "test_ruler.jsonl")
        ruler.from_disk(d / "test_ruler.jsonl")  # read from an existing jsonl file
        with pytest.raises(ValueError):
            ruler.from_disk(d / "non_existing.jsonl")  # read from a bad jsonl file
def test_entity_ruler_serialize_dir(nlp, patterns):
    """to_disk/from_disk works with a directory path."""
    ruler = nlp.add_pipe("entity_ruler")
    ruler.add_patterns(patterns)
    with make_tempdir() as d:
        ruler.to_disk(d / "test_ruler")
        ruler.from_disk(d / "test_ruler")  # read from an existing directory
        with pytest.raises(ValueError):
            ruler.from_disk(d / "non_existing_dir")  # read from a bad directory
def test_entity_ruler_remove_basic(nlp):
    """remove(id) drops the pattern, its matches, and its matcher key."""
    ruler = EntityRuler(nlp)
    patterns = [
        {"label": "PERSON", "pattern": "Duygu", "id": "duygu"},
        {"label": "ORG", "pattern": "ACME", "id": "acme"},
        {"label": "ORG", "pattern": "ACM"},
    ]
    ruler.add_patterns(patterns)
    doc = ruler(nlp.make_doc("Duygu went to school"))
    assert len(ruler.patterns) == 3
    assert len(doc.ents) == 1
    assert doc.ents[0].label_ == "PERSON"
    assert doc.ents[0].text == "Duygu"
    # Internal phrase-matcher keys are "label||id" with the default separator.
    assert "PERSON||duygu" in ruler.phrase_matcher
    ruler.remove("duygu")
    doc = ruler(nlp.make_doc("Duygu went to school"))
    assert len(doc.ents) == 0
    assert "PERSON||duygu" not in ruler.phrase_matcher
    assert len(ruler.patterns) == 2
def test_entity_ruler_remove_same_id_multiple_patterns(nlp):
ruler = EntityRuler(nlp)
patterns = [
{"label": "PERSON", "pattern": "Duygu", "id": "duygu"},
{"label": "ORG", "pattern": "DuyguCorp", "id": "duygu"},
{"label": "ORG", "pattern": | |
<gh_stars>0
"""Tucker Sync common module.
Common code used by server and client implementations.
License:
The MIT License (MIT), see LICENSE.txt for more details.
Copyright:
Copyright (c) 2014 <NAME> and <NAME>.
"""
import inspect
import json
import logging
import os
from schematics.models import Model
from schematics.types import StringType, IntType, BaseType, LongType, \
EmailType, UUIDType, URLType, BooleanType
from schematics.types.compound import ListType, ModelType
from schematics.transforms import whitelist
from app_config import USER_PASSWORD_MIN_LEN
class Logger(object):
    """Custom logger wrapper.
    Typical use includes the module (file) and class name in the log output.
    By creating a module logger with the file name and adding a 'tag' to the
    message.
    Usage:
        # module.py:
        LOG = Logger(__file__)
        class ExampleClass(object):
            def __init__(self):
                LOG.debug(self, 'init')
            @classmethod
            def class_method(cls):
                LOG.debug(cls, 'class_method')
            @staticmethod
            def static_method():
                LOG.debug(ExampleClass, 'static_method')
        LOG.debug(None, 'Example with None tag, msg = %s', 'no tag')
        LOG.debug(msg='Example with msg = %s' % 'hello')
        LOG.debug('StringTag', 'Example with string tag and %s', 'arg')
    """
    # Internal class logger.
    _log = logging.getLogger(
        os.path.basename(__file__).split('.')[0] + ':Logger')
    # Detached from the root logger so Logger's own debug output never
    # leaks into application handlers.
    _log.propagate = 0
    _handler = logging.StreamHandler()
    _handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
    _log.addHandler(_handler)
    # Normally set to WARN. Set to logging.DEBUG to debug this class.
    _log.setLevel(logging.WARN)
    def __init__(self, p):
        """Init logger given a path string like __file__ (or a custom name)."""
        self._log.debug('init')
        # Strip directory and extension: '/a/b/module.py' -> 'module'.
        name = os.path.basename(p).split('.')[0]
        self._log.debug('Get logger name = %s', name)
        self.logger = logging.getLogger(name)
    def get_tag(self, tag):
        """Given a tag (e.g. None, 'tag', cls, self) return None or a string.
        The returned tag string is determined from the class name or string
        provided.
        """
        self._log.debug('get_tag')
        if not tag:
            self._log.debug('not tag')
            return
        elif type(tag) is str:
            self._log.debug('is str')
            return tag
        elif inspect.isclass(tag):
            self._log.debug('is class')
            return tag.__name__
        else:
            self._log.debug('else object (imagine that)')
            return tag.__class__.__name__
    # noinspection PyAugmentAssignment
    # minor IntelliJ bug see: https://youtrack.jetbrains.com/issue/PY-7605
    # noinspection should be removed when the fix filters through.
    def debug(self, tag=None, msg='', *args, **kwargs):
        """Log at the debug level with an optional tag."""
        # Skip tag resolution entirely when DEBUG output is disabled.
        if not self.logger.isEnabledFor(logging.DEBUG):
            return
        t = self.get_tag(tag)
        if t:
            # Prefix the tag lazily via %-style args (no eager formatting).
            msg = '%s:' + msg
            args = (t,) + args
        self.logger.debug(msg, *args, **kwargs)
# Optional module logger for this module.
# LOG = Logger(__file__)
class APIRequestType(object):
    """The API request constants.

    String values are the wire-format request type identifiers.
    """
    TEST = 'test'
    BASE_DATA_DOWN = 'baseDataDown'
    SYNC_DOWN = 'syncDown'
    SYNC_UP = 'syncUp'
    ACCOUNT_OPEN = 'accountOpen'
    ACCOUNT_CLOSE = 'accountClose'
    ACCOUNT_MODIFY = 'accountModify'
class JSONKey(object):
    """The JSON key constants used in API response bodies."""
    ERROR = 'error'
    DATA = 'data'
    OBJECTS = 'objects'
class APIErrorCode(object):
    """API error code constants plus a reverse name lookup."""
    SUCCESS = 0
    INTERNAL_SERVER_ERROR = 1
    MALFORMED_REQUEST = 2
    INVALID_KEY = 3
    INVALID_EMAIL = 4
    INVALID_PASSWORD = 5
    AUTH_FAIL = 6
    INVALID_JSON_OBJECT = 7
    EMAIL_NOT_UNIQUE = 8
    CLIENT_UUID_NOT_UNIQUE = 9
    FULL_SYNC_REQUIRED = 10

    @classmethod
    def name(cls, error_code):
        """Return the first constant name whose value equals error_code.

        Falls back to None when no constant matches.
        """
        return next((attr for attr, value in cls.__dict__.items()
                     if value == error_code), None)
class APIErrorResponse(object):
    """The API error response constants.

    Each constant is a pre-serialized JSON body of the form
    '{"error": <code>}' pairing JSONKey.ERROR with an APIErrorCode value.
    """
    SUCCESS = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.SUCCESS)
    INTERNAL_SERVER_ERROR = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.INTERNAL_SERVER_ERROR)
    MALFORMED_REQUEST = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.MALFORMED_REQUEST)
    INVALID_KEY = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.INVALID_KEY)
    INVALID_EMAIL = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.INVALID_EMAIL)
    INVALID_PASSWORD = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.INVALID_PASSWORD)
    AUTH_FAIL = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.AUTH_FAIL)
    INVALID_JSON_OBJECT = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.INVALID_JSON_OBJECT)
    EMAIL_NOT_UNIQUE = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.EMAIL_NOT_UNIQUE)
    CLIENT_UUID_NOT_UNIQUE = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.CLIENT_UUID_NOT_UNIQUE)
    FULL_SYNC_REQUIRED = '{"%s":%s}' % (
        JSONKey.ERROR, APIErrorCode.FULL_SYNC_REQUIRED)
class HTTP(object):
    """HTTP constants used by the API layer."""

    # Status code for a successful response.
    OK = 200
    # Media type used for all request and response bodies.
    CONTENT_TYPE_APP_JSON = 'application/json'
class JSON(object):
    """Thin wrapper around the standard json module.

    Serialisation always uses compact separators so wire payloads carry
    no insignificant whitespace; parsing is delegated unchanged.
    """

    # Separators passed to json.dumps to strip whitespace after ',' and ':'.
    COMPACT_SEPARATORS = (',', ':')

    @staticmethod
    def dumps(obj):
        """Serialise obj to a compact JSON string."""
        separators = JSON.COMPACT_SEPARATORS
        return json.dumps(obj, separators=separators)

    @staticmethod
    def loads(s):
        """Parse the JSON string s into native Python objects."""
        return json.loads(s)

    @staticmethod
    def load(fp):
        """Read and parse JSON from the file-like object fp."""
        return json.load(fp)
class APIRequest(Model):
    """API Request Model.

    Declarative schematics model describing an outgoing API request:
    base URL, query parameters, HTTP headers and optional body.
    """

    base_url = URLType()
    type = StringType()
    key = StringType()
    email = StringType()
    password = StringType()
    user_agent = StringType(serialized_name='User-Agent',
                            default='TuckerSync')
    # NOTE(review): bare CONTENT_TYPE_APP_JSON here -- presumably a
    # module-level constant mirroring HTTP.CONTENT_TYPE_APP_JSON; confirm
    # it is defined elsewhere in this module.
    accept = StringType(serialized_name='Accept',
                        default=CONTENT_TYPE_APP_JSON)
    content_type = StringType(serialized_name='Content-Type',
                              default=CONTENT_TYPE_APP_JSON)
    body = StringType()

    class Options(object):
        # Serialisation roles: which fields are emitted for the query
        # params vs. the three header variants (base / accept / content).
        roles = {'params': whitelist('type', 'key', 'email', 'password'),
                 'base_headers': whitelist('user_agent'),
                 'accept_headers': whitelist('user_agent', 'accept'),
                 'content_headers': whitelist('user_agent',
                                              'accept',
                                              'content_type')}

    @property
    def params(self):
        """The query/form parameters for this request."""
        return self.to_native(role='params')

    @property
    def headers(self):
        """The HTTP headers; Content-Type is sent only when a body is set."""
        if self.body:
            return self.to_native(role='content_headers')
        else:
            return self.to_native(role='accept_headers')

    @property
    def base_headers(self):
        """Minimal headers (User-Agent only)."""
        return self.to_native(role='base_headers')
class SyncDownRequestBody(Model):
    """Sync download request body model.

    Identifies the object class to sync, the requesting client and the
    last sync count the client has seen.
    """

    objectClass = StringType(required=True)
    clientUUID = UUIDType(required=True)
    lastSync = LongType(required=True)
class BaseDataDownRequestBody(SyncDownRequestBody):
    """Base data download request body model.

    Identical fields to SyncDownRequestBody; kept as a distinct type so
    the two request kinds can be told apart.
    """
    pass
class SyncUpRequestBody(Model):
    """Sync upload request body model.

    Carries the client's changed objects for one object class.
    """

    objectClass = StringType(required=True)
    clientUUID = UUIDType(required=True)
    objects = ListType(ModelType(Model), required=True)
class AccountOpenRequestBody(Model):
    """Account open request body model."""

    clientUUID = UUIDType(required=True)
class AccountModifyRequestBody(Model):
    """Account modify request body model.

    The new credentials to store for the authenticated account.
    """

    email = StringType(required=True)
    password = StringType(required=True)
class ResponseBody(Model):
    """Response body model.

    Common body for all API responses; optional fields are omitted from
    serialisation while unset (serialize_when_none=False).
    """

    error = IntType(default=0)
    committedSyncCount = LongType(serialize_when_none=False)
    moreObjects = BooleanType(serialize_when_none=False)
    objects = BaseType(serialize_when_none=False)
class SQLResult(Model):
    """SQL results and errors.

    Captures the outcome of a database operation: error number/message
    plus cursor metadata and any fetched objects.
    """

    errno = IntType()
    err_msg = StringType()
    rowcount = LongType()
    lastrowid = LongType()
    # NOTE(review): default=[] is a shared mutable default -- verify that
    # schematics copies it per instance, otherwise instances share the list.
    objects = ListType(ModelType(Model), default=[])
class SyncCount(Model):
    """SyncCount is a core application database model.

    Tracks per-object-class sync "sessions": each row is a session whose
    syncCount is issued via MySQL auto_increment, and isCommitted marks
    whether the session's data transaction completed.
    """

    # The sync count issued to this session.
    sync_count = LongType()
    # The object class the session applies to.
    object_class = StringType()
    # Whether the session's data transaction has committed.
    is_committed = BooleanType()

    # Select committed sync count by object class.
    # Operation:
    # Select the uncommitted sessions for object class and return the lowest
    # syncCount - 1,
    # otherwise if no uncommitted sessions return the highest sync count for
    # object class,
    # otherwise if no records return 0.
    SELECT_COMMITTED_SC = """SELECT
CASE WHEN COUNT(*) THEN MIN(syncCount) - 1
ELSE (SELECT
CASE WHEN COUNT(*) THEN MAX(syncCount)
ELSE 0
END
FROM SyncCount
WHERE objectClass = %s)
END AS sync_count
FROM SyncCount
WHERE objectClass = %s
AND isCommitted = 0"""

    def select_committed_sc_params(self):
        """Parameter tuple for SELECT_COMMITTED_SC (object_class twice)."""
        return self.object_class, self.object_class

    # Insert uncommitted session for object class.
    INSERT = """INSERT INTO SyncCount (objectClass) VALUES (%s)"""

    def insert_params(self):
        """Parameter tuple for INSERT."""
        return self.object_class,

    # Delete committed sessions prior to the currently inserted one.
    # Dependant on LAST_INSERT_ID() of the current database connection.
    DELETE_TRAILING_COMMITTED = """DELETE
FROM SyncCount
WHERE syncCount < LAST_INSERT_ID()
AND objectClass = %s
AND isCommitted = 1"""

    def delete_trailing_committed_params(self):
        """Parameter tuple for DELETE_TRAILING_COMMITTED."""
        return self.object_class,

    # Select session sync count by object class.
    # Putting the sequence together to issue a session sync count.
    # Must be executed outside of the main data transaction.
    # Operation:
    # First a new uncommitted session is inserted.
    # This becomes the new sync count head marker (not committed_sc).
    # Then trailing committed sessions are deleted to minimise table size.
    # Some rows are locked during the delete but insert with auto_increment
    # will still function for parallel sessions.
    # The session sync count is returned from LAST_INSERT_ID() which is within
    # the current database connection and does not read from the table.
    SELECT_SESSION_SC = (INSERT,
                         'COMMIT',
                         DELETE_TRAILING_COMMITTED,
                         'COMMIT',
                         'SELECT LAST_INSERT_ID() AS sync_count')

    def select_session_sc_params(self):
        """Parameter tuples aligned one-to-one with SELECT_SESSION_SC.

        None entries belong to the statements that take no parameters.
        """
        return (self.insert_params(),
                None,
                self.delete_trailing_committed_params(),
                None,
                None)

    # Mark session sync count as committed.
    # Marking the session committed must be atomic with the data commit.
    # However the session must still be marked as committed after a data
    # transaction fail/rollback.
    # Therefore should initially be executed within the same connection and
    # transaction as the data and again if the data transaction fails.
    UPDATE_SET_IS_COMMITTED = """UPDATE SyncCount
SET isCommitted = 1
WHERE syncCount = %s"""

    def update_set_is_committed_params(self):
        """Parameter tuple for UPDATE_SET_IS_COMMITTED."""
        return self.sync_count,

    # Mark expired past and future sessions as committed.
    # Provides self healing from any rare cases of sessions that failed to be
    # marked as committed.
    # Configured expiry time is 1 hour 20 min.
    # Which should allow sessions at least 20 min to commit even in the case
    # of daylight savings being applied to server (although the UTC to local
    # time zone may handle this effectively).
    # The normal case of time jitter and drift/update should be handled by the
    # expiry time.
    # The committed rows will be deleted when the next session sync count is
    # issued.
    # If any rows are affected a warning should be logged:
    WARN_EXPIRED_SESSIONS_COMMITTED = (
        'There were uncommitted sessions over 1 hour 20 min in the'
        ' past or future! These expired sessions (%s) have been'
        ' marked as committed.')
    UPDATE_SET_IS_COMMITTED_EXPIRED = """UPDATE SyncCount
SET isCommitted = 1
WHERE objectClass = %s
AND isCommitted = 0
AND (createAt < SUBTIME(NOW(),'01:20:00')
OR createAt > ADDTIME(NOW(),'01:20:00'))"""

    def update_set_is_committed_expired_params(self):
        """Parameter tuple for UPDATE_SET_IS_COMMITTED_EXPIRED."""
        return self.object_class,
class Client(Model):
"""Client is a core application database model."""
rowid = | |
the Project name
:type config_name: string
:param config_name: the logtail config name to apply
:type group_name: string
:param group_name: the machine group name
:return: RemoveConfigToMachineGroupResponse
:raise: LogException
"""
headers = {}
params = {}
resource = "/machinegroups/" + group_name + "/configs/" + config_name
(resp, header) = self._send("DELETE", project_name, None, resource, params, headers)
return RemoveConfigToMachineGroupResponse(header)
def get_machine_group_applied_configs(self, project_name, group_name):
    """ Get the names of the logtail configs applied in a machine group.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type group_name: string
    :param group_name: the machine group name
    :return: GetMachineGroupAppliedConfigResponse
    :raise: LogException
    """
    resource = "/machinegroups/%s/configs" % group_name
    (resp, header) = self._send("GET", project_name, None, resource, {}, {})
    return GetMachineGroupAppliedConfigResponse(resp, header)
def get_config_applied_machine_groups(self, project_name, config_name):
    """ Get the machine group names to which a logtail config applies.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type config_name: string
    :param config_name: the logtail config name used to apply
    :return: GetConfigAppliedMachineGroupsResponse
    :raise: LogException
    """
    resource = "/configs/%s/machinegroups" % config_name
    (resp, header) = self._send("GET", project_name, None, resource, {}, {})
    return GetConfigAppliedMachineGroupsResponse(resp, header)
def _update_acl(self, project_name, logstore_name, acl_action, acl_config):
    """ Update the acl of a project or one of its logstores.

    Internal helper shared by update_project_acl and update_logstore_acl.

    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name, or None/empty to target the
        project itself
    :type acl_action: string
    :param acl_action: "grant" or "revoke"
    :type acl_config: acl_config.AclConfig
    :param acl_config: the detail acl config info
    :return: UpdateAclResponse
    :raise: LogException
    """
    headers = {}
    params = {'type': 'acl'}
    # Truthiness covers both the None and the empty-string cases
    # (idiomatic replacement for `!= None and len(...) > 0`).
    if logstore_name:
        resource = "/logstores/" + logstore_name
    else:
        resource = "/"
    body = acl_config.to_json()
    body['action'] = acl_action
    body = json.dumps(body)
    headers['Content-Type'] = 'application/json'
    headers['x-log-bodyrawsize'] = str(len(body))
    (resp, headers) = self._send("PUT", project_name, body, resource, params, headers)
    return UpdateAclResponse(headers)
def update_project_acl(self, project_name, acl_action, acl_config):
    """ Update the acl of a project.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type acl_action: string
    :param acl_action: "grant" or "revoke", grant or revoke the acl_config to/from a project
    :type acl_config: acl_config.AclConfig
    :param acl_config: the detail acl config info
    :return: UpdateAclResponse
    :raise: LogException
    """
    # Project scope: no logstore name is passed to the shared helper.
    return self._update_acl(project_name, None, acl_action, acl_config)
def update_logstore_acl(self, project_name, logstore_name, acl_action, acl_config):
    """ Update the acl of a logstore.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type acl_action: string
    :param acl_action: "grant" or "revoke", grant or revoke the acl_config to/from a logstore
    :type acl_config: acl_config.AclConfig
    :param acl_config: the detail acl config info
    :return: UpdateAclResponse
    :raise: LogException
    """
    # Logstore scope: forward the logstore name to the shared helper.
    return self._update_acl(project_name, logstore_name, acl_action, acl_config)
def _list_acl(self, project_name, logstore_name, offset=0, size=100):
    """ List the acl of a project or one of its logstores.

    Internal helper shared by list_project_acl and list_logstore_acl.

    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name, or None/empty to target the
        project itself
    :type offset: int
    :param offset: the offset of all acl
    :type size: int
    :param size: the max return acl count
    :return: ListAclResponse
    :raise: LogException
    """
    headers = {}
    params = {'type': 'acl',
              'offset': str(offset),
              'size': str(size)}
    # Truthiness covers both the None and the empty-string cases
    # (idiomatic replacement for `!= None and len(...) > 0`).
    if logstore_name:
        resource = "/logstores/" + logstore_name
    else:
        resource = "/"
    (resp, headers) = self._send("GET", project_name, None, resource, params, headers)
    return ListAclResponse(resp, headers)
def list_project_acl(self, project_name, offset=0, size=100):
    """ List the acl of a project.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type offset: int
    :param offset: the offset of all acl
    :type size: int
    :param size: the max return acl count
    :return: ListAclResponse
    :raise: LogException
    """
    # Project scope: no logstore name is passed to the shared helper.
    return self._list_acl(project_name, None, offset, size)
def list_logstore_acl(self, project_name, logstore_name, offset=0, size=100):
    """ List the acl of a logstore.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type offset: int
    :param offset: the offset of all acl
    :type size: int
    :param size: the max return acl count
    :return: ListAclResponse
    :raise: LogException
    """
    # Logstore scope: forward the logstore name to the shared helper.
    return self._list_acl(project_name, logstore_name, offset, size)
def create_shipper(self, project_name, logstore_name, shipper_name, shipper_type, shipper_config):
    """ Create an odps/oss shipper.
    For every type only one shipper is allowed.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type shipper_name: string
    :param shipper_name: the shipper name
    :type shipper_type: string
    :param shipper_type: only support "odps" or "oss"
    :type shipper_config: OssShipperConfig or OdpsShipperConfig
    :param shipper_config: the detail shipper config, must be OssShipperConfig or OdpsShipperConfig type
    :return: CreateShipperResponse
    :raise: LogException
    """
    resource = "/logstores/%s/shipper" % logstore_name
    body = json.dumps({
        "shipperName": shipper_name,
        "targetType": shipper_type,
        "targetConfiguration": shipper_config.to_json(),
    })
    headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(body)),
    }
    (resp, headers) = self._send("POST", project_name, body, resource, {}, headers)
    return CreateShipperResponse(headers)
def update_shipper(self, project_name, logstore_name, shipper_name, shipper_type, shipper_config):
    """ Update an odps/oss shipper.
    For every type only one shipper is allowed.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type shipper_name: string
    :param shipper_name: the shipper name
    :type shipper_type: string
    :param shipper_type: only support "odps" or "oss", the type must be same with the original shipper
    :type shipper_config: OssShipperConfig or OdpsShipperConfig
    :param shipper_config: the detail shipper config, must be OssShipperConfig or OdpsShipperConfig type
    :return: UpdateShipperResponse
    :raise: LogException
    """
    resource = "/logstores/%s/shipper/%s" % (logstore_name, shipper_name)
    body = json.dumps({
        "shipperName": shipper_name,
        "targetType": shipper_type,
        "targetConfiguration": shipper_config.to_json(),
    })
    headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(body)),
    }
    (resp, headers) = self._send("PUT", project_name, body, resource, {}, headers)
    return UpdateShipperResponse(headers)
def delete_shipper(self, project_name, logstore_name, shipper_name):
    """ Delete an odps/oss shipper.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type shipper_name: string
    :param shipper_name: the shipper name
    :return: DeleteShipperResponse
    :raise: LogException
    """
    resource = "/logstores/%s/shipper/%s" % (logstore_name, shipper_name)
    (resp, header) = self._send("DELETE", project_name, None, resource, {}, {})
    return DeleteShipperResponse(header)
def get_shipper_config(self, project_name, logstore_name, shipper_name):
    """ Get the config of an odps/oss shipper.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type shipper_name: string
    :param shipper_name: the shipper name
    :return: GetShipperConfigResponse
    :raise: LogException
    """
    resource = "/logstores/%s/shipper/%s" % (logstore_name, shipper_name)
    (resp, header) = self._send("GET", project_name, None, resource, {}, {})
    return GetShipperConfigResponse(resp, header)
def list_shipper(self, project_name, logstore_name):
    """ List the odps/oss shippers of a logstore.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :return: ListShipperResponse
    :raise: LogException
    """
    resource = "/logstores/%s/shipper" % logstore_name
    (resp, header) = self._send("GET", project_name, None, resource, {}, {})
    return ListShipperResponse(resp, header)
def get_shipper_tasks(self, project_name, logstore_name, shipper_name, start_time, end_time, status_type='', offset=0, size=100):
    """ Get odps/oss shipper tasks in a certain time range.
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type shipper_name: string
    :param shipper_name: the shipper name
    :type start_time: int
    :param start_time: the start timestamp
    :type end_time: int
    :param end_time: the end timestamp
    :type status_type: string
    :param status_type: one of ['', 'fail', 'success', 'running']; '' returns tasks of every status
    :type offset: int
    :param offset: the begin task offset
    :type size: int
    :param size: the needed tasks count
    :return: GetShipperTasksResponse
    :raise: LogException
    """
    params = {
        "from": str(int(start_time)),
        "to": str(int(end_time)),
        "status": status_type,
        "offset": str(int(offset)),
        "size": str(int(size)),
    }
    resource = "/logstores/%s/shipper/%s/tasks" % (logstore_name, shipper_name)
    (resp, header) = self._send("GET", project_name, None, resource, params, {})
    return GetShipperTasksResponse(resp, header)
def retry_shipper_tasks(self, project_name, logstore_name, shipper_name, task_list) :
""" retry failed tasks , | |
angle = WritableAngle(top.angles[angle_idx])
angle_spring_constants1.append(angle.spring_constant)
for proper_idx in alchemizer._exclusive_propers:
proper = WritableProper(top.propers[proper_idx])
proper_constants1.append(proper.constant)
for improper_idx in alchemizer._exclusive_impropers:
improper = WritableImproper(top.impropers[improper_idx])
improper_constants1.append(improper.constant)
top = alchemizer.get_alchemical_topology(fep_lambda=0.0,
coul_lambda=0.2)
sigmas2 = list()
epsilons2 = list()
SASA_radii2 = list()
charges2 = list()
bond_spring_constants2 = list()
angle_spring_constants2 = list()
proper_constants2 = list()
improper_constants2 = list()
for atom_idx in alchemizer._exclusive_atoms:
atom = WritableAtom(top.atoms[atom_idx])
sigmas2.append(atom.sigma)
epsilons2.append(atom.epsilon)
SASA_radii2.append(atom.SASA_radius)
charges2.append(atom.charge)
for bond_idx in alchemizer._exclusive_bonds:
bond = WritableBond(top.bonds[bond_idx])
bond_spring_constants2.append(bond.spring_constant)
for angle_idx in alchemizer._exclusive_angles:
angle = WritableAngle(top.angles[angle_idx])
angle_spring_constants2.append(angle.spring_constant)
for proper_idx in alchemizer._exclusive_propers:
proper = WritableProper(top.propers[proper_idx])
proper_constants2.append(proper.constant)
for improper_idx in alchemizer._exclusive_impropers:
improper = WritableImproper(top.impropers[improper_idx])
improper_constants2.append(improper.constant)
for sigma1, sigma2 in zip(sigmas1, sigmas2):
assert sigma2 - sigma1 < 1e-5, \
'Unexpected ratio between sigmas'
for epsilon1, epsilon2 in zip(epsilons1, epsilons2):
assert epsilon2 - epsilon1 < 1e-5, \
'Unexpected ratio between epsilons'
for SASA_radius1, SASA_radius2 in zip(SASA_radii1, SASA_radii2):
assert SASA_radius2 - SASA_radius1 < 1e-5, \
'Unexpected ratio between SASA radii'
for charge1, charge2 in zip(charges1, charges2):
assert (charge2 / charge1) - (1 - 0.2) < 1e-5, \
'Unexpected ratio between charges'
for bond_sc1, bond_sc2 in zip(bond_spring_constants1,
bond_spring_constants2):
assert bond_sc2 - bond_sc1 < 1e-5, \
'Unexpected ratio between bond spring constants'
for angle_sc1, angle_sc2 in zip(angle_spring_constants1,
angle_spring_constants2):
assert angle_sc2 - angle_sc1 < 1e-5, \
'Unexpected ratio between angle spring constants'
for proper_c1, proper_c2 in zip(proper_constants1,
proper_constants2):
assert proper_c2 - proper_c1 < 1e-5, \
'Unexpected ratio between proper constants'
for improper_c1, improper_c2 in zip(improper_constants1,
improper_constants2):
assert improper_c2 - improper_c1 < 1e-5, \
'Unexpected ratio between improper constants'
top = alchemizer.get_alchemical_topology(fep_lambda=0.0,
coul_lambda=1.0)
sigmas1 = list()
epsilons1 = list()
SASA_radii1 = list()
charges1 = list()
bond_spring_constants1 = list()
angle_spring_constants1 = list()
proper_constants1 = list()
improper_constants1 = list()
for atom_idx in alchemizer._non_native_atoms:
atom = WritableAtom(top.atoms[atom_idx])
sigmas1.append(atom.sigma)
epsilons1.append(atom.epsilon)
SASA_radii1.append(atom.SASA_radius)
charges1.append(atom.charge)
for bond_idx in alchemizer._non_native_bonds:
bond = WritableBond(top.bonds[bond_idx])
bond_spring_constants1.append(bond.spring_constant)
for angle_idx in alchemizer._non_native_angles:
angle = WritableAngle(top.angles[angle_idx])
angle_spring_constants1.append(angle.spring_constant)
for proper_idx in alchemizer._non_native_propers:
proper = WritableProper(top.propers[proper_idx])
proper_constants1.append(proper.constant)
for improper_idx in alchemizer._non_native_impropers:
improper = WritableImproper(top.impropers[improper_idx])
improper_constants1.append(improper.constant)
top = alchemizer.get_alchemical_topology(fep_lambda=0.0,
coul_lambda=0.5)
sigmas2 = list()
epsilons2 = list()
SASA_radii2 = list()
charges2 = list()
bond_spring_constants2 = list()
angle_spring_constants2 = list()
proper_constants2 = list()
improper_constants2 = list()
for atom_idx in alchemizer._non_native_atoms:
atom = WritableAtom(top.atoms[atom_idx])
sigmas2.append(atom.sigma)
epsilons2.append(atom.epsilon)
SASA_radii2.append(atom.SASA_radius)
charges2.append(atom.charge)
for bond_idx in alchemizer._non_native_bonds:
bond = WritableBond(top.bonds[bond_idx])
bond_spring_constants2.append(bond.spring_constant)
for angle_idx in alchemizer._non_native_angles:
angle = WritableAngle(top.angles[angle_idx])
angle_spring_constants2.append(angle.spring_constant)
for proper_idx in alchemizer._non_native_propers:
proper = WritableProper(top.propers[proper_idx])
proper_constants2.append(proper.constant)
for improper_idx in alchemizer._non_native_impropers:
improper = WritableImproper(top.impropers[improper_idx])
improper_constants2.append(improper.constant)
for sigma1, sigma2 in zip(sigmas1, sigmas2):
assert sigma2 - sigma1 < 1e-5, \
'Unexpected ratio between sigmas'
for epsilon1, epsilon2 in zip(epsilons1, epsilons2):
assert epsilon2 - epsilon1 < 1e-5, \
'Unexpected ratio between epsilons'
for SASA_radius1, SASA_radius2 in zip(SASA_radii1, SASA_radii2):
assert SASA_radius2 - SASA_radius1 < 1e-5, \
'Unexpected ratio between SASA radii'
for charge1, charge2 in zip(charges1, charges2):
assert (charge2 / charge1) - 0.8 < 1e-5, \
'Unexpected ratio between charges'
for bond_sc1, bond_sc2 in zip(bond_spring_constants1,
bond_spring_constants2):
assert bond_sc2 - bond_sc1 < 1e-5, \
'Unexpected ratio between bond spring constants'
for angle_sc1, angle_sc2 in zip(angle_spring_constants1,
angle_spring_constants2):
assert angle_sc2 - angle_sc1 < 1e-5, \
'Unexpected ratio between angle spring constants'
for proper_c1, proper_c2 in zip(proper_constants1,
proper_constants2):
assert proper_c2 - proper_c1 < 1e-5, \
'Unexpected ratio between proper constants'
for improper_c1, improper_c2 in zip(improper_constants1,
improper_constants2):
assert improper_c2 - improper_c1 < 1e-5, \
'Unexpected ratio between improper constants'
top = alchemizer.get_alchemical_topology(fep_lambda=0.0,
coul_lambda=0.0)
sigmas1 = list()
epsilons1 = list()
SASA_radii1 = list()
charges1 = list()
bond_spring_constants1 = list()
angle_spring_constants1 = list()
proper_constants1 = list()
improper_constants1 = list()
for atom_idx in range(0, len(top.atoms)):
if (atom_idx not in alchemizer._exclusive_atoms and
atom_idx not in alchemizer._non_native_atoms):
atom = WritableAtom(top.atoms[atom_idx])
sigmas1.append(atom.sigma)
epsilons1.append(atom.epsilon)
SASA_radii1.append(atom.SASA_radius)
charges1.append(atom.charge)
for bond_idx in range(0, len(top.bonds)):
if (bond_idx not in alchemizer._exclusive_bonds and
bond_idx not in alchemizer._non_native_bonds):
bond = WritableBond(top.bonds[bond_idx])
bond_spring_constants1.append(bond.spring_constant)
for angle_idx in range(0, len(top.angles)):
if (angle_idx not in alchemizer._exclusive_angles and
angle_idx not in alchemizer._non_native_angles):
angle = WritableAngle(top.angles[angle_idx])
angle_spring_constants1.append(angle.spring_constant)
for proper_idx in range(0, len(top.propers)):
if (proper_idx not in alchemizer._exclusive_propers and
proper_idx not in alchemizer._non_native_propers):
proper = WritableProper(top.propers[proper_idx])
proper_constants1.append(proper.constant)
for improper_idx in range(0, len(top.impropers)):
if (improper_idx not in alchemizer._exclusive_impropers and
improper_idx not in alchemizer._non_native_impropers):
improper = WritableImproper(top.impropers[improper_idx])
improper_constants1.append(improper.constant)
top = alchemizer.get_alchemical_topology(fep_lambda=0.0,
coul_lambda=0.5)
sigmas2 = list()
epsilons2 = list()
SASA_radii2 = list()
charges2 = list()
bond_spring_constants2 = list()
angle_spring_constants2 = list()
proper_constants2 = list()
improper_constants2 = list()
for atom_idx in range(0, len(top.atoms)):
if (atom_idx not in alchemizer._exclusive_atoms and
atom_idx not in alchemizer._non_native_atoms):
atom = WritableAtom(top.atoms[atom_idx])
sigmas2.append(atom.sigma)
epsilons2.append(atom.epsilon)
SASA_radii2.append(atom.SASA_radius)
charges2.append(atom.charge)
for bond_idx in range(0, len(top.bonds)):
if (bond_idx not in alchemizer._exclusive_bonds and
bond_idx not in alchemizer._non_native_bonds):
bond = WritableBond(top.bonds[bond_idx])
bond_spring_constants2.append(bond.spring_constant)
for angle_idx in range(0, len(top.angles)):
if (angle_idx not in alchemizer._exclusive_angles and
angle_idx not in alchemizer._non_native_angles):
angle = WritableAngle(top.angles[angle_idx])
angle_spring_constants2.append(angle.spring_constant)
for proper_idx in range(0, len(top.propers)):
if (proper_idx not in alchemizer._exclusive_propers and
proper_idx not in alchemizer._non_native_propers):
proper = WritableProper(top.propers[proper_idx])
proper_constants2.append(proper.constant)
for improper_idx in range(0, len(top.impropers)):
if (improper_idx not in alchemizer._exclusive_impropers and
improper_idx not in alchemizer._non_native_impropers):
improper = WritableImproper(top.impropers[improper_idx])
improper_constants2.append(improper.constant)
top = alchemizer.get_alchemical_topology(fep_lambda=0.0,
coul_lambda=1.0)
sigmas3 = list()
epsilons3 = list()
SASA_radii3 = list()
charges3 = list()
bond_spring_constants3 = list()
angle_spring_constants3 = list()
proper_constants3 = list()
improper_constants3 = list()
for atom_idx in range(0, len(top.atoms)):
if (atom_idx not in alchemizer._exclusive_atoms and
atom_idx not in alchemizer._non_native_atoms):
atom = WritableAtom(top.atoms[atom_idx])
sigmas3.append(atom.sigma)
epsilons3.append(atom.epsilon)
SASA_radii3.append(atom.SASA_radius)
charges3.append(atom.charge)
for bond_idx in range(0, len(top.bonds)):
if (bond_idx not in alchemizer._exclusive_bonds and
bond_idx not in alchemizer._non_native_bonds):
bond = WritableBond(top.bonds[bond_idx])
bond_spring_constants3.append(bond.spring_constant)
for angle_idx in range(0, len(top.angles)):
if (angle_idx not in alchemizer._exclusive_angles and
angle_idx not in alchemizer._non_native_angles):
angle = WritableAngle(top.angles[angle_idx])
angle_spring_constants3.append(angle.spring_constant)
for proper_idx in range(0, len(top.propers)):
if (proper_idx not in alchemizer._exclusive_propers and
proper_idx not in alchemizer._non_native_propers):
proper = WritableProper(top.propers[proper_idx])
proper_constants3.append(proper.constant)
for improper_idx in range(0, len(top.impropers)):
if (improper_idx not in alchemizer._exclusive_impropers and
improper_idx not in alchemizer._non_native_impropers):
improper = WritableImproper(top.impropers[improper_idx])
improper_constants3.append(improper.constant)
for sigma1, sigma2, sigma3 in zip(sigmas1, sigmas2, sigmas3):
assert sigma1 - sigma2 < 1e-5, \
'Unexpected ratio between sigmas'
assert sigma1 - sigma3 < 1e-5, \
'Unexpected ratio between sigmas'
for epsilon1, epsilon2, epsilon3 in zip(epsilons1, epsilons2, epsilons3):
assert epsilon1 - epsilon2 < 1e-5, \
'Unexpected ratio between epsilons'
assert epsilon1 - epsilon3 < 1e-5, \
'Unexpected ratio between epsilons'
for radius1, radius2, radius3 in zip(SASA_radii1, SASA_radii2,
SASA_radii3):
assert radius1 - radius2 < 1e-5, \
'Unexpected ratio between SASA radii'
assert radius1 - radius3 < 1e-5, \
'Unexpected ratio between SASA radii'
for charge1, charge2, charge3 in zip(charges1, charges2, charges3):
assert charge1 / charge2 - charge2 / charge3 < 1e-5, \
'Unexpected ratio between charges'
assert abs(charge1 - charge2) > 1e-5, \
'Unexpected invariant charges'
for bond_sc1, bond_sc2, bond_sc3 in zip(bond_spring_constants1,
bond_spring_constants2,
bond_spring_constants3):
assert bond_sc1 - bond_sc2 < 1e-5, \
'Unexpected ratio between bond spring constants'
assert bond_sc1 - bond_sc3 < 1e-5, \
'Unexpected ratio between bond spring constants'
for angle_sc1, angle_sc2, angle_sc3 in zip(angle_spring_constants1,
angle_spring_constants2,
angle_spring_constants3):
assert angle_sc1 - angle_sc2 < 1e-5, \
'Unexpected ratio between angle spring constants'
assert angle_sc1 - angle_sc3 < 1e-5, \
'Unexpected ratio between angle spring constants'
for proper_c1, proper_c2, proper_c3 in zip(proper_constants1,
proper_constants2,
proper_constants3):
assert proper_c1 - proper_c2 < 1e-5, \
'Unexpected ratio between proper constants'
assert proper_c1 - proper_c3 < 1e-5, \
'Unexpected ratio between proper constants'
for improper_c1, improper_c2, improper_c3 in zip(improper_constants1,
improper_constants2,
improper_constants3):
assert improper_c1 - improper_c2 < 1e-5, \
'Unexpected ratio between improper constants'
assert improper_c1 - improper_c3 < 1e-5, \
'Unexpected ratio between improper constants'
def test_coul1_lambda(self):
"""
It validates the effects of coul1 lambda on atom parameters.
"""
from peleffy.topology import Alchemizer
from peleffy.template.impact import (WritableAtom, WritableBond,
WritableAngle, WritableProper,
WritableImproper)
mol1, mol2, top1, top2 = \
generate_molecules_and_topologies_from_smiles('C=C',
'C(Cl)(Cl)(Cl)')
alchemizer = Alchemizer(top1, top2)
top = alchemizer.get_alchemical_topology(fep_lambda=0,
coul1_lambda=0)
sigmas1 = list()
epsilons1 = list()
SASA_radii1 = list()
charges1 = list()
bond_spring_constants1 = list()
angle_spring_constants1 = list()
proper_constants1 = list()
improper_constants1 = list()
for atom_idx in alchemizer._exclusive_atoms:
atom = WritableAtom(top.atoms[atom_idx])
sigmas1.append(atom.sigma)
epsilons1.append(atom.epsilon)
SASA_radii1.append(atom.SASA_radius)
charges1.append(atom.charge)
for bond_idx in alchemizer._exclusive_bonds:
bond = WritableBond(top.bonds[bond_idx])
bond_spring_constants1.append(bond.spring_constant)
for angle_idx in alchemizer._exclusive_angles:
angle = WritableAngle(top.angles[angle_idx])
angle_spring_constants1.append(angle.spring_constant)
for proper_idx in alchemizer._exclusive_propers:
proper = WritableProper(top.propers[proper_idx])
proper_constants1.append(proper.constant)
for improper_idx in alchemizer._exclusive_impropers:
improper = WritableImproper(top.impropers[improper_idx])
improper_constants1.append(improper.constant)
top = alchemizer.get_alchemical_topology(fep_lambda=0.0,
coul1_lambda=0.2)
sigmas2 = list()
epsilons2 = list()
SASA_radii2 | |
import numpy as np
from numpy.linalg import inv
from geomdl import NURBS
from geomdl import multi
from geomdl import construct
from geomdl import convert
from geomdl.visualization import VisVTK as vis
from geomdl.visualization import VisMpL
from geomdl import exchange
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# CTRL_knot_list = [[0.0, 0.0], [0.0, 1.0 / 3.0], [0.0, 2.0 / 3.0], [0.0, 1.0],
# [1.0 / 3.0, 0.0], [1.0 / 3.0, 1.0 / 3.0], [1.0 / 3.0, 2.0 / 3.0], [1.0 / 3.0, 1.0],
# [2.0 / 3.0, 0.0], [2.0 / 3.0, 1.0 / 3.0], [2.0 / 3.0, 2.0 / 3.0], [2.0 / 3.0, 1.0],
# [1.0, 0.0], [1.0, 1.0 / 3.0], [1.0, 2.0 / 3.0], [1.0, 1.0]]
# CNTRL_Knot_Side = [[0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [0.0, 1.0],
# [0.0, 1.0], [0.0, 1.0], [1.0, 1.0], [1.0, 1.0],
# [1.0, 1.0], [1.0, 1.0], [1.0, 0.0], [1.0, 0.0],
# [1.0, 0.0], [1.0, 0.0], [0.0, 0.0], [0.0, 0.0]]
# tri_list = [[0, 1, 4], [5, 4, 1], [1, 2, 5], [6, 5, 2], [2, 3, 6], [7, 6, 3],
# [4, 5, 8], [9, 8, 5], [5, 6, 9], [10, 9, 6], [6, 7, 10], [11, 10, 7],
# [8, 9, 12], [13, 12, 9], [9, 10, 13], [14, 13, 10], [10, 11, 14], [15, 14, 11]]
# Triangle index triples over the 16 boundary control points of a patch.
# NOTE(review): the grouping suggests four triangles per patch side --
# confirm against the commented-out CNTRL_Knot_Side table above.
tri_list_side = [[0, 2, 1], [0, 3, 1], [3, 2, 1], [3, 2, 0],
                 [3, 11, 7], [3, 15, 7], [15, 11, 7], [15, 11, 3],
                 [15, 13, 14], [15, 12, 14], [12, 13, 14], [12, 13, 15],
                 [12, 4, 8], [12, 0, 8], [0, 4, 8], [0, 4, 12]]
def bound_box(cntrl_pt):
    """Return the axis-aligned bounding box of a set of control points.

    Parameters
    ----------
    cntrl_pt : ndarray, shape=(n, 3)
        Control-point coordinates, one point per row.

    Returns
    -------
    bmin, bmax : list of 3 floats each
        Per-axis minimum and maximum coordinates.
    """
    # The previous version pre-allocated `bmin`/`bmax` with np.empty and then
    # immediately rebound the names to lists; those dead allocations are removed.
    bmin = [min(cntrl_pt[:, 0]), min(cntrl_pt[:, 1]), min(cntrl_pt[:, 2])]
    bmax = [max(cntrl_pt[:, 0]), max(cntrl_pt[:, 1]), max(cntrl_pt[:, 2])]
    return bmin, bmax
def bound_box_simul(ctrl_pts):
    """Return per-instance axis-aligned bounding boxes.

    Parameters
    ----------
    ctrl_pts : ndarray, shape=(k, n, 3+)
        k instances of n control points; only the first three components
        (x, y, z) of the last axis are used, matching the original code.

    Returns
    -------
    b_min, b_max : ndarray, shape=(k, 3), dtype=float32
        Per-instance minimum and maximum coordinates.
    """
    # Vectorized replacement for the per-instance Python loop; the unused
    # local `e = 4` and the dead `pass` of the old version are dropped.
    xyz = ctrl_pts[:, :, :3]
    b_min = xyz.min(axis=1).astype(np.float32)
    b_max = xyz.max(axis=1).astype(np.float32)
    return b_min, b_max
def padding_simul(b_min, b_max, vox_count):
    """Compute a cubic voxel-grid origin and voxel size for each instance.

    The union of all surface bounding boxes of an instance is padded
    symmetrically on its two shorter axes so it becomes a cube (isotropic
    voxels) and is then divided into `vox_count` voxels per axis.

    Parameters
    ----------
    b_min, b_max : ndarray, shape=(k, m, 3)
        Per-instance, per-surface bounding-box corners.
    vox_count : int
        Number of voxels along each axis of the grid.

    Returns
    -------
    origin : ndarray, shape=(k, 3), dtype=float32
        Center of the first voxel of each instance's grid (half a voxel
        in from the padded minimum corner).
    vox_size : ndarray, shape=(k,), dtype=float32
        Edge length of one voxel for each instance.
    """
    origin = np.empty([b_max.shape[0], 3], dtype=np.float32)
    vox_size = np.empty([b_max.shape[0]], dtype=np.float32)
    for i in range(b_min.shape[0]):
        # Global bounding box over all surfaces of instance i.
        g_max = [max(b_max[i, :, 0]), max(b_max[i, :, 1]), max(b_max[i, :, 2])]
        g_min = [min(b_min[i, :, 0]), min(b_min[i, :, 1]), min(b_min[i, :, 2])]
        extents = [g_max[0] - g_min[0], g_max[1] - g_min[1], g_max[2] - g_min[2]]
        maj_axis = max(extents)
        vox_size[i] = maj_axis / vox_count
        # Pad each shorter axis half on each side so the box becomes a cube.
        # (Replaces three copy-pasted if-blocks; the dead trailing `pass`
        # of the old version is gone.)
        for axis in range(3):
            pad = maj_axis - extents[axis]
            if pad != 0:
                g_max[axis] += pad / 2
                g_min[axis] -= pad / 2
        origin[i] = [g_min[0] + (vox_size[i] / 2),
                     g_min[1] + (vox_size[i] / 2),
                     g_min[2] + (vox_size[i] / 2)]
    return origin, vox_size
pass
def voxel_assign_single(voxels_all, val, direc, i, t_count, vox_count):
    """Record surface label `val` in the first free (or matching) layer.

    `voxels_all` stacks several voxel grids.  The label is written into the
    first grid whose target cell is still 0; if some layer already holds
    `val` at that cell the hit is a duplicate and nothing is written.

    The flat index `i` addresses a 2D slice (row = i // vox_count,
    col = i % vox_count); `direc` selects which axis `t_count` runs along,
    giving three different index orderings for the same cell.
    """
    row, col = i // vox_count, i % vox_count
    if direc == 0:
        idx = (row, col, t_count)
    elif direc == 1:
        idx = (col, t_count, row)
    elif direc == 2:
        idx = (t_count, row, col)
    else:
        return
    x, y, z = idx
    for layer in range(voxels_all.shape[0]):
        cell = voxels_all[layer][x][y][z]
        if cell == 0:
            voxels_all[layer][x][y][z] = val
            break
        if cell == val:
            break
pass
pass
def nr_inter_single(b_max, b_min, vox_count, vox_size, origin, dir_1, dir_2, ray_d,
                    tri_list_3, ctrl_pts, knot_list_3, vox_all, direc, arr_idx):
    """Cast one ray per voxel column and record surface intersections.

    For every cell of the vox_count x vox_count slice, a ray starting on the
    grid plane (spanned by `dir_1`/`dir_2`) is shot along `ray_d`.  Surfaces
    whose bounding box the ray hits are tessellated via `tri_list_3`; each
    ray/triangle hit seeds a Newton refinement (`newton_method`) and, on
    convergence, the label from `arr_idx` is written into `vox_all` through
    `voxel_assign_single`.  Returns the (mutated) `vox_all`.
    """
    tri = np.empty([3, 3], dtype=np.float32)
    for j in range(0, vox_count * vox_count):
        # Ray origin for grid cell (j // vox_count, j % vox_count).
        ray = [origin[0] + ((j // vox_count) * vox_size * dir_1[0]) + ((j % vox_count) * vox_size * dir_2[0]),
               origin[1] + ((j // vox_count) * vox_size * dir_1[1]) + ((j % vox_count) * vox_size * dir_2[1]),
               origin[2] + ((j // vox_count) * vox_size * dir_1[2]) + ((j % vox_count) * vox_size * dir_2[2])]
        for k in range(0, b_max.shape[0]):
            if ray_box_inter(b_min[k], b_max[k], ray, ray_d):
                for t in range(0, tri_list_3.shape[0]):
                    # BUG FIX: the control-point set was bound to `TRI_ptS`
                    # but read back as the undefined name `tri_pts`
                    # (NameError at runtime); use a single consistent name.
                    tri_pts = ctrl_pts[k // 6][k % 6]
                    tri[0] = [tri_pts[tri_list_3[t][0]][0], tri_pts[tri_list_3[t][0]][1], tri_pts[tri_list_3[t][0]][2]]
                    tri[1] = [tri_pts[tri_list_3[t][1]][0], tri_pts[tri_list_3[t][1]][1], tri_pts[tri_list_3[t][1]][2]]
                    tri[2] = [tri_pts[tri_list_3[t][2]][0], tri_pts[tri_list_3[t][2]][1], tri_pts[tri_list_3[t][2]][2]]
                    # Ray/triangle intersection: solve A @ [t, b1, b2]^T = B
                    # for the ray parameter and barycentric coordinates.
                    A = np.array([[-ray_d[0], tri[2][0] - tri[0][0], tri[1][0] - tri[0][0]],
                                  [-ray_d[1], tri[2][1] - tri[0][1], tri[1][1] - tri[0][1]],
                                  [-ray_d[2], tri[2][2] - tri[0][2], tri[1][2] - tri[0][2]]])
                    B = np.array([[ray[0] - tri[0][0]], [ray[1] - tri[0][1]], [ray[2] - tri[0][2]]])
                    # np.linalg.solve is faster and numerically safer than
                    # explicitly inverting A (raises LinAlgError either way
                    # for a degenerate triangle).
                    param = np.linalg.solve(A, B)
                    if param[1] >= 0.0 and param[2] >= 0.0 and param[1] + param[2] <= 1.0:
                        # Seed (u, v) for Newton from the knot of the hit
                        # triangle; the +/- 0.33 offset nudges the seed into
                        # the triangle's interior (orientation alternates
                        # with triangle parity).
                        knot_inter = [knot_list_3[tri_list_3[t][0]][0], knot_list_3[tri_list_3[t][0]][1]]
                        t_inter = param[0]
                        if t % 2 == 0:
                            u_inter = knot_inter[0] + (param[1] * 0.33)
                            v_inter = knot_inter[1] + (param[2] * 0.33)
                        else:
                            u_inter = knot_inter[0] - (param[1] * 0.33)
                            v_inter = knot_inter[1] - (param[2] * 0.33)
                        [bol, t_count] = newton_method(t_inter, u_inter, v_inter, ray, ray_d, vox_size, tri_pts, 3)
                        if bol:
                            val = int(arr_idx[j])
                            voxel_assign_single(vox_all, val, direc, j, t_count, vox_count)
    return vox_all
def post_process(voxels_all, voxel_master, vox_x, vox_y, vox_z, direc, vox_1, vox_2):
    """Fill `voxel_master` between paired surface crossings along one axis.

    Scans every column of the grid along axis `direc` (0, 1 or 2).  At each
    voxel where the first layer of `voxels_all` is non-zero (a surface
    crossing), the labels of all layers are gathered and compared, via
    `list_compare`, with the labels seen at the previous crossing.  When the
    two crossings share a non-zero label, every voxel between them
    (inclusive) is filled with that label -- i.e. the interior between an
    entry and an exit through the same surface is marked as inside it.

    `vox_1 * vox_2` is the number of columns scanned; `vox_x`/`vox_y`/`vox_z`
    are the grid extents used by the branch for the chosen direction.
    Returns the mutated `voxel_master`.
    """
    for i in range(0, vox_1 * vox_2):
        # inout_vox[0]/[1] hold the previous/current crossing index.  On the
        # first crossing vox_list_1 is still all zeros, so list_compare
        # returns -1 and the uninitialized inout_vox[0] is never read.
        inout_vox = np.empty(2, dtype=np.uint8)
        vox_list_1 = np.zeros(5, dtype=np.float32)
        vox_list_2 = np.zeros(5, dtype=np.float32)
        if direc == 0:
            # Column fixed at (i // vox_y, i % vox_y); scan along z.
            for j in range(0, vox_z):
                if voxels_all[0][i // vox_y][i % vox_y][j] != 0:
                    for inst in range(0, voxels_all.shape[0]):
                        vox_list_2[inst] = voxels_all[inst][i // vox_y][i % vox_y][j]
                    elem = list_compare(voxels_all.shape[0], vox_list_1, vox_list_2)
                    if elem != -1:
                        inout_vox[1] = j
                        # Fill the span between the matched crossings.
                        for vox in range(inout_vox[0], inout_vox[1] + 1):
                            voxel_master[i // vox_y][i % vox_y][vox] = elem
                    # Current crossing becomes the "previous" one.
                    for inst in range(0, voxels_all.shape[0]):
                        vox_list_1[inst] = voxels_all[inst][i // vox_y][i % vox_y][j]
                    inout_vox[0] = j
        elif direc == 1:
            # Same bookkeeping with index order (i % vox_x, j, i // vox_x).
            for j in range(0, vox_y):
                if voxels_all[0][i % vox_x][j][i // vox_x] != 0:
                    for inst in range(0, voxels_all.shape[0]):
                        vox_list_2[inst] = voxels_all[inst][i % vox_x][j][i // vox_x]
                    elem = list_compare(voxels_all.shape[0], vox_list_1, vox_list_2)
                    if elem != -1:
                        inout_vox[1] = j
                        for vox in range(inout_vox[0], inout_vox[1] + 1):
                            voxel_master[i % vox_x][vox][i // vox_x] = elem
                    for inst in range(0, voxels_all.shape[0]):
                        vox_list_1[inst] = voxels_all[inst][i % vox_x][j][i // vox_x]
                    inout_vox[0] = j
        elif direc == 2:
            # Same bookkeeping with index order (j, i // vox_z, i % vox_z).
            for j in range(0, vox_x):
                if voxels_all[0][j][i // vox_z][i % vox_z] != 0:
                    for inst in range(0, voxels_all.shape[0]):
                        vox_list_2[inst] = voxels_all[inst][j][i // vox_z][i % vox_z]
                    elem = list_compare(voxels_all.shape[0], vox_list_1, vox_list_2)
                    if elem != -1:
                        inout_vox[1] = j
                        for vox in range(inout_vox[0], inout_vox[1] + 1):
                            voxel_master[vox][i // vox_z][i % vox_z] = elem
                    for inst in range(0, voxels_all.shape[0]):
                        vox_list_1[inst] = voxels_all[inst][j][i // vox_z][i % vox_z]
                    inout_vox[0] = j
    return voxel_master
pass
def list_compare(depth, vox_list_1, vox_list_2):
    """Return a label present (non-zero) in both lists, or -1 if none.

    Only the first `depth` entries of each list are considered.  When
    several labels are shared, the common label with the highest index in
    `vox_list_1` wins, matching the original scan order.
    """
    elem = -1
    for a in vox_list_1[:depth]:
        if a == 0:
            continue
        for b in vox_list_2[:depth]:
            if b != 0 and a == b:
                elem = a
    return elem
pass
# def gauss_val(vox_master, vox_count, origin, vox_size, stress): # for two chamber
def gauss_val(vox_master, vox_count, origin, vox_size, ctrl_pts, stress):  # for Aorta
    """Replace each occupied voxel's label with its distance-based value.

    Every non-zero cell holds a 1-based surface label; it is swapped for
    the value computed by `min_dist_vox` against that surface's control
    points.  `stress` is accepted for interface compatibility but is not
    used in this variant.  Returns the mutated `vox_master`.
    """
    for flat in range(vox_count * vox_count):
        row = flat // vox_count
        col = flat % vox_count
        for depth in range(vox_count):
            label = vox_master[row][col][depth]
            if label != 0:
                surf = int(label) - 1
                vox_master[row][col][depth] = min_dist_vox(depth, origin, vox_count,
                                                           vox_size, flat, ctrl_pts[surf])
    return vox_master
pass
def newton_single(t, u, v, ray, direc, cps, degree):
non_conv = 0
iter_count = 0
t_count = 0
knot_u = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0])
knot_v = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0])
s_1 = surf_pt(u, v, cps, knot_u, knot_v, degree, degree)
p_1 = ray + (t * direc)
obj_func = np.abs(np.array([[s_1[0] - p_1[0]], [s_1[1] - p_1[1]], [s_1[2] - p_1[2]]]))
# obj_func = np.array([[s_1[0] - p_1[0]], [s_1[1] - p_1[1]], [s_1[2] - p_1[2]]])
dist = np.linalg.norm(s_1 - p_1)
print(dist)
while dist > 0.001:
deri = deri_surf(u, v, 1, cps, knot_u, knot_v, degree, degree)
jacob = np.array([[-direc[0], deri[1][0][0], deri[0][1][0]],
[-direc[1], deri[1][0][1], deri[0][1][1]],
[-direc[2], deri[1][0][2], deri[0][1][2]]])
opti_sub = np.matmul(inv(jacob), obj_func)
t -= opti_sub[0]
u -= opti_sub[1]
v -= opti_sub[2]
print(t, u, v)
if u < 0.0: u = np.random.random()
if | |
a instance of `wx.Menu`.
"""
menu = wx.Menu(style=wx.MENU_TEAROFF)
if PHOENIX: menu.AppendItem = menu.Append
mi = wx.MenuItem(menu, ID_SANDBOX_RGB, u'%s\t%s' %(_(u'&RGB'), RGBShortcut), _(u'Updates the current view colors: RGB'))
bmp = wx.Image(gIconDir + os.sep + 'rgb.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_SANDBOX_SANDBOX, u'%s\t%s' %(_(u'&Sandbox'), SandboxShortcut), _(u'A sandbox to play in: SANDBOX'))
bmp = wx.Image(gIconDir + os.sep + 'sandbox.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
return menu
def CreateMenu_Draw(self):
"""
Create the `Draw` top-level menu.
:return: a instance of `wx.Menu`.
"""
menu = wx.Menu(style=wx.MENU_TEAROFF)
if PHOENIX: menu.AppendItem = menu.Append
mi = wx.MenuItem(menu, ID_DRAW_CIRCLE, u'%s\t%s' %(_(u'&Circle'), CircleShortcut), _(u'Creates a circle: CIRCLE'))
bmp = wx.Image(gIconDir + os.sep + 'circle.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_DOLPHIN, u'%s\t%s' %(_(u'&Dolphin'), DolphinShortcut), _(u'Creates a dolphin: DOLPHIN'))
bmp = wx.Image(gIconDir + os.sep + 'dolphin.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_ELLIPSE, u'%s\t%s' %(_(u'&Ellipse'), EllipseShortcut), _(u'Creates a ellipse: ELLIPSE'))
bmp = wx.Image(gIconDir + os.sep + 'ellipse.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_HEART, u'%s\t%s' %(_(u'&Heart'), HeartShortcut), _(u'Creates a heart: HEART'))
bmp = wx.Image(gIconDir + os.sep + 'heart.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_LINE, u'%s\t%s' %(_(u'&Line'), LineShortcut), _(u'Creates straight line segments: LINE'))
bmp = wx.Image(gIconDir + os.sep + 'line.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_PATH, u'%s\t%s' %(_(u'&Path'), PathShortcut), _(u'Creates a 2D path: PATH'))
bmp = wx.Image(gIconDir + os.sep + 'path.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_POINT, u'%s\t%s' %(_(u'&Point'), PointShortcut), _(u'Creates multiple points: POINT'))
bmp = wx.Image(gIconDir + os.sep + 'point.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_POLYGON, u'%s\t%s' %(_(u'&Polygon'), PolygonShortcut), _(u'Creates a rectangular polygon: POLYGON'))
bmp = wx.Image(gIconDir + os.sep + 'polygon.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_POLYLINE, u'%s\t%s' %(_(u'&Polyline'), PolylineShortcut), _(u'Creates a 2D polyline: PLINE'))
bmp = wx.Image(gIconDir + os.sep + 'polyline.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_RECTANGLE, u'%s\t%s' %(_(u'&Rectangle'), RectangleShortcut), _(u'Creates a rectangular polyline: RECTANGLE'))
bmp = wx.Image(gIconDir + os.sep + 'rectangle.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_SINGLELINETEXT, u'%s\t%s' %(_(u'&Single Line Text'), SingleLineTextShortcut), _(u'Creates single-line text objects: TEXT'))
bmp = wx.Image(gIconDir + os.sep + 'singlelinetext.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_SNOWFLAKE, u'%s\t%s' %(_(u'&Snowflake'), SnowflakeShortcut), _(u'Creates a snowflake: SNOWFLAKE'))
bmp = wx.Image(gIconDir + os.sep + 'snowflake.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_DRAW_STAR, u'%s\t%s' %(_(u'&Star'), StarShortcut), _(u'Creates a star: STAR'))
bmp = wx.Image(gIconDir + os.sep + 'star.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
return menu
def CreateMenu_Settings(self):
"""
Create the `Settings` top-level menu.
:return: a instance of `wx.Menu`.
"""
menu = wx.Menu(style=wx.MENU_TEAROFF)
if PHOENIX: menu.AppendItem = menu.Append
mi = wx.MenuItem(menu, wx.ID_HELP, u'%s\t%s' %(_(u'&Settings'), SettingsShortcut), _(u'Configure settings specific to this project.'))
bmp = wx.Image(gIconDir + os.sep + 'settingsdialog.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
return menu
def CreateMenu_Window(self):
"""
Create the `Window` top-level menu.
:return: a instance of `wx.Menu`.
"""
menu = wx.Menu() # Window Menu isn't Tear-off able... why not # style=wx.MENU_TEAROFF
if PHOENIX: menu.AppendItem = menu.Append
mi = wx.MenuItem(menu, wx.ID_CLOSE, u'%s\t%s' %(_(u'&Close'), WindowCloseShortcut), _(u'Close the active window.'))
bmp = wx.Image(gIconDir + os.sep + 'windowclose.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, wx.ID_CLOSE_ALL, u'%s\t%s' %(_(u'Close &All'), WindowCloseAllShortcut), _(u'Close all the windows.'))
bmp = wx.Image(gIconDir + os.sep + 'windowcloseall.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
menu.AppendSeparator()
mi = wx.MenuItem(menu, ID_WINDOW_LAYOUT_CASCADE, u'%s\t%s' %(_(u'&Cascade'), WindowCascadeShortcut), _(u'Cascade the windows layout.'))
bmp = wx.Image(gIconDir + os.sep + 'windowcascade.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_WINDOW_LAYOUT_TILE, u'%s\t%s' %(_(u'&Tile'), WindowTileShortcut), _(u'Tile the windows layout.'))
bmp = wx.Image(gIconDir + os.sep + 'windowtile.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
menu.AppendSeparator()
mi = wx.MenuItem(menu, ID_WINDOW_NEXT, u'%s\t%s' %(_(u'&Next'), WindowNextShortcut), _(u'Move the focus to the next window.'))
bmp = wx.Image(gIconDir + os.sep + 'windownext.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_WINDOW_PREVIOUS, u'%s\t%s' %(_(u'&Previous'), WindowPreviousShortcut), _(u'Move the focus to the previous window.'))
bmp = wx.Image(gIconDir + os.sep + 'windowprevious.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
menu.AppendSeparator()
return menu
def CreateMenu_Help(self):
"""
Create the `Help` top-level menu.
:return: a instance of `wx.Menu`.
"""
menu = wx.Menu(style=wx.MENU_TEAROFF)
if PHOENIX: menu.AppendItem = menu.Append
mi = wx.MenuItem(menu, wx.ID_HELP, u'%s\t%s' %(_(u'&Help'), HelpShortcut), _(u'Displays help.'))
bmp = wx.Image(gIconDir + os.sep + 'help.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, ID_CHANGELOG, u'%s\t%s' %(_(u'&Changelog'), ChangelogShortcut), _(u'Describes new features in this product.'))
bmp = wx.Image(gIconDir + os.sep + 'changelog.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
menu.AppendSeparator()
mi = wx.MenuItem(menu, ID_TIPOFTHEDAY, u'%s\t%s' %(_(u'&Tip Of The Day'), TipOfTheDayShortcut), _(u'Displays a dialog with useful tips.'))
bmp = wx.Image(gIconDir + os.sep + 'tipoftheday.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
mi = wx.MenuItem(menu, wx.ID_ABOUT, u'%s\t%s' %(_(u'&About Embroidermodder'), AboutEmbroidermodderShortcut), _(u'Displays information about this product.'))
bmp = wx.Image(gIconDir + os.sep + 'about.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
menu.AppendSeparator()
menu.AppendSubMenu(self.CreateMenu_OnlineWeblinks(), _(u'Online Weblinks'))
return menu
def CreateMenu_OnlineWeblinks(self):
"""
Create the `Online Weblinks` menu.
:return: a instance of `wx.Menu`.
"""
menu = wx.Menu(style=wx.MENU_TEAROFF)
if PHOENIX: menu.AppendItem = menu.Append
mi = wx.MenuItem(menu, ID_ONLINE_WEBSITE, _(u'Visit Embroidermodder Online'), _(u'Opens the Embroidermodder website up in your default webbrowser.'))
bmp = wx.Image(gIconDir + os.sep + 'world.png', wx.BITMAP_TYPE_PNG).Scale(16, 16).ConvertToBitmap()
mi.SetBitmap(bmp)
menu.AppendItem(mi)
return menu
def OnOpen(self, event=None):
"""
Show the Open `wx.FileDialog`.
:param `event`: an `wx.MenuEvent`_ to be processed.
"""
print('CWD: %s' % os.getcwd())
# Create the dialog. In this case the current directory is forced as the starting
# directory for the dialog, and no default file name is forced. This can easilly
# be changed in your program. This is an 'open' dialog, and allows multitple
# file selections as well.
#
# Finally, if the directory is changed in the process of getting files, this
# dialog is set up to change the current working directory to the path chosen.
dlg = wx.FileDialog(
self, message=_(u'Choose a file...'),
defaultDir=os.getcwd(),
defaultFile='',
wildcard=WILDCARD_ALL,
style=wx.FD_OPEN | wx.FD_MULTIPLE |
wx.FD_CHANGE_DIR | wx.FD_FILE_MUST_EXIST |
wx.FD_PREVIEW
)
# Show the dialog and retrieve the user response. If it is the OK response,
# process the data.
if dlg.ShowModal() == wx.ID_OK:
# This returns a Python list of files that were selected.
paths = dlg.GetPaths()
print('You selected %d files:' % len(paths))
for path in paths:
print(' %s' % path)
# Compare this with the debug above; did we change working dirs?
print('CWD: %s' % os.getcwd())
# Destroy the dialog. Don't do this until you are done with it!
# BAD things can happen otherwise!
dlg.Destroy()
def OnSaveAs(self, event=None):
"""
Show the Save `wx.FileDialog`.
:param `event`: an `wx.MenuEvent`_ to be processed.
"""
print('CWD: %s' % os.getcwd())
# Create the dialog. In this case the current directory is forced as the starting
# directory for the dialog, and no default file name is forced. This can easilly
# be changed in your program. This is an 'save' dialog.
#
# Unlike the 'open dialog' example found elsewhere, this example does NOT
# force the current working directory to change if the user chooses a different
# directory than the one initially set.
dlg = wx.FileDialog(
self, message=_(u'Save file as ...'), defaultDir=os.getcwd(),
defaultFile='', wildcard=WILDCARD_ALL, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
)
# This sets the default filter that the user will initially see. Otherwise,
# the first filter in the list will be used by default.
dlg.SetFilterIndex(1)
# Show the dialog and retrieve the user response. If it is the OK response,
# process the data.
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
print('You selected "%s"' % path)
# Normally, at this point you would save your data using the file and path
# data that the user provided to you, but since we didn't actually start
# with any data to work with, that would be difficult.
#
# The code to do so would be similar to this, assuming 'data' contains
# the data you want to save:
#
# fp = file(path, 'w') # Create file anew
# fp.write(data)
# fp.close()
#
# You might want to add some error checking :-)
# Note that the current working dir didn't change. This is good since
# that's the way we set it up.
print('CWD: %s' % os.getcwd())
# Destroy the dialog. Don't do this until you are done with it!
# BAD | |
175.
# o Encoded Additional Authenticated Data (AAD); this example uses the
# Additional Authenticated Data from Figure 173, encoded to
# base64url [RFC4648] as Figure 176.
# 75m1ALsYv10pZTKPWrsqdg
# Figure 174: Content Encryption Key, base64url-encoded
# veCx9ece2orS7c_N
# Figure 175: Initialization Vector, base64url-encoded
# WyJ2Y2FyZCIsW1sidmVyc2lvbiIse30sInRleHQiLCI0LjAiXSxbImZuIix7fS
# widGV4dCIsIk1lcmlhZG9jIEJyYW5keWJ1Y2siXSxbIm4iLHt9LCJ0ZXh0Iixb
# IkJyYW5keWJ1Y2siLCJNZXJpYWRvYyIsIk1yLiIsIiJdXSxbImJkYXkiLHt9LC
# J0ZXh0IiwiVEEgMjk4MiJdLFsiZ2VuZGVyIix7fSwidGV4dCIsIk0iXV1d
# Figure 176: Additional Authenticated Data, base64url-encoded
# Miller Informational [Page 89]
# RFC 7520 JOSE Cookbook May 2015
# 5.10.3. Encrypting the Key
# Performing the key encryption operation over the CEK (Figure 174)
# with the AES symmetric key (Figure 151) produces the following
# Encrypted Key:
# <KEY>
# Figure 177: Encrypted Key, base64url-encoded
# 5.10.4. Encrypting the Content
# The following is generated before encrypting the content:
# o JWE Protected Header; this example uses the header from
# Figure 178, encoded to base64url [RFC4648] as Figure 179.
# {
# "alg": "A128KW",
# "kid": "81b20965-8332-43d9-a468-82160ad91ac8",
# "enc": "A128GCM"
# }
# Figure 178: JWE Protected Header JSON
# eyJhbGciOiJBMTI4S1ciLCJraWQiOiI4MWIyMDk2NS04MzMyLTQzZDktYTQ2OC
# 04MjE2MGFkOTFhYzgiLCJlbmMiOiJBMTI4R0NNIn0
# Figure 179: JWE Protected Header, base64url-encoded
# Performing the content encryption operation over the Plaintext with
# the following:
# o CEK (Figure 174);
# o Initialization Vector (Figure 175); and
# o Concatenation of the JWE Protected Header (Figure 179), ".", and
# the base64url [RFC4648] encoding of Figure 173 as authenticated
# data
# produces the following:
# o Ciphertext from Figure 180.
# o Authentication Tag from Figure 181.
# Miller Informational [Page 90]
# RFC 7520 JOSE Cookbook May 2015
# Z_3cbr0k3bVM6N3oSNmHz7Lyf3iPppGf3Pj17wNZqteJ0Ui8p74SchQP8xygM1
# oFRWCNzeIa6s6BcEtp8qEFiqTUEyiNkOWDNoF14T_4NFqF-p2Mx8zkbKxI7oPK
# 8KNarFbyxIDvICNqBLba-v3uzXBdB89fzOI-Lv4PjOFAQGHrgv1rjXAmKbgkft
# 9cB4WeyZw8MldbBhc-V_KWZslrsLNygon_JJWd_ek6LQn5NRehvApqf9ZrxB4a
# q3FXBxOxCys35PhCdaggy2kfUfl2OkwKnWUbgXVD1C6HxLIlqHhCwXDG59weHr
# RDQeHyMRoBljoV3X_bUTJDnKBFOod7nLz-cj48JMx3SnCZTpbQAkFV
# Figure 180: Ciphertext, base64url-encoded
# vOaH_Rajnpy_3hOtqvZHRA
# Figure 181: Authentication Tag, base64url-encoded
# 5.10.5. Output Results
# The following compose the resulting JWE object:
# o JWE Protected Header (Figure 179)
# o Encrypted Key (Figure 177)
# o Initialization Vector (Figure 175)
# o Additional Authenticated Data (Figure 176)
# o Ciphertext (Figure 180)
# o Authentication Tag (Figure 181)
# The JWE Compact Serialization is not presented because it does not
# support this use case.
# Miller Informational [Page 91]
# RFC 7520 JOSE Cookbook May 2015
# The resulting JWE object using the general JWE JSON Serialization:
# {
# "recipients": [
# {
# "encrypted_key": "<KEY>"
# }
# ],
# "protected": "<KEY>
# <KEY>YT<KEY>
# 0",
# "iv": "veCx9ece2orS7c_N",
# "aad": "<KEY>
# <KEY>
# <KEY>
# <KEY>
# I<KEY>",
# "ciphertext": "<KEY>
# Ui8p74SchQP8xygM1oFRWCNzeIa6s6BcEtp8qEFiqTUEyiNkOWDNoF14
# T_4NFqF-p2Mx8zkbKxI7oPK8KNarFbyxIDvICNqBLba-v3uzXBdB89fz
# O<KEY>
# L<KEY>
# 2kfUfl2OkwKnWUbgXVD1C6HxLIlqHhCwXDG59weHrRDQeHyMRoBljoV3
# X_bUTJDnKBFOod7nLz-cj48JMx3SnCZTpbQAkFV",
# "tag": "vOaH_Rajnpy_3hOtqvZHRA"
# }
# Figure 182: General JWE JSON Serialization
# Miller Informational [Page 92]
# RFC 7520 JOSE Cookbook May 2015
# The resulting JWE object using the flattened JWE JSON Serialization:
# {
# "protected": "<KEY>
# <KEY>GFkOTFhYzgiLCJlbmMiOiJBMTI4R0NNIn
# 0",
# "encrypted_key": "<KEY>",
# "aad": "<KEY>",
# "iv": "veCx9ece2orS7c_N",
# "ciphertext": "<KEY>
# <KEY>
# T_4NFqF-p2Mx8zkbKxI7oPK8KNarFbyxIDvICNqBLba-v3uzXBdB89fz
# O<KEY>
# LNygon_JJWd_ek6LQn5NRehvApqf9Z<KEY>
# 2kfUfl2OkwKnWUbgXVD1C6HxLIlqHhCwXDG59weHrRDQeHyMRoBljoV3
# X_bUTJDnKBFOod7nLz-cj48JMx3SnCZTpbQAkFV",
# "tag": "vOaH_Rajnpy_3hOtqvZHRA"
# }
# Figure 183: Flattened JWE JSON Serialization
# 5.11. Protecting Specific Header Fields
# This example illustrates encrypting content where only certain JOSE
# Header Parameters are protected. As this example includes parameters
# in the JWE Shared Unprotected Header, only the general JWE JSON
# Serialization and flattened JWE JSON Serialization are possible.
# Note that whitespace is added for readability as described in
# Section 1.1.
# 5.11.1. Input Factors
# The following are supplied before beginning the encryption process:
# o Plaintext content; this example uses the content from Figure 72.
# o Recipient encryption key; this example uses the key from
# Figure 151.
# o Key encryption algorithm; this example uses "A128KW".
# o Content encryption algorithm; this example uses "A128GCM".
# Miller Informational [Page 93]
# RFC 7520 JOSE Cookbook May 2015
# 5.11.2. Generated Factors
# The following are generated before encrypting:
# o AES symmetric key as the Content Encryption Key (CEK); this
# example uses the key from Figure 184.
# o Initialization Vector; this example uses the Initialization Vector
# from Figure 185.
# WDgEptBmQs9ouUvArz6x6g
# Figure 184: Content Encryption Key, base64url-encoded
# WgEJsDS9bkoXQ3nR
# Figure 185: Initialization Vector, base64url-encoded
# 5.11.3. Encrypting the Key
# Performing the key encryption operation over the CEK (Figure 184)
# with the AES symmetric key (Figure 151) produces the following
# Encrypted Key:
# jJIcM9J-hbx3wnqhf5FlkEYos0sHsF0H
# Figure 186: Encrypted Key, base64url-encoded
# 5.11.4. Encrypting the Content
# The following is generated before encrypting the content:
# o JWE Protected Header; this example uses the header from
# Figure 187, encoded to base64url [RFC4648] as Figure 188.
# {
# "enc": "A128GCM"
# }
# Figure 187: JWE Protected Header JSON
# eyJlbmMiOiJBMTI4R0NNIn0
# Figure 188: JWE Protected Header, base64url-encoded
# Miller Informational [Page 94]
# RFC 7520 JOSE Cookbook May 2015
# Performing the content encryption operation over the Plaintext with
# the following:
# o CEK (Figure 184);
# o Initialization Vector (Figure 185); and
# o JWE Protected Header (Figure 188) as authenticated data
# produces the following:
# o Ciphertext from Figure 189.
# o Authentication Tag from Figure 190.
# lIbCyRmRJxnB2yLQOTqjCDKV3H30ossOw3uD9DPsqLL2DM3swKkjOwQyZtWsFL
# YMj5YeLht_StAn21tHmQJuuNt64T8D4t6C7kC9OCCJ1IHAolUv4MyOt80MoPb8
# fZYbNKqplzYJgIL58g8N2v46OgyG637d6uuKPwhAnTGm_zWhqc_srOvgiLkzyF
# XPq1hBAURbc3-8BqeRb48iR1-_5g5UjWVD3lgiLCN_P7AW8mIiFvUNXBPJK3nO
# WL4teUPS8yHLbWeL83olU4UAgL48x-8dDkH23JykibVSQju-f7e-1xreHWXzWL
# Hs1NqBbre0dEwK3HX_xM0LjUz77Krppgegoutpf5qaKg3l-_xMINmf
# Figure 189: Ciphertext, base64url-encoded
# fNYLqpUe84KD45lvDiaBAQ
# Figure 190: Authentication Tag, base64url-encoded
# 5.11.5. Output Results
# The following compose the resulting JWE object:
# o JWE Shared Unprotected Header (Figure 191)
# o JWE Protected Header (Figure 188)
# o Encrypted Key (Figure 186)
# o Initialization Vector (Figure 185)
# o Ciphertext (Figure 189)
# o Authentication Tag (Figure 190)
# The JWE Compact Serialization is not presented because it does not
# support this use case.
# Miller Informational [Page 95]
# RFC 7520 JOSE Cookbook May 2015
# The following JWE Shared Unprotected Header is generated before
# assembling the output results:
# {
# "alg": "A128KW",
# "kid": "81b20965-8332-43d9-a468-82160ad91ac8"
# }
# Figure 191: JWE Shared Unprotected Header JSON
# The resulting JWE object using the general JWE JSON Serialization:
# {
# "recipients": [
# {
# "encrypted_key": "jJIcM9J-hbx3wnqhf5FlkEYos0sHsF0H"
# }
# ],
# "unprotected": {
# "alg": "A128KW",
# "kid": "81b20965-8332-43d9-a468-82160ad91ac8"
# },
# "protected": "<KEY>",
# "iv": "WgEJsDS9bkoXQ3nR",
# "ciphertext": "lIbCyRmRJxnB2yLQOTqjCDKV3H30ossOw3uD9DPsqLL2D
# M3swKkjOwQyZtWsFLYMj5YeLht_StAn21tHmQJuuNt64T8D4t6C7kC9O
# CCJ1IHAolUv4MyOt80MoPb8fZYbNKqplzYJgIL58g8N2v46OgyG637d6
# uuKPwhAnTGm_zWhqc_srOvgiLkzyFXPq1hBAURbc3-8BqeRb48iR1-_5
# g5UjWVD3lgiLCN_P7AW8mIiFvUNXBPJK3nOWL4teUPS8yHLbWeL83olU
# 4UAgL48x-8dDkH23JykibVSQju-f7e-1xreHWXzWLHs1NqBbre0dEwK3
# HX_xM0LjUz77Krppgegoutpf5qaKg3l-_xMINmf",
# "tag": "fNYLqpUe84KD45lvDiaBAQ"
# }
# Figure 192: General JWE JSON Serialization
# Miller Informational [Page 96]
# RFC 7520 JOSE Cookbook May 2015
# The resulting JWE object using the flattened JWE JSON Serialization:
# {
# "protected": "<KEY>",
# "unprotected": {
# "alg": "A128KW",
# "kid": "81b20965-8332-43d9-a468-82160ad91ac8"
# },
# "encrypted_key": "<KEY>",
# "iv": "WgEJsDS9bkoXQ3nR",
# "ciphertext": "lIbCyRmRJxnB2yLQOTqjCDKV3H30ossOw3uD9DPsqLL2D
# M3swKkjOwQyZtWsFLYMj5YeLht_StAn21tHmQJuuNt64T8D4t6C7kC9O
# CCJ1IHAolUv4MyOt80MoPb8fZYbNKqplzYJgIL58g8N2v46OgyG637d6
# uuKPwhAnTGm_zWhqc_srOvgiLkzyFXPq1hBAURbc3-8BqeRb48iR1-_5
# g5UjWVD3lgiLCN_P7AW8mIiFvUNXBPJK3nOWL4teUPS8yHLbWeL83olU
# 4UAgL48x-8dDkH23JykibVSQju-f7e-1xreHWXzWLHs1NqBbre0dEwK3
# HX_xM0LjUz77Krppgegoutpf5qaKg3l-_xMINmf",
# "tag": "fNYLqpUe84KD45lvDiaBAQ"
# }
# Figure 193: Flattened JWE JSON Serialization
# 5.12. Protecting Content Only
# This example illustrates encrypting content where none of the JOSE
# header parameters are protected. As this example includes parameters
# only in the JWE Shared Unprotected Header, only the flattened JWE
# JSON Serialization and general JWE JSON Serialization are possible.
# Note that whitespace is added for readability as described in
# Section 1.1.
# 5.12.1. Input Factors
# The following are supplied before beginning the encryption process:
# o Plaintext content; this example uses the content from Figure 72.
# o Recipient encryption key; this example uses the key from
# Figure 151.
# o Key encryption algorithm; this example uses "A128KW".
# o Content encryption algorithm; this example uses "A128GCM".
# Miller Informational [Page 97]
# RFC 7520 JOSE Cookbook May 2015
# 5.12.2. Generated Factors
# The following are generated before encrypting:
# o AES symmetric key as the Content Encryption Key; this example the
# key from Figure 194.
# o Initialization Vector; this example uses the Initialization Vector
# from Figure 195.
# KBooAFl30QPV3vkcZlXnzQ
# Figure 194: Content Encryption Key, base64url-encoded
# YihBoVOGsR1l7jCD
# Figure 195: Initialization Vector, base64url-encoded
# 5.12.3. Encrypting the Key
# Performing the key encryption operation over the CEK (Figure 194)
# with the AES symmetric key (Figure 151) produces the following
# Encrypted Key:
# <KEY>
# Figure 196: Encrypted Key, base64url-encoded
# 5.12.4. Encrypting the Content
# Performing the content encryption operation over the Plaintext
# (Figure 72) using the following:
# o CEK (Figure 194);
# o Initialization Vector (Figure 195); and
# o Empty string as authenticated data
# produces the following:
# o Ciphertext from Figure 197.
# o Authentication Tag from Figure 198.
# Miller Informational [Page 98]
# RFC 7520 JOSE Cookbook May 2015
# qtPIMMaOBRgASL10dNQhOa7Gqrk7Eal1vwht7R4TT1uq-arsVCPaIeFwQfzrSS
# 6oEUWbBtxEasE0vC6r7sphyVziMCVJEuRJyoAHFSP3eqQPb4Ic1SDSqyXjw_L3
# svybhHYUGyQuTmUQEDjgjJfBOifwHIsDsRPeBz1NomqeifVPq5GTCWFo5k_MNI
# QURR2Wj0AHC2k7JZfu2iWjUHLF8ExFZLZ4nlmsvJu_mvifMYiikfNfsZAudISO
# a6O73yPZtL04k_1FI7WDfrb2w7OqKLWDXzlpcxohPVOLQwpA3mFNRKdY-bQz4Z
# 4KX9lfz1cne31N4-8BKmojpw-OdQjKdLOGkC445Fb_K1tlDQXw2sBF
# Figure 197: Ciphertext, base64url-encoded
# e2m0Vm7JvjK2VpCKXS-kyg
# Figure 198: Authentication Tag, base64url-encoded
# 5.12.5. Output Results
# The JWE Compact Serialization is not presented because it does not
# support this use case.
# The following JWE Shared Unprotected Header is generated before
# assembling the output results:
# {
# "alg": "A128KW",
# "kid": "81b20965-8332-43d9-a468-82160ad91ac8",
# "enc": "A128GCM"
# }
# Figure 199: JWE Shared Unprotected Header JSON
# The following compose the resulting JWE object:
# o JWE Shared Unprotected Header (Figure 199)
# o Encrypted Key (Figure 196)
# o Initialization Vector (Figure 195)
# o Ciphertext (Figure 197)
# o Authentication Tag (Figure 198)
# Miller Informational [Page 99]
# RFC 7520 JOSE Cookbook May 2015
# The resulting JWE object using the general JWE JSON Serialization:
# {
# "recipients": [
# {
# "encrypted_key": "<KEY>"
# }
# ],
# "unprotected": {
# "alg": "A128KW",
# "kid": "81b20965-8332-43d9-a468-82160ad91ac8",
# "enc": "A128GCM"
# },
# "iv": | |
issubclass(list, typing.Reversible)
assert not issubclass(int, typing.Reversible)
    def test_protocol_instance_type_error(self):
        # isinstance() checks against typing.Reversible are rejected with
        # TypeError (only issubclass-style structural checks are supported).
        with self.assertRaises(TypeError):
            isinstance([], typing.Reversible)
class GenericTests(TestCase):
    """Behavioural tests for user-defined generic classes (typing.Generic).

    NOTE(review): these tests were written against a pre-3.7 version of
    the ``typing`` module (eager argument checking on subscription,
    subclassing bare ``Generic``, etc.); several assertions do not hold
    on modern interpreters -- confirm the targeted typing version.
    """

    def test_basics(self):
        # Subscripting a partially-applied alias re-checks the supplied
        # arguments against the remaining type variables.
        X = SimpleMapping[str, Any]
        Y = SimpleMapping[XK, str]
        X[str, str]
        Y[str, str]
        with self.assertRaises(TypeError):
            X[int, str]
        with self.assertRaises(TypeError):
            Y[str, bytes]

    def test_init(self):
        # Generic[...] rejects repeated type variables.
        T = TypeVar('T')
        S = TypeVar('S')
        with self.assertRaises(TypeError):
            Generic[T, T]
        with self.assertRaises(TypeError):
            Generic[T, S, T]

    def test_repr(self):
        # repr() of an unparameterized generic lists its type variables.
        self.assertEqual(repr(SimpleMapping),
                         __name__ + '.' + 'SimpleMapping[~XK, ~XV]')
        self.assertEqual(repr(MySimpleMapping),
                         __name__ + '.' + 'MySimpleMapping[~XK, ~XV]')

    def test_dict(self):
        # Instances of generic classes keep an ordinary, writable __dict__.
        T = TypeVar('T')

        class B(Generic[T]):
            pass
        b = B()
        b.foo = 42
        self.assertEqual(b.__dict__, {'foo': 42})

        class C(B[int]):
            pass
        c = C()
        c.bar = 'abc'
        self.assertEqual(c.__dict__, {'bar': 'abc'})

    def test_pickle(self):
        # Instances of concrete subclasses of generics round-trip through
        # pickle at every supported protocol.
        T = TypeVar('T')

        class B(Generic[T]):
            pass
        global C  # pickle wants to reference the class by name

        class C(B[int]):
            pass
        c = C()
        c.foo = 42
        c.bar = 'abc'
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            z = pickle.dumps(c, proto)
            x = pickle.loads(z)
            self.assertEqual(x.foo, 42)
            self.assertEqual(x.bar, 'abc')
            self.assertEqual(x.__dict__, {'foo': 42, 'bar': 'abc'})

    def test_errors(self):
        # A concrete (non-TypeVar) argument cannot parameterize Generic.
        with self.assertRaises(TypeError):
            B = SimpleMapping[XK, Any]

            class C(Generic[B]):
                pass

    def test_repr_2(self):
        # __qualname__ only exists on Python 3.3+.
        PY32 = sys.version_info[:2] < (3, 3)

        class C(Generic[T]):
            pass
        assert C.__module__ == __name__
        if not PY32:
            assert C.__qualname__ == 'GenericTests.test_repr_2.<locals>.C'
        assert repr(C).split('.')[-1] == 'C[~T]'
        X = C[int]
        assert X.__module__ == __name__
        if not PY32:
            assert X.__qualname__ == 'C'
        assert repr(X).split('.')[-1] == 'C[int]'

        class Y(C[int]):
            pass
        assert Y.__module__ == __name__
        if not PY32:
            assert Y.__qualname__ == 'GenericTests.test_repr_2.<locals>.Y'
        assert repr(Y).split('.')[-1] == 'Y[int]'

    def test_eq_1(self):
        # Parameterized aliases compare by their arguments.
        assert Generic == Generic
        assert Generic[T] == Generic[T]
        assert Generic[KT] != Generic[VT]

    def test_eq_2(self):
        # Distinct classes never compare equal, even with identical bodies.
        class A(Generic[T]):
            pass

        class B(Generic[T]):
            pass
        assert A == A
        assert A != B
        assert A[T] == A[T]
        assert A[T] != B[T]

    def test_multiple_inheritance(self):
        # __parameters__ collects type variables in MRO discovery order,
        # de-duplicated.
        class A(Generic[T, VT]):
            pass

        class B(Generic[KT, T]):
            pass

        class C(A, Generic[KT, VT], B):
            pass
        assert C.__parameters__ == (T, VT, KT)

    def test_nested(self):
        # NOTE(review): subclassing bare Generic (no parameters) was
        # allowed by the typing version under test.
        class G(Generic):
            pass

        class Visitor(G[T]):
            a = None

            def set(self, a: T):
                self.a = a

            def get(self):
                return self.a

            def visit(self) -> T:
                return self.a
        V = Visitor[typing.List[int]]

        class IntListVisitor(V):
            def append(self, x: int):
                self.a.append(x)
        a = IntListVisitor()
        a.set([])
        a.append(1)
        a.append(42)
        assert a.get() == [1, 42]

    def test_type_erasure(self):
        # At runtime all parameterizations construct the plain class.
        T = TypeVar('T')

        class Node(Generic[T]):
            def __init__(self, label: T,
                         left: 'Node[T]' = None,
                         right: 'Node[T]' = None):
                self.label = label  # type: T
                self.left = left  # type: Optional[Node[T]]
                self.right = right  # type: Optional[Node[T]]

        def foo(x: T):
            a = Node(x)
            b = Node[T](x)
            c = Node[Any](x)
            assert type(a) is Node
            assert type(b) is Node
            assert type(c) is Node
        foo(42)
class VarianceTests(TestCase):
    """Variance rules for issubclass() on parameterized collections.

    NOTE(review): relies on pre-3.7 typing behaviour where issubclass()
    accepted parameterized generics; modern typing raises TypeError for
    these calls. Employee/Manager are fixtures defined elsewhere in the
    file (Manager subclasses Employee, judging by the assertions).
    """

    def test_invariance(self):
        # Because of invariance, List[subclass of X] is not a subclass
        # of List[X], and ditto for MutableSequence.
        assert not issubclass(typing.List[Manager], typing.List[Employee])
        assert not issubclass(typing.MutableSequence[Manager],
                              typing.MutableSequence[Employee])
        # It's still reflexive.
        assert issubclass(typing.List[Employee], typing.List[Employee])
        assert issubclass(typing.MutableSequence[Employee],
                          typing.MutableSequence[Employee])

    def test_covariance_tuple(self):
        # Check covariace for Tuple (which are really special cases).
        assert issubclass(Tuple[Manager], Tuple[Employee])
        assert not issubclass(Tuple[Employee], Tuple[Manager])
        # And pairwise.
        assert issubclass(Tuple[Manager, Manager], Tuple[Employee, Employee])
        assert not issubclass(Tuple[Employee, Employee],
                              Tuple[Manager, Employee])
        # And using ellipsis.
        assert issubclass(Tuple[Manager, ...], Tuple[Employee, ...])
        assert not issubclass(Tuple[Employee, ...], Tuple[Manager, ...])

    def test_covariance_sequence(self):
        # Check covariance for Sequence (which is just a generic class
        # for this purpose, but using a covariant type variable).
        assert issubclass(typing.Sequence[Manager], typing.Sequence[Employee])
        assert not issubclass(typing.Sequence[Employee],
                              typing.Sequence[Manager])

    def test_covariance_mapping(self):
        # Ditto for Mapping (covariant in the value, invariant in the key).
        assert issubclass(typing.Mapping[Employee, Manager],
                          typing.Mapping[Employee, Employee])
        assert not issubclass(typing.Mapping[Manager, Employee],
                              typing.Mapping[Employee, Employee])
        assert not issubclass(typing.Mapping[Employee, Manager],
                              typing.Mapping[Manager, Manager])
        assert not issubclass(typing.Mapping[Manager, Employee],
                              typing.Mapping[Manager, Manager])
class CastTests(TestCase):
    """Tests for typing.cast(), which is a pure no-op at runtime."""

    def test_basics(self):
        # cast() hands back its second argument unchanged for any target
        # type, including Any, Union, AnyStr and None.
        for target in (int, float, Any, list, Union[str, float], AnyStr, None):
            assert cast(target, 42) == 42
        # The runtime type of the value is untouched as well.
        assert type(cast(float, 42)) is int

    def test_errors(self):
        # Bogus calls are not expected to fail: cast() never validates
        # its target argument.
        cast(42, 42)
        cast('hello', 42)
class ForwardRefTests(TestCase):
    """Forward-reference ('Node[T]' string) resolution via get_type_hints().

    NOTE(review): uses private pre-3.7 internals (``typing._ForwardRef``)
    that were renamed/removed in later Python versions.
    """

    def test_basics(self):
        # Forward references inside a generic class resolve to the real
        # class, and implicit-None defaults become Optional[...].
        class Node(Generic[T]):
            def __init__(self, label: T):
                self.label = label
                self.left = self.right = None

            def add_both(self,
                         left: 'Optional[Node[T]]',
                         right: 'Node[T]' = None,
                         stuff: int = None,
                         blah=None):
                self.left = left
                self.right = right

            def add_left(self, node: Optional['Node[T]']):
                self.add_both(node, None)

            def add_right(self, node: 'Node[T]' = None):
                self.add_both(None, node)
        t = Node[int]
        both_hints = get_type_hints(t.add_both, globals(), locals())
        assert both_hints['left'] == both_hints['right'] == Optional[Node[T]]
        assert both_hints['stuff'] == Optional[int]
        assert 'blah' not in both_hints
        left_hints = get_type_hints(t.add_left, globals(), locals())
        assert left_hints['node'] == Optional[Node[T]]
        right_hints = get_type_hints(t.add_right, globals(), locals())
        assert right_hints['node'] == Optional[Node[T]]

    def test_forwardref_instance_type_error(self):
        # A raw forward reference cannot be used with isinstance().
        fr = typing._ForwardRef('int')
        with self.assertRaises(TypeError):
            isinstance(42, fr)

    def test_union_forward(self):
        def foo(a: Union['T']):
            pass
        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Union[T]})

    def test_tuple_forward(self):
        def foo(a: Tuple['T']):
            pass
        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Tuple[T]})

    def test_callable_forward(self):
        def foo(a: Callable[['T'], 'T']):
            pass
        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Callable[[T], T]})

    def test_callable_with_ellipsis_forward(self):
        def foo(a: 'Callable[..., T]'):
            pass
        self.assertEqual(get_type_hints(foo, globals(), locals()),
                         {'a': Callable[..., T]})

    def test_syntax_error(self):
        # Unparseable forward references fail eagerly on subscription.
        with self.assertRaises(SyntaxError):
            Generic['/T']

    def test_delayed_syntax_error(self):
        # ...but an unparseable annotation string only fails once
        # get_type_hints() tries to evaluate it.
        def foo(a: 'Node[T'):
            pass
        with self.assertRaises(SyntaxError):
            get_type_hints(foo)

    def test_type_error(self):
        # A forward reference that evaluates to a non-type is rejected.
        def foo(a: Tuple['42']):
            pass
        with self.assertRaises(TypeError):
            get_type_hints(foo)

    def test_name_error(self):
        # Misspelled names surface as NameError during evaluation.
        def foo(a: 'Noode[T]'):
            pass
        with self.assertRaises(NameError):
            get_type_hints(foo, locals())

    def test_no_type_check(self):
        # @no_type_check suppresses all hints for the function.
        @no_type_check
        def foo(a: 'whatevers') -> {}:
            pass
        th = get_type_hints(foo)
        self.assertEqual(th, {})

    def test_no_type_check_class(self):
        # @no_type_check on a class suppresses hints for its methods too,
        # whether accessed on the class or an instance.
        @no_type_check
        class C:
            def foo(a: 'whatevers') -> {}:
                pass
        cth = get_type_hints(C.foo)
        self.assertEqual(cth, {})
        ith = get_type_hints(C().foo)
        self.assertEqual(ith, {})

    def test_meta_no_type_check(self):
        # no_type_check_decorator makes a decorator that marks its
        # targets (functions or classes) as unchecked.
        @no_type_check_decorator
        def magic_decorator(deco):
            return deco
        self.assertEqual(magic_decorator.__name__, 'magic_decorator')

        @magic_decorator
        def foo(a: 'whatevers') -> {}:
            pass

        @magic_decorator
        class C:
            def foo(a: 'whatevers') -> {}:
                pass
        self.assertEqual(foo.__name__, 'foo')
        th = get_type_hints(foo)
        self.assertEqual(th, {})
        cth = get_type_hints(C.foo)
        self.assertEqual(cth, {})
        ith = get_type_hints(C().foo)
        self.assertEqual(ith, {})

    def test_default_globals(self):
        # Without explicit namespaces, hints resolve against the
        # function's own defining globals (here: the exec() namespace).
        code = ("class C:\n"
                "    def foo(self, a: 'C') -> 'D': pass\n"
                "class D:\n"
                "    def bar(self, b: 'D') -> C: pass\n"
                )
        ns = {}
        exec(code, ns)
        hints = get_type_hints(ns['C'].foo)
        assert hints == {'a': ns['C'], 'return': ns['D']}
class OverloadTests(TestCase):
    """Tests for @typing.overload.

    NOTE(review): targets a historical typing module in which applying
    @overload outside a stub file raised RuntimeError at decoration
    time; modern typing accepts the decoration and raises
    NotImplementedError only when the overload is *called* -- confirm
    against the typing version under test.
    """

    def test_overload_exists(self):
        from typing import overload

    def test_overload_fails(self):
        from typing import overload
        with self.assertRaises(RuntimeError):
            @overload
            def blah():
                pass
# Type variable shared by the async helper classes below. Note the
# variable name (T_a) differs from the registered TypeVar name ('T').
T_a = TypeVar('T')


class AwaitableWrapper(typing.Awaitable[T_a]):
    """Minimal Awaitable: awaiting it yields once, then returns ``value``."""

    def __init__(self, value):
        self.value = value

    def __await__(self) -> typing.Iterator[T_a]:
        yield
        return self.value
class AsyncIteratorWrapper(typing.AsyncIterator[T_a]):
    """Minimal AsyncIterator that drains ``value`` via ``yield from``.

    NOTE(review): uses the generator-based ``@asyncio.coroutine`` style,
    which was deprecated in 3.8 and removed in Python 3.11.
    """

    def __init__(self, value: typing.Iterable[T_a]):
        self.value = value

    def __aiter__(self) -> typing.AsyncIterator[T_a]:
        return self

    @asyncio.coroutine
    def __anext__(self) -> T_a:
        # Delegates to the wrapped iterable; a falsy result terminates
        # the async iteration.
        data = yield from self.value
        if data:
            return data
        else:
            raise StopAsyncIteration
class CollectionsAbcTests(TestCase):
    """isinstance()/issubclass() behaviour of the typing aliases for the
    collections.abc ABCs.

    NOTE(review): written against pre-3.7 typing, where isinstance()
    and issubclass() with *parameterized* aliases (e.g.
    ``typing.Iterable[int]``) were permitted; modern typing raises
    TypeError for those.
    """

    def test_hashable(self):
        assert isinstance(42, typing.Hashable)
        assert not isinstance([], typing.Hashable)

    def test_iterable(self):
        assert isinstance([], typing.Iterable)
        # Due to ABC caching, the second time takes a separate code
        # path and could fail.  So call this a few times.
        assert isinstance([], typing.Iterable)
        assert isinstance([], typing.Iterable)
        assert isinstance([], typing.Iterable[int])
        assert not isinstance(42, typing.Iterable)
        # Just in case, also test issubclass() a few times.
        assert issubclass(list, typing.Iterable)
        assert issubclass(list, typing.Iterable)

    def test_iterator(self):
        it = iter([])
        assert isinstance(it, typing.Iterator)
        assert isinstance(it, typing.Iterator[int])
        assert not isinstance(42, typing.Iterator)

    def test_awaitable(self):
        # A coroutine object is an Awaitable; the function itself is not.
        async def foo() -> typing.Awaitable[int]:
            return await AwaitableWrapper(42)
        g = foo()
        assert issubclass(type(g), typing.Awaitable[int])
        assert isinstance(g, typing.Awaitable)
        assert not isinstance(foo, typing.Awaitable)
        assert issubclass(typing.Awaitable[Manager],
                          typing.Awaitable[Employee])
        assert not issubclass(typing.Awaitable[Employee],
                              typing.Awaitable[Manager])
        g.send(None)  # Run foo() till completion, to avoid warning.

    def test_async_iterable(self):
        base_it = range(10)  # type: Iterator[int]
        it = AsyncIteratorWrapper(base_it)
        assert isinstance(it, typing.AsyncIterable)
        assert isinstance(it, typing.AsyncIterable)
        assert issubclass(typing.AsyncIterable[Manager],
                          typing.AsyncIterable[Employee])
        assert not isinstance(42, typing.AsyncIterable)

    def test_async_iterator(self):
        base_it = range(10)  # type: Iterator[int]
        it = AsyncIteratorWrapper(base_it)
        assert isinstance(it, typing.AsyncIterator)
        assert issubclass(typing.AsyncIterator[Manager],
                          typing.AsyncIterator[Employee])
        assert not isinstance(42, typing.AsyncIterator)

    def test_sized(self):
        assert isinstance([], typing.Sized)
        assert not isinstance(42, typing.Sized)

    def test_container(self):
        assert isinstance([], typing.Container)
        assert not isinstance(42, typing.Container)

    def test_abstractset(self):
        assert isinstance(set(), typing.AbstractSet)
        assert not isinstance(42, typing.AbstractSet)

    def test_mutableset(self):
        assert isinstance(set(), typing.MutableSet)
        assert not isinstance(frozenset(), typing.MutableSet)

    def test_mapping(self):
        assert isinstance({}, typing.Mapping)
        assert not isinstance(42, typing.Mapping)

    def test_mutablemapping(self):
        assert isinstance({}, typing.MutableMapping)
        assert not isinstance(42, typing.MutableMapping)

    def test_sequence(self):
        assert isinstance([], typing.Sequence)
        assert not isinstance(42, typing.Sequence)

    def test_mutablesequence(self):
        assert isinstance([], typing.MutableSequence)
        assert not isinstance((), typing.MutableSequence)

    def test_bytestring(self):
        # ByteString covers both bytes and bytearray.
        assert isinstance(b'', typing.ByteString)
        assert isinstance(bytearray(b''), typing.ByteString)

    def test_list(self):
        assert issubclass(list, typing.List)

    def test_set(self):
        assert issubclass(set, typing.Set)
        assert not issubclass(frozenset, typing.Set)

    def test_frozenset(self):
        assert issubclass(frozenset, typing.FrozenSet)
        assert not issubclass(set, typing.FrozenSet)

    def test_dict(self):
        assert issubclass(dict, typing.Dict)

    def test_no_list_instantiation(self):
        # The abstract typing aliases cannot be instantiated directly,
        # parameterized or not.
        with self.assertRaises(TypeError):
            typing.List()
        with self.assertRaises(TypeError):
            typing.List[T]()
        with self.assertRaises(TypeError):
            typing.List[int]()

    def test_list_subclass_instantiation(self):
        # ...but concrete user subclasses can be.
        class MyList(typing.List[int]):
            pass
        a = MyList()
        assert isinstance(a, MyList)

    def test_no_dict_instantiation(self):
        with self.assertRaises(TypeError):
            typing.Dict()
        with self.assertRaises(TypeError):
            typing.Dict[KT, VT]()
        with self.assertRaises(TypeError):
            typing.Dict[str, int]()
def test_dict_subclass_instantiation(self):
| |
"""
super().__init__()
if CONFIG_SEPARATOR in name:
raise ValueError("Name cannot contain the config-hierarchy divider ({})".format(CONFIG_SEPARATOR))
self._name = name
self._description = description or ""
self._default = default
self._optional = optional
self._requirements = {} # type: Dict[str, RequirementInterface]
def __repr__(self) -> str:
    # Debug representation of a requirement: "<ClassName: name>".
    return "<{}: {}>".format(self.__class__.__name__, self.name)
@property
def name(self) -> str:
    """The name of the Requirement.

    Names cannot contain CONFIG_SEPARATOR ('.' by default) since
    this is used within the configuration hierarchy.
    (Enforced by the constructor, which raises ValueError.)
    """
    return self._name
@property
def description(self) -> str:
    """A short description of what the Requirement is designed to affect or
    achieve.  Always a string (empty if none was supplied)."""
    return self._description
@property
def default(self) -> Optional[ConfigSimpleType]:
    """Returns the default value if one is set, otherwise None."""
    return self._default
@property
def optional(self) -> bool:
    """Whether the Requirement is optional or not."""
    return self._optional
@optional.setter
def optional(self, value: Any) -> None:
    """Sets the optional value for a requirement.

    The value is coerced to bool, so any truthy/falsy value is accepted.
    """
    self._optional = bool(value)
def config_value(self,
                 context: 'interfaces.context.ContextInterface',
                 config_path: str,
                 default: ConfigSimpleType = None) -> ConfigSimpleType:
    """Returns the value for this Requirement from its config path.

    Args:
        context: the configuration store to find the value for this requirement
        config_path: the configuration path of the instance of the requirement to be recovered
        default: a default value to provide if the requirement's configuration value is not found

    Returns:
        The stored configuration value, or ``default`` when the path is absent.
    """
    return context.config.get(config_path, default)
# Child operations
@property
def requirements(self) -> Dict[str, 'RequirementInterface']:
    """Returns a dictionary of all the child requirements, indexed by
    name.

    A shallow copy is returned so callers cannot mutate the internal
    mapping directly.
    """
    return self._requirements.copy()
def add_requirement(self, requirement: 'RequirementInterface') -> None:
    """Adds a child to the list of requirements.

    An existing child with the same name is silently replaced.

    Args:
        requirement: The requirement to add as a child-requirement
    """
    self._requirements[requirement.name] = requirement
def remove_requirement(self, requirement: 'RequirementInterface') -> None:
    """Removes a child from the list of requirements.

    Raises KeyError if no child with that name is registered.

    Args:
        requirement: The requirement to remove as a child-requirement
    """
    del self._requirements[requirement.name]
def unsatisfied_children(self, context: 'interfaces.context.ContextInterface',
                         config_path: str) -> Dict[str, 'RequirementInterface']:
    """Method that will validate all child requirements.

    Optional children are skipped; mandatory children are checked
    against the configuration rooted under this requirement's name.

    Args:
        context: the context containing the configuration data for this requirement
        config_path: the configuration path of this instance of the requirement

    Returns:
        A dictionary of full configuration paths for each unsatisfied child-requirement
    """
    failures = {}
    child_path = path_join(config_path, self._name)
    for child in self.requirements.values():
        if child.optional:
            continue
        failures.update(child.unsatisfied(context, child_path))
    return failures
# Validation routines
@abstractmethod
def unsatisfied(self, context: 'interfaces.context.ContextInterface',
                config_path: str) -> Dict[str, 'RequirementInterface']:
    """Method to validate the value stored at config_path for the
    configuration object against a context.

    Returns a list containing its own name (or multiple unsatisfied requirement names) when invalid

    Args:
        context: The context object containing the configuration for this requirement
        config_path: The configuration path for this requirement to test satisfaction

    Returns:
        A dictionary of configuration-paths to requirements that could not be satisfied
        (empty when fully satisfied)
    """
class SimpleTypeRequirement(RequirementInterface):
    """Class to represent a single simple type (such as a boolean, a string, an
    integer or a series of bytes)"""
    # Subclasses override this with the concrete python type they accept.
    instance_type = bool  # type: ClassVar[Type]

    def add_requirement(self, requirement: RequirementInterface):
        """Always raises a TypeError as instance requirements cannot have
        children."""
        raise TypeError("Instance Requirements cannot have subrequirements")

    def remove_requirement(self, requirement: RequirementInterface):
        """Always raises a TypeError as instance requirements cannot have
        children."""
        raise TypeError("Instance Requirements cannot have subrequirements")

    def unsatisfied(self, context: 'interfaces.context.ContextInterface',
                    config_path: str) -> Dict[str, RequirementInterface]:
        """Validates the instance requirement based upon its
        `instance_type`."""
        config_path = path_join(config_path, self.name)
        value = self.config_value(context, config_path, None)
        if isinstance(value, self.instance_type):
            # Correctly-typed value present: nothing is unsatisfied.
            return {}
        vollog.log(
            constants.LOGLEVEL_V,
            "TypeError - {} requirements only accept {} type: {}".format(self.name, self.instance_type.__name__,
                                                                         repr(value)))
        return {config_path: self}
class ClassRequirement(RequirementInterface):
    """Requires a specific class.

    This is used as means to serialize specific classes for
    :class:`TranslationLayerRequirement` and
    :class:`SymbolTableRequirement` classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Resolved lazily by unsatisfied() from the configured class name.
        self._cls = None

    @property
    def cls(self) -> Type:
        """Contains the actual chosen class based on the configuration value's
        class name."""
        return self._cls

    def unsatisfied(self, context: 'interfaces.context.ContextInterface',
                    config_path: str) -> Dict[str, RequirementInterface]:
        """Checks to see if a class can be recovered."""
        config_path = path_join(config_path, self.name)
        value = self.config_value(context, config_path, None)
        self._cls = None
        if isinstance(value, str):
            if "." in value:
                # Dotted name: look the module up in sys.modules and pull
                # the attribute off it if present.
                # TODO: consider importing the prefix
                module_name, _, class_name = value.rpartition(".")
                module = sys.modules.get(module_name, None)
                if hasattr(module, class_name):
                    self._cls = getattr(module, class_name)
            elif value in globals():
                # Bare name: only this module's globals are searched.
                self._cls = globals()[value]
        if self._cls is None:
            return {config_path: self}
        return {}
class ConstructableRequirementInterface(RequirementInterface):
    """Defines a Requirement that can be constructed based on their own
    requirements.

    This effectively offers a means for serializing specific python
    types, to be reconstructed based on simple configuration data. Each
    constructable records a `class` requirement, which indicates the
    object that will be constructed. That class may have its own
    requirements (which is why validation of a ConstructableRequirement
    must happen after the class configuration value has been provided).
    These values are then provided to the object's constructor by name
    as arguments (as well as the standard `context` and `config_path`
    arguments.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Every constructable carries a mandatory "class" child requirement.
        self.add_requirement(ClassRequirement("class", "Class of the constructable requirement"))
        # Names of child requirements that came from the currently-chosen
        # class, so they can be swapped out if the class changes.
        self._current_class_requirements = set()

    @abstractmethod
    def construct(self, context: 'interfaces.context.ContextInterface', config_path: str) -> None:
        """Method for constructing within the context any required elements
        from subrequirements.

        Args:
            context: The context object containing the configuration data for the constructable
            config_path: The configuration path for the specific instance of this constructable
        """

    def _validate_class(self, context: 'interfaces.context.ContextInterface', config_path: str) -> None:
        """Method to check if the class Requirement is valid and if so populate
        the other requirements (but no need to validate, since we're invalid
        already)

        Args:
            context: The context object containing the configuration data for the constructable
            config_path: The configuration path for the specific instance of this constructable
        """
        class_req = self.requirements['class']
        subreq_config_path = path_join(config_path, self.name)
        # unsatisfied() returns an empty dict when satisfied, hence `not`.
        if not class_req.unsatisfied(context, subreq_config_path) and isinstance(class_req, ClassRequirement):
            # We have a class, and since it's validated we can construct our requirements from it
            if issubclass(class_req.cls, ConfigurableInterface):
                # In case the class has changed, clear out the old requirements
                for old_req in self._current_class_requirements.copy():
                    del self._requirements[old_req]
                    self._current_class_requirements.remove(old_req)
                # And add the new ones
                for requirement in class_req.cls.get_requirements():
                    self._current_class_requirements.add(requirement.name)
                    self.add_requirement(requirement)

    def _construct_class(self,
                         context: 'interfaces.context.ContextInterface',
                         config_path: str,
                         requirement_dict: Dict[str, object] = None) -> Optional['interfaces.objects.ObjectInterface']:
        """Constructs the class, handing args and the subrequirements as
        parameters to __init__

        Returns None when the "class" child requirement cannot be
        satisfied or is not a ClassRequirement.
        """
        if self.requirements["class"].unsatisfied(context, config_path):
            return None
        if not isinstance(self.requirements["class"], ClassRequirement):
            return None
        cls = self.requirements["class"].cls
        # These classes all have a name property
        # We could subclass this out as a NameableInterface, but it seems a little excessive
        # FIXME: We can't test this, because importing the other interfaces causes all kinds of import loops
        # if not issubclass(cls, [interfaces.layers.TranslationLayerInterface,
        #                         interfaces.symbols.SymbolTableInterface]):
        #     return None
        if requirement_dict is None:
            requirement_dict = {}
        # Fulfillment must happen, exceptions happening here mean the requirements aren't correct
        # and these need to be raised and fixed, rather than caught and ignored
        obj = cls(**requirement_dict)
        # Record the constructed object's name so the construction can be
        # replayed from configuration alone.
        context.config[config_path] = obj.name
        return obj
class ConfigurableRequirementInterface(RequirementInterface):
    """Simple Abstract class to provide build_required_config."""

    def build_configuration(self, context: 'interfaces.context.ContextInterface', config_path: str,
                            value: Any) -> HierarchicalDict:
        """Proxies to a ConfigurableInterface if necessary."""
        # NOTE(review): intentionally a stub -- despite the return
        # annotation this returns None; subclasses are expected to
        # override it.
class ConfigurableInterface(metaclass = ABCMeta):
"""Class to allow objects to have requirements and read configuration data
from the context config tree."""
def __init__(self, context: 'interfaces.context.ContextInterface', config_path: str) -> None:
    """Basic initializer that allows configurables to access their own
    config settings.

    Args:
        context: the context this configurable belongs to
        config_path: path within the context's configuration tree
    """
    super().__init__()
    self._context = context
    self._config_path = config_path
    # Lazily-populated branch of the context config; invalidated when
    # config_path changes.
    self._config_cache = None  # type: Optional[HierarchicalDict]
@property
def context(self) -> 'interfaces.context.ContextInterface':
    """The context object that this configurable belongs to/configuration
    is stored in."""
    return self._context
@property
def config_path(self) -> str:
    """The configuration path on which this configurable lives."""
    return self._config_path
@config_path.setter
def config_path(self, value: str) -> None:
    """The configuration path on which this configurable lives.

    Changing the path invalidates the cached config branch.
    """
    self._config_path = value
    self._config_cache = None
@property
def config(self) -> HierarchicalDict:
    """The Hierarchical configuration Dictionary for this Configurable
    object.

    Computed lazily from the context's config tree and cached until
    config_path changes.
    """
    if getattr(self, "_config_cache", None) is None:
        self._config_cache = self._context.config.branch(self._config_path)
    return self._config_cache
def build_configuration(self) -> HierarchicalDict:
"""Constructs a HierarchicalDictionary of all the options required to
build this component in the current context.
Ensures that if the class has been created, it can be recreated
using the configuration built Inheriting classes must override
this to ensure any dependent classes update their configurations
too
"""
result = HierarchicalDict()
| |
<reponame>mosesn/pants
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import logging
import os
import pkgutil
import threading
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict, namedtuple
from contextlib import contextmanager
import six
from twitter.common.collections import OrderedSet
from pants.backend.jvm.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.generator import Generator, TemplateData
from pants.base.revision import Revision
from pants.build_graph.target import Target
from pants.util.dirutil import safe_mkdir, safe_open
# A resolved module: its IvyModuleRef, the artifact path (may be falsy if
# evicted -- see IvyInfo.add_module), and the refs that depend on it.
IvyModule = namedtuple('IvyModule', ['ref', 'artifact', 'callers'])

logger = logging.getLogger(__name__)
class IvyResolveMappingError(Exception):
  """Raised when there is a failure mapping the ivy resolve results to pants objects."""
class IvyModuleRef(object):
  """Identifies an ivy module by (org, name, rev, classifier, ext).

  Instances are immutable in practice, hashable, and totally ordered
  with rev compared last.
  """

  # latest.integration is ivy magic meaning "just get the latest version"
  _ANY_REV = 'latest.integration'

  def __init__(self, org, name, rev, classifier=None, ext=None):
    self.org = org
    self.name = name
    self.rev = rev
    self.classifier = classifier
    self.ext = ext or 'jar'
    # Identity tuple backing equality, hashing, str and repr.
    self._id = (self.org, self.name, self.rev, self.classifier, self.ext)

  def __eq__(self, other):
    if not isinstance(other, IvyModuleRef):
      return False
    return self._id == other._id

  def __ne__(self, other):
    return not (self == other)

  def __hash__(self):
    return hash(self._id)

  def __str__(self):
    fields = ':'.join(part or '' for part in self._id)
    return 'IvyModuleRef({})'.format(fields)

  def __repr__(self):
    return ('IvyModuleRef(org={!r}, name={!r}, rev={!r}, classifier={!r}, ext={!r})'
            .format(*self._id))

  def __cmp__(self, other):
    # We can't just re-use __repr__ or __str_ because we want to order rev last
    # NOTE(review): Python 2 only -- `cmp` does not exist on Python 3.
    mine = (self.org, self.name, self.classifier, self.ext, self.rev)
    theirs = (other.org, other.name, other.classifier, other.ext, other.rev)
    return cmp(mine, theirs)

  @property
  def caller_key(self):
    """This returns an identifier for an IvyModuleRef that only retains the caller org and name.

    Ivy represents dependees as `<caller/>`'s with just org and name and rev information.
    This method returns a `<caller/>` representation of the current ref.
    """
    return IvyModuleRef(org=self.org, name=self.name, rev=self._ANY_REV)

  @property
  def unversioned(self):
    """This returns an identifier for an IvyModuleRef without version information.

    It's useful because ivy might return information about a different version of a dependency than
    the one we request, and we want to ensure that all requesters of any version of that dependency
    are able to learn about it.
    """
    return IvyModuleRef(org=self.org, name=self.name, rev=self._ANY_REV,
                        classifier=self.classifier, ext=self.ext)
class IvyInfo(object):
  """Parsed results of an ivy resolve for a single configuration (conf).

  Maintains the module graph (who depends on whom) and the artifacts
  belonging to each unversioned module ref.
  """

  def __init__(self, conf):
    self._conf = conf
    self.modules_by_ref = {}  # Map from ref to referenced module.
    self.refs_by_unversioned_refs = {}  # Map from unversioned ref to the resolved versioned ref
    # Map from ref of caller to refs of modules required by that caller.
    self._deps_by_caller = defaultdict(OrderedSet)
    # Map from _unversioned_ ref to OrderedSet of IvyArtifact instances.
    self._artifacts_by_ref = defaultdict(OrderedSet)

  def add_module(self, module):
    """Registers a resolved module, raising IvyResolveMappingError on
    duplicate (unversioned or versioned) refs."""
    if not module.artifact:
      # Module was evicted, so do not record information about it
      return
    ref_unversioned = module.ref.unversioned
    if ref_unversioned in self.refs_by_unversioned_refs:
      raise IvyResolveMappingError('Already defined module {}, as rev {}!'
                                   .format(ref_unversioned, module.ref.rev))
    if module.ref in self.modules_by_ref:
      raise IvyResolveMappingError('Already defined module {}, would be overwritten!'
                                   .format(module.ref))
    self.refs_by_unversioned_refs[ref_unversioned] = module.ref
    self.modules_by_ref[module.ref] = module
    for caller in module.callers:
      self._deps_by_caller[caller.caller_key].add(module.ref)
    self._artifacts_by_ref[ref_unversioned].add(module.artifact)

  def _do_traverse_dependency_graph(self, ref, collector, memo, visited):
    # Depth-first accumulation with memoization; `visited` breaks cycles
    # within the current traversal only.
    memoized_value = memo.get(ref)
    if memoized_value:
      return memoized_value
    if ref in visited:
      # Ivy allows for circular dependencies
      # If we're here, that means we're resolving something that
      # transitively depends on itself
      return set()
    visited.add(ref)
    acc = collector(ref)
    # NB(zundel): ivy does not return deps in a consistent order for the same module for
    # different resolves.  Sort them to get consistency and prevent cache invalidation.
    # See https://github.com/pantsbuild/pants/issues/2607
    deps = sorted(self._deps_by_caller.get(ref.caller_key, ()))
    for dep in deps:
      acc.update(self._do_traverse_dependency_graph(dep, collector, memo, visited))
    memo[ref] = acc
    return acc

  def traverse_dependency_graph(self, ref, collector, memo=None):
    """Traverses module graph, starting with ref, collecting values for each ref into the sets
    created by the collector function.

    :param ref an IvyModuleRef to start traversing the ivy dependency graph
    :param collector a function that takes a ref and returns a new set of values to collect for
           that ref, which will also be updated with all the dependencies accumulated values
    :param memo is a dict of ref -> set that memoizes the results of each node in the graph.
           If provided, allows for retaining cache across calls.
    :returns the accumulated set for ref
    """
    # Normalize to the version ivy actually resolved, if known.
    resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
    if resolved_ref:
      ref = resolved_ref
    if memo is None:
      memo = dict()
    visited = set()
    return self._do_traverse_dependency_graph(ref, collector, memo, visited)

  def get_resolved_jars_for_jar_library(self, jar_library, memo=None):
    """Collects jars for the passed jar_library.

    Because artifacts are only fetched for the "winning" version of a module, the artifacts
    will not always represent the version originally declared by the library.

    This method is transitive within the library's jar_dependencies, but will NOT
    walk into its non-jar dependencies.

    :param jar_library A JarLibrary to collect the transitive artifacts for.
    :param memo see `traverse_dependency_graph`
    :returns: all the artifacts for all of the jars in this library, including transitive deps
    :rtype: list of :class:`pants.backend.jvm.jar_dependency_utils.ResolvedJar`
    """
    def to_resolved_jar(jar_ref, jar_path):
      return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org,
                                                 name=jar_ref.name,
                                                 rev=jar_ref.rev,
                                                 classifier=jar_ref.classifier,
                                                 ext=jar_ref.ext),
                         cache_path=jar_path)
    resolved_jars = OrderedSet()

    def create_collection(dep):
      return OrderedSet([dep])
    for jar in jar_library.jar_dependencies:
      # NOTE(review): for non-default confs the conf name is used as the
      # classifier -- presumably matching ivy's artifact naming; confirm.
      classifier = jar.classifier if self._conf == 'default' else self._conf
      jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier)
      for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
        for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
          resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
    return resolved_jars
class IvyUtils(object):
"""Useful methods related to interaction with ivy."""
# NOTE(review): presumably serializes concurrent ivy invocations --
# confirm at the call sites that take this lock.
ivy_lock = threading.RLock()

INTERNAL_ORG_NAME = 'internal'

class IvyError(Exception):
  """Indicates an error preparing an ivy operation."""

class IvyResolveReportError(IvyError):
  """Indicates that an ivy report cannot be found."""

class IvyResolveConflictingDepsError(IvyError):
  """Indicates two or more locally declared dependencies conflict."""

class BadRevisionError(IvyError):
  """Indicates an unparseable version number."""
@staticmethod
def _generate_exclude_template(exclude):
  """Renders an Exclude as TemplateData (org/name) for the ivy.xml template."""
  return TemplateData(org=exclude.org, name=exclude.name)
@staticmethod
def _generate_override_template(jar):
  """Renders a jar override as TemplateData (org/module/version) for the template."""
  return TemplateData(org=jar.org, module=jar.module, version=jar.version)
@staticmethod
@contextmanager
def cachepath(path):
  """Context manager yielding the classpath entries listed in an ivy
  cachepath file.

  A missing file yields an empty tuple; otherwise a lazy generator of
  the non-blank, stripped, os.pathsep-separated entries is yielded.
  """
  if not os.path.exists(path):
    yield ()
  else:
    with safe_open(path, 'r') as cachepath_file:
      contents = cachepath_file.read()
      yield (entry.strip() for entry in contents.split(os.pathsep) if entry.strip())
@classmethod
def symlink_cachepath(cls, ivy_cache_dir, inpath, symlink_dir, outpath):
  """Symlinks all paths listed in inpath that are under ivy_cache_dir into symlink_dir.

  If there is an existing symlink for a file under inpath, it is used rather than creating
  a new symlink. Preserves all other paths. Writes the resulting paths to outpath.
  Returns a map of path -> symlink to that path.

  :param string ivy_cache_dir: Root of the ivy cache that resolved artifacts live under.
  :param string inpath: File containing an os.pathsep-separated list of resolved paths.
  :param string symlink_dir: Directory to create the symlinks in (created if missing).
  :param string outpath: File the rewritten ':'-joined classpath is written to.
  """
  safe_mkdir(symlink_dir)
  # The ivy_cache_dir might itself be a symlink. In this case, ivy may return paths that
  # reference the realpath of the .jar file after it is resolved in the cache dir. To handle
  # this case, add both the symlink'ed path and the realpath to the jar to the symlink map.
  real_ivy_cache_dir = os.path.realpath(ivy_cache_dir)
  symlink_map = OrderedDict()
  with safe_open(inpath, 'r') as infile:
    inpaths = filter(None, infile.read().strip().split(os.pathsep))
  # Realpath each entry so cache hits are detected even through symlinked inputs.
  paths = OrderedSet([os.path.realpath(path) for path in inpaths])
  for path in paths:
    if path.startswith(real_ivy_cache_dir):
      symlink_map[path] = os.path.join(symlink_dir, os.path.relpath(path, real_ivy_cache_dir))
    else:
      # This path is outside the cache. We won't symlink it.
      symlink_map[path] = path
  # Create symlinks for paths in the ivy cache dir.
  for path, symlink in six.iteritems(symlink_map):
    if path == symlink:
      # Skip paths that aren't going to be symlinked.
      continue
    safe_mkdir(os.path.dirname(symlink))
    try:
      os.symlink(path, symlink)
    except OSError as e:
      # We don't delete and recreate the symlink, as this may break concurrently executing code.
      if e.errno != errno.EEXIST:
        raise
  # (re)create the classpath with all of the paths
  with safe_open(outpath, 'w') as outfile:
    outfile.write(':'.join(OrderedSet(symlink_map.values())))
  return dict(symlink_map)
@staticmethod
def identify(targets):
targets = list(targets)
if len(targets) == 1 and targets[0].is_jvm and getattr(targets[0], 'provides', None):
return targets[0].provides.org, targets[0].provides.name
else:
return IvyUtils.INTERNAL_ORG_NAME, Target.maybe_readable_identify(targets)
@classmethod
def xml_report_path(cls, cache_dir, resolve_hash_name, conf):
"""The path to the xml report ivy creates after a retrieve.
:param string cache_dir: The path of the ivy cache dir used for resolves.
:param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
resolution.
:param string conf: The ivy conf name (e.g. "default").
:returns: The report path.
:rtype: string
"""
return os.path.join(cache_dir, '{}-{}-{}.xml'.format(IvyUtils.INTERNAL_ORG_NAME,
resolve_hash_name, conf))
@classmethod
def parse_xml_report(cls, cache_dir, resolve_hash_name, conf):
"""Parse the ivy xml report corresponding to the name passed to ivy.
:param string cache_dir: The path of the ivy cache dir used for resolves.
:param string resolve_hash_name: Hash from the Cache key from the VersionedTargetSet used for
resolution; if `None` returns `None` instead of attempting to
parse any report.
:param string conf: the ivy conf name (e.g. "default")
:returns: The info in the xml report or None if target is empty.
:rtype: :class:`IvyInfo`
:raises: :class:`IvyResolveMappingError` if no report exists.
"""
| |
<gh_stars>1-10
"""A few utils (specshow, melspectrogram) vendored from librosa.
This code was copied from parts of librosa, and adapted, so as to be able to use
targeted functionality with less dependencies and manual installation
(namely for libsndfile) than librosa has.
Librosa can be found here: https://librosa.org/
Librosa's license follows.
ISC License
Copyright (c) 2013--2017, librosa development team.
Permission to use, copy, modify, and/or distribute this software for any purpose with or
without fee is hereby granted, provided that the above copyright notice and this
permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
import warnings
import numpy as np
from matplotlib.cm import get_cmap
from matplotlib.axes import Axes
from matplotlib.ticker import Formatter, ScalarFormatter
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import SymmetricalLogLocator
import matplotlib
from packaging.version import parse as version_parse
import scipy
import scipy.signal
from numpy.lib.stride_tricks import as_strided
import re
# Maximum memory block size in bytes (2**8 * 2**10 = 256 KiB) — vendored
# librosa constant; not referenced elsewhere in this excerpt.
MAX_MEM_BLOCK = 2 ** 8 * 2 ** 10
# specshow
def specshow(
    data,
    x_coords=None,
    y_coords=None,
    x_axis=None,
    y_axis=None,
    sr=22050,
    hop_length=512,
    fmin=None,
    fmax=None,
    tuning=0.0,
    bins_per_octave=12,
    key='C:maj',
    Sa=None,
    mela=None,
    thaat=None,
    auto_aspect=True,
    htk=False,
    ax=None,
    **kwargs,
):
    """Render a spectrogram-like 2-D array with axis-aware ticks and scaling.

    Vendored from librosa's ``specshow``: computes mesh coordinates for the
    requested axis types, draws the data with ``pcolormesh``, then installs
    per-axis scaling and tick decoration.  Returns the QuadMesh artist.
    """
    # Complex data cannot be colormapped directly; warn and show magnitude.
    if np.issubdtype(data.dtype, np.complexfloating):
        warnings.warn(
            'Trying to display complex-valued input. ' 'Showing magnitude instead.'
        )
        data = np.abs(data)

    # Fill display defaults without overriding caller-supplied kwargs.
    display_defaults = {
        'cmap': cmap(data),
        'rasterized': True,
        'edgecolors': 'None',
        'shading': 'flat',
    }
    for option, default in display_defaults.items():
        kwargs.setdefault(option, default)

    shared_params = dict(
        kwargs=kwargs,
        sr=sr,
        fmin=fmin,
        fmax=fmax,
        tuning=tuning,
        bins_per_octave=bins_per_octave,
        hop_length=hop_length,
        key=key,
        htk=htk,
    )

    # Mesh edge coordinates for each axis, derived from the axis type.
    y_coords = __mesh_coords(y_axis, y_coords, data.shape[0], **shared_params)
    x_coords = __mesh_coords(x_axis, x_coords, data.shape[1], **shared_params)

    target_axes = __check_axes(ax)
    mesh = target_axes.pcolormesh(x_coords, y_coords, data, **kwargs)
    __set_current_image(ax, mesh)

    target_axes.set_xlim(x_coords.min(), x_coords.max())
    target_axes.set_ylim(y_coords.min(), y_coords.max())

    # Axis scaling (linear/log/symlog) appropriate to each axis type.
    __scale_axes(target_axes, x_axis, 'x')
    __scale_axes(target_axes, y_axis, 'y')

    # Tick formatters and locators for the chosen axis semantics.
    __decorate_axis(target_axes.xaxis, x_axis, key=key, Sa=Sa, mela=mela, thaat=thaat)
    __decorate_axis(target_axes.yaxis, y_axis, key=key, Sa=Sa, mela=mela, thaat=thaat)

    # If the plot is a self-similarity/covariance etc. plot, square it.
    if __same_axes(x_axis, y_axis, target_axes.get_xlim(), target_axes.get_ylim()) and auto_aspect:
        target_axes.set_aspect('equal')
    return mesh
def cmap(data, robust=True, cmap_seq='magma', cmap_bool='gray_r', cmap_div='coolwarm'):
    """Choose a default colormap for `data`.

    Boolean data gets a two-entry binary map; data whose (robust) range spans
    zero gets a diverging map; single-signed data gets a sequential map.
    With ``robust=True`` the sign test uses the 2nd/98th percentiles rather
    than the extremes.
    """
    data = np.atleast_1d(data)
    if data.dtype == 'bool':
        return get_cmap(cmap_bool, lut=2)
    finite_values = data[np.isfinite(data)]
    lo_pct, hi_pct = (2, 98) if robust else (0, 100)
    vmin, vmax = np.percentile(finite_values, [lo_pct, hi_pct])
    if vmin >= 0 or vmax <= 0:
        return get_cmap(cmap_seq)
    return get_cmap(cmap_div)
def __mesh_coords(ax_type, coords, n, **kwargs):
    """Compute n+1 coordinate edges for an axis of type `ax_type`.

    Explicit `coords` are passed through after a length check; otherwise the
    coordinates are produced by the generator registered for the axis type.
    """
    if coords is not None:
        if len(coords) < n:
            raise Exception(
                'Coordinate shape mismatch: ' '{}<{}'.format(len(coords), n)
            )
        return coords

    # Dispatch table: axis-type name -> coordinate generator.
    generators = {
        'linear': __coord_fft_hz,
        'fft': __coord_fft_hz,
        'fft_note': __coord_fft_hz,
        'fft_svara': __coord_fft_hz,
        'hz': __coord_fft_hz,
        'log': __coord_fft_hz,
        'mel': __coord_mel_hz,
        'cqt': __coord_cqt_hz,
        'cqt_hz': __coord_cqt_hz,
        'cqt_note': __coord_cqt_hz,
        'cqt_svara': __coord_cqt_hz,
        'chroma': __coord_chroma,
        'chroma_c': __coord_chroma,
        'chroma_h': __coord_chroma,
        'time': __coord_time,
        's': __coord_time,
        'ms': __coord_time,
        'lag': __coord_time,
        'lag_s': __coord_time,
        'lag_ms': __coord_time,
        'tonnetz': __coord_n,
        'off': __coord_n,
        'tempo': __coord_tempo,
        'fourier_tempo': __coord_fourier_tempo,
        'frames': __coord_n,
        None: __coord_n,
    }
    if ax_type in generators:
        return generators[ax_type](n, **kwargs)
    raise Exception('Unknown axis type: {}'.format(ax_type))
def __coord_fourier_tempo(n, sr=22050, hop_length=512, **_kwargs):
    """Fourier tempogram coordinates: bin edges centered on bin frequencies
    and clipped to the non-negative range [0, nyquist]."""
    n_fft = 2 * (n - 1)
    edges = fourier_tempo_frequencies(sr=sr, hop_length=hop_length, win_length=n_fft)
    top = edges[-1]
    edges = edges - 0.5 * (edges[1] - edges[0])
    return np.append(np.maximum(0, edges), [top])
def fourier_tempo_frequencies(sr=22050, win_length=384, hop_length=512):
    """Frequencies (in BPM) of Fourier tempogram bins.

    sr / hop_length is the frame rate in frames/sec; scaling by 60 converts
    it to frames/min, and the FFT bin grid at that rate gives tempo bins.
    """
    frame_rate_per_minute = sr * 60 / float(hop_length)
    return fft_frequencies(sr=frame_rate_per_minute, n_fft=win_length)
def frames_to_time(frames, sr=22050, hop_length=512, n_fft=None):
    """Convert frame indices to time stamps in seconds (frames -> samples -> seconds)."""
    sample_positions = frames_to_samples(frames, hop_length=hop_length, n_fft=n_fft)
    return samples_to_time(sample_positions, sr=sr)
def samples_to_time(samples, sr=22050):
    """Convert sample indices to time stamps in seconds at sampling rate `sr`."""
    sample_array = np.asanyarray(samples)
    return sample_array / float(sr)
def frames_to_samples(frames, hop_length=512, n_fft=None):
    """Convert frame indices to sample indices.

    When `n_fft` is given, samples are shifted by half a window so that the
    index refers to the frame center rather than its left edge.
    """
    shift = int(n_fft // 2) if n_fft is not None else 0
    frame_array = np.asanyarray(frames)
    return (frame_array * hop_length + shift).astype(int)
def __coord_time(n, sr=22050, hop_length=512, **_kwargs):
    """Time coordinates (seconds) for the edges of n frames."""
    frame_edges = np.arange(n + 1)
    return frames_to_time(frame_edges, sr=sr, hop_length=hop_length)
def __coord_chroma(n, bins_per_octave=12, **_kwargs):
    """Chroma bin edge positions, expressed in semitone units (12 per octave)."""
    top_edge = (12.0 * n) / bins_per_octave
    return np.linspace(0, top_edge, num=n + 1, endpoint=True)
def tempo_frequencies(n_bins, hop_length=512, sr=22050):
    """Compute the frequencies (in beats per minute) of tempogram lag bins.

    Bin 0 corresponds to lag 0 and is assigned +inf; bin k (k >= 1) is the
    tempo whose beat period equals k frames: 60 * sr / (hop_length * k).

    :param n_bins: number of lag bins.
    :param hop_length: frame hop length in samples.
    :param sr: audio sampling rate in Hz.
    :returns: float64 ndarray of length n_bins.
    """
    # BUGFIX: the original used np.float, a deprecated alias removed in
    # NumPy 1.24; np.float64 preserves the historical dtype.
    bin_frequencies = np.zeros(int(n_bins), dtype=np.float64)
    bin_frequencies[0] = np.inf
    bin_frequencies[1:] = 60.0 * sr / (hop_length * np.arange(1.0, n_bins))
    return bin_frequencies
def __coord_tempo(n, sr=22050, hop_length=512, **_kwargs):
    """Tempo coordinates (BPM), scaled so lag-bin edges sit between bins."""
    bpm_bins = tempo_frequencies(n + 2, sr=sr, hop_length=hop_length)[1:]
    bin_index = np.arange(1, n + 2)
    return bpm_bins * (bin_index + 0.5) / bin_index
def __coord_cqt_hz(n, fmin=None, bins_per_octave=12, sr=22050, **_kwargs):
    """CQT bin frequencies, dropped by half a bin so bins center vertically."""
    if fmin is None:
        fmin = note_to_hz('C1')
    # Apply tuning correction (fraction of a bin) to the base frequency.
    tuning = _kwargs.get('tuning', 0.0)
    fmin = fmin * 2.0 ** (tuning / bins_per_octave)
    # Half-bin downward shift so CQT bins are centered vertically.
    half_bin = 2.0 ** (0.5 / bins_per_octave)
    freqs = cqt_frequencies(
        n + 1,
        fmin=fmin / half_bin,
        bins_per_octave=bins_per_octave,
    )
    if np.any(freqs > 0.5 * sr):
        warnings.warn(
            'Frequency axis exceeds Nyquist. '
            'Did you remember to set all spectrogram parameters in specshow?'
        )
    return freqs
def cqt_frequencies(n_bins, fmin, bins_per_octave=12, tuning=0.0):
    """Center frequencies of CQT bins: `fmin` scaled by a tuning correction
    and geometrically spaced at `bins_per_octave` bins per octave."""
    tuning_correction = 2.0 ** (float(tuning) / bins_per_octave)
    bin_exponents = np.arange(0, n_bins, dtype=float) / bins_per_octave
    return tuning_correction * fmin * 2.0 ** bin_exponents
def note_to_hz(note, **kwargs):
    """Convert a spelled note name (e.g. 'C1') to its frequency in Hz."""
    midi_number = note_to_midi(note, **kwargs)
    return midi_to_hz(midi_number)
def midi_to_hz(notes):
    """Convert MIDI note number(s) to frequency in Hz (A4 = 69 = 440 Hz)."""
    midi = np.asanyarray(notes)
    return 440.0 * 2.0 ** ((midi - 69.0) / 12.0)
def note_to_midi(note, round_midi=True):
    """Convert a spelled note (e.g. 'A4', 'C#3', 'Bb-1+50') to a MIDI number.

    Non-string input is treated as an iterable of notes and converted
    element-wise into an ndarray.  With ``round_midi=False``, cents offsets
    yield fractional MIDI numbers.
    """
    if not isinstance(note, str):
        return np.array([note_to_midi(n, round_midi=round_midi) for n in note])

    # Semitone offset of each natural pitch class within an octave.
    pitch_map = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}
    # Semitone shift contributed by each accidental character.
    acc_map = {
        '#': 1,
        '': 0,
        'b': -1,
        '!': -1,
        '♯': 1,
        '𝄪': 2,
        '♭': -1,
        '𝄫': -2,
        '♮': 0,
    }
    match = re.match(
        r'^(?P<note>[A-Ga-g])'
        r'(?P<accidental>[#♯𝄪b!♭𝄫♮]*)'
        r'(?P<octave>[+-]?\d+)?'
        r'(?P<cents>[+-]\d+)?$',
        note,
    )
    if not match:
        raise Exception('Improper note format: {:s}'.format(note))

    pitch_class = pitch_map[match.group('note').upper()]
    accidental_shift = np.sum([acc_map[ch] for ch in match.group('accidental')])
    octave_str = match.group('octave')
    cents_str = match.group('cents')
    octave = int(octave_str) if octave_str else 0
    cents = int(cents_str) * 1e-2 if cents_str else 0

    # MIDI octave -1 starts at 0, so octave 0 begins at note 12.
    midi_value = 12 * (octave + 1) + pitch_class + accidental_shift + cents
    if round_midi:
        midi_value = int(np.round(midi_value))
    return midi_value
def __coord_n(n, **_kwargs):
    """Bare integer bin positions: edges 0..n inclusive."""
    edges = np.arange(n + 1)
    return edges
def __coord_mel_hz(n, fmin=0, fmax=None, sr=22050, htk=False, **_kwargs):
    """Mel-bin edge frequencies (Hz), shifted to center bins and clipped at 0."""
    fmin = 0 if fmin is None else fmin
    fmax = 0.5 * sr if fmax is None else fmax
    edges = mel_frequencies(n, fmin=fmin, fmax=fmax, htk=htk)
    # Shift each interior edge down by half the gap to its predecessor so
    # the original frequencies fall at bin centers.
    edges[1:] -= 0.5 * np.diff(edges)
    return np.append(np.maximum(0, edges), [fmax])
def __coord_fft_hz(n, sr=22050, **_kwargs):
    """FFT bin edge frequencies (Hz), centered on bins and clipped to [0, nyquist]."""
    n_fft = 2 * (n - 1)
    edges = fft_frequencies(sr=sr, n_fft=n_fft)
    nyquist = edges[-1]
    # Center the FFT bins at their frequencies, then clip at zero.
    edges = edges - 0.5 * (edges[1] - edges[0])
    return np.append(np.maximum(0, edges), [nyquist])
def __check_axes(axes):
    """Check if "axes" is an instance of an axis object. If not, use `gca`."""
    if axes is None:
        # Lazy import keeps pyplot out of the object-oriented code path.
        import matplotlib.pyplot as plt
        return plt.gca()
    if isinstance(axes, Axes):
        return axes
    raise Exception(
        '`axes` must be an instance of matplotlib.axes.Axes. '
        'Found type(axes)={}'.format(type(axes))
    )
def __set_current_image(ax, img):
    """Helper to set the current image in pyplot mode.

    If the provided ``ax`` is not `None`, then we assume that the user is using
    the object API; in that case the pyplot current image is left untouched.
    """
    if ax is not None:
        return
    import matplotlib.pyplot as plt
    plt.sci(img)
def __scale_axes(axes, ax_type, which):
"""Set the axis scaling"""
kwargs = dict()
if which == 'x':
if version_parse(matplotlib.__version__) < version_parse('3.3.0'):
thresh = 'linthreshx'
base = 'basex'
scale = 'linscalex'
else:
thresh = 'linthresh'
base = 'base'
scale = 'linscale'
scaler = axes.set_xscale
limit = axes.set_xlim
else:
if version_parse(matplotlib.__version__) < version_parse('3.3.0'):
thresh = 'linthreshy'
base = 'basey'
scale = 'linscaley'
else:
thresh = 'linthresh'
base = 'base'
scale = 'linscale'
scaler = axes.set_yscale
limit = axes.set_ylim
# Map ticker scales
if ax_type == 'mel':
mode = 'symlog'
kwargs[thresh] = 1000.0
kwargs[base] = 2
elif ax_type in ['cqt', 'cqt_hz', 'cqt_note', 'cqt_svara']:
mode = 'log'
kwargs[base] = 2
elif ax_type in ['log', 'fft_note', 'fft_svara']:
mode = 'symlog'
kwargs[base] = 2
# kwargs[thresh] = core.note_to_hz(
# 'C2'
# ) # in librosa/core.py but I don't think it is needed
kwargs[scale] | |
<reponame>rhan1498/marine-integrations
"""
@package mi.instrument.satlantic.suna_deep.ooicore.driver
@file marine-integrations/mi/instrument/satlantic/suna_deep/ooicore/driver.py
@author <NAME>
@brief Driver for the ooicore
Release notes:
initial_rev
"""
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
from mi.core.log import get_logger
log = get_logger()
import re
import json
import time
import pprint
from mi.core.common import BaseEnum
from mi.core.common import InstErrorCode
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.instrument_fsm import InstrumentFSM
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.exceptions import SampleException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import InstrumentException
# Line terminator used in SUNA commands and responses.
NEWLINE = '\r\n'
# Default command/response timeout, in seconds.
TIMEOUT = 15
# Default number of retries for a command.
RETRY = 3
# SUNA ASCII FRAME REGEX
# Matches one complete ASCII sample frame.  The numbered trailing comments give
# the capture-group index consumed positionally by
# SUNASampleDataParticle._build_parsed_values, so group order must not change.
# Fields 28-31 (CTD time/salinity/temperature/pressure) are matched as empty
# bare commas rather than captured — their group patterns are commented out.
# NOTE(review): the fractional fields use r'\d*.\d*' with an UNESCAPED dot, so
# any character (and hence also plain integers) is accepted where a decimal
# point is expected.  This looseness may be load-bearing for real instrument
# output; confirm against captured frames before tightening to r'\.'.
SUNA_SAMPLE_PATTERN = r'SAT'                      # Sentinal
SUNA_SAMPLE_PATTERN += r'([A-Z]{3})'              # 1: Frame Type (string)
SUNA_SAMPLE_PATTERN += r'(\d{4}),'                # 2: Serial Number (int)
SUNA_SAMPLE_PATTERN += r'(\d{7}),'                # 3: Date, year and day-of-year (int)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 4. Time, hours of day (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 5. Nitrate concentration [uM] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 6. Nitrogen in nitrate [mg/l] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 7. Absorbance at 254 nm (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 8. Absorbance at 350 nm (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 9. Bromide trace [mg/l] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*),'             # 10. Spectrum average (int)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*),'             # 11. Dark value used for fit (int)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*),'             # 12. Integration time factor (int)
SUNA_SAMPLE_PATTERN += r'('                       # 13. Spectrum channels (open group)
for i in range(255):
    SUNA_SAMPLE_PATTERN += r'[+-]?\d*,'           # 13. Spectrum channels (255 x int)
SUNA_SAMPLE_PATTERN += r'[+-]?\d*),'              # 13. Spectrum channels (close group, last int = 256th)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 14. Internal temperature [C] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 15. Spectrometer temperature [C] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 16. Lamp temperature [C] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*),'             # 17. Cumulative lamp on-time [s] (int)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 18. Relative Humidity [%] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 19. Main Voltage [V] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 20. Lamp Voltage [V] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 21. Internal Voltage [V] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 22. Main Current [mA] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 23. Fit Aux 1 (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 24. Fit Aux 2 (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 25. Fit Base 1 (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 26. Fit Base 2 (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*.\d*),'         # 27. Fit RMSE (float)
SUNA_SAMPLE_PATTERN += r','    # r'([+-]?\d*),'       # 28. CTD Time [seconds since 1970] (int)
SUNA_SAMPLE_PATTERN += r','    # r'([+-]?\d*.\d*),'   # 29. CTD Salinity [PSU] (float)
SUNA_SAMPLE_PATTERN += r','    # r'([+-]?\d*.\d*),'   # 30. CTD Temperature [C] (float)
SUNA_SAMPLE_PATTERN += r','    # r'([+-]?\d*.\d*),'   # 31. CTD Pressure [dBar] (float)
SUNA_SAMPLE_PATTERN += r'([+-]?\d*)'              # 32. Check Sum (int)
SUNA_SAMPLE_PATTERN += r'\r\n'                    # <Carriage Return> <Line Feed>
SUNA_SAMPLE_REGEX = re.compile(SUNA_SAMPLE_PATTERN)
# SUNA STATUS REGEX
# Matches the multi-line response of the "get cfg" (STATUS) command: a fixed
# sequence of 8-character field mnemonics, each followed by whitespace and a
# value.  Group order is positional, so lines must not be reordered.
# NOTE(review): the float-valued fields use r'\d+.\d+' with an unescaped dot
# (accepts any character where the decimal point is expected) — left as-is to
# avoid rejecting instrument output; confirm before tightening to r'\.'.
SUNA_STATUS_PATTERN = r'SENSTYPE\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'SENSVERS\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'SERIALNO\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'INTWIPER\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'EXTPPORT\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'LMPSHUTR\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'REFDTECT\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'PROTECTR\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'SUPRCAPS\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'PWRSVISR\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'USBSWTCH\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'RELAYBRD\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'SDI12BRD\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'ANALGBRD\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'INTDATLG\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'APFIFACE\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'SCHDLING\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'FANATLMP\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'OWIRETLP\s+([0-9a-f]+)\s+'
SUNA_STATUS_PATTERN += r'OWIRETSP\s+([0-9a-f]+)\s+'
SUNA_STATUS_PATTERN += r'OWIRETHS\s+([0-9a-f]+)\s+'
SUNA_STATUS_PATTERN += r'ZSPEC_SN\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'FIBERLSN\s+([\w.]+)\s+'
SUNA_STATUS_PATTERN += r'STUPSTUS\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'BRNHOURS\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'BRNNUMBR\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'DRKHOURS\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'DRKNUMBR\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'CHRLDURA\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'CHRDDURA\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'BAUDRATE\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'MSGLEVEL\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'MSGFSIZE\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'DATFSIZE\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'OUTFRTYP\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'LOGFRTYP\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'OUTDRKFR\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'LOGDRKFR\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'TIMERESL\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'LOGFTYPE\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'ACQCOUNT\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'CNTCOUNT\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'DCMINNO3\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'DCMAXNO3\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'WDAT_LOW\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'WDAT_HGH\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'SDI12ADD\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'DATAMODE\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'OPERMODE\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'OPERCTRL\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'EXDEVTYP\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'EXDEVPRE\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'EXDEVRUN\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'WATCHDOG\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'COUNTDWN\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'FIXDDURA\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'PERDIVAL\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'PERDOFFS\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'PERDDURA\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'PERDSMPL\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'POLLTOUT\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'APFATOFF\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'STBLTIME\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'REFLIMIT\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'SKPSLEEP\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'LAMPTOFF\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'SPINTPER\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'DRKAVERS\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'LGTAVERS\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'REFSMPLS\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'DRKSMPLS\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'LGTSMPLS\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'DRKDURAT\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'LGTDURAT\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'TEMPCOMP\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'SALINFIT\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'BRMTRACE\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'BL_ORDER\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'FITCONCS\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'DRKCORMT\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'DRKCOEFS\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'DAVGPRM0\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'DAVGPRM1\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'DAVGPRM2\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'DAVGPRM3\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'A_CUTOFF\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'INTPRADJ\s+(\w+)\s+'
SUNA_STATUS_PATTERN += r'INTPRFAC\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'INTADSTP\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'INTADMAX\s+(\d+)\s+'
SUNA_STATUS_PATTERN += r'WFIT_LOW\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'WFIT_HGH\s+([+-]?\d+.\d+)\s+'
SUNA_STATUS_PATTERN += r'LAMPTIME\s+(\d+)\s+'
SUNA_STATUS_REGEX = re.compile(SUNA_STATUS_PATTERN)
# SUNA TEST REGEX
# Matches the output of the SELFTEST command: external/internal disk sizes,
# lamp odometer, temperatures, humidity, electrical readings, lamp power,
# dark/light spectrum statistics, and the final $Ok / $Error verdict.
SUNA_TEST_PATTERN = r'Extrn Disk Size; Free , (\d+); (\d+)\s+'
SUNA_TEST_PATTERN += r'Intrn Disk Size; Free , (\d+); (\d+)\s+'
SUNA_TEST_PATTERN += r'Fiberlite\s+Odometer , (\d+:\d+:\d+)\s+'
SUNA_TEST_PATTERN += r'Temperatures Hs Sp Lm , ([+-]?\d+.\d+) ([+-]?\d+.\d+) ([+-]?\d+.\d+)\s+'
SUNA_TEST_PATTERN += r'Humidity\s+, ([+-]?\d+.\d+)\s+'
SUNA_TEST_PATTERN += r'Electrical Mn Bd Pr C , ([+-]?\d+.\d+) ([+-]?\d+.\d+) ([+-]?\d+.\d+) ([+-]?\d+.\d+)\s+'
SUNA_TEST_PATTERN += r'Lamp\s+Power , (\d+) mW\s+'
SUNA_TEST_PATTERN += r'Spec Dark av sd mi ma ,\s+(\d+) \(\+/-\s+(\d+)\) \[\s*(\d+):\s*(\d+)\]\s+'
SUNA_TEST_PATTERN += r'Spec Lght av sd mi ma ,\s+(\d+) \(\+/-\s+(\d+)\) \[\s*(\d+):\s*(\d+)\]\s+'
SUNA_TEST_PATTERN += r'\$(Ok|Error)'
SUNA_TEST_REGEX = re.compile(SUNA_TEST_PATTERN)
###
# Driver Constant Definitions
###
class DataParticleType(BaseEnum):
    """
    Data particle types produced by this driver.
    """
    # Raw byte-stream particle type shared by all drivers.
    RAW = CommonDataParticleType.RAW
    # Parsed ASCII sample frame (see SUNA_SAMPLE_REGEX).
    SUNA_SAMPLE = "suna_sample"
    # Instrument configuration/status output (see SUNA_STATUS_REGEX).
    SUNA_STATUS = "suna_status"
    # Self-test output (see SUNA_TEST_REGEX).
    SUNA_TEST = "suna_test"
class ProtocolState(BaseEnum):
    """
    Instrument protocol states.  All values are drawn from the standard
    DriverProtocolState set so the framework recognizes them.
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
    POLL = DriverProtocolState.POLL
    AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
class ProtocolEvent(BaseEnum):
    """
    Protocol events.  Most values reuse the standard DriverEvent constants;
    the string-valued entries are SUNA-specific events.
    """
    DISCOVER = DriverEvent.DISCOVER
    INITIALIZE = DriverEvent.INITIALIZE
    ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    TEST = DriverEvent.TEST
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    CLOCK_SYNC = DriverEvent.CLOCK_SYNC
    ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
    RESET = DriverEvent.RESET
    # SUNA-specific events (no standard DriverEvent equivalent).
    START_POLL = "DRIVER_EVENT_START_POLL"
    STOP_POLL = "DRIVER_EVENT_STOP_POLL"
    MEASURE_N = "DRIVER_EVENT_MEASURE_N"
    MEASURE_0 = "DRIVER_EVENT_MEASURE_0"
    TIMED_N = "DRIVER_EVENT_TIMED_N"
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
class Capability(BaseEnum):
    """
    Protocol events that should be exposed to users (subset of above).
    """
    # Get Sample & Status Data
    ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
    ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
    MEASURE_N = ProtocolEvent.MEASURE_N
    MEASURE_0 = ProtocolEvent.MEASURE_0
    TIMED_N = ProtocolEvent.TIMED_N
    TEST = ProtocolEvent.TEST
    # Change States
    START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
    START_POLL = ProtocolEvent.START_POLL
    STOP_POLL = ProtocolEvent.STOP_POLL
    # Parameter Accessors/Mutators
    GET = ProtocolEvent.GET
    SET = ProtocolEvent.SET
class Parameter(DriverParameter):
    """Instrument parameter names.  Values are the 8-character mnemonics used
    by the instrument's get/set command line (see InstrumentCommand.GET/SET);
    inline notes mark read-only and startup parameters."""
    # Data acquisition
    OPERATION_MODE = "opermode"
    OPERATION_CONTROL = "operctrl"
    LIGHT_SAMPLES = "lgtsmpls"
    DARK_SAMPLES = "drksmpls"
    LIGHT_DURATION = "lgtdurat"
    DARK_DURATION = "drkdurat"
    COUNTDOWN = "countdwn"
    # Data processing
    TEMP_COMPENSATION = "tempcomp"
    FIT_WAVELENGTH_LOW = "wfit_low"  # read only
    FIT_WAVELENGTH_HIGH = "wfit_hgh"  # read only
    FIT_WAVELENGTH_BOTH = "wfitboth"
    CONCENTRATIONS_IN_FIT = "fitconcs"
    BASELINE_ORDER = "bl_order"
    DARK_CORRECTION_METHOD = "drkcormt"
    SALINITY_FITTING = "salinfit"
    BROMIDE_TRACING = "brmtrace"
    ABSORBANCE_CUTOFF = "a_cutoff"
    INTEG_TIME_ADJUSTMENT = "intpradj"
    INTEG_TIME_FACTOR = "intprfac"
    INTEG_TIME_STEP = "intadstp"
    INTEG_TIME_MAX = "intadmax"
    # Driver-side parameters (not instrument mnemonics)
    NUM_LIGHT_SAMPLES = "nmlgtspl"
    TIME_LIGHT_SAMPLE = "tlgtsmpl"
    #ACTIVECALFILE_NAME = "actvcalf"
    # Data acquisition (read only)
    REF_MIN_AT_LAMP_ON = "reflimit"  # read only
    SPECTROMETER_INTEG_PERIOD = "spintper"  # read only
    # Data acquisition (startup)
    POLLED_TIMEOUT = "polltout"  # startup
    SKIP_SLEEP_AT_START = "skpsleep"  # startup
    LAMP_STABIL_TIME = "stbltime"  # startup
    LAMP_SWITCH_OFF_TEMPERATURE = "lamptoff"  # startup
    # I/O (startup)
    MESSAGE_LEVEL = "msglevel"  # startup
    MESSAGE_FILE_SIZE = "msgfsize"  # startup
    DATA_FILE_SIZE = "datfsize"  # startup
    OUTPUT_FRAME_TYPE = "outfrtyp"  # startup
    OUTPUT_DARK_FRAME = "outdrkfr"  # startup
# Maps each instrument parameter to the callable used to coerce the string
# value returned by the instrument into its Python type (str/int/float).
# Driver-only parameters (NUM_LIGHT_SAMPLES, TIME_LIGHT_SAMPLE) are absent.
PARAM_TYPE_FUNC = {Parameter.OPERATION_MODE: str, Parameter.OPERATION_CONTROL: str, Parameter.LIGHT_SAMPLES: int,
                   Parameter.DARK_SAMPLES: int, Parameter.LIGHT_DURATION: int, Parameter.DARK_DURATION: int,
                   Parameter.COUNTDOWN: int, Parameter.TEMP_COMPENSATION: str, Parameter.FIT_WAVELENGTH_LOW: float,
                   Parameter.FIT_WAVELENGTH_HIGH: float, Parameter.CONCENTRATIONS_IN_FIT: int,
                   Parameter.BASELINE_ORDER: int, Parameter.DARK_CORRECTION_METHOD: str,
                   Parameter.SALINITY_FITTING: str, Parameter.BROMIDE_TRACING: str,
                   Parameter.ABSORBANCE_CUTOFF: float, Parameter.INTEG_TIME_ADJUSTMENT: str,
                   Parameter.INTEG_TIME_FACTOR: int, Parameter.INTEG_TIME_STEP: int, Parameter.INTEG_TIME_MAX: int,
                   Parameter.REF_MIN_AT_LAMP_ON: int, Parameter.SPECTROMETER_INTEG_PERIOD: int,
                   Parameter.POLLED_TIMEOUT: int, Parameter.SKIP_SLEEP_AT_START: str,
                   Parameter.LAMP_STABIL_TIME: int, Parameter.LAMP_SWITCH_OFF_TEMPERATURE: int,
                   Parameter.MESSAGE_LEVEL: str, Parameter.MESSAGE_FILE_SIZE: int, Parameter.DATA_FILE_SIZE: int,
                   Parameter.OUTPUT_FRAME_TYPE: str, Parameter.OUTPUT_DARK_FRAME: str}
class Prompt(BaseEnum):
    """
    Device i/o prompts and response patterns.
    """
    # Literal prompt strings emitted by the instrument.
    COMMAND = "SUNA>"
    POLLED = "CMD?"
    # NOTE: the following three are regex patterns (raw strings with capture
    # groups), not literal prompts, despite living in the Prompt enum.
    SET_OK = r'.*\r\n(\$Ok)\s+'
    OK = r'.*\r\n\$Ok ([\w.]+)\s+'
    ERROR = r'.*\r\n\$Error: (\d+)\s+'
    # Banner text emitted by the instrument on wake-up.
    WAKEUP = "Charging power loss protector."
class InstrumentCommand(BaseEnum):
    """
    Instrument command strings sent over the wire.
    """
    # Status and Maintenance
    CMD_LINE = "$"
    GET_CLOCK = "get clock"
    SET_CLOCK = "set clock"
    UPGRADE = "upgrade"
    REBOOT = "reboot"
    EXIT = "exit"
    GET_LAMPTIME = "get lamptime"
    GET_ACTIVECALFILE = "get activecalfile"
    SELFTEST = "selftest"
    STATUS = "get cfg"
    # File Commands
    LIST = "List"
    OUTPUT = "Output"
    SEND = "Send"
    DELETE = "Delete"
    RECEIVE = "Receive"
    # Polled Mode
    START = "Start"
    MEASURE = "Measure"  # takes param n indicating amount of light frames
    TIMED = "Timed"  # takes param n indicating duration in seconds to take light frames for
    CTD = "CTD"
    SLEEP = "Sleep"
    # Command Line Commands
    GET = "get"  # takes param that indicates which field to get
    SET = "set"  # takes params that indicate which field to set and what value to set it to
class LastSampleState(BaseEnum):
    """Sampling-mode markers — presumably recording whether the most recent
    sample came from polled or autosample mode; confirm against usage."""
    POLL = "poll"
    AUTO = "auto"
###############################################################################
# Data Particles
###############################################################################
class SUNASampleDataParticle(DataParticle):
_data_particle_type = DataParticleType.SUNA_SAMPLE
def _build_parsed_values(self):
matched = SUNA_SAMPLE_REGEX.match(self.raw_data)
if not matched:
raise SampleException("No regex match for sample [%s]" %
self.raw_data)
try:
parsed_data_list = [
str(matched.group(1)), # frame type
str(matched.group(2)), # serial number
int(matched.group(3)), # date year day-of-year
float(matched.group(4)), # time, hours of day
| |
0.91400000000000003, 1.0, 1.0],
[0.90918200000000005, 0.90900000000000003, 1.0, 1.0],
[0.90319400000000005, 0.90300000000000002, 1.0, 1.0],
[0.89121800000000007, 0.89100000000000001, 1.0, 1.0],
[0.88622800000000013, 0.88600000000000001, 1.0, 1.0],
[0.87425200000000003, 0.874, 1.0, 1.0],
[0.86926200000000009, 0.86899999999999999, 1.0, 1.0],
[0.8632740000000001, 0.86299999999999999, 1.0, 1.0],
[0.85129800000000011, 0.85099999999999998, 1.0, 1.0],
[0.84630800000000006, 0.84599999999999997, 1.0, 1.0],
[0.84032000000000007, 0.83999999999999997, 1.0, 1.0],
[0.82934200000000002, 0.82899999999999996, 1.0, 1.0],
[0.82335400000000014, 0.82299999999999995, 1.0, 1.0],
[0.81137800000000004, 0.81099999999999994, 1.0, 1.0],
[0.80638800000000022, 0.80600000000000005, 1.0, 1.0],
[0.80040000000000022, 0.80000000000000004, 1.0, 1.0],
[0.78942200000000018, 0.78900000000000003, 1.0, 1.0],
[0.78343400000000019, 0.78300000000000003, 1.0, 1.0],
[0.77744600000000019, 0.77700000000000002, 1.0, 1.0],
[0.76646800000000015, 0.76600000000000001, 1.0, 1.0],
[0.76048000000000016, 0.76000000000000001, 1.0, 1.0],
[0.74950200000000011, 0.749, 1.0, 1.0],
[0.74351400000000023, 0.74299999999999999, 1.0, 1.0],
[0.73752600000000013, 0.73699999999999999, 1.0, 1.0],
[0.72654800000000019, 0.72599999999999998, 1.0, 1.0],
[0.72056000000000009, 0.71999999999999997, 1.0, 1.0],
[0.71457200000000021, 0.71399999999999997, 1.0, 1.0],
[0.70359400000000027, 0.70300000000000007, 1.0, 1.0],
[0.69760600000000028, 0.69700000000000006, 1.0, 1.0],
[0.68662800000000013, 0.68599999999999994, 1.0, 1.0],
[0.68064000000000013, 0.67999999999999994, 1.0, 1.0],
[0.67465200000000014, 0.67399999999999993, 1.0, 1.0],
[0.66367400000000032, 0.66300000000000003, 1.0, 1.0],
[0.65768600000000021, 0.65700000000000003, 1.0, 1.0],
[0.64670800000000028, 0.64600000000000002, 1.0, 1.0],
[0.64072000000000018, 0.64000000000000001, 1.0, 1.0],
[0.6347320000000003, 0.63400000000000001, 1.0, 1.0],
[0.62375400000000025, 0.623, 1.0, 1.0],
[0.61776600000000026, 0.61699999999999999, 1.0, 1.0],
[0.61177800000000027, 0.61099999999999999, 1.0, 1.0],
[0.60080000000000022, 0.59999999999999998, 1.0, 1.0],
[0.59481200000000023, 0.59399999999999997, 1.0, 1.0],
[0.58383400000000019, 0.58299999999999996, 1.0, 1.0],
[0.5778460000000003, 0.57699999999999996, 1.0, 1.0],
[0.5718580000000002, 0.57099999999999995, 1.0, 1.0],
[0.56088000000000027, 0.56000000000000005, 1.0, 1.0],
[0.55489200000000038, 0.55400000000000005, 1.0, 1.0],
[0.54990200000000022, 0.54899999999999993, 1.0, 1.0],
[0.53792600000000024, 0.53699999999999992, 1.0, 1.0],
[0.53193800000000035, 0.53100000000000003, 1.0, 1.0],
[0.52096000000000031, 0.52000000000000002, 1.0, 1.0],
[0.51497200000000032, 0.51400000000000001, 1.0, 1.0],
[0.50998200000000038, 0.50900000000000001, 1.0, 1.0],
[0.49800600000000028, 0.497, 1.0, 1.0],
[0.49201800000000029, 0.49099999999999999, 1.0, 1.0],
[0.48702800000000035, 0.48599999999999999, 1.0, 1.0],
[0.47505200000000036, 0.47399999999999998, 1.0, 1.0],
[0.47006200000000031, 0.46899999999999997, 1.0, 1.0],
[0.45808600000000033, 0.45699999999999996, 1.0, 1.0],
[0.45209800000000033, 0.45099999999999996, 1.0, 1.0],
[0.44710800000000028, 0.44599999999999995, 1.0, 1.0],
[0.43513200000000041, 0.43400000000000005, 1.0, 1.0],
[0.43014200000000047, 0.42900000000000005, 1.0, 1.0],
[0.42415400000000048, 0.42300000000000004, 1.0, 1.0],
[0.41217800000000038, 0.41100000000000003, 1.0, 1.0],
[0.40718800000000044, 0.40600000000000003, 1.0, 1.0],
[0.39521200000000045, 0.39400000000000002, 1.0, 1.0],
[0.3902220000000004, 0.38900000000000001, 1.0, 1.0],
[0.38423400000000041, 0.38300000000000001, 1.0, 1.0],
[0.37225800000000042, 0.371, 1.0, 1.0],
[0.36726800000000037, 0.36599999999999999, 1.0, 1.0],
[0.36128000000000038, 0.35999999999999999, 1.0, 1.0],
[0.35030200000000045, 0.34899999999999998, 1.0, 1.0],
[0.34431400000000045, 0.34299999999999997, 1.0, 1.0],
[0.33233800000000036, 0.33099999999999996, 1.0, 1.0],
[0.32734800000000042, 0.32599999999999996, 1.0, 1.0],
[0.32136000000000042, 0.31999999999999995, 1.0, 1.0],
[0.31038200000000049, 0.30900000000000005, 1.0, 1.0],
[0.3043940000000005, 0.30300000000000005, 1.0, 1.0],
[0.29241800000000051, 0.29100000000000004, 1.0, 1.0],
[0.28742800000000046, 0.28600000000000003, 1.0, 1.0],
[0.28144000000000047, 0.28000000000000003, 1.0, 1.0],
[0.27046200000000054, 0.26900000000000002, 1.0, 1.0],
[0.26447400000000054, 0.26300000000000001, 1.0, 1.0],
[0.25848600000000055, 0.25700000000000001, 1.0, 1.0],
[0.24750800000000051, 0.246, 1.0, 1.0],
[0.24152000000000051, 0.23999999999999999, 1.0, 1.0],
[0.23054200000000047, 0.22899999999999998, 1.0, 1.0],
[0.22455400000000048, 0.22299999999999998, 1.0, 1.0],
[0.21856600000000048, 0.21699999999999997, 1.0, 1.0],
[0.20758800000000044, 0.20599999999999996, 1.0, 1.0],
[0.20160000000000045, 0.19999999999999996, 1.0, 1.0],
[0.19561200000000045, 0.19399999999999995, 1.0, 1.0],
[0.18463400000000063, 0.18300000000000005, 1.0, 1.0],
[0.17864600000000064, 0.17700000000000005, 1.0, 1.0],
[0.16766800000000059, 0.16600000000000004, 1.0, 1.0],
[0.1616800000000006, 0.16000000000000003, 1.0, 1.0],
[0.15569200000000061, 0.15400000000000003, 1.0, 1.0],
[0.14471400000000056, 0.14300000000000002, 1.0, 1.0],
[0.13872600000000057, 0.13700000000000001, 1.0, 1.0],
[0.13273800000000058, 0.13100000000000001, 1.0, 1.0],
[0.12176000000000053, 0.12, 1.0, 1.0],
[0.11577200000000054, 0.11399999999999999, 1.0, 1.0],
[0.10479400000000061, 0.10299999999999998, 1.0, 1.0],
[0.098806000000000616, 0.096999999999999975, 1.0, 1.0],
[0.092818000000000622, 0.09099999999999997, 1.0, 1.0],
[0.081840000000000579, 0.07999999999999996, 1.0, 1.0],
[0.075852000000000586, 0.073999999999999955, 1.0, 1.0],
[0.070862000000000536, 0.06899999999999995, 1.0, 1.0],
[0.05888600000000066, 0.057000000000000051, 1.0, 1.0],
[0.052898000000000667, 0.051000000000000045, 1.0, 1.0],
[0.041920000000000623, 0.040000000000000036, 1.0, 1.0],
[0.03593200000000063, 0.03400000000000003, 1.0, 1.0],
[0.030942000000000691, 0.029000000000000026, 1.0, 1.0],
[0.018966000000000705, 0.017000000000000015, 1.0, 1.0],
[0.012978000000000711, 0.01100000000000001, 1.0, 1.0],
[0.0020000000000006679, 0.0, 1.0, 1.0],
],
"name": "cmap",
},
run=False,
)
except:
print "WARNING: failed to restore ColorMap named Color Map in network self.macroNetwork"
print_exc()
Color_Map_9 = None
try:
## saving node call method ##
from Vision.StandardNodes import CallMethod
call_method_10 = CallMethod(constrkw={}, name="call method", library=stdlib)
self.macroNetwork.addNode(call_method_10, 179, 501)
apply(
call_method_10.inputPortByName["objects"].configure,
(),
{"datatype": "geom", "cast": True, "shape": "rect", "color": "red"},
)
apply(
call_method_10.inputPortByName["signature"].configure,
(),
{"color": "white", "cast": True, "shape": "oval"},
)
apply(
call_method_10.addInputPort,
(),
{
"name": "materials",
"cast": True,
"datatype": "colorfloat3or4(0)",
"required": False,
"height": 8,
"width": 12,
"shape": "rect",
"color": "orange",
},
)
apply(
call_method_10.addInputPort,
(),
{
"name": "inheritMaterial",
"cast": True,
"datatype": "int",
"required": False,
"height": 12,
"width": 12,
"shape": "circle",
"color": "yellow",
},
)
apply(
call_method_10.outputPortByName["objects"].configure,
(),
{"color": "white", "shape": "diamond"},
)
apply(
call_method_10.outputPortByName["results"].configure,
(),
{"color": "white", "shape": "diamond"},
)
call_method_10.inputPortByName["signature"].widget.set(
"Set materials inheritMaterial", run=False
)
except:
print "WARNING: failed to restore CallMethod named call method in network self.macroNetwork"
print_exc()
call_method_10 = None
try:
## saving node Checkbutton ##
from Vision.StandardNodes import CheckButtonNE
Checkbutton_11 = CheckButtonNE(
constrkw={}, name="Checkbutton", library=stdlib
)
self.macroNetwork.addNode(Checkbutton_11, 346, 478)
apply(
Checkbutton_11.inputPortByName["button"].configure,
(),
{"color": "yellow", "cast": True, "shape": "circle"},
)
apply(
Checkbutton_11.outputPortByName["value"].configure,
(),
{"color": "yellow", "shape": "circle"},
)
except:
print "WARNING: failed to restore CheckButtonNE named Checkbutton in network self.macroNetwork"
print_exc()
Checkbutton_11 = None
try:
## saving node Redraw ##
from DejaVu.VisionInterface.DejaVuNodes import Redraw
Redraw_12 = Redraw(constrkw={}, name="Redraw", library=vizlib)
self.macroNetwork.addNode(Redraw_12, 41, 518)
apply(
Redraw_12.inputPortByName["viewer"].configure,
(),
{"color": "yellow", "cast": True, "shape": "rect"},
)
apply(
Redraw_12.inputPortByName["trigger"].configure,
(),
{"color": "white", "cast": True, "shape": "diamond"},
)
except:
print "WARNING: failed to restore Redraw named Redraw in network self.macroNetwork"
print_exc()
Redraw_12 = None
try:
## saving node neg ##
from Vision.StandardNodes import Operator1
neg_13 = Operator1(constrkw={}, name="neg", library=stdlib)
self.macroNetwork.addNode(neg_13, 288, 321)
apply(
neg_13.inputPortByName["data"].configure,
(),
{"color": "white", "cast": True, "shape": "diamond"},
)
apply(
neg_13.inputPortByName["operation"].configure,
(),
{"color": "white", "cast": True, "shape": "diamond"},
)
apply(
neg_13.inputPortByName["applyToElements"].configure,
(),
{"color": "yellow", "cast": True, "shape": "circle"},
)
apply(
neg_13.outputPortByName["result"].configure,
(),
{"color": "white", "shape": "diamond"},
)
neg_13.inputPortByName["operation"].widget.set("neg", run=False)
apply(neg_13.configure, (), {"expanded": False})
except:
print "WARNING: failed to restore Operator1 named neg in network self.macroNetwork"
print_exc()
neg_13 = None
try:
## saving node Get viewer ##
from Vision.StandardNodes import GetAttr
Get_viewer_14 = GetAttr(constrkw={}, name="Get viewer", library=stdlib)
self.macroNetwork.addNode(Get_viewer_14, 18, 324)
apply(
Get_viewer_14.inputPortByName["objects"].configure,
(),
{"color": "white", "cast": True, "shape": "diamond"},
)
apply(
Get_viewer_14.inputPortByName["attr"].configure,
(),
{"color": "white", "cast": True, "shape": "oval"},
)
apply(
Get_viewer_14.outputPortByName["attrs"].configure,
(),
{"color": "cyan", "shape": "oval"},
)
apply(
Get_viewer_14.inputPortByName["attr"].widget.configure,
(),
{"choices": ("viewer",)},
)
Get_viewer_14.inputPortByName["attr"].widget.set("viewer", run=False)
except:
print "WARNING: failed to restore GetAttr named Get viewer in network self.macroNetwork"
print_exc()
Get_viewer_14 = None
try:
## saving node Slice Data ##
from Vision.StandardNodes import SliceData
Slice_Data_15 = SliceData(constrkw={}, name="Slice Data", library=stdlib)
self.macroNetwork.addNode(Slice_Data_15, 29, 421)
apply(
Slice_Data_15.inputPortByName["data"].configure,
(),
{"datatype": "list", "cast": True, "shape": "oval", "color": "cyan"},
)
apply(
Slice_Data_15.inputPortByName["_slice"].configure,
(),
{"color": "white", "cast": True, "shape": "diamond"},
)
apply(
Slice_Data_15.outputPortByName["data"].configure,
(),
{"color": "white", "shape": "diamond"},
)
Slice_Data_15.inputPortByName["_slice"].widget.set("[0]", run=False)
except:
print "WARNING: failed to restore SliceData named Slice Data in network self.macroNetwork"
print_exc()
Slice_Data_15 = None
try:
## saving node stddev ##
from Vision.StandardNodes import StdDev
stddev_16 = StdDev(constrkw={}, name="stddev", library=stdlib)
self.macroNetwork.addNode(stddev_16, 339, 230)
apply(
stddev_16.inputPortByName["values"].configure,
(),
{"color": "cyan", "cast": True, "shape": "oval"},
)
apply(
stddev_16.outputPortByName["stddev"].configure,
(),
{"color": "green", "shape": "circle"},
)
except:
print "WARNING: failed to restore StdDev named stddev in network self.macroNetwork"
print_exc()
stddev_16 = None
try:
## saving node Dial ##
from Vision.StandardNodes import DialNE
Dial_17 = DialNE(constrkw={}, name="Dial", library=stdlib)
self.macroNetwork.addNode(Dial_17, 412, 152)
apply(
Dial_17.inputPortByName["dial"].configure,
(),
{"color": "green", "cast": True, "shape": "circle"},
)
apply(
Dial_17.inputPortByName["mini"].configure,
(),
{"color": "green", "cast": True, "shape": "circle"},
)
apply(
Dial_17.inputPortByName["maxi"].configure,
(),
{"color": "green", "cast": True, "shape": "circle"},
)
apply(
Dial_17.outputPortByName["value"].configure,
(),
{"color": "green", "shape": "circle"},
)
Dial_17.inputPortByName["dial"].widget.set(5.0, run=False)
except:
print "WARNING: failed to restore DialNE named Dial in network self.macroNetwork"
print_exc()
Dial_17 = None
try:
## saving node mul ##
from Vision.StandardNodes import Operator2
mul_18 = Operator2(constrkw={}, name="mul", library=stdlib)
self.macroNetwork.addNode(mul_18, 369, 347)
apply(
mul_18.inputPortByName["data1"].configure,
(),
{
"datatype": "float",
"cast": True,
"shape": "circle",
"color": "green",
},
)
apply(
mul_18.inputPortByName["data2"].configure,
(),
{
"datatype": "float",
"cast": True,
"shape": "circle",
"color": "green",
},
)
apply(
mul_18.inputPortByName["operation"].configure,
(),
{"color": "white", "cast": True, "shape": "diamond"},
)
apply(
mul_18.inputPortByName["applyToElements"].configure,
(),
{"color": "yellow", "cast": True, "shape": "circle"},
)
apply(
mul_18.outputPortByName["result"].configure,
(),
{"color": "white", "shape": "diamond"},
)
mul_18.inputPortByName["operation"].widget.set("mul", run=False)
apply(mul_18.configure, (), {"expanded": False})
except:
print "WARNING: failed to restore Operator2 named mul in network self.macroNetwork"
print_exc()
mul_18 = None
self.macroNetwork.freeze()
## saving connections for network Map Pot On Geom ##
if Offset_5 is not None and mul_4 is not None:
self.macroNetwork.connectNodes(
Offset_5, mul_4, "value", "data2", blocking=True
)
if getSurfaceVFN_3 is not None and mul_4 is not None:
self.macroNetwork.connectNodes(
getSurfaceVFN_3, mul_4, "normals", "data1", blocking=True
)
if mul_4 is not None and add_6 is not None:
self.macroNetwork.connectNodes(
mul_4, add_6, "result", "data2", blocking=True
)
if getSurfaceVFN_3 is not None and add_6 is not None:
self.macroNetwork.connectNodes(
getSurfaceVFN_3, add_6, "vertices", "data1", blocking=True
)
if add_6 is not None and triInterp_7 is not None:
self.macroNetwork.connectNodes(
add_6, triInterp_7, "result", "points", blocking=True
)
if getSurfaceVFN_3 is not None and call_method_10 is not None:
self.macroNetwork.connectNodes(
getSurfaceVFN_3, call_method_10, "geom", "objects", blocking=True
)
if Checkbutton_11 | |
You are not enrolled in this section!")
return
# If event has ended or closed check-ins
if event.completed == "1":
bot.send_message(user_chat_id, "The instructor/admin has closed this event, you can no longer mark your attendance. Please contact your instructor / TAs if you are late.")
return
# If user already checked in to that event
already_attended = Attendance.query.filter_by(chat_id=user_chat_id,event_id=event.event_id).first()
if already_attended:
bot.send_message(user_chat_id, "You have already marked your attendance for this event.")
return
# Creates attendance check in
try:
new_attendance = Attendance(event_id=event.event_id,chat_id=user_chat_id,mark_time=datetime.datetime.now())
db.session.add(new_attendance)
db.session.commit()
bot.send_message(user_chat_id,"✅ You have successfully marked your attendance for section " + event.section + "'s " + event.event_name)
except Exception as e:
bot.reply_to(message,"An error occurred while processing your check-in " + str(e) + ". Please contact your instructor or notify the developer.")
def register(message):
    """Handle the user's name reply during registration.

    Validates the supplied name (non-empty, no '/', at most 100 characters,
    not already taken) and either re-prompts via register_next_step_handler
    or persists a new Users row keyed by the Telegram chat id.
    """
    user_chat_id = message.chat.id
    # message.text is None for non-text replies (stickers, photos, ...);
    # guard before strip() so we re-prompt instead of raising AttributeError.
    name = (message.text or "").strip()
    if name == "":
        msg = bot.reply_to(message, 'Your name cannot be empty. Please enter your name again!')
        bot.register_next_step_handler(msg, register)
        return
    if "/" in name:
        msg = bot.reply_to(message, "Your name shouldn't have a / in it (you have accidentally entered a command). Please enter your name again!")
        bot.register_next_step_handler(msg, register)
        return
    if len(name) > 100:
        msg = bot.reply_to(message, "Your name cannot be longer than 100 characters. Please enter your name again!")
        bot.register_next_step_handler(msg, register)
        return
    # Check if similar name in that section.
    if nameExists(name):
        msg = bot.reply_to(message, "Sorry, but it looks like someone already has that name. Please enter a new name.")
        bot.register_next_step_handler(msg, register)
        return
    new_user = Users(chat_id=user_chat_id, name=name)
    try:
        db.session.add(new_user)
        db.session.commit()
        bot.reply_to(message, "Thank you, " + name + ", you have successfully registered. You may now enroll into your sections using the /enroll command.")
    except Exception as e:
        bot.reply_to(message, "An error occurred while processing your registration: " + str(e) + ". Please contact your instructor or notify the developer.")
# For Students and Admins to change their name #####################
@bot.message_handler(commands=["updatename"])
def updateName(message):
    """Entry point for /updatename: prompt a registered user for a new name."""
    chat_id = message.chat.id
    # Bail out if another command flow is already in progress for this chat.
    if not doing_current_command(chat_id):
        return
    # Only users already present in the database may rename themselves.
    if not idExists(chat_id):
        bot.reply_to(message, "You have not registered in our database, please type /start and register your name with us first before enrolling!")
        return
    prompt = bot.reply_to(message, "Please enter your new name, make sure that it is not longer than 100 characters.")
    add_current_command(chat_id, "updatename")
    bot.register_next_step_handler(prompt, confirmName)
def confirmName(message):
    """Handle the new-name reply for /updatename.

    Re-prompts on invalid input (empty, contains '/', longer than 100
    characters, or already taken); otherwise updates the user's row and
    closes the command flow via end_current_command.
    """
    user_chat_id = message.chat.id
    # message.text is None for non-text replies; guard before strip()
    # so we re-prompt instead of raising AttributeError.
    name = (message.text or "").strip()
    if name == "":
        msg = bot.reply_to(message, 'Your name cannot be empty. Please enter your name again!')
        bot.register_next_step_handler(msg, confirmName)
        return
    if "/" in name:
        msg = bot.reply_to(message, "Your name shouldn't have a / in it (you have accidentally entered a command). Please enter your name again!")
        bot.register_next_step_handler(msg, confirmName)
        return
    if len(name) > 100:
        msg = bot.reply_to(message, "Your name cannot be longer than 100 characters. Please enter your name again!")
        bot.register_next_step_handler(msg, confirmName)
        return
    # Check if similar name in that section.
    if nameExists(name):
        msg = bot.reply_to(message, "Sorry, but it looks like someone already has that name. Please enter a new name.")
        bot.register_next_step_handler(msg, confirmName)
        return
    try:
        current_user = Users.query.filter_by(chat_id=user_chat_id).first()
        current_user.name = name
        db.session.commit()
        bot.reply_to(message, "Your name has been successfully changed to " + name + ".")
    except Exception as e:
        bot.reply_to(message, "An error occurred while processing your name change: " + str(e) + ". Please contact your instructor or notify the developer.")
    # All invalid-input branches return early, so the flow is only closed
    # once the rename attempt (successful or not) has been reported.
    end_current_command(user_chat_id)
# For Students to register into their section. ##############################################################################
@bot.message_handler(commands=['enroll'])  # /enroll wad2-g2
def enroll(message):
    """Entry point for /enroll <section>: ask the user to confirm enrolment.

    Validates that the user is registered, the section exists and the user
    is not already enrolled, then shows a Yes/No inline keyboard and stores
    a Temp_Enroll record for the follow-up callback (confirmEnroll).
    """
    user_chat_id = message.chat.id
    # Check if user is performing an action already.
    if not doing_current_command(user_chat_id):
        return
    # Check if user already registered
    if not idExists(user_chat_id):
        bot.reply_to(message, "You have not registered in our database, please type /start and register your name with us first before enrolling!")
        return
    # Everything after "/enroll " is the requested section id.
    chosen_section = message.text[7:].strip().lower()
    # A set gives O(1) membership instead of building and scanning a list.
    known_sections = {sect.section_id for sect in Sections.query.all()}
    if chosen_section == "":
        bot.reply_to(message, "Please enter your section after the /enroll command (e.g. /enroll esd-g5)")
    elif chosen_section not in known_sections:
        bot.reply_to(message, "Section not found! Please enter a valid section!")
    else:
        # Check if user is already enrolled in that section
        section_exist = User_Sections.query.filter_by(chat_id=user_chat_id, section=chosen_section).first()
        if section_exist:
            bot.reply_to(message, "You are already enrolled into this section, " + chosen_section)
            return
        keyboard = [[types.InlineKeyboardButton("Yes", callback_data='enroll:yes'), types.InlineKeyboardButton("No", callback_data='enroll:no')]]
        markup = types.InlineKeyboardMarkup(keyboard)
        temp_enroll = Temp_Enroll(user_chat_id, chosen_section, message.message_id)
        bot.send_message(user_chat_id, "You are going to register for the following section:" + "\n\nSection: " + temp_enroll.getSection() + "\n\nConfirm?", reply_markup=markup)
        temp_enroll.add_temp_enroll()
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "enroll")
def confirmEnroll(query):
    """Callback for the enrolment Yes/No keyboard: commit or cancel."""
    try:
        answer = query.data.split(":")[1]
        chat_id = query.from_user.id
        msg_id = query.message.id
        pending = getTempEnroll(chat_id)
        # Strip the inline keyboard so the buttons cannot be pressed twice.
        bot.edit_message_reply_markup(chat_id, msg_id, reply_markup=types.InlineKeyboardMarkup([]))
        if answer == "no":
            # Stop Enrolment if "No" is pressed
            bot.edit_message_text('Enrolment has been cancelled.', chat_id, msg_id)
        else:
            # Process Enrolment if "Yes" is pressed.
            db.session.add(User_Sections(chat_id=pending.getChatId(), section=pending.getSection(), role='Student'))
            section = Sections.query.filter_by(section_id=pending.getSection()).first()
            section.section_count += 1
            db.session.commit()
            bot.edit_message_text("Your enrolment to " + pending.getSection() + " has been successful. You may now scan QR codes of this section to mark your attendance.", chat_id, msg_id)
    except Exception as e:
        bot.send_message(query.from_user.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
    end_current_command(query.from_user.id)
# Admin command to create an event for their section ###################################################################
@bot.message_handler(commands=['create'])
def pickSection(message):
    """Entry point for /create: ask an admin which section the event is for."""
    chat_id = message.chat.id
    # Refuse if another command flow is active, or the caller is not an admin.
    if not doing_current_command(chat_id):
        return
    if not isAdmin(chat_id):
        return
    markup = getSectionsMarkup(1, retrieveSections(chat_id), 3)
    # Park a Temp_CreateEvent so the pickSection1 callback can continue the flow.
    Temp_CreateEvent().add_temp_create_event(chat_id)
    bot.send_message(chat_id, 'Please pick the section that you want to create an event for.', reply_markup=markup)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "pickSection1")
def enterEventName(query):
    """Callback after the admin picks a section: prompt for the event name."""
    # Bind these before the try block: the original code referenced them in
    # the except handler, raising NameError (masking the real error) whenever
    # the exception occurred before they were assigned.
    user_id = query.from_user.id
    temp_create = None
    try:
        section = query.data.split(":")[1]
        message_id = query.message.id
        temp_create = getTempCreateEvent(user_id)
        temp_create.setSection(section)
        temp_create.setMessageId(message_id)
        # Remove the section keyboard so it cannot be clicked again.
        bot.edit_message_reply_markup(user_id, message_id, reply_markup=types.InlineKeyboardMarkup([]))
        msg = bot.edit_message_text('You have chosen section ' + section + '.\n\nPlease enter a name for your event in the next chat bubble (e.g. Week 1 Attendance)', user_id, message_id)
        bot.register_next_step_handler(msg, confirmEvent)
    except Exception as e:
        bot.send_message(user_id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        # Only clean up state that was actually fetched.
        if temp_create is not None:
            temp_create.del_temp_create_event(user_id)
def confirmEvent(message):
    """Handle the event-name reply: reject duplicates, then ask to confirm."""
    chat_id = message.chat.id
    event_name = message.text
    pending = getTempCreateEvent(chat_id)
    target_section = pending.getSection()
    # Event names must be unique within a section; re-prompt on collision.
    duplicate = Events.query.filter_by(section=target_section, event_name=event_name).first()
    if duplicate:
        msg = bot.reply_to(message, "There is already an event with the exact name in your section. Please enter another event name!")
        bot.register_next_step_handler(msg, confirmEvent)
        return
    pending.setEventName(event_name)
    buttons = [[types.InlineKeyboardButton("Yes", callback_data='create:yes'), types.InlineKeyboardButton("No", callback_data='create:no')]]
    bot.reply_to(message, "Please confirm the following details of your event:\n\nEvent Name: " + pending.getEventName() + "\nSection: " + pending.getSection() + "\n\nConfirm?", reply_markup=types.InlineKeyboardMarkup(buttons))
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "create")
def createEvent(query):
    """Callback for the event-creation Yes/No keyboard.

    On "yes": generate a unique 20-character code word, persist the event,
    and send a QR code that deep-links students to the check-in command.
    On "no": cancel and discard the pending Temp_CreateEvent.
    """
    import secrets
    import string
    # Bound before the try block: the original assigned user_id inside it,
    # so an early exception made the except handler raise NameError.
    user_id = query.from_user.id
    temp_create = getTempCreateEvent(user_id)
    try:
        response = query.data.split(":")[1]
        message_id = query.message.id
        # Strip the inline keyboard so the buttons cannot be pressed twice.
        bot.edit_message_reply_markup(user_id, message_id, reply_markup=types.InlineKeyboardMarkup([]))
        if response == "no":
            # Cancel event creation
            bot.edit_message_text('Event Creation has been cancelled', user_id, message_id)
            temp_create.del_temp_create_event(user_id)
            return
        # Process event creation: draw random code words until one is unused.
        alphabet = string.ascii_letters + string.digits
        password = ''.join(secrets.choice(alphabet) for _ in range(20))
        while Events.query.filter_by(code_word=password).first():
            password = ''.join(secrets.choice(alphabet) for _ in range(20))
        new_event = Events(event_name=temp_create.getEventName(), section=temp_create.getSection(), code_word=password, completed=0)
        db.session.add(new_event)
        db.session.commit()
        # Creating the QR code with link attached.
        url = pyqrcode.create("https://t.me/attendshen_bot?start=" + password)
        url.png('qrcode.png', scale=15)
        bot.send_chat_action(user_id, 'upload_document')
        # Close the file handle after sending instead of leaking it.
        with open('qrcode.png', 'rb') as qr_file:
            bot.send_document(user_id, qr_file)
        bot.send_message(user_id, "The event, " + temp_create.getEventName() + " for section " + temp_create.getSection() + " has been created. Students may start checking in their attendance by scanning the QR code above. \n\n If scanning the QR code is not possible, you can also ask them to enter the following command: /start " + password)
        temp_create.del_temp_create_event(user_id)
    except Exception as e:
        bot.send_message(query.from_user.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        temp_create.del_temp_create_event(user_id)
# Admin command to mark an event as complete. ####################################################################
@bot.message_handler(commands=["complete"])
def pickSection2(message):
    """Entry point for /complete: ask an admin which section to close an event for."""
    # Bound up front: the original referenced new_temp_modify in the except
    # handler, raising NameError when the exception occurred before it was created.
    new_temp_modify = None
    try:
        if not doing_current_command(message.chat.id):
            return
        if not isAdmin(message.chat.id):
            return
        section_list = retrieveSections(message.chat.id)
        markup = getSectionsMarkup(2, section_list, 3)
        new_temp_modify = Temp_EventModify()
        new_temp_modify.add_temp_modify_event(message.chat.id, 'complete')
        bot.send_message(message.chat.id, 'Please pick the section that you want to mark an event as complete for.', reply_markup=markup)
    except Exception as e:
        bot.send_message(message.chat.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        # Only clean up state that was actually created.
        if new_temp_modify is not None:
            new_temp_modify.del_temp_modify_event(message.chat.id)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "pickSection2")
def pickEvent(query):
try:
section = query.data.split(":")[1]
user_id = query.from_user.id
message_id = query.message.id
temp_modify = getTempModifyEvent(user_id)
temp_modify.setSection(section)
temp_modify.setMessageId(message_id)
new_markup = types.InlineKeyboardMarkup([])
bot.edit_message_reply_markup(user_id,message_id,reply_markup=new_markup)
# Generate existing incomplete events of the section
incomplete_events = Events.query.filter_by(section=section,completed=0)
row_limit = 4 # MODIFY IF REQUIRED
keyboard = []
row = []
for event in incomplete_events:
row.append(types.InlineKeyboardButton(event.event_name,callback_data='confirmComplete:'+ str(event.event_id)))
if | |
#coding:utf-8
#
# id: bugs.core_2988
# title: Concurrent transaction number not reported if lock timeout occurs
# decription:
# 08-aug-2018.
# ::: ACHTUNG :::
# Important change has been added in FB 4.0.
# According to doc\\README.read_consistency.md, read committed isolation level
# was modified and new transaction with RC effectively is launched like:
# SET TRANSACTION READ COMMITTED READ CONSISTENCY
# Moreover, it is unable to start transaction in NO_record_version until
# config parameter ReadConsistency will be changed from 1(default) to 0.
# This mean that now it is unable to use NO_record_version setting in RC mode
# with default firebird.conf ==> we can not check behaviour of engine exactly
# as ticket says in its case-1:
# ===
# set transaction read committed no record_version lock timeout 10;
# select * from test;
# ===
# For this reason it was decided to create separate section for major version 4.0
# and use UPDATE statement instead of 'select * from test' (UPDATE also must READ
# data before changing).
#
# Checked on:
# FB25SC, build 2.5.9.27115: OK, 3.750s.
# FB30SS, build 3.0.4.33022: OK, 4.343s.
# FB40SS, build 4.0.0.1154: OK, 4.875s.
#
# tracker_id: CORE-2988
# min_versions: ['2.5.4']
# versions: 3.0, 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action, python_act
from firebird.driver import TPB, TraAccessMode, Isolation
# version: 3.0
# resources: None
# Output filters applied before comparing expected vs. actual output:
# strip the bits that vary between runs (user name in the error text,
# concurrent transaction numbers, block line/column positions).
substitutions_1 = [('record not found for user:.*', ''),
('-concurrent transaction number is.*', '-concurrent transaction number is'),
('-At block line: [\\d]+, col: [\\d]+', '-At block line')]
# No schema objects are needed up front; the test script creates its own.
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# ISQL script for the FB 3.x flavour of the test: a second user (tmp$2988)
# runs statements in autonomous transactions that collide with locks held
# by the main attachment; each failure must report the concurrent
# transaction number (the subject of CORE-2988).
test_script_1 = """
drop user tmp$2988;
commit;
create user tmp$2988 password '<PASSWORD>';
commit;
recreate table test (id integer);
insert into test values(1);
commit;
revoke all on all from tmp$2988;
grant select,update on test to tmp$2988;
commit;
set transaction lock timeout 1;
update test set id = id;
set term ^;
execute block as
declare v_usr char(31) = 'tmp$2988';
declare v_pwd varchar(20) = '<PASSWORD>';
declare v_connect varchar(255);
declare v_dummy int;
begin
execute statement ('update test set id = -1')
with autonomous transaction
as user v_usr password v_<PASSWORD>
into v_dummy;
end
^
set term ;^
rollback;
set transaction read committed no record_version lock timeout 1;
update test set id = id;
set term ^;
execute block as
declare v_usr char(31) = 'tmp$2988';
declare v_pwd varchar(20) = '<PASSWORD>';
declare v_connect varchar(255);
declare v_dummy int;
begin
execute statement ('select id from test')
with autonomous transaction
as user v_usr password v_pwd
into v_dummy;
end
^
set term ;^
rollback;
set list on;
set transaction read committed no record_version lock timeout 1;
select id from test with lock;
set term ^;
execute block as
declare v_usr char(31) = 'tmp$2988';
declare v_pwd varchar(20) = '<PASSWORD>';
declare v_connect varchar(255);
declare v_dummy int;
begin
begin
v_dummy = rdb$get_context('SYSTEM', 'EXT_CONN_POOL_SIZE');
rdb$set_context('USER_SESSION', 'EXT_CONN_POOL_SUPPORT','1');
when any do
begin
end
end
execute statement ('select id from test with lock')
with autonomous transaction
as user v_usr password <PASSWORD>
into v_dummy;
end
^
drop user tmp$2988
^
commit
^
-- ||||||||||||||||||||||||||||
-- ###################################||| HQBird 3.x SS/SC |||##############################
-- ||||||||||||||||||||||||||||
-- If we check SS or SC and ExtConnPoolLifeTime > 0 (avaliable in HQbird 3.x) then current
-- DB (bugs.core_NNNN.fdb) will be 'captured' by firebird.exe process and fbt_run utility
-- will not able to drop this database at the final point of test.
-- Moreover, DB file will be hold until all activity in firebird.exe completed and AFTER this
-- we have to wait for <ExtConnPoolLifeTime> seconds after it (discussion and small test see
-- in the letter to hvlad and dimitr 13.10.2019 11:10).
-- This means that one need to kill all connections to prevent from exception on cleanup phase:
-- SQLCODE: -901 / lock time-out on wait transaction / object <this_test_DB> is in use
-- #############################################################################################
execute block as
begin
if ( rdb$get_context('USER_SESSION', 'EXT_CONN_POOL_SUPPORT') = '1' ) then
begin
-- HQbird is tested now:
-- execute statement 'delete from mon$attachments where mon$attachment_id != current_connection';
execute statement 'ALTER EXTERNAL CONNECTIONS POOL CLEAR ALL';
end
end
^
commit
^
"""
# Action wrapper: run the script through ISQL against db_1 with the filters above.
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
# Expected stdout: the SELECT ... WITH LOCK in the main attachment still
# returns the committed row.
expected_stdout_1 = """
ID 1
"""
# Expected stderr: every blocked statement must report the lock conflict
# together with the concurrent transaction number (the point of CORE-2988).
# The exact numbers/positions are masked by substitutions_1 before comparing.
expected_stderr_1 = """
Statement failed, SQLSTATE = HY000
record not found for user: TMP$2988
Warning: ALL on ALL is not granted to TMP$2988.
Statement failed, SQLSTATE = 40001
deadlock
-update conflicts with concurrent update
-concurrent transaction number is 18
-At block line: 7, col: 9
Statement failed, SQLSTATE = 40001
deadlock
-read conflicts with concurrent update
-concurrent transaction number is 21
-At block line: 7, col: 9
Statement failed, SQLSTATE = 40001
deadlock
-read conflicts with concurrent update
-concurrent transaction number is 24
-At block line: 7, col: 9
"""
@pytest.mark.version('>=3.0,<4.0')
def test_1(act_1: Action):
    """Execute the ISQL script and compare both stdout and stderr (FB 3.x only)."""
    act_1.expected_stdout = expected_stdout_1
    act_1.expected_stderr = expected_stderr_1
    act_1.execute()
    # stderr is compared as well because the deadlock diagnostics are the point
    assert act_1.clean_stderr == act_1.clean_expected_stderr
    assert act_1.clean_stdout == act_1.clean_expected_stdout
# version: 4.0
# resources: None
# Substitutions: drop every line that does not mention the concurrent
# transaction, then mask its (run-dependent) transaction number.
substitutions_2 = [('^((?!concurrent transaction number is).)*$', ''),
                   ('[\\-]{0,1}concurrent transaction number is [0-9]+', 'concurrent transaction number is')]
# One row in `test` for the concurrent transactions to collide on.
init_script_2 = """
create table test(id int, x int, constraint test_pk primary key(id) using index test_pk);
commit;
insert into test(id, x) values(1, 111);
commit;
"""
db_2 = db_factory(sql_dialect=3, init=init_script_2)
# test_script_2
#---
#
# import os
# db_conn.close()
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = <PASSWORD>_password
#
# con = fdb.connect(dsn = dsn)
#
# tx1 = con.trans()
# tx1.begin()
# cur1=tx1.cursor()
# cur1.execute( 'update test set x = ? where id = ?', (222, 1) )
#
#
# # **INSTEAD** of ticket case-1:
# # set transaction read committed no record_version lock timeout N;
# # -- we start Tx with lock_timeout using custom TPB and try just to **update** record which is locked now
# # (but NOT 'SELECT ...'! It is useless with default value of confign parameter ReadConsistency = 1).
# # Message about concurrent transaction (which holds lock) in any case must appear in exception text.
# # NB: NO_rec_version is USELESS in default FB 4.0 config!
#
#
# # Linsk to doc for creating instance of custom TPB and start transaction which using it:
# # https://pythonhosted.org/fdb/reference.html#fdb.TPB
# # https://pythonhosted.org/fdb/reference.html#fdb.Connection.trans
#
# custom_tpb = fdb.TPB()
# custom_tpb.access_mode = fdb.isc_tpb_write
# custom_tpb.isolation_level = (fdb.isc_tpb_read_committed, fdb.isc_tpb_rec_version) # ::: NB ::: NO_rec_version is USELESS in default FB 4.0 config!
# custom_tpb.lock_timeout = 1
#
# tx2 = con.trans( default_tpb = custom_tpb )
# tx2.begin()
# cur2=tx2.cursor()
#
# try:
# cur2.execute( 'update test set x = ? where id = ?', (333, 1) )
# except Exception,e:
# print('Exception in cur2:')
# print( '-'*30 )
# for x in e:
# print(x)
# print( '-'*30 )
# finally:
# tx2.commit()
#
# #----------------------------------------------------------
#
# # This is for ticket case-2:
# # set transaction read committed lock timeout N;
# # select * from test with lock;
#
# custom_tpb.access_mode = fdb.isc_tpb_write
# custom_tpb.isolation_level = fdb.isc_tpb_concurrency
# custom_tpb.lock_timeout = 1
#
# tx3 = con.trans( default_tpb = custom_tpb )
# tx3.begin()
# cur3=tx3.cursor()
#
# try:
# cur3.execute( 'select x from test where id = ? with lock', (1,) )
# for r in cur3:
# print( r[0] )
# except Exception,e:
# print('Exception in cur3:')
# print( '-'*30 )
# for x in e:
# print(x)
# print( '-'*30 )
# finally:
# tx3.commit()
#
# tx1.commit()
# con.close()
#
#
#---
# Python-driven action for the FB 4.0 variant of this test.
act_2 = python_act('db_2', substitutions=substitutions_2)
# After substitutions only the masked conflict lines remain: one from the
# tx2 update attempt and one from the tx3 SELECT ... WITH LOCK attempt.
expected_stdout_2 = """
concurrent transaction number is 13
concurrent transaction number is 13
"""
@pytest.mark.version('>=4.0')
def test_2(act_2: Action, capsys):
with act_2.db.connect() as con:
tx1 = con.transaction_manager()
tx1.begin()
cur1 = tx1.cursor()
cur1.execute('update test set x = ? where id = ?', (222, 1))
# **INSTEAD** of ticket case-1:
# set transaction read committed no record_version lock timeout N;
# -- we start Tx with lock_timeout using custom TPB and try just to **update** record which is locked now
# (but NOT 'SELECT ...'! It is useless with default value of confign parameter ReadConsistency = 1).
# Message about concurrent transaction (which holds lock) in any case must appear in exception text.
# NB: NO_rec_version is USELESS in default FB 4.0 config!
custom_tpb = TPB(access_mode=TraAccessMode.WRITE,
isolation=Isolation.READ_COMMITTED_RECORD_VERSION,
lock_timeout=1)
tx2 = con.transaction_manager(default_tpb=custom_tpb.get_buffer())
tx2.begin()
cur2 = tx2.cursor()
try:
cur2.execute('update test set x = ? where id = ?', (333, 1))
except Exception as e:
print('Exception in cur2:')
print('-' * 30)
for x in e.args:
print(x)
print('-' * 30)
finally:
tx2.commit()
# This is for ticket case-2:
# set transaction read committed lock timeout N;
# select * from test with lock;
custom_tpb.isolation = Isolation.CONCURRENCY
tx3 = con.transaction_manager(default_tpb=custom_tpb.get_buffer())
tx3.begin()
cur3 = tx3.cursor()
try:
cur3.execute('select x from test where id = ? with lock', (1,))
for r in cur3:
print(r[0])
except Exception as e:
print('Exception in cur3:')
print('-' * 30)
for | |
<gh_stars>10-100
__all__ = ['Target', 'Fit', 'MODELS']
import copy as cp
from pylab import *
from .data import *
from . import qnms
import lal
from collections import namedtuple
import pkg_resources
import arviz as az
# def get_raw_time_ifo(tgps, raw_time, duration=None, ds=None):
# ds = ds or 1
# duration = inf if duration is None else duration
# m = abs(raw_time - tgps) < 0.5*duration
# i = argmin(abs(raw_time - tgps))
# return roll(raw_time, -(i % ds))[m]
# Analysis target: geocenter truncation time plus (optional) sky location
# and polarization angle used to compute antenna patterns.
Target = namedtuple('Target', ['t0', 'ra', 'dec', 'psi'])
# Names of the Stan models shipped with the package.
MODELS = ('ftau', 'mchi', 'mchi_aligned')
class Fit(object):
""" A ringdown fit.
Attributes
----------
model : str
name of Stan model to be fit.
data : dict
dictionary containing data, indexed by detector name.
acfs : dict
dictionary containing autocovariance functions corresponding to data,
if already computed.
start_times : dict
target truncation time for each detector.
antenna_patterns : dict
dictionary of tuples (Fp, Fc) with plus and cross antenna patterns
for each detector (only applicable depending on model).
target : Target
information about truncation time at geocenter and, if applicable,
source right ascension, declination and polarization angle.
result : arviz.data.inference_data.InferenceData
if model has been run, arviz object containing fit result
prior : arviz.data.inference_data.InferenceData
if model prior has been run, arviz object containing prior
modes : list
if applicable, list of (p, s, l, m, n) tuples identifying modes to be
fit (else, None).
n_modes : int
number of modes to be fit.
ifos : list
list of detector names.
t0 : float
target geocenter start time.
sky : tuple
tuple with source right ascension, declination and polarization angle.
analysis_data : dict
dictionary of truncated analysis data that will be fed to Stan model.
spectral_coefficients : tuple
tuple of arrays containing dimensionless frequency and damping rate
fit coefficients to be passed internally to Stan model.
model_data : dict
arguments passed to Stan model internally.
"""
_compiled_models = {}
def __init__(self, model='mchi', modes=None, **kws):
    """Set up an empty ringdown fit.

    Arguments
    ---------
    model : str
        name of the Stan model to use (one of MODELS); case-insensitive.
    modes : int or list
        either the number of modes, or a list of (p, s, l, m, n) indices.

    Remaining keyword arguments are stored as prior settings.
    """
    self.data = {}
    self.acfs = {}
    self.start_times = {}
    self.antenna_patterns = {}
    self.target = Target(None, None, None, None)
    if model.lower() in MODELS:
        self.model = model.lower()
    else:
        raise ValueError('invalid model {:s}; options are {}'.format(model,
                                                                     MODELS))
    self.result = None
    self.prior = None
    try:
        # if modes is integer, interpret as number of modes
        self._n_modes = int(modes)
        self.modes = None
    except TypeError:
        # otherwise, assume it's mode index list
        self.set_modes(modes)
        self._n_modes = None
    self._duration = None
    self._n_analyze = None
    # assume rest of kwargs are to be passed to stan_data (e.g. prior)
    self._prior_settings = kws
@property
def n_modes(self) -> int:
    """Number of damped sinusoids included in the template."""
    # explicit count takes precedence; otherwise count the mode list
    if self._n_modes:
        return self._n_modes
    return len(self.modes)
@property
def _model(self):
    """Compiled Stan model for ``self.model``, compiling on first access."""
    name = self.model
    if name is None:
        raise ValueError('you must specify a model')
    if name not in self._compiled_models:
        if name not in MODELS:
            raise ValueError('unrecognized model %r' % name)
        self.compile()
    return self._compiled_models[name]
def compile(self, verbose=False, force=False):
    """ Compile `Stan` model.
    Arguments
    ---------
    verbose : bool
        print out all messages from compiler.
    force : bool
        force recompile.
    """
    if force or self.model not in self._compiled_models:
        # compile model and cache in class variable
        code = pkg_resources.resource_string(__name__,
                                             'stan/ringdown_{}.stan'.format(self.model)
                                             )
        # defer the (slow) pystan import until compilation is actually needed
        import pystan
        kws = dict(model_code=code.decode("utf-8"))
        if not verbose:
            # silence compiler warnings
            kws['extra_compile_args'] = ["-w"]
        self._compiled_models[self.model] = pystan.StanModel(**kws)
@property
def ifos(self) -> list:
    """Names of the instruments to be analyzed."""
    # one entry per detector with registered data
    return [ifo for ifo in self.data]
@property
def t0(self) -> float:
    """ Target truncation time (defined at geocenter if model accepts
    multiple detectors).
    """
    # convenience alias for the first Target field
    return self.target.t0
@property
def sky(self) -> tuple[float, float, float]:
    """Source right ascension, declination and polarization angle
    (all in radians).

    Returns
    -------
    sky : tuple
        ``(ra, dec, psi)``; entries are None until a target is set.
    """
    # BUG FIX: annotation was `tuple[float]`, which declares a 1-tuple;
    # the property returns a 3-tuple.
    return (self.target.ra, self.target.dec, self.target.psi)
# this can be generalized for charged bhs based on model name
@property
def spectral_coefficients(self):
    """Arrays of frequency and damping-rate fit coefficients, one row per
    entry in ``self.modes``."""
    freq_rows = []
    rate_rows = []
    for mode_index in self.modes:
        mode_coeffs = qnms.KerrMode(mode_index).coefficients
        freq_rows.append(mode_coeffs[0])
        rate_rows.append(mode_coeffs[1])
    return array(freq_rows), array(rate_rows)
@property
def analysis_data(self) -> dict[Data]:
    """Truncated data segments to be analyzed, keyed by detector."""
    start = self.start_indices
    n = self.n_analyze
    # slice each detector's series from its start index forward
    return {ifo: series.iloc[start[ifo]:start[ifo] + n]
            for ifo, series in self.data.items()}
@property
def _default_prior(self):
    """Default prior settings for the current model; entries left as None
    must be supplied by the user before running."""
    default = {'A_scale': None}
    if self.model == 'ftau':
        # TODO: set default priors based on sampling rate and duration
        default.update(dict(
            f_max=None,
            f_min=None,
            gamma_max=None,
            gamma_min=None,
        ))
    elif self.model == 'mchi':
        default.update(dict(
            # per-mode deviation switches, disabled (zero) by default
            perturb_f=zeros(self.n_modes or 1),
            perturb_tau=zeros(self.n_modes or 1),
            df_max=0.5,
            dtau_max=0.5,
            M_min=None,
            M_max=None,
            chi_min=0,
            chi_max=0.99,
            flat_A_ellip=0
        ))
    elif self.model == 'mchi_aligned':
        default.update(dict(
            perturb_f=zeros(self.n_modes or 1),
            perturb_tau=zeros(self.n_modes or 1),
            df_max=0.5,
            dtau_max=0.5,
            M_min=None,
            M_max=None,
            chi_min=0,
            chi_max=0.99,
            # aligned model additionally constrains the inclination cosine
            cosi_min=-1,
            cosi_max=1,
            flat_A=0
        ))
    return default
@property
def prior_settings(self):
    """Effective prior options: user overrides layered over model defaults."""
    settings = dict(self._default_prior)
    settings.update(self._prior_settings)
    return settings
@property
def valid_model_options(self):
    """Names of all prior/model arguments recognized by the current model."""
    return [option for option in self._default_prior]
# TODO: warn or fail if self.results is not None?
def update_prior(self, **kws):
    """Update stored prior settings.

    Keyword arguments must be valid options for the current model
    (see :attr:`valid_model_options`); otherwise ValueError is raised.
    """
    valid_keys = self.valid_model_options
    for k, v in kws.items():
        if k in valid_keys:
            self._prior_settings[k] = v
        else:
            # BUG FIX: the two message fragments concatenated without a
            # space ("...argument.Valid options...")
            raise ValueError('{} is not a valid model argument. '
                             'Valid options are: {}'.format(k, valid_keys))
@property
def model_input(self):
    """Dictionary of arguments to be passed to the Stan model; raises
    ValueError if any required entry is still None."""
    if not self.acfs:
        print('WARNING: computing ACFs with default settings.')
        self.compute_acfs()
    data_dict = self.analysis_data
    stan_data = dict(
        # data related quantities
        nsamp=self.n_analyze,
        nmode=self.n_modes,
        nobs=len(data_dict),
        t0=list(self.start_times.values()),
        times=[d.time for d in data_dict.values()],
        strain=list(data_dict.values()),
        # Cholesky factors of the truncated ACFs, one per detector
        L=[acf.iloc[:self.n_analyze].cholesky for acf in self.acfs.values()],
        FpFc = list(self.antenna_patterns.values()),
        # default priors
        dt_min=-1E-6,
        dt_max=1E-6
    )
    if 'mchi' in self.model:
        # Kerr spectrum fits require the per-mode spectral coefficients
        f_coeff, g_coeff = self.spectral_coefficients
        stan_data.update(dict(
            f_coeffs=f_coeff,
            g_coeffs=g_coeff,
        ))
    stan_data.update(self.prior_settings)
    # any remaining None means the user forgot a mandatory setting
    for k, v in stan_data.items():
        if v is None:
            raise ValueError('please specify {}'.format(k))
    return stan_data
def copy(self):
    """Return a deep copy of this fit (data, settings and results)."""
    return cp.deepcopy(self)
def condition_data(self, **kwargs):
    """Condition data for all detectors.

    Keyword arguments are forwarded to each ``Data.condition`` call.
    """
    conditioned = {
        ifo: raw.condition(t0=self.start_times[ifo], **kwargs)
        for ifo, raw in self.data.items()
    }
    self.data = conditioned
    # drop cached ACFs so they cannot go stale relative to the new data
    self.acfs = {}
def run(self, prior=False, **kws):
    """ Fit model.
    Arguments
    ---------
    prior : bool
        whether to sample the prior (def. False).
    additional kwargs are passed to pystan.model.sampling
    """
    # get model input
    stan_data = self.model_input
    stan_data['only_prior'] = int(prior)
    # get sampler settings
    n = kws.pop('thin', 1)
    chains = kws.pop('chains', 4)
    n_jobs = kws.pop('n_jobs', chains)
    # iterations scale with thinning so the retained sample count is fixed
    n_iter = kws.pop('iter', 2000*n)
    metric = kws.pop('metric', 'dense_e')
    stan_kws = {
        'iter': n_iter,
        'thin': n,
        # same initial point replicated for every chain
        'init': (kws.pop('init_dict', {}),)*chains,
        'n_jobs': n_jobs,
        'chains': chains,
        'control': {'metric': metric}
    }
    # remaining kwargs override the defaults above
    stan_kws.update(kws)
    # run model and store
    print('Running {}'.format(self.model))
    result = self._model.sampling(data=stan_data, **stan_kws)
    if prior:
        self.prior = az.convert_to_inference_data(result)
    else:
        self.result = az.convert_to_inference_data(result)
def add_data(self, data, time=None, ifo=None, acf=None):
    """Register a strain data set (and, optionally, its ACF) under its
    detector name."""
    if not isinstance(data, Data):
        # wrap raw arrays in Data, using `time` when data carries no index
        data = Data(data, index=getattr(data, 'time', time), ifo=ifo)
    self.data[data.ifo] = data
    if acf is not None:
        self.acfs[data.ifo] = acf
def compute_acfs(self, shared=False, ifos=None, **kws):
    """Compute ACFs for data sets in `Fit.data`.

    Arguments
    ---------
    shared : bool
        if True, compute a single ACF from the first IFO's data and use it
        for all IFOs (useful for simulated data) (default False)
    ifos : list
        subset of IFOs to compute ACFs for; defaults to all of them

    Extra kwargs are passed to the ACF constructor.
    """
    if ifos is None:
        ifos = self.ifos
    if len(ifos) == 0:
        raise ValueError("first add data")
    if shared:
        # one ACF, estimated from the first IFO, reused everywhere
        shared_acf = self.data[ifos[0]].get_acf(**kws)
        for ifo in ifos:
            self.acfs[ifo] = shared_acf
    else:
        for ifo in ifos:
            self.acfs[ifo] = self.data[ifo].get_acf(**kws)
def set_tone_sequence(self, nmode, p=1, s=-2, l=2, m=2):
    """Set the template modes to a sequence of overtones sharing one
    angular structure.

    Use :meth:`ringdown.fit.Fit.set_modes` for an arbitrary mode set.

    Arguments
    ---------
    nmode : int
        number of tones (`nmode=1` keeps only the fundamental).
    p : int
        prograde (`p=1`) vs retrograde (`p=-1`) flag.
    s : int
        spin-weight.
    l : int
        azimuthal quantum number.
    m : int
        magnetic quantum number.
    """
    mode_ids = []
    for overtone in range(nmode):
        mode_ids.append((p, s, l, m, overtone))
    self.set_modes(mode_ids)
def set_modes(self, modes):
""" Establish list of modes to include in analysis template.
Modes identified by their `(p, s, l, m, n)` indices, where:
- `p` is `1` for prograde modes, and `-1` for retrograde modes;
- `s` is the spin-weight (`-2` for gravitational waves);
- `l` is the azimuthal quantum number;
- `m` is the magnetic quantum number;
- `n` is the overtone number.
Arguments
---------
modes : list
list of tuples with quasinormal mode `(p, s, l, m, n)` numbers.
"""
self.modes = qnms.construct_mode_list(modes)
if self.model == 'mchi_aligned':
ls_valid = [mode.l == 2 for mode in | |
<gh_stars>0
import ast
import keyword
import sys
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Tuple
from tokenize_rt import NON_CODING_TOKENS
from tokenize_rt import Token
from tokenize_rt import tokens_to_src
from tokenize_rt import UNIMPORTANT_WS
# map each opening brace to its closing counterpart
BRACES = {'(': ')', '[': ']', '{': '}'}
OPENING, CLOSING = frozenset(BRACES), frozenset(BRACES.values())
# all python keywords, for quick membership tests
KEYWORDS = frozenset(keyword.kwlist)
def immediately_paren(func: str, tokens: List[Token], i: int) -> bool:
    """Return True if token *i* is *func* and is immediately followed by '('."""
    is_func_name = tokens[i].src == func
    return is_func_name and tokens[i + 1].src == '('
class Victims(NamedTuple):
    # token indices of opening braces slated for removal (per victims())
    starts: List[int]
    # sorted token indices of closing braces / trailing commas to remove
    ends: List[int]
    # index of the first comma at argument depth, if one was seen
    first_comma_index: Optional[int]
    # index of the token where the argument expression begins
    arg_index: int
def _search_until(tokens: List[Token], idx: int, arg: ast.expr) -> int:
    """Advance from *idx* to the token at *arg*'s reported source position.

    Returns ``len(tokens)`` if no token matches.
    """
    for j in range(idx, len(tokens)):
        tok = tokens[j]
        if tok.line == arg.lineno and tok.utf8_byte_offset == arg.col_offset:
            return j
    return len(tokens)
def find_token(tokens: List[Token], i: int, src: str) -> int:
    """Return the index of the first token at or after *i* whose src is *src*."""
    j = i
    while True:
        if tokens[j].src == src:
            return j
        j += 1
def find_open_paren(tokens: List[Token], i: int) -> int:
    """Return the index of the first '(' token at or after *i*."""
    return find_token(tokens, i, '(')
def find_end(tokens: List[Token], i: int) -> int:
    """Return the index just past the logical line containing token *i*."""
    while tokens[i].name not in {'NEWLINE', 'ENDMARKER'}:
        i += 1
    # depending on the version of python, some will not emit
    # NEWLINE('') at the end of a file which does not end with a
    # newline (for example 3.6.5)
    if tokens[i].name == 'ENDMARKER':  # pragma: no cover
        i -= 1
    else:
        i += 1
    return i
if sys.version_info >= (3, 8):  # pragma: no cover (py38+)
    # python 3.8 fixed the offsets of generators / tuples
    def _arg_token_index(tokens: List[Token], i: int, arg: ast.expr) -> int:
        """Index of *arg*'s first coding token, trusting the ast offsets."""
        idx = _search_until(tokens, i, arg) + 1
        # skip whitespace/comments between the brace and the argument
        while idx < len(tokens) and tokens[idx].name in NON_CODING_TOKENS:
            idx += 1
        return idx
else:  # pragma: no cover (<py38)
    def _arg_token_index(tokens: List[Token], i: int, arg: ast.expr) -> int:
        """Index of *arg*'s first token, working around pre-3.8 ast column
        offsets that are wrong for tuples inside lists."""
        # lists containing non-tuples report the first element correctly
        if isinstance(arg, ast.List):
            # If the first element is a tuple, the ast lies to us about its col
            # offset. We must find the first `(` token after the start of the
            # list element.
            if isinstance(arg.elts[0], ast.Tuple):
                i = _search_until(tokens, i, arg)
                return find_open_paren(tokens, i)
            else:
                return _search_until(tokens, i, arg.elts[0])
        # others' start position points at their first child node already
        else:
            return _search_until(tokens, i, arg)
def victims(
        tokens: List[Token],
        start: int,
        arg: ast.expr,
        gen: bool,
) -> Victims:
    """Collect the brace/comma tokens to delete when rewriting the call
    whose opening brace is at *start* (e.g. ``set([...])`` -> ``{...}``).

    *arg* is the argument node; *gen* indicates a generator argument whose
    trailing comma must also be removed.
    """
    starts = [start]
    # brace depths at which a recorded opening brace lives
    start_depths = [1]
    ends: List[int] = []
    first_comma_index = None
    arg_depth = None
    arg_index = _arg_token_index(tokens, start, arg)
    brace_stack = [tokens[start].src]
    i = start + 1
    while brace_stack:
        token = tokens[i].src
        is_start_brace = token in BRACES
        is_end_brace = token == BRACES[brace_stack[-1]]
        if i == arg_index:
            # remember how deeply nested the argument itself is
            arg_depth = len(brace_stack)
        if is_start_brace:
            brace_stack.append(token)
        # Remove all braces before the first element of the inner
        # comprehension's target.
        if is_start_brace and arg_depth is None:
            start_depths.append(len(brace_stack))
            starts.append(i)
        if (
                token == ',' and
                len(brace_stack) == arg_depth and
                first_comma_index is None
        ):
            first_comma_index = i
        if is_end_brace and len(brace_stack) in start_depths:
            # also consume a preceding trailing comma (and its space)
            if tokens[i - 2].src == ',' and tokens[i - 1].src == ' ':
                ends.extend((i - 2, i - 1, i))
            elif tokens[i - 1].src == ',':
                ends.extend((i - 1, i))
            else:
                ends.append(i)
            if len(brace_stack) > 1 and tokens[i + 1].src == ',':
                ends.append(i + 1)
        if is_end_brace:
            brace_stack.pop()
        i += 1
    # May need to remove a trailing comma for a comprehension
    if gen:
        i -= 2
        while tokens[i].name in NON_CODING_TOKENS:
            i -= 1
        if tokens[i].src == ',':
            ends.append(i)
    return Victims(starts, sorted(set(ends)), first_comma_index, arg_index)
def find_closing_bracket(tokens: List[Token], i: int) -> int:
    """Given *i* at an opening bracket, return the index of its match."""
    assert tokens[i].src in OPENING
    depth = 1
    j = i + 1
    while True:
        src = tokens[j].src
        if src in OPENING:
            depth += 1
        elif src in CLOSING:
            depth -= 1
            if depth == 0:
                return j
        j += 1
def find_block_start(tokens: List[Token], i: int) -> int:
    """Return the index of the ':' opening the block, skipping any ':'
    nested inside brackets (lambdas, slices, dict literals)."""
    depth = 0
    j = i
    while True:
        src = tokens[j].src
        if depth == 0 and src == ':':
            return j
        if src in OPENING:
            depth += 1
        elif src in CLOSING:
            depth -= 1
        j += 1
class Block(NamedTuple):
    """Token span of a compound statement (``if``/``for``/``with``/...)."""
    # index of the first token of the statement (incl. leading indent)
    start: int
    # index of the ':' that opens the block
    colon: int
    # index of the first token of the block body
    block: int
    # index one past the last token of the block
    end: int
    # True when the body sits on the same line as the header
    line: bool

    def _initial_indent(self, tokens: List[Token]) -> int:
        # indent width of the statement header itself
        if tokens[self.start].src.isspace():
            return len(tokens[self.start].src)
        else:
            return 0

    def _minimum_indent(self, tokens: List[Token]) -> int:
        # smallest indent of any line in the body
        block_indent = None
        for i in range(self.block, self.end):
            if (
                    tokens[i - 1].name in ('NL', 'NEWLINE') and
                    tokens[i].name in ('INDENT', UNIMPORTANT_WS)
            ):
                token_indent = len(tokens[i].src)
                if block_indent is None:
                    block_indent = token_indent
                else:
                    block_indent = min(block_indent, token_indent)
        assert block_indent is not None
        return block_indent

    def dedent(self, tokens: List[Token]) -> None:
        """Shift the body left to the header's indentation level."""
        if self.line:
            return
        diff = self._minimum_indent(tokens) - self._initial_indent(tokens)
        for i in range(self.block, self.end):
            if (
                    tokens[i - 1].name in ('DEDENT', 'NL', 'NEWLINE') and
                    tokens[i].name in ('INDENT', UNIMPORTANT_WS)
            ):
                tokens[i] = tokens[i]._replace(src=tokens[i].src[diff:])

    def replace_condition(self, tokens: List[Token], new: List[Token]) -> None:
        """Replace everything before the ':' with *new* tokens."""
        tokens[self.start:self.colon] = new

    def _trim_end(self, tokens: List[Token]) -> 'Block':
        """the tokenizer reports the end of the block at the beginning of
        the next block
        """
        i = last_token = self.end - 1
        while tokens[i].name in NON_CODING_TOKENS | {'DEDENT', 'NEWLINE'}:
            # if we find an indented comment inside our block, keep it
            if (
                    tokens[i].name in {'NL', 'NEWLINE'} and
                    tokens[i + 1].name == UNIMPORTANT_WS and
                    len(tokens[i + 1].src) > self._initial_indent(tokens)
            ):
                break
            # otherwise we've found another line to remove
            elif tokens[i].name in {'NL', 'NEWLINE'}:
                last_token = i
            i -= 1
        return self._replace(end=last_token + 1)

    @classmethod
    def find(
            cls,
            tokens: List[Token],
            i: int,
            trim_end: bool = False,
    ) -> 'Block':
        """Locate the block whose header contains token *i*."""
        # back up over the header's leading indentation, if any
        if i > 0 and tokens[i - 1].name in {'INDENT', UNIMPORTANT_WS}:
            i -= 1
        start = i
        colon = find_block_start(tokens, i)
        j = colon + 1
        while (
                tokens[j].name != 'NEWLINE' and
                tokens[j].name in NON_CODING_TOKENS
        ):
            j += 1
        if tokens[j].name == 'NEWLINE':  # multi line block
            block = j + 1
            while tokens[j].name != 'INDENT':
                j += 1
            # track INDENT/DEDENT balance to find the matching block end
            level = 1
            j += 1
            while level:
                level += {'INDENT': 1, 'DEDENT': -1}.get(tokens[j].name, 0)
                j += 1
            ret = cls(start, colon, block, j, line=False)
            if trim_end:
                return ret._trim_end(tokens)
            else:
                return ret
        else:  # single line block
            block = j
            j = find_end(tokens, j)
            return cls(start, colon, block, j, line=True)
def _is_on_a_line_by_self(tokens: List[Token], i: int) -> bool:
    """Return True if token *i* occupies its line alone (NL, indent, token, NL)."""
    preceded_by_nl = tokens[i - 2].name == 'NL'
    only_ws_before = tokens[i - 1].name == UNIMPORTANT_WS
    followed_by_nl = tokens[i + 1].name == 'NL'
    return preceded_by_nl and only_ws_before and followed_by_nl
def remove_brace(tokens: List[Token], i: int) -> None:
    """Delete the brace token at *i*; if it stands alone on its line, remove
    the whole line (indent and trailing newline too)."""
    if not _is_on_a_line_by_self(tokens, i):
        del tokens[i]
    else:
        del tokens[i - 1:i + 2]
def remove_base_class(i: int, tokens: List[Token]) -> None:
    """Remove the base class whose name token is at *i* from a class
    definition's bases list, adjusting commas/parens as needed."""
    # look forward and backward to find commas / parens
    brace_stack = []
    j = i
    while tokens[j].src not in {',', ':'}:
        if tokens[j].src == ')':
            brace_stack.append(j)
        j += 1
    right = j
    if tokens[right].src == ':':
        # base is the last one; the final ')' stays with the class header
        brace_stack.pop()
    else:
        # if there's a close-paren after a trailing comma
        j = right + 1
        while tokens[j].name in NON_CODING_TOKENS:
            j += 1
        if tokens[j].src == ')':
            while tokens[j].src != ':':
                j += 1
            right = j
    if brace_stack:
        last_part = brace_stack[-1]
    else:
        last_part = i
    j = i
    # walk back past any '(' matching the ')' tokens we collected
    while brace_stack:
        if tokens[j].src == '(':
            brace_stack.pop()
        j -= 1
    while tokens[j].src not in {',', '('}:
        j -= 1
    left = j
    # single base, remove the entire bases
    if tokens[left].src == '(' and tokens[right].src == ':':
        del tokens[left:right]
    # multiple bases, base is first
    elif tokens[left].src == '(' and tokens[right].src != ':':
        # if there's space / comment afterwards remove that too
        while tokens[right + 1].name in {UNIMPORTANT_WS, 'COMMENT'}:
            right += 1
        del tokens[left + 1:right + 1]
    # multiple bases, base is not first
    else:
        del tokens[left:last_part + 1]
def remove_decorator(i: int, tokens: List[Token]) -> None:
    """Delete the decorator containing token *i*, from its '@' through the
    end of its line."""
    # back up to the '@' that starts this decorator
    while tokens[i - 1].src != '@':
        i -= 1
    # include the preceding indentation token unless '@' starts the line
    if i > 1 and tokens[i - 2].name not in {'NEWLINE', 'NL'}:
        i -= 1
    end = i + 1
    while tokens[end].name != 'NEWLINE':
        end += 1
    del tokens[i - 1:end + 1]
def parse_call_args(
tokens: List[Token],
i: int,
) -> Tuple[List[Tuple[int, int]], int]:
args = []
stack = [i]
i += 1
arg_start = i
while stack:
token = tokens[i]
if len(stack) == 1 and token.src == ',':
args.append((arg_start, i))
arg_start = i + 1
elif token.src in BRACES:
stack.append(i)
elif token.src == BRACES[tokens[stack[-1]].src]:
stack.pop()
# if we're at the end, append that argument
if not stack and tokens_to_src(tokens[arg_start:i]).strip():
args.append((arg_start, i))
i += 1
return args, | |
<filename>hpj.py<gh_stars>0
# hpj.py
# Simple Python to Javascript translator with an emphasis on readability of generated code.
# This code is based on <NAME>'s py2js code. It is a very
# basic python to javascript intereprter, with an emphasis on
# readability of javascript code.
#
# Usage: python hpj.py <file.py>
# generates: hyj.js and file.js
#
# The MIT License
#
# Copyright (c) 2008 - 2009 <NAME>
# Copyright (c) 2013 - 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
#
# some of this code is from the pyjamas project
# Copyright 2006 <NAME> and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# classes to model a parse tree that is a simplified equivalent to python's ast
# Expression related
import copy, re
import _ast
import codecs
import sys, os, time
from types import IntType, ListType, StringType, TupleType, DictType
class Scope(object):
    """A named lexical scope: tracks declared globals and generator
    (yield) bookkeeping for the JS translator."""
    def __init__(self, name):
        self.scopename = name
        self.globals = set()
        self.yieldScope = False
        self.yieldState = 0
        self.lineno = -1
    def addGlobal(self, name):
        """Record *name* as declared global in this scope."""
        self.globals.add(name)
    def isGlobal(self, name):
        """Return True if *name* was declared global here."""
        return name in self.globals
    def printscope(self):
        """Debug representation of this scope."""
        return ('scope', self.scopename, self.globals)
    def JSYieldGuard(self, yieldState=0):
        """Return the JS guard for the combined yield state."""
        total = yieldState + self.yieldState
        return 'if (_yieldobj._yieldState <= ' + str(total) + ')'
class ScopeManager:
    """Stack of active Scope objects; resolves globals, namespaces and
    generator (yield) state during translation."""
    def __init__(self):
        self.scope = []          # innermost scope is self.scope[-1]
        self.yieldState = 0
        self.inYieldFn = False
    def JSYieldGuard(self, stmtObj):
        """Return the JS yield guard for *stmtObj*, or '' when no enclosing
        scope is a yield scope (no guard needed)."""
        yieldScope = False
        for scope in self.scope:
            if scope.yieldScope:
                yieldScope = True
                break
        if yieldScope:
            return stmtObj.JSYieldGuard(self.yieldState)
        # BUG FIX: previously `return stmt` -- an undefined name raising
        # NameError; an empty string means "emit no guard".
        return ''
    def incrYieldState(self):
        self.yieldState += 1
    def pushScope(self,scope):
        return self.scope.append(scope)
    def popScope(self):
        # popping an empty stack is a silent no-op
        if len(self.scope) != 0:
            self.scope.pop()
    def getInnerScope(self):
        """Return the innermost scope, or None when the stack is empty."""
        if not self.scope:
            return None
        return self.scope[-1]
    def isLocal(self,name):
        # NOTE(review): despite the name, this reports whether *name* was
        # declared global in the innermost scope -- confirm callers' intent.
        return self.getInnerScope().isGlobal(name)
    def isGlobal(self,name, scopeType=None):
        """True if *name* is declared global in any enclosing (non-innermost)
        scope, optionally restricted to scopes of *scopeType*."""
        #if isinstance(self.getInnerScope(),Module):
        #    return True
        for scope in self.scope[:-1]:
            if scopeType and isinstance(scope, scopeType) and scope.isGlobal(name):
                return True
            if scopeType and isinstance(scope, scopeType) and not scope.isGlobal(name):
                return False
            if not scopeType and scope.isGlobal(name):
                return True
        return False
    def getNamespace(self,name):
        """Return the dotted namespace (module/class chain) that qualifies
        *name*, or '' when it needs no qualification."""
        namespaces = []
        seen = False
        # walk outward from the innermost scope
        for i in xrange(len(self.scope)-1, -1, -1):
            scope = self.scope[i]
            if seen:
                if isinstance(scope, ClassDefinitionStatement): namespaces.append(scope.cname)
                if isinstance(scope, Module): namespaces.append(scope.name)
            elif scope.isGlobal(name):
                if not isinstance(scope, ClassDefinitionStatement) and not isinstance(scope, Module):
                    return ""
                elif isinstance(scope, ClassDefinitionStatement):
                    seen = True
                    namespaces.append(scope.cname)
                elif isinstance(scope, Module):
                    seen = True
                    namespaces.append(scope.name)
            elif scope.scopename==name:
                seen=True
        if not seen:
            return ""
        namespaces.reverse()
        return ".".join(namespaces)
    def hasOuterScope(self, scopeType):
        """True if any scope on the stack is an instance of *scopeType*."""
        for i in xrange(len(self.scope)-1, -1, -1):
            scope = self.scope[i]
            if isinstance(scope, scopeType):
                return True
        return False
    def setYieldScope(self):
        """Mark every scope up to the enclosing function as a yield scope."""
        for i in xrange(len(self.scope)-1, -1, -1):
            scope = self.scope[i]
            scope.yieldState += 1
            scope.yieldScope = True
            if isinstance(scope, FunctionDefinitionStatement):
                break
    def addGlobal(self,name):
        # with no active scope this is a no-op (returns True as a flag)
        if not self.getInnerScope():
            return True
        self.getInnerScope().addGlobal(name)
class Expr(Scope):
    """Base class for all expression nodes; each expression is a Scope."""
    def __init__(self, name=""):
        super(Expr, self).__init__(name)
    def walk(self, walker):
        """Default leaf visit: begin then end, no children."""
        walker.begin(self)
        walker.end(self)
# Sub-classes of Expr
class AttributeLookup(Expr):
    """Attribute access expression: ``obj.attr``."""
    def __init__(self, obj, attr):
        Expr.__init__(self)
        self.obj = obj
        self.attr = attr
    def __repr__(self):
        return str(('attribute', self.obj, self.attr))
    def walk(self, walker):
        """Visit this node, then the object subtree."""
        walker.begin(self)
        self.obj.walk(walker)
        walker.end(self)
class BinaryOp(Expr):
    """Binary operation: ``left <op> right``."""
    def __init__(self, op, left, right):
        Expr.__init__(self)
        self.op = op
        self.left = left
        self.right = right
    def __repr__(self):
        return str(('binaryop', self.op, self.left, self.right))
    def walk(self, walker):
        """Visit this node, then both operand subtrees."""
        walker.begin(self)
        self.left.walk(walker)
        self.right.walk(walker)
        walker.end(self)
class DictionaryValue(Expr):
    """Dictionary literal, stored as a list of (key, value) pairs."""
    def __init__(self, keyvalues):
        Expr.__init__(self)
        self.keyvalues = keyvalues
    def __repr__(self):
        return str(('keyvalues', self.keyvalues))
    def walk(self, walker):
        """Visit this node and every present key / value expression."""
        walker.begin(self)
        for key, value in self.keyvalues:
            if key:
                key.walk(walker)
            if value:
                value.walk(walker)
        walker.end(self)
class FunctionCall(Expr):
    """Call of a free function: ``fname(args..., kwargs...)``."""
    def __init__(self, fname, args, kwargs):
        Expr.__init__(self)
        self.fname = fname
        self.args = args
        self.kwargs = kwargs
    def __repr__(self):
        return str(('fcall', self.fname, self.args, self.kwargs))
    def walk(self, walker):
        """Visit this node and the positional-argument subtrees."""
        walker.begin(self)
        for positional in self.args:
            positional.walk(walker)
        # fixme kwargs? (keyword-argument values are not visited)
        walker.end(self)
class Lambda(Expr):
    """``lambda args: body`` expression; opens a scope named 'lambda'."""
    def __init__(self, args, body):
        Expr.__init__(self, "lambda")
        self.args = args
        self.body = body
    def __repr__(self):
        return str(('lambda', self.args, self.body))
    def walk(self, walker):
        """Visit this node, its parameters, then the body."""
        walker.begin(self)
        for param in self.args:
            param.walk(walker)
        self.body.walk(walker)
        walker.end(self)
class ListValue(Expr):
    """List literal holding its element expressions."""
    def __init__(self, elements):
        Expr.__init__(self)
        self.elements = elements
    def __repr__(self):
        return str(('listvalue', self.elements))
    def walk(self, walker):
        walker.begin(self)
        for element in self.elements:
            element.walk(walker)
        walker.end(self)
class ListComprehension(Expr):
    """``[expr for ... in ...]`` — element expression plus generator clauses."""
    def __init__(self, expr, generators):
        Expr.__init__(self)
        self.expr = expr
        self.generators = generators
    def __repr__(self):
        return str(('listcomp', self.expr, self.generators))
    def walk(self, walker):
        walker.begin(self)
        self.expr.walk(walker)
        for clause in self.generators:
            clause.walk(walker)
        walker.end(self)
class ListComprehensionGenerator(Expr):
    """One ``for target in itr [if cond...]`` clause of a comprehension."""
    def __init__(self, target, itr, cond):
        self.target = target
        self.itr = itr
        # assumes cond holds the clause's filter conditions (iterable; may
        # be empty) -- TODO confirm against the construction site
        self.cond = cond
        Expr.__init__(self)
    def __repr__(self):
        # BUG FIX: previously referenced nonexistent `self.conds`
        # (AttributeError); the attribute set in __init__ is `cond`.
        return str(('listcomp_gen', self.target, self.itr, self.cond))
    def walk(self, walker):
        walker.begin(self)
        self.target.walk(walker)
        self.itr.walk(walker)
        # BUG FIX: was `self.conds` (AttributeError)
        for condition in self.cond:
            condition.walk(walker)
        walker.end(self)
class Literal(Expr):
    """Constant literal value; a leaf node (inherits Expr.walk)."""
    def __init__(self, value):
        Expr.__init__(self)
        self.value = value
    def __repr__(self):
        return str(('literal', self.value))
class MethodCall(FunctionCall):
    """Method call on a target expression: ``target.fname(args...)``."""
    def __init__(self, target, fname, args, kwargs):
        self.target = target
        FunctionCall.__init__(self, fname, args, kwargs)
    def __repr__(self):
        return str(('mcall', self.target, FunctionCall.__repr__(self)))
    def walk(self, walker):
        """Visit this node, the target, then the positional arguments."""
        walker.begin(self)
        self.target.walk(walker)
        for positional in self.args:
            positional.walk(walker)
        walker.end(self)
class SliceOp(Expr):
    """Slicing expression ``target[lwb:upb:step]``; *slicing* is the
    (lower, upper, step) triple, entries possibly None."""
    def __init__(self, target, slicing):
        self.target = target
        self.lwb = slicing[0]
        self.upb = slicing[1]
        self.step = slicing[2]
        # BUG FIX: Expr.__init__ was never called, leaving Scope attributes
        # (scopename, globals, ...) unset, unlike every sibling node type.
        Expr.__init__(self)
    def __repr__(self):
        return str(('slice', self.lwb, self.upb, self.step))
    def walk(self, walker):
        # BUG FIX: walker.begin(self) was missing, unbalancing the
        # begin/end pairs relative to all other nodes' walk() methods.
        walker.begin(self)
        # NOTE(review): the target subtree is not walked here -- confirm
        # whether that is intentional.
        if self.lwb:
            self.lwb.walk(walker)
        if self.upb:
            self.upb.walk(walker)
        if self.step:
            self.step.walk(walker)
        walker.end(self)
class UniOp(Expr):
    """Unary operation: ``<op> arg``."""
    def __init__(self, op, arg):
        Expr.__init__(self)
        self.op = op
        self.arg = arg
    def __repr__(self):
        return str(('unaryop', self.op, self.arg))
    def walk(self, walker):
        walker.begin(self)
        self.arg.walk(walker)
        walker.end(self)
class VarName(Expr):
    """Reference to a variable name, remapped through the module-level
    `pj_var` table (reserved-word translation)."""
    def __init__(self, varname):
        self.varname = pj_var.get(varname, varname)
        Expr.__init__(self)
    def __repr__(self):
        # BUG FIX: a duplicate __repr__ returning ('var', ...) preceded this
        # one; class bodies keep only the last definition, so it was dead
        # code and has been removed. Live behavior is unchanged.
        return str(('name', self.varname))
    def walk(self, walker):
        walker.begin(self)
        walker.end(self)
# Statement related
class Statement(Scope):
    """Base class for all statement AST nodes; every statement is a Scope."""
    def __init__(self, name="statement"):
        Scope.__init__(self, name)
# Statement sub-classes
class AssignmentStatement(Statement):
    """AST node for a plain assignment ``target = expr``."""
    def __init__(self, target, expr):
        self.target = target
        self.expr = expr
        Statement.__init__(self)
    def __repr__(self):
        return str(('assign', self.target, self.expr))
class AugmentedAssignmentStatement(Statement):
    """AST node for an augmented assignment ``target op= expr``."""
    def __init__(self, target, op, expr):
        self.target = target
        self.op = op
        self.expr = expr
        Statement.__init__(self)
    def __repr__(self):
        return str(('augmented_assign', self.target, self.op, self.expr))
class BreakStatement(Statement):
    """AST node for a ``break`` statement."""
    def __init__(self):
        Statement.__init__(self)
    def __repr__(self):
        # Bug fix: ('break') is just a parenthesised string, not a tuple; a
        # one-element tuple needs a trailing comma to match the ('kind', ...)
        # repr convention used by the other statement nodes.
        return str(('break',))
class ClassDefinitionStatement(Statement):
registered = {}
    def __init__(self,cname,module):
        """Create a class-definition node for class `cname` defined in `module`.

        The stored class name is mapped through the ``pj_var`` substitution
        table when an entry exists. ``module`` may be falsy for module-less
        classes (see getClassNamespace).
        """
        self.cname = pj_var.get(cname,cname)
        self.module = module
        # Set later via setParentClass() for inner classes.
        self.parent_class = None
        # NOTE: the Scope name uses the original (unmapped) cname.
        Statement.__init__(self,"class:"+cname)
    def configure(self,bases,constructor,memberfns,staticvars,innerclasses):
        """Attach the parsed class body and register this class globally.

        Stores the base-class names, the constructor node, the member
        function nodes, the static member variables and the inner class
        definitions, then adds this class to the global registry.
        """
        self.bases = bases
        self.constructor = constructor
        self.mfns = memberfns
        self.staticmembervars = staticvars
        self.innerclasses = innerclasses
        ClassDefinitionStatement.register(self)
def getClassNamespace(self):
if self.parent_class:
namespace = self.parent_class.getClassNamespace()
elif self.module:
namespace = self.module.name # space
else:
namespace = ""
if namespace != "":
namespace += "."
namespace += self.cname
return namespace
    def setParentClass(self,parent):
        """Record the enclosing (outer) class of this inner class."""
        self.parent_class = parent
    def __repr__(self):
        # Full dump of the class definition. Requires configure() to have run
        # first: bases/constructor/mfns/... are only set there.
        return str(('cdef',self.cname,self.module,self.parent_class,self.bases,self.constructor,self.mfns,self.staticmembervars, self.innerclasses))
    @staticmethod
    def register(cdef):
        """Add `cdef` to the global registry, keyed by its (mapped) class name."""
        ClassDefinitionStatement.registered[cdef.cname] = cdef
    @staticmethod
    def isclassname(name):
        """Return True if `name` is a registered class name."""
        return name in ClassDefinitionStatement.registered
@staticmethod
def getclass(name):
if name in ClassDefinitionStatement.registered:
return ClassDefinitionStatement.registered[name]
else:
return None
# method resolution order utilities
@staticmethod
def getbases(C):
klass = ClassDefinitionStatement.getclass(C)
if klass != None:
return klass.bases[:]
else:
return []
# methods merge,mro and compute_mro based on code and description
# of the method resolution order here:
# http://www.python.org/download/releases/2.3/mro/
@staticmethod
def merge(seqs):
res = []; i=0
while 1:
nonemptyseqs=[seq for seq in seqs if seq]
if not nonemptyseqs: return res
i+=1
for seq in nonemptyseqs: # find merge candidates among seq heads
cand = seq[0]
nothead=[s for s in nonemptyseqs if cand in s[1:]]
if nothead: cand=None #reject candidate
else: break
if not cand: raise "Inconsistent hierarchy"
res.append(cand)
for seq in nonemptyseqs: # remove cand
if seq[0] == cand: del seq[0]
    @staticmethod
    def mro(C):
        "Compute the class precedence list (mro) according to C3"
        # NOTE(review): this relies on Python 2 semantics where map() returns
        # a list that can be concatenated with `+`; under Python 3 map()
        # returns an iterator and this would raise TypeError -- confirm the
        # target interpreter version.
        return ClassDefinitionStatement.merge([[C]]
        +map(ClassDefinitionStatement.mro,ClassDefinitionStatement.getbases(C))
        +[list(ClassDefinitionStatement.getbases(C))])
    @staticmethod
    def compute_mro(cname):
        """Return the ClassDefinitionStatement objects (None for unregistered
        bases such as builtins) in C3 method-resolution order for `cname`."""
        namelist = ClassDefinitionStatement.mro(cname)
        # NOTE(review): Python 2 map() returns a list; callers that iterate
        # the result more than once depend on that -- confirm.
        return map(ClassDefinitionStatement.getclass,namelist)
def memberfns(self,forsuper=False):
fns = []
for f in self.mfns:
fname = f.fname
if fname not in fns:
fns.append([fname, self.getClassNamespace(),f])
return fns
def oldmemberfns(self,forsuper=False):
classes = ClassDefinitionStatement.compute_mro(self.cname)
fns = {}
for clas in classes:
# clas may be None for the base class (object or other built in class)
if clas:
cname = clas.cname
if forsuper and cname == self.cname:
continue
cfns = clas.mfns
if forsuper and clas.constructor:
cfns.append(clas.constructor)
for f in cfns:
fname = f.fname
| |
0x23303C: (0x896C, 0), # East Asian ideograph
0x21303D: (0x4E4B, 0), # East Asian ideograph
0x21303E: (0x5C39, 0), # East Asian ideograph
0x21303F: (0x4E4F, 0), # East Asian ideograph
0x213040: (0x4E4E, 0), # East Asian ideograph
0x233041: (0x8976, 0), # East Asian ideograph
0x233042: (0x8974, 0), # East Asian ideograph
0x223043: (0x6282, 0), # East Asian ideograph
0x213044: (0x4E56, 0), # East Asian ideograph
0x213045: (0x4E58, 0), # East Asian ideograph
0x213046: (0x4E59, 0), # East Asian ideograph
0x215D61: (0x929C, 0), # East Asian ideograph
0x213048: (0x4E5F, 0), # East Asian ideograph
0x233049: (0x897B, 0), # East Asian ideograph
0x23304A: (0x897C, 0), # East Asian ideograph
0x22304B: (0x629D, 0), # East Asian ideograph
0x27304C: (0x5E72, 0), # East Asian ideograph
0x225D62: (0x7564, 0), # East Asian ideograph
0x6F4A55: (0xAE7C, 0), # Korean hangul
0x213050: (0x4E8B, 0), # East Asian ideograph
0x213051: (0x4E8C, 0), # East Asian ideograph
0x213052: (0x4E8E, 0), # East Asian ideograph
0x233053: (0x8984, 0), # East Asian ideograph
0x213054: (0x4E94, 0), # East Asian ideograph
0x233055: (0x8985, 0), # East Asian ideograph
0x223056: (0x62A6, 0), # East Asian ideograph
0x213057: (
0x4E99,
0,
), # East Asian ideograph (variant of 4B3057 which maps to 4E99)
0x273058: (0x4E9A, 0), # East Asian ideograph
0x215D64: (0x92B3, 0), # East Asian ideograph
0x21305A: (0x4E9F, 0), # East Asian ideograph
0x274C77: (0x7663, 0), # East Asian ideograph
0x21305C: (0x4EA6, 0), # East Asian ideograph
0x21305D: (0x4EA5, 0), # East Asian ideograph
0x21305E: (0x4EA4, 0), # East Asian ideograph
0x215D65: (0x92EA, 0), # East Asian ideograph
0x213060: (0x4EAB, 0), # East Asian ideograph
0x213061: (0x4EAC, 0), # East Asian ideograph
0x233062: (0x8991, 0), # East Asian ideograph
0x213063: (0x4EAE, 0), # East Asian ideograph
0x233064: (0x8997, 0), # East Asian ideograph
0x215D66: (0x92B7, 0), # East Asian ideograph
0x233066: (0x8998, 0), # East Asian ideograph
0x4B5830: (0x899A, 0), # East Asian ideograph
0x213068: (0x4EC3, 0), # East Asian ideograph
0x213069: (0x4EC4, 0), # East Asian ideograph
0x22306A: (0x62C3, 0), # East Asian ideograph
0x23306B: (0x899C, 0), # East Asian ideograph
0x21306C: (0x4EC7, 0), # East Asian ideograph
0x21306D: (0x4ECB, 0), # East Asian ideograph
0x21306E: (0x4EE4, 0), # East Asian ideograph
0x23306F: (0x89A1, 0), # East Asian ideograph
0x213070: (0x4ED5, 0), # East Asian ideograph
0x275D68: (0x9504, 0), # East Asian ideograph
0x223072: (0x630D, 0), # East Asian ideograph
0x213073: (0x4EE3, 0), # East Asian ideograph
0x213074: (0x4ED4, 0), # East Asian ideograph
0x213075: (0x4ED7, 0), # East Asian ideograph
0x233076: (0x89A5, 0), # East Asian ideograph
0x275D69: (0x9509, 0), # East Asian ideograph
0x213078: (0x4EFF, 0), # East Asian ideograph
0x233079: (0x89A9, 0), # East Asian ideograph
0x6F5B63: (0xD2F0, 0), # Korean hangul
0x21307C: (0x4EFB, 0), # East Asian ideograph
0x275D6A: (0x950B, 0), # East Asian ideograph
0x21307E: (0x4F15, 0), # East Asian ideograph
0x224547: (0x6BBD, 0), # East Asian ideograph
0x215D6B: (0x9320, 0), # East Asian ideograph
0x292A2F: (0x86F1, 0), # East Asian ideograph
0x6F5D6C: (0xD754, 0), # Korean hangul
0x29436A: (0x9512, 0), # East Asian ideograph
0x215D6D: (0x92F8, 0), # East Asian ideograph
0x235A53: (0x9D36, 0), # East Asian ideograph
0x225D6E: (0x757A, 0), # East Asian ideograph
0x6F535B: (0xC218, 0), # Korean hangul
0x274C79: (0x766B, 0), # East Asian ideograph
0x6F5B64: (0xD2F1, 0), # Korean hangul
0x275D6F: (0x9519, 0), # East Asian ideograph
0x29325E: (0x8BDC, 0), # East Asian ideograph
0x6F5721: (0xC7A1, 0), # Korean hangul
0x2F575F: (0x9ABE, 0), # East Asian ideograph
0x275D70: (0x94B1, 0), # East Asian ideograph
0x4B5832: (0x89B3, 0), # East Asian ideograph
0x225D71: (0x7577, 0), # East Asian ideograph
0x6F4A58: (0xAE85, 0), # Korean hangul
0x275D72: (0x9521, 0), # East Asian ideograph
0x22343C: (0x649D, 0), # East Asian ideograph
0x275D73: (0x94EE, 0), # East Asian ideograph
0x6F5B65: (0xD2F4, 0), # Korean hangul
0x275D74: (0x5F55, 0), # East Asian ideograph
0x6F5722: (0xC7A3, 0), # Korean hangul
0x69724E: (0x9BF2, 0), # East Asian ideograph
0x215D75: (0x9310, 0), # East Asian ideograph
0x234948: (0x95BC, 0), # East Asian ideograph
0x4B325F: (0x50BB, 0), # East Asian ideograph
0x215D76: (0x9326, 0), # East Asian ideograph
0x6F5835: (0xC9DA, 0), # Korean hangul
0x692153: (0x3009, 0), # Ideographic greater than sign
0x215D77: (0x934D, 0), # East Asian ideograph
0x2D632D: (0x4E80, 0), # East Asian ideograph
0x215D78: (0x9382, 0), # East Asian ideograph
0x274C7B: (0x53D1, 0), # East Asian ideograph
0x6F5B66: (0xD2F8, 0), # Korean hangul
0x225D79: (0x757D, 0), # East Asian ideograph
0x6F5432: (0xC2FC, 0), # Korean hangul
0x224824: (0x6CD8, 0), # East Asian ideograph
0x4B5C77: (0x9139, 0), # East Asian ideograph
0x235D7A: (0x9EB0, 0), # East Asian ideograph
0x6F5D7B: (0xD790, 0), # Korean hangul
0x27606D: (0x9974, 0), # East Asian ideograph
0x224826: (0x6CC6, 0), # East Asian ideograph
0x6F575B: (0xC8A0, 0), # Korean hangul
0x275D7C: (0x9505, 0), # East Asian ideograph
0x2E5452: (0x71FE, 0), # East Asian ideograph
0x234827: (0x93F4, 0), # East Asian ideograph
0x275D7D: (0x951A, 0), # East Asian ideograph
0x234828: (0x9436, 0), # East Asian ideograph
0x6F5B67: (0xD300, 0), # Korean hangul
0x275D7E: (0x953E, 0), # East Asian ideograph
0x224829: (0x6CE9, 0), # East Asian ideograph
0x226764: (0x799D, 0), # East Asian ideograph
0x23494A: (0x95CD, 0), # East Asian ideograph
0x395E3D: (0x9295, 0), # East Asian ideograph
0x6F4A5B: (0xAEBE, 0), # Korean hangul
0x23482B: (0x943B, 0), # East Asian ideograph
0x275946: (0x8BD1, 0), # East Asian ideograph
0x22527C: (0x717B, 0), # East Asian ideograph
0x23482D: (0x9424, 0), # East Asian ideograph
0x6F5B68: (0xD301, 0), # Korean hangul
0x6F5725: (0xC7A6, 0), # Korean hangul
0x284E42: (0x6D4D, 0), # East Asian ideograph
0x21482F: (0x6E34, 0), # East Asian ideograph
0x6F523E: (0xBF1D, 0), # Korean hangul
0x6F4A5C: (0xAEC0, 0), # Korean hangul
0x234830: (0x9437, 0), # East Asian ideograph
0x213122: (0x4F10, 0), # East Asian ideograph
0x213123: (0x4F0F, 0), # East Asian ideograph
0x213124: (0x4EF2, 0), # East Asian ideograph
0x223125: (0x62F5, 0), # East Asian ideograph
0x213126: (0x4EF3, 0), # East Asian ideograph
0x213127: (0x4EF6, 0), # East Asian ideograph
0x213128: (0x4EF0, 0), # East Asian ideograph
0x23312A: (0x89B8, 0), # East Asian ideograph
0x23312B: (0x89B7, 0), # East Asian ideograph
0x23312C: (0x89B6, 0), # East Asian ideograph
0x234832: (0x9440, 0), # East Asian ideograph
0x21312E: (0x4F57, 0), # East Asian ideograph
0x23312F: (0x89BC, 0), # East Asian ideograph
0x213130: (0x4F5E, 0), # East Asian ideograph
0x223131: (0x630C, 0), # East Asian ideograph
0x233132: (0x89BF, 0), # East Asian ideograph
0x213133: (0x4F55, 0), # East Asian ideograph
0x213134: (0x4F30, 0), # East Asian ideograph
0x213135: (0x4F50, 0), # East Asian ideograph
0x213136: (0x4F51, 0), # East Asian ideograph
0x223137: (0x62F6, 0), # East Asian ideograph
0x213138: (0x4F48, 0), # East Asian ideograph
0x213139: (0x4F46, 0), # East Asian ideograph
0x22313A: (0x6331, 0), # East Asian ideograph
0x23313B: (0x89D5, 0), # East Asian ideograph
0x21313C: (0x4F54, 0), # East Asian ideograph
0x21313D: (0x4F3C, 0), # East Asian ideograph
0x21313E: (0x4F63, 0), # East Asian ideograph
0x23313F: (0x89DA, 0), # East Asian ideograph
0x213140: (0x4F60, 0), # East Asian ideograph
0x213141: (0x4F2F, 0), # East Asian ideograph
0x223142: (0x6345, 0), # East Asian ideograph
0x233143: (0x89E5, 0), # East Asian ideograph
0x223144: (0x6343, 0), # East Asian ideograph
0x234836: (0x942D, 0), # East Asian ideograph
0x213146: (0x4F6F, 0), # East Asian ideograph
0x223147: (0x6353, 0), # East Asian ideograph
0x223148: (0x6364, 0), # East Asian ideograph
0x223149: (0x6336, 0), # East Asian ideograph
0x22314A: (0x6344, 0), # East Asian ideograph
0x224837: (0x6D1D, 0), # East Asian ideograph
0x23314C: (0x89E9, 0), # East Asian ideograph
0x23314D: (0x89EB, 0), # East Asian ideograph
0x21314E: (0x4F8B, 0), # East Asian ideograph
0x27314F: (0x4ED1, 0), # East Asian ideograph
0x234838: (0x9431, 0), # East Asian ideograph
0x213152: (0x4F7B, 0), # East Asian ideograph
0x233153: (0x89ED, 0), # East Asian ideograph
0x223154: (0x6339, 0), # East Asian ideograph
0x213155: (0x4F8F, 0), # East Asian ideograph
0x213156: (0x4F7E, | |
level
x = list(itertools.chain.from_iterable(s for s in x if isinstance(s, list) or isinstance(s, tuple)))
else:
return 0
def listify(x):
    """
    Can be used to force method input to a list.

    Returns `x` unchanged when it is already a list, otherwise wraps it in
    a one-element list.
    """
    return x if isinstance(x, list) else [x]
def unlistify(x):
    """
    Converts 1-element list to x.

    A list of exactly one element collapses to that element; anything else
    (longer lists, empty lists, non-lists) is returned unchanged.
    """
    if isinstance(x, list) and len(x) == 1:
        return x[0]
    return x
def tuplefy(x):
    """
    Similar to listify().
    Guarantees a non-flat tuple so callers can iterate tuples of
    inconsistent depth uniformly:
    - a flat tuple (depth 1) such as (0, 1, 2) becomes ((0, 1, 2),)
    - a non-tuple scalar (depth 0) becomes ((x,),)
    - anything deeper is returned untouched
    """
    if depth(x) == 1:
        return (x,)
    if depth(x) == 0:
        return ((x,),)
    return x
def method_name(caller_depth=0):
    """
    Returns the name of the caller method.

    `caller_depth` selects frames above the immediate caller
    (0 = the direct caller of this helper).
    """
    # inspect.stack()[0][3] would be this helper itself; the +1 skips it.
    return inspect.stack()[caller_depth + 1][3]
def method_arg_names(method):
    """
    Returns the names of the arguments of passed method.

    Uses inspect.getfullargspec: inspect.getargspec, used previously, was
    deprecated since Python 3.0 and removed in Python 3.11.
    """
    return inspect.getfullargspec(method)[0]
def concat_complex(list_complex, width_in_bits, imreOrder=True):
    """
    Concatenates the real and imaginary part into one integer.
    The specified width counts for both the real and imaginary part.
    With imreOrder=True the imaginary part is shifted to the MSBs and the
    real part is mapped on the LSBs; imreOrder=False swaps the two.
    """
    # Dead-code fix: a second, unreachable implementation followed the
    # unconditional returns below; it has been removed.
    mask = 2 ** width_in_bits - 1
    if imreOrder:
        return [((int(c.imag) & mask) << width_in_bits) + (int(c.real) & mask)
                for c in list_complex]
    return [((int(c.real) & mask) << width_in_bits) + (int(c.imag) & mask)
            for c in list_complex]
def unconcat_complex(list_concat, width_in_bits, imreOrder=True):
    """
    Unconcatenates the real and imaginary part from an integer list into
    a complex list with separate real and imaginary part. The integers
    in list_concat are 2*width_in_bits wide.
    Real part is extracted from the LSB. Imaginary part is extracted the MSB.
    Example:
    >>> unconcat_complex(concat_complex([complex(1,2), complex(3,4)], 16), 16)
    [(1+2j), (3+4j)]
    """
    mask = 2 ** width_in_bits - 1
    values = []
    for word in list_concat:
        lo = to_signed(word & mask, width_in_bits)
        hi = to_signed((word >> width_in_bits) & mask, width_in_bits)
        values.append(complex(lo, hi) if imreOrder else complex(hi, lo))
    return values
def split_complex(list_complex):
    """
    Returns the real and imaginary part in two separate lists.
    [list_re, list_im] = split_complex(list_complex)
    """
    list_real = [value.real for value in list_complex]
    list_imag = [value.imag for value in list_complex]
    return (list_real, list_imag)
def unsplit_complex(list_real, list_imag):
    """Returns complex list by combining the real and imaginary parts from two separate lists.
    """
    result = []
    for re_part, im_part in zip(list_real, list_imag):
        result.append(complex(re_part, im_part))
    return result
def mac_str(n):
    """
    Converts MAC address integer to the hexadecimal string representation,
    separated by ':'.
    """
    digits = "%012x" % n
    octets = (digits[pos:pos + 2] for pos in range(0, len(digits), 2))
    return ':'.join(octets)
def ip_str(n):
    """
    Converts IP address integer to the decimal string representation,
    separated by '.'.
    """
    # Plain shifts and masks replace the CommonBytes helper object: byte 3
    # is the most significant octet, byte 0 the least significant.
    return '.'.join(str((n >> shift) & 0xFF) for shift in (24, 16, 8, 0))
def mkdir(path):
    """Recursively create leaf directory and intermediate directories if they do not already exist."""
    expandPath = os.path.expandvars(path)  # support using environment variables in the file path
    expandPath = os.path.expanduser(expandPath)  # support using ~ in the file path
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard (another process could create the directory
    # between the check and the makedirs call).
    os.makedirs(expandPath, exist_ok=True)
def expand_file_path_name(fpn, dir_path=''):
    """ Expand environment variables in fpn to get filePathName.
    - if it is an absolute path return filePathName else
    - if it still has a local file path prepend dir_path to the filePathName and return dir_path + filePathName.
    """
    # Expand $VARs first, then ~ -- same order as elsewhere in this module.
    file_path_name = os.path.expanduser(os.path.expandvars(fpn))
    if os.path.isabs(file_path_name):
        return file_path_name  # already absolute: use as-is
    # Relative: anchor it at the (expanded) directory path.
    return os.path.join(os.path.expandvars(dir_path), file_path_name)
def find_string_in_file(fpn, find_str):
    """Return index >= 0 if find_str is found in file fpn, returns -1 if find_str is not found in file fpn.
    Can also find '\n'.
    """
    # Resource fix: the original `open(fpn).read()` never closed the file
    # handle; the context manager guarantees closure.
    with open(fpn, 'r') as f:
        return f.read().find(find_str)
def remove_from_list_string(list_str, item_str, sep=' '):
    """Treat the string list_str as a list of items that are separated by sep and then
    remove the specified item_str string from the list and return the list as a
    string of items separated by sep. Also remove any duplicate items.
    """
    items = unique(list_str.split(sep))
    # NOTE: list.remove raises ValueError when item_str is not present.
    items.remove(item_str)
    return sep.join(items)
def find_all_file_paths(rootDir, fileName):
    """
    Recursively search the rootDir tree to find the paths to all fileName files.
    """
    return [dirpath
            for dirpath, _, filenames in os.walk(rootDir)
            if fileName in filenames]
def reverse_bits(num, nofBits):
    """Reverse the order of the number of bits in the number value, e.g. to perform a index bit flip for an FFT"""
    out = 0
    for _ in range(nofBits):
        out = (out << 1) | (num & 1)  # move the current LSB to the output's low end
        num >>= 1
    return out
def invert_msbit(num, nofBits):
    """Invert the most significant bit of num, e.g. to perform an index fftshift()"""
    mask = 2 ** nofBits - 1       # keep only nofBits of num
    msb = 2 ** (nofBits - 1)      # the bit to flip
    return (num & mask) ^ msb
################################################################################
# Classes
class CommonBits:
"""
The purpose of this class is to allow the user to:
1) create a CommonBits object with some data, e.g:
>> my_bits = CommonBits(0xDEADBEEF)
2) Use the bracket notation [] to extract bit ranges from that data:
>> print(hex(my_bits[31:0]))
0xdeadbeef
>> print(hex(my_bits[31:4]))
0xdeadbee
>> print(hex(my_bits[31:16]))
0xdead
>> print(hex(my_bits[31]))
0x1
>> print(hex(my_bits[0]))
0x1
3) If a (optional) data width is passed, leading zeroes are added.
>> my_bits = CommonBits(0xDEADBEEF, 16)
>> print(hex(my_bits))
0xbeef
>> my_bits = CommonBits(0xDEADBEEF, 64)
>> print(hex(my_bits[63:32]))
0x0
4) Besides getting bit slices, setting bitslices is also possible:
>> my_bits = CommonBits(0xdeadbeef)
>> print(my_bits)
0xdeadbeef
>> my_bits[15:0] = 0xbabe
>> print(my_bits)
0xdeadbabe
5) Use -1 to set a range of bits to all ones.
6) Use VHDL-style & operator to concatenate CommonBits types.
>> MyBitsHi = 0xDEAD
>> MyBitsLo = 0xBEEF
>> print(MyBitsHi & MyBitsLo & CommonBits(0xCAFEBABE))
0xdeadbeefcafebabe
"""
    def __init__(self, data, bits=0):
        """Wrap unsigned integer `data`; `bits` optionally fixes the bit width.

        NOTE(review): on invalid input (negative data, or data wider than
        `bits`) an error is only printed -- self.data may then be left unset
        and later attribute access will fail. Confirm whether raising
        ValueError would be preferable.
        """
        if data>=0:
            self.data = data
        else:
            print(("CommonBits: Error: Input data = %d. Only unsigned integers are supported, use to_unsigned(data, bits)." %data))
        if bits>0:
            # Set data width to passed 'bits'
            self.data_bin_len = bits
            # Check if data fits in passed nof bits (data is NOT truncated here)
            if self.get_bin_len(data) > self.data_bin_len:
                print(("CommonBits: Error: input data %d does not fit in passed number of bits (%d)" %(data, bits)))
        else:
            # Use the minimum required data width
            self.data_bin_len = self.get_bin_len(self.data)
def __getitem__(self, bitslice):
if self.check_slice(bitslice)==0:
if type(bitslice)==type(slice(1,2,3)):
# Get a bitmask for the bit range passed via the bitslice
bitmask = self.bitmask(bitslice.start - bitslice.stop +1)
return int((self.data >> bitslice.stop) & bitmask)
if type(bitslice)==type(0):
# We only want one bit
bitmask = self.bitmask(1)
return int((self.data >> bitslice) & bitmask)
print(bitmask)
else:
print('CommonBits: Error: invalid slice range')
def __setitem__(self, bitslice, value):
if self.check_slice(bitslice)==0:
if type(bitslice)==type(slice(1,2,3)):
# Get a bitmask for the bit range passed via the bitslice
bitmask = self.bitmask(bitslice.start - bitslice.stop +1)
if value==-1:
# Allow -1 to set range to all ones. Simply use the bitmask as data.
data=bitmask
elif value>=0:
data = value
else:
print(("CommonBits: Error: Input data = %d. Only unsigned integers are supported, use to_unsigned(data, bits)." %value))
# Make sure bit length of passed data does not exceed bitmask length
if self.get_bin_len(data) <= self.get_bin_len(bitmask):
self.data = (self.data & ~(bitmask << bitslice.stop)) | (data << bitslice.stop)
else:
print(('CommonBits: Error: passed value (%d) does not fit in bits [%d..%d].' %(value, bitslice.start, bitslice.stop)))
if type(bitslice)==type(0):
# We only want to set one bit
bitmask = self.bitmask(1)
data=value
# Make sure bit length of passed data does not exceed bitmask length
if self.get_bin_len(data) <= self.get_bin_len(bitmask):
self.data = (self.data & ~(bitmask << | |
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore.common import initializer as init
####################################################################
# ------------------------- Discriminators --------------------------
####################################################################
class Dis(nn.Cell):
    """Single-scale discriminator: a stack of stride-2 conv blocks followed
    by a 1x1 conv producing one logit map, flattened to a vector."""

    def __init__(self, input_dim, norm='None'):
        super(Dis, self).__init__()
        base_ch = 32
        n_layer = 5
        self.model = self._make_net(base_ch, input_dim, n_layer, norm)

    def _make_net(self, ch, input_dim, n_layer, norm):
        layers = [ConvNormReLU(input_dim, ch, kernel_size=3, stride=2, padding=1, alpha=0.2, norm_mode=norm)]
        tch = ch
        for _ in range(1, n_layer):
            layers.append(ConvNormReLU(tch, tch * 2, kernel_size=3, stride=2, padding=1, alpha=0.2, norm_mode=norm))
            tch *= 2
        layers.append(ConvNormReLU(tch, tch * 2, kernel_size=3, stride=2, padding=1, alpha=0.2, norm_mode=norm))
        tch *= 2
        layers.append(nn.Conv2d(tch, 1, kernel_size=1, stride=1, padding=0))
        return nn.SequentialCell(*layers)

    def construct(self, x):
        return self.model(x).reshape(-1)
class MultiScaleDis(nn.Cell):
    """Multi-scale discriminator: applies `n_scale` sub-discriminators to
    progressively average-pooled versions of the input and concatenates
    their flattened outputs along axis 0."""

    def __init__(self, input_dim, n_scale=3, n_layer=4, norm='None'):
        super(MultiScaleDis, self).__init__()
        ch = 32
        self.downsample = nn.AvgPool2d(3, stride=2)
        self.Diss = nn.CellList()
        for _ in range(n_scale):
            self.Diss.append(self._make_net(ch, input_dim, n_layer, norm))

    def _make_net(self, ch, input_dim, n_layer, norm):
        model = []
        model += [ConvNormReLU(input_dim, ch, kernel_size=4, stride=2, padding=1, alpha=0.2, norm_mode=norm)]
        tch = ch
        for _ in range(1, n_layer):
            model += [ConvNormReLU(tch, tch * 2, kernel_size=4, stride=2, padding=1, alpha=0.2, norm_mode=norm)]
            tch *= 2
        model += [nn.Conv2d(tch, 1, kernel_size=1, stride=1, padding=0)]
        return nn.SequentialCell(*model)

    def construct(self, x):
        concat = ops.Concat(axis=0)
        outs = 0
        # `idx` replaces a local previously named `iter`, which shadowed the
        # builtin of the same name.
        for idx, dis in enumerate(self.Diss):
            out = dis(x).reshape(-1)
            x = self.downsample(x)
            outs = out if idx == 0 else concat((outs, out))
        return outs
####################################################################
# ---------------------------- Encoders -----------------------------
####################################################################
class E_content(nn.Cell):
    """Content encoder: three conv blocks (the last two downsample by 2),
    returning all three intermediate feature maps as a pyramid."""

    def __init__(self, input_dim, ngf=64):
        super(E_content, self).__init__()
        self.layer_1 = ConvNormReLU(in_planes=input_dim, out_planes=ngf, kernel_size=7, stride=1, padding=3,
                                    norm_mode='instance', alpha=0.2)
        self.layer_2 = ConvNormReLU(in_planes=ngf, out_planes=ngf * 2, kernel_size=3, stride=2, padding=1,
                                    norm_mode='instance', alpha=0.2)
        self.layer_3 = ConvNormReLU(in_planes=ngf * 2, out_planes=ngf * 4, kernel_size=3, stride=2, padding=1,
                                    norm_mode='instance', alpha=0.2)

    def construct(self, x):
        # (input_dim, H, W) -> (ngf, H, W) -> (ngf*2, H/2, W/2) -> (ngf*4, H/4, W/4)
        fmap1 = self.layer_1(x)
        fmap2 = self.layer_2(fmap1)
        fmap3 = self.layer_3(fmap2)
        return fmap1, fmap2, fmap3
class E_makeup(nn.Cell):
    """Makeup encoder: same three-block structure as E_content, but only
    the deepest feature map is returned."""

    def __init__(self, input_dim, ngf=64):
        super(E_makeup, self).__init__()
        self.layer_1 = ConvNormReLU(in_planes=input_dim, out_planes=ngf, kernel_size=7, stride=1, padding=3,
                                    norm_mode='instance', alpha=0.2)
        self.layer_2 = ConvNormReLU(in_planes=ngf, out_planes=ngf * 2, kernel_size=3, stride=2, padding=1,
                                    norm_mode='instance', alpha=0.2)
        self.layer_3 = ConvNormReLU(in_planes=ngf * 2, out_planes=ngf * 4, kernel_size=3, stride=2, padding=1,
                                    norm_mode='instance', alpha=0.2)

    def construct(self, x):
        # (input_dim, H, W) -> (ngf, H, W) -> (ngf*2, H/2, W/2) -> (ngf*4, H/4, W/4)
        out = self.layer_1(x)
        out = self.layer_2(out)
        out = self.layer_3(out)
        return out
class E_semantic(nn.Cell):
    """Semantic encoder: same three-block structure as the other encoders
    but with a smaller base width (ngf=32); returns the deepest map only."""

    def __init__(self, input_dim, ngf=32):
        super(E_semantic, self).__init__()
        self.layer_1 = ConvNormReLU(in_planes=input_dim, out_planes=ngf, kernel_size=7, stride=1, padding=3,
                                    norm_mode='instance', alpha=0.2)
        self.layer_2 = ConvNormReLU(in_planes=ngf, out_planes=ngf * 2, kernel_size=3, stride=2, padding=1,
                                    norm_mode='instance', alpha=0.2)
        self.layer_3 = ConvNormReLU(in_planes=ngf * 2, out_planes=ngf * 4, kernel_size=3, stride=2, padding=1,
                                    norm_mode='instance', alpha=0.2)

    def construct(self, x):
        # (input_dim, H, W) -> (ngf, H, W) -> (ngf*2, H/2, W/2) -> (ngf*4, H/4, W/4)
        out = self.layer_1(x)
        out = self.layer_2(out)
        out = self.layer_3(out)
        return out
####################################################################
# ----------------------- Feature Fusion (FF) ----------------------
####################################################################
class FeatureFusion(nn.Cell):
    """Fuses a content feature pyramid `x` with semantic features `y`:
    convolves their concatenation, resizes every map to H/4 x W/4 and
    concatenates all five maps along the channel axis."""

    def __init__(self, ngf=64):
        super(FeatureFusion, self).__init__()
        fused_dim = (32 * 4 + 64 * 4)  # semantic (32*4) + content (64*4) channels
        self.conv1 = ConvNormReLU(in_planes=fused_dim, out_planes=ngf * 8, kernel_size=3, stride=2, padding=1,
                                  norm_mode='instance', alpha=0.2)
        self.resize = nn.ResizeBilinear()

    def construct(self, x, y):
        # x[0]: (ngf, H, W), x[1]: (ngf*2, H/2, W/2), x[2]: (ngf*4, H/4, W/4)
        # y:    (32*4, H/4, W/4)
        _, _, H, W = x[0].shape
        concat = ops.Concat(axis=1)
        fused = self.conv1(concat((x[2], y)))
        target = (H // 4, W // 4)
        maps = (
            self.resize(x[0], size=target),
            self.resize(x[1], size=target),
            self.resize(x[2], size=target),
            self.resize(fused, size=target),
            self.resize(y, size=target),
        )
        return concat(maps)
####################################################################
# ----------------- SymmetryAttention Moudle -----------------------
####################################################################
class SymmetryAttention(nn.Cell):
    """Symmetric cross-attention between two fused feature maps.

    Computes dense correspondence matrices in both directions and uses
    them to warp each raw (makeup) feature map toward the other image.
    """
    def __init__(self):
        super(SymmetryAttention, self).__init__()
        # 64 * 17 input channels -- matches the channel count produced by
        # FeatureFusion's concatenated output.
        in_dim = 64 * 17
        # Temperature applied to the correlation logits before softmax.
        self.softmax_alpha = 100
        self.fa_conv = ConvNormReLU(in_dim, in_dim // 8, kernel_size=1, stride=1, padding=0, norm_mode='instance',
                                    alpha=0.2)
        self.fb_conv = ConvNormReLU(in_dim, in_dim // 8, kernel_size=1, stride=1, padding=0, norm_mode='instance',
                                    alpha=0.2)
        self.norm = nn.Norm(axis=1, keep_dims=True)  # channelwise L2 norm
        self.softmax = nn.Softmax(axis=2)
    def warp(self, fa, fb, a_raw, b_raw, alpha):
        '''
        Calculate the correspondence matrices and warp the exemplar features.

        fa, fb: projected feature maps, shape (n, c, h, w).
        a_raw, b_raw: raw feature maps to be warped, shape (n, raw_c, h, w).
        alpha: softmax temperature sharpening the correlation.
        Returns (corr_ab_T, corr_ba_T, a_warp, b_warp).
        '''
        n, c, h, w = fa.shape
        _, raw_c, _, _ = a_raw.shape
        bmm = ops.BatchMatMul()  # GPU
        transpose = ops.Transpose()  # GPU
        # subtract mean
        fa = fa - fa.mean(axis=(2, 3), keep_dims=True)
        fb = fb - fb.mean(axis=(2, 3), keep_dims=True)
        # vectorize (merge dim H, W) and normalize channelwise vectors
        fa = fa.reshape(n, c, -1)
        fb = fb.reshape(n, c, -1)
        fa = fa / self.norm(fa)
        fb = fb / self.norm(fb)
        # correlation matrix, gonna be huge (4096*4096)
        # use matrix multiplication for CUDA speed up
        # Also, calculate the transpose of the atob correlation
        # warp the exemplar features b, taking softmax along the b dimension
        input_perm = (0, 2, 1)
        energy_ab_T = bmm(transpose(fb, input_perm), fa) * alpha
        corr_ab_T = self.softmax(energy_ab_T)  # n*HW*C @ n*C*HW -> n*HW*HW
        b_warp = bmm(b_raw.reshape(n, raw_c, h * w), corr_ab_T)  # n*HW*1
        b_warp = b_warp.reshape(n, raw_c, h, w)
        energy_ba_T = bmm(transpose(fa, input_perm), fb) * alpha
        corr_ba_T = self.softmax(energy_ba_T)  # n*HW*C @ n*C*HW -> n*HW*HW
        # corr_ba_T = transpose(corr_ab_T, (0, 2, 1))
        a_warp = bmm(a_raw.reshape(n, raw_c, h * w), corr_ba_T)  # n*HW*1
        a_warp = a_warp.reshape(n, raw_c, h, w)
        return corr_ab_T, corr_ba_T, a_warp, b_warp
    def construct(self, fa, fb, a_raw, b_raw):
        """Project both feature maps through the 1x1 convs, then compute the
        correspondences and the warped raw features."""
        fa = self.fa_conv(fa)
        fb = self.fb_conv(fb)
        X, Y, a_warp, b_warp = self.warp(fa, fb, a_raw, b_raw, self.softmax_alpha)
        return X, Y, a_warp, b_warp
####################################################################
# ------------------------------- SSCFT----------------------------
####################################################################
class Transformer(nn.Cell):
    """SSCFT: fuses content/semantic features of both inputs, then applies
    symmetric cross-attention to warp each makeup map toward the other."""

    def __init__(self):
        super(Transformer, self).__init__()
        self.fusion = FeatureFusion()
        self.atte = SymmetryAttention()

    def construct(self, x_c, y_c, x_s, y_s, x_m, y_m):
        x_f = self.fusion(x_c, x_s)
        y_f = self.fusion(y_c, y_s)
        # Fix: call the cell itself instead of .construct() directly --
        # invoking construct() bypasses nn.Cell.__call__ machinery, unlike
        # every other cell invocation in this file.
        attention_x, attention_y, x_m_warp, y_m_warp = self.atte(x_f, y_f, x_m, y_m)
        return attention_x, attention_y, x_m_warp, y_m_warp
####################################################################
# -------------------------- Decoder --------------------------
####################################################################
class Decoder(nn.Cell):
    """Decoder: three SPADE residual blocks with x2 bilinear upsampling in
    between, followed by a 7x7 conv and tanh producing the output image."""
    def __init__(self, output_dim=3, ngf=64):
        super(Decoder, self).__init__()
        # All SPADE blocks are conditioned on ngf*4-channel makeup features.
        self.SPADE1 = SPADEResnetBlock(ngf * 4, ngf * 4, ngf * 4)
        self.SPADE2 = SPADEResnetBlock(ngf * 4, ngf * 2, ngf * 4)
        self.SPADE3 = SPADEResnetBlock(ngf * 2, ngf * 1, ngf * 4)
        self.img_conv = ConvNormReLU(ngf * 1, output_dim, kernel_size=7, stride=1, padding=3, norm_mode='None',
                                     use_relu=False)
        self.tanh = nn.Tanh()
    def construct(self, x, y):
        """Decode an image from the content pyramid `x` (only x[-1] is used)
        conditioned on makeup features `y`; tanh bounds the output to [-1, 1]."""
        # content=x[-1]
        # makeup=y
        _, c, h, w = x[-1].shape
        # Upsampling ops are created per call because the target sizes
        # depend on the input's spatial dimensions.
        up_1 = ops.ResizeBilinear(size=(h * 2, w * 2))
        up_2 = ops.ResizeBilinear(size=(h * 4, w * 4))
        out = self.SPADE1(x[-1], y)
        out = up_1(out)
        out = self.SPADE2(out, y)
        out = up_2(out)
        out = self.SPADE3(out, y)
        out = self.img_conv(out)
        out = self.tanh(out)
        return out
####################################################################
# ------------------------------ SPADE -----------------------------
####################################################################
class SPADEResnetBlock(nn.Cell):
    """Residual block whose normalization layers are SPADE (segmentation-
    conditioned) norms; a learned 1x1 shortcut is used when fin != fout."""
    def __init__(self, fin, fout, semantic_nc):
        super().__init__()
        # Attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)
        # create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, pad_mode='same')
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, pad_mode='same')
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, pad_mode='same', has_bias=False)
        # define normalization layers
        self.norm_0 = SPADE(fin, semantic_nc)
        self.norm_1 = SPADE(fmiddle, semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE(fin, semantic_nc)
        self.actvn = nn.LeakyReLU(0.2)
    # note the resnet block with SPADE also takes in |seg|,
    # the semantic segmentation map as input
    def construct(self, x, seg):
        """Residual transform of `x`; every normalization is conditioned on `seg`."""
        x_s = self.shortcut(x, seg)
        dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
        out = x_s + dx
        return out
    def shortcut(self, x, seg):
        """Identity, or a 1x1 conv over the SPADE-normalized input when the
        channel counts differ."""
        if self.learned_shortcut:
            x_s = self.conv_s(self.norm_s(x, seg))
        else:
            x_s = x
        return x_s
class SPADE(nn.Cell):
    """Spatially-Adaptive (De)normalization layer.

    Normalizes `x` with a parameter-free InstanceNorm, then modulates it
    with per-pixel scale (gamma) and bias (beta) maps predicted from
    `segmap` by a small shared conv + two heads.

    Args:
        norm_nc (int): channel count of the normalized input `x`.
        label_nc (int): channel count of the conditioning map `segmap`.
    """
    def __init__(self, norm_nc, label_nc):
        super().__init__()
        ks = 3
        self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        # The dimension of the intermediate embedding space. Yes, hardcoded.
        nhidden = 128
        self.mlp_shared = nn.SequentialCell(
            nn.Conv2d(label_nc, nhidden, kernel_size=ks, pad_mode='same'),
            nn.ReLU()
        )
        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, pad_mode='same')
        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, pad_mode='same')
    def construct(self, x, segmap):
        # Resize the conditioning map to x's spatial size.
        nearest = ops.ResizeNearestNeighbor((x.shape[2], x.shape[3]))
        # Part 1. generate parameter-free normalized activations
        normalized = self.param_free_norm(x)
        # Part 2. produce scaling and bias conditioned on semantic map
        segmap = nearest(segmap)
        # segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        actv = self.mlp_shared(segmap)
        gamma = self.mlp_gamma(actv)
        beta = self.mlp_beta(actv)
        # apply scale and bias
        out = normalized * (1 + gamma) + beta
        return out
####################################################################
# -------------------------- Basic Blocks --------------------------
####################################################################
class ConvNormReLU(nn.Cell):
def __init__(self,
in_planes,
out_planes,
kernel_size=3,
stride=1,
alpha=0.2,
norm_mode='instance',
pad_mode='REFLECT',
use_relu=True,
padding=None,
has_bias=True):
super(ConvNormReLU, self).__init__()
layers = []
if padding is None:
padding = (kernel_size - 1) // 2
if | |
""" pgp.py
this is where the armorable PGP block objects live
"""
import binascii
import collections
try:
import collections.abc as collections_abc
except ImportError:
collections_abc = collections
import contextlib
import copy
import functools
import itertools
import operator
import os
import re
import warnings
import weakref
import six
from datetime import datetime
from cryptography.hazmat.primitives import hashes
from .constants import CompressionAlgorithm
from .constants import Features
from .constants import HashAlgorithm
from .constants import ImageEncoding
from .constants import KeyFlags
from .constants import NotationDataFlags
from .constants import PacketTag
from .constants import PubKeyAlgorithm
from .constants import RevocationKeyClass
from .constants import RevocationReason
from .constants import SignatureType
from .constants import SymmetricKeyAlgorithm
from .decorators import KeyAction
from .errors import PGPDecryptionError
from .errors import PGPError
from .packet import Key
from .packet import MDC
from .packet import Packet
from .packet import Primary
from .packet import Private
from .packet import PubKeyV4
from .packet import PubSubKeyV4
from .packet import PrivKeyV4
from .packet import PrivSubKeyV4
from .packet import Public
from .packet import Sub
from .packet import UserID
from .packet import UserAttribute
from .packet.packets import CompressedData
from .packet.packets import IntegrityProtectedSKEData
from .packet.packets import IntegrityProtectedSKEDataV1
from .packet.packets import LiteralData
from .packet.packets import OnePassSignature
from .packet.packets import OnePassSignatureV3
from .packet.packets import PKESessionKey
from .packet.packets import PKESessionKeyV3
from .packet.packets import Signature
from .packet.packets import SignatureV4
from .packet.packets import SKEData
from .packet.packets import Marker
from .packet.packets import SKESessionKey
from .packet.packets import SKESessionKeyV4
from .packet.types import Opaque
from .types import Armorable
from .types import Fingerprint
from .types import ParentRef
from .types import PGPObject
from .types import SignatureVerification
from .types import SorteDeque
__all__ = ['PGPSignature',
'PGPUID',
'PGPMessage',
'PGPKey',
'PGPKeyring']
class PGPSignature(Armorable, ParentRef, PGPObject):
_reason_for_revocation = collections.namedtuple('ReasonForRevocation', ['code', 'comment'])
    @property
    def __sig__(self):
        # Delegates to the wrapped Signature packet's signature field.
        return self._signature.signature.__sig__()
    @property
    def cipherprefs(self):
        """
        A ``list`` of preferred symmetric algorithms specified in this signature, if any. Otherwise, an empty ``list``.
        """
        # Membership is tested with the bare subpacket name while retrieval
        # uses the 'h_' (hashed-area) key; presumably the subpacket container
        # indexes hashed subpackets under both names - TODO confirm.
        if 'PreferredSymmetricAlgorithms' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_PreferredSymmetricAlgorithms'])).flags
        return []
    @property
    def compprefs(self):
        """
        A ``list`` of preferred compression algorithms specified in this signature, if any. Otherwise, an empty ``list``.
        """
        # Retrieval uses the 'h_' (hashed-area) key for the same subpacket.
        if 'PreferredCompressionAlgorithms' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_PreferredCompressionAlgorithms'])).flags
        return []
    @property
    def created(self):
        """
        A :py:obj:`~datetime.datetime` of when this signature was created.
        """
        # The last (most recent) hashed CreationTime subpacket wins.
        return self._signature.subpackets['h_CreationTime'][-1].created
    @property
    def embedded(self):
        # True when this signature is embedded in another object (has a parent).
        return self.parent is not None
    @property
    def expires_at(self):
        """
        A :py:obj:`~datetime.datetime` of when this signature expires, if a signature expiration date is specified.
        Otherwise, ``None``
        """
        # NOTE(review): unlike the preference properties this uses the bare
        # (non-'h_') key for both test and retrieval - confirm intentional.
        if 'SignatureExpirationTime' in self._signature.subpackets:
            # `expires` is a duration added to the creation time.
            expd = next(iter(self._signature.subpackets['SignatureExpirationTime'])).expires
            return self.created + expd
        return None
@property
def exportable(self):
"""
``False`` if this signature is marked as being not exportable. Otherwise, ``True``.
"""
if 'ExportableCertification' in self._signature.subpackets:
return bool(next(iter(self._signature.subpackets['ExportableCertification'])))
return True
    @property
    def features(self):
        """
        A ``set`` of implementation features specified in this signature, if any. Otherwise, an empty ``set``.
        """
        # `flags` on the Features subpacket is the decoded feature set.
        if 'Features' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['Features'])).flags
        return set()
    @property
    def hash2(self):
        # Presumably the left two octets of the signed hash used as a quick
        # check value (RFC 4880 sec. 5.2.3) - TODO confirm against packet impl.
        return self._signature.hash2
    @property
    def hashprefs(self):
        """
        A ``list`` of preferred hash algorithms specified in this signature, if any. Otherwise, an empty ``list``.
        """
        # Retrieval uses the 'h_' (hashed-area) key for the same subpacket.
        if 'PreferredHashAlgorithms' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_PreferredHashAlgorithms'])).flags
        return []
    @property
    def hash_algorithm(self):
        """
        The :py:obj:`~constants.HashAlgorithm` used when computing this signature.
        """
        # Parsed from the signature packet's halg field.
        return self._signature.halg
@property
def is_expired(self):
"""
``True`` if the signature has an expiration date, and is expired. Otherwise, ``False``
"""
expires_at = self.expires_at
if expires_at is not None and expires_at != self.created:
return expires_at < datetime.utcnow()
return False
    @property
    def key_algorithm(self):
        """
        The :py:obj:`~constants.PubKeyAlgorithm` of the key that generated this signature.
        """
        # Parsed from the signature packet's pubalg field.
        return self._signature.pubalg
    @property
    def key_expiration(self):
        """
        The key expiration period from this signature's KeyExpirationTime subpacket, if present. Otherwise, ``None``.
        """
        if 'KeyExpirationTime' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['KeyExpirationTime'])).expires
        return None
    @property
    def key_flags(self):
        """
        A ``set`` of :py:obj:`~constants.KeyFlags` specified in this signature, if any. Otherwise, an empty ``set``.
        """
        # Retrieval uses the 'h_' (hashed-area) key for the same subpacket.
        if 'KeyFlags' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_KeyFlags'])).flags
        return set()
    @property
    def keyserver(self):
        """
        The preferred key server specified in this signature, if any. Otherwise, an empty ``str``.
        """
        # Retrieval uses the 'h_' (hashed-area) key for the same subpacket.
        if 'PreferredKeyServer' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_PreferredKeyServer'])).uri
        return ''
    @property
    def keyserverprefs(self):
        """
        A ``list`` of :py:obj:`~constants.KeyServerPreferences` in this signature, if any. Otherwise, an empty ``list``.
        """
        # Retrieval uses the 'h_' (hashed-area) key for the same subpacket.
        if 'KeyServerPreferences' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['h_KeyServerPreferences'])).flags
        return []
    @property
    def magic(self):
        # Armor header word; presumably consumed by Armorable to build the
        # "-----BEGIN PGP SIGNATURE-----" block - TODO confirm.
        return "SIGNATURE"
@property
def notation(self):
"""
A ``dict`` of notation data in this signature, if any. Otherwise, an empty ``dict``.
"""
return dict((nd.name, nd.value) for nd in self._signature.subpackets['NotationData'])
    @property
    def policy_uri(self):
        """
        The policy URI specified in this signature, if any. Otherwise, an empty ``str``.
        """
        # Bare (non-'h_') key used for both test and retrieval here.
        if 'Policy' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['Policy'])).uri
        return ''
@property
def revocable(self):
"""
``False`` if this signature is marked as being not revocable. Otherwise, ``True``.
"""
if 'Revocable' in self._signature.subpackets:
return bool(next(iter(self._signature.subpackets['Revocable'])))
return True
    @property
    def revocation_key(self):
        # RevocationKey subpacket handling is not implemented yet; its mere
        # presence raises rather than being silently ignored.
        if 'RevocationKey' in self._signature.subpackets:
            raise NotImplementedError()
        return None
    @property
    def revocation_reason(self):
        """
        A ``(code, comment)`` namedtuple from the ReasonForRevocation subpacket, if present. Otherwise, ``None``.
        """
        if 'ReasonForRevocation' in self._signature.subpackets:
            subpacket = next(iter(self._signature.subpackets['ReasonForRevocation']))
            return self._reason_for_revocation(subpacket.code, subpacket.string)
        return None
    @property
    def attested_certifications(self):
        """
        Returns a set of all the hashes of attested certifications covered by this Attestation Key Signature.
        Unhashed subpackets are ignored.
        """
        # Only meaningful on Attestation signatures; anything else has none.
        if self._signature.sigtype != SignatureType.Attestation:
            return set()
        ret = set()
        # The subpacket body is a flat concatenation of fixed-size digests,
        # so split it into digest_size-byte chunks.
        hlen = self.hash_algorithm.digest_size
        for n in self._signature.subpackets['h_AttestedCertifications']:
            attestations = bytes(n.attested_certifications)
            for i in range(0, len(attestations), hlen):
                ret.add(attestations[i:i+hlen])
        return ret
    @property
    def signer(self):
        """
        The 16-character Key ID of the key that generated this signature.
        """
        # Delegates to the signature packet's signer field.
        return self._signature.signer
    @property
    def signer_fingerprint(self):
        """
        The fingerprint of the key that generated this signature, if it contained. Otherwise, an empty ``str``.
        """
        if 'IssuerFingerprint' in self._signature.subpackets:
            return next(iter(self._signature.subpackets['IssuerFingerprint'])).issuer_fingerprint
        return ''
    @property
    def intended_recipients(self):
        """
        Returns an iterator over all the primary key fingerprints marked as intended encrypted recipients for this signature.
        """
        # Lazy: a map object over the IntendedRecipient subpackets.
        return map(lambda x: x.intended_recipient, self._signature.subpackets['IntendedRecipient'])
    @property
    def target_signature(self):
        # SignatureTarget subpacket handling is not implemented.
        return NotImplemented
    @property
    def type(self):
        """
        The :py:obj:`~constants.SignatureType` of this signature.
        """
        # Parsed from the signature packet's sigtype field.
        return self._signature.sigtype
@classmethod
def new(cls, sigtype, pkalg, halg, signer, created=None):
sig = PGPSignature()
if created is None:
created=datetime.utcnow()
sigpkt = SignatureV4()
sigpkt.header.tag = 2
sigpkt.header.version = 4
sigpkt.subpackets.addnew('CreationTime', hashed=True, created=created)
sigpkt.subpackets.addnew('Issuer', _issuer=signer)
sigpkt.sigtype = sigtype
sigpkt.pubalg = pkalg
if halg is not None:
sigpkt.halg = halg
sig._signature = sigpkt
return sig
    def __init__(self):
        """
        PGPSignature objects represent OpenPGP compliant signatures.
        PGPSignature implements the ``__str__`` method, the output of which will be the signature object in
        OpenPGP-compliant ASCII-armored format.
        PGPSignature implements the ``__bytes__`` method, the output of which will be the signature object in
        OpenPGP-compliant binary format.
        """
        super(PGPSignature, self).__init__()
        # The wrapped Signature packet; populated later via __or__ or new().
        self._signature = None
    def __bytearray__(self):
        # Binary OpenPGP serialization of the wrapped packet.
        return self._signature.__bytearray__()
def __repr__(self):
return "<PGPSignature [{:s}] object at 0x{:02x}>".format(self.type.name, id(self))
def __lt__(self, other):
return self.created < other.created
    def __or__(self, other):
        """Attach a parsed Signature (or EmbeddedSignature) packet to this object."""
        if isinstance(other, Signature):
            # Only the first Signature attached wins; later ones are ignored.
            if self._signature is None:
                self._signature = other
            return self
        ##TODO: this is not a great way to do this
        # Name check avoids importing EmbeddedSignature here.
        if other.__class__.__name__ == 'EmbeddedSignature':
            self._signature = other
            return self
        raise TypeError
    def __copy__(self):
        """Copy this signature, including a shallow copy of the wrapped packet."""
        # because the default shallow copy isn't actually all that useful,
        # and deepcopy does too much work
        sig = super(PGPSignature, self).__copy__()
        # sig = PGPSignature()
        # sig.ascii_headers = self.ascii_headers.copy()
        # Attach the copied packet via __or__.
        sig |= copy.copy(self._signature)
        return sig
    def attests_to(self, othersig):
        """Return True if this signature attests to othersig (acknowledges it for redistribution)."""
        if not isinstance(othersig, PGPSignature):
            raise TypeError
        # Hash othersig's canonical form with this signature's hash algorithm
        # and look it up in the attested digest set.
        h = self.hash_algorithm.hasher
        h.update(othersig._signature.canonical_bytes())
        return h.digest() in self.attested_certifications
def hashdata(self, subject):
_data = bytearray()
if isinstance(subject, six.string_types):
try:
subject = subject.encode('utf-8')
except UnicodeEncodeError:
subject = subject.encode('charmap')
"""
All signatures are formed by producing a hash over the signature
data, and then using the resulting hash in the signature algorithm.
"""
if self.type == SignatureType.BinaryDocument:
"""
For binary document signatures (type 0x00), the document data is
hashed directly.
"""
if isinstance(subject, (SKEData, IntegrityProtectedSKEData)):
_data += subject.__bytearray__()
else:
_data += bytearray(subject)
if self.type == SignatureType.CanonicalDocument:
"""
For text document signatures (type 0x01), the
document is canonicalized by converting line endings to <CR><LF>,
and the resulting data is hashed.
"""
_data += re.subn(br'\r?\n', b'\r\n', subject)[0]
if self.type in {SignatureType.Generic_Cert, SignatureType.Persona_Cert, SignatureType.Casual_Cert,
SignatureType.Positive_Cert, SignatureType.CertRevocation, SignatureType.Subkey_Binding,
SignatureType.PrimaryKey_Binding}:
"""
When a signature is made over a key, the hash data starts with the
octet 0x99, followed by a two-octet length of the key, and then body
of the key packet. (Note that this is an old-style packet header for
a key packet with two-octet length.) ...
Key revocation signatures (types 0x20 and 0x28)
hash only the key being revoked.
"""
_s = b''
if isinstance(subject, PGPUID):
_s = subject._parent.hashdata
elif isinstance(subject, PGPKey) and not subject.is_primary:
_s = subject._parent.hashdata
elif isinstance(subject, PGPKey) and subject.is_primary:
_s = subject.hashdata
if len(_s) > 0:
_data += b'\x99' + self.int_to_bytes(len(_s), 2) + _s
if self.type in {SignatureType.Subkey_Binding, SignatureType.PrimaryKey_Binding}:
"""
A subkey binding signature
(type 0x18) or primary key binding signature (type 0x19) then hashes
the subkey using the same format as the main key | |
<filename>src/view/views/python/explorer/PythonExplorer.py
#!/usr/bin/python
'''
Created on Jan 10, 2019
@author: vijay
'''
from src.view.util.FileOperationsUtil import FileOperations
import wx
# from src.view.table.CreateTable import CreateTableFrame
import logging.config
from src.view.constants import LOG_SETTINGS, ID_COLLAPSE_ALL, ID_LINK_WITH_EDITOR, \
ID_VIEW_MENU, menuItemList, ID_EXPORT, ID_IMPORT, ID_NEW, \
ID_PROJECT_PROPERTIES, ID_CLOSE_PROJECT, ID_DELETE_PROJECT, ID_NEW_FILE, ID_NEW_FOLDER, \
ID_RENAME, ID_REFRESH_TREE_ITEM, ID_PYTHON_RUN, ID_NEW_PYTHON_PROJECT
from src.view.views.file.explorer._filetree import FileTree
from src.view.views.file.MainStcPanel import MainStc
from src.view.other.NewFile import NewFileFrame
import os
import stat
from src.view.util.osutil import GetWindowsDriveType, RemovableDrive, CDROMDrive
from src.view.util.common.eclutil import Freezer
from src.view.views.file.explorer.FileBrowserPanel import FileBrowser, \
FileBrowserMimeManager
import time
from src.view.util.common.fileutil import IsHidden, GetFileName
from src.sqlite_executer.ConnectExecuteSqlite import SQLExecuter
from pubsub import pub
# from src.settings.workspace import Setting
from src.view.views.python.explorer.IconManager import PythonExplorerIconManager
from src.view.views.editor.EditorManager import EditorWindowManager
from src.view.other.new.python.project.NewProject import NewProjectFrame
import shutil
# from src.dao.workspace.WorksapaceDao import WorkspaceDatasource
# from src.settings.workspace import Setting
try:
from agw import aui
from agw.aui import aui_switcherdialog as ASD
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.aui as aui
from wx.lib.agw.aui import aui_switcherdialog as ASD
logging.config.dictConfig(LOG_SETTINGS)
logger = logging.getLogger('extensive')
from src.dao.workspace.WorkspaceDao import getWorkspace, WorkspaceDatasource
class PythonExplorerPanel(wx.Panel):
    """Top-level panel of the Python project explorer: a toolbar stacked
    above the project tree (PythonExplorerTreePanel)."""
    def __init__(self, parent=None, *args, **kw):
        wx.Panel.__init__(self, parent, -1)
        self.parent = parent
        sizer = wx.BoxSizer(wx.VERTICAL)
        # NOTE(review): result is unused, but getWorkspace() may have side
        # effects (e.g. opening the workspace DB) - confirm before removing.
        workspace = getWorkspace()
        # self.setting = Setting()
        # self.setting.loadSettings()
        # self.title = title
        ####################################################################
        self.fileOperations = FileOperations()
        self.topToolbar = self.constructTopToolBar()
        self.pythonExplorerTreePanel = PythonExplorerTreePanel(self)
        # Toggled by the "Link with Editor" toolbar check tool.
        self.linkWithEditor = False
        ####################################################################
        sizer.Add(self.topToolbar, 0, wx.EXPAND)
        sizer.Add(self.pythonExplorerTreePanel, 1, wx.EXPAND)
        self.SetSizer(sizer)
        self.Center()
        self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
        # self.createStatusBar()
        # self.Show(True)
    def OnContextMenu(self, event):
        # Placeholder: context-menu handling currently only logs.
        logger.debug("OnContextMenu\n")
    def constructTopToolBar(self):
        """Build the explorer toolbar and bind each tool's handler.
        @return: aui.AuiToolBar
        """
        # create some toolbars
        tb1 = aui.AuiToolBar(self, -1, wx.DefaultPosition, (10, 10), agwStyle=aui.AUI_TB_DEFAULT_STYLE | aui.AUI_TB_OVERFLOW)
        # tb1.SetToolBitmapSize(wx.Size(16, 16))
        # Each entry: (id, label, image, short help, handler, item kind);
        # an empty tuple inserts a separator.
        tools = [
            (ID_COLLAPSE_ALL, "Collapse All", "collapseall-small.png", 'Collapse All', self.onCollapseAll, wx.ITEM_NORMAL),
            (ID_LINK_WITH_EDITOR, "Link with Editor", "icon_link_with_editor.png", 'Link with Editor', self.onLinkWithEditor, wx.ITEM_CHECK),
            (),
            (ID_VIEW_MENU, "View Menu", "icon_menu.png", 'View Menu', self.onViewMenu, wx.ITEM_NORMAL),
            # (ID_REFRESH_ROW, "Result refresh", "resultset_refresh.png", 'Result refresh \tF5', self.onRefresh),
            # (ID_ADD_ROW, "Add a new row", "row_add.png", 'Add a new row', self.onAddRow),
            # (ID_DUPLICATE_ROW, "Duplicate selected row", "row_copy.png", 'Duplicate selected row', self.onDuplicateRow),
            # (ID_DELETE_ROW, "Delete selected row", "row_delete.png", 'Delete selected row', self.onDeleteRow),
        ]
        for tool in tools:
            if len(tool) == 0:
                tb1.AddSeparator()
            else:
                logger.debug(tool)
                toolItem = tb1.AddSimpleTool(tool[0], tool[1], self.fileOperations.getImageBitmap(imageName=tool[2]), kind=tool[5], short_help_string=tool[3])
                if tool[4]:
                    self.Bind(wx.EVT_MENU, tool[4], id=tool[0])
        tb1.Realize()
        return tb1
    def onCollapseAll(self, event):
        # Collapse every node in the project tree.
        logger.debug('onCollapseAll')
        self.pythonExplorerTreePanel.CollapseAll()
    def onLinkWithEditor(self, event):
        # Record whether the tree should follow the active editor.
        logger.debug('onLinkWithEditor')
        self.linkWithEditor = event.IsChecked()
        logger.debug(f'{self.linkWithEditor}')
        # event.
    def onViewMenu(self, event):
        # Placeholder: view-menu handling currently only logs.
        logger.debug('onViewMenu')
class PythonExplorerTreePanel(FileTree):
    def __init__(self, parent, size=wx.DefaultSize):
        """Create the project tree and populate it from the active workspace.

        @param parent: parent window (PythonExplorerPanel)
        @param size: initial size hint (currently unused)
        """
        # Created before super().__init__ so it is available to base-class
        # setup hooks - TODO confirm FileTree's init order.
        self.iconManager = PythonExplorerIconManager()
        super().__init__(parent)
        # self.setting = Setting()
        # self.setting.loadSettings()
        self._mw = None
        self.isClosing = False
        self.syncTimer = wx.Timer(self)
        self._cpath = None
        self.sqlExecuter = SQLExecuter()
        self.menu = None
        self.fileOperations = FileOperations()
        # Setup
        self.SetupImageList()
        self.initProjects()
        # self.Bind(wx.EVT_CONTEXT_MENU, self._OnMenu)
    def initProjects(self):
        """Add every project of the active workspace as a watched top-level node."""
        try:
            datasource = WorkspaceDatasource()
            workspace = datasource.findActiveWorkspace()
            if workspace and workspace.projects:
                for project in workspace.projects:
                    self.AddWatchDirectory(project=project)
            else:
                # TODO popup for workspace definition
                pass
        except Exception as e:
            logger.error(e)
def onDeleteKeyPress(self, event):
logger.debug(f'onDeleteKeyPress:{self}')
try:
nodes = self.GetSelections()
for node in nodes:
path = self.GetPyData(node)
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.remove(path)
self.Delete(node)
event.Skip()
except Exception as e:
logger.error(e, exc_info=True)
    def onF2KeyPress(self, event):
        """Start in-place label editing on the first selected node (F2)."""
        logger.debug(f'onF2KeyPress:{self}')
        try:
            nodes = self.GetSelections()
            if nodes and nodes[0]:
                self.EditLabel(nodes[0])
        except Exception as e:
            logger.error(e, exc_info=True)
    def _OnEndEdit(self, evt):
        """Finish an in-place label edit by renaming the file/folder on disk.

        Vetoes the event when the new label is empty.
        """
        logger.debug('_OnEndEdit')
        if self._editlabels:
            item = evt.GetItem()
            data = self.GetPyData(item)
            basePath, fileOrFolder = os.path.split(data)
            # NOTE(review): changes the process-wide cwd so the relative
            # rename below works; the side effect is never undone - confirm.
            os.chdir(basePath)
            newlabel = evt.GetLabel()
            logger.debug(f'newlabel:{newlabel}')
            if newlabel and newlabel != '':
                os.rename(fileOrFolder, newlabel)
                self.Refresh(eraseBackground=True, rect=None)
                evt.Skip()
            else:
                evt.Veto()
        # if self.DoEndEdit(item, newlabel):
        # return
    def onDelete(self, evt):
        """Delete handler stub; currently only logs."""
        logger.debug(f'onDelete:{self}')
        # NOTE(review): item is retrieved but never used - incomplete handler.
        item = evt.GetItem()
    def AddWatchDirectory(self, project=None):
        """Add a project's directory to the controls top level view
        @param project: project whose path is added to the watch list
        @return: TreeItem or None (None when the path is already watched)
        @todo: add additional api for getting already existing nodes based
        on path.
        """
        logger.debug('AddWatchDirectory')
        # assert os.path.exists(dname), "Path(%s) doesn't exist!" % dname
        childNode = None
        # dname = r"c:\1\sql_editor"
        if project.getProjectPath() not in self._watch:
            self._watch.append(project.getProjectPath())
            childNode = self.AppendFileNode(self.RootItem, project=project)
        return childNode
    def AppendFileNode(self, item, project=None):
        """Append a child node to the tree
        @param item: TreeItem parent node
        @param project: project whose path/name becomes the new node
        @return: new node
        """
        logger.debug('AppendFileNode')
        img = self.DoGetFileImage(project.getProjectPath())
        # The node label is the project name, not the directory basename.
        name=project.name
        # name = os.path.basename(project.getProjectPath())
        # if not name:
        # name = project.getProjectPath()
        child = self.AppendItem(item, name, img)
        self.SetItemData(child, project.getProjectPath())
        if os.path.isdir(project.getProjectPath()):
            self.SetItemHasChildren(child, True)
        return child
def DoOnActivate(self, active):
"""Handle activation of main window that this
tree belongs too.
@param active: bool
"""
# Suspend background checks when window is not active
if active and self.IsShown():
self.SuspendChecks(False) # Resume
elif not active:
self.SuspendChecks(True) # Suspend
    def DoOnDestroy(self):
        """Clean up resources and message handlers"""
        self._menu.Clear()
        # ed_msg.Unsubscribe(self.OnPageChange)
        # ed_msg.Unsubscribe(self.OnPageClosing)
        # ed_msg.Unsubscribe(self.OnThemeChanged)
        # ed_msg.Unsubscribe(self.OnConfig)
        # Stop the background sync timer before the window goes away.
        if self.syncTimer.IsRunning():
            self.syncTimer.Stop()
    def SuspendChecks(self, suspend=True):
        """Suspend/Continue background monitoring"""
        # NOTE(review): self._monitor is not created in this class's __init__;
        # presumably set up by the FileTree base class - confirm.
        self._monitor.Suspend(suspend)
#--- FileTree interface methods ----#
    def DoSetupImageList(self):
        """Setup the image list for this control"""
        # Delegated to the explorer-specific icon manager.
        self.iconManager.PopulateImageList(self.ImageList)
        # super().DoSetupImageList()
    def DoGetFileImage(self, path):
        """Get the image for the given item"""
        return self.iconManager.GetImageIndex(path)
    def DoGetToolTip(self, item):
        """Get the tooltip to show for an item
        @return: string or None (tooltips are currently disabled)
        """
        tip = None
        # if self.GetItemImage(item) == self.iconManager.IMG_NO_ACCESS:
        # tip = _("Access Denied")
        # elif item: # Slightly annoying on GTK disable for now
        # tip = self.GetPyData(item)
        return tip
def DoItemActivated(self, item):
"""Override to handle item activation
@param item: TreeItem
"""
logger.debug('DoItemActivated')
filePathWithIconList = self.GetSelectedFilesWithImage()
self.OpenFiles(self.GetSelectedFilesWithImage())
for filePathWithIcon in filePathWithIconList:
pub.sendMessage('addFileToHistory', path=filePathWithIcon[0])
    def DoItemCollapsed(self, item):
        """Handle when an item is collapsed"""
        d = self.GetPyData(item)
        # if d:
        # self._monitor.RemoveDirectory(d)
        super().DoItemCollapsed(item)
        # Swap the node icon back to its "closed" variant.
        if d:
            self.SetItemImage(item, self.iconManager.GetImageIndex(d, False))
def ShouldDisplayFile(self, path):
"""Check if the given file should be displayed based on configuration
@param path: file path
@return: bool
"""
shouldDisplay = True
dname = GetFileName(path)
if dname.startswith('.') or dname in ['__pycache__' , 'build', 'dist'] or '.egg-info' in dname :
shouldDisplay = False
return shouldDisplay
# def ShouldDisplayFile(self, path):
# """Check if the given file should be displayed based on configuration
# @param path: file path
# @return: bool
#
# """
# showHidden = fbcfg.GetFBOption(fbcfg.FB_SHF_OPT, False)
# if not showHidden and ebmlib.IsHidden(path):
# return False
# name = os.path.basename(path)
# filters = fbcfg.GetFBOption(fbcfg.FB_FILTER_OPT,
# fbcfg.FB_DEFAULT_FILTERS)
# if filter(lambda x: fnmatch.fnmatchcase(name, x), filters):
# return False
# return True
# def FilterFileList(self, paths):
# """Filter a list of files returning only the ones that are valid to
# display in the tree. Optimized for large lists of paths.
# @param paths: list of paths
# @return: filtered list
#
# """
# showHidden = True
# # showHidden = fbcfg.GetFBOption(fbcfg.FB_SHF_OPT, False)
# filters = fbcfg.GetFBOption(fbcfg.FB_FILTER_OPT,
# fbcfg.FB_DEFAULT_FILTERS)
# isHidden = IsHidden
# rval = list()
# rAdd = rval.append
# getBase = os.path.basename
# for path in paths:
# if not showHidden and isHidden(path):
# continue
# name = getBase(path)
# if filter(lambda x: fnmatch.fnmatchcase(name, x), filters):
# continue
# rAdd(path)
# return rval
    def DoItemExpanding(self, item):
        """Handle when an item is expanding to display the folder contents
        @param item: TreeItem
        """
        logger.debug('DoItemExpanding')
        cursor = wx.BusyCursor() # can take a few seconds on big directories
        d = None
        try:
            d = self.GetPyData(item)
        except wx.PyAssertionError:
            logger.debug("[FileBrowser][err] FileBrowser.DoItemExpanding")
            return
        if d and os.path.exists(d) and os.access(d, os.R_OK) and self.ShouldDisplayFile(d):
            contents = self.GetDirContents(d)
            if contents and len(contents) > 0:
                t1 = time.time()
                # Freeze the control while bulk-adding to avoid flicker.
                with Freezer(self) as _tmp:
                    self.AppendFileNodes(item, contents)
                    self.SortChildren(item)
                logger.info("Tree expand time: %f" % (time.time() - t1))
            else:
                # Nothing to show: drop the expander button.
                self.SetItemHasChildren(item, hasChildren=False)
        # if not self._monitor.AddDirectory(d):
        # self.SetItemImage(item, self.iconManager.IMG_NO_ACCESS)
        # return
        # Update tree image
        if d:
            self.SetItemImage(item, self.iconManager.GetImageIndex(d, True))
        # Explicitly release the busy cursor.
        del cursor
def AppendFileNodes(self, item, paths):
"""Append a list of child node to the tree. This
method can be used instead of looping on AppendFileNode
to get slightly better performance for large sets.
@param item: TreeItem parent node
@param paths: list of file paths
@return: None
"""
logger.debug('AppendFileNodes')
# getBaseName = os.path.basename
# isDir = os.path.os.path.isdir
# getImg = self.DoGetFileImage
# appendNode = self.AppendItem
# setData = self.SetItemData
for path in paths:
try:
img = self.DoGetFileImage(path)
name = os.path.basename(path)
if not name:
name = path
child = self.AppendItem(item, name, img)
self.SetItemData(child, path)
except Exception as e:
logger.error(e, exc_info=True)
if os.path.isdir(path):
self.SetItemHasChildren(child, True)
def GetDirContents(self, directory):
"""Get the list of files contained in the given directory"""
logger.debug('GetDirContents')
assert os.path.isdir(directory)
files = list()
try:
joinPath = os.path.join
fappend = files.append
# fs_encoding = sys.getfilesystemencoding()
for p in os.listdir(directory):
fullpath = joinPath(directory, p)
if self.ShouldDisplayFile(fullpath):
# if type(fullpath) != types:
# fullpath = fullpath.decode(fs_encoding)
fappend(fullpath)
except OSError:
pass
return files
def DoBeginEdit(self, item):
"""Handle when an item is requested to be edited"""
d = None
try:
d = self.GetPyData(item)
except wx.PyAssertionError:
logger.debug("[FileBrowser][err] FileBrowser.DoItemExpanding")
# util.Log("[FileBrowser][err] FileBrowser.DoItemExpanding")
return False
if d and not os.access(d, os.W_OK) or os.path.ismount(d):
return False
return True
def DoEndEdit(self, item, newlabel):
"""Handle after a user has made changes to a label"""
editOk = False
path = self.GetPyData(item)
# TODO: check access rights and validate input
# if path:
# newpath = | |
self.ActiveX.GetFieldData(self.OUTBLOCK1, "time", i).strip()
시가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "open", i).strip())
고가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "high", i).strip())
저가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "low", i).strip())
종가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "close", i).strip())
거래량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "jdiff_vol", i).strip())
미결제약정 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "openyak", i).strip())
lst = [날짜,시간,시가,고가,저가,종가,거래량,미결제약정]
result.append(lst)
columns = ['날짜','시간','시가','고가','저가','종가','거래량','미결제약정']
df = DataFrame(data=result, columns=columns)
if self.parent != None:
self.parent.OnReceiveData(szTrCode, [block, df])
# 선물/옵션챠트(N분)(t8415)
class t8415(XAQuery):
    """Futures/options N-minute chart query (XingAPI TR t8415).

    Query() fills the INBLOCK fields (shcode=short code, ncnt=interval in
    minutes, qrycnt=record count, nday=business days, sdate/stime and
    edate/etime = start/end date-time, cts_date/cts_time = continuation
    cursor, comp_yn=compression flag) and issues the request; passing
    연속조회=True re-uses the loaded RES and only updates the cursor fields.
    OnReceiveData() forwards (header dict, candle DataFrame) to the parent.
    """
    # def Query(self, 단축코드='201N7302', 단위='15', 요청건수='2000', 조회영업일수='0', 시작일자='20180629', 시작시간='', 종료일자='20180629', 종료시간='', 연속일자='', 연속시간='', 압축여부='N', 연속조회=False):
    def Query(self, 단축코드,단위='1',요청건수='2000',조회영업일수='0',시작일자='',시작시간='',종료일자='',종료시간='',연속일자='',연속시간='',압축여부='N', 연속조회=False):
        if 연속조회 == False:
            self.ActiveX.LoadFromResFile(self.RESFILE)
            self.ActiveX.SetFieldData(self.INBLOCK, "shcode", 0, 단축코드)
            self.ActiveX.SetFieldData(self.INBLOCK, "ncnt", 0, 단위)
            self.ActiveX.SetFieldData(self.INBLOCK, "qrycnt", 0, 요청건수)
            self.ActiveX.SetFieldData(self.INBLOCK, "nday", 0, 조회영업일수)
            self.ActiveX.SetFieldData(self.INBLOCK, "sdate", 0, 시작일자)
            self.ActiveX.SetFieldData(self.INBLOCK, "stime", 0, 시작시간)
            self.ActiveX.SetFieldData(self.INBLOCK, "edate", 0, 종료일자)
            self.ActiveX.SetFieldData(self.INBLOCK, "etime", 0, 종료시간)
            self.ActiveX.SetFieldData(self.INBLOCK, "cts_date", 0, 연속일자)
            self.ActiveX.SetFieldData(self.INBLOCK, "cts_time", 0, 연속시간)
            self.ActiveX.SetFieldData(self.INBLOCK, "comp_yn", 0, 압축여부)
            self.ActiveX.Request(0)
        else:
            # Continuation page: only the cursor fields change.
            self.ActiveX.SetFieldData(self.INBLOCK, "cts_date", 0, 연속일자)
            self.ActiveX.SetFieldData(self.INBLOCK, "cts_time", 0, 연속시간)
            err_code = self.ActiveX.Request(True) # True only for continuation queries
            if err_code < 0:
                클래스이름 = self.__class__.__name__
                함수이름 = inspect.currentframe().f_code.co_name
                print("%s-%s " % (클래스이름, 함수이름), "error... {0}".format(err_code))
    def OnReceiveData(self, szTrCode):
        # OUTBLOCK: single header record (previous/current day OHLCV, price
        # limits, continuation cursor, session times).
        block = dict()
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK)
        for i in range(nCount):
            block['단축코드'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "shcode", i).strip()
            block['전일시가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "jisiga", i).strip())
            block['전일고가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "jihigh", i).strip())
            block['전일저가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "jilow", i).strip())
            block['전일종가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "jiclose", i).strip())
            block['전일거래량'] = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "jivolume", i).strip())
            block['당일시가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "disiga", i).strip())
            block['당일고가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "dihigh", i).strip())
            block['당일저가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "dilow", i).strip())
            block['당일종가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "diclose", i).strip())
            block['상한가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "highend", i).strip())
            block['하한가'] = float(self.ActiveX.GetFieldData(self.OUTBLOCK, "lowend", i).strip())
            block['연속일자'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "cts_date", i).strip()
            block['연속시간'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "cts_time", i).strip()
            block['장시작시간'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "s_time", i).strip()
            block['장종료시간'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "e_time", i).strip()
            block['동시호가처리시간'] = self.ActiveX.GetFieldData(self.OUTBLOCK, "dshmin", i).strip()
            block['레코드카운트'] = int(self.ActiveX.GetFieldData(self.OUTBLOCK, "rec_count", i).strip())
        # OUTBLOCK1: one row per candle.
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
        for i in range(nCount):
            날짜 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "date", i).strip()
            시간 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "time", i).strip()
            시가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "open", i).strip())
            고가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "high", i).strip())
            저가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "low", i).strip())
            종가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "close", i).strip())
            누적거래량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "jdiff_vol", i).strip())
            거래대금 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "value", i).strip())
            미결제약정 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "openyak", i).strip())
            lst = [날짜,시간,시가,고가,저가,종가,누적거래량,거래대금,미결제약정]
            result.append(lst)
        columns = ['날짜','시간','시가','고가','저가','종가','누적거래량','거래대금','미결제약정']
        df = DataFrame(data=result, columns=columns)
        if self.parent != None:
            self.parent.OnReceiveData(szTrCode, [block, df])
##----------------------------------------------------------------------------------------------------------------------
# 해외선물
# 해외선물 체결내역개별 조회
class CIDBQ01400(XAQuery):
    """TR CIDBQ01400 — 해외선물 체결내역개별 조회 (overseas futures: individual execution query).

    ``Query`` fills INBLOCK1 and fires the asynchronous request; the XingAPI
    control later calls ``OnReceiveData``, which packs OUTBLOCK1/OUTBLOCK2
    into pandas DataFrames and forwards them to ``self.parent``.
    """
    def Query(self, 레코드갯수='',조회구분코드='',계좌번호='',종목코드값='',매매구분코드='',해외파생주문가격='',해외선물주문유형코드=''):
        """Populate the request block from the keyword arguments and send it."""
        self.ActiveX.LoadFromResFile(self.RESFILE)
        self.ActiveX.SetFieldData(self.INBLOCK1, "RecCnt", 0, 레코드갯수)
        self.ActiveX.SetFieldData(self.INBLOCK1, "QryTpCode", 0, 조회구분코드)
        self.ActiveX.SetFieldData(self.INBLOCK1, "AcntNo", 0, 계좌번호)
        self.ActiveX.SetFieldData(self.INBLOCK1, "IsuCodeVal", 0, 종목코드값)
        self.ActiveX.SetFieldData(self.INBLOCK1, "BnsTpCode", 0, 매매구분코드)
        self.ActiveX.SetFieldData(self.INBLOCK1, "OvrsDrvtOrdPrc", 0, 해외파생주문가격)
        self.ActiveX.SetFieldData(self.INBLOCK1, "AbrdFutsOrdPtnCode", 0, 해외선물주문유형코드)
        self.ActiveX.Request(0)
    def OnReceiveData(self, szTrCode):
        """Parse the reply blocks and notify the parent with ``[df, df1]``."""
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
        for i in range(nCount):
            # NOTE(review): fields are read with record index 0 rather than
            # ``i`` — presumably OUTBLOCK1 holds a single record; verify
            # against the RES definition before changing.
            레코드갯수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "RecCnt", 0).strip())
            조회구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "QryTpCode", 0).strip()
            계좌번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "AcntNo", 0).strip()
            종목코드값 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "IsuCodeVal", 0).strip()
            매매구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "BnsTpCode", 0).strip()
            해외파생주문가격 = float(self.ActiveX.GetFieldData(self.OUTBLOCK1, "OvrsDrvtOrdPrc", 0).strip())
            해외선물주문유형코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "AbrdFutsOrdPtnCode", 0).strip()
            lst = [레코드갯수,조회구분코드,계좌번호,종목코드값,매매구분코드,해외파생주문가격,해외선물주문유형코드]
            result.append(lst)
        columns = ['레코드갯수','조회구분코드','계좌번호','종목코드값','매매구분코드','해외파생주문가격','해외선물주문유형코드']
        df = DataFrame(data=result, columns=columns)
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK2)
        for i in range(nCount):
            레코드갯수 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "RecCnt", i).strip()
            주문가능수량 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdAbleQty", i).strip()
            lst = [레코드갯수,주문가능수량]
            result.append(lst)
        columns = ['레코드갯수','주문가능수량']
        df1 = DataFrame(data=result, columns=columns)
        # fix: identity comparison with None per PEP 8 (was ``!= None``)
        if self.parent is not None:
            self.parent.OnReceiveData(szTrCode, [df, df1])
# 해외선물 미결제 잔고내역
class CIDBQ01500(XAQuery):
    """TR CIDBQ01500 — 해외선물 미결제 잔고내역 (overseas futures: open-position balances).

    ``Query`` fills INBLOCK1 and fires the asynchronous request;
    ``OnReceiveData`` converts OUTBLOCK1 (request echo) and OUTBLOCK2
    (per-position balance rows) into DataFrames for ``self.parent``.
    """
    def Query(self, 레코드갯수='',계좌구분코드='',계좌번호='',FCM계좌번호='',비밀번호='',조회일자='',잔고구분코드=''):
        """Populate the request block from the keyword arguments and send it."""
        self.ActiveX.LoadFromResFile(self.RESFILE)
        self.ActiveX.SetFieldData(self.INBLOCK1, "RecCnt", 0, 레코드갯수)
        self.ActiveX.SetFieldData(self.INBLOCK1, "AcntTpCode", 0, 계좌구분코드)
        self.ActiveX.SetFieldData(self.INBLOCK1, "AcntNo", 0, 계좌번호)
        self.ActiveX.SetFieldData(self.INBLOCK1, "FcmAcntNo", 0, FCM계좌번호)
        self.ActiveX.SetFieldData(self.INBLOCK1, "Pwd", 0, 비밀번호)
        self.ActiveX.SetFieldData(self.INBLOCK1, "QryDt", 0, 조회일자)
        self.ActiveX.SetFieldData(self.INBLOCK1, "BalTpCode", 0, 잔고구분코드)
        self.ActiveX.Request(0)
    def OnReceiveData(self, szTrCode):
        """Parse the reply blocks and notify the parent with ``[df, df1]``."""
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
        for i in range(nCount):
            # NOTE(review): record index 0 (not ``i``) — presumably OUTBLOCK1
            # holds a single record; verify against the RES definition.
            레코드갯수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "RecCnt", 0).strip())
            계좌구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "AcntTpCode", 0).strip()
            계좌번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "AcntNo", 0).strip()
            FCM계좌번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "FcmAcntNo", 0).strip()
            비밀번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "Pwd", 0).strip()
            조회일자 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "QryDt", 0).strip()
            잔고구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "BalTpCode", 0).strip()
            lst = [레코드갯수,계좌구분코드,계좌번호,FCM계좌번호,비밀번호,조회일자,잔고구분코드]
            result.append(lst)
        columns = ['레코드갯수','계좌구분코드','계좌번호','FCM계좌번호','비밀번호','조회일자','잔고구분코드']
        df = DataFrame(data=result, columns=columns)
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK2)
        for i in range(nCount):
            기준일자 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "BaseDt", i).strip()
            예수금 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "Dps", i).strip())
            청산손익금액 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "LpnlAmt", i).strip())
            선물만기전청산손익금액 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "FutsDueBfLpnlAmt", i).strip())
            선물만기전수수료 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "FutsDueBfCmsn", i).strip())
            위탁증거금액 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "CsgnMgn", i).strip())
            유지증거금 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "MaintMgn", i).strip())
            신용한도금액 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "CtlmtAmt", i).strip())
            추가증거금액 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "AddMgn", i).strip())
            마진콜율 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "MgnclRat", i).strip())
            주문가능금액 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdAbleAmt", i).strip())
            인출가능금액 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "WthdwAbleAmt", i).strip())
            계좌번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "AcntNo", i).strip()
            종목코드값 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "IsuCodeVal", i).strip()
            종목명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "IsuNm", i).strip()
            통화코드값 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "CrcyCodeVal", i).strip()
            해외파생상품코드 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsDrvtPrdtCode", i).strip()
            해외파생옵션구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsDrvtOptTpCode", i).strip()
            만기일자 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "DueDt", i).strip()
            해외파생행사가격 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsDrvtXrcPrc", i).strip())
            매매구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "BnsTpCode", i).strip()
            공통코드명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "CmnCodeNm", i).strip()
            구분코드명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "TpCodeNm", i).strip()
            잔고수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "BalQty", i).strip())
            매입가격 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "PchsPrc", i).strip())
            해외파생현재가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsDrvtNowPrc", i).strip())
            해외선물평가손익금액 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "AbrdFutsEvalPnlAmt", i).strip())
            위탁수수료 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "CsgnCmsn", i).strip())
            포지션번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "PosNo", i).strip()
            거래소비용1수수료금액 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "EufOneCmsnAmt", i).strip())
            거래소비용2수수료금액 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "EufTwoCmsnAmt", i).strip())
            lst = [
                기준일자,예수금,청산손익금액,선물만기전청산손익금액,선물만기전수수료,위탁증거금액,유지증거금,신용한도금액,추가증거금액,마진콜율,주문가능금액,
                인출가능금액,계좌번호,종목코드값,종목명,통화코드값,해외파생상품코드,해외파생옵션구분코드,만기일자,해외파생행사가격,매매구분코드,공통코드명,
                구분코드명,잔고수량,매입가격,해외파생현재가,해외선물평가손익금액,위탁수수료,포지션번호,거래소비용1수수료금액,거래소비용2수수료금액
            ]
            result.append(lst)
        columns = ['기준일자','예수금','청산손익금액','선물만기전청산손익금액','선물만기전수수료','위탁증거금액','유지증거금','신용한도금액','추가증거금액','마진콜율','주문가능금액','인출가능금액','계좌번호','종목코드값','종목명','통화코드값','해외파생상품코드','해외파생옵션구분코드','만기일자','해외파생행사가격','매매구분코드','공통코드명','구분코드명','잔고수량','매입가격','해외파생현재가','해외선물평가손익금액','위탁수수료','포지션번호','거래소비용1수수료금액','거래소비용2수수료금액']
        df1 = DataFrame(data=result, columns=columns)
        # fix: identity comparison with None per PEP 8 (was ``!= None``)
        if self.parent is not None:
            self.parent.OnReceiveData(szTrCode, [df, df1])
# 해외선물 주문체결내역 조회
class CIDBQ01800(XAQuery):
    """TR CIDBQ01800 — 해외선물 주문체결내역 조회 (overseas futures: order/execution history).

    ``Query`` fills INBLOCK1 and fires the asynchronous request;
    ``OnReceiveData`` converts OUTBLOCK1 (request echo) and OUTBLOCK2
    (per-order rows) into DataFrames for ``self.parent``.
    """
    def Query(self, 레코드갯수='',계좌번호='',비밀번호='',종목코드값='',주문일자='',당일구분코드='',주문상태코드='',매매구분코드='',조회구분코드='',주문유형코드='',해외파생선물옵션구분코드=''):
        """Populate the request block from the keyword arguments and send it."""
        self.ActiveX.LoadFromResFile(self.RESFILE)
        self.ActiveX.SetFieldData(self.INBLOCK1, "RecCnt", 0, 레코드갯수)
        self.ActiveX.SetFieldData(self.INBLOCK1, "AcntNo", 0, 계좌번호)
        self.ActiveX.SetFieldData(self.INBLOCK1, "Pwd", 0, 비밀번호)
        self.ActiveX.SetFieldData(self.INBLOCK1, "IsuCodeVal", 0, 종목코드값)
        self.ActiveX.SetFieldData(self.INBLOCK1, "OrdDt", 0, 주문일자)
        self.ActiveX.SetFieldData(self.INBLOCK1, "ThdayTpCode", 0, 당일구분코드)
        self.ActiveX.SetFieldData(self.INBLOCK1, "OrdStatCode", 0, 주문상태코드)
        self.ActiveX.SetFieldData(self.INBLOCK1, "BnsTpCode", 0, 매매구분코드)
        self.ActiveX.SetFieldData(self.INBLOCK1, "QryTpCode", 0, 조회구분코드)
        self.ActiveX.SetFieldData(self.INBLOCK1, "OrdPtnCode", 0, 주문유형코드)
        self.ActiveX.SetFieldData(self.INBLOCK1, "OvrsDrvtFnoTpCode", 0, 해외파생선물옵션구분코드)
        self.ActiveX.Request(0)
    def OnReceiveData(self, szTrCode):
        """Parse the reply blocks and notify the parent with ``[df, df1]``."""
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
        for i in range(nCount):
            # NOTE(review): record index 0 (not ``i``) — presumably OUTBLOCK1
            # holds a single record; verify against the RES definition.
            레코드갯수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "RecCnt", 0).strip())
            계좌번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "AcntNo", 0).strip()
            비밀번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "Pwd", 0).strip()
            종목코드값 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "IsuCodeVal", 0).strip()
            주문일자 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "OrdDt", 0).strip()
            당일구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "ThdayTpCode", 0).strip()
            주문상태코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "OrdStatCode", 0).strip()
            매매구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "BnsTpCode", 0).strip()
            조회구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "QryTpCode", 0).strip()
            주문유형코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "OrdPtnCode", 0).strip()
            해외파생선물옵션구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "OvrsDrvtFnoTpCode", 0).strip()
            lst = [레코드갯수,계좌번호,비밀번호,종목코드값,주문일자,당일구분코드,주문상태코드,매매구분코드,조회구분코드,주문유형코드,해외파생선물옵션구분코드]
            result.append(lst)
        columns = ['레코드갯수','계좌번호','비밀번호','종목코드값','주문일자','당일구분코드','주문상태코드','매매구분코드','조회구분코드','주문유형코드','해외파생선물옵션구분코드']
        df = DataFrame(data=result, columns=columns)
        result = []
        nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK2)
        for i in range(nCount):
            해외선물주문번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsFutsOrdNo", i).strip()
            해외선물원주문번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsFutsOrgOrdNo", i).strip()
            FCM주문번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "FcmOrdNo", i).strip()
            종목코드값 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "IsuCodeVal", i).strip()
            종목명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "IsuNm", i).strip()
            해외선물행사가격 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "AbrdFutsXrcPrc", i).strip())
            FCM계좌번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "FcmAcntNo", i).strip()
            매매구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "BnsTpCode", i).strip()
            매매구분명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "BnsTpNm", i).strip()
            선물주문상태코드 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "FutsOrdStatCode", i).strip()
            구분코드명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "TpCodeNm", i).strip()
            선물주문구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "FutsOrdTpCode", i).strip()
            거래구분명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "TrdTpNm", i).strip()
            해외선물주문유형코드 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "AbrdFutsOrdPtnCode", i).strip()
            주문유형명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdPtnNm", i).strip()
            주문유형기간구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdPtnTermTpCode", i).strip()
            공통코드명 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "CmnCodeNm", i).strip()
            적용시작일자 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "AppSrtDt", i).strip()
            적용종료일자 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "AppEndDt", i).strip()
            해외파생주문가격 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsDrvtOrdPrc", i).strip())
            주문수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdQty", i).strip())
            해외선물체결가격 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "AbrdFutsExecPrc", i).strip())
            체결수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "ExecQty", i).strip())
            주문조건가격 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdCndiPrc", i).strip())
            해외파생현재가 = float(self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsDrvtNowPrc", i).strip())
            정정수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "MdfyQty", i).strip())
            취소수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "CancQty", i).strip())
            거부수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "RjtQty", i).strip())
            확인수량 = int(self.ActiveX.GetFieldData(self.OUTBLOCK2, "CnfQty", i).strip())
            반대매매여부 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "CvrgYn", i).strip()
            등록단말번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "RegTmnlNo", i).strip()
            등록지점번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "RegBrnNo", i).strip()
            등록사용자ID = self.ActiveX.GetFieldData(self.OUTBLOCK2, "RegUserId", i).strip()
            주문일자 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdDt", i).strip()
            주문시각 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdTime", i).strip()
            해외옵션행사예약구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsOptXrcRsvTpCode", i).strip()
            lst = [
                해외선물주문번호,해외선물원주문번호,FCM주문번호,종목코드값,종목명,해외선물행사가격,FCM계좌번호,매매구분코드,매매구분명,선물주문상태코드,
                구분코드명,선물주문구분코드,거래구분명,해외선물주문유형코드,주문유형명,주문유형기간구분코드,공통코드명,적용시작일자,적용종료일자,해외파생주문가격,
                주문수량,해외선물체결가격,체결수량,주문조건가격,해외파생현재가,정정수량,취소수량,거부수량,확인수량,반대매매여부,등록단말번호,등록지점번호,등록사용자ID,
                주문일자,주문시각,해외옵션행사예약구분코드
            ]
            result.append(lst)
        columns = ['해외선물주문번호','해외선물원주문번호','FCM주문번호','종목코드값','종목명','해외선물행사가격','FCM계좌번호','매매구분코드','매매구분명','선물주문상태코드','구분코드명','선물주문구분코드','거래구분명','해외선물주문유형코드','주문유형명','주문유형기간구분코드','공통코드명','적용시작일자','적용종료일자','해외파생주문가격','주문수량','해외선물체결가격','체결수량','주문조건가격','해외파생현재가','정정수량','취소수량','거부수량','확인수량','반대매매여부','등록단말번호','등록지점번호','등록사용자ID','주문일자','주문시각','해외옵션행사예약구분코드']
        df1 = DataFrame(data=result, columns=columns)
        # fix: identity comparison with None per PEP 8 (was ``!= None``)
        if self.parent is not None:
            self.parent.OnReceiveData(szTrCode, [df, df1])
# 해외선물 주문체결내역 상세 조회
class CIDBQ02400(XAQuery):
def Query(self, 레코드갯수='',계좌번호='',비밀번호='',종목코드값='',조회시작일자='',조회종료일자='',당일구분코드='',주문상태코드='',매매구분코드='',조회구분코드='',주문유형코드='',해외파생선물옵션구분코드=''):
self.ActiveX.LoadFromResFile(self.RESFILE)
self.ActiveX.SetFieldData(self.INBLOCK1, "RecCnt", 0, 레코드갯수)
self.ActiveX.SetFieldData(self.INBLOCK1, "AcntNo", 0, 계좌번호)
self.ActiveX.SetFieldData(self.INBLOCK1, "Pwd", 0, 비밀번호)
self.ActiveX.SetFieldData(self.INBLOCK1, "IsuCodeVal", 0, 종목코드값)
self.ActiveX.SetFieldData(self.INBLOCK1, "QrySrtDt", 0, 조회시작일자)
self.ActiveX.SetFieldData(self.INBLOCK1, "QryEndDt", 0, 조회종료일자)
self.ActiveX.SetFieldData(self.INBLOCK1, "ThdayTpCode", 0, 당일구분코드)
self.ActiveX.SetFieldData(self.INBLOCK1, "OrdStatCode", 0, 주문상태코드)
self.ActiveX.SetFieldData(self.INBLOCK1, "BnsTpCode", 0, 매매구분코드)
self.ActiveX.SetFieldData(self.INBLOCK1, "QryTpCode", 0, 조회구분코드)
self.ActiveX.SetFieldData(self.INBLOCK1, "OrdPtnCode", 0, 주문유형코드)
self.ActiveX.SetFieldData(self.INBLOCK1, "OvrsDrvtFnoTpCode", 0, 해외파생선물옵션구분코드)
self.ActiveX.Request(0)
def OnReceiveData(self, szTrCode):
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK1)
for i in range(nCount):
레코드갯수 = int(self.ActiveX.GetFieldData(self.OUTBLOCK1, "RecCnt", 0).strip())
계좌번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "AcntNo", 0).strip()
비밀번호 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "Pwd", 0).strip()
종목코드값 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "IsuCodeVal", 0).strip()
조회시작일자 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "QrySrtDt", 0).strip()
조회종료일자 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "QryEndDt", 0).strip()
당일구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "ThdayTpCode", 0).strip()
주문상태코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "OrdStatCode", 0).strip()
매매구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "BnsTpCode", 0).strip()
조회구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "QryTpCode", 0).strip()
주문유형코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "OrdPtnCode", 0).strip()
해외파생선물옵션구분코드 = self.ActiveX.GetFieldData(self.OUTBLOCK1, "OvrsDrvtFnoTpCode", 0).strip()
lst = [레코드갯수,계좌번호,비밀번호,종목코드값,조회시작일자,조회종료일자,당일구분코드,주문상태코드,매매구분코드,조회구분코드,주문유형코드,해외파생선물옵션구분코드]
result.append(lst)
columns = ['레코드갯수','계좌번호','비밀번호','종목코드값','조회시작일자','조회종료일자','당일구분코드','주문상태코드','매매구분코드','조회구분코드','주문유형코드','해외파생선물옵션구분코드']
df = DataFrame(data=result, columns=columns)
result = []
nCount = self.ActiveX.GetBlockCount(self.OUTBLOCK2)
for i in range(nCount):
주문일자 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OrdDt", i).strip()
해외선물주문번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsFutsOrdNo", i).strip()
해외선물원주문번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsFutsOrgOrdNo", i).strip()
FCM주문번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "FcmOrdNo", i).strip()
해외선물체결번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "OvrsFutsExecNo", i).strip()
FCM계좌번호 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "FcmAcntNo", i).strip()
종목코드값 = self.ActiveX.GetFieldData(self.OUTBLOCK2, "IsuCodeVal", i).strip()
| |
<gh_stars>1-10
#!/usr/bin/env python3
"""
=======================================
= Twitter cleaner =
= https://twitter.com/telepathics =
=======================================
"""
import json
import random
import time
from datetime import datetime
import gspread
import pytz
from dateutil.relativedelta import relativedelta
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import client, file, tools
from oauth2client.service_account import ServiceAccountCredentials
from twitter import OAuth, Twitter
from g import GSPREAD_SCOPES, ROW_OFFSET, SCOPES, SHEET_NAME, SPREADSHEET_ID
from picker import Picker
from t import (ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET, AUTH_SCREEN_NAME,
CONSUMER_KEY, CONSUMER_SECRET, DM_MSG, MIN_FAVS)
STARTC = '\033[90m'  # ANSI escape: bright-black (grey) foreground, used to dim status text
ENDC = '\033[0m'  # ANSI escape: reset terminal attributes
TimeZoneInfo = pytz.UTC  # all datetimes in this script are normalized to UTC
SHEET_LINK = 'https://docs.google.com/spreadsheets/d/'+SPREADSHEET_ID+'/edit#gid=0'  # human-facing URL of the tracking spreadsheet
CANCEL_OPTIONS = ('no', 'n', 'exit', 'e', 'quit', 'q')  # any of these answers aborts a prompt
TIME_FORMATTING = "%a %b %d %H:%M:%S %z %Y"  # Twitter created_at format, e.g. "Wed Oct 10 20:19:24 +0000 2018"
"""
=============================================
= Public Helper Functions =
=============================================
"""
def sleep_overlay(prev_text='', sleepy=None):
    """Sleep for ``sleepy + 1`` seconds while printing a live countdown overlay.

    Parameters
    ----------
    prev_text : str
        Status text shown next to the countdown.
    sleepy : int or None
        Countdown start value. When None (the default), a fresh random value
        in [1, 7] is drawn *per call*. (The previous default,
        ``sleepy=random.randrange(1, 8)``, was evaluated once at import time,
        so every no-argument call slept the exact same "random" duration.)

    Returns
    -------
    bool
        Always True, so callers can use the call as a completion flag.
    """
    if sleepy is None:
        sleepy = random.randrange(1, 8)
    countdown = sleepy
    for _ in range(sleepy + 1):
        # \r + ljust keeps the countdown on a single, self-overwriting line.
        print('\r0{0} {1}'.format(countdown, prev_text).ljust(
            30)+'\r', end='', flush=True)
        countdown -= 1
        time.sleep(1)
    return True
def display_error(e, location):
    """Print a framed error report naming the failing location, plus the sheet link."""
    divider = "\n-----------\n"
    print(divider)
    print("ERROR in " + location + ":")
    print(STARTC)
    print(e)
    print("\n" + SHEET_LINK + ENDC)
    print(divider)
def max_request_limit_warning(sleepy):
    """Show a dimmed rate-limit warning while counting down ``sleepy`` seconds."""
    warning_text = STARTC + 'MAX_REQUEST Limit reached. Please wait...' + ENDC
    return sleep_overlay(warning_text, sleepy)
def g_auth():
    """Authenticate against Google Sheets.

    Returns a ``(service, sheet)`` pair: the Sheets v4 API service built from
    OAuth user credentials cached in ``token.json`` (running the interactive
    flow if absent/invalid), and the gspread spreadsheet handle opened with
    service-account credentials.
    """
    token_store = file.Storage('token.json')
    credentials = token_store.get()
    if not credentials or credentials.invalid:
        oauth_flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
        credentials = tools.run_flow(oauth_flow, token_store)
    service = build('sheets', 'v4', http=credentials.authorize(Http()))
    service_creds = ServiceAccountCredentials.from_json_keyfile_name(
        'service_credentials.json', GSPREAD_SCOPES)
    sheet = gspread.authorize(service_creds).open(SHEET_NAME)
    return service, sheet
def get_old_tweets(years_ago):
    """Load tweets from ``tweet.json`` that are older than ``years_ago`` years.

    Parameters
    ----------
    years_ago : int
        Age threshold in years; 0 selects every parseable tweet.

    Returns
    -------
    tuple[list[dict], datetime or None]
        The matching tweet dicts, and the ``created_at`` of the *last* tweet
        parsed — None when the archive held no parseable tweets (previously
        this raised UnboundLocalError).
    """
    past_time = datetime.now() - relativedelta(years=years_ago)
    old_tweets = []
    # fix: initialize so an empty/unparseable archive no longer raises
    # UnboundLocalError on the return statement.
    created_at = None
    with open('tweet.json', 'r', encoding='utf-8') as tweets:
        tweetlist = json.load(tweets)
    for tweet in tweetlist:
        tweet = tweet["tweet"]
        try:
            created_at = datetime.strptime(
                tweet["created_at"], TIME_FORMATTING)
            old_enough = created_at.replace(
                tzinfo=TimeZoneInfo) < past_time.replace(tzinfo=TimeZoneInfo)
            if old_enough:
                old_tweets.append(tweet)
        except Exception as e:
            # Best-effort: report the malformed entry and keep scanning.
            display_error(e, 'AccountHandler.get_old_tweets')
            continue
    return old_tweets, created_at
def check_recent_mentions(values):
    """Warn if the IFTTT mention-logging applet looks stalled.

    Scans the date column of the mentions sheet; when no mention was logged
    within the last week, the user is asked whether to continue anyway.

    Parameters
    ----------
    values : list[list[str]]
        Sheet rows whose first element is an ``MM/DD/YYYY`` date string.

    Returns
    -------
    bool
        False when the user chose to abort, True otherwise.
    """
    cont = True
    recent_mentions = False
    # NOTE(review): days=6 makes this a 6-day window despite the "7 days"
    # wording — confirm the intended window before changing.
    last_week = (datetime.now() - relativedelta(days=6)
                 ).replace(tzinfo=TimeZoneInfo)
    for datecol in values:
        udatetime = datetime.strptime(
            datecol[0], "%m/%d/%Y").replace(tzinfo=TimeZoneInfo)
        if udatetime > last_week:
            recent_mentions = True
            break  # one recent mention is enough; no need to scan the rest
    if not recent_mentions:
        print('Please double-check that IFTTT is running the applet.')
        answ = input('Continue? (Y/N): ')
        if answ.lower() in CANCEL_OPTIONS:
            cont = False
    return cont
class AccountHandler:
    """
    =============================================
    =          Account Handler Class            =
    =============================================

    Wrapper around the Twitter REST API (``twitter`` package) that tracks
    rate limits and performs the cleanup operations: deleting old or
    low-engagement tweets, unfollowing, list management and DMs.
    """
    def __init__(self):
        # OAuth1 user-context client; credentials come from t.py.
        self.t = Twitter(auth=OAuth(ACCESS_TOKEN_KEY,
                                    ACCESS_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET))
        self.feed = []
        self.friends = []
        # Cached snapshot of Twitter's rate-limit buckets, refreshed lazily
        # by check_rate_limit().
        self.resources = self.t.application.rate_limit_status()['resources']
    def check_rate_limit(self):
        """Return the rate-limit snapshot, refreshing it once the reset time passed."""
        t = self.t
        resources = self.resources
        # Epoch second at which Twitter resets this bucket's counters.
        reset_time = self.resources['application']['/application/rate_limit_status']['reset']
        curr_epoch = int(time.time())
        if reset_time < curr_epoch:
            # Cached snapshot is stale — fetch fresh counters from the API.
            resources = t.application.rate_limit_status()['resources']
            self.resources = resources
        return resources
    def update_t_rate_limit(self, resource, path):
        """Wait (via a countdown overlay) while the given bucket is exhausted."""
        rate_limit = self.check_rate_limit()[resource][path]
        warning_complete = False
        while rate_limit['remaining'] <= 1 and not warning_complete:
            # NOTE(review): ``now - reset`` is negative until the reset time,
            # which makes sleep_overlay() return immediately without sleeping;
            # ``reset - now`` (seconds until the bucket refills) looks like the
            # intended wait — confirm before changing.
            warning_complete = max_request_limit_warning(
                int(time.time()) - rate_limit['reset'])
        return warning_complete
    def get_twitter_friends(self, cursor):
        """Fetch one page (200) of accounts and cache it on ``self.friends``."""
        # NOTE(review): friends.list returns accounts this user FOLLOWS
        # ("friends"), not followers, despite the original wording.
        t = self.t
        friends = t.friends.list(
            count=200, skip_status=True, include_user_entities=False, cursor=cursor)
        self.update_t_rate_limit('friends', '/friends/list')
        self.friends = friends
        return friends
    def get_twitter_lists(self):
        """Return up to 25 lists owned by the authenticated account."""
        t = self.t
        owned_lists = t.lists.ownerships(
            count=25, screen_name=AUTH_SCREEN_NAME)["lists"]
        self.update_t_rate_limit('lists', '/lists/ownerships')
        return owned_lists
    def get_twitter_list_members(self, list_id):
        """Return the members (up to 5000) of the given owned list."""
        t = self.t
        screen_names = t.lists.members(
            list_id=list_id, count=5000, include_entities=False, skip_status=True)
        self.update_t_rate_limit('lists', '/lists/members')
        return screen_names
    def send_direct_message(self, uscreen_name):
        """Send the configured DM_MSG to the given user; returns the API response."""
        t = self.t
        sent = t.direct_messages.events.new(
            _json={
                "event": {
                    "type": "message_create",
                    "message_create": {
                        "target": {
                            # Resolve the screen name to a numeric user id first.
                            "recipient_id": t.users.show(screen_name=uscreen_name)["id"]},
                        "message_data": {
                            "text": DM_MSG}
                    }
                }
            }
        )
        self.update_t_rate_limit(
            'direct_messages', '/direct_messages/sent_and_received')
        return sent
    def delete_archived_tweets(self):
        """Delete archived tweets older than 2 years with fewer than MIN_FAVS favorites."""
        t = self.t
        old_tweets, created_at = get_old_tweets(2)
        for tweet in old_tweets:
            # favorite_count comes from the JSON archive as a string.
            if int(tweet['favorite_count']) < MIN_FAVS:
                try:
                    tweet_id = tweet['id_str']
                    t.statuses.destroy(_id=tweet_id)
                    print(tweet['full_text'])
                    # NOTE(review): ``created_at`` is the timestamp of the LAST
                    # tweet parsed by get_old_tweets, not this tweet's — the
                    # printed date can be wrong for earlier tweets; confirm.
                    print('DELETED ' + tweet_id +
                          ' (' + created_at.strftime(TIME_FORMATTING) + ')')
                    print('* ' + tweet['favorite_count'] + ' favorites\n')
                    # TODO figure out more accurate rate limit
                    sleep_overlay(
                        STARTC + 'Looking for next archived tweet...' + ENDC)
                except Exception as e:
                    display_error(e, 'AccountHandler.delete_archived_tweets')
                    continue
        return True
    def delete_tweets_without_interactions(self):
        """Delete archived tweets with zero favorites and zero retweets."""
        t = self.t
        old_tweets, created_at = get_old_tweets(0)
        for tweet in old_tweets:
            try:
                # check if there are interactions
                interactions = int(tweet["favorite_count"]) + \
                    int(tweet["retweet_count"])
                if interactions == 0:
                    t.statuses.destroy(_id=tweet["id_str"])
                    print(tweet['full_text'] +
                          ' (' + str(interactions) + ' interactions) ')
                    # NOTE(review): same caveat as delete_archived_tweets —
                    # ``created_at`` belongs to the last tweet parsed.
                    print('DELETED ' + tweet['id_str'] + ' (' +
                          created_at.strftime(TIME_FORMATTING) + ')\n')
                    # TODO figure out more accurate rate limit
                    sleep_overlay(STARTC + 'Looking for next tweet...' + ENDC)
            except Exception as e:
                display_error(
                    e, 'AccountHandler.delete_tweets_without_interactions')
                continue
        return True
    def unfollow_twitter_user(self, uscreen_name):
        """Unfollow a user (unless protected) and drop them from all owned lists.

        Returns True when the unfollow happened, False otherwise.
        """
        t = self.t
        user = t.users.show(screen_name=uscreen_name)
        self.update_t_rate_limit('users', '/users/show/:id')
        if not user["protected"] and user["following"]:
            t.friendships.destroy(screen_name=uscreen_name)
            # TODO figure out more accurate rate limit
            sleep_overlay(STARTC + 'unfollowing ' + uscreen_name + ENDC)
            try:
                # Best effort: also remove the user from every owned list.
                lists = self.get_twitter_lists()
                for li in lists:
                    t.lists.members.destroy(
                        list_id=li["id"], screen_name=uscreen_name)
            except Exception as e:
                display_error(e, 'AccountHandler.unfollow_inactive_users')
            return True
        if user["following"]:
            # Still following: the only skip reason reported is "protected".
            no_unfollow_msg = ''
            no_unfollow_msg += 'Didn\'t unfollow https://twitter.com/' + uscreen_name
            if user["protected"]:
                no_unfollow_msg += STARTC + ' [user is protected]' + ENDC
            print(no_unfollow_msg)
        return False
    def add_users_to_list(self, screen_names, list_id, list_slug):
        """Add the given screen names to an owned list, 100 at a time."""
        t = self.t
        # can only add ~100 users to a list at a time.
        chunks = [screen_names[x:x+100]
                  for x in range(0, len(screen_names), 100)]
        for chunk in chunks:
            t.lists.members.create_all(
                list_id=list_id,
                slug=list_slug,
                owner_screen_name=AUTH_SCREEN_NAME,
                screen_name=chunk
            )
            # TODO figure out more accurate rate limit
            sleep_overlay(STARTC + 'Loading...' + ENDC)
            print("Added the following users to the list '" + list_slug + "'.")
            print(chunk)
        return True
class ExemptHandler:
"""
# ============================================
# = Exempt Handler Class =
# ============================================
"""
    def __init__(self, reset):
        """Set up Google auth and local quota bookkeeping.

        ``reset`` is the epoch second anchoring the current quota window.
        """
        self.rate_limit = {
            # sheets api: 100 requests/100 seconds/1 user
            "limit": 100,
            "remaining": 100,
            "reset": reset
        }
        # Interactive Google auth (may open a browser on first run).
        self.service, self.sheet = g_auth()
        # NOTE(review): get_category_users returns False for an empty sheet,
        # so ``allowlist`` may be False rather than a list — callers beware.
        self.allowlist = self.get_category_users('allowlist')
        self.categories = ['MENTIONS', 'LISTED']
def reset_g_rate_limit(self):
"""----------- Reset Rate Limit -----------"""
rate_limit = {
"limit": 100,
"remaining": 100,
"reset": time.time()
}
self.rate_limit = rate_limit
return rate_limit
    def update_g_rate_limit(self, minus_remaining=1):
        """Account for ``minus_remaining`` Sheets-API requests against the quota."""
        rate_limit = self.rate_limit
        # Seconds left in the 100-second quota window.
        time_left = 100 + rate_limit['reset'] - time.time()
        # NOTE(review): this only waits when the quota is exhausted AND the
        # window has already expired (time_left <= 0) — at which point the
        # quota should be fresh again, so the 100 s wait looks unnecessary,
        # while an exhausted quota with time_left > 0 never waits. The
        # condition may be inverted; confirm against the Sheets quota rules.
        if rate_limit['remaining'] <= 1 and time_left <= 0:
            max_request_limit_warning(100)
            self.reset_g_rate_limit()
        else:
            self.rate_limit['remaining'] -= minus_remaining
def get_category_users(self, category, col='A'):
"""----------- Get screen_names from specific category -----------"""
service = self.service
range_name = category.upper() + '!' + col + '2:' + col
values = self.get_result_values(service, range_name)
cat_users = [value[0] for value in values if value != []]
if not values:
return False
else:
return cat_users
def get_cell_value(self, category, cell_range):
"""----------- Get single cell value -----------"""
service = self.service
range_name = category.upper()+'!'+str(cell_range)
values = self.get_result_values(service, range_name)
if not values:
return False
else:
return values[0][0]
def get_next_cursor(self):
"""----------- Get next Twitter API cursor ------------"""
return self.get_cell_value('cursor', 'A2')
def get_cleanup_cursor(self):
"""----------- Get cleanup_cursor -----------"""
return self.get_cell_value('cursor', 'A3')
def get_duplicate_cursor(self):
"""----------- Get duplicate_cursor -----------"""
return self.get_cell_value('cursor', 'A4')
def overwrite_cell(self, value, category, cell_range):
"""----------- Overwrite cell in spreadsheet -----------"""
service = self.service
resource = {"values": [[value]]}
cat_range = category.upper()+"!"+cell_range
# delete old cursor
service.spreadsheets().values().clear(
spreadsheetId=SPREADSHEET_ID,
range=cat_range,
).execute()
self.update_g_rate_limit()
# overwrite
service.spreadsheets().values().append(
spreadsheetId=SPREADSHEET_ID,
range=cat_range,
body=resource,
valueInputOption="USER_ENTERED"
).execute()
self.update_g_rate_limit()
def overwrite_next_cursor(self, next_cursor):
"""----------- Replace next twitter API cursor -----------"""
return self.overwrite_cell(next_cursor, 'cursor', 'A2')
def overwrite_cleanup_cursor(self, uscreen_name):
"""----------- Replace next allowlist cleanup cursor -----------"""
return self.overwrite_cell(uscreen_name, 'cursor', 'A3')
def overwrite_duplicate_cursor(self, uscreen_name):
"""----------- Replace next mention cursor -----------"""
return self.overwrite_cell(uscreen_name, 'cursor', 'A4')
def add_users_to_category(self, category, screen_names):
"""----------- Add User to Category Spreadsheet -----------"""
service = self.service
resource = {"values": screen_names}
cat_range = category.upper() + "!A:A"
service.spreadsheets().values().append(
spreadsheetId=SPREADSHEET_ID,
range=cat_range,
body=resource,
valueInputOption="USER_ENTERED"
).execute()
self.update_g_rate_limit()
def remove_user_from_category(self, category, uscreen_name):
"""----------- Remove User from Category Spreadsheet -----------"""
removed = False
category_users = self.get_category_users(category)
if category_users and uscreen_name in category_users:
rows_to_remove = category_users.count(uscreen_name)
for _ in range(rows_to_remove):
category_users = self.get_category_users(category)
row_index = category_users.index(uscreen_name)
self.remove_row_from_category_spreadsheet(
category, row_index + ROW_OFFSET)
removed = True
if removed:
print(STARTC + "Removed " + uscreen_name +
" from " + category + ENDC)
return True
def add_error_to_user_in_categories(self, uscreen_name):
"""----------- Add ERROR to user in all categories -----------"""
for category in self.categories:
cat_users = self.get_category_users(category)
if uscreen_name in cat_users:
self.overwrite_cell('error', category, ('D' if category.lower(
) == 'mentions' else 'B') + str(cat_users.index(uscreen_name) + ROW_OFFSET))
def remove_row_from_category_spreadsheet(self, | |
(72)
iciuuh (63) -> gslii, fqaefy, hwuwj
xifaq (38)
oqdukh (56)
hasyyr (24)
khzbxke (174) -> ftkflbo, tracdgp
zoryb (73) -> eusnn, exqkey
ebnqn (9)
pncxkcd (156) -> izpxjp, myckhlw
qqmlvk (96)
qpxgye (93)
qybit (79)
stbgj (10)
smsoi (53)
zlpxr (136) -> iuauic, dznlyl
wfwbq (42)
frcqooy (69)
zxrmy (70)
mxvaxl (60)
wwzli (16)
jpnhog (8)
pablb (107) -> muopf, oekhu, thzqscd
mxgyt (51)
xkzlyvh (80)
fkrog (46)
qcntm (13) -> lvcsn, kpqwup, gosgwz, moiqr
wzybp (73)
urkya (70)
lqbnqnl (91)
sycbr (76) -> mutwmaa, qmpncds, bfroq, khsdxn
alzvh (2766) -> gtpyffj, fzgto, uewchd
bwaye (245) -> rveuz, rcumpgx
wfdkce (50)
xmbujw (94) -> jguufio, shyxzeh, pjxauhw, ytirlv
jrkbay (75)
knrtys (63)
ubjuhxp (51)
urbkrn (416) -> seblnmz, kgfhvte
urfhlf (9)
wyohotq (145) -> vlbsr, oqdukh, ssyfvrj
lurhq (1328) -> cibzqvo, sycbr, ruabtb
aqaems (26)
ysakcag (82)
hydeeqe (97) -> jutgp, wxudgdd, bqtxl
ecanv (17) -> dxloi, whaax
bmyahis (55) -> qpxgye, ajkztpj, ywkpvle
qlxgnl (7)
tdwxgv (22)
qdyzunw (182) -> stbgj, loljfo
ftxif (57)
xminmbj (69)
cvwsj (86) -> stdfrj, fvjcbib, ixroc, ksqrqx
vfxqcbq (84) -> vlgiuef, dojviv
cpmpyvq (19)
frikrv (36) -> yngqiv, wsoahhs, pncxkcd, gpqhjv, zlpxr, fdumfc, kwavcf
ikqijv (429) -> jcqhl, nwxyrtn, vjxldb, mmtzk, gnffg
wuipwl (18)
latmzh (92)
uyrxc (60)
pyoma (1498) -> oicysoz, qsbcfm, eepdcje, fhdqcbx, zzcagx, dvipr
sttus (85)
essvr (61)
pibqmuz (19) -> kgvbcq, wuegrv
xfurlrr (14)
cmgvs (77)
qkluh (73) -> weunegg, ziiijv
nqylf (225) -> bvenb, njozyy, amxjb, stujlz
hfymm (14) -> uakcbox, kbmse, qvyrm
mernsee (51)
nnyago (76) -> mxvaxl, uwrfay
brlzr (15)
jpwwg (65)
muopf (40)
oxzmr (360) -> ydowp, nnnkplx, grlofut
xpvukx (86)
erbssqe (66)
irhslmm (33)
yiteow (83)
kthnd (77) -> zbmwego, pvliyn, svsjobu, bzfew
uzzeydh (53)
stdfrj (98)
fblafmm (11)
qpjzx (27)
gjqwe (313)
uizop (48)
tzvld (393)
iqler (88) -> vnelda, ujpgmwm
xljycl (41)
rdomgf (10) -> sggimr, jegoty, owcwvug, tcqnso
fnzskbi (42)
vkleczw (93) -> uxzwwe, cupeghk
tdhxy (95)
iiiof (38)
qvyrm (68)
eogzpga (44)
naxce (126) -> inija, cvvegrx
hxfvc (18) -> lqbnqnl, qguphyk
vwosnfn (293) -> umnkslz, rlgbvvj
khuess (49)
akdffhk (78)
kriwb (19) -> ziayhmx, zouujb, kcqgcgl
bmqhrtg (11)
dbuqsj (92)
aftzw (81)
nrvsqf (5)
emhxtcg (534) -> uktpcar, yjoyyvl, blwhz, bvbtxh, jkoyyzg, cdnwq
kcqgcgl (52)
fzgto (67) -> qcntm, tzvld, yohxzv, ttmnf, hbtur
ymxwlpc (92) -> ngjngv, jcdmhvd, nqylf, ecytwe, iciuuh
alkizae (61)
emkzxk (257) -> krfqcf, sybpjci
sejkvk (203) -> kojry, tdwxgv
yunpioi (33)
qmnji (89)
dpyxxkv (162) -> dtkdwp, rehst
vsfgg (12529) -> mtrde, xpzpv, etwpt, dvncmy
ttmnf (239) -> qrzjlw, gwudp
qdkrr (10)
okpdcz (51)
zsigmj (69) -> eazvkxv, ticdma
oaopxo (2220) -> jeopf, wphtnvm
uxnwtp (82)
kodrhj (90)
tflwng (93)
jzpdbe (29)
spgwj (172)
wmukud (34)
geztlsi (92)
kpqwup (95)
jcbgta (23)
ksfss (18)
zuquy (75)
qozmho (146) -> btrepl, gljkci, parvx, onwgja
vlsjsgg (80)
bdwbjzh (38)
lvdff (191) -> wqokqz, zyaab
mvgmbru (98)
krolgpf (27)
ixreefh (108) -> auqyep, jgnklq
ivlac (48)
nhteaei (70)
pwhmf (204) -> iujytuv, iieico
dpoat (79)
ubvheu (72)
ygiyze (75)
atqewc (16)
sawpop (92) -> piyqfs, wfamtnm
uwylbft (93)
yqnso (27)
ccjtpv (158) -> oipqc, vxlypal
hkweu (66)
gfasdd (11)
jsuak (24)
vugavk (38)
duzgl (7)
odxyfb (686) -> biglt, irjov, pibqmuz
umxurvd (54)
qgcvk (881) -> oizmgt, apqby, ecanv
jphmpr (91)
cvrqij (84)
blwhz (23) -> cihzh, sttus
guiej (70)
wjaocmd (82)
ruvfo (50) -> dlhyzjw, skdylgb
ksqrqx (98)
pwhplm (61)
nfhaii (6)
wfamtnm (58)
qbzeji (317)
okxucaz (54)
nzvpgv (290) -> kwmao, ofgeu
jlwzgg (61)
bsqilyq (82)
gosgwz (95)
syxkp (79)
epydl (42)
fhhrji (186) -> faudqy, bxcdldt
evkrnfo (38)
mulybo (75)
xvdkt (44)
qdpnnex (120) -> nwyulno, uxbjym
nnxqmb (297)
urqox (57)
dphmg (6)
wymlvix (64)
yfnlhlh (57)
oykqvif (52)
wexhnbh (196)
gzctkm (40)
vcwezm (82)
vjxldb (169)
ufrrrzi (31)
gatjls (152) -> gkxebo, xmogef
zujrs (72)
qidmgsc (301) -> hpgwq, szvty
oboju (147) -> cprbzvp, xbytmo
wdcltyl (32)
xsmcfs (95)
mnfep (63)
ajxsrs (296) -> vwslb, wmukud
vksnzty (97)
ccnpjdf (9)
cciwr (22)
inknun (149) -> mfutay, nzdbs
ckzuyha (85)
nkdxy (225) -> ccqldu, fkrog
sfohocq (13) -> fkjof, gcrbrn
aefqoq (84)
pknqf (246)
xujmi (68)
olofh (164) -> uqtiwgu, xsmcfs
fqaefy (62)
thneux (88) -> choeijs, lodcj
swujbg (36)
zfmas (70)
dojviv (72)
stujlz (6)
shyxzeh (61)
bzublv (64)
jubtls (68)
aideelj (53)
jieijuo (9)
iqpcu (94)
xiwnu (12)
grrxl (84)
sftqzaf (43) -> qybit, dpoat
bqoptf (74)
alaug (75)
sawkpue (271) -> modms, ssrqm, ndgzf, sqvxf
bnjwfrz (63)
xklrgp (149) -> yfrmup, nfhaii, uspmxw
jbarvch (275) -> qtqcv, prvldv
pmgngjy (74)
qitnlrh (70)
sbfiv (44)
bijprk (8)
sktewq (253) -> fxfznc, venvbai, rxgry
gcdzhfy (70)
zyzet (19)
zffpxgw (217) -> hpcmne, ajiubh
ajiubh (37)
ywmif (47)
egzpjym (202) -> hasyyr, vgjgz
jopcvyb (448) -> tgwgno, uyhlbf, rdomgf, yrozpm, qnscqms, ubhlb
jkoyyzg (79) -> urqox, yfnlhlh
auciw (18)
prlhyb (7)
jtqijee (171) -> auciw, awqgdfk, uceyfx, awjwff
dxloi (90)
gxpuofv (65) -> cxptwu, zrwnma, mxgyt
moiqr (95)
rfkcylj (16) -> ckypedd, xjufehm, nbkzvz
rkwkwd (97) -> dfzwcc, pmgngjy, ejnqcb
fkjlcz (177) -> mjrog, qmnji
cider (27)
cfiyqh (6)
ywkpvle (93)
nbkzvz (61)
ndszsz (71)
kwzgwt (83)
rgxwwc (27)
prvldv (6)
iotkxfd (8)
mfutay (50)
vgkltqq (16) -> wzxyc, uodhr, xsjbc
dehjjod (82)
yyyub (74)
dfwpczh (16) -> bkjppqe, yuhmnjs
zejjjo (109) -> aoipta, bbbms
zrwnma (51)
bwiqe (98) -> dtburx, bpgdb, fhhrji, hfymm, tgqwgb, gxpuofv
ktcdxoo (973) -> efobdsd, jtmps, zmjqiv
tvinpl (220) -> iglmzsi, zujrs
dkkkntv (78)
qwlze (163) -> nvtyg, hzywspw
bzbvwgh (7) -> kbwpk, czuwoe, tpekc, qidmgsc, hagrq
brghw (48)
kojry (22)
cstmu (8193) -> mkrjamh, vycgaoz, ruvfo, sawpop
kjmse (87)
umutl (36) -> grtrt, tjaqrw
olrdlw (67) -> fxqng, irvolb
emrbwzz (7)
iyxsjhc (80)
ueasp (94)
jdhewio (390) -> qpden, olspso
szszezd (909) -> tvcxl, mdnqs, waxkga, gyjvtjm, ytbyct
egdrn (74) -> fdzqz, urkya, nhteaei, xjypx
eoaoxqm (753) -> khzbxke, pwhmf, kapxuq
uyhlbf (350)
qkmkwgl (68) -> tyywgp, xjhvuc
hbwec (56)
mfbnys (40)
jegoty (85)
ukaflr (99)
fmwvok (1044) -> cksiby, uravet, wexhnbh, nnyago, thneux
rjayavy (7)
xybiuiq (53) -> egrimlf, cdvhmy, hqwkq, wfwbq
xlanm (10383) -> bwmrksn, fimqsn, naxce, zjtjt
tumwln (348) -> qgvqg, sawkpue, tcvgl, zqnjd
onamlmy (195) -> kzxyyd, yiqnfd
gljkci (15)
vjevs (35)
vuzxr (82)
ccuehkp (93)
jtmps (56) -> wixoky, vmsmdqx, lxbsif
xvwio (72)
thzqscd (40)
yuiicxp (56)
xrkqv (137) -> xgihtsx, iunkpe
yacmhtk (19)
mcjlm (575) -> qkluh, bsfgu, ndhitxf, jtqijee, ifgfg, wvhvw
bmfhjo (170) -> bznst, xcgwl
wrwgm (11)
kwxpnrs (132) -> rmnkgss, cciwr
mmndzs (75)
dkjlges (80)
mpnnffx (142) -> wayftw, epaaasn
xsfefyb (674) -> xybiuiq, jnzqvia, eywfmfs
zukio (75)
npjcdl (68)
mikast (17)
rbauxvx (6)
ipxns (75)
frezf (95)
rxflxr (82)
ejzsbli (237) -> djsss, rkhyewd
ullvx (79)
cxetjr (1657) -> fujwb, nnxqmb, kthnd
vmpmig (15)
kgqgct (13)
fupwuk (49)
mctsts (17)
pozpmyh (19)
dfrcge (27)
ywhjz (482) -> exesam, odbyxq, xghhu, xmbujw
ytirlv (61)
eajmafa (81)
dxavw (16)
rswqvx (249)
pwfdn (40)
cayigxg (24)
doppnr (201)
rehst (42)
rxezd (318) -> sohuv, ksfss
hfhswyd (85) -> tqyldbt, tridq
aoipta (56)
tpmhs (61)
zkvopb (23) -> nxqzuy, jpwwg
aceqrlu (81)
utispep (65)
zmtfcll (70)
sfruur (62) -> twbxgv, evows, xwwzp, vsfgg, zsasjr
utsfqn (41)
tdknh (69)
fwocscd (158) -> krolgpf, gbmfzzu
zouujb (52)
mvdzfo (47)
wfjks (83) -> ekleund, kikntw, jubtls, npjcdl
bvbtxh (136) -> zyzet, pflvdx, ontrln
obzomb (23) -> ullvx, acfmyv, mzywzs
khupkt (63041) -> alzvh, sjhwgq, lyxzfqz
zhrsp (52)
vxruziw (439) -> tpvzavf, xrkqv, qvndh, xklrgp, cwixiq
svcafxb (423) -> jbarvch, mawwtx, stkcndh
dhejgd (82) -> tzhoqw, cvrqij, xokmont
erfldtn (82)
predg (84)
waxkga (169) -> rgxwwc, yriibfp
kvpsfgk (17)
tlmxygb (70) -> rvyqbq, olofh, egdrn, pkudedc, rxezd, csnhv, jrgougl
nzhnurq (114) -> zwxwfin, lwvcp
fzvdchs (59)
rsqihar (23)
vtvsd (114) -> cnsbonb, ajhizdz
zykcrir (60)
ockzlpp (43) -> chxsxbr, vbvug
fdumfc (134) -> lninq, ykowao
nqjuj (19)
akzcjd (63)
qhrxvzj (52)
kpdmptf (48)
gtpew (23) -> iyxsjhc, fwvvidu, mdshsxt, yzvqo
stkcndh (183) -> cohps, legrbb
nbrfpbr (872) -> iedrlkp, rtyegs, hxfvc
nxqzuy (65)
ojrjqv (85)
egcjfjo (41)
mlfxnna (51)
nhbbd (62)
ykxnd (88)
bwfqr (89)
wuvmf (5)
kmbrqv (13)
uwrhuw (88)
oaqvt (50)
wqzic (42)
yxsaf (80)
cvvegrx (25)
faudqy (16)
qeiijk (188) -> jiaho, qlxgnl
moqped (94)
zmjqiv (94) -> jwjrj, bwfqr
jdxfsa (1869) -> lugwl, ckcjr, umutl
wsmmlrl (70)
hjtkb (9)
wdjcb (16)
jlgnsu (65) -> wxvuup, ilvblzm, ckzuyha, cbbtbz
pjsbxvk (770) -> dfwpczh, gsmocx, fwocscd
iiwwr (284) -> hmnvh, vksnzty
sysvory (145) -> udpcyt, yrdhq, ealyedu, kmufvuk
wvdoa (95)
dcqinx (36)
rugnh (52)
uspmxw (6)
ajhizdz (64)
onkhx (65)
gbmfzzu (27)
zjtjt (14) -> xqshwdz, khjag
pzlhzky (48)
vonesxw (19)
aiwvi (242) -> walkm, ixreefh, gatjls, spgwj, kjdjdr, ybnehoi
pflvdx (19)
rvyqbq (214) -> bxqjg, sxzpils
bdmah (57)
tgqwgb (78) -> zxrmy, qvmbysw
dbnoug (31) -> xnvdqbe, mtowovz, bzbncmc
hclea (96)
sggimr (85)
weithhz (56)
mzywzs (79)
vfykuuv (87)
zwxxii (91)
etlce (14)
ixomyeb (24)
xqshwdz (81)
exesam (300) -> fhjzpjw, qxjknv
djimr (1615) -> ltgnnr, kjmse
cadasac (35)
pyvwl (78)
hdikur (11)
bkfav (57)
nrduy (96)
uqpprye (82)
kmrmxw (29)
xjhvuc (57)
aflcmu (407) -> ezycquw, kaecmob
utamsb (75) -> llrrzrn, erbssqe
bavaf (18)
umhjck (87) -> zepjndx, bdmah
ngjngv (123) -> jqnnot, bnjwfrz
kcjwb (61)
cibzqvo (178) -> dfrcge, uuixxr
gjgwki (78)
rgelh (66) -> tmigcw, jmzvi, tjslk, blpjnzf, qzobjfo, rcmyfr, bwaye
uceyfx (18)
epaaasn (17)
biglt (73) -> rihlj, odoxr
bsfgu (215) -> iinxvw, sgpiiir
hatmr (61)
txtdw (8)
fdaqst (25)
dxavaum (18)
ticdma (66)
plkmpm (78) -> bieok, grrxl
ywzai (152) -> khuess, vbauj
mdshsxt (80)
weguko (91) -> iuwwa, epydl
kaecmob (10)
psmztc (11)
qzayss (31)
tbdhh (104) -> sfwaimh, orihc, jsuak
wxvuup (85)
mhpxyvm (734) -> xdkya, zsigmj, umhjck
oqqme (17)
judfhye (25) -> brlzr, dxffr
qsbcfm (37) -> fqayb, xminmbj
bvknwgq (61)
hwuwj (62)
zgoinf (345) -> xljycl, ghfcfj
vbvxs (31) -> akzcjd, vnynj, eanoucd, txxfz
mqlgoq (86)
csmul (75) -> wfdkce, oaqvt
fzzlnwi (63) -> vgkltqq, vbvxs, lfwru
ywegjjk (6058) -> rgelh, sfizdf, mcjlm, ztjquwi
gyjvtjm (179) -> paynlu, nfklqi
ecfrqpl (133) -> okniwp, njqemt
xcgwl (20)
nsxrvpg (313) -> nqtowev, fblafmm
iwiaa (188) -> ztfhz, txtdw, jpnhog
nxpozme (16)
dxpxept (36)
tbaai (25) -> vrqbi, jjdmu
imxro (58)
onwgja (15)
gnqwwbt (75)
nbjpcc (88)
ontrln (19)
jxpixqi (14)
llrrzrn (66)
hdmcpq (131) -> qitnlrh, wsmmlrl
vlgiuef (72)
bieok (84)
hpcmne (37)
qxjknv (19)
qguphyk (91)
aukjf (179) -> okxucaz, otbxl
vggstn (23)
vlmzodk (66)
bqtxl (84)
vilskw (181) -> vahnoue, wqzic
wzxyc (89)
upsmxqn (470) -> bmyahis, xanbjlw, twazofk
fwohcn (30)
zknpmnc (53)
afelswv (52)
kapxuq (82) -> wzybp, tdstv
awgjkiv (71)
vaeeqvi (17)
ydqgvnf (110) -> flejt, dxpxept
cdcvlb (27)
uqtiwgu (95)
jaerxmv (38)
mxtxm (90) -> tdknh, pjbwq
wvtac (60)
ziprwty (71) -> pshwly, jwojmds, tgsicj
rhjxbx (9) -> mhdkdt, kwzgwt
pqlwekx (87)
rpxmv (35)
yenln (12) -> wklmops, ulehitr, dbnoug, wyohotq, gjqwe
skdylgb (79)
brnhg (94)
qhldpg (74)
srsenpj (84)
whaax (90)
etjste (205)
ntcddy (8) -> qekpce, gjgwki, dkkkntv
ljfvbjd (57)
irjov (61) -> bzdgxs, zkbeehu
bvenb (6)
tlkhxww (91)
icnav (41)
myckhlw (64)
iglmzsi (72)
oztasey (158) -> dxddn, foibc
oakbrw (11)
hawtu (54)
zkbeehu (78)
hzzxjxa (47)
felrwte (64)
ogyiesm (5127) -> eoaoxqm, lptmne, titze
ojqqw (52) -> yxsaf, zfshwt
odbyxq (338)
gkxebo (10)
bvcghhs (29)
mvamo (49)
wemjk (64)
olspso (7)
uasqon (97)
liwlcz (728) -> | |
<gh_stars>1-10
# ====================
# Imports
# ====================
# Standard
import asyncio
import math
import time
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
# Community
from discord import Member, Role
from discord.enums import HypeSquadHouse
from discord.errors import Forbidden
from Classes.errors import MemberNotFoundError
from Classes.extra_functions import ts_print as print
class Officer:
def __init__(self, user_id, bot):
self.bot = bot
self.member = bot.get_guild(bot.settings["Server_ID"]).get_member(user_id)
if self.member == None:
raise MemberNotFoundError()
self._on_duty_start_time = None
self.is_on_duty = False
self.squad = None
def go_on_duty(self):
# Print an error if the user is going on duty even though he is already on duty
if self.is_on_duty is True:
print(
"WARNING: A user is going on duty even though he is already on duty..."
)
return
# Start counting the officers time
self._on_duty_start_time = time.time()
self.is_on_duty = True
print(
f"{self.discord_name} is going on duty in {self.member.voice.channel.name}"
)
self.squad = self.member.voice.channel
def update_squad(self):
# Print an error if the user is going on duty even though he is already on duty
if not self.is_on_duty:
print("WARNING: Tried to update squad for a user not on duty...")
return
print(f"{self.discord_name} is moving to {self.member.voice.channel.name}")
self.squad = self.member.voice.channel
async def go_off_duty(self):
print(f"{self.discord_name} is going off duty")
# Print an error if the user is going off duty even though he is already off duty
if self.is_on_duty is False:
print("WARNING: A user is going off duty even though he isn't on duty...")
return
# Calculate the on duty time and store it
await self.log_time(self._on_duty_start_time, time.time())
# Set the variables
self._on_duty_start_time = None
self.is_on_duty = False
self.squad = None
async def remove(self, reason=None):
# Remove itself
display_name = self.member.display_name
await self.bot.officer_manager.remove_officer(
self.id, reason=reason, display_name=display_name
)
    async def process_loa(self, message):
        """Validate a Leave-of-Absence request message and record it.

        Expected message format:
            "<day>/<month name>/<year> - <day>/<month name>/<year>: <reason>"

        On any formatting or range problem the author is notified with an
        auto-deleting reply and the request message is deleted; on success
        the LOA is saved and the message receives a checkmark reaction.
        """
        try:
            # Split "<start> - <end>: <reason>" into raw day/month/year parts.
            date_range = message.content.split(":")[0]
            date_a = date_range.split("-")[0]
            date_b = date_range.split("-")[1]
            date_start = ["", "", ""]
            date_end = ["", "", ""]
            date_start[0] = date_a.split("/")[0].strip()
            date_start[1] = date_a.split("/")[1].strip()
            date_start[2] = date_a.split("/")[2].strip()
            date_end[0] = date_b.split("/")[0].strip()
            date_end[1] = date_b.split("/")[1].strip()
            date_end[2] = date_b.split("/")[2].strip()
            reason = message.content.split(":")[1].strip()
            months = {
                "JAN": 1,
                "FEB": 2,
                "MAR": 3,
                "APR": 4,
                "MAY": 5,
                "JUN": 6,
                "JUL": 7,
                "AUG": 8,
                "SEP": 9,
                "OCT": 10,
                "NOV": 11,
                "DEC": 12,
            }
            # Ensure day is numeric (int() raises ValueError otherwise)
            int(date_start[0])
            int(date_end[0])
            # Ensure year is numeric
            int(date_start[2])
            int(date_end[2])
            # Map the month name (first 3 letters, case-insensitive) to its number
            date_start[1] = date_start[1].upper()[0:3]
            date_start[1] = months[date_start[1]]
            date_end[1] = date_end[1].upper()[0:3]
            date_end[1] = months[date_end[1]]
        except (TypeError, ValueError, KeyError, IndexError):
            # If any of that failed, let the user know with an auto-deleting message
            await message.channel.send(
                message.author.mention
                + " Please use correct formatting: 21/July/2020 - 21/August/2020: Reason.",
                delete_after=10,
            )
            await message.delete()
            return
        date_start = [int(i) for i in date_start]
        date_end = [int(i) for i in date_end]
        if (
            date_start[1] < 1
            or date_start[1] > 12
            or date_end[1] < 1
            or date_end[1] > 12
        ):
            # Month must be in 1-12 (defensive; the dict lookup already limits this)
            await message.channel.send(
                message.author.mention + " There are only 12 months in a year.",
                delete_after=10,
            )
            await message.delete()
            return
        # Convert our separate data into a usable datetime
        date_start_complex = (
            str(date_start[0]) + "/" + str(date_start[1]) + "/" + str(date_start[2])
        )
        date_end_complex = (
            str(date_end[0]) + "/" + str(date_end[1]) + "/" + str(date_end[2])
        )
        try:
            date_start = datetime.strptime(date_start_complex, "%d/%m/%Y")
            date_end = datetime.strptime(date_end_complex, "%d/%m/%Y")
        except (ValueError, TypeError):
            # strptime rejects impossible days (e.g. 31/Feb)
            await message.channel.send(
                message.author.mention
                + " There was a problem with your day. Please use a valid day number.",
                delete_after=10,
            )
            await message.delete()
            return
        if date_end > date_start + timedelta(
            weeks=+12
        ) or date_end < date_start + timedelta(weeks=+3):
            # LOA duration must be between 3 and 12 weeks
            await message.channel.send(
                message.author.mention
                + " Leaves of Absence are limited to 3-12 weeks. For longer times, please contact a White Shirt (Lieutenant or Above).",
                delete_after=10,
            )
            await message.delete()
            return
        # Make sure the LOA isn't over yet
        if date_end < datetime.utcnow():
            await message.channel.send(
                f"{message.author.mention} The leave of absence you supplied has already expired.",
                delete_after=10,
            )
            await message.delete()
            return
        # Save the entry; first delete any previous pending request message(s)
        request_id = message.id
        old_messages = await self.bot.sql.request(
            "SELECT request_id FROM LeaveTimes WHERE officer_id = %s", self.id
        )
        if len(old_messages) == 0:
            pass
        else:
            ctx = await self.bot.get_context(message)
            for old_msg_id in old_messages[0]:
                old_msg = await ctx.fetch_message(old_msg_id)
                await old_msg.delete()
        await self.save_loa(date_start, date_end, reason, request_id)
        await message.add_reaction("\N{WHITE HEAVY CHECK MARK}")
    async def save_loa(self, date_start, date_end, reason, request_id):
        """
        Save (or replace) this officer's Leave-of-Absence record.

        Any existing LeaveTimes row for this officer is deleted first, so
        each officer has at most one stored LOA entry (keyed by self.id
        plus the four arguments).
        """
        # Delete any existing entries
        await self.bot.sql.request(
            "DELETE FROM LeaveTimes WHERE officer_id = %s", self.id
        )
        # Save the new entry
        await self.bot.sql.request(
            "REPLACE INTO `LeaveTimes` (`officer_id`,`date_start`,`date_end`,`reason`,`request_id`) VALUES (%s, %s, %s, %s, %s)",
            (self.id, date_start, date_end, reason, request_id),
        )
    async def promote(self, rank=None):
        """Try to promote this officer, and return their rank afterwards.

        Raises IndexError when no higher rank exists or permissions are missing.
        """
        return await self._prodemote(promote=True, rank=rank)
    async def demote(self, rank=None):
        """Try to demote this officer, and return their rank afterwards.

        Raises IndexError when no lower rank exists or permissions are missing.
        """
        return await self._prodemote(demote=True, rank=rank)
async def _prodemote(self, promote=False, demote=False, rank=None):
"""Used internally to promote/demote this officer. Don't call this directly."""
old_rank = self.rank
if rank:
new_rank = rank
elif promote:
higher_ranks = [
x
for x in self.bot.officer_manager.all_lpd_ranks
if x.position > old_rank.position
]
if higher_ranks == []:
raise IndexError("Highest rank available is already applied")
return
new_rank = min(higher_ranks, key=lambda r: r.position)
elif demote:
lower_ranks = [
x
for x in self.bot.officer_manager.all_lpd_ranks
if x.position < old_rank.position
]
if lower_ranks == []:
raise IndexError("Lowest rank available is already applied")
return
new_rank = max(lower_ranks, key=lambda r: r.position)
else:
raise ValueError(
"Must specify promote=True, demote=True, or rank=<Discord.role object>"
)
return
if type(new_rank) != Role:
raise TypeError(f"Expected type Discord.role, got {type(new_rank)} instead")
return
try:
await self.member.add_roles(new_rank)
except Forbidden as e:
if promote:
raise IndexError(
"I do not have permission to promote this officer any further"
)
return old_rank
try:
await self.member.remove_roles(old_rank)
except Forbidden as e:
await self.member.remove_roles(new_rank)
raise IndexError("I do not have permission to demote this officer")
return old_rank
return new_rank
# ====================
# properties
# ====================
# External functions
    # Role-membership flags. Each property checks the member's Discord roles,
    # either against every ladder role tagged with a flag in
    # settings["role_ladder"] (white shirt, admin, detainable) or against a
    # single role id looked up from bot settings.
    @property
    def is_white_shirt(self):
        return self._has_role(*self._get_roles_with_tag("is_white_shirt"))
    @property
    def is_admin(self):
        return self._has_role(*self._get_roles_with_tag("is_admin"))
    @property
    def is_recruiter(self):
        return self._has_role(self.bot.settings["recruiter_role"])
    @property
    def is_chat_moderator(self):
        return self._has_role(self.bot.settings["chat_moderator_role"])
    @property
    def is_moderator(self):
        return self._has_role(self.bot.settings["moderator_role"])
    @property
    def is_trainer(self):
        return self._has_role(self.bot.settings["trainer_role"])
    @property
    def is_slrt_trainer(self):
        return self._has_role(self.bot.settings["slrt_trainer_role"])
    @property
    def is_slrt_trained(self):
        return self._has_role(self.bot.settings["slrt_trained_role"])
    @property
    def is_event_host(self):
        return self._has_role(self.bot.settings["event_host_role"])
    @property
    def is_lmt_trained(self):
        return self._has_role(self.bot.settings["lmt_trained_role"])
    @property
    def is_lmt_trainer(self):
        return self._has_role(self.bot.settings["lmt_trainer_role"])
    @property
    def is_dev_member(self):
        return self._has_role(self.bot.settings["dev_team_role"])
    @property
    def is_detainable(self):
        return self._has_role(*self._get_roles_with_tag("is_detainable"))
    @property
    def is_team_lead(self):
        return self._has_role(self.bot.settings["team_lead_role"])
    @property
    def is_programming_team(self):
        return self._has_role(self.bot.settings["programming_team_role"])
# Often used member functions
    @property
    def discord_name(self):
        # Classic "name#discriminator" form used in log messages.
        return f"{self.member.name}#{self.member.discriminator}"
    @property
    def mention(self):
        return self.member.mention
    @property
    def display_name(self):
        return self.member.display_name
    @property
    def id(self):
        return self.member.id
    @property
    def rank(self):
        # Highest LPD rank role the member currently holds.
        # NOTE(review): max() of an empty sequence raises ValueError when the
        # member has no LPD rank role — confirm callers guarantee one exists.
        intersection = list(
            set(self.member.roles) & set(self.bot.officer_manager.all_lpd_ranks)
        )
        return max(intersection, key=lambda item: item.position)
# Internal functions
def _has_role(self, *role_ids):
for role in self.member.roles:
if role.id in role_ids:
return True
return False
def _get_roles_with_tag(self, role_tag):
return tuple(
x["id"]
for x in self.bot.settings["role_ladder"]
if role_tag in x and x[role_tag] == True
)
# ====================
# On Duty Activity
# ====================
# External functions
async def log_time(self, start_time, end_time):
string_start_time = datetime.fromtimestamp(math.floor(start_time)).strftime(
self.bot.settings["db_time_format"]
)
string_end_time = datetime.fromtimestamp(math.floor(end_time)).strftime(
self.bot.settings["db_time_format"]
)
print(
"DEBUG Time logged for "
+ self.discord_name
+ ": "
+ string_start_time
+ " - "
+ string_end_time
+ " Seconds: "
+ str(math.floor(end_time - start_time))
)
await self.bot.sql.request(
"INSERT INTO TimeLog(officer_id, start_time, end_time) VALUES (%s, %s, %s)",
(self.id, string_start_time, string_end_time),
)
async def get_time(self, from_datetime_object, to_datetime_object):
# Convert the datetime objects into strings the database can understand
from_db_time = from_datetime_object.strftime(
self.bot.settings["db_time_format"]
)
to_db_time = to_datetime_object.strftime(self.bot.settings["db_time_format"])
# Execute the query to get the time information
result = await self.bot.sql.request(
"""
SELECT SUM(TIMESTAMPDIFF(SECOND, start_time, end_time)) AS 'Time'
FROM TimeLog
WHERE
officer_id = %s AND
(start_time > %s AND start_time < %s)
""",
(str(self.id), from_db_time, to_db_time),
)
# Make sure the function will return a number even though the user has never gone on duty
if result == None:
return 0
else:
return result[0][0]
async def get_full_time(self, from_datetime_object, to_datetime_object):
# Execute the query to get the time information
result = await self.bot.sql.request(
"""
SELECT start_time, end_time, TIMESTAMPDIFF(SECOND, start_time, end_time) AS | |
[PlotNums._get_square_row_cols(nSubplots, fix=True) for nSubplots in nSubplots_list]
>>> print(repr(np.array(rc_list).T))
array([[1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3],
[1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4]])
"""
if nSubplots == 0:
return 0, 0
if inclusive:
rounder = np.ceil
else:
rounder = np.floor
if fix:
# This function is very broken, but it might have dependencies
# this is the correct version
nCols = int(rounder(np.sqrt(nSubplots)))
nRows = int(rounder(nSubplots / nCols))
return nRows, nCols
else:
# This is the clamped num cols version
# probably used in ibeis.viz
if max_cols is None:
max_cols = 5
if nSubplots in [4]:
max_cols = 2
if nSubplots in [5, 6, 7]:
max_cols = 3
if nSubplots in [8]:
max_cols = 4
nCols = int(min(nSubplots, max_cols))
#nCols = int(min(rounder(np.sqrt(nrids)), 5))
nRows = int(rounder(nSubplots / nCols))
return nRows, nCols
def draw_border(ax, color, lw=2, offset=None, adjust=True):
    """Draw a rectangular border around the subplot `ax` and return the patch."""
    import matplotlib as mpl
    shrink = (-.7, -.2, 1, .4) if adjust else ()
    xy, width, height = _get_axis_xy_width_height(ax, *shrink)
    if offset is not None:
        xoff, yoff = offset
        xy = [xoff, yoff]
        # Height is negated here (mirrors the long-standing behavior of the
        # offset branch).
        height = - height - yoff
        width = width - xoff
    patch = ax.add_patch(mpl.patches.Rectangle(xy, width, height, lw=lw))
    patch.set_clip_on(False)
    patch.set_fill(False)
    patch.set_edgecolor(color)
    return patch
def draw_boxes(boxes, box_format='xywh', color='blue', labels=None,
               textkw=None, ax=None):
    """
    Draw a collection of bounding boxes on a matplotlib axes.

    Args:
        boxes (list): box coordinates in xywh, tlbr, or cxywh format
        box_format (str): 'xywh' (top-left x/y plus width/height),
            'cxywh' (center x/y plus width/height), or
            'tlbr' (top-left and bottom-right corners)
        color (str): edge color of the boxes
        labels (list): if specified, plots a text annotation on each box

    Example:
        >>> from netharn.util.mplutil import *
        >>> autompl()
        >>> bboxes = [[.1, .1, .6, .3], [.3, .5, .5, .6]]
        >>> col = draw_boxes(bboxes)
    """
    import matplotlib as mpl
    from matplotlib import pyplot as plt
    from netharn import util
    if ax is None:
        ax = plt.gca()
    if isinstance(boxes, util.Boxes):
        box_format = boxes.format
        boxes = boxes.data
    if not len(boxes):
        return
    boxes = np.asarray(boxes)
    # Normalize every supported format to top-left xywh.
    if box_format == 'xywh':
        xywh = boxes
    elif box_format == 'cxywh':
        cx, cy, w, h = boxes.T[0:4]
        xywh = np.vstack([cx - (w / 2), cy - (h / 2), w, h]).T
    elif box_format == 'tlbr':
        x1, y1 = boxes.T[0:2]
        w, h = boxes.T[2:4] - boxes.T[0:2]
        xywh = np.vstack([x1, y1, w, h]).T
    else:
        raise KeyError(box_format)
    rectkw = dict(
        ec=Color(color).as01('rgba'),
        fc=Color((0, 0, 0, 0)).as01('rgba'),
        lw=2,
        linestyle='solid',
    )
    patches = [mpl.patches.Rectangle((x, y), w, h, **rectkw)
               for x, y, w, h in xywh]
    col = mpl.collections.PatchCollection(patches, match_original=True)
    ax.add_collection(col)
    if labels:
        tkw = {
            'horizontalalignment': 'left',
            'verticalalignment': 'top',
            'backgroundcolor': (0, 0, 0, .3),
            'color': 'white',
            'fontproperties': mpl.font_manager.FontProperties(
                size=6, family='monospace'),
        }
        if textkw is not None:
            tkw.update(textkw)
        for (x1, y1, w, h), label in zip(xywh, labels):
            ax.text(x1, y1, label, **tkw)
    return col
def draw_line_segments(pts1, pts2, ax=None, **kwargs):
    """
    Draw `N` line segments, one between each pair pts1[i] -> pts2[i].

    Args:
        pts1 (ndarray): Nx2 segment start points
        pts2 (ndarray): Nx2 segment end points
        ax (None): axes to draw on (defaults to the current axes)
        **kwargs: lw, alpha, colors — forwarded to LineCollection
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    if ax is None:
        ax = plt.gca()
    assert len(pts1) == len(pts2), 'unaligned'
    segments = list(zip(pts1, pts2))
    # Both 'lw' and 'linewidth' are consumed from kwargs either way
    # (the inner pop always runs), matching the original behavior.
    linewidth = kwargs.pop('lw', kwargs.pop('linewidth', 1.0))
    alpha = kwargs.pop('alpha', 1.0)
    if 'color' in kwargs:
        kwargs['colors'] = kwargs['color']
    line_group = mpl.collections.LineCollection(
        segments, linewidths=linewidth, alpha=alpha, **kwargs)
    ax.add_collection(line_group)
def make_heatmask(probs, cmap='plasma', with_alpha=True):
    """
    Colorizes a single-channel intensity mask (with an alpha channel)

    Maps each probability through the given matplotlib colormap; when
    `with_alpha`, the probabilities themselves become the alpha channel.
    """
    import matplotlib as mpl
    from netharn.util import imutil
    # Expects a 2D (H, W) mask; values are normalized to [0, 1] floats below.
    assert len(probs.shape) == 2
    cmap_ = mpl.cm.get_cmap(cmap)
    probs = imutil.ensure_float01(probs)
    heatmask = cmap_(probs)
    if with_alpha:
        # NOTE(review): this reverses RGB -> BGR, and only on the with_alpha
        # path — presumably for cv2-style consumers; confirm this is intended.
        heatmask[:, :, 0:3] = heatmask[:, :, 0:3][:, :, ::-1]
        heatmask[:, :, 3] = probs
    return heatmask
def colorbar_image(domain, cmap='plasma', dpi=96, shape=(200, 20), transparent=False):
    """
    Render a standalone colorbar spanning `domain` and return it as an image.

    Notes:
        shape (height, width in pixels) is approximate

    Ignore:
        domain = np.linspace(-30, 200)
        cmap='plasma'
        dpi = 80
        dsize = (20, 200)
        util.imwrite('foo.png', util.colorbar_image(np.arange(0, 1)), shape=(400, 80))
        import plottool as pt
        pt.qtensure()
        import matplotlib as mpl
        mpl.style.use('ggplot')
        util.imwrite('foo.png', util.colorbar_image(np.linspace(0, 1, 100), dpi=200, shape=(1000, 40), transparent=1))
        ub.startfile('foo.png')
    """
    import matplotlib as mpl
    # NOTE(review): the `warn` kwarg was removed from mpl.use in matplotlib
    # 3.1 — confirm the pinned matplotlib version supports this call.
    mpl.use('agg', force=False, warn=False)
    from matplotlib import pyplot as plt
    fig = plt.figure(dpi=dpi)
    # shape is (rows, cols) in pixels; figure size is specified in inches.
    w, h = shape[1] / dpi, shape[0] / dpi
    # w, h = 1, 10
    fig.set_size_inches(w, h)
    # NOTE(review): the string form add_subplot('111') is deprecated in newer
    # matplotlib; add_subplot(1, 1, 1) is the modern equivalent.
    ax = fig.add_subplot('111')
    sm = plt.cm.ScalarMappable(cmap=plt.get_cmap(cmap))
    sm.set_array(domain)
    plt.colorbar(sm, cax=ax)
    cb_img = render_figure_to_image(fig, dpi=dpi, transparent=transparent)
    # Close the figure so repeated calls don't leak figure objects.
    plt.close(fig)
    return cb_img
class Color(ub.NiceRepr):
"""
move to colorutil?
Example:
>>> from netharn.util.mplutil import *
>>> print(Color('g'))
>>> print(Color('orangered'))
>>> print(Color('#AAAAAA').as255())
>>> print(Color([0, 255, 0]))
>>> print(Color([1, 1, 1.]))
>>> print(Color([1, 1, 1]))
>>> print(Color(Color([1, 1, 1])).as255())
>>> print(Color(Color([1., 0, 1, 0])).ashex())
>>> print(Color([1, 1, 1], alpha=255))
>>> print(Color([1, 1, 1], alpha=255, space='lab'))
"""
    def __init__(self, color, alpha=None, space=None):
        """Normalize `color` (name/hex string, 255-ints, 01-floats, or Color) to base 01.

        Args:
            color: color name, hex string, channel sequence, or Color instance
            alpha: optional alpha channel value (same base as `color`)
            space: color-space label, e.g. 'rgb' or 'bgr' (defaults to 'rgb')
        """
        if isinstance(color, Color):
            # Copying an existing Color: take its space and channels wholesale;
            # alpha/space overrides are not allowed in that case.
            assert alpha is None
            assert space is None
            space = color.space
            color = color.color01
        else:
            color = self._ensure_color01(color)
            if alpha is not None:
                alpha = self._ensure_color01([alpha])[0]
        if space is None:
            space = 'rgb'
        # always normalize the color down to 01
        color01 = list(color)
        if alpha is not None:
            # Only gray (1 channel) or rgb-like (3 channels) may gain an alpha.
            if len(color01) not in [1, 3]:
                raise ValueError('alpha already in color')
            color01 = color01 + [alpha]
        # correct space if alpha is given (e.g. 'rgb' -> 'rgba')
        if len(color01) in [2, 4]:
            if not space.endswith('a'):
                space += 'a'
        self.color01 = color01
        self.space = space
def __nice__(self):
colorpart = ', '.join(['{:.2f}'.format(c) for c in self.color01])
return self.space + ': ' + colorpart
def ashex(self, space=None):
c255 = self.as255(space)
return '#' + ''.join(['{:02x}'.format(c) for c in c255])
def as255(self, space=None):
color = (np.array(self.as01(space)) * 255).astype(np.uint8)
return tuple(map(int, color))
def as01(self, space=None):
"""
self = mplutil.Color('red')
mplutil.Color('green').as01('rgba')
"""
color = tuple(self.color01)
if space is not None:
if space == self.space:
pass
elif space == 'rgba' and self.space == 'rgb':
color = color + (1,)
elif space == 'bgr' and self.space == 'rgb':
color = color[::-1]
elif space == 'rgb' and self.space == 'bgr':
color = color[::-1]
else:
assert False
return tuple(map(float, color))
@classmethod
def _is_base01(channels):
""" check if a color is in base 01 """
def _test_base01(channels):
tests01 = {
'is_float': all([isinstance(c, (float, np.float64)) for c in channels]),
'is_01': all([c >= 0.0 and c <= 1.0 for c in channels]),
}
return tests01
if isinstance(channels, six.string_types):
return False
return all(_test_base01(channels).values())
@classmethod
def _is_base255(Color, channels):
""" there is a one corner case where all pixels are 1 or less """
if (all(c > 0.0 and c <= 255.0 for c in channels) and any(c > 1.0 for c in channels)):
# Definately in 255 space
return True
else:
# might be in 01 or 255
return all(isinstance(c, int) for c in channels)
@classmethod
def _hex_to_01(Color, hex_color):
"""
hex_color = '#6A5AFFAF'
"""
assert hex_color.startswith('#'), 'not a hex string %r' % (hex_color,)
parts = hex_color[1:].strip()
color255 = tuple(int(parts[i: i + 2], 16) for i in range(0, len(parts), 2))
assert len(color255) in [3, 4], 'must be length 3 or 4'
return Color._255_to_01(color255)
def _ensure_color01(Color, color):
""" Infer what type color is and normalize to 01 """
if isinstance(color, six.string_types):
color = Color._string_to_01(color)
elif Color._is_base255(color):
color = Color._255_to_01(color)
return color
@classmethod
def _255_to_01(Color, color255):
""" converts base 255 color to base 01 color """
return [channel / 255.0 for channel in color255]
@classmethod
def _string_to_01(Color, color):
"""
mplutil.Color._string_to_01('green')
mplutil.Color._string_to_01('red')
"""
from matplotlib import colors as mcolors
if color in mcolors.BASE_COLORS:
color01 = mcolors.BASE_COLORS[color]
elif color in mcolors.CSS4_COLORS:
color_hex = mcolors.CSS4_COLORS[color]
color01 = Color._hex_to_01(color_hex)
elif color.startswith('#'):
color01 = Color._hex_to_01(color)
else:
raise ValueError('unknown color=%r' % | |
import ast
from collections import (
defaultdict,
)
from unittest import TestCase, skip
from darglint.lex import (
condense,
lex,
)
from darglint.parse.identifiers import (
ArgumentIdentifier,
ExceptionIdentifier,
NoqaIdentifier,
)
from darglint.parse.google import (
parse,
)
from darglint.errors import (
IndentError,
)
from darglint.utils import (
CykNodeUtils,
)
from darglint.config import (
DocstringStyle,
Strictness,
)
from .utils import (
ConfigurationContext,
)
class DocstringTestCase(TestCase):
# examples taken from
# http://www.sphinx-doc.org/en/stable/ext/example_google.html
def assertContains(self, tree, symbol):
    """Fail the test unless some node in the tree carries ``symbol``."""
    if any(child.symbol == symbol for child in tree.walk()):
        return
    self.fail('Tree does not contain symbol "{}"'.format(symbol))
def values_of(self, tree, symbol):
    """Yield the token value of every matching node in the tree."""
    matches = (
        node for node in tree.walk()
        if node.symbol == symbol and node.value is not None
    )
    for node in matches:
        yield node.value.value
def get_identifier(self, node, identifier):
    """Return the first node annotated with ``identifier``, or None."""
    annotated = (
        child for child in node.walk()
        if identifier in child.annotations
    )
    return next(annotated, None)
def test_parse_noqa_for_argument(self):
    """An inline noqa on an argument description exposes its error code."""
    func = '\n'.join([
        'def my_function():',
        '    """Has an extra argument, but thats okay.',
        '',
        '    Args:',
        '        arg1: This will be defined very soon.  # noqa: I102',
        '',
        '    """',
        '    print("Not done yet!")',
    ])
    doc = ast.get_docstring(ast.parse(func).body[0])
    self.assertTrue(doc.startswith('Has an extra'))
    node = parse(condense(lex(doc)))
    self.assertTrue(
        CykNodeUtils.contains(node, 'noqa'),
    )
    # the noqa node should both exist and carry the suppressed code
    noqa = self.get_identifier(node, NoqaIdentifier)
    self.assertTrue(
        noqa is not None,
    )
    self.assertEqual(
        NoqaIdentifier.extract(noqa),
        'I102',
    )
def test_parse_noqa_for_global(self):
    """Make sure global targets are empty lists.

    A bare ``# noqa`` in the docstring body (not attached to any item)
    should still produce a noqa node in the parse tree.
    """
    func = '\n'.join([
        'def my_function():',
        '    """Ignore missing return.',
        '',
        '    # noqa: I201',
        '',
        '    """',
        '    return "This is ignored."',
    ])
    doc = ast.get_docstring(ast.parse(func).body[0])
    node = parse(condense(lex(doc)))
    self.assertTrue(CykNodeUtils.contains(node, 'noqa'))
def test_parse_global_noqa_with_target(self):
    """Make sure targets are present in the lists.

    A global ``# noqa`` naming a target (here ``arg1``) must still parse
    to a noqa node.
    """
    func = '\n'.join([
        'def my_function(arg1):',
        '    """Ignore missing argument.',
        '',
        '    # noqa: I101 arg1',
        '',
        '    """',
        '    pass',
    ])
    doc = ast.get_docstring(ast.parse(func).body[0])
    node = parse(condense(lex(doc)))
    self.assertTrue(CykNodeUtils.contains(node, 'noqa'))
def test_parses_long_description(self):
    """A multi-line long description after the summary parses cleanly."""
    func = '\n'.join([
        'def this_function_has_a_long_description(arg1):',
        '    """Return the arg, unchanged.',
        '',
        '    This function returns the arg, unchanged. There is',
        '    no particular reason, but this is a good place to check to ',
        '    see that long descriptions are being parsed correctly. ',
        '    If they are, I\'m not sure why. There is some magic ',
        '    going on here, in fact.',
        '',
        '    """',
        '    return arg1',
    ])
    doc = ast.get_docstring(ast.parse(func).body[0])
    tokens = list(lex(doc))
    node = parse(tokens)
    # parsing succeeding at all is the assertion here
    self.assertTrue(node is not None)
def test_parse_long_description_multiple_sections(self):
    """A long description split into several paragraphs parses cleanly."""
    func = '\n'.join([
        'def this_function_has_multiple_long_descriptions():',
        '    """Do some math.',
        '',
        '    This is the first part of the long description.',
        '    it can be multiple lines, but doesn\'t have to be',
        '',
        '    This is the second half of the long description.',
        '',
        '    And the final part of it.',
        '',
        '    """',
        '    pass',
    ])
    doc = ast.get_docstring(ast.parse(func).body[0])
    tokens = list(lex(doc))
    node = parse(tokens)
    self.assertTrue(node is not None)
def test_arguments_can_be_extracted(self):
    """Argument identifiers can be read back out of the parse tree."""
    docstring = '\n'.join([
        'Example function with types documented in the docstring.',
        '',
        '`PEP 484`_ type annotations are supported. If attribute, parameter, and',  # noqa: E501
        'return types are annotated according to `PEP 484`_, they do not need to be',  # noqa: E501
        'included in the docstring:',
        '',
        'Args:',
        '    param1 (int): The first parameter.',
        '    param2 (str): The second parameter.',
        '',
        'Returns:',
        '    bool: The return value. True for success, False otherwise.',
        '',
        '.. _PEP 484:',
        '    https://www.python.org/dev/peps/pep-0484/',
    ])
    tokens = condense(lex(docstring))
    tree = parse(tokens)
    # 'ident' nodes hold the documented argument names, in order
    args = list(self.values_of(tree, 'ident'))
    self.assertEqual(
        args,
        ['param1', 'param2'],
    )
def test_type_is_type(self):
    """Parenthesized type hints in Args produce a type-section node."""
    docstring = '\n'.join([
        'Takes a class and returns an instance.',
        '',
        'Args:',
        '    klass (type): A class to instantiate.',
        '    args (List[int]): The initial arguments to pass to it.',
        '',
    ])
    node = parse(condense(lex(docstring)))
    self.assertTrue(CykNodeUtils.contains(node, 'type-section-parens'))
def test_crazy_argument_type_signatures(self):
    """Unusual type signatures (rst roles, line breaks) still parse."""
    possible_types = [
        # '(int)',
        # '(:obj:`str`, optional)',
        '(:obj:`str`, optional)',
        '(:obj:`str`,\n    optional)',
    ]
    for type_ in possible_types:
        docstring = '\n'.join([
            'A short summary,',
            '',
            'Args:',
            '    x {}: y.'.format(type_),
        ])
        node = parse(condense(lex(docstring)))
        self.assertTrue(CykNodeUtils.contains(node, 'type-section-parens'))
def test_arguments_with_multiple_lines(self):
    """Multi-line argument descriptions parse into an arguments-section."""
    docstring = '\n'.join([
        'This is an example of a module level function.',
        '',
        'The format for a parameter is::',
        '',
        '    name (type): description',
        '        The description may span multiple lines. Following',
        '        lines should be indented. The "(type)" is optional.',
        '',
        '        Multiple paragraphs are supported in parameter',
        '        descriptions.',
        '',
        'Args:',
        '    param1: The first parameter.',
        '    param2: The second parameter. Defaults to None.',  # noqa: E501
        '        Second line of description should be indented.',
        '    *args: Variable length argument list.',
        '    **kwargs: Arbitrary keyword arguments.',
        '',
        'Returns:',
        '    bool: True if successful, False otherwise.',
    ])
    tokens = condense(lex(docstring))
    tree = parse(tokens)
    self.assertTrue(tree is not None)
    self.assertContains(tree, 'arguments-section')
def test_arguments_are_last(self):
    """Make sure arguments can be parsed as the last section."""
    docstring = '\n'.join([
        'Example of docstring on the __init__ method.',
        '',
        'The __init__ method may be documented in either the class level',
        'docstring, or as a docstring on the __init__ method itself.',
        '',
        'Either form is acceptable, but the two should not be mixed. Choose one',  # noqa: E501
        'convention to document the __init__ method and be consistent with it.',  # noqa: E501
        '',
        'Note:',
        '    Do not include the `self` parameter in the ``Args`` section.',
        '',
        'Args:',
        '    param1: Description of `param1`.',
        '    param2: Description of `param2`. Multiple',  # noqa: E501
        '        lines are supported.',
        '    param3: Description of `param3`.',
    ])
    node = parse(condense(lex(docstring)))
    self.assertTrue(CykNodeUtils.contains(node, 'arguments-section'))
def test_parse_yields(self):
    """A bare Yields section parses into a yields-section node."""
    tokens = condense(lex('\n'.join([
        'Some sort of short description.',
        '',
        'Yields:',
        '    The number 5. Always.',
    ])))
    tree = parse(tokens)
    self.assertTrue(tree is not None)
    self.assertContains(tree, 'yields-section')
def test_parse_yields_with_type(self):
    """A typed Yields entry produces a yields-type node."""
    tokens = condense(lex('\n'.join([
        'Short.',
        '',
        'Yields:',
        '    int: Some value.',
    ])))
    tree = parse(tokens)
    self.assertTrue(tree is not None)
    self.assertContains(tree, 'yields-type')
def test_parse_yields_with_type_and_newline(self):
    """A trailing blank line after Yields does not break type parsing."""
    tokens = condense(lex('\n'.join([
        'Short',
        '',
        'Yields:',
        '    int: Numbers that were calculated somehow.',
        '',
    ])))
    tree = parse(tokens)
    self.assertTrue(tree is not None)
    self.assertContains(tree, 'yields-type')
def test_parse_raises(self):
    """A Raises section yields raises-section and exception nodes."""
    docstring = '\n'.join([
        'This has a problem.',
        '',
        'Raises:',
        '    Exception: An exception for generic reasons.',
    ])
    tokens = condense(lex(docstring))
    tree = parse(tokens)
    self.assertTrue(tree is not None)
    self.assertContains(tree, 'raises-section')
    self.assertContains(tree, 'exception')
def test_parse_raises_multiple_lines(self):
    """Multi-line Raises entries expose every exception identifier."""
    docstring = '\n'.join([
        'Iterates through the records.',
        '',
        'Args:',
        '    address: The address of the database.',
        '',
        'Raises:',
        '    StopIteration: Once there are no more records,',
        '        or possible if there were never any records.',
        '    ConnectionError: If we were unable to establish a',
        '        connection.',
    ])
    tokens = condense(lex(docstring))
    tree = parse(tokens)
    # NOTE(review): get_annotation_lookup is not defined in this chunk --
    # presumably a helper defined elsewhere in this class; verify.
    annotation_lookup = self.get_annotation_lookup(tree)
    values = {
        ExceptionIdentifier.extract(x)
        for x in annotation_lookup[ExceptionIdentifier]
    }
    self.assertEqual(
        values,
        {'StopIteration', 'ConnectionError'},
    )
def test_parse_underindented_raises_section(self):
    """An under-indented continuation line raises exactly one IndentError
    while the exception identifier is still recovered."""
    docstring = '\n'.join([
        'Iterates through the records.',
        '',
        'Args:',
        '    address: The address of the database.',
        '',
        'Raises:',
        '    StopIteration: Once there are no more records,',
        '      or possible if there were never any records.',
        '',
    ])
    tokens = condense(lex(docstring))
    tree = parse(tokens)
    # NOTE(review): get_annotation_lookup is defined elsewhere in this
    # class (not visible in this chunk) -- verify.
    annotation_lookup = self.get_annotation_lookup(tree)
    self.assertEqual(
        len(annotation_lookup[IndentError]),
        1
    )
    values = {
        ExceptionIdentifier.extract(x)
        for x in annotation_lookup[ExceptionIdentifier]
    }
    self.assertEqual(
        values,
        {'StopIteration'},
    )
def test_argument_types_can_be_parsed(self):
    """Typed arguments produce both arguments-section and type nodes."""
    docstring = '\n'.join([
        'This docstring contains types for its arguments.',
        '',
        'Args:',
        '    x (int): The first number.',
        '    y (float): The second number.',
    ])
    node = parse(condense(lex(docstring)))
    self.assertTrue(CykNodeUtils.contains(node, 'arguments-section'))
    self.assertTrue(CykNodeUtils.contains(node, 'type-section-parens'))
def test_can_parse_return_type(self):
    """A typed Returns entry produces returns-section and returns-type."""
    docstring = '\n'.join([
        'Return an approximation of pi.',
        '',
        'Returns:',
        '    Decimal: An approximation of pi.',
    ])
    node = parse(condense(lex(docstring)))
    self.assertTrue(CykNodeUtils.contains(node, 'returns-section'))
    self.assertTrue(CykNodeUtils.contains(node, 'returns-type'))
def test_parse_multiple_sections(self):
    """Every ordered pair of distinct sections parses together."""
    sections = {
        'arguments-section': '\n'.join([
            'Args:',
            '    x: A number.',
            '    y: Another number, but with a much',
            '        longer description.',
        ]),
        'returns-section': '\n'.join([
            'Returns:',
            '    The description of the thing returned.',
            '    Can span multiple lines.',
        ]),
        'long-description': '\n'.join([
            'A long description can appear anywhere.'
        ]),
        'yields-section': '\n'.join([
            'Yields:',
            '    A bunch of numbers.',
        ])
    }
    keys = list(sections.keys())
    docstring = 'Some initial section.\n\n{}\n\n{}'
    # exercise every unordered pair (i < j) of sections in one docstring
    for i in range(len(sections) - 1):
        for j in range(i + 1, len(sections)):
            section1 = sections[keys[i]]
            section2 = sections[keys[j]]
            tokens = condense(lex(docstring.format(section1, section2)))
            tree = parse(tokens)
            self.assertTrue(tree is not None)
            self.assertContains(tree, keys[i])
            self.assertContains(tree, keys[j])
def test_parse_star_arguments(self):
    """Starred arguments (``*fns``) parse into the arguments node."""
    docstring = '\n'.join([
        'Negate a function which returns a boolean.',
        '',
        'Args:',
        '    *fns (int): Functions which returns a boolean.',
        '',
        'Returns:',
        # NOTE(review): the missing comma after the next line joins it with
        # the following string literal ('fallse' typo included) -- possibly
        # unintentional in the fixture, but preserved here.
        '    int: A function which returns fallse when any of the'
        '        callables return true, and true will all of the ',
        '        callables return false.',
    ])
    tokens = condense(lex(docstring))
    tree = parse(tokens)
    self.assertTrue(tree is not None)
    self.assertContains(tree, 'arguments')
def test_bare_noqa_can_be_parsed(self):
docstring = '\n'.join([
'The first | |
)
#print 'New Efield shape: ', E_in.shape
# fill in required dictionary keys from defaults if not given
if 'lcell' in list(p_dict.keys()):
lcell = p_dict['lcell']
else:
lcell = p_dict_defaults['lcell']
if 'Elem' in list(p_dict.keys()):
Elem = p_dict['Elem']
else:
Elem = p_dict_defaults['Elem']
if 'Dline' in list(p_dict.keys()):
Dline = p_dict['Dline']
else:
Dline = p_dict_defaults['Dline']
# get wavenumber
exec('transition = AC.'+Elem+Dline+'Transition')
wavenumber = transition.wavevectorMagnitude
## get magnetic field spherical coordinates
# defaults to 0,0 i.e. B aligned with kvector of light (Faraday)
if 'Btheta' in list(p_dict.keys()):
Btheta = p_dict['Btheta']
else:
Btheta = p_dict_defaults['Btheta']
if 'Bphi' in list(p_dict.keys()):
Bphi = p_dict['Bphi']
else:
Bphi = p_dict_defaults['Bphi']
# get susceptibility (already calculated, input to this method)
ChiPlus, ChiMinus, ChiZ = Chi
# Rotate initial Electric field so that B field lies in x-z plane
# (Effective polarisation rotation)
E_xz = RM.rotate_around_z(E_in.T,Bphi)
# Find eigen-vectors for propagation and create rotation matrix
RM_ary, n1, n2 = SD.solve_diel(ChiPlus,ChiMinus,ChiZ,Btheta)
# propagation matrix
PropMat = np.array(
[ [exp(1.j*n1*wavenumber*lcell),np.zeros(len(n1)),np.zeros(len(n1))],
[np.zeros(len(n1)),exp(1.j*n2*wavenumber*lcell),np.zeros(len(n1))],
[np.zeros(len(n1)),np.zeros(len(n1)),np.ones(len(n1))] ])
#print 'prop matrix shape:',PropMat.T.shape
#print 'prop mat [0]: ', PropMat.T[0]
# calcualte output field - a little messy to make it work nicely with array operations
# - need to play around with matrix dimensions a bit
# Effectively this does this, element-wise: E_out_xz = RotMat.I * PropMat * RotMat * E_xz
E_xz = np.reshape(E_xz.T, (len(X),3,1))
E_out_xz = np.zeros((len(X),3,1),dtype='complex')
E_out = np.zeros_like(E_out_xz)
for i in range(len(X)):
#print 'Propagation Matrix:\n',PropMat.T[i]
#print 'Rotation matrix:\n',RM_ary.T[i]
#inverse rotation matrix
RMI_ary = np.matrix(RM_ary.T[i].T).I
#print 'Inverse rotation matrix:\n',RMI_ary
E_out_xz[i] = RMI_ary * np.matrix(PropMat.T[i]) * np.matrix(RM_ary.T[i].T)*np.matrix(E_xz[i])
#print 'E out xz i: ',E_out_xz[i].T
E_out[i] = RM.rotate_around_z(E_out_xz[i].T[0],-Bphi)
#print 'E out [0]: ',E_out[0]
#print 'E out shape: ',E_out.shape
## return electric field vector - can then use Jones matrices to do everything else
return E_out.T[0], np.matrix(RM_ary.T[i])
def get_spectra2(X, E_in, p_dict, outputs=None):
    """
    Calls get_Efield() to get the electric field, then uses Jones matrices
    to calculate experimentally useful quantities.

    Alias for the get_spectra2 method in libs.spectra.

    Inputs:
        X [ numpy 1D array ]
            Detuning axis; the points (in MHz) over which to calculate.
        E_in [ numpy 1/2D array ]
            Input electric field vector in the xyz basis (z is the
            propagation direction). Either a 1D (Ex,Ey,Ez) array used for
            all detunings, or a 2D array of shape (3, len(X)) giving a
            different field per detuning point.
        p_dict [ dictionary ]
            All experimental parameters (Elem, Dline, Bfield, T, lcell,
            theta0, Pol, shift, ...). Any key missing from p_dict falls
            back to the module-level p_dict_defaults.
        outputs [ iterable of str or None ]
            Names of the spectra to return, in order. The strings must
            exactly match local variable names in this function. If None
            (or containing 'All'), a default tuple is returned:
            S0, S1, S2, S3, Ix, Iy, I_P45, I_M45,
            alphaPlus, alphaMinus, alphaZ

    Returns:
        A list (or the default tuple) of output arrays as selected by
        ``outputs``.

    Example usage:
        detuning_range = np.linspace(-10, 10, 1000) * 1e3  # GHz -> MHz
        E_in = np.array([1, 0, 0])
        p_dict = {'Elem': 'Cs', 'Dline': 'D2', 'Bfield': 100, 'T': 21,
                  'lcell': 75e-3}
        [Transmission] = get_spectra2(detuning_range, E_in, p_dict,
                                      outputs=['S0'])
    """
    def _param(key):
        # lazy lookup so p_dict may contain keys absent from the defaults
        return p_dict[key] if key in p_dict else p_dict_defaults[key]

    # experimental parameters (fall back to module defaults)
    Elem = _param('Elem')
    Dline = _param('Dline')
    shift = _param('shift')
    lcell = _param('lcell')
    theta0 = _param('theta0')
    Pol = _param('Pol')

    # get wavenumber
    # BUGFIX: the original used exec('transition = AC.'+Elem+Dline+'Transition'),
    # which cannot bind a local variable in Python 3, so the next line raised
    # NameError. getattr() performs the same dynamic lookup correctly.
    transition = getattr(AC, Elem + Dline + 'Transition')
    wavenumber = transition.wavevectorMagnitude

    # Calculate susceptibility for sigma+/sigma-/pi transitions
    ChiPlus, ChiMinus, ChiZ = calc_chi(X, p_dict)
    Chi = [ChiPlus, ChiMinus, ChiZ]

    # Complex refractive indices
    nPlus = sqrt(1.0 + ChiPlus)    # drives sigma-plus transitions
    nMinus = sqrt(1.0 + ChiMinus)  # drives sigma-minus transitions
    nZ = sqrt(1.0 + ChiZ)          # drives pi transitions

    # convert (if necessary) detuning axis X to np array
    # BUGFIX: original tested ``type(X) in (int, float, int)`` -- the
    # duplicated int is a mangled py2 ``long``; isinstance covers both.
    if isinstance(X, (int, float)):
        X = np.array([X])
    else:
        X = np.array(X)

    # Calculate E-field after propagation through the cell
    E_out, R = get_Efield(X, E_in, Chi, p_dict)

    ## Apply Jones matrices
    # broadcast a single input polarisation over the whole detuning axis
    E_in = np.array(E_in)
    if E_in.shape == (3,):
        E_in = np.array([np.ones(len(X)) * E_in[0],
                         np.ones(len(X)) * E_in[1],
                         np.ones(len(X)) * E_in[2]])

    # total transmission, normalised by input intensity
    I_in = (E_in * E_in.conjugate()).sum(axis=0)
    S0 = (E_out * E_out.conjugate()).sum(axis=0) / I_in
    Iz = (E_out[2] * E_out[2].conjugate()).real / I_in
    Transmission = S0

    ## Some quantities from Faraday geometry don't make sense when B and k
    ## are not aligned, but are kept for historical reasons.
    # NOTE(review): the Plus/Minus pairing of n in the Trans*/alpha*
    # quantities below follows the original convention -- confirm intended.
    TransLeft = exp(-2.0 * nPlus.imag * wavenumber * lcell)
    TransRight = exp(-2.0 * nMinus.imag * wavenumber * lcell)

    # Faraday rotation angle (including incident linear polarisation angle)
    phiPlus = wavenumber * nPlus.real * lcell
    phiMinus = wavenumber * nMinus.real * lcell
    phi = (phiMinus - phiPlus) / 2.0

    ## Stokes parameters
    # S1: difference of x- and y-polarised intensities
    Ex = np.array(JM.HorizPol_xy * E_out[:2])
    Ix = (Ex * Ex.conjugate()).sum(axis=0) / I_in
    Ey = np.array(JM.VertPol_xy * E_out[:2])
    Iy = (Ey * Ey.conjugate()).sum(axis=0) / I_in
    S1 = Ix - Iy

    # S2: difference of +45 and -45 degree linear polarisations
    E_P45 = np.array(JM.LPol_P45_xy * E_out[:2])
    E_M45 = np.array(JM.LPol_M45_xy * E_out[:2])
    I_P45 = (E_P45 * E_P45.conjugate()).sum(axis=0) / I_in
    I_M45 = (E_M45 * E_M45.conjugate()).sum(axis=0) / I_in
    S2 = I_P45 - I_M45

    # S3: difference of right- and left-circular intensities
    E_out_lrz = BC.xyz_to_lrz(E_out)
    El = np.array(JM.CPol_L_lr * E_out_lrz[:2])
    Er = np.array(JM.CPol_R_lr * E_out_lrz[:2])
    Il = (El * El.conjugate()).sum(axis=0) / I_in
    Ir = (Er * Er.conjugate()).sum(axis=0) / I_in
    S3 = Ir - Il

    Ir = Ir.real
    Il = Il.real
    Ix = Ix.real
    Iy = Iy.real

    ## Absorption coefficients -- not physically meaningful in a
    ## non-Faraday geometry, kept for backwards compatibility.
    alphaPlus = 2.0 * nMinus.imag * wavenumber
    alphaMinus = 2.0 * nPlus.imag * wavenumber
    alphaZ = 2.0 * nZ.imag * wavenumber

    if (outputs is None) or ('All' in outputs):
        # Default - return 'all' outputs (as used by GUI)
        return (S0.real, S1.real, S2.real, S3.real, Ix.real, Iy.real,
                I_P45.real, I_M45.real, alphaPlus, alphaMinus, alphaZ)
    else:
        # Return the variables named in outputs; the strings must exactly
        # match local variable names in this function.
        return [locals()[output_str] for output_str in outputs]
def output_list():
    """ Helper method that prints a table of all possible variables that get_spectra can return """
    # NOTE(review): despite the name, nothing is returned -- the table is
    # printed to stdout.
    tstr = " \
    All possible outputs from the get_spectra method: \n\n\
    Variable Name		Description \n \
    S0						Total transmission through the cell (Ix + Iy) \n\
    S1						Stokes parameter - Ix - Iy \n\
    S2						Stokes parameter - I_45 - I_-45 \n\
    S3						Stokes parameter - I- - I+ \n\
    TransLeft			Transmission of only left-circularly polarised light \n\
    TransRight			Transmission of only right-circularly polarised light \n\
    ChiPlus				Complex susceptibility of left-circularly polarised light \n\
    ChiMinus				Complex susceptibility of right-circularly polarised light \n\
    nPlus					Complex Refractive index of left-circularly polarised light \n\
    nMinus				Complex Refractive index of right-circularly polarised light \n\
    phiPlus				Rotation of linear polarisation caused by sigma-plus transitions \n\
    phiMinus				Rotation of linear polarisation caused by sigma-minus transitions \n\
    phi						Total rotation of linear polarisation \n\
    Ix						Intensity of light transmitted through a linear polariser aligned with the x-axis \n\
    Iy						Intensity of light transmitted through a linear polariser aligned with the y-axis \n\
    alphaPlus				Absorption coefficient due to sigma-plus transitions \n\
    alphaMinus			Absorption coefficient due to sigma-minus transitions \n\
    GIMinus				Group index of left-circularly polarised light \n\
    GIPlus					Group index of right-circularly polarised light \n\
    "
    print(tstr)
def test1():
    """
    Replicate Fig. 3 of Rotondaro et al. (Generalised treatment ...,
    JOSA B, 2015): a normal Faraday spectrum, compared against the
    vanilla ElecSus implementation, with timing printed for both.
    """
    import time

    d = np.arange(-10000, 10000, 10)
    # Voigt-geometry parameters
    p_dict = {'Bfield': 300, 'rb85frac': 1, 'Btheta': 0, 'lcell': 75e-3,
              'T': 58, 'Dline': 'D2', 'Elem': 'Cs'}

    # timing of the E-field based calculation
    st = time.perf_counter()
    TF = get_spectra2(d, [1, 0, 0], p_dict, outputs=['Iy'])
    et = time.perf_counter() - st
    # BUGFIX: the original printed a tuple -- print((msg, et)) -- which is
    # a leftover of automatic py2->py3 conversion; use separate arguments.
    print('E-field - Elapsed time (s):', et)

    # check against old elecsus
    from elecsus.libs import spectra as old_spec
    st = time.perf_counter()
    TF_old = old_spec.get_spectra(d, p_dict, outputs=['Iy'])
    et = time.perf_counter() - st
    print('Old elecsus - Elapsed time (s):', et)

    index = 0  # Iy
    # NOTE(review): plt is assumed to be matplotlib.pyplot imported at the
    # top of this module -- confirm.
    fig = plt.figure("Faraday comparison")
    ax1 = fig.add_subplot(111)
    ax1.plot(d, TF[index], 'r', lw=2, label='Faraday')
    ax1.plot(d, TF_old[0], 'k--', lw=2, label='Vanilla ElecSus')
    ax1.legend(loc=0)
    ax1.set_xlabel('Detuning (MHz)')
    ax1.set_ylabel('Transmission')
    plt.show()
def test2():
### 2. Fig 4/5 of General.... paper
### Voigt Filter
d = np.linspace(-65000,65000,1500)
#Voigt
## 700 G, 84 | |
from __future__ import annotations
import json
import os
import shutil
import subprocess
import tempfile
import uuid
from abc import ABC, abstractmethod
from typing import Any, Union
from urllib.error import HTTPError
from urllib.request import urlopen, urlretrieve
import warnings
import meerkat as mk
import pandas as pd
import yaml
from meerkat.tools.lazy_loader import LazyLoader
from dcbench.common.modeling import Model
from dcbench.config import config
storage = LazyLoader("google.cloud.storage")
torch = LazyLoader("torch")
def _upload_dir_to_gcs(local_path: str, gcs_path: str, bucket: "storage.Bucket"):
assert os.path.isdir(local_path)
with tempfile.TemporaryDirectory() as tmp_dir:
tarball_path = os.path.join(tmp_dir, "run.tar.gz")
subprocess.call(
[
"tar",
"-czf",
tarball_path,
"-C",
local_path,
".",
]
)
remote_path = gcs_path + ".tar.gz"
blob = bucket.blob(remote_path)
blob.upload_from_filename(tarball_path)
def _url_exists(url: str):
    """Return True iff a request to ``url`` answers with HTTP status 200.

    Returns False when the server responds with an HTTP error status.

    Fix: the response is now closed via a context manager; the original
    leaked the open connection until garbage collection.
    """
    try:
        with urlopen(url) as response:
            return response.getcode() == 200
    except HTTPError:
        return False
def urlretrieve_with_retry(url: str, filename: str, max_retries: int = 5):
    """Download ``url`` to ``filename``, retrying up to ``max_retries`` times.

    Args:
        url: The URL to download.
        filename: Destination path on the local filesystem.
        max_retries: Maximum number of attempts before giving up.

    Raises:
        RuntimeError: if every attempt fails. The last underlying
            exception is chained as the cause (the original discarded it).
    """
    last_exception = None
    for attempt in range(max_retries):
        try:
            urlretrieve(url, filename)
            return
        except Exception as e:  # any transport error warrants a retry
            last_exception = e
            warnings.warn(
                f"Failed to download {url}: {e}\n"
                # 1-based so the message reads "1/5" on the first retry
                f"Retrying {attempt + 1}/{max_retries}..."
            )
    raise RuntimeError(
        f"Failed to download {url} after {max_retries} retries."
    ) from last_exception
class Artifact(ABC):
    """A pointer to a unit of data (e.g. a CSV file) that is stored locally on
    disk and/or in a remote GCS bucket.

    In DCBench, each artifact is identified by a unique artifact ID. The only
    state that the :class:`Artifact` object must maintain is this ID
    (``self.id``). The object does not hold the actual data in memory, making
    it lightweight.

    :class:`Artifact` is an abstract base class. Different types of artifacts
    (e.g. a CSV file vs. a PyTorch model) have corresponding subclasses of
    :class:`Artifact` (e.g. :class:`CSVArtifact`, :class:`ModelArtifact`).

    .. Tip::
        The vast majority of users should not call the :class:`Artifact`
        constructor directly. Instead, they should either create a new
        artifact by calling :meth:`from_data` or load an existing artifact
        from a YAML file.

    The class provides utilities for accessing and managing a unit of data:

    - Synchronizing the local and remote copies of a unit of data:
      :meth:`upload`, :meth:`download`
    - Loading the data into memory: :meth:`load`
    - Creating new artifacts from in-memory data: :meth:`from_data`
    - Serializing the pointer artifact so it can be shared:
      :meth:`to_yaml`, :meth:`from_yaml`

    Args:
        artifact_id (str): The unique artifact ID.

    Attributes:
        id (str): The unique artifact ID.
    """

    @classmethod
    def from_data(
        cls, data: Union[mk.DataPanel, pd.DataFrame, Model], artifact_id: str = None
    ) -> Artifact:
        """Create a new artifact object from raw data and save the artifact to
        disk in the local directory specified in the config file at
        ``config.local_dir``.

        .. tip::
            When called on the abstract base class :class:`Artifact`, this
            method will infer which artifact subclass to use. If you know
            exactly which artifact class you'd like to use (e.g.
            :class:`DataPanelArtifact`), you should call this classmethod on
            that subclass.

        Args:
            data (Union[mk.DataPanel, pd.DataFrame, Model]): The raw data
                that will be saved to disk.
            artifact_id (str, optional): Defaults to None, in which case a
                UUID will be generated and used.

        Returns:
            Artifact: A new artifact pointing to the data that was saved to
            disk.
        """
        if artifact_id is None:
            artifact_id = uuid.uuid4().hex

        # TODO (): At some point we should probably enforce that ids are unique
        if cls is Artifact:
            # called on the abstract base class: infer the concrete
            # subclass from the type of the data
            if isinstance(data, mk.DataPanel):
                cls = DataPanelArtifact
            elif isinstance(data, pd.DataFrame):
                cls = CSVArtifact
            elif isinstance(data, Model):
                cls = ModelArtifact
            elif isinstance(data, (list, dict)):
                cls = YAMLArtifact
            else:
                raise ValueError(
                    f"No Artifact in dcbench for object of type {type(data)}"
                )
        artifact = cls(artifact_id=artifact_id)
        artifact.save(data)
        return artifact

    @property
    def local_path(self) -> str:
        """The local path to the artifact in the local directory specified in
        the config file at ``config.local_dir``."""
        return os.path.join(config.local_dir, self.path)

    @property
    def remote_url(self) -> str:
        """The URL of the artifact in the remote GCS bucket specified in the
        config file at ``config.public_bucket_name``."""
        # directory artifacts are stored remotely as tarballs
        return os.path.join(
            config.public_remote_url, self.path + (".tar.gz" if self.isdir else "")
        )

    @property
    def is_downloaded(self) -> bool:
        """Checks if the artifact is downloaded to the local directory
        specified in the config file at ``config.local_dir``.

        Returns:
            bool: True if the artifact is downloaded, False otherwise.
        """
        return os.path.exists(self.local_path)

    @property
    def is_uploaded(self) -> bool:
        """Checks if the artifact is uploaded to the GCS bucket specified in
        the config file at ``config.public_bucket_name``.

        Returns:
            bool: True if the artifact is uploaded, False otherwise.
        """
        return _url_exists(self.remote_url)

    def upload(self, force: bool = False, bucket: "storage.Bucket" = None) -> bool:
        """Uploads the artifact to a GCS bucket at ``self.path``, which by
        default is just the artifact ID with the default extension.

        Args:
            force (bool, optional): Force upload even if the artifact is
                already uploaded. Defaults to False.
            bucket (storage.Bucket, optional): The GCS bucket to which the
                artifact is uploaded. Defaults to None, in which case the
                artifact is uploaded to the bucket specified in the config
                file at ``config.public_bucket_name``.

        Returns:
            bool: True if the artifact was uploaded, False otherwise.
        """
        if not os.path.exists(self.local_path):
            raise ValueError(
                f"Could not find Artifact to upload at '{self.local_path}'. "
                "Are you sure it is stored locally?"
            )
        if self.is_uploaded and not force:
            warnings.warn(
                f"Artifact {self.id} is not being re-uploaded."
                "Set `force=True` to force upload."
            )
            return False

        if bucket is None:
            # lazily create a client for the default public bucket
            client = storage.Client()
            bucket = client.get_bucket(config.public_bucket_name)

        if self.isdir:
            _upload_dir_to_gcs(
                local_path=self.local_path,
                bucket=bucket,
                gcs_path=self.path,
            )
        else:
            blob = bucket.blob(self.path)
            blob.upload_from_filename(self.local_path)
            # NOTE(review): Cache-Control is normally set via
            # blob.cache_control rather than blob.metadata -- confirm this
            # actually disables edge caching as intended.
            blob.metadata = {"Cache-Control": "private, max-age=0, no-transform"}
            blob.patch()
        return True

    def download(self, force: bool = False) -> bool:
        """Downloads the artifact from the GCS bucket to the local directory
        specified in the config file at ``config.local_dir``. The relative
        path to the artifact within that directory is ``self.path``, which by
        default is just the artifact ID with the default extension.

        Args:
            force (bool, optional): Force download even if the artifact is
                already downloaded. Defaults to False.

        Returns:
            bool: True if the artifact was downloaded, False otherwise.

        .. warning::
            By default, the GCS cache on public urls has a max-age up to an
            hour. Therefore, when updating existing artifacts, changes may
            not be immediately reflected in subsequent downloads.
            See `here
            <https://stackoverflow.com/questions/62897641/google-cloud-storage-public-ob
            ject-url-e-super-slow-updating>`_
            for more details.
        """
        if self.is_downloaded and not force:
            return False

        if self.isdir:
            # directory artifacts: fetch the tarball, unpack, clean up
            if self.is_downloaded:
                shutil.rmtree(self.local_path)
            os.makedirs(self.local_path, exist_ok=True)
            tarball_path = self.local_path + ".tar.gz"
            urlretrieve_with_retry(self.remote_url, tarball_path)
            subprocess.call(["tar", "-xzf", tarball_path, "-C", self.local_path])
            os.remove(tarball_path)
        else:
            if self.is_downloaded:
                os.remove(self.local_path)
            os.makedirs(os.path.dirname(self.local_path), exist_ok=True)
            urlretrieve_with_retry(self.remote_url, self.local_path)
        return True

    # file extension used when materializing the artifact; subclasses override
    DEFAULT_EXT: str = ""
    # whether the artifact is a directory (True) or a single file (False)
    isdir: bool = False

    @abstractmethod
    def load(self) -> Any:
        """Load the artifact into memory from disk at ``self.local_path``."""
        raise NotImplementedError()

    @abstractmethod
    def save(self, data: Any) -> None:
        """Save data to disk at ``self.local_path``."""
        raise NotImplementedError()

    def __init__(self, artifact_id: str, **kwargs) -> None:
        """
        .. warning::
            In general, you should not instantiate an Artifact directly.
            Instead, use :meth:`Artifact.from_data` to create an Artifact.
        """
        # relative path of the artifact within the local dir / GCS bucket
        self.path = f"{artifact_id}.{self.DEFAULT_EXT}"
        self.id = artifact_id

        os.makedirs(os.path.dirname(self.local_path), exist_ok=True)
        super().__init__()

    @staticmethod
    def from_yaml(loader: yaml.Loader, node):
        """This function is called by the YAML loader to convert a YAML node
        into an :class:`Artifact` object.

        It should not be called directly.
        """
        data = loader.construct_mapping(node, deep=True)
        return data["class"](artifact_id=data["artifact_id"])

    @staticmethod
    def to_yaml(dumper: yaml.Dumper, data: Artifact):
        """This function is called by the YAML dumper to convert an
        :class:`Artifact` object into a YAML node.

        It should not be called directly.
        """
        data = {
            "artifact_id": data.id,
            "class": type(data),
        }
        node = dumper.represent_mapping("!Artifact", data)
        return node

    def _ensure_downloaded(self):
        """Raise a ValueError if the artifact is not available locally."""
        if not self.is_downloaded:
            raise ValueError(
                "Cannot load `Artifact` that has not been downloaded. "
                "First call `artifact.download()`."
            )
# Register YAML (de)serialization for Artifact and every subclass so that
# artifacts embedded in YAML documents round-trip via the "!Artifact" tag.
yaml.add_multi_representer(Artifact, Artifact.to_yaml)
yaml.add_constructor("!Artifact", Artifact.from_yaml)
class CSVArtifact(Artifact):
    """Artifact stored as a CSV file; list-valued cells are JSON-encoded."""

    DEFAULT_EXT: str = "csv"

    def load(self) -> pd.DataFrame:
        """Read the CSV back into a DataFrame, decoding JSON-encoded cells."""
        self._ensure_downloaded()
        frame = pd.read_csv(self.local_path, index_col=0)

        def _decode_cell(cell):
            # Cells that held lists were serialized as JSON strings; decode
            # them back, leaving ordinary strings untouched.
            if not isinstance(cell, str):
                return cell
            try:
                return json.loads(cell)
            except ValueError:
                return cell

        return frame.applymap(_decode_cell)

    def save(self, data: pd.DataFrame) -> None:
        """Write *data* to ``self.local_path`` as CSV."""
        return data.to_csv(self.local_path)
class YAMLArtifact(Artifact):
    """Artifact stored as a YAML document."""

    DEFAULT_EXT: str = "yaml"

    def load(self) -> Any:
        """Parse and return the YAML document at ``self.local_path``."""
        self._ensure_downloaded()
        # Context manager so the handle is closed promptly (the previous
        # bare open(...) call leaked the file object).
        with open(self.local_path) as f:
            return yaml.load(f, Loader=yaml.FullLoader)

    def save(self, data: Any) -> None:
        """Serialize *data* as YAML to ``self.local_path``."""
        with open(self.local_path, "w") as f:
            return yaml.dump(data, f)
class DataPanelArtifact(Artifact):
    """Artifact wrapping a Meerkat ``DataPanel`` stored as a directory.

    Directory artifacts (``isdir = True``) are transferred as tarballs by
    the base class's download logic.
    """

    DEFAULT_EXT: str = "mk"
    isdir: bool = True

    def load(self) -> pd.DataFrame:
        # NOTE(review): annotated pd.DataFrame but mk.DataPanel.read returns
        # a Meerkat DataPanel — confirm the intended return type.
        self._ensure_downloaded()
        return mk.DataPanel.read(self.local_path)

    def save(self, data: mk.DataPanel) -> None:
        # Writes the DataPanel directory tree to self.local_path.
        return data.write(self.local_path)
class VisionDatasetArtifact(DataPanelArtifact):
DEFAULT_EXT: str = "mk"
isdir: bool = True
COLUMN_SUBSETS = {
"celeba": ["id", "image", "split"],
"imagenet": ["id", "image", "name", "synset"],
}
@classmethod
def from_name(cls, name: str):
if name == "celeba":
dp = mk.datasets.get(name, dataset_dir=config.celeba_dir)
elif name == "imagenet":
dp = mk.datasets.get(name, dataset_dir=config.imagenet_dir, download=False)
else:
raise ValueError(f"No dataset named '{name}' supported | |
from __future__ import print_function
import sys, wx, wx.lib, wx.combo, os, re, pickle, traceback, json
from wx.lib.scrolledpanel import ScrolledPanel
from types import *
# Quisk will alter quisk_conf_defaults to include the user's config file.
import quisk_conf_defaults as conf
import _quisk as QS
# Settings is [
# 0: radio_requested, a string radio name or "Ask me" or "ConfigFileRadio"
# 1: radio in use and last used, a string radio name or "ConfigFileRadio"
# 2: list of radio names
# 3: parallel list of radio dicts. These are all the parameters for the corresponding radio. In
# general, they are a subset of all the parameters listed in self.sections and self.receiver_data[radio_name].
# ]
# radio_dict is a dictionary of variable names and text values for each radio including radio ConfigFileRadio.
# Only variable names from the specified radio and all sections are included.
# local_conf is the single instance of class Configuration
class Configuration:
    def __init__(self, app, AskMe):  # Called first
        """Set up module-level state, load saved settings and pick a radio.

        AskMe forces the radio-selection dialog even when a radio was saved.
        """
        # Shared module-level state used throughout the configuration UI.
        global application, local_conf, Settings, noname_enable, platform_ignore, platform_accept
        # Settings = [requested radio, radio in use, radio names, radio dicts]
        Settings = ["ConfigFileRadio", "ConfigFileRadio", [], []]
        application = app
        local_conf = self
        noname_enable = []
        # Parameters prefixed "win_"/"lin_" apply to one platform only.
        if sys.platform == 'win32':
            platform_ignore = 'lin_'
            platform_accept = 'win_'
        else:
            platform_accept = 'lin_'
            platform_ignore = 'win_'
        self.sections = []
        self.receiver_data = []
        # Settings persist as JSON; fall back to the default location.
        self.StatePath = conf.settings_file_path
        if not self.StatePath:
            self.StatePath = os.path.join(conf.DefaultConfigDir, "quisk_settings.json")
        self.ReadState()
        if AskMe or Settings[0] == "Ask me":
            # Ask the user which radio to start with.
            choices = Settings[2] + ["ConfigFileRadio"]
            dlg = wx.SingleChoiceDialog(None, "", "Start Quisk with this Radio",
                choices, style=wx.DEFAULT_FRAME_STYLE|wx.OK|wx.CANCEL)
            try:
                n = choices.index(Settings[1])  # Set default to last used radio
            except:
                pass
            else:
                dlg.SetSelection(n)
            ok = dlg.ShowModal()
            if ok != wx.ID_OK:
                # User cancelled the startup dialog: quit Quisk entirely.
                sys.exit(0)
            select = dlg.GetStringSelection()
            dlg.Destroy()
            if Settings[1] != select:
                Settings[1] = select
                self.settings_changed = True
        else:
            Settings[1] = Settings[0]
        if Settings[1] == "ConfigFileRadio":
            # The config-file pseudo radio is never persisted; add it fresh.
            Settings[2].append("ConfigFileRadio")
            Settings[3].append({})
        self.ParseConfig()
def UpdateConf(self): # Called second to update the configuration for the selected radio
if Settings[1] == "ConfigFileRadio":
return
radio_dict = self.GetRadioDict()
radio_type = radio_dict['hardware_file_type']
# Fill in required values
if radio_type == "SdrIQ":
radio_dict["use_sdriq"] = '1'
else:
radio_dict["use_sdriq"] = '0'
if radio_type not in ("HiQSDR", "Hermes", "Red Pitaya", "Odyssey"):
radio_dict["use_rx_udp"] = '0'
# fill in conf from our configuration data; convert text items to Python objects
errors = ''
for k, v in radio_dict.items():
if k == 'favorites_file_path': # A null string is equivalent to "not entered"
if not v.strip():
continue
try:
fmt = self.format4name[k]
except:
errors = errors + "Ignore obsolete parameter %s\n" % k
del radio_dict[k]
self.settings_changed = True
continue
k4 = k[0:4]
if k4 == platform_ignore:
continue
elif k4 == platform_accept:
k = k[4:]
fmt4 = fmt[0:4]
if fmt4 not in ('dict', 'list'):
i1 = v.find('#')
if i1 > 0:
v = v[0:i1]
try:
if fmt4 == 'text': # Note: JSON returns Unicode strings !!!
setattr(conf, k, str(v))
elif fmt4 in ('dict', 'list'):
setattr(conf, k, v)
elif fmt4 == 'inte':
setattr(conf, k, int(v, base=0))
elif fmt4 == 'numb':
setattr(conf, k, float(v))
elif fmt4 == 'bool':
if v == "True":
setattr(conf, k, True)
else:
setattr(conf, k, False)
elif fmt4 == 'rfil':
pass
else:
print ("Unknown format for", k, fmt)
except:
errors = errors + "Failed to set %s to %s using format %s\n" % (k, v, fmt)
#traceback.print_exc()
if conf.color_scheme == 'B':
conf.__dict__.update(conf.color_scheme_B)
elif conf.color_scheme == 'C':
conf.__dict__.update(conf.color_scheme_C)
if errors:
dlg = wx.MessageDialog(None, errors,
'Update Settings', wx.OK|wx.ICON_ERROR)
ret = dlg.ShowModal()
dlg.Destroy()
def NormPath(self, path): # Convert between Unix and Window file paths
if sys.platform == 'win32':
path = path.replace('/', '\\')
else:
path = path.replace('\\', '/')
return path
def GetHardware(self): # Called third to open the hardware file
if Settings[1] == "ConfigFileRadio":
return False
path = self.GetRadioDict()["hardware_file_name"]
path = self.NormPath(path)
if not os.path.isfile(path):
dlg = wx.MessageDialog(None,
"Failure for hardware file %s!" % path,
'Hardware File', wx.OK|wx.ICON_ERROR)
ret = dlg.ShowModal()
dlg.Destroy()
path = 'quisk_hardware_model.py'
dct = {}
dct.update(conf.__dict__) # make items from conf available
if dct.has_key("Hardware"):
del dct["Hardware"]
if dct.has_key('quisk_hardware'):
del dct["quisk_hardware"]
exec(compile(open(path).read(), path, 'exec'), dct)
if dct.has_key("Hardware"):
application.Hardware = dct['Hardware'](application, conf)
return True
return False
def Initialize(self): # Called fourth to fill in our ConfigFileRadio radio from conf
if Settings[1] == "ConfigFileRadio":
radio_dict = self.GetRadioDict("ConfigFileRadio")
typ = self.GuessType()
radio_dict['hardware_file_type'] = typ
all_data = []
all_data = all_data + self.GetReceiverData(typ)
for name, sdata in self.sections:
all_data = all_data + sdata
for data_name, text, fmt, help_text, values in all_data:
data_name4 = data_name[0:4]
if data_name4 == platform_ignore:
continue
elif data_name4 == platform_accept:
conf_name = data_name[4:]
else:
conf_name = data_name
try:
if fmt in ("dict", "list"):
radio_dict[data_name] = getattr(conf, conf_name)
else:
radio_dict[data_name] = str(getattr(conf, conf_name))
except:
if data_name == 'playback_rate':
pass
else:
print ('No config file value for', data_name)
def GetWidgets(self, app, hardware, conf, frame, gbs, vertBox): # Called fifth
if Settings[1] == "ConfigFileRadio":
return False
path = self.GetRadioDict()["widgets_file_name"]
path = self.NormPath(path)
if os.path.isfile(path):
dct = {}
dct.update(conf.__dict__) # make items from conf available
exec(compile(open(path).read(), path, 'exec'), dct)
if dct.has_key("BottomWidgets"):
app.bottom_widgets = dct['BottomWidgets'](app, hardware, conf, frame, gbs, vertBox)
return True
def OnPageChanging(self, event):
index = event.GetSelection()
if index >= self.radios_page_start:
page = self.notebk.GetPage(index)
page.MakePages()
def AddPages(self, notebk, width): # Called sixth to add pages Help, Radios, all radio names
global win_width
win_width = width
self.notebk = notebk
page = ConfigHelp(notebk)
notebk.AddPage(page, "Help with Radios")
self.radio_page = Radios(notebk)
notebk.AddPage(self.radio_page, "Radios")
self.radios_page_start = notebk.GetPageCount()
if sys.platform == 'win32': # On Windows, PAGE_CHANGING doesn't work
notebk.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanging)
else:
notebk.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self.OnPageChanging)
for name in Settings[2]:
page = RadioNotebook(notebk, name)
if name == Settings[1]:
notebk.AddPage(page, "*%s*" % name)
else:
notebk.AddPage(page, name)
def GuessType(self):
udp = conf.use_rx_udp
if conf.use_sdriq:
return 'SdrIQ'
elif udp == 1:
return 'HiQSDR'
elif udp == 2:
return 'HiQSDR'
elif udp == 10:
return 'Hermes'
elif udp > 0:
return 'HiQSDR'
return 'SoftRock USB'
def AddRadio(self, radio_name, typ):
radio_dict = {}
radio_dict['hardware_file_type'] = typ
Settings[2].append(radio_name)
Settings[3].append(radio_dict)
for data_name, text, fmt, help_text, values in self.GetReceiverData(typ):
radio_dict[data_name] = values[0]
for name, data in self.sections:
for data_name, text, fmt, help_text, values in data:
radio_dict[data_name] = values[0]
page = RadioNotebook(self.notebk, radio_name)
page.MakePages()
self.notebk.AddPage(page, radio_name)
return True
def RenameRadio(self, old, new):
index = Settings[2].index(old)
n = self.radios_page_start + index
if old == Settings[1]:
self.notebk.SetPageText(n, "*%s*" % new)
else:
self.notebk.SetPageText(n, new)
Settings[2][index] = new
self.notebk.GetPage(n).NewName(new)
if old == "ConfigFileRadio":
for ctrl in noname_enable:
ctrl.Enable()
return True
def DeleteRadio(self, name):
index = Settings[2].index(name)
n = self.radios_page_start + index
self.notebk.DeletePage(n)
del Settings[2][index]
del Settings[3][index]
return True
def GetRadioDict(self, radio_name=None): # None radio_name means the current radio
if radio_name:
index = Settings[2].index(radio_name)
else: # index of radio in use
index = Settings[2].index(Settings[1])
return Settings[3][index]
def GetSectionData(self, section_name):
for sname, data in self.sections:
if sname == section_name:
return data
return None
def GetReceiverData(self, receiver_name):
for rxname, data in self.receiver_data:
if rxname == receiver_name:
return data
return None
def GetReceiverDatum(self, receiver_name, item_name):
for rxname, data in self.receiver_data:
if rxname == receiver_name:
for data_name, text, fmt, help_text, values in data:
if item_name == data_name:
return values[0]
break
return ''
def ReceiverHasName(self, receiver_name, item_name):
for rxname, data in self.receiver_data:
if rxname == receiver_name:
for data_name, text, fmt, help_text, values in data:
if item_name == data_name:
return True
break
return False
def ReadState(self):
self.settings_changed = False
global Settings
try:
fp = open(self.StatePath, "rb")
except:
return
try:
Settings = json.load(fp)
except:
traceback.print_exc()
fp.close()
try: # Do not save settings for radio ConfigFileRadio
index = Settings[2].index("ConfigFileRadio")
except ValueError:
pass
else:
del Settings[2][index]
del Settings[3][index]
for sdict in Settings[3]: # Python None is saved as "null"
if sdict.has_key("tx_level"):
if sdict["tx_level"].has_key("null"):
v = sdict["tx_level"]["null"]
sdict["tx_level"][None] = v
del sdict["tx_level"]["null"]
def SaveState(self):
if not self.settings_changed:
return
try:
fp = open(self.StatePath, "wb")
except:
traceback.print_exc()
return
json.dump(Settings, fp, indent=2)
fp.close()
self.settings_changed = False
def ParseConfig(self):
# ParseConfig() fills self.sections, self.receiver_data, and
# self.format4name with the items that Configuration understands.
# Dicts and lists are Python objects. All other items are text, not Python objects.
#
# Sections start with 16 #, section name
# self.sections is a list of [section_name, section_data]
# section_data is a list of [data_name, text, fmt, help_text, values]
# Receiver sections start with 16 #, "Receivers ", receiver name, explain
# self.receiver_data is a list of [receiver_name, receiver_data]
# receiver_data is a list of [data_name, text, fmt, help_text, values]
| |
coefficient in Morisons equation (-) [used only when TwrLdMod=1]')
# WAVES
WtrDens = Float(desc='Water density (kg/m^3)')
WtrDpth = Float(desc='Water depth (meters)')
WaveMod = Enum(0, (0,1,2,3,4), desc='Incident wave kinematics model {0: none=still water, 1: plane progressive (regular), 2: JONSWAP/Pierson-Moskowitz spectrum (irregular), 3: user-defind spectrum from routine UserWaveSpctrm (irregular), 4: GH Bladed wave data} (switch)')
WaveStMod = Enum(0, (0,1,2,3), desc='Model for stretching incident wave kinematics to instantaneous free surface {0: none=no stretching, 1: vertical stretching, 2: extrapolation stretching, 3: Wheeler stretching} (switch) [unused when WaveMod=0]')
WaveTMax = Float(desc='Analysis time for incident wave calculations (sec) [unused when WaveMod=0] [determines WaveDOmega=2Pi/WaveTMax in the IFFT]')
WaveDT = Float(desc='Time step for incident wave calculations (sec) [unused when WaveMod=0] [0.1<=WaveDT<=1.0 recommended] [determines WaveOmegaMax=Pi/WaveDT in the IFFT]')
WaveHs = Float(desc='Significant wave height of incident waves (meters) [used only when WaveMod=1 or 2]')
WaveTp = Float(desc='Peak spectral period of incident waves (sec) [used only when WaveMod=1 or 2]')
WavePkShp = Float(desc='Peak shape parameter of incident wave spectrum (-) or DEFAULT (unquoted string) [used only when WaveMod=2] [use 1.0 for Pierson-Moskowitz]')
WaveDir = Float(desc='Incident wave propagation heading direction (degrees) [unused when WaveMod=0 or 4]')
WaveSeed1 = Int(desc='First random seed of incident waves [-2147483648 to 2147483647] (-) [unused when WaveMod=0 or 4]')
WaveSeed2 = Int(desc='Second random seed of incident waves [-2147483648 to 2147483647] (-) [unused when WaveMod=0 or 4]')
GHWvFile = Str(desc='Root name of GH Bladed files containing wave data (quoted string) [used only when WaveMod=4]')
# CURRENT
CurrMod = Float(desc='Current profile model {0: none=no current, 1: standard, 2: user-defined from routine UserCurrent} (switch)')
CurrSSV0 = Float(desc='Sub-surface current velocity at still water level (m/s) [used only when CurrMod=1]')
CurrSSDir = Float(desc='Sub-surface current heading direction (degrees) or DEFAULT (unquoted string) [used only when CurrMod=1]')
CurrNSRef = Float(desc='Near-surface current reference depth (meters) [used only when CurrMod=1]')
CurrNSV0 = Float(desc='Near-surface current velocity at still water level (m/s) [used only when CurrMod=1]')
CurrNSDir = Float(desc='Near-surface current heading direction (degrees) [used only when CurrMod=1]')
CurrDIV = Float(desc='Depth-independent current velocity (m/s) [used only when CurrMod=1]')
CurrDIDir = Float(desc='Depth-independent current heading direction (degrees) [used only when CurrMod=1]')
# OUTPUT (CONT)
NWaveKin = Int(desc='Number of points where the wave kinematics can be output [0 to 9] (-)')
WaveKinNd = Int(desc='List of tower nodes that have wave kinematics sensors [1 to TwrNodes] (-) [unused if NWaveKin=0]')
class FstModel(VariableTree):
# FAST Wind Information
simple_wind_vt = VarTree(SimpleWind(), desc='Simple wind input model')
# FAST Hydro Information
platform_vt = VarTree(FstPlatformModel(), desc='Model of platform foundation')
# FAST Aerodynamic Information
aero_vt = VarTree(ADAero(), desc='Aerodynamic settings, blade design and airfoils')
# FAST Structural Information
fst_blade_vt = VarTree(FstBladeStrucGeometry(), desc='Structural information on the blade and properties')
fst_tower_vt = VarTree(FstTowerStrucGeometry(), desc='Structural information on the tower and properties')
# FAST Outputs
fst_output_vt = VarTree(FstOutput(), desc='List of output channels')
# FAST Inputs
description = Str(desc='description of platform')
Echo = Bool(desc='Echo input data to "echo.out" (flag)')
ADAMSPrep = Enum(1, (1,2,3), desc='ADAMS preprocessor mode {1: Run FAST, 2: use FAST as a preprocessor to create an ADAMS model, 3: do both} (switch)')
AnalMode = Enum(1, (1,2), desc='Analysis mode {1: Run a time-marching simulation, 2: create a periodic linearized model} (switch)')
NumBl = Int(desc='Number of blades (-)')
TMax = Float(desc='Total run time (s)')
DT = Float(desc='Integration time step (s)')
# TURBINE CONTROL
YCMode = Enum(0, (0,1,2), desc='Yaw control mode {0: none, 1: user-defined from routine UserYawCont, 2: user-defined from Simulink} (switch)')
TYCOn = Float(desc='Time to enable active yaw control (s) [unused when YCMode=0]')
PCMode = Enum(0, (0,1,2), desc='Pitch control mode {0: none, 1: user-defined from routine PitchCntrl, 2: user-defined from Simulink} (switch)')
TPCOn = Float(desc='Time to enable active pitch control (s) [unused when PCMode=0]')
VSContrl = Enum(0, (0,1,2,3), desc='Variable-speed control mode {0: none, 1: simple VS, 2: user-defined from routine UserVSCont, 3: user-defined from Simulink} (switch)')
VS_RtGnSp = Float(desc='Rated generator speed for simple variable-speed generator control (HSS side) (rpm) [used only when VSContrl=1]')
VS_RtTq = Float(desc='Rated generator torque/constant generator torque in Region 3 for simple variable-speed generator control (HSS side) (N-m) [used only when VSContrl=1]')
VS_Rgn2K = Float(desc='Generator torque constant in Region 2 for simple variable-speed generator control (HSS side) (N-m/rpm^2) [used only when VSContrl=1]')
VS_SlPc = Float(desc='Rated generator slip percentage in Region 2 1/2 for simple variable-speed generator control (%) [used only when VSContrl=1]')
GenModel = Enum(1, (1,2,3), desc='Generator model {1: simple, 2: Thevenin, 3: user-defined from routine UserGen} (switch) [used only when VSContrl=0]')
GenTiStr = Bool(desc='Method to start the generator {T: timed using TimGenOn, F: generator speed using SpdGenOn} (flag)')
GenTiStp = Bool(desc='Method to stop the generator {T: timed using TimGenOf, F: when generator power = 0} (flag)')
SpdGenOn = Float(desc='Generator speed to turn on the generator for a startup (HSS speed) (rpm) [used only when GenTiStr=False]')
TimGenOn = Float(desc='Time to turn on the generator for a startup (s) [used only when GenTiStr=True]')
TimGenOf = Float(desc='Time to turn off the generator (s) [used only when GenTiStp=True]')
HSSBrMode = Enum(1, (1,2), desc='HSS brake model {1: simple, 2: user-defined from routine UserHSSBr} (switch)')
THSSBrDp = Float(desc='Time to initiate deployment of the HSS brake (s)')
TiDynBrk = Float(desc='Time to initiate deployment of the dynamic generator brake [CURRENTLY IGNORED] (s)')
TTpBrDp1 = Float(desc='Time to initiate deployment of tip brake 1 (s)')
TTpBrDp2 = Float(desc='Time to initiate deployment of tip brake 2 (s)')
TTpBrDp3 = Float(desc='Time to initiate deployment of tip brake 3 (s) [unused for 2 blades]')
TBDepISp1 = Float(desc='Deployment-initiation speed for the tip brake on blade 1 (rpm)')
TBDepISp2 = Float(desc='Deployment-initiation speed for the tip brake on blade 2 (rpm)')
TBDepISp3 = Float(desc='Deployment-initiation speed for the tip brake on blade 3 (rpm) [unused for 2 blades]')
TYawManS = Float(desc='Time to start override yaw maneuver and end standard yaw control (s)')
TYawManE = Float(desc='Time at which override yaw maneuver reaches final yaw angle (s)')
NacYawF = Float(desc='Final yaw angle for override yaw maneuvers (degrees)')
TPitManS1 = Float(desc='Time to start override pitch maneuver for blade 1 and end standard pitch control (s)')
TPitManS2 = Float(desc='Time to start override pitch maneuver for blade 2 and end standard pitch control (s)')
TPitManS3 = Float(desc='Time to start override pitch maneuver for blade 3 and end standard pitch control (s) [unused for 2 blades]')
TPitManE1 = Float(desc='Time at which override pitch maneuver for blade 1 reaches final pitch (s)')
TPitManE2 = Float(desc='Time at which override pitch maneuver for blade 2 reaches final pitch (s)')
TPitManE3 = Float(desc='Time at which override pitch maneuver for blade 3 reaches final pitch (s) [unused for 2 blades]')
BlPitch1 = Float(desc='Blade 1 initial pitch (degrees)')
BlPitch2 = Float(desc='Blade 2 initial pitch (degrees)')
BlPitch3 = Float(desc='Blade 3 initial pitch (degrees) [unused for 2 blades]')
B1PitchF1 = Float(desc='Blade 1 final pitch for pitch maneuvers (degrees)')
B1PitchF2 = Float(desc='Blade 2 final pitch for pitch maneuvers (degrees)')
B1PitchF3 = Float(desc='Blade 3 final pitch for pitch maneuvers (degrees) [unused for 2 blades]')
# ENVIRONMENTAL CONDITIONS
Gravity = Float(desc='Gravitational acceleration (m/s^2)')
# FEATURE FLAGS
FlapDOF1 = Bool(desc='First flapwise blade mode DOF (flag)')
FlapDOF2 = Bool(desc=' Second flapwise blade mode DOF (flag)')
EdgeDOF = Bool(desc='First edgewise blade mode DOF (flag)')
TeetDOF = Bool(desc='Rotor-teeter DOF (flag) [unused for 3 blades]')
DrTrDOF = Bool(desc='Drivetrain rotational-flexibility DOF (flag)')
GenDOF = Bool(desc='Generator DOF (flag)')
YawDOF = Bool(desc='Yaw DOF (flag)')
TwFADOF1 = Bool(desc='First fore-aft tower bending-mode DOF (flag)')
TwFADOF2 = Bool(desc='Second fore-aft tower bending-mode DOF (flag)')
TwSSDOF1 = Bool(desc='First side-to-side tower bending-mode DOF (flag)')
TwSSDOF2 = Bool(desc='Second side-to-side tower bending-mode DOF (flag)')
CompAero = Bool(desc='Compute aerodynamic forces (flag)')
CompNoise = Bool(desc='Compute aerodynamic noise (flag)')
# INITIAL CONDITIONS
OoPDefl = Float(desc='Initial out-of-plane blade-tip displacement (meters)')
IPDefl = Float(desc='Initial in-plane blade-tip deflection (meters)')
TeetDefl = Float(desc='Initial or fixed teeter angle (degrees) [unused for 3 blades]')
Azimuth = Float(desc='Initial azimuth angle for blade 1 (degrees)')
RotSpeed = Float(desc='Initial or fixed rotor speed (rpm)')
NacYaw = Float(desc='Initial or fixed nacelle-yaw angle (degrees)')
TTDspFA = Float(desc='Initial fore-aft | |
<gh_stars>0
import torch
from sys import platform
if platform != "win32":
from torch_geometric.data import Data
else:
import open3d as o3d
import sys
import os
import io
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits import mplot3d
import matplotlib.patches as mpatches
import numpy as np
import json
from icecream import ic
import importlib
from PIL import Image, ImageDraw
import imageio
sys.path.insert(0, os.path.join(os.environ['ALFWORLD_ROOT']))
from agents.graph_map.utils_graph_map import *#intrinsic_from_fov, load_extrinsic, load_intrinsic, pixel_coord_np, grid, get_cam_coords
from collections import defaultdict, OrderedDict
import glob
class BasicMap(torch.nn.Module):
    """Abstract base for map builders; subclasses implement reset/update."""

    def __init__(self):
        super().__init__()

    def reset_map(self):
        """Clear the map back to its initial state."""
        raise NotImplementedError

    def update_map(self):
        """Integrate a new observation into the map."""
        raise NotImplementedError
class BasicGraphMap(BasicMap):
    """Semantic grid map of shape (S, S, CLASSES) built from depth + labels.

    Observed points are back-projected with the configured camera
    intrinsics and quantized into grid cells of size R.
    """

    def __init__(self, cfg, object_classes_index_to_name):
        super().__init__()
        self.cfg = cfg
        # Maps a class index to a human-readable object name (for legends).
        self.object_classes_index_to_name = object_classes_index_to_name
        '''
        Camera Para
        '''
        # Camera intrinsic matrix derived from the configured FOV.
        self.K = intrinsic_from_fov(cfg.GRAPH_MAP.INTRINSIC_HEIGHT, cfg.GRAPH_MAP.INTRINSIC_WIDTH, cfg.GRAPH_MAP.INTRINSIC_FOV)
        self.PIXEL_COORDS = pixel_coord_np(cfg.GRAPH_MAP.INTRINSIC_WIDTH, cfg.GRAPH_MAP.INTRINSIC_HEIGHT)  # [3, npoints]
        '''
        GraphMap Para
        '''
        self.S = cfg.GRAPH_MAP.GRAPH_MAP_SIZE_S        # grid side length (cells)
        self.CLASSES = cfg.GRAPH_MAP.GRAPH_MAP_CLASSES  # number of class channels
        self.V = cfg.GRAPH_MAP.GRID_COORDS_XY_RANGE_V
        self.R = cfg.GRAPH_MAP.GRID_MIN_SIZE_R         # cell size (world units)
        # The world origin sits at the centre of the grid.
        self.SHIFT_COORDS_HALF_S_TO_MAP = self.S//2
        self.map = np.zeros([self.S, self.S, self.CLASSES]).astype(int)
        # Buffered matplotlib PNG frames, flushed to a GIF by reset_map().
        self.buffer_plt = []

    def reset_map(self):
        # Wipe all cells.
        self.map = np.zeros([self.S, self.S, self.CLASSES]).astype(int)
        '''
        visualize
        '''
        # Flush the buffered per-step figures into an animated GIF, one frame
        # per update, stamping each frame with its index; then clear buffer.
        with imageio.get_writer('./graph_map_BasicGraphMap_{}_{}_{}.gif'.format(self.S, self.CLASSES, self.R), mode='I', fps=3) as writer:
            for i, buf_file in enumerate(self.buffer_plt):
                pil_img = Image.open(buf_file)
                draw = ImageDraw.Draw(pil_img)
                draw.text((0, 0), str(i), (0, 0, 0))
                plt_img = np.array(pil_img)
                writer.append_data(plt_img)
        self.buffer_plt = []

    def update_map(self, depth_image, agent_meta, sgg_result):
        # sgg_result: scene-graph-generation output with "bbox" and "labels".
        bboxs = sgg_result["bbox"]
        labels = sgg_result["labels"]
        # Back-project depth pixels into labelled world coordinates.
        cam_coords = get_cam_coords(
            depth_image,
            agent_meta,
            bboxs, labels,
            self.K, self.PIXEL_COORDS)
        self.put_label_to_map(cam_coords)
        return cam_coords

    def put_label_to_map(self, cam_coords):
        # cam_coords: (x, y, z, labels) parallel arrays.
        max_index = self.S-1
        x, y, z, labels = cam_coords
        # Quantize metric x/z to cell indices, recentred on the grid middle.
        x = np.round(x / self.R).astype(int) + self.SHIFT_COORDS_HALF_S_TO_MAP
        z = np.round(z / self.R).astype(int) + self.SHIFT_COORDS_HALF_S_TO_MAP
        # NOTE(review): only the far edge is clamped; negative indices would
        # wrap around under numpy indexing — confirm points can't land there.
        x[x > max_index] = max_index
        z[z > max_index] = max_index
        labels = labels.astype(int)
        # Store the label id in its own class channel.
        self.map[x, z, labels] = labels

    def visualize_graph_map(self, rgb_img, depth_img, KEEP_DISPLAY=False):
        # One colour per class index.
        colors = cm.rainbow(np.linspace(0, 1, self.CLASSES))
        # Indices of all occupied cells.
        Is, Js, Ks = np.where(self.map != 0)
        label_color, legend_color_to_objectId = [], {}
        for i, j, k in zip(Is, Js, Ks):
            label_color.append(colors[self.map[i, j, k]])
            # Build one legend patch per distinct object class.
            if self.object_classes_index_to_name[self.map[i, j, k]] not in legend_color_to_objectId:
                legend_color_to_objectId[self.object_classes_index_to_name[self.map[i, j, k]]] = \
                    mpatches.Patch(color=colors[self.map[i, j, k]], label=self.object_classes_index_to_name[self.map[i, j, k]])
        # 2D
        # print("2D map figure")
        plt.cla()
        plt.gcf().canvas.mpl_connect('key_release_event',
            lambda event: [plt.close() if event.key == 'escape' else None])
        plt.scatter(Is, Js, s=70, c=label_color, cmap="Set2")
        # Agent position: centre of the grid.
        plt.plot(self.S//2, self.S//2, "ob")
        plt.gca().set_xticks(np.arange(0, self.S, 1))
        plt.gca().set_yticks(np.arange(0, self.S, 1))
        plt.grid(True)
        # Save the figure into an in-memory PNG for the reset_map() GIF.
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        self.buffer_plt.append(buf)
        # import pdb; pdb.set_trace()
        if KEEP_DISPLAY:
            plt.show()
        else:
            pass
            # plt.pause(1.0)
        # 3D (disabled)
        # plt.cla()
        # plt.clf()
        # plt.close()
        # plt.gcf().canvas.mpl_connect('key_release_event',
        #     lambda event: [plt.close() if event.key == 'escape' else None])
        # plt.axes(projection='3d').plot3D(self.S//2, self.S//2, self.CLASSES, "ob")
        # # plt.axes(projection='3d').scatter3D(Is, Js, Ks, s=70, c=label_color, cmap="Set2")
        # plt.axes(projection='3d').scatter3D(Is, Js, Ks, s=70, c=label_color, cmap="Set2")
        # plt.gca().set_xticks(np.arange(0, self.S, 1))
        # plt.gca().set_yticks(np.arange(0, self.S, 1))
        # plt.gca().set_zticks(np.arange(0, self.CLASSES, 1))
        # plt.grid(True)
        # # legend
        # plt.legend(handles=legend_color_to_objectId.values(), scatterpoints=1, loc='lower center', ncol=5, fontsize=8)
        # # store
        # buf = io.BytesIO()
        # plt.savefig(buf, format='png')
        # self.buffer_plt.append(buf)
        # # import pdb; pdb.set_trace()
        # if KEEP_DISPLAY:
        #     plt.show()
        # else:
        #     plt.pause(1.0)
        # self.figure, self.ax = plt.subplots(
        #     3, 1, figsize=(4, 6*16/9),
        #     facecolor="whitesmoke",
        #     num="Thread 0")
        # ax = self.ax
        # ax[0].imshow(rgb_img/255)
        # ax[1].imshow(depth_img/255)
        # pil_img = Image.open(buf)
        # ax[2].imshow(pil_img)
        # 10x10xcfg.GRAPH_MAP.GRAPH_MAP_SIZE_S
        # self.map.activate_nodes = set()
class GraphMap(BasicGraphMap):
    def __init__(self, cfg, priori_features, dim_rgb_feature, device="cuda", object_classes_index_to_name=None):
        '''
        priori_features: dict. priori_obj_cls_name_to_features, rgb_features, attributes
        '''
        super().__init__(cfg, object_classes_index_to_name)
        '''
        Graph Type
        '''
        self.device = device
        self.priori_features = priori_features
        self.GPU = cfg.SCENE_GRAPH.GPU
        self.dim_rgb_feature = dim_rgb_feature
        # Resolve the graph-data class named in the config from the
        # semantic_graph module (e.g. a torch_geometric Data subclass).
        self.graphdata_type = getattr(
            importlib.import_module(
                'agents.semantic_graph.semantic_graph'),
            self.cfg.SCENE_GRAPH.GraphData
        )
        # Precompute per-label feature/attribute tensors, then build the
        # full graph map (node store + fixed edge structure).
        self._set_label_to_features()
        self.init_graph_map()
    def _set_label_to_features(self):
        '''
        word & rgb & attributes features
        '''
        # Row i of the resulting tensors corresponds to label id i: row 0 is
        # the all-zero background entry, followed by one row per object
        # class in priori-feature iteration order.
        features = []
        attributes = []
        # background
        features.append(
            torch.zeros([self.cfg.SCENE_GRAPH.NODE_INPUT_WORD_EMBED_SIZE + self.cfg.SCENE_GRAPH.NODE_INPUT_RGB_FEATURE_SIZE]))
        attributes.append(
            torch.zeros([self.cfg.SCENE_GRAPH.ATTRIBUTE_FEATURE_SIZE]))
        # objects
        for k, word_feature in self.priori_features["priori_obj_cls_name_to_features"].items():
            rgb_feature = torch.tensor(self.priori_features["rgb_features"][str(k)]).float()
            # Node feature = word embedding concatenated with RGB feature.
            feature = torch.cat([word_feature, rgb_feature])
            # [0] is _append_unique_obj_index_to_attribute
            attribute = torch.tensor(self.priori_features["attributes"][str(k)] + [0]).float()
            features.append(feature)
            attributes.append(attribute)
        self.label_to_features = torch.stack(features).to(device=self.device, dtype=torch.float)
        self.label_to_attributes = torch.stack(attributes).to(device=self.device, dtype=torch.float)
        # NOTE(review): compares (1 + num classes) with the number of
        # rgb_features keys — only holds if rgb_features also contains a
        # background entry; confirm.
        assert len(self.label_to_features) == len(self.priori_features["rgb_features"].keys()), "len diff error"
        assert self.label_to_attributes.shape[-1] == self.cfg.SCENE_GRAPH.ATTRIBUTE_FEATURE_SIZE, "len diff error"
def init_graph_map(self):
self.map = self.graphdata_type(
self.priori_features["priori_obj_cls_name_to_features"],
self.GPU,
self.dim_rgb_feature,
device=self.device
)
'''
Create graph map node space
'''
feature_size = self.cfg.SCENE_GRAPH.NODE_INPUT_WORD_EMBED_SIZE + self.cfg.SCENE_GRAPH.NODE_INPUT_RGB_FEATURE_SIZE
attribute_size = self.cfg.SCENE_GRAPH.ATTRIBUTE_FEATURE_SIZE
self.map.x = torch.zeros([self.S * self.S * self.CLASSES, feature_size], device=self.device, dtype=torch.float)
self.map.attributes = torch.zeros([self.S * self.S * self.CLASSES, attribute_size], device=self.device, dtype=torch.float)
for x in range(self.S):
for z in range(self.S):
for label in range(self.CLASSES):
target_node_index = x + self.S * z + self.S * self.S * label
if label < len(self.label_to_features):
self.map.x[target_node_index] = self.label_to_features[label]
self.map.attributes[target_node_index] = self.label_to_attributes[label]
else:
print("label out of bounds: {}. Carefully if sgg predict label out of bounds also".format(label))
self.map.x[target_node_index] = self.label_to_features[0]
self.map.attributes[target_node_index] = self.label_to_attributes[0]
self.map.activate_nodes = set(range(self.S * self.S))
self.map.queue_grid_layer = [0] * self.S * self.S
'''
graph map node relation
'''
edges = []
'''
most top grid connect together
would be square grid
'''
for x in range(self.S):
for z in range(self.S):
if x < self.S-1 and z < self.S-1:
top_map = x + self.S * z
# right edge a -> b
edge = torch.tensor(
[top_map, top_map+1],
device=self.device,
dtype=torch.long).contiguous()
edges.append(edge)
# left edge b -> a
edge = torch.tensor(
[top_map+1, top_map],
device=self.device,
dtype=torch.long).contiguous()
edges.append(edge)
# down edge a -> c
edge = torch.tensor(
[top_map, top_map+self.S * (z+1)],
device=self.device,
dtype=torch.long).contiguous()
edges.append(edge)
# up edge c -> a
edge = torch.tensor(
[top_map+self.S * (z+1), top_map],
device=self.device,
dtype=torch.long).contiguous()
edges.append(edge)
'''
layer node connect to top grid node
'''
for x in range(self.S):
for z in range(self.S):
'''
layer ? (src) to top layer (dst)
'''
top_map = x + self.S * z
for label in range(1, self.CLASSES):
src = top_map + self.S * self.S * label
dst = top_map
edge = torch.tensor([src, dst], device=self.device, dtype=torch.long).contiguous()
edges.append(edge)
self.map.edge_obj_to_obj = torch.stack(edges).reshape(2, -1)
def reset_map(self):
self.map.activate_nodes = set(range(self.S * self.S))
self.map.queue_grid_layer = [0] * self.S * self.S
'''
visualize
'''
with imageio.get_writer('./graph_map_GraphMap_{}_{}_{}.gif'.format(self.S, self.CLASSES, self.R), mode='I', fps=3) as writer:
for i, buf_file in enumerate(self.buffer_plt):
pil_img = Image.open(buf_file)
draw = ImageDraw.Draw(pil_img)
draw.text((0, 0), str(i), (0, 0, 0))
plt_img = np.array(pil_img)
writer.append_data(plt_img)
self.buffer_plt = []
    def put_label_to_map(self, cam_coords):
        """Project labelled camera-space points into the voxel graph map.

        Args:
            cam_coords: tuple ``(x, y, z, labels)`` of parallel arrays; ``x``
                and ``z`` are metric coordinates (``y`` is unused here) and
                ``labels`` are per-point class ids.
        """
        max_index = self.S-1
        x, y, z, labels = cam_coords
        # quantize to grid resolution R and shift so the camera sits mid-grid
        x = np.round(x / self.R).astype(int) + self.SHIFT_COORDS_HALF_S_TO_MAP
        x[x > max_index] = max_index
        # NOTE(review): clamping to -max_index still leaves negative indices,
        # which wrap around via negative indexing below -- confirm intended.
        x[x < -max_index] = -max_index
        z = np.round(z / self.R).astype(int) + self.SHIFT_COORDS_HALF_S_TO_MAP
        z[z > max_index] = max_index
        z[z < -max_index] = -max_index
        # labels.shape (163053,) # array([27, 27, 27, ..., 74, 74, 74])
        labels = labels.astype(int)
        # flat cell index within a single S x S layer
        coors = x + self.S * z
        one_dim_coors = self.grid_layer_to_one_dim_coors(coors)
        # len(node_indexs) 163053 -> array([ 27, 127, 227, ..., 937, 37, 137])
        node_indexs = coors + one_dim_coors
        self.map.activate_nodes.update(node_indexs)
        # self.map.x.shape -> torch.Size([1000, 2348])
        self.map.x[node_indexs] = self.label_to_features[labels]
        self.map.attributes[node_indexs] = self.label_to_attributes[labels]
def grid_layer_to_one_dim_coors(self, coors):
'''
grid layer from 0 ~ cfg.GRAPH_MAP.GRAPH_MAP_CLASSES
coors: [0, 1, 0, 3, 5, 6, 10, 20, 10, 30, 2, ...]. will be < self.S * self.S
coors = x + self.S * z
len(self.map.queue_grid_layer) = [0] * self.S * self.S (when grid layer=0)
'''
three_dim_to_one = self.S * self.S
one_dim_coors = []
for coor in coors:
# [0~self.CLASSES)
grid_layer = self.map.queue_grid_layer[coor]
grid_layer_to_one_dim = three_dim_to_one * grid_layer
one_dim_coors.append(grid_layer_to_one_dim)
self.map.queue_grid_layer[coor] = (self.map.queue_grid_layer[coor] + 1) % self.CLASSES
# queue_grid_layer[:5] [0, 1, 2, 0, 1]
# coors[:5] array([27, 27, 27, 37, 37])
return one_dim_coors
'''
# Another method get one_dim_coors
increase_when_same_coors_occur = defaultdict(int)
# accumulate same coor to increase
# [0, 1, 0, 3, 5, 6, 10, 20, 10, 30, 2, 0, ...] coors
# ->
# [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 2, ....] each_coor_layer
each_coor_layer = [0]*len(coors)
for i in range(len(coors)):
each_coor_layer[i] = increase_when_same_coors_occur[coors[i]]
increase_when_same_coors_occur[coors[i]] = (increase_when_same_coors_occur[coors[i]] + 1) % self.CLASSES
grid_layer = (self.map.queue_grid_layer[coors] + each_coor_layer) % self.CLASSES
self.map.queue_grid_layer[coors] = grid_layer
return grid_layer
'''
def visualize_graph_map(self):
print("Didn't implement visualize_graph_mapis")
# self.map.activate_nodes = dict()
class GraphMapV2(GraphMap):
    def __init__(self, cfg, priori_features, dim_rgb_feature, device="cuda", object_classes_index_to_name=None):
        '''
        Same construction as GraphMap; V2 only changes how ``activate_nodes``
        is tracked (a node -> counter dict instead of a set).

        priori_features: dict. priori_obj_cls_name_to_features, rgb_features, attributes
        '''
        super().__init__(cfg, priori_features, dim_rgb_feature, device, object_classes_index_to_name)
def init_graph_map(self):
super().init_graph_map()
self.map.activate_nodes = dict()
for i in range(self.S * self.S):
self.map.activate_nodes[i] = 0
def reset_map(self):
super().reset_map()
self.map.activate_nodes = dict()
for i in range(self.S * self.S):
self.map.activate_nodes[i] = 0
def put_label_to_map(self, cam_coords):
max_index = self.S-1
x, y, z, labels = cam_coords
x = np.round(x / self.R).astype(int) + self.SHIFT_COORDS_HALF_S_TO_MAP
x[x > max_index] = max_index
x[x < -max_index] = -max_index
z = np.round(z / self.R).astype(int) + self.SHIFT_COORDS_HALF_S_TO_MAP
z[z > max_index] = max_index
z[z < -max_index] = -max_index
# labels.shape (163053,) # array([27, 27, 27, ..., 74, | |
<gh_stars>1-10
# coding=utf-8
from datetime import datetime
import json
import math
from StringIO import StringIO
import subprocess32 as subprocess
import os
import uuid
from cachetools.func import lru_cache, rr_cache
from celery import Celery, chain, chord, states
from flask import Flask, redirect, request, send_from_directory, jsonify, url_for
from flask_cors import CORS
from flask_uploads import UploadSet, configure_uploads
from flask_tus import tus_manager
import mercantile
from mercantile import Tile
import numpy as np
from PIL import Image
import rasterio
from rasterio.warp import calculate_default_transform, transform_bounds
from werkzeug.wsgi import DispatcherMiddleware
APPLICATION_ROOT = os.environ.get('APPLICATION_ROOT', '')
REDIS_URL = os.environ.get('REDIS_URL', 'redis://')
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', REDIS_URL)
CELERY_DEFAULT_QUEUE = os.environ.get('CELERY_DEFAULT_QUEUE', 'posm-imagery-api')
CELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND', REDIS_URL)
IMAGERY_PATH = os.environ.get('IMAGERY_PATH', 'imagery')
MBTILES_TIMEOUT = int(os.environ.get('MBTILES_TIMEOUT', 60 * 60))
MIN_ZOOM = int(os.environ.get('MIN_ZOOM', 0))
MAX_ZOOM = int(os.environ.get('MAX_ZOOM', 22))
SERVER_NAME = os.environ.get('SERVER_NAME', None)
TASK_TIMEOUT = int(os.environ.get('TASK_TIMEOUT', 60 * 15))
USE_X_SENDFILE = os.environ.get('USE_X_SENDFILE', False)
UPLOADED_IMAGERY_DEST = os.environ.get('UPLOADED_IMAGERY_DEST', 'uploads/')
# strip trailing slash if necessary
if IMAGERY_PATH[-1] == '/':
    IMAGERY_PATH = IMAGERY_PATH[:-1]

# add trailing slash if necessary (flask_uploads expects a directory prefix)
# BUG FIX: this previously stripped the last character ([:-1]) instead of
# appending the missing '/'.
if UPLOADED_IMAGERY_DEST[-1] != '/':
    UPLOADED_IMAGERY_DEST += '/'
app = Flask('posm-imagery-api')
CORS(app)
app.config['APPLICATION_ROOT'] = APPLICATION_ROOT
app.config['SERVER_NAME'] = SERVER_NAME
app.config['USE_X_SENDFILE'] = USE_X_SENDFILE
app.config['UPLOADED_IMAGERY_DEST'] = UPLOADED_IMAGERY_DEST
# Initialize Celery
celery = Celery(app.name, broker=CELERY_BROKER_URL)
celery.conf.update({
'broker_url': CELERY_BROKER_URL,
'result_backend': CELERY_RESULT_BACKEND,
'task_default_queue': CELERY_DEFAULT_QUEUE,
'task_track_started': True
})
# Initialize Tus
tm = tus_manager(app, upload_url='/imagery/upload',
upload_folder=app.config['UPLOADED_IMAGERY_DEST'])
# overwrite tus_max_file_size to support big(ger) files
tm.tus_max_file_size = 17179869184 # 16GB
# Initialize Flask-Uploads
imagery = UploadSet('imagery', ('tif', 'tiff'))
configure_uploads(app, (imagery,))
@tm.upload_file_handler
def upload_file_handler(upload_file_path, filename=None, remote=False):
    """Tus upload-complete hook: start the ingest pipeline for one file.

    Allocates a fresh imagery id, schedules the Celery ingest canvas,
    records the resulting task ids next to the imagery so status can be
    looked up later, and seeds a minimal index.json.  Returns the new id.
    """
    id = str(uuid.uuid4())
    task_info = os.path.join(IMAGERY_PATH, id, 'ingest.task')
    os.mkdir(os.path.dirname(task_info))
    if remote:
        # let GDAL read the remote source directly over HTTP
        upload_file_path = '/vsicurl/{}'.format(upload_file_path)
    task = initialize_imagery(id, upload_file_path).apply_async()
    # walk the async-result chain back to its root, collecting task ids
    tasks = []
    while task.parent:
        if isinstance(task, celery.GroupResult):
            for child in task.children:
                tasks.append(child.id)
        else:
            tasks.append(task.id)
        task = task.parent
    tasks.append(task.id)
    # parents come last while walking upward, so reverse into execution order
    tasks.reverse()
    # stash task ids in the imagery directory so we know which task(s) to look up
    with open(task_info, 'w') as f:
        f.write(json.dumps(tasks))
    save_metadata(id, {
        'tilejson': '2.1.0',
        'name': id,
    })
    return id
def initialize_imagery(id, source_path):
    """Build the Celery canvas for ingesting one image.

    Pipeline: place_file -> create_metadata -> (create_overviews ||
    create_warped_vrt as a chord) -> update_metadata -> cleanup_ingestion.
    """
    return chain(
        place_file.si(id, source_path),
        create_metadata.si(id),
        chord([create_overviews.si(id), create_warped_vrt.si(id)],
            chain(
                update_metadata.si(id),
                cleanup_ingestion.si(id),
            )
        ),
    )
@celery.task(bind=True)
def place_file(self, id, source_path):
    """Celery task: rewrite raw imagery as a tiled, DEFLATE-compressed GeoTIFF.

    *source_path* may be a local path (deleted after a successful rewrite)
    or a remote /vsicurl / http(s) source (left in place).  Raises with a
    JSON-encoded payload on timeout or non-zero gdal_translate exit.
    """
    target_dir = os.path.join(IMAGERY_PATH, id)
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)

    output_file = os.path.abspath(os.path.join(target_dir, 'index.tif'))

    # rewrite with gdal_translate
    gdal_translate = [
        'gdal_translate',
        source_path,
        output_file,
        '-co', 'TILED=yes',
        '-co', 'COMPRESS=DEFLATE',
        '-co', 'PREDICTOR=2',
        '-co', 'BLOCKXSIZE=512',
        '-co', 'BLOCKYSIZE=512',
        '-co', 'NUM_THREADS=ALL_CPUS',
    ]

    started_at = datetime.utcnow()

    self.update_state(state='RUNNING',
                      meta={
                        'name': 'preprocess',
                        'started_at': started_at.isoformat(),
                        'status': 'Rewriting imagery'
                      })

    try:
        returncode = subprocess.call(gdal_translate, timeout=TASK_TIMEOUT)
    except subprocess.TimeoutExpired:
        raise Exception(json.dumps({
            'name': 'preprocess',
            'started_at': started_at.isoformat(),
            'command': ' '.join(gdal_translate),
            'status': 'Timed out'
        }))

    if returncode != 0:
        raise Exception(json.dumps({
            'name': 'preprocess',
            'started_at': started_at.isoformat(),
            'command': ' '.join(gdal_translate),
            'return_code': returncode,
            'status': 'Failed'
        }))

    if not source_path.startswith(('/vsicurl', 'http://', 'https://')):
        # delete original (local upload only)
        os.unlink(source_path)

    return {
        'name': 'preprocess',
        'completed_at': datetime.utcnow().isoformat(),
        # BUG FIX: was the raw datetime object, which is not JSON-serializable
        'started_at': started_at.isoformat(),
        'status': 'Image pre-processing completed'
    }
@celery.task(bind=True)
def update_metadata(self, id):
    """Celery task: mark the ingest pipeline as SUCCESS in index.json."""
    started_at = datetime.utcnow()

    meta = get_metadata(id)
    meta['meta']['status'].update({
        'ingest': {
            'state': 'SUCCESS',
        }
    })
    save_metadata(id, meta)

    return {
        'name': 'update-metadata',
        'completed_at': datetime.utcnow().isoformat(),
        # BUG FIX: was the raw datetime object, which is not JSON-serializable
        'started_at': started_at.isoformat(),
        'status': 'Metadata updating completed'
    }
@celery.task(bind=True)
def cleanup_ingestion(self, id):
    """Celery task: remove the ingest.task tracking file once ingest is done."""
    started_at = datetime.utcnow()

    task_info_path = os.path.join(IMAGERY_PATH, id, 'ingest.task')
    if os.path.exists(task_info_path):
        os.unlink(task_info_path)

    return {
        'name': 'cleanup',
        'completed_at': datetime.utcnow().isoformat(),
        # BUG FIX: was the raw datetime object, which is not JSON-serializable
        'started_at': started_at.isoformat(),
        'status': 'Cleanup completed'
    }
def get_zoom_offset(width, height, approximate_zoom):
    """Count the zoom levels below *approximate_zoom* at which the image is
    still at least 1x1 pixels when halved that many times."""
    offset = 0
    for level in range(approximate_zoom):
        scale = 2 ** (level + 1)
        if width / scale >= 1 and height / scale >= 1:
            offset += 1
    return offset
@celery.task(bind=True)
def create_metadata(self, id):
    """Celery task: read georeferencing info from index.tif into index.json.

    Derives the approximate native zoom from the web-mercator resolution,
    the WGS84 bounds, band count and dimensions, and merges them into the
    image's metadata file.
    """
    raster_path = os.path.join(IMAGERY_PATH, id, 'index.tif')
    started_at = datetime.utcnow()

    self.update_state(state='RUNNING',
                      meta={
                        'name': 'metadata',
                        'started_at': started_at.isoformat(),
                        'status': 'Reading metadata from imagery'
                      })

    try:
        with rasterio.drivers():
            with rasterio.open(raster_path) as src:
                # construct an affine transform w/ units in web mercator "meters"
                affine, _, _ = calculate_default_transform(src.crs, 'epsg:3857',
                    src.width, src.height, *src.bounds, resolution=None)

                # grab the lowest resolution dimension
                resolution = max(abs(affine[0]), abs(affine[4]))

                # zoom at which one source pixel maps to at most one tile pixel
                zoom = int(math.ceil(math.log((2 * math.pi * 6378137) /
                                              (resolution * 256)) / math.log(2)))
                width = src.width
                height = src.height
                bounds = transform_bounds(src.crs, {'init': 'epsg:4326'}, *src.bounds)
                bandCount = src.count
    except Exception as err:
        raise Exception(json.dumps({
            'name': 'metadata',
            'started_at': started_at.isoformat(),
            'error': str(err),
        }))

    self.update_state(state='RUNNING',
                      meta={
                        'name': 'metadata',
                        'started_at': started_at.isoformat(),
                        'status': 'Writing metadata'
                      })

    metadata = get_metadata(id)
    meta = metadata['meta']
    meta.update({
        'approximateZoom': zoom,
        'bandCount': bandCount,
        'width': width,
        'height': height,
    })
    metadata.update({
        'tilejson': '2.1.0',
        'name': id,
        'bounds': bounds,
        'minzoom': zoom - get_zoom_offset(width, height, zoom),
        'maxzoom': MAX_ZOOM,
        'meta': meta,
    })
    save_metadata(id, metadata)

    return {
        'name': 'metadata',
        'completed_at': datetime.utcnow().isoformat(),
        # BUG FIX: was the raw datetime object, which is not JSON-serializable
        'started_at': started_at.isoformat(),
        'status': 'Metadata creation completed'
    }
@celery.task(bind=True)
def create_overviews(self, id):
    """Celery task: add external (.ovr) overviews to index.tif via gdaladdo.

    Overview levels are successive powers of two, chosen so the smallest
    overview still measures at least 1x1 pixels.
    """
    raster_path = os.path.abspath(os.path.join(IMAGERY_PATH, id, 'index.tif'))
    meta = get_metadata(id)
    approximate_zoom = meta['meta']['approximateZoom']
    height = meta['meta']['height']
    width = meta['meta']['width']

    # create external overviews
    gdaladdo = [
        'gdaladdo',
        '-r', 'cubic',
        '--config', 'GDAL_TIFF_OVR_BLOCKSIZE', '512',
        '--config', 'TILED_OVERVIEW', 'yes',
        '--config', 'COMPRESS_OVERVIEW', 'DEFLATE',
        '--config', 'PREDICTOR_OVERVIEW', '2',
        '--config', 'BLOCKXSIZE_OVERVIEW', '512',
        '--config', 'BLOCKYSIZE_OVERVIEW', '512',
        '--config', 'NUM_THREADS_OVERVIEW', 'ALL_CPUS',
        raster_path,
    ]

    # generate a list of overview values (where images are > 1x1)
    overview_levels = [str(2 ** (x + 1)) for x in range(get_zoom_offset(
        width,
        height,
        approximate_zoom
    ))]

    gdaladdo.extend(overview_levels)

    started_at = datetime.utcnow()

    self.update_state(state='RUNNING',
                      meta={
                        'name': 'overviews',
                        'started_at': started_at.isoformat(),
                        'status': 'Creating external overviews'
                      })

    try:
        returncode = subprocess.call(gdaladdo, timeout=TASK_TIMEOUT)
    except subprocess.TimeoutExpired:
        raise Exception(json.dumps({
            'name': 'overviews',
            'started_at': started_at.isoformat(),
            'command': ' '.join(gdaladdo),
            'status': 'Timed out'
        }))

    if returncode != 0:
        raise Exception(json.dumps({
            'name': 'overviews',
            'started_at': started_at.isoformat(),
            'command': ' '.join(gdaladdo),
            'return_code': returncode,
            'status': 'Failed'
        }))

    return {
        'name': 'overviews',
        'completed_at': datetime.utcnow().isoformat(),
        # BUG FIX: was the raw datetime object, which is not JSON-serializable
        'started_at': started_at.isoformat(),
        'status': 'Overview addition completed'
    }
@celery.task(bind=True)
def create_warped_vrt(self, id):
    """Celery task: build a web-mercator, full-world warped VRT over index.tif.

    The VRT lets tiles be reprojected on the fly; an alpha band is added for
    NODATA when the source has fewer than 4 bands.
    """
    raster_path = os.path.abspath(os.path.join(IMAGERY_PATH, id, 'index.tif'))
    vrt_path = os.path.abspath(os.path.join(IMAGERY_PATH, id, 'index.vrt'))
    meta = get_metadata(id)
    approximate_zoom = meta['meta']['approximateZoom']

    # create a warped VRT to reproject on the fly
    gdalwarp = [
        'gdalwarp',
        raster_path,
        vrt_path,
        '-r', 'cubic',
        '-t_srs', 'epsg:3857',
        '-overwrite',
        '-of', 'VRT',
        '-te', '-20037508.34', '-20037508.34', '20037508.34', '20037508.34',
        '-ts', str(2 ** approximate_zoom * 256), str(2 ** approximate_zoom * 256),
    ]

    # add an alpha band (for NODATA) if one wasn't already included
    if meta['meta']['bandCount'] < 4:
        gdalwarp.append('-dstalpha')

    started_at = datetime.utcnow()
    self.update_state(state='RUNNING',
                      meta={
                        'name': 'warped-vrt',
                        'started_at': started_at.isoformat(),
                        'status': 'Creating warped VRT'
                      })

    try:
        returncode = subprocess.call(gdalwarp, timeout=TASK_TIMEOUT)
    except subprocess.TimeoutExpired:
        raise Exception(json.dumps({
            'name': 'warped-vrt',
            'started_at': started_at.isoformat(),
            'command': ' '.join(gdalwarp),
            'status': 'Timed out'
        }))

    if returncode != 0:
        raise Exception(json.dumps({
            'name': 'warped-vrt',
            'started_at': started_at.isoformat(),
            'command': ' '.join(gdalwarp),
            'return_code': returncode,
            'status': 'Failed'
        }))

    return {
        # consistency: sibling tasks all report their name in the result
        'name': 'warped-vrt',
        'completed_at': datetime.utcnow().isoformat(),
        # BUG FIX: was the raw datetime object, which is not JSON-serializable
        'started_at': started_at.isoformat(),
        'status': 'Warped VRT creation completed'
    }
@celery.task(bind=True)
def generate_mbtiles(self, id):
    """Celery task: generate an MBTiles archive for a given style via `tl`."""
    meta = get_metadata(id)
    output_path = os.path.abspath(os.path.join(IMAGERY_PATH, id, 'index.mbtiles'))
    approximate_zoom = meta['meta']['approximateZoom']
    bounds = meta['bounds']
    height = meta['meta']['height']
    width = meta['meta']['width']

    generate_cmd = [
        'tl',
        'copy',
        '-q',
        '-b', ' '.join(map(str, bounds)),
        '-z', str(approximate_zoom - get_zoom_offset(width, height, approximate_zoom)),
        '-Z', str(approximate_zoom),
        meta['tiles'][0],
        'mbtiles://{}'.format(output_path)
    ]

    started_at = datetime.utcnow()
    self.update_state(state='RUNNING',
                      meta={
                        'name': 'mbtiles',
                        'started_at': started_at.isoformat(),
                        'status': 'Generating tiles'
                      })

    print('Running {}'.format(' '.join(generate_cmd)))

    try:
        returncode = subprocess.call(generate_cmd, timeout=MBTILES_TIMEOUT)
    except subprocess.TimeoutExpired:
        raise Exception(json.dumps({
            'name': 'mbtiles',
            'started_at': started_at.isoformat(),
            'command': ' '.join(generate_cmd),
            'status': 'Timed out'
        }))

    if returncode != 0:
        raise Exception(json.dumps({
            'name': 'mbtiles',
            'started_at': started_at.isoformat(),
            'command': ' '.join(generate_cmd),
            'return_code': returncode,
            'status': 'Failed'
        }))

    # update metadata
    meta['meta']['status'].update({
        'mbtiles': {
            'state': 'SUCCESS',
        }
    })
    save_metadata(id, meta)

    # delete task tracking info
    task_info_path = os.path.join(IMAGERY_PATH, id, 'mbtiles.task')
    if os.path.exists(task_info_path):
        os.unlink(task_info_path)

    return {
        # consistency: sibling tasks all report their name in the result
        'name': 'mbtiles',
        'completed_at': datetime.utcnow().isoformat(),
        # BUG FIX: was the raw datetime object, which is not JSON-serializable
        'started_at': started_at.isoformat(),
        'status': 'MBTiles generation completed'
    }
def fetch_ingestion_status(id):
    """Return live Celery status for an in-flight ingest, or None when the
    tracking file has been cleaned up (pipeline finished)."""
    task_info_path = os.path.join(IMAGERY_PATH, id, 'ingest.task')
    if not os.path.exists(task_info_path):
        return None
    with open(task_info_path) as t:
        return fetch_status(json.load(t))
def fetch_mbtiles_status(id):
    """Return live Celery status for an in-flight MBTiles build, or None when
    the tracking file has been cleaned up (build finished)."""
    task_info_path = os.path.join(IMAGERY_PATH, id, 'mbtiles.task')
    if not os.path.exists(task_info_path):
        return None
    with open(task_info_path) as t:
        return fetch_status(json.load(t))
def get_metadata(id):
    """Load index.json for *id*, augmented with live status and tile URLs.

    Ensures the meta/status/user sub-dicts exist (backfilling structure for
    metadata written by older versions), overlays any in-flight Celery
    status for the ingest and mbtiles pipelines, and injects the tile URL
    template for this server.
    """
    with open(os.path.join(IMAGERY_PATH, id, 'index.json')) as metadata:
        meta = json.load(metadata)
    # url_for needs an application context outside a request
    with app.app_context():
        meta['tiles'] = [
            '{}/{{z}}/{{x}}/{{y}}.png'.format(url_for('get_imagery_metadata', id=id, _external=True))
        ]
    ingest_status = fetch_ingestion_status(id)
    mbtiles_status = fetch_mbtiles_status(id)
    meta['meta'] = meta.get('meta', {})
    meta['meta']['status'] = meta['meta'].get('status', {})
    meta['meta']['status']['ingest'] = meta['meta']['status'].get('ingest', {})
    meta['meta']['status']['mbtiles'] = meta['meta']['status'].get('mbtiles', {})
    meta['meta']['user'] = meta['meta'].get('user', {})
    if ingest_status:
        meta['meta']['status']['ingest'] = ingest_status
    if mbtiles_status:
        meta['meta']['status']['mbtiles'] = mbtiles_status
    return meta
def save_metadata(id, metadata):
    """Serialize *metadata* to the image's index.json, overwriting it."""
    index_path = os.path.join(IMAGERY_PATH, id, 'index.json')
    with open(index_path, 'w') as metadata_file:
        json.dump(metadata, metadata_file)
@lru_cache()
def get_source(path):
    """Open (and memoize) a rasterio dataset for *path*.

    The unbounded lru_cache keeps datasets open for reuse across tile
    requests.  NOTE(review): the rasterio.drivers() context exits before
    callers use the returned dataset -- presumably safe with this rasterio
    version, but confirm.
    """
    with rasterio.drivers():
        return rasterio.open(path)
def render_tile(meta, tile, scale=1):
src_tile_zoom = meta['meta']['approximateZoom']
# do calculations in src_tile_zoom space
dz = src_tile_zoom - tile.z
x = 2**dz * tile.x
y = 2**dz * tile.y
mx = 2**dz * (tile.x + 1)
my = 2**dz * (tile.y + 1)
dx = mx - x
dy = my - y
top = (2**src_tile_zoom * 256) - 1
# y, x (rows, columns)
# window is measured in pixels at src_tile_zoom
window = [[top - (top - (256 * y)), top - (top - ((256 * y) + int(256 * dy)))],
[256 * x, (256 * x) + int(256 * dx)]]
src = get_source(os.path.join(IMAGERY_PATH, meta['name'], 'index.vrt'))
# use decimated reads to read from overviews, per https://github.com/mapbox/rasterio/issues/710
data = np.empty(shape=(4, 256 * scale, 256 * scale)).astype(src.profile['dtype'])
data = | |
# If the argument passed is a variable (identifier) then try get value
if token_stream[token][0] == 'IDENTIFIER':
# Get value and handle any errors
value = self.get_variable_value(token_stream[token][1])
if value != False:
ast['PrebuiltFunction'].append( {'arguments': [value]} )
else:
self.send_error_message("Variable '%s' does not exist" % token_stream[tokens_checked][1], token_stream[0:tokens_checked + 1])
# TODO Allow for concatenation and equation parsing
else:
if token_stream[token + 1][0] == 'STATEMENT_END':
ast['PrebuiltFunction'].append( {'arguments': [token_stream[token][1]]} )
else:
value_list_func_call = self.form_value_list(token_stream[tokens_checked:len(token_stream)])
print(value_list_func_call)
# This will throw an error if argument passed in is not a permitted token type
elif token == 1 and token_stream[token][0] not in ['INTEGER', 'STRING', 'IDENTIFIER']:
self.send_error_message("Invalid argument type of %s expected string, identifier or primitive data type" % token_stream[token][0],
token_stream[0:tokens_checked + 1])
tokens_checked += 1 # Increment tokens checked
# If it's being parsed within a body don't ass the ast to the source ast
if not isInBody: self.source_ast['main_scope'].append(ast)
# Increase token index to make up for tokens checked
self.token_index += tokens_checked
return [ast, tokens_checked]
    def variable_decleration_parsing(self, token_stream, isInBody):
        """ Variable Decleration Parsing
        Parse a variable decleration (``type name = value;``) into an AST node.
        The node is appended to the source AST unless it is being parsed as
        part of a statement body, in which case it is only returned.
        Args:
            token_stream (list) : The token stream starting from where var decleration was found
            isInBody (bool) : True when parsing inside a statement body (node is
                returned instead of appended to the source AST)
        Returns:
            list : [ast, tokens_checked]
        """
        ast = { 'VariableDecleration': [] } # The abstract syntax tree for var decl
        tokens_checked = 0 # Number of token checked that made up the var decl
        var_exists = True
        for x in range(0, len(token_stream)):
            # Create variables for identifying token type and value more easily
            token_type = token_stream[x][0]
            token_value = token_stream[x][1]
            # Skip the '=' operator in var decl
            if x == 2 and token_type == "OPERATOR" and token_value == "=":
                pass
            # This will handle error detection for making sure the '=' is found
            if x == 2 and token_type != "OPERATOR" and token_value != "=":
                self.send_error_message("Variable Decleration Missing '='.", self.token_stream[self.token_index:self.token_index + tokens_checked + 2])
            # If a statement end is found then break out parsing
            if token_stream[x][0] == "STATEMENT_END": break
            # This will parse the first token which will be the var type
            if x == 0: ast['VariableDecleration'].append({ "type": token_value })
            # This will parse the second token which will be the name of the var
            if x == 1 and token_type == "IDENTIFIER":
                # Check if a variable has already been named the same and if so send an error
                if self.get_variable_value(token_value) != False:
                    self.send_error_message("Variable '%s' already exists and cannot be defined again!" % token_value, self.token_stream[self.token_index:self.token_index + tokens_checked + 1])
                else:
                    # Set var exists to False so that it can be added
                    var_exists = False
                    # This will check if the variable is being declared but not initialised
                    if token_stream[x + 1][0] == "STATEMENT_END":
                        # Adds the default value of 'undefined' and breaks out of loop
                        ast['VariableDecleration'].append({ "name": token_value })
                        ast['VariableDecleration'].append({ "value": '"undefined"' })
                        tokens_checked += 1
                        break
                    else:
                        ast['VariableDecleration'].append({ "name": token_value })
            # Error handling for variable name to make sure the naming convention is acceptable
            if x == 1 and token_type != "IDENTIFIER":
                self.send_error_message("Invalid Variable Name '%s'" % token_value, self.token_stream[self.token_index:self.token_index + tokens_checked + 1] )
            # This will parse the 3rd token which is the value of the variable
            if x == 3 and token_stream[x + 1][0] == "STATEMENT_END":
                # Check if the value matches the variable defined type
                # SECURITY NOTE(review): eval() on source-supplied token text
                # executes arbitrary expressions -- acceptable only because the
                # input is the author's own source file; do not expose to
                # untrusted input.
                if type(eval(token_value)) == eval(token_stream[0][1]):
                    # Add value as a number not a string if it is an int or else add it as a string
                    try: ast['VariableDecleration'].append({ "value": int(token_value) })
                    except ValueError: ast['VariableDecleration'].append({ "value": token_value })
                else:
                    self.send_error_message("Variable value does not match defined type!", self.token_stream[self.token_index:self.token_index + tokens_checked + 1])
            # This will parse any variable declerations which have concatenation or arithmetics
            elif x >= 3:
                # This will call the form_value_list method and it will return the concatenation value and tokens checked
                value_list_func_call = self.form_value_list(token_stream[tokens_checked:len(token_stream)])
                value_list = value_list_func_call[0]
                tokens_checked += value_list_func_call[1]
                # Call the equation parser and append value returned or try concat parser if an error occurs
                try:
                    ast['VariableDecleration'].append({ "value": self.equation_parser(value_list)})
                except:
                    try:
                        ast['VariableDecleration'].append({ "value": self.concatenation_parser(value_list) })
                    except:
                        self.send_error_message("Invalid variable decleration!", self.token_stream[self.token_index:self.token_index + tokens_checked] )
                break # Break out of the current var parsing loop since we just parsed everything
            tokens_checked += 1 # Increment within overall for loop
        # Last case error validation checking if all needed var decl elements are in the ast such as:
        # var type, name and value
        try: ast['VariableDecleration'][0]
        except: self.send_error_message("Invalid variable decleration could not set variable type!", self.token_stream[self.token_index:self.token_index + tokens_checked] )
        try: ast['VariableDecleration'][1]
        except: self.send_error_message("Invalid variable decleration could not set variable name!", self.token_stream[self.token_index:self.token_index + tokens_checked] )
        try: ast['VariableDecleration'][2]
        except: self.send_error_message("Invalid variable decleration could not set variable value!", self.token_stream[self.token_index:self.token_index + tokens_checked] )
        # If this is being run to parse inside a body then there is no need to add it to the source ast
        # as it will be added to the body of statement being parsed
        if not isInBody:
            self.source_ast['main_scope'].append(ast)
        if not var_exists:
            self.symbol_tree.append( [ast['VariableDecleration'][1]['name'], ast['VariableDecleration'][2]['value']] )
        self.token_index += tokens_checked
        return [ast, tokens_checked] # Return is only used within body parsing to create body ast
def conditional_statement_parser(self, token_stream, isNested):
""" Conditional Statement Parser
This will parse conditional statements like 'if else' and create an
abstract sytax tree for it.
args:
token_stream (list) : tokens which make up the conditional statement
isNested (bool) : True the conditional statement is being parsed within another conditional statement
return:
ast (dict) : The condtion ast without the body
tokens_checked (int) : The count of tokens checked that made up the condition statement
"""
tokens_checked = 0
ast = {'ConditionalStatement': []}
# This loop will parse the condition e.g. if 12 < 11
for x in range(0, len(token_stream)):
tokens_checked += 1
# Simplification variables that will improve readbility
token_type = token_stream[x][0]
token_value = token_stream[x][1]
allowed_conditional_token_types = ['INTEGER', 'STRING', 'IDENTIFIER']
# Break out of loop at the end of the condition
if token_type == 'SCOPE_DEFINER' and token_value == '{': break
# Pass if token is the 'if' identifier as it has already been checked
if token_type == 'IDENTIFIER' and token_value == 'if': pass
# This will check for the first value and add it to the AST
if x == 1 and token_type in allowed_conditional_token_types:
# This will check for an identifier (var) and then check if it exists so it can add the value to it
if self.get_variable_value(token_value) != False:
ast['ConditionalStatement'].append( {'value1': self.get_variable_value(token_value)} )
else:
ast['ConditionalStatement'].append( {'value1': token_value} )
# This will check for the comparison operator and add it to the AST
if x == 2 and token_type == 'COMPARISON_OPERATOR':
ast['ConditionalStatement'].append( {'comparison_type': token_value} )
# This will check for the second value and add it to the AST
if x == 3 and token_type in allowed_conditional_token_types:
# This will check for an identifier (var) and then check if it exists so it can add the value to it
if self.get_variable_value(token_value) != False:
ast['ConditionalStatement'].append( {'value2': self.get_variable_value(token_value)} )
else:
ast['ConditionalStatement'].append( {'value2': token_value} )
# Increment global token index for tokens checked in condition
self.token_index += tokens_checked - 1
# This will get the body tokens and the tokens checked that make up the body to skip them
get_body_return = self.get_statement_body(token_stream[tokens_checked:len(token_stream)])
print()
print()
print()
print('---', get_body_return)
print()
print()
print()
# If it nested then call parse_body with nested parameter of true else false
if isNested == True:
self.parse_body(get_body_return[0], ast, 'ConditionalStatement', True)
else:
self.parse_body(get_body_return[0], ast, 'ConditionalStatement', False)
# Add the amount tokens we checked in body
tokens_checked += get_body_return[1]
return [ast, tokens_checked] # Return is only used within body parsing to create body ast
def parse_body(self, token_stream, statement_ast, astName, isNested):
""" Parse body
This will parse the body of conditional, iteration, functions and more in order
to return a body ast like this --> {'body': []}
args:
token_stream (list) : tokens which | |
render(self, dialog):
if self._index is None and self._value is not None:
# Search for a matching item with the specified value or ID.
for index, child in enumerate(self._children):
if child.text == self._value:
break
else:
for index, child in enumerate(self._children):
if child.ident == self._value:
break
else:
index = None
self._index = index
layout_flags = get_layout_flags(self.layout)
box_id = self.alloc_id('box')
dialog.AddComboBox(box_id, layout_flags)
if self._children:
self._items = []
BaseGroupWidget.render(self, dialog)
if self._index is None and self._children:
self._index = 0
if self._index is not None:
self.active_index = self._index
def layout_changed(self):
self._layout_changed = True
manager = self.manager
if manager:
manager.layout_changed()
    def update(self, dialog):
        """Re-render the combobox contents if the layout was invalidated.

        When the layout is dirty, the dialog's child list for the box is
        rebuilt from scratch (widget state is saved/applied around the
        rebuild); otherwise this defers to the group-widget update.
        """
        id = self.get_named_id('box', None)
        if self._layout_changed and id is not None:
            self._layout_changed = False
            self.save_state()
            self.update_state(dialog)
            dialog.FreeChildren(id)
            for index, name in enumerate(self._items):
                dialog.AddChild(id, index, name)
            # restore the previously active entry after the rebuild
            self.active_index = self._index
        else:
            BaseGroupWidget.update(self, dialog)
def pack(self, widget):
if not isinstance(widget, (Item, Separator)):
raise TypeError('Combobox can only contain Item/Separator')
BaseGroupWidget.pack(self, widget)
def command_event(self, id, bc):
if id == self.get_named_id('box', None):
self._index = self.dialog.GetInt32(id)
return self.send_event('value-changed', self)[1]
return False
    def add(self, name):
        """Append *name* as a display string for the next render/update."""
        self._items.append(name)
@property
def active_index(self):
return self._index
@active_index.setter
def active_index(self, value):
id = self.get_named_id('box', None)
if value is None:
self._index = None
if id is not None:
self.dialog.SetInt32(id, -1)
else:
self._index = int(value)
self.dialog.SetInt32(id, self._index)
@property
def active_item(self):
if self._index is not None and self._index in xrange(len(self.children)):
return self.children[self._index]
return None
@active_item.setter
def active_item(self, item):
if not isinstance(item, Item):
for child in self.children:
if child.ident == item:
item = child
break
else:
raise ValueError('no Item with ident {!r}'.format(item))
try:
index = self.children.index(item)
except ValueError:
raise ValueError('this Item is not in the Combobox')
self.active_index = index
class Item(BaseWidget):
    """
    An entry of a #Combobox. Besides the displayed *text* and #BaseWidget.id
    it carries an *ident*, a free-form value callers can use to recognize
    the item later, and an optional *delegate* callable that renders the
    entry itself.
    """

    def __init__(self, text='???', ident=None, delegate=None, id=None):
        BaseWidget.__init__(self, id)
        self.text = text
        self.ident = ident
        self.delegate = delegate

    def render(self, dialog):
        owner = self.parent
        if self.delegate:
            # A custom rendering hook takes precedence over the default text.
            self.delegate(owner)
        else:
            owner.add(self.text)
class Quicktab(BaseGroupWidget):
"""
Represents a Quicktab Custom GUI. Unlike the #Combobox, it does not accept
any child widgets but is instead managed manually and filled from code.
# Events
- value-changed
"""
    def __init__(self, layout='fill-x,middle', multiselect='true', value=None,
            id=None):
        """Create a quicktab.

        layout: layout flag string resolved via get_layout_flags() at render.
        multiselect: truthy string/bool parsed with bool_cast().
        value: optional comma-separated idents (or '*') pre-checked later in
            init_values().
        """
        BaseGroupWidget.__init__(self, id)
        self.layout = layout
        self.multiselect = bool_cast(multiselect)
        self._value = value
        self._textcol = None
        self._items = []  # list of {'string','color','checked','visible'} dicts
        self._checked_count = 0  # number of checked items
        self._gui = None  # the live c4d QuickTab custom GUI, set in render()
        self._layout_changed = False
def clear(self):
try:
return self._items
finally:
self._items = []
if self._gui:
self._gui.ClearStrings()
self.layout_changed()
def add(self, string, color=None, checked=False, visible=True):
item = {'string': str(string), 'color': color, 'checked': checked, 'visible': visible}
self._items.append(item)
self.layout_changed()
def is_checked(self, index):
if isinstance(index, basestring):
for item_index, item in enumerate(self._children):
if item.ident == index:
index = item_index
break
else:
return False
return self._items[index]['checked']
    def set_checked(self, index, checked, mode='new'):
        """Check or uncheck the item at *index*.

        mode='new' makes it the single selection (all other items are
        deselected); mode='add' toggles it additively. A no-op when the
        item already has the requested state.
        """
        assert mode in ('new', 'add')
        checked = bool(checked)
        item = self._items[index]
        if item['checked'] == checked:
            return
        item['checked'] = checked
        gui = self._gui
        if mode == 'new':
            # NOTE(review): count is forced to 1 even when *checked* is False
            # -- presumably 'new' is only used to check items; confirm.
            self._checked_count = 1
            # Deselect all other items.
            for i, item in enumerate(self._items):
                if i != index:
                    item['checked'] = False
                    if gui: gui.Select(i, False)
        else:
            self._checked_count += int(checked)
        # Update the GUI of the current item.
        if gui: gui.Select(index, checked)
def set_color(self, index, color):
item = self._items[index]
item['color'] = color
if self._gui:
self._gui.SetTextColor(index, color)
    def set_visible(self, index, visible):
        """Show or hide the item at *index*, keeping at least one visible
        item checked afterwards."""
        item = self._items[index]
        if item['checked']:
            # NOTE(review): decrements whenever the item is checked, even when
            # *visible* is True or unchanged -- confirm this is intended.
            self._checked_count -= 1
        item['visible'] = visible
        if self._checked_count == 0:
            # Check the first available (visible) item instead.
            for index, item in enumerate(self._items):
                if item['visible']:
                    self.set_checked(index, True)
                    break
        self.layout_changed()
def layout_changed(self):
    """Mark this widget dirty and propagate the change to the manager."""
    self._layout_changed = True
    mgr = self.manager
    if mgr:
        mgr.layout_changed()
def render(self, dialog):
    """Create the quicktab custom GUI inside *dialog* and schedule a rebuild."""
    layout_flags = get_layout_flags(self.layout)
    multiselect = self.multiselect
    bc = c4d.BaseContainer()
    bc[c4d.QUICKTAB_BAR] = False
    bc[c4d.QUICKTAB_SHOWSINGLE] = True   # show the tab even when only one exists
    bc[c4d.QUICKTAB_SPRINGINGFOLDERS] = True # ??
    bc[c4d.QUICKTAB_SEPARATOR] = False
    bc[c4d.QUICKTAB_NOMULTISELECT] = not multiselect
    self._gui = dialog.AddCustomGui(
        self.alloc_id('gui'),
        c4d.CUSTOMGUI_QUICKTAB,
        "",
        layout_flags,
        0,
        0,
        bc
    )
    # Force update() to repopulate the freshly created GUI.
    self._layout_changed = True
    # Child widgets supersede any manually added items.
    if self._children:
        self._items = []
    BaseGroupWidget.render(self, dialog)
def command_event(self, id, bc):
    """Handle a click on the quicktab: resync checked state from the GUI.

    Returns the 'value-changed' listener result (truthy when consumed),
    or False when the event targets a different widget.
    """
    if id == self.get_named_id('gui', None):
        # Update the selection state of all elements.
        gui = self._gui
        self._checked_count = 0
        for index, item in enumerate(self._items):
            item['checked'] = gui.IsSelected(index)
            if item['checked'] and item['visible']:
                self._checked_count += 1
        return self.send_event('value-changed', self)[1]
    return False
def update(self, dialog):
    """Rebuild the quicktab's strings if the layout was marked dirty."""
    id = self.get_named_id('gui', None)
    if self._layout_changed and id is not None:
        self._layout_changed = False
        gui = self._gui
        gui.ClearStrings()
        visible_count = 0
        self._checked_count = 0
        first_visible_index = None
        for index, item in enumerate(self._items):
            if not item['visible']: continue
            if first_visible_index is None:
                first_visible_index = index
            visible_count += 1
            if item['checked']:
                self._checked_count += 1
            # Re-add the tab with its stored check state and colour.
            gui.AppendString(index, item['string'], item['checked'])
            if item['color'] is not None:
                gui.SetTextColor(index, item['color'])
        # Make sure that at least one item is selected.
        if visible_count > 0 and self._checked_count == 0:
            self.set_checked(first_visible_index, True)
        gui.DoLayoutChange()
    BaseGroupWidget.update(self, dialog)
def init_values(self, dialog):
    """Apply the initial selection from the comma-separated `value` string.

    '*' selects everything; otherwise each listed child ident is checked.
    Without multiselect only the first match is applied.
    """
    if self._value is not None:
        values = self._value.split(',')
        mode = 'new'
        for index, item in enumerate(self._children):
            if self._value == '*' or item.ident in values:
                self.set_checked(index, True, mode)
                mode = 'add'  # later matches extend the selection
                if not self.multiselect:
                    break
@property
def active_item(self):
    """The first checked child widget, or None if nothing is checked."""
    if len(self._items) != len(self.children):
        raise RuntimeError("Quicktab._items out of sync with Quicktab.children")
    checked_children = (
        child for item, child in zip(self._items, self.children)
        if item['checked'])
    return next(checked_children, None)
class LinkBox(BaseWidget):
    """
    Represents a link box GUI.
    # Events
    - value-changed
    """
    def __init__(self, layout='fill-x,middle', id=None):
        BaseWidget.__init__(self, id)
        self.layout = layout
        self._layout_changed = False
        self._gui = None
    def get_link(self, doc=None, instanceof=0):
        """Return the linked node, or None before the GUI is built."""
        gui = self._gui
        if not gui:
            return None
        return gui.GetLink(doc, instanceof)
    def set_link(self, node):
        """Store *node* in the link box (no-op before render())."""
        gui = self._gui
        if gui:
            gui.SetLink(node)
    def layout_changed(self):
        """Mark the widget dirty and notify the owning manager."""
        self._layout_changed = True
        mgr = self.manager
        if mgr:
            mgr.layout_changed()
    def render(self, dialog):
        """Create the CUSTOMGUI_LINKBOX custom GUI inside *dialog*."""
        flags = get_layout_flags(self.layout)
        settings = c4d.BaseContainer()
        self._gui = dialog.AddCustomGui(
            self.alloc_id('gui'), c4d.CUSTOMGUI_LINKBOX, "",
            flags, 0, 0, settings)
        self._layout_changed = True
    def command_event(self, id, bc):
        """Forward GUI interaction as a 'value-changed' event."""
        if id != self.get_named_id('gui', None):
            return False
        return self.send_event('value-changed', self)[1]
class Input(BaseWidget):
def __init__(self, layout='left,middle', type='string', slider='false',
        arrows='true', min=None, max=None, minslider=None, maxslider=None,
        helptext=None, is_password='false', unit='float', quadscale='false',
        step='1', minw='0', minh='0', id=None, value=None):
    """Text or number input field.

    type is one of 'string', 'integer', 'float'; limits and step are
    coerced to the matching Python type.  Most parameters arrive as
    strings (widgets are typically built from markup) and are cast here.
    """
    BaseWidget.__init__(self, id)
    assert type in ('string', 'integer', 'float')
    actual_type = {'string': str, 'integer': int, 'float': float}[type]
    if value is None:
        value = actual_type()  # '' / 0 / 0.0 depending on type
    self.layout = layout
    self.type = type
    self.slider = bool_cast(slider)
    self.arrows = bool_cast(arrows)
    self.min = actual_type(min) if min is not None else None
    self.max = actual_type(max) if max is not None else None
    self.minslider = actual_type(minslider) if minslider is not None else None
    self.maxslider = actual_type(maxslider) if maxslider is not None else None
    self.helptext = helptext
    self.is_password = bool_cast(is_password)
    self.unit = _unit_map[unit] if isinstance(unit, str) else unit
    self.quadscale = bool_cast(quadscale)
    self.step = actual_type(step)
    self.minw = int(minw)
    self.minh = int(minh)
    self._value = value
    # Re-apply the value around visibility changes (see __visibility_changed).
    self.add_event_listener('visibility-changed', self.__visibility_changed)
def __visibility_changed(self, _):
    # Generator-style event handler: stash the current value, blank the
    # field while the visibility change is processed (at the yield),
    # then restore the value afterwards.
    value = self.value
    self.value = ''
    yield
    self.value = value
def save_state(self):
    """Cache the widget's current value once its field has been created."""
    if self.get_named_id('field', None) is None:
        return
    self._value = self.value
def render(self, dialog):
    """Create the edit field matching self.type inside *dialog*.

    Strings get an AddEditText (optionally password-masked); numeric
    types get a slider and/or arrow field depending on self.slider and
    self.arrows.  Restores the cached value and applies the help text.
    """
    layout_flags = get_layout_flags(self.layout)
    id = self.alloc_id('field')
    if self.type == 'string':
        # BUG FIX: is_password was already converted by bool_cast() in
        # __init__, so comparing it against the string 'true' was always
        # False and the password flag was never applied.
        edtflags = c4d.EDITTEXT_PASSWORD if self.is_password else 0
        dialog.AddEditText(id, layout_flags, self.minw, self.minh,
            editflags=edtflags)
    elif self.type in ('integer', 'float'):
        if self.slider:
            method = dialog.AddEditSlider if self.arrows else dialog.AddSlider
        else:
            method = dialog.AddEditNumberArrows if self.arrows else dialog.AddEditNumber
        method(id, layout_flags, self.minw, self.minh)
    else:
        raise RuntimeError("Input.type invalid: {0!r}".format(self.type))
    if self._value is not None:
        self.value = self._value
    if self.helptext:
        dialog.SetString(id, self.helptext, False, c4d.EDITTEXT_HELPTEXT)
    self._update_color(dialog)
def _update_color(self, dialog):
    """Grey the field text out while only the help text is displayed."""
    showing_helptext = self.helptext and not self.value
    color = c4d.Vector(0.4) if showing_helptext else None
    dialog.set_color(self.get_named_id('field'), c4d.COLOR_TEXT_EDIT, color)
def command_event(self, id, bc):
    """Dispatch field edits as 'input-changed' then 'value-changed' events.

    Returns True when either listener consumed the event.
    """
    if id == self.get_named_id('field', None):
        if self.type == 'string':
            # BFM_ACTION_STRCHG: the user is still typing.
            data = {'in_edit': bc.GetBool(c4d.BFM_ACTION_STRCHG)}
            self._update_color(self.dialog)
        else:
            # BFM_ACTION_INDRAG: a slider drag is in progress.
            data = {'in_drag': bc.GetBool(c4d.BFM_ACTION_INDRAG)}
        res = bool(self.send_event('input-changed', self, data)[1])
        res = res | bool(self.send_event('value-changed', self)[1])
        return res
    return False
@property
def value(self):
    """Read the field's current value from the dialog, typed per self.type."""
    dialog = self.dialog
    id = self.get_named_id('field')
    getters = {
        'string': dialog.GetString,
        'integer': dialog.GetInt32,
        'float': dialog.GetFloat,
    }
    try:
        getter = getters[self.type]
    except KeyError:
        raise RuntimeError("Input.type invalid: {0!r}".format(self.type))
    return getter(id)
@value.setter
def value(self, value):
dialog = self.dialog
id = self.get_named_id('field')
if self.type == 'string':
if value is None:
value = ''
dialog.SetString(id, value)
self._update_color(dialog)
elif self.type == 'integer':
min, max, minslider, maxslider = self._get_limit_props(
c4d.MINLONGl, c4d.MAXLONGl)
self.dialog.SetInt32(id, int(value), min, max, self.step,
False, minslider, maxslider)
elif self.type == 'float':
min, max, minslider, maxslider = self._get_limit_props(
sys.float_info.min, sys.float_info.max)
self.dialog.SetFloat(id, float(value), min, max, self.step,
self.unit, minslider, | |
from tensorflow.python.platform import flags
from tensorflow.contrib.data.python.ops import batching
import tensorflow as tf
import json
from torch.utils.data import Dataset
import pickle
import os.path as osp
import os
import numpy as np
import time
from scipy.misc import imread, imresize
from torchvision.datasets import CIFAR10, MNIST, SVHN, CIFAR100, ImageFolder
from torchvision import transforms
import torch
import torchvision
FLAGS = flags.FLAGS
# Silence TensorFlow C++ logging below ERROR level.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Dataset Options
flags.DEFINE_string('dsprites_path',
    '/root/data/dsprites-dataset/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz',
    'path to dsprites characters')
# NOTE(review): this help string looks copy-pasted from the cutout flags -- confirm.
flags.DEFINE_string('imagenet_datadir', '/root/imagenet_big', 'whether cutoff should always in image')
flags.DEFINE_bool('dshape_only', False, 'fix all factors except for shapes')
flags.DEFINE_bool('dpos_only', False, 'fix all factors except for positions of shapes')
flags.DEFINE_bool('dsize_only', False, 'fix all factors except for size of objects')
flags.DEFINE_bool('drot_only', False, 'fix all factors except for rotation of objects')
flags.DEFINE_bool('dsprites_restrict', False, 'fix all factors except for rotation of objects')
flags.DEFINE_string('imagenet_path', '/root/imagenet', 'path to imagenet images')
flags.DEFINE_string('load_path', '/root/imagenet', 'path to imagenet images')
flags.DEFINE_string('load_type', 'npy', 'npy or png')
flags.DEFINE_bool('single', False, 'single ')
flags.DEFINE_string('datasource', 'random', 'default or noise or negative or single')
# Data augmentation options
# flags.DEFINE_bool('cutout_inside', False, 'whether cutoff should always in image')
# flags.DEFINE_float('cutout_prob', 1.0, 'probability of using cutout')
# flags.DEFINE_integer('cutout_mask_size', 16, 'size of cutout')
# flags.DEFINE_bool('cutout', False, 'whether to add cutout regularizer to data')
flags.DEFINE_string('eval', '', '')
flags.DEFINE_string('init', '', '')
flags.DEFINE_string('norm', '', '')
flags.DEFINE_string('n_steps', '', '')
flags.DEFINE_string('reinit_freq', '', '')
flags.DEFINE_string('print_every', '', '')
flags.DEFINE_string('n_sample_steps', '', '')
# NOTE(review): a dash is not a valid attribute name ('FLAGS.gpu-id' cannot
# be accessed as an attribute) -- confirm it is read via FLAGS['gpu-id'] or
# consider renaming to 'gpu_id'.
flags.DEFINE_integer('gpu-id', 0, '')
def cutout(mask_color=(0, 0, 0)):
    """Build a cutout augmentation closure.

    Returns a function that blanks a random square of side
    FLAGS.cutout_mask_size in the given image with *mask_color*.
    NOTE(review): the cutout_* flag definitions above are commented out,
    so calling this would fail on the FLAGS accesses -- confirm.
    """
    mask_size_half = FLAGS.cutout_mask_size // 2
    # For even mask sizes the valid centre range is shifted by one pixel.
    offset = 1 if FLAGS.cutout_mask_size % 2 == 0 else 0
    def _cutout(image):
        # Work on a copy so the caller's array is untouched.
        image = np.asarray(image).copy()
        if np.random.random() > FLAGS.cutout_prob:
            return image
        h, w = image.shape[:2]
        if FLAGS.cutout_inside:
            # Keep the whole mask inside the image bounds.
            cxmin, cxmax = mask_size_half, w + offset - mask_size_half
            cymin, cymax = mask_size_half, h + offset - mask_size_half
        else:
            cxmin, cxmax = 0, w + offset
            cymin, cymax = 0, h + offset
        # Draw the mask centre, then clamp the square to the image.
        cx = np.random.randint(cxmin, cxmax)
        cy = np.random.randint(cymin, cymax)
        xmin = cx - mask_size_half
        ymin = cy - mask_size_half
        xmax = xmin + FLAGS.cutout_mask_size
        ymax = ymin + FLAGS.cutout_mask_size
        xmin = max(0, xmin)
        ymin = max(0, ymin)
        xmax = min(w, xmax)
        ymax = min(h, ymax)
        # NOTE(review): h/w come from shape[:2] (HWC layout) but this
        # assignment indexes channel-first ([:, y, x]); the two conventions
        # disagree -- confirm the expected image layout.
        image[:, ymin:ymax, xmin:xmax] = np.array(mask_color)[:, None, None]
        return image
    return _cutout
class CelebA(Dataset):
    """CelebA face images loaded from disk, resized to 32x32 in [0, 1].

    __getitem__ yields (im_corrupt, im, label) where im_corrupt is the
    initialization image selected by FLAGS.datasource.
    """
    def __init__(self):
        self.path = "/root/data/img_align_celeba"
        self.ims = [osp.join(self.path, name) for name in os.listdir(self.path)]
    def __len__(self):
        return len(self.ims)
    def __getitem__(self, index):
        label = 1
        if FLAGS.single:
            index = 0
        image = imread(self.ims[index])
        image = imresize(image, (32, 32)) / 255.
        side = 32
        if FLAGS.datasource == 'default':
            im_corrupt = image + 0.3 * np.random.randn(side, side, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(0, 1, size=(side, side, 3))
        return im_corrupt, image, label
class Cifar10(Dataset):
    """CIFAR-10 wrapper producing (corrupted, clean, one-hot label) triples.

    FLAGS: the flags object; note that __getitem__ mutates it (see below).
    train: load the training split into self.data.
    full: when True, __getitem__ indexes across train + test splits.
    augment: apply random-crop / horizontal-flip augmentation.
    noise: add uniform dequantization noise to the clean image.
    rescale: multiplicative rescaling of pixel values.
    """
    def __init__(
            self, FLAGS,
            train=True,
            full=False,
            augment=False,
            noise=True,
            rescale=1.0):
        if augment:
            transform_list = [
                torchvision.transforms.RandomCrop(32, padding=4),
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
            ]
            # if FLAGS.cutout:
            #     transform_list.append(cutout())
            transform = transforms.Compose(transform_list)
        else:
            transform = transforms.ToTensor()
        self.FLAGS = FLAGS
        self.full = full
        self.data = CIFAR10(
            "../data/dataset/cifar10",
            transform=transform,
            train=train,
            download=True)
        self.test_data = CIFAR10(
            "../data/dataset/cifar10",
            transform=transform,
            train=False,
            download=True)
        self.one_hot_map = np.eye(10)
        self.noise = noise
        self.rescale = rescale
    def __len__(self):
        # In `full` mode both splits are addressable through one index space.
        if self.full:
            return len(self.data) + len(self.test_data)
        else:
            return len(self.data)
    def __getitem__(self, index):
        FLAGS = self.FLAGS
        # NOTE(review): this assignment (and FLAGS.datasource below)
        # force-overrides the global flags on every fetch, which looks like
        # leftover debugging -- confirm before removing.
        FLAGS.single = False
        if not FLAGS.single:
            if self.full:
                # Indices past the train split fall through to the test split.
                if index >= len(self.data):
                    im, label = self.test_data[index - len(self.data)]
                else:
                    im, label = self.data[index]
            else:
                im, label = self.data[index]
        else:
            im, label = self.data[0]
        # CHW tensor -> HWC numpy array.
        im = np.transpose(im, (1, 2, 0)).numpy()
        image_size = 32
        label = self.one_hot_map[label]
        im = im * 255 / 256
        if self.noise:
            # Uniform dequantization noise within one 1/256 bin.
            im = im * self.rescale + \
                np.random.uniform(0, self.rescale * 1 / 256., im.shape)
        # Reseed so dataloader workers draw independent random streams.
        np.random.seed((index + int(time.time() * 1e7)) % 2**32)
        FLAGS.datasource = 'random'
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(
                0.0, self.rescale, (image_size, image_size, 3))
        return im_corrupt, im, label
class Cifar100(Dataset):
    """CIFAR-100 wrapper returning (corrupted, clean, one-hot label)."""
    def __init__(self, train=True, augment=False):
        if augment:
            transform_list = [
                torchvision.transforms.RandomCrop(32, padding=4),
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
            ]
            # NOTE(review): FLAGS.cutout is only defined in a commented-out
            # block at module scope -- this branch would raise; confirm.
            if FLAGS.cutout:
                transform_list.append(cutout())
            transform = transforms.Compose(transform_list)
        else:
            transform = transforms.ToTensor()
        self.data = CIFAR100(
            "/root/cifar100",
            transform=transform,
            train=train,
            download=True)
        self.one_hot_map = np.eye(100)
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        if not FLAGS.single:
            im, label = self.data[index]
        else:
            im, label = self.data[0]
        # CHW tensor -> HWC numpy array.
        im = np.transpose(im, (1, 2, 0)).numpy()
        image_size = 32
        label = self.one_hot_map[label]
        # Uniform dequantization noise.
        im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
        # Reseed so dataloader workers draw independent noise.
        np.random.seed((index + int(time.time() * 1e7)) % 2**32)
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(
                0.0, 1.0, (image_size, image_size, 3))
        return im_corrupt, im, label
class Svhn(Dataset):
    """SVHN digits returning (corrupted, clean, one-hot label) triples.

    NOTE(review): *train* and *augment* are accepted but unused -- SVHN
    selects its split via a `split=` argument; confirm intended.
    """
    def __init__(self, train=True, augment=False):
        transform = transforms.ToTensor()
        self.data = SVHN("/root/svhn", transform=transform, download=True)
        self.one_hot_map = np.eye(10)
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        if not FLAGS.single:
            im, label = self.data[index]
        else:
            # BUG FIX: this read `em, label = ...`, leaving `im` undefined
            # (NameError) whenever FLAGS.single was set.
            im, label = self.data[0]
        # CHW tensor -> HWC numpy array.
        im = np.transpose(im, (1, 2, 0)).numpy()
        image_size = 32
        label = self.one_hot_map[label]
        # Uniform dequantization noise.
        im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
        # Reseed so dataloader workers draw independent noise.
        np.random.seed((index + int(time.time() * 1e7)) % 2**32)
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(
                0.0, 1.0, (image_size, image_size, 3))
        return im_corrupt, im, label
class Mnist(Dataset):
    """MNIST digits with one-hot labels and uniform dequantization noise."""
    def __init__(self, train=True, rescale=1.0):
        self.data = MNIST(
            "/root/mnist",
            transform=transforms.ToTensor(),
            download=True, train=train)
        self.labels = np.eye(10)
        self.rescale = rescale
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        image, digit = self.data[index]
        one_hot = self.labels[digit]
        # Drop the channel axis, dequantize into the 255/256 range with
        # uniform noise, then apply the rescale factor.
        image = image.squeeze().numpy() / 256 * 255
        image = image + np.random.uniform(0, 1. / 256, (28, 28))
        image = image * self.rescale
        side = 28
        if FLAGS.datasource == 'default':
            im_corrupt = image + 0.3 * np.random.randn(side, side)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(0, self.rescale, (28, 28))
        return im_corrupt, image, one_hot
class DSprites(Dataset):
    """dSprites factor-of-variation dataset.

    Depending on the d*_only flags, all latent factors except one are
    frozen and the remaining factor becomes the label; the reduced set is
    tiled to restore a usable dataset size.  The cond_* arguments instead
    select which latent column(s) of the full dataset act as the label.
    Latent column order in the .npz: [color, shape, scale, rotation, posX, posY].
    """
    def __init__(
            self,
            cond_size=False,
            cond_shape=False,
            cond_pos=False,
            cond_rot=False):
        dat = np.load(FLAGS.dsprites_path)
        if FLAGS.dshape_only:
            l = dat['latents_values']
            # Freeze position, scale, rotation; vary shape only.
            mask = (l[:, 4] == 16 / 31) & (l[:, 5] == 16 /
                31) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
            self.data = np.tile(dat['imgs'][mask], (10000, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (10000, 1))
            self.label = self.label[:, 1:2]
        elif FLAGS.dpos_only:
            l = dat['latents_values']
            # mask = (l[:, 1] == 1) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
            # Freeze shape, rotation, scale; vary position only.
            mask = (l[:, 1] == 1) & (
                l[:, 3] == 30 * np.pi / 39) & (l[:, 2] == 0.5)
            self.data = np.tile(dat['imgs'][mask], (100, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (100, 1))
            self.label = self.label[:, 4:] + 0.5
        elif FLAGS.dsize_only:
            l = dat['latents_values']
            # mask = (l[:, 1] == 1) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
            # Freeze rotation, position, shape; vary scale only.
            mask = (l[:, 3] == 30 * np.pi / 39) & (l[:, 4] == 16 /
                31) & (l[:, 5] == 16 / 31) & (l[:, 1] == 1)
            self.data = np.tile(dat['imgs'][mask], (10000, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (10000, 1))
            self.label = (self.label[:, 2:3])
        elif FLAGS.drot_only:
            l = dat['latents_values']
            # Freeze scale, position, shape; vary rotation only.  The
            # rotation angle is encoded as (cos, sin) to avoid wrap-around.
            mask = (l[:, 2] == 0.5) & (l[:, 4] == 16 /
                31) & (l[:, 5] == 16 / 31) & (l[:, 1] == 1)
            self.data = np.tile(dat['imgs'][mask], (100, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (100, 1))
            self.label = (self.label[:, 3:4])
            self.label = np.concatenate(
                [np.cos(self.label), np.sin(self.label)], axis=1)
        elif FLAGS.dsprites_restrict:
            l = dat['latents_values']
            mask = (l[:, 1] == 1) & (l[:, 3] == 0 * np.pi / 39)
            self.data = dat['imgs'][mask]
            self.label = dat['latents_values'][mask]
        else:
            # Full dataset; pick the label column from the cond_* switches.
            self.data = dat['imgs']
            self.label = dat['latents_values']
            if cond_size:
                self.label = self.label[:, 2:3]
            elif cond_shape:
                self.label = self.label[:, 1:2]
            elif cond_pos:
                self.label = self.label[:, 4:]
            elif cond_rot:
                self.label = self.label[:, 3:4]
                self.label = np.concatenate(
                    [np.cos(self.label), np.sin(self.label)], axis=1)
            else:
                self.label = self.label[:, 1:2]
        # One-hot basis for the three shape classes.
        self.identity = np.eye(3)
    def __len__(self):
        return self.data.shape[0]
    def __getitem__(self, index):
        im = self.data[index]
        image_size = 64
        # Shape labels (the default) are turned into one-hot vectors; all
        # continuous factor labels are returned as-is.
        if not (
            FLAGS.dpos_only or FLAGS.dsize_only) and (
                not FLAGS.cond_size) and (
                not FLAGS.cond_pos) and (
                not FLAGS.cond_rot) and (
                not FLAGS.drot_only):
            label = self.identity[self.label[index].astype(
                np.int32) - 1].squeeze()
        else:
            label = self.label[index]
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size)
        elif FLAGS.datasource == 'random':
            im_corrupt = 0.5 + 0.5 * np.random.randn(image_size, image_size)
        return im_corrupt, im, label
class Imagenet(Dataset):
def __init__(self, train=True, augment=False):
if train:
for i in range(1, 11):
f = pickle.load(
open(
osp.join(
FLAGS.imagenet_path,
'train_data_batch_{}'.format(i)),
'rb'))
if | |
indeces
'''
abstractNums=get_elements(self.numbering, 'w:abstractNum')
indres=[0]
for x in abstractNums :
styles=get_elements(x, 'w:lvl/w:pStyle')
if styles :
pstyle_name = styles[0].get(norm_name('w:val') )
if pstyle_name == style :
ind=get_elements(x, 'w:lvl/w:pPr/w:ind')
if ind :
indres=[]
for indx in ind :
indres.append(int(indx.get(norm_name('w:left'))))
return indres
return indres
##########
def getdocumenttext(self):
    '''
    This function copied from 'python-docx' library
    Return the raw text of a document, as a list of paragraphs.
    '''
    # BUG FIX: both lines used `==` (comparison) instead of `=` and
    # misspelled norm_name(), raising a NameError on every call.  The
    # WordprocessingML text element tag is also 'w:t', not 'w:text'.
    paragraph_tag = norm_name('w:p')
    text_tag = norm_name('w:t')
    paratextlist=[]
    # Compile a list of all paragraph (p) elements
    paralist = []
    for element in self.document.iter():
        # Find p (paragraph) elements
        if element.tag == paragraph_tag:
            paralist.append(element)
    # Since a single sentence might be spread over multiple text elements, iterate through each
    # paragraph, appending all text (t) children to that paragraphs text.
    for para in paralist:
        paratext=u''
        # Loop through each paragraph
        for element in para.iter():
            # Find t (text) elements
            if element.tag == text_tag:
                if element.text:
                    paratext = paratext+element.text
        # Add our completed paragraph text to the list of paragraph text
        if not len(paratext) == 0:
            paratextlist.append(paratext)
    return paratextlist
#
# DocxComposer Class
#
class DocxComposer:
def __init__(self, stylefile=None):
    '''
    Constructor.  When *stylefile* is given, the style document is loaded
    immediately via new_document(); otherwise call new_document() later.
    '''
    # Lazily-built XML property trees, created in save().
    self._coreprops=None
    self._appprops=None
    self._contenttypes=None
    self._websettings=None
    self._wordrelationships=None
    self.breakbefore = False    # True right after a page break was emitted
    self.last_paragraph = None
    self.stylenames = {}
    # Document core properties (see set_props()).
    self.title = ""
    self.subject = ""
    self.creator = "Python:DocDocument"
    self.company = ""
    self.category = ""
    self.descriptions = ""
    self.keywords = []
    self.max_table_width = 8000
    self.sizeof_field_list = [2000,5500]
    # Numbering definitions merged into word/numbering.xml on save().
    self.abstractNums = []
    self.numids = []
    self.images = 0             # image counter
    self.nocoverpage = False    # see set_coverpage()
    if stylefile == None :
        self.template_dir = None
    else :
        self.new_document(stylefile)
def set_style_file(self, stylefile):
'''
Set style file
'''
fname = find_file(stylefile, 'sphinx-docxbuilder/docx')
if fname == None:
print "Error: style file( %s ) not found" % stylefile
return None
self.styleDocx = DocxDocument(fname)
self.template_dir = tempfile.mkdtemp(prefix='docx-')
result = self.styleDocx.extract_files(self.template_dir)
if not result :
print "Unexpected error in copy_docx_to_tempfile"
shutil.rmtree(temp_dir, True)
self.template_dir = None
return
self.stylenames = self.styleDocx.extract_stylenames()
self.paper_info = self.styleDocx.get_paper_info()
self.bullet_list_indents = self.get_numbering_left('ListBullet')
self.bullet_list_numId = self.styleDocx.get_numbering_style_id('ListBullet')
self.number_list_indent = self.get_numbering_left('ListNumber')[0]
self.number_list_numId = self.styleDocx.get_numbering_style_id('ListNumber')
self.abstractNums = get_elements(self.styleDocx.numbering, 'w:abstractNum')
self.numids = get_elements(self.styleDocx.numbering, 'w:num')
self.numbering = make_element_tree(['w:numbering'])
return
def set_coverpage(self, flag=True):
    """Enable (default) or disable emission of the style's cover page."""
    self.nocoverpage = not flag
def get_numbering_ids(self):
    '''
    Return the w:numId attribute of every known numbering element.
    '''
    numid_attr = norm_name('w:numId')
    return [num_elem.attrib[numid_attr] for num_elem in self.numids]
def get_max_numbering_id(self):
    '''
    Return the largest numbering id in use, or 0 when none exceed it.
    '''
    candidates = [0] + [int(x) for x in self.get_numbering_ids()]
    return max(candidates)
def delete_template(self):
    '''
    Delete the temporary directory in which we compose a new document.
    '''
    # ignore_errors=True: the directory may already be partially removed.
    shutil.rmtree(self.template_dir, True)
def new_document(self, stylefile):
    '''
    Prepare a new document based on *stylefile*: load the styles, create
    an empty w:document/w:body tree, and build the default relationship
    list.  Returns the new document element tree.
    '''
    self.set_style_file(stylefile)
    self.document = make_element_tree([['w:document'],[['w:body']]])
    self.docbody = get_elements(self.document, '/w:document/w:body')[0]
    self.current_docbody = self.docbody
    self.relationships = self.relationshiplist()
    return self.document
def set_props(self, title, subject, creator, company='', category='', descriptions='', keywords=None):
    '''
    Set document's properties: title, subject, creator, company, category, descriptions, keywords.
    '''
    # BUG FIX: the previous default `keywords=[]` was a mutable default
    # argument shared between calls; use None as the sentinel instead.
    self.title = title
    self.subject = subject
    self.creator = creator
    self.company = company
    self.category = category
    self.descriptions = descriptions
    self.keywords = keywords if keywords is not None else []
def save(self, docxfilename):
    '''
    Save the composed document to the docx file 'docxfilename'.
    Builds the property trees, merges the numbering definitions,
    optionally prepends the style document's cover page, then serializes
    every tree into the zip archive and removes the temp directory.
    '''
    assert os.path.isdir(self.template_dir)
    # Build the docProps/* and supporting XML trees.
    self.coreproperties()
    self.appproperties()
    self.contenttypes()
    self.websettings()
    self.wordrelationships()
    # Merge the abstract numbering definitions and ids into one tree.
    for x in self.abstractNums :
        self.numbering.append(x)
    for x in self.numids :
        self.numbering.append(x)
    coverpage = self.styleDocx.get_coverpage()
    if not self.nocoverpage and coverpage is not None :
        print "output Coverpage"
        self.docbody.insert(0,coverpage)
    self.docbody.append(self.paper_info)
    # Serialize our trees into out zip file
    treesandfiles = {self.document:'word/document.xml',
        self._coreprops:'docProps/core.xml',
        self._appprops:'docProps/app.xml',
        self._contenttypes:'[Content_Types].xml',
        self.numbering:'word/numbering.xml',
        self.styleDocx.styles:'word/styles.xml',
        self._websettings:'word/webSettings.xml',
        self._wordrelationships:'word/_rels/document.xml.rels'}
    docxfile = self.styleDocx.restruct_docx(self.template_dir, docxfilename, treesandfiles.values())
    for tree in treesandfiles:
        if tree != None:
            #print 'Saving: '+treesandfiles[tree]
            treestring = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', standalone='yes')
            docxfile.writestr(treesandfiles[tree],treestring)
    print 'Saved new file to: '+docxfilename
    shutil.rmtree(self.template_dir)
    return
##################
def set_docbody(self, body=None):
    '''
    Select which body element subsequent appends target; passing None
    resets to the main document body.  Returns the active body.
    '''
    self.current_docbody = self.docbody if body is None else body
    return self.current_docbody
def append(self, para):
    '''
    Append *para* to the active body and remember it as the last
    paragraph.  Returns *para*.
    '''
    body = self.current_docbody
    body.append(para)
    self.last_paragraph = para
    return para
def table_of_contents(self, toc_text='Contents:', maxlevel=3):
'''
Insert the Table of Content
'''
toc_tree = [['w:sdt'],
[['w:sdtPr'],
[['w:rPr'], [['w:long']] ],
[['w:docPartObj'], [['w:docPartGallery', {'w:val':'Table of Contents'}]], [['w:docPartUnique']] ]
]
]
sdtContent_tree = [['w:sdtContent']]
if toc_text :
p_tree = [['w:p'], [['w:pPr'], [['w:pStyle', {'w:val':'TOC_Title'}]] ], [['w:r'], [['w:rPr'], [['w:long']] ], [['w:t',toc_text]] ] ]
sdtContent_tree.append(p_tree)
p_tree = [['w:p'],
[['w:pPr'],
[['w:pStyle', {'w:val':'TOC_Contents'}]],
[['w:tabs'],
[['w:tab',{'w:val':'right', 'w:leader':'dot','w:pos':'8488'}] ]
],
[['w:rPr'], [['w:b',{'w:val':'0'}]], [['w:noProof']] ]
],
[['w:r'],[['w:fldChar', {'w:fldCharType':'begin'}]]],
[['w:r'],[['w:instrText', ' TOC \o "1-%d" \h \z \u ' % maxlevel , {'xml:space':'preserve'}]]],
[['w:r'],[['w:fldChar', {'w:fldCharType':'separare'}]]],
[['w:r'],[['w:fldChar', {'w:fldCharType':'end'}]]]
]
sdtContent_tree.append(p_tree)
p_tree = [['w:p'], [ ['w:r'], [['w:fldChar',{'w:fldCharType':'end'}]] ] ]
sdtContent_tree.append(p_tree)
toc_tree.append(sdtContent_tree)
sdt = make_element_tree(toc_tree)
self.append(sdt)
#################
#### Output PageBreak
def pagebreak(self, type='page', orient='portrait'):
    '''
    Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.
    This method is copied from 'python-docx' library
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        raise ValueError('Page break style "%s" not implemented. Valid styles: %s.' % (type, validtypes))
    pagebreak_tree = [['w:p']]
    if type == 'page':
        run_tree = [['w:r'],[['w:br', {'w:type':'page'}]]]
    elif type == 'section':
        if orient == 'portrait':
            attrs = {'w:w':'12240','w:h':'15840'}
        elif orient == 'landscape':
            attrs = {'w:h':'12240','w:w':'15840', 'w:orient':'landscape'}
        else:
            # Guard added: an unknown orientation previously fell through
            # to a NameError on `attrs`.
            raise ValueError('Page orientation "%s" not implemented.' % orient)
        run_tree = [['w:pPr'],[['w:sectPr'], [['w:pgSz', attrs]] ] ]
    pagebreak_tree.append(run_tree)
    pagebreak = make_element_tree(pagebreak_tree)
    self.append(pagebreak)
    # BUG FIX: this flag was misspelled ('breakbrefore'), so the real
    # `self.breakbefore` state read by make_paragraph() was never set.
    self.breakbefore = True
    return pagebreak
#################
#### Output Paragraph
def make_paragraph(self, style='BodyText', block_level=0):
    '''
    Make a new paragraph element with *style*, indented by *block_level*
    list levels.  The element is returned but not appended.
    '''
    # If 'style' isn't defined yet, create a new style on the fly.
    if style not in self.stylenames :
        self.new_paragraph_style(style)
    # Calculate the left indent from the nesting level.
    ind = 0
    if block_level > 0 :
        ind = self.number_list_indent * block_level
    # set paragraph tree
    paragraph_tree = [['w:p'],
        [['w:pPr'],
         [['w:pStyle',{'w:val':style}]],
         [['w:ind',{'w:leftChars':'0','w:left': str(ind)} ]]
        ]
    ]
    if self.breakbefore :
        # Carry the rendered page-break marker into the new paragraph.
        paragraph_tree.append( [['w:r'], [['w:lastRenderedPageBreak']]] )
    # create paragraph
    paragraph = make_element_tree(paragraph_tree)
    return paragraph
#################
#### Output Paragraph
def paragraph(self, paratext=None, style='BodyText', block_level=0, create_only=False):
    '''
    Make a new paragraph element, containing a run, and some text.
    Return the paragraph element.  LiteralBlock text is split into its
    source lines so each line becomes a run separated by breaks.
    '''
    isliteralblock=False
    if style == 'LiteralBlock' :
        paratext = paratext[0].splitlines()
        isliteralblock=True
    paragraph = self.make_paragraph(style, block_level)
    # Insert a text run
    if paratext != None:
        self.make_runs(paragraph, paratext, isliteralblock)
    # Unless 'create_only' is set, append the paragraph to the document.
    if not create_only :
        self.append(paragraph)
        self.last_paragraph = paragraph
    return paragraph
def insert_linespace(self):
    '''Append an empty default-style paragraph as vertical spacing.'''
    self.append(self.make_paragraph())
def get_paragraph_text(self, paragraph=None):
    '''
    Return the concatenated w:t text of *paragraph* (default: the last
    paragraph appended).
    '''
    if paragraph is None:
        # BUG FIX: this read `paragaph = ...` -- a typo that left the
        # argument None and crashed the get_elements() call below.
        paragraph = self.last_paragraph
    txt_elem = get_elements(paragraph, 'w:r/w:t')
    result = ''
    for txt in txt_elem :
        result += txt.text
    return result
def get_last_paragraph_style(self):
    '''
    Return the style name of the last paragraph, defaulting to BodyText.
    '''
    style = get_attribute(self.last_paragraph, 'w:pPr/w:pStyle', 'w:val')
    return style if style is not None else 'BodyText'
def insert_paragraph_property(self, paragraph, style='BodyText'):
    '''
    Insert a paragraph property (w:pPr) element carrying *style* into
    *paragraph*.  Unknown styles are created first; the stored style id
    is then used.  Returns the paragraph.
    '''
    if style not in self.stylenames :
        self.new_paragraph_style(style)
    style = self.stylenames.get(style, 'BodyText')
    pPr = make_element_tree( [ ['w:pPr'], [['w:pStyle',{'w:val':style}]] ] )
    paragraph.append(pPr)
    return paragraph
def get_last_paragraph(self):
    '''Return the last w:p element of the current body, or None.'''
    paras = get_elements(self.current_docbody, 'w:p')
    # NOTE(review): `> 1` means a body containing exactly one paragraph
    # returns None -- confirm this is intended rather than `>= 1`.
    if len(paras) > 1:
        return paras[-1]
    return None
def trim_paragraph(self):
    '''
    Remove the last paragraph of the current body and point
    last_paragraph at its predecessor (or None when none remains).
    '''
    paras = get_elements(self.current_docbody, 'w:p')
    # BUG FIX: both conditions read `len(para)` -- an undefined name --
    # instead of the `paras` list fetched above.
    if len(paras) > 2:
        self.last_paragraph = paras[-2]
        self.current_docbody.remove(paras[-1])
    elif len(paras) > 1:
        self.last_paragraph = None
        self.current_docbody.remove(paras[-1])
    return
def get_paragraph_style(self, paragraph, force_create=False):
    '''
    Get stylename of the paragraph.  Falls back to 'BodyText'; when
    *force_create* is set, a missing property element is inserted first.
    '''
    style = get_attribute(paragraph, 'w:pPr/w:pStyle', 'w:val')
    if style is not None:
        return style
    if force_create:
        self.insert_paragraph_property(paragraph)
    return 'BodyText'
def set_indent(self, paragraph, lskip):
    '''
    Set the left indent of *paragraph* to *lskip*.
    '''
    # NOTE(review): units presumed to be twips, as in the other w:left
    # attributes in this module -- confirm.
    ind = set_attributes(paragraph, 'w:pPr/w:ind', {'w:leftChars':'0','w:left': str(lskip)} )
    return ind
def make_runs(self, paragraph, targettext, literal_block=False):
    '''
    Append runs built from *targettext* to *paragraph*.

    *targettext* may be a single string or a list; a list entry that is
    itself a [text, style] pair gets a styled run.  For literal blocks
    each entry is emitted as raw XML with a line break between entries.
    Returns the paragraph.
    '''
    run = []
    if isinstance(targettext, (list)) :
        for i,x in enumerate(targettext) :
            if isinstance(x, (list)) :
                run.append(self.make_run(x[0], style=x[1]))
            else:
                if literal_block :
                    run_list = self.make_run(x,rawXml=True)
                    run.extend(run_list)
                else:
                    run.append(self.make_run(x))
            if literal_block and i+1 < len(targettext) :
                # ':br' is make_run's marker for an explicit line break.
                run.append( self.make_run(':br') )
    else:
        if literal_block :
            run.extend(self.make_run(targettext,rawXml=True))
        else:
            run.append(self.make_run(targettext))
    for r in run:
        paragraph.append(r)
    return paragraph
def make_run(self, txt, style='Normal', rawXml=None):
'''
Make a new styled run from text.
'''
run_tree = [['w:r']]
if txt == ":br" :
run_tree.append([['w:br']])
else:
attr ={}
if txt.find(' ') != -1 :
attr ={'xml:space':'preserve'}
if style != 'Normal' :
if style not in self.stylenames :
self.new_character_style(style)
run_tree.append([['w:rPr'], [['w:rStyle',{'w:val':style}], [['w:t', txt, attr]] ]])
else:
run_tree.append([['w:t', txt, attr]])
# Make run element
if rawXml:
xmltxt='<w:p xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'+txt+'</w:p>'
p = etree.fromstring(xmltxt)
run = get_elements(p, 'w:r')
## remove the last run, because it could be '<w:br>'
run.pop()
else:
run | |
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Public API of this generated module: one output type per nested object
# of the Infinispan CRD schema (class names mirror the field paths).
__all__ = [
    'InfinispanSpec',
    'InfinispanSpecAffinity',
    'InfinispanSpecAffinityNodeAffinity',
    'InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution',
    'InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
    'InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions',
    'InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields',
    'InfinispanSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution',
    'InfinispanSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms',
    'InfinispanSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions',
    'InfinispanSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields',
    'InfinispanSpecAffinityPodAffinity',
    'InfinispanSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution',
    'InfinispanSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
    'InfinispanSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
    'InfinispanSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
    'InfinispanSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution',
    'InfinispanSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
    'InfinispanSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
    'InfinispanSpecAffinityPodAntiAffinity',
    'InfinispanSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution',
    'InfinispanSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
    'InfinispanSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
    'InfinispanSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
    'InfinispanSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution',
    'InfinispanSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
    'InfinispanSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
    'InfinispanSpecAutoscale',
    'InfinispanSpecContainer',
    'InfinispanSpecExpose',
    'InfinispanSpecLogging',
    'InfinispanSpecSecurity',
    'InfinispanSpecSecurityEndpointEncryption',
    'InfinispanSpecService',
    'InfinispanSpecServiceContainer',
    'InfinispanSpecServiceSites',
    'InfinispanSpecServiceSitesLocal',
    'InfinispanSpecServiceSitesLocalExpose',
    'InfinispanSpecServiceSitesLocations',
    'InfinispanStatus',
    'InfinispanStatusConditions',
    'InfinispanStatusSecurity',
    'InfinispanStatusSecurityEndpointEncryption',
]
@pulumi.output_type
class InfinispanSpec(dict):
    """
    InfinispanSpec defines the desired state of Infinispan
    """
    # NOTE: generated by crd2pulumi (see file header).  The
    # @pulumi.output_type decorator introspects the property getters
    # below, so the structure must not be hand-restructured.
    def __init__(__self__, *,
                 replicas: int,
                 affinity: Optional['outputs.InfinispanSpecAffinity'] = None,
                 autoscale: Optional['outputs.InfinispanSpecAutoscale'] = None,
                 container: Optional['outputs.InfinispanSpecContainer'] = None,
                 expose: Optional['outputs.InfinispanSpecExpose'] = None,
                 image: Optional[str] = None,
                 logging: Optional['outputs.InfinispanSpecLogging'] = None,
                 security: Optional['outputs.InfinispanSpecSecurity'] = None,
                 service: Optional['outputs.InfinispanSpecService'] = None):
        """
        InfinispanSpec defines the desired state of Infinispan
        :param int replicas: the only required field of the spec; always stored
        :param 'InfinispanSpecAffinityArgs' affinity: Affinity is a group of affinity scheduling rules.
        :param 'InfinispanSpecAutoscaleArgs' autoscale: Autoscale describe autoscaling configuration for the cluster
        :param 'InfinispanSpecContainerArgs' container: InfinispanContainerSpec specify resource requirements per container
        :param 'InfinispanSpecExposeArgs' expose: ExposeSpec describe how Infinispan will be exposed externally
        :param 'InfinispanSpecSecurityArgs' security: InfinispanSecurity info for the user application connection
        :param 'InfinispanSpecServiceArgs' service: InfinispanServiceSpec specify configuration for specific service
        """
        # The required field is stored unconditionally; every optional
        # field is stored only when a value was actually supplied.
        pulumi.set(__self__, "replicas", replicas)
        if affinity is not None:
            pulumi.set(__self__, "affinity", affinity)
        if autoscale is not None:
            pulumi.set(__self__, "autoscale", autoscale)
        if container is not None:
            pulumi.set(__self__, "container", container)
        if expose is not None:
            pulumi.set(__self__, "expose", expose)
        if image is not None:
            pulumi.set(__self__, "image", image)
        if logging is not None:
            pulumi.set(__self__, "logging", logging)
        if security is not None:
            pulumi.set(__self__, "security", security)
        if service is not None:
            pulumi.set(__self__, "service", service)

    @property
    @pulumi.getter
    def replicas(self) -> int:
        """
        The required replica count of the spec.
        """
        return pulumi.get(self, "replicas")

    @property
    @pulumi.getter
    def affinity(self) -> Optional['outputs.InfinispanSpecAffinity']:
        """
        Affinity is a group of affinity scheduling rules.
        """
        return pulumi.get(self, "affinity")

    @property
    @pulumi.getter
    def autoscale(self) -> Optional['outputs.InfinispanSpecAutoscale']:
        """
        Autoscale describe autoscaling configuration for the cluster
        """
        return pulumi.get(self, "autoscale")

    @property
    @pulumi.getter
    def container(self) -> Optional['outputs.InfinispanSpecContainer']:
        """
        InfinispanContainerSpec specify resource requirements per container
        """
        return pulumi.get(self, "container")

    @property
    @pulumi.getter
    def expose(self) -> Optional['outputs.InfinispanSpecExpose']:
        """
        ExposeSpec describe how Infinispan will be exposed externally
        """
        return pulumi.get(self, "expose")

    @property
    @pulumi.getter
    def image(self) -> Optional[str]:
        # Optional string field; semantics are defined by the Infinispan
        # operator CRD (presumably the container image) -- not documented
        # in the generated schema.
        return pulumi.get(self, "image")

    @property
    @pulumi.getter
    def logging(self) -> Optional['outputs.InfinispanSpecLogging']:
        # Optional logging configuration (see InfinispanSpecLogging).
        return pulumi.get(self, "logging")

    @property
    @pulumi.getter
    def security(self) -> Optional['outputs.InfinispanSpecSecurity']:
        """
        InfinispanSecurity info for the user application connection
        """
        return pulumi.get(self, "security")

    @property
    @pulumi.getter
    def service(self) -> Optional['outputs.InfinispanSpecService']:
        """
        InfinispanServiceSpec specify configuration for specific service
        """
        return pulumi.get(self, "service")

    def _translate_property(self, prop):
        # Map camelCase wire names to the snake_case Python property
        # names; unknown names pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InfinispanSpecAffinity(dict):
    """
    Affinity is a group of affinity scheduling rules.
    """
    # NOTE: generated by crd2pulumi; structure is introspected by the
    # @pulumi.output_type decorator -- do not restructure by hand.
    def __init__(__self__, *,
                 node_affinity: Optional['outputs.InfinispanSpecAffinityNodeAffinity'] = None,
                 pod_affinity: Optional['outputs.InfinispanSpecAffinityPodAffinity'] = None,
                 pod_anti_affinity: Optional['outputs.InfinispanSpecAffinityPodAntiAffinity'] = None):
        """
        Affinity is a group of affinity scheduling rules.
        :param 'InfinispanSpecAffinityNodeAffinityArgs' node_affinity: Describes node affinity scheduling rules for the pod.
        :param 'InfinispanSpecAffinityPodAffinityArgs' pod_affinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        :param 'InfinispanSpecAffinityPodAntiAffinityArgs' pod_anti_affinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        """
        # All three fields are optional; only supplied values are stored.
        if node_affinity is not None:
            pulumi.set(__self__, "node_affinity", node_affinity)
        if pod_affinity is not None:
            pulumi.set(__self__, "pod_affinity", pod_affinity)
        if pod_anti_affinity is not None:
            pulumi.set(__self__, "pod_anti_affinity", pod_anti_affinity)

    @property
    @pulumi.getter(name="nodeAffinity")
    def node_affinity(self) -> Optional['outputs.InfinispanSpecAffinityNodeAffinity']:
        """
        Describes node affinity scheduling rules for the pod.
        """
        return pulumi.get(self, "node_affinity")

    @property
    @pulumi.getter(name="podAffinity")
    def pod_affinity(self) -> Optional['outputs.InfinispanSpecAffinityPodAffinity']:
        """
        Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_affinity")

    @property
    @pulumi.getter(name="podAntiAffinity")
    def pod_anti_affinity(self) -> Optional['outputs.InfinispanSpecAffinityPodAntiAffinity']:
        """
        Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_anti_affinity")

    def _translate_property(self, prop):
        # Map camelCase wire names to the snake_case Python property
        # names; unknown names pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InfinispanSpecAffinityNodeAffinity(dict):
    """
    Describes node affinity scheduling rules for the pod.
    """
    # NOTE: generated by crd2pulumi; structure is introspected by the
    # @pulumi.output_type decorator -- do not restructure by hand.
    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional['outputs.InfinispanSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution'] = None):
        """
        Describes node affinity scheduling rules for the pod.
        :param Sequence['InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
        :param 'InfinispanSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs' required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        """
        # Both fields are optional; only supplied values are stored.
        if preferred_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
        if required_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)

    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")

    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional['outputs.InfinispanSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution']:
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")

    def _translate_property(self, prop):
        # Map camelCase wire names to the snake_case Python property
        # names; unknown names pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
    """
    # NOTE: generated by crd2pulumi; structure is introspected by the
    # @pulumi.output_type decorator -- do not restructure by hand.
    def __init__(__self__, *,
                 preference: 'outputs.InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
                 weight: int):
        """
        An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
        :param 'InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs' preference: A node selector term, associated with the corresponding weight.
        :param int weight: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        # Both fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "preference", preference)
        pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter
    def preference(self) -> 'outputs.InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference':
        """
        A node selector term, associated with the corresponding weight.
        """
        return pulumi.get(self, "preference")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # Map camelCase wire names to the snake_case Python property
        # names; unknown names pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference(dict):
"""
A node selector term, associated with the corresponding weight.
"""
def __init__(__self__, *,
match_expressions: Optional[Sequence['outputs.InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']] = None,
match_fields: Optional[Sequence['outputs.InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']] = None):
"""
A node selector term, associated with the corresponding weight.
:param Sequence['InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs'] match_expressions: A list of node selector requirements by node's labels.
:param Sequence['InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs'] match_fields: A list of node selector requirements by node's fields.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_fields is not None:
pulumi.set(__self__, "match_fields", match_fields)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[Sequence['outputs.InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']]:
"""
A list of node selector requirements by node's labels.
"""
return pulumi.get(self, "match_expressions")
@property
@pulumi.getter(name="matchFields")
def match_fields(self) -> Optional[Sequence['outputs.InfinispanSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']]:
"""
A list of node selector requirements by node's fields.
"""
| |
<reponame>CiscoDevNet/ydk-py
""" Cisco_IOS_XR_ipv4_acl_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Ipv4AclDscpNumber(Enum):
    """
    Ipv4AclDscpNumber (Enum Class)

    Ipv4 acl dscp number

    .. data:: default = 0
        Default DSCP
    .. data:: af11 = 10
        Match packets with AF11 DSCP
    .. data:: af12 = 12
        Match packets with AF12 DSCP
    .. data:: af13 = 14
        Match packets with AF13 DSCP
    .. data:: af21 = 18
        Match packets with AF21 DSCP
    .. data:: af22 = 20
        Match packets with AF22 DSCP
    .. data:: af23 = 22
        Match packets with AF23 DSCP
    .. data:: af31 = 26
        Match packets with AF31 DSCP
    .. data:: af32 = 28
        Match packets with AF32 DSCP
    .. data:: af33 = 30
        Match packets with AF33 DSCP
    .. data:: af41 = 34
        Match packets with AF41 DSCP
    .. data:: af42 = 36
        Match packets with AF42 DSCP
    .. data:: af43 = 38
        Match packets with AF43 DSCP
    .. data:: cs1 = 8
        Match packets with CS1 (precedence 1) DSCP
    .. data:: cs2 = 16
        Match packets with CS2 (precedence 2) DSCP
    .. data:: cs3 = 24
        Match packets with CS3 (precedence 3) DSCP
    .. data:: cs4 = 32
        Match packets with CS4 (precedence 4) DSCP
    .. data:: cs5 = 40
        Match packets with CS5 (precedence 5) DSCP
    .. data:: cs6 = 48
        Match packets with CS6 (precedence 6) DSCP
    .. data:: cs7 = 56
        Match packets with CS7 (precedence 7) DSCP
    .. data:: ef = 46
        Match packets with EF DSCP
    """

    # Member values are the numeric DSCP code points; the string is the
    # YANG identifier used on the wire.
    default = Enum.YLeaf(0, "default")

    af11 = Enum.YLeaf(10, "af11")

    af12 = Enum.YLeaf(12, "af12")

    af13 = Enum.YLeaf(14, "af13")

    af21 = Enum.YLeaf(18, "af21")

    af22 = Enum.YLeaf(20, "af22")

    af23 = Enum.YLeaf(22, "af23")

    af31 = Enum.YLeaf(26, "af31")

    af32 = Enum.YLeaf(28, "af32")

    af33 = Enum.YLeaf(30, "af33")

    af41 = Enum.YLeaf(34, "af41")

    af42 = Enum.YLeaf(36, "af42")

    af43 = Enum.YLeaf(38, "af43")

    cs1 = Enum.YLeaf(8, "cs1")

    cs2 = Enum.YLeaf(16, "cs2")

    cs3 = Enum.YLeaf(24, "cs3")

    cs4 = Enum.YLeaf(32, "cs4")

    cs5 = Enum.YLeaf(40, "cs5")

    cs6 = Enum.YLeaf(48, "cs6")

    cs7 = Enum.YLeaf(56, "cs7")

    ef = Enum.YLeaf(46, "ef")

    @staticmethod
    def _meta_info():
        # YDK metadata hook; the meta module is imported lazily here so
        # that importing this module does not require the meta tables.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_datatypes as meta
        return meta._meta_table['Ipv4AclDscpNumber']
class Ipv4AclFragFlags(Enum):
    """
    Ipv4AclFragFlags (Enum Class)

    Ipv4 acl frag flags

    .. data:: dont_fragment = 1
        Match don't fragment flag
    .. data:: is_fragment = 2
        Match is fragment flag
    .. data:: first_fragment = 4
        Match first fragment flag
    .. data:: last_fragment = 8
        Match last fragment flag
    .. data:: dont_fragment_is_fragment = 3
        Match don't fragment and is fragment flag
    .. data:: dont_fragment_first_fragment = 5
        Match don't fragment and first fragment flag
    .. data:: dont_fragment_last_fragment = 9
        Match don't fragment and last fragment flag
    """

    # Values form a bitmask (1, 2, 4, 8); the combined members are the
    # bitwise OR of the individual flags (3 = 1|2, 5 = 1|4, 9 = 1|8).
    dont_fragment = Enum.YLeaf(1, "dont-fragment")

    is_fragment = Enum.YLeaf(2, "is-fragment")

    first_fragment = Enum.YLeaf(4, "first-fragment")

    last_fragment = Enum.YLeaf(8, "last-fragment")

    dont_fragment_is_fragment = Enum.YLeaf(3, "dont-fragment-is-fragment")

    dont_fragment_first_fragment = Enum.YLeaf(5, "dont-fragment-first-fragment")

    dont_fragment_last_fragment = Enum.YLeaf(9, "dont-fragment-last-fragment")

    @staticmethod
    def _meta_info():
        # YDK metadata hook; lazy import keeps the meta tables optional
        # at module-load time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_datatypes as meta
        return meta._meta_table['Ipv4AclFragFlags']
class Ipv4AclGrantEnum(Enum):
    """
    Ipv4AclGrantEnum (Enum Class)

    Ipv4 acl grant enum

    .. data:: deny = 0
        Deny
    .. data:: permit = 1
        Permit
    """

    # The grant action of an ACL entry: drop (deny) or allow (permit).
    deny = Enum.YLeaf(0, "deny")

    permit = Enum.YLeaf(1, "permit")

    @staticmethod
    def _meta_info():
        # YDK metadata hook; lazy import keeps the meta tables optional
        # at module-load time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_datatypes as meta
        return meta._meta_table['Ipv4AclGrantEnum']
class Ipv4AclIcmpTypeCodeEnum(Enum):
    """
    Ipv4AclIcmpTypeCodeEnum (Enum Class)

    Ipv4 acl icmp type code enum

    .. data:: echo_reply = 65535
        Echo reply
    .. data:: network_unreachable = 196608
        Network unreachable
    .. data:: host_unreachable = 196609
        Host unreachable
    .. data:: protocol_unreachable = 196610
        Protocol unreachable
    .. data:: port_unreachable = 196611
        Port unreachable
    .. data:: packet_too_big = 196612
        Fragmentation needed and DF set
    .. data:: source_route_failed = 196613
        Source route failed
    .. data:: network_unknown = 196614
        Network unknown
    .. data:: host_unknown = 196615
        Host unknown
    .. data:: host_isolated = 196616
        Host isolated
    .. data:: dod_net_prohibited = 196617
        Network prohibited
    .. data:: dod_host_prohibited = 196618
        Host prohibited
    .. data:: host_tos_unreachable = 196619
        Host unreachable for TOS
    .. data:: net_tos_unreachable = 196620
        Network unreachable for TOS
    .. data:: administratively_prohibited = 196621
        Administratively prohibited
    .. data:: host_precedence_unreachable = 196622
        Host unreachable for precedence
    .. data:: precedence_unreachable = 196623
        Precedence cutoff
    .. data:: unreachable = 262143
        All unreachables
    .. data:: source_quench = 327679
        Source quenches
    .. data:: network_redirect = 327680
        Network redirect
    .. data:: host_redirect = 327681
        Host redirect
    .. data:: net_tos_redirect = 327682
        Network redirect for TOS
    .. data:: host_tos_redirect = 327683
        Host redirect for TOS
    .. data:: redirect = 393215
        All redirects
    .. data:: alternate_address = 458751
        Alternate address
    .. data:: echo = 589823
        Echo (ping)
    .. data:: router_advertisement = 655359
        Router discovery advertisements
    .. data:: router_solicitation = 720895
        Router discovery solicitations
    .. data:: ttl_exceeded = 720896
        TTL exceeded
    .. data:: reassembly_timeout = 720897
        Reassembly timeout
    .. data:: time_exceeded = 786431
        All time exceeds
    .. data:: general_parameter_problem = 786432
        Parameter problem
    .. data:: option_missing = 786433
        Parameter required but not present
    .. data:: no_room_for_option = 786434
        Parameter required but no room
    .. data:: parameter_problem = 851967
        All parameter problems
    .. data:: timestamp_request = 917503
        Timestamp requests
    .. data:: timestamp_reply = 983039
        Timestamp replies
    .. data:: information_request = 1048575
        Information request
    .. data:: information_reply = 1114111
        Information replies
    .. data:: mask_request = 1179647
        Mask requests
    .. data:: mask_reply = 1245183
        Mask replies
    .. data:: traceroute = 2031615
        Traceroute
    .. data:: conversion_error = 2097151
        Datagram conversion
    .. data:: mobile_redirect = 2162687
        Mobile host redirect
    """

    # NOTE(review): from the table, each value appears to encode the
    # ICMP message as ((type + 1) << 16) - 1 when the code is a
    # wildcard (low 16 bits all set, e.g. echo = 0x8FFFF) and as
    # (type << 16) | code for a specific type/code pair (e.g.
    # port_unreachable = 0x30003) -- confirm against the YANG model
    # before relying on this encoding.
    echo_reply = Enum.YLeaf(65535, "echo-reply")

    network_unreachable = Enum.YLeaf(196608, "network-unreachable")

    host_unreachable = Enum.YLeaf(196609, "host-unreachable")

    protocol_unreachable = Enum.YLeaf(196610, "protocol-unreachable")

    port_unreachable = Enum.YLeaf(196611, "port-unreachable")

    packet_too_big = Enum.YLeaf(196612, "packet-too-big")

    source_route_failed = Enum.YLeaf(196613, "source-route-failed")

    network_unknown = Enum.YLeaf(196614, "network-unknown")

    host_unknown = Enum.YLeaf(196615, "host-unknown")

    host_isolated = Enum.YLeaf(196616, "host-isolated")

    dod_net_prohibited = Enum.YLeaf(196617, "dod-net-prohibited")

    dod_host_prohibited = Enum.YLeaf(196618, "dod-host-prohibited")

    host_tos_unreachable = Enum.YLeaf(196619, "host-tos-unreachable")

    net_tos_unreachable = Enum.YLeaf(196620, "net-tos-unreachable")

    administratively_prohibited = Enum.YLeaf(196621, "administratively-prohibited")

    host_precedence_unreachable = Enum.YLeaf(196622, "host-precedence-unreachable")

    precedence_unreachable = Enum.YLeaf(196623, "precedence-unreachable")

    unreachable = Enum.YLeaf(262143, "unreachable")

    source_quench = Enum.YLeaf(327679, "source-quench")

    network_redirect = Enum.YLeaf(327680, "network-redirect")

    host_redirect = Enum.YLeaf(327681, "host-redirect")

    net_tos_redirect = Enum.YLeaf(327682, "net-tos-redirect")

    host_tos_redirect = Enum.YLeaf(327683, "host-tos-redirect")

    redirect = Enum.YLeaf(393215, "redirect")

    alternate_address = Enum.YLeaf(458751, "alternate-address")

    echo = Enum.YLeaf(589823, "echo")

    router_advertisement = Enum.YLeaf(655359, "router-advertisement")

    router_solicitation = Enum.YLeaf(720895, "router-solicitation")

    ttl_exceeded = Enum.YLeaf(720896, "ttl-exceeded")

    reassembly_timeout = Enum.YLeaf(720897, "reassembly-timeout")

    time_exceeded = Enum.YLeaf(786431, "time-exceeded")

    general_parameter_problem = Enum.YLeaf(786432, "general-parameter-problem")

    option_missing = Enum.YLeaf(786433, "option-missing")

    no_room_for_option = Enum.YLeaf(786434, "no-room-for-option")

    parameter_problem = Enum.YLeaf(851967, "parameter-problem")

    timestamp_request = Enum.YLeaf(917503, "timestamp-request")

    timestamp_reply = Enum.YLeaf(983039, "timestamp-reply")

    information_request = Enum.YLeaf(1048575, "information-request")

    information_reply = Enum.YLeaf(1114111, "information-reply")

    mask_request = Enum.YLeaf(1179647, "mask-request")

    mask_reply = Enum.YLeaf(1245183, "mask-reply")

    traceroute = Enum.YLeaf(2031615, "traceroute")

    conversion_error = Enum.YLeaf(2097151, "conversion-error")

    mobile_redirect = Enum.YLeaf(2162687, "mobile-redirect")

    @staticmethod
    def _meta_info():
        # YDK metadata hook; lazy import keeps the meta tables optional
        # at module-load time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_datatypes as meta
        return meta._meta_table['Ipv4AclIcmpTypeCodeEnum']
class Ipv4AclIgmpNumber(Enum):
    """
    Ipv4AclIgmpNumber (Enum Class)

    Ipv4 acl igmp number

    .. data:: host_query = 17
        Host query
    .. data:: host_report = 18
        Host report
    .. data:: dvmrp = 19
        Distance Vector Multicast Routing Protocol
    .. data:: pim = 20
        Portocol Independent Multicast
    .. data:: trace = 21
        Multicast Trace
    .. data:: v2_report = 22
        Version 2 report
    .. data:: v2_leave = 23
        Version 2 leave
    .. data:: mtrace_response = 30
        MTrace response
    .. data:: mtrace = 31
        MTrace
    .. data:: v3_report = 34
        Version 3 report
    """

    # Member values are the IGMP message type numbers as defined by the
    # YANG model; the string is the YANG identifier.
    host_query = Enum.YLeaf(17, "host-query")

    host_report = Enum.YLeaf(18, "host-report")

    dvmrp = Enum.YLeaf(19, "dvmrp")

    pim = Enum.YLeaf(20, "pim")

    trace = Enum.YLeaf(21, "trace")

    v2_report = Enum.YLeaf(22, "v2-report")

    v2_leave = Enum.YLeaf(23, "v2-leave")

    mtrace_response = Enum.YLeaf(30, "mtrace-response")

    mtrace = Enum.YLeaf(31, "mtrace")

    v3_report = Enum.YLeaf(34, "v3-report")

    @staticmethod
    def _meta_info():
        # YDK metadata hook; lazy import keeps the meta tables optional
        # at module-load time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_datatypes as meta
        return meta._meta_table['Ipv4AclIgmpNumber']
class Ipv4AclLoggingEnum(Enum):
    """
    Ipv4AclLoggingEnum (Enum Class)

    Ipv4 acl logging enum

    .. data:: log = 1
        Log matches against this entry
    .. data:: log_input = 2
        Log matches against this entry, including input
        interface
    """

    # Whether and how matches against an ACL entry are logged.
    log = Enum.YLeaf(1, "log")

    log_input = Enum.YLeaf(2, "log-input")

    @staticmethod
    def _meta_info():
        # YDK metadata hook; lazy import keeps the meta tables optional
        # at module-load time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_datatypes as meta
        return meta._meta_table['Ipv4AclLoggingEnum']
class Ipv4AclOperatorEnum(Enum):
"""
Ipv4AclOperatorEnum | |
"""Testing for the tree module."""
# =============================================================================
# Imports
# =============================================================================
# Standard
from itertools import product, chain
# Third party
import numpy as np
import pytest
from sklearn.exceptions import NotFittedError
# Local application
from sklr.tree import DecisionTreeLabelRanker, DecisionTreePartialLabelRanker
from sklr.utils import check_random_state
# =============================================================================
# Initialization
# =============================================================================
# The seed and the random state generator, to always obtain the same
# results and so ensure that the tests carried out are always the same
seed = 198075
random_state = check_random_state(seed)

# The following variables are required for some test methods to work.
# Even if they will not be used in all the tests, they are globally
# declared to avoid defining them in several methods. The extra memory
# overhead should not be an issue.

# The criteria for the Label Ranking problem
# and the Partial Label Ranking problem
LR_CRITERIA = ["mallows"]
PLR_CRITERIA = ["disagreements", "distance", "entropy"]
CRITERIA = [*LR_CRITERIA, *PLR_CRITERIA]

# The distances required for the Mallows criterion
DISTANCES = ["kendall"]

# The splitters that can be used to split
# an internal node of the decision tree
SPLITTERS = ["binary", "frequency", "width"]

# The decision tree rankers
LR_TREES = [DecisionTreeLabelRanker]
PLR_TREES = [DecisionTreePartialLabelRanker]
TREES = [*LR_TREES, *PLR_TREES]

# The possible combinations of decision tree
# rankers, criteria, splitters and distance
COMBINATIONS_LR = product(LR_TREES, LR_CRITERIA, SPLITTERS, DISTANCES)
COMBINATIONS_PLR = product(PLR_TREES, PLR_CRITERIA, SPLITTERS, DISTANCES)
COMBINATIONS = list(chain(COMBINATIONS_LR, COMBINATIONS_PLR))

# A toy example to check that the decision tree rankers are properly
# working: one training dataset and one test dataset, each a (data,
# rankings) pair. The first three training samples share one ranking
# and the last three share another, so the expected test predictions
# are unambiguous.

# Training

# Data
X_train = np.array([
    [-2, -1],
    [-1, -1],
    [-1, -2],
    [1, 1],
    [1, 2],
    [2, 1]])

# Rankings
Y_train = np.array([
    [1, 2, 3],
    [1, 2, 3],
    [1, 2, 3],
    [2, 1, 3],
    [2, 1, 3],
    [2, 1, 3]])

# Test

# Data
X_test = np.array([
    [-1, -1],
    [2, 2],
    [3, 2]])

# Rankings
Y_test = np.array([
    [1, 2, 3],
    [2, 1, 3],
    [2, 1, 3]])
# =============================================================================
# Testing
# =============================================================================
@pytest.mark.toy_example
@pytest.mark.parametrize(
    "DecisionTreeRanker,criterion,splitter,distance", COMBINATIONS)
def test_toy_example(DecisionTreeRanker, criterion, splitter, distance):
    """Test the decision tree rankers on a toy dataset."""
    # Label Ranking trees additionally take the rank-distance measure
    # as a positional argument; Partial Label Ranking trees do not.
    if DecisionTreeRanker is DecisionTreeLabelRanker:
        init_args = (criterion, distance, splitter)
    else:
        init_args = (criterion, splitter)
    model = DecisionTreeRanker(*init_args, random_state=seed)

    # First pass: fit with all features available and check that the
    # toy test rankings are recovered exactly
    estimator = model.fit(X_train, Y_train)
    np.testing.assert_array_equal(estimator.predict(X_test), Y_test)

    # Second pass: restrict the split search to a single feature so
    # that the remaining code paths are exercised as well
    estimator = model.set_params(max_features=1).fit(X_train, Y_train)
    np.testing.assert_array_equal(estimator.predict(X_test), Y_test)
@pytest.mark.weighted_toy_example
@pytest.mark.parametrize(
    "DecisionTreeRanker,criterion,splitter,distance", COMBINATIONS)
def test_weighted_toy_example(DecisionTreeRanker,
                              criterion, splitter, distance):
    """Test the decision tree rankers on a weighted toy dataset."""
    # Initialize the decision tree ranker using the given
    # criterion, splitter and (if corresponds) distance
    if DecisionTreeRanker is DecisionTreeLabelRanker:
        model = DecisionTreeRanker(criterion, distance, splitter,
                                   random_state=seed)
    else:
        model = DecisionTreeRanker(criterion, splitter,
                                   random_state=seed)
    # Initialize a sample weight with uniform weighting
    sample_weight = np.ones(X_train.shape[0])
    # Fit the decision tree ranker to the training
    # dataset using the uniform sample weighting
    clf = model.fit(X_train, Y_train, sample_weight)
    # Obtain the predictions of the decision tree ranker and assert
    # that they are correct (FIX: the prediction was previously
    # computed but never checked)
    Y_pred = clf.predict(X_test)
    np.testing.assert_array_equal(Y_pred, Y_test)
    # Now, apply the same procedure but only using one feature and a
    # half of the sample weight. Uniformly halving the weights must
    # not change the fitted tree
    sample_weight *= 0.5
    model = model.set_params(max_features=1)
    # FIX: the scaled sample_weight was previously not passed to fit,
    # so the "weighted" part of the test never ran
    clf = model.fit(X_train, Y_train, sample_weight)
    Y_pred = clf.predict(X_test)
    np.testing.assert_array_equal(Y_pred, Y_test)
@pytest.mark.pure_set
@pytest.mark.parametrize("DecisionTreeRanker", TREES)
def test_pure_set(DecisionTreeRanker):
    """Test when Y is pure."""
    # The first three training samples all carry the same ranking, so
    # the node is pure and the tree degenerates to a single leaf
    pure_X, pure_Y = X_train[:3], Y_train[:3]
    estimator = DecisionTreeRanker(random_state=seed).fit(pure_X, pure_Y)
    # Every test sample must then receive that one training ranking
    predictions = estimator.predict(X_test)
    np.testing.assert_array_equal(predictions, pure_Y)
@pytest.mark.max_features
@pytest.mark.parametrize("DecisionTreeRanker", TREES)
def test_max_features(DecisionTreeRanker):
    """Test the max_features hyperparameter."""
    n_features = X_train.shape[1]
    model = DecisionTreeRanker(random_state=seed)
    # Pairs of (max_features setting, expected clf.max_features_).
    # NOTE(review): "log2" is checked against int(np.sqrt(...)),
    # exactly as the previous version of this test did — confirm
    # whether int(np.log2(n_features)) was intended instead.
    expected = [
        ("auto", int(np.sqrt(n_features))),
        ("sqrt", int(np.sqrt(n_features))),
        ("log2", int(np.sqrt(n_features))),
        (1, 1),
        (0.5, int(0.5 * n_features)),
        (None, n_features),
    ]
    for max_features, n_expected in expected:
        clf = model.set_params(max_features=max_features).fit(X_train, Y_train)
        assert clf.max_features_ == n_expected
    # Invalid settings must all raise: more features than the dataset
    # holds (int > n_features or float > 1.0), non-positive values,
    # and a string that is not a recognized option
    for bad_value in (10, 1.5, -10, -1.5, "foo"):
        with pytest.raises(ValueError):
            model.set_params(max_features=bad_value).fit(X_train, Y_train)
@pytest.mark.max_depth
@pytest.mark.parametrize("DecisionTreeRanker", TREES)
def test_max_depth(DecisionTreeRanker):
    """Test max_depth hyperparameter."""
    ranker = DecisionTreeRanker(random_state=seed)
    # max_depth=0 must yield a single leaf node (the root itself):
    # no internal nodes, one leaf, one node overall
    clf = ranker.set_params(max_depth=0).fit(X_train, Y_train)
    assert clf.get_depth() == 0
    assert clf.get_n_internal() == 0
    assert clf.get_n_leaves() == 1
    assert clf.get_n_nodes() == 1
    # max_depth=1 must yield a one-level tree: a single internal node
    # (the root) with its two leaf children
    clf = ranker.set_params(max_depth=1).fit(X_train, Y_train)
    assert clf.get_depth() == 1
    assert clf.get_n_internal() == 1
    assert clf.get_n_leaves() == 2
    assert clf.get_n_nodes() == 3
    # A negative maximum depth is rejected
    with pytest.raises(ValueError):
        ranker.set_params(max_depth=-1).fit(X_train, Y_train)
@pytest.mark.min_samples_split
@pytest.mark.parametrize("DecisionTreeRanker", TREES)
def test_min_samples_split(DecisionTreeRanker):
    """Test min_samples_split hyperparameter."""
    ranker = DecisionTreeRanker(random_state=seed)
    # Both the integer form (exactly the number of training samples)
    # and the equivalent float form (the whole dataset, 1.0) of
    # min_samples_split must stop growth after a single split,
    # producing a one-level tree
    for threshold in (X_train.shape[0], 1.0):
        clf = ranker.set_params(min_samples_split=threshold).fit(X_train, Y_train)
        assert clf.get_depth() == 1
        assert clf.get_n_internal() == 1
        assert clf.get_n_leaves() == 2
        assert clf.get_n_nodes() == 3
    # An integer below two is rejected...
    with pytest.raises(ValueError):
        ranker.set_params(min_samples_split=1).fit(X_train, Y_train)
    # ...and so is a float above one
    with pytest.raises(ValueError):
        ranker.set_params(min_samples_split=1.5).fit(X_train, Y_train)
@pytest.mark.max_splits
@pytest.mark.parametrize("DecisionTreeRanker", TREES)
def test_max_splits(DecisionTreeRanker):
    """Test max_splits hyperparameter."""
    # Build the ranker allowing at most five splits per node
    ranker = DecisionTreeRanker(max_splits=5, random_state=seed)
    # The binary splitter always produces exactly two children,
    # regardless of the configured maximum number of splits
    clf = ranker.set_params(splitter="binary").fit(X_train, Y_train)
    assert len(clf.tree_.children) == 2
    # The frequency splitter may exploit the allowed maximum and
    # produce a multiway (more than binary) split at the root
    clf = ranker.set_params(splitter="frequency").fit(X_train, Y_train)
    assert len(clf.tree_.children) > 2
    # Same for the width splitter
    clf = ranker.set_params(splitter="width").fit(X_train, Y_train)
    assert len(clf.tree_.children) > 2
    # A maximum number of splits below one is rejected
    with pytest.raises(ValueError):
        ranker.set_params(max_splits=-1).fit(X_train, Y_train)
@pytest.mark.error
@pytest.mark.parametrize("DecisionTreeRanker", TREES)
def test_error(DecisionTreeRanker):
"""Test that it gives proper error on deficient input."""
# Initialize the decision tree ranker
# using the default hyperparameters
model | |
# Repository: david-fisher/320-S21-Track2
import rest_framework
import datetime
from django.shortcuts import render
from django.http import *
from rest_framework import generics, renderers, status, views, viewsets
from rest_framework.response import Response as DRF_response
from rest_framework.views import APIView
from rest_framework.decorators import action
from api.models import *
from api.serializers import *
# Create your views here.
class CourseViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Courses."""
    queryset = Courses.objects.all()
    serializer_class = CourseSerializer
class ProfessorViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Professors."""
    queryset = Professors.objects.all()
    serializer_class = ProfessorSerializer
class StudentViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Students.

    Also exposes a read-only ``demographics`` detail action returning
    the serialized demographics record of a single student.
    """
    queryset = Students.objects.all()
    serializer_class = StudentSerializer
    @action(detail=True, renderer_classes=[renderers.JSONOpenAPIRenderer])
    def demographics(self, request, *args, **kwargs):
        """Return the student's demographics, or 404 when absent."""
        student = self.get_object()
        # A bare ``except`` previously reported *every* error (including
        # programming mistakes) as a 404.  Only the expected failures
        # are mapped to 404 now: a missing related record (Django's
        # RelatedObjectDoesNotExist subclasses AttributeError) or a
        # demographics field that is simply unset.
        try:
            demographics = student.demographics
        except AttributeError:
            raise Http404
        if demographics is None:
            raise Http404
        serializer = DemographicSerializer(demographics)
        return DRF_response(serializer.data)
class DemographicViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Demographics."""
    queryset = Demographics.objects.all()
    serializer_class = DemographicSerializer
class ScenariosViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Scenarios."""
    queryset = Scenarios.objects.all()
    serializer_class = ScenarioSerializer
class ResponsesViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Responses."""
    queryset = Responses.objects.all()
    serializer_class = ResponseSerializer
class IssueViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Issues."""
    queryset = Issues.objects.all()
    serializer_class = IssueSerializer
class ConversationsViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Conversations."""
    queryset = Conversations.objects.all()
    serializer_class = ConversationSerializer
class PagesViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Pages."""
    queryset = Pages.objects.all()
    serializer_class = PagesSerializer
class Stakeholder_to_pageViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for the stakeholder-to-page mapping."""
    queryset = StakeholderToPage.objects.all()
    serializer_class = Stakeholder_to_pageSerializer
class Reflection_QuestionsViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for ReflectionQuestions."""
    queryset = ReflectionQuestions.objects.all()
    serializer_class = Reflection_questionsSerializer
class Generic_pageViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for GenericPage records."""
    queryset = GenericPage.objects.all()
    serializer_class = Generic_pageSerializer
class Action_pageViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for ActionPage records."""
    queryset = ActionPage.objects.all()
    serializer_class = Action_pageSerializer
class Page_reflectionViewSet(generics.CreateAPIView):
    """Create-only endpoint for reflection pages."""
    model = Pages
    serializer_class = Pages_reflectionSerializer
class Page_actionViewSet(generics.CreateAPIView):
    """Create-only endpoint for action pages."""
    model = Pages
    serializer_class = Pages_actionSerializer
class Page_genericViewSet(generics.CreateAPIView):
    """Create-only endpoint for generic pages."""
    model = Pages
    serializer_class = Pages_genericSerializer
class Page_StakeholderViewSet(generics.CreateAPIView):
    """Create-only endpoint for stakeholder pages."""
    model = Pages
    serializer_class = Pages_stakeholderSerializer
class ReflectionsTakenViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for ReflectionsTaken."""
    queryset = ReflectionsTaken.objects.all()
    serializer_class = ReflectionsTakenSerializer
class ResponseToActionPageViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for ResponseToActionPage."""
    queryset = ResponseToActionPage.objects.all()
    serializer_class = ResponseToActionPageSerializer
class Responses_to_conversationsViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for ResponsesToConversations."""
    queryset = ResponsesToConversations.objects.all()
    serializer_class = Responses_to_conversationsSerializer
# class Student_page_progressViewSet(viewsets.ModelViewSet):
# queryset = Student_page_progress.objects.all()
# serializer_class = Student_page_progressSerializer
class StudentTimesViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for StudentTimes."""
    queryset = StudentTimes.objects.all()
    serializer_class = StudentTimesSerializer
class CoverageViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Coverage."""
    queryset = Coverage.objects.all()
    serializer_class = CoverageSerializer
class StakeholdersViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Stakeholders."""
    queryset = Stakeholders.objects.all()
    serializer_class = StakeholderSerializer
class StudentToCourseViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for the student-to-course mapping."""
    queryset = StudentsToCourse.objects.all()
    serializer_class = StudentToCourseSerializer
class ScenariosForViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for ScenariosFor."""
    queryset = ScenariosFor.objects.all()
    serializer_class = ScenarioForSerializer
class QuestionsViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for Questions."""
    queryset = Questions.objects.all()
    serializer_class = QuestionsSerializer
class StakeholdersToQuestionsViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for the stakeholder-to-question mapping."""
    queryset = StakeholdersToQuestions.objects.all()
    serializer_class = StakeholdersToQuestionsSerializer
class PagesToScenarioViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for the page-to-scenario mapping."""
    queryset = PagesToScenario.objects.all()
    serializer_class = PagesToScenarioSerializer
class ReflectionQuestionToPageViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for the reflection-question-to-page mapping."""
    queryset = ReflectionQuestionToPage.objects.all()
    serializer_class = ReflectionQuestionToPageSerializer
class ProfessorsToScenarioViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for the professor-to-scenario mapping."""
    queryset = ProfessorsToScenario.objects.all()
    serializer_class = ProfessorsToScenarioSerializer
class ProfessorsToCoursesViewSet(viewsets.ModelViewSet):
    """Standard CRUD API endpoints for the professor-to-course mapping."""
    queryset = ProfessorsToCourses.objects.all()
    serializer_class = ProfessorsToCoursesSerializer
# TODO: Some viewsets are not necessary; remove them after the remaining endpoints are implemented
class DashBoard(views.APIView):
    """Dashboard endpoint listing the scenarios available to a student.

    GET parameters:
        student_id -- the student's identifier (required).

    Returns the serialized scenarios of every course the student is
    enrolled in, each annotated with a ``student_finished`` flag that
    tells whether the student has a recorded end time for it.
    """
    def get(self, request, format=None):
        student_id = self.request.query_params.get('student_id', None)
        # Guard clause: nothing to do without a student id
        if student_id is None:
            return DRF_response(status=status.HTTP_400_BAD_REQUEST)
        # Query parameters normally arrive as strings; coerce
        # defensively in case a non-string value slips through
        # (previously: ``type(student_id) != type("a string")``)
        if not isinstance(student_id, str):
            try:
                student_id = str(student_id)
            except Exception:
                return DRF_response(status=status.HTTP_400_BAD_REQUEST)
        try:
            scenario_list = []
            student_courses = Students.objects.get(
                student=student_id).courses.all()
            for course in student_courses:
                scenario_list.extend(course.scenarios.all())
            scenario_data_list = ScenarioSerializer(scenario_list, many=True).data
            # Mark each scenario as finished when the student has a
            # recorded end time for it; a missing (or ambiguous) time
            # record means the scenario was not finished
            for scenario in scenario_data_list:
                try:
                    stu_time = StudentTimes.objects.get(
                        student=student_id,
                        scenario=scenario['scenario_id'])
                    result = stu_time.end_time is not None
                except (StudentTimes.DoesNotExist,
                        StudentTimes.MultipleObjectsReturned):
                    result = False
                scenario['student_finished'] = result
            return DRF_response(scenario_data_list)
        except Students.DoesNotExist:
            raise Http404
class Get_scenario(APIView):
    """Return a single scenario looked up by its id.

    GET parameters:
        scenario_id -- id of the scenario (``?scenario_id=<the id>``).
    """
    def get(self, request, *args, **kwargs):
        scenario_id = self.request.query_params.get('scenario_id')
        if scenario_id is None:
            return DRF_response(status=status.HTTP_400_BAD_REQUEST)
        try:
            scenario = Scenarios.objects.get(scenario_id=scenario_id)
        except Scenarios.DoesNotExist:
            # BUG FIX: the missing-scenario branch previously called
            # ``Responses(...)`` — the *model* class — instead of
            # building a DRF response object
            return DRF_response(
                {'status': 'No scenario found for this scenario id'},
                status=status.HTTP_404_NOT_FOUND)
        # ``Scenarios.objects.get`` either returns an object or raises,
        # so the old ``scenario == None`` check was dead code
        data = ScenarioSerializer(scenario).data
        return DRF_response(data, status=status.HTTP_200_OK)
class get_pages(APIView):
    """Return every page of a scenario, topologically sorted so that a
    page appears before the pages it links to (via ``next`` links and
    the result pages of action pages).
    """
    def get(self, request, *args, **kwargs):
        scenario = self.request.query_params.get('scenario_id')
        # 404 when the scenario does not exist
        try:
            scenario = Scenarios.objects.get(scenario_id=scenario)
        except Scenarios.DoesNotExist:
            return DRF_response(status=status.HTTP_404_NOT_FOUND)
        # Resolve every page linked to the scenario
        page_list = []
        page_id_list = PagesToScenario.objects.filter(scenario=scenario)
        for pg_id in page_id_list:
            try:
                page = Pages.objects.get(id=pg_id.page.id)
            except Pages.DoesNotExist:
                return DRF_response(status=status.HTTP_404_NOT_FOUND)
            page_list.append(page)
        sorted_list = []
        # In-degree map for the topological sort: counts, per page, how
        # many other pages point at it (``next`` links plus the result
        # pages of action pages)
        page_parent = {}
        for page in page_list:
            if page.next is not None:
                page_parent[str(page.next)] = page_parent.get(str(page.next), 0) + 1
            if page.page_type == 'A':
                action_pages = ActionPage.objects.filter(page = page.id)
                for action_page in action_pages:
                    try:
                        result_page = Pages.objects.get(id=action_page.result_page)
                        page_parent[str(result_page)] = page_parent.get(str(result_page), 0) + 1
                    except:
                        # Dangling result-page reference: skip the edge
                        continue
        # Kahn-style topological sort: repeatedly emit a page with no
        # remaining parents, then decrement the in-degree of the pages
        # it points at
        num_pages = len(page_list)
        for x in range(num_pages):
            page = None
            for page1 in page_list:
                if page_parent.get(str(page1), 0) <= 0:
                    page = page1
                    break
            if page is None:
                # No parentless page left: the remainder form a cycle
                # and are handled by the fallback below
                break
            page_serializer_data = PagesSerializer(page).data
            sorted_list.append(page_serializer_data)
            if page.next is not None:
                page_parent[str(page.next)] = page_parent.get(str(page.next), 0) - 1
            if page.page_type == 'A':
                action_pages = ActionPage.objects.filter(page = page.id)
                for action_page in action_pages:
                    try:
                        result_page = Pages.objects.get(id=action_page.result_page)
                        page_parent[str(result_page)] = page_parent.get(str(result_page), 0) - 1
                    except:
                        continue
            page_list.remove(page)
        # Sort key for the cycle fallback: ascending page id
        def take_id(elem):
            return elem.id
        # Any pages left over (part of a cycle) are appended in id order
        if len(page_list) > 0:
            page_list.sort(key=take_id)
            for page1 in page_list:
                page1 = PagesSerializer(page1).data
                if page1 not in sorted_list:
                    sorted_list.append(page1)
        results = {'results': sorted_list}
        return DRF_response(results, status=status.HTTP_200_OK)
class get_page_info(APIView):
    """Return a page plus its type-specific body.

    GET parameters:
        page_id -- id of the page to fetch.

    The serialized page is augmented with a ``body`` key whose content
    depends on the page type: reflection questions ('R'), action page
    rows ('A'), generic page rows ('G'/'I'/'F') or stakeholder rows ('S').
    """
    def get(self, request, *args, **kwargs):
        page_id = self.request.query_params.get('page_id')
        try:
            page = Pages.objects.get(id=page_id)
        except Pages.DoesNotExist:
            return DRF_response(status=status.HTTP_404_NOT_FOUND)
        page_data = PagesSerializer(page).data
        page_type = page.page_type
        # Check page.PAGE_TYPE = 'REFLECTION'
        if page_type == 'R':
            reflection_queries = ReflectionQuestionToPage.objects.filter(page=page_id)
            reflection_qs = []
            for quer in reflection_queries:
                try:
                    question = ReflectionQuestions.objects.get(
                        id=quer.reflection_question.id)
                except ReflectionQuestions.DoesNotExist:
                    # Previously a bare ``except`` hid every error here;
                    # only a genuinely missing question is a 404
                    return DRF_response(status=status.HTTP_404_NOT_FOUND)
                reflection_qs.append(
                    Reflection_questionsSerializer(question).data)
            page_data.update({"body": reflection_qs})
        # Check page.PAGE_TYPE = 'ACTION'
        elif page_type == 'A':
            page_data.update(
                {"body": ActionPage.objects.filter(page=page_id).values()})
        # Check page.PAGE_TYPE = 'GENERIC' (also 'I' and 'F' variants)
        elif page_type in ('G', 'I', 'F'):
            page_data.update(
                {"body": GenericPage.objects.filter(page=page_id).values()})
        # Check page.PAGE_TYPE = 'STAKEHOLDER'
        elif page_type == 'S':
            page_data.update(
                {"body": StakeholderToPage.objects.filter(page=page_id).values()})
        # Unknown page type: something went wrong or missing implementation
        else:
            return DRF_response(status=status.HTTP_400_BAD_REQUEST)
        return DRF_response(page_data, status=status.HTTP_200_OK)
class get_stakeholders(APIView):
    """Return the stakeholders of a scenario, each augmented with its
    coverage matrix and its conversations.
    """
    def get(self, request):
        scenario_id = self.request.query_params.get('scenario_id')
        # 404 when the scenario does not exist
        try:
            scenario = Scenarios.objects.get(scenario_id=scenario_id)
        except Scenarios.DoesNotExist:
            return DRF_response(status=status.HTTP_404_NOT_FOUND)
        stakeholders_list = []
        stakeholders_id_list = Stakeholders.objects.filter(
            scenario_id=scenario_id)
        for stakeholder in stakeholders_id_list:
            # Conversations and coverage rows attached to this stakeholder
            convos = Conversations.objects.filter(
                stakeholder=stakeholder.stakeholder)
            cov = Coverage.objects.filter(stakeholder=stakeholder.stakeholder)
            stake_data = StakeholderSerializer(stakeholder).data
            # Attach the coverage matrix (issue -> coverage score)
            covLst = []
            for c in cov:
                covLst.append(
                    {
                        "ISSUE": c.issue.issue,
                        "COVERAGE_SCORE": c.coverage_score
                    }
                )
            stake_data.update(
                {
                    "MATRIX": covLst
                }
            )
            # Attach the conversations (question/response pairs)
            convoLst = []
            for c in convos:
                convoLst.append(
                    {
                        "CONVERSATION": c.conversation,
                        "QUESTION": c.question,
                        "RESPONSE": c.response
                    }
                )
            stake_data.update(
                {
                    "CONVERSATIONS": convoLst
                }
            )
            stakeholders_list.append(stake_data)
        return DRF_response(stakeholders_list, status=status.HTTP_200_OK)
class get_Issues(APIView):
    """Return every issue belonging to a scenario.

    GET parameters:
        scenario_id -- id of the scenario whose issues are requested.
    """
    def get(self, request, format=None):
        scenario_id = self.request.query_params.get('scenario_id')
        # Validate the parameter BEFORE querying with it (the previous
        # version only checked for None after already using the value)
        if scenario_id is None:
            return DRF_response(status=status.HTTP_400_BAD_REQUEST)
        # Ensure the scenario actually exists
        try:
            Scenarios.objects.get(scenario_id=scenario_id)
        except Scenarios.DoesNotExist:
            return DRF_response(status=status.HTTP_404_NOT_FOUND)
        # ``filter`` never raises DoesNotExist, so the old try/except
        # around this block was dead code; a scenario without issues
        # simply yields an empty list
        issues_list = [
            IssueSerializer(issue).data
            for issue in Issues.objects.filter(scenario_id=scenario_id)
        ]
        return DRF_response(issues_list)
class issueRadarPlotTotal(APIView):
    """Aggregate coverage scores per issue over every stakeholder of a
    scenario (data for the issue radar plot).
    """
    def get(self, request, format=None):
        scenario_id = self.request.query_params.get('scenario_id')
        # 404 when the scenario does not exist
        try:
            scenario = Scenarios.objects.get(scenario_id=scenario_id)
        except Scenarios.DoesNotExist:
            return DRF_response(status=status.HTTP_404_NOT_FOUND)
        # Map: issue name -> summed coverage score
        mp = {}
        try:
            stakeholder_list = Stakeholders.objects.filter(scenario = scenario_id)
            for stakeholder in stakeholder_list:
                stakeholder_id = stakeholder.id
                all_coverages = Coverage.objects.filter(stakeholder = stakeholder_id)
                for coverage in all_coverages:
                    # NOTE(review): ``issue.name`` is read here while other
                    # views read ``issue.issue`` — confirm the field name
                    issue = coverage.issue.name
                    score = coverage.coverage_score
                    mp[issue] = mp.get(issue, 0) + score
            return DRF_response(mp, status=status.HTTP_200_OK)
        except:
            # NOTE(review): this bare except maps *every* failure
            # (including programming errors) to a 400 — consider narrowing
            return DRF_response(status=status.HTTP_400_BAD_REQUEST)
class issueScoreAggregateForStudent(APIView):
    """Aggregate coverage scores per issue for one student in a scenario,
    based on the stakeholders the student's responses touched.
    """
    def get(self, request, format=None):
        scenario_id1 = self.request.query_params.get('scenario_id')
        student_id = self.request.query_params.get('student_id')
        # 404 when the scenario does not exist
        try:
            scenario = Scenarios.objects.get(scenario_id=scenario_id1)
        except Scenarios.DoesNotExist:
            return DRF_response(status=status.HTTP_404_NOT_FOUND)
        # NOTE(review): this None check is unreachable in practice — a
        # None id already fails the .get() above with a 404
        if(scenario_id1 == None):
            return DRF_response(status=status.HTTP_400_BAD_REQUEST)
        try:
            # Map: issue name -> summed coverage score
            mp= {}
            AllResponses = Responses.objects.filter(student=student_id,scenario = scenario_id1)
            # Collect every stakeholder the student's responses involved
            stakeholderSet = set()
            for response in AllResponses:
                response_id = response.response_id
                responseToConvo = ResponsesToConversations.objects.filter(response=response_id)
                if len(responseToConvo) > 0:
                    for respToConv in responseToConvo:
                        stakeholderSet.add(respToConv.stakeholder.stakeholder)
            # Sum the coverage scores of those stakeholders per issue
            for stakeholder in stakeholderSet:
                coverages = Coverage.objects.filter(stakeholder=stakeholder)
                for coverage in coverages:
                    if coverage is not None:
                        issue_id = coverage.issue.name
                        coverage_score = coverage.coverage_score
                        mp[issue_id] = mp.get(issue_id, 0) + coverage_score
            return DRF_response(mp)
        except Scenarios.DoesNotExist:
            # NOTE(review): nothing inside this try queries Scenarios,
            # so this handler is most likely dead code — confirm
            return DRF_response(status=status.HTTP_404_NOT_FOUND)
class response_to_conversations(APIView):
    def get(self, request):
        """Return the student's responses on a page, each joined with its
        conversation records.

        GET parameters: ``scenario_id``, ``page_id`` and ``student_id``.
        """
        scenario_id = self.request.query_params.get('scenario_id')
        page_id = self.request.query_params.get('page_id')
        student_id = self.request.query_params.get('student_id')
        # Validate that all three referenced objects exist (404 otherwise)
        try:
            scenario = Scenarios.objects.get(scenario_id = scenario_id)
            # NOTE(review): other views look pages up with ``id=page_id``;
            # confirm that ``page`` is a valid lookup field here
            page = Pages.objects.get(page = page_id)
            student = Students.objects.get(student = student_id)
        except Scenarios.DoesNotExist:
            return rest_framework.response.Response(status=status.HTTP_404_NOT_FOUND)
        except Pages.DoesNotExist:
            return rest_framework.response.Response(status=status.HTTP_404_NOT_FOUND)
        except Students.DoesNotExist:
            return rest_framework.response.Response(status=status.HTTP_404_NOT_FOUND)
        response_id_lst = Responses.objects.filter(student_id = student_id, page = page_id, scenario = scenario_id)
        resp_to_convo_final_lst = []
        for response in response_id_lst:
            resp_json = ResponseSerializer(response).data
            resp_to_convos_obj_lst = ResponsesToConversations.objects.filter(response_id = response.response_id)
            for j in resp_to_convos_obj_lst:
                # All conversation rows linked to this link record
                convos = Conversations.objects.filter(conversation = j.conversation.conversation)
                convo_lst = []
                for i in convos:
                    convo_json = ConversationSerializer(i).data
                    convo_lst.append(convo_json)
                # Embed the full response and conversation payloads in
                # the serialized link record before returning it
                resp_to_convo_json = Responses_to_conversationsSerializer(j).data
                resp_to_convo_json.update(
                    {
                        "response": resp_json,
                        "conversation": convo_lst
                    }
                )
                resp_to_convo_final_lst.append(resp_to_convo_json)
        return rest_framework.response.Response(resp_to_convo_final_lst, status=status.HTTP_200_OK)
# put a student conversation into the database
def put(self, request, *args, **kwargs):
# takes in a JSON of the format:
# {
# "scenario_id": 1,
# "student_id": "student netID",
# "conversation_id": 1,
# "score": 1,
# "course_id": 1,
# "page_id": 1
# }
scenario_id = request.data.get('scenario_id')
student_id = request.data.get('student_id')
conversation_id = request.data.get('conversation_id')
# score = request.data.get('score')
score = 0
course_id = request.data.get('course_id')
page_id = request.data.get('page_id')
# extra check for if the given JSON has the required fields
if(scenario_id is None or student_id is None or conversation_id is None or score is None or course_id is None or page_id is None):
return DRF_response({'detail': "Missing one or more parameters"}, status=status.HTTP_400_BAD_REQUEST)
try:
conversation = Conversations.objects.get(conversation=conversation_id)
| |
# * HXL
# - can be a Triple Store for Semantic Web support
#
tablename = "inv_req_tag"
self.define_table(tablename,
self.inv_req_id(),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("req_id",
"tag",
),
),
)
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class InventoryStockCardModel(S3Model):
    """
        Stock Cards: a per-site, per-item card together with a log of
        every stock movement (shipments sent/received and adjustments)

        Used by: RMS
    """
    # Table names this model defines (Eden convention)
    names = ("inv_stock_card",
             "inv_stock_log",
             )
    def model(self):
        T = current.T
        configure = self.configure
        define_table = self.define_table
        settings = current.deployment_settings
        WAREHOUSE = T(settings.get_inv_facility_label())
        # Represent float quantities with 2 decimal places
        is_float_represent = IS_FLOAT_AMOUNT.represent
        float_represent = lambda v: is_float_represent(v, precision=2)
        # ---------------------------------------------------------------------
        # Stock Cards: one card per site/item/tracking-number/expiry combination
        #
        tablename = "inv_stock_card"
        define_table(tablename,
                     # This is a component, so needs to be a super_link
                     # - can't override field name, ondelete or requires
                     self.super_link("site_id", "org_site",
                                     #default = auth.user.site_id if auth.is_logged_in() else None,
                                     empty = False,
                                     label = WAREHOUSE,
                                     ondelete = "RESTRICT",
                                     represent = self.org_site_represent,
                                     readable = True,
                                     #writable = True,
                                     # Comment these to use a Dropdown & not an Autocomplete
                                     #widget = S3SiteAutocompleteWidget(),
                                     #comment = DIV(_class = "tooltip",
                                     #              _title = "%s|%s" % (WAREHOUSE,
                                     #                                  messages.AUTOCOMPLETE_HELP)),
                                     ),
                     # Generated by inv_stock_card_onaccept, not user-entered
                     Field("stock_card_ref",
                           label = T("Stock Card No."),
                           ),
                     self.supply_item_id(ondelete = "RESTRICT",
                                         required = True,
                                         ),
                     self.supply_item_pack_id(ondelete = "RESTRICT",
                                              required = True,
                                              ),
                     inv_itn_field()(),
                     s3_date("expiry_date",
                             label = T("Expiry Date"),
                             represent = inv_expiry_date_represent,
                             ),
                     *s3_meta_fields()
                     )
        configure(tablename,
                  create_onaccept = self.inv_stock_card_onaccept,
                  # Never created/edited manually
                  deletable = False,
                  editable = False,
                  insertable = False,
                  )
        # Each stock card has a log of movements as a component
        self.add_components(tablename,
                            inv_stock_log = {"name": "log",
                                             "joinby": "card_id",
                                             },
                            )
        current.response.s3.crud_strings[tablename] = Storage(title_display = T("Stock Card"),
                                                              )
        # ---------------------------------------------------------------------
        # Log of Updates to Stock Cards
        # - one row per movement: shipment sent/received or adjustment
        #
        tablename = "inv_stock_log"
        define_table(tablename,
                     Field("card_id", "reference inv_stock_card",
                           ),
                     s3_datetime(represent = "date"),
                     self.inv_send_id(label = T("Sent Shipment"),
                                      represent = inv_SendRepresent(show_link = True),
                                      ),
                     self.inv_recv_id(label = T("Received Shipment"),
                                      represent = inv_RecvRepresent(show_link = True),
                                      ),
                     self.inv_adj_id(label = T("Adjustment")),
                     self.org_site_layout_id(label = T("Bin"),
                                             ),
                     Field("quantity_in", "double", notnull=True,
                           default = 0.0,
                           label = T("Quantity In"),
                           represent = float_represent,
                           requires = IS_FLOAT_AMOUNT(minimum = 0.0),
                           ),
                     Field("quantity_out", "double", notnull=True,
                           default = 0.0,
                           label = T("Quantity Out"),
                           represent = float_represent,
                           requires = IS_FLOAT_AMOUNT(minimum = 0.0),
                           ),
                     # Running balance after this movement
                     Field("balance", "double", notnull=True,
                           default = 0.0,
                           label = T("Balance"),
                           represent = float_represent,
                           requires = IS_FLOAT_AMOUNT(minimum = 0.0),
                           ),
                     s3_comments(),
                     *s3_meta_fields()
                     )
        configure(tablename,
                  # Never created/edited manually
                  deletable = False,
                  editable = False,
                  insertable = False,
                  datatable_includes_id = False,
                  list_fields = ["date",
                                 "send_id",
                                 "send_id$to_site_id",
                                 "recv_id",
                                 "recv_id$from_site_id",
                                 "adj_id",
                                 "layout_id",
                                 "quantity_in",
                                 "quantity_out",
                                 "balance",
                                 "comments",
                                 ],
                  )
        # Pass names back to global scope (s3.*)
        return {}
    # -------------------------------------------------------------------------
    @staticmethod
    def inv_stock_card_onaccept(form):
        """
            Generate the Stock Card No.
            (create-onaccept: runs once when a stock card record is created)

            Args:
                form: the FORM whose vars carry the record id & site_id
        """
        db = current.db
        form_vars = form.vars
        ctable = db.inv_stock_card
        from .supply import supply_get_shipping_code as get_shipping_code
        # Build a site-specific sequential reference with prefix "STC"
        code = get_shipping_code("STC",
                                 form_vars.get("site_id"),
                                 ctable.stock_card_ref,
                                 )
        db(ctable.id == form_vars.id).update(stock_card_ref = code)
# =============================================================================
class InventoryTrackingModel(S3Model):
"""
A module to manage the shipment of inventory items
- Sent Items
- Received Items
- And audit trail of the shipment process
"""
names = ("inv_send",
"inv_send_id",
"inv_recv",
"inv_recv_id",
"inv_track_item",
"inv_send_item_bin",
"inv_recv_item_bin",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
inv_item_id = self.inv_item_id
organisation_id = self.org_organisation_id
person_id = self.pr_person_id
req_ref = self.inv_req_ref
is_logged_in = auth.is_logged_in
user = auth.user
site_types = auth.org_site_types
shipment_status = inv_shipment_status_labels()
tracking_status = {TRACK_STATUS_UNKNOWN: T("Unknown"),
TRACK_STATUS_PREPARING: T("In Process"),
TRACK_STATUS_TRANSIT: T("In transit"),
TRACK_STATUS_UNLOADING: T("Unloading"),
TRACK_STATUS_ARRIVED: T("Arrived"),
TRACK_STATUS_CANCELED: T("Canceled"),
TRACK_STATUS_RETURNING: T("Returning"),
}
SITE_LABEL = settings.get_org_site_label()
document_filing = settings.get_inv_document_filing()
recv_shortname = settings.get_inv_recv_shortname()
show_org = settings.get_inv_send_show_org()
send_req_ref = settings.get_inv_send_req_ref()
track_pack_values = settings.get_inv_track_pack_values()
type_default = settings.get_inv_send_type_default()
is_float_represent = IS_FLOAT_AMOUNT.represent
float_represent = lambda v: is_float_represent(v, precision=2)
string_represent = lambda v: v if v else NONE
org_site_represent = self.org_site_represent
send_ref = S3ReusableField("send_ref",
label = T(settings.get_inv_send_ref_field_name()),
represent = self.inv_send_ref_represent,
writable = False,
)
ship_doc_status = {SHIP_DOC_PENDING : T("Pending"),
SHIP_DOC_COMPLETE : T("Complete"),
}
radio_widget = lambda field, value: \
RadioWidget().widget(field, value, cols = 2)
transport_opts = {"Air": T("Air"),
"Sea": T("Sea"),
"Road": T("Road"),
"Hand": T("Hand"),
}
# ---------------------------------------------------------------------
# Send (Outgoing / Dispatch / etc)
#
send_type_opts = settings.get_inv_shipment_types()
# @ToDo: When is this actually wanted?
#send_type_opts.update(self.inv_item_status_opts)
send_type_opts.update(settings.get_inv_send_types())
tablename = "inv_send"
define_table(tablename,
# Instance
super_link("doc_id", "doc_entity"),
send_ref(),
# Useful for when the Request comes from a site not using the same system
# - doesn't support multiple Requests for a Shipment
# - doesn't try to update Request status
req_ref(#represent = inv_ReqRefRepresent(show_link = True),
represent = string_represent,
readable = send_req_ref,
writable = send_req_ref,
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
default = user.site_id if is_logged_in() else None,
empty = False,
instance_types = site_types,
label = T("From %(site)s") % {"site": SITE_LABEL},
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
represent = org_site_represent,
updateable = True,
#widget = S3SiteAutocompleteWidget(),
),
Field("type", "integer",
default = type_default,
label = T("Shipment Type"),
represent = s3_options_represent(send_type_opts),
requires = IS_IN_SET(send_type_opts,
zero = None),
readable = not type_default,
writable = not type_default,
),
# This is a reference, not a super_link, so we can override
Field("to_site_id", self.org_site,
label = T("To %(site)s") % {"site": SITE_LABEL},
ondelete = "SET NULL",
represent = org_site_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_site.site_id",
org_site_represent,
instance_types = site_types,
sort = True,
not_filterby = "obsolete",
not_filter_opts = (True,),
)),
),
organisation_id(label = T("To Organization"),
readable = show_org,
writable = show_org,
),
person_id("sender_id",
default = auth.s3_logged_in_person(),
label = T("Sent By"),
ondelete = "SET NULL",
comment = self.pr_person_comment(child = "sender_id"),
),
person_id("recipient_id",
label = T("To Person"),
ondelete = "SET NULL",
represent = self.pr_PersonRepresentContact(),
comment = self.pr_person_comment(child = "recipient_id"),
),
Field("transport_type",
label = T("Type of Transport"),
represent = s3_options_represent(transport_opts),
requires = IS_EMPTY_OR(IS_IN_SET(transport_opts)),
),
Field("transported_by",
label = T("Transported by"),
represent = string_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Transported by"),
T("Freight company or organisation providing transport"),
),
),
),
Field("transport_ref",
#label = "AWB No", Air WayBill
#label = "B/L No", Bill of Lading (Sea)
label = T("Transport Reference"),
represent = string_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Transport Reference"),
T("Air WayBill, Bill of Lading, Consignment Number, Tracking Number, etc"),
),
),
),
Field("registration_no",
label = T("Registration Number"),
represent = string_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Registration Number"),
T("Flight Number, Vheicle Plate Number, Vessel Registration, etc"),
),
),
),
Field("driver_name",
label = T("Name of Driver"),
represent = string_represent,
),
Field("driver_phone",
label = T("Driver Phone Number"),
represent = lambda v: v or "",
requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),
),
Field("time_in", "time",
label = T("Time In"),
represent = string_represent,
# Enable in Template if-required
readable = False,
writable = False,
),
Field("time_out", "time",
label = T("Time Out"),
represent = string_represent,
# Enable in Template if-required
readable = False,
writable = False,
),
s3_datetime(label = T("Date Sent"),
# Not always sent straight away
#default = "now",
represent = "date",
writable = False,
),
s3_datetime("delivery_date",
label = T("Estimated Delivery Date"),
represent = "date",
writable = False,
),
Field("status", "integer",
default = SHIP_STATUS_IN_PROCESS,
label = T("Status"),
represent = s3_options_represent(shipment_status),
requires = IS_IN_SET(shipment_status),
writable = False,
),
Field("filing_status", "integer",
default = SHIP_DOC_PENDING,
label = T("Filing Status"),
represent = s3_options_represent(ship_doc_status),
requires = IS_IN_SET(ship_doc_status),
widget = radio_widget,
comment = DIV(_class = "tooltip",
_title = "%s|%s|%s" % (T("Filing Status"),
T("Have all the signed documents for this shipment been filed?"),
"* %s|* %s" % (T("Requisition"), T("Waybill")),
)),
readable = document_filing,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# Filter Widgets
filter_widgets = [
S3TextFilter(["sender_id$first_name",
"sender_id$middle_name",
"sender_id$last_name",
"comments",
"site_id$name",
"send_ref",
"recipient_id$first_name",
"recipient_id$middle_name",
"recipient_id$last_name",
],
label = T("Search"),
comment = T("Search for an item by text."),
),
S3OptionsFilter("to_site_id",
label = T("To Organization"),
comment = T("If none are selected, then all are searched."),
cols = 2,
hidden = True,
),
S3TextFilter("type",
label = T("Shipment Type"),
hidden = True,
),
S3TextFilter("transport_type",
label = T("Type of Transport"),
hidden = True,
),
S3DateFilter("date",
label = T("Date Sent"),
comment = T("Search for a shipment sent between these dates."),
hidden = True,
),
S3DateFilter("delivery_date",
label = T("Estimated Delivery Date"),
comment = | |
# Source repository: markendr/esys-escript.github.io
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
basic tests for functions in util.py effecting the spatial distribution
it is assumed that the domain is the unit square/cube
not all these tests will run for all domains. check the doc string for the assumptions of a particular test
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
"""
__author__="<NAME>, <EMAIL>"
import esys.escriptcore.utestselect as unittest
from esys.escript import *
from numpy import array
import numpy
from test_util_grad import Test_Util_Gradient_noBoundary
from test_util_integrals import Test_Util_Integration_noContact
from test_util_interpolation import Test_Util_Interpolation_noContact
class Test_Util_SpatialFunctions_noGradOnBoundary_noContact(Test_Util_Integration_noContact, Test_Util_Interpolation_noContact, Test_Util_Gradient_noBoundary):
RES_TOL=1.e-8
def test_x_ofDomain(self):
"""
test getX() of the domain to be in the [0,1]^dim box
"""
dim=self.domain.getDim()
x=self.domain.getX()
self.assertEqual(x.getShape(),(dim,),"wrong shape of result.")
self.assertAlmostEqual(inf(x[0]),0.,int(-log10(self.RES_TOL)),"min x0 wrong")
self.assertAlmostEqual(sup(x[0]),1.,int(-log10(self.RES_TOL)),"max x0 wrong")
self.assertAlmostEqual(inf(x[1]),0.,int(-log10(self.RES_TOL)),"min x1 wrong")
self.assertAlmostEqual(sup(x[1]),1.,int(-log10(self.RES_TOL)),"max x1 wrong")
if dim>2:
self.assertAlmostEqual(inf(x[2]),0.,int(-log10(self.RES_TOL)),"min x2 wrong")
self.assertAlmostEqual(sup(x[2]),1.,int(-log10(self.RES_TOL)),"max x2 wrong")
def test_SolutionOrder(self):
"""
test the approximation order
"""
self.assertEqual(self.order, Solution(self.domain).getApproximationOrder(), "wrong order (Solution)")
self.assertEqual(self.order, ContinuousFunction(self.domain).getApproximationOrder(), "wrong order (continuous function)")
self.assertEqual(1, ReducedSolution(self.domain).getApproximationOrder(), "wrong order (ReducedSolution)")
self.assertEqual(1, ReducedContinuousFunction(self.domain).getApproximationOrder(), "wrong order (Reduced continuous function)")
for i in range(self.domain.getDim()):
for k in range(Function(self.domain).getApproximationOrder()+1):
self.assertAlmostEqual(integrate(Function(self.domain).getX()[i]**k),1./(k+1),8,"wrong integral (i=%s, order = %s)"%(i,k))
for k in range(ReducedFunction(self.domain).getApproximationOrder()+1):
self.assertAlmostEqual(integrate(ReducedFunction(self.domain).getX()[i]**k),1./(k+1),8,"wrong integral (i=%s, order = %s (reduced))"%(i,k))
def test_normal_FunctionOnBoundary(self):
"""
test getNormal() on boundary
assumptions: FunctionOnBoundary(self.domain) exists
"""
dim=self.domain.getDim()
f=FunctionOnBoundary(self.domain)
x=f.getX()
ref=Vector(0.,what=f)
if dim==3:
ref.setTaggedValue(200,[0,0,1])
ref.setTaggedValue(100,[0,0,-1])
ref.setTaggedValue(20,[0,1,0])
ref.setTaggedValue(10,[0,-1,0])
ref.setTaggedValue(2,[1,0,0])
ref.setTaggedValue(1,[-1,0,0])
else:
ref.setTaggedValue(2,[1,0])
ref.setTaggedValue(1,[-1,0])
ref.setTaggedValue(20, [0,1])
ref.setTaggedValue(10, [0,-1])
res=f.getNormal()
self.assertEqual(res.getShape(),(dim,),"wrong shape of result.")
self.assertEqual(res.getFunctionSpace(),f,"wrong functionspace of result.")
self.assertLess(Lsup(ref-res), self.RES_TOL, "wrong result")
def test_normal_ReducedFunctionOnBoundary(self):
"""
test getNormal() on boundary
assumptions: FunctionOnBoundary(self.domain) exists
"""
dim=self.domain.getDim()
f=ReducedFunctionOnBoundary(self.domain)
x=f.getX()
ref=Vector(0.,what=f)
if dim==3:
ref.setTaggedValue(200,[0,0,1])
ref.setTaggedValue(100,[0,0,-1])
ref.setTaggedValue(20,[0,1,0])
ref.setTaggedValue(10,[0,-1,0])
ref.setTaggedValue(2,[1,0,0])
ref.setTaggedValue(1,[-1,0,0])
else:
ref.setTaggedValue(2,[1,0])
ref.setTaggedValue(1,[-1,0])
ref.setTaggedValue(20, [0,1])
ref.setTaggedValue(10, [0,-1])
res=f.getNormal()
self.assertEqual(res.getShape(),(dim,),"wrong shape of result.")
self.assertEqual(res.getFunctionSpace(),f,"wrong functionspace of result.")
self.assertLess(Lsup(ref-res), self.RES_TOL, "wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_L2_onFunction_fromData_rank0(self):
"""
tests L2-norm of Data on the Function
assumptions: self.domain supports integration on Function
"""
dim=self.domain.getDim()
w=Function(self.domain)
x=w.getX()
if dim==2:
arg=Data(0,(),w)
arg=(0.608797336225)*x[0]
ref=sqrt((0.123544732198))
else:
arg=Data(0,(),w)
arg=(0.136031275673)*x[0]
ref=sqrt((0.00616816932037))
res=L2(arg)
self.assertTrue(isinstance(res,float),"wrong type of result.")
self.assertAlmostEqual(res,ref,int(-log10(self.RES_TOL)),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_L2_onFunction_fromData_rank1(self):
"""
tests L2-norm of Data on the Function
assumptions: self.domain supports integration on Function
"""
dim=self.domain.getDim()
w=Function(self.domain)
x=w.getX()
if dim==2:
arg=Data(0,(2,),w)
arg[0]=(-0.212143919436)*x[0]
arg[1]=(-0.256194155686)*x[1]
ref=sqrt((0.0368801626538))
else:
arg=Data(0,(3,),w)
arg[0]=(0.0452831341416)*x[0]
arg[1]=(-0.278640180656)*x[1]
arg[2]=(-0.607035001062)*x[2]
ref=sqrt((0.149394135009))
res=L2(arg)
self.assertTrue(isinstance(res,float),"wrong type of result.")
self.assertAlmostEqual(res,ref,int(-log10(self.RES_TOL)),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_L2_onFunction_fromData_rank2(self):
"""
tests L2-norm of Data on the Function
assumptions: self.domain supports integration on Function
"""
dim=self.domain.getDim()
w=Function(self.domain)
x=w.getX()
if dim==2:
arg=Data(0,(4, 2),w)
arg[0,0]=(0.239448813076)*x[0]
arg[0,1]=(-0.529349708753)*x[1]
arg[1,0]=(-0.381557161859)*x[0]
arg[1,1]=(0.731658534249)*x[1]
arg[2,0]=(-0.813679062342)*x[0]
arg[2,1]=(0.528100089704)*x[1]
arg[3,0]=(-0.480867528161)*x[0]
arg[3,1]=(-0.167862206972)*x[1]
ref=sqrt((0.739610516051))
else:
arg=Data(0,(4, 3),w)
arg[0,0]=(0.951209543612)*x[0]
arg[0,1]=(0.735178735637)*x[1]
arg[0,2]=(0.13074673272)*x[2]
arg[1,0]=(0.412295676715)*x[0]
arg[1,1]=(-0.657695950153)*x[1]
arg[1,2]=(-0.900044734695)*x[2]
arg[2,0]=(0.741773926224)*x[0]
arg[2,1]=(0.0521828807406)*x[1]
arg[2,2]=(0.797728501985)*x[2]
arg[3,0]=(-0.61235554051)*x[0]
arg[3,1]=(0.456652747412)*x[1]
arg[3,2]=(-0.734303857319)*x[2]
ref=sqrt((1.72901661926))
res=L2(arg)
self.assertTrue(isinstance(res,float),"wrong type of result.")
self.assertAlmostEqual(res,ref,int(-log10(self.RES_TOL)),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_L2_onFunction_fromData_rank3(self):
      """
      tests L2-norm of Data on the Function
      assumptions: self.domain supports integration on Function

      Auto-generated test data: each component of arg is c*x[i] with a
      fixed pseudo-random coefficient c, and ref is the precomputed exact
      L2-norm (sum of c**2 * integral(x_i**2) = c**2/3 over the unit box).
      """
      dim=self.domain.getDim()
      w=Function(self.domain)
      x=w.getX()
      # 2D and 3D variants differ only in shape and coefficients
      if dim==2:
        arg=Data(0,(6, 2, 2),w)
        arg[0,0,0]=(0.449174971953)*x[0]
        arg[0,0,1]=(-0.0109398763289)*x[1]
        arg[0,1,0]=(-0.202497187709)*x[0]
        arg[0,1,1]=(-0.12970879334)*x[1]
        arg[1,0,0]=(-0.138092481719)*x[0]
        arg[1,0,1]=(-0.528752200917)*x[1]
        arg[1,1,0]=(-0.605919441662)*x[0]
        arg[1,1,1]=(0.215615032334)*x[1]
        arg[2,0,0]=(-0.998734541972)*x[0]
        arg[2,0,1]=(0.725811901251)*x[1]
        arg[2,1,0]=(-0.966536503228)*x[0]
        arg[2,1,1]=(-0.528692217355)*x[1]
        arg[3,0,0]=(0.757633851466)*x[0]
        arg[3,0,1]=(-0.524660157377)*x[1]
        arg[3,1,0]=(0.983733431677)*x[0]
        arg[3,1,1]=(0.061279109546)*x[1]
        arg[4,0,0]=(0.85914215305)*x[0]
        arg[4,0,1]=(0.941714045112)*x[1]
        arg[4,1,0]=(0.172235529555)*x[0]
        arg[4,1,1]=(-0.108381454437)*x[1]
        arg[5,0,0]=(-0.736373697727)*x[0]
        arg[5,0,1]=(-0.599337929679)*x[1]
        arg[5,1,0]=(0.661072686392)*x[0]
        arg[5,1,1]=(-0.55107327409)*x[1]
        ref=sqrt((2.94641432714))
      else:
        arg=Data(0,(6, 2, 3),w)
        arg[0,0,0]=(0.69227064904)*x[0]
        arg[0,0,1]=(-0.968336177418)*x[1]
        arg[0,0,2]=(-0.634883146685)*x[2]
        arg[0,1,0]=(-0.12640661422)*x[0]
        arg[0,1,1]=(-0.637386589888)*x[1]
        arg[0,1,2]=(0.26060859356)*x[2]
        arg[1,0,0]=(-0.986864633297)*x[0]
        arg[1,0,1]=(-0.441589142379)*x[1]
        arg[1,0,2]=(-0.587865539582)*x[2]
        arg[1,1,0]=(0.596052465031)*x[0]
        arg[1,1,1]=(0.312732336652)*x[1]
        arg[1,1,2]=(-0.514423945092)*x[2]
        arg[2,0,0]=(-0.892391254794)*x[0]
        arg[2,0,1]=(0.377920185756)*x[1]
        arg[2,0,2]=(-0.120174597181)*x[2]
        arg[2,1,0]=(-0.469951576468)*x[0]
        arg[2,1,1]=(-0.788362249555)*x[1]
        arg[2,1,2]=(0.745625354986)*x[2]
        arg[3,0,0]=(0.542802498569)*x[0]
        arg[3,0,1]=(-0.814541028706)*x[1]
        arg[3,0,2]=(0.298410992196)*x[2]
        arg[3,1,0]=(0.981190341206)*x[0]
        arg[3,1,1]=(0.666421298608)*x[1]
        arg[3,1,2]=(-0.369751722626)*x[2]
        arg[4,0,0]=(-0.75379530597)*x[0]
        arg[4,0,1]=(0.283357267139)*x[1]
        arg[4,0,2]=(0.247787072861)*x[2]
        arg[4,1,0]=(0.301766692533)*x[0]
        arg[4,1,1]=(0.828183439224)*x[1]
        arg[4,1,2]=(-0.580824060547)*x[2]
        arg[5,0,0]=(0.637345610764)*x[0]
        arg[5,0,1]=(-0.234409115997)*x[1]
        arg[5,0,2]=(-0.192639300316)*x[2]
        arg[5,1,0]=(-0.62609237162)*x[0]
        arg[5,1,1]=(0.463404958552)*x[1]
        arg[5,1,2]=(-0.547814448738)*x[2]
        ref=sqrt((4.2381131862))
      res=L2(arg)
      self.assertTrue(isinstance(res,float),"wrong type of result.")
      self.assertAlmostEqual(res,ref,int(-log10(self.RES_TOL)),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_L2_onFunction_fromData_rank4(self):
      """
      tests L2-norm of Data on the Function
      assumptions: self.domain supports integration on Function

      Auto-generated test data: each component of arg is c*x[i] with a
      fixed pseudo-random coefficient c, and ref is the precomputed exact
      L2-norm (sum of c**2 * integral(x_i**2) = c**2/3 over the unit box).
      """
      dim=self.domain.getDim()
      w=Function(self.domain)
      x=w.getX()
      # 2D and 3D variants differ only in shape and coefficients
      if dim==2:
        arg=Data(0,(4, 5, 3, 2),w)
        arg[0,0,0,0]=(-0.232618585183)*x[0]
        arg[0,0,0,1]=(0.39796117869)*x[1]
        arg[0,0,1,0]=(-0.997336958262)*x[0]
        arg[0,0,1,1]=(-0.351780915076)*x[1]
        arg[0,0,2,0]=(-0.876764070136)*x[0]
        arg[0,0,2,1]=(0.808730805817)*x[1]
        arg[0,1,0,0]=(-0.197154744966)*x[0]
        arg[0,1,0,1]=(0.416246096086)*x[1]
        arg[0,1,1,0]=(0.708038457121)*x[0]
        arg[0,1,1,1]=(-0.00954021503188)*x[1]
        arg[0,1,2,0]=(-0.62630809425)*x[0]
        arg[0,1,2,1]=(0.430228727912)*x[1]
        arg[0,2,0,0]=(0.0306704747648)*x[0]
        arg[0,2,0,1]=(-0.913877199453)*x[1]
        arg[0,2,1,0]=(-0.697612800829)*x[0]
        arg[0,2,1,1]=(-0.17996376822)*x[1]
        arg[0,2,2,0]=(-0.304509578871)*x[0]
        arg[0,2,2,1]=(-0.610556755811)*x[1]
        arg[0,3,0,0]=(-0.452355972234)*x[0]
        arg[0,3,0,1]=(-0.368921242518)*x[1]
        arg[0,3,1,0]=(-0.478275554932)*x[0]
        arg[0,3,1,1]=(0.257178549127)*x[1]
        arg[0,3,2,0]=(0.530736487177)*x[0]
        arg[0,3,2,1]=(-0.567126272463)*x[1]
        arg[0,4,0,0]=(0.801519165938)*x[0]
        arg[0,4,0,1]=(-0.509816703951)*x[1]
        arg[0,4,1,0]=(-0.255412646934)*x[0]
        arg[0,4,1,1]=(0.437540101896)*x[1]
        arg[0,4,2,0]=(-0.815574969538)*x[0]
        arg[0,4,2,1]=(-0.94691547137)*x[1]
        arg[1,0,0,0]=(-0.732550722593)*x[0]
        arg[1,0,0,1]=(0.515752381704)*x[1]
        arg[1,0,1,0]=(-0.343590210899)*x[0]
        arg[1,0,1,1]=(-0.0601907964915)*x[1]
        arg[1,0,2,0]=(0.0199916154421)*x[0]
        arg[1,0,2,1]=(-0.136927227821)*x[1]
        arg[1,1,0,0]=(0.397994441702)*x[0]
        arg[1,1,0,1]=(0.953873148948)*x[1]
        arg[1,1,1,0]=(0.419416235967)*x[0]
        arg[1,1,1,1]=(0.700998577193)*x[1]
        arg[1,1,2,0]=(-0.497358799271)*x[0]
        arg[1,1,2,1]=(0.0851768858379)*x[1]
        arg[1,2,0,0]=(0.0936678875202)*x[0]
        arg[1,2,0,1]=(0.869883786896)*x[1]
        arg[1,2,1,0]=(0.582700123485)*x[0]
        arg[1,2,1,1]=(-0.433381106794)*x[1]
        arg[1,2,2,0]=(-0.527031777974)*x[0]
        arg[1,2,2,1]=(0.105105137652)*x[1]
        arg[1,3,0,0]=(-0.716750829134)*x[0]
        arg[1,3,0,1]=(0.774519209008)*x[1]
        arg[1,3,1,0]=(-0.568743372716)*x[0]
        arg[1,3,1,1]=(0.794732483944)*x[1]
        arg[1,3,2,0]=(0.246606002015)*x[0]
        arg[1,3,2,1]=(-0.988869494994)*x[1]
        arg[1,4,0,0]=(0.482379298083)*x[0]
        arg[1,4,0,1]=(-0.386268387903)*x[1]
        arg[1,4,1,0]=(0.137184889675)*x[0]
        arg[1,4,1,1]=(-0.140520035321)*x[1]
        arg[1,4,2,0]=(0.822755050415)*x[0]
        arg[1,4,2,1]=(-0.815562139522)*x[1]
        arg[2,0,0,0]=(-0.462891511962)*x[0]
        arg[2,0,0,1]=(-0.122643411631)*x[1]
        arg[2,0,1,0]=(-0.520861119962)*x[0]
        arg[2,0,1,1]=(-0.881189618018)*x[1]
        arg[2,0,2,0]=(-0.776157842774)*x[0]
        arg[2,0,2,1]=(-0.12354053207)*x[1]
        arg[2,1,0,0]=(0.395495230826)*x[0]
        arg[2,1,0,1]=(-0.388106659423)*x[1]
        arg[2,1,1,0]=(0.354250242834)*x[0]
        arg[2,1,1,1]=(-0.666514210192)*x[1]
        arg[2,1,2,0]=(0.951294655083)*x[0]
        arg[2,1,2,1]=(0.074024416386)*x[1]
        arg[2,2,0,0]=(0.335448485459)*x[0]
        arg[2,2,0,1]=(-0.40988282528)*x[1]
        arg[2,2,1,0]=(-0.805725968875)*x[0]
        arg[2,2,1,1]=(-0.949883082118)*x[1]
        arg[2,2,2,0]=(0.531549210683)*x[0]
        arg[2,2,2,1]=(-0.398401016682)*x[1]
        arg[2,3,0,0]=(-0.953963433205)*x[0]
        arg[2,3,0,1]=(0.643431126406)*x[1]
        arg[2,3,1,0]=(-0.167611998738)*x[0]
        arg[2,3,1,1]=(0.226130056552)*x[1]
        arg[2,3,2,0]=(0.0752687641131)*x[0]
        arg[2,3,2,1]=(-0.115742756362)*x[1]
        arg[2,4,0,0]=(0.579694491028)*x[0]
        arg[2,4,0,1]=(-0.112005738299)*x[1]
        arg[2,4,1,0]=(0.657291764224)*x[0]
        arg[2,4,1,1]=(0.62671154177)*x[1]
        arg[2,4,2,0]=(0.103695027944)*x[0]
        arg[2,4,2,1]=(0.462828491544)*x[1]
        arg[3,0,0,0]=(0.697692979998)*x[0]
        arg[3,0,0,1]=(-0.123481859619)*x[1]
        arg[3,0,1,0]=(-0.749745629459)*x[0]
        arg[3,0,1,1]=(-0.541969524069)*x[1]
        arg[3,0,2,0]=(0.819484470759)*x[0]
        arg[3,0,2,1]=(-0.860592326469)*x[1]
        arg[3,1,0,0]=(-0.716566084771)*x[0]
        arg[3,1,0,1]=(-0.949235434827)*x[1]
        arg[3,1,1,0]=(-0.826699498174)*x[0]
        arg[3,1,1,1]=(-0.138511521583)*x[1]
        arg[3,1,2,0]=(-0.951682890904)*x[0]
        arg[3,1,2,1]=(0.413293316925)*x[1]
        arg[3,2,0,0]=(0.909516836775)*x[0]
        arg[3,2,0,1]=(-0.919989721277)*x[1]
        arg[3,2,1,0]=(0.0994860369337)*x[0]
        arg[3,2,1,1]=(-0.933647246623)*x[1]
        arg[3,2,2,0]=(-0.759215183015)*x[0]
        arg[3,2,2,1]=(0.0975793309286)*x[1]
        arg[3,3,0,0]=(-0.130256739381)*x[0]
        arg[3,3,0,1]=(-0.582280862311)*x[1]
        arg[3,3,1,0]=(0.206970526192)*x[0]
        arg[3,3,1,1]=(-0.8678322258)*x[1]
        arg[3,3,2,0]=(0.133004501279)*x[0]
        arg[3,3,2,1]=(0.802921710935)*x[1]
        arg[3,4,0,0]=(-0.255737792764)*x[0]
        arg[3,4,0,1]=(-0.34168114937)*x[1]
        arg[3,4,1,0]=(-0.859309090399)*x[0]
        arg[3,4,1,1]=(0.245043986435)*x[1]
        arg[3,4,2,0]=(0.893062018695)*x[0]
        arg[3,4,2,1]=(0.709422742588)*x[1]
        ref=sqrt((13.7289280362))
      else:
        arg=Data(0,(4, 5, 3, 3),w)
        arg[0,0,0,0]=(0.0312828390439)*x[0]
        arg[0,0,0,1]=(-0.524970416212)*x[1]
        arg[0,0,0,2]=(0.561865217554)*x[2]
        arg[0,0,1,0]=(0.692457187384)*x[0]
        arg[0,0,1,1]=(0.946967182157)*x[1]
        arg[0,0,1,2]=(-0.863842279464)*x[2]
        arg[0,0,2,0]=(0.993922921598)*x[0]
        arg[0,0,2,1]=(0.322812768679)*x[1]
        arg[0,0,2,2]=(0.901876132204)*x[2]
        arg[0,1,0,0]=(0.967569979365)*x[0]
        arg[0,1,0,1]=(0.840979131355)*x[1]
        arg[0,1,0,2]=(0.0494811460856)*x[2]
        arg[0,1,1,0]=(0.315178456102)*x[0]
        arg[0,1,1,1]=(0.449848313024)*x[1]
        arg[0,1,1,2]=(0.765887852886)*x[2]
        arg[0,1,2,0]=(0.975541574352)*x[0]
        arg[0,1,2,1]=(-0.797851290751)*x[1]
        arg[0,1,2,2]=(0.628918775319)*x[2]
        arg[0,2,0,0]=(0.685635794312)*x[0]
        arg[0,2,0,1]=(0.10341799962)*x[1]
        arg[0,2,0,2]=(-0.964822756043)*x[2]
        arg[0,2,1,0]=(-0.56160368212)*x[0]
        arg[0,2,1,1]=(0.676344298102)*x[1]
        arg[0,2,1,2]=(-0.713924121843)*x[2]
        arg[0,2,2,0]=(-0.276655136263)*x[0]
        arg[0,2,2,1]=(0.336046973788)*x[1]
        arg[0,2,2,2]=(-0.68789392396)*x[2]
        arg[0,3,0,0]=(0.0172861311571)*x[0]
        arg[0,3,0,1]=(-0.301075956456)*x[1]
        arg[0,3,0,2]=(0.779442985415)*x[2]
        arg[0,3,1,0]=(-0.517629576558)*x[0]
        arg[0,3,1,1]=(0.584779586639)*x[1]
        arg[0,3,1,2]=(-0.53266435436)*x[2]
        arg[0,3,2,0]=(0.841533567102)*x[0]
        arg[0,3,2,1]=(0.0458746415489)*x[1]
        arg[0,3,2,2]=(0.921237870758)*x[2]
        arg[0,4,0,0]=(0.0548343238805)*x[0]
        arg[0,4,0,1]=(0.687022707412)*x[1]
        arg[0,4,0,2]=(-0.319803609795)*x[2]
        arg[0,4,1,0]=(0.409763007811)*x[0]
        arg[0,4,1,1]=(0.165501957435)*x[1]
        arg[0,4,1,2]=(0.116001692781)*x[2]
        arg[0,4,2,0]=(-0.515571394238)*x[0]
        arg[0,4,2,1]=(0.209467945147)*x[1]
        arg[0,4,2,2]=(-0.344827191247)*x[2]
        arg[1,0,0,0]=(0.57193838014)*x[0]
        arg[1,0,0,1]=(-0.0880683799076)*x[1]
        arg[1,0,0,2]=(0.956899617441)*x[2]
        arg[1,0,1,0]=(-0.783689636357)*x[0]
        arg[1,0,1,1]=(-0.25177506885)*x[1]
        arg[1,0,1,2]=(-0.97074584634)*x[2]
        arg[1,0,2,0]=(0.432543519806)*x[0]
        arg[1,0,2,1]=(0.481003021954)*x[1]
        arg[1,0,2,2]=(-0.0630751518268)*x[2]
        arg[1,1,0,0]=(-0.65152446796)*x[0]
        arg[1,1,0,1]=(-0.0323685084425)*x[1]
        arg[1,1,0,2]=(-0.508674033909)*x[2]
        arg[1,1,1,0]=(-0.533367818916)*x[0]
        arg[1,1,1,1]=(0.310738340288)*x[1]
        arg[1,1,1,2]=(0.694612234326)*x[2]
        arg[1,1,2,0]=(-0.622052473032)*x[0]
        arg[1,1,2,1]=(0.0498443793671)*x[1]
        arg[1,1,2,2]=(0.61023707512)*x[2]
        arg[1,2,0,0]=(0.0730267406859)*x[0]
        arg[1,2,0,1]=(0.146909334607)*x[1]
        arg[1,2,0,2]=(-0.641860284448)*x[2]
        arg[1,2,1,0]=(0.917976589737)*x[0]
        arg[1,2,1,1]=(0.50219672122)*x[1]
        arg[1,2,1,2]=(0.634559579812)*x[2]
        arg[1,2,2,0]=(0.0578772734534)*x[0]
        arg[1,2,2,1]=(0.288730973517)*x[1]
        arg[1,2,2,2]=(-0.0525978796154)*x[2]
        arg[1,3,0,0]=(-0.926152433388)*x[0]
        arg[1,3,0,1]=(0.0616647680855)*x[1]
        arg[1,3,0,2]=(-0.875889217846)*x[2]
        arg[1,3,1,0]=(-0.638931542845)*x[0]
        arg[1,3,1,1]=(0.708848122964)*x[1]
        arg[1,3,1,2]=(0.119066979792)*x[2]
        arg[1,3,2,0]=(0.853716218591)*x[0]
        arg[1,3,2,1]=(-0.92754322201)*x[1]
        arg[1,3,2,2]=(-0.671530626265)*x[2]
        arg[1,4,0,0]=(0.337424536231)*x[0]
        arg[1,4,0,1]=(0.335704451719)*x[1]
        arg[1,4,0,2]=(-0.484565969466)*x[2]
        arg[1,4,1,0]=(-0.855476192012)*x[0]
        arg[1,4,1,1]=(0.405674615553)*x[1]
        arg[1,4,1,2]=(0.728310771323)*x[2]
        arg[1,4,2,0]=(0.363651308265)*x[0]
        arg[1,4,2,1]=(0.174460594531)*x[1]
        arg[1,4,2,2]=(-0.0418244838617)*x[2]
        arg[2,0,0,0]=(-0.531341992511)*x[0]
        arg[2,0,0,1]=(0.584996796272)*x[1]
        arg[2,0,0,2]=(-0.752430968716)*x[2]
        arg[2,0,1,0]=(-0.341989849747)*x[0]
        arg[2,0,1,1]=(0.153572646953)*x[1]
        arg[2,0,1,2]=(-0.197130051737)*x[2]
        arg[2,0,2,0]=(-0.338082424082)*x[0]
        arg[2,0,2,1]=(0.000173657394772)*x[1]
        arg[2,0,2,2]=(0.365272907692)*x[2]
        arg[2,1,0,0]=(0.904304126564)*x[0]
        arg[2,1,0,1]=(0.161252368484)*x[1]
        arg[2,1,0,2]=(0.246854092422)*x[2]
        arg[2,1,1,0]=(-0.299880647529)*x[0]
        arg[2,1,1,1]=(-0.566917528608)*x[1]
        arg[2,1,1,2]=(0.243183337285)*x[2]
        arg[2,1,2,0]=(0.437406011474)*x[0]
        arg[2,1,2,1]=(0.727447394053)*x[1]
        arg[2,1,2,2]=(0.380752950664)*x[2]
        arg[2,2,0,0]=(0.172292846911)*x[0]
        arg[2,2,0,1]=(0.334201791643)*x[1]
        arg[2,2,0,2]=(0.739989926962)*x[2]
        arg[2,2,1,0]=(-0.0669843715042)*x[0]
        arg[2,2,1,1]=(-0.540497281635)*x[1]
        arg[2,2,1,2]=(-0.744217027088)*x[2]
        arg[2,2,2,0]=(-0.287295952259)*x[0]
        arg[2,2,2,1]=(-0.512411849183)*x[1]
        arg[2,2,2,2]=(0.953107417666)*x[2]
        arg[2,3,0,0]=(0.998168116695)*x[0]
        arg[2,3,0,1]=(0.960065646359)*x[1]
        arg[2,3,0,2]=(0.110048258832)*x[2]
        arg[2,3,1,0]=(-0.477271134724)*x[0]
        arg[2,3,1,1]=(0.707182612251)*x[1]
        arg[2,3,1,2]=(0.285500891755)*x[2]
        arg[2,3,2,0]=(-0.863497506661)*x[0]
        arg[2,3,2,1]=(-0.293917669879)*x[1]
        arg[2,3,2,2]=(-0.403384244295)*x[2]
        arg[2,4,0,0]=(0.848455277702)*x[0]
        arg[2,4,0,1]=(-0.530101455578)*x[1]
        arg[2,4,0,2]=(0.33887313048)*x[2]
        arg[2,4,1,0]=(-0.195313538124)*x[0]
        arg[2,4,1,1]=(-0.62754572008)*x[1]
        arg[2,4,1,2]=(-0.385132960582)*x[2]
        arg[2,4,2,0]=(0.240048012886)*x[0]
        arg[2,4,2,1]=(0.900766252969)*x[1]
        arg[2,4,2,2]=(0.669620533505)*x[2]
        arg[3,0,0,0]=(0.375766827301)*x[0]
        arg[3,0,0,1]=(0.705484960308)*x[1]
        arg[3,0,0,2]=(0.440931516034)*x[2]
        arg[3,0,1,0]=(-0.44724403177)*x[0]
        arg[3,0,1,1]=(-0.31558249626)*x[1]
        arg[3,0,1,2]=(-0.00419436365172)*x[2]
        arg[3,0,2,0]=(0.750599752032)*x[0]
        arg[3,0,2,1]=(0.367649951795)*x[1]
        arg[3,0,2,2]=(0.0488013073654)*x[2]
        arg[3,1,0,0]=(-0.992890068274)*x[0]
        arg[3,1,0,1]=(0.671447745511)*x[1]
        arg[3,1,0,2]=(0.85613331404)*x[2]
        arg[3,1,1,0]=(-0.46064764242)*x[0]
        arg[3,1,1,1]=(0.48138877715)*x[1]
        arg[3,1,1,2]=(0.396741761803)*x[2]
        arg[3,1,2,0]=(-0.879391967543)*x[0]
        arg[3,1,2,1]=(-0.44039462138)*x[1]
        arg[3,1,2,2]=(0.0330511573872)*x[2]
        arg[3,2,0,0]=(-0.367413701648)*x[0]
        arg[3,2,0,1]=(0.0359818324891)*x[1]
        arg[3,2,0,2]=(-0.307532667032)*x[2]
        arg[3,2,1,0]=(0.334663597166)*x[0]
        arg[3,2,1,1]=(0.541941978066)*x[1]
        arg[3,2,1,2]=(-0.609184079318)*x[2]
        arg[3,2,2,0]=(0.359349239826)*x[0]
        arg[3,2,2,1]=(0.0419272305685)*x[1]
        arg[3,2,2,2]=(0.557189794296)*x[2]
        arg[3,3,0,0]=(-0.85864165554)*x[0]
        arg[3,3,0,1]=(-0.185411404213)*x[1]
        arg[3,3,0,2]=(0.254294865253)*x[2]
        arg[3,3,1,0]=(0.870362177541)*x[0]
        arg[3,3,1,1]=(-0.439688612864)*x[1]
        arg[3,3,1,2]=(0.26006729357)*x[2]
        arg[3,3,2,0]=(-0.0724034754175)*x[0]
        arg[3,3,2,1]=(0.444871564246)*x[1]
        arg[3,3,2,2]=(0.485634530531)*x[2]
        arg[3,4,0,0]=(-0.744756961758)*x[0]
        arg[3,4,0,1]=(0.429761406102)*x[1]
        arg[3,4,0,2]=(-0.584963735834)*x[2]
        arg[3,4,1,0]=(0.684578379159)*x[0]
        arg[3,4,1,1]=(0.949460132601)*x[1]
        arg[3,4,1,2]=(-0.592179909559)*x[2]
        arg[3,4,2,0]=(0.707154437797)*x[0]
        arg[3,4,2,1]=(0.619200407063)*x[1]
        arg[3,4,2,2]=(-0.338547165)*x[2]
        ref=sqrt((19.2170638478))
      res=L2(arg)
      self.assertTrue(isinstance(res,float),"wrong type of result.")
      self.assertAlmostEqual(res,ref,int(-log10(self.RES_TOL)),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_L2_onReducedFunction_fromData_rank0(self):
"""
tests L2-norm of Data on the ReducedFunction
assumptions: self.domain supports integration on ReducedFunction
"""
dim=self.domain.getDim()
w=ReducedFunction(self.domain)
x=w.getX()
if dim==2:
arg=Data(0,(),w)
arg=1.*sqrt(x[0])
ref=sqrt(0.5)
else:
arg=Data(0,(),w)
arg=1.*sqrt(x[0])
ref=sqrt(0.5)
res=L2(arg)
self.assertTrue(isinstance(res,float),"wrong type of result.")
self.assertAlmostEqual(res,ref,int(-log10(self.RES_TOL)),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_L2_onReducedFunction_fromData_rank1(self):
"""
tests L2-norm of Data on the ReducedFunction
assumptions: self.domain supports integration on ReducedFunction
"""
dim=self.domain.getDim()
w=ReducedFunction(self.domain)
x=w.getX()
if dim==2:
arg=Data(0,(2,),w)
arg[0]=1.*sqrt(x[0])
arg[1]=2.*sqrt(x[1])
ref=sqrt(2.5)
else:
arg=Data(0,(3,),w)
arg[0]=1.*sqrt(x[0])
arg[1]=2.*sqrt(x[1])
arg[2]=3.*sqrt(x[2])
ref=sqrt(7.)
res=L2(arg)
self.assertTrue(isinstance(res,float),"wrong type of result.")
self.assertAlmostEqual(res,ref,int(-log10(self.RES_TOL)),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_L2_onReducedFunction_fromData_rank2(self):
"""
tests L2-norm of Data on the ReducedFunction
assumptions: self.domain supports integration on ReducedFunction
"""
dim=self.domain.getDim()
w=ReducedFunction(self.domain)
x=w.getX()
if dim==2:
arg=Data(0,(4, 2),w)
arg[0,0]=11.*sqrt(x[0])
arg[0,1]=1.*sqrt(x[1])
arg[1,0]=10.*sqrt(x[0])
arg[1,1]=11.*sqrt(x[1])
arg[2,0]=20.*sqrt(x[0])
arg[2,1]=21.*sqrt(x[1])
arg[3,0]=30.*sqrt(x[0])
arg[3,1]=31.*sqrt(x[1])
ref=sqrt(1522.5)
else:
arg=Data(0,(4, 3),w)
arg[0,0]=11.*sqrt(x[0])
arg[0,1]=1.*sqrt(x[1])
arg[0,2]=2.*sqrt(x[2])
arg[1,0]=10.*sqrt(x[0])
arg[1,1]=11.*sqrt(x[1])
arg[1,2]=12.*sqrt(x[2])
arg[2,0]=20.*sqrt(x[0])
arg[2,1]=21.*sqrt(x[1])
arg[2,2]=22.*sqrt(x[2])
arg[3,0]=30.*sqrt(x[0])
arg[3,1]=31.*sqrt(x[1])
arg[3,2]=32.*sqrt(x[2])
ref=sqrt(2350.5)
res=L2(arg)
self.assertTrue(isinstance(res,float),"wrong type of result.")
self.assertAlmostEqual(res,ref,int(-log10(self.RES_TOL)),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_L2_onReducedFunction_fromData_rank3(self):
      """
      tests L2-norm of Data on the ReducedFunction
      assumptions: self.domain supports integration on ReducedFunction

      Auto-generated test data: each component of arg is c*sqrt(x[i]) with a
      fixed pseudo-random coefficient c, and ref is the precomputed exact
      L2-norm (sum of c**2 * integral(x_i) = c**2/2 over the unit box).
      """
      dim=self.domain.getDim()
      w=ReducedFunction(self.domain)
      x=w.getX()
      # 2D and 3D variants differ only in shape and coefficients
      if dim==2:
        arg=Data(0,(6, 2, 2),w)
        arg[0,0,0]=(0.449174971953)*sqrt(x[0])
        arg[0,0,1]=(-0.0109398763289)*sqrt(x[1])
        arg[0,1,0]=(-0.202497187709)*sqrt(x[0])
        arg[0,1,1]=(-0.12970879334)*sqrt(x[1])
        arg[1,0,0]=(-0.138092481719)*sqrt(x[0])
        arg[1,0,1]=(-0.528752200917)*sqrt(x[1])
        arg[1,1,0]=(-0.605919441662)*sqrt(x[0])
        arg[1,1,1]=(0.215615032334)*sqrt(x[1])
        arg[2,0,0]=(-0.998734541972)*sqrt(x[0])
        arg[2,0,1]=(0.725811901251)*sqrt(x[1])
        arg[2,1,0]=(-0.966536503228)*sqrt(x[0])
        arg[2,1,1]=(-0.528692217355)*sqrt(x[1])
        arg[3,0,0]=(0.757633851466)*sqrt(x[0])
        arg[3,0,1]=(-0.524660157377)*sqrt(x[1])
        arg[3,1,0]=(0.983733431677)*sqrt(x[0])
        arg[3,1,1]=(0.061279109546)*sqrt(x[1])
        arg[4,0,0]=(0.85914215305)*sqrt(x[0])
        arg[4,0,1]=(0.941714045112)*sqrt(x[1])
        arg[4,1,0]=(0.172235529555)*sqrt(x[0])
        arg[4,1,1]=(-0.108381454437)*sqrt(x[1])
        arg[5,0,0]=(-0.736373697727)*sqrt(x[0])
        arg[5,0,1]=(-0.599337929679)*sqrt(x[1])
        arg[5,1,0]=(0.661072686392)*sqrt(x[0])
        arg[5,1,1]=(-0.55107327409)*sqrt(x[1])
        ref=sqrt(4.4196214907099591)
      else:
        arg=Data(0,(6, 2, 3),w)
        arg[0,0,0]=(0.69227064904)*sqrt(x[0])
        arg[0,0,1]=(-0.968336177418)*sqrt(x[1])
        arg[0,0,2]=(-0.634883146685)*sqrt(x[2])
        arg[0,1,0]=(-0.12640661422)*sqrt(x[0])
        arg[0,1,1]=(-0.637386589888)*sqrt(x[1])
        arg[0,1,2]=(0.26060859356)*sqrt(x[2])
        arg[1,0,0]=(-0.986864633297)*sqrt(x[0])
        arg[1,0,1]=(-0.441589142379)*sqrt(x[1])
        arg[1,0,2]=(-0.587865539582)*sqrt(x[2])
        arg[1,1,0]=(0.596052465031)*sqrt(x[0])
        arg[1,1,1]=(0.312732336652)*sqrt(x[1])
        arg[1,1,2]=(-0.514423945092)*sqrt(x[2])
        arg[2,0,0]=(-0.892391254794)*sqrt(x[0])
        arg[2,0,1]=(0.377920185756)*sqrt(x[1])
        arg[2,0,2]=(-0.120174597181)*sqrt(x[2])
        arg[2,1,0]=(-0.469951576468)*sqrt(x[0])
        arg[2,1,1]=(-0.788362249555)*sqrt(x[1])
        arg[2,1,2]=(0.745625354986)*sqrt(x[2])
        arg[3,0,0]=(0.542802498569)*sqrt(x[0])
        arg[3,0,1]=(-0.814541028706)*sqrt(x[1])
        arg[3,0,2]=(0.298410992196)*sqrt(x[2])
        arg[3,1,0]=(0.981190341206)*sqrt(x[0])
        arg[3,1,1]=(0.666421298608)*sqrt(x[1])
        arg[3,1,2]=(-0.369751722626)*sqrt(x[2])
        arg[4,0,0]=(-0.75379530597)*sqrt(x[0])
        arg[4,0,1]=(0.283357267139)*sqrt(x[1])
        arg[4,0,2]=(0.247787072861)*sqrt(x[2])
        arg[4,1,0]=(0.301766692533)*sqrt(x[0])
        arg[4,1,1]=(0.828183439224)*sqrt(x[1])
        arg[4,1,2]=(-0.580824060547)*sqrt(x[2])
        arg[5,0,0]=(0.637345610764)*sqrt(x[0])
        arg[5,0,1]=(-0.234409115997)*sqrt(x[1])
        arg[5,0,2]=(-0.192639300316)*sqrt(x[2])
        arg[5,1,0]=(-0.62609237162)*sqrt(x[0])
        arg[5,1,1]=(0.463404958552)*sqrt(x[1])
        arg[5,1,2]=(-0.547814448738)*sqrt(x[2])
        ref=sqrt(6.3571697792950923)
      res=L2(arg)
      self.assertTrue(isinstance(res,float),"wrong type of result.")
      self.assertAlmostEqual(res,ref,int(-log10(self.RES_TOL)),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_L2_onReducedFunction_fromData_rank4(self):
"""
tests L2-norm of Data on the ReducedFunction
assumptions: self.domain supports integration on ReducedFunction
"""
dim=self.domain.getDim()
w=ReducedFunction(self.domain)
x=w.getX()
if dim==2:
arg=Data(0,(4, 5, 3, 2),w)
arg[0,0,0,0]=(-0.232618585183)*sqrt(x[0])
arg[0,0,0,1]=(0.39796117869)*sqrt(x[1])
arg[0,0,1,0]=(-0.997336958262)*sqrt(x[0])
arg[0,0,1,1]=(-0.351780915076)*sqrt(x[1])
arg[0,0,2,0]=(-0.876764070136)*sqrt(x[0])
arg[0,0,2,1]=(0.808730805817)*sqrt(x[1])
arg[0,1,0,0]=(-0.197154744966)*sqrt(x[0])
arg[0,1,0,1]=(0.416246096086)*sqrt(x[1])
arg[0,1,1,0]=(0.708038457121)*sqrt(x[0])
arg[0,1,1,1]=(-0.00954021503188)*sqrt(x[1])
arg[0,1,2,0]=(-0.62630809425)*sqrt(x[0])
arg[0,1,2,1]=(0.430228727912)*sqrt(x[1])
arg[0,2,0,0]=(0.0306704747648)*sqrt(x[0])
arg[0,2,0,1]=(-0.913877199453)*sqrt(x[1])
arg[0,2,1,0]=(-0.697612800829)*sqrt(x[0])
arg[0,2,1,1]=(-0.17996376822)*sqrt(x[1])
arg[0,2,2,0]=(-0.304509578871)*sqrt(x[0])
arg[0,2,2,1]=(-0.610556755811)*sqrt(x[1])
arg[0,3,0,0]=(-0.452355972234)*sqrt(x[0])
arg[0,3,0,1]=(-0.368921242518)*sqrt(x[1])
arg[0,3,1,0]=(-0.478275554932)*sqrt(x[0])
arg[0,3,1,1]=(0.257178549127)*sqrt(x[1])
arg[0,3,2,0]=(0.530736487177)*sqrt(x[0])
arg[0,3,2,1]=(-0.567126272463)*sqrt(x[1])
arg[0,4,0,0]=(0.801519165938)*sqrt(x[0])
arg[0,4,0,1]=(-0.509816703951)*sqrt(x[1])
arg[0,4,1,0]=(-0.255412646934)*sqrt(x[0])
arg[0,4,1,1]=(0.437540101896)*sqrt(x[1])
arg[0,4,2,0]=(-0.815574969538)*sqrt(x[0])
arg[0,4,2,1]=(-0.94691547137)*sqrt(x[1])
arg[1,0,0,0]=(-0.732550722593)*sqrt(x[0])
arg[1,0,0,1]=(0.515752381704)*sqrt(x[1])
arg[1,0,1,0]=(-0.343590210899)*sqrt(x[0])
arg[1,0,1,1]=(-0.0601907964915)*sqrt(x[1])
arg[1,0,2,0]=(0.0199916154421)*sqrt(x[0])
arg[1,0,2,1]=(-0.136927227821)*sqrt(x[1])
arg[1,1,0,0]=(0.397994441702)*sqrt(x[0])
arg[1,1,0,1]=(0.953873148948)*sqrt(x[1])
arg[1,1,1,0]=(0.419416235967)*sqrt(x[0])
arg[1,1,1,1]=(0.700998577193)*sqrt(x[1])
arg[1,1,2,0]=(-0.497358799271)*sqrt(x[0])
arg[1,1,2,1]=(0.0851768858379)*sqrt(x[1])
arg[1,2,0,0]=(0.0936678875202)*sqrt(x[0])
arg[1,2,0,1]=(0.869883786896)*sqrt(x[1])
arg[1,2,1,0]=(0.582700123485)*sqrt(x[0])
arg[1,2,1,1]=(-0.433381106794)*sqrt(x[1])
arg[1,2,2,0]=(-0.527031777974)*sqrt(x[0])
arg[1,2,2,1]=(0.105105137652)*sqrt(x[1])
arg[1,3,0,0]=(-0.716750829134)*sqrt(x[0])
arg[1,3,0,1]=(0.774519209008)*sqrt(x[1])
arg[1,3,1,0]=(-0.568743372716)*sqrt(x[0])
arg[1,3,1,1]=(0.794732483944)*sqrt(x[1])
arg[1,3,2,0]=(0.246606002015)*sqrt(x[0])
arg[1,3,2,1]=(-0.988869494994)*sqrt(x[1])
arg[1,4,0,0]=(0.482379298083)*sqrt(x[0])
arg[1,4,0,1]=(-0.386268387903)*sqrt(x[1])
arg[1,4,1,0]=(0.137184889675)*sqrt(x[0])
arg[1,4,1,1]=(-0.140520035321)*sqrt(x[1])
arg[1,4,2,0]=(0.822755050415)*sqrt(x[0])
arg[1,4,2,1]=(-0.815562139522)*sqrt(x[1])
arg[2,0,0,0]=(-0.462891511962)*sqrt(x[0])
arg[2,0,0,1]=(-0.122643411631)*sqrt(x[1])
arg[2,0,1,0]=(-0.520861119962)*sqrt(x[0])
arg[2,0,1,1]=(-0.881189618018)*sqrt(x[1])
arg[2,0,2,0]=(-0.776157842774)*sqrt(x[0])
arg[2,0,2,1]=(-0.12354053207)*sqrt(x[1])
arg[2,1,0,0]=(0.395495230826)*sqrt(x[0])
arg[2,1,0,1]=(-0.388106659423)*sqrt(x[1])
arg[2,1,1,0]=(0.354250242834)*sqrt(x[0])
arg[2,1,1,1]=(-0.666514210192)*sqrt(x[1])
arg[2,1,2,0]=(0.951294655083)*sqrt(x[0])
arg[2,1,2,1]=(0.074024416386)*sqrt(x[1])
arg[2,2,0,0]=(0.335448485459)*sqrt(x[0])
arg[2,2,0,1]=(-0.40988282528)*sqrt(x[1])
arg[2,2,1,0]=(-0.805725968875)*sqrt(x[0])
arg[2,2,1,1]=(-0.949883082118)*sqrt(x[1])
arg[2,2,2,0]=(0.531549210683)*sqrt(x[0])
arg[2,2,2,1]=(-0.398401016682)*sqrt(x[1])
arg[2,3,0,0]=(-0.953963433205)*sqrt(x[0])
arg[2,3,0,1]=(0.643431126406)*sqrt(x[1])
arg[2,3,1,0]=(-0.167611998738)*sqrt(x[0])
arg[2,3,1,1]=(0.226130056552)*sqrt(x[1])
arg[2,3,2,0]=(0.0752687641131)*sqrt(x[0])
arg[2,3,2,1]=(-0.115742756362)*sqrt(x[1])
arg[2,4,0,0]=(0.579694491028)*sqrt(x[0])
arg[2,4,0,1]=(-0.112005738299)*sqrt(x[1])
arg[2,4,1,0]=(0.657291764224)*sqrt(x[0])
arg[2,4,1,1]=(0.62671154177)*sqrt(x[1])
arg[2,4,2,0]=(0.103695027944)*sqrt(x[0])
arg[2,4,2,1]=(0.462828491544)*sqrt(x[1])
arg[3,0,0,0]=(0.697692979998)*sqrt(x[0])
arg[3,0,0,1]=(-0.123481859619)*sqrt(x[1])
arg[3,0,1,0]=(-0.749745629459)*sqrt(x[0])
arg[3,0,1,1]=(-0.541969524069)*sqrt(x[1])
arg[3,0,2,0]=(0.819484470759)*sqrt(x[0])
arg[3,0,2,1]=(-0.860592326469)*sqrt(x[1])
arg[3,1,0,0]=(-0.716566084771)*sqrt(x[0])
arg[3,1,0,1]=(-0.949235434827)*sqrt(x[1])
arg[3,1,1,0]=(-0.826699498174)*sqrt(x[0])
arg[3,1,1,1]=(-0.138511521583)*sqrt(x[1])
arg[3,1,2,0]=(-0.951682890904)*sqrt(x[0])
arg[3,1,2,1]=(0.413293316925)*sqrt(x[1])
arg[3,2,0,0]=(0.909516836775)*sqrt(x[0])
arg[3,2,0,1]=(-0.919989721277)*sqrt(x[1])
arg[3,2,1,0]=(0.0994860369337)*sqrt(x[0])
arg[3,2,1,1]=(-0.933647246623)*sqrt(x[1])
arg[3,2,2,0]=(-0.759215183015)*sqrt(x[0])
arg[3,2,2,1]=(0.0975793309286)*sqrt(x[1])
arg[3,3,0,0]=(-0.130256739381)*sqrt(x[0])
| |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=missing-docstring,no-self-use,no-init,invalid-name
"""Unit tests for bundling."""
from __future__ import absolute_import
import mock
import unittest2
from google.gax import BundleDescriptor, BundleOptions, bundling
from tests.fixtures.fixture_pb2 import Bundled, Outer, Simple
def _Simple(value, other_value=None):
    """Build a ``Simple`` fixture message.

    ``field2`` is populated only when *other_value* is provided.
    """
    if other_value is None:
        return Simple(field1=value)
    return Simple(field1=value, field2=other_value)
def _Outer(value):
    """Build an ``Outer`` fixture whose inner ``Simple`` carries *value* twice."""
    inner_msg = _Simple(value, value)
    return Outer(field1=value, inner=inner_msg)
def _Bundled(value):
    """Wrap *value* in a ``Bundled`` fixture message."""
    msg = Bundled(field1=value)
    return msg
class TestComputeBundleId(unittest2.TestCase):
    """Tests for ``bundling.compute_bundle_id``."""

    def test_computes_bundle_ids_ok(self):
        """The bundle id is a tuple of the requested field values, in order.

        Dotted field names traverse nested messages; unset fields yield None.
        """
        tests = [
            {
                'message': 'single field value',
                'object': _Simple('dummy_value'),
                'fields': ['field1'],
                'want': ('dummy_value',)
            }, {
                'message': 'composite value with None',
                'object': _Simple('dummy_value'),
                'fields': ['field1', 'field2'],
                'want': ('dummy_value', None)
            }, {
                'message': 'a composite value',
                'object': _Simple('dummy_value', 'other_value'),
                'fields': ['field1', 'field2'],
                'want': ('dummy_value', 'other_value')
            }, {
                'message': 'a simple dotted value',
                'object': _Outer('this is dotty'),
                'fields': ['inner.field1'],
                'want': ('this is dotty',)
            }, {
                'message': 'a complex case',
                'object': _Outer('what!?'),
                'fields': ['inner.field1', 'inner.field2', 'field1'],
                'want': ('what!?', 'what!?', 'what!?')
            }
        ]
        for t in tests:
            got = bundling.compute_bundle_id(t['object'], t['fields'])
            message = 'failed while making an id for {}'.format(t['message'])
            self.assertEqual(got, t['want'], message)

    def test_should_raise_if_fields_are_missing(self):
        """Unknown field names (including dotted ones) raise AttributeError."""
        tests = [
            {
                'object': _Simple('dummy_value'),
                'fields': ['field3'],
            }, {
                'object': _Simple('dummy_value'),
                'fields': ['field1', 'field3'],
            }, {
                'object': _Simple('dummy_value', 'other_value'),
                'fields': ['field1', 'field3'],
            }, {
                'object': _Outer('this is dotty'),
                'fields': ['inner.field3'],
            }, {
                'object': _Outer('what!?'),
                'fields': ['inner.field4'],
            }
        ]
        for t in tests:
            self.assertRaises(AttributeError,
                              bundling.compute_bundle_id,
                              t['object'],
                              t['fields'])
def _return_request(req):
"""A dummy api call that simply returns the request."""
return req
def _return_kwargs(dummy_req, **kwargs):
"""A dummy api call that simply returns its keyword arguments."""
return kwargs
def _make_a_test_task(api_call=_return_request):
    """Create a ``bundling.Task`` with fixed test ids and an empty bundle.

    *api_call* is the function the task will invoke when run.
    """
    empty_bundle = _Bundled([])
    return bundling.Task(api_call, 'an_id', 'field1', empty_bundle, {})
def _extend_with_n_elts(a_task, elt, n):
return a_task.extend([elt] * n)
def _raise_exc(dummy_req):
"""A dummy api call that raises an exception"""
raise ValueError('Raised in a test')
class TestTask(unittest2.TestCase):
    """Tests for ``bundling.Task``: extending, running and cancelling bundles."""

    def test_extend_increases_the_element_count(self):
        """element_count tracks how many elements have been added."""
        simple_msg = 'a simple msg'
        tests = [
            {
                'update': (lambda t: None),
                'message': 'no messages added',
                'want': 0
            }, {
                'update': (lambda t: t.extend([simple_msg])),
                'message': 'a single message added',
                'want': 1
            }, {
                'update': (lambda t: _extend_with_n_elts(t, simple_msg, 5)),
                'message': 'a 5 messages added',
                'want': 5
            }
        ]
        for t in tests:
            test_task = _make_a_test_task()
            t['update'](test_task)
            got = test_task.element_count
            message = 'bad message count when {}'.format(t['message'])
            self.assertEqual(got, t['want'], message)

    def test_extend_increases_the_request_byte_count(self):
        """request_bytesize is the sum of the sizes of the added elements."""
        simple_msg = 'a simple msg'
        tests = [
            {
                'update': (lambda t: None),
                'message': 'no messages added',
                'want': 0
            }, {
                'update': (lambda t: t.extend([simple_msg])),
                'message': 'a single bundle message',
                'want': len(simple_msg)
            }, {
                'update': (lambda t: _extend_with_n_elts(t, simple_msg, 5)),
                'message': '5 bundled messages',
                'want': 5 * len(simple_msg)
            }
        ]
        for t in tests:
            test_task = _make_a_test_task()
            t['update'](test_task)
            got = test_task.request_bytesize
            message = 'bad message count when {}'.format(t['message'])
            self.assertEqual(got, t['want'], message)

    def test_run_sends_the_bundle_elements(self):
        """run() empties the task and sets the result on the extend event."""
        simple_msg = 'a simple msg'
        tests = [
            {
                'update': (lambda t: None),
                'message': 'no messages added',
                'has_event': False,
                'count_before_run': 0,
                'want': []
            }, {
                'update': (lambda t: t.extend([simple_msg])),
                'message': 'a single bundled message',
                'has_event': True,
                'count_before_run': 1,
                'want': _Bundled([simple_msg])
            }, {
                'update': (lambda t: _extend_with_n_elts(t, simple_msg, 5)),
                'message': '5 bundle messages',
                'has_event': True,
                'count_before_run': 5,
                'want': _Bundled([simple_msg] * 5)
            }
        ]
        for t in tests:
            test_task = _make_a_test_task()
            # extend returns an event; None when 'update' adds nothing
            event = t['update'](test_task)
            self.assertEqual(test_task.element_count, t['count_before_run'])
            test_task.run()
            # running the task drains both counters
            self.assertEqual(test_task.element_count, 0)
            self.assertEqual(test_task.request_bytesize, 0)
            if t['has_event']:
                self.assertIsNotNone(
                    event,
                    'expected event for {}'.format(t['message']))
                got = event.result
                message = 'bad output when run with {}'.format(t['message'])
                self.assertEqual(got, t['want'], message)

    def test_run_adds_an_error_if_execution_fails(self):
        """A failing api call stores the exception as the event result."""
        simple_msg = 'a simple msg'
        test_task = _make_a_test_task(api_call=_raise_exc)
        event = test_task.extend([simple_msg])
        self.assertEqual(test_task.element_count, 1)
        test_task.run()
        self.assertEqual(test_task.element_count, 0)
        self.assertEqual(test_task.request_bytesize, 0)
        self.assertTrue(isinstance(event.result, ValueError))

    def test_calling_the_canceller_stops_the_element_from_getting_sent(self):
        """cancel() removes an element once; a second cancel returns False."""
        an_elt = 'a simple msg'
        another_msg = 'another msg'
        test_task = _make_a_test_task()
        an_event = test_task.extend([an_elt])
        another_event = test_task.extend([another_msg])
        self.assertEqual(test_task.element_count, 2)
        self.assertTrue(an_event.cancel())
        self.assertEqual(test_task.element_count, 1)
        # cancelling twice is a no-op that reports failure
        self.assertFalse(an_event.cancel())
        self.assertEqual(test_task.element_count, 1)
        test_task.run()
        self.assertEqual(test_task.element_count, 0)
        # only the non-cancelled element was sent
        self.assertEqual(_Bundled([another_msg]), another_event.result)
        self.assertFalse(an_event.is_set())
        self.assertIsNone(an_event.result)
# Descriptor that bundles on 'field1' with no demuxing of responses.
SIMPLE_DESCRIPTOR = BundleDescriptor('field1', [])
# Descriptor that demuxes the response back to individual events via 'field1'.
DEMUX_DESCRIPTOR = BundleDescriptor('field1', [], subresponse_field='field1')
class TestExecutor(unittest2.TestCase):
    """Tests for ``bundling.Executor`` scheduling and demuxing behaviour."""

    def test_api_calls_are_grouped_by_bundle_id(self):
        """Elements with different bundle ids accumulate independently."""
        an_elt = 'dummy message'
        api_call = _return_request
        bundle_ids = ['id1', 'id2']
        threshold = 5  # arbitrary
        options = BundleOptions(element_count_threshold=threshold)
        bundler = bundling.Executor(options)
        # fill each bundle to one element below its trigger threshold
        for an_id in bundle_ids:
            for i in range(threshold - 1):
                got_event = bundler.schedule(
                    api_call,
                    an_id,
                    SIMPLE_DESCRIPTOR,
                    _Bundled([an_elt])
                )
                self.assertIsNotNone(
                    got_event.canceller,
                    'missing canceller after element #{}'.format(i))
                self.assertFalse(
                    got_event.is_set(),
                    'event unexpectedly set after element #{}'.format(i))
                self.assertIsNone(got_event.result)
        # one more element per id reaches the threshold and triggers the call
        for an_id in bundle_ids:
            got_event = bundler.schedule(
                api_call,
                an_id,
                SIMPLE_DESCRIPTOR,
                _Bundled([an_elt])
            )
            self.assertIsNotNone(got_event.canceller,
                                 'missing expected canceller')
            self.assertTrue(
                got_event.is_set(),
                'event is not set after triggering element')
            self.assertEqual(_Bundled([an_elt] * threshold),
                             got_event.result)

    def test_each_event_has_exception_when_demuxed_api_call_fails(self):
        """A failing demuxed call delivers the exception to every event."""
        an_elt = 'dummy message'
        api_call = _raise_exc
        bundle_id = 'an_id'
        threshold = 5  # arbitrary, greater than 1
        options = BundleOptions(element_count_threshold=threshold)
        bundler = bundling.Executor(options)
        events = []
        for i in range(threshold - 1):
            got_event = bundler.schedule(
                api_call,
                bundle_id,
                DEMUX_DESCRIPTOR,
                _Bundled(['%s%d' % (an_elt, i)])
            )
            self.assertFalse(
                got_event.is_set(),
                'event unexpectedly set after element #{}'.format(i))
            self.assertIsNone(got_event.result)
            events.append(got_event)
        last_event = bundler.schedule(
            api_call,
            bundle_id,
            DEMUX_DESCRIPTOR,
            _Bundled(['%s%d' % (an_elt, threshold - 1)])
        )
        events.append(last_event)
        previous_event = None
        for event in events:
            # each schedule call got a distinct event object
            if previous_event:
                self.assertTrue(previous_event != event)
            self.assertTrue(event.is_set(),
                            'event is not set after triggering element')
            self.assertTrue(isinstance(event.result, ValueError))
            previous_event = event

    def test_each_event_has_its_result_from_a_demuxed_api_call(self):
        """Demuxing routes each caller's own subset of the response back."""
        an_elt = 'dummy message'
        api_call = _return_request
        bundle_id = 'an_id'
        threshold = 5  # arbitrary, greater than 1
        options = BundleOptions(element_count_threshold=threshold)
        bundler = bundling.Executor(options)
        events = []
        # send 3 groups of elements of different sizes in the bundle
        for i in range(1, 4):
            got_event = bundler.schedule(
                api_call,
                bundle_id,
                DEMUX_DESCRIPTOR,
                _Bundled(['%s%d' % (an_elt, i)] * i)
            )
            events.append(got_event)
        previous_event = None
        for i, event in enumerate(events):
            index = i + 1
            if previous_event:
                self.assertTrue(previous_event != event)
            self.assertTrue(event.is_set(),
                            'event is not set after triggering element')
            self.assertEqual(event.result,
                             _Bundled(['%s%d' % (an_elt, index)] * index))
            previous_event = event

    def test_each_event_has_same_result_from_mismatched_demuxed_api_call(self):
        """If the response size mismatches the request, every event gets it whole."""
        an_elt = 'dummy message'
        mismatched_result = _Bundled([an_elt, an_elt])
        bundle_id = 'an_id'
        threshold = 5  # arbitrary, greater than 1
        options = BundleOptions(element_count_threshold=threshold)
        bundler = bundling.Executor(options)
        events = []
        # send 3 groups of elements of different sizes in the bundle
        for i in range(1, 4):
            got_event = bundler.schedule(
                lambda x: mismatched_result,
                bundle_id,
                DEMUX_DESCRIPTOR,
                _Bundled(['%s%d' % (an_elt, i)] * i)
            )
            events.append(got_event)
        previous_event = None
        for i, event in enumerate(events):
            if previous_event:
                self.assertTrue(previous_event != event)
            self.assertTrue(event.is_set(),
                            'event is not set after triggering element')
            self.assertEqual(event.result, mismatched_result)
            previous_event = event

    def test_schedule_passes_kwargs(self):
        """Keyword options given to schedule reach the underlying api call."""
        an_elt = 'dummy_msg'
        options = BundleOptions(element_count_threshold=1)
        bundle_id = 'an_id'
        bundler = bundling.Executor(options)
        event = bundler.schedule(
            _return_kwargs,
            bundle_id,
            SIMPLE_DESCRIPTOR,
            _Bundled([an_elt]),
            {'an_option': 'a_value'}
        )
        self.assertEqual('a_value',
                         event.result['an_option'])
class TestExecutor_ElementCountTrigger(unittest2.TestCase):
    """Tests that the element-count threshold triggers the bundled call."""

    def test_api_call_not_invoked_until_threshold(self):
        """Events stay unset until the element count reaches the threshold."""
        an_elt = 'dummy message'
        an_id = 'bundle_id'
        api_call = _return_request
        threshold = 3  # arbitrary
        options = BundleOptions(element_count_threshold=threshold)
        bundler = bundling.Executor(options)
        for i in range(threshold):
            got_event = bundler.schedule(
                api_call,
                an_id,
                SIMPLE_DESCRIPTOR,
                _Bundled([an_elt])
            )
            self.assertIsNotNone(
                got_event.canceller,
                'missing canceller after element #{}'.format(i))
            if i + 1 < threshold:
                # below threshold: nothing sent yet
                self.assertFalse(got_event.is_set())
                self.assertIsNone(got_event.result)
            else:
                # threshold reached: the whole bundle was sent at once
                self.assertTrue(got_event.is_set())
                self.assertEqual(_Bundled([an_elt] * threshold),
                                 got_event.result)
class TestExecutor_RequestByteTrigger(unittest2.TestCase):
def test_api_call_not_invoked_until_threshold(self):
an_elt = 'dummy message'
an_id = 'bundle_id'
api_call = _return_request
elts_for_threshold = 3
threshold = elts_for_threshold * len(an_elt) # arbitrary
options = BundleOptions(request_byte_threshold=threshold)
bundler = bundling.Executor(options)
for i in range(elts_for_threshold):
got_event = bundler.schedule(
api_call,
an_id,
SIMPLE_DESCRIPTOR,
_Bundled([an_elt])
)
self.assertIsNotNone(
got_event.canceller,
'missing canceller after element | |
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
Test suite for the util.py module.
The tests must be linked with a function space class object in the setUp method:
to run them, use:
from esys.bruce import Brick
class Test_utilOnBruce(Test_util_no_tagged_data):
def setUp(self):
self.domain = Brick(10,10,13)
self.functionspace = ContinuousFunction(self.domain)
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test_utilOnBruce))
unittest.TextTestRunner(verbosity=2).run(suite)
This test assumes that samples with x_0 coordinate 0 are tagged with 1 and all samples tagged with 1 have x_0
coordinate 0.
:note: at this stage this test will not pass as it tests for functionality that has not been implemented yet. It also
does not test the full functionality of util.py yet.
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
"""
__author__="<NAME>, <EMAIL>"
import esys.escriptcore.utestselect as unittest
import numpy
from esys.escript import *
from test_util_base import Test_util_base, Test_util_values
from test_util_reduction_new import Test_util_reduction_new
from test_util_unary_new import Test_util_unary_new
from test_util_binary_new import Test_util_binary_new
from test_util_binary_leftover import Test_util_binary_leftover
## these aspects are test in the _new tests
#from test_util_overloaded_binary_no_tagged_data import Test_util_overloaded_binary_no_tagged_data
#from test_util_overloaded_binary_with_tagged_data import Test_util_overloaded_binary_with_tagged_data
#from test_util_unary_no_tagged_data import Test_util_unary_no_tagged_data
#from test_util_unary_with_tagged_data import Test_util_unary_with_tagged_data
#from test_util_binary_no_tagged_data import Test_util_binary_no_tagged_data
#from test_util_binary_with_tagged_data import Test_util_binary_with_tagged_data
from test_util_spatial_functions1 import Test_Util_SpatialFunctions_noGradOnBoundary_noContact
from test_util_spatial_functions2 import Test_Util_SpatialFunctions_noGradOnBoundary
from test_util_spatial_functions3 import Test_Util_SpatialFunctions
from test_util_slicing_no_tagged_data import Test_util_slicing_no_tagged_data
from test_util_slicing_with_tagged_data import Test_util_slicing_with_tagged_data
class Test_util_reduction(Test_util_reduction_new):
    """Tests for the reduction operations Lsup, sup and inf for all data types."""
    pass
class Test_util_unary(Test_util_unary_new):
    """All unary operation tests."""
    pass
class Test_util_binary(Test_util_binary_new, Test_util_binary_leftover):
    """
    Tests for all binary operations.
    """
    pass
## Testing of these ops is now in Test_util_binary
#class Test_util_overloaded_binary(Test_util_overloaded_binary_no_tagged_data,Test_util_overloaded_binary_with_tagged_data):
#"""test for all overloaded operation"""
#pass
class Test_util(Test_util_unary_new, Test_util_reduction_new, Test_util_binary):
    """All util tests combined: unary, reduction and binary operations."""
    pass
class Test_util_overloaded_binary_still_failing(Test_util_base):
"""
these overloaded operations still fail!
- wrong return value of Data binaries (Mantis 0000054)
"""
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-4.93686078973,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([0.51662736235119944, 2.8171396846123073])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-4.4202334273802917, -2.1197211051191838]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(-2.22764991169,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[2.0746979587719538, 0.99992890307042437, -2.3128078094931848, -4.0103712739722654,
4.8853529531011013],
[0.09856857946648212, 0.73520899085847624, -3.6585265509750844, 3.0095320582437939, 3.4125902906059444],
[1.4894150898632059,
-1.4124339049368793, 1.5397397961722188, 4.8841402613336111, 1.1241155288598881], [2.8283598865494408,
1.5980765295723476,
-1.0022373011497274, -2.0622178471715067, 4.9699555072046042]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.15295195292152819, -1.2277210086230577, -4.5404577211866668, -6.2380211856657475,
2.6577030414076193],
[-2.1290813322269999, -1.4924409208350058, -5.8861764626685664, 0.78188214655031185, 1.1849403789124624],
[-0.73823482183027611,
-3.6400838166303613, -0.68791011552126324, 2.6564903496401291, -1.103534382833594], [0.60070997485595878,
-0.62957338212113445,
-3.2298872128432095, -4.2898677588649887, 2.7423055955111222]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(-4.67318656609,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[3.9409337165894076, 1.6101568824796857], [1.2441782896909706, 1.2872758759353298]],
[[4.022494973005406,
-2.758155583474049], [1.8311643900357311, 4.0940647266277157]], [[2.5378127449303243, 0.063283784588161751],
[4.5495644157820809,
2.8673770080506742]], [[-0.93484143473477577, 4.914438575705228], [-1.951066895455166, -1.2021165219313259]],
[[-0.4220608661301819, -4.9682501775464418], [0.98338081352961559, 3.4054674805751066]], [[3.9967556325744127,
-4.7659141789100659],
[0.34265275409881024, -0.25226631819007572]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-0.73225284950136693, -3.0630296836110888], [-3.429008276399804, -3.3859106901554448]],
[[-0.6506915930853685, -7.4313421495648235], [-2.8420221760550435, -0.57912183946305884]],
[[-2.1353738211604503,
-4.6099027815026128], [-0.12362215030869361, -1.8058095580401003]], [[-5.6080280008255503,
0.24125200961445348],
[-6.6242534615459405, -5.8753030880221004]], [[-5.0952474322209564, -9.6414367436372164],
[-3.6898057525611589,
-1.2677190855156679]], [[-0.67643093351636185, -9.4391007450008395], [-4.3305338119919643,
-4.9254528842808503]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(4.16645075056,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[1.5917180025121436, -0.50082927718401749, 0.71261274386013618, 2.4216324938382936],
[2.5988764746053095,
0.15985324844397741, -2.1952754277135025, -2.1102730593254035], [4.7816092243808672, -3.1240954141765496,
4.0831220997721331, 2.4301203557965216]], [[3.4691826046114969, -2.4961081730013177, -4.9623977358253111,
2.2652744558918698],
[0.41830032681767193, -3.2186897293959649, -4.1590967541108324, -1.7789994379155196], [-0.17901184206486764,
-0.85223673399918809, 1.2515459884606104, -4.530305999148645]]], [[[-4.9028671865135838, 3.9106181278983012,
0.69716765577825246, 4.8537569187159395], [-2.8912890367657318, -4.8177854256421764, -4.3303142092509415,
-0.99481907472179198], [-1.2640734452454305, 4.8028129765204639, -2.5491771511234962, 3.2550469051981921]],
[[2.0572417475748761, 3.7392706991121187, 4.5778678295843704, 3.6658188498258486], [-2.7069743698567206,
-2.684769111460461, -3.0941141983763156, -2.1180719361316589], [-1.4744678905986119, 1.926687036555828,
2.2206999030392947, 0.72956973127168734]]], [[[-2.8290294475300151, -3.1467788245496631, 3.6471044178360348,
3.5237454065241209], [-1.6165850845596652, 1.2437746199742081, -2.8022357261752004, -1.9652183524467781],
[-2.3842126490032092, 3.7068998814751613, -1.389546865398994, -1.7153758702474589]], [[-1.0746517242894815,
-4.3575382718398723, 0.93160793707280121, 1.4002531109392731], [-1.5745690740270168, -3.4394046042905124,
4.2641517580348793, -1.7620679696550843], [-4.2559205627171135, 2.1912319337278863, 1.1987265764805723,
-3.2957352772592809]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[5.7581687530761378, 3.6656214733799768, 4.8790634944241305, 6.5880832444022879],
[6.7653272251693037, 4.3263039990079717, 1.9711753228504918, 2.0561776912385907], [8.9480599749448615,
1.0423553363874447, 8.2495728503361274, 6.5965711063605159]], [[7.6356333551754911, 1.6703425775626766,
-0.7959469852613168, 6.4317252064558641], [4.5847510773816662, 0.94776102116802941, 0.0073539964531619262,
2.3874513126484747], [3.9874389084991266, 3.3142140165648062, 5.4179967390246047, -0.36385524858465068]]],
[[[-0.7364164359495895, 8.0770688784622955, 4.8636184063422467, 9.0202076692799338], [1.2751617137982625,
-0.6513346750781821, -0.16386345868694718, 3.1716316758422023], [2.9023773053185637, 8.9692637270844582,
1.6172735994404981, 7.4214976557621863]], [[6.2236924981388704, 7.905721449676113, 8.7443185801483647,
7.8322696003898429], [1.4594763807072737, 1.4816816391035332, 1.0723365521876786, 2.0483788144323354],
[2.6919828599653823, 6.0931377871198222, 6.3871506536032889, 4.8960204818356816]]], [[[1.3374213030339792,
1.0196719260143312, 7.8135551684000291, 7.6901961570881152], [2.5498656660043291, 5.4102253705382024,
1.3642150243887938, 2.2012323981172162], [1.7822381015607851, 7.8733506320391555, 2.7769038851650003,
2.4510748803165354]], [[3.0917990262745128, -0.19108752127587803, 5.0980586876367955, 5.5667038615032673],
[2.5918816765369774, 0.72704614627348185, 8.4306025085988736, 2.40438278090891], [-0.089469812153119221,
6.3576826842918805, 5.3651773270445666, 0.87071547330471333]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([3.8454947431609945, 3.4801848055393254]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.181985677208)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([4.0274804203691783, 3.6621704827475092]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([2.6719646801005306, 4.0262173014652003]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([3.7355891147806837, -3.0309968912239551])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([6.4075537948812142, 0.99522041024124519]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[2.209887477038702, 2.087043312051243, 3.7254247294014622,
-3.7510652436671732, 0.70343608099575317], [4.1654611738215745, 1.5418518980850271,
2.7730022594684423, 3.386030420596251, 1.2758288509710365], [2.2174938185138764,
-1.244837837360393, 2.2331288285078887, -1.1442348969501834, 1.9394801392868004],
[0.68612447219195705, 0.7127527031233436, -3.6346644102130776, 2.0671128943191714,
3.7445028703597156]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.82316401579)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.0330514928326018, 6.9102073278451428, 8.5485887451953619,
1.0720987721267266, 5.5266000967896529], [8.9886251896154743, 6.3650159138789268,
7.596166275262342, 8.2091944363901508, 6.0989928667649362], [7.0406578343077761,
3.5783261784335068, 7.0562928443017885, 3.6789291188437163, 6.7626441550807002],
[5.5092884879858568, 5.5359167189172434, 1.1884996055808221, 6.8902769101130712,
8.5676668861536154]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_add_overloaded_constData_rank2_Symbol_rank2(self):
      """Generated regression test: constant Data (rank 2) + Symbol (rank 2) via the overloaded '+' operator."""
      arg0=Data(numpy.array([[-3.62961836797558, 4.0323249470469893, -2.4833229912823516,
      -0.0081902035785272886, -0.26448613257378906], [2.0867535529248489, 0.049446344294963751,
      4.4906317789174501, 2.6121865600043499, 1.3687146632565392], [4.2509170325103511,
      2.9845191554148567, -0.9329820582137387, -0.58236994049271118, -3.4448732067194388],
      [-2.3231599587033402, 1.6550934434842866, -4.5990521452319584, -2.1470268566500152,
      -3.9698084155531008]]),self.functionspace)
      arg1=Symbol(shape=(4, 5))
      res=arg0+arg1
      s1=numpy.array([[3.3234017918244003, 3.3386199217996175, -2.5928786077225316,
      -4.1429140632213803, 0.42204291369978719], [3.4123580113357495, -3.9076190537235664,
      1.8779298531672159, 0.98377543853039562, -4.9365820051249267], [4.5252395032935961,
      -4.8193051910732096, 1.060979071451845, -3.2927325266544871, -3.3828356655691971],
      [-4.6411804903406182, -0.42921544747540707, -2.4541073523344323, -0.70845691989162329,
      -1.2357505826155588]])
      sub=res.substitute({arg1:s1})
      ref=Data(numpy.array([[-0.3062165761511797, 7.3709448688466068, -5.0762015990048832,
      -4.1511042667999076, 0.15755678112599814], [5.4991115642605983, -3.8581727094286027,
      6.3685616320846661, 3.5959619985347455, -3.5678673418683875], [8.7761565358039473,
      -1.834786035658353, 0.12799701323810631, -3.8751024671471983, -6.8277088722886354],
      [-6.9643404490439584, 1.2258779960088795, -7.0531594975663907, -2.8554837765416385,
      -5.2055589981686596]]),self.functionspace)
      self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
      self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
      self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_add_overloaded_constData_rank3_Symbol_rank0(self):
      """Generated regression test: constant Data (rank 3) + Symbol (rank 0, broadcast) via the overloaded '+' operator."""
      arg0=Data(numpy.array([[[-2.0819775543023136, 4.4438294149957258], [1.203494127071604,
      1.3934659764012478]], [[-1.7207192546012995, 1.128687542370864], [1.013953229943537,
      2.0535582502969056]], [[-1.8482126685735398, 0.64499519705235819],
      [-4.1200947648310313, 3.8041018736261574]], [[-0.12876390427677542, -0.26859118353213773],
      [-2.8945993824974847, -3.3476923883525944]], [[3.1332107854705562, -4.6334666373330595],
      [3.0499420638074994, -2.7959034777693104]], [[4.726734207260332, -1.3724501610660034],
      [3.3499737674080023, -2.515294322458935]]]),self.functionspace)
      arg1=Symbol(shape=())
      res=arg0+arg1
      s1=numpy.array(0.860178486532)
      sub=res.substitute({arg1:s1})
      ref=Data(numpy.array([[[-1.2217990677700952, 5.3040079015279442], [2.0636726136038224,
      2.2536444629334662]], [[-0.86054076806908109, 1.9888660289030824], [1.8741317164757554,
      2.913736736829124]], [[-0.98803418204132143, 1.5051736835845766], [-3.2599162782988129,
      4.6642803601583758]], [[0.73141458225544298, 0.59158730300008067], [-2.0344208959652663,
      -2.487513901820376]], [[3.9933892720027746, -3.7732881508008411], [3.9101205503397178,
      -1.935724991237092]], [[5.5869126937925504, -0.51227167453378497], [4.2101522539402207,
      -1.6551158359267166]]]),self.functionspace)
      self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
      self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
      self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_add_overloaded_constData_rank3_Symbol_rank3(self):
      """Generated regression test: constant Data (rank 3) + Symbol (rank 3) via the overloaded '+' operator."""
      arg0=Data(numpy.array([[[-1.849788129717993, 0.64693319038907493], [3.0379670344950327,
      0.80277076526299229]], [[2.4995340022105639, -4.3955703049125949], [0.58331276679079203,
      0.044119077451267863]], [[2.2979922792046947, 1.6054844683234073], [0.50524258350986084,
      -3.5539312710422779]], [[-1.1980433912188793, -2.6450000406046001], [-2.4128326188310121,
      0.80678465051263526]], [[-2.9963692865064209, -1.0152803020104519], [-0.21931259441936035,
      -1.153119362615751]], [[-4.2927186206837717, 0.4561872009236847], [3.0860876046130041,
      -0.78568544768378068]]]),self.functionspace)
      arg1=Symbol(shape=(6, 2, 2))
      res=arg0+arg1
      s1=numpy.array([[[-3.4985389035935222, 1.8888458641158987], [-4.2891085749380489,
      2.8296217607019845]], [[-0.8200921678141917, 4.4359194831012676],
      [-4.6185751325042244, 0.16520675598470014]], [[-2.801157092531934, 3.6231020804204928],
      [1.5439760747845899, 2.0378140868272894]], [[0.99864930993784018, 3.369884315459073],
      [4.399815205976239, -4.9546136700941936]], [[1.6240932313892289, -3.4517363344048615],
      [2.8668483027947236, 1.1624090061600336]], [[2.6364367974081624, 2.628371373764919],
      [-2.5877409052653833, -1.29236451403668]]])
      sub=res.substitute({arg1:s1})
      ref=Data(numpy.array([[[-5.3483270333115147, 2.5357790545049737], [-1.2511415404430162,
      3.6323925259649767]], [[1.6794418343963722, 0.040349178188672674],
      [-4.0352623657134323, 0.209325833435968]], [[-0.50316481332723928, 5.2285865487439001],
      [2.0492186582944507, -1.5161171842149885]], [[-0.19939408128103908, 0.72488427485447282],
      [1.9869825871452269, -4.1478290195815584]], [[-1.372276055117192, -4.4670166364153134],
      [2.6475357083753632, 0.0092896435442826331]], [[-1.6562818232756094,
      3.0845585746886037], [0.49834669934762088, -2.0780499617204606]]]),self.functionspace)
      self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
      self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
      self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   def test_add_overloaded_constData_rank4_Symbol_rank0(self):
      """Generated regression test: constant Data (rank 4) + Symbol (rank 0, broadcast) via the overloaded '+' operator."""
      arg0=Data(numpy.array([[[[-0.026017904532606551, -0.80192450547405958,
      0.93785799257835656, -4.4900007911078319], [-1.8444162073720949,
      1.2059856695600812, 1.8326324480310756, 3.3745782356451564],
      [3.0929324433706693, -0.94197156488767142, -2.3469684397851207,
      -4.8976052662192613]], [[1.2658444546015346, 3.0389250549456399,
      -2.567254770133963, 3.7513728753285314], [-0.10225306211433605,
      -0.34121316520335299, -2.8745573331597321, -0.73976781968982142],
      [4.6114590072566681, 3.5325642767850063, 2.1587079910040661,
      3.8644723652636905]]], [[[-2.5953113243103623, 0.6437882672443429,
      4.5677362343759853, 3.4108524985046262], [2.9904338528780352,
      0.73113299006492127, 2.4253724263400445, 3.8646536702562031],
      [-1.2545053686514152, -4.2675706218911706, -3.6576679389702105,
      -0.29502287354943402]], [[0.9550527228483654, 2.9537233833481267,
      -2.6904009310953283, 1.5998857010519698], [-3.7171702199982004,
      -1.1578306702024044, 1.764070139728485, -1.1506068782808967],
      [1.5727320181060982, 0.18468074769418674, 3.3262967055395372,
      -1.2208265816075849]]], [[[-0.25003967903418278, -2.603663543909648,
      4.6824047463125531, 1.0968919539473987], [1.3471700099604398,
      -3.8321880437450218, -4.2809409903460676, 1.2933005361204906],
      [-2.857251250328674, 3.6768205829450178, -2.7999953058490643,
      2.1117422072666692]], [[-2.1994223710236427, 3.7669030216280923,
      -3.5232105054852991, -3.7071480752824462], [-0.35952695279389246,
      2.5451704526750873, -4.2842310996736144, -1.3813503044378783],
      [-2.5647173415905145, 4.7437501634141572, -4.2234318870342245,
      2.1862042652792866]]]]),self.functionspace)
      arg1=Symbol(shape=())
      res=arg0+arg1
      s1=numpy.array(0.33323555487)
      sub=res.substitute({arg1:s1})
      ref=Data(numpy.array([[[[0.30721765033724147, -0.46868895060421156,
      1.2710935474482046, -4.1567652362379839], [-1.5111806525022469,
      1.5392212244299293, 2.1658680029009236, 3.7078137905150044],
      [3.4261679982405173, -0.6087360100178234, -2.0137328849152727,
      -4.5643697113494133]], [[1.5990800094713826, 3.3721606098154879,
      -2.234019215264115, 4.0846084301983794], [0.23098249275551197,
      -0.0079776103335049697, -2.541321778289884, -0.4065322648199734],
      [4.9446945621265161, 3.8657998316548543, 2.4919435458739141,
      4.1977079201335386]]], [[[-2.2620757694405143, 0.97702382211419092,
      4.9009717892458333, 3.7440880533744743], [3.3236694077478832,
      1.0643685449347693, 2.7586079812098925, 4.1978892251260511],
      [-0.92126981378156714, -3.9343350670213226, -3.3244323841003625,
      0.038212681320413999]], [[1.2882882777182134, 3.2869589382179747,
      -2.3571653762254803, 1.9331212559218178], [-3.3839346651283524,
      -0.82459511533255636, 2.097305694598333, -0.81737132341104868],
      [1.9059675729759462, 0.51791630256403476, 3.6595322604093852,
      -0.88759102673773693]]], [[[0.083195875835665234, -2.2704279890398,
      5.0156403011824011, 1.4301275088172467], [1.6804055648302878,
      -3.4989524888751737, -3.9477054354762195, 1.6265360909903386],
      [-2.524015695458826, 4.0100561378148658, -2.4667597509792163,
      2.4449777621365172]], [[-1.8661868161537947, 4.1001385764979403,
      -3.1899749506154511, -3.3739125204125981], [-0.026291397924044446,
      2.8784060075449354, -3.9509955448037664, -1.0481147495680303],
      [-2.2314817867206664, 5.0769857182840052, -3.8901963321643764,
      2.5194398201491346]]]]),self.functionspace)
      self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
      self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
      self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[1.6204760394819004, -0.95393695229398112, | |
f(\beta) = l * (0.5 * ||\beta||²_2 - c),
where ||\beta||²_2 is the squared L2 loss function. The constrained
version has the form
0.5 * ||\beta||²_2 <= c.
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
c : Float. The limit of the constraint. The function is feasible if
0.5 * ||\beta||²_2 <= c. The default value is c=0, i.e. the
default is a regularised formulation.
penalty_start : Non-negative integer. The number of columns, variables
etc., to be exempt from penalisation. Equivalently, the first index
to be penalised. Default is 0, all columns are included.
"""
    def __init__(self, l=1.0, c=0.0, penalty_start=0):
        # Out-of-range inputs are silently coerced rather than raising:
        # negative multipliers clamp to 0.0, negative start indices to 0.
        self.l = max(0.0, float(l))
        self.c = float(c)
        self.penalty_start = max(0, int(penalty_start))
def f(self, beta):
"""Function value.
From the interface "Function".
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
return self.l * (0.5 * np.dot(beta_.T, beta_)[0, 0] - self.c)
def grad(self, beta):
"""Gradient of the function.
From the interface "Gradient".
Example
-------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2Squared
>>>
>>> np.random.seed(42)
>>> beta = np.random.rand(100, 1)
>>> l2 = L2Squared(l=3.14159, c=2.71828)
>>> np.linalg.norm(l2.grad(beta)
... - l2.approx_grad(beta, eps=1e-4)) < 5e-10
True
>>>
>>> l2 = L2Squared(l=3.14159, c=2.71828, penalty_start=5)
>>> np.linalg.norm(l2.grad(beta)
... - l2.approx_grad(beta, eps=1e-4)) < 5e-10
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
grad = np.vstack((np.zeros((self.penalty_start, 1)),
self.l * beta_))
else:
beta_ = beta
grad = self.l * beta_
# approx_grad = utils.approx_grad(self.f, beta, eps=1e-4)
# print maths.norm(grad - approx_grad)
return grad
    def L(self):
        """Lipschitz constant of the gradient.

        The gradient l * beta is Lipschitz continuous with constant l.
        """
        return self.l
def prox(self, beta, factor=1.0, **kwargs):
"""The corresponding proximal operator.
From the interface "ProximalOperator".
"""
l = self.l * factor
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
if self.penalty_start > 0:
prox = np.vstack((beta[:self.penalty_start, :],
beta_ * (1.0 / (1.0 + l))))
else:
prox = beta_ * (1.0 / (1.0 + l))
return prox
def proj(self, beta, **kwargs):
"""The corresponding projection operator.
From the interface "ProjectionOperator".
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2Squared
>>> np.random.seed(42)
>>> l2 = L2Squared(c=0.3183098861837907)
>>> y1 = l2.proj(np.random.rand(100, 1) * 2.0 - 1.0)
>>> 0.5 * np.linalg.norm(y1) ** 2 # doctest: +ELLIPSIS
0.31830988...
>>> y2 = np.random.rand(100, 1) * 2 - 1.0
>>> l2.feasible(y2)
False
>>> l2.feasible(l2.proj(y2))
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
sqnorm = np.dot(beta_.T, beta_)[0, 0]
# Feasible?
if 0.5 * sqnorm <= self.c:
return beta
# The correction by eps is to nudge the squared norm just below
# self.c.
eps = consts.FLOAT_EPSILON
if self.penalty_start > 0:
proj = np.vstack((beta[:self.penalty_start, :],
beta_ * np.sqrt((2.0 * self.c - eps) / sqnorm)))
else:
proj = beta_ * np.sqrt((2.0 * self.c - eps) / sqnorm)
return proj
def feasible(self, beta):
"""Feasibility of the constraint.
From the interface "Constraint".
Parameters
----------
beta : Numpy array. The variable to check for feasibility.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.penalties import L2Squared
>>> np.random.seed(42)
>>> l2 = L2Squared(c=0.3183098861837907)
>>> y1 = 0.1 * (np.random.rand(50, 1) * 2.0 - 1.0)
>>> l2.feasible(y1)
True
>>> y2 = 10.0 * (np.random.rand(50, 1) * 2.0 - 1.0)
>>> l2.feasible(y2)
False
>>> y3 = l2.proj(50.0 * np.random.rand(100, 1) * 2.0 - 1.0)
>>> l2.feasible(y3)
True
"""
if self.penalty_start > 0:
beta_ = beta[self.penalty_start:, :]
else:
beta_ = beta
sqnorm = np.dot(beta_.T, beta_)[0, 0]
return 0.5 * sqnorm <= self.c + consts.FLOAT_EPSILON
class L1L2Squared(properties.AtomicFunction,
                  properties.Penalty,
                  properties.ProximalOperator):
    """The proximal operator of the L1 function with an L2 constraint.
    The function is
        f(x) = l1 * ||x||_1 + Indicator(||x||²_2 <= l2),
    where ||.||_1 is the L1 norm and ||.||²_2 is the squared L2 norm.
    Parameters
    ----------
    l1 : Non-negative float. The Lagrange multiplier, or regularisation
            constant, of the L1 norm penalty.
    l2 : Non-negative float. The limit of the constraint of the squared L2
            norm penalty.
    penalty_start : Non-negative integer. The number of columns, variables
            etc., to be exempt from penalisation. Equivalently, the first index
            to be penalised. Default is 0, all columns are included.
    """
    def __init__(self, l1=1.0, l2=1.0, penalty_start=0):
        # Negative inputs are clamped rather than raising.
        self.l1 = max(0.0, float(l1))
        self.l2 = max(0.0, float(l2))
        self.penalty_start = max(0, int(penalty_start))
    def f(self, beta):
        """Function value.
        """
        if self.penalty_start > 0:
            beta_ = beta[self.penalty_start:, :]
        else:
            beta_ = beta
        # Outside the L2 ball the indicator part makes the value infinite.
        if maths.norm(beta_) ** 2 > self.l2:
            return consts.FLOAT_INF
        return self.l1 * maths.norm1(beta_)
    def prox(self, beta, factor=1.0, **kwargs):
        """The corresponding proximal operator.
        From the interface "ProximalOperator".
        """
        if self.penalty_start > 0:
            beta_ = beta[self.penalty_start:, :]
        else:
            beta_ = beta
        l1 = self.l1 * factor
        # Thresholding step for the L1 part (zeroes entries with |b| <= l1).
        prox = (np.abs(beta_) > l1) * (beta_ - l1 * np.sign(beta_ - l1))
        # NOTE(review): this rescales *unconditionally* so that
        # ||prox||²_2 == l2, and divides by zero if thresholding zeroed
        # every coefficient — confirm whether scaling should only apply
        # when the constraint is violated.
        prox *= np.sqrt(self.l2 / np.dot(prox.T, prox)[0, 0])
        if self.penalty_start > 0:
            # Re-attach the exempt leading rows unchanged.
            prox = np.vstack((beta[:self.penalty_start, :], prox))
        return prox
class QuadraticConstraint(properties.AtomicFunction,
                          properties.Gradient,
                          properties.Penalty,
                          properties.Constraint):
    """The proximal operator of the quadratic function
        f(x) = l * (x'Mx - c),
    or
        f(x) = l * (x'M'Nx - c),
    where M or M'N is a given symmetric positive-definite matrix. The
    constrained version has the form
        x'Mx <= c,
    or
        x'M'Nx <= c
    if two matrices are given.
    Parameters
    ----------
    l : Non-negative float. The Lagrange multiplier, or regularisation
            constant, of the function.
    c : Float. The limit of the constraint. The function is feasible if
            x'Mx <= c. The default value is c=0, i.e. the default is a
            regularisation formulation.
    M : Numpy array. The given positive definite matrix. It is assumed that
            the first penalty_start columns must be excluded.
    N : Numpy array. The second matrix if the factors of the positive-definite
            matrix are given. It is assumed that the first penalty_start
            columns must be excluded.
    penalty_start : Non-negative integer. The number of columns, variables
            etc., to be exempt from penalisation. Equivalently, the first index
            to be penalised. Default is 0, all columns are included.
    """
    def __init__(self, l=1.0, c=0.0, M=None, N=None, penalty_start=0):
        self.l = max(0.0, float(l))
        self.c = float(c)
        # Bug fix: penalty_start must be assigned *before* it is used to
        # slice M and N. The original read self.penalty_start first, which
        # raised AttributeError whenever a non-zero penalty_start was given.
        self.penalty_start = max(0, int(penalty_start))
        if self.penalty_start > 0:
            # Exclude the unpenalised leading columns. Guard against None
            # so the default M=None / N=None arguments still work.
            self.M = None if M is None else M[:, self.penalty_start:]
            self.N = None if N is None else N[:, self.penalty_start:]
        else:
            self.M = M
            self.N = N
    def f(self, beta):
        """Function value.
        """
        if self.penalty_start > 0:
            beta_ = beta[self.penalty_start:, :]
        else:
            beta_ = beta
        if self.N is None:
            # f(x) = l * (x'Mx - c)
            val = self.l * (np.dot(beta_.T, self.M.dot(beta_)) - self.c)
        else:
            # f(x) = l * (x'M'Nx - c)
            val = self.l * (np.dot(beta_.T, self.M.T.dot(self.N.dot(beta_)))
                            - self.c)
        return val
    def grad(self, beta):
        """Gradient of the function.
        From the interface "Gradient".
        """
        if self.penalty_start > 0:
            beta_ = beta[self.penalty_start:, :]
        else:
            beta_ = beta
        if self.N is None:
            grad = (2.0 * self.l) * self.M.dot(beta_)
        else:
            grad = (2.0 * self.l) * self.M.T.dot(self.N.dot(beta_))
        if self.penalty_start > 0:
            # Exempt variables have zero gradient.
            grad = np.vstack((np.zeros((self.penalty_start, 1)), grad))
        return grad
    def feasible(self, beta):
        """Feasibility of the constraint.
        From the interface "Constraint".
        """
        if self.penalty_start > 0:
            beta_ = beta[self.penalty_start:, :]
        else:
            beta_ = beta
        if self.N is None:
            bMb = np.dot(beta_.T, self.M.dot(beta_))
        else:
            bMb = np.dot(beta_.T, self.M.T.dot(self.N.dot(beta_)))
        return bMb <= self.c
class GraphNet(QuadraticConstraint,
properties.LipschitzContinuousGradient):
"""The proximal operator of the GraphNet function.
f(x) = l * sum_{(i, j) \in G}(b_i - b_j)^2,
Where nodes (i, j) are connected in the Graph G and A is a (sparse) matrix
of P columns where each line contains a pair of (-1, +1) for 2 connected
nodes, and zero elsewhere.
f(x) = l * x'A'Ax.
= l * sum((Ax)^2)
Parameters
----------
l : Non-negative float. The Lagrange multiplier, or regularisation
constant, of the function.
A : Numpy or (usually) scipy.sparse array. The a matrix, made of (-1, +1),
that computes | |
<gh_stars>0
'''
application object and main UI helper classes
'''
import os, sys, time, string, traceback
import urwid
from urwidtrees.widgets import TreeBox
from urwidtrees.tree import Tree
from urwidtrees.decoration import CollapsibleIndentedTree as DecoratedTree
from urwid.util import is_wide_char
from urwidtools import dialog, application, widgets
from hub import hub
from config import config
class MyTreeBox(TreeBox):
    '''
    TreeBox subclass that layers navigation history, first-letter jumps
    among siblings, shortcuts to named nodes, and double-click handling
    on top of the urwidtrees TreeBox.
    '''
    # maximum number of nodes retained in the navigation history
    max_history = 20
    # method names exported to the hub in __init__ (parsed via split())
    exposed = '''
    toggle_view
    toggle_refs
    edit_item
    history_back
    history_forward
    goto_clipboard
    goto_references
    goto_search
    goto_trash
    focus_element
    '''
    def __init__(self, dtree, *a, **kw):
        '''
        get hold of that blasted directory tree manually.
        also, export nicely wrapped methods to broker, so that they can also be
        invoked from within dialogs without much trouble.
        '''
        self._dtree = dtree
        # last element focused via mouse; used for double-click detection
        self.current_focus_element = None
        # timestamp (seconds) of previous mouse press; start far in the past
        self.last_mouse_press = -3600
        self.__super.__init__(*a, **kw)
        # let us keep the nodes we visit in history.
        # if we do go back in history, let us put the nodes
        # to the 'right' into the future.
        #
        # visited nodes are the ones we actually do work on, such
        # as viewing details. Does this include viewing folders?
        self._history = []
        self._future = []
        for key in self.exposed.strip().split():
            hub.register(key, getattr(self, key))
    def delete_from_history(self, node):
        '''
        nodes can be deleted from the database, or also
        revisited de-novo, in which case we want to
        clear out their previous occurrences.
        '''
        self._history = [ n for n in self._history if n != node ]
    def add_to_history(self, node=None):
        '''
        add a 'new' node to history. node defaults to current one.
        what happens if node is already in history? We flush all
        previous instances from the history first.
        Should we just limit history to references alone? What activity
        on folders would qualify?
        what about deleted nodes? moved ones?
        Well it seems this might get messy very quickly.
        Where do we actually invoke this? Is there one consistent location
        that makes sense? Probably closer to the database than the UI, since
        the same actions may be invoked through menus and key-presses.
        '''
        if node is None: # invocation from menu will trigger this case
            node = self.focus_element()
        if self._history and self._history[-1] == node:
            return
        self.delete_from_history(node)
        self._history.append(node)
        # trim to the most recent max_history entries
        self._history = self._history[-self.max_history:]
        # going somewhere new invalidates the forward ("future") stack.
        self._future = []
    def add_to_future(self, node):
        '''
        newly created item copies are appended here.
        '''
        assert len(self._future) == 0
        self._future.append(node)
    def history_back(self, node=None):
        '''
        invoked by a special key. I guess we have two cases:
        - we have navigated away from the key that was last stored
        - we just have closed some dialog that caused some node to
          be added to the history, but we have not yet navigated
          away.
        '''
        if node is None: # invocation from menu will trigger this case
            node = self.focus_element()
        if not self._history:
            return
        if len(self._history) > 1 and node == self._history[-1]:
            self._future.append(self._history.pop())
        # now, the last node in _history should be the right one
        self.set_focus(self._history[-1])
    def history_forward(self, node=None):
        '''
        OK, so what happens here? why do I need to press twice?
        '''
        if node is None: # invocation from menu will trigger this case
            node = self.focus_element()
        if not self._future:
            return
        self._history.append(self._future.pop())
        self.set_focus(self._history[-1])
    def _goto(self, node_name):
        '''
        implement shortcuts to navigate to special nodes
        '''
        node = hub.get_named_node(node_name)
        if node is not None:
            self.set_focus(node)
    def goto_clipboard(self):
        # jump to the special 'Clipboard' node
        self._goto('Clipboard')
    def goto_references(self):
        # jump to the special 'References' node
        self._goto('References')
    def goto_search(self):
        # jump to the special 'Search' node
        self._goto('Search')
    def goto_trash(self):
        # jump to the special 'Trash' node
        self._goto('Trash')
    def focus_element(self):
        '''
        thin wrapper for general consumption: return the tree position
        currently in focus (discarding the widget).
        '''
        w, focus_element = self.get_focus()
        return focus_element
    def toggle_view(self):
        """
        Collapse currently focussed position; works only if the underlying
        tree allows it. References get their detail view toggled instead.
        """
        focus_element = self.focus_element()
        if hub.is_ref(focus_element):
            hub.ref_view()
        else:
            self._tree.toggle_collapsed(focus_element)
            self.refresh()
    def edit_item(self):
        '''
        open edit dialog for reference or branch
        '''
        w, focus_element = self.get_focus()
        if hub.is_ref(focus_element):
            hub.ref_edit()
        else:
            hub.branch_edit()
    def set_focus(self, node):
        '''
        expand all nodes above the one we are focusing on,
        so that we may get rid of this confusing fold-on-leave
        '''
        parents = hub.get_nodes_above(node)
        for p in parents:
            self._tree.expand(p)
        super(MyTreeBox, self).set_focus(node)
    def toggle_refs(self):
        '''
        show/hide refs within the tree
        '''
        node = self.focus_element()
        # keep focus on the enclosing branch once refs are hidden
        branch = node if hub.is_branch(node) else hub.get_parent_node(node)
        hub.toggle_branches_only()
        self.refresh()
        try:
            self.set_focus(branch)
        except: # strange things happen if we press F3 immediately after program start
            pass
    def jump_letter(self, letter):
        '''
        jump among siblings by first letter. keep all of these jumped-to
        references in the history.
        '''
        tree = self._dtree
        get_text = hub.get_node_text
        current = self.focus_element()
        self.add_to_history(current)
        siblings = tree._get_siblings(current)
        # rotate so the search starts just after the current node and
        # wraps around, giving cyclic jump behaviour.
        next_index = siblings.index(current) + 1
        rotated_siblings = siblings[next_index:] + siblings[:next_index]
        for sibling in rotated_siblings:
            key, title = get_text(sibling)
            if key.lower().startswith(letter):
                self.set_focus(sibling)
                self.add_to_history(sibling)
                break
    def keypress(self, size, key):
        # keys not claimed by the hub fall through to first-letter jumps;
        # everything else goes to the default TreeBox handling.
        assert type(key) is str
        if not hub.process_action_key(key):
            if key in string.ascii_lowercase:
                self.jump_letter(key)
            return None
        try:
            key = self.__super.keypress(size, key)
        except:
            # NOTE(review): bare except — presumably guards against urwid
            # errors on an empty tree; confirm the intended failure mode.
            key = self._outer_list.keypress(size, None)
        return key
    def mouse_event(self, size, event, button, col, row, focus):
        '''
        let double click on focus element open context menu;
        buttons 4/5 (wheel) scroll up/down.
        '''
        self.__super.mouse_event(size, event, button, col, row, focus)
        size = hub.app.get_screen_size()
        if event == 'mouse press' and button == 1:
            old_focus_element = self.current_focus_element
            self.current_focus_element = self.focus_element()
            old_last_mouse_press = self.last_mouse_press
            self.last_mouse_press = time.time()
            # two presses within 0.3 s on the same element == double click
            if self.last_mouse_press - old_last_mouse_press < 0.3 and old_focus_element == self.current_focus_element:
                hub.show_menu()
        elif button == 4:
            self.__super.keypress(size, 'up')
        elif button == 5:
            self.__super.keypress(size, 'down')
class ReferenceDisplay(widgets.FocusableTwoColumn):
    '''
    tweak the geometry here
    '''
    # width reserved for the left (key) column
    label_width = 22
    # maximum total width of the two-column row
    max_width = 70
class DbTree(Tree):
    '''
    a tree class that displays the reference database as presented by coredb
    '''
    root = "Stuff" # seems to be needed but never shows up.
    def __init__(self, *a, **kw):
        # when True, __getitem__ appends the raw position to each widget
        self.debug = False
        super(DbTree, self).__init__(*a, **kw)
    def __getitem__(self, pos):
        '''
        obtain one tree item for display. Must check back with the database
        to look for updates.
        '''
        text_tuple, selected = hub.get_node_display_info(pos)
        key, title = text_tuple
        # pick the colour pair according to the selection state
        if selected == 0:
            low, high = 'body', 'focus'
        elif selected == hub.SELECTED:
            low, high = 'selected body', 'selected focus'
        else:
            low, high = 'in selection body', 'in selection focus'
        nodeinfo = str(pos) if self.debug else ''
        if title is None or hub.is_branch(pos):
            # branches render as a single line of text
            return widgets.FocusableText(key + nodeinfo, low, high)
        else:
            # references render as a two-column row plus a spacer line
            widget = urwid.Pile([ReferenceDisplay(key, nodeinfo + title, low, high), urwid.Divider()])
            return widget
    def _get_siblings(self, pos):
        """
        lists the parent directory of pos
        """
        parent = hub.get_parent_node(pos)
        if parent is None:
            return [pos]
        return hub.get_child_nodes(parent)
    # Tree API
    def parent_position(self, pos):
        return hub.get_parent_node(pos)
    def first_child_position(self, pos):
        '''
        return first child if present, else None
        '''
        children = hub.get_child_nodes(pos)
        return None if not children else children[0]
    def is_leaf(self, pos):
        """checks if given position has no children"""
        return self.first_child_position(pos) is None
    def last_child_position(self, pos):
        '''
        return last child if present, else None
        '''
        children = hub.get_child_nodes(pos)
        return None if not children else children[-1]
    def _sibling_pos(self, pos, siblings):
        '''
        shared code for next_sibling_position and prev_sibling_position
        hack around the problem of object identity
        '''
        return siblings.index(pos)
    def next_sibling_position(self, pos):
        '''
        used by the Tree API when navigating downwards among siblings
        '''
        siblings = self._get_siblings(pos)
        try:
            myindex = siblings.index(pos)
            #hub.set_status_bar(str(siblings[0]))
        except ValueError:
            return None
        if myindex + 1 < len(siblings): # pos is not the last entry
            return siblings[myindex + 1]
        return None
    def prev_sibling_position(self, pos):
        siblings = self._get_siblings(pos)
        try:
            myindex = siblings.index(pos)
        except ValueError:
            return None
        if myindex > 0: # pos is not the first entry
            return siblings[myindex - 1]
        return None
class BibApp(application.Application):
palette = []
# read color palette from config file
for key, value in config['palette'].items():
fg, bg = value.split(',')
palette.append((key, fg.strip(), bg.strip()))
def at_begin_loop(self):
'''
process startup options
'''
if config['preferences'].getboolean('clear_imports'):
hub.clear_recent()
if config['preferences'].getboolean('clear_selections'):
hub.deselect_all()
def unhandled_input(self, k):
pass
def exit_application(self):
raise urwid.ExitMainLoop()
def get_root_widget(self):
dtree = DbTree()
# We hide the usual arrow tip and use a customized collapse-icon.
conf = config['preferences']
decorated_tree = DecoratedTree(
dtree,
is_collapsed=lambda pos: dtree.depth(pos) >= conf.getint('open_level'),
arrow_tip_char=None,
icon_frame_left_char=None,
icon_frame_right_char=None,
icon_collapsed_char="+",
icon_expanded_char="-",
)
# stick it into a TreeBox and use 'body' color attribute for gaps
self.tree = MyTreeBox(dtree, decorated_tree)
wrapped_tree = urwid.AttrMap(self.tree, 'body') # this simply colors the background
#add a text footer
left_bottom = urwid.Text(' Press %s for help, %s to quit' \
% (hub.keys['show_help'], hub.keys['exit']))
self._status = urwid.Text('', align='right')
cols = urwid.Columns([left_bottom, self._status])
footer = urwid.AttrMap(cols, 'footer')
#enclose all in a frame
root_widget = urwid.Frame(wrapped_tree, | |
: yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
"""
# Converting from AstroPy Quantity
u = arr.unit
ap_units = []
for base, exponent in zip(u.bases, u.powers):
unit_str = base.to_string()
# we have to do this because AstroPy is silly and defines
# hour as "h"
if unit_str == "h": unit_str = "hr"
ap_units.append("%s**(%s)" % (unit_str, Rational(exponent)))
ap_units = "*".join(ap_units)
if isinstance(arr.value, np.ndarray):
return YTArray(arr.value, ap_units, registry=unit_registry)
else:
return YTQuantity(arr.value, ap_units, registry=unit_registry)
def to_astropy(self, **kwargs):
"""
Creates a new AstroPy quantity with the same unit information.
"""
if _astropy.units is None:
raise ImportError("You don't have AstroPy installed, so you can't convert to " +
"an AstroPy quantity.")
return self.value*_astropy.units.Unit(str(self.units), **kwargs)
@classmethod
def from_pint(cls, arr, unit_registry=None):
"""
Convert a Pint "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : Pint Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
Examples
--------
>>> from pint import UnitRegistry
>>> import numpy as np
>>> ureg = UnitRegistry()
>>> a = np.random.random(10)
>>> b = ureg.Quantity(a, "erg/cm**3")
>>> c = yt.YTArray.from_pint(b)
"""
p_units = []
for base, exponent in arr._units.items():
bs = convert_pint_units(base)
p_units.append("%s**(%s)" % (bs, Rational(exponent)))
p_units = "*".join(p_units)
if isinstance(arr.magnitude, np.ndarray):
return YTArray(arr.magnitude, p_units, registry=unit_registry)
else:
return YTQuantity(arr.magnitude, p_units, registry=unit_registry)
def to_pint(self, unit_registry=None):
"""
Convert a YTArray or YTQuantity to a Pint Quantity.
Parameters
----------
arr : YTArray or YTQuantity
The unitful quantity to convert from.
unit_registry : Pint UnitRegistry, optional
The Pint UnitRegistry to use in the conversion. If one is not
supplied, the default one will be used. NOTE: This is not
the same as a yt UnitRegistry object.
Examples
--------
>>> a = YTQuantity(4.0, "cm**2/s")
>>> b = a.to_pint()
"""
from pint import UnitRegistry
if unit_registry is None:
unit_registry = UnitRegistry()
powers_dict = self.units.expr.as_powers_dict()
units = []
for unit, pow in powers_dict.items():
# we have to do this because Pint doesn't recognize
# "yr" as "year"
if str(unit).endswith("yr") and len(str(unit)) in [2,3]:
unit = str(unit).replace("yr","year")
units.append("%s**(%s)" % (unit, Rational(pow)))
units = "*".join(units)
return unit_registry.Quantity(self.value, units)
#
# End unit conversion methods
#
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
r"""Writes a YTArray to hdf5 file.
Parameters
----------
filename: string
The filename to create and write a dataset to
dataset_name: string
The name of the dataset to create in the file.
info: dictionary
A dictionary of supplementary info to write to append as attributes
to the dataset.
group_name: string
An optional group to write the arrays to. If not specified, the arrays
are datasets at the top level by default.
Examples
--------
>>> a = YTArray([1,2,3], 'cm')
>>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
>>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
... info=myinfo)
"""
from yt.utilities.on_demand_imports import _h5py as h5py
from yt.extern.six.moves import cPickle as pickle
if info is None:
info = {}
info['units'] = str(self.units)
info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut))
if dataset_name is None:
dataset_name = 'array_data'
f = h5py.File(filename)
if group_name is not None:
if group_name in f:
g = f[group_name]
else:
g = f.create_group(group_name)
else:
g = f
if dataset_name in g.keys():
d = g[dataset_name]
# Overwrite without deleting if we can get away with it.
if d.shape == self.shape and d.dtype == self.dtype:
d[...] = self
for k in d.attrs.keys():
del d.attrs[k]
else:
del f[dataset_name]
d = g.create_dataset(dataset_name, data=self)
else:
d = g.create_dataset(dataset_name, data=self)
for k, v in info.items():
d.attrs[k] = v
f.close()
@classmethod
def from_hdf5(cls, filename, dataset_name=None, group_name=None):
r"""Attempts read in and convert a dataset in an hdf5 file into a
YTArray.
Parameters
----------
filename: string
The filename to of the hdf5 file.
dataset_name: string
The name of the dataset to read from. If the dataset has a units
attribute, attempt to infer units as well.
group_name: string
An optional group to read the arrays from. If not specified, the
arrays are datasets at the top level by default.
"""
import h5py
from yt.extern.six.moves import cPickle as pickle
if dataset_name is None:
dataset_name = 'array_data'
f = h5py.File(filename)
if group_name is not None:
g = f[group_name]
else:
g = f
dataset = g[dataset_name]
data = dataset[:]
units = dataset.attrs.get('units', '')
if 'unit_registry' in dataset.attrs.keys():
unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring())
else:
unit_lut = None
f.close()
registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
return cls(data, units, registry=registry)
    #
    # Start convenience methods
    #
    @property
    def value(self):
        """Get a copy of the array data as a numpy ndarray"""
        return np.array(self)
    # short alias for .value, kept for backwards compatibility
    v = value
    @property
    def ndview(self):
        """Get a view of the array data."""
        return self.ndarray_view()
    # short alias for .ndview
    d = ndview
    @property
    def unit_quantity(self):
        """Get a YTQuantity with the same unit as this array and a value of
        1.0"""
        return YTQuantity(1.0, self.units)
    # short alias for .unit_quantity
    uq = unit_quantity
    @property
    def unit_array(self):
        """Get a YTArray filled with ones with the same unit and shape as this
        array"""
        # ones_like preserves the array subclass, so units are carried over
        return np.ones_like(self)
    # short alias for .unit_array
    ua = unit_array
    def __getitem__(self, item):
        """Index/slice the array, preserving units on the result.

        A 0-d result (``ret.shape == ()``) is wrapped in a YTQuantity so that
        units survive scalar indexing; otherwise the units attribute is
        copied onto the sliced view when this array has one.
        """
        ret = super(YTArray, self).__getitem__(item)
        if ret.shape == ():
            # bypass_validation skips unit parsing: self.units is already valid
            return YTQuantity(ret, self.units, bypass_validation=True)
        else:
            if hasattr(self, 'units'):
                ret.units = self.units
            return ret
    #
    # Start operation methods
    #
    # NOTE: on numpy >= 1.13 arithmetic is intercepted via __array_ufunc__
    # instead, so this whole suite of operator overloads is only defined
    # for older numpy releases.  The sanitize_units_* helpers coerce the
    # other operand to compatible units (raising on dimension mismatch for
    # add/sub) before delegating to the ndarray implementation.
    if LooseVersion(np.__version__) < LooseVersion('1.13.0'):
        def __add__(self, right_object):
            """
            Add this ytarray to the object on the right of the `+` operator.
            Must check for the correct (same dimension) units.
            """
            ro = sanitize_units_add(self, right_object, "addition")
            return super(YTArray, self).__add__(ro)
        def __radd__(self, left_object):
            """ See __add__. """
            lo = sanitize_units_add(self, left_object, "addition")
            return super(YTArray, self).__radd__(lo)
        def __iadd__(self, other):
            """ See __add__. """
            oth = sanitize_units_add(self, other, "addition")
            # in-place: write the result back into self's buffer
            np.add(self, oth, out=self)
            return self
        def __sub__(self, right_object):
            """
            Subtract the object on the right of the `-` from this ytarray. Must
            check for the correct (same dimension) units.
            """
            ro = sanitize_units_add(self, right_object, "subtraction")
            return super(YTArray, self).__sub__(ro)
        def __rsub__(self, left_object):
            """ See __sub__. """
            lo = sanitize_units_add(self, left_object, "subtraction")
            return super(YTArray, self).__rsub__(lo)
        def __isub__(self, other):
            """ See __sub__. """
            oth = sanitize_units_add(self, other, "subtraction")
            np.subtract(self, oth, out=self)
            return self
        def __neg__(self):
            """ Negate the data. """
            return super(YTArray, self).__neg__()
        def __mul__(self, right_object):
            """
            Multiply this YTArray by the object on the right of the `*`
            operator. The unit objects handle being multiplied.
            """
            ro = sanitize_units_mul(self, right_object)
            return super(YTArray, self).__mul__(ro)
        def __rmul__(self, left_object):
            """ See __mul__. """
            lo = sanitize_units_mul(self, left_object)
            return super(YTArray, self).__rmul__(lo)
        def __imul__(self, other):
            """ See __mul__. """
            oth = sanitize_units_mul(self, other)
            np.multiply(self, oth, out=self)
            return self
        # __div__/__idiv__/__rdiv__ are the Python 2 division protocol;
        # __truediv__ and friends are used on Python 3
        def __div__(self, right_object):
            """
            Divide this YTArray by the object on the right of the `/` operator.
            """
            ro = sanitize_units_mul(self, right_object)
            return super(YTArray, self).__div__(ro)
        def __rdiv__(self, left_object):
            """ See __div__. """
            lo = sanitize_units_mul(self, left_object)
            return super(YTArray, self).__rdiv__(lo)
        def __idiv__(self, other):
            """ See __div__. """
            oth = sanitize_units_mul(self, other)
            np.divide(self, oth, out=self)
            return self
        def __truediv__(self, right_object):
            ro = sanitize_units_mul(self, right_object)
            return super(YTArray, self).__truediv__(ro)
        def __rtruediv__(self, left_object):
            """ See __div__. """
            lo = sanitize_units_mul(self, left_object)
            return super(YTArray, self).__rtruediv__(lo)
        def __itruediv__(self, other):
            """ See __div__. """
            oth = sanitize_units_mul(self, other)
            np.true_divide(self, oth, out=self)
            return self
        def __floordiv__(self, right_object):
            ro = sanitize_units_mul(self, right_object)
            return super(YTArray, self).__floordiv__(ro)
        def __rfloordiv__(self, left_object):
            """ See __div__. """
            lo = sanitize_units_mul(self, left_object)
            return super(YTArray, self).__rfloordiv__(lo)
        def __ifloordiv__(self, other):
            """ See __div__. """
            oth = sanitize_units_mul(self, other)
            np.floor_divide(self, oth, out=self)
            return self
        # bitwise operators carry no unit handling: they delegate directly
        def __or__(self, right_object):
            return super(YTArray, self).__or__(right_object)
        def __ror__(self, left_object):
            return super(YTArray, self).__ror__(left_object)
        def __ior__(self, other):
            np.bitwise_or(self, other, out=self)
            return self
        def __xor__(self, right_object):
            return super(YTArray, self).__xor__(right_object)
        def __rxor__(self, left_object):
            return super(YTArray, self).__rxor__(left_object)
        def __ixor__(self, other):
            np.bitwise_xor(self, other, out=self)
            return self
        def __and__(self, right_object):
            return super(YTArray, self).__and__(right_object)
        def __rand__(self, left_object):
            return super(YTArray, self).__rand__(left_object)
        def __iand__(self, other):
            np.bitwise_and(self, other, out=self)
            return self
def __pow__(self, power):
"""
Raise this YTArray to some power.
Parameters
----------
power : float or dimensionless YTArray.
The pow value.
"""
if isinstance(power, YTArray):
if not power.units.is_dimensionless:
raise YTUnitOperationError('power', power.unit)
# Work around a sympy issue (I think?)
#
# If | |
# tindar.py
from typing import Optional
from pulp import *
import numpy as np
from pathlib import Path
from custom_timer import Timer
import itertools
import json
# Absolute path of the project root: one level above this file's directory
PROJECT_DIR = str(Path(__file__).resolve().parents[1])
class Tindar:
'''Class to solve Tindar pairing problems
Input
-----
love_matrix: np.array
square matrix indicating which person is interested
in which other person
tindar_problem: instance of TindarGenerator
'''
INIT_ERROR_MSG = "Cannot initialise with love_matrix AND tindar_problem"
def __init__(self, love_matrix=None, tindar_problem=None):
if love_matrix is not None:
assert tindar_problem is None, INIT_ERROR_MSG
self.check_init(love_matrix)
self.love_matrix = love_matrix
self.n = love_matrix.shape[0]
if tindar_problem is not None:
assert love_matrix is None, INIT_ERROR_MSG
self.tindar_problem = tindar_problem
self.love_matrix = tindar_problem.love_matrix
self.n = tindar_problem.n
self.connectedness = tindar_problem.connectedness
self.p = tindar_problem.p
self.x_names = [f"x_{i}_{j}" for i in range(self.n)
for j in range(self.n)]
self.x = [LpVariable(name=x_name, cat="Binary")
for x_name in self.x_names]
self.x_np = np.array(self.x).reshape((self.n, self.n))
def __repr__(self):
if self.tindar_problem is None:
return f"Tindar with n={self.n}"
else:
return str(self.tindar_problem.__repr__())
@staticmethod
def check_init(love_matrix):
# type check
if not isinstance(love_matrix, np.ndarray):
raise ValueError("love_matrix is not a numpy array")
# shape check
m, n = love_matrix.shape
if m != n:
raise ValueError(f"love_matrix is not square: love_matrix.shape"
f"= {love_matrix.shape}")
# diagonal zero check
for i in range(n):
if love_matrix[i, i] != 0:
raise ValueError("love_matrix diagonal contains nonzeros")
# Symmetry constraints: if one is paired, the other is paired
def create_symmetry_constraints(self, inplace=True):
tups = [(i, j) for i in range(self.n) for j in range(i+1, self.n)]
# Left-hand side
lhs_symmetry = [
LpAffineExpression(
[(self.x_np[tup[0], tup[1]], 1), (self.x_np[tup[1], tup[0]], -1)],
name=f"lhs_sym_{tup[0]}_{tup[1]}"
)
for tup in tups
]
# Constraints
constraints_symmetry = [
LpConstraint(
e=lhs_s,
sense=0,
name=f"constraint_sym_{tups[i][0]}_{tups[i][1]}",
rhs=0
)
for i, lhs_s in enumerate(lhs_symmetry)
]
# Verification
if len(constraints_symmetry) != (self.n**2-self.n)/2:
raise Exception(
"Symmetry constraints not constructed right:"
f"love_matrix.shape = {self.love_matrix.shape},"
f"len(constraints_symmetry) should be {(self.n**2-self.n)/2}"
f", actually is {len(constraints_symmetry)}"
)
# Function behaviour
if inplace: # object is modified, no return value
self.constraints_symmetry = constraints_symmetry
else: # only result is returned
return constraints_symmetry
# Feasibility constraints: only pairs if person likes the other
def create_like_constraints(self, inplace=True):
tups = [(i, j) for i in range(self.n) for j in range(self.n)]
# Left-hand side
lhs_like = [
LpAffineExpression(
[(self.x_np[tup[0], tup[1]], 1)],
name=f"lhs_like_{tup[0]}_{tup[1]}"
)
for tup in tups
]
# Constraints
constraints_like = [
LpConstraint(
e=lhs_l,
sense=-1,
name=f"constraint_like_{tups[i][0]}_{tups[i][1]}",
rhs=self.love_matrix[tups[i][0], tups[i][1]]
)
for i, lhs_l in enumerate(lhs_like)
]
# Verification
if len(constraints_like) != self.n**2:
raise Exception(
"Liking constraints not constructed right:"
f"A.shape = {self.love_matrix.shape}, len(constraints_like)"
f"should be {self.n**2}, actually is {len(constraints_like)}"
)
# Function behaviour
if inplace: # object is modified, no return value
self.constraints_like = constraints_like
else: # only result is returned
return constraints_like
# Single assignment: one person can have at most one other person
def create_single_assignment_constraints(self, inplace=True):
# Left-hand side: rowsum <= 1
lhs_single_rowsum = [
LpAffineExpression(
[(self.x_np[i, j], 1) for j in range(self.n)],
name=f"lhs_single_rowsum_{i}"
)
for i in range(self.n)
]
# Left-hand side: colsum <= 1
lhs_single_colsum = [
LpAffineExpression(
[(self.x_np[i, j], 1) for i in range(self.n)],
name=f"lhs_single_colsum_{j}"
)
for j in range(self.n)
]
# Constraints
constraints_single_rowsum = self.make_single_constraints(
lhs_single_rowsum, "rowsum")
constraints_single_colsum = self.make_single_constraints(
lhs_single_colsum, "colsum")
# Verification
self.check_single_constraints(constraints_single_rowsum, "rowsum")
self.check_single_constraints(constraints_single_colsum, "colsum")
# Function behaviour
if inplace: # object is modified, no return value
self.constraints_single_rowsum = constraints_single_rowsum
self.constraints_single_colsum = constraints_single_colsum
else: # only result is returned
return constraints_single_rowsum, constraints_single_colsum
# Auxiliary functions for single assigment constraints
@staticmethod
def make_single_constraints(lhs_single, kind):
constraints_single = [
LpConstraint(
e=lhs_s,
sense=-1,
name=f"constraint_single_{kind}_{i}",
rhs=1
)
for i, lhs_s in enumerate(lhs_single)
]
return constraints_single
def check_single_constraints(self, constraints_single, kind):
if len(constraints_single) != self.n:
raise Exception(
f"Constraints single {kind} not constructed right:"
f"A.shape = {self.love_matrix.shape}, "
f"len(constraints_single_{kind}) should be {self.n}, "
f"actually is {len(constraints_single)}"
)
def create_all_constraints(self):
self.create_symmetry_constraints()
self.create_like_constraints()
self.create_single_assignment_constraints()
self.constraints_all = [
*self.constraints_symmetry,
*self.constraints_like,
*self.constraints_single_rowsum,
*self.constraints_single_colsum
]
def create_problem(self):
# Initialize constraints and objective
self.create_all_constraints()
self.objective = LpAffineExpression([(x_i, 1) for x_i in self.x])
# Create PuLP problem
self.prob_pulp = LpProblem("The_Tindar_Problem", LpMaximize)
self.prob_pulp += self.objective
for c in self.constraints_all:
self.prob_pulp += c
def write_problem(self, path=PROJECT_DIR+"/models/Tindar.lp"):
self.prob_pulp.writeLP(path)
def solve_problem(self, kind="pulp"):
if kind == "pulp":
self.prob_pulp.solve()
elif kind == "heuristic":
self.x_heuristic_np = np.zeros((self.n, self.n))
for i in range(self.n - 1):
if self.x_heuristic_np[i, :].sum() == 0:
done = False
j = i + 1
while not done:
mutual_interest = (
(self.love_matrix[i, j] == 1) and
(self.love_matrix[j, i] == 1)
)
available = (self.x_heuristic_np[j, :] == 0).all()
if mutual_interest and available:
self.x_heuristic_np[i, j] = 1
self.x_heuristic_np[j, i] = 1
done = True
if j == self.n - 1:
done = True
else:
j += 1
else:
raise ValueError(
f"kind {kind} not allowed"
"choose from: pulp, heuristic"
)
def solution_status(self, kind="pulp", verbose=True):
if kind == "pulp":
stat = LpStatus[self.prob_pulp.status]
if verbose:
print("Status:", stat)
return stat
elif kind == "heuristic":
stat = "Solved (optimal unsure)"
print("Heuristic always solves")
return stat
else:
raise ValueError(
f"kind {kind} not allowed"
"choose from: pulp, heuristic"
)
def _pulp_solution_to_np(self, pulp_vars=None):
if pulp_vars is None:
pulp_vars = self.prob_pulp.variables()
solution_np = np.array(
[v.value() for v in pulp_vars]
).reshape((self.n, self.n))
return solution_np
def solution_vars(self, kind="pulp", verbose=True):
if kind == "pulp":
vars_pulp = self.prob_pulp.variables()
vars_np = self._pulp_solution_to_np(vars_pulp)
if verbose:
print(vars_np)
return vars_np
elif kind == "heuristic":
if verbose:
print(self.x_heuristic_np)
return self.x_heuristic_np
def solution_obj(self, kind="pulp", verbose=True):
if kind == "pulp":
obj = value(self.prob_pulp.objective)
elif kind == "heuristic":
obj = self.x_heuristic_np.sum()
if verbose:
print(f"Number of lovers connected by {kind} = ", obj)
return obj
class TindarGenerator:
    '''Class to generate Tindar objects randomly

    n: integer
        number of people in the model
    connectedness: 1 < integer < 10
        connectedness of the Tindar problem for humans,
        implemented as bernouilli probability for edges
        to be generated
    '''
    # Bounds of the human-friendly connectedness scale
    MIN_CONNECTEDNESS = 1
    MAX_CONNECTEDNESS = 10
    # Bounds of the derived Bernoulli edge probability
    MIN_EDGE_PROB = 0.05
    MAX_EDGE_PROB = 0.75
    # Default support of the uniform attractiveness distribution
    UNIF_LOW = 0.3
    UNIF_HIGH = 0.6
    def __init__(self, n, connectedness=None, nan_probability=None,
                 generation_kind="simple", attractiveness_distr="uniform",
                 unif_low=UNIF_LOW, unif_high=UNIF_HIGH):
        """Store generation parameters and immediately generate a love matrix.

        ``check_init`` validates n and connectedness before anything is stored;
        ``create_love_matrix`` is then called with these defaults.
        """
        self.check_init(n, connectedness)
        self.n = n
        self.connectedness = connectedness
        # NOTE(review): nan_probability is stored but not used in the visible
        # generation paths - presumably consumed by create_love_matrix.
        self.nan_probability = nan_probability
        self.generation_kind = generation_kind
        self.attractiveness_distr = attractiveness_distr
        self.unif_low = unif_low
        self.unif_high = unif_high
        self.create_love_matrix()
def __repr__(self):
return (f"Tindar problem with n={self.n}, connectedness= "
f"{self.connectedness}, p={self.p}")
@staticmethod
def ROMANCE_LEVEL_FN(attractiveness_level):
return (1/(attractiveness_level+1))**1.5 - 0.2
# Input validation
@classmethod
def check_init(self, n, connectedness):
# n
if not isinstance(n, int):
raise ValueError(f"TindarGenerator init error: "
f"type(n) = {type(n)}")
if n <= 0:
raise ValueError(f"TindarGenerator init error: "
f"n={n} < 0")
# connectedness
if not (isinstance(connectedness, (int, float)) or connectedness is None):
raise ValueError(f"TindarGenerator init error: "
f"type(connectedness) = {type(connectedness)}")
if connectedness is not None:
if not (self.MIN_CONNECTEDNESS <= connectedness <= self.MAX_CONNECTEDNESS):
raise ValueError(f"TindarGenerator init error: "
f"connectedness={connectedness} not between 1 and 10")
@classmethod
def bernouilli_parameter(self, connectedness):
diff_scaled = (connectedness-self.MIN_CONNECTEDNESS)/self.MAX_CONNECTEDNESS
return (diff_scaled*self.MAX_EDGE_PROB) + self.MIN_EDGE_PROB
@classmethod
def _create_interesting_love_values(self, n, attractiveness_distr=None, unif_low=None, unif_high=None):
# Sample attractiveness levels
nu = np.random.uniform(low=unif_low, high=unif_high, size=n)
nu[nu < 0] = 0
nu[nu > 1] = 1
# Calculate corresponding romance levels
mu = np.array([self.ROMANCE_LEVEL_FN(n) for n in nu])
mu[mu < 0] = 0
mu[mu > 1] = 1
# Compute love interests
mu_colvec = mu.reshape((-1, 1))
nu_rowvec = nu.reshape((1, -1))
love_values = np.dot(mu_colvec, nu_rowvec)
return love_values
@staticmethod
def _convert_love_values_to_binary(love_values):
love_values_scaled = (love_values - love_values.min())/(love_values.max() - love_values.min())
love_matrix = love_values_scaled.copy()
love_matrix[love_matrix > 0.5] = 1
love_matrix[love_matrix <= 0.5] = 0
return love_matrix
def create_love_matrix(self, n: int = None, connectedness: Optional[int] = None,
nan_probability: Optional[float] = None, inplace: Optional[bool] = True,
generation_kind: Optional[str] = None, attractiveness_distr: Optional[str] = None,
unif_low: Optional[float] = None, unif_high: Optional[float] = None):
# if parameters not specified, use instance attributes
if n is None:
n = self.n
if connectedness is None:
connectedness = self.connectedness
if nan_probability is None:
nan_probability = self.nan_probability
if generation_kind is None:
generation_kind = self.generation_kind
if attractiveness_distr is None:
attractiveness_distr = self.attractiveness_distr
if unif_low is None:
unif_low = self.unif_low
if unif_high is None:
unif_high = self.unif_high
# Based on bernouilli sampling
if generation_kind == "simple":
self.p = self.bernouilli_parameter(connectedness)
love_matrix = np.random.binomial(1, self.p, size=(n, n)).astype(float)
# See notebook 4 for explanation
elif generation_kind == "interesting":
if attractiveness_distr == "uniform":
love_values = self._create_interesting_love_values(
n, attractiveness_distr, unif_low, unif_high
)
else:
raise ValueError(f"attractiveness_distr {attractiveness_distr}"
" not implemented")
# Convert to binary interest
love_matrix = self._convert_love_values_to_binary(love_values)
else:
| |
"""_catalogue.py: RESQML parts (high level objects) catalogue functions."""
import logging
log = logging.getLogger(__name__)
import zipfile as zf
import resqpy.olio.uuid as bu
import resqpy.olio.xml_et as rqet
def _parts(model,
           parts_list = None,
           obj_type = None,
           uuid = None,
           title = None,
           title_mode = 'is',
           title_case_sensitive = False,
           extra = None,
           related_uuid = None,
           epc_subdir = None,
           sort_by = None):
    """Returns a list of parts matching all of the arguments passed.

    note:
       extra now defaults to None (meaning no extra-metadata filtering); the
       previous mutable ``{}`` default was shared between all calls
    """
    if not parts_list:
        parts_list = _list_of_parts(model)
    if uuid is not None:
        # a uuid identifies at most one part; it must also be in parts_list
        part_name = model.uuid_part_dict.get(bu.uuid_as_int(uuid))
        if part_name is None or part_name not in parts_list:
            return []
        parts_list = [part_name]
    if epc_subdir:
        parts_list = _filtered_by_epc_subdir(model, parts_list, epc_subdir)
    if obj_type:
        # leading capital indicates a bare class name needing the obj_ prefix
        if obj_type[0].isupper():
            obj_type = 'obj_' + obj_type
        filtered_list = [part for part in parts_list if model.parts_forest[part][0] == obj_type]
        if len(filtered_list) == 0:
            return []
        parts_list = filtered_list
    if title:
        parts_list = _filtered_by_title(model, parts_list, title, title_mode, title_case_sensitive)
    if extra:
        parts_list = _filtered_by_extra(model, parts_list, extra)
    if related_uuid is not None:
        parts_list = _parts_list_filtered_by_related_uuid(model, parts_list, related_uuid)
    if sort_by and len(parts_list):
        parts_list = _sorted_parts_list(model, parts_list, sort_by)
    return parts_list
def _part(model,
          parts_list = None,
          obj_type = None,
          uuid = None,
          title = None,
          title_mode = 'is',
          title_case_sensitive = False,
          extra = None,
          related_uuid = None,
          epc_subdir = None,
          multiple_handling = 'exception'):
    """Returns the name of a part matching all of the arguments passed.

    note:
       extra now defaults to None rather than a shared mutable ``{}``;
       multiple_handling selects behaviour when more than one part matches:
       'exception', 'none', 'first', 'newest' or 'oldest'
    """
    pl = _parts(model,
                parts_list = parts_list,
                obj_type = obj_type,
                uuid = uuid,
                title = title,
                title_mode = title_mode,
                title_case_sensitive = title_case_sensitive,
                extra = extra,
                related_uuid = related_uuid,
                epc_subdir = epc_subdir)
    if len(pl) == 0:
        return None
    if len(pl) == 1 or multiple_handling == 'first':
        return pl[0]
    if multiple_handling == 'none':
        return None
    elif multiple_handling in ['newest', 'oldest']:
        sorted_list = _sort_parts_list_by_timestamp(model, pl)
        # timestamp sort is newest-first
        if multiple_handling == 'newest':
            return sorted_list[0]
        return sorted_list[-1]
    else:
        raise ValueError('more than one part matches criteria')
def _uuids(model,
           parts_list = None,
           obj_type = None,
           uuid = None,
           title = None,
           title_mode = 'is',
           title_case_sensitive = False,
           extra = None,
           related_uuid = None,
           epc_subdir = None,
           sort_by = None):
    """Returns a list of uuids of parts matching all of the arguments passed.

    note:
       extra now defaults to None rather than a shared mutable ``{}``
    """
    # 'uuid' ordering cannot be delegated to _parts: sort the result instead
    sort_by_uuid = (sort_by == 'uuid')
    if sort_by_uuid:
        sort_by = None
    pl = _parts(model,
                parts_list = parts_list,
                obj_type = obj_type,
                uuid = uuid,
                title = title,
                title_mode = title_mode,
                title_case_sensitive = title_case_sensitive,
                extra = extra,
                related_uuid = related_uuid,
                epc_subdir = epc_subdir,
                sort_by = sort_by)
    if len(pl) == 0:
        return []
    uuid_list = [_uuid_for_part(model, part) for part in pl]
    if sort_by_uuid:
        uuid_list.sort()
    return uuid_list
def _uuid(model,
          parts_list = None,
          obj_type = None,
          uuid = None,
          title = None,
          title_mode = 'is',
          title_case_sensitive = False,
          extra = None,
          related_uuid = None,
          epc_subdir = None,
          multiple_handling = 'exception'):
    """Returns the uuid of a part matching all of the arguments passed.

    note:
       extra now defaults to None rather than a shared mutable ``{}``
    """
    part = _part(model,
                 parts_list = parts_list,
                 obj_type = obj_type,
                 uuid = uuid,
                 title = title,
                 title_mode = title_mode,
                 title_case_sensitive = title_case_sensitive,
                 extra = extra,
                 related_uuid = related_uuid,
                 epc_subdir = epc_subdir,
                 multiple_handling = multiple_handling)
    if part is None:
        return None
    return rqet.uuid_in_part_name(part)
def _roots(model,
           parts_list = None,
           obj_type = None,
           uuid = None,
           title = None,
           title_mode = 'is',
           title_case_sensitive = False,
           extra = None,
           related_uuid = None,
           epc_subdir = None,
           sort_by = None):
    """Returns a list of xml root nodes of parts matching all of the arguments passed.

    note:
       extra now defaults to None rather than a shared mutable ``{}``
    """
    pl = _parts(model,
                parts_list = parts_list,
                obj_type = obj_type,
                uuid = uuid,
                title = title,
                title_mode = title_mode,
                title_case_sensitive = title_case_sensitive,
                extra = extra,
                related_uuid = related_uuid,
                epc_subdir = epc_subdir,
                sort_by = sort_by)
    return [_root_for_part(model, part) for part in pl]
def _root(model,
          parts_list = None,
          obj_type = None,
          uuid = None,
          title = None,
          title_mode = 'is',
          title_case_sensitive = False,
          extra = None,
          related_uuid = None,
          epc_subdir = None,
          multiple_handling = 'exception'):
    """Returns the xml root node of a part matching all of the arguments passed.

    note:
       extra now defaults to None rather than a shared mutable ``{}``
    """
    part = _part(model,
                 parts_list = parts_list,
                 obj_type = obj_type,
                 uuid = uuid,
                 title = title,
                 title_mode = title_mode,
                 title_case_sensitive = title_case_sensitive,
                 extra = extra,
                 related_uuid = related_uuid,
                 epc_subdir = epc_subdir,
                 multiple_handling = multiple_handling)
    if part is None:
        return None
    return _root_for_part(model, part)
def _titles(model,
            parts_list = None,
            obj_type = None,
            uuid = None,
            title = None,
            title_mode = 'is',
            title_case_sensitive = False,
            extra = None,
            related_uuid = None,
            epc_subdir = None,
            sort_by = None):
    """Returns a list of citation titles of parts matching all of the arguments passed.

    note:
       extra now defaults to None rather than a shared mutable ``{}``
    """
    pl = _parts(model,
                parts_list = parts_list,
                obj_type = obj_type,
                uuid = uuid,
                title = title,
                title_mode = title_mode,
                title_case_sensitive = title_case_sensitive,
                extra = extra,
                related_uuid = related_uuid,
                epc_subdir = epc_subdir,
                sort_by = sort_by)
    return [_citation_title_for_part(model, part) for part in pl]
def _title(model,
           parts_list = None,
           obj_type = None,
           uuid = None,
           title = None,
           title_mode = 'is',
           title_case_sensitive = False,
           extra = None,
           related_uuid = None,
           epc_subdir = None,
           multiple_handling = 'exception'):
    """Returns the citation title of a part matching all of the arguments passed.

    note:
       extra now defaults to None rather than a shared mutable ``{}``
    """
    part = _part(model,
                 parts_list = parts_list,
                 obj_type = obj_type,
                 uuid = uuid,
                 title = title,
                 title_mode = title_mode,
                 title_case_sensitive = title_case_sensitive,
                 extra = extra,
                 related_uuid = related_uuid,
                 epc_subdir = epc_subdir,
                 multiple_handling = multiple_handling)
    if part is None:
        return None
    return _citation_title_for_part(model, part)
def _parts_list_of_type(model, type_of_interest = None, uuid = None):
"""Returns a list of part names for parts of type of interest, optionally matching a uuid."""
if type_of_interest and type_of_interest[0].isupper():
type_of_interest = 'obj_' + type_of_interest
if uuid is not None:
part_name = model.uuid_part_dict.get(bu.uuid_as_int(uuid))
if part_name is None or (type_of_interest is not None and
(model.parts_forest[part_name][0] != type_of_interest)):
return []
return [part_name]
parts_list = []
for part_name in model.parts_forest:
if type_of_interest is None or model.parts_forest[part_name][0] == type_of_interest:
parts_list.append(part_name)
return parts_list
def _list_of_parts(model, only_objects = True):
"""Return a complete list of parts."""
pl = list(model.parts_forest.keys())
if not only_objects:
return pl
obj_list = []
for part in pl:
dir_place = part.rfind('/')
dir_free_part = part[dir_place + 1:]
if dir_free_part.startswith('obj_') and not dir_free_part.startswith('obj_Epc'):
obj_list.append(part)
return obj_list
def _number_of_parts(model):
"""Retuns the number of parts in the model, including external parts such as the link to an hdf5 file."""
return len(model.parts_forest)
def _part_for_uuid(model, uuid):
    """Returns the part name which has the given uuid."""
    uuid_int = bu.uuid_as_int(uuid)
    return model.uuid_part_dict.get(uuid_int)
def _root_for_uuid(model, uuid):
    """Returns the xml root for the part which has the given uuid."""
    part = _part_for_uuid(model, uuid)
    return _root_for_part(model, part)
def _parts_count_by_type(model, type_of_interest = None):
"""Returns a sorted list of (type, count) for parts."""
# note: resqml classes start with 'obj_' whilst witsml classes don't!
if type_of_interest and type_of_interest.startswith('obj_'):
type_of_interest = type_of_interest[4:]
type_list = []
for part_name in model.parts_forest:
part_type = model.parts_forest[part_name][0]
if part_type is None:
continue
if part_type.startswith('obj_'):
part_type = part_type[4:]
if type_of_interest is None or part_type == type_of_interest:
type_list.append(part_type)
type_list.sort()
type_list.append('END') # simplifies termination of scan below
result_list = []
count = 0
current_type = ''
for index in range(len(type_list)):
if type_list[index] != current_type:
if count:
result_list.append((current_type, count))
current_type = type_list[index]
count = 0
count += 1
return result_list
def _parts_list_filtered_by_related_uuid(model, parts_list, uuid, uuid_is_source = None):
    """From a list of parts, returns a list of those parts which have a relationship with the given uuid.

    Two passes are made: first the rels part for the uuid's own part is
    scanned for targets within parts_list; then the rels part of each
    remaining candidate is scanned for a target back to the uuid.  When
    uuid_is_source is True only relationships where the uuid side is the
    source are kept; when False, only those where it is the destination.
    Returns None (not []) when relationships are unavailable or arguments
    are missing.
    """
    if not model.rels_present or parts_list is None or uuid is None:
        return None
    filtered_list = []
    # pass 1: scan the relationships of the part owning the uuid (if any)
    this_part = _part_for_uuid(model, uuid)
    if this_part is not None:
        rels_part_root = _root_for_part(model, rqet.rels_part_name_for_part(this_part), is_rels = True)
        if rels_part_root is not None:
            for relation_node in rels_part_root:
                if rqet.stripped_of_prefix(relation_node.tag) != 'Relationship':
                    continue
                target_part = relation_node.attrib['Target']
                if target_part not in parts_list:
                    continue
                if uuid_is_source is not None:
                    # here the relationship is seen from the uuid's side
                    source_dest = relation_node.attrib['Type']
                    if uuid_is_source:
                        if 'source' not in source_dest:
                            continue
                    else:
                        if 'source' in source_dest:
                            continue
                filtered_list.append(target_part)
    # pass 2: scan each candidate part's relationships for a link back to uuid
    for part in parts_list:
        if part in filtered_list:
            continue
        rels_part_root = _root_for_part(model, rqet.rels_part_name_for_part(part), is_rels = True)
        if rels_part_root is None:
            continue
        for relation_node in rels_part_root:
            if rqet.stripped_of_prefix(relation_node.tag) != 'Relationship':
                continue
            target_part = relation_node.attrib['Target']
            relation_uuid = rqet.uuid_in_part_name(target_part)
            if bu.matching_uuids(uuid, relation_uuid):
                if uuid_is_source is not None:
                    # here the relationship is seen from the candidate's
                    # side, so the source test is inverted relative to pass 1
                    source_dest = relation_node.attrib['Type']
                    if uuid_is_source:
                        if 'source' in source_dest:
                            continue  # relation is source, so uuid is not
                    else:
                        if 'source' not in source_dest:
                            continue  # relation is not source, so uuid is
                filtered_list.append(part)
                break
    return filtered_list
def | |
import sha
import time
from patch_tool import *
def add_assets(asset_tree):
asset_tree.add_ignore("mv.patch")
asset_tree.add_ignore("mv.patch.cur")
# Directories for main assets
asset_tree.add_asset_path("Base", "Fonts")
asset_tree.add_asset_path("Base", "GpuPrograms")
asset_tree.add_asset_path("Base", "Icons")
asset_tree.add_asset_path("Base", "Imagefiles")
asset_tree.add_asset_path("Base", "Interface/FrameXML")
asset_tree.add_asset_path("Base", "Interface/Imagesets")
asset_tree.add_asset_path("Base", "Materials")
asset_tree.add_asset_path("Base", "Meshes")
asset_tree.add_asset_path("Base", "Misc")
asset_tree.add_asset_path("Base", "Particles")
asset_tree.add_asset_path("Base", "Physics")
asset_tree.add_asset_path("Base", "Scripts")
asset_tree.add_asset_path("Base", "Skeletons")
asset_tree.add_asset_path("Base", "Sounds")
asset_tree.add_asset_path("Base", "SpeedTree")
asset_tree.add_asset_path("Base", "Textures")
# Directories for addon (local) assets
asset_tree.add_asset_path("Base", "AddOns/Fonts")
asset_tree.add_asset_path("Base", "AddOns/GpuPrograms")
asset_tree.add_asset_path("Base", "AddOns/Icons")
asset_tree.add_asset_path("Base", "AddOns/Imagefiles")
asset_tree.add_asset_path("Base", "AddOns/Interface/FrameXML")
asset_tree.add_asset_path("Base", "AddOns/Interface/Imagesets")
asset_tree.add_asset_path("Base", "AddOns/Materials")
asset_tree.add_asset_path("Base", "AddOns/Meshes")
asset_tree.add_asset_path("Base", "AddOns/Misc")
asset_tree.add_asset_path("Base", "AddOns/Particles")
asset_tree.add_asset_path("Base", "AddOns/Physics")
asset_tree.add_asset_path("Base", "AddOns/Scripts")
asset_tree.add_asset_path("Base", "AddOns/Skeletons")
asset_tree.add_asset_path("Base", "AddOns/Sounds")
asset_tree.add_asset_path("Base", "AddOns/SpeedTree")
asset_tree.add_asset_path("Base", "AddOns/Textures")
asset_tree.add_ignore("AddOns/Fonts/.*")
asset_tree.add_ignore("AddOns/GpuPrograms/.*")
asset_tree.add_ignore("AddOns/Icons/.*")
asset_tree.add_ignore("AddOns/Imagefiles/.*")
asset_tree.add_ignore("AddOns/Interface/FrameXML/.*")
asset_tree.add_ignore("AddOns/Interface/Imagesets/.*")
asset_tree.add_ignore("AddOns/Materials/.*")
asset_tree.add_ignore("AddOns/Meshes/.*")
asset_tree.add_ignore("AddOns/Misc/.*")
asset_tree.add_ignore("AddOns/Physics/.*")
# -- Base media package ------------------------------------------------------
# Blanket-ignore the AddOns subtrees, then explicitly register each asset that
# belongs to the "Base" package.
# add_asset_path(package, path): registers *path* as a member of *package* —
# presumably so the packager copies it into the media tree; TODO confirm
# against the AssetTree implementation.
asset_tree.add_ignore("AddOns/Scripts/.*")
asset_tree.add_ignore("AddOns/Skeletons/.*")
asset_tree.add_ignore("AddOns/Sounds/.*")
asset_tree.add_ignore("AddOns/SpeedTree/.*")
asset_tree.add_ignore("AddOns/Textures/.*")
asset_tree.add_asset_path("Base", "Scripts/Standalone.py")
# client gpu programs
asset_tree.add_asset_path("Base", "GpuPrograms/DetailVeg.cg")
asset_tree.add_asset_path("Base", "GpuPrograms/DiffuseBump.cg")
asset_tree.add_asset_path("Base", "GpuPrograms/Compound_Basic_Skinned.cg")
asset_tree.add_asset_path("Base", "GpuPrograms/Multiverse.program")
# common to the tools package
asset_tree.add_asset_path("Base", "GpuPrograms/Ocean.cg")
asset_tree.add_asset_path("Base", "GpuPrograms/Terrain.cg")
asset_tree.add_asset_path("Base", "GpuPrograms/Trees.cg")
# end common
# physics meshes
asset_tree.add_asset_path("Base", "Meshes/unit_box.mesh")
asset_tree.add_asset_path("Base", "Meshes/unit_cylinder.mesh")
asset_tree.add_asset_path("Base", "Meshes/unit_sphere.mesh")
# end physics
# tool mesh
# end tool
# non-physics and non-tool meshes
asset_tree.add_asset_path("Base", "Meshes/bang.mesh")
asset_tree.add_asset_path("Base", "Meshes/tiny_cube.mesh")
# end non-physics and non-tool meshes
# key materials
asset_tree.add_asset_path("Base", "Materials/bang.material")
asset_tree.add_asset_path("Base", "Materials/DetailVeg.material")
asset_tree.add_asset_path("Base", "Materials/MVSMTerrain.material")
asset_tree.add_asset_path("Base", "Materials/Multiverse.material")
asset_tree.add_asset_path("Base", "Materials/Ocean.material")
asset_tree.add_asset_path("Base", "Materials/tiny_cube.material")
asset_tree.add_asset_path("Base", "Materials/Trees.material")
asset_tree.add_asset_path("Base", "Materials/Water.material")
# end key materials
# physics
asset_tree.add_asset_path("Base", "Materials/unit_box.material")
asset_tree.add_asset_path("Base", "Materials/unit_cylinder.material")
asset_tree.add_asset_path("Base", "Materials/unit_sphere.material")
# end physics
# base sound files
#asset_tree.add_asset_path("Base", "Sounds/ambient_hills_day.ogg")
#asset_tree.add_asset_path("Base", "Sounds/human_run_grass.ogg")
# key textures
asset_tree.add_asset_path("Base", "Textures/DetailVegAtlas.dds")
# imageset for the DetailVegAtlas
asset_tree.add_asset_path("Base", "Textures/DetailVeg.imageset")
asset_tree.add_asset_path("Base", "Textures/MultiverseImageset.png")
asset_tree.add_asset_path("Base", "Textures/loadscreen.dds")
asset_tree.add_asset_path("Base", "Textures/mv_skybox0001.dds")
asset_tree.add_asset_path("Base", "Textures/mv_skybox0002.dds")
asset_tree.add_asset_path("Base", "Textures/mv_skybox0003.dds")
asset_tree.add_asset_path("Base", "Textures/mv_skybox0004.dds")
asset_tree.add_asset_path("Base", "Textures/mv_skybox0005.dds")
asset_tree.add_asset_path("Base", "Textures/mv_skybox0006.dds")
asset_tree.add_asset_path("Base", "Textures/noon_j_back.dds")
asset_tree.add_asset_path("Base", "Textures/noon_j_front.dds")
asset_tree.add_asset_path("Base", "Textures/noon_j_left.dds")
asset_tree.add_asset_path("Base", "Textures/noon_j_right.dds")
asset_tree.add_asset_path("Base", "Textures/noon_j_top.dds")
asset_tree.add_asset_path("Base", "Textures/sandy_path.dds")
asset_tree.add_asset_path("Base", "Textures/splatting_grass.dds")
asset_tree.add_asset_path("Base", "Textures/splatting_rock.dds")
asset_tree.add_asset_path("Base", "Textures/splatting_sand.dds")
asset_tree.add_asset_path("Base", "Textures/splatting_snow.dds")
asset_tree.add_asset_path("Base", "Textures/Water02.dds")
asset_tree.add_asset_path("Base", "Textures/waves.dds")
asset_tree.add_asset_path("Base", "Textures/White.dds")
#end key textures
# -- SpeedTree vegetation assets ---------------------------------------------
# Composite/bark/self-shadow textures plus the .spt tree definitions used by
# the SpeedTree renderer; most of this set is shared with the tools package.
# SpeedTree textures
# common to the tools package
asset_tree.add_asset_path("Base", "Textures/AmericanBoxwood_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/AmericanBoxwoodCluster_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/AppleTree_SelfShadow.dds")
asset_tree.add_asset_path("Base", "Textures/AppleTreeBark.dds")
asset_tree.add_asset_path("Base", "Textures/AppleTreeBarkNormals.dds")
asset_tree.add_asset_path("Base", "Textures/Azalea_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/AzaleaPatch_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/AzaleaPatchPink_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/AzaleaPink_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/Beech_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/Beech_SelfShadow.dds")
asset_tree.add_asset_path("Base", "Textures/BeechBark.dds")
asset_tree.add_asset_path("Base", "Textures/BeechBarkNormals.dds")
asset_tree.add_asset_path("Base", "Textures/BeechFall_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/BeechWinter_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/CurlyPalm_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/CurlyPalm_SelfShadow.dds")
asset_tree.add_asset_path("Base", "Textures/CurlyPalmBark.dds")
asset_tree.add_asset_path("Base", "Textures/CurlyPalmBarkNormals.dds")
asset_tree.add_asset_path("Base", "Textures/CurlyPalmCluster_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/FraserFir_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/FraserFir_SelfShadow.dds")
asset_tree.add_asset_path("Base", "Textures/FraserFirBark.dds")
asset_tree.add_asset_path("Base", "Textures/FraserFirBarkNormals.dds")
asset_tree.add_asset_path("Base", "Textures/FraserFirCluster_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/FraserFirCluster_SelfShadow.dds")
asset_tree.add_asset_path("Base", "Textures/FraserFirClusterSnow_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/FraserFirSnow_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/RDApple_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/RDAppleApples_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/RDAppleSpring_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/RDAppleWinter_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/SugarPine_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/SugarPine_SelfShadow.dds")
asset_tree.add_asset_path("Base", "Textures/SugarPineBark.dds")
asset_tree.add_asset_path("Base", "Textures/SugarPineBarkNormals.dds")
asset_tree.add_asset_path("Base", "Textures/SugarPineWinter_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/UmbrellaThorn_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/UmbrellaThorn_SelfShadow.dds")
asset_tree.add_asset_path("Base", "Textures/UmbrellaThornBark.dds")
asset_tree.add_asset_path("Base", "Textures/UmbrellaThornBarkNormals.dds")
asset_tree.add_asset_path("Base", "Textures/UmbrellaThornDead_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/UmbrellaThornFlowers_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/VenusTree_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/VenusTree_SelfShadow.dds")
asset_tree.add_asset_path("Base", "Textures/VenusTreeBark.dds")
asset_tree.add_asset_path("Base", "Textures/VenusTreeBarkNormals.dds")
asset_tree.add_asset_path("Base", "Textures/WeepingWillow_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/WeepingWillow_SelfShadow.dds")
asset_tree.add_asset_path("Base", "Textures/WeepingWillowBark.dds")
asset_tree.add_asset_path("Base", "Textures/WeepingWillowBarkNormals.dds")
asset_tree.add_asset_path("Base", "Textures/WeepingWillowFall_Composite.dds")
asset_tree.add_asset_path("Base", "Textures/WeepingWillowWinter_Composite.dds")
# end common
# end speedtree textures
# SpeedTree trees
# common to the tools package
asset_tree.add_asset_path("Base", "SpeedTree/AmericanBoxwood_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/AmericanBoxwoodCluster_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/Azalea_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/Azalea_RT_Pink.spt")
asset_tree.add_asset_path("Base", "SpeedTree/AzaleaPatch_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/AzaleaPatch_RT_Pink.spt")
asset_tree.add_asset_path("Base", "SpeedTree/Beech_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/Beech_RT_Fall.spt")
asset_tree.add_asset_path("Base", "SpeedTree/Beech_RT_Winter.spt")
asset_tree.add_asset_path("Base", "SpeedTree/CurlyPalm_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/CurlyPalmCluster_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/FraserFir_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/FraserFir_RT_Snow.spt")
asset_tree.add_asset_path("Base", "SpeedTree/FraserFirCluster_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/FraserFirCluster_RT_Snow.spt")
asset_tree.add_asset_path("Base", "SpeedTree/RDApple_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/RDApple_RT_Apples.spt")
asset_tree.add_asset_path("Base", "SpeedTree/RDApple_RT_Spring.spt")
asset_tree.add_asset_path("Base", "SpeedTree/RDApple_RT_Winter.spt")
asset_tree.add_asset_path("Base", "SpeedTree/SugarPine_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/SugarPine_RT_Winter.spt")
asset_tree.add_asset_path("Base", "SpeedTree/UmbrellaThorn_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/UmbrellaThorn_RT_Dead.spt")
asset_tree.add_asset_path("Base", "SpeedTree/UmbrellaThorn_RT_Flowers.spt")
asset_tree.add_asset_path("Base", "SpeedTree/VenusTree_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/WeepingWillow_RT.spt")
asset_tree.add_asset_path("Base", "SpeedTree/WeepingWillow_RT_Fall.spt")
asset_tree.add_asset_path("Base", "SpeedTree/WeepingWillow_RT_Winter.spt")
asset_tree.add_asset_path("Base", "SpeedTree/demoWind.ini")
# end common
# end trees
# -- UI (Interface) assets ---------------------------------------------------
# Font, UI texture atlases (Imagefiles/*.tga), FrameXML layout+script pairs,
# and the imageset XML definitions that map the atlases into named regions.
# Interface
asset_tree.add_asset_path("Base", "Fonts/MUFN____.TTF")
asset_tree.add_asset_path("Base", "Imagefiles/Buttons.tga")
asset_tree.add_asset_path("Base", "Imagefiles/CharacterFrame.tga")
asset_tree.add_asset_path("Base", "Imagefiles/ChatFrame.tga")
asset_tree.add_asset_path("Base", "Imagefiles/Common.tga")
asset_tree.add_asset_path("Base", "Imagefiles/ContainerFrame.tga")
asset_tree.add_asset_path("Base", "Imagefiles/Cursor.tga")
asset_tree.add_asset_path("Base", "Imagefiles/DialogFrame.tga")
asset_tree.add_asset_path("Base", "Imagefiles/Icons.tga")
asset_tree.add_asset_path("Base", "Imagefiles/MvButtons.tga")
asset_tree.add_asset_path("Base", "Imagefiles/MvChatFrame.tga")
asset_tree.add_asset_path("Base", "Imagefiles/MvQuestFrame.tga")
asset_tree.add_asset_path("Base", "Imagefiles/PaperDollInfoFrame.tga")
asset_tree.add_asset_path("Base", "Imagefiles/QuestFrame.tga")
asset_tree.add_asset_path("Base", "Imagefiles/TargetingFrame.tga")
asset_tree.add_asset_path("Base", "Imagefiles/Tooltips.tga")
asset_tree.add_asset_path("Base", "Interface/FrameXML/Library.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvActionBar.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvActionBar.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvActionButton.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvCharacter.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvCharacter.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvChat.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvChat.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvContainer.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvContainer.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvDialog.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvDialog.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvFonts.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvFrame.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvItemButton.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvPlayer.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvPlayer.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvSocialBar.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvSocialBar.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvStatus.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvStatus.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvTarget.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvTarget.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvTooltip.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvUnit.py")
asset_tree.add_asset_path("Base", "Interface/FrameXML/MvUnit.xml")
asset_tree.add_asset_path("Base", "Interface/FrameXML/basic.toc")
asset_tree.add_asset_path("Base", "Interface/FrameXML/social.toc")
asset_tree.add_asset_path("Base", "Interface/FrameXML/betaworld.toc")
asset_tree.add_asset_path("Base", "Interface/Imagesets/Buttons.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/CharacterFrame.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/ChatFrame.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/Common.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/ContainerFrame.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/Cursor.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/DialogFrame.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/Icons.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/MvButtons.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/MvChat.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/MvQuestFrame.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/PaperDollInfoFrame.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/QuestFrame.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/TargetingFrame.xml")
asset_tree.add_asset_path("Base", "Interface/Imagesets/Tooltips.xml")
# Misc.
asset_tree.add_asset_path("Base", "Icons/AxiomIcon.ico")
# -- "Mars" world package ----------------------------------------------------
# World-specific UI overrides: its own table of contents plus player/target/
# unit frame layouts and scripts.
# Mars files
asset_tree.add_asset_path("Mars", "Interface/FrameXML/mars.toc")
asset_tree.add_asset_path("Mars", "Interface/FrameXML/MarsPlayer.py")
asset_tree.add_asset_path("Mars", "Interface/FrameXML/MarsPlayer.xml")
asset_tree.add_asset_path("Mars", "Interface/FrameXML/MarsTarget.py")
asset_tree.add_asset_path("Mars", "Interface/FrameXML/MarsTarget.xml")
asset_tree.add_asset_path("Mars", "Interface/FrameXML/MarsUnit.py")
asset_tree.add_asset_path("Mars", "Interface/FrameXML/MarsUnit.xml")
# -- "Particles" package -----------------------------------------------------
# Materials, .particle effect scripts and textures for the stock particle
# effects. Registration order preserved from the original manifest.
asset_tree.add_asset_path("Particles", "Materials/aureola.material")
asset_tree.add_asset_path("Particles", "Materials/bigflame.material")
asset_tree.add_asset_path("Particles", "Materials/droplet.material")
asset_tree.add_asset_path("Particles", "Materials/explosion.material")
asset_tree.add_asset_path("Particles", "Materials/flare.material")
asset_tree.add_asset_path("Particles", "Materials/flare2.material")
asset_tree.add_asset_path("Particles", "Materials/flaretrail.material")
asset_tree.add_asset_path("Particles", "Materials/lensflare.material")
asset_tree.add_asset_path("Particles", "Materials/rain.material")
asset_tree.add_asset_path("Particles", "Materials/ringflare.material")
asset_tree.add_asset_path("Particles", "Materials/ringflare2.material")
asset_tree.add_asset_path("Particles", "Materials/smoke.material")
asset_tree.add_asset_path("Particles", "Materials/smoke2.material")
asset_tree.add_asset_path("Particles", "Materials/streak.material")
asset_tree.add_asset_path("Particles", "Materials/waterfountain.material")
# Particle scripts. The trailing semicolons the original carried here were
# redundant in Python and inconsistent with the rest of this file; removed.
asset_tree.add_asset_path("Particles", "Particles/aureola.particle")
asset_tree.add_asset_path("Particles", "Particles/blast.particle")
asset_tree.add_asset_path("Particles", "Particles/blast2.particle")
asset_tree.add_asset_path("Particles", "Particles/downpour.particle")
asset_tree.add_asset_path("Particles", "Particles/eruption.particle")
asset_tree.add_asset_path("Particles", "Particles/flame.particle")
asset_tree.add_asset_path("Particles", "Particles/floatyGreeny.particle")
asset_tree.add_asset_path("Particles", "Particles/fountain.particle")
asset_tree.add_asset_path("Particles", "Particles/greenyNimbus.particle")
asset_tree.add_asset_path("Particles", "Particles/jetengine1.particle")
asset_tree.add_asset_path("Particles", "Particles/jetengine2.particle")
asset_tree.add_asset_path("Particles", "Particles/pentagram.particle")
asset_tree.add_asset_path("Particles", "Particles/purpleFountain.particle")
asset_tree.add_asset_path("Particles", "Particles/rain.particle")
asset_tree.add_asset_path("Particles", "Particles/ringOfFire.particle")
asset_tree.add_asset_path("Particles", "Particles/smoke.particle")
asset_tree.add_asset_path("Particles", "Particles/smoke2.particle")
asset_tree.add_asset_path("Particles", "Particles/space.particle")
asset_tree.add_asset_path("Particles", "Particles/waterfall.particle")
asset_tree.add_asset_path("Particles", "Particles/waterfountain.particle")
asset_tree.add_asset_path("Particles", "Textures/aureola.dds")
asset_tree.add_asset_path("Particles", "Textures/basic_droplet.dds")
asset_tree.add_asset_path("Particles", "Textures/bigflame.png")
asset_tree.add_asset_path("Particles", "Textures/blue_flare.dds")
asset_tree.add_asset_path("Particles", "Textures/blue_flare.jpg")
asset_tree.add_asset_path("Particles", "Textures/explosion.dds")
asset_tree.add_asset_path("Particles", "Textures/Flare.dds")
asset_tree.add_asset_path("Particles", "Textures/Flare.jpg")
asset_tree.add_asset_path("Particles", "Textures/flaretrail.dds")
asset_tree.add_asset_path("Particles", "Textures/ring_flare.dds")
asset_tree.add_asset_path("Particles", "Textures/ring_flare2.dds")
# NOTE(review): smoke.dds and smokecolors.dds were each registered twice in
# a row (exact consecutive duplicates, almost certainly copy-paste); the
# redundant second calls were dropped.
asset_tree.add_asset_path("Particles", "Textures/smoke.dds")
asset_tree.add_asset_path("Particles", "Textures/smokecolors.dds")
asset_tree.add_asset_path("Particles", "Textures/streak.dds")
# -- "SocialWorld" package, part 1 -------------------------------------------
# UI atlas, statue/scenery materials and meshes for the social world, plus its
# renamed asset-config file.
# Social World Assets
asset_tree.add_asset_path("SocialWorld", "Imagefiles/ActionbarIcons.dds")
asset_tree.add_asset_path("SocialWorld", "Interface/Imagesets/ActionbarIcons.xml")
asset_tree.add_asset_path("SocialWorld", "Materials/bench.material")
asset_tree.add_asset_path("SocialWorld", "Materials/cobblestone.material")
asset_tree.add_asset_path("SocialWorld", "Materials/cobblestone_marble.material")
asset_tree.add_asset_path("SocialWorld", "Materials/colonnade.material")
asset_tree.add_asset_path("SocialWorld", "Materials/gazebo_green.material")
asset_tree.add_asset_path("SocialWorld", "Materials/gazebo_sand.material")
asset_tree.add_asset_path("SocialWorld", "Materials/pool.material")
# Statue variants: st_/stb_ prefix, f/m = female/male pose model,
# drk/lt = dark/light marble, _full = full-detail variant — presumed from the
# naming scheme; TODO confirm.
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_cdance_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_cdance_drk_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_cdance_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_cdance_lt_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_dagger_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_dagger_drk_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_dagger_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_dagger_lt_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_dance_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_dance_drk_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_dance_drk_shade_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_dance_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_dance_lt_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_kneel_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_kneel_drk_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_kneel_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_kneel_lt_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_lean_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_lean_drk_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_lean_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_lean_lt_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_sword_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_sword_drk_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_sword_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_f_sword_lt_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_m_lean_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_m_lean_drk_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_m_lean_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_m_lean_lt_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_m_throw_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_m_throw_drk_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_m_throw_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/st_m_throw_lt_full.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stage.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_cdance_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_cdance_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_dagger_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_dagger_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_dance_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_dance_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_kneel_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_kneel_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_lean_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_lean_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_sword_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_f_sword_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_m_lean_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_m_lean_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_m_throw_drk.material")
asset_tree.add_asset_path("SocialWorld", "Materials/stb_m_throw_lt.material")
asset_tree.add_asset_path("SocialWorld", "Materials/sword.material")
asset_tree.add_asset_path("SocialWorld", "Materials/water_trim_corner_sqr.material")
asset_tree.add_asset_path("SocialWorld", "Materials/water_trim_corner_tri_eq.material")
asset_tree.add_asset_path("SocialWorld", "Materials/water_trim_corner_tri_odd.material")
asset_tree.add_asset_path("SocialWorld", "Materials/water_trim_long_block.material")
asset_tree.add_asset_path("SocialWorld", "Materials/water_trim_octagon.material")
asset_tree.add_asset_path("SocialWorld", "Meshes/bench.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/bench_01.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/cobblestone.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/cobblestone_marble.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/colonnade.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/gazebo_green.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/gazebo_sand.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/pool.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_cdance_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_cdance_drk_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_cdance_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_cdance_lt_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_dagger_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_dagger_drk_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_dagger_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_dagger_lt_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_dance_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_dance_drk_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_dance_drk_shade_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_dance_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_dance_lt_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_kneel_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_kneel_drk_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_kneel_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_kneel_lt_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_lean_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_lean_drk_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_lean_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_lean_lt_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_sword_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_sword_drk_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_sword_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_f_sword_lt_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_m_lean_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_m_lean_drk_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_m_lean_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_m_lean_lt_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_m_throw_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_m_throw_drk_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_m_throw_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/st_m_throw_lt_full.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stage.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_cdance_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_cdance_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_dagger_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_dagger_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_dance_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_dance_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_kneel_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_kneel_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_lean_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_lean_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_sword_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_f_sword_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_m_lean_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_m_lean_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_m_throw_drk.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/stb_m_throw_lt.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/sword.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/water_trim_corner_sqr.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/water_trim_corner_tri_eq.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/water_trim_corner_tri_odd.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/water_trim_long_block.mesh")
asset_tree.add_asset_path("SocialWorld", "Meshes/water_trim_octagon.mesh")
# Three-arg form: source path first, then the name it is published under —
# presumably add_asset_path(package, src, dest); TODO confirm signature.
asset_tree.add_asset_path("SocialWorld", "Misc/SocialWorld_AssetConfig.xml", "AssetConfig.xml")
# Water-fountain effect assets republished under the SocialWorld package
# (same files also appear in the Particles package above).
asset_tree.add_asset_path("SocialWorld", "Materials/waterfountain.material")
# Trailing semicolon removed — redundant in Python and inconsistent with the
# surrounding statements.
asset_tree.add_asset_path("SocialWorld", "Particles/waterfountain.particle")
asset_tree.add_asset_path("SocialWorld", "Textures/Flare.dds")
# -- "SocialWorld" package, part 2 -------------------------------------------
# Collision (.physics) companions for the meshes above, then textures; the
# load screen is published under the generic "Textures/loadscreen.dds" name.
asset_tree.add_asset_path("SocialWorld", "Physics/bench.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/cobblestone.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/cobblestone_marble.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/colonnade.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/gazebo_green.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/gazebo_sand.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/pool.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_cdance_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_cdance_drk_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_cdance_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_cdance_lt_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_dagger_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_dagger_drk_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_dagger_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_dagger_lt_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_dance_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_dance_drk_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_dance_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_dance_lt_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_kneel_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_kneel_drk_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_kneel_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_kneel_lt_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_lean_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_lean_drk_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_lean_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_lean_lt_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_sword_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_sword_drk_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_sword_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_f_sword_lt_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_m_lean_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_m_lean_drk_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_m_lean_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_m_lean_lt_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_m_throw_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_m_throw_drk_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_m_throw_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/st_m_throw_lt_full.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stage.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_cdance_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_cdance_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_dagger_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_dagger_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_dance_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_dance_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_kneel_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_kneel_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_lean_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_lean_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_sword_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_f_sword_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_m_lean_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_m_lean_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_m_throw_drk.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/stb_m_throw_lt.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/water_trim_corner_sqr.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/water_trim_corner_tri_eq.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/water_trim_corner_tri_odd.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/water_trim_long_block.physics")
asset_tree.add_asset_path("SocialWorld", "Physics/water_trim_octagon.physics")
#asset_tree.add_asset_path("SocialWorld", "Sounds/social_ambient.ogg")
#asset_tree.add_asset_path("SocialWorld", "Sounds/stage.ogg")
asset_tree.add_asset_path("SocialWorld", "Textures/bench_marble.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/cobblestone_marble.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/cobblestones_seemless.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/colonnade.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/female_face_marble_blue_grey.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/female_face_marble_white_rev_darker.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/female_marble_blue_grey.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/female_marble_white_rev_darker.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/fountain_water.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/gazebo_a.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/gazebo_a_floor.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/gazebo_b.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/gazebo_b_floor.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/male_head_marble_blue_grey.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/male_marble_blue_grey.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/male_marble_white_rev_darker.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/marble_blue_grey.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/marble_white_rev_darker.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/pool_marble.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/Stage_01.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/sword_005.dds")
asset_tree.add_asset_path("SocialWorld", "Textures/water_edging.dds")
# Three-arg form: the social-world load screen is published under the generic
# load-screen name — presumably (package, src, dest); TODO confirm signature.
asset_tree.add_asset_path("SocialWorld", "Textures/loadscreen_social.dds", "Textures/loadscreen.dds")
# male rocketbox assets
# Shared avatar skeletons (female/male, medium- and low-poly LODs).
asset_tree.add_asset_path("Rocketbox", "Skeletons/rocketbox_f_mediumpoly.skeleton")
asset_tree.add_asset_path("Rocketbox", "Skeletons/rocketbox_f_lowpoly.skeleton")
asset_tree.add_asset_path("Rocketbox", "Skeletons/rocketbox_m_mediumpoly.skeleton")
asset_tree.add_asset_path("Rocketbox", "Skeletons/rocketbox_m_lowpoly.skeleton")
# Each avatar below registers the same bundle: medium/low-poly materials and
# meshes, a medium-poly physics mesh, and one diffuse texture (the numeric
# texture suffix varies per avatar).
asset_tree.add_asset_path("Rocketbox", "Materials/business03_m_mediumpoly.material")
asset_tree.add_asset_path("Rocketbox", "Materials/business03_m_lowpoly.material")
asset_tree.add_asset_path("Rocketbox", "Meshes/business03_m_mediumpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Meshes/business03_m_lowpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Physics/business03_m_mediumpoly.physics")
asset_tree.add_asset_path("Rocketbox", "Textures/business03_m_35.dds")
asset_tree.add_asset_path("Rocketbox", "Materials/business05_m_mediumpoly.material")
asset_tree.add_asset_path("Rocketbox", "Materials/business05_m_lowpoly.material")
asset_tree.add_asset_path("Rocketbox", "Meshes/business05_m_mediumpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Meshes/business05_m_lowpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Physics/business05_m_mediumpoly.physics")
asset_tree.add_asset_path("Rocketbox", "Textures/business05_m_25.dds")
asset_tree.add_asset_path("Rocketbox", "Materials/casual03_m_mediumpoly.material")
asset_tree.add_asset_path("Rocketbox", "Materials/casual03_m_lowpoly.material")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual03_m_mediumpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual03_m_lowpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Physics/casual03_m_mediumpoly.physics")
asset_tree.add_asset_path("Rocketbox", "Textures/casual03_m_25.dds")
asset_tree.add_asset_path("Rocketbox", "Materials/casual04_m_mediumpoly.material")
asset_tree.add_asset_path("Rocketbox", "Materials/casual04_m_lowpoly.material")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual04_m_mediumpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual04_m_lowpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Physics/casual04_m_mediumpoly.physics")
asset_tree.add_asset_path("Rocketbox", "Textures/casual04_m_25.dds")
asset_tree.add_asset_path("Rocketbox", "Materials/casual07_m_mediumpoly.material")
asset_tree.add_asset_path("Rocketbox", "Materials/casual07_m_lowpoly.material")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual07_m_mediumpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual07_m_lowpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Physics/casual07_m_mediumpoly.physics")
asset_tree.add_asset_path("Rocketbox", "Textures/casual07_m_25.dds")
asset_tree.add_asset_path("Rocketbox", "Materials/casual10_m_mediumpoly.material")
asset_tree.add_asset_path("Rocketbox", "Materials/casual10_m_lowpoly.material")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual10_m_mediumpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual10_m_lowpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Physics/casual10_m_mediumpoly.physics")
asset_tree.add_asset_path("Rocketbox", "Textures/casual10_m_30.dds")
asset_tree.add_asset_path("Rocketbox", "Materials/casual16_m_mediumpoly.material")
asset_tree.add_asset_path("Rocketbox", "Materials/casual16_m_lowpoly.material")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual16_m_mediumpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual16_m_lowpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Physics/casual16_m_mediumpoly.physics")
asset_tree.add_asset_path("Rocketbox", "Textures/casual16_m_35.dds")
asset_tree.add_asset_path("Rocketbox", "Materials/casual21_m_mediumpoly.material")
asset_tree.add_asset_path("Rocketbox", "Materials/casual21_m_lowpoly.material")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual21_m_mediumpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Meshes/casual21_m_lowpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Physics/casual21_m_mediumpoly.physics")
asset_tree.add_asset_path("Rocketbox", "Textures/casual21_m_35.dds")
asset_tree.add_asset_path("Rocketbox", "Materials/sportive01_m_mediumpoly.material")
asset_tree.add_asset_path("Rocketbox", "Materials/sportive01_m_lowpoly.material")
asset_tree.add_asset_path("Rocketbox", "Meshes/sportive01_m_mediumpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Meshes/sportive01_m_lowpoly.mesh")
asset_tree.add_asset_path("Rocketbox", "Physics/sportive01_m_mediumpoly.physics")
asset_tree.add_asset_path("Rocketbox", "Textures/sportive01_m_20.dds")
asset_tree.add_asset_path("Rocketbox", "Materials/sportive09_m_mediumpoly.material")
asset_tree.add_asset_path("Rocketbox", "Materials/sportive09_m_lowpoly.material")
asset_tree.add_asset_path("Rocketbox", "Meshes/sportive09_m_mediumpoly.mesh")
| |
# filename: tests/test_coin_outputs.py (gh_stars: 0)
import skycoin
import tests.utils as utils
def test_TestUxBodyHash():
    """Hashing a UxBody succeeds and produces a non-null digest."""
    body, _secret = utils.makeUxBodyWithSecret()
    digest = skycoin.cipher_SHA256()
    result = skycoin.SKY_coin_UxBody_Hash(body, digest)
    assert result == skycoin.SKY_OK
    # A freshly constructed cipher_SHA256 serves as the null digest;
    # hashing must have moved the output away from it.
    assert digest != skycoin.cipher_SHA256()
def test_TestUxOutHash():
    """A UxOut hashes identically to its body; the head is ignored."""
    body, _secret = utils.makeUxBodyWithSecret()
    out, _ = utils.makeUxOutWithSecret()
    out.Body = body
    body_digest = skycoin.cipher_SHA256()
    out_digest = skycoin.cipher_SHA256()
    assert skycoin.SKY_coin_UxBody_Hash(body, body_digest) == skycoin.SKY_OK
    assert skycoin.SKY_coin_UxOut_Hash(out, out_digest) == skycoin.SKY_OK
    assert body_digest == out_digest
    # Mutating the head must leave the output hash unchanged.
    head = skycoin.coin__UxHead()
    head.Time = 0
    head.BkSeq = 1
    out.Head = head
    assert skycoin.SKY_coin_UxOut_Hash(out, out_digest) == skycoin.SKY_OK
    assert body_digest == out_digest
def test_TestUxOutSnapshotHash():
    """The snapshot hash must change when any head or body field changes.

    NOTE(review): `uxo_2 = uxo` is a Python alias, not a copy; the test
    relies on the SWIG `Head`/`Body` assignments below to carry the mutation
    into the wrapped struct. Each section mutates one field and re-assigns,
    then checks the snapshot hash differs from the baseline `hn`.
    """
    # Build a fully populated UxOut as the baseline.
    p = skycoin.cipher_PubKey()
    s = skycoin.cipher_SecKey()
    skycoin.SKY_cipher_GenerateKeyPair(p, s)
    uxb = skycoin.coin__UxBody()
    _, b = skycoin.SKY_cipher_RandByte(128)
    h = skycoin.cipher_SHA256()
    assert skycoin.SKY_cipher_SumSHA256(b, h) == skycoin.SKY_OK
    uxb.SetSrcTransaction(h.toStr())
    a = skycoin.cipher__Address()
    skycoin.SKY_cipher_AddressFromPubKey(p, a)
    uxb.Address = a
    uxb.Coins = int(1e6)
    uxb.Hours = int(100)
    uxo = skycoin.coin__UxOut()
    uxh = skycoin.coin__UxHead()
    uxh.Time = 100
    uxh.BkSeq = 2
    uxo.Head = uxh
    uxo.Body = uxb
    hn = skycoin.cipher_SHA256()  # baseline snapshot hash
    assert skycoin.SKY_coin_UxOut_SnapshotHash(uxo, hn) == skycoin.SKY_OK
    # snapshot hash should be dependent on every field in body and head
    # Head Time
    uxo_2 = uxo
    uxh.Time = 20
    uxo_2.Head = uxh
    hn_2 = skycoin.cipher_SHA256()
    assert skycoin.SKY_coin_UxOut_SnapshotHash(uxo_2, hn_2) == skycoin.SKY_OK
    assert hn != hn_2
    # Head BkSeq
    uxo_2 = uxo
    uxh.BkSeq = 4
    uxo_2.Head = uxh
    hn_2 = skycoin.cipher_SHA256()
    assert skycoin.SKY_coin_UxOut_SnapshotHash(uxo_2, hn_2) == skycoin.SKY_OK
    assert hn != hn_2
    # Body SetSrcTransaction
    # NOTE(review): the body sections construct a fresh coin__UxBody, so
    # every non-mutated body field reverts to its zero value as well --
    # presumably acceptable since any difference still changes the hash.
    uxo_2 = uxo
    uxb = skycoin.coin__UxBody()
    _, b = skycoin.SKY_cipher_RandByte(128)
    h = skycoin.cipher_SHA256()
    assert skycoin.SKY_cipher_SumSHA256(b, h) == skycoin.SKY_OK
    uxb.SetSrcTransaction(h.toStr())
    uxo_2.Body = uxb
    hn_2 = skycoin.cipher_SHA256()
    assert skycoin.SKY_coin_UxOut_SnapshotHash(uxo_2, hn_2) == skycoin.SKY_OK
    assert hn != hn_2
    # Body Address
    p_2 = skycoin.cipher_PubKey()
    s_2 = skycoin.cipher_SecKey()
    skycoin.SKY_cipher_GenerateKeyPair(p_2, s_2)
    a_2 = skycoin.cipher__Address()
    skycoin.SKY_cipher_AddressFromPubKey(p_2, a_2)
    uxo_2 = uxo
    uxb = skycoin.coin__UxBody()
    uxb.Address = a_2
    uxo_2.Body = uxb
    hn_2 = skycoin.cipher_SHA256()
    assert skycoin.SKY_coin_UxOut_SnapshotHash(uxo_2, hn_2) == skycoin.SKY_OK
    assert hn != hn_2
    # Body Coins
    uxo_2 = uxo
    uxb = skycoin.coin__UxBody()
    uxb.Coins = int(2)
    uxo_2.Body = uxb
    hn_2 = skycoin.cipher_SHA256()
    assert skycoin.SKY_coin_UxOut_SnapshotHash(uxo_2, hn_2) == skycoin.SKY_OK
    assert hn != hn_2
    # Body Hours
    uxo_2 = uxo
    uxb = skycoin.coin__UxBody()
    uxb.Hours = int(2)
    uxo_2.Body = uxb
    hn_2 = skycoin.cipher_SHA256()
    assert skycoin.SKY_coin_UxOut_SnapshotHash(uxo_2, hn_2) == skycoin.SKY_OK
    assert hn != hn_2
def test_TestUxOutCoinHours():
    """Exercise SKY_coin_UxOut_CoinHours over normal, boundary and overflow inputs.

    The assertions below encode the accrual rule: an output earns coin hours
    in proportion to its coin balance (1 coin == 1000000 droplets) and the
    time elapsed since its head time.
    """
    p = skycoin.cipher_PubKey()
    s = skycoin.cipher_SecKey()
    skycoin.SKY_cipher_GenerateKeyPair(p, s)
    uxb = skycoin.coin__UxBody()
    _, b = skycoin.SKY_cipher_RandByte(128)
    h = skycoin.cipher_SHA256()
    assert skycoin.SKY_cipher_SumSHA256(b, h) == skycoin.SKY_OK
    uxb.SetSrcTransaction(h.toStr())
    a = skycoin.cipher__Address()
    skycoin.SKY_cipher_AddressFromPubKey(p, a)
    uxb.Address = a
    uxb.Coins = int(1e6)  # exactly one whole coin, expressed in droplets
    uxb.Hours = int(100)
    uxo = skycoin.coin__UxOut()
    uxh = skycoin.coin__UxHead()
    uxh.Time = 100
    uxh.BkSeq = 2
    uxo.Head = uxh
    uxo.Body = uxb
    # Less than an hour passed: only the initial hours are reported.
    now = uxh.Time + 100
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    # BUGFIX: the original compared against uxh.Time, which only passed
    # because Time and Hours both happen to be 100; the contract here is
    # "initial hours unchanged".
    assert hours == uxb.Hours
    # 1 hour passed
    now = uxh.Time + 3600
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == uxb.Hours + uxb.Coins // 1000000
    # 6 hours passed
    now = uxh.Time + 3600 * 6
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == uxb.Hours + (uxb.Coins // 1000000) * 6
    # Time is backwards (treated as no hours passed)
    now = uxh.Time // 2
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == uxb.Hours
    # 1 hour has passed, output has 1.5 coins, should gain 1 coin hour
    uxb.Coins = 1500000
    uxo.Body = uxb  # BUGFIX: body was mutated without re-assigning it to uxo
    now = uxh.Time + 3600
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == uxb.Hours + 1
    # 2 hours have passed, output has 1.5 coins, should gain 3 coin hours
    uxb.Coins = 1500000
    uxo.Body = uxb
    now = uxh.Time + 3600 * 2
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == uxb.Hours + 3
    # 1 second has passed, output has 3600 coins, should gain 1 coin hour
    uxb.Coins = 3600000000
    uxo.Body = uxb
    now = uxh.Time + 1
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == uxb.Hours + 1
    # 1000000 hours minus 1 second have passed, output has 1 droplet:
    # should gain 0 coin hours
    uxb.Coins = 1
    uxo.Body = uxb
    now = uxh.Time + 1000000 * 3600 - 1
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == uxb.Hours
    # 1000000 hours have passed, output has 1 droplet: should gain 1 coin hour
    uxb.Coins = 1
    uxo.Body = uxb
    now = uxh.Time + 1000000 * 3600
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == uxb.Hours + 1
    # No hours passed, using initial coin hours
    uxb.Coins = 1000000000
    uxb.Hours = 1000 * 1000
    uxo.Body = uxb
    now = uxh.Time
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == uxb.Hours
    # One hour passed, using initial coin hours
    now = uxh.Time + 3600
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == uxb.Hours + 1000000000 // 1000000
    # No hours passed and no hours to begin with
    uxb.Hours = 0
    uxo.Body = uxb
    now = uxh.Time
    err, hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_OK
    assert hours == 0
    # Centuries have passed, time-based calculation overflows uint64
    # when calculating the whole coin seconds
    uxb.Coins = 2000000
    uxo.Body = uxb
    now = 0xFFFFFFFFFFFFFFFF
    err, _hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_ERROR
    # Centuries have passed, time-based calculation overflows uint64
    # when calculating the droplet seconds
    uxb.Coins = 1500000
    uxo.Body = uxb
    now = 0xFFFFFFFFFFFFFFFF
    err, _hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_ERROR
    # Output would overflow if given more hours, has reached its limit
    uxb.Coins = 3600000000
    uxo.Body = uxb
    now = 0xFFFFFFFFFFFFFFFF
    err, _hours = skycoin.SKY_coin_UxOut_CoinHours(uxo, now)
    assert err == skycoin.SKY_ERROR
def test_TestUxArrayCoins():
    """Summing coins over a UxArray works and detects uint64 overflow."""
    outputs = utils.makeUxArray(4)
    for out in outputs:
        out.Body.Coins = utils.Million
    err, total = skycoin.SKY_coin_UxArray_Coins(outputs)
    assert err == skycoin.SKY_OK
    assert total == int(4e6)
    # Push one balance near MaxUint64 so the sum overflows.
    outputs[2].Body.Coins = int(utils.MaxUint64 - int(1e6))
    err, _total = skycoin.SKY_coin_UxArray_Coins(outputs)
    assert err == skycoin.SKY_ERROR
def ux_Array_CoinsHours(uxa, now=0, slic=0):
    """Sum SKY_coin_UxOut_CoinHours over uxa[slic:] evaluated at time *now*.

    Reference helper mirroring UxArray.CoinHours; fails the surrounding test
    if the computation errors for any element.
    """
    total = 0
    for ux in uxa[slic:]:
        err, hours = skycoin.SKY_coin_UxOut_CoinHours(ux, now)
        # BUGFIX: validate the error before accumulating; the original added
        # the (potentially meaningless) hours value first and asserted after.
        assert err == skycoin.SKY_OK
        total += hours
    return total
def test_TestUxArrayCoinHours():
    """Coin-hour totals over a UxArray at several points in time, plus overflow."""
    outputs = utils.makeUxArray(4)
    base_time = outputs[0].Head.Time
    assert skycoin.SKY_coin_UxArray_CoinHours(outputs, 0)[1] == 400
    # 1 hour later
    assert skycoin.SKY_coin_UxArray_CoinHours(outputs, base_time + 3600)[1] == 404
    # 1.5 hours later (partial hour earns nothing extra)
    assert skycoin.SKY_coin_UxArray_CoinHours(outputs, base_time + 3600 + 1800)[1] == 404
    # 2 hours later
    assert skycoin.SKY_coin_UxArray_CoinHours(outputs, base_time + 3600 + 4600)[1] == 408
    # Force an addition overflow by pushing one head time near MaxUint64.
    outputs[2].Head.Time = utils.MaxUint64 - 100
    value = skycoin.SKY_coin_UxArray_CoinHours(outputs, outputs[2].Head.Time)[1]
    assert utils.err_CoinHours_Overflow(
        value) == skycoin.SKY_ErrAddEarnedCoinHoursAdditionOverflow
    value = skycoin.SKY_coin_UxArray_CoinHours(outputs, 1000000000000)[1]
    assert utils.err_CoinHours_Overflow(
        value) == skycoin.SKY_ErrAddEarnedCoinHoursAdditionOverflow
def test_TestUxArrayHashArray():
    """SKY_coin_UxArray_Hashes returns one matching hash per output.

    Fixes: removed leftover debug print() calls, replaced the magic 0 return
    check with skycoin.SKY_OK, and dropped the separate index-0 comparison
    that the loop already covers.
    """
    uxa = utils.makeUxArray(4)
    sha = skycoin.cipher_SHA256()
    err, hashes = skycoin.SKY_coin_UxArray_Hashes(uxa)
    assert err == skycoin.SKY_OK
    assert len(hashes) == len(uxa)
    # Every returned hash must equal the directly computed hash of its output.
    for ux, expected in zip(uxa, hashes):
        assert skycoin.SKY_coin_UxOut_Hash(ux, sha) == skycoin.SKY_OK
        assert sha == expected
def test_TestUxArrayHasDupes():
    """HasDupes is false for distinct outputs and true after duplicating one."""
    outputs = utils.makeUxArray(4)
    err, has_dupes = skycoin.SKY_coin_UxArray_HasDupes(outputs)
    assert err == skycoin.SKY_OK
    assert has_dupes == 0
    # Introduce a duplicate and re-check.
    outputs[0] = outputs[1]
    err, has_dupes = skycoin.SKY_coin_UxArray_HasDupes(outputs)
    assert err == skycoin.SKY_OK
    assert has_dupes == 1
def test_TestUxArraySub():
    """UxArray_Sub returns the elements of its first array absent from the second.

    Fix: the last two Sub calls previously ignored the returned error code.
    """
    uxa = utils.makeUxArray(4)
    uxb = utils.makeUxArray(4)
    # uxc = uxa[:1] + uxb + uxa[1:2]
    uxc = uxa[:1]
    uxc.extend(uxb)
    uxc.extend(uxa[1:2])
    # Removing uxa's members leaves exactly uxb's members.
    err, uxd = skycoin.SKY_coin_UxArray_Sub(uxc, uxa)
    assert err == skycoin.SKY_OK
    assert len(uxd) == len(uxb)
    # Removing uxb's members leaves the two uxa members present in uxc.
    err, uxd = skycoin.SKY_coin_UxArray_Sub(uxc, uxb)
    assert err == skycoin.SKY_OK
    assert len(uxd) == 2
    assert uxd == uxa[:2]
    # No intersection: subtraction returns the first array unchanged.
    err, uxd = skycoin.SKY_coin_UxArray_Sub(uxa, uxb)
    assert err == skycoin.SKY_OK  # BUGFIX: err was previously unchecked
    assert uxd == uxa
    err, uxd = skycoin.SKY_coin_UxArray_Sub(uxb, uxa)
    assert err == skycoin.SKY_OK  # BUGFIX: err was previously unchecked
    assert uxd == uxb
def manualUxArrayIsSorted(uxa):
    """Return True if *uxa* is ordered by ascending UxOut hash.

    Reference implementation used to cross-check the wrapped sort helpers.
    """
    sha_prev = skycoin.cipher_SHA256()
    sha_next = skycoin.cipher_SHA256()
    for i in range(len(uxa) - 1):
        assert skycoin.SKY_coin_UxOut_Hash(uxa[i], sha_prev) == skycoin.SKY_OK
        assert skycoin.SKY_coin_UxOut_Hash(uxa[i + 1], sha_next) == skycoin.SKY_OK
        if sha_prev > sha_next:
            # BUGFIX: return as soon as an out-of-order pair is found; the
            # original kept hashing every remaining pair after the answer was
            # known (and misspelled its flag as "isSorte").
            return False
    return True
def isUxArraySorted(uxa):
n = len(uxa)
prev = uxa
current = prev
current += 1
hash_1 = skycoin.cipher_SHA256()
hash_2 = skycoin.cipher_SHA256()
prevHash = None
currentHash = None
result = int()
for i in n:
if(prevHash == None):
result = skycoin.SKY_coin_UxOut_Hash(prev, hash_1)
assert result == skycoin.SKY_OK
prevHash = hash_1
if currentHash == None:
currentHash = hash_2
result = skycoin.SKY_coin_UxOut_Hash(current, currentHash)
assert result | |
{'Entry':1900, 'Exit':2999},
"127mm mk 34 AAC": {'Entry':1900, 'Exit':2999},
"127mm mk 41 AAC": {'Entry':1900, 'Exit':2999},
"127mm mk 41 HC": {'Entry':1900, 'Exit':2999},
"127mm mk 80 HE-PD EX-175": {'Entry':1900, 'Exit':2999},
"127mm mk 80 HE-PD mk 67": {'Entry':1900, 'Exit':2999},
"12cm/50 Mdl50 HE": {'Entry':1900, 'Exit':2999},
"130mm AK-130": {'Entry':1900, 'Exit':2999},
"130mm F-44 HE": {'Entry':1900, 'Exit':2999},
"130mm OF-42 HE-FRAG": {'Entry':1900, 'Exit':2999},
"130mm PB-42 SAP": {'Entry':1900, 'Exit':2999},
"130mm ZS-42P AA": {'Entry':1900, 'Exit':2999},
"130mm ZS-44 AA": {'Entry':1900, 'Exit':2999},
"130mm ZS-44P AA": {'Entry':1900, 'Exit':2999},
"152mm AP B-35": {'Entry':1900, 'Exit':2999},
"152mm HE OF-35": {'Entry':1900, 'Exit':2999},
"152mm SAP PB-35": {'Entry':1900, 'Exit':2999},
"20mm APT percussion": {'Entry':1900, 'Exit':2999},
"20mm F2": {'Entry':1900, 'Exit':2999},
"20mm HE-T x10": {'Entry':1900, 'Exit':2999},
"20mm HE-T x2": {'Entry':1900, 'Exit':2999},
"20mm HE-T": {'Entry':1900, 'Exit':2999},
"20mm HEI Electric": {'Entry':1900, 'Exit':2999},
"20mm HEI Percussion": {'Entry':1900, 'Exit':2999},
"20mm HS.404 x2": {'Entry':1900, 'Exit':2999},
"20mm HS.404": {'Entry':1900, 'Exit':2999},
"20mm M53 API": {'Entry':1900, 'Exit':2999},
"20mm Mark 149-4": {'Entry':1900, 'Exit':2999},
"20mm Meroka APDS-T": {'Entry':1900, 'Exit':2999},
"20mm Mk-15": {'Entry':1900, 'Exit':2999},
"20mm PGU": {'Entry':1900, 'Exit':2999},
"20mm PGU-28/B": {'Entry':1900, 'Exit':2999},
"20mm Rh202 HE-T": {'Entry':1900, 'Exit':2999},
"20mm SAP(b)": {'Entry':1900, 'Exit':2999},
"20mm mark 244-0 ELC": {'Entry':1900, 'Exit':2999},
"20mm/85 GAM-B01 HE-I": {'Entry':1900, 'Exit':2999},
"20x102 mm burst": {'Entry':1900, 'Exit':2999},
"20x110 mm x2": {'Entry':1900, 'Exit':2999},
"20x110 mm": {'Entry':1900, 'Exit':2999},
"23mm AM-23": {'Entry':1900, 'Exit':2999},
"23mm AM/NR-23 HEI x2": {'Entry':1900, 'Exit':2999},
"23mm AM/NR-23 HEI": {'Entry':1900, 'Exit':2999},
"23mm GSh-23 HEI": {'Entry':1900, 'Exit':2999},
"23mm OFZ": {'Entry':1900, 'Exit':2999},
"25mm APDS": {'Entry':1900, 'Exit':2999},
"25mm APDS-T": {'Entry':1900, 'Exit':2999},
"25mm FAPDS-T": {'Entry':1900, 'Exit':2999},
"25mm HE-I-T": {'Entry':1900, 'Exit':2999},
"25mm HEI": {'Entry':1900, 'Exit':2999},
"25mm HEI-T": {'Entry':1900, 'Exit':2999},
"25mm M791 APDS-T": {'Entry':1900, 'Exit':2999},
"25mm SAPHEI": {'Entry':1900, 'Exit':2999},
"25mm SAPHEI-T": {'Entry':1900, 'Exit':2999},
"27mm DM10 FAPDS": {'Entry':1900, 'Exit':2999},
"27mm FAPDS": {'Entry':1900, 'Exit':2999},
"27mm HE": {'Entry':1900, 'Exit':2999},
"30mm 3UBR8 APDS": {'Entry':1900, 'Exit':2999},
"30mm ADEN API": {'Entry':1900, 'Exit':2999},
"30mm AK-630": {'Entry':1900, 'Exit':2999},
"30mm AP-I": {'Entry':1900, 'Exit':2999},
"30mm APDS": {'Entry':1900, 'Exit':2999},
"30mm APDS-T": {'Entry':1900, 'Exit':2999},
"30mm APFSDS-T": {'Entry':1900, 'Exit':2999},
"30mm API": {'Entry':1900, 'Exit':2999},
"30mm Br-83 AP": {'Entry':1900, 'Exit':2999},
"30mm DEFA": {'Entry':1900, 'Exit':2999},
"30mm F-33 HE": {'Entry':1900, 'Exit':2999},
"30mm FMPDS": {'Entry':1900, 'Exit':2999},
"30mm HE": {'Entry':1900, 'Exit':2999},
"30mm HEI": {'Entry':1900, 'Exit':2999},
"30mm HEI-T": {'Entry':1900, 'Exit':2999},
"30mm M230 Chaingun Ammo": {'Entry':1900, 'Exit':2999},
"30mm NR-30 HEI x2": {'Entry':1900, 'Exit':2999},
"30mm NR-30 HEI": {'Entry':1900, 'Exit':2999},
"30mm OF-83 HE-FRAG": {'Entry':1900, 'Exit':2999},
"30mm OF-84 HE-FRAG AK-306": {'Entry':1900, 'Exit':2999},
"30mm OF-84 HE-FRAG AK-630M": {'Entry':1900, 'Exit':2999},
"30mm OF-84 HE-FRAG Kashtan-M": {'Entry':1900, 'Exit':2999},
"30mm OP-84 FRAG Tracer AK-306": {'Entry':1900, 'Exit':2999},
"30mm OP-84 FRAG Tracer AK-630M": {'Entry':1900, 'Exit':2999},
"30mm OP-84 FRAG Tracer Kashtan-M": {'Entry':1900, 'Exit':2999},
"30mm PGU-13/B HE-I": {'Entry':1900, 'Exit':2999},
"30mm PGU-14/B API": {'Entry':1900, 'Exit':2999},
"30mm SAPHEI-T": {'Entry':1900, 'Exit':2999},
"30mm Su-25": {'Entry':1900, 'Exit':2999},
"30mm Type 730": {'Entry':1900, 'Exit':2999},
"30mm/75 GCM-AO3-2 APDS": {'Entry':1900, 'Exit':2999},
"30mm/75 GCM-AO3-2 HE": {'Entry':1900, 'Exit':2999},
"30x150mm GIAT": {'Entry':1900, 'Exit':2999},
"35mm AHEAD": {'Entry':1900, 'Exit':2999},
"37mm HE-FRAG Tracer x2": {'Entry':1900, 'Exit':2999},
"37mm HE-FRAG Tracer": {'Entry':1900, 'Exit':2999},
"37mm HE-FRAG": {'Entry':1900, 'Exit':2999},
"37mm Type 676 HE-FRAG": {'Entry':1900, 'Exit':2999},
"40 mm L70 HE x5": {'Entry':1900, 'Exit':2999},
"406mm Mk13 HC": {'Entry':1900, 'Exit':2999},
"406mm Mk8 AP": {'Entry':1900, 'Exit':2999},
"40mm HE Mk1 Md1 x2": {'Entry':1900, 'Exit':2999},
"40mm HE Mk1 Md1 x4": {'Entry':1900, 'Exit':2999},
"40mm HE Mk1 Md1 x8": {'Entry':1900, 'Exit':2999},
"40mm HE Mk1 Md1": {'Entry':1900, 'Exit':2999},
"40mm HE-T x2": {'Entry':1900, 'Exit':2999},
"40mm HE-T": {'Entry':1900, 'Exit':2999},
"40mm PFHE x2": {'Entry':1900, 'Exit':2999},
"40mm PFHE": {'Entry':1900, 'Exit':2999},
"45mm F-75 HE": {'Entry':1900, 'Exit':2999},
"45mm OR-75 FRAG": {'Entry':1900, 'Exit':2999},
"57mm HCER": {'Entry':1900, 'Exit':2999},
"57mm HE": {'Entry':1900, 'Exit':2999},
"57mm PFHE": {'Entry':1900, 'Exit':2999},
"57mm Tracer-FRAG": {'Entry':1900, 'Exit':2999},
"57mm Type 59 HE-T": {'Entry':1900, 'Exit':2999},
"73mm PG-15V HEAT": {'Entry':1900, 'Exit':2999},
"75mm AA": {'Entry':1900, 'Exit':2999},
"76.2mm OS-62 FRAG": {'Entry':1900, 'Exit':2999},
"76.2mm ZS-63 AA": {'Entry':1900, 'Exit':2999},
"76mm AA Mk27": {'Entry':1900, 'Exit':2999},
"76mm HC": {'Entry':1900, 'Exit':2999},
"76mm HE Mk27": {'Entry':1900, 'Exit':2999},
"76mm HE-MOM": {'Entry':1900, 'Exit':2999},
"76mm HE-SAPOM": {'Entry':1900, 'Exit':2999},
"76mm HE-SAPOMER": {'Entry':1900, 'Exit':2999},
"76mm Mk 27 Md1 HC": {'Entry':1900, 'Exit':2999},
"76mm Mk 31 Md1 AA VT": {'Entry':1900, 'Exit':2999},
"76x900mm HE": {'Entry':1900, 'Exit':2999},
"AB 40mm L/60 HE-T": {'Entry':1900, 'Exit':2999},
"Breda 40/70L": {'Entry':1900, 'Exit':2999},
"Gen100AA": {'Entry':1900, 'Exit':2999},
"Gen105AA": {'Entry':1900, 'Exit':2999},
"Gen105ART": {'Entry':1900, 'Exit':2999},
"Gen105AT": {'Entry':1900, 'Exit':2999},
"Gen10AA": {'Entry':1900, 'Exit':2999},
"Gen115AA": {'Entry':1900, 'Exit':2999},
"Gen120AA": {'Entry':1900, 'Exit':2999},
"Gen125AA": {'Entry':1900, 'Exit':2999},
"Gen127AA": {'Entry':1900, 'Exit':2999},
"Gen128AA": {'Entry':1900, 'Exit':2999},
"Gen150AA": {'Entry':1900, 'Exit':2999},
"Gen155ART": {'Entry':1900, 'Exit':2999},
"Gen20AA": {'Entry':1900, 'Exit':2999},
"Gen23AA": {'Entry':1900, 'Exit':2999},
"Gen25AA": {'Entry':1900, 'Exit':2999},
"Gen28AA": {'Entry':1900, 'Exit':2999},
"Gen30AA": {'Entry':1900, 'Exit':2999},
"Gen35AA": {'Entry':1900, 'Exit':2999},
"Gen37AA": {'Entry':1900, 'Exit':2999},
"Gen40AA": {'Entry':1900, 'Exit':2999},
"Gen50AA": {'Entry':1900, 'Exit':2999},
"Gen55AA": {'Entry':1900, 'Exit':2999},
"Gen57AA": {'Entry':1900, 'Exit':2999},
"Gen75AA": {'Entry':1900, 'Exit':2999},
"Gen80AA": {'Entry':1900, 'Exit':2999},
"Gen88AA": {'Entry':1900, 'Exit':2999},
"Gen90AA": {'Entry':1900, 'Exit':2999},
"Gen90AT": {'Entry':1900, 'Exit':2999},
"Millenium 35mm AHEAD": {'Entry':1900, 'Exit':2999},
"Mk75 76/62": {'Entry':1900, 'Exit':2999},
"Otobreda Compact 30/40L": {'Entry':1900, 'Exit':2999},
"Chaff-1": {'Entry':1900, 'Exit':2999},
"Flare-1": {'Entry':1900, 'Exit':2999},
"DICASS (50) Sonobuoy": {'Entry':1952, 'Exit':1962},
"DICASS (55) Sonobuoy": {'Entry':1957, 'Exit':1967},
"DICASS (60) Sonobuoy": {'Entry':1962, 'Exit':1972},
"DICASS (65) Sonobuoy": {'Entry':1967, 'Exit':1977},
"DICASS (70) Sonobuoy": {'Entry':1972, 'Exit':1982},
"DICASS (75) Sonobuoy": {'Entry':1977, 'Exit':1987},
"DICASS (80) Sonobuoy": {'Entry':1982, 'Exit':1992},
"DICASS (85) Sonobuoy": {'Entry':1987, 'Exit':1997},
"DICASS (90) Sonobuoy": {'Entry':1992, 'Exit':2002},
"DICASS (95) Sonobuoy": {'Entry':1997, 'Exit':2007},
"DICASS (100) Sonobuoy": {'Entry':2002, 'Exit':2012},
"DICASS (105) Sonobuoy": {'Entry':2007, 'Exit':2017},
"DICASS (110) Sonobuoy": {'Entry':2012, 'Exit':2022},
"DICASS (115) Sonobuoy": {'Entry':2017, 'Exit':2027},
"DICASS (120) Sonobuoy": {'Entry':2022, 'Exit':2032},
"DICASS (125) Sonobuoy": {'Entry':2027, 'Exit':2037},
"DIFAR (50) Sonobuoy": {'Entry':1952, 'Exit':1962},
"DIFAR (55) Sonobuoy": {'Entry':1957, 'Exit':1967},
"DIFAR (60) Sonobuoy": {'Entry':1962, 'Exit':1972},
"DIFAR (65) Sonobuoy": {'Entry':1967, 'Exit':1977},
"DIFAR (70) Sonobuoy": {'Entry':1972, 'Exit':1982},
"DIFAR (75) Sonobuoy": {'Entry':1977, 'Exit':1987},
"DIFAR (80) Sonobuoy": {'Entry':1982, 'Exit':1992},
"DIFAR (85) Sonobuoy": {'Entry':1987, 'Exit':1997},
"DIFAR (90) Sonobuoy": {'Entry':1992, 'Exit':2002},
"DIFAR (95) Sonobuoy": {'Entry':1997, 'Exit':2007},
"DIFAR (100) Sonobuoy": {'Entry':2002, 'Exit':2012},
"DIFAR (105) Sonobuoy": {'Entry':2007, 'Exit':2017},
"DIFAR (110) Sonobuoy": {'Entry':2012, 'Exit':2022},
"DIFAR (115) Sonobuoy": {'Entry':2017, 'Exit':2027},
"DIFAR (120) Sonobuoy": {'Entry':2022, 'Exit':2032},
"DIFAR (125) Sonobuoy": {'Entry':2027, 'Exit':2037},
"LOFAR (50) Sonobuoy": {'Entry':1952, 'Exit':1962},
"LOFAR (55) Sonobuoy": {'Entry':1957, 'Exit':1967},
"LOFAR (60) Sonobuoy": {'Entry':1962, 'Exit':1972},
"LOFAR (65) Sonobuoy": {'Entry':1967, 'Exit':1977},
"LOFAR (70) Sonobuoy": {'Entry':1972, 'Exit':1982},
"LOFAR (75) Sonobuoy": {'Entry':1977, 'Exit':1987},
"LOFAR (80) Sonobuoy": {'Entry':1982, 'Exit':1992},
"LOFAR (85) Sonobuoy": {'Entry':1987, 'Exit':1997},
"LOFAR (90) Sonobuoy": {'Entry':1992, 'Exit':2002},
"LOFAR (95) Sonobuoy": {'Entry':1997, 'Exit':2007},
"LOFAR (100) Sonobuoy": {'Entry':2002, 'Exit':2012},
"LOFAR (105) Sonobuoy": {'Entry':2007, 'Exit':2017},
"LOFAR (110) Sonobuoy": {'Entry':2012, 'Exit':2022},
"LOFAR (115) Sonobuoy": {'Entry':2017, 'Exit':2027},
"LOFAR (120) Sonobuoy": {'Entry':2022, 'Exit':2032},
"LOFAR (125) Sonobuoy": {'Entry':2027, 'Exit':2037},
},
'Germany':{
'RIM-7M(v1)': {'Entry':1982, 'Exit':2008},
'RIM-7P(v2)': {'Entry':1994, 'Exit':2999},
'RIM-116A RAM': {'Entry':1993, 'Exit':2003},
'RIM-116B RAM': {'Entry':2000, 'Exit':2999},
'RIM-66A': {'Entry':1969, 'Exit':2004},
'RIM-66M': {'Entry':2004.7,'Exit':2999},
'RIM-162B': {'Entry':2004.7, 'Exit':2999},
'Mk-46 Mod5': {'Entry':1980, 'Exit':2999},
'Mk-44': {'Entry':1969, 'Exit':1983},
'Mk-15 Mod0': {'Entry':1960, 'Exit':1977.1},
'Rur-5 ASROC': {'Entry':1969, 'Exit':2004},
'MU-90': {'Entry':2004.7,'Exit':2999},
'RIM-7M(v2)': {'Entry':1984, 'Exit':1995},
'RIM-7P(v1)': {'Entry':1987, 'Exit':2999},
'FAB-100': {'Entry':1954, 'Exit':2999},
'FAB-1500': {'Entry':1954, 'Exit':2999},
'FAB-250': {'Entry':1954, 'Exit':2999},
'FAB-3000': {'Entry':1954, 'Exit':2999},
'FAB-500': {'Entry':1954, 'Exit':2999},
'M117': {'Entry':1950, 'Exit':2999},
'M118': {'Entry':1950, 'Exit':2999},
'Mk-81': {'Entry':1950, 'Exit':2999},
'Mk-82': {'Entry':1950, 'Exit':2999},
'Mk-83': {'Entry':1950, 'Exit':2999},
'Mk-84': {'Entry':1950, 'Exit':2999},
'AS.34 Kormoran II': {'Entry':1991, 'Exit':2999},
'AS.34 Kormoran': {'Entry':1974, 'Exit':2002},
'Sea Skua': {'Entry':1984, 'Exit':2999},
'AIM-9M': {'Entry':1983, 'Exit':2009},
'IRIS-T': {'Entry':2004.3,'Exit':2999},
'MBDA Meteor': {'Entry':2010, 'Exit':2999},
'FIM-92 Stinger': {'Entry':1984, 'Exit':2999},
'50mm (2in) FFAR Rockets': {'Entry':1948, 'Exit':2999},
'68mm SNEB Rockets': {'Entry':1952, 'Exit':2999},
'Hydra-70 Rocket': {'Entry':1991, 'Exit':2999},
'Mk 16 Zuni FFAR': {'Entry':1991, 'Exit':2999},
'Mk 71 Zuni WAFAR': {'Entry':1991, 'Exit':2999},
'Mk-40 FFAR': {'Entry':1991, 'Exit':2999},
'S-24B 240mm': {'Entry':1973, 'Exit':2999},
'S-25C 266mm': {'Entry':1971, 'Exit':2999},
'S-25OF 266mm': {'Entry':1971, 'Exit':2999},
'S-5K 57mm': {'Entry':1955, 'Exit':2999},
'S-5K Rocket': {'Entry':1955, 'Exit':2999},
'S-5M Rocket': {'Entry':1955, 'Exit':2999},
'S-8B 80mm': {'Entry':1955, 'Exit':2999},
'S-8K 80mm': {'Entry':1955, 'Exit':2999},
"310 liter tank": {'Entry':1900, 'Exit':2999},
"100 gallon wing tank": {'Entry':1900, 'Exit':2999},
"400 liter tank": {'Entry':1900, 'Exit':2999},
"PTB-400": {'Entry':1900, 'Exit':2999},
"120 gallon tank": {'Entry':1900, 'Exit':2999},
"150 gallon tank": {'Entry':1900, 'Exit':2999},
"300 gallon tank": {'Entry':1900, 'Exit':2999},
"450 liter tank": {'Entry':1900, 'Exit':2999},
"PTB-490": {'Entry':1900, 'Exit':2999},
"500 liter tank": {'Entry':1900, 'Exit':2999},
"PTB-600": {'Entry':1900, 'Exit':2999},
"600 liter tank": {'Entry':1900, 'Exit':2999},
"Tanque de 600 litros": {'Entry':1900, 'Exit':2999},
"625 liter tank": {'Entry':1900, 'Exit':2999},
"700 liter tank": {'Entry':1900, 'Exit':2999},
"190 gallon wing tank": {'Entry':1900, 'Exit':2999},
"750 litre tank": {'Entry':1900, 'Exit':2999},
"750 liter tank": {'Entry':1900, 'Exit':2999},
"782 liter tank": {'Entry':1900, 'Exit':2999},
"800 liter tank": {'Entry':1900, 'Exit':2999},
"PTB-800": {'Entry':1900, 'Exit':2999},
"900 liter tank": {'Entry':1900, 'Exit':2999},
"1000 liter tank": {'Entry':1900, 'Exit':2999},
"FPU-1": {'Entry':1900, 'Exit':2999},
"285 Gallon Internal Tank FB-111": {'Entry':1900, 'Exit':2999},
"1100 Liter Tank": {'Entry':1900, 'Exit':2999},
"300 Gallon Internal Tank FB-111": {'Entry':1900, 'Exit':2999},
"300 gallon wing tank": {'Entry':1900, 'Exit':2999},
"Tanque de 300 galones": {'Entry':1900, 'Exit':2999},
"1150 Liter | |
# gh_stars: 1-10
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import get_default_admin_username
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group, validate_file_or_dict)
from azure.cli.core.commands.parameters import (
get_location_type, get_resource_name_completion_list, tags_type, get_three_state_flag,
file_type, get_enum_type, zone_type, zones_type)
from azure.cli.command_modules.vm._actions import _resource_not_exists
from azure.cli.command_modules.vm._completers import (
get_urn_aliases_completion_list, get_vm_size_completion_list, get_vm_run_command_completion_list)
from azure.cli.command_modules.vm._validators import (
validate_nsg_name, validate_vm_nics, validate_vm_nic, validate_vm_disk, validate_vmss_disk,
validate_asg_names_or_ids, validate_keyvault, validate_proximity_placement_group,
process_gallery_image_version_namespace)
from azure.cli.command_modules.vm._vm_utils import MSI_LOCAL_ID
from azure.cli.command_modules.vm._image_builder import ScriptType
# pylint: disable=too-many-statements, too-many-branches, too-many-locals
def load_arguments(self, _):
# Model imports
StorageAccountTypes, DiskStorageAccountTypes, SnapshotStorageAccountTypes = self.get_models('StorageAccountTypes', 'DiskStorageAccountTypes', 'SnapshotStorageAccountTypes')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
ProximityPlacementGroupType, HyperVGenerationTypes, HyperVGeneration = self.get_models('ProximityPlacementGroupType', 'HyperVGenerationTypes', 'HyperVGeneration')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of the vm's instance of the extension. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collpase all difference image sources to under 2 common arguments --os-disk-source --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
# endregion
# region Image Templates
with self.argument_context('image template') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image template create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_cutput_type)
c.argument('shared_image_destinations', arg_type=ib_cutput_type)
c.argument('output_name', arg_type=ib_cutput_type)
with self.argument_context('image template output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image template output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image template customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize | |
define.requiering_feature is None \
or define.name in IGNORED \
or define.value is None \
or define.is_deprecated:
return
call = ''
if define.macro_call:
call, ty = PREDEFINED_UTILS[define.macro_call]
call = '%s!' % call
value = define.value
else:
value, ty = self.rust_value(define.value)
self._generate_feature_comment_nonconsecutive(define.requiering_feature, gen)
self._generate_docs(define, gen)
self._generate_feature_protect(define.requiering_feature, gen)
gen('pub const ', define.name, ' : ', ty, ' = ', call, value, ';').nl()
def _generate_enum_group(self, group, gen):
if group.requiering_feature is None \
or group.name in IGNORED:
return
self._generate_feature_comment_nonconsecutive(group.requiering_feature, gen)
gen.nl()
if group.type is None:
self._generate_enum_group_defines(group, gen)
elif group.name == 'VkResult':
self._generate_enum_group_error_enum(group, gen)
else:
self._generate_enum_group_enum(group, gen)
def _generate_enum_group_defines(self, group, gen):
gen('// ', group.name).nl()
gen('/////', '/'*len(group.name)).nl()
for item in group.enum_items:
if item.requiering_feature is None \
or item.name in IGNORED:
continue
elif item.requiering_feature is not group.requiering_feature:
self._generate_feature_comment_nonconsecutive(item.requiering_feature, gen)
self._generate_docs(item, gen)
self._generate_feature_protect(item.requiering_feature, gen)
name = self.rust_enum_item_name(item)
value, ty = self.rust_enum_item_value(item)
gen('pub const ', name, ' : ', ty, ' = ', value, ';').nl()
def _generate_enum_group_enum_item(self, group, item, gen):
with_guard = item.requiering_feature is not None and item.requiering_feature is not group.requiering_feature
if with_guard:
self._generate_feature_comment_nonconsecutive(item.requiering_feature, gen)
self._generate_docs(item, gen)
if with_guard:
self._generate_feature_protect(item.requiering_feature, gen)
name = self.rust_enum_item_name(item)
value, _ = self.rust_enum_item_value(item)
gen(name, ' = ', value)
    def _generate_enum_group_enum(self, group, gen):
        """Emit a typed enum group as a `define_<type>! { pub enum ... }` macro call."""
        self._generate_feature_protect(group.requiering_feature, gen)
        gen('define_', group.type, '! {').nl()
        with gen.open_indention():
            self._generate_docs(group.enum_type, gen)
            gen('pub enum ', group.name, ' {').nl()
            with gen.open_indention():
                for i, item in enumerate(group.enum_items):
                    if i>0:
                        # Separate variants with a comma after every item but the first.
                        gen(',').nl()
                    self._generate_enum_group_enum_item(group, item, gen)
                gen.nl()
            gen('}').nl()
        gen('}').nl()
        # Remember the feature emitted last so later "nonconsecutive" feature
        # comments are only printed when the feature actually changes.
        gen._last_feature = group.requiering_feature
    def _generate_enum_group_error_enum(self, group, gen):
        """Emit VkResult as a Rust `VkError` enum plus Error/Display impls and a
        `VkResult<V>` alias, so Ok/Err replaces the C success/error codes.
        The SUCCESS item (index 0) is deliberately dropped from the enum.
        """
        gen('define_enum! {').nl()
        with gen.open_indention():
            self._generate_docs(group.enum_type, gen)
            gen('pub enum VkError {').nl()
            with gen.open_indention():
                for i, item in enumerate(group.enum_items):
                    if i == 0: # skip SUCCESS
                        continue
                    if i > 1:
                        gen(',').nl()
                    self._generate_enum_group_enum_item(group, item, gen)
                gen.nl()
            gen('}').nl()
        gen('}').nl()
        gen.nl()
        gen('impl ::std::error::Error for VkError {').nl()
        with gen.open_indention():
            gen('fn description(&self) -> &str {').nl()
            with gen.open_indention():
                for i, item in enumerate(group.enum_items):
                    if i == 0: # skip SUCCESS
                        continue
                    # Variants from other features need their own cfg guard block.
                    with_guard = item.requiering_feature is not None and item.requiering_feature is not group.requiering_feature
                    if with_guard:
                        self._generate_feature_protect(item.requiering_feature, gen)
                        gen('{').nl().i()
                    name = self.rust_enum_item_name(item)
                    # Description text: explicit comment, else first doc line,
                    # else a cleaned-up lowercase variant name.
                    comment = item.comment
                    if not comment and item.docs and len(item.docs)>0:
                        comment = item.docs[0].strip()
                    if not comment:
                        comment = item.shortname.lower()
                        if comment.startswith('error_'):
                            comment = comment[6:]
                    else:
                        # Keep only the first sentence of an existing comment.
                        dot = comment.find('.')
                        if dot > 0:
                            comment = comment[:dot]
                    gen('if *self == VkError::', name, '{ return "', comment,'"; }').nl()
                    if with_guard:
                        gen.o()('}').nl()
                gen('"unknown"').nl()
            gen('}').nl()
        gen('}').nl()
        gen('impl ::std::fmt::Display for VkError {').nl()
        with gen.open_indention():
            gen('fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {').nl()
            with gen.open_indention():
                gen('write!(f, "{} ({})", ::std::error::Error::description(self), *self as i32)')
            gen('}').nl()
        gen('}').nl()
        self._generate_docs(group.enum_type, gen)
        gen('pub type VkResult<V=()> = Result<V,VkError>;')
def _generate_command_signature(self, base_cmd, gen, paramnames=True, method=None, safe=False, with_return=True, **kwargs):
if method is None:
method = safe is True and self.rust_safe_type or self.rust_raw_type
gen('(')
i = 0
for param in base_cmd.params:
if safe and (param is base_cmd.out_param or param.len_for):
continue
if i>0:
gen(', ')
i += 1
if paramnames and param.name:
gen(self.rust_param_name(param.name), ': ')
gen(method(param, as_param=True, **kwargs))
gen(')')
if with_return and base_cmd.returns != TypeRef.VOID:
gen(' -> ', method(base_cmd.returns, optional=True, **kwargs))
    def generate_protos(self, file=None):
        """Write protos.rs: `PFN_*` function-pointer type aliases for every
        registered command, grouped by feature.

        file : str, optional
            Output path; defaults to <target>/protos.rs.
        """
        if file is None:
            file = os.path.join(self.target, 'protos.rs')
        reg = self.registry
        with RustCodeGenerator(file) as gen:
            gen("/* GENERATED FILE */").nl()
            gen.nl()
            gen('#![allow(non_camel_case_types)]').nl()
            gen.nl()
            with self.manage_imports(gen) as gen:
                self.add_import('platform::*')
                self.add_import('types::*')
                for feature in reg.features:
                    # open_nonempty suppresses the feature comment when no
                    # prototype ends up being written for the feature.
                    with gen.open_nonempty() as nonempty_gen:
                        self._generate_feature_comment(feature, nonempty_gen)
                        for command in feature.commands:
                            self._generate_proto(command, gen)
def _generate_proto(self, command, gen):
self._generate_feature_protect(command.requiering_feature, gen)
gen('pub type PFN_', command.name, ' = extern "system" fn ' )
self._generate_command_signature(command, gen, paramnames=False, safe=False)
gen(';').nl()
def generate_dispatch_table(self, file=None):
if file is None:
file = os.path.join(self.target, 'dispatch_table.rs')
with RustCodeGenerator(file) as gen:
gen("/* GENERATED FILE */").nl()
gen.nl()
gen('use protos::*;').nl()
gen('use types::PFN_vkVoidFunction;').nl()
gen.nl()
for table in DispatchTable:
self._generate_dispatch_table(table, gen)
gen.nl()
gen.nl()
for table in DispatchTable:
self._generate_dispatch_table_init(table, gen)
    def _generate_dispatch_table(self, table, gen):
        """Emit the `Vk<Table>DispatchTable` struct: an `Option<PFN_...>` field
        for every command dispatched through `table`."""
        gen('#[allow(non_snake_case)]').nl()
        gen('pub struct Vk', table.value.capitalize(), 'DispatchTable {').nl()
        with gen.open_indention():
            for feature in self.registry.features:
                # open_nonempty defers the feature comment until a field is written.
                with gen.open_nonempty() as nonempty_gen:
                    self._generate_feature_comment(feature, nonempty_gen)
                    for command in feature.commands:
                        # *ProcAddr commands bootstrap the loading itself and are
                        # handled elsewhere; ignored names are skipped too.
                        if command.dispatch_table is not table \
                           or command.name.endswith('ProcAddr') \
                           or command.name in IGNORED:
                            continue
                        self._generate_feature_protect(feature, gen)
                        gen('pub ', command.name, ': Option<PFN_', command.name, '>,').nl()
        gen('}').nl()
        gen.nl()
    def _generate_dispatch_table_init(self, table, gen):
        """Emit `impl Vk<Table>DispatchTable` with an unsafe `load` constructor
        that resolves every command pointer through the `gpa` lookup closure.

        The Loader table takes no `has_ext` closure: it has no extensions.
        """
        table_name = table.value.capitalize()
        gen('impl Vk', table_name, 'DispatchTable {').nl()
        with gen.open_indention():
            if table is DispatchTable.Loader:
                gen('pub unsafe fn load<R,F1>(gpa: F1)')
            else:
                gen('pub unsafe fn load<R,F1,F2>(gpa: F1, has_ext: F2)')
            gen(' -> Result<Vk', table_name, 'DispatchTable, R>').nl()
            with gen.open_indention():
                gen('where').nl()
                with gen.open_indention():
                    gen('F1: Fn(&str) -> Result<PFN_vkVoidFunction, R>,').nl()
                    if table is not DispatchTable.Loader:
                        gen('F2: Fn(&str) -> bool,').nl()
            gen('{').nl()
            with gen.open_indention():
                gen('use std::mem::transmute as tm;').nl()
                gen('let mut tab : Vk', table_name, 'DispatchTable = ::std::mem::zeroed();')
                for feature in self.registry.features:
                    with gen.open_nonempty() as nonempty_gen:
                        self._generate_feature_comment(feature, nonempty_gen)
                        # Platform-protected features get a cfg attribute plus a
                        # brace block so the guard covers every load in it.
                        if self._is_feature_protect(feature):
                            self._generate_feature_protect(feature, nonempty_gen)
                            nonempty_gen('{').nl()
                            nonempty_gen.i()
                        if feature.is_extension:
                            # Extension commands are loaded only when the
                            # extension is actually present.
                            nonempty_gen('if has_ext("', feature.name, '\\0") {').nl()
                            nonempty_gen.i()
                        try:
                            for command in feature.commands:
                                if command.dispatch_table is not table \
                                   or command.name.endswith('ProcAddr') \
                                   or command.name in IGNORED:
                                    continue
                                gen('tab.', command.name, ' = tm(gpa("', command.name, '\\0")?);').nl()
                        finally:
                            # Close the opened guard blocks even on failure.
                            if feature.is_extension:
                                nonempty_gen.o()
                                nonempty_gen('}').nl()
                            if self._is_feature_protect(feature):
                                nonempty_gen.o()
                                nonempty_gen('}').nl()
                gen('Ok(tab)').nl()
            gen('}').nl()
        gen('}').nl()
        gen.nl()
    def generate_dispatch_commands(self, file=None):
        """Write dispatch_commands.rs: safe Rust wrapper functions routing each
        command through its dispatch table.

        file : str, optional
            Output path; defaults to <target>/dispatch_commands.rs.
        """
        if file is None:
            file = os.path.join(self.target, 'dispatch_commands.rs')
        reg = self.registry
        with RustCodeGenerator(file) as gen:
            gen("/* GENERATED FILE */").nl()
            gen.nl()
            gen('#![allow(non_snake_case)]').nl()
            gen.nl()
            with self.manage_imports(gen) as gen:
                self.add_import('AsRaw')
                self.add_import('platform::*')
                self.add_import('enums::{VkError,VkResult}')
                self.add_import('types::*')
                self.add_import('dispatch_table::*')
                gen.nl()
                for feature in reg.features:
                    # open_nonempty suppresses the feature comment when every
                    # command of the feature is skipped below.
                    with gen.open_nonempty() as nonempty_gen:
                        self._generate_feature_comment(feature, nonempty_gen)
                        for command in feature.commands:
                            # ProcAddr bootstrap commands get no wrapper.
                            if command.name.endswith('ProcAddr') \
                               or command.name in IGNORED:
                                continue
                            self._generate_dispatch_command(command, gen)
def _generate_dispatch_command(self, command, gen):
table_name = command.dispatch_table.value.capitalize()
if command.dispatch_table is DispatchTable.Loader:
handle_arg = ''
else:
handle_arg = self.rust_param_name(next(iter(command.params))) + ', '
is_create = False
for table2 in DispatchTable:
table2_name = table2.name.capitalize()
if command.name == 'vkCreate%s'%table2_name:
is_create = table2_name
break
# is this the destroy command for the dispatch_table
is_destroy = command.name == 'vkDestroy%s'%table_name
# remove lifetime 'l, we only care about lifetime 'h (for handles)
lifetimes = self.rust_composed_lifetimes(command) - set(['l'])
def safe_dispatch_type(*args, **kwargs):
tyname = self.rust_safe_type(*args, **kwargs)
return tyname.replace('&\'l ', '&').replace('<\'l>', '').replace('\'l', '\'_')
out_param = command.out_param
out_typename = None
out_typename_return = None
out_convert = ''
if out_param:
if command.out_param.type.arg == TypeRef.BOOL:
out_typename = 'VkBool32'
else:
out_typename = safe_dispatch_type(command.out_param.type.arg)
if out_param.len:
if out_param.type.arg == TypeRef.VOID:
out_typename = 'u8'
out_typename = 'Vec<%s>' % out_typename
out_typename_return = out_typename
if command.out_param.type.arg == TypeRef.BOOL:
out_typename_return = 'bool'
out_convert = ' != 0'
self._generate_docs(command, gen)
self._generate_feature_protect(command.requiering_feature, gen)
gen('pub fn ', command.name, _lifetime_diamond(lifetimes))
self._generate_command_signature(command, gen, method=safe_dispatch_type, safe=True, with_return=False)
result_convert = ''
if out_param and command.returns == TypeRef.RESULT:
gen(' -> VkResult<', out_typename_return,'>')
elif command.returns == TypeRef.BOOL:
gen(' -> bool')
result_convert = ' != 0'
elif command.returns != TypeRef.VOID:
gen(' -> ', safe_dispatch_type(command.returns, optional=True))
elif out_param:
gen(' -> ', out_typename_return)
gen(' {').nl()
with gen.open_indention():
gen('unsafe {').nl()
with gen.open_indention():
# add length params
enumerate_len_param = None
enumerate_with_incomplete = False
for param in command.params:
if param.len_for:
if param.is_out:
enumerate_len_param = param
gen('let mut ', self.rust_param_name(param), ': ', safe_dispatch_type(param.type.arg), ' = 0;').nl()
else:
gen('let ', self.rust_param_name(param), ' = ', self.rust_param_name(param.len_for[0]), '.len() as ', safe_dispatch_type(param), ';').nl()
for len_for_param in param.len_for[1:]:
gen('assert!(', self.rust_param_name(param), ' as usize == ', self.rust_param_name(len_for_param),'.len());').nl()
if enumerate_len_param:
enumerate_with_incomplete = command.returns == TypeRef.RESULT and 'VK_INCOMPLETE' in command.successcodes
# add return param
out_param = command.out_param
out_len_expr = None
if out_param:
out_paramname = self.rust_param_name(out_param)
gen('let mut ', out_paramname, ': ', out_typename, ' = ')
if out_param.len:
if enumerate_len_param:
out_len_expr = self.rust_param_name(enumerate_len_param)
gen('Vec::new();').nl()
else:
out_len_elems = out_param.len.split('::')
out_len_expr = self.rust_param_name(out_len_elems[0]) + ''.join(['.%s()' % self.rust_member_function(p) for p in out_len_elems[1:]])
gen('Vec::with_capacity(', out_len_expr,' as usize);').nl()
else:
gen('::std::mem::zeroed();').nl()
is_check_result = command.returns == TypeRef.RESULT
if is_check_result and is_destroy:
gen('let _r = ')
gen('Vk', table_name, 'DispatchTable::with(', handle_arg, '|_t|{').nl()
with gen.open_indention():
if enumerate_with_incomplete:
gen('loop {').nl().i()
all_params_as_raw = [self.rust_param_as_raw(p) for p in command.params]
if enumerate_len_param:
if is_check_result:
gen('let _r = ')
all_args = ', '.join(all_params_as_raw[:-1] + ['::std::ptr::null_mut()'])
gen('_t.', command.name, '.unwrap()(', all_args, ');').nl()
if enumerate_with_incomplete:
gen('if _r == Err(VkError::INCOMPLETE) { continue; }').nl()
if is_check_result:
gen('if let Err(_e) = _r { return Err(_e); }').nl()
gen('if ', self.rust_param_name(enumerate_len_param) ,' == 0 {').nl()
with gen.open_indention():
if is_check_result:
gen('return Ok(', out_paramname, out_convert, ');').nl()
else:
gen('return ', out_paramname, out_convert, ';').nl()
gen('}').nl()
gen(self.rust_param_name(out_param) ,' = Vec::with_capacity(', out_len_expr,' as usize);').nl()
if is_check_result and (is_create or out_param):
gen('let _r = ')
all_args = ', '.join(all_params_as_raw)
gen('_t.', command.name, '.unwrap()(', all_args, ')')
if out_param:
gen(';').nl()
if enumerate_with_incomplete:
gen('if _r == Err(VkError::INCOMPLETE) { continue; }').nl()
if is_check_result:
gen('if let Err(_e) = _r { return | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
import time
import sys
import math
import signal
import configparser
import audioop
import subprocess as sp
import argparse
import os
import os.path
import pymumble.pymumble_py3 as pymumble
import variables as var
import logging
import logging.handlers
import traceback
from packaging import version
import util
import command
import constants
from database import SettingsDatabase, MusicDatabase
import media.system
from media.item import ValidationFailedError, PreparationFailedError
from media.playlist import BasePlaylist
from media.cache import MusicCache
class MumbleBot:
version = '6.1.1'
def __init__(self, args):
self.log = logging.getLogger("bot")
self.log.info("bot: botamusique version %s, starting..." % self.version)
signal.signal(signal.SIGINT, self.ctrl_caught)
self.cmd_handle = {}
self.volume_set = var.config.getfloat('bot', 'volume')
if var.db.has_option('bot', 'volume'):
self.volume_set = var.db.getfloat('bot', 'volume')
self.volume = self.volume_set
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel", fallback=None)
if args.verbose:
self.log.setLevel(logging.DEBUG)
self.log.debug("Starting in DEBUG loglevel")
elif args.quiet:
self.log.setLevel(logging.ERROR)
self.log.error("Starting in ERROR loglevel")
var.user = args.user
var.music_folder = util.solve_filepath(var.config.get('bot', 'music_folder'))
var.tmp_folder = util.solve_filepath(var.config.get('bot', 'tmp_folder'))
var.is_proxified = var.config.getboolean(
"webinterface", "is_web_proxified")
self.exit = False
self.nb_exit = 0
self.thread = None
self.thread_stderr = None
self.is_pause = False
self.pause_at_id = ""
self.playhead = -1
self.song_start_at = -1
self.last_ffmpeg_err = ""
self.read_pcm_size = 0
# self.download_threads = []
self.wait_for_ready = False # flag for the loop are waiting for download to complete in the other thread
if var.config.getboolean("webinterface", "enabled"):
wi_addr = var.config.get("webinterface", "listening_addr")
wi_port = var.config.getint("webinterface", "listening_port")
tt = threading.Thread(
target=start_web_interface, name="WebThread", args=(wi_addr, wi_port))
tt.daemon = True
self.log.info('Starting web interface on {}:{}'.format(wi_addr, wi_port))
tt.start()
if var.config.getboolean("bot", "auto_check_update"):
th = threading.Thread(target=self.check_update, name="UpdateThread")
th.daemon = True
th.start()
if args.host:
host = args.host
else:
host = var.config.get("server", "host")
if args.port:
port = args.port
else:
port = var.config.getint("server", "port")
if args.password:
password = args.password
else:
password = var.config.get("server", "password")
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel")
if args.certificate:
certificate = args.certificate
else:
certificate = util.solve_filepath(var.config.get("server", "certificate"))
if args.tokens:
tokens = args.tokens
else:
tokens = var.config.get("server", "tokens")
tokens = tokens.split(',')
if args.user:
self.username = args.user
else:
self.username = var.config.get("bot", "username")
self.mumble = pymumble.Mumble(host, user=self.username, port=port, password=password, tokens=<PASSWORD>,
debug=var.config.getboolean('debug', 'mumbleConnection'), certfile=certificate)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_TEXTMESSAGERECEIVED, self.message_received)
self.mumble.set_codec_profile("audio")
self.mumble.start() # start the mumble thread
self.mumble.is_ready() # wait for the connection
self.set_comment()
self.mumble.users.myself.unmute() # by sure the user is not muted
self.join_channel()
self.mumble.set_bandwidth(200000)
self.is_ducking = False
self.on_ducking = False
self.ducking_release = time.time()
if not var.db.has_option("bot", "ducking") and var.config.getboolean("bot", "ducking", fallback=False)\
or var.config.getboolean("bot", "ducking"):
self.is_ducking = True
self.ducking_volume = var.config.getfloat("bot", "ducking_volume", fallback=0.05)
self.ducking_volume = var.db.getfloat("bot", "ducking_volume", fallback=self.ducking_volume)
self.ducking_threshold = var.config.getfloat("bot", "ducking_threshold", fallback=5000)
self.ducking_threshold = var.db.getfloat("bot", "ducking_threshold", fallback=self.ducking_threshold)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_SOUNDRECEIVED,
self.ducking_sound_received)
self.mumble.set_receive_sound(True)
if var.config.get("bot", "when_nobody_in_channel") not in ['pause', 'pause_resume', 'stop', 'nothing']:
self.log.warn('Config "when_nobody_in_channel" is not on of "pause", "pause_resume", "stop" or "nothing", falling back to "nothing".')
if var.config.get("bot", "when_nobody_in_channel", fallback='nothing') in ['pause', 'pause_resume', 'stop']:
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERREMOVED, self.users_changed)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERUPDATED, self.users_changed)
# Debug use
self._loop_status = 'Idle'
self._display_rms = False
self._max_rms = 0
# Set the CTRL+C shortcut
def ctrl_caught(self, signal, frame):
self.log.info(
"\nSIGINT caught, quitting, {} more to kill".format(2 - self.nb_exit))
self.exit = True
self.pause()
if self.nb_exit > 1:
self.log.info("Forced Quit")
sys.exit(0)
self.nb_exit += 1
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
def check_update(self):
self.log.debug("update: checking for updates...")
new_version = util.new_release_version()
if version.parse(new_version) > version.parse(self.version):
self.log.info("update: new version %s found, current installed version %s." % (new_version, self.version))
self.send_channel_msg(constants.strings('new_version_found'))
else:
self.log.debug("update: no new version found.")
def register_command(self, cmd, handle, no_partial_match=False, access_outside_channel=False):
cmds = cmd.split(",")
for command in cmds:
command = command.strip()
if command:
self.cmd_handle[command] = {'handle': handle,
'partial_match': not no_partial_match,
'access_outside_channel': access_outside_channel}
self.log.debug("bot: command added: " + command)
    def set_comment(self):
        """Publish the configured 'bot.comment' string on the bot's own Mumble profile."""
        self.mumble.users.myself.comment(var.config.get('bot', 'comment'))
def join_channel(self):
if self.channel:
if '/' in self.channel:
self.mumble.channels.find_by_tree(self.channel.split('/')).move_in()
else:
self.mumble.channels.find_by_name(self.channel).move_in()
# =======================
# Message
# =======================
# All text send to the chat is analysed by this function
    def message_received(self, text):
        """Chat callback: parse a command message, run access checks and dispatch.

        *text* is the pymumble text-message event; ``text.actor`` identifies the
        sender. Non-command messages (not starting with a configured command
        symbol) are ignored.
        """
        message = text.message.strip()
        user = self.mumble.users[text.actor]['name']
        if var.config.getboolean('commands', 'split_username_at_space'):
            # If you use e.g. https://github.com/Natenom/mumblemoderator-module-collection/tree/master/os-suffixes ,
            # you want to split the username at the first space (drop the suffix).
            user = user.split()[0]
        # NOTE(review): message[0] raises IndexError if the stripped message is
        # empty — presumably the server never delivers empty messages; confirm.
        if message[0] in var.config.get('commands', 'command_symbol'):
            # remove the symbol from the message
            message = message[1:].split(' ', 1)
            # use the first word as a command, the other ones as parameters
            if len(message) > 0:
                command = message[0].lower()
                parameter = ''
                if len(message) > 1:
                    parameter = message[1].rstrip()
        else:
            # Not a command message: nothing to do.
            return

        self.log.info('bot: received command ' + command + ' - ' + parameter + ' by ' + user)

        # Anti stupid guy function: reject private messages unless allowed or admin.
        # (text.session is set when the message was sent privately to the bot.)
        if not self.is_admin(user) and not var.config.getboolean('bot', 'allow_private_message') and text.session:
            self.mumble.users[text.actor].send_text_message(
                constants.strings('pm_not_allowed'))
            return

        # Banned users may not issue any command.
        for i in var.db.items("user_ban"):
            if user.lower() == i[0]:
                self.mumble.users[text.actor].send_text_message(
                    constants.strings('user_ban'))
                return

        # Non-admins may not reference banned URLs in parameters.
        if not self.is_admin(user) and parameter:
            input_url = util.get_url_from_input(parameter)
            if input_url:
                for i in var.db.items("url_ban"):
                    if input_url == i[0]:
                        self.mumble.users[text.actor].send_text_message(
                            constants.strings('url_ban'))
                        return

        command_exc = ""
        try:
            if command in self.cmd_handle:
                # Exact match: check channel restriction, then dispatch.
                command_exc = command

                if not self.cmd_handle[command]['access_outside_channel'] \
                        and not self.is_admin(user) \
                        and not var.config.getboolean('bot', 'allow_other_channel_message') \
                        and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself['channel_id']:
                    self.mumble.users[text.actor].send_text_message(
                        constants.strings('not_in_my_channel'))
                    return

                self.cmd_handle[command]['handle'](self, user, text, command, parameter)
            else:
                # try partial match: the typed prefix must resolve to exactly
                # one registered command that allows partial matching.
                cmds = self.cmd_handle.keys()
                matches = []
                for cmd in cmds:
                    if cmd.startswith(command) and self.cmd_handle[cmd]['partial_match']:
                        matches.append(cmd)

                if len(matches) == 1:
                    self.log.info("bot: {:s} matches {:s}".format(command, matches[0]))
                    command_exc = matches[0]

                    if not self.cmd_handle[command_exc]['access_outside_channel'] \
                            and not self.is_admin(user) \
                            and not var.config.getboolean('bot', 'allow_other_channel_message') \
                            and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself[
                                'channel_id']:
                        self.mumble.users[text.actor].send_text_message(
                            constants.strings('not_in_my_channel'))
                        return

                    self.cmd_handle[command_exc]['handle'](self, user, text, command_exc, parameter)
                elif len(matches) > 1:
                    # Ambiguous prefix: ask the user which command was meant.
                    self.mumble.users[text.actor].send_text_message(
                        constants.strings('which_command', commands="<br>".join(matches)))
                else:
                    self.mumble.users[text.actor].send_text_message(
                        constants.strings('bad_command', command=command))
        except:
            # Deliberately broad: any handler failure is reported back to the
            # user instead of killing the message callback thread.
            error_traceback = traceback.format_exc()
            error = error_traceback.rstrip().split("\n")[-1]
            self.log.error("bot: command %s failed with error: %s\n" % (command_exc, error_traceback))
            self.send_msg(constants.strings('error_executing_command', command=command_exc, error=error), text)
def send_msg(self, msg, text):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
# text if the object message, contain information if direct message or channel message
self.mumble.users[text.actor].send_text_message(msg)
def send_channel_msg(self, msg):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
own_channel.send_text_message(msg)
def is_admin(self, user):
list_admin = var.config.get('bot', 'admin').rstrip().split(';')
if user in list_admin:
return True
else:
return False
# =======================
# Users changed
# =======================
def users_changed(self, user, message):
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
# only check if there is one more user currently in the channel
# else when the music is paused and somebody joins, music would start playing again
if len(own_channel.get_users()) == 2:
if var.config.get("bot", "when_nobody_in_channel") == "pause_resume":
self.resume()
elif var.config.get("bot", "when_nobody_in_channel") == "pause":
self.send_channel_msg(constants.strings("auto_paused"))
elif len(own_channel.get_users()) == 1:
# if the bot is the only user left in the channel
self.log.info('bot: Other users in the channel left. Stopping music now.')
if var.config.get("bot", "when_nobody_in_channel") == "stop":
self.clear()
else:
self.pause()
# =======================
# Launch and Download
# =======================
    def launch_music(self):
        """Start playback of the current playlist item by spawning an ffmpeg
        decoder whose PCM output is streamed to Mumble by the main loop.

        Resets the playback bookkeeping (pause flag, PCM counter, playhead).
        """
        if var.playlist.is_empty():
            return
        # Caller must have finished any pending download before launching.
        assert self.wait_for_ready is False

        music_wrapper = var.playlist.current_item()
        uri = music_wrapper.uri()

        self.log.info("bot: play music " + music_wrapper.format_debug_string())

        if var.config.getboolean('bot', 'announce_current_music'):
            self.send_channel_msg(music_wrapper.format_current_playing())

        if var.config.getboolean('debug', 'ffmpeg'):
            ffmpeg_debug = "debug"
        else:
            ffmpeg_debug = "warning"

        # Decode to raw mono 16-bit little-endian PCM at 48 kHz on stdout —
        # the format the Mumble sound output consumes.
        command = ("ffmpeg", '-v', ffmpeg_debug, '-nostdin', '-i',
                   uri, '-ac', '1', '-f', 's16le', '-ar', '48000', '-')
        self.log.debug("bot: execute ffmpeg command: " + " ".join(command))

        # The ffmpeg process is a thread
        # prepare pipe for catching stderr of ffmpeg
        pipe_rd, pipe_wd = os.pipe()
        util.pipe_no_wait(pipe_rd)  # Let the pipe work in non-blocking mode
        self.thread_stderr = os.fdopen(pipe_rd)
        self.thread = sp.Popen(command, stdout=sp.PIPE, stderr=pipe_wd, bufsize=480)
        self.is_pause = False
        self.read_pcm_size = 0
        # -1 marks "not started yet" — presumably set on first PCM read; confirm.
        self.song_start_at = -1
        self.playhead = 0
        self.last_volume_cycle_time = time.time()
def async_download_next(self):
# Function start if the next music isn't ready
# Do nothing in case the next music is already downloaded
self.log.debug("bot: Async download next asked ")
while var.playlist.next_item() and var.playlist.next_item().type in ['url', 'url_from_playlist']:
# usually, all validation will be done when adding to the list.
# however, for performance consideration, youtube playlist won't be validate when added.
# the validation has to be done here.
next = var.playlist.next_item()
try:
next.validate()
if not next.is_ready():
self.async_download(next)
break
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(next.id)
var.cache.free_and_delete(next.id)
def async_download(self, item):
th = threading.Thread(
target=self._download, name="Prepare-" + item.id[:7], args=(item,))
self.log.info("bot: start preparing item in thread: %s" % item.format_debug_string())
th.daemon = True
th.start()
return th
def _download(self, item):
ver = item.version
try:
item.prepare()
except PreparationFailedError as e:
self.send_channel_msg(e.msg)
return False
if item.version > ver:
var.playlist.version += 1
# =======================
# Loop
# =======================
# Main loop of the Bot
def loop(self):
raw_music = ""
while not self.exit and self.mumble.is_alive():
while self.thread and self.mumble.sound_output.get_buffer_size() > 0.5 and not self.exit:
# If the buffer isn't empty, I cannot send new music part, so I wait
self._loop_status = | |
is the empty dictionary {})
if self.closure != {}:
closed_system = []
for equation in system:
equation = equation.subs(self.closure).expand()
closed_system.append(equation)
system = closed_system
# 2. Clean from expectation of random fields
full_system_in_metric += self._clean_system_from_expectation(system)
self._system_in_metric = full_system_in_metric
return self._system_in_metric
    @property
    def in_diffusion(self):
        r"""Return the PKF dynamics in terms of the variance/diffusion tensor.

        Description
        -----------
        The derivation relies on the identity $\nu g = I/2$ whose trend is
        $(\dot\nu)g+\nu(\dot g)=0$ so that $\dot \nu = -\nu(\dot g)g^{-1}$.
        Again, with $\nu g = I/2$ leading to $g^{-1} = 2\nu$ it results that
        $$\dot \nu = -2\nu(\dot g)\nu.$$

        The result is computed lazily and cached in ``self._system_in_diffusion``.
        """
        if self._system_in_diffusion is None:
            t = self.time_coordinate
            # 1. Set dictionary for substitution
            # 1.1 Create the substitution dictionary for migration : metric -> diffusion
            metric_to_diffusion = collections.OrderedDict()
            for mfield in self.fields.values():
                metric = upper_triangle(mfield.metric)
                # g = (1/2) * nu^{-1}: express each metric component in diffusion terms
                metric_in_diffusion = mfield.diffusion.inv() * Rational(1 , 2)
                metric_in_diffusion = upper_triangle(metric_in_diffusion)
                for metric_ij, diffusion_ij in zip(metric, metric_in_diffusion):
                    metric_to_diffusion[metric_ij] = diffusion_ij
            # 1.2 Dictionary for the metric trends (closed metric system: lhs -> rhs)
            subs_metric_trend = {}
            for equation in self._apply_closure(self.metric_system):
                lhs, rhs = equation.args
                subs_metric_trend[lhs] = rhs
            # 2. Migration of expectation and variance systems
            diffusion_system = []
            for system in [self.expectation_system, self.variance_system]:
                # -1- apply external closure
                system = self._apply_closure(system)
                # -2- switch from metric to diffusion
                for equation in system:
                    equation = equation.subs(metric_to_diffusion)
                    diffusion_system.append(equation)
            # 3. Computation of the system at a symbolic level
            #    forms the equation $$ \pdt \nu = - 2\nu \pdt g \nu $$
            #    The computation of the system is made as a loop over univariate fields
            for mfield in self.fields.values():
                # Extract tensors
                diffusion = mfield.diffusion
                metric = mfield.metric
                # Computation of the rhs: $- 2\nu \pdt g \nu$
                trend_metric = Derivative(metric, t).doit()
                rhs = -Integer(2)*diffusion*trend_metric*diffusion
                rhs = rhs.doit()
                # Computation of the lhs: $\pdt \nu$
                lhs = Derivative(diffusion, t).doit()
                # Set the system by substituting terms (upper triangle only:
                # the tensors are symmetric)
                for lhs_term, rhs_term in zip(upper_triangle(lhs), upper_triangle(rhs)):
                    # Replace metric trend by its value
                    rhs_term = rhs_term.subs(subs_metric_trend)
                    rhs_term = rhs_term.doit()
                    rhs_term = rhs_term.simplify()
                    rhs_term = rhs_term.expand()
                    # Replace metric terms by their values
                    rhs_term = rhs_term.subs(metric_to_diffusion)
                    rhs_term = rhs_term.doit()
                    rhs_term = rhs_term.simplify()
                    rhs_term = rhs_term.expand()
                    # Set the equation
                    equation = Eq(lhs_term, rhs_term)
                    diffusion_system.append(equation)
            # 4. Clean expectations of fields before caching
            diffusion_system = self._clean_system_from_expectation(diffusion_system)
            self._system_in_diffusion = diffusion_system
        return self._system_in_diffusion
    @property
    def in_aspect(self):
        r"""Return the PKF dynamics in terms of the variance/aspect tensor.

        Description
        -----------
        The derivation relies on the identity $\nu \bs = I$ whose trend is
        $(\dot\bs)g+\bs(\dot g)=0$
        so that $\dot \bs = -\bs(\dot g)g^{-1}$. Again, with $\bs g =I$
        leading to $g^{-1} = \bs$
        it results that
        $$\dot \bs = -\bs(\dot g)\bs.$$

        The result is computed lazily and cached in ``self._system_in_aspect``.
        """
        if self._system_in_aspect is None:
            # 1. Set dictionary for substitution
            # 1.1 Create the substitution dictionary for migration : metric -> aspect
            metric_to_aspect = collections.OrderedDict()
            for mfield in self.fields.values():
                metric = upper_triangle(mfield.metric)
                # g = s^{-1}: express each metric component in aspect terms
                metric_in_aspect = mfield.aspect.inv()
                metric_in_aspect = upper_triangle(metric_in_aspect)
                for metric_ij, aspect_ij in zip(metric, metric_in_aspect):
                    metric_to_aspect[metric_ij] = aspect_ij
            # 1.2 Dictionary for the metric trends (closed metric system: lhs -> rhs)
            subs_metric_trend = {}
            for equation in self._apply_closure(self.metric_system):
                lhs, rhs = equation.args
                subs_metric_trend[lhs] = rhs
            # 2. Migration of expectation, variance and cross-covariance systems
            aspect_system = []
            systems = [ self.expectation_system,
                        self.variance_system,
                        self.cross_covariance_system
                      ]
            for system in systems:
                if system is None:  # to handle the multivariate situation: None is for univariate
                    continue
                # -1- apply external closure
                system = self._apply_closure(system)
                # -2- switch from metric to aspect
                for equation in system:
                    equation = equation.subs(metric_to_aspect)
                    aspect_system.append(equation)
            # 3. Computation of the system at a symbolic level
            #    forms the equation $$ \pdt \bs = - \bs \pdt g \bs $$
            #    The computation of the system is made as a loop over univariate fields
            # NOTE(review): unlike in_diffusion (which uses t = self.time_coordinate),
            # this loop differentiates w.r.t. 'time_symbol', presumably a
            # module-level symbol — confirm both refer to the same time variable.
            for mfield in self.fields.values():
                # Extract tensors
                aspect = mfield.aspect
                metric = mfield.metric
                # Computation of the rhs: $- \bs \pdt g \bs$
                trend_metric = Derivative(metric, time_symbol).doit()
                rhs = - aspect * trend_metric * aspect
                rhs = rhs.doit()
                # Computation of the lhs: $\pdt \bs$
                lhs = Derivative(aspect, time_symbol).doit()
                # Set the system by substituting terms (upper triangle only:
                # the tensors are symmetric)
                for lhs_term, rhs_term in zip(upper_triangle(lhs), upper_triangle(rhs)):
                    # Replace metric trend by its value
                    rhs_term = rhs_term.subs(subs_metric_trend)
                    rhs_term = rhs_term.doit()
                    rhs_term = rhs_term.simplify()
                    rhs_term = rhs_term.expand()
                    # Replace metric terms by their values
                    rhs_term = rhs_term.subs(metric_to_aspect)
                    rhs_term = rhs_term.doit()
                    rhs_term = rhs_term.simplify()
                    rhs_term = rhs_term.expand()
                    # Set the equation
                    equation = Eq(lhs_term, rhs_term)
                    aspect_system.append(equation)
            # 4. Clean expectations of fields before caching
            aspect_system = self._clean_system_from_expectation(aspect_system)
            self._system_in_aspect = aspect_system
        return self._system_in_aspect
    @property
    def closure(self):
        """External closure: substitution mapping applied to the dynamics (see set_closure)."""
        return self._closure
def set_closure(self, closure):
# 1. Update the closure
self._closure.update(closure)
# 2. Reset systems in metric/diffusion/aspect
self._system_in_metric = None
self._system_in_diffusion = None
self._system_in_aspect = None
def _clean_system_from_expectation(self, system):
""" Eliminate expectation of random fields from equation to simplify the representation and to prepare
the translation in computational codes """
clean_expectation = {}
for mfield in self.fields.values():
clean_expectation[Expectation(mfield.random)] = mfield.value
new_system = []
for equation in system:
new_system.append( equation.subs(clean_expectation))
return new_system
    @property
    def internal_closure(self):
        """Lazily build and cache the internal closure: substitutions for
        expectations of error-field products, e.g. E[e1*e2] -> V12 and
        E[eps1*eps2] -> V12/(std1*std2)."""
        if self._internal_closure is None:
            self._internal_closure = {}
            # 1. Set univariate closure (delegated to each meta-field)
            for meta_field in self.fields.values():
                self._internal_closure.update(meta_field.internal_closure)
            # 2. Set multivariate closure (only in multivariate situations)
            for couple in self.multivariate_couples:
                # 1) Extract fields and meta-data
                f1, f2 = couple
                mf1, mf2 = self.fields[f1], self.fields[f2]
                # 2) extract error fields, variances and normalised errors
                e1, e2 = mf1.error, mf2.error
                V1, V2 = mf1.variance, mf2.variance
                std1, std2 = sqrt(V1), sqrt(V2)
                eps1, eps2 = mf1.epsilon, mf2.epsilon
                # 3) Definition of the cross_variance
                # 3.a) Extract the cross covariance label
                V12 = self.get_covariance(f1,f2)
                # 3.b) Update internal closure: raw and normalised error products
                self._internal_closure[Expectation(e1*e2)] = V12
                self._internal_closure[Expectation(eps1*eps2)] = V12/(std1*std2)
        return self._internal_closure
def get_covariance(self, f1, f2):
if all([field in self.fields for field in [f1,f2]]):
# 1. Get associated metafields
mf1 = self.fields[f1]
mf2 = self.fields[f2]
# 2. Selection of the coordinates
# .. todo:
# Modify the selection of the coordinates to account of two-point covariances between surface / volumique fields
# this could be made from the cup product of the coordinates mf1.coordinates and mf2.coordinates
# e.g. f1(t,x) f2(t,x,y) => V12(t,x,y) ??
cf1 = mf1.coordinates
cf2 = mf2.coordinates
assert cf1==cf2, ValueError("f1 and f2 have different coordinate system")
coordinates = cf1
return Function(self._label_covariance+f1.name+f2.name)(*coordinates)
else:
raise ValueError("f1 or f2 are not prognostic fields")
    @property
    def subs_tree(self):
        """
        :return: substitution tree where only the internal closure is used,
            i.e. the dictionary of univariate terms E[D^alpha eps D^beta eps]
            given as functions of terms in E[eps D^gamma eps], for orders
            larger than 1.
        """
        subs_tree = {}
        for meta_field in self.fields.values():
            # Extract the tree of the univariate field
            meta_subs_tree = meta_field.subs_tree()
            # Close the tree from the defined internal_closure
            closed_subs_tree = {key:value.subs(self.internal_closure).doit()
                                for key, value in meta_subs_tree.items()}
            # Update the global tree
            subs_tree.update(closed_subs_tree)
        return subs_tree
@staticmethod
def check_univariate(expr):
""" Check the univariate terms from an expression """
expectations = expr.atoms(Expectation)
univariates = {}
for term in expectations:
univariate = UnivariateTerm.is_univariate(term)
if univariate is not None:
function = univariate.function
if function in univariates:
univariates[function].add(univariate)
else:
univariates[function] = {univariate}
return univariates
def _apply_closure(self, system):
""" Apply external closure """
if self.closure == {}:
return system
else:
closed_system = []
for equation in system:
equation = equation.subs(self.closure).expand()
closed_system.append(equation)
return closed_system
def _apply_internal_closure(self, rhs):
""" Apply the internal_closure on an expression (generally the rhs of an equation) """
epsilon_to_mfields = {}
for field,mfield in self.fields.items():
epsilon_to_mfields[mfield.epsilon] = mfield
# -1- Get univariate terms
univariates = self.check_univariate(rhs)
# -2- Retain epsilon's that are in self.fields !!
for epsilon in univariates:
if epsilon not in epsilon_to_mfields:
univariates.pop(epsilon)
# -3- Compute max degree for each univariate terms, by functions
max_degrees = {}
for epsilon in univariates:
max_degrees[epsilon] = max([univariate.degree for univariate in univariates[epsilon]])
# -4- Subs univariate terms E[D^alpha eps D^beta eps] by terms in E[eps D^gamma eps]
# ---- Replace only present terms ..
for epsilon in univariates:
max_degree = max_degrees[epsilon]
subs_tree = epsilon_to_mfields[epsilon].subs_tree(max_degree)
closed_subs_tree = {key:value.subs(self.internal_closure).doit() for key, value in subs_tree.items()}
| |
"""
Standalone Utility for conversion of ArcView files to STARS project.
----------------------------------------------------------------------
AUTHOR(S): <NAME> <EMAIL>
<NAME> <EMAIL>
----------------------------------------------------------------------
"""
from guimixin import *
from guimaker import *
import os
import sys
import string
from math import *
import sdialogue as sd
#from Common import Options
from ProjectWorker import *
from DataViewer import MixedDataTable
import Matcher as MATCH
import Tkinter as tk
class SProjectMaker(GuiMixin, GuiMaker): # or GuiMakerFrameMenu
"""Application level GUI Wrapper"""
    def start(self):
        """Build the main SPM window: size and position the root window,
        initialise project state, and declare the menu bar consumed by
        GuiMaker (Python 2 / Tkinter)."""
        self.hellos = 0
        self.master.title("SPM: STARS Project Maker")
        self.master.iconname("SPM")
        # Size the window to half the screen, capping the width at 1280 px.
        h = self.winfo_screenheight()
        self.screenHeight = h
        w = self.winfo_screenwidth()
        self.screenWidth = w
        if w > 1280:
            w = 1280
        windowWidth = w/2.
        windowHeight = h/2.
        # x0/y0 would centre the window, but the geometry string below pins it
        # at the top-left corner (0, 0).
        x0 = int((w - windowWidth) / 2.)
        y0 = int((h - windowHeight) / 2.)
        geom = "%dx%d+%d+%d"%(windowWidth,windowHeight,0,0)
        print geom
        self.master.geometry(geom)
        self.root = self.master
        # Project state: nothing loaded yet.
        self.project = None
        self.starsProjectOn = 0
        self.projectedCoordsOn = 0
        # Menu specification consumed by GuiMaker:
        # (label, underline-index, entries); nested lists create submenus.
        self.menuBar = [
            ('File', 0,
                [
                 ('Create New STARS Project',0,self.createNewSTARSProject),
                 ('Open STARS Project',0,self.openSTARSProject),
                 'separator',
                 ('Save STARS Project',0,self.saveProject),
                 ('Save As STARS Project',2,self.saveAsProject),
                 ('Write Cross-Section Names',0,self.writeCSO),
                 #('Write Project Files',2,self.writeProjectFiles),
                 'separator',
                 ('Exit', 1, self.quit)
                ]
            ),
            ('Data',0,
                [ ('Variable',0,
                    [
                     ('Convert',0,
                        [
                         ('Base Data to CS',0,self.convertCSVariables),
                         ('Base Data to CSTS',0,self.convertCSTSVariable),
                         ('Base Data to CSTS (Batch)',0,self.convertCSTSVariableBatch),
                         ('Cross-Section to Panel',0,self.cs2Panel),
                         ('Panel to Cross-Section',0,self.panel2CS)
                        ]
                     ),
                     ('Merge',0,
                        [
                         ('CS Data',0,self.readCSV_CS),
                         ('TS Data',0,self.readCSV_TS),
                         ('CSTS Data',0,self.readCSV_CSTS)
                        ]
                     ),
                     ('Join',0,
                        [
                         ('CS Data',0,self.joinCS),
                         ('CSTS Data',0,self.joinCSTS)
                        ]
                     ),
                    ]
                  ),
                  'separator',
                  ('Matrix',0,
                    [
                     ('Import GAL Binary',0,self.importGalBinary),
                     ('Create GAL Binary from Shapefile',0,self.createGalAppend),
                     #('Import GAL Valued',0,self.importGalValued),
                     #('Import Full',0,self.importFullMatrix)
                    ]
                  ), ]
            ),
            ('Tables',0,
                [
                 ('Specific Variable(s)',0,self.variableSpecificTable),
                 ('CS Variables',0,self.variableCSTable),
                 ('TS Variables',0,self.variableTSTable),
                 ('CSTS Variables',0,self.variableCSTSTable),
                 ('CS and CSTS Variables',0,self.variableCS_CSTSTable),
                 ('Base Data Variables',0,self.baseVariableTable) ]
            ),
            ('Plot',0,
                [('Plot Map',0,self.doMaps)])]
    def createNewSTARSProject(self):
        """
        Creates a new STARS project.
        Callback.

        Walks the user through a chain of dialogues: base-data file type
        (ArcView .dbf or plain .csv), project prefix, time-series type,
        region ids/names, and — for ArcView data — contiguity matrices.
        """
        d = sd.SDialogue('Create New STARS Project')
        values='ArcView', 'CSV'
        txt="Choose the type of file you want to use as your base data.\n"
        rbutton = sd.RadioButtons(d, label='Base Data', values=values,
                                  align='LEFT', title='Types', helpText=txt)
        d.draw()
        if d.status:
            # results[0] is the selected radio index: 0 = ArcView, 1 = CSV.
            type = d.results[0]
            if type == 0:
                fileType = "*.dbf"
            else:
                fileType = "*.csv"
            FILE_TYPES=[("Files",fileType)]
            baseFileName = askopenfilename(filetypes=FILE_TYPES, title="Choose Base Data File.")
            if baseFileName:
                self.prj = 0
                # Re-derive the type from the extension the user actually picked.
                type = baseFileName.split(".")[-1]
                if type == "dbf":
                    arc = 1
                    self.report("Base data generated from an ArcView Project")
                else:
                    arc = 0
                    self.report("Base data generated from a Comma Delimited File")
                self.proj = ProjectMaker(baseFileName,arc=arc)
                d = sd.SDialogue('Create STARS Project Name')
                txt = """Choose a name for the STARS project you want to create."""
                sd.UserEntry(d,label="Project Prefix",
                             align="LEFT", title="",helpText=txt)
                d.draw()
                if d.status:
                    self.proj.changeProjPrefix(d.results[0])
                self.baseVariableTable()
                d = sd.SDialogue('Choose Time Series Type')
                values='Decadal', 'Annual', 'Quarterly', 'Monthly', 'Irregular'
                txt="Choose the type of file you want to use as your base data.\n"
                rbutton = sd.RadioButtons(d, label='Time-Series', values=values,
                                          align='LEFT', title='Types', helpText=txt)
                d.draw()
                if d.status:
                    # Dispatch to the dialogue matching the chosen frequency.
                    type = d.results[0]
                    self.evalTimeInfo(values[type])
                self.createIdsAndNames()
                if arc == 1:
                    # Contiguity matrices only make sense for shapefile data.
                    self.createGal()
                self.starsProjectOn = 1
def openSTARSProject(self):
"""
Open an Existing STARS Project.
Callback.
"""
fileName = askopenfilename(filetypes=[('Project Files',"*.prj")],
title="Open STARS project.")
if fileName:
self.prj = 1
self.proj = ProjectMaker(fileName,prj=1)
print self.proj.stars.catalogue()
timeType = self.proj.stars.timeFreq
start = self.proj.stars.timeInfo[1]
end = self.proj.stars.timeInfo[2]
within = ['MONTHLY', 'QUARTERLY']
if timeType in within:
s = start.split("/")
startYear = s[-1]
startSub = s[0]
e = end.split("/")
endYear = e[-1]
endSub = e[0]
if timeType == "MONTHLY":
self.proj.createMonthly(startM, startYear, endM, endYear)
varNames = self.proj.stars.getVariableNames()
d = {}
for var in varNames:
v = self.proj.stars.dataBase.getVariable(var)
type = v.varType
self.starsProjectOn = 1
self.projectedCoordsOn = 1
self.report(self.proj.projectSummary())
def writeCSO(self):
try:
self.proj.writeCSO()
except:
self.report("""Could not export region names. Perhaps they have not
been identified yet.""")
def evalTimeInfo(self,type):
tDict = {'Decadal':self.createDECADAL,
'Annual':self.createANNUAL,
'Quarterly':self.createQUARTERLY,
'Monthly':self.createMONTHLY,
'Irregular':self.createIRREGULAR}
tDict[type]()
def createDECADAL(self):
d = sd.SDialogue('Decadal Time-Series Dialogue')
txt = "Choose the start year for your project."
sd.UserEntry(d,label="Start Year", align="LEFT", title="",helpText=txt)
txt = "Choose the end year for your project."
sd.UserEntry(d,label="End Year", align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
start = d.results[0]
end = d.results[1]
self.proj.createDecadal(start, end)
self.report(self.proj.timeSummary)
def createANNUAL(self):
d = sd.SDialogue('Annual Time-Series Dialogue')
txt = "Choose the start year for your project."
sd.UserEntry(d,label="Start Year", align="LEFT", title="",helpText=txt)
txt = "Choose the end year for your project."
sd.UserEntry(d,label="End Year", align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
start = d.results[0]
end = d.results[1]
self.proj.createAnnual(start, end)
self.report(self.proj.timeSummary)
def createQUARTERLY(self):
d = sd.SDialogue('Quarterly Time-Series Dialogue')
txt = "Choose the starting quarter for your project."
quarters = range(1,5)
entries = ['Start Quarter']
sd.MultiEntry(d,quarters, entries, title='',
helpText=txt)
txt = "Choose the start year for your project."
sd.UserEntry(d,label="Start Year", align="LEFT", title="",helpText=txt)
txt = "Choose the ending quarter for your project."
entries = ['End Quarter']
sd.MultiEntry(d,quarters, entries, title='',
helpText=txt)
txt = "Choose the end year for your project."
sd.UserEntry(d,label="End Year", align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
startQ = int(d.results[0]['Start Quarter'])
startYear = int(d.results[1])
endQ = int(d.results[2]['End Quarter'])
endYear = int(d.results[3])
self.proj.createQuarterly(startQ, startYear, endQ, endYear)
self.report(self.proj.timeSummary)
def createMONTHLY(self):
d = sd.SDialogue('Monthly Time-Series Dialogue')
txt = "Choose the starting month for your project."
months = range(1,13)
entries = ['Start Month']
sd.MultiEntry(d,months, entries, title='',
helpText=txt)
txt = "Choose the start year for your project."
sd.UserEntry(d,label="Start Year", align="LEFT", title="",helpText=txt)
txt = "Choose the ending month for your project."
entries = ['End Month']
sd.MultiEntry(d,months, entries, title='',
helpText=txt)
txt = "Choose the end year for your project."
sd.UserEntry(d,label="End Year", align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
startM = int(d.results[0]['Start Month'])
startYear = int(d.results[1])
endM = int(d.results[2]['End Month'])
endYear = int(d.results[3])
self.proj.createMonthly(startM, startYear, endM, endYear)
self.report(self.proj.timeSummary)
def createIRREGULAR(self):
d = sd.SDialogue('Irregular Time-Series Dialogue')
txt = "Choose the number of time periods (Integer)"
sd.UserEntry(d,label="Number of Time Periods (t)", align="LEFT", title="",helpText=txt)
d.draw()
if d.status:
t = int(d.results[0])
self.proj.createIrregular(t)
self.report(self.proj.timeSummary)
def createIdsAndNames(self):
d = sd.SDialogue('Create Region Names and Ids')
txt = """You must identify names for the regions in your project.
*** All the options in this dialogue are optional. If you leave them
blank, your regions will be identified by the integers associated with
the number of rows in the input .dbf or .csv file.
1. Use the Unique Field to identify unique labels that match the
number of cross-sections in your study. Examples would include NUTS
or FIPS codes.
2. If there are no Fields that can be used to determine the
uniqueness of each cross-section you may combine the values from two
fields to create region ids. The Join Field term will be combined
with the Unique Field to create a "more unique" identifier.
3. Use the Optional Name Field if you have identified regions with
either the Unique or Joined method, but you want the names of the
regions to be determined by this field.
4. The user can select the type of delimiter used join field entries.
The default delimiter is an underscore: field1_field2
"""
varNames = self.proj.getDBFVariableNames()
varNames.sort()
entries = ['Unique Field', 'Join Field', 'Optional Name Field', 'Delimiter']
sd.MultiEntry(d,varNames, entries, title='Optional Arguments', helpText=txt)
d.draw()
if d.status:
nameField = d.results[0]['Unique Field']
if nameField:
nameField = self.proj.getDBFVariable(nameField)
else:
nameField = []
joinField = d.results[0]['Join Field']
if joinField:
joinField = self.proj.getDBFVariable(joinField)
else:
joinField = []
finalField = d.results[0]['Optional Name Field']
if finalField:
finalField = self.proj.getDBFVariable(finalField)
else:
finalField = []
delimiter = d.results[0]['Delimiter']
if delimiter:
pass
else:
delimiter = "_"
self.proj.createNamesAndIDs(var1=nameField,
var2=joinField,
var3=finalField,
delim=delimiter)
self.report(self.proj.variableSummary())
def createGalAppend(self):
if self.proj.arc == 1:
self.createGal()
else:
self.report("""You must be using an arcview type project for this
option.""")
    def createGal(self):
        """Dialogue for building rook and/or queen GAL contiguity matrices
        from the project's shapefile and writing them next to the project.

        Each selected criterion produces '<prefix>_<criterion>.gal' in the
        project directory and registers it in self.proj.matrices.
        """
        d = sd.SDialogue('Create Contiguity Matrices')
        txt="""Rook contiguity is based on shared edges, while Queen
        contiguity is based on shared vertices between pairs of polygons."""
        types = "Rook", "Queen"
        sd.CheckButtons(d, title='Criterion', label='Criterion', values=types,
                        helpText=txt)
        d.draw()
        if d.status:
            # results[0] is a list of (label, checked) pairs in 'types' order.
            criterion = d.results[0]
            mats = []
            matNames = []
            self.master.update()
            if criterion[0][1]: # rook
                text="Creating Rook Based Contiguity Weights"
                rd=sd.Warning(self.master,text=text)
                # Aggregated projects use the aggregation-aware builder.
                if self.proj.aggOn == 1:
                    mats.append(self.proj.makeGalWeightsAgg())
                else:
                    mats.append(self.proj.makeGalWeights())
                matNames.append('rook')
                rd.destroy()
            if criterion[1][1]: # queen
                txt="Creating Queen Based Contiguity Weights."
                qd=sd.Warning(self.master,txt)
                # The argument 2 selects the queen criterion in the builders.
                if self.proj.aggOn == 1:
                    mats.append(self.proj.makeGalWeightsAgg(2))
                else:
                    mats.append(self.proj.makeGalWeights(2))
                matNames.append('queen')
                qd.destroy()
            for name,stringOut in zip(matNames,mats):
                print 'writing GAL file(s)'
                nameOut = self.proj.projPrefix+"_"+name+".gal"
                nameOut = os.path.join(self.proj.projectDir,nameOut)
                fo=open(nameOut,'w')
                fo.write(stringOut)
                fo.close()
                # Register the written matrix with the project.
                self.proj.matrices[nameOut]='gal'
            print 'done writing GAL files(s)'
def convertCSVariables(self):
d = sd.SDialogue('Convert Initial Field(s) to STARS Cross-Sectional Variables(s)')
| |
hour = time.month, time.day, time.weekday, time.hour
shift = np.flatnonzero((month == 10) & (wd == 6) & (hour == shift_hour) & (day > 24)) # DST shift hours in October
for i, df in dic.items():
print(i)
print(df.head())
# DST shift in October
for s in shift:
print(s)
if sum(np.isnan(df.iloc[s, :])) > 0 and sum(df.iloc[s - 1, :]) / sum(df.iloc[s - 2, :]) > 1.5:
df.iloc[s - 1, :] /= 2
df.iloc[s, :] = df.iloc[s - 1, :]
# Remaining nans
df.interpolate(limit=limit, inplace=True) # interpolate up to limit samples
# fill edges of data
df.fillna(method='ffill',inplace=True,limit=fill_lim)
df.fillna(method='bfill',inplace=True,limit=fill_lim)
nnan = np.sum(np.array(np.isnan(df)))
if nnan > 0:
if print_output:
print(f'Too many ({nnan}) nans for {i} in Entso-e data for %s, interpolate might not work properly.' % i)
for gtype in df.columns:
nnan = df[gtype].isna().sum()
if nnan > 0:
pass
# print(f'{gtype}: {nnan}')
return dic
def fill_daily_gap(df,gap=('20160203','20160209'),col=None):
    """ If any of the days in the range given by 'gap' exist in the dataframe, data from the previous or following
    whole day outside the gap will be copied to all of the days in the gap. This is used to fix periods of missing
    data.

    Parameters
    ----------
    df : pandas.DataFrame
        Hourly time-indexed data; modified in place.
    gap : tuple of str
        First and last date ('yyyymmdd') of the gap to patch.
    col : str, optional
        Single column to fill; if None, all columns are copied.

    Returns
    -------
    None
        Returns early when df does not overlap the gap, or when no complete
        24 h day exists on either side of the gap to copy from.
    """
    #%% fix gap in LT data for Solar and Wind onsh, by repeating data from previous day
    # area = 'LT'
    # df = dic[area]
    # gap = ('20160203','20160209') # first and last date for which there is a gap in the data
    fill_vals = True
    # gap as datetimes spanning the first hour of the first day to the last hour of the last day
    gap_dt = (str_to_date(gap[0]),str_to_date(gap[1])+datetime.timedelta(hours=23))
    # check if timerange overlaps gap
    if (df.index[0] > gap_dt[1]) or (df.index[-1] < gap_dt[0]): # no overlap
        return None
        # fill_vals = False
    else:
        # check if there is complete 24 hour period before first gap day
        if (gap_dt[0] - df.index[0]).days >= 1:
            # print('Use previous day')
            fill_idxs = pd.date_range(start=gap_dt[0]+datetime.timedelta(hours=-24),
                                      end=gap_dt[0]+datetime.timedelta(hours=-1),
                                      freq=df.index.freq)
        elif (df.index[-1] - gap_dt[1]).days >= 1:
            # print('Use following day')
            fill_idxs = pd.date_range(start=gap_dt[1]+datetime.timedelta(hours=1),
                                      end=gap_dt[1]+datetime.timedelta(hours=24),
                                      freq=df.index.freq)
        else:
            # print(f'Cannot find complete day to fill gap for data')
            # fill_vals = False
            return None
    # NOTE(review): fill_vals is always True here (both False paths return
    # early above), so this guard is effectively dead but kept for clarity.
    if fill_vals:
        day_range = pd.date_range(start=str_to_date(gap[0]),end=str_to_date(gap[1]),freq='D')
        for d in day_range:
            if d in df.index and (d+datetime.timedelta(hours=23)) in df.index: # this day exists in df
                miss_idxs = pd.date_range(start=d,end=d+datetime.timedelta(hours=23),freq='H')
                # copy via np.array to drop the source index so values align positionally
                if col is None: # repeat all columns
                    df.loc[miss_idxs,:] = np.array(df.loc[fill_idxs,:])
                else:
                    df.loc[miss_idxs,col] = np.array(df.loc[fill_idxs,col])
def fix_pl_prices(df):
    """Convert Polish ('PL') prices from PLN to EUR in place.

    PL prices were published in PLN between 2 March 2017 and 19 November
    2019 (window expressed here in CET-shifted timestamps); values inside
    that window are scaled by an approximate PLN->EUR factor of 0.23.

    Parameters
    ----------
    df : pandas.DataFrame
        Price data indexed by datetime; modified in place. No-op when
        there is no 'PL' column.
    """
    # fix for PL price, publised in PLN from 2 March 2017 and 19 November 2019
    if 'PL' in df.columns:
        sfmt = '%Y%m%d:%H'
        t1 = datetime.datetime.strptime('20170301:23',sfmt)
        t2 = datetime.datetime.strptime('20191119:22',sfmt)
        # vectorized boolean mask instead of building a Python list of
        # timestamps (same selection, O(n) in C rather than per-element)
        mask = (df.index >= t1) & (df.index <= t2)
        df.loc[mask,'PL'] = df.loc[mask,'PL'] * 0.23
def plot_graphs_entsoe_thesis(entsoe_db_path='G:/Master Thesis/Master Thesis/Files/Databases/entsoe_outage.db',
                              nordpool_db_path='G:/Master Thesis/Master Thesis/Files/Databases/nordpool.db',
                              fig_path='F:/Graphs/ENTSOE', starttime='20180101:00', endtime='20181231:23',categories=[]):
    """
    Generate and save unavailability histograms and bar charts from the
    ENTSO-E outage database, per asset, per asset type, and per connection.

    :param
        entsoe_db_path : path of entsoe_outage.db
        nordpool_db_path : path of nordpool.db
        fig_path : path of the folder where the figures will be stored
        starttime : in the format 'yyyymmdd:hh'
        endtime : in the format 'yyyymmdd:hh'
        categories : optional list of connections; when empty, the full list
            is taken from the Nord Pool database
    :return
        None
    """
    import nordpool_db
    if not categories:
        # get the list of connections from the nordpool database
        df_cap = nordpool_db.Database(db=nordpool_db_path).select_exchange_capacities(starttime=starttime,endtime=endtime)
        # create a list of connections
        columns = list(df_cap)
    else:
        columns = categories
    # images are saved as png
    # path where figures are stored
    (Path(fig_path)).mkdir(exist_ok=True, parents=True)
    # turn off plotting
    plt.ioff()
    # get the events from the entsoe_outage database
    db = DatabaseOutageData(dbase=entsoe_db_path)
    df_event_id, event_dict = db.map_ntc2events(starttime, endtime, columns=columns)
    # each event may reference several assets, joined by ';'
    list_asset_id = [value['asset_id'] for key, value in event_dict.items()]
    # remove the semicolons
    list_asset_id = list(set([a for x in list_asset_id for a in x.split(';')]))
    while '' in list_asset_id:
        list_asset_id.remove('')
    # create a dictionary with asset_ids and corresponding event_id
    dict_asset_to_event_id = {}
    for asset in list_asset_id:
        for key, value in event_dict.items():
            if asset in value['asset_id']:
                if asset not in dict_asset_to_event_id.keys():
                    dict_asset_to_event_id[asset] = [key]
                else:
                    dict_asset_to_event_id[asset] = dict_asset_to_event_id[asset] + [key]
    # create a dictionary with asset_ids and corresponding asset_names
    # (asset_name/asset_type are ';'-joined in the same order as asset_id)
    dict_asset_to_names = {}
    for asset in list_asset_id:
        for key, value in event_dict.items():
            if asset in value['asset_id']:
                pos = value['asset_id'].split(';').index(asset)
                dict_asset_to_names[asset] = value['asset_name'].split(';')[pos]
    # create a dictionary with asset_ids and corresponding asset_types
    dict_asset_to_types = {}
    for asset in list_asset_id:
        for key, value in event_dict.items():
            if asset in value['asset_id']:
                pos = value['asset_id'].split(';').index(asset)
                dict_asset_to_types[asset] = value['asset_type'].split(';')[pos]
    # for histogram
    asset_type = ['AC Link','DC Link','Substation','Transformer']
    for count,ty in enumerate(asset_type):
        asset_hist = np.array([])
        for key, value in dict_asset_to_event_id.items():
            if dict_asset_to_types[key] != ty:
                continue
            # collect the (deduplicated) hours affected by this asset on any connection
            indices = set()
            for conn in columns:
                for x in value:
                    indices.update(df_event_id.index[df_event_id[conn].str.contains(str(x))])
            asset_hist = np.append(asset_hist,[len(indices)])
        fig = plt.figure(count+1)
        plt.hist(asset_hist,bins=np.arange(min(asset_hist), max(asset_hist) + 5, 5))
        plt.grid(True, axis='y', zorder=0)
        plt.title('Histogram of hours of unavailability due to assets of type \'{0}\''.format(ty))
        plt.xlabel('Hours of Unavailability')
        plt.ylabel('Number of Assets')
        plt.savefig(Path(fig_path) / f'{ty}_histogram_unavailability_hours.png', bbox_inches="tight")
    plt.close('all')
    for conn in columns:
        # create a folder for each connection
        path = Path(fig_path) / f'{conn}'
        path.mkdir(exist_ok=True, parents=True)
        output_hours = {}
        output_hours_by_asset_type = {'AC Link':set(), 'DC Link':set(), 'Substation':set() ,'Transformer':set()}
        # create four dictionaries (list) to hold asset unavailabiliity for each asset type
        output_hours_assets_per_asset_type = {'AC Link':{}, 'DC Link':{}, 'Substation':{} ,'Transformer':{}}
        for key, value in dict_asset_to_event_id.items():
            indices = set()
            for x in value:
                indices.update(df_event_id.index[df_event_id[conn].str.contains(str(x))])
            if not indices:
                continue
            output_hours[dict_asset_to_names[key]] = len(indices)
            output_hours_by_asset_type[dict_asset_to_types[key]].update(indices)
            output_hours_assets_per_asset_type[dict_asset_to_types[key]][dict_asset_to_names[key]] = len(indices)
        # convert per-type hour sets to counts
        for k,v in output_hours_by_asset_type.items():
            output_hours_by_asset_type[k] = len(v)
        # sort ascending by hours so the longest bar is at the top of the barh plot
        od = {k: v for k, v in sorted(output_hours.items(), key=lambda item: item[1])}
        fig = plt.figure(1)
        plt.barh(range(len(od)), od.values(), align='center', zorder=3)
        plt.yticks(range(len(od)), od.keys())
        plt.grid(True, axis='x', zorder=0)
        plt.title('Hours of unavailability on {0} due to each asset'.format(conn))
        plt.xlabel('Hours')
        plt.ylabel('Asset Names')
        plt.savefig(path / f'{conn}_unavailability_hours_per_asset.png', bbox_inches="tight")
        od = {k: v for k, v in sorted(output_hours_by_asset_type.items(), key=lambda item: item[1])}
        fig = plt.figure(2)
        plt.barh(range(len(od)), od.values(), align='center', zorder=3)
        plt.yticks(range(len(od)), od.keys())
        plt.grid(True, axis='x', zorder=0)
        plt.title('Hours of unavailability on {0} due to each asset type'.format(conn))
        plt.xlabel('Hours')
        plt.ylabel('Asset Types')
        plt.savefig(path / f'{conn}_unavailability_hours_per_asset_type.png', bbox_inches="tight")
        # one additional figure per asset type, numbered from 3 upwards
        fig_no = 2
        for k,v in output_hours_assets_per_asset_type.items():
            fig_no += 1
            od = {m: n for m, n in sorted(v.items(), key=lambda item: item[1])}
            fig = plt.figure(fig_no)
            plt.barh(range(len(od)), od.values(), align='center', zorder=3)
            plt.yticks(range(len(od)), od.keys())
            plt.grid(True, axis='x', zorder=0)
            plt.title('Hours of unavailability on {0} due to assets of type \'{1}\''.format(conn,k))
            plt.xlabel('Hours')
            plt.ylabel('Asset Names')
            plt.savefig(path / f'{conn}_{k}_unavailability_hours.png', bbox_inches="tight")
        plt.close('all')
def germany_negative_prices():
    """Analyze incidents of negative day-ahead prices for Germany (2018-2019).

    Reads DE prices from a local database, finds each continuous run of
    negative-price hours, prints the share of negative hours belonging to
    runs of at least 6 hours, and saves a bar chart of the duration
    distribution (png + eps).

    NOTE(review): database and figure paths are hard-coded to local drives.
    """
    db = Database(db='D:/Data/entsoe_prices.db')
    data = db.select_price_data(areas=['DE'],starttime='20180101:00',endtime='20191231:23',cet_time=True)
    #%% analyze incidents of negative prices
    a = 'DE'
    incidents = [] # list with number of hours with negative prices for each incident (=continuous period with negative prices)
    negative_flag = False
    hours = 0
    for t in data.index:
        if data.at[t,a] < 0:
            if not negative_flag:
                # start of period with negative prices
                negative_flag = True
                hours = 1
            else:
                # continuation of negative price period
                hours += 1
        else:
            if negative_flag:
                # end of period with negative prices
                incidents.append(hours)
                negative_flag = False
                hours = 0
    # add last incident, if prices are negative at end of time period
    if hours > 0:
        incidents.append(hours)
    #%% calculate share of all negative hours that belong to period of certain length
    ntot = sum(incidents)
    nshare = pd.Series(0.0,index=range(1,max(incidents)+1))
    # bug fix: the loop variable was previously named 'len', shadowing the builtin
    for duration in incidents:
        nshare.at[duration] += 1e2*duration/ntot
    six_share = 100 - nshare.loc[range(1,6)].sum()
    # share of hours with duration at least 6 hours
    print(f'Share of hours with duration >= 6: {six_share:0.2f}')
    # make histogram
    # plt.hist(incidents)
    # plt.show()
    fig_path = Path('C:/Users/elisn/Box Sync/Python/TNO-Curtailment/Figures')
    fh = 7.5
    fw = 12
    cm_per_inch = 2.5
    f = plt.figure()
    f.set_size_inches(w=fw/cm_per_inch,h=fh/cm_per_inch)
    nshare.plot.bar()
    plt.xlabel('Duration of period [h]')
    plt.ylabel('Share of hours [%]')
    plt.tight_layout()
    # plt.grid()
    plt.savefig(fig_path / 'negative_price_duration.png')
    plt.savefig(fig_path / 'negative_price_duration.eps')
def print_capacity_excel_file():
    """
    Create excel file with capacity per type for EU countries.

    Downloads capacity-per-type data for 2016-2020 into a local database,
    then writes 'capacity.xlsx' with one sheet containing all years plus
    one sheet per year (rows = production types, columns = areas).
    :return: None
    """
    #%% build database from scratch
    db = Database('D:/Data/entsoe_capacity.db')
    # bug fix: 'CH' was listed twice in the original area list
    cap_areas = [
        'SE','NO','FI','DK1','DK2','DK','EE','LT','LV','PL','NL','FR','BE',
        'ES','PT','IE','GB','IT','CH','AT','CZ','SK','HU','SI','CR','BL','BH','MK','SR','GR','RO','MT','AL'
    ]
    start_year = 2016
    end_year = 2020
    db.download_cap_per_type_data(start_year=start_year,end_year=end_year,areas=cap_areas)
    # data = get_entsoe_gen_data(datatype=1,area='FR',start='20180101',end='20180101')
    #%%
    df = db.select_cap_per_type_data(areas=None)
    # rename countries to readable bidding-zone names
    df = df.rename(columns={a:tbidz_name[a] for a in cap_areas})
    # remove nan values
    df1 = df.dropna(axis=1,how='all')
    #%%
    # print data for only one year: collect unique top-level (area) columns,
    # preserving their order of appearance
    cols = []
    for col in df1.columns:
        if col[0] not in cols:
            cols.append(col[0])
    writer = pd.ExcelWriter('capacity.xlsx')
    df1.to_excel(excel_writer=writer,sheet_name='all years')
    for year in range(start_year,end_year+1):
        df2 = pd.DataFrame(dtype=float,index=[t for t in tpsr_key],columns=cols)
        for col in df1.columns:
            df2.at[col[1],col[0]] = df1.at[year,col]
        df2 = df2.dropna(axis=0,how='all')
        df2.to_excel(excel_writer=writer,sheet_name=f'{year}')
    writer.close()
def download_svk_data(start_year=2015,end_year=2016):
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
import time
import os
from selenium.webdriver.common.keys | |
Security Modules in the Amazon Redshift Cluster Management Guide.
See also: AWS API Documentation
:example: response = client.create_hsm_configuration(
HsmConfigurationIdentifier='string',
Description='string',
HsmIpAddress='string',
HsmPartitionName='string',
HsmPartitionPassword='<PASSWORD>',
HsmServerPublicCertificate='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type HsmConfigurationIdentifier: string
:param HsmConfigurationIdentifier: [REQUIRED]
The identifier to be assigned to the new Amazon Redshift HSM configuration.
:type Description: string
:param Description: [REQUIRED]
A text description of the HSM configuration to be created.
:type HsmIpAddress: string
:param HsmIpAddress: [REQUIRED]
The IP address that the Amazon Redshift cluster must use to access the HSM.
:type HsmPartitionName: string
:param HsmPartitionName: [REQUIRED]
The name of the partition in the HSM where the Amazon Redshift clusters will store their database encryption keys.
:type HsmPartitionPassword: string
:param HsmPartitionPassword: [REQUIRED]
The password required to access the HSM partition.
:type HsmServerPublicCertificate: string
:param HsmServerPublicCertificate: [REQUIRED]
The HSMs public certificate file. When using Cloud HSM, the file name is server.pem.
:type Tags: list
:param Tags: A list of tag instances.
(dict) --A tag consisting of a name/value pair for a resource.
Key (string) --The key, or name, for the resource tag.
Value (string) --The value for the resource tag.
:rtype: dict
:return: {
'HsmConfiguration': {
'HsmConfigurationIdentifier': 'string',
'Description': 'string',
'HsmIpAddress': 'string',
'HsmPartitionName': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
}
}
"""
pass
def create_snapshot_copy_grant(SnapshotCopyGrantName=None, KmsKeyId=None, Tags=None):
    """
    Creates a snapshot copy grant that permits Amazon Redshift to use a customer master key (CMK) from AWS Key Management Service (AWS KMS) to encrypt copied snapshots in a destination region.
    For more information about managing snapshot copy grants, go to Amazon Redshift Database Encryption in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    :example: response = client.create_snapshot_copy_grant(
    SnapshotCopyGrantName='string',
    KmsKeyId='string',
    Tags=[
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    )
    :type SnapshotCopyGrantName: string
    :param SnapshotCopyGrantName: [REQUIRED]
    The name of the snapshot copy grant. This name must be unique in the region for the AWS account.
    Constraints:
    Must contain from 1 to 63 alphanumeric characters or hyphens.
    Alphabetic characters must be lowercase.
    First character must be a letter.
    Cannot end with a hyphen or contain two consecutive hyphens.
    Must be unique for all clusters within an AWS account.
    :type KmsKeyId: string
    :param KmsKeyId: The unique identifier of the customer master key (CMK) to which to grant Amazon Redshift permission. If no key is specified, the default key is used.
    :type Tags: list
    :param Tags: A list of tag instances.
    (dict) --A tag consisting of a name/value pair for a resource.
    Key (string) --The key, or name, for the resource tag.
    Value (string) --The value for the resource tag.
    :rtype: dict
    :return: {
    'SnapshotCopyGrant': {
    'SnapshotCopyGrantName': 'string',
    'KmsKeyId': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    }
    }
    """
    # NOTE(review): appears to be an auto-generated documentation stub that
    # mirrors the AWS API reference; there is no implementation here.
    pass
def create_tags(ResourceName=None, Tags=None):
    """
    Adds one or more tags to a specified resource.
    A resource can have up to 10 tags. If you try to create more than 10 tags for a resource, you will receive an error and the attempt will fail.
    If you specify a key that already exists for the resource, the value for that key will be updated with the new value.
    See also: AWS API Documentation
    :example: response = client.create_tags(
    ResourceName='string',
    Tags=[
    {
    'Key': 'string',
    'Value': 'string'
    },
    ]
    )
    :type ResourceName: string
    :param ResourceName: [REQUIRED]
    The Amazon Resource Name (ARN) to which you want to add the tag or tags. For example, arn:aws:redshift:us-east-1:123456789:cluster:t1 .
    :type Tags: list
    :param Tags: [REQUIRED]
    One or more name/value pairs to add as tags to the specified resource. Each tag name is passed in with the parameter Key and the corresponding value is passed in with the parameter Value . The Key and Value parameters are separated by a comma (,). Separate multiple tags with a space. For example, --tags 'Key'='owner','Value'='admin' 'Key'='environment','Value'='test' 'Key'='version','Value'='1.0' .
    (dict) --A tag consisting of a name/value pair for a resource.
    Key (string) --The key, or name, for the resource tag.
    Value (string) --The value for the resource tag.
    """
    # NOTE(review): appears to be an auto-generated documentation stub that
    # mirrors the AWS API reference; there is no implementation here.
    pass
def delete_cluster(ClusterIdentifier=None, SkipFinalClusterSnapshot=None, FinalClusterSnapshotIdentifier=None):
    """
    Deletes a previously provisioned cluster. A successful response from the web service indicates that the request was received correctly. Use DescribeClusters to monitor the status of the deletion. The delete operation cannot be canceled or reverted once submitted. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .
    If you want to shut down the cluster and retain it for future use, set SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier . You can later restore this snapshot to resume using the cluster. If a final cluster snapshot is requested, the status of the cluster will be "final-snapshot" while the snapshot is being taken, then it's "deleting" once Amazon Redshift begins deleting the cluster.
    For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation
    :example: response = client.delete_cluster(
    ClusterIdentifier='string',
    SkipFinalClusterSnapshot=True|False,
    FinalClusterSnapshotIdentifier='string'
    )
    :type ClusterIdentifier: string
    :param ClusterIdentifier: [REQUIRED]
    The identifier of the cluster to be deleted.
    Constraints:
    Must contain lowercase characters.
    Must contain from 1 to 63 alphanumeric characters or hyphens.
    First character must be a letter.
    Cannot end with a hyphen or contain two consecutive hyphens.
    :type SkipFinalClusterSnapshot: boolean
    :param SkipFinalClusterSnapshot: Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true , a final cluster snapshot is not created. If false , a final cluster snapshot is created before the cluster is deleted.
    Note
    The FinalClusterSnapshotIdentifier parameter must be specified if SkipFinalClusterSnapshot is false .
    Default: false
    :type FinalClusterSnapshotIdentifier: string
    :param FinalClusterSnapshotIdentifier: The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, SkipFinalClusterSnapshot must be false .
    Constraints:
    Must be 1 to 255 alphanumeric characters.
    First character must be a letter.
    Cannot end with a hyphen or contain two consecutive hyphens.
    :rtype: dict
    :return: {
    'Cluster': {
    'ClusterIdentifier': 'string',
    'NodeType': 'string',
    'ClusterStatus': 'string',
    'ModifyStatus': 'string',
    'MasterUsername': 'string',
    'DBName': 'string',
    'Endpoint': {
    'Address': 'string',
    'Port': 123
    },
    'ClusterCreateTime': datetime(2015, 1, 1),
    'AutomatedSnapshotRetentionPeriod': 123,
    'ClusterSecurityGroups': [
    {
    'ClusterSecurityGroupName': 'string',
    'Status': 'string'
    },
    ],
    'VpcSecurityGroups': [
    {
    'VpcSecurityGroupId': 'string',
    'Status': 'string'
    },
    ],
    'ClusterParameterGroups': [
    {
    'ParameterGroupName': 'string',
    'ParameterApplyStatus': 'string',
    'ClusterParameterStatusList': [
    {
    'ParameterName': 'string',
    'ParameterApplyStatus': 'string',
    'ParameterApplyErrorDescription': 'string'
    },
    ]
    },
    ],
    'ClusterSubnetGroupName': 'string',
    'VpcId': 'string',
    'AvailabilityZone': 'string',
    'PreferredMaintenanceWindow': 'string',
    'PendingModifiedValues': {
    'MasterUserPassword': '<PASSWORD>',
    'NodeType': 'string',
    'NumberOfNodes': 123,
    'ClusterType': 'string',
    'ClusterVersion': 'string',
    'AutomatedSnapshotRetentionPeriod': 123,
    'ClusterIdentifier': 'string',
    'PubliclyAccessible': True|False,
    'EnhancedVpcRouting': True|False
    },
    'ClusterVersion': 'string',
    'AllowVersionUpgrade': True|False,
    'NumberOfNodes': 123,
    'PubliclyAccessible': True|False,
    'Encrypted': True|False,
    'RestoreStatus': {
    'Status': 'string',
    'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
    'SnapshotSizeInMegaBytes': 123,
    'ProgressInMegaBytes': 123,
    'ElapsedTimeInSeconds': 123,
    'EstimatedTimeToCompletionInSeconds': 123
    },
    'HsmStatus': {
    'HsmClientCertificateIdentifier': 'string',
    'HsmConfigurationIdentifier': 'string',
    'Status': 'string'
    },
    'ClusterSnapshotCopyStatus': {
    'DestinationRegion': 'string',
    'RetentionPeriod': 123,
    'SnapshotCopyGrantName': 'string'
    },
    'ClusterPublicKey': 'string',
    'ClusterNodes': [
    {
    'NodeRole': 'string',
    'PrivateIPAddress': 'string',
    'PublicIPAddress': 'string'
    },
    ],
    'ElasticIpStatus': {
    'ElasticIp': 'string',
    'Status': 'string'
    },
    'ClusterRevisionNumber': 'string',
    'Tags': [
    {
    'Key': 'string',
    'Value': 'string'
    },
    ],
    'KmsKeyId': 'string',
    'EnhancedVpcRouting': True|False,
    'IamRoles': [
    {
    'IamRoleArn': 'string',
    'ApplyStatus': 'string'
    },
    ]
    }
    }
    :returns:
    available
    creating
    deleting
    final-snapshot
    hardware-failure
    incompatible-hsm
    incompatible-network
    incompatible-parameters
    incompatible-restore
    modifying
    rebooting
    renaming
    resizing
    rotating-keys
    storage-full
    updating-hsm
    """
    # NOTE(review): appears to be an auto-generated documentation stub that
    # mirrors the AWS API reference; there is no implementation here.
    pass
def delete_cluster_parameter_group(ParameterGroupName=None):
    """
    Deletes a specified Amazon Redshift parameter group.
    See also: AWS API Documentation
    :example: response = client.delete_cluster_parameter_group(
    ParameterGroupName='string'
    )
    :type ParameterGroupName: string
    :param ParameterGroupName: [REQUIRED]
    The name of the parameter group to be deleted.
    Constraints:
    Must be the name of an existing cluster parameter group.
    Cannot delete a default cluster parameter group.
    """
    # NOTE(review): appears to be an auto-generated documentation stub that
    # mirrors the AWS API reference; there is no implementation here.
    pass
def | |
fiat):
url = "https://poloniex.com/public?command=returnOrderBook¤cyPair=%s" % (
self.make_market(crypto, fiat)
)
resp = self.get_url(url).json()
return {
'asks': [(float(x[0]), x[1]) for x in resp['asks']],
'bids': [(float(x[0]), x[1]) for x in resp['bids']]
}
def make_market(self, crypto, fiat):
    """Return the Poloniex pair string, e.g. 'USDT_BTC' (FIAT_CRYPTO, upper)."""
    base = self.fix_symbol(fiat)
    quote = self.fix_symbol(crypto)
    return "{0}_{1}".format(base, quote).upper()
def _make_signature(self, args):
    """Return the hex HMAC-SHA512 signature of the url-encoded request body.

    Poloniex expects this value in the 'Sign' header of trading API calls.
    """
    str_args = urlencode(args)
    # Python 3 compatibility fix: hmac.new requires bytes for both key and
    # message (mirrors the Bittrex implementation in this module, which
    # already encodes). On Python 2 .encode() is a no-op for ASCII str.
    secret = self.api_secret if isinstance(self.api_secret, bytes) else self.api_secret.encode()
    return hmac.new(secret, str_args.encode(), hashlib.sha512).hexdigest()
def _auth_request(self, args):
    """POST a signed request to the Poloniex trading API.

    Adds a nonce to *args*, signs the url-encoded body and sends the
    'Key'/'Sign' authentication headers. Returns the raw response object
    (callers decode with .json()).
    """
    url = "https://poloniex.com/tradingApi"
    # nonce produced by shared helper; added before signing so it is covered
    # by the signature
    args["nonce"] = make_standard_nonce()
    headers = {
        'Sign': self._make_signature(args),
        'Key': self.api_key
    }
    return self.post_url(url, args, headers=headers)
def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
    """Place a buy or sell order on Poloniex.

    type: 'limit' (default), 'fill-or-kill' or 'post-only'.
    side: 'buy' or 'sell' (used directly as the API command).
    Returns the Poloniex order number of the newly created order.
    """
    if type == "fill-or-kill":
        params = {'fillOrKill': 1}
    elif type == 'post-only':
        params = {'postOnly': 1}
    else:
        params = {}
    params["command"] = side
    params["currencyPair"] = self.make_market(crypto, fiat)
    params["rate"] = price
    params["amount"] = eight_decimal_places(amount)
    response = self._auth_request(params)
    return response.json()['orderNumber']
make_order.supported_types = ['limit', 'fill-or-kill', 'post-only']
make_order.minimums = {}
def cancel_order(self, order_id):
    """Cancel an open Poloniex order by its order number.

    Returns True when Poloniex reports success (success == 1).
    """
    r = self._auth_request({
        "command": "cancelOrder",
        "orderNumber": order_id
    })
    # bug fix: _auth_request returns a response object (see make_order),
    # so the JSON body must be decoded before indexing; previously
    # r['success'] would raise a TypeError.
    return r.json()['success'] == 1
def list_orders(self, crypto=None, fiat=None):
    """Return open orders, for one market or (by default) all markets."""
    if not crypto and not fiat:
        pair = "all"
    else:
        # bug fix: the market string was computed but never assigned,
        # leaving 'pair' undefined (NameError) whenever a market was given
        pair = self.make_market(crypto, fiat)
    resp = self._auth_request({
        "command": "returnOpenOrders",
        "currencyPair": pair,
    })
    return resp.json()
def initiate_withdraw(self, crypto, amount, address):
    """Request a withdrawal of *amount* *crypto* to *address*.

    Returns the decoded JSON response from Poloniex.
    """
    resp = self._auth_request({
        # bug fix: the Poloniex trading API method is 'withdraw';
        # the previous value 'withdrawl' was a typo the API rejects
        "command": "withdraw",
        "currency": crypto,
        "amount": eight_decimal_places(amount),
        "address": address
    })
    return resp.json()
def get_deposit_address(self, currency):
    """Return the deposit address for *currency*, creating one if none exists."""
    symbol = self.fix_symbol(currency)
    existing = self._auth_request({"command": "returnDepositAddresses"}).json()
    address = existing.get(symbol.upper())
    if address:
        return address
    return self.generate_new_deposit_address(symbol)
def generate_new_deposit_address(self, crypto):
    """Ask Poloniex to create a fresh deposit address for *crypto*."""
    payload = {
        "command": "generateNewAddress",
        "currency": crypto.upper(),
    }
    return self._auth_request(payload).json()['response']
def get_exchange_balance(self, currency, type="available"):
    """Return the balance for *currency*.

    The 'type' parameter is kept for interface parity with other services
    but is not used by the Poloniex balances endpoint.
    """
    balances = self._auth_request({"command": "returnBalances"}).json()
    code = self.reverse_fix_symbol(currency).upper()
    return float(balances.get(code))
def get_total_exchange_balances(self):
    """Return a mapping of currency code -> balance for non-zero balances."""
    balances = self._auth_request({"command": "returnBalances"}).json()
    totals = {}
    for code, bal in balances.items():
        amount = float(bal)
        if amount > 0:
            totals[self.reverse_fix_symbol(code)] = amount
    return totals
class Bittrex(Service):
    """Exchange service for the Bittrex REST API (v1.1)."""
    service_id = 66
    api_homepage = "https://bittrex.com/home/api"
    exchange_fee_rate = 0.0025

    def check_error(self, response):
        """Raise ServiceError whenever Bittrex reports success == False."""
        j = response.json()
        if not j['success']:
            raise ServiceError("Bittrex returned error: %s" % j['message'])
        super(Bittrex, self).check_error(response)

    def fix_symbol(self, symbol):
        """Map standard symbols to Bittrex codes (usd->usdt, bch->bcc, ...).

        Not idempotent: 'bch' maps to 'bcc' while a literal 'bcc' input is
        rejected, so this must be applied exactly once per symbol.
        """
        if symbol.lower() == 'usd':
            return 'usdt'
        if symbol == 'xmy':
            return 'myr'
        if symbol == 'bcc':
            raise SkipThisService("BCC not supported (maybe you want BCH?)")
        if symbol == 'bch':
            return 'bcc'
        return symbol.lower()

    def reverse_fix_symbol(self, symbol):
        """Inverse of fix_symbol: map Bittrex codes back to standard symbols."""
        symbol = symbol.lower()
        if symbol == 'usdt':
            return 'usd'
        if symbol == 'bcc':
            return 'bch'
        return symbol

    def make_market(self, crypto, fiat):
        """Return the Bittrex market string, e.g. 'USDT-BTC' (FIAT-CRYPTO)."""
        return "%s-%s" % (
            self.fix_symbol(fiat).upper(),
            self.fix_symbol(crypto).upper()
        )

    def get_current_price(self, crypto, fiat):
        """Return the last trade price for the market."""
        url = "https://bittrex.com/api/v1.1/public/getticker?market=%s" % (
            self.make_market(crypto, fiat)
        )
        r = self.get_url(url).json()
        return r['result']['Last']

    def get_orderbook(self, crypto, fiat):
        """Return {'bids': [(price, qty), ...], 'asks': [(price, qty), ...]}."""
        url = "https://bittrex.com/api/v1.1/public/getorderbook?market=%s&type=both" % (
            self.make_market(crypto, fiat)
        )
        r = self.get_url(url).json()['result']
        return {
            'bids': [(x['Rate'], x['Quantity']) for x in r['buy']],
            'asks': [(x['Rate'], x['Quantity']) for x in r['sell']],
        }

    def get_pairs(self):
        """List all markets as 'crypto-fiat' strings (lower case)."""
        url = "https://bittrex.com/api/v1.1/public/getmarkets"
        r = self.get_url(url).json()['result']
        ret = []
        for x in r:
            crypto = x['MarketCurrency'].lower()
            fiat = x['BaseCurrency'].lower()
            if fiat == 'usdt':
                fiat = 'usd'
            ret.append("%s-%s" % (crypto, fiat))
        return ret

    def _make_signature(self, url):
        """HMAC-SHA512 of the full request URL (Bittrex 'apisign' header)."""
        return hmac.new(
            self.api_secret.encode(), url.encode(), hashlib.sha512
        ).hexdigest()

    def _auth_request(self, path, params):
        """Perform a signed GET request against a private v1.1 endpoint."""
        if not self.api_key or not self.api_secret:
            raise Exception("Trade API requires an API key and secret.")
        params["apikey"] = self.api_key
        params["nonce"] = make_standard_nonce()
        url = "https://bittrex.com/api" + path + "?" + urlencode(params)
        return self.get_url(url, headers={"apisign": self._make_signature(url)})

    def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
        """Place a limit order; returns the new order's uuid."""
        path = "/v1.1/market/%slimit" % side
        r = self._auth_request(path, {
            'market': self.make_market(crypto, fiat),
            'quantity': eight_decimal_places(amount),
            'rate': price
        })
        return r.json()['result']['uuid']
    make_order.supported_types = ['limit']
    make_order.minimums = {}

    def cancel_order(self, order_id):
        """Cancel an open order by uuid; returns True on success."""
        path = "/v1.1/market/cancel"
        r = self._auth_request(path, {'uuid': order_id})
        # bug fix: _auth_request returns a response object, so the JSON body
        # must be decoded before indexing (r['success'] raised TypeError)
        return r.json()['success']

    def get_exchange_balance(self, currency, type="available"):
        """Return the 'Available' or 'Balance' amount for *currency*."""
        # bug fix: fix_symbol was previously applied twice; it is not
        # idempotent ('bch' -> 'bcc' -> SkipThisService), so apply it once
        path = "/v1.1/account/getbalance"
        resp = self._auth_request(path, {'currency': self.fix_symbol(currency)}).json()['result']
        return resp[type.capitalize()] or 0

    def get_total_exchange_balances(self):
        """Return mapping of standard symbol -> balance, non-zero only."""
        path = "/v1.1/account/getbalances"
        resp = self._auth_request(path, {}).json()['result']
        return {
            self.reverse_fix_symbol(x['Currency']): x['Balance'] for x in resp
            if x['Balance'] > 0
        }

    def get_deposit_address(self, crypto):
        """Return the deposit address for *crypto*."""
        path = "/v1.1/account/getdepositaddress"
        resp = self._auth_request(path, {'currency': self.fix_symbol(crypto)})
        return resp.json()['result']['Address']

    def initiate_withdraw(self, crypto, amount, address):
        """Request a withdrawal; returns the decoded JSON response."""
        path = "/v1.1/account/withdraw"
        resp = self._auth_request(path, {
            'currency': self.fix_symbol(crypto),
            'quantity': eight_decimal_places(amount),
            'address': address
        })
        return resp.json()
class Huobi(Service):
    """Price lookups against the (legacy) Huobi public market API."""
    service_id = 67
    api_homepage = "https://github.com/huobiapi/API_Docs_en/wiki"
    name = "Huobi"

    def check_error(self, response):
        """Huobi signals errors via a non-200 status with a JSON 'error' field."""
        if response.status_code != 200:
            j = response.json()
            raise ServiceError("Huobi returned error: %s" % j['error'])
        super(Huobi, self).check_error(response)

    def get_current_price(self, crypto, fiat):
        """Return the last trade price; only CNY and USD markets exist."""
        market_prefix = fiat.lower()
        if market_prefix == "cny":
            # the CNY endpoint lives under 'staticmarket' rather than 'cnymarket'
            market_prefix = 'static'
        elif market_prefix == 'usd':
            pass
        else:
            raise SkipThisService("CNY and USD only fiat supported")
        url = "http://api.huobi.com/%smarket/detail_%s_json.js" % (
            market_prefix, crypto.lower()
        )
        payload = self.get_url(url).json()
        return payload['p_last']
class BTER(Service):
    """Exchange service for the BTER public and private APIs."""
    service_id = 25
    api_homepage = "https://bter.com/api"
    name = "BTER"

    def fix_symbol(self, symbol):
        """BTER lists Bitcoin Cash under the code 'bcc'."""
        if symbol == 'bch':
            return 'bcc'
        return symbol

    def make_market(self, crypto, fiat):
        """Return the BTER pair string, e.g. 'btc_usd' (CRYPTO_FIAT, lower)."""
        return ("%s_%s" % (self.fix_symbol(crypto), fiat)).lower()

    def get_current_price(self, crypto, fiat):
        """Return the last trade price for the pair (0.0 when missing)."""
        url = "http://data.bter.com/api/1/ticker/%s" % self.make_market(crypto, fiat)
        response = self.get_url(url).json()
        if response.get('result', '') == 'false':
            # bug fix: the error message lives in 'response';
            # the previous code referenced an undefined name 'r'
            raise ServiceError("BTER returned error: " + response['message'])
        return float(response['last'] or 0)

    def get_pairs(self):
        """List supported pairs as 'crypto-fiat' strings."""
        url = "http://data.bter.com/api/1/pairs"
        r = self.get_url(url).json()
        return [x.replace("_", "-") for x in r]

    def get_orderbook(self, crypto, fiat):
        """Return {'bids': [(price, qty), ...], 'asks': [(price, qty), ...]}."""
        url = "http://data.bter.com/api2/1/orderBook/%s" % self.make_market(crypto, fiat)
        resp = self.get_url(url).json()
        return {
            'bids': [(float(x[0]), float(x[1])) for x in resp['bids']],
            'asks': [(float(x[0]), float(x[1])) for x in resp['asks']],
        }

    def _make_signature(self, params):
        """HMAC-SHA512 signature of the url-encoded parameters."""
        # encode to bytes so hashing also works on Python 3
        secret = self.api_secret if isinstance(self.api_secret, bytes) else self.api_secret.encode()
        return hmac.new(
            secret, urlencode(params).encode(), hashlib.sha512
        ).hexdigest()

    def _auth_request(self, url, params):
        """Signed POST to a private endpoint.

        Deliberately disabled: this code path has never been verified
        against the live API, so it raises before sending anything.
        """
        raise Exception("Not tested")
        return self.post_url(url, headers={
            'Content-Type': 'application/x-www-form-urlencoded',
            'Key': self.api_key,
            'Sign': self._make_signature(params)
        })

    def get_exchange_balance(self, currency, type="available"):
        """Return the 'available' (or 'locked') balance for *currency*."""
        url = "https://api.bter.com/api2/1/private/balances"
        resp = self._auth_request(url, {})
        for curr, bal in resp.json()[type].items():
            if curr == currency.upper():
                return float(bal)

    def get_deposit_address(self, currency):
        """Return the deposit address for *currency*."""
        url = "https://bter.com/api2/1/private/depositAddress"
        resp = self._auth_request(url, {'currency': currency.upper()})
        return resp.json()['addr']
class Wex(Service):
    """Exchange service for the Wex (formerly BTC-e) REST API."""
    service_id = 7
    api_homepage = "https://wex.nz/api/documentation"
    name = "Wex"
    exchange_fee_rate = 0.002

    def check_error(self, response):
        """Raise ServiceError for non-JSON bodies or API-level errors."""
        try:
            j = response.json()
        except Exception:
            # body was not valid JSON; surface the raw content
            raise ServiceError("Wex returned error: %s" % response.content)
        if 'error' in j:
            raise ServiceError("Wex returned error: %s" % j['error'])
        super(Wex, self).check_error(response)

    def make_market(self, crypto, fiat):
        """Return the Wex pair string, e.g. 'btc_usd' (CRYPTO_FIAT, lower)."""
        return "%s_%s" % (
            self.fix_symbol(crypto).lower(),
            self.fix_symbol(fiat).lower()
        )

    def fix_symbol(self, symbol):
        """Wex lists Dash under the code 'dsh'."""
        if symbol == 'dash':
            return 'dsh'
        return symbol

    def reverse_fix_symbol(self, symbol):
        """Inverse of fix_symbol."""
        if symbol == 'dsh':
            return 'dash'
        return symbol

    def _fix_fiat_symbol(self, fiat):
        # no fiat remapping needed; kept for interface parity
        return fiat

    def get_current_price(self, crypto, fiat):
        """Return the last trade price for the pair."""
        pair = self.make_market(crypto, fiat)
        url = "https://wex.nz/api/3/ticker/" + pair
        response = self.get_url(url).json()
        return response[pair]['last']

    def get_pairs(self):
        """List supported pairs as 'crypto-fiat' strings."""
        url = "https://wex.nz/api/3/info"
        r = self.get_url(url).json()
        return [x.replace('_', '-') for x in r['pairs'].keys()]

    def get_orderbook(self, crypto, fiat):
        """Return {'bids': [(price, qty), ...], 'asks': [(price, qty), ...]}."""
        m = self.make_market(crypto, fiat)
        url = "https://wex.nz/api/3/depth/%s" % m
        resp = self.get_url(url).json()
        return {
            'bids': [(x[0], x[1]) for x in resp[m]['bids']],
            'asks': [(x[0], x[1]) for x in resp[m]['asks']]
        }

    def _make_signature(self, params):
        """HMAC-SHA512 signature of the url-encoded parameters."""
        # encode to bytes so hashing also works on Python 3
        secret = self.api_secret if isinstance(self.api_secret, bytes) else self.api_secret.encode()
        return hmac.new(
            secret, urlencode(params).encode(), hashlib.sha512
        ).hexdigest()

    def _auth_request(self, params):
        """Signed POST to the trading API with a fresh stateful nonce."""
        # max nonce wex will accept is 4294967294
        params['nonce'] = make_stateful_nonce(self.name)
        headers = {"Key": self.api_key, "Sign": self._make_signature(params)}
        return self.post_url("https://wex.nz/tapi", params, headers=headers)

    def make_order(self, crypto, fiat, amount, price, type="limit", side="buy"):
        """Place a limit order via the 'Trade' method; returns the response."""
        params = {
            'method': 'Trade',
            'pair': self.make_market(crypto, fiat),
            'type': side,
            'rate': price,
            'amount': eight_decimal_places(amount),
        }
        return self._auth_request(params)
    make_order.supported_types = ['limit']
    make_order.minimums = {'btc': 0.001, 'ltc': 0.1}

    def get_deposit_address(self, crypto):
        """Return the deposit address for *crypto*."""
        params = {'coinName': crypto.lower(), 'method': 'CoinDepositAddress'}
        resp = self._auth_request(params).json()
        return resp['return']['address']

    def get_exchange_balance(self, currency, type="available"):
        """Return the balance for *currency* (0 when the key is absent)."""
        resp = self._auth_request({'method': 'getInfo'}).json()
        try:
            return resp['return']['funds'][self.fix_symbol(currency).lower()]
        # bug fix: dict lookups raise KeyError, not IndexError, so the
        # previous handler could never catch a missing currency
        except KeyError:
            return 0

    def get_total_exchange_balances(self):
        """Return mapping of standard symbol -> balance, non-zero only."""
        resp = self._auth_request({'method': 'getInfo'}).json()['return']['funds']
        return {
            # NOTE(review): the "et" suffix filter presumably excludes
            # synthetic token entries — confirm against the funds payload
            self.reverse_fix_symbol(code): bal for code, bal in resp.items()
            if not code.endswith("et") and bal > 0
        }

    def initiate_withdraw(self, currency, amount, address):
        """Request a withdrawal; returns the decoded JSON response."""
        resp = self._auth_request({
            'method': 'WithdrawCoin',
            'coinName': self.fix_symbol(currency),
            'amount': amount,
            'address': address,
        })
        return resp.json()
class ViaBTC(Service):
    service_id = 116

    def get_current_price(self, crypto, fiat):
        """Return the last trade price for the market from ViaBTC."""
        market = "%s%s" % (crypto.upper(), fiat.upper())
        url = "https://www.viabtc.com/api/v1/market/ticker?market=" + market
        ticker = self.get_url(url).json()['data']['ticker']
        return float(ticker['last'])
class CryptoDao(Service):
    service_id = 115
    api_homepage = "https://cryptodao.com/doc/api"

    def get_current_price(self, crypto, fiat):
        """Return the last trade price from CryptoDao.

        Note: the API's ``source`` is the fiat and ``target`` the crypto.
        """
        url = "https://cryptodao.com/api/ticker?source=%s&target=%s" % (
            fiat.upper(), crypto.upper()
        )
        return self.get_url(url).json()['last']

    def get_orderbook(self, crypto, fiat):
        """Return the raw depth response from CryptoDao."""
        url = "https://cryptodao.com/api/depth?source=%s&target=%s" % (
            fiat.upper(), crypto.upper()
        )
        return self.get_url(url).json()
class HitBTC(Service):
service_id = 109
api_homepage = "https://hitbtc.com/api"
exchange_fee_rate = 0.001
def check_error(self, response):
j = response.json()
if response.status_code in (400, 401) and 'error' in j:
e = j['error']
raise SkipThisService("HitBTC returned %s %s: %s" % (
e['code'], e['message'], e['description']
))
if 'code' in j:
raise SkipThisService("HitBTC returned %s: %s" % (j['code'], j['message']))
super(HitBTC, self).check_error(response)
def fix_symbol(self, symbol):
return symbol.lower()
def make_market(self, crypto, fiat):
return ("%s%s" % (self.fix_symbol(crypto), self.fix_symbol(fiat))).upper()
def get_pairs(self):
url = 'https://api.hitbtc.com/api/1/public/symbols'
r = self.get_url(url).json()['symbols']
return [("%s-%s" % (x['commodity'], x['currency'])).lower() for x in r]
def get_current_price(self, crypto, fiat):
url = "https://api.hitbtc.com/api/1/public/%s/ticker" % self.make_market(crypto, fiat)
r = self.get_url(url).json()
return float(r['last'])
def get_orderbook(self, crypto, fiat):
url = "https://api.hitbtc.com/api/1/public/%s/orderbook" % self.make_market(crypto, fiat)
resp = self.get_url(url).json()
return {
'asks': [(float(x[0]), float(x[1])) for | |
"""
This module provides functions to facilitate reporting information
about uncertainty calculations.
The abbreviation ``rp`` is defined as an alias for :mod:`reporting`,
to resolve the names of objects defined in this module.
Reporting functions
-------------------
* The function :func:`budget` produces an uncertainty budget.
* The function :func:`k_factor` returns the coverage factor
used for real-valued problems (based on the Student-t distribution).
* The function :func:`k_to_dof` returns the degrees of freedom
corresponding to a given coverage factor and coverage probability.
* The function :func:`k2_factor_sq` returns
coverage factor squared for the complex-valued problem.
* The function :func:`k2_to_dof` returns the degrees of freedom
corresponding to a given coverage factor and coverage probability
in complex-valued problems.
* Functions :func:`u_bar` and :func:`v_bar` return summary values
for matrix results associated with 2-D uncertainty.
Uncertainty functions
---------------------
* The function :func:`u_component` returns the signed
component of uncertainty in one uncertain number
due to uncertainty in another.
* The function :func:`sensitivity` returns the partial
derivative of one uncertain number with respect to another.
This is often called a sensitivity coefficient.
Type functions
--------------
* The function :func:`is_ureal` can be used to
identify uncertain real numbers.
* The function :func:`is_ucomplex` can be used to
identify uncertain complex numbers.
Module contents
---------------
"""
from __future__ import division # True division
import math
import numbers
from operator import attrgetter as getter
from functools import reduce
from scipy import special, optimize
try:
from itertools import izip # Python 2
except ImportError:
izip = zip
long = int
from GTC.lib import (
UncertainReal,
UncertainComplex,
_is_uncertain_real_constant,
_is_uncertain_complex_constant
)
from GTC.named_tuples import (
ComponentOfUncertainty, # relates to complex quantities
Influence,
Component
)
from GTC.vector import extend_vector
from GTC import (
is_sequence,
inf,
inf_dof,
)
# Public API of this module.
__all__ = (
    'budget',
    'k_factor',
    'k_to_dof',
    'k2_factor_sq',
    'k2_to_dof',
    'u_component',
    'sensitivity',
    'is_ureal',
    'is_ucomplex',
    'v_bar',
    'u_bar',
)
#--------------------------------------------------------------------------
# Render a (int, int) unique-identifier pair as "first_second".
uid_str = lambda id: "{0[0]:d}_{0[1]:d}".format(id)
#--------------------------------------------------------------------------
def is_ureal(x):
    """Return ``True`` if ``x`` is an uncertain real number

    **Example**::

        >>> x = ureal(1,1)
        >>> reporting.is_ureal(x)
        True

    """
    return isinstance(x, UncertainReal)
#--------------------------------------------------------------------------
def is_ucomplex(z):
    """Return ``True`` if ``z`` is an uncertain complex number

    **Example**::

        >>> z = ucomplex(1+2j,(0.1,0.2))
        >>> reporting.is_ucomplex(z)
        True

    """
    return isinstance(z, UncertainComplex)
#------------------------------------------------------------
def _df_k2(k2,p,nu1,TOL):
    """
    Return `nu2` such that the integral of
    F(nu1,nu2) from -infty to `x` is `p`

    `x` is k2**2 * nu2/ ( nu1*(nu2+1) )

    NOTE(review): the `TOL` parameter is currently unused -- confirm
    whether it was meant to be passed to the root finder.
    """
    # We have `k2` the integral limit, so `pf` gives us `p`
    # we must vary the `nu2` argument until the
    # returned value equals `p`.
    # `fdtr` returns the integral of F probability density from -infty to `x`
    def fn(nu2):
        x = k2**2 * nu2/ ( nu1*(nu2+1) )
        # return pf(x,nu1,nu2) - p
        return special.fdtr(nu1,nu2,x) - p
    # dof here is nu2-1 and cannot be less than 2
    # This setting of `lo` is not a strict bound, because
    # the calculation will succeed, we just don't want to
    # go there.
    lo = 1 - 1E-3
    fn_lo = fn(lo)
    if fn_lo > 0.0:
        raise RuntimeError(
            "dof < 2 cannot be calculated"
        )
    # Walk up a ladder of candidate upper limits until the root is
    # bracketed ( fn changes sign ), then solve with Ridder's method.
    upper_limit = (20,50,1E2,1E3,inf_dof)
    for hi in upper_limit:
        if fn(hi) > 0.0:
            return optimize.ridder(fn,lo,hi)
        else:
            lo = hi
    # No sign change up to `inf_dof`: treat the dof as infinite.
    return inf
#----------------------------------------------------------------------------
def k2_to_dof(k2, p=95):
    """Return the dof corresponding to a bivariate coverage factor `k2`

    :arg k2: coverage factor (>0)
    :arg p: coverage probability (%)
    :type k2: float
    :type p: int or float

    Evaluates a number of degrees-of-freedom given a coverage
    factor for an elliptical uncertainty region with coverage
    probability ``p`` based on the F-distribution.

    **Example**::

        >>> reporting.k2_to_dof(2.6,95)
        34.35788424389927

    """
    # Guard clauses: reject non-positive factors and out-of-range probabilities.
    if k2 <= 0:
        raise RuntimeError( "invalid k: {}".format(k2) )
    if p <= 0 or p >= 100:
        raise RuntimeError( "invalid p: {}".format(p) )
    # Solve for nu2 with an F(2, nu2) distribution; dof is nu2 + 1.
    return _df_k2(k2, p / 100.0, 2, 1E-7) + 1
#----------------------------------------------------------------------------
def k2_factor_sq(df=inf,p=95):
    """Return a squared coverage factor for an elliptical uncertainty region

    :arg df: the degrees-of-freedom (>=2)
    :arg p: the coverage probability (%)
    :type df: float
    :type p: int or float

    Evaluates the square of the coverage factor for an elliptical uncertainty
    region with coverage probability ``p`` and ``df`` degrees of freedom
    based on the F-distribution.

    **Example**::

        >>> reporting.k2_factor_sq(3)
        56.99999999999994

    """
    # Validate `p` before use, consistent with `k_factor` and `k2_to_dof`.
    # Previously an out-of-range `p` produced nonsense (e.g. math.log of a
    # negative number for p >= 100) instead of a clear error.
    if p <= 0 or p >= 100:
        raise RuntimeError( "invalid p: {}".format( p ) )
    p = p / 100.0
    if df > inf_dof:
        # Infinite-dof limit: chi-squared with 2 degrees of freedom.
        return -2.0 * math.log(1-p)
    elif(df>1):
        # norm = l * (n-1) / (n - l)  in the general
        # 'l'-dimensional case for 'n' observations
        # here l = 2, df = n-1
        norm = 2*df / (df-1)
        # `fdtri` is the inverse of the cumulative F distribution
        # returning `x` such that `fdtr(dfn, dfd, x) = p`
        return norm*special.fdtri(2.0,df-1.0,p)
    else:
        raise RuntimeError("invalid df={!r}".format( df ) )
#----------------------------------------------------------------------------
def k_factor(df=inf, p=95):
    """Return a coverage factor for an uncertainty interval

    :arg df: the degrees-of-freedom (>1)
    :arg p: the coverage probability (%)
    :type df: float
    :type p: int or float

    Evaluates the coverage factor for an uncertainty interval
    with coverage probability ``p`` and degrees-of-freedom ``df``
    based on the Student t-distribution.

    **Example**::

        >>> reporting.k_factor(3)
        3.182446305284263

    """
    if not (0.0 < p < 100.0):
        raise RuntimeError( "invalid p: {}".format( p ) )
    # Two-sided interval: convert coverage probability to the quantile.
    quantile = (1.0 + p/100.0)/2.0
    if df > inf_dof:
        # inverse cumulative Gaussian distribution
        return special.ndtri(quantile)
    if df >= 1:
        # inverse cumulative Student-t distribution
        return special.stdtrit(df, quantile)
    raise RuntimeError( "invalid df: {}".format( df ) )
#----------------------------------------------------------------------------
def k_to_dof(k, p=95):
    """Return the dof corresponding to a univariate coverage factor `k`

    :arg k: coverage factor (>0)
    :arg p: coverage probability (%)
    :type k: float
    :type p: int or float

    Evaluates the degrees-of-freedom given a coverage factor for
    an uncertainty interval with coverage probability ``p``
    based on the Student t-distribution.

    **Example**::

        >>> reporting.k_to_dof(2.0,95)
        60.43756442698591

    """
    if k <= 0:
        raise RuntimeError( "invalid k: {}".format( k ) )
    if p <= 0 or p >= 100:
        raise RuntimeError( "invalid p: {}".format( p ) )
    # Two-sided interval: convert coverage probability to the quantile.
    quantile = (1.0 + p/100.0)/2.0
    df = special.stdtridf(quantile, k)
    # Clamp very large results to `inf` for presentation.
    return df if df < inf_dof else inf
#----------------------------------------------------------------------------
def sensitivity(y,x):
"""Return the first partial derivative of ``y`` with respect to ``x``
:arg y: :class:`~lib.UncertainReal` or :class:`~lib.UncertainComplex` or :class:`.UncertainArray`
:arg x: :class:`~lib.UncertainReal` or :class:`~lib.UncertainComplex` or :class:`.UncertainArray`
If ``x`` and ``y`` are uncertain real numbers, return a float.
If ``y`` or ``x`` is an uncertain complex number, return
a 4-element sequence of float, representing the Jacobian matrix.
When ``x`` and ``y`` are arrays, an :class:`.UncertainArray`
is returned containing the results of applying this function
to the array elements.
Otherwise, return 0.
.. versionadded:: 1.1
**Example**::
>>> x = ureal(3,1)
>>> y = 3 * x
>>> reporting.sensitivity(y,x)
3.0
>>> q = ucomplex(2,1)
>>> z = magnitude(q) # uncertain real numbers
>>> reporting.sensitivity(z,q)
JacobianMatrix(rr=1.0, ri=0.0, ir=0.0, ii=0.0)
>>> r = ucomplex(3,1)
>>> z = q * r
>>> reporting.sensitivity(z,q)
JacobianMatrix(rr=3.0, ri=-0.0, ir=0.0, ii=3.0)
.. note::
This function evaluates the sensitivity (partial derivative) of one
uncertain number with respect to another term ``x``.
However, if the standard uncertainty of ``x`` is zero, the term is treated
as being absent from the analytical model, so a sensitivity of 0 is reported.
For example ::
>>> z1 = ucomplex(1+2j,[0,1])
>>> z2 = ucomplex(-1.2-0.9j,[1,0])
>>> z = z1*z2
>>> rp.sensitivity(z,z1.real)
JacobianMatrix(rr=0.0, ri=0.0, ir=0.0, ii=0.0)
>>> rp.sensitivity(z,z1.imag)
JacobianMatrix(rr=0.9, ri=0.0, ir=-1.2, ii=0.0)
>>> rp.sensitivity(z,z2.real)
JacobianMatrix(rr=1.0, ri=0.0, ir=2.0, ii=0.0)
>>> rp.sensitivity(z,z2.imag)
JacobianMatrix(rr=0.0, ri=0.0, ir=0.0, ii=0.0)
If all the partial derivatives of a measurement model are required,
regardless of the associated standard uncertainties, the preferred method is
to assign all standard uncertainty values to unity.
Using the same example as above ::
>>> z1 = ucomplex(1+2j,1)
>>> z2 = ucomplex(-1.2-0.9j,1)
>>> z = z1*z2
>>> rp.sensitivity(z,z1.real)
JacobianMatrix(rr=-1.2, ri=0.0, ir=-0.9, ii=0.0)
>>> rp.sensitivity(z,z1.imag)
JacobianMatrix(rr=0.9, ri=0.0, ir=-1.2, ii=0.0)
>>> rp.sensitivity(z,z2.real)
JacobianMatrix(rr=1.0, ri=0.0, ir=2.0, ii=0.0)
>>> rp.sensitivity(z,z2.imag)
JacobianMatrix(rr=-2.0, ri=0.0, ir=1.0, ii=0.0)
"""
# There are three types that define | |
import dataclasses
import pickle
import re
import unittest
from pathlib import Path
from struct import unpack
from typing import (
NewType,
TYPE_CHECKING,
Any,
Dict,
List,
Match,
Tuple,
Union,
cast,
)
try:
from compat import ( # type: ignore
log_debug,
log_error,
log_warn,
InstructionTextToken,
InstructionTextTokenType,
)
except ModuleNotFoundError:
from .compat import (
log_debug,
log_error,
log_warn,
InstructionTextToken,
InstructionTextTokenType,
)
# Only needed for type checking. Causes circular import
if TYPE_CHECKING:
from .dex import DexFile, FileOffset
# Pickled instruction metadata shipped alongside this module.
PICKLE_FILENAME = "instruction_data.pickle"
INSTRUCTIONS_PICKLE_PATH = Path(__file__).resolve().parent / PICKLE_FILENAME
@dataclasses.dataclass
class SmaliInstructionFormat:
    """Row of https://source.android.com/devices/tech/dalvik/instruction-formats#formats

    Example:
        _formatid: "12x"
        format_: "B|A|op"
        syntax: "op vA, vB"
        insn_len: 1
        num_regs: 2
        typecode: "x"
    """

    _formatid: str  # format ID string, e.g. "12x"
    format_: str  # nibble layout, e.g. "B|A|op"
    syntax: str  # operand syntax template, e.g. "op vA, vB"
    # Parsed from id:
    insn_len: int  # first digit of the format ID (length; presumably 16-bit code units -- confirm)
    num_regs: int  # second digit of the format ID (register count)
    typecode: str  # trailing letter of the format ID
@dataclasses.dataclass
class SmaliInstructionInfo:
    """Row of https://source.android.com/devices/tech/dalvik/dalvik-bytecode#instructions

    Example:
        _opcode: 1
        _formatid: "12x"
        fmt: (object)
        mnemonic: "move"
        syntax: "vA, vB"
        arguments: "A: destination register (4 bits)\nB: source register (4 bits)"
        description: "Move the contents of one non-object register to another."
    """

    _opcode: int  # numeric opcode
    _formatid: str  # format ID linking to the format table
    fmt: SmaliInstructionFormat  # resolved format row
    mnemonic: str  # instruction mnemonic, e.g. "move"
    syntax: str  # operand portion of the syntax
    arguments: str  # documentation of each operand field
    description: str  # prose description from the spec
@dataclasses.dataclass
class SmaliPackedSwitchPayload:
    """packed-switch pseudo-instruction payload (contiguous keys)."""

    _total_size: int  # total payload size
    size: int  # ushort
    first_key: int  # key of the first case; subsequent keys are sequential
    targets: List[int]  # branch targets, one per case
@dataclasses.dataclass
class SmaliSparseSwitchPayload:
    """sparse-switch pseudo-instruction payload (explicit key list)."""

    _total_size: int  # total payload size
    size: int  # ushort
    keys: List[int]  # case keys, parallel to `targets`
    targets: List[int]  # branch targets, parallel to `keys`
@dataclasses.dataclass
class SmaliFillArrayDataPayload:
    """fill-array-data pseudo-instruction payload (raw element table)."""

    _total_size: int  # total payload size
    element_width: int  # ushort
    size: int  # uint
    data: bytes  # ubyte
# Map from file offset to the parsed pseudo-instruction payload located
# there (packed-switch / sparse-switch / fill-array-data tables).
PseudoInstructions = NewType(
    "PseudoInstructions",
    Dict[
        "FileOffset",
        Union[
            SmaliPackedSwitchPayload,
            SmaliFillArrayDataPayload,
            SmaliSparseSwitchPayload,
        ],
    ],
)
def slice_nibbles(data: bytes, start_nibble: int, size: int = 1) -> int:
    """Slice out integer value of bytes indexed by nibble instead of byte.

    This function is only designed to work with current instruction formats. It
    makes a number of assumptions about byte order and positioning for these
    specific cases: sizes 2 and 4 assume byte alignment, and sizes 8/16 read
    a sequence of 16-bit words ordered low to high.
    """
    byte_idx = start_nibble // 2
    if size == 1:
        # High nibble for even positions, low nibble for odd.
        shift = 4 if start_nibble % 2 == 0 else 0
        return int((data[byte_idx] >> shift) & 0xF)
    if size == 2:
        # Single byte, assuming byte-alignment
        return data[byte_idx]
    if size == 4:
        # Normal 2-byte value, assuming byte-alignment
        return (data[byte_idx] << 8) + data[byte_idx + 1]
    if size in (8, 16):
        # The 2-byte values are ordered from low to high
        return sum(
            ((data[nib // 2] << 8) + data[nib // 2 + 1]) << (idx * 16)
            for idx, nib in enumerate(range(start_nibble, start_nibble + size, 4))
        )
    log_error(f"slice_nibbles called with unexpected size: {size}. Returning 0")
    return 0
def sign(val: int, size: int) -> int:
    """Convert unsigned val of size nibbles into a signed int (two's complement)."""
    bits = 4 * size
    if val & (1 << (bits - 1)):
        return val - (1 << bits)
    return val
def parse_with_format(data: bytes, fmt: str) -> Dict[str, int]:
    """Extract values from nibbles using format string.

    Walks the pipe/space-separated chunks of `fmt`, skipping opcode and
    padding chunks and reading each run of capital letters out of `data`
    with :func:`slice_nibbles`.  ``...lo``/``...hi`` chunk pairs are fused
    into one wide value before being read.

    See TestFormatParsing for examples
    """
    parsed: Dict[str, int] = dict()
    pos = 0  # current nibble offset into `data`
    pending = ""  # accumulates the chunks of a lo/hi split value
    for word in fmt.split(" "):
        for piece in word.split("|"):
            if pending or "lo" in piece:
                pending += piece
                if "hi" not in pending:
                    continue  # keep accumulating until the "hi" half arrives
                piece = pending.replace("lo", "").replace("hi", "")
                pending = ""
            if piece in ("op", "ØØ"):
                # Opcode byte / explicit zero padding: just advance.
                # (Checked before isupper(): "ØØ".isupper() is True.)
                pos += 2
            elif piece.isupper():
                # Actually parse binary
                parsed[piece[0]] = slice_nibbles(data, pos, len(piece))
                pos += len(piece)
            else:
                raise ValueError(f'failed reading format "{piece}"')
    return parsed
class TestNibbleSlicing(unittest.TestCase):
    """Table-driven checks for slice_nibbles."""

    def test_single_even(self) -> None:
        data = b"\x12\x34\x56\x78"
        for start, expected in ((0, 1), (2, 3), (4, 5), (6, 7)):
            self.assertEqual(slice_nibbles(data, start), expected)

    def test_single_odd(self) -> None:
        data = b"\x12\x34\x56\x78"
        for start, expected in ((1, 2), (3, 4), (5, 6), (7, 8)):
            self.assertEqual(slice_nibbles(data, start), expected)

    def test_byte(self) -> None:
        data = b"\x12\x34\x56\x78"
        for start, expected in ((0, 0x12), (2, 0x34), (4, 0x56), (6, 0x78)):
            self.assertEqual(slice_nibbles(data, start, 2), expected)

    def test_two_byte(self) -> None:
        data = b"\x12\x34\x56\x78"
        for start, expected in ((0, 0x1234), (2, 0x3456), (4, 0x5678)):
            self.assertEqual(slice_nibbles(data, start, 4), expected)

    def test_four_byte(self) -> None:
        # Leading/trailing padding bytes must not affect the sliced value.
        cases = (
            (b"\x12\x34\x56\x78", 0),
            (b"\x00\x12\x34\x56\x78", 2),
            (b"\x00\x12\x34\x56\x78\x00", 2),
        )
        for data, start in cases:
            self.assertEqual(slice_nibbles(data, start, 8), 0x56781234)

    def test_eight_byte(self) -> None:
        cases = (
            (b"\x12\x34\x56\x78\x9a\xbc\xde\xf0", 0),
            (b"\x00\x12\x34\x56\x78\x9a\xbc\xde\xf0", 2),
        )
        for data, start in cases:
            self.assertEqual(slice_nibbles(data, start, 16), 0xDEF09ABC56781234)
class TestFormatParsing(unittest.TestCase):
    """Checks parse_with_format against documented instruction formats."""

    def _check(self, data: bytes, fmt: str, expected: Dict[str, int]) -> None:
        # One assertion shared by every format case below.
        self.assertEqual(parse_with_format(data, fmt), expected)

    def test_10x(self) -> None:
        """10x -> ØØ|op"""
        self._check(b"\x00\x0e", "ØØ|op", {})

    def test_11n(self) -> None:
        """11n -> B|A|op"""
        self._check(b"\x10\x12", "B|A|op", {"A": 0, "B": 1})

    def test_21h(self) -> None:
        """21h -> AA|op BBBB"""
        self._check(b"\x00\x15\x00\x02", "AA|op BBBB", {"A": 0, "B": 0x2})

    def test_21c(self) -> None:
        """21c -> AA|op BBBB"""
        self._check(b"\x00\x67\x00\x00", "AA|op BBBB", {"A": 0, "B": 0})

    def test_31i(self) -> None:
        """31i -> AA|op BBBBlo BBBBhi"""
        self._check(
            b"\x01\x14\xff\xff\x00\xff",
            "AA|op BBBBlo BBBBhi",
            {"A": 1, "B": 0x00FFFFFF},
        )

    def test_35c(self) -> None:
        """35c -> A|G|op BBBB F|E|D|C"""
        self._check(
            b"\x10\x70\x00\x07\x00\x00",
            "A|G|op BBBB F|E|D|C",
            {"A": 1, "B": 7, "C": 0, "D": 0, "E": 0, "F": 0, "G": 0},
        )

    def test_51l(self) -> None:
        """51l -> AA|op BBBBlo BBBB BBBB BBBBhi"""
        self._check(
            b"\x01\x18\x01\x02\x03\x04\x05\x06\x07\x08",
            "AA|op BBBBlo BBBB BBBB BBBBhi",
            {"A": 1, "B": 0x0708050603040102},
        )
def endian_swap_shorts(data: bytes) -> bytes:
    """Swap the two bytes of every 16-bit word in `data`."""
    assert (len(data) % 2) == 0
    swapped = bytearray()
    for idx in range(0, len(data), 2):
        swapped.append(data[idx + 1])
        swapped.append(data[idx])
    return bytes(swapped)
def format_args_with_syntax(args: Dict[str, int], syntax: str) -> str:
    """Format syntax strings with parsed arguments.

    "Syntax" and "Arguments" come from the table on
    https://source.android.com/devices/tech/dalvik/dalvik-bytecode#instructions

    Takes a dictionary mapping each argument's character symbol to its
    integer value and replaces every run of capital letters in `syntax`
    with the corresponding value.  Consecutive instances of the same
    replacement character are grouped: replacing A with 5 in "AAAA A"
    results in "5 5".

    Substitutions preceded by a 'v' or '@' (registers or indices) are
    treated as unsigned; all other cases are treated as signed.  This
    isn't explicit in the documentation, but it seems to work.

    For easier formatting in `tokenize_syntax`, the integers are
    inserted in bare hexadecimal format; further formatting is the
    responsibility of the caller.

    See test case examples in TestFormattingArgsWithSyntax.
    """

    def _render(match: Match[str]) -> str:
        token = match.group(0)
        prefix = token[0]
        value = args[token[-1]]
        # NOTE I think this is right, but it's not very clear in the docs
        if prefix not in "v@":
            # Signed: width is the number of placeholder letters.
            value = sign(value, len(token) - 1)
        return f"{prefix}{value:x}"

    return re.sub(".[A-Z]+", _render, syntax)
class TestFormattingArgsWithSyntax(unittest.TestCase):
    """Checks substitution behavior of format_args_with_syntax."""

    def _check(self, args: Dict[str, int], syntax: str, expected: str) -> None:
        # One assertion shared by every case below.
        self.assertEqual(format_args_with_syntax(args, syntax), expected)

    def test_no_format(self) -> None:
        self._check({}, "hi there", "hi there")
        self._check({"A": 4}, "hi there", "hi there")

    def test_single_replacement(self) -> None:
        self._check({"A": 3}, "the number is A", "the number is 3")
        self._check({"B": 4}, "hiBthere", "hi4there")

    def test_long_replacement(self) -> None:
        self._check({"A": 3}, "the number is AA", "the number is 3")
        self._check({"A": 4}, "long numAAAA", "long num4")

    def test_large_replacement(self) -> None:
        self._check({"A": 0x999}, "the number is AAAA", "the number is 999")

    def test_multiple_replacements(self) -> None:
        self._check({"A": 1, "B": 0}, "first A then B", "first 1 then 0")
        self._check({"A": 4, "B": 5}, "first AAAA then BBBB", "first 4 then 5")

    def test_signed_replacements(self) -> None:
        # Placeholders without a 'v'/'@' prefix are sign-extended.
        self._check({"A": 0xF}, "negative A", "negative -1")
        self._check({"A": 0xFF}, "negative AA", "negative -1")
        self._check({"A": 0xF6}, "negative AA", "negative -a")

    def test_unsigned_replacements(self) -> None:
        self._check({"A": 0xF}, "positive vA", "positive vf")
        self._check({"A": 0xFFFF}, "positive field@AAAA", "positive field@ffff")
        self._check({"A": 0xF}, "positive vAA", "positive vf")
def tokenize_syntax(
df: "DexFile", word: str, args: Dict[str, int]
) -> List[InstructionTextToken]:
tokens = list()
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, " "))
# Check for prefixes and suffixes
trailing_comma = False
trailing_curly_brace = False
if word[-1] == ",":
trailing_comma = True
word = word[:-1]
if word[-1] == "}": # Needs to be after ',' check
trailing_curly_brace = True
word = word[:-1]
if word[0] == "{":
tokens.append(InstructionTextToken(InstructionTextTokenType.TextToken, "{"))
word = word[1:]
# Format operand with numbers where the placeholders are
word_formatted = format_args_with_syntax(args, word)
# Add operand token
if word_formatted == "":
# {}
pass
elif word_formatted[0] == "v":
# Register e.g. v01
val = int(word_formatted[1:], 16)
if val >= 256:
# TODO add link to issue. See comment in Smali
log_warn(
f"Rendering v{val}, but Binary Ninja only knows about registers up to 255 for analysis."
)
tokens.append(
InstructionTextToken(InstructionTextTokenType.RegisterToken, f"v{val}")
)
elif word_formatted[:2] == "#+":
# Literal e.g. #+0001
tokens.append(
InstructionTextToken(
InstructionTextTokenType.IntegerToken, hex(int(word_formatted[2:], 16))
)
)
elif "@" in word_formatted:
# Lookup value e.g. call_site@0001
# Possible lookup types: call_site, field, method, method_handle, proto, string, type
lookup_type, lookup_index_str = word_formatted.split("@")
lookup_index = int(lookup_index_str, 16)
if lookup_type == "call_site":
log_warn(lookup_type + | |
11},
},
{
"eventId": 14,
"eventType": "ActivityTaskStarted",
"activityTaskStartedEventAttributes": {"scheduledEventId": 12},
},
{
"eventId": 15,
"eventType": "ActivityTaskCompleted",
"activityTaskCompletedEventAttributes": {
"scheduledEventId": 11,
"result": "5",
},
},
{"eventId": 16, "eventType": "DecisionTaskScheduled"},
{"eventId": 17, "eventType": "DecisionTaskStarted"},
],
}
expected_decisions = [
{
"decisionType": "ScheduleActivityTask",
"scheduleActivityTaskDecisionAttributes": {
"activityId": "tin",
"activityType": {"name": "spam-tin", "version": "1.2"},
"heartbeatTimeout": "30",
"startToCloseTimeout": "43200",
},
},
]
instance = seddy_specs.DAGBuilder(workflow, task)
instance.build_decisions()
assert instance.decisions == expected_decisions
    def test_bar_complete(self, workflow):
        """Test DAG decisions building after bar activity completes."""
        # SWF decision-task input. Event history: workflow started; foo
        # scheduled/started/completed; yay and bar scheduled and started;
        # yay completed; tin scheduled and started; bar completed.
        task = {
            "taskToken": "spam",
            "previousStartedEventId": 17,
            "startedEventId": 23,
            "events": [
                {
                    "eventId": 1,
                    "eventType": "WorkflowExecutionStarted",
                    "workflowExecutionStartedEventAttributes": {
                        "input": (
                            "{\n"
                            ' "foo": {"spam": [42], "eggs": null},\n'
                            ' "bar": null,\n'
                            ' "yay": {"spam": [17], "eggs": [42]}\n'
                            "}"
                        )
                    },
                },
                {"eventId": 2, "eventType": "DecisionTaskScheduled"},
                {"eventId": 3, "eventType": "DecisionTaskStarted"},
                {"eventId": 4, "eventType": "DecisionTaskCompleted"},
                {
                    "eventId": 5,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "foo",
                        "activityType": {"name": "spam-foo", "version": "0.3"},
                        "decisionTaskCompletedEventId": 4,
                        "input": '{"spam": [42], "eggs": null}',
                    },
                },
                {
                    "eventId": 6,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 5},
                },
                {
                    "eventId": 7,
                    "eventType": "ActivityTaskCompleted",
                    "activityTaskCompletedEventAttributes": {
                        "scheduledEventId": 5,
                        "result": "3",
                    },
                },
                {"eventId": 8, "eventType": "DecisionTaskScheduled"},
                {"eventId": 9, "eventType": "DecisionTaskStarted"},
                {"eventId": 10, "eventType": "DecisionTaskCompleted"},
                # foo finished above; yay and bar run concurrently below.
                {
                    "eventId": 11,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "yay",
                        "activityType": {"name": "spam-foo", "version": "0.3"},
                        "decisionTaskCompletedEventId": 10,
                        "input": '{"spam": [17], "eggs": [42]}',
                    },
                },
                {
                    "eventId": 12,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "bar",
                        "activityType": {"name": "spam-bar", "version": "0.1"},
                        "decisionTaskCompletedEventId": 10,
                        "input": "null",
                    },
                },
                {
                    "eventId": 13,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 11},
                },
                {
                    "eventId": 14,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 12},
                },
                {
                    "eventId": 15,
                    "eventType": "ActivityTaskCompleted",
                    "activityTaskCompletedEventAttributes": {
                        "scheduledEventId": 11,
                        "result": "5",
                    },
                },
                {"eventId": 16, "eventType": "DecisionTaskScheduled"},
                {"eventId": 17, "eventType": "DecisionTaskStarted"},
                {"eventId": 18, "eventType": "DecisionTaskCompleted"},
                {
                    "eventId": 19,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "tin",
                        "activityType": {"name": "spam-tin", "version": "1.2"},
                        "decisionTaskCompletedEventId": 18,
                    },
                },
                {
                    "eventId": 20,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 19},
                },
                # bar (scheduled at event 12) completes last.
                {
                    "eventId": 21,
                    "eventType": "ActivityTaskCompleted",
                    "activityTaskCompletedEventAttributes": {
                        "scheduledEventId": 12,
                        "result": '{"a": 9, "b": "red"}',
                    },
                },
                {"eventId": 22, "eventType": "DecisionTaskScheduled"},
                {"eventId": 23, "eventType": "DecisionTaskStarted"},
            ],
        }
        # tin is already scheduled and still running, so bar finishing
        # should produce no new decisions.
        instance = seddy_specs.DAGBuilder(workflow, task)
        instance.build_decisions()
        assert instance.decisions == []
    def test_tin_complete(self, workflow):
        """Test DAG decisions building after tin activity completes."""
        # Event history: foo completed; yay and bar completed; tin scheduled,
        # started and finally completed (event 25) -- all activities done.
        task = {
            "taskToken": "spam",
            "previousStartedEventId": 23,
            "startedEventId": 27,
            "events": [
                {
                    "eventId": 1,
                    "eventType": "WorkflowExecutionStarted",
                    "workflowExecutionStartedEventAttributes": {
                        "input": (
                            "{\n"
                            ' "foo": {"spam": [42], "eggs": null},\n'
                            ' "bar": null,\n'
                            ' "yay": {"spam": [17], "eggs": [42]}\n'
                            "}"
                        )
                    },
                },
                {"eventId": 2, "eventType": "DecisionTaskScheduled"},
                {"eventId": 3, "eventType": "DecisionTaskStarted"},
                {"eventId": 4, "eventType": "DecisionTaskCompleted"},
                {
                    "eventId": 5,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "foo",
                        "activityType": {"name": "spam-foo", "version": "0.3"},
                        "decisionTaskCompletedEventId": 4,
                        "input": '{"spam": [42], "eggs": null}',
                    },
                },
                {
                    "eventId": 6,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 5},
                },
                {
                    "eventId": 7,
                    "eventType": "ActivityTaskCompleted",
                    "activityTaskCompletedEventAttributes": {
                        "scheduledEventId": 5,
                        "result": "3",
                    },
                },
                {"eventId": 8, "eventType": "DecisionTaskScheduled"},
                {"eventId": 9, "eventType": "DecisionTaskStarted"},
                {"eventId": 10, "eventType": "DecisionTaskCompleted"},
                {
                    "eventId": 11,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "yay",
                        "activityType": {"name": "spam-foo", "version": "0.3"},
                        "decisionTaskCompletedEventId": 10,
                        "input": '{"spam": [17], "eggs": [42]}',
                    },
                },
                {
                    "eventId": 12,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "bar",
                        "activityType": {"name": "spam-bar", "version": "0.1"},
                        "decisionTaskCompletedEventId": 10,
                        "input": "null",
                    },
                },
                {
                    "eventId": 13,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 11},
                },
                {
                    "eventId": 14,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 12},
                },
                {
                    "eventId": 15,
                    "eventType": "ActivityTaskCompleted",
                    "activityTaskCompletedEventAttributes": {
                        "scheduledEventId": 11,
                        "result": "5",
                    },
                },
                {"eventId": 16, "eventType": "DecisionTaskScheduled"},
                {"eventId": 17, "eventType": "DecisionTaskStarted"},
                {"eventId": 18, "eventType": "DecisionTaskCompleted"},
                {
                    "eventId": 19,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "tin",
                        "activityType": {"name": "spam-tin", "version": "1.2"},
                        "decisionTaskCompletedEventId": 18,
                    },
                },
                {
                    "eventId": 20,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 19},
                },
                {
                    "eventId": 21,
                    "eventType": "ActivityTaskCompleted",
                    "activityTaskCompletedEventAttributes": {
                        "scheduledEventId": 12,
                        "result": '{"a": 9, "b": "red"}',
                    },
                },
                {"eventId": 22, "eventType": "DecisionTaskScheduled"},
                {"eventId": 23, "eventType": "DecisionTaskStarted"},
                {"eventId": 24, "eventType": "DecisionTaskCompleted"},
                # Final activity (tin, scheduled at event 19) completes here.
                {
                    "eventId": 25,
                    "eventType": "ActivityTaskCompleted",
                    "activityTaskCompletedEventAttributes": {"scheduledEventId": 19},
                },
                {"eventId": 26, "eventType": "DecisionTaskScheduled"},
                {"eventId": 27, "eventType": "DecisionTaskStarted"},
            ],
        }
        # All activities finished: the workflow should complete with the
        # collected per-activity results.
        expected_decisions = [
            {
                "decisionType": "CompleteWorkflowExecution",
                "completeWorkflowExecutionDecisionAttributes": {
                    "result": '{"foo": 3, "bar": {"a": 9, "b": "red"}, "yay": 5}'
                },
            }
        ]
        instance = seddy_specs.DAGBuilder(workflow, task)
        instance.build_decisions()
        assert instance.decisions == expected_decisions
    def test_bar_and_yay_complete(self, workflow):
        """Test DAG decisions building after bar and yay activities complete."""
        # Event history: foo completed; yay and bar scheduled/started; then
        # BOTH complete (events 15 and 16) before the next decision task.
        task = {
            "taskToken": "spam",
            "previousStartedEventId": 9,
            "startedEventId": 18,
            "events": [
                {
                    "eventId": 1,
                    "eventType": "WorkflowExecutionStarted",
                    "workflowExecutionStartedEventAttributes": {
                        "input": (
                            "{\n"
                            ' "foo": {"spam": [42], "eggs": null},\n'
                            ' "bar": null,\n'
                            ' "yay": {"spam": [17], "eggs": [42]}\n'
                            "}"
                        )
                    },
                },
                {"eventId": 2, "eventType": "DecisionTaskScheduled"},
                {"eventId": 3, "eventType": "DecisionTaskStarted"},
                {"eventId": 4, "eventType": "DecisionTaskCompleted"},
                {
                    "eventId": 5,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "foo",
                        "activityType": {"name": "spam-foo", "version": "0.3"},
                        "decisionTaskCompletedEventId": 4,
                        "input": '{"spam": [42], "eggs": null}',
                    },
                },
                {
                    "eventId": 6,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 5},
                },
                {
                    "eventId": 7,
                    "eventType": "ActivityTaskCompleted",
                    "activityTaskCompletedEventAttributes": {
                        "scheduledEventId": 5,
                        "result": "3",
                    },
                },
                {"eventId": 8, "eventType": "DecisionTaskScheduled"},
                {"eventId": 9, "eventType": "DecisionTaskStarted"},
                {"eventId": 10, "eventType": "DecisionTaskCompleted"},
                {
                    "eventId": 11,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "yay",
                        "activityType": {"name": "spam-foo", "version": "0.3"},
                        "decisionTaskCompletedEventId": 10,
                        "input": '{"spam": [17], "eggs": [42]}',
                    },
                },
                {
                    "eventId": 12,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "bar",
                        "activityType": {"name": "spam-bar", "version": "0.1"},
                        "decisionTaskCompletedEventId": 10,
                        "input": "null",
                    },
                },
                {
                    "eventId": 13,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 11},
                },
                {
                    "eventId": 14,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 12},
                },
                {
                    "eventId": 15,
                    "eventType": "ActivityTaskCompleted",
                    "activityTaskCompletedEventAttributes": {
                        "scheduledEventId": 11,
                        "result": "5",
                    },
                },
                {
                    "eventId": 16,
                    "eventType": "ActivityTaskCompleted",
                    "activityTaskCompletedEventAttributes": {
                        "scheduledEventId": 12,
                        "result": '{"a": 9, "b": "red"}',
                    },
                },
                {"eventId": 17, "eventType": "DecisionTaskScheduled"},
                {"eventId": 18, "eventType": "DecisionTaskStarted"},
            ],
        }
        # With yay and bar both done, the builder should schedule tin.
        expected_decisions = [
            {
                "decisionType": "ScheduleActivityTask",
                "scheduleActivityTaskDecisionAttributes": {
                    "activityId": "tin",
                    "activityType": {"name": "spam-tin", "version": "1.2"},
                    "heartbeatTimeout": "30",
                    "startToCloseTimeout": "43200",
                },
            },
        ]
        instance = seddy_specs.DAGBuilder(workflow, task)
        instance.build_decisions()
        assert instance.decisions == expected_decisions
    def test_foo_failed(self, workflow):
        """Test DAG decisions building after foo activity fails."""
        # Decision-task input: the history ends with the "foo" activity
        # having failed, so the builder should decide to fail the workflow.
        task = {
            "taskToken": "spam",
            "previousStartedEventId": 3,
            "startedEventId": 9,
            "events": [
                {
                    "eventId": 1,
                    "eventType": "WorkflowExecutionStarted",
                    "workflowExecutionStartedEventAttributes": {
                        "input": (
                            "{\n"
                            '    "foo": {"spam": [42], "eggs": null},\n'
                            '    "bar": null,\n'
                            '    "yay": {"spam": [17], "eggs": [42]}\n'
                            "}"
                        )
                    },
                },
                {"eventId": 2, "eventType": "DecisionTaskScheduled"},
                {"eventId": 3, "eventType": "DecisionTaskStarted"},
                {"eventId": 4, "eventType": "DecisionTaskCompleted"},
                {
                    "eventId": 5,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "foo",
                        "activityType": {"name": "spam-foo", "version": "0.3"},
                        "decisionTaskCompletedEventId": 4,
                        "input": '{"spam": [42], "eggs": null}',
                    },
                },
                {
                    "eventId": 6,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 5},
                },
                {
                    "eventId": 7,
                    "eventType": "ActivityTaskFailed",
                    "activityTaskFailedEventAttributes": {
                        "scheduledEventId": 5,
                        "details": "The specified spam does not exist",
                        "reason": "spamError",
                    },
                },
                {"eventId": 8, "eventType": "DecisionTaskScheduled"},
                {"eventId": 9, "eventType": "DecisionTaskStarted"},
            ],
        }
        # A single failed activity should abort the whole workflow execution.
        expected_decisions = [
            {
                "decisionType": "FailWorkflowExecution",
                "failWorkflowExecutionDecisionAttributes": {
                    "details": "1 activities failed",
                },
            }
        ]
        instance = seddy_specs.DAGBuilder(workflow, task)
        instance.build_decisions()
        assert instance.decisions == expected_decisions
    def test_foo_timed_out(self, workflow):
        """Test DAG decisions building after foo activity times-out."""
        # Events sections
        # Decision-task input: the history ends with the "foo" activity
        # timing out (heartbeat), which the builder treats as a failure.
        task = {
            "taskToken": "spam",
            "previousStartedEventId": 3,
            "startedEventId": 9,
            "events": [
                {
                    "eventId": 1,
                    "eventType": "WorkflowExecutionStarted",
                    "workflowExecutionStartedEventAttributes": {
                        "input": (
                            "{\n"
                            '    "foo": {"spam": [42], "eggs": null},\n'
                            '    "bar": null,\n'
                            '    "yay": {"spam": [17], "eggs": [42]}\n'
                            "}"
                        )
                    },
                },
                {"eventId": 2, "eventType": "DecisionTaskScheduled"},
                {"eventId": 3, "eventType": "DecisionTaskStarted"},
                {"eventId": 4, "eventType": "DecisionTaskCompleted"},
                {
                    "eventId": 5,
                    "eventType": "ActivityTaskScheduled",
                    "activityTaskScheduledEventAttributes": {
                        "activityId": "foo",
                        "activityType": {"name": "spam-foo", "version": "0.3"},
                        "decisionTaskCompletedEventId": 4,
                        "input": '{"spam": [42], "eggs": null}',
                    },
                },
                {
                    "eventId": 6,
                    "eventType": "ActivityTaskStarted",
                    "activityTaskStartedEventAttributes": {"scheduledEventId": 5},
                },
                {
                    "eventId": 7,
                    "eventType": "ActivityTaskTimedOut",
                    "activityTaskTimedOutEventAttributes": {
                        "scheduledEventId": 5,
                        "details": "42 / 50",
                        "timeoutType": "HEARTBEAT",
                    },
                },
                {"eventId": 8, "eventType": "DecisionTaskScheduled"},
                {"eventId": 9, "eventType": "DecisionTaskStarted"},
            ],
        }
        # A timed-out activity counts as failed, aborting the workflow.
        expected_decisions = [
            {
                "decisionType": "FailWorkflowExecution",
                "failWorkflowExecutionDecisionAttributes": {
                    "details": "1 activities failed",
                },
            }
        ]
        instance = seddy_specs.DAGBuilder(workflow, task)
        instance.build_decisions()
        assert instance.decisions == expected_decisions
def test_foo_start_timed_out(self, workflow):
"""Test DAG decisions building after foo activity start times-out."""
# Events sections
task = {
"taskToken": "spam",
"previousStartedEventId": 3,
"startedEventId": 9,
"events": [
{
"eventId": 1,
"eventType": "WorkflowExecutionStarted",
"workflowExecutionStartedEventAttributes": {
"input": (
"{\n"
' "foo": {"spam": [42], "eggs": null},\n'
' "bar": null,\n'
' "yay": {"spam": [17], "eggs": [42]}\n'
"}"
)
},
},
{"eventId": 2, "eventType": "DecisionTaskScheduled"},
{"eventId": 3, "eventType": "DecisionTaskStarted"},
{"eventId": 4, "eventType": "DecisionTaskCompleted"},
{
"eventId": 5,
"eventType": "ActivityTaskScheduled",
"activityTaskScheduledEventAttributes": {
"activityId": "foo",
"activityType": {"name": "spam-foo", "version": "0.3"},
"decisionTaskCompletedEventId": 4,
"input": '{"spam": [42], | |
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def associate_member_account(self, memberAccountId: str):
"""
Associates a specified AWS account with Amazon Macie as a member account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/macie-2017-12-19/AssociateMemberAccount>`_
**Request Syntax**
::
response = client.associate_member_account(
memberAccountId='string'
)
:type memberAccountId: string
:param memberAccountId: **[REQUIRED]**
The ID of the AWS account that you want to associate with Amazon Macie as a member account.
:returns: None
"""
pass
def associate_s3_resources(self, s3Resources: List, memberAccountId: str = None) -> Dict:
"""
Associates specified S3 resources with Amazon Macie for monitoring and data classification. If memberAccountId isn't specified, the action associates specified S3 resources with Macie for the current master account. If memberAccountId is specified, the action associates specified S3 resources with Macie for the specified member account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/macie-2017-12-19/AssociateS3Resources>`_
**Request Syntax**
::
response = client.associate_s3_resources(
memberAccountId='string',
s3Resources=[
{
'bucketName': 'string',
'prefix': 'string',
'classificationType': {
'oneTime': 'FULL'|'NONE',
'continuous': 'FULL'
}
},
]
)
**Response Syntax**
::
{
'failedS3Resources': [
{
'failedItem': {
'bucketName': 'string',
'prefix': 'string'
},
'errorCode': 'string',
'errorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **failedS3Resources** *(list) --*
S3 resources that couldn't be associated with Amazon Macie. An error code and an error message are provided for each failed item.
- *(dict) --*
Includes details about the failed S3 resources.
- **failedItem** *(dict) --*
The failed S3 resources.
- **bucketName** *(string) --*
The name of the S3 bucket.
- **prefix** *(string) --*
The prefix of the S3 bucket.
- **errorCode** *(string) --*
The status code of a failed item.
- **errorMessage** *(string) --*
The error message of a failed item.
:type memberAccountId: string
:param memberAccountId:
The ID of the Amazon Macie member account whose resources you want to associate with Macie.
:type s3Resources: list
:param s3Resources: **[REQUIRED]**
The S3 resources that you want to associate with Amazon Macie for monitoring and data classification.
- *(dict) --*
The S3 resources that you want to associate with Amazon Macie for monitoring and data classification. This data type is used as a request parameter in the AssociateS3Resources action and a response parameter in the ListS3Resources action.
- **bucketName** *(string) --* **[REQUIRED]**
The name of the S3 bucket that you want to associate with Amazon Macie.
- **prefix** *(string) --*
The prefix of the S3 bucket that you want to associate with Amazon Macie.
- **classificationType** *(dict) --* **[REQUIRED]**
The classification type that you want to specify for the resource associated with Amazon Macie.
- **oneTime** *(string) --* **[REQUIRED]**
A one-time classification of all of the existing objects in a specified S3 bucket.
- **continuous** *(string) --* **[REQUIRED]**
A continuous classification of the objects that are added to a specified S3 bucket. Amazon Macie begins performing continuous classification after a bucket is successfully associated with Amazon Macie.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def disassociate_member_account(self, memberAccountId: str):
"""
Removes the specified member account from Amazon Macie.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/macie-2017-12-19/DisassociateMemberAccount>`_
**Request Syntax**
::
response = client.disassociate_member_account(
memberAccountId='string'
)
:type memberAccountId: string
:param memberAccountId: **[REQUIRED]**
The ID of the member account that you want to remove from Amazon Macie.
:returns: None
"""
pass
def disassociate_s3_resources(self, associatedS3Resources: List, memberAccountId: str = None) -> Dict:
"""
Removes specified S3 resources from being monitored by Amazon Macie. If memberAccountId isn't specified, the action removes specified S3 resources from Macie for the current master account. If memberAccountId is specified, the action removes specified S3 resources from Macie for the specified member account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/macie-2017-12-19/DisassociateS3Resources>`_
**Request Syntax**
::
response = client.disassociate_s3_resources(
memberAccountId='string',
associatedS3Resources=[
{
'bucketName': 'string',
'prefix': 'string'
},
]
)
**Response Syntax**
::
{
'failedS3Resources': [
{
'failedItem': {
'bucketName': 'string',
'prefix': 'string'
},
'errorCode': 'string',
'errorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **failedS3Resources** *(list) --*
S3 resources that couldn't be removed from being monitored and classified by Amazon Macie. An error code and an error message are provided for each failed item.
- *(dict) --*
Includes details about the failed S3 resources.
- **failedItem** *(dict) --*
The failed S3 resources.
- **bucketName** *(string) --*
The name of the S3 bucket.
- **prefix** *(string) --*
The prefix of the S3 bucket.
- **errorCode** *(string) --*
The status code of a failed item.
- **errorMessage** *(string) --*
The error message of a failed item.
:type memberAccountId: string
:param memberAccountId:
The ID of the Amazon Macie member account whose resources you want to remove from being monitored by Amazon Macie.
:type associatedS3Resources: list
:param associatedS3Resources: **[REQUIRED]**
The S3 resources (buckets or prefixes) that you want to remove from being monitored and classified by Amazon Macie.
- *(dict) --*
Contains information about the S3 resource. This data type is used as a request parameter in the DisassociateS3Resources action and can be used as a response parameter in the AssociateS3Resources and UpdateS3Resources actions.
- **bucketName** *(string) --* **[REQUIRED]**
The name of the S3 bucket.
- **prefix** *(string) --*
The prefix of the S3 bucket.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_member_accounts(self, nextToken: str = None, maxResults: int = None) -> Dict:
"""
Lists all Amazon Macie member accounts for the current Amazon Macie master account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/macie-2017-12-19/ListMemberAccounts>`_
**Request Syntax**
::
response = client.list_member_accounts(
nextToken='string',
maxResults=123
)
**Response Syntax**
::
{
'memberAccounts': [
{
'accountId': 'string'
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **memberAccounts** *(list) --*
A list of the Amazon Macie member accounts returned by the action. The current master account | |
be blocking or non-blocking.)
"""
if FCGI_DEBUG: logging.debug('_recvall (%d)' % (length))
dataList = []
recvLen = 0
while length:
data = stream.read(length)
if not data: # EOF
break
dataList.append(data)
dataLen = len(data)
recvLen += dataLen
length -= dataLen
# if FCGI_DEBUG: logging.debug('recived length = %d' % (recvLen))
return b''.join(dataList), recvLen
_recvall = staticmethod(_recvall)
def read(self, stream):
"""Read and decode a Record from a socket."""
header, length = self._recvall(stream, FCGI_HEADER_LEN)
if length < FCGI_HEADER_LEN:
raise EOFError
self.version, self.type, self.requestId, self.contentLength, \
self.paddingLength = struct.unpack(FCGI_Header, header)
if FCGI_DEBUG:
hex = ''
for s in header:
hex += '%x|' % (char_to_int(s))
logging.debug('recv fcgi header: %s %s len: %d' % (
FCGI_HEADER_NAMES[self.type] if self.type is not None and self.type < FCGI_MAXTYPE else
FCGI_HEADER_NAMES[FCGI_MAXTYPE],
hex, len(header)
))
if self.contentLength:
try:
self.contentData, length = self._recvall(stream, self.contentLength)
except:
raise EOFError
if length < self.contentLength:
raise EOFError
if self.paddingLength:
try:
self._recvall(stream, self.paddingLength)
except:
raise EOFError
def _sendall(stream, data):
"""
Writes data to a socket and does not return until all the data is sent.
"""
if FCGI_DEBUG: logging.debug('_sendall: len=%d' % len(data))
stream.write(data)
_sendall = staticmethod(_sendall)
def write(self, stream):
"""Encode and write a Record to a socket."""
if not self.contentLength:
self.paddingLength = 8
else:
self.paddingLength = -self.contentLength & 7
header = struct.pack(FCGI_Header, self.version, self.type,
self.requestId, self.contentLength,
self.paddingLength)
if FCGI_DEBUG:
logging.debug(
'send fcgi header: %s' %
FCGI_HEADER_NAMES[self.type] if self.type is not None and self.type < FCGI_MAXTYPE else
FCGI_HEADER_NAMES[FCGI_MAXTYPE]
)
self._sendall(stream, header)
if self.contentLength:
if FCGI_DEBUG: logging.debug('send CONTENT')
self._sendall(stream, self.contentData)
if self.paddingLength:
if FCGI_DEBUG: logging.debug('send PADDING')
self._sendall(stream, b'\x00' * self.paddingLength)
class Request(object):
    """
    A single FastCGI request.

    Instances are handed to your handler and are the main interface
    between the handler and this module.  The methods here should not be
    called by the handler; however ``server``, ``params``, ``stdin``,
    ``stdout``, ``stderr`` and ``data`` are free for the handler's use.
    """
    def __init__(self, conn, inputStreamClass):
        self._conn = conn
        self.server = conn.server
        self.params = {}
        self.stdin = inputStreamClass(conn)
        self.stdout = OutputStream(conn, self, FCGI_STDOUT)
        self.stderr = OutputStream(conn, self, FCGI_STDERR)
        self.data = inputStreamClass(conn)

    def run(self):
        """Run the handler, flush the streams, and end the request."""
        try:
            protocolStatus, appStatus = self.server.handler(self)
        except Exception as exc:
            # Log here in case reporting the error itself fails.
            # TODO: this appears to cause FCGI timeouts sometimes -- is it
            # an exception loop?
            logging.exception(exc)
            self.stderr.flush()
            if not self.stdout.dataWritten:
                self.server.error(self)
            protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0
        if FCGI_DEBUG:
            logging.debug('protocolStatus = %d, appStatus = %d'
                          % (protocolStatus, appStatus))
        self._flush()
        self._end(appStatus, protocolStatus)

    def _end(self, appStatus=long_int('0'), protocolStatus=FCGI_REQUEST_COMPLETE):
        """Tell the connection to finish this request."""
        self._conn.end_request(self, appStatus, protocolStatus)

    def _flush(self):
        """Flush both output streams."""
        self.stdout.flush()
        self.stderr.flush()
class Connection(object):
    """
    A Connection with the web server.

    Each Connection is associated with a single socket (which is
    connected to the web server) and is responsible for handling all
    the FastCGI message processing for that socket.
    """
    _multiplexed = False
    _inputStreamClass = InputStream

    def __init__(self, stdin, stdout, server):
        self._stdin = stdin
        self._stdout = stdout
        self.server = server
        # Active Requests for this Connection, mapped by request ID.
        self._requests = {}

    def run(self):
        """Begin processing data from the socket."""
        self._keepGoing = True
        while self._keepGoing:
            try:
                self.process_input()
            except KeyboardInterrupt:
                break

    def process_input(self):
        """Attempt to read a single Record from the socket and process it."""
        # Currently, any children Request threads notify this Connection
        # that it is no longer needed by closing the Connection's socket.
        # We need to put a timeout on select, otherwise we might get
        # stuck in it indefinitely... (I don't like this solution.)
        if not self._keepGoing:
            return
        rec = Record()
        rec.read(self._stdin)
        # Dispatch on record type.
        if rec.type == FCGI_GET_VALUES:
            self._do_get_values(rec)
        elif rec.type == FCGI_BEGIN_REQUEST:
            self._do_begin_request(rec)
        elif rec.type == FCGI_ABORT_REQUEST:
            self._do_abort_request(rec)
        elif rec.type == FCGI_PARAMS:
            self._do_params(rec)
        elif rec.type == FCGI_STDIN:
            self._do_stdin(rec)
        elif rec.type == FCGI_DATA:
            self._do_data(rec)
        elif rec.requestId == FCGI_NULL_REQUEST_ID:
            self._do_unknown_type(rec)
        else:
            # Need to complain about this.
            pass

    def writeRecord(self, rec):
        """
        Write a Record to the socket.
        """
        rec.write(self._stdout)

    def end_request(self, req, appStatus=long_int('0'), protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
        """
        End a Request.

        Called by Request objects. An FCGI_END_REQUEST Record is
        sent to the web server. If the web server no longer requires
        the connection, the socket is closed, thereby ending this
        Connection (run() returns).
        """
        # Terminate the stdout stream with an empty record (contentLength 0,
        # so contentData is never actually written).
        rec = Record(FCGI_STDOUT, req.requestId)
        rec.contentData = ''
        rec.contentLength = 0
        self.writeRecord(rec)
        # Then report request completion to the web server.
        rec = Record(FCGI_END_REQUEST, req.requestId)
        rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
                                      protocolStatus)
        rec.contentLength = FCGI_EndRequestBody_LEN
        self.writeRecord(rec)
        if remove:
            if FCGI_DEBUG: logging.debug('end_request: removing request from list')
            del self._requests[req.requestId]
        if FCGI_DEBUG: logging.debug('end_request: flags = %d' % req.flags)
        if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
            if FCGI_DEBUG: logging.debug('end_request: set _keepGoing = False')
            self._keepGoing = False

    def _do_get_values(self, inrec):
        """Handle an FCGI_GET_VALUES request from the web server."""
        outrec = Record(FCGI_GET_VALUES_RESULT)
        pos = 0
        while pos < inrec.contentLength:
            pos, (name, value) = decode_pair(inrec.contentData, pos)
            cap = self.server.capability.get(name)
            if cap is not None:
                outrec.contentData += encode_pair(name, str(cap))
        outrec.contentLength = len(outrec.contentData)
        self.writeRecord(outrec)

    def _do_begin_request(self, inrec):
        """Handle an FCGI_BEGIN_REQUEST from the web server."""
        role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)
        req = self.server.request_class(self, self._inputStreamClass)
        req.requestId, req.role, req.flags = inrec.requestId, role, flags
        req.aborted = False
        if not self._multiplexed and self._requests:
            # Can't multiplex requests.
            self.end_request(req, long_int(0), FCGI_CANT_MPX_CONN, remove=False)
        else:
            self._requests[inrec.requestId] = req

    def _do_abort_request(self, inrec):
        """
        Handle an FCGI_ABORT_REQUEST from the web server.

        We just mark a flag in the associated Request.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.aborted = True

    def _start_request(self, req):
        """Run the request."""
        # Not multiplexed, so run it inline.
        req.run()

    def _do_params(self, inrec):
        """
        Handle an FCGI_PARAMS Record.

        If the last FCGI_PARAMS Record is received, start the request.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            if inrec.contentLength:
                pos = 0
                while pos < inrec.contentLength:
                    pos, (name, value) = decode_pair(inrec.contentData, pos)
                    req.params[name] = value

    def _do_stdin(self, inrec):
        """Handle the FCGI_STDIN stream."""
        req = self._requests.get(inrec.requestId)
        # BUG FIX: only act when the request ID is known.  Previously an
        # empty FCGI_STDIN record carrying an unknown request ID fell
        # through to _start_request(None), which crashed with
        # AttributeError on req.run().
        if req is not None:
            if inrec.contentLength:
                req.stdin.add_data(inrec.contentData)
            else:
                # Empty STDIN record: the request body is complete, run it.
                self._start_request(req)

    def _do_data(self, inrec):
        """Handle the FCGI_DATA stream."""
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.data.add_data(inrec.contentData)

    def _do_unknown_type(self, inrec):
        """Handle an unknown request type. Respond accordingly."""
        outrec = Record(FCGI_UNKNOWN_TYPE)
        outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
        outrec.contentLength = FCGI_UnknownTypeBody_LEN
        self.writeRecord(outrec)
class FCGIServer(object):
request_class = Request
maxwrite = 8192
inputStreamShrinkThreshold = 102400 - 8192
def __init__(self, application, environ=None,
multithreaded=False, multiprocess=False,
debug=False, roles=(FCGI_RESPONDER,),
app_root=None):
if environ is None:
environ = {}
self.application = application
self.environ = environ
self.multithreaded = multithreaded
self.multiprocess = multiprocess
self.debug = debug
self.roles = roles
self._connectionClass = Connection
self.capability = {
# If threads aren't available, these are pretty much correct.
FCGI_MAX_CONNS: 1,
FCGI_MAX_REQS: 1,
FCGI_MPXS_CONNS: 0
}
self.app_root = app_root
def run(self):
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
stdin = os.fdopen(sys.stdin.fileno(), 'rb', 0)
stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
conn = Connection(stdin, stdout, self)
try:
conn.run()
except Exception as e:
logging.exception(e)
raise
def handler(self, req):
"""Special handler for WSGI."""
if req.role not in self.roles:
return FCGI_UNKNOWN_ROLE, 0
# Mostly taken from example CGI gateway.
environ = req.params
environ.update(self.environ)
environ['wsgi.version'] = (1, 0)
environ['wsgi.input'] = req.stdin
# TODO - sys.stderr appears to be None here?? (on Windows/IIS)
stderr = TeeOutputStream((sys.stderr, req.stderr))
environ['wsgi.errors'] = stderr
environ['wsgi.multithread'] = False
environ['wsgi.multiprocess'] = False
environ['wsgi.run_once'] = False
if environ.get('HTTPS', 'off') in ('on', '1'):
environ['wsgi.url_scheme'] = 'https'
else:
environ['wsgi.url_scheme'] = 'http'
self._sanitizeEnv(environ)
headers_set = []
headers_sent = []
result = None
def write(data):
assert type(data) is bytes_type, 'write() argument must be bytes'
assert headers_set, 'write() before start_response()'
if not headers_sent:
status, responseHeaders = headers_sent[:] = headers_set
found = False
for header, value in responseHeaders:
if header.lower() == 'content-length':
found = True
break
if not found and result is not None:
try:
if len(result) == 1:
responseHeaders.append(('Content-Length',
str(len(data))))
except:
pass
s = 'Status: %s\r\n' % status
for header in responseHeaders:
s += '%s: %s\r\n' % header
s += '\r\n'
req.stdout.write(s.encode(FCGI_CONTENT_ENCODING))
req.stdout.write(data)
req.stdout.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
# Re-raise if too late
raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
finally:
exc_info = None # avoid dangling circular ref
else:
assert not headers_set, 'Headers already set!'
assert type(status) is str, 'Status | |
format :
str broker_id (not empty)
str market_id (not empty)
str symbol (not empty)
int market_type
int unit_type
int contract_type
int trade_type
int orders
str base (not empty)
str base_display (not empty)
int base_precision (not empty)
str quote (not empty)
str quote_display (not empty)
int quote_precision (not empty)
str expiry
int timestamp (or 0)
str lot_size
str contract_size
str base_exchange_rate
str value_per_pip
str one_pip_means
str margin_factor decimal as string or '-' for no margin
str min_size
str max_size
str step_size
str min_notional
str max_notional
str step_notional
str min_price
str max_price
str step_price
str maker_fee
str taker_fee
str maker_commission
str taker_commission
"""
with self._mutex:
if isinstance(data, list):
self._pending_market_info_insert.extend(data)
else:
self._pending_market_info_insert.append(data)
with self._condition:
self._condition.notify()
#
# async loads
#
def load_market_ohlc(self, service, broker_id: str, market_id: str, timeframe: float,
from_datetime: Optional[datetime] = None, to_datetime: Optional[datetime] = None):
"""
Load a set of market ohlc, fill the intermediates missing ohlcs if necessary
@param service to be notified once done
@param broker_id: str
@param market_id: str
@param timeframe: float
@param from_datetime datetime
@param to_datetime datetime
"""
with self._mutex:
from_ts = int(from_datetime.timestamp() * 1000) if from_datetime else None
to_ts = int(to_datetime.timestamp() * 1000) if to_datetime else None
self._pending_ohlc_select.append((service, broker_id, market_id, timeframe, from_ts, to_ts, None))
with self._condition:
self._condition.notify()
def load_market_ohlc_last_n(self, service, broker_id: str, market_id: str, timeframe: float, last_n: int):
"""
Load a set of market ohlc, fill the intermediates missing ohlcs if necessary
@param service to be notified once done
@param market_id: str
@param broker_id: str
@param timeframe: float
@param last_n: int last max n ohlcs to load
"""
with self._mutex:
self._pending_ohlc_select.append((service, broker_id, market_id, timeframe, None, None, last_n))
with self._condition:
self._condition.notify()
def load_market_info(self, service, broker_id: str, market_id: str):
"""
Load a specific market info given its market id.
@param service to be notified once done
@param market_id:
@param broker_id:
"""
with self._mutex:
self._pending_market_info_select.append((service, broker_id, market_id))
with self._condition:
self._condition.notify()
def load_market_list(self, service, broker_id: str):
"""
Load the complete list of market available for a specific broker id.
@param service to be notified once done
@param broker_id: str
"""
with self._mutex:
self._pending_market_list_select.append((service, broker_id))
with self._condition:
self._condition.notify()
#
# sync loads
#
def get_first_tick(self, broker_id: str, market_id: str):
"""Load and return only the first found and older stored tick."""
return FirstTickFinder(self._markets_path, broker_id, market_id, binary=True).first()
def get_last_tick(self, broker_id: str, market_id: str):
"""Load and return only the last found and most recent stored tick."""
return LastTickFinder(self._markets_path, broker_id, market_id, binary=True).last()
def get_last_quote(self, broker_id: str, market_id: str, timeframe: float):
"""Load and return only the last found and most recent stored tick."""
return LastQuoteFinder(self._markets_path, broker_id, market_id, timeframe, binary=True).last()
def get_last_ohlc(self, broker_id: str, market_id: str, timeframe: float):
"""Load and return only the last found and most recent stored OHLC from a specific timeframe."""
return None
def get_user_closed_trades(self, broker_id: str, account_id: str, strategy_id: str,
from_date: datetime, to_date: datetime, market_id: Optional[str] = None):
"""
Sync load and return the user closed trades for an account and strategy identifier and a period of date
Optional market_id.
"""
return None
#
# Tick and ohlc streamer
#
def create_tick_streamer(self, broker_id: str, market_id: str, from_date: datetime, to_date: datetime,
buffer_size: int = 32768):
"""
Create a new tick streamer.
"""
return TickStreamer(self._markets_path, broker_id, market_id, from_date, to_date, buffer_size, True)
def create_quote_streamer(self, broker_id: str, market_id: str, timeframe: float,
from_date: datetime, to_date: datetime, buffer_size: int = 8192):
"""
Create a new quote streamer. It comes from the OHLC file storage.
"""
return QuoteStreamer(self._markets_path, broker_id, market_id, timeframe, from_date, to_date, buffer_size, True)
def create_ohlc_streamer(self, broker_id: str, market_id: str, timeframe: float,
from_date: datetime, to_date: datetime, buffer_size: int = 8192):
"""
Create a new OHLC streamer. It comes from OHLC database table.
"""
return OhlcStreamer(self._db, broker_id, market_id, timeframe, from_date, to_date, buffer_size)
#
# User
#
def store_asset(self, data: Tuple[str, str, str, str, int, str, str, str]):
"""
@param data: is a tuple or an array of tuples containing data in that order and format :
str broker_id (not empty)
str account_id (not empty)
str asset_id (not empty) identifier of the asset
str last_trade_id (not empty) unique identifier when average price was updated
int timestamp (or 0) of the last PRU update in (ms)
str quantity (last update quantity)
str price (average unit price cost)
str quote_symbol (not empty) symbol of the quote used for the average price
"""
with self._mutex:
if isinstance(data, list):
self._pending_asset_insert.extend(data)
else:
self._pending_asset_insert.append(data)
with self._condition:
self._condition.notify()
def load_assets(self, service, trader, broker_id: str, account_id: str):
"""
Load all asset for a specific broker_id
@param service to be notified once done
@param trader: Trader
@param account_id: str
@param broker_id: str
"""
with self._mutex:
self._pending_asset_select.append((service, trader, broker_id, account_id))
with self._condition:
self._condition.notify()
def store_user_trade(self, data: Tuple[str, str, str, str, int, int, dict, dict]):
"""
@param data: is a tuple or an array of tuples containing data in that order and format :
str broker_id (not empty)
str account_id (not empty)
str market_id (not empty)
str strategy_id (not empty)
integer trade_id (not empty)
integer trade_type (not empty)
dict data (to be json encoded)
dict operations (to be json encoded)
"""
with self._mutex:
if isinstance(data, list):
self._pending_user_trade_insert.extend(data)
else:
self._pending_user_trade_insert.append(data)
with self._condition:
self._condition.notify()
def load_user_trades(self, service, strategy, broker_id: str, account_id: str, strategy_id: str):
"""
Load all user trades data and options for a specific strategy_id / broker_id / account_id
@param strategy_id: str
@param account_id: str
@param broker_id: str
@param strategy: Strategy
@param service to be notified once done
"""
with self._mutex:
self._pending_user_trade_select.append((service, strategy, broker_id, account_id, strategy_id))
with self._condition:
self._condition.notify()
def clear_user_trades(self, broker_id: str, account_id: str, strategy_id: str):
"""
Delete all user trades data and options for a specific strategy_id / broker_id / account_id
"""
with self._mutex:
self._pending_user_trade_delete.append((broker_id, account_id, strategy_id))
with self._condition:
self._condition.notify()
def store_user_trader(self, data: Tuple[str, str, str, str, int, dict, dict, dict]):
"""
@param data: is a tuple or an array of tuples containing data in that order and format :
str broker_id (not empty)
str account_id (not empty)
str market_id (not empty)
str strategy_id (not empty)
integer activity (not null)
dict data (to be json encoded)
dict regions (to be json encoded)
dict alerts (to be json encoded)
"""
with self._mutex:
if isinstance(data, list):
self._pending_user_trader_insert.extend(data)
else:
self._pending_user_trader_insert.append(data)
with self._condition:
self._condition.notify()
def load_user_traders(self, service, strategy, broker_id: str, account_id: str, strategy_id: str):
"""
Load all user traders data and options for a specific strategy_id / broker_id / account_id
@param service to be notified once done
@param strategy: Strategy
@param strategy_id: str
@param account_id: str
@param broker_id: str
"""
with self._mutex:
self._pending_user_trader_select.append((service, strategy, broker_id, account_id, strategy_id))
with self._condition:
self._condition.notify()
def store_user_closed_trade(self, data: Tuple[str, str, str, str, int, dict]):
"""
@param data: is a tuple or an array of tuples containing data in that order and format :
str broker_id (not empty)
str account_id (not empty)
str market_id (not empty)
str strategy_id (not empty)
integer timestamp (not empty)
dict data (to be json encoded)
"""
with self._mutex:
if isinstance(data, list):
self._pending_user_closed_trade_insert.extend(data)
else:
self._pending_user_closed_trade_insert.append(data)
with self._condition:
self._condition.notify()
#
# Processing
#
    def run(self):
        """
        Worker loop: block on the condition variable until a producer
        (the store_*/load_* methods) queues work and notifies, then flush
        every category of pending operation while the connection is up.
        """
        while self._running:
            with self._condition:
                # sleep until some pending data is queued and notified
                self._condition.wait()
            if self.connected:
                # flush every pending category in a fixed order
                self.process_userdata()
                self.process_market()
                self.process_ohlc()
                self.process_tick()
                self.process_quote()
    def process_market(self):
        """Flush pending market data writes. Currently a no-op placeholder."""
        pass
    def process_userdata(self):
        """Flush pending user data writes. Currently a no-op placeholder."""
        pass
    def process_ohlc(self):
        """Flush pending OHLC writes. Currently a no-op placeholder."""
        pass
    def process_tick(self):
        """
        Flush pending tick storages.

        Swaps the whole pending set out under the mutex so producers can
        keep queueing, flushes each storage outside the lock, then re-queues
        any storage that still holds data.
        """
        with self._mutex:
            # are there some ticks to store
            if not self._pending_tick_insert:
                return
            # take ownership of the whole batch; release the lock for the flushes
            pti = self._pending_tick_insert
            self._pending_tick_insert = set()
        for tick_storage in pti:
            if self._fetch or tick_storage.can_flush():
                if tick_storage.has_data():
                    # NOTE(review): presumably files stay open during a fetch
                    # run (close_at_end=False) to avoid reopen churn -- confirm
                    tick_storage.flush(close_at_end=not self._fetch)
            if tick_storage.has_data():
                # data remaining
                with self._mutex:
                    self._pending_tick_insert.add(tick_storage)
    def process_quote(self):
        """
        Flush pending quote storages.

        Mirrors process_tick: swap the pending set out under the mutex,
        flush outside the lock, re-queue storages that still hold data.
        """
        with self._mutex:
            # are there some quotes to store
            if not self._pending_quote_insert:
                return
            # take ownership of the whole batch; release the lock for the flushes
            pqi = self._pending_quote_insert
            self._pending_quote_insert = set()
        for quote_storage in pqi:
            if self._fetch or quote_storage.can_flush():
                if quote_storage.has_data():
                    quote_storage.flush(close_at_end=not self._fetch)
            if quote_storage.has_data():
                # data remaining
                with self._mutex:
                    self._pending_quote_insert.add(quote_storage)
#
# Extra
#
def cleanup_ohlc(self, broker_id: str, market_id: Optional[str] = None, timeframes=None,
from_date: Optional[datetime] = None, to_date: Optional[datetime] = None):
"""
Cleanup any OHLC for a specific broker_id.
If market_id is specified only delete for this market else any market related to the broker identifier
If timeframes is specified only delete this timeframes else any
@note This is | |
# <gh_stars>1-10  (scraper artifact -- not Python source; kept as a comment)
#!/usr/bin/env python
""" md5s3stash
content addressable storage in AWS S3
"""
from __future__ import unicode_literals
import sys
import os
import argparse
import tempfile
import urllib2
import urllib
import urlparse
import base64
import logging
import hashlib
import basin
import boto
import magic
from PIL import Image
from collections import namedtuple
import re
regex_s3 = re.compile(r's3.*amazonaws.com')
def main(argv=None):
    """CLI entry point: stash each URL argument in S3, print one TSV report
    line (url, md5, s3_url, mime_type) per input.

    NOTE(review): a caller-supplied `argv` is used directly as an already
    parsed namespace -- it is NOT passed through parser.parse_args().
    Confirm this is the intended library-usage contract.
    """
    parser = argparse.ArgumentParser(
        description='content addressable storage in AWS S3')
    parser.add_argument('url', nargs='+',
                        help='URL or path of source file to stash')
    parser.add_argument('-b', '--bucket_base', nargs="?",
                        help='this must be a unique name in all of AWS S3')
    # NOTE(review): choices here say 'multivalue' but md5_to_s3_url expects
    # 'multibucket' -- a 'multivalue' run falls into the error branch; confirm.
    parser.add_argument('-s', '--bucket_scheme', nargs="?",
                        default="simple", choices=['simple', 'multivalue'],
                        help='this must be a unique name in all of AWS S3')
    parser.add_argument(
        '-t', '--tempdir',
        required=False,
        help="if your files might be large, make sure this is on a big disk"
    )
    parser.add_argument(
        '-w', '--warnings',
        default=False,
        help='show python `DeprecationWarning`s supressed by default',
        required=False,
        action='store_true',
    )
    parser.add_argument('--loglevel', default='ERROR', required=False)
    parser.add_argument('-u', '--username', required=False,
                        help='username for downloads requiring BasicAuth')
    parser.add_argument('-p', '--password', required=False,
                        help='password for downloads requiring BasicAuth')
    if argv is None:
        argv = parser.parse_args()
    if argv.bucket_base:
        bucket_base = argv.bucket_base
    else:
        assert 'BUCKET_BASE' in os.environ, "`-b` or `BUCKET_BASE` must be set"
        bucket_base = os.environ['BUCKET_BASE']
    if not argv.warnings:
        # supress warnings
        # http://stackoverflow.com/a/2047600/1763984
        import warnings
        warnings.simplefilter("ignore", DeprecationWarning)
    if argv.tempdir:
        # route all NamedTemporaryFile downloads to the chosen directory
        tempfile.tempdir = argv.tempdir
    auth = None
    if argv.username:
        auth = (argv.username, argv.password)
    # set debugging level
    numeric_level = getattr(logging, argv.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % argv.loglevel)
    logging.basicConfig(level=numeric_level, )
    # if being used in a library, probably want to be able to recycle
    # connection?
    conn = boto.connect_s3()
    for url in argv.url:
        print("{0}\t{1}\t{2}\t{3}".format(
            *md5s3stash(url, bucket_base, conn, url_auth=auth, bucket_scheme=argv.bucket_scheme)
        ))
def md5s3stash(
        url,
        bucket_base,
        conn=None,
        url_auth=None,
        url_cache={},
        hash_cache={},
        bucket_scheme='simple'
):
    """ stash a file at `url` in the named `bucket_base` ,
        `conn` is an optional boto.connect_s3()
        `url_auth` is optional Basic auth ('<username>', '<password'>) tuple
        to use if the url to download requires authentication.
        `url_cache` is an object with a dict interface, keyed on url
            url_cache[url] = { md5: ..., If-None-Match: etag, If-Modified-Since: date }
        `hash_cache` is an object with dict interface, keyed on md5
            hash_cache[md5] = ( s3_url, mime_type, dimensions )
        `bucket_scheme` is text string 'simple' or 'multibucket'

        Returns a StashReport(url, md5, s3_url, mime_type, dimensions).

        NOTE: the mutable default arguments are deliberate here -- they act
        as process-lifetime caches shared across calls (see docstring above).
    """
    StashReport = namedtuple('StashReport', 'url, md5, s3_url, mime_type, dimensions')
    (file_path, md5, mime_type) = checkChunks(url, url_auth, url_cache)
    # EAFP: a hash_cache hit means the bytes are already in S3 -- skip upload
    try:
        return StashReport(url, md5, *hash_cache[md5])
    except KeyError:
        pass
    s3_url = md5_to_s3_url(md5, bucket_base, bucket_scheme=bucket_scheme)
    if conn is None:
        conn = boto.connect_s3()
    s3move(file_path, s3_url, mime_type, conn)
    (mime, dimensions) = image_info(file_path)
    os.remove(file_path)  # safer than rmtree
    hash_cache[md5] = (s3_url, mime, dimensions)
    report = StashReport(url, md5, *hash_cache[md5])
    logging.getLogger('MD5S3:stash').info(report)
    return report
# think about refactoring the next two functions
def md5_to_s3_url(md5, bucket_base, bucket_scheme='multibucket'):
    """ calculate the s3 URL given an md5 and an bucket_base

    `bucket_scheme` selects the layout:
        'simple'      -> s3://<bucket_base>/<md5>
        'multibucket' -> s3://<shard>.<bucket_base>/<md5>

    Raises ValueError for any other scheme.  (Previously an unknown scheme
    fell through both branches and raised UnboundLocalError on `url`; note
    the CLI advertises 'multivalue', which lands here as well.)
    """
    if bucket_scheme == 'simple':
        url = "s3://{0}/{1}".format(
            bucket_base,
            md5
        )
    elif bucket_scheme == 'multibucket':
        url = "s3://{0}.{1}/{2}".format(
            md5_to_bucket_shard(md5),
            bucket_base,
            md5
        )
    else:
        raise ValueError("unknown bucket_scheme: {0!r}".format(bucket_scheme))
    return url
def md5_to_http_url(md5, bucket_base, bucket_scheme='multibucket', s3_endpoint='s3.amazonaws.com'):
    """ calculate the http URL given an md5 and an bucket_base

    `bucket_scheme` selects the layout:
        'simple'      -> http://<s3_endpoint>/<bucket_base>/<md5>
        'multibucket' -> http://<shard>.<bucket_base>.<s3_endpoint>/<md5>

    Raises ValueError for any other scheme (previously an unknown scheme
    fell through both branches and raised UnboundLocalError on `url`).
    """
    if bucket_scheme == 'simple':
        url = "http://{0}/{1}/{2}".format(
            s3_endpoint,
            bucket_base,
            md5
        )
    elif bucket_scheme == 'multibucket':
        url = "http://{1}.{2}.{0}/{3}".format(
            s3_endpoint,
            md5_to_bucket_shard(md5),
            bucket_base,
            md5
        )
    else:
        raise ValueError("unknown bucket_scheme: {0!r}".format(bucket_scheme))
    return url
def md5_to_bucket_shard(md5):
    """ calculate the shard label of the bucket name from md5 """
    # "Consider utilizing multiple buckets that start with different
    # alphanumeric characters. This will ensure a degree of partitioning
    # from the start. The higher your volume of concurrent PUT and
    # GET requests, the more impact this will likely have."
    # -- http://aws.amazon.com/articles/1904
    # "Bucket names must be a series of one or more labels. Adjacent
    # labels are separated by a single period (.). [...] Each label must
    # start and end with a lowercase letter or a number. "
    # -- http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
    # see also: http://en.wikipedia.org/wiki/Base_36
    ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
    # http://stats.stackexchange.com/a/70884/14900
    # take the first two digits of the hash and turn that into an inteter
    # this should be evenly distributed
    # NOTE(review): the two hex digits are combined with a *decimal* weight
    # (d0 + 10*d1, range 0..165) rather than 16*.  Looks odd but changing it
    # would remap every existing shard/bucket, so it must stay as-is.
    int_value = int(md5[0], 16)+10*int(md5[1], 16)
    # divide by the length of the alphabet and take the remainder
    bucket = int_value % len(ALPHABET)
    # basin.encode maps the 0..35 remainder onto a single base-36 character
    return basin.encode(ALPHABET, bucket)
def is_s3_url(url):
    '''Return True when `url` points at S3 (matches s3*.amazonaws.com).

    For s3 urls, if you send http authentication headers, S3 will
    send a "400 Bad Request" in response, so callers use this test to
    skip Basic auth for S3-hosted resources.
    '''
    # also matches regional endpoints such as s3-us-west-2.amazonaws.com
    return re.search(r's3.*amazonaws.com', url) is not None
def urlopen_with_auth(url, auth=None, cache={}):
    '''Use urllib2 to open url if the auth is specified.
    auth is tuple of (username, password)

    `cache` maps url -> conditional-GET headers recorded by checkChunks;
    the mutable default is a deliberate process-lifetime cache shared
    across calls (same pattern as md5s3stash's url_cache).

    NOTE: Python 2 only (urllib2/urlparse; b64encode of a str).
    '''
    opener = urllib2.build_opener(DefaultErrorHandler())
    req = urllib2.Request(url)
    p = urlparse.urlparse(url)
    # try to set headers for conditional get request
    try:
        here = cache[url]
        if 'If-None-Match' in here:
            req.add_header('If-None-Match', cache[url]['If-None-Match'],)
        if 'If-Modified-Since' in here:
            req.add_header('If-Modified-Since', cache[url]['If-Modified-Since'],)
    except KeyError:
        pass
    if not auth or is_s3_url(url):
        # S3 answers 400 if auth headers are sent, so fall through unauthed
        if p.scheme not in ['http', 'https']:
            return urllib.urlopen(url)  # urllib works with normal file paths
    else:
        # make sure https
        if p.scheme != 'https':
            raise urllib2.URLError('Basic auth not over https is bad idea! \
scheme:{0}'.format(p.scheme))
        # Need to add header so it gets sent with first request,
        # else redirected to shib
        b64authstr = base64.b64encode('{0}:{1}'.format(*auth))
        req.add_header('Authorization', 'Basic {0}'.format(b64authstr))
    # return urllib2.urlopen(req)
    return opener.open(req)
def checkChunks(url, auth=None, cache={}):
    """
    Helper to download large files the only arg is a url this file
    will go to a temp directory the file will also be downloaded in
    chunks and md5 checksum is returned

    Returns (temp_file_path, md5_hex, mime_type); on a 304 Not Modified
    the path and mime_type are None and the cached md5 is reused.

    NOTE(review): the HTTP/URL error paths return a bare False while every
    caller unpacks a 3-tuple, so a failed download raises TypeError at the
    call site -- confirm whether that is intended.

    based on downloadChunks@https://gist.github.com/gourneau/1430932
    and http://www.pythoncentral.io/hashing-files-with-python/
    """
    temp_file = tempfile.NamedTemporaryFile(delete=False, prefix='md5s3_')
    logging.getLogger('MD5S3').info("temp file path %s" % temp_file.name)
    hasher = hashlib.new('md5')
    BLOCKSIZE = 1024 * hasher.block_size
    try:
        req = urlopen_with_auth(url, auth=auth, cache=cache)
        thisurl = cache.get(url, dict())
        if req.getcode() == 304:
            # conditional GET hit: nothing downloaded, reuse cached md5
            return None, thisurl['md5'], None
        mime_type = req.info()['Content-type']
        # record these headers, they will let us pretend like we are a cacheing
        # proxy server, and send conditional GETs next time we see this file
        etag = req.info().get('ETag', None);
        if etag:
            thisurl['If-None-Match'] = etag
        lmod = req.info().get('Last-Modified', None);
        if lmod:
            thisurl['If-Modified-Since'] = lmod
        downloaded = 0
        with temp_file:
            while True:
                # hash and write chunk by chunk so huge files never sit in RAM
                chunk = req.read(BLOCKSIZE)
                hasher.update(chunk)
                downloaded += len(chunk)
                if not chunk:
                    break
                temp_file.write(chunk)
    except urllib2.HTTPError, e:
        print "HTTP Error:", e.code, url
        return False
    except urllib2.URLError, e:
        print "URL Error:", e.reason, url
        return False
    md5 = hasher.hexdigest()
    thisurl['md5'] = md5
    cache[url] = thisurl
    return temp_file.name, md5, mime_type
def s3move(place1, place2, mime, s3):
    """Upload local file `place1` to s3 URL `place2` with Content-Type `mime`.

    `s3` is a boto S3 connection.  Creates the bucket on demand and skips
    the upload when the key already exists (content-addressed storage, so
    an existing key implies identical bytes).
    """
    l = logging.getLogger('MD5S3:s3move')
    l.debug({
        'place1': place1,
        'place2': place2,
        'mime': mime,
        's3': s3,
    })
    parts = urlparse.urlsplit(place2)
    # SplitResult(scheme='s3', netloc='test.pdf', path='/dkd', query=''
    # , fragment='')
    try:
        bucket = s3.get_bucket(parts.netloc, validate=False)
        l.debug('bucket exists')
    except boto.exception.S3ResponseError:
        bucket = s3.create_bucket(parts.netloc)
        l.debug('bucket created')
    if not(bucket.get_key(parts.path, validate=False)):
        key = bucket.new_key(parts.path)
        # metadata has to be set before setting contents/creating object.
        # See https://gist.github.com/garnaat/1791086
        key.set_metadata("Content-Type", mime)
        key.set_contents_from_filename(place1)
        # key.set_acl('public-read')
        l.debug('file sent to s3')
    else:
        l.info('key existed already')
def image_info(filepath):
    ''' get image info
    `filepath` path to a file
    returns
      a tuple of two values
        1. mime/type if an image; otherwise None
        2. a tuple of (height, width) if an image; otherwise (0,0)
    '''
    try:
        return (
            magic.Magic(mime=True).from_file(filepath),
            Image.open(filepath).size
        )
    except IOError as e:
        # NOTE(review): e.message is Python 2 only; under Python 3 this
        # handler itself would raise AttributeError -- confirm runtime.
        if not e.message.startswith('cannot identify image file'):
            raise e
        else:
            # not an image: report "no mime" and zero dimensions
            return (None, (0,0))
# example 11.7 Defining URL handlers
# http://www.diveintopython.net/http_web_services/etags.html
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
    """urllib2 handler that turns 304 Not Modified into a normal response.

    Instead of raising, the HTTPError object is returned as the result with
    a `.status` attribute, so checkChunks can detect the conditional-GET hit
    via req.getcode() == 304.
    """
    def http_error_304(self, req, fp, code, msg, headers):
        result = urllib2.HTTPError(
            req.get_full_url(), code, msg, headers, fp)
        result.status = code
        return result
# main() idiom for importing into REPL for debugging;
# exit status propagates main()'s return value to the shell
if __name__ == "__main__":
    sys.exit(main())
"""
Copyright (c) 2015, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN | |
2))
inp = X, Y = 7, 11
lmb(inp, out=out)
assert np.allclose(out, [[3 * X**2 * Y, X**3],
[Y + 1, X + 1]])
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_jacobian__broadcast():
    """A Jacobian Lambdify broadcasts over a stack of (x, y) input rows."""
    x, y = se.symbols('x, y')
    args = se.DenseMatrix(2, 1, [x, y])
    v = se.DenseMatrix(2, 1, [x**3 * y, (x+1)*(y+1)])
    jac = v.jacobian(args)
    lmb = se.Lambdify(args, jac)
    # one 2x2 Jacobian per input row -> (3, 2, 2) output
    out = np.empty((3, 2, 2))
    inp0 = 7, 11
    inp1 = 8, 13
    inp2 = 5, 9
    inp = np.array([inp0, inp1, inp2])
    lmb(inp, out=out)
    for idx, (X, Y) in enumerate([inp0, inp1, inp2]):
        assert np.allclose(out[idx, ...], [[3 * X**2 * Y, X**3],
                                           [Y + 1, X + 1]])
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_excessive_args():
    """Feeding two values to a one-argument Lambdify broadcasts elementwise."""
    sym = se.symbols('x')
    negate = se.Lambdify([sym], [-sym])
    values = np.ones(2)
    result = negate(values)
    assert np.allclose(values, [1, 1])  # the input array is left untouched
    assert len(result) == 2  # broad casting
    assert np.allclose(result, -1)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_excessive_out():
    """Only the sliced portion of an oversized `out` buffer is written."""
    sym = se.symbols('x')
    negate = se.Lambdify([sym], [-sym])
    values = np.ones(1)
    buffer = np.ones(2)
    _ = negate(values, out=buffer[:values.size])
    assert np.allclose(values, [1, 1])
    assert buffer[0] == -1  # written by the call
    assert buffer[1] == 1   # untouched tail
def all_indices(shape):
    """Iterate over every index tuple of an array with the given `shape`."""
    return itertools.product(*map(range, shape))
def ravelled(A):
    """Flatten `A` to 1-D: numpy-style via .ravel(), otherwise via .memview."""
    try:
        return A.ravel()
    except AttributeError:
        # fall back for symengine containers exposing a .memview
        return [A[idx] for idx in all_indices(A.memview.shape)]
def _get_2_to_2by2_list(real=True):
    """Build a (x, y) -> 2x2 Lambdify plus a checker for its output.

    The checker accepts any output whose trailing dimensions are (2, 2) and
    verifies every group of four values against the reference expressions;
    cmath.sqrt is used so the complex (real=False) mode also checks out.
    """
    args = x, y = se.symbols('x y')
    exprs = [[x + y*y, y*y], [x*y*y, se.sqrt(x)+y*y]]
    L = se.Lambdify(args, exprs, real=real)
    def check(A, inp):
        X, Y = inp
        assert A.shape[-2:] == (2, 2)
        ref = [X + Y*Y, Y*Y, X*Y*Y, cmath.sqrt(X)+Y*Y]
        ravA = ravelled(A)
        size = _size(ravA)
        for i in range(size//4):
            for j in range(4):
                assert isclose(ravA[i*4 + j], ref[j])
    return L, check
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_2_to_2by2():
    """One scalar input pair yields a single verified 2x2 matrix."""
    lmb, verify = _get_2_to_2by2_list()
    pair = [13, 17]
    verify(lmb(pair), pair)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_unsafe_real():
    """unsafe_real writes into a caller-provided flat float buffer."""
    lmb, verify = _get_2_to_2by2_list()
    pair = np.array([13., 17.])
    buf = np.empty(4)
    lmb.unsafe_real(pair, buf)
    verify(buf.reshape((2, 2)), pair)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_unsafe_complex():
    """unsafe_complex writes into a caller-provided complex128 buffer."""
    lmb, verify = _get_2_to_2by2_list(real=False)
    assert not lmb.real
    pair = np.array([13+11j, 7+4j], dtype=np.complex128)
    buf = np.empty(4, dtype=np.complex128)
    lmb.unsafe_complex(pair, buf)
    verify(buf.reshape((2, 2)), pair)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_itertools_chain():
    """A lazy chained iterable is accepted as Lambdify input."""
    args, exprs, inp, verify = _get_array()
    fn = se.Lambdify(args, exprs)
    lazy_inp = itertools.chain([inp[0]], (inp[1],), [inp[2]])
    verify(fn(lazy_inp))
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_complex_1():
    """Lambdify with real=False evaluates an expression with a 1j term."""
    z = se.Symbol('x')
    shifted = se.Lambdify([z], [1j + z], real=False)
    result = shifted([11+13j])[0]
    assert abs(result - (11 + 14j)) < 1e-15
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_complex_2():
    """Lambdify with real=False evaluates a mixed real/imaginary constant."""
    z = se.Symbol('x')
    shifted = se.Lambdify([z], [3 + z - 1j], real=False)
    result = shifted([11+13j])[0]
    assert abs(result - (14 + 12j)) < 1e-15
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_more_than_255_args():
    # SymPy's lambdify can handle at most 255 arguments
    # this is a proof of concept that this limitation does
    # not affect SymEngine's Lambdify class
    n = 257
    x = se.symarray('x', n)
    p, q, r = 17, 42, 13
    # expressions: sum_i (p+i)*x_i, r + x_0, and a constant
    terms = [i*s for i, s in enumerate(x, p)]
    exprs = [se.add(*terms), r + x[0], -99]
    callback = se.Lambdify(x, exprs)
    input_arr = np.arange(q, q + n*n).reshape((n, n))
    out = callback(input_arr)
    # build the reference row by row with plain integer dot products
    ref = np.empty((n, 3))
    coeffs = np.arange(p, p + n, dtype=np.int64)
    for i in range(n):
        ref[i, 0] = coeffs.dot(np.arange(q + n*i, q + n*(i+1), dtype=np.int64))
        ref[i, 1] = q + n*i + r
    ref[:, 2] = -99
    assert np.allclose(out, ref)
def _Lambdify_heterogeneous_output(Lambdify):
    """Check a Lambdify returning mixed shapes (matrix, scalar, vector, scalar)
    broadcast over a stack of three input rows."""
    x, y = se.symbols('x, y')
    args = se.DenseMatrix(2, 1, [x, y])
    v = se.DenseMatrix(2, 1, [x**3 * y, (x+1)*(y+1)])
    jac = v.jacobian(args)
    exprs = [jac, x+y, v, (x+1)*(y+1)]
    lmb = Lambdify(args, *exprs)
    inp0 = 7, 11
    inp1 = 8, 13
    inp2 = 5, 9
    inp = np.array([inp0, inp1, inp2])
    # one output array per expression, each with a leading broadcast axis
    o_j, o_xpy, o_v, o_xty = lmb(inp)
    for idx, (X, Y) in enumerate([inp0, inp1, inp2]):
        assert np.allclose(o_j[idx, ...], [[3 * X**2 * Y, X**3],
                                           [Y + 1, X + 1]])
        assert np.allclose(o_xpy[idx, ...], [X+Y])
        assert np.allclose(o_v[idx, ...], [[X**3 * Y], [(X+1)*(Y+1)]])
        assert np.allclose(o_xty[idx, ...], [(X+1)*(Y+1)])
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_Lambdify_heterogeneous_output():
    """Run the heterogeneous-output checks with the default backend."""
    _Lambdify_heterogeneous_output(se.Lambdify)
def _sympy_lambdify_heterogeneous_output(cb, Mtx):
    """Same heterogeneous-output check, parametrized over a lambdify callable
    `cb` and a matrix type `Mtx` so it runs against SymEngine and SymPy."""
    x, y = se.symbols('x, y')
    args = Mtx(2, 1, [x, y])
    v = Mtx(2, 1, [x**3 * y, (x+1)*(y+1)])
    jac = v.jacobian(args)
    exprs = [jac, x+y, v, (x+1)*(y+1)]
    lmb = cb(args, exprs)
    inp0 = 7, 11
    inp1 = 8, 13
    inp2 = 5, 9
    # scalar-call interface: one (X, Y) pair per invocation
    for idx, (X, Y) in enumerate([inp0, inp1, inp2]):
        o_j, o_xpy, o_v, o_xty = lmb(X, Y)
        assert np.allclose(o_j, [[3 * X**2 * Y, X**3],
                                 [Y + 1, X + 1]])
        assert np.allclose(o_xpy, [X+Y])
        assert np.allclose(o_v, [[X**3 * Y], [(X+1)*(Y+1)]])
        assert np.allclose(o_xty, [(X+1)*(Y+1)])
@unittest.skipUnless(have_numpy, "Numpy not installed")
@unittest.skipUnless(have_sympy, "SymPy not installed")
def test_lambdify__sympy():
    """SymEngine's and SymPy's lambdify agree on heterogeneous output."""
    import sympy as sp
    _sympy_lambdify_heterogeneous_output(se.lambdify, se.DenseMatrix)
    _sympy_lambdify_heterogeneous_output(sp.lambdify, sp.Matrix)
def _test_Lambdify_scalar_vector_matrix(Lambdify):
    """Check a Lambdify producing a scalar, a vector and a matrix at once,
    both for a single input pair and broadcast over two pairs."""
    if not have_numpy:  # numpy is required for the array assertions below
        return
    args = x, y = se.symbols('x y')
    vec = se.DenseMatrix([x+y, x*y])
    jac = vec.jacobian(se.DenseMatrix(args))
    f = Lambdify(args, x**y, vec, jac)
    assert f.n_exprs == 3
    s, v, m = f([2, 3])
    assert s == 2**3
    assert np.allclose(v, [[2+3], [2*3]])
    # Jacobian of [x+y, x*y] at (2, 3): [[1, 1], [y, x]]
    assert np.allclose(m, [
        [1, 1],
        [3, 2]
    ])
    # flat and 2-D forms of the same two input pairs must broadcast alike
    for inp in [[2, 3, 5, 7], np.array([[2, 3], [5, 7]])]:
        s2, v2, m2 = f(inp)
        assert np.allclose(s2, [2**3, 5**7])
        assert np.allclose(v2, [
            [[2+3], [2*3]],
            [[5+7], [5*7]]
        ])
        assert np.allclose(m2, [
            [
                [1, 1],
                [3, 2]
            ],
            [
                [1, 1],
                [7, 5]
            ]
        ])
def test_Lambdify_scalar_vector_matrix():
    """Run the scalar/vector/matrix checks with each available backend."""
    def make_lambda_backend(*args):
        return se.Lambdify(*args, backend='lambda')
    _test_Lambdify_scalar_vector_matrix(make_lambda_backend)
    if se.have_llvm:
        def make_llvm_backend(*args):
            return se.Lambdify(*args, backend='llvm')
        _test_Lambdify_scalar_vector_matrix(make_llvm_backend)
def test_Lambdify_scalar_vector_matrix_cse():
    """Same checks with common-subexpression elimination enabled."""
    def make_lambda_backend(*args):
        return se.Lambdify(*args, backend='lambda', cse=True)
    _test_Lambdify_scalar_vector_matrix(make_lambda_backend)
    if se.have_llvm:
        def make_llvm_backend(*args):
            return se.Lambdify(*args, backend='llvm', cse=True)
        _test_Lambdify_scalar_vector_matrix(make_llvm_backend)
@unittest.skipUnless(have_numpy, "Numpy not installed")
def test_Lambdify_gh174():
    """Regression test for gh-174: C vs Fortran output ordering."""
    # Tests array broadcasting if the expressions form an N-dimensional array
    # of say shape (k, l, m) and it contains 'n' arguments (x1, ... xn), then
    # if the user provides a Fortran ordered (column-major) input array of shape
    # (n, o, p, q), then the returned array will be of shape (k, l, m, o, p, q)
    args = x, y = se.symbols('x y')
    nargs = len(args)
    vec1 = se.DenseMatrix([x, x**2, x**3])
    assert vec1.shape == (3, 1)
    assert np.asarray(vec1).shape == (3, 1)
    lmb1 = se.Lambdify([x], vec1)
    out1 = lmb1(3)
    assert out1.shape == (3, 1)
    assert np.all(out1 == [[3], [9], [27]])
    assert lmb1([2, 3]).shape == (2, 3, 1)
    lmb1.order = 'F'  # change order
    out1a = lmb1([2, 3])
    # Fortran order puts the broadcast axis last: (3, 1, 2)
    assert out1a.shape == (3, 1, 2)
    ref1a_squeeze = [[2, 3],
                     [4, 9],
                     [8, 27]]
    assert np.all(out1a.squeeze() == ref1a_squeeze)
    assert out1a.flags['F_CONTIGUOUS']
    assert not out1a.flags['C_CONTIGUOUS']
    lmb2c = se.Lambdify(args, vec1, x+y, order='C')
    lmb2f = se.Lambdify(args, vec1, x+y, order='F')
    for out2a in [lmb2c([2, 3]), lmb2f([2, 3])]:
        assert np.all(out2a[0] == [[2], [4], [8]])
        assert out2a[0].ndim == 2
        assert out2a[1] == 5
        assert out2a[1].ndim == 0
    inp2b = np.array([
        [2.0, 3.0],
        [1.0, 2.0],
        [0.0, 6.0]
    ])
    # feeding an array of the wrong layout for the chosen order must fail
    raises(ValueError, lambda: (lmb2c(inp2b.T)))
    out2c = lmb2c(inp2b)
    out2f = lmb2f(np.asfortranarray(inp2b.T))
    assert out2c[0].shape == (3, 3, 1)
    assert out2f[0].shape == (3, 1, 3)
    for idx, (_x, _y) in enumerate(inp2b):
        assert np.all(out2c[0][idx, ...] == [[_x], [_x**2], [_x**3]])
    assert np.all(out2c[1] == [5, 3, 6])
    assert np.all(out2f[1] == [5, 3, 6])
    assert out2c[1].shape == (3,)
    assert out2f[1].shape == (3,)
    def _mtx3(_x, _y):
        # 4x3 reference matrix of _x**row + _y**col
        return [[_x**row_idx + _y**col_idx for col_idx in range(3)]
                for row_idx in range(4)]
    mtx3c = np.array(_mtx3(x, y), order='C')
    mtx3f = np.array(_mtx3(x, y), order='F')
    lmb3c = se.Lambdify([x, y], x*y, mtx3c, vec1, order='C')
    lmb3f = se.Lambdify([x, y], x*y, mtx3f, vec1, order='F')
    inp3c = np.array([[2., 3], [3, 4], [5, 7], [6, 2], [3, 1]])
    inp3f = np.asfortranarray(inp3c.T)
    raises(ValueError, lambda: (lmb3c(inp3c.T)))
    out3c = lmb3c(inp3c)
    assert out3c[0].shape == (5,)
    assert out3c[1].shape == (5, 4, 3)
    assert out3c[2].shape == (5, 3, 1)  # user can apply numpy.squeeze if they want to.
    for a, b in zip(out3c, lmb3c(np.ravel(inp3c))):
        assert np.all(a == b)
    out3f = lmb3f(inp3f)
    assert out3f[0].shape == (5,)
    assert out3f[1].shape == (4, 3, 5)
    assert out3f[2].shape == (3, 1, 5)  # user can apply numpy.squeeze if they want to.
    for a, b in zip(out3f, lmb3f(np.ravel(inp3f, order='F'))):
        assert np.all(a == b)
    for idx, (_x, _y) in enumerate(inp3c):
        assert out3c[0][idx] == _x*_y
        assert out3f[0][idx] == _x*_y
        assert np.all(out3c[1][idx, ...] == _mtx3(_x, _y))
        assert np.all(out3f[1][..., idx] == _mtx3(_x, _y))
        assert np.all(out3c[2][idx, ...] == [[_x],[_x**2],[_x**3]])
        assert np.all(out3f[2][..., idx] == [[_x],[_x**2],[_x**3]])
def _get_Ndim_args_exprs_funcs(order):
args = x, y = se.symbols('x y')
# Higher dimensional inputs
def f_a(index, _x, _y):
a, b, c, d = index
return _x**a + _y**b + (_x+_y)**-d
nd_exprs_a = np.zeros((3, 5, 1, 4), dtype=object, order=order)
for index in np.ndindex(*nd_exprs_a.shape):
nd_exprs_a[index] = f_a(index, x, y)
| |
# services/traction/acapy_client/api/credential_definition_api.py
# (scraper <filename> artifact converted to a comment; not Python source)
"""
Aries Cloud Agent
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0.7.2
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from acapy_client.api_client import ApiClient, Endpoint as _Endpoint
from acapy_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types,
)
from acapy_client.model.credential_definition_get_result import (
CredentialDefinitionGetResult,
)
from acapy_client.model.credential_definition_send_request import (
CredentialDefinitionSendRequest,
)
from acapy_client.model.credential_definitions_created_result import (
CredentialDefinitionsCreatedResult,
)
from acapy_client.model.txn_or_credential_definition_send_result import (
TxnOrCredentialDefinitionSendResult,
)
class CredentialDefinitionApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.credential_definitions_created_get_endpoint = _Endpoint(
settings={
"response_type": (CredentialDefinitionsCreatedResult,),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credential-definitions/created",
"operation_id": "credential_definitions_created_get",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"cred_def_id",
"issuer_did",
"schema_id",
"schema_issuer_did",
"schema_name",
"schema_version",
],
"required": [],
"nullable": [],
"enum": [],
"validation": [
"cred_def_id",
"issuer_did",
"schema_id",
"schema_issuer_did",
"schema_version",
],
},
root_map={
"validations": {
("cred_def_id",): {
"regex": {
"pattern": r"^([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}):3:CL:(([1-9][0-9]*)|([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+)):(.+)?$", # noqa: E501
},
},
("issuer_did",): {
"regex": {
"pattern": r"^(did:sov:)?[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}$", # noqa: E501
},
},
("schema_id",): {
"regex": {
"pattern": r"^[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+$", # noqa: E501
},
},
("schema_issuer_did",): {
"regex": {
"pattern": r"^(did:sov:)?[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}$", # noqa: E501
},
},
("schema_version",): {
"regex": {
"pattern": r"^[0-9.]+$", # noqa: E501
},
},
},
"allowed_values": {},
"openapi_types": {
"cred_def_id": (str,),
"issuer_did": (str,),
"schema_id": (str,),
"schema_issuer_did": (str,),
"schema_name": (str,),
"schema_version": (str,),
},
"attribute_map": {
"cred_def_id": "cred_def_id",
"issuer_did": "issuer_did",
"schema_id": "schema_id",
"schema_issuer_did": "schema_issuer_did",
"schema_name": "schema_name",
"schema_version": "schema_version",
},
"location_map": {
"cred_def_id": "query",
"issuer_did": "query",
"schema_id": "query",
"schema_issuer_did": "query",
"schema_name": "query",
"schema_version": "query",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self.credential_definitions_cred_def_id_get_endpoint = _Endpoint(
settings={
"response_type": (CredentialDefinitionGetResult,),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credential-definitions/{cred_def_id}",
"operation_id": "credential_definitions_cred_def_id_get",
"http_method": "GET",
"servers": None,
},
params_map={
"all": [
"cred_def_id",
],
"required": [
"cred_def_id",
],
"nullable": [],
"enum": [],
"validation": [
"cred_def_id",
],
},
root_map={
"validations": {
("cred_def_id",): {
"regex": {
"pattern": r"^([123456789ABCDEFGHJKLMNPQRST<KEY>]{21,22}):3:CL:(([1-9][0-9]*)|([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+)):(.+)?$", # noqa: E501
},
},
},
"allowed_values": {},
"openapi_types": {
"cred_def_id": (str,),
},
"attribute_map": {
"cred_def_id": "cred_def_id",
},
"location_map": {
"cred_def_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self.credential_definitions_cred_def_id_write_record_post_endpoint = _Endpoint(
settings={
"response_type": (CredentialDefinitionGetResult,),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credential-definitions/{cred_def_id}/write_record",
"operation_id": "credential_definitions_cred_def_id_write_record_post",
"http_method": "POST",
"servers": None,
},
params_map={
"all": [
"cred_def_id",
],
"required": [
"cred_def_id",
],
"nullable": [],
"enum": [],
"validation": [
"cred_def_id",
],
},
root_map={
"validations": {
("cred_def_id",): {
"regex": {
"pattern": r"^([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}):3:CL:(([1-9][0-9]*)|([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+)):(.+)?$", # noqa: E501
},
},
},
"allowed_values": {},
"openapi_types": {
"cred_def_id": (str,),
},
"attribute_map": {
"cred_def_id": "cred_def_id",
},
"location_map": {
"cred_def_id": "path",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
self.credential_definitions_post_endpoint = _Endpoint(
settings={
"response_type": (TxnOrCredentialDefinitionSendResult,),
"auth": ["AuthorizationHeader"],
"endpoint_path": "/credential-definitions",
"operation_id": "credential_definitions_post",
"http_method": "POST",
"servers": None,
},
params_map={
"all": [
"conn_id",
"create_transaction_for_endorser",
"body",
],
"required": [],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"conn_id": (str,),
"create_transaction_for_endorser": (bool,),
"body": (CredentialDefinitionSendRequest,),
},
"attribute_map": {
"conn_id": "conn_id",
"create_transaction_for_endorser": "create_transaction_for_endorser",
},
"location_map": {
"conn_id": "query",
"create_transaction_for_endorser": "query",
"body": "body",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/json"],
"content_type": [],
},
api_client=api_client,
)
def credential_definitions_created_get(self, **kwargs):
"""Search for matching credential definitions that agent originated # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_definitions_created_get(async_req=True)
>>> result = thread.get()
Keyword Args:
cred_def_id (str): Credential definition id. [optional]
issuer_did (str): Issuer DID. [optional]
schema_id (str): Schema identifier. [optional]
schema_issuer_did (str): Schema issuer DID. [optional]
schema_name (str): Schema name. [optional]
schema_version (str): Schema version. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
CredentialDefinitionsCreatedResult
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
return self.credential_definitions_created_get_endpoint.call_with_http_info(
**kwargs
)
def credential_definitions_cred_def_id_get(self, cred_def_id, **kwargs):
"""Gets a credential definition from the ledger # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_definitions_cred_def_id_get(cred_def_id, async_req=True)
>>> result = thread.get()
Args:
cred_def_id (str): Credential definition identifier
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
CredentialDefinitionGetResult
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
kwargs["cred_def_id"] = cred_def_id
return self.credential_definitions_cred_def_id_get_endpoint.call_with_http_info(
**kwargs
)
def credential_definitions_cred_def_id_write_record_post(
self, cred_def_id, **kwargs
):
"""Writes a credential definition non-secret record to the wallet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.credential_definitions_cred_def_id_write_record_post(cred_def_id, async_req=True)
>>> result = thread.get()
Args:
cred_def_id (str): Credential definition identifier
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies | |
discovery service is used by
clients to query information about peers. Such as - which peers have joined a
channel, what is the latest channel config, and what possible sets of peers
satisfy the endorsement policy (given a smart contract and a channel).
:attr ConfigPeerLimits limits: (optional)
:attr ConfigPeerGateway gateway: (optional)
"""
def __init__(self,
*,
id: str = None,
network_id: str = None,
keepalive: 'ConfigPeerKeepalive' = None,
gossip: 'ConfigPeerGossip' = None,
authentication: 'ConfigPeerAuthentication' = None,
client: 'ConfigPeerClient' = None,
deliveryclient: 'ConfigPeerDeliveryclient' = None,
admin_service: 'ConfigPeerAdminService' = None,
validator_pool_size: float = None,
discovery: 'ConfigPeerDiscovery' = None,
limits: 'ConfigPeerLimits' = None,
gateway: 'ConfigPeerGateway' = None) -> None:
"""
Initialize a ConfigPeerUpdatePeer object.
:param str id: (optional) A unique id used to identify this instance.
:param str network_id: (optional) The ID to logically separate one network
from another.
:param ConfigPeerKeepalive keepalive: (optional) Keep alive settings
between the peer server and clients.
:param ConfigPeerGossip gossip: (optional)
:param ConfigPeerAuthentication authentication: (optional)
:param ConfigPeerClient client: (optional)
:param ConfigPeerDeliveryclient deliveryclient: (optional)
:param ConfigPeerAdminService admin_service: (optional) Used for
administrative operations such as control over logger levels. Only peer
administrators can use the service.
:param float validator_pool_size: (optional) Number of go-routines that
will execute transaction validation in parallel. By default, the peer
chooses the number of CPUs on the machine. It is recommended to use the
default values and not set this field.
:param ConfigPeerDiscovery discovery: (optional) The discovery service is
used by clients to query information about peers. Such as - which peers
have joined a channel, what is the latest channel config, and what possible
sets of peers satisfy the endorsement policy (given a smart contract and a
channel).
:param ConfigPeerLimits limits: (optional)
:param ConfigPeerGateway gateway: (optional)
"""
self.id = id
self.network_id = network_id
self.keepalive = keepalive
self.gossip = gossip
self.authentication = authentication
self.client = client
self.deliveryclient = deliveryclient
self.admin_service = admin_service
self.validator_pool_size = validator_pool_size
self.discovery = discovery
self.limits = limits
self.gateway = gateway
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConfigPeerUpdatePeer':
"""Initialize a ConfigPeerUpdatePeer object from a json dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
if 'networkId' in _dict:
args['network_id'] = _dict.get('networkId')
if 'keepalive' in _dict:
args['keepalive'] = ConfigPeerKeepalive.from_dict(_dict.get('keepalive'))
if 'gossip' in _dict:
args['gossip'] = ConfigPeerGossip.from_dict(_dict.get('gossip'))
if 'authentication' in _dict:
args['authentication'] = ConfigPeerAuthentication.from_dict(_dict.get('authentication'))
if 'client' in _dict:
args['client'] = ConfigPeerClient.from_dict(_dict.get('client'))
if 'deliveryclient' in _dict:
args['deliveryclient'] = ConfigPeerDeliveryclient.from_dict(_dict.get('deliveryclient'))
if 'adminService' in _dict:
args['admin_service'] = ConfigPeerAdminService.from_dict(_dict.get('adminService'))
if 'validatorPoolSize' in _dict:
args['validator_pool_size'] = _dict.get('validatorPoolSize')
if 'discovery' in _dict:
args['discovery'] = ConfigPeerDiscovery.from_dict(_dict.get('discovery'))
if 'limits' in _dict:
args['limits'] = ConfigPeerLimits.from_dict(_dict.get('limits'))
if 'gateway' in _dict:
args['gateway'] = ConfigPeerGateway.from_dict(_dict.get('gateway'))
return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigPeerUpdatePeer object from a json dictionary."""
        # Thin wrapper that delegates to from_dict.
        # NOTE(review): appears to be a legacy underscore-prefixed alias --
        # confirm before removing.
        return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'network_id') and self.network_id is not None:
_dict['networkId'] = self.network_id
if hasattr(self, 'keepalive') and self.keepalive is not None:
_dict['keepalive'] = self.keepalive.to_dict()
if hasattr(self, 'gossip') and self.gossip is not None:
_dict['gossip'] = self.gossip.to_dict()
if hasattr(self, 'authentication') and self.authentication is not None:
_dict['authentication'] = self.authentication.to_dict()
if hasattr(self, 'client') and self.client is not None:
_dict['client'] = self.client.to_dict()
if hasattr(self, 'deliveryclient') and self.deliveryclient is not None:
_dict['deliveryclient'] = self.deliveryclient.to_dict()
if hasattr(self, 'admin_service') and self.admin_service is not None:
_dict['adminService'] = self.admin_service.to_dict()
if hasattr(self, 'validator_pool_size') and self.validator_pool_size is not None:
_dict['validatorPoolSize'] = self.validator_pool_size
if hasattr(self, 'discovery') and self.discovery is not None:
_dict['discovery'] = self.discovery.to_dict()
if hasattr(self, 'limits') and self.limits is not None:
_dict['limits'] = self.limits.to_dict()
if hasattr(self, 'gateway') and self.gateway is not None:
_dict['gateway'] = self.gateway.to_dict()
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Thin wrapper that delegates to to_dict.
        # NOTE(review): appears to be a legacy underscore-prefixed alias --
        # confirm before removing.
        return self.to_dict()
    def __str__(self) -> str:
        """Return a `str` version of this ConfigPeerUpdatePeer object."""
        # Pretty-printed JSON of the serialized form; handy for logging.
        return json.dumps(self.to_dict(), indent=2)
    def __eq__(self, other: 'ConfigPeerUpdatePeer') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        # Only instances of the exact same class can compare equal;
        # equality is then attribute-by-attribute via the instance __dict__.
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other: 'ConfigPeerUpdatePeer') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        # Explicit negation of __eq__.
        return not self == other
class ConfigPeerAdminService():
    """
    Used for administrative operations such as control over logger levels. Only peer
    administrators can use the service.

    :attr str listen_address: The interface and port on which the admin server will
          listen on. Defaults to the same address as the peer's listen address and
          port 7051.
    """

    def __init__(self,
                 listen_address: str) -> None:
        """
        Initialize a ConfigPeerAdminService object.

        :param str listen_address: The interface and port on which the admin
               server will listen on. Defaults to the same address as the
               peer's listen address and port 7051.
        """
        self.listen_address = listen_address

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ConfigPeerAdminService':
        """Initialize a ConfigPeerAdminService object from a json dictionary."""
        # listenAddress is the only property and it is mandatory.
        if 'listenAddress' not in _dict:
            raise ValueError('Required property \'listenAddress\' not present in ConfigPeerAdminService JSON')
        return cls(listen_address=_dict.get('listenAddress'))

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigPeerAdminService object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # An unset or None attribute is omitted from the output.
        if getattr(self, 'listen_address', None) is not None:
            serialized['listenAddress'] = self.listen_address
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ConfigPeerAdminService object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ConfigPeerAdminService') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'ConfigPeerAdminService') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ConfigPeerAuthentication():
    """
    ConfigPeerAuthentication.

    :attr str timewindow: The maximum acceptable difference between the current
          server time and the client's time.
    """

    def __init__(self,
                 timewindow: str) -> None:
        """
        Initialize a ConfigPeerAuthentication object.

        :param str timewindow: The maximum acceptable difference between the
               current server time and the client's time.
        """
        self.timewindow = timewindow

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ConfigPeerAuthentication':
        """Initialize a ConfigPeerAuthentication object from a json dictionary."""
        # timewindow is the only property and it is mandatory.
        if 'timewindow' not in _dict:
            raise ValueError('Required property \'timewindow\' not present in ConfigPeerAuthentication JSON')
        return cls(timewindow=_dict.get('timewindow'))

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ConfigPeerAuthentication object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # An unset or None attribute is omitted from the output.
        if getattr(self, 'timewindow', None) is not None:
            serialized['timewindow'] = self.timewindow
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ConfigPeerAuthentication object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ConfigPeerAuthentication') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'ConfigPeerAuthentication') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ConfigPeerChaincode():
"""
ConfigPeerChaincode.
:attr ConfigPeerChaincodeGolang golang: (optional)
:attr List[ConfigPeerChaincodeExternalBuildersItem] external_builders:
(optional) List of directories to treat as external builders/launches of
chaincode.
:attr str install_timeout: (optional) Maximum duration to wait for the chaincode
build and install process to complete.
:attr str startuptimeout: (optional) Time for starting up a container and
waiting for Register to come through.
:attr str executetimeout: (optional) Time for Invoke and Init calls to return.
This timeout is used by all chaincodes in all the channels, including system
chaincodes. Note that if the image is not available the peer needs to build the
image, which will take additional time.
:attr ConfigPeerChaincodeSystem system: (optional) The complete whitelist for
system chaincodes. To append a new chaincode add the new id to the default list.
:attr ConfigPeerChaincodeLogging logging: (optional)
"""
def __init__(self,
*,
golang: 'ConfigPeerChaincodeGolang' = None,
external_builders: List['ConfigPeerChaincodeExternalBuildersItem'] = None,
install_timeout: str = None,
startuptimeout: str = None,
executetimeout: str = None,
system: 'ConfigPeerChaincodeSystem' = None,
logging: 'ConfigPeerChaincodeLogging' = None) -> None:
"""
Initialize a ConfigPeerChaincode object.
:param ConfigPeerChaincodeGolang golang: (optional)
:param List[ConfigPeerChaincodeExternalBuildersItem] external_builders:
(optional) List of | |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import optparse
import os
import subprocess
import sys
import logging_utils
import path_utils
import common
import valgrind_test
class TestNotFound(Exception):
  """The requested test name is not in ChromeTests._test_list."""

class MultipleGTestFiltersSpecified(Exception):
  """A gtest filter was given both via --gtest_filter and --test name:filter."""

class BuildDirNotFound(Exception):
  """No build directory could be found or was specified via --build-dir."""

class BuildDirAmbiguous(Exception):
  """More than one candidate build directory was found."""

class ExecutableNotFound(Exception):
  """The requested test executable does not exist in the build directory."""

class BadBinary(Exception):
  """Signals an unusable test binary."""
class ChromeTests:
  """Driver that runs one named test suite or script under a memory tool.

  The test is selected by name (see the _test_list mapping at the bottom of
  the class); the actual wrapping of the invocation with the memory tool is
  delegated to valgrind_test.py.
  NOTE(review): this is Python 2 code (print statements, dict.iteritems,
  old-style raise) -- it must run under a Python 2 interpreter.
  """
  # Tools slow enough that the extra ".gtest.txt" exclusion files are
  # applied on top of the tool-specific ones (see _AppendGtestFilter).
  SLOW_TOOLS = ["drmemory"]
  def __init__(self, options, args, test):
    """Parses the test selection and locates the source and build dirs.

    Args:
      options: parsed optparse options object.
      args: extra positional arguments forwarded to the tool command line.
      test: test name, optionally in "name:gtest_filter" form.

    Raises:
      TestNotFound: the test name is not in _test_list.
      MultipleGTestFiltersSpecified: a filter was given both via
          --gtest_filter and embedded in --test.
    """
    if ':' in test:
      # "name:filter" form -- the filter embedded in --test takes effect.
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter
    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)
    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
                                          "and --test %s" % test)
    self._options = options
    self._args = args
    # Compute the top of the tree (the "source dir") from the script dir
    # (where this script lives). We assume that the script dir is in
    # tools/drmemory/scripts relative to the top of the tree.
    script_dir = os.path.dirname(path_utils.ScriptDir())
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Setup Dr. Memory if it's not set up yet.
    drmem_cmd = os.getenv("DRMEMORY_COMMAND")
    if not drmem_cmd:
      # Unpack the self-extracting Dr. Memory archive shipped next to this
      # script and export its location for child processes.
      drmem_sfx = os.path.join(script_dir, "drmemory-windows-sfx.exe")
      if not os.path.isfile(drmem_sfx):
        raise RuntimeError, "Cannot find drmemory-windows-sfx.exe"
      drmem_dir = os.path.join(script_dir, "unpacked")
      subprocess.call([drmem_sfx, "-o" + drmem_dir, "-y"], 0)
      drmem_cmd = os.path.join(drmem_dir, "bin", "drmemory.exe")
      os.environ["DRMEMORY_COMMAND"] = drmem_cmd
    # since this path is used for string matching, make sure it's always
    # an absolute Unix-style path
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
    if not self._options.build_dir:
      # Probe the conventional output locations; exactly one may exist.
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build-dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None
    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
  def _EnsureBuildDirFound(self):
    """Raises BuildDirNotFound when no build dir was found or specified."""
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build-dir")
  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'
    cmd = list(self._command_preamble)
    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
    script_dir = path_utils.ScriptDir()
    suppression_file = os.path.join(script_dir, "..", "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppression
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, "..", 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)
    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args != None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      exe_path = os.path.join(self._options.build_dir, exe)
      if not os.path.exists(exe_path):
        raise ExecutableNotFound("Couldn't find '%s'" % exe_path)
      cmd.append(exe_path)
      # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
      # Built-in test launcher for gtest-based executables runs tests using
      # multiple process by default. Force the single-process mode back.
      cmd.append("--single-process-tests")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_shuffle:
      cmd.append("--gtest_shuffle")
    if self._options.gtest_break_on_failure:
      cmd.append("--gtest_break_on_failure")
    if self._options.test_launcher_bot_mode:
      cmd.append("--test-launcher-bot-mode")
    if self._options.test_launcher_total_shards is not None:
      cmd.append("--test-launcher-total-shards=%d" % self._options.test_launcher_total_shards)
    if self._options.test_launcher_shard_index is not None:
      cmd.append("--test-launcher-shard-index=%d" % self._options.test_launcher_shard_index)
    return cmd
  def Run(self):
    ''' Runs the test specified by command-line argument --test '''
    logging.info("running test %s" % (self._test))
    # _test_list values are unbound functions; pass self explicitly.
    return self._test_list[self._test](self)
  def _AppendGtestFilter(self, tool, name, cmd):
    '''Append an appropriate --gtest_filter flag to the googletest binary
    invocation.
    If the user passed his own filter mentioning only one test, just use it.
    Othewise, filter out tests listed in the appropriate gtest_exclude files.
    '''
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      # The filter names exactly one test (no lists or wildcards):
      # use it verbatim and skip the exclude files.
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return
    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info(" \"%s\" - not found" % readable_filename)
        continue
      logging.info(" \"%s\" - OK" % readable_filename)
      f = open(filename, 'r')
      for line in f.readlines():
        # Skip comments and blank lines in the exclude file.
        if line.startswith("#") or line.startswith("//") or line.isspace():
          continue
        line = line.rstrip()
        test_prefixes = ["FLAKY", "FAILS"]
        for p in test_prefixes:
          # Strip prefixes from the test names.
          line = line.replace(".%s_" % p, ".")
        # Exclude the original test name.
        filters.append(line)
        if line[-2:] != ".*":
          # List all possible prefixes if line doesn't end with ".*".
          for p in test_prefixes:
            filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
    if len(filters):
      if gtest_filter:
        # Append the exclusions to the user's filter; a "-" separates the
        # positive patterns from the negative ones, so add it only once.
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)
  @staticmethod
  def ShowTests():
    """Prints every test name, grouping aliases of the same function."""
    # Group names that map to the same test function.
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)
    # The shortest name is the canonical one; the rest are aliases.
    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=lambda name: len(name))
      name_to_aliases[names[0]] = names[1:]
    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print " {} (aka {})".format(name, ', '.join(aliases))
      else:
        print " {}".format(name)
  def SetupLdPath(self, requires_build_dir):
    """Exports the build dir on LD_LIBRARY_PATH for child processes.

    Args:
      requires_build_dir: when True, raise if no build dir is known;
          when False, silently skip if there is none.
    """
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return
    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
    if (os.getenv("LD_LIBRARY_PATH")):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    """Runs a single gtest binary under the selected memory tool.

    Args:
      module: label passed through to tool.Run.
      name: executable name inside the build dir.
      valgrind_test_args: extra flags for valgrind_test.py, if any.
      cmd_args: extra flags for the test binary itself, if any.
    """
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)
    self.SetupLdPath(True)
    return tool.Run(cmd, module)
  def RunCmdLine(self):
    """Runs an arbitrary command line (the extra args) under the tool."""
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)
  def TestPDFiumUnitTests(self):
    """Runs the pdfium_unittests binary under the tool."""
    return self.SimpleTest("pdfium_unittests", "pdfium_unittests")
  def TestPDFiumEmbedderTests(self):
    """Runs the pdfium_embeddertests binary under the tool."""
    return self.SimpleTest("pdfium_embeddertests", "pdfium_embeddertests")
  def TestPDFiumTest(self, script_name):
    """Runs one of the testing/tools scripts under the tool.

    Args:
      script_name: file name of the script in testing/tools to execute.
    """
    # Build the command line in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ...
    # but we'll use the --indirect_pdfium_test flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_pdfium_test")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_corpus_tests commandline.
    script = os.path.join(self._source_dir, "testing", "tools", script_name)
    script_cmd = ["python", script]
    if self._options.build_dir:
      script_cmd.extend(["--build-dir", self._options.build_dir])
    # TODO(zhaoqin): it only runs in single process mode now,
    # need figure out why it does not work with test_one_file_parallel
    # in run_corpus_tests.py.
    if script_name == "run_corpus_tests.py":
      script_cmd.extend(["-j", "1"])
    # Now run script_cmd with the wrapper in cmd
    cmd.append("--")
    cmd.extend(script_cmd)
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=0)
    return ret
  def TestPDFiumJavascript(self):
    """Runs the PDFium javascript tests under the tool."""
    return self.TestPDFiumTest("run_javascript_tests.py")
  def TestPDFiumPixel(self):
    """Runs the PDFium pixel tests under the tool."""
    return self.TestPDFiumTest("run_pixel_tests.py")
  def TestPDFiumCorpus(self):
    """Runs the PDFium corpus tests under the tool."""
    return self.TestPDFiumTest("run_corpus_tests.py")
  # The known list of tests.
  # Maps --test names to the (unbound) functions above; defined after the
  # methods so the names are in scope at class-body execution time.
  _test_list = {
    "cmdline" : RunCmdLine,
    "pdfium_corpus": TestPDFiumCorpus,
    "pdfium_embeddertests": TestPDFiumEmbedderTests,
    "pdfium_javascript": TestPDFiumJavascript,
    "pdfium_pixel": TestPDFiumPixel,
    "pdfium_unittests": TestPDFiumUnitTests,
  }
def _main():
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.add_option("--help-tests", dest="help_tests", action="store_true",
default=False, help="List all available tests")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
parser.add_option("-t", "--test", action="append", default=[],
help="which test to run, supports test:gtest_filter format "
"as well.")
parser.add_option("--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
parser.add_option("--gtest_shuffle", action="store_true", default=False,
help="Randomize tests' orders on every iteration.")
parser.add_option("--gtest_break_on_failure", action="store_true",
default=False,
help="Drop in to debugger on assertion failure. Also "
"useful for forcing tests to exit with a stack dump "
"on the first assertion failure when running with "
"--gtest_repeat=-1")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
parser.add_option("--tool", dest="valgrind_tool", default="drmemory_full",
help="specify a valgrind tool to run the tests under")
parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
help="specify custom flags for the selected valgrind tool")
parser.add_option("--keep_logs", action="store_true", default=False,
help="store memory tool logs in the <tool>.logs directory "
"instead of /tmp.\nThis can be useful for tool "
"developers/maintainers.\nPlease note that the <tool>"
".logs directory will be clobbered on tool startup.")
parser.add_option("--test-launcher-bot-mode", action="store_true",
help="run the tests with --test-launcher-bot-mode")
parser.add_option("--test-launcher-total-shards", type=int,
help="run the | |
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c), AiiDA team and individual contributors. #
# All rights reserved. #
# This file is part of the AiiDA-wannier90 code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-wannier90 #
# For further information on the license, see the LICENSE.txt file #
################################################################################
import os
import fnmatch
from collections import namedtuple
from aiida.common import datastructures
from aiida.common import exceptions as exc
from aiida.engine import CalcJob
from aiida.orm import (
BandsData, Dict, FolderData, KpointsData, List, OrbitalData, RemoteData,
SinglefileData, StructureData
)
from .io import write_win
__all__ = ('Wannier90Calculation', )
# Bundle of the three file lists a calculation hands back to the engine:
# files copied from the AiiDA repository, files copied from a remote
# computer, and files symlinked on the remote computer.
_InputFileLists = namedtuple(
    '_InputFileLists',
    ('local_copy_list', 'remote_copy_list', 'remote_symlink_list')
)
# When support for Python <3.7 is dropped, we can set 'default=(False, False)'
# here, and remove the extra kwargs in the constructor calls -- or use
# a dataclass to implement the same.
# Describes one candidate input file by its filename suffix, whether it is
# mandatory, and whether it must always be copied.
# NOTE(review): 'always_copy' presumably means "copy even when symlinking
# is enabled" -- confirm against the code that consumes these specs.
_InputFileSpec = namedtuple(
    '_InputFileSpec', ('suffix', 'required', 'always_copy')
)
class Wannier90Calculation(CalcJob):
"""
Plugin for Wannier90, a code for computing maximally-localized Wannier
functions. See http://www.wannier.org/ for more details.
"""
# The input filename MUST end with .win. This is validated by the prepare_for_submission
_REQUIRED_INPUT_SUFFIX = ".win"
_DEFAULT_INPUT_FILE = 'aiida.win'
_DEFAULT_OUTPUT_FILE = 'aiida.wout'
# The following ones CANNOT be set by the user - in this case an exception will be raised
# IMPORTANT: define them here in lower-case
_BLOCKED_PARAMETER_KEYS = (
'length_unit',
'unit_cell_cart',
'atoms_cart',
'projections',
'postproc_setup' # Pass instead a 'postproc_setup' in the input `settings` node
)
# By default, retrieve all produced files except .nnkp (which
# is handled separately) and .chk (checkpoint files are large,
# and usually not needed).
_DEFAULT_RETRIEVE_SUFFIXES = (
'.wout', '.werr', '.r2mn', '_band.dat', '_band.dat', '_band.agr',
'_band.kpt', '.bxsf', '_w.xsf', '_w.cube', '_centres.xyz', '_hr.dat',
'_tb.dat', '_r.dat', '.bvec', '_wsvec.dat', '_qc.dat', '_dos.dat',
'_htB.dat', '_u.mat', '_u_dis.mat', '.vdw', '_band_proj.dat',
'_band.labelinfo.dat'
)
@classmethod
def define(cls, spec): # pylint: disable=no-self-argument
super(Wannier90Calculation, cls).define(spec)
spec.input(
"structure",
valid_type=StructureData,
help="input crystal structure"
)
spec.input(
"parameters",
valid_type=Dict,
help="Input parameters for the Wannier90 code"
)
spec.input(
"settings",
valid_type=Dict,
required=False,
help="""Additional settings to manage the Wannier90 calculation."""
)
spec.input(
"projections",
valid_type=(OrbitalData, Dict, List),
help="Starting projections for the Wannierisation procedure.",
required=False
)
spec.input(
"local_input_folder",
valid_type=FolderData,
required=False,
help=
"Get input files (``.amn``, ``.mmn``, ...) from a class ``FolderData`` stored in the AiiDA repository."
)
spec.input(
"remote_input_folder",
valid_type=RemoteData,
required=False,
help=
"Get input files (``.amn``, ``.mmn``, ...) from a class ``RemoteData`` possibly stored in a remote computer."
)
spec.input(
"kpoints",
valid_type=KpointsData,
help="k-point mesh used in the NSCF calculation."
)
spec.input(
"kpoint_path",
valid_type=Dict,
required=False,
help=
"Description of the k-points path to be used for bands interpolation; "
"it should contain two properties: "
"a list ``path`` of length-2 tuples with the labels of the endpoints of the path; and "
"a dictionary ``point_coords`` giving the scaled coordinates for each high-symmetry endpoint."
)
spec.output(
'output_parameters',
valid_type=Dict,
help=
'The ``output_parameters`` output node of the successful calculation.'
)
spec.output(
'interpolated_bands',
valid_type=BandsData,
required=False,
help='The interpolated band structure by Wannier90 (if any).'
)
spec.output(
'nnkp_file',
valid_type=SinglefileData,
required=False,
help='The ``.nnkp`` file, produced only in -pp (postproc) mode.'
)
spec.default_output_node = 'output_parameters'
spec.input(
'metadata.options.input_filename',
valid_type=str,
default=cls._DEFAULT_INPUT_FILE
)
spec.input(
'metadata.options.output_filename',
valid_type=str,
default=cls._DEFAULT_OUTPUT_FILE
)
spec.input(
'metadata.options.parser_name',
valid_type=str,
default='wannier90.wannier90'
)
# withmpi defaults to "False" in aiida-core 1.0. Below, we override to default to withmpi=True
spec.input('metadata.options.withmpi', valid_type=bool, default=True)
spec.exit_code(
200,
'ERROR_NO_RETRIEVED_FOLDER',
message='The retrieved folder data node could not be accessed.',
invalidates_cache=True
)
spec.exit_code(
210,
'ERROR_OUTPUT_STDOUT_MISSING',
message=
'The retrieved folder did not contain the required stdout output file.',
invalidates_cache=True
)
spec.exit_code(
300,
'ERROR_WERR_FILE_PRESENT',
message='A Wannier90 error file (.werr) has been found.'
)
spec.exit_code(
400,
'ERROR_EXITING_MESSAGE_IN_STDOUT',
message=
'The string "Exiting..." has been found in the Wannier90 output (some partial output might have been '
'parsed).'
)
@property
def _SEEDNAME(self):
"""
Return the default seedname, unless a custom one has been set in the
calculation settings.
:raise ValueError: if the input_filename does not end with ``.win``.
"""
input_filename = self.inputs.metadata.options.input_filename
if input_filename.endswith(self._REQUIRED_INPUT_SUFFIX):
return input_filename[:-len(self._REQUIRED_INPUT_SUFFIX)]
# If we are here, it's an invalid input filename.
raise ValueError(
"The input filename '{}' does not end with '{}', so I don't know how to get the seedname. "
"You need to change the `metadata.options.input_filename` in the process inputs."
.format(input_filename, self._REQUIRED_INPUT_SUFFIX)
)
def prepare_for_submission(self, folder): #pylint: disable=too-many-locals, too-many-statements # noqa: disable=MC0001
"""
Routine which creates the input file of Wannier90
:param folder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
"""
self._validate_input_output_names()
param_dict = self.inputs.parameters.get_dict()
self._validate_lowercase(param_dict)
self._validate_input_parameters(param_dict)
if 'settings' in self.inputs:
settings_dict = self.inputs.settings.get_dict()
else:
settings_dict = {}
self._validate_lowercase(settings_dict)
pp_setup = settings_dict.pop('postproc_setup', False)
if pp_setup:
param_dict.update({'postproc_setup': True})
has_local_input = 'local_input_folder' in self.inputs
has_remote_input = 'remote_input_folder' in self.inputs
if pp_setup:
if has_local_input or has_local_input:
raise exc.InputValidationError(
"Can not set 'local_input_folder' or 'remote_input_folder' "
"with the 'postproc_setup' option."
)
else:
if has_local_input and has_remote_input:
raise exc.InputValidationError(
"Both the 'local_input_folder' and 'remote_input_folder' "
"inputs are set, but they are exclusive. Exactly one of "
"the two must be given."
)
if not (has_local_input or has_remote_input):
raise exc.InputValidationError(
"None of the 'local_input_folder' and 'remote_input_folder' "
"inputs is set. Exactly one of the two must be given."
)
############################################################
# End basic check on inputs
############################################################
random_projections = settings_dict.pop('random_projections', False)
write_win(
filename=folder.get_abs_path('{}.win'.format(self._SEEDNAME)),
parameters=param_dict,
structure=self.inputs.structure,
kpoints=self.inputs.kpoints,
kpoint_path=getattr(self.inputs, 'kpoint_path', None),
projections=getattr(self.inputs, 'projections', None),
random_projections=random_projections,
)
input_file_lists = self._get_input_file_lists(pp_setup=pp_setup)
#######################################################################
calcinfo = datastructures.CalcInfo()
calcinfo.uuid = self.uuid
calcinfo.local_copy_list = input_file_lists.local_copy_list + settings_dict.pop(
"additional_local_copy_list", []
)
calcinfo.remote_copy_list = input_file_lists.remote_copy_list + settings_dict.pop(
"additional_remote_copy_list", []
)
calcinfo.remote_symlink_list = input_file_lists.remote_symlink_list + settings_dict.pop(
"additional_remote_symlink_list", []
)
codeinfo = datastructures.CodeInfo()
codeinfo.code_uuid = self.inputs.code.uuid
codeinfo.cmdline_params = [self._SEEDNAME]
calcinfo.codes_info = [codeinfo]
calcinfo.codes_run_mode = datastructures.CodeRunMode.SERIAL
retrieve_list = [
self._SEEDNAME + suffix
for suffix in self._DEFAULT_RETRIEVE_SUFFIXES
]
exclude_retrieve_list = settings_dict.pop("exclude_retrieve_list", [])
retrieve_list = [
filename for filename in retrieve_list if not any(
fnmatch.fnmatch(filename, pattern)
for pattern in exclude_retrieve_list
)
]
calcinfo.retrieve_list = retrieve_list
calcinfo.retrieve_temporary_list = []
if pp_setup:
# The parser will then put this in a SinglefileData (if present)
calcinfo.retrieve_temporary_list.append(
'{}.nnkp'.format(self._SEEDNAME)
)
# Retrieves bands automatically, if they are calculated
calcinfo.retrieve_list += settings_dict.pop(
"additional_retrieve_list", []
)
# pop input keys not used here
settings_dict.pop('seedname', None)
if settings_dict:
raise exc.InputValidationError(
"The following keys in settings are unrecognized: {}".format(
list(settings_dict.keys())
)
)
return calcinfo
def _validate_input_output_names(self):
"""
This function validates the input and output file names given in the
settings Dict.
"""
# Let's check that the user-specified input filename ends with .win
if not self.inputs.metadata.options.input_filename.endswith(
self._REQUIRED_INPUT_SUFFIX
):
raise exc.InputValidationError(
"The input filename for Wannier90 (specified in the metadata.options.input_filename) "
"must end with .win, you specified instead '{}'".format(
self.inputs.metadata.options.input_filename
)
)
# The output filename is defined by Wannier90 based on the seedname.
# In AiiDA, the output_filename needs to be specified as a metadata.option to allow for
# `verdi calcjob outputcat` to work correctly. Here we check that, if the users manually changed
# the input_filename, they also changed the output_filename accordingly
expected_output_filename = self._SEEDNAME + ".wout"
if self.inputs.metadata.options.output_filename != expected_output_filename:
raise exc.InputValidationError(
"The output filename specified is wrong. You probably changed the metadata.options.input_filename "
"but you forgot to adapt the metadata.options.output_filename accordingly! Currently, you have: "
"input_filename: '{}', output_filename: '{}', while I would expect '{}'"
.format(
self.inputs.metadata.options.input_filename,
self.inputs.metadata.options.output_filename,
expected_output_filename
)
)
@staticmethod
def _validate_lowercase(dictionary):
"""
This function gets a dictionary and checks that all keys are lower-case.
:param dict_node: a dictionary
:raises InputValidationError: if any of the keys is not lower-case
:return: ``None`` if validation passes
"""
non_lowercase = []
for key in dictionary:
if key != key.lower():
non_lowercase.append(key)
if non_lowercase:
raise exc.InputValidationError(
"input keys to the Wannier90 plugin must be all lower-case, but the following aren't : {}"
.format(", ".join(non_lowercase))
)
def _validate_input_parameters(self, parameters):
"""
This function gets a dictionary with the content of the parameters Dict passed by the user
and performs some validation.
In particular, it checks that there are no blocked parameters keys passed.
:param dict_node: a dictionary
:raises InputValidationError: if any of the validation fails
:return: ``None`` if validation passes
"""
existing_blocked_keys = []
for key in self._BLOCKED_PARAMETER_KEYS:
if key in parameters:
existing_blocked_keys.append(key)
if existing_blocked_keys:
raise exc.InputValidationError(
'The following blocked keys were found in the parameters: {}'.
format(", ".join(existing_blocked_keys))
)
def _get_input_file_lists(self, pp_setup):
"""
Generate the lists of files | |
0, 0, 0, 0],
[1401, 4.261563, 0, 9999, -9999, 1.0, 100, 1, 89.339497, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1402, 1.799696, 0, 9999, -9999, 1.0, 100, 1, 26.328902, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1403, 24.445525, 0, 9999, -9999, 1.0, 100, 1, 119.651672, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1404, 20.813313, 0, 9999, -9999, 1.0, 100, 1, 134.800518, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1405, 1.333472, 0, 9999, -9999, 1.0, 100, 1, 29.550802, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1406, 0.474553, 0, 9999, -9999, 1.0, 100, 1, 10.763987, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1407, 0.003756, 0, 9999, -9999, 1.0, 100, 1, 0.211614, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1408, 0.376621, 0, 9999, -9999, 1.0, 100, 1, 41.078698, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1409, 0.019494, 0, 9999, -9999, 1.0, 100, 1, 12.019786, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1410, 0.019449, 0, 9999, -9999, 1.0, 100, 1, 37.466518, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1411, 3.357106, 0, 9999, -9999, 1.0, 100, 1, 39.395367, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1418, 0.117764, 0, 9999, -9999, 1.0, 100, 1, 88.264613, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1419, 0.01415, 0, 9999, -9999, 1.0, 100, 1, 33.260903, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1421, 1.3e-05, 0, 9999, -9999, 0.999504, 100, 1, 6.972369, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1422, 0.000963, 0, 9999, -9999, 1.0, 100, 1, 4.730495, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1424, 8.304232, 0, 9999, -9999, 1.0, 100, 1, 219.092115, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1425, 2.424617, 0, 9999, -9999, 1.0, 100, 1, 21.366402, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1426, 10.725418, 0, 9999, -9999, 1.0, 100, 1, 68.762602, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1427, 1.217691, 0, 9999, -9999, 1.0, 100, 1, 480.698671, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1428, 0.720472, 0, 9999, -9999, 1.0, 100, 1, 334.885743, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1429, 0.104011, 0, 9999, -9999, 1.0, 100, 1, 13.279826, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1431, 46.796071, 0, 9999, -9999, 1.0, 100, 1, 227.662022, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1432, 0.218513, 0, 9999, -9999, 1.0, 100, 1, 12.058931, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1433, 58.120712, 0, 9999, -9999, 1.0, 100, 1, 1289.241188, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1434, 0.625055, 0, 9999, -9999, 1.0, 100, 1, 99.440014, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1435, 0.690986, 0, 9999, -9999, 1.0, 100, 1, 86.713217, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1436, 11.144608, 0, 9999, -9999, 1.0, 100, 1, 98.434116, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1437, 0.10054, 0, 9999, -9999, 1.0, 100, 1, 238.321958, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1438, 0.02338, 0, 9999, -9999, 1.0, 100, 1, 392.815158, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1439, 11.413979, 0, 9999, -9999, 1.0, 100, 1, 99.103164, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1440, 0.003143, 0, 9999, -9999, 1.0, 100, 1, 0.833609, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1443, 12.158504, 0, 9999, -9999, 1.0, 100, 1, 103.005076, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1446, 5.382052, 0, 9999, -9999, 1.0, 100, 1, 758.547933, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1447, 3.920572, 0, 9999, -9999, 1.0, 100, 1, 89.477411, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1448, 0.447507, 0, 9999, -9999, 1.0, 100, 1, 7.523578, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1449, 8.39589, 0, 9999, -9999, 1.0, 100, 1, 95.437673, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1450, 3.280032, 0, 9999, -9999, 1.0, 100, 1, 59.256809, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1451, 4.921146, 0, 9999, -9999, 1.0, 100, 1, 68.198838, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1452, 1.539194, 0, 9999, -9999, 1.0, 100, 1, 24.068921, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1455, 0.056485, 0, 9999, -9999, 1.0, 100, 1, 0.654438, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1456, 3.964026, 0, 9999, -9999, 1.0, 100, 1, 50.054822, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1457, 0.080631, 0, 9999, -9999, 1.0, 100, 1, 2.002672, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1458, 0.009912, 0, 9999, -9999, 1.0, 100, 1, 0.246199, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1459, 0.000475, 0, 9999, -9999, 1.0, 100, 1, 5.309059, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1460, 0.003204, 0, 9999, -9999, 1.0, 100, 1, 101.498473, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1461, 0.57153, 0, 9999, -9999, 1.0, 100, 1, 17.951737, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1462, 0.078078, 0, 9999, -9999, 1.0, 100, 1, 2.402686, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1463, 0.001103, 0, 9999, -9999, 1.0, 100, 1, 0.711207, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1465, 0.255603, 0, 9999, -9999, 1.0, 100, 1, 5.299939, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1466, 0.505495, 0, 9999, -9999, 1.0, 100, 1, 5.685017, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1467, 0.116069, 0, 9999, -9999, 1.0, 100, 1, 2.096155, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1468, 0.37127, 0, 9999, -9999, 1.0, 100, 1, 23.789171, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1469, 1.589656, 0, 9999, -9999, 1.0, 100, 1, 65.007467, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1470, 7.2152, 0, 9999, -9999, 1.0, 100, 1, 78.965265, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1471, 16.379925, 0, 9999, -9999, 1.0, 100, 1, 159.165074, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1472, 0.691649, 0, 9999, -9999, 1.0, 100, 1, 11.980182, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1473, 0.449175, 0, 9999, -9999, 1.0, 100, 1, 8.362608, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1474, 0.120458, 0, 9999, -9999, 1.0, 100, 1, 1.398948, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1475, 0.028939, 0, 9999, -9999, 1.0, 100, 1, 0.39088, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1476, 52.700905, 0, 9999, -9999, 1.0, 100, 1, 250.480113, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1477, 0.036369, 0, 9999, -9999, 1.0, 100, 1, 12.122974, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1483, 0.153679, 0, 9999, -9999, 1.0, 100, 1, 3.599649, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1484, 0.001756, 0, 9999, -9999, 1.0, 100, 1, 0.02991, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1485, 0.033081, 0, 9999, -9999, 1.0, 100, 1, 0.563547, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1486, 0.170195, 0, 9999, -9999, 1.0, 100, 1, 2.89934, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1489, 0.000189, 0, 9999, -9999, 1.0, 100, 1, 0.118938, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1490, 4.740612, 0, 9999, -9999, 1.0, 100, 1, 782.463701, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1491, 3.369764, 0, 9999, -9999, 1.0, 100, 1, 84.622838, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1492, 1.793157, 0, 9999, -9999, 1.0, 100, 1, 229.927503, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1493, 1.792753, 0, 9999, -9999, 1.0, 100, 1, 83.557175, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1494, 0.024698, 0, 9999, -9999, 1.0, 100, 1, 404.486733, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1495, 0.009191, 0, 9999, -9999, 1.0, 100, 1, 66.920717, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1497, 3.74881, 0, 9999, -9999, 1.0, 100, 1, 89.070006, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1498, 0.464031, 0, 9999, -9999, 1.0, 100, 1, 105.800802, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1502, 0.016503, 0, 9999, -9999, 1.0, 100, 1, 0.938928, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1503, 1.615092, 0, 9999, -9999, 1.0, 100, 1, 45.972187, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1504, 10.334707, 0, 9999, -9999, 1.0, 100, 1, 188.822836, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1505, 0.001886, 0, 9999, -9999, 1.0, 100, 1, 26.765913, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1506, 0.196333, 0, 9999, -9999, 1.0, 100, 1, 56.406717, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1507, 0.136074, 0, 9999, -9999, 1.0, 100, 1, 15.438042, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1510, 2.932637, 0, 9999, -9999, 1.0, 100, 1, 107.008141, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1511, 0.907882, 0, 9999, -9999, 1.0, 100, 1, 155.22192, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1512, 0.970201, 0, 9999, -9999, 1.0, 100, 1, 64.130052, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1513, 0.225875, 0, 9999, -9999, 1.0, 100, 1, 23.051786, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1517, 0.000508, 0, 9999, -9999, 1.0, 100, 1, 1.286804, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1518, 0.01146, 0, 9999, -9999, 1.0, 100, 1, 0.670542, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1519, 0.000795, 0, 9999, -9999, 1.0, 100, 1, 0.04654, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1555, 1.194655, 0, 9999, -9999, 1.0, 100, 1, 103.865774, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1659, 0.033807, 0, 9999, -9999, 1.0, 100, 1, 91.77667, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1698, 0.248914, 0, 9999, -9999, 1.0, 100, 1, 25.60631, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1699, 0.011801, 0, 9999, -9999, 1.0, 100, 1, 5.356106, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1700, 0.000135, 0, 9999, -9999, 1.0, 100, 1, 55.825815, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1735, 0.411535, 0, 9999, -9999, 1.0, 100, 1, 153.887449, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1761, 0.66805, 0, 9999, -9999, 1.0, 100, 1, 48.443946, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1792, 0.039504, 0, 9999, -9999, 1.0, 100, 1, 8.914306, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1793, 0.070585, 0, 9999, -9999, 1.0, 100, 1, 41.722817, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1799, 0.58889, 0, 9999, -9999, 1.0, 100, 1, 51.10225, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1801, 0.014708, 0, 9999, -9999, 1.0, 100, 1, 21.006749, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1830, 0.325031, 0, 9999, -9999, 1.0, 100, 1, 27.724768, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1831, 0.282415, 0, 9999, -9999, 1.0, 100, 1, 69.89001, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1838, 0.336339, 0, 9999, -9999, 1.0, 100, 1, 25.580913, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1842, 0.085567, 0, 9999, -9999, 1.0, 100, 1, 7.468633, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1855, 0.348341, 0, 9999, -9999, 1.0, 100, 1, 121.687485, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1856, 0.466032, 0, 9999, -9999, 1.0, 100, 1, 63.654358, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1966, 0.030058, 0, 9999, -9999, 1.0, 100, 1, 2.674199, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1990, 0.635663, 0, 9999, -9999, 1.0, 100, 1, 50.351426, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1997, 0.004519, 0, 9999, -9999, 1.0, 100, 1, 26.592561, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1998, 0.130952, 0, 9999, -9999, 1.0, 100, 1, 12.126511, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[2003, 0.277352, 0, 9999, -9999, 1.0, 100, 1, 23.645071, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[2004, 0.200444, 0, 9999, -9999, 1.0, 100, 1, 17.73338, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
ppc["branch"] = array([
[586, 1, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[589, 108, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[590, 108, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[593, 112, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[594, 114, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[595, 115, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[597, 118, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[598, 118, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[599, 119, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[600, 119, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[601, 119, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[602, 121, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[603, 526, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[605, 126, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[607, 127, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[608, 127, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[609, 529, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[610, 530, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[611, 493, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[612, 493, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[613, 130, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[614, 130, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[616, 132, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[617, 133, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[618, 133, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[619, 134, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[621, 136, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[623, 139, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[624, 14, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[625, 140, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[628, 142, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[629, 145, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[631, 145, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[632, 145, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[637, 148, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[638, 149, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[639, 150, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[640, 153, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[641, 155, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[642, 533, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[643, 534, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[646, 536, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[647, 536, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[650, 166, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[652, 167, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[655, 170, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[657, 174, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[658, 175, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[659, 175, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[661, 177, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[662, 178, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[663, 178, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[664, 180, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[666, 180, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[668, 183, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[670, 183, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[672, 185, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[674, 496, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[675, 19, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[676, 19, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[677, 190, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[678, 194, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[679, 196, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[681, 197, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[683, 200, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[687, 202, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[689, 204, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[691, 209, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[692, 21, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[693, 21, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[694, 21, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[695, 210, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[696, 211, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[697, 211, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[698, 212, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[699, 213, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[700, 214, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[701, 215, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[702, 215, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[703, 217, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[704, 217, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[705, 217, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[707, 219, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[708, 221, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[711, 224, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[712, 225, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[713, 225, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[714, 225, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[716, 226, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[717, 227, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[719, 229, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[721, 545, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[722, 545, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[723, 235, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[724, 238, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[725, 239, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[726, 240, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[727, 243, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[728, 244, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[730, 547, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[731, 548, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[732, 247, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[733, 549, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[734, 252, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[735, 253, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[737, 256, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[738, 258, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[739, 264, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[741, 264, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[742, 264, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[743, 500, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[745, 273, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[746, 273, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[747, 273, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[748, 274, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[749, 274, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[750, 557, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[751, 28, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[753, 28, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[754, 558, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[758, 286, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[760, 287, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[761, 288, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[762, 289, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[763, 560, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[764, 560, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[765, 560, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[766, 292, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[767, 292, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[769, 293, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[771, 297, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[772, 3, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[774, 300, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[775, 300, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[776, 300, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[777, 300, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[778, 300, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[779, 302, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[781, 303, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[784, 563, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[785, 501, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[786, 31, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[787, 308, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[788, 311, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[789, 565, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[790, 314, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[791, 314, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[792, 316, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[793, 318, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[794, 319, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[795, 319, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[796, 567, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[797, 321, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[798, 324, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[799, 325, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[800, 326, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[801, 327, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[802, 327, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[803, 502, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[804, 33, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[805, 328, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[806, 328, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[807, 329, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[808, 329, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[809, 329, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[810, 568, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[811, 568, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[812, 333, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[813, 570, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[814, 570, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[815, 335, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[816, 335, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[817, 571, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[818, 34, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[820, 338, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[821, 338, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[822, 339, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[823, 339, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[825, 339, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[826, 339, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[827, 340, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[828, 342, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[829, 345, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[830, 345, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[832, 346, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[833, 348, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[834, 572, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[835, 572, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[836, 572, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[837, 350, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[839, 350, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[840, 573, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[841, 573, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[842, 352, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[843, 352, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[844, 352, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[845, 356, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[847, 36, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[848, 574, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[849, 574, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[850, 574, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[851, 575, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[852, 361, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[853, 362, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[854, 363, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[855, 363, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[856, 363, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[857, 365, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[858, 368, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[859, 368, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[860, 371, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[862, 372, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[863, 374, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[864, 374, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[865, 375, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[866, 376, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[867, 376, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[868, 503, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[869, 503, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[870, 503, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[872, 378, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[873, 576, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[874, 576, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[875, 381, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[876, 578, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[877, 578, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[880, 39, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[881, 388, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[882, 388, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[883, 388, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[884, 392, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[885, 393, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[886, 394, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[888, 397, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[889, 397, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[890, 40, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[893, 400, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[894, 400, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[895, 580, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[896, 581, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[897, 403, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[898, 403, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[899, 405, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[900, 405, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[902, 405, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[903, 406, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[905, 413, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[906, 414, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[907, 583, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[908, 416, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[909, 417, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[911, 419, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[913, 422, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[914, 423, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[915, 423, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[916, 43, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[917, 43, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[918, 424, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[919, 427, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[920, 428, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[921, 428, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[922, 429, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[923, 432, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[924, 433, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[925, 44, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[926, 434, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[927, 435, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[928, 435, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[929, 436, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[931, 439, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[932, 440, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[933, 441, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[934, 45, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[935, 45, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[936, 445, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[937, 447, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[939, 450, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[940, 451, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[942, 458, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[943, 458, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[944, 458, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[945, 459, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[946, 459, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[948, 462, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[950, 462, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[951, 47, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[952, 47, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[956, 478, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[957, 478, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[958, 478, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[959, 478, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[960, 479, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[963, 481, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[965, 49, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[966, 49, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[967, 49, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[968, 486, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[969, 486, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[970, 489, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[971, 51, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[972, 506, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[973, 506, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[974, 507, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[975, 58, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[976, 58, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[977, 59, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[978, 491, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[980, 508, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[981, 62, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[982, 62, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[983, 62, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[984, 63, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[985, 63, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[986, 64, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[987, 65, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[988, 66, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[990, 67, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[992, 67, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[993, 67, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[994, 67, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[995, 509, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[996, 510, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[997, 510, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[998, 70, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[999, 70, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1000, 71, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1001, 71, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1002, 71, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1003, 72, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1004, 72, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1007, 511, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1008, 75, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1010, 79, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1011, 79, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1012, 81, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1013, 81, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1014, 83, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1016, 84, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1018, 514, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1019, 514, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1021, 89, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1023, 515, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1025, 518, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1026, 518, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1027, 218, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1028, 221, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1029, 268, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1030, 269, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1031, 498, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1032, 1, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1033, 3, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1034, 4, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1035, 6, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1036, 7, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1037, 8, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1038, 9, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1039, 11, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1040, 14, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1041, 16, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1042, 17, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1044, 21, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1045, 23, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1046, 25, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1047, 27, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1048, 28, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1049, 29, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1050, 31, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1051, 33, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1052, 34, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1053, 35, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1054, 36, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1055, 38, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1056, 39, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1057, 40, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1058, 41, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1059, 43, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1060, 44, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1061, 45, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1062, 47, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1063, 48, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1064, 49, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1065, 50, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1066, 51, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1072, 59, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1073, 60, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1074, 62, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1076, 64, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1077, 65, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1078, 66, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1079, 67, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1081, 71, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1082, 72, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1083, 73, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1084, 75, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1085, 76, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1086, 77, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1087, 79, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1088, 80, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1089, 81, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1090, 82, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1091, 83, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1092, 84, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1093, 85, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1094, 88, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1095, 89, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1096, 90, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1097, 91, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1098, 92, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1099, 93, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1100, 97, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1101, 98, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1102, 101, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1103, 102, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1104, 103, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1105, 108, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1106, 109, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1107, 110, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1108, 111, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1109, 112, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1110, 113, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1111, 114, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1112, 115, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1113, 116, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1115, 119, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1116, 121, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1117, 122, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1118, 126, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1119, 127, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1120, 130, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1121, 131, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1122, 132, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1123, 133, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1124, 134, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1125, 135, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1126, 136, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1127, 137, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1128, 139, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1129, 140, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1130, 141, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1131, 142, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1132, 144, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1133, 145, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1134, 146, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1135, 147, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1136, 148, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1137, 149, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1138, 150, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1139, 151, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1140, 152, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1141, 153, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1142, 154, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1143, 155, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1144, 158, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1145, 161, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1146, 162, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1147, 163, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1148, 164, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1149, 166, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1150, 167, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1151, 168, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1152, 169, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1153, 170, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1154, 171, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1155, 172, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1156, 173, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1157, 174, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1158, 175, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1159, 176, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1160, 177, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1161, 178, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1162, 179, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1163, 180, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1164, 181, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1165, 182, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1166, 183, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1167, 185, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1168, 186, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1169, 187, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1170, 188, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1173, 192, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1174, 193, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1175, 194, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1176, 196, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1177, 197, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1178, 198, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1179, 199, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1180, 200, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1181, 202, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1182, 203, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1183, 204, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1184, 205, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1185, 206, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1186, 207, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1187, 208, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1188, 209, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1189, 210, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1196, 217, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1197, 218, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1198, 219, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1199, 221, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1200, 222, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1204, 226, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1208, 230, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1216, 242, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1217, 243, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1218, 244, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1219, 247, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1220, 251, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1225, 256, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1226, 257, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1229, 263, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1230, 264, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1231, 266, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1232, 267, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1233, 268, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1234, 269, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1235, 271, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1236, 272, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1239, 275, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1240, 276, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1241, 278, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1242, 281, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1243, 282, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1244, 283, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1245, 284, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1246, 285, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1248, 287, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1249, 288, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1250, 289, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1251, 291, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1252, 292, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1253, 293, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1254, 294, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1255, 295, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1256, 296, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1257, 297, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1258, 298, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1259, 299, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1260, 300, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1261, 302, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1264, 307, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1265, 308, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1266, 309, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1267, 311, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1268, 312, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1269, 314, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1270, 316, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1274, 321, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1275, 322, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1276, 323, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1277, 324, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1278, 325, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1279, 326, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1280, 327, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1281, 328, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1282, 329, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1283, 331, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1284, 333, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1285, 335, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1287, 338, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1288, 339, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1289, 340, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1290, 341, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1291, 342, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1292, 343, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1293, 344, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1294, 345, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1295, 346, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1296, 347, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1297, 348, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1298, 350, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1299, 352, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1300, 353, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1301, 354, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1306, 361, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1307, 362, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1308, 363, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1312, 367, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1316, 371, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1317, 372, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1319, 374, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1323, 378, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1324, 379, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1325, 381, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1326, 384, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1327, 385, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1328, 386, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1329, 387, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1330, 388, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1331, 390, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1333, 392, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1336, 395, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1337, 396, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1339, 398, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1340, 399, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1341, 400, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1345, 406, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1346, 407, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1347, 408, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1348, 410, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1349, 411, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1351, 413, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1352, 414, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1356, 419, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1357, 420, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1359, 422, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1360, 423, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1361, 424, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1362, 425, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1366, 429, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1371, 434, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1372, 435, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1373, 436, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1374, 437, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1375, 438, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1376, 439, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1377, 440, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1378, 441, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1379, 442, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1380, 443, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1381, 445, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1382, 446, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1383, 447, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1384, 448, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1385, 449, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1386, 450, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1387, 451, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1388, 453, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1389, 454, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1390, 455, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1391, 456, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1392, 457, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1393, 458, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1394, 459, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1395, 460, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1397, 462, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1398, 463, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1399, 464, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1400, 465, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1401, 466, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1402, 467, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1403, 468, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1404, 469, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1405, 470, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1406, 471, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1407, 472, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1408, 473, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1409, 474, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1410, 475, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1411, 476, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1418, 483, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1419, 484, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1421, 486, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1422, 487, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1424, 489, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1425, 490, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1426, 491, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1427, 492, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1428, 493, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1429, 494, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1431, 496, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1432, 497, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1433, 498, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1434, 499, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1435, 500, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1436, 501, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1437, 502, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1438, 503, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1439, 504, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1440, 505, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1443, 508, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1446, 511, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1447, 512, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1448, 513, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1449, 514, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1450, 515, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1451, 516, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1452, 517, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1455, 520, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1456, 521, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1457, 522, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1458, 523, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1459, 524, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1460, 525, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1461, 526, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1462, 527, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1463, 528, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1465, 530, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1466, 531, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1467, 532, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1468, 533, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1469, 534, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1470, 535, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1471, 536, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1472, 537, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1473, 538, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1474, 539, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1475, 540, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1476, 541, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1477, 542, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1483, 548, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1484, 549, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1485, 550, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1486, 551, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1489, 555, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1490, 556, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1491, 557, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1492, 558, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1493, 559, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1494, 560, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1495, 561, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1497, 563, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1498, 564, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1502, 568, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1503, 569, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1504, 570, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1505, 571, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1506, 572, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1507, 573, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1510, 576, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1511, 577, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1512, 578, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1513, 579, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1517, 583, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1518, 584, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1519, 585, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1555, 53, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1659, 189, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1698, 235, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1699, 237, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1700, 238, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1735, 286, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1761, 319, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1792, 357, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1793, 359, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1799, 366, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1801, 368, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1830, 403, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1831, 404, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1838, 412, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1842, 417, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1855, 430, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1856, 431, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1966, 543, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1990, 567, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1997, 574, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1998, 575, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[2003, 580, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[2004, 581, 0, 1e-05, 0, 9999, 9999, 9999, 0, 0, 1, -360, 360 ],
[1, 490, 0, 0.01433884297520661, 0.151691958358336, 991.0, 991.0, 991.0, 0, 2, 1, -360, 43.375 ],
[3, 4, 0, 0.006291637811634348, 0.903417549506624, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 72.681 ],
[491, 6, 0, 0.011200661157024791, 0.118492839955776, 991.0, 991.0, 991.0, 0, 2, 1, -360, 33.882 ],
[7, 5, 0, 0.005794840720221606, 0.20802058859584005, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 33.471 ],
[8, 9, 0, 0.0024379328254847646, 0.350063268897336, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 28.163 ],
[492, 11, 0, 0.018224793388429753, 0.0482004476327704, 495.0, 495.0, 495.0, 0, 1, 1, -360, 27.565 ],
[11, 493, 0, 0.030286942148760328, 0.08010209706571599, 495.0, 495.0, 495.0, 0, 1, 1, -360, 45.809 ],
[492, 493, 0, 0.04521652892561983, 0.11958747011094399, 495.0, 495.0, 495.0, 0, 1, 1, -360, 68.39 ],
[494, 14, 0, 0.012990743801652892, 0.137430291356512, 991.0, 991.0, 991.0, 0, 2, 1, -360, 39.297 ],
[13, 15, 0, 0.007681959833795014, 0.27576354266704156, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 44.371 ],
[16, 5, 0, 0.006275623268698061, 0.22527950450957998, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 36.248000000000005 ],
[17, 18, 0, 0.04623522622347646, 0.9335989000302801, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 200.291 ],
[17, 12, 0, 0.0056020313942728535, 0.113118303398186, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 24.268 ],
[14, 495, 0, 0.0017957024793388433, 0.018996904156819597, 991.0, 991.0, 991.0, 0, 1, 1, -360, 5.432 ],
[494, 19, 0, 0.010246611570247935, 0.10839986031771602, 991.0, 991.0, 991.0, 0, 1, 1, -360, 30.996 ],
[20, 21, 0, 0.005415685595567867, 0.19440984828307922, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 31.281 ],
[20, 22, 0, 0.0049706544321329645, 0.713737278110032, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 57.42100000000001 ],
[497, 23, 0, 0.002190413223140496, 0.005793146490362, 495.0, 495.0, 495.0, 0, 1, 1, -360, 3.313 ],
[23, 499, 0, 0.020799669421487598, 0.22004164444829602, 991.0, 991.0, 991.0, 0, 1, 1, -360, 62.919 ],
[25, 26, 0, 0.00141845567867036, 0.050919084651523595, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 8.193 ],
[25, 22, 0, 0.0035578254847645433, 0.0319293051869808, 856.0, 856.0, 856.0, 0, 1, 1, -360, 10.275 ],
[23, 27, 0, 0.027738181818181818, 0.073361203699828, 495.0, 495.0, 495.0, 0, 1, 1, -360, 41.95399999999999 ],
[28, 23, 0, 0.012841652892561981, 0.0339632611780132, 495.0, 495.0, 495.0, 0, 1, 1, -360, 19.423 ],
[8, 21, 0, 0.004948753462603878, 0.17764812836304802, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 28.584 ],
[9, 29, 0, 0.002212863573407202, 0.31774552934092004, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 25.563000000000002 ],
[30, 25, 0, 0.019958795013850415, 0.17911796401827998, 856.0, 856.0, 856.0, 0, 1, 1, -360, 57.641000000000005 ],
[31, 32, 0, 0.0299776084949446, 0.605319030583196, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 129.863 ],
[32, 33, 0, 0.016762234533725762, 0.33846927983213604, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 72.61399999999999 ],
[34, 35, 0, 0.001931900826446281, 0.020437759184893597, 991.0, 991.0, 991.0, 0, 2, 1, -360, 5.843999999999999 ],
[35, 36, 0, 0.0008730578512396695, 0.0092361605077588, 991.0, 991.0, 991.0, 0, 2, 1, -360, 2.641 ],
[490, 6, 0, 0.049352066115702475, 0.130525028606764, 495.0, 495.0, 495.0, 0, 1, 1, -360, 74.645 ],
[37, 10, 0, 0.02404639889196676, 0.485553838251812, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 104.169 ],
[10, 38, 0, 0.006848799630657894, 0.13829351176534158, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 29.669 ],
[37, 38, 0, 0.01437834718372576, 1.1613317560186958, 2567.0, 2567.0, 2567.0, 0, 1, 1, -360, 124.574 ],
[39, 40, 0, 0.04521629732222991, 0.913024308337812, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 195.877 ],
[39, 41, 0, 0.017466989843005543, 0.35269996139852006, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 75.667 ],
[42, 41, 0, 0.031145429362880884, 0.6289001042979919, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 134.922 ],
[18, 42, 0, 0.03439750692520776, 0.6945672650962679, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 149.01 ],
[492, 43, 0, 0.01819173553719008, 0.192452068436848, 991.0, 991.0, 991.0, 0, 2, 1, -360, 55.03 ],
[44, 45, 0, 0.02562314049586777, 0.067767398802972, 495.0, 495.0, 495.0, 0, 1, 1, -360, 38.755 ],
[44, 505, 0, 0.006061487603305785, 0.0160312607980052, 495.0, 495.0, 495.0, 0, 1, 1, -360, 9.168 ],
[46, 12, 0, 0.0014741170360110802, 0.2116687641962416, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 17.029 ],
[47, 48, 0, 0.005344182825484765, 0.01199019212302604, 428.0, 428.0, 428.0, 0, 1, 1, -360, 7.7170000000000005 ],
[49, 50, 0, 0.0019151662049861494, 0.0171874439892256, 856.0, 856.0, 856.0, 0, 1, 1, -360, 5.531000000000001 ],
[31, 33, 0, 0.013475992613088641, 0.27211225959163604, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 58.378 ],
[31, 51, 0, 0.003518611495844875, 0.5052381383693519, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 40.647 ],
[52, 53, 0, 0.010464421745152355, 1.5025884408875438, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 120.885 ],
[52, 54, 0, 0.0076126500461911354, 0.1537174637168, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 32.978 ],
[506, 55, 0, 0.012634380165289257, 0.133660287181212, 991.0, 991.0, 991.0, 0, 1, 1, -360, 38.219 ],
[506, 507, 0, 0.044157355371900825, 0.11678619613628, 495.0, 495.0, 495.0, 0, 1, 1, -360, 66.788 ],
[57, 506, 0, 0.004687272727272727, 0.049587095736244, 991.0, 991.0, 991.0, 0, 1, 1, -360, 14.179 ],
[57, 58, 0, 0.014436363636363634, 0.0381809096340232, 495.0, 495.0, 495.0, 0, 1, 1, -360, 21.835 ],
[58, 506, 0, 0.019797685950413223, 0.052360391943288, 495.0, 495.0, 495.0, 0, 1, 1, -360, 29.944000000000003 ],
[59, 60, 0, 0.019407548476454296, 0.174170863885556, 856.0, 856.0, 856.0, 0, 1, 1, -360, 56.049 ],
[508, 62, 0, 0.051111404958677685, 0.03379452026753001, 248.0, 248.0, 248.0, 0, 1, 1, -360, 38.653 ],
[30, 61, 0, 0.03143698060941828, 0.28212765137935203, 856.0, 856.0, 856.0, 0, 1, 1, -360, 90.79 ],
[63, 506, 0, 0.027457190082644623, 0.072618044249872, 495.0, 495.0, 495.0, 0, 1, 1, -360, 41.528999999999996 ],
[13, 64, 0, 0.0014816481994459833, 0.2127501654814608, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 17.116 ],
[65, 66, 0, 0.03778185595567867, 0.7629053006222161, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 163.671 ],
[59, 67, 0, 0.0051880193905817175, 0.046559297286324804, 856.0, 856.0, 856.0, 0, 1, 1, -360, 14.982999999999999 ],
[61, 67, 0, 0.012931440443213295, 0.1160517597580644, 856.0, 856.0, 856.0, 0, 1, 1, -360, 37.346 ],
[68, 69, 0, 0.011149584487534626, 0.4002427745096039, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 64.4 ],
[70, 69, 0, 0.009625346260387812, 0.345526355460808, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 55.596000000000004 ],
[71, 72, 0, 0.008878635734072021, 0.318721276477736, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 51.283 ],
[73, 74, 0, 0.012529547553116345, 0.253001288604392, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 54.278 ],
[37, 75, 0, 0.027459141274238225, 0.5544652029066119, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 118.95299999999999 ],
[72, 75, 0, 0.006688711911357341, 0.240108375006292, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 38.634 ],
[37, 72, 0, 0.036222068328739615, 0.7314094881920841, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 156.914 ],
[76, 77, 0, 0.004683777700831025, 0.6725445900750401, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 54.107 ],
[77, 51, 0, 0.00363183864265928, 0.5214964473447999, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 41.955 ],
[73, 72, 0, 0.025475069252077563, 0.514402082018968, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 110.35799999999999 ],
[18, 40, 0, 0.01302770083102493, 0.26306018504072, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 56.43600000000001 ],
[492, 45, 0, 0.0308703030303719, 0.18370114733484796, 743.0, 743.0, 743.0, 0, 1, 1, -360, 70.03699999999999 ],
[10, 74, 0, 0.030167359187465374, 0.609150547206812, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 130.685 ],
[45, 511, 0, 0.08203371900826446, 0.05424014819960001, 248.0, 248.0, 248.0, 0, 1, 1, -360, 62.038000000000004 ],
[78, 32, 0, 0.013458795013850415, 0.48313777647302397, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 77.738 ],
[79, 80, 0, 0.0038086911357340715, 0.1367226831743568, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 21.999000000000002 ],
[81, 79, 0, 0.010767832409972299, 0.3865388099484561, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 62.195 ],
[34, 82, 0, 0.0015497520661157025, 0.00409874294399768, 495.0, 495.0, 495.0, 0, 1, 1, -360, 2.344 ],
[83, 84, 0, 0.00902611570247934, 0.0238720301499152, 495.0, 495.0, 495.0, 0, 1, 1, -360, 13.652000000000001 ],
[83, 499, 0, 0.04179570247933885, 0.0276350398834796, 248.0, 248.0, 248.0, 0, 1, 1, -360, 31.608 ],
[85, 86, 0, 0.00802354570637119, 0.28802563884886, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 46.343999999999994 ],
[87, 86, 0, 0.01904968836565097, 0.683837154069184, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 110.031 ],
[88, 89, 0, 0.00380297520661157, 0.010058007429140002, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.752000000000001 ],
[90, 86, 0, 0.012097818559556786, 0.434282055192244, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 69.877 ],
[91, 86, 0, 9.26246537396122e-05, 0.013299992817559201, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 1.07 ],
[86, 92, 0, 0.0001852493074792244, 0.0066499964087796005, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.07 ],
[86, 93, 0, 0.008152181440443215, 0.292643346635492, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 47.086999999999996 ],
[94, 86, 0, 0.012883829639889197, 0.46249792780547194, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 74.417 ],
[86, 95, 0, 0.010421052631578947, 0.37409026526870803, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 60.192 ],
[513, 517, 0, 0.0008733884297520661, 0.0023099144321748, 495.0, 495.0, 495.0, 0, 1, 1, -360, 1.321 ],
[97, 66, 0, 0.03812777008310249, 0.34217338998058805, 856.0, 856.0, 856.0, 0, 1, 1, -360, 110.113 ],
[42, 98, 0, 0.003091759002770083, 0.44394630230884, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 35.716 ],
[99, 100, 0, 0.016371537396121884, 0.587698093837988, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 94.56200000000001 ],
[42, 101, 0, 0.008165339335180054, 0.29311568282888, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 47.163000000000004 ],
[102, 42, 0, 0.012403047091412742, 0.44523901189173193, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 71.64 ],
[103, 87, 0, 0.007073060941828254, 0.25390556381756, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 40.854 ],
[104, 103, 0, 0.0028852146814404432, 0.1035721403291428, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.665 ],
[105, 87, 0, 0.006406682825484765, 0.22998422159488002, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 37.005 ],
[106, 107, 0, 0.005714219759923823, 0.11538365264216799, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 24.754 ],
[108, 107, 0, 0.0025427631578947367, 0.09127896939786201, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 14.687000000000001 ],
[109, 106, 0, 0.003030470914127424, 0.10878648330773438, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 17.504 ],
[110, 111, 0, 0.019821849030470913, 0.7115558306889919, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 114.491 ],
[87, 112, 0, 0.006135907202216068, 0.220264039928212, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 35.441 ],
[113, 87, 0, 0.003981648199445983, 0.14293141813921081, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 22.998 ],
[87, 85, 0, 0.011046225761772853, 0.3965324494097, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 63.803000000000004 ],
[110, 114, 0, 0.011665339335180056, 0.418757110306188, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 67.37899999999999 ],
[115, 116, 0, 0.007048925619834712, 0.07457124214588401, 991.0, 991.0, 991.0, 0, 1, 1, -360, 21.323 ],
[117, 118, 0, 0.005987534626038782, 0.21493782785077598, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 34.584 ],
[117, 119, 0, 0.0038738746537396117, 0.5562504472696961, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 44.751000000000005 ],
[117, 120, 0, 0.005886686288088643, 0.8452704781039522, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 68.003 ],
[121, 122, 0, 0.0021170360110803325, 0.0759964075574972, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 12.228 ],
[123, 124, 0, 0.0018386426592797783, 0.0660027680945204, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 10.62 ],
[125, 126, 0, 0.004941135734072022, 0.17737467056702802, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 28.54 ],
[127, 119, 0, 0.0029027008310249305, 0.1041998502705648, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.766 ],
[118, 128, 0, 0.007397160664819945, 0.265539950057812, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 42.726000000000006 ],
[121, 119, 0, 0.002552458448753463, 0.0916270065931116, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 14.743 ],
[530, 527, 0, 0.022726611570247933, 0.060106736329903994, 495.0, 495.0, 495.0, 0, 1, 1, -360, 34.374 ],
[125, 130, 0, 0.002931440443213297, 0.105231531956442, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.932000000000002 ],
[125, 123, 0, 0.0019078081717451524, 0.2739425623421336, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 22.039 ],
[131, 132, 0, 0.0035744459833795014, 0.12831385593973843, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 20.646 ],
[133, 123, 0, 0.003864439058171745, 0.13872389704704202, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 22.320999999999998 ],
[524, 134, 0, 0.008092231404958678, 0.08560847143881999, 991.0, 991.0, 991.0, 0, 1, 1, -360, 24.479 ],
[135, 136, 0, 0.005242901662049862, 0.1882073282678, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 30.283 ],
[123, 131, 0, 0.003138331024930748, 0.1126583971045252, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 18.127 ],
[117, 128, 0, 0.010800034626038782, 0.38769479063117196, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 62.381 ],
[137, 521, 0, 0.013832396694214875, 0.14633421587532003, 991.0, 991.0, 991.0, 0, 2, 1, -360, 41.843 ],
[531, 514, 0, 0.0059504132231404955, 0.035409362037522, 743.0, 743.0, 743.0, 0, 1, 1, -360, 13.5 ],
[139, 521, 0, 0.021257520661157023, 0.05622132386323199, 495.0, 495.0, 495.0, 0, 1, 1, -360, 32.152 ],
[140, 514, 0, 0.018527603305785127, 0.04900131122836401, 495.0, 495.0, 495.0, 0, 1, 1, -360, 28.023000000000003 ],
[522, 141, 0, 0.012168595041322314, 0.032183175718526795, 495.0, 495.0, 495.0, 0, 1, 1, -360, 18.405 ],
[142, 523, 0, 0.007060165289256198, 0.0746901476577608, 991.0, 991.0, 991.0, 0, 2, 1, -360, 21.357 ],
[530, 526, 0, 0.020281652892561983, 0.053640374808152, 495.0, 495.0, 495.0, 0, 1, 1, -360, 30.676 ],
[140, 532, 0, 0.004669090909090909, 0.0123486871461184, 495.0, 495.0, 495.0, 0, 1, 1, -360, 7.062 ],
[142, 144, 0, 0.006678126721756199, 0.0397397958689204, 743.0, 743.0, 743.0, 0, 1, 1, -360, 15.151 ],
[140, 522, 0, 0.020450247933884298, 0.05408627047793199, 495.0, 495.0, 495.0, 0, 1, 1, -360, 30.930999999999997 ],
[145, 146, 0, 0.028527603305785125, 0.07544904460236, 495.0, 495.0, 495.0, 0, 1, 1, -360, 43.148 ],
[147, 523, 0, 0.02461289256198347, 0.0650955220034416, 495.0, 495.0, 495.0, 0, 2, 1, -360, 37.227 ],
[144, 523, 0, 0.008479338842975206, 0.0224259292904064, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.825 ],
[139, 523, 0, 0.029245619834710742, 0.0193370088934308, 248.0, 248.0, 248.0, 0, 1, 1, -360, 22.116999999999997 ],
[140, 141, 0, 0.008362975206611572, 0.022118173847506, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.649000000000001 ],
[528, 526, 0, 0.015389090909090908, 0.0407006573227188, 495.0, 495.0, 495.0, 0, 1, 1, -360, 23.276 ],
[528, 148, 0, 0.014306115702479338, 0.0378364333712244, 495.0, 495.0, 495.0, 0, 1, 1, -360, 21.638 ],
[149, 150, 0, 0.013604628099173552, 0.035981157661543604, 495.0, 495.0, 495.0, 0, 1, 1, -360, 20.576999999999998 ],
[145, 528, 0, 0.00320595041322314, 0.0084790121737992, 495.0, 495.0, 495.0, 0, 1, 1, -360, 4.849 ],
[530, 151, 0, 0.013144462809917355, 0.0347641247737036, 495.0, 495.0, 495.0, 0, 1, 1, -360, 19.881 ],
[524, 152, 0, 0.014598347107438016, 0.03860931919944, 495.0, 495.0, 495.0, 0, 1, 1, -360, 22.08 ],
[149, 525, 0, 0.016897190082644627, 0.17875695122823998, 991.0, 991.0, 991.0, 0, 2, 1, -360, 51.114 ],
[139, 514, 0, 0.007824132231404959, 0.020693056313687997, 495.0, 495.0, 495.0, 0, 1, 1, -360, 11.834000000000001 ],
[126, 120, 0, 0.012780297783933518, 0.458781387757004, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 73.819 ],
[530, 153, 0, 0.02254545454545455, 0.059627617060924, 495.0, 495.0, 495.0, 0, 1, 1, -360, 34.1 ],
[528, 147, 0, 0.15786710743801652, 0.104380679149868, 248.0, 248.0, 248.0, 0, 1, 1, -360, 119.387 ],
[528, 154, 0, 0.006528264462809917, 0.017265779790547203, 495.0, 495.0, 495.0, 0, 2, 1, -360, 9.874 ],
[130, 120, 0, 0.01450502077562327, 0.5206947188067639, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 83.781 ],
[528, 155, 0, 0.16064132231404957, 0.1062149715341, 248.0, 248.0, 248.0, 0, 1, 1, -360, 121.485 ],
[524, 533, 0, 0.004432727272727273, 0.0468942356109744, 991.0, 991.0, 991.0, 0, 1, 1, -360, 13.409 ],
[524, 149, 0, 0.0056413223140495865, 0.05968007537478799, 991.0, 991.0, 991.0, 0, 2, 1, -360, 17.065 ],
[154, 150, 0, 0.007539173553719007, 0.0199394052006688, 495.0, 495.0, 495.0, 0, 2, 1, -360, 11.402999999999999 ],
[157, 110, 0, 0.009962084487534625, 0.357614433044424, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 57.541000000000004 ],
[119, 158, 0, 0.0002490189289012004, 0.08045252664623159, 5134.0, 5134.0, 5134.0, 0, 3, 1, -360, 4.315 ],
[159, 60, 0, 0.010967451523545706, 0.0984261617997728, 856.0, 856.0, 856.0, 0, 1, 1, -360, 31.674 ],
[536, 161, 0, 0.021314380165289255, 0.056371704363524, 495.0, 495.0, 495.0, 0, 1, 1, -360, 32.238 ],
[115, 151, 0, 0.00379404958677686, 0.0401376047510724, 991.0, 991.0, 991.0, 0, 1, 1, -360, 11.477 ],
[162, 134, 0, 0.0015910743801652895, 0.016832124393744, 991.0, 991.0, 991.0, 0, 2, 1, -360, 4.813 ],
[115, 526, 0, 0.0037884297520661154, 0.010019537998747198, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.73 ],
[138, 87, 0, 0.0011838642659279777, 0.16999131006813442, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 13.675999999999998 ],
[123, 163, 0, 0.0022778739612188364, 0.08177009602828919, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.157 ],
[112, 164, 0, 0.0008672957063711912, 0.12453516639176802, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 10.019 ],
[112, 165, 0, 0.005989439058171744, 0.21500619230086396, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 34.595 ],
[166, 165, 0, 0.002632790858725762, 0.09451074335350361, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 15.207 ],
[167, 537, 0, 0.00832595041322314, 0.08808100664460242, 991.0, 991.0, 991.0, 0, 2, 1, -360, 25.186 ],
[168, 104, 0, 0.002552458448753463, 0.0916270065931116, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 14.743 ],
[531, 520, 0, 0.016156694214876033, 0.042730794079516396, 495.0, 495.0, 495.0, 0, 1, 1, -360, 24.436999999999998 ],
[139, 520, 0, 0.010682314049586776, 0.0282522993797748, 495.0, 495.0, 495.0, 0, 1, 1, -360, 16.157 ],
[520, 169, 0, 0.0011328925619834712, 0.0119849761681232, 991.0, 991.0, 991.0, 0, 2, 1, -360, 3.427 ],
[168, 105, 0, 0.007340893351800554, 0.26352009133553606, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 42.401 ],
[520, 170, 0, 0.005842644628099174, 0.015452470732151198, 495.0, 495.0, 495.0, 0, 2, 1, -360, 8.837 ],
[171, 89, 0, 0.005505454545454546, 0.058242717567848004, 991.0, 991.0, 991.0, 0, 1, 1, -360, 16.654 ],
[521, 172, 0, 0.006304793388429752, 0.06669899780522001, 991.0, 991.0, 991.0, 0, 1, 1, -360, 19.072 ],
[123, 173, 0, 0.005247403047091413, 0.18836891696656402, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 30.309 ],
[521, 174, 0, 0.013300495867768597, 0.035176796844864404, 495.0, 495.0, 495.0, 0, 1, 1, -360, 20.117 ],
[37, 39, 0, 0.004338873499549862, 0.35044859579205606, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 37.592 ],
[530, 175, 0, 0.013128595041322313, 0.0347221581224188, 495.0, 495.0, 495.0, 0, 1, 1, -360, 19.857 ],
[530, 176, 0, 0.005685289256198347, 0.01503630144005, 495.0, 495.0, 495.0, 0, 1, 1, -360, 8.599 ],
[88, 530, 0, 0.006015867768595041, 0.0159106066755372, 495.0, 495.0, 495.0, 0, 1, 1, -360, 9.099 ],
[177, 496, 0, 0.018632066115702478, 0.19711036673178398, 991.0, 991.0, 991.0, 0, 2, 1, -360, 56.361999999999995 ],
[178, 525, 0, 0.03106842975206612, 0.08216895464241199, 495.0, 495.0, 495.0, 0, 1, 1, -360, 46.99100000000001 ],
[179, 493, 0, 0.057079669421487594, 0.15096278779194802, 495.0, 495.0, 495.0, 0, 1, 1, -360, 86.333 ],
[180, 181, 0, 0.041027438016528923, 0.10850827416682, 495.0, 495.0, 495.0, 0, 1, 1, -360, 62.053999999999995 ],
[182, 180, 0, 0.00866314049586777, 0.09164817200545601, 991.0, 991.0, 991.0, 0, 2, 1, -360, 26.206 ],
[179, 181, 0, 0.01957223140495868, 0.051764115772731996, 495.0, 495.0, 495.0, 0, 1, 1, -360, 29.603 ],
[180, 493, 0, 0.06676561983471074, 0.17657993119175203, 495.0, 495.0, 495.0, 0, 1, 1, -360, 100.98299999999999 ],
[183, 30, 0, 0.0024804362880886427, 0.356166349712776, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 28.654 ],
[183, 21, 0, 0.0025647506925207757, 0.36827307214930394, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 29.628 ],
[538, 185, 0, 0.018631404958677687, 0.0123189607681008, 248.0, 248.0, 248.0, 0, 1, 1, -360, 14.09 ],
[538, 89, 0, 0.014509752066115702, 0.038375005396288, 495.0, 495.0, 495.0, 0, 1, 1, -360, 21.945999999999998 ],
[184, 186, 0, 0.0016554709141274237, 0.059427351084826, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 9.562000000000001 ],
[184, 187, 0, 0.002698753462603878, 0.09687863927102919, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 15.588 ],
[520, 172, 0, 0.0034188429752066113, 0.0361682589818792, 991.0, 991.0, 991.0, 0, 2, 1, -360, 10.342 ],
[89, 175, 0, 0.0037309090909090903, 0.0098674088877672, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.643 ],
[185, 89, 0, 0.005812892561983471, 0.0153737832609196, 495.0, 495.0, 495.0, 0, 1, 1, -360, 8.792 ],
[89, 188, 0, 0.003108760330578513, 0.008221966434607202, 495.0, 495.0, 495.0, 0, 1, 1, -360, 4.702 ],
[189, 190, 0, 0.008599492151454294, 0.17364414688031998, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 37.253 ],
[539, 172, 0, 0.0021570247933884296, 0.022819366646419197, 991.0, 991.0, 991.0, 0, 2, 1, -360, 6.525 ],
[504, 192, 0, 0.0003084297520661157, 0.00326290713886456, 991.0, 991.0, 991.0, 0, 2, 1, -360, 0.9329999999999999 ],
[105, 186, 0, 0.003273372576177285, 0.1175060580379876, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 18.907 ],
[105, 187, 0, 0.0021712257617728533, 0.0779416868808324, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 12.540999999999999 ],
[539, 193, 0, 0.005608595041322314, 0.01483346262541, 495.0, 495.0, 495.0, 0, 1, 1, -360, 8.482999999999999 ],
[187, 194, 0, 4.8649584487534626e-05, 0.0069856037041576, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 0.562 ],
[539, 540, 0, 0.004394710743801653, 0.0116230138006708, 495.0, 495.0, 495.0, 0, 1, 1, -360, 6.647 ],
[539, 196, 0, 0.00332297520661157, 0.008788516227194, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.026 ],
[197, 540, 0, 0.004737190082644629, 0.012528794024621601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 7.165 ],
[110, 198, 0, 0.00018724030470914128, 0.02688587333118328, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 2.1630000000000003 ],
[197, 539, 0, 0.009172231404958677, 0.024258473063998802, 495.0, 495.0, 495.0, 0, 1, 1, -360, 13.873 ],
[199, 537, 0, 0.03612826446280991, 0.0238877676441712, 248.0, 248.0, 248.0, 0, 1, 1, -360, 27.322 ],
[134, 526, 0, 0.007771239669421488, 0.020553167475975197, 495.0, 495.0, 495.0, 0, 1, 1, -360, 11.754000000000001 ],
[200, 193, 0, 0.0009322314049586776, 0.009862163056380801, 991.0, 991.0, 991.0, 0, 2, 1, -360, 2.82 ],
[4, 201, 0, 0.013726108033240996, 0.49273365914097605, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 79.282 ],
[202, 86, 0, 0.00013365650969529087, 0.00479794133417816, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.772 ],
[85, 203, 0, 0.0019011426592797783, 0.2729854600553416, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 21.962 ],
[147, 204, 0, 0.0073874380165289254, 0.0781523963903056, 991.0, 991.0, 991.0, 0, 2, 1, -360, 22.346999999999998 ],
[147, 205, 0, 0.005959669421487603, 0.00394049369636956, 248.0, 248.0, 248.0, 0, 1, 1, -360, 4.507 ],
[123, 206, 0, 0.0005753116343490305, 0.0826091142668064, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 6.646 ],
[537, 207, 0, 0.018456198347107437, 0.048812461297776, 495.0, 495.0, 495.0, 0, 1, 1, -360, 27.915 ],
[165, 208, 0, 0.00414612188365651, 0.14883562055771601, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 23.948 ],
[4, 94, 0, 0.013687673130193905, 0.49135394025941603, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 79.06 ],
[4, 2, 0, 5.2054478301015697e-05, 0.016817654469309, 5134.0, 5134.0, 5134.0, 0, 3, 1, -360, 0.902 ],
[209, 4, 0, 0.0022369286703601107, 0.32120104149338397, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 25.840999999999998 ],
[119, 163, 0, 0.003535145429362881, 0.12690306230914922, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 20.419 ],
[210, 3, 0, 0.0003150969529085873, 0.011311208844832242, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.82 ],
[99, 211, 0, 0.0035045013850415513, 0.1258030161741948, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 20.242 ],
[99, 69, 0, 0.021717970914127423, 0.7796219621557, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 125.443 ],
[212, 99, 0, 0.008453774238227147, 0.30346978938770003, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 48.82899999999999 ],
[213, 214, 0, 0.01490115702479339, 0.15764073118032798, 991.0, 991.0, 991.0, 0, 2, 1, -360, 45.076 ],
[510, 215, 0, 0.002174710743801653, 0.09202587186721281, 1981.0, 1981.0, 1981.0, 0, 4, 1, -360, 13.157 ],
[128, 69, 0, 0.010711651662049862, 1.538088234801848, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 123.741 ],
[216, 69, 0, 0.009628462603878117, 1.3825528982351443, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 111.228 ],
[217, 98, 0, 0.0012787396121883656, 0.045903620070299994, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 7.386 ],
[504, 218, 0, 0.027480991735537193, 0.072680994226412, 495.0, 495.0, 495.0, 0, 1, 1, -360, 41.565 ],
[177, 504, 0, 0.07054809917355372, 0.18658373169634002, 495.0, 495.0, 495.0, 0, 1, 1, -360, 106.704 ],
[219, 209, 0, 0.003938798476454294, 0.5655728721401839, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 45.501000000000005 ],
[219, 220, 0, 0.0013026315789473684, 0.1870451326342096, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 15.048 ],
[94, 95, 0, 0.01070740997229917, 0.38436979242743197, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 61.846000000000004 ],
[159, 221, 0, 0.009937153739612188, 0.356719480257712, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 57.397 ],
[34, 161, 0, 0.010965289256198347, 0.116002818645824, 991.0, 991.0, 991.0, 0, 2, 1, -360, 33.17 ],
[222, 221, 0, 0.0046457756232686975, 0.16677196601221997, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 26.834 ],
[211, 52, 0, 0.05267313019390582, 0.472709090515552, 856.0, 856.0, 856.0, 0, 1, 1, -360, 152.12 ],
[215, 223, 0, 0.04873190082644628, 0.128884831985184, 495.0, 495.0, 495.0, 0, 1, 1, -360, 73.707 ],
[224, 215, 0, 0.019086280991735535, 0.050478887076288004, 495.0, 495.0, 495.0, 0, 1, 1, -360, 28.868000000000002 ],
[225, 224, 0, 0.04200925619834711, 0.11110496071615601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 63.538999999999994 ],
[224, 223, 0, 0.031061818181818183, 0.082151468537468, 495.0, 495.0, 495.0, 0, 1, 1, -360, 46.981 ],
[226, 6, 0, 0.06420099173553719, 0.0424492677936932, 248.0, 248.0, 248.0, 0, 1, 1, -360, 48.552 ],
[7, 3, 0, 0.009332929362880887, 0.335029305054692, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 53.907 ],
[216, 227, 0, 0.01989941135734072, 0.7143401282507, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 114.939 ],
[228, 229, 0, 0.010545454545454545, 0.027890337012274, 495.0, 495.0, 495.0, 0, 1, 1, -360, 15.95 ],
[227, 230, 0, 0.003993074792243767, 0.573366419334696, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 46.128 ],
[231, 53, 0, 0.007193213296398893, 1.0328749562310842, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 83.096 ],
[544, 545, 0, 0.013061818181818181, 0.034545548464856, 495.0, 495.0, 495.0, 0, 1, 1, -360, 19.756 ],
[234, 235, 0, 0.04608859504132231, 0.121893887321888, 495.0, 495.0, 495.0, 0, 1, 1, -360, 69.709 ],
[546, 214, 0, 0.057025454545454546, 0.15081940173295602, 495.0, 495.0, 495.0, 0, 1, 1, -360, 86.251 ],
[233, 227, 0, 0.0029001038781163438, 0.1041066260218888, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.750999999999998 ],
[237, 238, 0, 0.026324628099173554, 0.06962267451304, 495.0, 495.0, 495.0, 0, 1, 1, -360, 39.816 ],
[212, 100, 0, 0.007955505540166205, 0.285583163531816, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 45.951 ],
[519, 239, 0, 0.01740429752066116, 0.046030422038308406, 495.0, 495.0, 495.0, 0, 1, 1, -360, 26.324 ],
[238, 519, 0, 0.015166280991735538, 0.040111375593995205, 495.0, 495.0, 495.0, 0, 1, 1, -360, 22.939 ],
[213, 240, 0, 0.01665388429752066, 0.04404574915373599, 1200.0, 1200.0, 1200.0, 0, 1, 1, -360, 25.189 ],
[241, 242, 0, 0.009862015235457064, 0.3540221919932281, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 56.963 ],
[70, 241, 0, 0.003819858033240997, 0.5484941897752321, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 44.126999999999995 ],
[509, 213, 0, 0.011363636363636364, 0.120216969880216, 991.0, 991.0, 991.0, 0, 2, 1, -360, 34.375 ],
[68, 243, 0, 0.003611668975069252, 0.1296500701715312, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 20.861 ],
[243, 244, 0, 0.0007699099722991691, 0.027637882270859202, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 4.447 ],
[68, 244, 0, 0.004104051246537396, 0.147325387728876, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 23.705 ],
[544, 547, 0, 0.02418776859504132, 0.255884661882476, 991.0, 991.0, 991.0, 0, 1, 1, -360, 73.168 ],
[245, 227, 0, 0.012676419667590028, 0.45505241780707606, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 73.219 ],
[246, 208, 0, 0.0010155817174515235, 0.0364568961999408, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 5.8660000000000005 ],
[112, 208, 0, 0.0017927631578947367, 0.0643558063672372, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 10.355 ],
[165, 247, 0, 0.0002113919667590028, 0.0075884538459086, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.2209999999999999 ],
[537, 549, 0, 0.00032066115702479337, 0.00084807607842936, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.485 ],
[537, 550, 0, 0.00032198347107438016, 0.0008515732993697601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.48700000000000004 ],
[537, 551, 0, 0.0002651239669421488, 0.0007011927988648, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.401 ],
[110, 251, 0, 0.00023857340720221602, 0.008564200982522441, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.3780000000000001 ],
[510, 252, 0, 0.08467702479338843, 0.055987884365424005, 248.0, 248.0, 248.0, 0, 1, 1, -360, 64.03699999999999 ],
[529, 253, 0, 0.04859504132231405, 0.12852286961777998, 495.0, 495.0, 495.0, 0, 1, 1, -360, 73.5 ],
[237, 239, 0, 0.03309421487603306, 0.08752669712542799, 495.0, 495.0, 495.0, 0, 1, 1, -360, 50.055 ],
[254, 238, 0, 0.07815008264462811, 0.05167231372274401, 248.0, 248.0, 248.0, 0, 1, 1, -360, 59.101000000000006 ],
[69, 255, 0, 0.0009369806094182826, 0.134541235754472, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 10.824000000000002 ],
[510, 225, 0, 0.021953719008264466, 0.232250442756508, 991.0, 991.0, 991.0, 0, 1, 1, -360, 66.41 ],
[256, 257, 0, 0.010125619834710746, 0.0267799693631888, 495.0, 495.0, 495.0, 0, 1, 1, -360, 15.315 ],
[258, 190, 0, 0.011717451523545707, 0.10515695255750121, 856.0, 856.0, 856.0, 0, 1, 1, -360, 33.84 ],
[258, 259, 0, 0.015782548476454293, 0.1416387085570408, 856.0, 856.0, 856.0, 0, 1, 1, -360, 45.58 ],
[260, 261, 0, 0.006791031855955679, 0.9751256416231477, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 78.45 ],
[554, 553, 0, 0.17583338842975205, 0.11625986438453201, 248.0, 248.0, 248.0, 0, 1, 1, -360, 132.974 ],
[515, 263, 0, 0.006987107438016529, 0.0739172618295936, 991.0, 991.0, 991.0, 0, 2, 1, -360, 21.136 ],
[14, 264, 0, 0.01700694214876033, 0.17991802858084, 991.0, 991.0, 991.0, 0, 1, 1, -360, 51.446000000000005 ],
[116, 555, 0, 0.0009768595041322315, 0.0103342878835768, 991.0, 991.0, 991.0, 0, 2, 1, -360, 2.955 ],
[151, 116, 0, 0.007244958677685951, 0.0191612735410668, 495.0, 495.0, 495.0, 0, 1, 1, -360, 10.958 ],
[111, 114, 0, 0.008806613573407202, 0.3161358573133961, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 50.867 ],
[77, 111, 0, 0.00288452216066482, 0.41418912211817605, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 33.321999999999996 ],
[266, 525, 0, 0.01042909090909091, 0.027582581569373602, 495.0, 495.0, 495.0, 0, 1, 1, -360, 15.774000000000001 ],
[267, 120, 0, 0.013136945983379503, 0.471584184581432, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 75.87899999999999 ],
[268, 269, 0, 0.0010327272727272726, 0.0027313295556817604, 495.0, 495.0, 495.0, 0, 1, 1, -360, 1.5619999999999998 ],
[556, 271, 0, 0.052289586776859506, 0.0345735262323792, 248.0, 248.0, 248.0, 0, 1, 1, -360, 39.544000000000004 ],
[556, 272, 0, 0.04685355371900827, 0.030979257409249603, 248.0, 248.0, 248.0, 0, 1, 1, -360, 35.433 ],
[529, 273, 0, 0.0034604958677685953, 0.009152227205140799, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.234 ],
[128, 274, 0, 0.0029350761772853184, 0.1053620459045884, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.953 ],
[34, 275, 0, 0.0008290909090909092, 0.00054818938265696, 248.0, 248.0, 248.0, 0, 1, 1, -360, 0.627 ],
[503, 276, 0, 0.006707438016528925, 0.07095861291266, 991.0, 991.0, 991.0, 0, 2, 1, -360, 20.29 ],
[503, 504, 0, 0.06432727272727272, 0.680524223098808, 991.0, 991.0, 991.0, 0, 2, 1, -360, 194.59 ],
[177, 218, 0, 0.04330380165289256, 0.114528740018308, 495.0, 495.0, 495.0, 0, 1, 1, -360, 65.497 ],
[277, 278, 0, 0.007191135734072023, 1.032576638635032, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 83.072 ],
[557, 558, 0, 0.04341289256198347, 0.258338836678648, 743.0, 743.0, 743.0, 0, 1, 1, -360, 98.493 ],
[557, 559, 0, 0.03415867768595042, 0.09034195998366001, 495.0, 495.0, 495.0, 0, 1, 1, -360, 51.665 ],
[559, 558, 0, 0.04474314049586777, 0.11833546501370001, 495.0, 495.0, 495.0, 0, 1, 1, -360, 67.67399999999999 ],
[277, 78, 0, 0.03585768698060942, 0.32180078416049196, 856.0, 856.0, 856.0, 0, 1, 1, -360, 103.557 ],
[277, 279, 0, 0.021390927977839334, 0.191970480441328, 856.0, 856.0, 856.0, 0, 1, 1, -360, 61.777 ],
[78, 279, 0, 0.015811980609418283, 0.1419028439283376, 856.0, 856.0, 856.0, 0, 1, 1, -360, 45.665 ],
[281, 282, 0, 0.0023178670360110803, 0.08320574945862161, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.388 ],
[283, 161, 0, 0.036741157024793386, 0.09717203248350399, 495.0, 495.0, 495.0, 0, 2, 1, -360, 55.571000000000005 ],
[268, 161, 0, 0.018883636363636366, 0.199771751868832, 991.0, 991.0, 991.0, 0, 2, 1, -360, 57.123000000000005 ],
[256, 284, 0, 0.010755371900826446, 0.113782083346976, 991.0, 991.0, 991.0, 0, 2, 1, -360, 32.535 ],
[515, 516, 0, 0.04071140495867769, 0.107672438361532, 495.0, 495.0, 495.0, 0, 1, 1, -360, 61.576 ],
[263, 516, 0, 0.0030355371900826445, 0.128452925198488, 1981.0, 1981.0, 1981.0, 0, 2, 1, -360, 18.365 ],
[516, 285, 0, 0.006908429752066116, 0.018271230811372, 495.0, 495.0, 495.0, 0, 1, 1, -360, 10.449000000000002 ],
[63, 286, 0, 0.019088925619834708, 0.050485881518556, 495.0, 495.0, 495.0, 0, 1, 1, -360, 28.872 ],
[287, 516, 0, 0.01732892561983471, 0.011457770111127998, 248.0, 248.0, 248.0, 0, 1, 1, -360, 13.105 ],
[8, 102, 0, 0.015100069252077563, 0.542055501663692, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 87.21799999999999 ],
[8, 101, 0, 0.019246883656509697, 0.69091598202144, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 111.17 ],
[80, 288, 0, 0.007984072022160666, 0.2866086302684072, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 46.11600000000001 ],
[80, 289, 0, 0.0003782317636201524, 0.122198345223416, 5134.0, 5134.0, 5134.0, 0, 4, 1, -360, 6.553999999999999 ],
[276, 560, 0, 0.01778314049586777, 0.047032375838192794, 495.0, 495.0, 495.0, 0, 2, 1, -360, 26.897 ],
[37, 290, 0, 0.005629501385041551, 0.4546919507138321, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 48.773999999999994 ],
[290, 74, 0, 0.02071595106187673, 1.673216783321968, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 179.483 ],
[512, 291, 0, 0.0053299173553719, 0.056385693247479204, 991.0, 991.0, 991.0, 0, 2, 1, -360, 16.123 ],
[78, 292, 0, 0.0058149815327908595, 0.469673087481408, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 50.381 ],
[199, 548, 0, 0.0015530578512396695, 0.00410748599634868, 495.0, 495.0, 495.0, 0, 1, 1, -360, 2.349 ],
[491, 293, 0, 0.014176528925619833, 0.009373426429729999, 248.0, 248.0, 248.0, 0, 1, 1, -360, 10.720999999999998 ],
[4, 294, 0, 9.669321329639889e-05, 0.013884198109531681, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 1.117 ],
[490, 541, 0, 0.050580495867768596, 0.133773946861896, 495.0, 495.0, 495.0, 0, 1, 1, -360, 76.503 ],
[491, 295, 0, 0.010613553719008264, 0.028070443890777202, 495.0, 495.0, 495.0, 0, 1, 1, -360, 16.053 ],
[491, 296, 0, 0.004400661157024794, 0.0116387512948784, 495.0, 495.0, 495.0, 0, 1, 1, -360, 6.656000000000001 ],
[295, 297, 0, 0.020297520661157024, 0.053682341459340005, 495.0, 495.0, 495.0, 0, 1, 1, -360, 30.7 ],
[508, 161, 0, 0.023239669421487603, 0.061463658055360006, 495.0, 495.0, 495.0, 0, 1, 1, -360, 35.15 ],
[117, 123, 0, 0.005876211911357341, 0.21094161505628, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 33.941 ],
[133, 117, 0, 0.004469182825484764, 0.0401081792747688, 856.0, 856.0, 856.0, 0, 1, 1, -360, 12.907 ],
[71, 74, 0, 0.03904524469065097, 0.7884161162841721, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 169.144 ],
[74, 278, 0, 0.0077122576177285325, 1.10740463560792, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 89.09200000000001 ],
[298, 515, 0, 0.021701157024793388, 0.05739464148919599, 495.0, 495.0, 495.0, 0, 1, 1, -360, 32.823 ],
[5, 299, 0, 0.0016232686980609415, 0.058271370400665996, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 9.376 ],
[32, 292, 0, 0.009679362880886427, 0.34746541983297996, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 55.908 ],
[5, 29, 0, 0.00743395083102493, 1.0674425076571843, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 85.87700000000001 ],
[503, 560, 0, 0.015140495867768593, 0.160172719142436, 991.0, 991.0, 991.0, 0, 1, 1, -360, 45.8 ],
[300, 301, 0, 0.004892053324099723, 0.7024509290644521, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 56.513000000000005 ],
[51, 300, 0, 0.002573493767313019, 0.3695284920307039, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 29.729 ],
[244, 302, 0, 0.007714508310249307, 1.107727813004004, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 89.118 ],
[31, 302, 0, 0.004369113573407203, 0.6273619041941161, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 50.472 ],
[51, 282, 0, 0.006288434903047093, 0.9029576432132521, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 72.64399999999999 ],
[303, 304, 0, 8.795013850415512e-05, 0.000789298639172312, 856.0, 856.0, 856.0, 0, 1, 1, -360, 0.254 ],
[305, 304, 0, 0.003881117266849031, 0.0783689646873844, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 16.813 ],
[305, 259, 0, 0.0025625, 0.36794989475177603, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 29.601999999999997 ],
[306, 307, 0, 0.03223268698060942, 0.289268628831688, 856.0, 856.0, 856.0, 0, 1, 1, -360, 93.088 ],
[305, 308, 0, 0.0024272853185595567, 0.0217833994511184, 856.0, 856.0, 856.0, 0, 1, 1, -360, 7.01 ],
[305, 309, 0, 0.011014773776523545, 0.22241441259921202, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 47.716 ],
[310, 309, 0, 0.009565962603878117, 0.343394627639832, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 55.253 ],
[306, 309, 0, 0.035333795013850415, 0.31709917455019604, 856.0, 856.0, 856.0, 0, 1, 1, -360, 102.044 ],
[311, 280, 0, 0.003433691135734072, 0.1232611016590444, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 19.833 ],
[280, 278, 0, 0.009749769159764544, 0.7874838737974121, 2567.0, 2567.0, 2567.0, 0, 1, 1, -360, 84.47200000000001 ],
[311, 32, 0, 0.01205909510619806, 0.9740069506375919, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 104.48 ],
[13, 312, 0, 0.0043324965373961214, 0.622104056565324, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 50.049 ],
[313, 314, 0, 0.006092624653739613, 0.218710302449316, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 35.191 ],
[312, 313, 0, 0.00893957756232687, 0.32090893884734, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 51.635 ],
[547, 566, 0, 0.027035702479338848, 0.286013220297816, 991.0, 991.0, 991.0, 0, 1, 1, -360, 81.783 ],
[245, 315, 0, 0.014162569252077564, 0.508401547875772, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 81.803 ],
[312, 316, 0, 8.803670360110802e-05, 0.01264120812658816, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 1.0170000000000001 ],
[312, 314, 0, 0.005339854570637119, 0.191687700220296, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 30.843000000000004 ],
[554, 546, 0, 0.08174743801652892, 0.21620344446439202, 495.0, 495.0, 495.0, 0, 1, 1, -360, 123.64299999999999 ],
[262, 216, 0, 0.042641966759002774, 0.38268554099981195, 856.0, 856.0, 856.0, 0, 1, 1, -360, 123.15 ],
[317, 233, 0, 0.005647276084951523, 0.114031901035644, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 24.464000000000002 ],
[318, 317, 0, 0.008311634349030471, 0.16783161497270002, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 36.006 ],
[231, 52, 0, 0.035263677285318554, 1.2658796434850879, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 203.683 ],
[319, 567, 0, 0.006089586776859504, 0.0644223069721, 991.0, 991.0, 991.0, 0, 1, 1, -360, 18.421 ],
[557, 321, 0, 0.010004628099173555, 0.10583989458750401, 991.0, 991.0, 991.0, 0, 2, 1, -360, 30.264 ],
[277, 65, 0, 0.009430170821779778, 0.7616700793261759, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 81.703 ],
[322, 288, 0, 0.006545013850415513, 0.528637424797136, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 56.706 ],
[322, 323, 0, 0.0018503000923372577, 0.14944779312484, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 16.031 ],
[277, 324, 0, 0.019719529085872576, 0.39818407235049996, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 85.425 ],
[324, 325, 0, 0.01103508771932133, 0.22282459929396403, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 47.803999999999995 ],
[277, 325, 0, 0.008665743305609418, 0.174981914850048, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 37.54 ],
[326, 327, 0, 0.007654214876033058, 0.0202436634226288, 495.0, 495.0, 495.0, 0, 1, 1, -360, 11.577 ],
[328, 326, 0, 0.10300958677685952, 0.068109252150368, 248.0, 248.0, 248.0, 0, 1, 1, -360, 77.90100000000001 ],
[328, 327, 0, 0.09827173553719008, 0.064976616491468, 248.0, 248.0, 248.0, 0, 1, 1, -360, 74.318 ],
[326, 329, 0, 0.028062148760330575, 0.07421802283046801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 42.443999999999996 ],
[568, 329, 0, 0.05699900826446282, 0.15074945731414802, 495.0, 495.0, 495.0, 0, 1, 1, -360, 86.211 ],
[568, 326, 0, 0.03218644628099173, 0.08512585494846397, 495.0, 495.0, 495.0, 0, 1, 1, -360, 48.681999999999995 ],
[332, 78, 0, 0.006471029547541551, 0.522661750455416, 2567.0, 2567.0, 2567.0, 0, 2, 1, -360, 56.065 ],
[333, 306, 0, 0.008580159279778392, 0.308006702824228, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 49.559 ],
[332, 333, 0, 0.007504674515235457, 0.26939943395502003, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 43.347 ],
[332, 334, 0, 0.017124653739612188, 0.15368328149175597, 856.0, 856.0, 856.0, 0, 1, 1, -360, 49.456 ],
[66, 334, 0, 0.030625, 0.27484062260471603, 856.0, 856.0, 856.0, 0, 1, 1, -360, 88.445 ],
[330, 335, 0, 0.00550536703601108, 0.790516769355108, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 63.598 ],
[336, 66, 0, 0.015054362880886425, 0.1351036887216764, 856.0, 856.0, 856.0, 0, 1, 1, -360, 43.477 ],
[330, 336, 0, 0.039036357340720224, 0.350327404269788, 856.0, 856.0, 856.0, 0, 1, 1, -360, 112.73700000000001 ],
[68, 70, 0, 0.016314058171745152, 0.14640868261713597, 856.0, 856.0, 856.0, 0, 1, 1, -360, 47.115 ],
[509, 337, 0, 0.03494082644628099, 0.09241056617056001, 495.0, 495.0, 495.0, 0, 1, 1, -360, 52.848 ],
[324, 288, 0, 0.012627423822714683, 0.11332339674541761, 856.0, 856.0, 856.0, 0, 1, 1, -360, 36.468 ],
[338, 559, 0, 0.009228099173553718, 0.097624922595552, 991.0, 991.0, 991.0, 0, 2, 1, -360, 27.915 ],
[339, 559, 0, 0.03560595041322315, 0.023542417076125203, 248.0, 248.0, 248.0, 0, 1, 1, -360, 26.927 ],
[339, 340, 0, 0.08711537190082644, 0.23040041287850396, 495.0, 495.0, 495.0, 0, 1, 1, -360, 131.762 ],
[559, 340, 0, 0.20983272727272728, 0.138740000599684, 248.0, 248.0, 248.0, 0, 1, 1, -360, 158.686 ],
[341, 292, 0, 0.0009329409048961218, 0.07535316024134399, 2567.0, 2567.0, 2567.0, 0, 1, 1, -360, 8.083 ],
[557, 342, 0, 0.006019834710743802, 0.0636843933534336, 991.0, 991.0, 991.0, 0, 2, 1, -360, 18.21 ],
[558, 343, 0, 0.010650247933884296, 0.11266996708783199, 991.0, 991.0, 991.0, 0, 1, 1, -360, 32.217 ],
[502, 340, 0, 0.021737520661157025, 0.22996326026071198, 991.0, 991.0, 991.0, 0, 2, 1, -360, 65.756 ],
[72, 32, 0, 0.00675502077562327, 0.969954803293024, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 78.03399999999999 ],
[344, 345, 0, 0.0005762927054480609, 0.04654686738645321, 2567.0, 2567.0, 2567.0, 0, 1, 1, -360, 4.993 ],
[346, 47, 0, 0.0011340027700831024, 0.04070792194158799, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 6.55 ],
[46, 47, 0, 0.0008975069252077563, 0.0322183003580208, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 5.184 ],
[346, 345, 0, 0.0007217797783933517, 0.025910126194627202, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 4.169 ],
[347, 328, 0, 0.029905454545454544, 0.07909314882361201, 495.0, 495.0, 495.0, 0, 1, 1, -360, 45.232 ],
[347, 348, 0, 0.04883438016528925, 0.129155866607944, 495.0, 495.0, 495.0, 0, 1, 1, -360, 73.862 ],
[571, 348, 0, 0.041548429752066116, 0.10988617921762801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 62.842 ],
[347, 572, 0, 0.016052231404958678, 0.04245451362512801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 24.279 ],
[571, 570, 0, 0.17379041322314048, 0.11490906279551602, 248.0, 248.0, 248.0, 0, 1, 1, -360, 131.429 ],
[14, 350, 0, 0.02166743801652892, 0.05730546235524, 495.0, 495.0, 495.0, 0, 1, 1, -360, 32.772 ],
[350, 573, 0, 0.026277685950413226, 0.06949852316919598, 495.0, 495.0, 495.0, 0, 1, 1, -360, 39.745 ],
[15, 351, 0, 0.02639265927977839, 0.236857956201204, 856.0, 856.0, 856.0, 0, 1, 1, -360, 76.222 ],
[352, 15, 0, 0.0015260560941828254, 0.219126704094076, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 17.629 ],
[15, 335, 0, 0.0035338758079432133, 1.1417173740880242, 5134.0, 5134.0, 5134.0, 0, 1, 1, -360, 61.235 ],
[232, 227, 0, 5.5747922437673134e-05, 0.000500303468136644, 1200.0, 1200.0, 1200.0, 0, 1, 1, -360, 0.161 ],
[565, 544, 0, 0.0394803305785124, 0.10441652566461601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 59.714 ],
[235, 567, 0, 0.02391404958677686, 0.25298896294275997, 991.0, 991.0, 991.0, 0, 1, 1, -360, 72.34 ],
[567, 286, 0, 0.008068760330578512, 0.34144067500694797, 1981.0, 1981.0, 1981.0, 0, 1, 1, -360, 48.816 ],
[353, 519, 0, 0.007621818181818182, 0.080631926038356, 991.0, 991.0, 991.0, 0, 1, 1, -360, 23.055999999999997 ],
[354, 353, 0, 0.0008436363636363636, 0.00892490784392768, 991.0, 991.0, 991.0, 0, 2, 1, -360, 2.552 ],
[355, 354, 0, 0.0068502479338842966, 0.0181173530898976, 495.0, 495.0, 495.0, 0, 1, 1, -360, 10.360999999999999 ],
[354, 356, 0, 0.01855404958677686, 0.049071255647172, 495.0, 495.0, 495.0, 0, 1, 1, -360, 28.063000000000002 ],
[357, 358, 0, 0.0034823407202216067, 0.5000300103406239, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 40.228 ],
[574, 359, 0, 0.013352066115702478, 0.0353131884615884, 495.0, 495.0, 495.0, 0, 1, 1, -360, 20.195 ],
[235, 575, 0, 0.007459504132231404, 0.0789147905557, 991.0, 991.0, 991.0, 0, 1, 1, -360, 22.565 ],
[167, 361, 0, 0.000616198347107438, 0.0065188198358579995, 991.0, 991.0, 991.0, 0, 1, 1, -360, 1.864 ],
[528, 362, 0, 0.0011960330578512398, 0.012652945368078402, 991.0, 991.0, 991.0, 0, 1, 1, -360, 3.6180000000000003 ],
[363, 344, 0, 0.0002662742382271468, 0.009558592968871479, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.538 ],
[259, 364, 0, 0.013069713758102496, 0.26390852570525997, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 56.618 ],
[54, 56, 0, 0.007723337950138504, 0.0693122289241068, 856.0, 856.0, 856.0, 0, 1, 1, -360, 22.305 ],
[365, 364, 0, 0.0049974607571537395, 0.10091058802821559, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 21.649 ],
[231, 366, 0, 0.0013273891966759002, 0.0476500209962672, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 7.667000000000001 ],
[30, 367, 0, 0.01126108033240997, 0.1010613005635992, 856.0, 856.0, 856.0, 0, 1, 1, -360, 32.522 ],
[61, 367, 0, 0.020337603878116343, 0.18251754162067196, 856.0, 856.0, 856.0, 0, 1, 1, -360, 58.735 ],
[254, 368, 0, 0.0004297520661157025, 0.00454638722456732, 991.0, 991.0, 991.0, 0, 1, 1, -360, 1.3 ],
[254, 369, 0, 0.00015999999999999999, 0.00169265493591832, 991.0, 991.0, 991.0, 0, 2, 1, -360, 0.484 ],
[254, 370, 0, 0.0003669421487603306, 0.0038819152455960805, 991.0, 991.0, 991.0, 0, 2, 1, -360, 1.11 ],
[99, 358, 0, 0.0020184383656509696, 0.28982797432374396, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 23.316999999999997 ],
[354, 519, 0, 0.006762644628099174, 0.07154264880985199, 991.0, 991.0, 991.0, 0, 1, 1, -360, 20.457 ],
[571, 371, 0, 0.023726942148760328, 0.06275238397221199, 495.0, 495.0, 495.0, 0, 1, 1, -360, 35.887 ],
[207, 372, 0, 0.002329256198347108, 0.006160354689297601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 3.523 ],
[57, 373, 0, 0.0017725619834710745, 0.0046880246727212796, 495.0, 495.0, 495.0, 0, 1, 1, -360, 2.681 ],
[209, 374, 0, 0.0010122922437673131, 0.0363388121515216, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 5.847 ],
[375, 376, 0, 0.0045364727608518006, 0.0916021467933684, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 19.652 ],
[376, 377, 0, 0.0030886426592797783, 0.062367022394423606, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 13.38 ],
[16, 49, 0, 0.002266101108033241, 0.32538991773524, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 26.178 ],
[318, 377, 0, 0.004755078485685596, 0.0960163149704152, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 20.599 ],
[378, 297, 0, 0.01753917355371901, 0.046387138574374404, 495.0, 495.0, 495.0, 0, 1, 1, -360, 26.528000000000002 ],
[562, 379, 0, 0.01802314049586777, 0.047667121439141605, 495.0, 495.0, 495.0, 0, 1, 1, -360, 27.26 ],
[576, 563, 0, 0.001808264462809917, 0.004782449638150801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 2.735 ],
[576, 381, 0, 0.0034320661157024794, 0.009077036954898, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.191 ],
[577, 576, 0, 0.06004495867768594, 0.15880530575430396, 495.0, 495.0, 495.0, 0, 1, 1, -360, 90.818 ],
[244, 383, 0, 0.006845567867036011, 0.1382282547912684, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 29.655 ],
[244, 306, 0, 0.02679108956599723, 0.5409756541164079, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 116.059 ],
[383, 306, 0, 0.0300685595567867, 0.269846910348376, 856.0, 856.0, 856.0, 0, 1, 1, -360, 86.838 ],
[380, 306, 0, 0.00025605955678670365, 0.03676764369572, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 2.958 ],
[252, 225, 0, 0.062094545454545444, 0.041056499553586, 248.0, 248.0, 248.0, 0, 1, 1, -360, 46.958999999999996 ],
[220, 76, 0, 0.002772074099722992, 0.398042682239984, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 32.023 ],
[542, 384, 0, 0.007939834710743802, 0.020999063146094, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.009 ],
[385, 384, 0, 0.053734876033057856, 0.035529141854791196, 248.0, 248.0, 248.0, 0, 1, 1, -360, 40.637 ],
[542, 385, 0, 0.011306115702479337, 0.119608453436296, 991.0, 991.0, 991.0, 0, 2, 1, -360, 34.201 ],
[386, 385, 0, 0.003668760330578512, 0.0388121580140316, 991.0, 991.0, 991.0, 0, 1, 1, -360, 11.097999999999999 ],
[387, 578, 0, 0.015444628099173553, 0.16339016240905604, 991.0, 991.0, 991.0, 0, 1, 1, -360, 46.72 ],
[332, 388, 0, 0.014036184210526315, 0.5038646344377999, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 81.07300000000001 ],
[382, 332, 0, 0.017764369806094183, 0.637697365901468, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 102.60700000000001 ],
[382, 388, 0, 0.00476159972299169, 0.17092976750548, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 27.503 ],
[579, 578, 0, 0.01911074380165289, 0.050543585664, 495.0, 495.0, 495.0, 0, 1, 1, -360, 28.905 ],
[577, 387, 0, 0.07597818181818182, 0.20094506949431204, 495.0, 495.0, 495.0, 0, 1, 1, -360, 114.917 ],
[144, 390, 0, 0.0004277685950413223, 0.0011313509747276, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.647 ],
[37, 49, 0, 0.008441481994459835, 0.303028527944352, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 48.758 ],
[391, 233, 0, 0.014211218836565096, 0.1275369872004348, 856.0, 856.0, 856.0, 0, 1, 1, -360, 41.042 ],
[392, 310, 0, 0.007035318559556785, 0.06313767618386361, 856.0, 856.0, 856.0, 0, 1, 1, -360, 20.317999999999998 ],
[260, 393, 0, 0.006341412742382271, 0.0569102963692744, 856.0, 856.0, 856.0, 0, 1, 1, -360, 18.314 ],
[394, 230, 0, 0.0007590027700831025, 0.00681158510656168, 856.0, 856.0, 856.0, 0, 1, 1, -360, 2.1919999999999997 ],
[395, 282, 0, 0.008762984764542936, 0.314569689934484, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 50.615 ],
[395, 244, 0, 0.0034046052631578946, 0.12221699007344, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 19.665 ],
[25, 396, 0, 0.008809037396121884, 0.316222866612064, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 50.881 ],
[81, 74, 0, 0.0075207756232686974, 0.26997742429652244, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 43.44 ],
[278, 80, 0, 0.016286011080332407, 0.5846279085788, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 94.068 ],
[81, 278, 0, 0.021054016620498613, 0.755787629231688, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 121.60799999999999 ],
[569, 570, 0, 0.03253950413223141, 0.08605961294018, 495.0, 495.0, 495.0, 0, 1, 1, -360, 49.216 ],
[397, 552, 0, 0.006289586776859504, 0.0166345314104904, 1200.0, 1200.0, 1200.0, 0, 1, 1, -360, 9.513 ],
[542, 398, 0, 0.0005580165289256199, 0.0059033089500572, 991.0, 991.0, 991.0, 0, 1, 1, -360, 1.6880000000000002 ],
[398, 385, 0, 0.021893553719008262, 0.05790348713648401, 495.0, 495.0, 495.0, 0, 1, 1, -360, 33.114000000000004 ],
[399, 499, 0, 0.03266380165289256, 0.021597087927192803, 248.0, 248.0, 248.0, 0, 1, 1, -360, 24.701999999999998 ],
[83, 399, 0, 0.025700495867768593, 0.016992996557050798, 248.0, 248.0, 248.0, 0, 1, 1, -360, 19.436 ],
[498, 400, 0, 0.012134214876033058, 0.032092247974028, 495.0, 495.0, 495.0, 0, 1, 1, -360, 18.352999999999998 ],
[518, 239, 0, 0.04685289256198347, 0.123915281026504, 495.0, 495.0, 495.0, 0, 1, 1, -360, 70.865 ],
[575, 543, 0, 0.0030307438016528923, 0.032062521596058796, 991.0, 991.0, 991.0, 0, 1, 1, -360, 9.168 ],
[401, 360, 0, 0.007957063711911357, 0.071409774520472, 856.0, 856.0, 856.0, 0, 1, 1, -360, 22.98 ],
[580, 581, 0, 0.007134545454545454, 0.018869255592422397, 495.0, 495.0, 495.0, 0, 1, 1, -360, 10.790999999999999 ],
[401, 402, 0, 0.0033434903047091418, 0.030005778188384805, 856.0, 856.0, 856.0, 0, 1, 1, -360, 9.656 ],
[403, 231, 0, 0.009592105263157893, 0.08608327126915, 856.0, 856.0, 856.0, 0, 1, 1, -360, 27.701999999999998 ],
[189, 360, 0, 0.028456024930747923, 0.255375399471348, 856.0, 856.0, 856.0, 0, 1, 1, -360, 82.181 ],
[234, 404, 0, 0.008092561983471074, 0.0214029921648796, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.24 ],
[235, 404, 0, 0.05107504132231405, 0.13508190749437998, 495.0, 495.0, 495.0, 0, 1, 1, -360, 77.251 ],
[235, 580, 0, 0.000580495867768595, 0.00153527999352772, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.878 ],
[216, 259, 0, 0.0022115650969529088, 0.079389770210892, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 12.774000000000001 ],
[405, 259, 0, 0.0052832409972299165, 0.1896554115982928, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 30.516 ],
[405, 318, 0, 0.0066348684210526315, 0.23817552558268398, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 38.323 ],
[406, 230, 0, 8.098164819944598e-05, 0.046512685161986804, 6845.0, 6845.0, 6845.0, 0, 1, 1, -360, 1.871 ],
[542, 407, 0, 0.025569586776859506, 0.067625761355152, 495.0, 495.0, 495.0, 0, 1, 1, -360, 38.674 ],
[23, 408, 0, 0.03224528925619835, 0.08528148128033601, 495.0, 495.0, 495.0, 0, 1, 1, -360, 48.771 ],
[577, 348, 0, 0.012999008264462809, 0.13751772188026398, 991.0, 991.0, 991.0, 0, 2, 1, -360, 39.321999999999996 ],
[562, 564, 0, 0.06921520661157024, 0.18305853298686803, 495.0, 495.0, 495.0, 0, 1, 1, -360, 104.68799999999999 ],
[582, 507, 0, 0.006357685950413223, 0.016814638289042002, 495.0, 495.0, 495.0, 0, 1, 1, -360, 9.616 ],
[27, 410, 0, 0.0030042975206611565, 0.007945685980170399, 495.0, 495.0, 495.0, 0, 1, 1, -360, 4.544 ],
[501, 27, 0, 0.003811570247933884, 0.040322957460962, 991.0, 991.0, 991.0, 0, 1, 1, -360, 11.53 ],
[27, 411, 0, 0.004648595041322314, 0.012294480221518, 495.0, 495.0, 495.0, 0, 1, 1, -360, 7.031000000000001 ],
[411, 410, 0, 0.002054214876033058, 0.0054329327333556, 495.0, 495.0, 495.0, 0, 1, 1, -360, 3.1069999999999998 ],
[403, 360, 0, 0.008191481994459833, 0.07351353506655639, 856.0, 856.0, 856.0, 0, 1, 1, -360, 23.656999999999996 ],
[412, 360, 0, 0.016761772853185596, 0.15042664773666, 856.0, 856.0, 856.0, 0, 1, 1, -360, 48.408 ],
[326, 413, 0, 0.012077024793388432, 0.12776397267356798, 991.0, 991.0, 991.0, 0, 2, 1, -360, 36.533 ],
[414, 413, 0, 0.008093223140495867, 0.08561896310149601, 991.0, 991.0, 991.0, 0, 2, 1, -360, 24.482 ],
[6, 297, 0, 0.019472396694214876, 0.0128750188978664, 248.0, 248.0, 248.0, 0, 1, 1, -360, 14.725999999999999 ],
[554, 580, 0, 0.07435371900826447, 0.196648733567264, 495.0, 495.0, 495.0, 0, 1, 1, -360, 112.46 ],
[262, 401, 0, 0.03931232686980609, 0.35280406181043206, 856.0, 856.0, 856.0, 0, 1, 1, -360, 113.53399999999999 ],
[499, 556, 0, 0.04185586776859504, 0.11069928308639199, 495.0, 495.0, 495.0, 0, 2, 1, -360, 63.306999999999995 ],
[224, 229, 0, 0.004135206611570248, 0.0437467367631624, 991.0, 991.0, 991.0, 0, 1, 1, -360, 12.509 ],
[583, 507, 0, 0.024632727272727268, 0.065147980317596, 495.0, 495.0, 495.0, 0, 1, 1, -360, 37.257 ],
[415, 307, 0, 0.015675554016620498, 0.1406784987952448, 856.0, 856.0, 856.0, 0, 1, 1, -360, 45.271 ],
[416, 507, 0, 0.0010555371900826446, 0.011166626467730801, 991.0, 991.0, 991.0, 0, 1, 1, -360, 3.193 ],
[284, 561, 0, 0.015221487603305786, 0.16102953827307598, 991.0, 991.0, 991.0, 0, 1, 1, -360, 46.045 ],
[543, 417, 0, 0.0006614876033057851, 0.027991756419545603, 1981.0, 1981.0, 1981.0, 0, 4, 1, -360, 4.002 ],
[418, 506, 0, 0.0009395041322314049, 0.009939101917118, 991.0, 991.0, 991.0, 0, 1, 1, -360, 2.842 ],
[220, 157, 0, 0.004599549861495845, 0.165112574384632, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 26.566999999999997 ],
[295, 419, 0, 0.0012023140495867769, 0.012719392565946, 991.0, 991.0, 991.0, 0, 1, 1, -360, 3.637 ],
[295, 420, 0, 0.0008003305785123967, 0.008466771900532, 991.0, 991.0, 991.0, 0, 1, 1, -360, 2.421 ],
[541, 62, 0, 0.05133355371900827, 0.0339414035471236, 248.0, 248.0, 248.0, 0, 1, 1, -360, 38.821 ],
[52, 421, 0, 0.00013885041551246538, 0.004984389831631239, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.802 ],
[60, 160, 0, 6.128808864265928e-05, 0.000550023067454096, 856.0, 856.0, 856.0, 0, 2, 1, -360, 0.177 ],
[535, 161, 0, 3.735537190082645e-05, 0.00039518596644331203, 991.0, 991.0, 991.0, 0, 2, 1, -360, 0.113 ],
[267, 282, 0, 0.0065652700831024926, 0.235677115717012, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 37.921 ],
[52, 365, 0, 0.007655586334279779, 0.15458444922992, 1283.0, 1283.0, 1283.0, 0, 1, 1, -360, 33.164 ],
[28, 27, 0, 0.015726942148760328, 0.041594197273402404, 495.0, 495.0, 495.0, 0, 1, 1, -360, 23.787 ],
[30, 201, 0, 0.009128289473684211, 0.327683234253536, 1711.0, 1711.0, 1711.0, 0, 2, 1, -360, 52.725 ],
[422, 81, 0, 0.0004226685133887349, 0.13655487952674, 5134.0, 5134.0, 5134.0, 0, 6, 1, -360, 7.324 ],
[119, 425, 0, 0.003579120498614958, 0.1284816595874996, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 20.673000000000002 ],
[423, 425, 0, 0.0006518351800554017, 0.0233992864289392, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 3.765 ],
[424, 425, 0, 0.005922957063711911, 0.21261965153389198, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 34.211 ],
[426, 428, 0, 0.013948429752066116, 0.14756174042535197, 991.0, 991.0, 991.0, 0, 2, 1, -360, 42.193999999999996 ],
[427, 428, 0, 0.0002664462809917355, 0.0028187600792304794, 991.0, 991.0, 991.0, 0, 2, 1, -360, 0.8059999999999999 ],
[19, 428, 0, 0.023607603305785128, 0.24974703912892798, 991.0, 991.0, 991.0, 0, 2, 1, -360, 71.413 ],
[45, 429, 0, 0.02562314049586777, 0.067767398802972, 495.0, 495.0, 495.0, 0, 1, 1, -360, 38.755 ],
[44, 429, 0, 5.289256198347107e-05, 0.00013988883767892, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.08 ],
[505, 429, 0, 0.006012561983471073, 0.015901863623161996, 495.0, 495.0, 495.0, 0, 1, 1, -360, 9.094 ],
[231, 431, 0, 0.011677285318559558, 0.4191859418495199, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 67.44800000000001 ],
[190, 431, 0, 0.009600761772853185, 0.34464383257266795, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 55.45399999999999 ],
[430, 431, 0, 0.0028100761772853187, 0.1008748520662472, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.230999999999998 ],
[286, 433, 0, 0.01568694214876033, 0.16595362535967603, 991.0, 991.0, 991.0, 0, 1, 1, -360, 47.453 ],
[432, 433, 0, 0.00010049586776859504, 0.00106315516636076, 991.0, 991.0, 991.0, 0, 1, 1, -360, 0.304 ],
[506, 433, 0, 0.0065904132231404955, 0.06972059669946801, 991.0, 991.0, 991.0, 0, 1, 1, -360, 19.936 ],
[23, 434, 0, 0.02613685950413223, 0.069126069139116, 495.0, 495.0, 495.0, 0, 2, 1, -360, 39.532 ],
[400, 434, 0, 0.008155371900826446, 0.021569110159669603, 495.0, 495.0, 495.0, 0, 2, 1, -360, 12.335 ],
[500, 434, 0, 0.006338512396694216, 0.0167639285853336, 495.0, 495.0, 495.0, 0, 2, 1, -360, 9.587 ],
[32, 436, 0, 0.0044813019390581715, 0.16086776359270402, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 25.884 ],
[435, 436, 0, 0.0006634349030470914, 0.023815688073266, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 3.832 ],
[78, 436, 0, 0.00897680055401662, 0.32224515307884394, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 51.85 ],
[86, 438, 0, 0.014693213296398892, 0.52745036936438, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 84.868 ],
[437, 438, 0, 1.0387811634349031e-05, 0.0003728969948845, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.06 ],
[221, 438, 0, 0.002280124653739612, 0.081850890377238, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.17 ],
[207, 439, 0, 0.055703801652892564, 0.0368309823503996, 248.0, 248.0, 248.0, 0, 1, 1, -360, 42.126000000000005 ],
[516, 439, 0, 0.05448462809917355, 0.03602487292327441, 248.0, 248.0, 248.0, 0, 1, 1, -360, 41.20399999999999 ],
[513, 439, 0, 0.046726611570247926, 0.0308953241066316, 248.0, 248.0, 248.0, 0, 1, 1, -360, 35.336999999999996 ],
[181, 441, 0, 0.040805289256198356, 0.10792074104825197, 495.0, 495.0, 495.0, 0, 1, 1, -360, 61.718 ],
[440, 441, 0, 0.0001322314049586777, 0.000349722094197784, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.2 ],
[504, 441, 0, 0.05916099173553719, 0.156467413554364, 495.0, 495.0, 495.0, 0, 1, 1, -360, 89.48100000000001 ],
[135, 442, 0, 0.004956890581717451, 0.177940231009092, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 28.631 ],
[109, 442, 0, 0.0015380886426592797, 0.055213615042649204, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 8.884 ],
[112, 442, 0, 0.0027304362880886425, 0.09801597510545401, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 15.770999999999999 ],
[113, 443, 0, 0.0019885734072022164, 0.07138491472072879, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 11.485999999999999 ],
[132, 443, 0, 0.006788434903047091, 0.24368818615747198, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 39.21 ],
[107, 443, 0, 2.2333795013850418e-05, 0.000801728539002036, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.129 ],
[444, 445, 0, 7.877423822714682e-05, 0.00282780221121528, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.455 ],
[112, 445, 0, 0.002816135734072022, 0.101092375313206, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.266 ],
[109, 445, 0, 0.0014354224376731304, 0.0515281497432104, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 8.291 ],
[119, 447, 0, 0.005212690443213296, 0.74849127803204, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 60.217 ],
[100, 447, 0, 0.0050695117728531865, 0.7279322237145921, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 58.563 ],
[446, 447, 0, 2.9518698060941832e-05, 0.00423859584186224, 3423.0, 3423.0, 3423.0, 0, 2, 1, -360, 0.341 ],
[124, 448, 0, 6.509695290858726e-05, 0.00233682116794768, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.376 ],
[125, 448, 0, 0.00615148891966759, 0.22082338542026803, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 35.531 ],
[131, 448, 0, 3.912742382271468e-05, 0.0014045786807313759, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.226 ],
[449, 450, 0, 0.0023614958448753462, 0.08477191683710039, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.64 ],
[173, 450, 0, 0.002862361495844876, 0.10275176694050518, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 16.533 ],
[184, 450, 0, 0.004022853185595568, 0.14441057621844403, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 23.236 ],
[144, 451, 0, 0.007672727272727273, 0.020292624515794402, 495.0, 495.0, 495.0, 0, 1, 1, -360, 11.605 ],
[140, 451, 0, 0.006991074380165291, 0.018489807120219602, 495.0, 495.0, 495.0, 0, 1, 1, -360, 10.574000000000002 ],
[514, 451, 0, 0.01149289256198347, 0.030396095817207994, 495.0, 495.0, 495.0, 0, 1, 1, -360, 17.383 ],
[537, 585, 0, 0.05072595041322314, 0.134158641165824, 495.0, 495.0, 495.0, 0, 1, 1, -360, 76.723 ],
[141, 585, 0, 0.007994710743801653, 0.0211441978151932, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.092 ],
[584, 585, 0, 9.256198347107438e-05, 0.000244805465938352, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.14 ],
[522, 454, 0, 0.0035008264462809916, 0.0092588924438956, 495.0, 495.0, 495.0, 0, 1, 1, -360, 5.295 ],
[144, 454, 0, 0.00452892561983471, 0.011977981726290799, 495.0, 495.0, 495.0, 0, 1, 1, -360, 6.85 ],
[453, 454, 0, 0.001114710743801653, 0.0029481572540882, 495.0, 495.0, 495.0, 0, 1, 1, -360, 1.686 ],
[199, 456, 0, 0.013063140495867768, 0.0086372614214612, 248.0, 248.0, 248.0, 0, 1, 1, -360, 9.879 ],
[140, 456, 0, 0.005061818181818182, 0.013387361765852802, 495.0, 495.0, 495.0, 0, 2, 1, -360, 7.656000000000001 ],
[455, 456, 0, 0.0011365289256198346, 0.00300586139962416, 495.0, 495.0, 495.0, 0, 2, 1, -360, 1.719 ],
[537, 456, 0, 0.039058512396694216, 0.025825228046024003, 248.0, 248.0, 248.0, 0, 1, 1, -360, 29.538 ],
[538, 457, 0, 0.027927272727272728, 0.0184653265736368, 248.0, 248.0, 248.0, 0, 1, 1, -360, 21.12 ],
[153, 457, 0, 0.030093223140495867, 0.019897438549384, 248.0, 248.0, 248.0, 0, 1, 1, -360, 22.758000000000003 ],
[176, 457, 0, 0.004579173553719009, 0.0030277190305137603, 248.0, 248.0, 248.0, 0, 1, 1, -360, 3.463 ],
[524, 459, 0, 0.004318677685950414, 0.011421923596476799, 495.0, 495.0, 495.0, 0, 1, 1, -360, 6.532 ],
[458, 459, 0, 0.001993388429752066, 0.0052720605700488, 495.0, 495.0, 495.0, 0, 1, 1, -360, 3.015 ],
[134, 459, 0, 0.011813553719008265, 0.031244171895617998, 495.0, 495.0, 495.0, 0, 1, 1, -360, 17.868 ],
[460, 461, 0, 6.611570247933885e-05, 0.000174861047098892, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.1 ],
[150, 461, 0, 0.008018512396694214, 0.021207147792120403, 495.0, 495.0, 495.0, 0, 1, 1, -360, 12.128 ],
[149, 461, 0, 0.005586115702479339, 0.0147740098693748, 495.0, 495.0, 495.0, 0, 1, 1, -360, 8.449 ],
[521, 463, 0, 0.014348429752066114, 0.009487086110365599, 248.0, 248.0, 248.0, 0, 1, 1, -360, 10.850999999999999 ],
[462, 463, 0, 0.007197355371900825, 0.0047588433967958406, 248.0, 248.0, 248.0, 0, 1, 1, -360, 5.443 ],
[538, 463, 0, 0.012211570247933883, 0.0080742088497664, 248.0, 248.0, 248.0, 0, 1, 1, -360, 9.235 ],
[110, 464, 0, 0.0025753116343490306, 0.0924473799817492, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 14.875 ],
[90, 464, 0, 0.007328947368421053, 0.26309125979076, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 42.332 ],
[165, 464, 0, 0.002152527700831025, 0.0772704722900764, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 12.433 ],
[458, 465, 0, 0.002003305785123967, 0.0052982897270776, 495.0, 495.0, 495.0, 0, 1, 1, -360, 3.03 ],
[134, 465, 0, 0.011838677685950413, 0.031310619093534, 495.0, 495.0, 495.0, 0, 1, 1, -360, 17.906 ],
[524, 465, 0, 0.004293553719008264, 0.0113554763986092, 495.0, 495.0, 495.0, 0, 1, 1, -360, 6.494 ],
[466, 467, 0, 0.0023509349030470914, 0.084392804892244, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.579 ],
[110, 467, 0, 0.0025337603878116343, 0.09095579200221118, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 14.635 ],
[165, 467, 0, 0.0022891274238227145, 0.08217406777274441, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 13.222000000000001 ],
[468, 469, 0, 0.0005269421487603305, 0.0013936425453786, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.797 ],
[541, 469, 0, 0.022390743801652895, 0.05921844221026801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 33.866 ],
[490, 469, 0, 0.028243305785123966, 0.07469714209944801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 42.718 ],
[263, 471, 0, 0.0371900826446281, 0.0245898347482832, 248.0, 248.0, 248.0, 0, 1, 1, -360, 28.125 ],
[470, 471, 0, 0.001570909090909091, 0.0010386746197682802, 248.0, 248.0, 248.0, 0, 1, 1, -360, 1.188 ],
[534, 471, 0, 0.024497190082644622, 0.0161973787927468, 248.0, 248.0, 248.0, 0, 1, 1, -360, 18.526 ],
[136, 472, 0, 0.0007079293628808865, 0.025412930201351602, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 4.0889999999999995 ],
[110, 472, 0, 0.00019511772853185596, 0.0070042485539216805, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.127 ],
[251, 472, 0, 4.207063711911357e-05, 0.00151023282928764, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.243 ],
[226, 474, 0, 0.017639669421487602, 0.011663231841509601, 248.0, 248.0, 248.0, 0, 1, 1, -360, 13.34 ],
[473, 474, 0, 0.003467107438016529, 0.00916971330986216, 495.0, 495.0, 495.0, 0, 2, 1, -360, 5.244 ],
[257, 474, 0, 0.020264462809917356, 0.053594910935781594, 495.0, 495.0, 495.0, 0, 2, 1, -360, 30.65 ],
[6, 474, 0, 0.08066247933884299, 0.05333349367016, 248.0, 248.0, 248.0, 0, 1, 1, -360, 61.001000000000005 ],
[299, 475, 0, 0.013238227146814403, 0.47521993028123993, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 76.464 ],
[3, 475, 0, 0.0002794321329639889, 0.010030929162389441, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.614 ],
[210, 475, 0, 0.0001481994459833795, 0.00531999712702368, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.856 ],
[297, 476, 0, 0.0193500826446281, 0.05117658265464801, 495.0, 495.0, 495.0, 0, 1, 1, -360, 29.267 ],
[296, 476, 0, 0.005596694214876033, 0.014801987636898, 495.0, 495.0, 495.0, 0, 1, 1, -360, 8.465 ],
[295, 476, 0, 0.0009474380165289256, 0.00250575880492432, 495.0, 495.0, 495.0, 0, 1, 1, -360, 1.433 ],
[313, 478, 0, 0.008696849030470914, 0.31219557906752804, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 50.233000000000004 ],
[477, 478, 0, 1.5235457063711912e-05, 0.0005469155924977479, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 0.08800000000000001 ],
[245, 478, 0, 0.005264542936288089, 0.188984197007248, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 30.408 ],
[479, 481, 0, 0.028420495867768597, 0.07516576970575199, 495.0, 495.0, 495.0, 0, 1, 1, -360, 42.986000000000004 ],
[565, 481, 0, 0.024842314049586776, 0.065702289836964, 495.0, 495.0, 495.0, 0, 1, 1, -360, 37.574 ],
[480, 481, 0, 7.735537190082645e-05, 0.000204587425105844, 495.0, 495.0, 495.0, 0, 1, 1, -360, 0.11699999999999999 ],
[415, 482, 0, 0.011021814404432133, 0.0989140353680364, 856.0, 856.0, 856.0, 0, 1, 1, -360, 31.831 ],
[56, 482, 0, 0.002630886426592798, 0.0236105947261788, 856.0, 856.0, 856.0, 0, 1, 1, -360, 7.598 ],
[409, 482, 0, 0.0007635041551246537, 0.0068519822810072005, 856.0, 856.0, 856.0, 0, 1, 1, -360, 2.205 ],
[483, 484, 0, 9.037396121883656e-05, 0.000811050963873968, 856.0, 856.0, 856.0, 0, 1, 1, -360, 0.261 ],
[3, 484, 0, 0.010022160664819944, 0.08994275516621358, 856.0, 856.0, 856.0, 0, 1, 1, -360, 28.944000000000003 ],
[301, 484, 0, 0.00966516620498615, 0.08673894848517479, 856.0, 856.0, 856.0, 0, 1, 1, -360, 27.913 ],
[233, 485, 0, 0.01410180055401662, 0.1265550251138996, 856.0, 856.0, 856.0, 0, 1, 1, -360, 40.726 ],
[392, 485, 0, 0.00914819944598338, 0.0820994883738036, 856.0, 856.0, 856.0, 0, 1, 1, -360, 26.42 ],
[391, 485, 0, 8.518005540166207e-05, 0.000764438839512864, 856.0, 856.0, 856.0, 0, 1, 1, -360, 0.24600000000000002 ],
[579, 488, 0, 0.004636473829194215, 0.11036180126571601, 1486.0, 1486.0, 1486.0, 0, 1, 1, -360, 21.038 ],
[486, 488, 0, 0.00016969696969690082, 0.00403929018798184, 1486.0, 1486.0, 1486.0, 0, 1, 1, -360, 0.77 ],
[487, 488, 0, 0.00014567493112954544, 0.00346749456396992, 1486.0, 1486.0, 1486.0, 0, 1, 1, -360, 0.6609999999999999 ],
[270, 489, 0, 0.0001745152354570637, 0.0062646695140596, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 1.008 ],
[331, 489, 0, 0.003002943213296399, 0.10779830627119119, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 17.345 ],
[396, 489, 0, 0.01124792243767313, 0.40377286606072005, 1711.0, 1711.0, 1711.0, 0, 1, 1, -360, 64.968 ],
[519, 253, 0, 0.013353485337561985, 0.141267767926912, 991.0, 991.0, 991.0, 0, 1, 1, -360, 40.394293146100004 ],
[382, 349, 0, 0.009091647380263157, 1.30547149138788, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 105.02671053600001 ],
[349, 351, 0, 0.0005858117819605263, 0.0841168325920224, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 6.76729770521 ],
[459, 465, 0, 1.578788789911157e-05, 0.00016702153987596, 991.0, 991.0, 991.0, 0, 1, 1, -360, 0.047758360894800005 ],
[549, 550, 0, 3.680432518409091e-05, 0.000389356391787088, 991.0, 991.0, 991.0, 0, 1, 1, -360, 0.111333083682 ],
[550, 551, 0, 5.755645674710744e-05, 0.0006088951287918401, 991.0, 991.0, 991.0, 0, 1, 1, -360, 0.17410828165999997 ],
[194, 195, 0, 1.7560672583171745e-05, 0.00252154053805592, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.202860889681 ],
[247, 248, 0, 2.1755213937811637e-05, 0.0031238355819477198, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.25131623141 ],
[2, 294, 0, 2.3531392658518004e-05, 0.003378877444715, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.271834647991 ],
[549, 551, 0, 9.265809538429751e-05, 0.0009802386406577602, 991.0, 991.0, 991.0, 0, 1, 1, -360, 0.28029073853799996 ],
[54, 365, 0, 2.573045189134349e-05, 0.00369464080598484, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.297238180249 ],
[131, 265, 0, 2.7616389041343487e-05, 0.00396544290388756, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.319024526206 ],
[91, 92, 0, 2.8945628197853184e-05, 0.0041563086239824396, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.33437989694200004 ],
[247, 249, 0, 3.098840072160664e-05, 0.00444963074500788, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.357978005136 ],
[186, 191, 0, 3.1591661821191135e-05, 0.00453625312865552, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.36494687735799997 ],
[129, 173, 0, 3.202671277479225e-05, 0.00459872218332188, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.369972585975 ],
[96, 202, 0, 3.5971247867797784e-05, 0.00516511877739804, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.415539855369 ],
[53, 320, 0, 3.784209581142659e-05, 0.00543375421308236, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.437151890814 ],
[24, 396, 0, 4.144748602818559e-05, 0.005951452925597279, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.47880135859800005 ],
[133, 156, 0, 4.431754564044322e-05, 0.0063635653674415605, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.511956287238 ],
[442, 452, 0, 4.483572190450138e-05, 0.006437970402313801, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.517942259441 ],
[445, 452, 0, 4.490753296371191e-05, 0.0064482817668697215, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.518771820797 ],
[247, 250, 0, 4.594910768732687e-05, 0.00659784169268824, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.530804092004 ],
[187, 195, 0, 4.755760376239612e-05, 0.006828805970367921, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.549385438663 ],
[216, 236, 0, 5.03353075283241e-05, 0.00722765701751724, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.581473472567 ],
[244, 389, 0, 5.1633313019736845e-05, 0.007414037889302401, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.596468032004 ],
[394, 406, 0, 5.6346419007686985e-05, 0.008090793734075721, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.650913832377 ],
[442, 445, 0, 6.388070648310249e-05, 0.00917264360085512, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.737949921293 ],
[442, 444, 0, 6.584378362735456e-05, 0.00945452224616264, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.760627388463 ],
[198, 472, 0, 8.37554210498615e-05, 0.0120264578966664, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.967542623967 ],
[464, 467, 0, 8.460287496468144e-05, 0.01214814397621276, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 0.977332411594 ],
[198, 251, 0, 8.83613182396122e-05, 0.012687819608389479, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 1.0207499483 ],
[112, 143, 0, 9.049653833033241e-05, 0.012994416294241841, 3423.0, 3423.0, 3423.0, 0, 1, 1, -360, 1.04541601079 ],
[2, 490, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[5, 491, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[10, 492, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[12, 493, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[13, 494, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[15, 495, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[18, 496, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[20, 497, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[22, 498, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[24, 499, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[26, 500, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[30, 501, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[32, 502, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[37, 503, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[42, 504, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[46, 505, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[52, 506, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[56, 507, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[61, 508, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[68, 509, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[69, 510, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[74, 511, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[78, 512, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[86, 513, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[87, 514, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[94, 515, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[95, 516, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[96, 517, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[99, 518, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[100, 519, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[104, 520, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[105, 521, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[106, 522, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[107, 523, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[117, 524, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[120, 525, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[123, 526, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[124, 527, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[125, 528, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[128, 529, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[129, 530, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[138, 531, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[143, 532, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[156, 533, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[157, 534, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[159, 535, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[160, 536, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[165, 537, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[184, 538, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[191, 539, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[195, 540, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[201, 541, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[220, 542, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[231, 543, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[232, 544, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[233, 545, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[236, 546, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[245, 547, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[246, 548, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[248, 549, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[249, 550, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[250, 551, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[259, 552, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[261, 553, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[262, 554, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[265, 555, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[270, 556, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[277, 557, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[279, 558, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[280, 559, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[290, 560, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[301, 561, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[305, 562, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[306, 563, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[310, 564, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[313, 565, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[315, 566, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[320, 567, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[330, 568, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[332, 569, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[334, 570, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[336, 571, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[349, 572, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[351, 573, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[358, 574, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[360, 575, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[380, 576, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[382, 577, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[383, 578, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[389, 579, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[401, 580, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[402, 581, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[409, 582, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[415, 583, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[444, 584, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ],
[452, 585, 0, 0.005, 0.0, 2000.0, 2000.0, 2000.0, 1.0, 0, 1, -360, 360 ]
])
ppc["gen_control"] = array([
[586, 1, 0.08658028904199107, 4.329014452099554, 0, 0, 0],
[589, 1, 0.010042676909098597, 0.5021338454549299, 0, 0, 0],
[590, 1, 0.012095775674984046, 0.6047887837492023, 0, 0, 0],
[593, 1, 0.0017666198683200384, 0.08833099341600192, 0, 0, 0],
[594, 1, 0.006047887837492023, 0.30239439187460115, 0, 0, 0],
[595, 1, 1.50560576164933, 75.2802880824665, 0, 0, 0],
[597, 1, 0.030239439187460113, 1.5119719593730057, 0, 0, 0],
[598, 1, 0.0038197186342054878, 0.1909859317102744, 0, 0, 0],
[599, 1, 0.0029602819415092537, 0.1480140970754627, 0, 0, 0],
[600, 1, 0.005379437076506062, 0.26897185382530314, 0, 0, 0],
[601, 1, 0.019576058000303126, 0.9788029000151565, 0, 0, 0],
[602, 1, 0.007830423200121252, 0.39152116000606263, 0, 0, 0],
[603, 1, 1.0997606567649967, 54.98803283824984, 0, 0, 0],
[605, 1, 0.0006366197723675814, 0.03183098861837907, 0, 0, 0],
[607, 1, 0.5729577951308232, 28.64788975654116, 0, 0, 0],
[608, 1, 0.0076394372684109755, 0.3819718634205488, 0, 0, 0],
[609, 1, 0.0057932399285449895, 0.2896619964272495, 0, 0, 0],
[610, 1, 0.019576058000303126, 0.9788029000151565, 0, 0, 0],
[611, 1, 0.0477464829275686, 2.3873241463784303, 0, 0, 0],
[612, 1, 0.00954929658551372, 0.477464829275686, 0, 0, 0],
[613, 1, 0.027056340325622208, 1.3528170162811104, 0, 0, 0],
[614, 1, 0.00954929658551372, 0.477464829275686, 0, 0, 0],
[616, 1, 0.0046154933496649645, 0.23077466748324824, 0, 0, 0],
[617, 1, 0.04360845440717932, 2.1804227203589663, 0, 0, 0],
[618, 1, 0.010631550198538607, 0.5315775099269304, 0, 0, 0],
[619, 1, 0.037560566569687294, 1.8780283284843649, 0, 0, 0],
[621, 1, 0.24350706293059987, 12.175353146529993, 0, 0, 0],
[623, 1, 0.2419155134996809, 12.095775674984045, 0, 0, 0],
[624, 1, 0.004297183463481174, 0.21485917317405873, 0, 0, 0],
[625, 1, 0.0035332397366400767, 0.17666198683200385, 0, 0, 0],
[628, 1, 0.14292113889652203, 7.1460569448261015, 0, 0, 0],
[629, 1, 0.023968734429639437, 1.198436721481972, 0, 0, 0],
[631, 1, 0.025401128917466494, 1.2700564458733248, 0, 0, 0],
[632, 1, 0.01435577586688896, 0.717788793344448, 0, 0, 0],
[637, 1, 0.017093240888069558, 0.854662044403478, 0, 0, 0],
[638, 1, 0.02048324117592693, 1.0241620587963465, 0, 0, 0],
[639, 1, 0.005029296201703893, 0.25146481008519467, 0, 0, 0],
[640, 1, 0.0038197186342054878, 0.1909859317102744, 0, 0, 0],
[641, 1, 0.0040107045659157625, 0.20053522829578813, 0, 0, 0],
[642, 1, 0.00919915571071155, 0.4599577855355775, 0, 0, 0],
[643, 1, 0.27279157245950864, 13.639578622975431, 0, 0, 0],
[646, 1, 0.03278591827693044, 1.6392959138465222, 0, 0, 0],
[647, 1, 0.00445633840657307, 0.2228169203286535, 0, 0, 0],
[650, 1, 0.4216014442504307, 21.080072212521536, 0, 0, 0],
[652, 1, 0.00746436683100989, 0.37321834155049455, 0, 0, 0],
[655, 1, 0.019576058000303126, 0.9788029000151565, 0, 0, 0],
[657, 1, 0.012095775674984046, 0.6047887837492023, 0, 0, 0],
[658, 1, 0.030239439187460113, 1.5119719593730057, 0, 0, 0],
[659, 1, 0.006684507609859604, 0.33422538049298023, 0, 0, 0],
[661, 1, 0.010408733278209955, 0.5204366639104978, 0, 0, 0],
[662, 1, 0.002928450952890874, 0.1464225476445437, 0, 0, 0],
[663, 1, 0.00238732414637843, 0.1193662073189215, 0, 0, 0],
[664, 1, 0.008085071109068283, 0.4042535554534142, 0, 0, 0],
[666, 1, 0.00919915571071155, 0.4599577855355775, 0, 0, 0],
[668, 1, 0.24382537281678363, 12.191268640839182, 0, 0, 0],
[670, 1, 0.0076394372684109755, 0.3819718634205488, 0, 0, 0],
[672, 1, 0.010536057232683471, 0.5268028616341736, 0, 0, 0],
[674, 1, 0.016806761990504147, 0.8403380995252074, 0, 0, 0],
[675, 1, 0.0033740847935481814, 0.16870423967740908, 0, 0, 0],
[676, 1, 0.11777465788800255, 5.888732894400127, 0, 0, 0],
[677, 1, 0.004265352474862795, 0.21326762374313976, 0, 0, 0],
[678, 1, 0.3237211542489151, 16.186057712445756, 0, 0, 0],
[679, 1, 0.2212253708977345, 11.061268544886726, 0, 0, 0],
[681, 1, 0.0063821132179850025, 0.31910566089925013, 0, 0, 0],
[683, 1, 0.008753521870054244, 0.4376760935027122, 0, 0, 0],
[687, 1, 0.42303383873825773, 21.151691936912886, 0, 0, 0],
[689, 1, 0.09867606471697511, 4.933803235848756, 0, 0, 0],
[691, 1, 0.008276057040778557, 0.4138028520389279, 0, 0, 0],
[692, 1, 0.040425355545341414, 2.0212677772670706, 0, 0, 0],
[693, 1, 0.06175211791965539, 3.0876058959827692, 0, 0, 0],
[694, 1, 0.005220282133414166, 0.2610141066707083, 0, 0, 0],
[695, 1, 0.004679155326901723, 0.23395776634508614, 0, 0, 0],
[696, 1, 0.22950142793851305, 11.475071396925653, 0, 0, 0],
[697, 1, 0.0036923946797319715, 0.1846197339865986, 0, 0, 0],
[698, 1, 0.0038197186342054878, 0.1909859317102744, 0, 0, 0],
[699, 1, 0.033295214094824506, 1.6647607047412254, 0, 0, 0],
[700, 1, 0.008594366926962348, 0.42971834634811745, 0, 0, 0],
[701, 1, 0.015024226627874922, 0.7512113313937461, 0, 0, 0],
[702, 1, 0.023363945645890238, 1.168197282294512, 0, 0, 0],
[703, 1, 0.03437746770784939, 1.7188733853924698, 0, 0, 0],
[704, 1, 0.16170142218136566, 8.085071109068283, 0, 0, 0],
[705, 1, 0.005411268065124442, 0.27056340325622213, 0, 0, 0],
[707, 1, 0.010822536130248884, 0.5411268065124443, 0, 0, 0],
[708, 1, 0.0024828171122335675, 0.12414085561167837, 0, 0, 0],
[711, 1, 0.056054370956965534, 2.802718547848277, 0, 0, 0],
[712, 1, 0.0036923946797319715, 0.1846197339865986, 0, 0, 0],
[713, 1, 0.004265352474862795, 0.21326762374313976, 0, | |
", "
except IndexError:
await ctx.send(
"Not enough values were provided to update the row in the table."
)
return
command += " WHERE " + category + "=?"
try:
filec.execute(command, (value,))
except Exception as e:
await ctx.send(
"Error while running sql command:\n```py\n"
+ "".join(traceback.format_exception(type(e), e, e.__traceback__))
+ "```"
)
await ctx.send(
"Your data failed to be updated into the table because of an error while inserting it. Please notify the owner of the bot about this issue."
)
else:
await ctx.send(
f"The data `{str(values)}` has been inserted into the table `{table}` (updated a row). Commit to database? (y/n)"
)
def check(m):
return (m.author.id == ctx.author.id) and (m.channel.id == ctx.channel.id)
try:
message = await self.bot.wait_for("message", check=check, timeout=30.0)
except asyncio.TimeoutError:
await ctx.send("Not committing to database.")
return
if message.content.lower().startswith("y"):
filedb.commit()
await ctx.send("Committed to database.")
else:
await ctx.send("Not committing to database.")
filedb.close()
else:
await ctx.send("Invalid `space` argument. Please use 'mem' or 'file'.")
@sql.command(name="all", aliases=["show"])
async def allt(self, ctx, space):
"""Returns all tables in either the bot's memory or your server's file. However, the list of tables in memory is taken from the memory settings, so you can't see other server's tables in memory.
Arguments:
Space: mem |or| file"""
if space == "mem":
self.memsetc.execute(
f"CREATE TABLE IF NOT EXISTS settings{str(ctx.guild.id)}(name STRING, edit INTEGER, view INTEGER)"
)
self.memsetc.execute(f"SELECT name FROM settings{str(ctx.guild.id)}")
tables = self.memsetc.fetchall()
if tableAvailable:
t = PrettyTable(["Table"])
for table_data in tables:
t.add_row(table_data)
await ctx.send(f"All tables in memory:```python\n{str(t)}```")
else:
await ctx.send(f"All tables in memory:```python\n{str(tables)}```")
elif space == "file":
filedb = sqlite3.connect(str(self.cog_path / f"{str(ctx.guild.id)}db.sqlite"))
filec = filedb.cursor()
filec.execute("SELECT name FROM sqlite_master WHERE type= 'table'")
tables = filec.fetchall()
if tableAvailable:
t = PrettyTable(["Table"])
for table_data in tables:
t.add_row(table_data)
await ctx.send(f"All tables in server file:```python\n{str(t)}```")
else:
await ctx.send(f"All tables in server file:```python\n{str(tables)}```")
filedb.close()
else:
await ctx.send("Invalid `space` argument. Please use 'mem' or 'file'.")
@sql.command()
async def insert(self, ctx, space, table, *values):
"""Inserts data into a table. Can only be run by users with the edit role that is specified in the table settings. Does sanitize data inputs.
Arguments:
Space: mem |or| file
Table: name of the table you wish to insert data into
Values: the data you wish to insert into the table, in column order"""
await ctx.send("Verifying authority...")
if space == "mem":
try:
self.memsetc.execute(
f"CREATE TABLE IF NOT EXISTS settings{str(ctx.guild.id)}(name TEXT, edit INTEGER, view INTEGER)"
)
self.memsetc.execute(f"SELECT * FROM settings{str(ctx.guild.id)}")
settings = self.memsetc.fetchall()
except Exception as e:
await ctx.send(
"Error while running sql command:\n```py\n"
+ "".join(traceback.format_exception(type(e), e, e.__traceback__))
+ "```"
)
await ctx.send(
"Your data failed to be inserted into the table because of an error while checking settings. Please notify the owner of the bot about this issues."
)
return
else:
table_settings = None
for entry in settings:
if entry[0] == table:
table_settings = entry
break
if table_settings == None:
return await ctx.send("That table does not exist.")
roles = [role.id for role in ctx.author.roles]
roles.append(0)
if int(table_settings[1]) in roles:
await ctx.send("Permissions confirmed. Inserting data...")
else:
await ctx.send(
"You do not have permission to insert data into this table. Please contact someone who has the appropriate edit role in order to insert data into this table."
)
return
command = "INSERT INTO " + table + " VALUES("
for x in range(len(values)):
command += "?"
if x != len(values) - 1:
command += ","
command += ")"
try:
self.memc.execute(command, values)
except Exception as e:
await ctx.send(
"Error while running sql command:\n```py\n"
+ "".join(traceback.format_exception(type(e), e, e.__traceback__))
+ "```"
)
await ctx.send(
"Your data failed to be inserted into the table because of an error while inserting it. Please notify the owner of the bot about this issue."
)
else:
await ctx.send(
f"The data `{str(values)}` has been inserted into the table `{table}`. Commit to database? (y/n)"
)
def check(m):
return (m.author.id == ctx.author.id) and (m.channel.id == ctx.channel.id)
try:
message = await self.bot.wait_for("message", check=check, timeout=30.0)
except asyncio.TimeoutError:
await ctx.send("Not committing to database.")
return
if message.content.lower().startswith("y"):
self.memdb.commit()
await ctx.send("Committed to database.")
else:
await ctx.send("Not committing to database.")
elif space == "file":
filedb = sqlite3.connect(str(self.cog_path / f"{str(ctx.guild.id)}db.sqlite"))
filec = filedb.cursor()
try:
self.filesetc.execute(
f"CREATE TABLE IF NOT EXISTS settings{str(ctx.guild.id)}(name TEXT, edit INTEGER, view INTEGER)"
)
self.filesetc.execute(f"SELECT * FROM settings{str(ctx.guild.id)}")
settings = self.filesetc.fetchall()
except Exception as e:
await ctx.send(
"Error while running sql command:\n```py\n"
+ "".join(traceback.format_exception(type(e), e, e.__traceback__))
+ "```"
)
await ctx.send(
"Your data failed to be inserted into the table because of an error while checking settings. Please notify the owner of the bot about this issues."
)
return
else:
table_settings = None
for entry in settings:
if entry[0] == table:
table_settings = entry
break
if table_settings == None:
return await ctx.send("That table does not exist.")
roles = [role.id for role in ctx.author.roles]
roles.append(0)
if int(table_settings[1]) in roles:
await ctx.send("Permissions confirmed. Inserting data...")
else:
await ctx.send(
"You do not have permission to insert data into this table. Please contact someone who has the appropriate edit role in order to insert data into this table."
)
return
command = "INSERT INTO " + table + " VALUES("
for x in range(len(values)):
command += "?"
if x != len(values) - 1:
command += ","
command += ")"
try:
filec.execute(command, values)
except Exception as e:
await ctx.send(
"Error while running sql command:\n```py\n"
+ "".join(traceback.format_exception(type(e), e, e.__traceback__))
+ "```"
)
await ctx.send(
"Your data failed to be inserted into the table because of an error while inserting it. Please notify the owner of the bot about this issue."
)
else:
await ctx.send(
f"The data `{str(values)}` has been inserted into the table `{table}`. Commit to database? (y/n)"
)
def check(m):
return (m.author.id == ctx.author.id) and (m.channel.id == ctx.channel.id)
try:
message = await self.bot.wait_for("message", check=check, timeout=30.0)
except asyncio.TimeoutError:
await ctx.send("Not committing to database.")
return
if message.content.lower().startswith("y"):
filedb.commit()
await ctx.send("Committed to database.")
else:
await ctx.send("Not committing to database.")
filedb.close()
else:
await ctx.send("Invalid `space` argument. Please use 'mem' or 'file'.")
@sql.command(name="view", aliases=["see", "select"])
async def select(self, ctx, space, table, category="", value=""):
"""Views data from a table, with a condition able to be specified. Only people who have the role to view the table can perform this command. Does sanitize data inputs.
If you wish to see a certain entry, you can specify the category and the value you want the category to be using the last two arguments.
Arguments:
Space: mem |or| file
Table: the table from which you'd like to read data
Category (optional): the name of the category of the value you are specifying
Value (optional): value of the column of the row of which you'd like to select data from"""
await ctx.send("Verifying authority...")
if space == "mem":
try:
self.memsetc.execute(
f"CREATE TABLE IF NOT EXISTS settings{str(ctx.guild.id)}(name TEXT, edit INTEGER, view INTEGER)"
)
self.memsetc.execute(f"SELECT * FROM settings{str(ctx.guild.id)}")
settings = self.memsetc.fetchall()
except Exception as e:
await ctx.send(
"Error while running sql command:\n```py\n"
+ "".join(traceback.format_exception(type(e), e, e.__traceback__))
+ "```"
)
await ctx.send(
"Your table failed to be deleted because of an error while checking settings. Please notify the owner of the bot about this issues."
)
return
else:
table_settings = None
for entry in settings:
if entry[0] == table:
table_settings = entry
break
if table_settings == None:
return await ctx.send("That table does not exist.")
roles = [role.id for role in ctx.author.roles]
roles.append(0)
if int(table_settings[2]) in roles:
await ctx.send("Permissions confirmed")
else:
await ctx.send(
"You do not have permission to view data from this table. Please contact someone who has the appropriate edit role in order to view this table."
)
return
if category == "":
command = "SELECT * FROM " + table
extra = False
else:
if value == "":
await ctx.send(
"You provided a column, but not a value for the column. Cannot perform sql command."
)
return
command = "SELECT * FROM " + table + " WHERE " + category + "=?"
extra = True
try:
if extra:
self.memc.execute(command, (value,))
else:
self.memc.execute(command)
except Exception as | |
import socket
import ssl
import os
import re
import gzip
import time
import tkinter
import tkinter.font
# DEFAULT_URL = "https://browser.engineering/http.html"
# DEFAULT_URL = "https://mozz.us/"
# DEFAULT_URL = "http://browser.engineering/redirect"
DEFAULT_URL = "file://" + os.path.abspath(os.path.join(os.path.dirname(__file__), "index.html"))
# DEFAULT_URL = "https://www.zggdwx.com/xiyou/1.html"
WIDTH, HEIGHT = 800, 600
HSTEP, VSTEP = 13, 18
SCROLL_STEP = 100
FONT_SIZE = 18
request_cache = {}
def request(url=DEFAULT_URL, request_headers=None, redirects=0):
    """Fetch *url* and return ``(headers, body)``.

    Supports data:, file:, http: and https: schemes, follows up to 5
    redirects, handles gzip (optionally chunked) response bodies, and
    caches responses carrying ``Cache-Control: max-age=N``.

    Raises RuntimeError when more than 5 redirects are chained.
    """
    if redirects > 5:
        raise RuntimeError("Max redirects exceeded")
    if url.startswith("data:"):
        content_type, body = url[len("data:"):].split(",", 1)
        headers = {"Content-Type": content_type}
        return headers, body
    cached_request = request_cache.get(url)
    # BUGFIX: a cache entry is usable while its expiry is still in the
    # future; the original compared with '<' and so served only *expired*
    # entries (and re-fetched fresh ones).
    if cached_request and cached_request['exp'] > time.time():
        return cached_request['headers'], cached_request['body']
    scheme, authority = url.split("://", 1)
    assert scheme in ["http", "https", "file"]
    host, path = authority.split("/", 1)
    path = "/" + path
    port = 80 if scheme == "http" else 443
    if ":" in host:
        host, port = host.split(":", 1)
        port = int(port)
    if scheme == "file":
        with open(path, "r") as fp:
            body = fp.read()
        return {}, body
    with socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=socket.IPPROTO_TCP) as s:
        s.connect((host, port))
        if scheme == "https":
            ctx = ssl.create_default_context()
            s = ctx.wrap_socket(s, server_hostname=host)
        # Copy so we never mutate a caller-supplied dict via setdefault.
        request_headers = dict(request_headers or {})
        request_headers.setdefault("Host", host)
        request_headers.setdefault("Connection", "close")
        request_headers.setdefault("User-Agent", "mozz-test")
        request_headers.setdefault("Accept-Encoding", "gzip")
        request_body = f"GET {path} HTTP/1.1\r\n"
        for key, value in request_headers.items():
            request_body += f"{key}: {value}\r\n"
        request_body += "\r\n"
        s.send(request_body.encode())
        response = s.makefile("rb", newline="\r\n")
        status_line = response.readline().decode("ascii")
        print(status_line)
        version, status, explanation = status_line.split(" ", 2)
        headers = {}
        while True:
            line = response.readline().decode("ascii")
            if line == "\r\n":
                break
            header, value = line.split(":", 1)
            headers[header.lower()] = value.strip()
        if status.startswith("3"):
            location = headers['location']
            if location.startswith("/"):
                redirect_url = f"{scheme}://{host}{location}"
            else:
                redirect_url = location
            return request(redirect_url, redirects=redirects + 1)
        assert status == "200", "{}: {}".format(status, explanation)
        body = response.read()
        if headers.get('content-encoding') == "gzip":
            if headers.get('transfer-encoding') == "chunked":
                # BUGFIX: chunked transfer coding splits the *compressed*
                # stream at arbitrary byte boundaries, so the chunks must be
                # reassembled first and decompressed as one gzip stream (the
                # original gzip.decompress'ed each chunk on its own).
                buffer = b""
                size_hex, body = body.split(b"\r\n", 1)
                size = int(size_hex, 16)
                while size != 0:
                    buffer += body[:size]
                    body = body[size + 2:]  # skip the chunk's trailing \r\n
                    size_hex, body = body.split(b"\r\n", 1)
                    size = int(size_hex, 16)
                body = gzip.decompress(buffer)
            else:
                body = gzip.decompress(body)
        body = body.decode("utf-8")
        cache_control = headers.get('cache-control', '')
        if cache_control.startswith("max-age="):
            max_age = int(cache_control[len("max-age="):])
            request_cache[url] = {"exp": time.time() + max_age, "headers": headers, "body": body}
        return headers, body
class Text:
    """A DOM text node.

    Stores the raw source text plus a cleaned copy with the small set of
    HTML entities this browser understands decoded.
    """

    def __init__(self, text, parent):
        self.raw_text = text
        self.text = self.clean_text(text)
        self.children = []  # always empty; lets layout recurse uniformly
        self.parent = parent

    def __repr__(self):
        return repr(self.raw_text)

    def clean_text(self, text):
        """Decode the supported HTML entities in *text*.

        BUGFIX (reconstruction): the entity names in the original
        substitutions were destroyed by an HTML-unescaping mangle of this
        source (they read like 're.sub("<", "<", ...)', a no-op).
        Restored to decode &lt; &gt; &ndash; &copy; and — last, so the
        replacement cannot manufacture new entities — &amp;.
        """
        for entity, char in (
            ("&lt;", "<"),
            ("&gt;", ">"),
            ("&ndash;", "-"),
            ("&copy;", "\u00a9"),
            ("&amp;", "&"),
        ):
            text = text.replace(entity, char)
        return text
class Element:
    """A DOM element node: tag name, attribute dict, and tree links."""

    def __init__(self, tag, attributes, parent):
        self.tag = tag
        self.attributes = attributes
        self.parent = parent
        self.children = []

    def __repr__(self):
        return "<" + self.tag + ">"
class HTMLParser:
    """A forgiving HTML tree builder.

    Scans the markup character by character, producing a tree of Text and
    Element nodes and inserting the implicit html/head/body tags that
    real-world markup omits.
    """

    # Tags that never get a closing tag.
    SELF_CLOSING_TAGS = [
        "area", "base", "br", "col", "embed", "hr", "img", "input",
        "link", "meta", "param", "source", "track", "wbr",
    ]
    # Tags that belong inside <head>; used when choosing implicit tags.
    HEAD_TAGS = [
        "base", "basefont", "bgsound", "noscript",
        "link", "meta", "title", "style", "script",
    ]

    def __init__(self, body):
        self.body = body
        self.unfinished = []  # stack of open (not-yet-closed) Element nodes

    def parse(self):
        """Scan self.body, dispatching text runs and tags; return the root."""
        text = ""
        in_tag = False
        for c in self.body:
            if c == "<":
                in_tag = True
                if text:
                    self.add_text(text)
                text = ""
            elif c == ">":
                in_tag = False
                self.add_tag(text)
                text = ""
            else:
                text += c
        if not in_tag and text:
            self.add_text(text)
        return self.finish()

    def add_text(self, text):
        """Attach a text node to the innermost open element; drop whitespace."""
        if text.isspace():
            return
        self.implicit_tags(None)
        parent = self.unfinished[-1]
        node = Text(text, parent)
        parent.children.append(node)

    def add_tag(self, tag):
        """Handle one tag token: opening, closing, or self-closing."""
        tag, attributes = self.get_attributes(tag)
        if tag.startswith("!"):
            return  # doctype / comment
        self.implicit_tags(tag)
        if tag.startswith("/"):
            if len(self.unfinished) == 1:
                return  # never close the root
            node = self.unfinished.pop()
            parent = self.unfinished[-1]
            parent.children.append(node)
        elif tag in self.SELF_CLOSING_TAGS:
            parent = self.unfinished[-1]
            node = Element(tag, attributes, parent)
            parent.children.append(node)
        else:
            parent = self.unfinished[-1] if self.unfinished else None
            node = Element(tag, attributes, parent)
            self.unfinished.append(node)

    def implicit_tags(self, tag):
        """Insert the html/head/body tags that HTML lets authors omit."""
        while True:
            open_tags = [node.tag for node in self.unfinished]
            if open_tags == [] and tag != "html":
                self.add_tag("html")
            elif open_tags == ["html"] and tag not in ["head", "body", "/html"]:
                if tag in self.HEAD_TAGS:
                    self.add_tag("head")
                else:
                    self.add_tag("body")
            elif open_tags == ["html", "head"] and tag not in ["/head"] + self.HEAD_TAGS:
                self.add_tag("/head")
            else:
                break

    def get_attributes(self, text):
        """Split a tag token into (tagname, {attr: value}).

        BUGFIX: quotes are stripped only when the value both starts and
        ends with the same quote character, and the empty quoted value
        ''/"" (length 2) is stripped too. The original required
        len(value) > 2 and never checked the closing quote, so
        value="" kept its literal quote characters.
        """
        parts = text.split()
        tag = parts[0].lower()
        attributes = {}
        for pair in parts[1:]:
            if "=" in pair:
                key, value = pair.split("=", 1)
                if len(value) >= 2 and value[0] in ("'", '"') and value[-1] == value[0]:
                    value = value[1:-1]
                attributes[key.lower()] = value
            else:
                attributes[pair.lower()] = ""
        return tag, attributes

    def finish(self):
        """Close any still-open elements and return the root of the tree."""
        if len(self.unfinished) == 0:
            self.add_tag("html")
        while len(self.unfinished) > 1:
            node = self.unfinished.pop()
            parent = self.unfinished[-1]
            parent.children.append(node)
        return self.unfinished.pop()
def print_tree(node, indent=0):
    """Pretty-print *node* and its descendants, two extra spaces per level."""
    stack = [(node, indent)]
    while stack:
        current, depth = stack.pop()
        print(" " * depth, current)
        # Push children in reverse so they are printed in document order.
        stack.extend(reversed([(child, depth + 2) for child in current.children]))
def transform(body):
    """Escape markup characters in *body* and wrap it in html/body tags.

    BUGFIX: the closing tags were emitted in the wrong order
    ('</html></body>'); they must nest as '</body></html>'.
    BUGFIX (reconstruction): the escape targets were destroyed by an
    HTML-unescaping mangle of this source ('re.sub("<", "<", ...)');
    restored so '<' and '>' are escaped to &lt; / &gt;.
    """
    body = re.sub("<", "&lt;", body)
    body = re.sub(">", "&gt;", body)
    return "<html><body>" + body + "</body></html>"
FONTS = {}
def get_font(size, weight, slant, family=None):
    """Return a memoized tkinter font, creating it on first request."""
    key = (size, weight, slant, family)
    cached = FONTS.get(key)
    if cached is None:
        cached = tkinter.font.Font(size=size, weight=weight, slant=slant, family=family)
        FONTS[key] = cached
    return cached
class Layout:
"""Word-wrapping layout engine.

Walks a parsed node tree and converts the text inside <body> into a
display list of (x, y, word, font) tuples for the canvas to draw.
Style state (font weight/size, alignment, pre/abbr modes) is mutated
as tags open and close during the tree walk.
"""
def __init__(self, nodes, width=WIDTH, size=16):
self.display_list = []
# Current pen position.
self.cursor_x = HSTEP
self.cursor_y = HSTEP
# Current text-style state, mutated by open_tag/close_tag below.
self.weight = "normal"
self.style = "roman"
self.width = width
self.size = size
self.halign = "normal"
self.valign = "normal"
self.abbr = False
self.pre = False
self.in_body = False
# Words buffered for the line being assembled; committed by flush().
self.line = []
self.recurse(nodes)
self.flush()
def open_tag(self, tag):
# Adjust style state for an opening tag.
if tag == "i":
self.style = "italic"
elif tag == "b":
self.weight = "bold"
elif tag == "small":
self.size -= 2
elif tag == "big":
self.size += 4
elif tag == "br":
self.flush()
elif tag == "p":
self.flush()
elif tag == "h1":
self.flush()
self.size += 4
self.halign = "center"
elif tag == "sup":
self.size -= 8
self.valign = "top"
elif tag == "abbr":
self.abbr = True
elif tag == "pre":
self.pre = True
elif tag == "body":
self.in_body = True
def close_tag(self, tag):
# Undo the corresponding open_tag adjustment.
if tag == "i":
self.style = "roman"
elif tag == "b":
self.weight = "normal"
elif tag == "small":
self.size += 2
elif tag == "big":
self.size -= 4
elif tag == "p":
self.flush()
self.cursor_y += VSTEP
elif tag == "h1":
self.flush()
self.size -= 4
self.halign = "normal"
elif tag == "sup":
self.size += 8
self.valign = "normal"
elif tag == "abbr":
self.abbr = False
elif tag == "pre":
self.pre = False
elif tag == "body":
self.in_body = False
def recurse(self, tree):
# Depth-first walk; only Text nodes inside <body> are laid out.
if isinstance(tree, Text):
if self.in_body:
if self.pre:
self.pre_text(tree)
elif self.abbr:
self.abbr_text(tree)
else:
self.text(tree)
else:
self.open_tag(tree.tag)
for child in tree.children:
self.recurse(child)
self.close_tag(tree.tag)
def flush_abbr(self, buffer):
# Emit one case-run of an <abbr> word: lowercase runs render as
# smaller bold capitals, other runs in the normal font.
normal_font = get_font(self.size, self.weight, self.style)
abbr_font = get_font(int(self.size * 0.7), "bold", self.style)
if buffer.islower():
buffer = buffer.upper()
font = abbr_font
else:
font = normal_font
w = font.measure(buffer)
if self.cursor_x + w > self.width - HSTEP:
self.flush()
self.line.append((self.cursor_x, buffer, font, self.valign))
self.cursor_x += w
def abbr_text(self, tok):
# Split each word into runs of equal case and emit each run.
# NOTE(review): ''.islower() is False, so a word starting lowercase
# flushes an empty first buffer via the c.islower() comparison —
# flush_abbr handles the empty string, but confirm this is intended.
font = get_font(self.size, self.weight, self.style)
for word in tok.text.split():
buffer = ""
for c in word:
if c.islower() == buffer.islower():
buffer += c
else:
self.flush_abbr(buffer)
buffer = c
if buffer:
self.flush_abbr(buffer)
self.cursor_x += font.measure(" ")
def pre_text(self, tok):
# Preformatted text: monospace font, honour source line breaks.
font = get_font(self.size, self.weight, self.style, "Courier")
for line in tok.text.splitlines(keepends=True):
text = line.rstrip()
w = font.measure(text)
self.line.append((self.cursor_x, text, font, "normal"))
if line.endswith("\n"):
self.flush()
else:
self.cursor_x += w + font.measure(" ")
def text(self, tok):
# Normal flowed text: wrap word-by-word at the right margin.
font = get_font(self.size, self.weight, self.style)
for word in tok.text.split():
w = font.measure(word)
if self.cursor_x + w > self.width - HSTEP:
self.flush()
self.line.append((self.cursor_x, word, font, self.valign))
self.cursor_x += w + font.measure(" ")
def flush(self):
# Commit the buffered line: align words on a common baseline,
# apply horizontal centering, then advance below the deepest descender.
if not self.line:
return
metrics = [font.metrics() for x, word, font, valign in self.line]
max_ascent = max([metric["ascent"] for metric in metrics])
baseline = self.cursor_y + 1.25 * max_ascent
if self.halign == "center":
x_offset = max((0, (self.width - HSTEP - self.cursor_x) / 2))
else:
x_offset = 0
for x, word, font, valign in self.line:
if valign == "normal":
y = baseline - font.metrics('ascent')
else:
# "top"-aligned (superscript) words hang from the line top.
y = baseline - (max_ascent / 1.25)
self.display_list.append((x + x_offset, y, word, font))
self.cursor_x = HSTEP
self.line = []
max_descent = max([metric['descent'] for metric in metrics])
self.cursor_y = baseline + 1.25 * max_descent
class Browser:
def __init__(self):
# Build the top-level window and wire up input handlers.
# NOTE(review): the handler methods referenced below (scrolldown,
# scrollup, scroll, configure, fontup, fontdown) are defined outside
# this chunk — confirm they all exist on the class.
self.window = tkinter.Tk()
self.window.bind("<Down>", self.scrolldown)
self.window.bind("<Up>", self.scrollup)
# NOTE(review): "self.scroll" is bound as a handler here, then the
# assignment "self.scroll = 0" below shadows the method with the
# scroll offset. The callback keeps working only because bind()
# captured the bound method first — a fragile name collision.
self.window.bind("<MouseWheel>", self.scroll)
self.window.bind("<Configure>", self.configure)
self.window.bind("+", self.fontup)
self.window.bind("-", self.fontdown)
self.width = WIDTH
self.height = HEIGHT
self.canvas = tkinter.Canvas(self.window, width=self.width, height=self.height)
self.canvas.pack(expand=True, fill=tkinter.BOTH)
self.display_list = []  # (x, y, word, font) tuples produced by Layout
self.nodes = []  # parsed DOM tree
self.scroll = 0  # vertical scroll offset in pixels (shadows the bound method)
self.font_size = FONT_SIZE
def draw(self):
self.canvas.delete("all")
for x, y, c, f in self.display_list:
if y > self.scroll + self.height:
continue
if y + VSTEP < self.scroll:
continue
self.canvas.create_text(x, | |
passage = passage[:truncate_num]
if len(passage) > max_x2_len:
max_x2_len = len(passage)
x2.append(passage)
return x1, x2, candidates, y_list, max_x1_len, max_x2_len, max_a_len
def get_eval_concat_samples_from_one_list(self, inst_idx, truncate_num=0):
"""Build evaluation samples for one stored pair in BERT 'concat' format.

Returns (concat_x, candidates, y_list, max_x_len, max_a_len): the
single [CLS] input1 [SEP] question, the shuffled action candidates
(each wrapped in [CLS]/[SEP]), their 1/0 labels in matching order,
and the maximum question / action lengths seen.
"""
concat_x = []
positives = []  # NOTE(review): never used below — appears vestigial
candidates = []
max_x_len = -1
max_a_len = -1
pair_dict_ = self.pairs[inst_idx]
cand_list = []
y_list = []
# Key an id-list by its space-joined string form for dedup lookups.
def _get_key_from_list(input_list):
tmp_list = [str(x) for x in input_list]
return ' '.join(tmp_list)
positive_dict = {}
# Positive actions get label 1 and are remembered so duplicates among
# the negative candidates can be skipped.
for action in pair_dict_['positives']:
action = [self.CLS_TOKEN] + action + [self.SEP_TOKEN]
if len(action) > max_a_len:
max_a_len = len(action)
cand_list.append(action)
y_list.append(1)
positive_dict[_get_key_from_list(action)] = 1
# Remaining candidates get label 0; any that match a positive are skipped.
for action in pair_dict_["candidates"]:
action = [self.CLS_TOKEN] + action + [self.SEP_TOKEN]
key = _get_key_from_list(action)
if key in positive_dict:
continue
if len(action) > max_a_len:
max_a_len = len(action)
cand_list.append(action)
y_list.append(0)
# Shuffle candidates and labels together so position carries no signal.
zip_list = list(zip(cand_list, y_list))
random.shuffle(zip_list)
cand_list = [x[0] for x in zip_list]
y_list = [x[1] for x in zip_list]
candidates.append(cand_list)
# question = [102] + pair_dict_['input1'] + [5] + pair_dict_['input2']
# question = [self.CLS_TOKEN] + pair_dict_['input1'] + [self.SEP_TOKEN] + pair_dict_['input2']
question = [self.CLS_TOKEN] + pair_dict_['input1'] + [self.SEP_TOKEN]
if truncate_num > 0:
question = question[:truncate_num]
if len(question) > max_x_len:
max_x_len = len(question)
concat_x.append(question)
return concat_x, candidates, y_list, max_x_len, max_a_len
def print_info(self):
    """Print counts of positive actions and total action candidates."""
    for template, count in (('Number of positive actions: {}', self.num_positive),
                            ('Number of total action candidates: {}', self.num_total)):
        print(template.format(count))
# In[ ]:
from pair_dataset import _preprocess_action, _match_action, _process_instance, _recover_root_template_action
class BERTStateState2ActionDataset(TextDataset):
def __init__(self, pretrain_path, data_dir, rom_dir, game2rom,
train_games=None, dev_games=None, setting='same_games',
num_negative=20, truncate_num=384, freq_threshold=2):
"""Dataset of (state, next-state) -> action pairs numeralized for BERT.

Args:
    pretrain_path: directory holding the BERT checkpoint and its vocab.txt.
    data_dir: directory with per-game '<game>.ssa.wt_traj.tok' files.
    rom_dir: directory with game ROMs; game2rom maps game name -> rom file.
    train_games / dev_games: game lists used according to *setting*.
    setting: 'same_games' (split each trajectory) or 'transfer'
        (disjoint train/dev games).
    num_negative: negatives sampled per training instance.
    truncate_num: maximum sequence length kept.
    freq_threshold: stored; not used within this chunk.
"""
super(BERTStateState2ActionDataset, self).__init__(data_dir)
self.pretrain_path = pretrain_path
self.num_negative = num_negative
self.truncate_num = truncate_num
self.freq_threshold = freq_threshold
# WordPiece tokenizer plus the raw vocab for id <-> token lookups.
self.tokenizer = BertTokenizer.from_pretrained(self.pretrain_path)
self.word_vocab, self.idx_2_word = self.load_bert_vocab()
# self.word_vocab = {'<PAD>':0, '<START>':1, '<END>':2, '<UNK>':3, '<ANSWER>':4, '<SPLIT>':5, '|':6}
self.rom_dir = rom_dir
self.game2rom = game2rom
self.setting = setting
self.train_games = train_games
self.dev_games = dev_games
self.load_dataset()
def load_bert_vocab(self):
    """Load the BERT WordPiece vocabulary from ``<pretrain_path>/vocab.txt``.

    Returns:
        (word_vocab, idx_2_word): token -> index dict in file order, and
        its inverse index -> token dict.
    """
    word_vocab = {}
    # BUGFIX: close the vocab file deterministically — the original left
    # the handle opened by codecs.open dangling.
    with codecs.open(os.path.join(self.pretrain_path, 'vocab.txt'), 'r', encoding='utf8') as filein:
        for line in filein:
            word = line.strip()
            word_vocab[word] = len(word_vocab)
    idx_2_word = {value: key for key, value in word_vocab.items()}
    return word_vocab, idx_2_word
def load_dataset(self):
    """Populate self.data_sets according to the configured setting."""
    self.data_sets = {}
    if self.setting == 'same_games':
        # One pool of games; each trajectory is split into train/dev/test.
        self.data_sets = self._load_pair_data_and_split(self.train_games)
    elif self.setting == 'transfer':
        # Disjoint game sets for training and evaluation.
        self.data_sets = {
            'train': self._load_pair_data(self.train_games),
            'dev': self._load_pair_data(self.dev_games),
        }
    # Numeralize every split with the shared vocabulary.
    self._build_vocab()
def _load_pair_data_and_split(self, games, neg_removal=True):
    """
    Load (state, next-state, action) pairs for *games* and split 60/20/20.

    Each walkthrough trajectory is split positionally: the first 60% of
    steps go to train, the next 20% to dev, the remainder to test.
    Returns a dict of 'train'/'dev'/'test' BERTPair2SeqSet instances.
    """
    datasets = {
        'train': BERTPair2SeqSet(),
        'dev': BERTPair2SeqSet(),
        'test': BERTPair2SeqSet(),
    }
    for game_name in games:
        print('# LOADING game data {} ...'.format(game_name))
        rom_path = os.path.join(self.rom_dir, self.game2rom[game_name])
        bindings = load_bindings(rom_path)
        act_par = TemplateActionParser(bindings)
        # BUGFIX: close the trajectory file — the original leaked one open
        # handle per game.
        with open(os.path.join(self.data_dir, '{}.ssa.wt_traj.tok'.format(game_name)), "r") as f:
            instances = f.readlines()
        instances = [_process_instance(instance.lower()) for instance in instances]
        for idx, instance in enumerate(instances):
            if idx == len(instances) - 1:
                continue  # the last step has no successor state
            input1 = instance['observations']['obs']
            input2 = instances[idx + 1]['observations']['obs']
            action = _preprocess_action(instances[idx + 1]['observations']['action'])
            if action == '':
                continue
            # Canonicalize the action by parsing it into a template and
            # regenerating it from the root template, when possible.
            template = act_par.parse_action(action)
            if template is None:
                print('unmatched action: \'{}\''.format(action))
            elif template[0] not in act_par.template2template:
                if template[0] not in act_par.add_template2template:
                    print('cannot find root: {}'.format(action))
                else:
                    action = _recover_root_template_action(template, act_par.add_template2template[template[0]])
            else:
                action = _recover_root_template_action(template, act_par.template2template[template[0]])
            positives = []
            candidates = []
            all_actions = instance['valid_actions']
            if isinstance(all_actions[0], dict):
                # A single flat action group; wrap it so the loop below
                # always sees a list of groups.
                all_actions = [all_actions]
            for action_group in all_actions:
                if _match_action(action_group, action):
                    for a in action_group:
                        positives.append(a['a'])
                else:
                    for a in action_group:
                        candidates.append(a['a'])
            if len(candidates) == 0:
                continue
            if len(positives) == 0:
                # The walkthrough action was missing from the valid list.
                positives.append(action)
            # Positional 60/20/20 split along the trajectory.
            if idx / len(instances) < 0.6:
                datasets['train'].add_one(input1, input2, positives, candidates)
            elif idx / len(instances) < 0.8:
                datasets['dev'].add_one(input1, input2, positives, candidates)
            else:
                datasets['test'].add_one(input1, input2, positives, candidates)
    for k, data_set in datasets.items():
        print('# {} set'.format(k))
        data_set.print_info()
    return datasets
def _load_pair_data(self, games, neg_removal=True):
    """
    Load walkthrough (state, next-state) -> action pairs for *games*.

    Inputs:
        games -- iterable of game names to load.
    Outputs:
        data_set -- a BERTPair2SeqSet with one entry per trajectory step.
    """
    data_set = BERTPair2SeqSet()

    # NOTE(review): these helpers duplicate the ones imported from
    # pair_dataset at module level; kept local to preserve behavior —
    # confirm the two sets are identical before deduplicating.

    def _preprocess_action(action):
        # Expand one/two-letter compass and vertical abbreviations.
        action = action.lower()
        abbreviations = {
            'n': 'north', 's': 'south', 'e': 'east', 'w': 'west',
            'se': 'southeast', 'sw': 'southwest',
            'ne': 'northeast', 'nw': 'northwest',
            'u': 'up', 'd': 'down',
        }
        return abbreviations.get(action, action)

    def _match_action(action_group, target):
        # True when target equals any surface form in the group.
        for action in action_group:
            if target == action['a']:
                return True
        return False

    def _process_instance(instance):
        # Parse a JSON line and split the packed observation field.
        new_inst = json.loads(instance)
        info = new_inst['observations'].split('|')
        new_inst['observations'] = {'origin': new_inst['observations'], 'obs': ' | '.join(info[:3]), 'action': info[3]}
        return new_inst

    def _recover_root_template_action(template, root_template):
        # Substitute the parsed objects back into the root template's OBJ slots.
        t_tokens = root_template.split()
        count = 1
        for tid, t_token in enumerate(t_tokens):
            if t_token == 'OBJ':
                t_tokens[tid] = template[count]
                count += 1
        return ' '.join(t_tokens)

    for game_name in games:
        # NOTE(review): unlike _load_pair_data_and_split, this path
        # hard-codes the rom directory instead of using self.rom_dir /
        # self.game2rom — looks inconsistent; confirm before changing.
        rom_path = "../roms/jericho-game-suite/{}.z5".format(game_name)
        bindings = load_bindings(rom_path)
        act_par = TemplateActionParser(bindings)
        # BUGFIX: close the trajectory file — the original leaked the handle.
        with open(os.path.join(self.data_dir, '{}.ssa.wt_traj.tok'.format(game_name)), "r") as f:
            instances = f.readlines()
        instances = [_process_instance(instance.lower()) for instance in instances]
        for idx, instance in enumerate(instances):
            if idx == len(instances) - 1:
                continue  # the last step has no successor state
            input1 = instance['observations']['obs']
            input2 = instances[idx + 1]['observations']['obs']
            action = _preprocess_action(instances[idx + 1]['observations']['action'])
            # Canonicalize the action through its root template when possible.
            template = act_par.parse_action(action)
            if template is None:
                print('unmatched action: {}'.format(action))
            elif template[0] not in act_par.template2template:
                if template[0] not in act_par.add_template2template:
                    print('cannot find root: {}'.format(action))
                else:
                    action = _recover_root_template_action(template, act_par.add_template2template[template[0]])
            else:
                action = _recover_root_template_action(template, act_par.template2template[template[0]])
            positives = []
            candidates = []
            for action_group in instance['valid_actions']:
                if _match_action(action_group, action):
                    for a in action_group:
                        positives.append(a['a'])
                else:
                    for a in action_group:
                        candidates.append(a['a'])
            if len(positives) == 0:
                # The walkthrough action was missing from the valid list.
                positives.append(action)
            data_set.add_one(input1, input2, positives, candidates)
    data_set.print_info()
    return data_set
def _numeralize_pairs(self, pairs):
'''
numeralize passages in training pair lists
'''
ret_pair_list = []
for pair_dict_ in pairs:
new_pair_dict_ = {}
for k, v in pair_dict_.items():
if k == 'input1' or k == 'input2':
new_pair_dict_[k] = self.tokenizer.convert_tokens_to_ids(v.split())
elif k == 'positives' or k == 'candidates':
new_pair_dict_[k] = []
for seq in v:
new_pair_dict_[k].append(self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(seq)))
ret_pair_list.append(new_pair_dict_)
return ret_pair_list
def _build_vocab(self):
"""
Filter the vocabulary and numeralization
"""
for data_id, data_set in self.data_sets.items():
data_set.pairs = self._numeralize_pairs(data_set.get_pairs())
print('size of the final vocabulary:', len(self.word_vocab))
def get_train_batch(self, batch_size, num_negative=-1, inst_format='co_match'):
    """
    Sample a random training batch.

    Inputs:
        batch_size: number of instances to draw (with replacement).
        num_negative: negatives per instance; -1 means self.num_negative.
        inst_format: 'co_match' or 'concat' — selects the batch builder.
    Outputs: whatever get_batch / get_batch_concat returns for the
        sampled indices.
    """
    split = 'train'
    n_examples = self.data_sets[split].size()
    batch_idx = np.random.randint(0, n_examples, size=batch_size)
    if num_negative < 0:
        num_negative = self.num_negative
    if inst_format == 'co_match':
        return self.get_batch(split, batch_idx, num_negative)
    elif inst_format == 'concat':
        return self.get_batch_concat(split, batch_idx, num_negative)
def get_batch(self, set_id, batch_idx, num_negative=-1):
"""
randomly select a batch from a dataset
Inputs:
batch_idx:
Outputs (all numpy arrays are sorted according to q_length):
q_mat -- numpy array in shape of (batch_size, max length of the sequence in the batch)
p_mat -- numpy array in shape of (batch_size, max length of the sequence in the batch)
y_vec -- numpy array of binary labels, numpy array in shape of (batch_size,)
q_mask -- numpy array of masks
p_mask -- numpy array of masks
p_sort_idx -- sorted idx according to p_length
revert_p_idx -- revert idx from p_mat[p_sort_idx] to p_mat
"""
if num_negative < 0:
num_negative = self.num_negative
data_set = self.data_sets[set_id]
x1, x2, positives, candidates, max_x1_len, max_x2_len, max_a_len = data_set.get_samples_from_one_list(batch_idx,
num_negative=num_negative,
truncate_num=self.truncate_num)
# qs_, ps_, ys_, max_q_len_, max_p_len_ = data_set.get_samples_from_one_list(batch_idx, self.truncate_num)
x1_masks_ = []
x2_masks_ = []
a_masks_ = | |
<filename>pygcam/mcs/XMLResultFile.py
# Created on 5/11/15
#
# Copyright (c) 2015-2017. The Regents of the University of California (Regents).
# See the file COPYRIGHT.txt for details.
import os
from collections import OrderedDict, defaultdict
from datetime import datetime
import pandas as pd
from ..config import getParam
from ..log import getLogger
from ..XMLFile import XMLFile
from .error import PygcamMcsUserError, PygcamMcsSystemError, FileMissingError
from .Database import getDatabase
from .XML import XMLWrapper, findAndSave, getBooleanXML
_logger = getLogger(__name__)
RESULT_TYPE_DIFF = 'diff'
RESULT_TYPE_SCENARIO = 'scenario'
DEFAULT_RESULT_TYPE = RESULT_TYPE_SCENARIO
QUERY_OUTPUT_DIR = 'queryResults'
RESULT_ELT_NAME = 'Result'
FILE_ELT_NAME = 'File'
CONSTRAINT_ELT_NAME = 'Constraint'
COLUMN_ELT_NAME = 'Column'
VALUE_ELT_NAME = 'Value'
class XMLConstraint(XMLWrapper):
    """A single <Constraint> element: a column, an operator, and a value."""

    # Accepted spellings for each operator kind.
    equal = ['==', '=', 'equal', 'eq']
    notEqual = ['!=', '<>', 'notEqual', 'not equal', 'neq']
    strMatch = ['startswith', 'endswith', 'contains']

    def __init__(self, element):
        super(XMLConstraint, self).__init__(element)
        self.column = element.get('column')
        self.op = element.get('op')
        self.value = element.get('value')
        if self.op:
            if self.op not in self.equal + self.notEqual + self.strMatch:
                raise PygcamMcsUserError('Unknown operator in constraint: %s' % self.op)
            if not self.value:
                raise PygcamMcsUserError('Constraint with operator "%s" is missing a value' % self.op)

    def asString(self):
        """Render as a DataFrame.query() clause, or None for string-match ops."""
        if self.op in self.equal:
            operator = '=='
        elif self.op in self.notEqual:
            operator = '!='
        else:
            # strMatch ops are handled outside of df.query()
            operator = None
        if operator is None:
            return None
        return "%s %s %r" % (self.column, operator, self.value)

    def stringMatch(self, df):
        """Apply a startswith/endswith/contains filter to *df*; None if N/A."""
        if self.op not in self.strMatch:
            return None
        column = df[self.column]
        matchers = {
            'startswith': column.str.startswith,   # simple string only
            'endswith': column.str.endswith,       # simple string only
            'contains': column.str.contains,       # string or regex (quote regex chars if literal)
        }
        mask = matchers[self.op](self.value)
        return df[mask]
class XMLColumn(XMLWrapper):
# Thin wrapper over a <Column> element; all behavior (e.g. getName,
# used by XMLResult.columnName) is inherited from XMLWrapper.
def __init__(self, element):
super(XMLColumn, self).__init__(element)
class XMLResult(XMLWrapper):
'''
Represents a single Result (model output) from the results.xml file.
'''
def __init__(self, element):
super(XMLResult, self).__init__(element)
# Attributes of the <Result> element; unit defaults to empty string.
self.name = element.get('name')
self.type = element.get('type', DEFAULT_RESULT_TYPE)
self.desc = element.get('desc')
self.unit = element.get('unit', '') # default is no unit
self.cumulative = getBooleanXML(element.get('cumulative', 0))
self.percentage = getBooleanXML(element.get('percentage', 0))
self.queryFile = self._getPath(FILE_ELT_NAME)
if self.percentage:
# Percentage results are relative, so force the 'diff' type.
self.type = RESULT_TYPE_DIFF # makes no sense otherwise
# Optional <Column> child selecting a single column of the query result.
col = self.element.find(COLUMN_ELT_NAME)
self.column = XMLColumn(col) if col is not None else None
# Create the "where" clause to use with a DataFrame.query() on the results we'll read in
self.constraints = [XMLConstraint(item) for item in self.element.iterfind(CONSTRAINT_ELT_NAME)]
constraintStrings = list(filter(None, map(XMLConstraint.asString, self.constraints)))
self.whereClause = ' and '.join(constraintStrings)
# String-match constraints cannot go in the query string; kept separately.
self.matchConstraints = list(filter(lambda constraint: constraint.op in XMLConstraint.strMatch, self.constraints))
def stringMatch(self, df):
"""
Handle any string matching constraints since these can't be handled in a df.query()
"""
for c in self.matchConstraints:
df = c.stringMatch(df)
return df
def isScalar(self):
# A result reduces to a single number when a column is selected
# or when values are accumulated over time.
return self.column is not None or self.cumulative
def _getPath(self, eltName):
'Get a single filename from the named element'
objs = self.element.findall(eltName)
filename = objs[0].get('name')
if os.path.isabs(filename):
raise PygcamMcsUserError("For %s named %s: path (%s) must be relative" % (eltName, self.name, filename))
return filename
def csvPathname(self, scenario, baseline=None, outputDir='.', type=RESULT_TYPE_SCENARIO):
"""
Compute the pathname of a .csv file from an outputDir,
scenario name, and optional baseline name.
"""
# Output files are stored in the output dir with same name as query file but with 'csv' extension.
basename = os.path.basename(self.queryFile)
mainPart, extension = os.path.splitext(basename)
# Diff results embed both scenario and baseline names in the filename.
middle = scenario if type == RESULT_TYPE_SCENARIO else ("%s-%s" % (scenario, baseline))
csvFile = "%s-%s.csv" % (mainPart, middle)
csvPath = os.path.abspath(os.path.join(outputDir, csvFile))
return csvPath
def columnName(self):
# Name of the selected <Column>, or None when no column was given.
return self.column.getName() if self.column is not None else None
class XMLResultFile(XMLFile):
"""
XMLResultFile manipulation class.
"""
# Memoized instances, keyed by filename (see getInstance).
cache = {}
@classmethod
def getInstance(cls, filename):
# Return the cached instance for filename, parsing it on first use.
try:
return cls.cache[filename]
except KeyError:
obj = cls.cache[filename] = cls(filename)
return obj
def __init__(self, filename):
# Parse and schema-validate the results XML file, then collect all
# <Result> elements into an ordered dict.
super(XMLResultFile, self).__init__(filename, load=True, schemaPath='mcs/etc/results-schema.xsd')
root = self.tree.getroot()
self.results = OrderedDict() # the parsed fileNodes, keyed by filename
findAndSave(root, RESULT_ELT_NAME, XMLResult, self.results)
def getResultDefs(self, type=None):
"""
Get results of type 'diff' or 'scenario'
"""
results = self.results.values()
if type:
results = [result for result in results if result.type == type]
return results
def saveOutputDefs(self):
'''
Save the defined outputs in the SQL database
'''
db = getDatabase()
session = db.Session()
for result in self.getResultDefs():
db.createOutput(result.name, description=result.desc, unit=result.unit, session=session)
session.commit()
db.endSession(session)
@classmethod
def addOutputs(cls):
# Convenience entry point: parse the configured results file and
# register its outputs in the database.
resultsFile = getParam('MCS.ResultsFile')
obj = cls(resultsFile)
obj.saveOutputDefs()
class QueryResult(object):
    """
    Holds the contents of a CSV file produced by an XPath batch query:
    the query title, the data as a DataFrame, and the units string.
    """

    def __init__(self, filename):
        self.filename = filename
        self.title = None
        self.df = None
        self.units = None
        self.readCSV()

    @staticmethod
    def parseScenarioString(scenStr):
        """
        Split a GCAM scenario string such as
        "Reference,date=2014-29-11T08:10:45-08:00" into name and timestamp.

        :param scenStr: (str) a scenario name string
        :return: (str, datetime) the scenario name and a datetime instance
        """
        name, datePart = scenStr.split(',')
        dateWithTZ = datePart.split('=')[1]  # drop the 'date=' prefix
        # strptime can't handle the trailing timezone, so strip it. This is
        # fine as long as all scenarios were run in the same timezone.
        tzLen = len("-00:00")
        bareDate = dateWithTZ[:-tzLen]
        runDate = datetime.strptime(bareDate, "%Y-%d-%mT%H:%M:%S")  # N.B. order is DD-MM, not MM-DD
        return name, runDate

    def readCSV(self):
        """
        Read a batch-query CSV: line 1 is the query title, line 2 the column
        headings, the rest comma-delimited data (strings with spaces are
        double-quoted). Units are taken from the first data row.
        """
        _logger.debug("readCSV: reading %s", self.filename)
        with open(self.filename) as f:
            self.title = f.readline().strip()
            self.df = pd.read_table(f, sep=',', header=0, index_col=False, quoting=0)

        frame = self.df
        if 'Units' in frame.columns:
            self.units = frame.Units[0]

        # Split the scenario field into name/date; create the columns first.
        frame['ScenarioName'] = None
        frame['ScenarioDate'] = None
        if 'scenario' in frame.columns:  # not the case for "diff" files
            name, date = self.parseScenarioString(frame.loc[0].scenario)
            frame['ScenarioName'] = name
            frame['ScenarioDate'] = date

    def getFilename(self):
        return self.filename

    def getTitle(self):
        return self.title

    def getData(self):
        return self.df
# A single result DF can have data for multiple outputs, so we cache the files.
# NOTE: defaultdict(lambda: None) means a lookup miss stores and returns None,
# which getCachedFile() interprets as "not loaded yet".
outputCache = defaultdict(lambda: None)
def getCachedFile(csvPath, loader=QueryResult, desc="query result"):
    """
    Return the parsed result for `csvPath`, loading and caching it on first use.

    :param csvPath: (str) pathname of the .csv file to load
    :param loader: (callable) invoked as loader(csvPath) to parse the file
    :param desc: (str) description of the file, used in the failure log message
    :return: the cached object produced by `loader`
    :raises FileMissingError: if the loader fails for any reason
    """
    result = outputCache[csvPath]
    if not result:
        try:
            outputCache[csvPath] = result = loader(csvPath)
        except Exception as e:
            # Fix: log under this function's name; the old message said
            # 'saveResults', which made failures hard to trace to their source.
            _logger.warning('getCachedFile: Failed to read {}: {}'.format(desc, e))
            raise FileMissingError(csvPath)
    return result
def getOutputDir(trialDir, scenario, type):
    """
    Return the directory holding result csv files for the given scenario:
    'queryResults' for plain scenario results, 'diffs' for difference results.
    """
    if type == RESULT_TYPE_SCENARIO:
        subDir = 'queryResults'
    else:
        subDir = 'diffs'
    return os.path.join(trialDir, scenario, subDir)
def extractResult(context, scenario, outputDef, type):
    """
    Extract a single result value for `outputDef` from the cached batch-query
    CSV for `scenario`, returning a dict the master process can store.

    :param context: trial context (supplies trial dir and baseline name)
    :param scenario: (str) scenario whose query results to read
    :param outputDef: (XMLResult) definition of the output to extract
    :param type: RESULT_TYPE_SCENARIO or RESULT_TYPE_DIFF
    :return: (dict) with keys regionName, paramName, units, isScalar, value
    :raises PygcamMcsUserError: if the constraints match no rows
    :raises FileMissingError: if the query CSV cannot be read (via getCachedFile)
    """
    from .util import activeYears, YEAR_COL_PREFIX

    _logger.debug("Extracting result for {}, name={}".format(context, outputDef.name))
    trialDir = context.getTrialDir()
    outputDir = getOutputDir(trialDir, scenario, type)
    baseline = None if type == RESULT_TYPE_SCENARIO else context.baseline
    csvPath = outputDef.csvPathname(scenario, outputDir=outputDir, baseline=baseline, type=type)

    queryResult = getCachedFile(csvPath)
    _logger.debug("queryResult:\n%s", queryResult.df)

    paramName = outputDef.name
    whereClause = outputDef.whereClause
    _logger.debug("whereClause: %s", whereClause)

    # Apply (in)equality constraints via query(), then the string-match
    # constraints, which can't be expressed in a DataFrame.query().
    selected = queryResult.df.query(whereClause) if whereClause else queryResult.df
    selected = outputDef.stringMatch(selected)
    _logger.debug("Selected:\n%s", selected)

    count = selected.shape[0]
    if count == 0:
        raise PygcamMcsUserError('Query for "{}" matched no results'.format(outputDef.name))

    if 'region' in selected.columns:
        firstRegion = selected.region.iloc[0]
        if count == 1:
            regionName = firstRegion
        else:
            _logger.debug("Query yielded {} rows; year columns will be summed".format(count))
            regionName = firstRegion if len(selected.region.unique()) == 1 else 'Multiple'
    else:
        regionName = 'global'

    isScalar = outputDef.isScalar()

    # Create a dict to return. (context already has runId and scenario)
    resultDict = dict(regionName=regionName, paramName=paramName, units=queryResult.units, isScalar=isScalar)

    active = activeYears()

    if isScalar:
        if outputDef.cumulative:
            total = selected[active].sum(axis=1)
            value = float(total.sum() if isinstance(total, pd.Series) else total)
        else:
            colName = outputDef.columnName()
            value = selected[colName].sum()  # works for single or multiple rows
    else:
        # When no column name is specified, assume this is a time-series result, so save all years.
        # Use sum() to collapse values to a single time series; for a single row it helpfully
        # converts the 1-element series to a simple float.
        yearCols = [YEAR_COL_PREFIX + y for y in active]
        value = {colName: selected[yearStr].sum() for colName, yearStr in zip(yearCols, active)}

    if outputDef.percentage:
        # Recursively read the baseline scenario result so we can compute % change
        newDef = XMLResult(outputDef.element)
        newDef.percentage = False
        baseResult = extractResult(context, context.baseline, newDef, RESULT_TYPE_SCENARIO)
        bv = baseResult['value']
        if isinstance(bv, dict):
            # Bug fix: the old code computed `100 * dict(...)`, which raises
            # TypeError. Scale the ratio Series before converting back to a dict.
            value = dict(100 * (pd.Series(value) / pd.Series(bv)))
        else:
            value = 100 * (value / bv)

    resultDict['value'] = value
    return resultDict
def collectResults(context, type):
'''
Called by worker to process results, return a list of dicts
with data the master process can quickly write to | |
a specific CAN channel of a device.
:param int channel: CAN channel to be initialized (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
:param int BTR:
Baud rate register BTR0 as high byte, baud rate register BTR1 as low byte (see enum :class:`Baudrate`).
:param int baudrate: Baud rate register for all systec USB-CANmoduls (see enum :class:`BaudrateEx`).
:param int AMR: Acceptance filter mask (see method :meth:`set_acceptance`).
:param int ACR: Acceptance filter code (see method :meth:`set_acceptance`).
:param int mode: Transmission mode of CAN channel (see enum :class:`Mode`).
:param int OCR: Output Control Register (see enum :class:`OutputControl`).
:param int rx_buffer_entries: The number of maximum entries in the receive buffer.
:param int tx_buffer_entries: The number of maximum entries in the transmit buffer.
"""
if not self._ch_is_initialized.get(channel, False):
init_param = InitCanParam(mode, BTR, OCR, AMR, ACR, baudrate, rx_buffer_entries, tx_buffer_entries)
UcanInitCanEx2(self._handle, channel, init_param)
self._ch_is_initialized[channel] = True
def read_can_msg(self, channel, count):
    """
    Fetch up to `count` CAN messages from the receive buffer of a channel.

    :param int channel:
        CAN channel to read from (:data:`Channel.CHANNEL_CH0`, :data:`Channel.CHANNEL_CH1`,
        :data:`Channel.CHANNEL_ANY`).
    :param int count: The number of CAN messages to be received.
    :return: Tuple of (list of received CAN messages, channel they came from).
    :rtype: tuple(list(CanMsg), int)
    """
    chan_ref = BYTE(channel)
    msg_buf = (CanMsg * count)()
    actual = DWORD(count)
    # The driver updates `actual` with the number of messages actually read
    # and `chan_ref` with the source channel.
    UcanReadCanMsgEx(self._handle, byref(chan_ref), msg_buf, byref(actual))
    return msg_buf[:actual.value], chan_ref.value
def write_can_msg(self, channel, can_msg):
    """
    Transmits one or more CAN messages through the specified CAN channel of the device.

    :param int channel:
        CAN channel, which is to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param list(CanMsg) can_msg: List of CAN message structure (see structure :class:`CanMsg`).
    :return: The number of successfully transmitted CAN messages.
    :rtype: int
    """
    c_can_msg = (CanMsg * len(can_msg))(*can_msg)
    c_count = DWORD(len(can_msg))
    UcanWriteCanMsgEx(self._handle, channel, c_can_msg, c_count)
    # Bug fix: return a plain int as documented, not the ctypes DWORD wrapper
    # (DWORD does not behave like an int for arithmetic or comparisons).
    return c_count.value
def set_baudrate(self, channel, BTR, baudarate):
    """
    Configure the baud rate of one CAN channel of a device.

    :param int channel:
        CAN channel to configure (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param int BTR:
        Baud rate register BTR0 as high byte, baud rate register BTR1 as low byte (see enum :class:`Baudrate`).
    :param int baudarate:
        Baud rate register for all systec USB-CANmoduls (see enum :class:`BaudrateEx`).
        Note: the parameter name keeps its historical (misspelled) form so
        keyword callers are not broken.
    """
    btr0 = BTR >> 8
    btr1 = BTR
    UcanSetBaudrateEx(self._handle, channel, btr0, btr1, baudarate)
def set_acceptance(self, channel=Channel.CHANNEL_CH0, AMR=AMR_ALL, ACR=ACR_ALL):
    """
    Change the acceptance filter (mask and code) for a CAN channel.

    :param int channel:
        CAN channel to configure (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param int AMR: Acceptance filter mask (AMR).
    :param int ACR: Acceptance filter code (ACR).
    """
    UcanSetAcceptanceEx(self._handle, channel, AMR, ACR)
def get_status(self, channel=Channel.CHANNEL_CH0):
    """
    Query the error status of a CAN channel.

    :param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :return: Tuple of (CAN status, USB status) (see structure :class:`Status`).
    :rtype: tuple(int, int)
    """
    state = Status()
    UcanGetStatusEx(self._handle, channel, byref(state))
    return state.can_status, state.usb_status
def get_msg_count_info(self, channel=Channel.CHANNEL_CH0):
    """
    Read the sent/received message counters of a CAN channel.

    :param int channel:
        CAN channel, which is to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :return: Tuple of (messages sent, messages received).
    :rtype: tuple(int, int)
    """
    counters = MsgCountInfo()
    UcanGetMsgCountInfoEx(self._handle, channel, byref(counters))
    return counters.sent_msg_count, counters.recv_msg_count
def reset_can(self, channel=Channel.CHANNEL_CH0, flags=ResetFlags.RESET_ALL):
    """
    Reset a CAN channel (hardware reset, buffer flush, and so on).

    :param int channel: CAN channel, to be reset (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param int flags: Selects what should be reset (see enum :class:`ResetFlags`).
    """
    UcanResetCanEx(self._handle, channel, flags)
def get_hardware_info(self):
    """
    Return the extended hardware information of a device. For multi-channel
    USB-CANmoduls the per-channel information is returned separately.

    :return:
        Tuple of the extended hardware information structure (see structure
        :class:`HardwareInfoEx`) and the channel-info structures for CAN
        channel 0 and 1 (see structure :class:`ChannelInfo`).
    :rtype: tuple(HardwareInfoEx, ChannelInfo, ChannelInfo)
    """
    hw_info_ex = HardwareInfoEx()
    ch0_info = ChannelInfo()
    ch1_info = ChannelInfo()
    UcanGetHardwareInfoEx2(self._handle, byref(hw_info_ex), byref(ch0_info), byref(ch1_info))
    return hw_info_ex, ch0_info, ch1_info
def get_fw_version(self):
    """
    Return the firmware version number of the device.

    :return: Firmware version number.
    :rtype: int
    """
    return UcanGetFwVersion(self._handle)
def define_cyclic_can_msg(self, channel, can_msg=None):
    """
    Define the list of CAN messages for automatic (cyclic) transmission.

    :param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param list(CanMsg) can_msg:
        List of CAN messages (up to 16, see structure :class:`CanMsg`), or None to delete an older list.
    """
    if can_msg is None:
        # Passing an empty message with count 0 clears the previous list.
        c_can_msg = CanMsg()
        c_count = 0
    else:
        c_can_msg = (CanMsg * len(can_msg))(*can_msg)
        c_count = DWORD(len(can_msg))
    UcanDefineCyclicCanMsg(self._handle, channel, c_can_msg, c_count)
def read_cyclic_can_msg(self, channel, count):
    """
    Read back the list of CAN messages defined for automatic sending.

    :param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param int count: The number of cyclic CAN messages to be received.
    :return: List of received CAN messages (up to 16, see structure :class:`CanMsg`).
    :rtype: list(CanMsg)
    """
    chan_ref = BYTE(channel)
    msg_buf = (CanMsg * count)()
    actual = DWORD(count)
    UcanReadCyclicCanMsg(self._handle, byref(chan_ref), msg_buf, actual)
    return msg_buf[:actual.value]
def enable_cyclic_can_msg(self, channel, flags):
    """
    Enable or disable the automatic (cyclic) sending.

    :param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param int flags: Flags for enabling or disabling (see enum :class:`CyclicFlags`).
    """
    UcanEnableCyclicCanMsg(self._handle, channel, flags)
def get_msg_pending(self, channel, flags):
    """
    Return the number of pending CAN messages.

    :param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param int flags: Selects which buffers should be checked (see enum :class:`PendingFlags`).
    :return: The number of pending messages.
    :rtype: int
    """
    pending = DWORD(0)
    UcanGetMsgPending(self._handle, channel, flags, byref(pending))
    return pending.value
def get_can_error_counter(self, channel):
    """
    Reads the current value of the error counters within the CAN controller.

    :param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :return: Tuple with the TX and RX error counter.
    :rtype: tuple(int, int)

    .. note:: Only available for systec USB-CANmoduls (NOT for GW-001 and GW-002 !!!).
    """
    tx_error_counter = DWORD(0)
    rx_error_counter = DWORD(0)
    UcanGetCanErrorCounter(self._handle, channel, byref(tx_error_counter), byref(rx_error_counter))
    # Bug fix: return plain ints as documented, not the ctypes DWORD wrappers.
    return tx_error_counter.value, rx_error_counter.value
def set_tx_timeout(self, channel, timeout):
    """
    Set the transmission timeout of a CAN channel.

    :param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param float timeout: Transmit timeout in seconds (value 0 disables this feature).
    """
    timeout_ms = int(timeout * 1000)  # driver expects milliseconds
    UcanSetTxTimeout(self._handle, channel, timeout_ms)
def shutdown(self, channel=Channel.CHANNEL_ALL, shutdown_hardware=True):
    """
    Shut down CAN channels and, optionally, the hardware interface itself.

    :param int channel:
        CAN channel, to be used (:data:`Channel.CHANNEL_CH0`, :data:`Channel.CHANNEL_CH1` or
        :data:`Channel.CHANNEL_ALL`)
    :param bool shutdown_hardware: If true then the hardware interface will be closed too.
    """
    # Deinitialize every initialized channel that is selected (closing the
    # hardware implies closing all channels).
    for chan, initialized in self._ch_is_initialized.items():
        if not initialized:
            continue
        if shutdown_hardware or chan == channel or channel == Channel.CHANNEL_ALL:
            UcanDeinitCanEx(self._handle, chan)
            self._ch_is_initialized[chan] = False

    # Close the hardware interface and invalidate the handle.
    if self._hw_is_initialized and shutdown_hardware:
        UcanDeinitHardware(self._handle)
        self._hw_is_initialized = False
        self._handle = Handle(INVALID_HANDLE)
@staticmethod
def get_user_dll_version():
    """
    Return the version number of the USBCAN-library.

    :return: Software version number.
    :rtype: int
    """
    return UcanGetVersionEx(VersionType.VER_TYPE_USER_DLL)
@staticmethod
def set_debug_mode(level, filename, flags=0):
    """
    Enable writing of a debug log file by the USBCAN-library. If the feature
    was already activated via the USB-CANmodul Control, the content of the
    old log file is copied to the new file and further debug information is
    appended to it.

    :param int level: Debug level (bit format).
    :param str filename: File path to debug log file.
    :param int flags: Additional flags (bit0: file append mode).
    :return: False if logfile not created otherwise True.
    :rtype: bool
    """
    return UcanSetDebugMode(level, filename, flags)
@staticmethod
def get_can_status_message(can_status):
    """
    Translate a CAN status bitmask into a human-readable message.

    :param can_status: CAN status value from method :meth:`get_status` (see enum :class:`CanStatus`)
    :return: Status message string.
    :rtype: str
    """
    if can_status == CanStatus.CANERR_OK:
        return "OK"
    # Keep the declaration order so multi-bit statuses render consistently.
    status_msgs = {
        CanStatus.CANERR_TXMSGLOST: "Transmit message lost",
        CanStatus.CANERR_MEMTEST: "Memory test failed",
        CanStatus.CANERR_REGTEST: "Register test failed",
        CanStatus.CANERR_QXMTFULL: "Transmit queue is full",
        CanStatus.CANERR_QOVERRUN: "Receive queue overrun",
        CanStatus.CANERR_QRCVEMPTY: "Receive queue is empty",
        CanStatus.CANERR_BUSOFF: "Bus Off",
        CanStatus.CANERR_BUSHEAVY: "Error Passive",
        CanStatus.CANERR_BUSLIGHT: "Warning Limit",
        CanStatus.CANERR_OVERRUN: "Rx-buffer is full",
        CanStatus.CANERR_XMTFULL: "Tx-buffer is full",
    }
    matched = [text for flag, text in status_msgs.items() if can_status & flag]
    return ", ".join(matched)
@staticmethod
def get_baudrate_message(baudrate):
"""
| |
second a list of
numpy arrays containing returns for each corresponding year
'''
assert(len(timestamps) == len(returns))
if not len(timestamps): return np.array([], dtype=np.str), np.array([], dtype=np.float)
s = pd.Series(returns, index=timestamps)
years_list = []
rets_list = []
for year, rets in s.groupby(s.index.map(lambda x: x.year)):
years_list.append(year)
rets_list.append(rets.values)
return years_list, rets_list
def compute_annual_returns(timestamps: np.ndarray, returns: np.ndarray, periods_per_year: float) -> Tuple[np.ndarray, np.ndarray]:
    '''Compute the geometric mean of returns grouped by calendar year.

    Args:
        timestamps: array of datetime-like timestamps, one per return
        returns: array of periodic returns (0.01 == 1%)
        periods_per_year: number of return periods in a year (used for annualization)
    Returns:
        A tuple with the first element being an array of years (integer) and the second element
        an array of annualized returns for those years
    '''
    assert(len(timestamps) == len(returns) and periods_per_year > 0)
    # Bug fix: np.str / np.float were removed in NumPy 1.24; use the builtin
    # types, which is what those aliases always meant.
    if not len(timestamps): return np.array([], dtype=str), np.array([], dtype=float)
    df = pd.DataFrame({'ret': returns, 'timestamp': timestamps})
    years = []
    gmeans = []
    for k, g in df.groupby(df.timestamp.map(lambda x: x.year)):
        years.append(k)
        gmeans.append(compute_gmean(g.timestamp.values, g.ret.values, periods_per_year))
    return np.array(years), np.array(gmeans)
class Evaluator:
    """Dependency-aware metric calculator.

    Register functions whose keyword arguments are the names of other metrics;
    :meth:`compute` resolves dependencies recursively so each prerequisite is
    computed before the functions that consume it. Retrieve a computed value
    with :meth:`metric`, or all values with :meth:`metrics`.

    Example::

        evaluator = Evaluator(initial_metrics={'x': 2, 'y': 3})
        evaluator.add_metric('z', lambda x, y: x + y, dependencies=['x', 'y'])
        evaluator.compute()
        evaluator.metric('z')   # -> 5
    """

    def __init__(self, initial_metrics: Dict[str, Any]) -> None:
        """Store the seed metrics used to compute everything else.

        Args:
            initial_metrics: mapping of metric name -> value; a value may be
                any object (scalar, array, tuple, ...)
        """
        assert(type(initial_metrics) == dict)
        self.metric_values: Dict[str, Any] = initial_metrics.copy()
        self._metrics: MutableMapping[str, Tuple[Callable, Sequence[str]]] = {}

    def add_metric(self, name: str, func: Callable, dependencies: Sequence[str]) -> None:
        """Register `func` under `name`; it will be called with the named dependencies as kwargs."""
        self._metrics[name] = (func, dependencies)

    def compute(self, metric_names: Sequence[str] = None) -> None:
        '''Compute and store metrics, resolving dependencies first.

        Args:
            metric_names: names to compute; when omitted, every registered metric is computed
        '''
        names = list(self._metrics.keys()) if metric_names is None else metric_names
        for name in names:
            self.compute_metric(name)

    def compute_metric(self, metric_name: str) -> None:
        '''Compute and store a single metric, recursively computing any missing dependencies.'''
        func, deps = self._metrics[metric_name]
        for dep in deps:
            if dep not in self.metric_values:
                self.compute_metric(dep)
        kwargs = {name: self.metric_values[name] for name in deps}
        self.metric_values[metric_name] = func(**kwargs)

    def metric(self, metric_name: str) -> Any:
        '''Return the value of a single metric given its name'''
        return self.metric_values[metric_name]

    def metrics(self) -> Mapping[str, Any]:
        '''Return a dictionary of metric name -> metric value'''
        return self.metric_values
def handle_non_finite_returns(timestamps: np.ndarray,
                              rets: np.ndarray,
                              leading_non_finite_to_zeros: bool,
                              subsequent_non_finite_to_zeros: bool) -> Tuple[np.ndarray, np.ndarray]:
    '''Clean up NaN returns at the head and in the interior of a return series.

    Leading NaNs (before the first real value) are either zeroed
    (`leading_non_finite_to_zeros` True) or dropped along with their
    timestamps. NaNs after the first real value are either zeroed
    (`subsequent_non_finite_to_zeros` True) or dropped with their timestamps.

    For example, with rets = [nan, nan, 3, 4, nan, 5]:

    * leading False, subsequent True  -> rets [3, 4, 0, 5] (first two rows dropped)
    * leading True,  subsequent False -> rets [0, 0, 3, 4, 5] (the interior nan row dropped)

    Note: when leading NaNs are zeroed, the input `rets` array is modified in place.

    :return: the (possibly shortened) timestamps and returns arrays
    '''
    real_indices = np.flatnonzero(~np.isnan(rets))
    first_real = real_indices[0] if len(real_indices) else -1

    if 0 < first_real < len(rets):
        if leading_non_finite_to_zeros:
            rets[:first_real] = np.nan_to_num(rets[:first_real])
        else:
            timestamps = timestamps[first_real:]
            rets = rets[first_real:]

    if subsequent_non_finite_to_zeros:
        rets = np.nan_to_num(rets)
    else:
        finite = np.isfinite(rets)
        timestamps = timestamps[finite]
        rets = rets[finite]

    return timestamps, rets
def compute_return_metrics(timestamps: np.ndarray,
                           rets: np.ndarray,
                           starting_equity: float,
                           leading_non_finite_to_zeros: bool = False,
                           subsequent_non_finite_to_zeros: bool = True) -> Evaluator:
    '''
    Compute a set of common metrics using returns (for example, of an instrument or a portfolio)

    Args:
        timestamps (np.array of datetime64): Timestamps for the returns
        rets (nd.array of float): The returns, use 0.01 for 1%
        starting_equity (float): Starting equity value in your portfolio
        leading_non_finite_to_zeros (bool, optional): If set, we replace leading nan, inf, -inf returns with zeros.
            For example, you may need a warmup period for moving averages. Default False
        subsequent_non_finite_to_zeros (bool, optional): If set, we replace any nans that follow the first non nan value with zeros.
            There may be periods where you have no prices but removing these returns would result in incorrect annualization.
            Default True

    Returns:
        An Evaluator object containing computed metrics off the returns passed in.
        If needed, you can add your own metrics to this object based on the values of existing metrics and recompute the Evaluator.
        Otherwise, you can just use the output of the evaluator using the metrics function.

    >>> timestamps = np.array(['2015-01-01', '2015-03-01', '2015-05-01', '2015-09-01'], dtype='M8[D]')
    >>> rets = np.array([0.01, 0.02, np.nan, -0.015])
    >>> starting_equity = 1.e6
    >>> ev = compute_return_metrics(timestamps, rets, starting_equity)
    >>> metrics = ev.metrics()
    >>> assert(round(metrics['gmean'], 6) == 0.021061)
    >>> assert(round(metrics['sharpe'], 6) == 0.599382)
    >>> assert(all(metrics['returns_3yr'] == np.array([0.01, 0.02, 0, -0.015])))
    '''
    # Validate inputs: timestamps must be sorted datetime64 and rets float64,
    # or the downstream annualization math would be silently wrong.
    assert(starting_equity > 0.)
    assert(type(rets) == np.ndarray and rets.dtype == np.float64)
    assert(type(timestamps) == np.ndarray and np.issubdtype(timestamps.dtype, np.datetime64) and monotonically_increasing(timestamps))

    # Clean non-finite returns up front so every metric sees the same series.
    timestamps, rets = handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros, subsequent_non_finite_to_zeros)

    # Seed the evaluator; every other metric is derived from these three.
    ev = Evaluator({'timestamps': timestamps, 'returns': rets, 'starting_equity': starting_equity})
    ev.add_metric('periods_per_year', compute_periods_per_year, dependencies=['timestamps'])
    ev.add_metric('amean', compute_amean, dependencies=['returns', 'periods_per_year'])
    ev.add_metric('std', compute_std, dependencies=['returns'])
    ev.add_metric('up_periods', lambda returns: len(returns[returns > 0]), dependencies=['returns'])
    ev.add_metric('down_periods', lambda returns: len(returns[returns < 0]), dependencies=['returns'])
    # Fraction of non-flat periods that were up; nan when every period was flat.
    ev.add_metric('up_pct',
                  lambda up_periods, down_periods: up_periods * 1.0 / (up_periods + down_periods) if (up_periods + down_periods) != 0 else np.nan,
                  dependencies=['up_periods', 'down_periods'])
    ev.add_metric('gmean', compute_gmean, dependencies=['timestamps', 'returns', 'periods_per_year'])
    ev.add_metric('sharpe', compute_sharpe, dependencies=['returns', 'periods_per_year', 'amean'])
    ev.add_metric('sortino', compute_sortino, dependencies=['returns', 'periods_per_year', 'amean'])
    ev.add_metric('equity', compute_equity, dependencies=['timestamps', 'starting_equity', 'returns'])
    ev.add_metric('k_ratio', compute_k_ratio, dependencies=['equity', 'periods_per_year'])
    ev.add_metric('k_ratio_weighted', lambda equity, periods_per_year: compute_k_ratio(equity, periods_per_year, 3),
                  dependencies=['equity', 'periods_per_year'])

    # Drawdowns
    # rolling_dd is a (timestamps, drawdown) tuple; the mdd_* metrics index into it.
    ev.add_metric('rolling_dd', compute_rolling_dd, dependencies=['timestamps', 'equity'])
    ev.add_metric('mdd_pct', lambda rolling_dd: compute_maxdd_pct(rolling_dd[1]), dependencies=['rolling_dd'])
    ev.add_metric('mdd_date', lambda rolling_dd: compute_maxdd_date(rolling_dd[0], rolling_dd[1]), dependencies=['rolling_dd'])
    ev.add_metric('mdd_start', lambda rolling_dd, mdd_date: compute_maxdd_start(rolling_dd[0], rolling_dd[1], mdd_date),
                  dependencies=['rolling_dd', 'mdd_date'])
    ev.add_metric('mar', compute_mar, dependencies=['returns', 'periods_per_year', 'mdd_pct'])

    # Trailing three-year variants of the series and drawdown metrics.
    ev.add_metric('timestamps_3yr', compute_dates_3yr, dependencies=['timestamps'])
    ev.add_metric('returns_3yr', compute_returns_3yr, dependencies=['timestamps', 'returns'])
    ev.add_metric('rolling_dd_3yr', compute_rolling_dd_3yr, dependencies=['timestamps', 'equity'])
    ev.add_metric('mdd_pct_3yr', lambda rolling_dd_3yr: compute_maxdd_pct_3yr(rolling_dd_3yr[1]), dependencies=['rolling_dd_3yr'])
    ev.add_metric('mdd_date_3yr', lambda rolling_dd_3yr: compute_maxdd_date_3yr(rolling_dd_3yr[0], rolling_dd_3yr[1]),
                  dependencies=['rolling_dd_3yr'])
    ev.add_metric('mdd_start_3yr', lambda rolling_dd_3yr, mdd_date_3yr:
                  compute_maxdd_start_3yr(rolling_dd_3yr[0], rolling_dd_3yr[1], mdd_date_3yr),
                  dependencies=['rolling_dd_3yr', 'mdd_date_3yr'])
    ev.add_metric('calmar', compute_calmar, dependencies=['returns_3yr', 'periods_per_year', 'mdd_pct_3yr'])

    ev.add_metric('annual_returns', compute_annual_returns, dependencies=['timestamps', 'returns', 'periods_per_year'])
    ev.add_metric('bucketed_returns', compute_bucketed_returns, dependencies=['timestamps', 'returns'])

    ev.compute()
    return ev
def display_return_metrics(metrics: Mapping[str, Any], float_precision: int = 3) -> pd.DataFrame:
    '''
    Creates a dataframe making it convenient to view the output of the metrics obtained using the compute_return_metrics function.

    Args:
        metrics: the metric name -> value mapping produced by compute_return_metrics(...).metrics()
        float_precision: Change if you want to display floats with more or less significant figures than the default,
            3 significant figures.
    Returns:
        A one row dataframe with formatted metrics.
    '''
    from IPython.core.display import display

    _metrics = {}
    cols = ['gmean', 'amean', 'std', 'shrp', 'srt', 'k', 'calmar', 'mar', 'mdd_pct', 'mdd_start', 'mdd_date', 'dd_3y_pct',
            'up_periods', 'down_periods', 'up_pct', 'mdd_start_3yr', 'mdd_date_3yr']

    # Map the short display names to the metric names used by the evaluator.
    translate = {'shrp': 'sharpe', 'srt': 'sortino', 'dd_3y_pct': 'mdd_pct_3yr', 'k': 'k_ratio'}
    for col in cols:
        key = translate.get(col, col)
        _metrics[col] = metrics[key]

    # Combine date/ratio fields into compact display strings.
    _metrics['mdd_dates'] = f'{str(metrics["mdd_start"])[:10]}/{str(metrics["mdd_date"])[:10]}'
    _metrics['up_dwn'] = f'{metrics["up_periods"]}/{metrics["down_periods"]}/{metrics["up_pct"]:.3g}'
    _metrics['dd_3y_timestamps'] = f'{str(metrics["mdd_start_3yr"])[:10]}/{str(metrics["mdd_date_3yr"])[:10]}'

    years = metrics['annual_returns'][0]
    ann_rets = metrics['annual_returns'][1]
    for i, year in enumerate(years):
        _metrics[str(year)] = ann_rets[i]

    format_str = '{:.' + str(float_precision) + 'g}'

    for k, v in _metrics.items():
        # Bug fix: np.float was removed in NumPy 1.24; check for Python floats
        # and any NumPy floating scalar instead.
        if isinstance(v, (float, np.floating)):
            _metrics[k] = format_str.format(v)

    cols = ['gmean', 'amean', 'std', 'shrp', 'srt', 'k', 'calmar', 'mar', 'mdd_pct', 'mdd_dates', 'dd_3y_pct', 'dd_3y_timestamps', 'up_dwn'] + [
        str(year) for year in sorted(years)]

    df = pd.DataFrame(index=[''])
    for metric_name, metric_value in _metrics.items():
        df.insert(0, metric_name, metric_value)
    df = df[cols]

    display(df)
    return df
def plot_return_metrics(metrics: Mapping[str, Any], title: str = None) -> Optional[Tuple[mpl_fig.Figure, mpl.axes.Axes]]:
'''
Plot equity, rolling drawdowns and and a boxplot of annual returns given the output of compute_return_metrics.
'''
timestamps = metrics['timestamps']
| |
import serial, traceback, threading, time, sys, struct, os, array
import zipfile, shelve, random, binascii
import cPickle as pickle
import fakeserial
import itertools
################################################################################
# parsers.py
# A fairly lightweight threaded library for parsing HEG input from Pocket
# Neurobics devices and from the Peanut.
#
# Copyright 2012, <NAME> (<EMAIL>)
# This software is available under the terms of the GNU Lesser Public License,
# version 3.0. For a copy of this license, see the file LGPL.txt, or visit
# http://www.gnu.org/licenses/lgpl-3.0.txt.
#
# A copy of the specification for one of the protocols implemented herein can
# be found at http://www.pocket-neurobics.com/simplex_spec.htm.
# This is not a complete implementation of the protocol. This is not an elegant
# implementation of the protocol. This is a good enough implementation of the
# protocol for the uses of software I've written.
#
# The Peanut protocol is currently undocumented. Read the code or bug me.
#
# Want to adapt this software for your project? Want to make your biofeedback
# hardware supported by this software? Please do! Email me, and I might even
# help.
################################################################################
# Library version plus simple runtime toggles derived from the command line.
__VERSION__ = version = '0.4.8b + HEGduino'
# Verbose tracing of parsing/IO errors throughout this module.
DEBUG = '--debug' in sys.argv
# Pocket Neurobics support: enabled via a flag or a marker file in the parent dir.
ENABLE_PNB = '--enable-pnb' in sys.argv or 'enable-pnb' in os.listdir('..')
# Recognized session-file extensions.
file_extensions = ('pnb', 'pea', 'heg', 'heg.zip')
# Scratch directory — presumably where temporary session files go; confirm usage.
tmp_path = '.'
def make_zip(archive, rawdata, timing, *files, **kwargs):
    # Bundle a session's rawdata and timing files (plus any extras) into one
    # zip archive, optionally deleting the originals afterwards.
    #
    # Keyword options:
    #   overwrite (default False): replace an existing archive at `archive`
    #   remove_originals (default True): delete the source files after zipping
    #   strings (default {}): mapping of member-name -> literal content to embed
    #
    # Raises IOError if `archive` exists and overwrite is False.
    if not 'overwrite' in kwargs: kwargs['overwrite'] = False
    if not 'remove_originals' in kwargs: kwargs['remove_originals'] = True
    if not 'strings' in kwargs: kwargs['strings'] = {}
    if os.path.exists(archive) and not kwargs['overwrite']:
        print "Path %s exists. Not overwriting." % archive
        raise IOError
    if os.path.exists(archive) and kwargs['overwrite']:
        if DEBUG: print "Path %s exists. Overwriting." % archive
        os.unlink(archive)
    #try:
    # rawdata/timing are stored under fixed member names; extra files keep theirs.
    zip = zipfile.ZipFile(archive, mode='w', compression=zipfile.ZIP_DEFLATED)
    zip.write(rawdata, 'rawdata')
    zip.write(timing, 'timing')
    for f in files:
        zip.write(f)
    for k,v in kwargs['strings'].items():
        zip.writestr(k,v)
    zip.close()
    #except:
    #    zip.close()
    #    raise IOError
    # Still with us? Good. Then let's...
    # Only remove the originals after the archive was fully written;
    # a missing original is not fatal (best-effort cleanup).
    if kwargs['remove_originals']:
        for f in (rawdata, timing) + files:
            try:
                os.unlink(f)
            except:
                if DEBUG:
                    traceback.print_exc()
    return
def is_heg_archive(path):
    """Return True when *path* names a HEG archive (.heg or *heg.zip).

    Anything without an ``endswith`` method (e.g. an open file handle or an
    integer port number) returns False.
    """
    if hasattr(path, 'endswith'):
        # endswith accepts a tuple of suffixes; comparison is case-insensitive
        return path.lower().endswith(('.heg', 'heg.zip'))
    return False  # fixme for file handles
def get_rawdata_handle(path):
    # Open the 'rawdata' member of a HEG zip archive and return a file-like
    # handle for it, or None when the archive cannot be opened or has no
    # such member.
    # NOTE(review): the handle escapes the ZipFile context, so the archive
    # is closed by the time the caller reads -- confirm callers can actually
    # consume it, or that the member is fully buffered.
    try:
        with zipfile.ZipFile(path) as zip:
            rawdata = zip.open('rawdata')
            return rawdata
    except:
        # any failure (bad zip, missing member) degrades to None
        if DEBUG: traceback.print_exc()
        return None
def enumerate_serial_ports():
    """Return a list of (device, description) tuples for candidate serial ports.

    Windows: read HKLM\\HARDWARE\\DEVICEMAP\\SERIALCOMM from the registry.
    macOS: /dev entries starting with 'cu.usbserial'.
    Linux: /dev entries starting with 'ttyUSB'.
    Anything else: empty list.
    """
    if sys.platform == 'win32':
        import _winreg as winreg
        # was 'HARDWARE\\\\DEVICEMAP\\SERIALCOMM', whose doubled backslash
        # never matches the real registry path
        path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
        try:
            key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, path)
        except WindowsError:
            return []
        i = 0
        comports = []
        while 1:
            try:
                val = winreg.EnumValue(key, i)
                # registry stores (description, device); we report (device, description)
                comports.append((str(val[1]), str(val[0])))
                i += 1
            except EnvironmentError:
                # EnumValue raises when the index runs past the last value
                break
    elif sys.platform == 'darwin':
        comports = [('/dev/'+d, d) for d in os.listdir('/dev/') if d.startswith('cu.usbserial')]
    elif sys.platform.startswith('linux'):
        # Python 2 reports 'linux2'; startswith covers both old and new values
        comports = [('/dev/'+d, d) for d in os.listdir('/dev/') if d.startswith('ttyUSB')]
    else:
        comports = []
    return comports
def detect_protocols(ports=None):
    """Probe serial ports in parallel and return [(port, protocol), ...].

    When *ports* is None the system's ports are enumerated; on Windows only
    USB-serial-looking entries are probed. One ProtocolDetector thread is
    started per port and joined before results are collected.
    """
    if ports is None:
        ports = enumerate_serial_ports()
        if sys.platform == 'win32':
            ports = [port[0] for port in ports if 'VCP' in port[1] or 'USBSER' in port[1] or 'Silabser' in port[1]]
        else:
            ports = [port[0] for port in ports]
    threads = []
    for port in ports:
        # NOTE(review): ProtocolDetector.__init__ declares baudrate/timeout as
        # required positional parameters but only port is passed here --
        # confirm defaults exist in the deployed class.
        threads.append(ProtocolDetector(port=port))
        threads[-1].start()
    for t in threads:
        t.join()
    results = [(t.port, t.protocol) for t in threads if t.protocol is not None]# and t.protocol != 'no data' and t.protocol != 'no connect']
    return results
def export_csv(source, dest):
heg = AutoHEG(port=source, fast=True)
heg.start()
heg.join()
if heg.mgr: heg.mgr.join()
else:
if DEBUG: print "No mgr to join in parsers.export_csv()"
return heg.Close()
if DEBUG: print heg.detector.protocol
if heg.detector.protocol == 'pnb':
columns = ['sample', 'HEG Ratio']
outfile = file(dest, 'w')
outfile.write([','.join(i, h) for i,h in zip(range(len(heg.hegdata)), heg.hegdata)] + '\n')
outifle.close()
heg.Close()
else:
if 'rawdata6' in heg.data.keys():
rd6 = heg.data['rawdata6']
rds = {}
for i in range(6):
rds['rawdata6_%i' % i] = [v[i] for v in rd6]
rds['IR'] = [ ir - (1 + air1 + air2)/2 \
for ir, air1, air2 in zip(rds['rawdata6_1'], rds['rawdata6_0'], rds['rawdata6_2'])]
rds['RED'] = [red - (1 + ared1 + ared2)/2 \
for red, ared1, ared2 in zip(rds['rawdata6_4'], rds['rawdata6_3'], rds['rawdata6_5'])]
del heg.data['rawdata6']
heg.data.update(rds)
if DEBUG:
print heg.data.keys()
print len(heg.times)
keys = heg.data.keys()
keys.sort()
keys = ['timestamp', 'session time'] + keys
vals = {}
vals.update(heg.data)
vals['timestamp'] = heg.times
vals['session time'] = [t - heg.times[0] for t in heg.times]
outfile = file(dest, 'w')
outfile.write(','.join(keys) + '\n')
for i in range(len(heg.times)):
outfile.write(','.join([str(vals[k][i]).replace(',', '\\;') for k in keys]) + '\n')
# for d,t in zip(heg.dicts, heg.times):
# outfile.write(','.join([`t`] +
# [`t - heg.times[0]`] +
# [`d[col]`.replace(',', '\\;') if col in d else '' for col in columns[2:]]) + '\n')
outfile.close()
heg.Close()
def linear_FIR_filter_factory(taps):
    """Build a linearly-weighted FIR smoothing filter.

    The returned callable averages the last *taps* samples of its input,
    weighting newer samples more heavily (weights 0.5, 1.5, ... per tap,
    normalized so a constant signal passes through unchanged). When fewer
    than *taps* samples are available, all of them are used.
    """
    def LinearFIRFilter(data):
        n = min(taps, len(data))
        window = data[-n:]
        acc = 0.
        for idx, sample in enumerate(window):
            acc += (idx + .5) * sample
        # sum of weights is n*n/2, so 2*acc/n^2 normalizes to unit gain
        return 2. * acc / float(n * n)
    return LinearFIRFilter
class ProtocolDetector(threading.Thread):
    """Thread that probes one serial port (or capture file) and guesses its
    wire protocol.

    After join(), self.protocol is one of 'hegduino', 'pnb', 'peanut',
    'unknown', 'no data', 'no connect', or None (not yet determined).
    NOTE(review): original indentation was lost in extraction; block structure
    below is reconstructed from the control-flow keywords -- verify against
    the upstream source.
    """
    def __init__(self, port, baudrate, timeout):
        print 'Detecting Protocol'
        self.port = port
        self.baudrate = baudrate
        self.timeout = timeout
        self.protocol = None
        # 115200 baud (HEGduino) yields parsed float samples; the slower
        # protocols are scored on a raw byte string
        if baudrate == 115200:
            self.data = []
        else:
            self.data = ''
        threading.Thread.__init__(self)

    def run(self):
        try:
            port = self.port
            if is_heg_archive(port):
                s = get_rawdata_handle(port) # s is for serial port, which this imitates
            else:
                # integer ports are 1-based from the UI; pyserial wants 0-based
                if type(port) == int: port -= 1
                if hasattr(port, 'read'):
                    s = port
                elif type(port) in (str, unicode) and not port.startswith('/dev') and not (port.startswith('COM') and port[3:].isdigit()):
                    # a plain filename: replay a capture instead of a device
                    s = fakeserial.FakeSerial(port, 'rb', byterate=0)
                else:
                    print 'getting Serial connection on port: ', port
                    print 'Baudrate: ', self.baudrate
                    s = serial.Serial(port=port, baudrate=self.baudrate, timeout=2)
                    if self.baudrate == 38400:
                        # ask Pocket Neurobics hardware for protocol 3
                        s.write('protocol 3\n')
                    elif self.baudrate == 115200:
                        time.sleep(1)
                        self.protocol = 'hegduino'
                        print 'HEGduino connected'
                        # 't' starts the HEGduino text stream
                        s.write('t')
                        #s.flush()
                        #s.flushInput()
            if self.baudrate == 115200:
                # discard startup chatter before sampling
                print 'read initial bytes'
                for i in range(1,20):
                    temp = s.readline()
                    print temp
            if self.baudrate == 38400:
                #s.flushInput()
                print 'read initial bytes'
                self.data += s.read(200)
                data = self.data
                print data
        except Exception, e:
            # any connect/read failure marks the port unusable
            print e
            try:
                if DEBUG: print "Serial exception when detecting ", self.port
                if DEBUG: print "Serial port is ", s
                s.close()
            except:
                pass
            self.protocol = 'no connect'
            print self.protocol
            try: s.close()
            except: pass
            return
        data = []
        if self.baudrate == 115200:
            # collect 10 parsed samples of the comma-separated HEGduino stream
            while(len(self.data) < 10):
                temp = s.readline()
                # NOTE(review): str.find returns -1 (truthy) when ',' is
                # absent, so this test only fails when the line *starts*
                # with a comma -- confirm intent upstream.
                if temp.find(','):
                    temp = temp.split(',')
                    if len(temp) > 1 and temp[1].find('WAIT') == -1:
                        self.data.append(float(temp[1]))
                        #self.data.append(temp[0])
                        data = self.data
                    if len(self.data) < 1:
                        #if float(temp[0]) < 7000:
                        print 'Getting Baseline '
                        #else:
                        #    print 'Light level too high, check sensor'
                    print data, '\n'
                else:
                    print 'Callback: ', temp
            if not data:
                self.protocol = 'no data'
                print self.protocol
                s.close()
                return
        if self.baudrate == 38400:
            # accumulate bytes and score them against each known framing
            # until one protocol wins or 1024 bytes arrive
            while len(self.data) < 1024:
                try:
                    if self.baudrate == 38400:
                        self.data += s.read(20)
                        data = self.data
                        print data
                except serial.SerialException:
                    try:
                        if DEBUG: print "Serial exception when detecting ", self.port
                        s.close()
                    except:
                        pass
                    self.protocol = 'no connect'
                    s.close()
                    return
                if self.baudrate == 38400:
                    # Peanut frames start with 0xAA 0xAA
                    peanutscore = data.count('\xaa\xaa')
                    print 'Peanut score: ',peanutscore
                    asciiscore = data.count('\n1, 0x')
                    i, j, pnbscore = 0, 0, 0
                    d = map(ord, data)
                    # pnb framing: top two bits of every 5th byte count 0,1,2,3
                    while i < len(d)-20:
                        if d[i]>>6 == 0 and d[i+5]>>6 == 1 and d[i+10]>>6 == 2 and d[i+15]>>6 == 3:
                            pnbscore += 1
                            i += 20
                        else:
                            i += 1
                    if ENABLE_PNB:
                        while i < len(data):
                            if not ord(data[i])>>6 == j:
                                i += 1
                                j = 0
                            else:
                                i += 5
                                j += 1
                                if j == 4:
                                    j = 0
                                    pnbscore += 1
                    if DEBUG: print "pnbscore: %i, asciiscore: %i, peanutscore: %i, len(data): %i" % (pnbscore, asciiscore, peanutscore, len(data))
                    #if asciiscore >= peanutscore*4 + 4 and asciiscore >= pnbscore + 4:
                    #    self.protocol = 'ascii'
                    #    break
                    if pnbscore >= peanutscore*4 + 4:# and pnbscore >= asciiscore + 2:
                        self.protocol = 'pnb'
                        break
                    elif peanutscore*4 >= asciiscore + 4:# and peanutscore >= pnbscore + 3:
                        self.protocol = 'peanut'
                        break
                    else:
                        self.protocol = 'unknown'
            if DEBUG: print "pnbscore: %i, asciiscore: %i, peanutscore: %i, len(data): %i" % (pnbscore, asciiscore, peanutscore, len(data))
        if self.baudrate == 115200:
            # 'f' stops the HEGduino stream
            s.write('f')
        s.close()
        print 'Protocol detected: ', self.protocol
        return
class SerialManager(threading.Thread):
packet_length_pos = 0
file_extension = 'pna'
default_packet_length = 6
timeout_duration = .6
timeout_count = 3
def __init__(self, baudrate, port=None, callback=lambda x: x, rawlog=False, skip=0, fast=False, pause=False):
threading.Thread.__init__(self)
time1 = self.starttime = time.time()
self.port, self.fast, self.skip, self.callback, self.paused = port, fast, skip, callback, pause
self.ser = None
self.baudrate = baudrate
self.connected, self.keeprunning = False, False
self.timeouts = 0
self.read_str = ''
| |
FIXME: catch this bug in testcase
#self.change_focus((maxcol,maxrow), pos,
# row_offset+rows, 'above')
self.change_focus((maxcol,maxrow), pos,
row_offset-rows, 'above')
return
# check if cursor will stop scroll from taking effect
if cursor is not None:
x,y = cursor
if y+focus_row_offset-1 < 0:
# cursor position is a problem,
# choose another focus
if widget is None:
# try harder to get next widget
widget, pos = self.body.get_next(pos)
if widget is None:
return # can't do anything
else:
row_offset -= rows
if row_offset >= maxrow:
# must scroll further than 1 line
row_offset = maxrow-1
self.change_focus((maxcol,maxrow),pos,
row_offset, 'above', )
return
# if all else fails, keep the current focus.
self.shift_focus((maxcol,maxrow), focus_row_offset-1)
def _keypress_page_up(self, size):
    """Scroll the list box one page up, moving focus to a sensible
    widget near the top and snapping within the allowed region."""
    (maxcol, maxrow) = size

    middle, top, bottom = self.calculate_visible(
        (maxcol,maxrow), True)
    if middle is None: return 'page up'

    row_offset, focus_widget, focus_pos, focus_rows, cursor = middle
    trim_top, fill_above = top

    # topmost_visible is row_offset rows above top row of
    # focus (+ve) or -row_offset rows below top row of focus (-ve)
    topmost_visible = row_offset

    # scroll_from_row is (first match)
    # 1. topmost visible row if focus is not selectable
    # 2. row containing cursor if focus has a cursor
    # 3. top row of focus widget if it is visible
    # 4. topmost visible row otherwise
    if not focus_widget.selectable():
        scroll_from_row = topmost_visible
    elif cursor is not None:
        x,y = cursor
        scroll_from_row = -y
    elif row_offset >= 0:
        scroll_from_row = 0
    else:
        scroll_from_row = topmost_visible

    # snap_rows is maximum extra rows to scroll when
    # snapping to new a focus
    snap_rows = topmost_visible - scroll_from_row

    # move row_offset to the new desired value (1 "page" up)
    row_offset = scroll_from_row + maxrow

    # not used below:
    scroll_from_row = topmost_visible = None

    # gather potential target widgets
    t = []
    # add current focus
    t.append((row_offset,focus_widget,focus_pos,focus_rows))
    pos = focus_pos
    # include widgets from calculate_visible(..)
    for widget, pos, rows in fill_above:
        row_offset -= rows
        t.append( (row_offset, widget, pos, rows) )
    # add newly visible ones, including within snap_rows
    snap_region_start = len(t)
    while row_offset > -snap_rows:
        widget, pos = self.body.get_prev(pos)
        if widget is None: break
        rows = widget.rows((maxcol,))
        row_offset -= rows
        # determine if one below puts current one into snap rgn
        if row_offset > 0:
            snap_region_start += 1
        t.append( (row_offset, widget, pos, rows) )

    # if we can't fill the top we need to adjust the row offsets
    row_offset, w, p, r = t[-1]
    if row_offset > 0:
        adjust = - row_offset
        t = [(ro+adjust, w, p, r) for (ro,w,p,r) in t]

    # if focus_widget (first in t) is off edge, remove it
    row_offset, w, p, r = t[0]
    if row_offset >= maxrow:
        del t[0]
        snap_region_start -= 1

    # we'll need this soon
    self.update_pref_col_from_focus((maxcol,maxrow))

    # choose the topmost selectable and (newly) visible widget
    # search within snap_rows then visible region
    search_order = ( range( snap_region_start, len(t))
        + range( snap_region_start-1, -1, -1 ) )
    #assert 0, `t, search_order`
    bad_choices = []
    cut_off_selectable_chosen = 0
    for i in search_order:
        row_offset, widget, pos, rows = t[i]

        if not widget.selectable():
            continue

        # try selecting this widget
        pref_row = max(0, -row_offset)

        # if completely within snap region, adjust row_offset
        if rows + row_offset <= 0:
            self.change_focus( (maxcol,maxrow), pos,
                -(rows-1), 'below',
                (self.pref_col, rows-1),
                snap_rows-((-row_offset)-(rows-1)))
        else:
            self.change_focus( (maxcol,maxrow), pos,
                row_offset, 'below',
                (self.pref_col, pref_row), snap_rows )

        # if we're as far up as we can scroll, take this one
        if (fill_above and self.body.get_prev(fill_above[-1][1])
            == (None,None) ):
            pass #return

        # find out where that actually puts us
        middle, top, bottom = self.calculate_visible(
            (maxcol,maxrow), True)
        act_row_offset, _ign1, _ign2, _ign3, _ign4 = middle

        # discard chosen widget if it will reduce scroll amount
        # because of a fixed cursor (absolute last resort)
        if act_row_offset > row_offset+snap_rows:
            bad_choices.append(i)
            continue
        if act_row_offset < row_offset:
            bad_choices.append(i)
            continue

        # also discard if off top edge (second last resort)
        if act_row_offset < 0:
            bad_choices.append(i)
            cut_off_selectable_chosen = 1
            continue

        return

    # anything selectable is better than what follows:
    if cut_off_selectable_chosen:
        return

    if fill_above and focus_widget.selectable():
        # if we're at the top and have a selectable, return
        if self.body.get_prev(fill_above[-1][1]) == (None,None):
            pass #return

    # if still none found choose the topmost widget
    good_choices = [j for j in search_order if j not in bad_choices]
    for i in good_choices + search_order:
        row_offset, widget, pos, rows = t[i]
        if pos == focus_pos: continue

        # if completely within snap region, adjust row_offset
        if rows + row_offset <= 0:
            snap_rows -= (-row_offset) - (rows-1)
            row_offset = -(rows-1)

        self.change_focus( (maxcol,maxrow), pos,
            row_offset, 'below', None,
            snap_rows )
        return

    # no choices available, just shift current one
    self.shift_focus((maxcol, maxrow), min(maxrow-1,row_offset))

    # final check for pathological case where we may fall short
    middle, top, bottom = self.calculate_visible(
        (maxcol,maxrow), True)
    act_row_offset, _ign1, pos, _ign2, _ign3 = middle
    if act_row_offset >= row_offset:
        # no problem
        return

    # fell short, try to select anything else above
    if not t:
        return
    _ign1, _ign2, pos, _ign3 = t[-1]
    widget, pos = self.body.get_prev(pos)
    if widget is None:
        # no dice, we're stuck here
        return

    # bring in only one row if possible
    rows = widget.rows((maxcol,), True)
    self.change_focus((maxcol,maxrow), pos, -(rows-1),
        'below', (self.pref_col, rows-1), 0 )
def _keypress_page_down(self, size):
    """Scroll the list box one page down, moving focus to a sensible
    widget near the bottom and snapping within the allowed region."""
    (maxcol, maxrow) = size

    middle, top, bottom = self.calculate_visible(
        (maxcol,maxrow), True)
    if middle is None: return 'page down'

    row_offset, focus_widget, focus_pos, focus_rows, cursor = middle
    trim_bottom, fill_below = bottom

    # bottom_edge is maxrow-focus_pos rows below top row of focus
    bottom_edge = maxrow - row_offset

    # scroll_from_row is (first match)
    # 1. bottom edge if focus is not selectable
    # 2. row containing cursor + 1 if focus has a cursor
    # 3. bottom edge of focus widget if it is visible
    # 4. bottom edge otherwise
    if not focus_widget.selectable():
        scroll_from_row = bottom_edge
    elif cursor is not None:
        x,y = cursor
        scroll_from_row = y + 1
    elif bottom_edge >= focus_rows:
        scroll_from_row = focus_rows
    else:
        scroll_from_row = bottom_edge

    # snap_rows is maximum extra rows to scroll when
    # snapping to new a focus
    snap_rows = bottom_edge - scroll_from_row

    # move row_offset to the new desired value (1 "page" down)
    row_offset = -scroll_from_row

    # not used below:
    scroll_from_row = bottom_edge = None

    # gather potential target widgets
    t = []
    # add current focus
    t.append((row_offset,focus_widget,focus_pos,focus_rows))
    pos = focus_pos
    row_offset += focus_rows
    # include widgets from calculate_visible(..)
    for widget, pos, rows in fill_below:
        t.append( (row_offset, widget, pos, rows) )
        row_offset += rows
    # add newly visible ones, including within snap_rows
    snap_region_start = len(t)
    while row_offset < maxrow+snap_rows:
        widget, pos = self.body.get_next(pos)
        if widget is None: break
        rows = widget.rows((maxcol,))
        t.append( (row_offset, widget, pos, rows) )
        row_offset += rows
        # determine if one above puts current one into snap rgn
        if row_offset < maxrow:
            snap_region_start += 1

    # if we can't fill the bottom we need to adjust the row offsets
    row_offset, w, p, rows = t[-1]
    if row_offset + rows < maxrow:
        adjust = maxrow - (row_offset + rows)
        t = [(ro+adjust, w, p, r) for (ro,w,p,r) in t]

    # if focus_widget (first in t) is off edge, remove it
    row_offset, w, p, rows = t[0]
    if row_offset+rows <= 0:
        del t[0]
        snap_region_start -= 1

    # we'll need this soon
    self.update_pref_col_from_focus((maxcol,maxrow))

    # choose the bottommost selectable and (newly) visible widget
    # search within snap_rows then visible region
    search_order = ( range( snap_region_start, len(t))
        + range( snap_region_start-1, -1, -1 ) )
    #assert 0, `t, search_order`
    bad_choices = []
    cut_off_selectable_chosen = 0
    for i in search_order:
        row_offset, widget, pos, rows = t[i]

        if not widget.selectable():
            continue

        # try selecting this widget
        pref_row = min(maxrow-row_offset-1, rows-1)

        # if completely within snap region, adjust row_offset
        if row_offset >= maxrow:
            self.change_focus( (maxcol,maxrow), pos,
                maxrow-1, 'above',
                (self.pref_col, 0),
                snap_rows+maxrow-row_offset-1 )
        else:
            self.change_focus( (maxcol,maxrow), pos,
                row_offset, 'above',
                (self.pref_col, pref_row), snap_rows )

        # find out where that actually puts us
        middle, top, bottom = self.calculate_visible(
            (maxcol,maxrow), True)
        act_row_offset, _ign1, _ign2, _ign3, _ign4 = middle

        # discard chosen widget if it will reduce scroll amount
        # because of a fixed cursor (absolute last resort)
        if act_row_offset < row_offset-snap_rows:
            bad_choices.append(i)
            continue
        if act_row_offset > row_offset:
            bad_choices.append(i)
            continue

        # also discard if off top edge (second last resort)
        if act_row_offset+rows > maxrow:
            bad_choices.append(i)
            cut_off_selectable_chosen = 1
            continue

        return

    # anything selectable is better than what follows:
    if cut_off_selectable_chosen:
        return

    # if still none found choose the bottommost widget
    good_choices = [j for j in search_order if j not in bad_choices]
    for i in good_choices + search_order:
        row_offset, widget, pos, rows = t[i]
        if pos == focus_pos: continue

        # if completely within snap region, adjust row_offset
        if row_offset >= maxrow:
            snap_rows -= snap_rows+maxrow-row_offset-1
            row_offset = maxrow-1

        self.change_focus( (maxcol,maxrow), pos,
            row_offset, 'above', None,
            snap_rows )
        return

    # no choices available, just shift current one
    self.shift_focus((maxcol, maxrow), max(1-focus_rows,row_offset))

    # final check for pathological case where we may fall short
    middle, top, bottom = self.calculate_visible(
        (maxcol,maxrow), True)
    act_row_offset, _ign1, pos, _ign2, _ign3 = middle
    if act_row_offset <= row_offset:
        # no problem
        return

    # fell short, try to select anything else below
    if not t:
        return
    _ign1, _ign2, pos, _ign3 = t[-1]
    widget, pos = self.body.get_next(pos)
    if widget is None:
        # no dice, we're stuck here
        return

    # bring in only one row if possible
    rows = widget.rows((maxcol,), True)
    self.change_focus((maxcol,maxrow), pos, maxrow-1,
        'above', (self.pref_col, 0), 0 )
def mouse_event(self, size, event, button, col, row, focus):
    """
    Pass the event to the contained widgets.
    May change focus on button 1 press.
    """
    (maxcol, maxrow) = size
    middle, top, bottom = self.calculate_visible((maxcol, maxrow),
        focus=True)
    if middle is None:
        return False

    _ignore, focus_widget, focus_pos, focus_rows, cursor = middle
    trim_top, fill_above = top
    _ignore, fill_below = bottom

    # assemble the visible widgets in top-down screen order
    fill_above.reverse()  # calculate_visible returns it bottom-up
    visible = fill_above + [(focus_widget, focus_pos, focus_rows)] + fill_below

    # locate the widget whose rows contain the event row
    target = None
    wrow = -trim_top
    for widget, w_pos, w_rows in visible:
        if wrow + w_rows > row:
            target = (widget, w_pos, w_rows)
            break
        wrow += w_rows
    if target is None:
        return False
    widget, w_pos, w_rows = target

    # the child only has focus if the listbox does and it is the focus widget
    focus = focus and widget == focus_widget
    if is_mouse_press(event) and button == 1:
        if widget.selectable():
            self.change_focus((maxcol, maxrow), w_pos, wrow)

    if not hasattr(widget, 'mouse_event'):
        return False

    # translate the row into the child's coordinate space
    return widget.mouse_event((maxcol,), event, button, col, row - wrow,
        focus)
def | |
= STEPS_LIB.api_get_load_related_model_float_parameter(ibus, ickt, model_type, par_name, self.toolkit_index);
par_name = self.__get_string_from_c_char_p(par_name)
parameters.append((par_name, par_value))
return tuple(parameters)
def get_line_related_model_name(self, line, model_type):
    """Return the name of the *model_type* model attached to a transmission line.

    Args:
        line: line device id tuple (ibus, jbus, ickt).
        model_type: model type string.
    Returns:
        Model name string, or None when the model type is not supported.
    """
    global STEPS_LIB
    ibus, jbus, ickt = self.__extract_double_bus_device_id(line)
    c_ickt = self.__get_c_char_p_of_string(ickt)
    c_type = self.__get_c_char_p_of_string(model_type)
    raw = STEPS_LIB.api_get_line_related_model_name(ibus, jbus, c_ickt, c_type, self.toolkit_index)
    name = self.__get_string_from_c_char_p(raw)
    return None if name == "" else name
def get_line_related_model_data(self, line, model_type, par_name):
    """Return the value of one float parameter of a line-related model.

    Args:
        line: line device id tuple (ibus, jbus, ickt).
        model_type: model type string.
        par_name: parameter name string.
    Returns:
        Parameter value; 0.0 when the model type or parameter is unsupported.
    """
    global STEPS_LIB
    ibus, jbus, ickt = self.__extract_double_bus_device_id(line)
    c_ickt = self.__get_c_char_p_of_string(ickt)
    c_type = self.__get_c_char_p_of_string(model_type)
    c_par = self.__get_c_char_p_of_string(par_name)
    return STEPS_LIB.api_get_line_related_model_float_parameter(ibus, jbus, c_ickt, c_type, c_par, self.toolkit_index)
def set_line_related_model_data(self, line, model_type, par_name, value):
    """Set one float parameter of a line-related model.

    Args:
        line: line device id tuple (ibus, jbus, ickt).
        model_type: model type string.
        par_name: parameter name string.
        value: numeric parameter value.
    Tips:
        Unsupported model types or parameter names change nothing; a
        non-numeric value may crash the underlying library.
    """
    global STEPS_LIB
    ibus, jbus, ickt = self.__extract_double_bus_device_id(line)
    c_ickt = self.__get_c_char_p_of_string(ickt)
    c_type = self.__get_c_char_p_of_string(model_type)
    c_par = self.__get_c_char_p_of_string(par_name)
    return STEPS_LIB.api_set_line_related_model_float_parameter(ibus, jbus, c_ickt, c_type, c_par, value, self.toolkit_index)
def get_line_related_model_parameter_pair(self, line, model_type):
    """Return all float parameters of a line-related model.

    Args:
        line: line device id tuple (ibus, jbus, ickt).
        model_type: model type string.
    Returns:
        Tuple of (parameter name, value) pairs; empty when the model type
        is unsupported.
    """
    global STEPS_LIB
    ibus, jbus, ickt = self.__extract_double_bus_device_id(line)
    c_ickt = self.__get_c_char_p_of_string(ickt)
    c_type = self.__get_c_char_p_of_string(model_type)
    pairs = []
    count = STEPS_LIB.api_get_line_related_model_float_parameter_count(ibus, jbus, c_ickt, c_type, self.toolkit_index)
    for idx in range(count):
        c_par = STEPS_LIB.api_get_line_related_model_float_parameter_name(ibus, jbus, c_ickt, c_type, idx, self.toolkit_index)
        value = STEPS_LIB.api_get_line_related_model_float_parameter(ibus, jbus, c_ickt, c_type, c_par, self.toolkit_index)
        pairs.append((self.__get_string_from_c_char_p(c_par), value))
    return tuple(pairs)
def get_hvdc_related_model_name(self, hvdc, model_type):
    """Return the name of the *model_type* model attached to an HVDC link.

    Args:
        hvdc: HVDC link device id tuple (ibus, jbus, ickt).
        model_type: model type string.
    Returns:
        Model name string, or None when the model type is not supported.
    """
    global STEPS_LIB
    ibus, jbus, ickt = self.__extract_double_bus_device_id(hvdc)
    c_ickt = self.__get_c_char_p_of_string(ickt)
    c_type = self.__get_c_char_p_of_string(model_type)
    raw = STEPS_LIB.api_get_hvdc_related_model_name(ibus, jbus, c_ickt, c_type, self.toolkit_index)
    name = self.__get_string_from_c_char_p(raw)
    return None if name == "" else name
def get_hvdc_related_model_data(self, hvdc, model_type, par_name):
    """Return the value of one float parameter of an HVDC-related model.

    Args:
        hvdc: HVDC link device id tuple (ibus, jbus, ickt).
        model_type: model type string.
        par_name: parameter name string.
    Returns:
        Parameter value; 0.0 when the model type or parameter is unsupported.
    """
    global STEPS_LIB
    ibus, jbus, ickt = self.__extract_double_bus_device_id(hvdc)
    c_ickt = self.__get_c_char_p_of_string(ickt)
    c_type = self.__get_c_char_p_of_string(model_type)
    c_par = self.__get_c_char_p_of_string(par_name)
    return STEPS_LIB.api_get_hvdc_related_model_float_parameter(ibus, jbus, c_ickt, c_type, c_par, self.toolkit_index)
def set_hvdc_related_model_data(self, hvdc, model_type, par_name, value):
    """Set one float parameter of an HVDC-related model.

    Args:
        hvdc: HVDC link device id tuple (ibus, jbus, ickt).
        model_type: model type string.
        par_name: parameter name string.
        value: numeric parameter value.
    Tips:
        Unsupported model types or parameter names change nothing; a
        non-numeric value may crash the underlying library.
    """
    global STEPS_LIB
    ibus, jbus, ickt = self.__extract_double_bus_device_id(hvdc)
    c_ickt = self.__get_c_char_p_of_string(ickt)
    c_type = self.__get_c_char_p_of_string(model_type)
    c_par = self.__get_c_char_p_of_string(par_name)
    return STEPS_LIB.api_set_hvdc_related_model_float_parameter(ibus, jbus, c_ickt, c_type, c_par, value, self.toolkit_index)
def get_hvdc_related_model_parameter_pair(self, hvdc, model_type):
    """Return all float parameters of an HVDC-related model.

    Args:
        hvdc: HVDC link device id tuple (ibus, jbus, ickt).
        model_type: model type string.
    Returns:
        Tuple of (parameter name, value) pairs; empty when the model type
        is unsupported.
    """
    global STEPS_LIB
    ibus, jbus, ickt = self.__extract_double_bus_device_id(hvdc)
    c_ickt = self.__get_c_char_p_of_string(ickt)
    c_type = self.__get_c_char_p_of_string(model_type)
    pairs = []
    count = STEPS_LIB.api_get_hvdc_related_model_float_parameter_count(ibus, jbus, c_ickt, c_type, self.toolkit_index)
    for idx in range(count):
        c_par = STEPS_LIB.api_get_hvdc_related_model_float_parameter_name(ibus, jbus, c_ickt, c_type, idx, self.toolkit_index)
        value = STEPS_LIB.api_get_hvdc_related_model_float_parameter(ibus, jbus, c_ickt, c_type, c_par, self.toolkit_index)
        pairs.append((self.__get_string_from_c_char_p(c_par), value))
    return tuple(pairs)
def get_generator_governor_pmax(self, gen):
    """Return generator Pmax in MW from its turbine governor model.

    Args:
        gen: generator id tuple (ibus, ickt).
    Returns:
        Pmax in MW; when the generator has no governor model, the current
        PGEN_MW is returned instead (no headroom assumed).
    """
    if self.get_generator_related_model_name(gen, "gov") is None:
        return self.get_generator_data(gen, "d", "PGEN_MW")
    # governor pmax is in per-unit on machine base; convert to MW
    mbase = self.get_generator_data(gen, "d", "MBASE_MVA")
    return self.get_generator_related_model_data(gen, "gov", "pmax") * mbase
def get_generator_governor_pmin(self, gen):
    """Return generator Pmin in MW from its turbine governor model.

    Args:
        gen: generator id tuple (ibus, ickt).
    Returns:
        Pmin in MW; when the generator has no governor model, the current
        PGEN_MW is returned instead (no headroom assumed).
    """
    if self.get_generator_related_model_name(gen, "gov") is None:
        return self.get_generator_data(gen, "d", "PGEN_MW")
    # governor pmin is in per-unit on machine base; convert to MW
    mbase = self.get_generator_data(gen, "d", "MBASE_MVA")
    return self.get_generator_related_model_data(gen, "gov", "pmin") * mbase
def get_generator_governor_up_spinning_reserve(self, gen):
    """Return up spinning reserve of one generator in MW.

    Args:
        gen: generator id tuple (ibus, ickt).
    Returns:
        Pmax - PGEN in MW (zero when the generator has no governor model).
    """
    pgen = self.get_generator_data(gen, "d", "PGEN_MW")
    return self.get_generator_governor_pmax(gen) - pgen
def get_generator_governor_down_spinning_reserve(self, gen):
    """Return down spinning reserve of one generator in MW.

    Args:
        gen: generator id tuple (ibus, ickt).
    Returns:
        PGEN - Pmin in MW (zero when the generator has no governor model).
    """
    pgen = self.get_generator_data(gen, "d", "PGEN_MW")
    return pgen - self.get_generator_governor_pmin(gen)
def get_generator_governor_total_up_spinning_reserve_with_constraints(self, area=0, zone=0):
    """Return total up spinning reserve (MW) of generators in area/zone.

    Args:
        area: area number; 0 disables area filtering.
        zone: zone number; 0 disables zone filtering.
    Returns:
        Sum of per-generator up spinning reserve in MW.
    """
    gens = self.get_generators_with_constraints(area=area, zone=zone)
    return sum((self.get_generator_governor_up_spinning_reserve(g) for g in gens), 0.0)
def get_generator_governor_total_down_spinning_reserve_with_constraints(self, area=0, zone=0):
    """Return total down spinning reserve (MW) of generators in area/zone.

    Args:
        area: area number; 0 disables area filtering.
        zone: zone number; 0 disables zone filtering.
    Returns:
        Sum of per-generator down spinning reserve in MW.
    """
    gens = self.get_generators_with_constraints(area=area, zone=zone)
    return sum((self.get_generator_governor_down_spinning_reserve(g) for g in gens), 0.0)
def get_generator_governor_total_pmax_with_constraints(self, area=0, zone=0):
    """Return total governor Pmax (MW) of generators in area/zone.

    Args:
        area: area number; 0 disables area filtering.
        zone: zone number; 0 disables zone filtering.
    Returns:
        Sum of per-generator Pmax in MW.
    """
    gens = self.get_generators_with_constraints(area=area, zone=zone)
    return sum((self.get_generator_governor_pmax(g) for g in gens), 0.0)
def get_generator_governor_total_pmin_with_constraints(self, area=0, zone=0):
    """Return total governor Pmin (MW) of generators in area/zone.

    Args:
        area: area number; 0 disables area filtering.
        zone: zone number; 0 disables zone filtering.
    Returns:
        Sum of per-generator Pmin in MW.
    """
    gens = self.get_generators_with_constraints(area=area, zone=zone)
    return sum((self.get_generator_governor_pmin(g) for g in gens), 0.0)
def get_dynamic_simulator_parameter(self, par_type, par_name):
"""
Get dynamic simulator configuration parameter.
Args:
(1) par_type: String of parameter type. Choose one from {"I", "F", "D", "S", "B"}.
(2) par_name: String of parameter name.
Rets:
(1) Value of parameter.
Tips:
The par_type meaning: "I": integer number, "F" or "D": float number, "S": string, "B": boolean data.
The type of given parameter MUST be consistent with the given parameter type. Otherwise, 0, 0.0, "", or False will be returned.
Example: N/A
"""
global STEPS_LIB
par_type = par_type.upper()
if | |
<reponame>SeraphRoy/PyPy-Functional
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.jit.codewriter import longlong
from rpython.jit.metainterp import compile
from rpython.jit.metainterp.history import (Const, ConstInt, make_hashable_int,
ConstFloat)
from rpython.jit.metainterp.optimize import InvalidLoop
from rpython.jit.metainterp.optimizeopt.intutils import IntBound
from rpython.jit.metainterp.optimizeopt.optimizer import (
Optimization, OptimizationResult, REMOVED, CONST_0, CONST_1)
from rpython.jit.metainterp.optimizeopt.info import INFO_NONNULL, INFO_NULL
from rpython.jit.metainterp.optimizeopt.util import _findall, make_dispatcher_method
from rpython.jit.metainterp.resoperation import rop, ResOperation, opclasses,\
OpHelpers
from rpython.rlib.rarithmetic import highest_bit
from rpython.rtyper.lltypesystem import llmemory
from rpython.rtyper import rclass
import math
class CallLoopinvariantOptimizationResult(OptimizationResult):
    """Optimization result recording a loop-invariant call.

    When committed, callback() stores the emitted producer operation and the
    reusable result under the call's function-address key, so later
    iterations can skip re-executing the call.
    """
    def __init__(self, opt, op, old_op):
        OptimizationResult.__init__(self, opt, op)
        # operation whose result will be reused for the same invariant call
        self.old_op = old_op

    def callback(self):
        self._callback(self.op, self.old_op)

    def _callback(self, op, old_op):
        # invariant calls are keyed by the constant function address (arg 0)
        key = make_hashable_int(op.getarg(0).getint())
        self.opt.loop_invariant_producer[key] = self.opt.optimizer.getlastop()
        self.opt.loop_invariant_results[key] = old_op
class OptRewrite(Optimization):
"""Rewrite operations into equivalent, cheaper operations.
This includes already executed operations and constants.
"""
def __init__(self):
    # cached results of loop-invariant calls, keyed by function address
    self.loop_invariant_results = {}
    # the operations that produced those results (for short preambles)
    self.loop_invariant_producer = {}
def setup(self):
    # expose this pass on the optimizer so other passes can reach it
    self.optimizer.optrewrite = self
def produce_potential_short_preamble_ops(self, sb):
    # offer every recorded loop-invariant producer to the short preamble
    for op in self.loop_invariant_producer.values():
        sb.add_loopinvariant_op(op)
def propagate_forward(self, op):
    # boolean ops with an inverse or reflexive twin may already have a
    # cached pure result under the twin form; reuse it instead of emitting
    if opclasses[op.opnum].boolinverse != -1 or opclasses[op.opnum].boolreflex != -1:
        if self.find_rewritable_bool(op):
            return
    return dispatch_opt(self, op)
def propagate_postprocess(self, op):
    # dispatch to the per-opcode postprocess_* handler, if any
    return dispatch_postprocess(self, op)
def try_boolinvers(self, op, targs):
    # If the inverse operation *targs* already has a known constant 0/1
    # result, then op is the opposite constant.
    oldop = self.get_pure_result(targs)
    if oldop is not None:
        b = self.getintbound(oldop)
        if b.equal(1):
            self.make_constant(op, CONST_0)
            return True
        elif b.equal(0):
            self.make_constant(op, CONST_1)
            return True
    return False
    def find_rewritable_bool(self, op):
        """Try to express a boolean op via an already-computed equivalent:
        its inverse (negated result), its reflex (swapped arguments), or the
        inverse of its reflex.  Returns True if `op` was rewritten."""
        oldopnum = op.boolinverse
        arg0 = op.getarg(0)
        arg1 = op.getarg(1)
        if oldopnum != -1:
            # 1) inverse op with same args: reuse its result, negated.
            top = ResOperation(oldopnum, [arg0, arg1])
            if self.try_boolinvers(op, top):
                return True
        oldopnum = op.boolreflex # FIXME: add INT_ADD, INT_MUL
        if oldopnum != -1:
            # 2) reflex op with swapped args computes the very same value.
            top = ResOperation(oldopnum, [arg1, arg0])
            oldop = self.get_pure_result(top)
            if oldop is not None:
                self.optimizer.make_equal_to(op, oldop)
                return True
        if op.boolreflex == -1:
            return False
        # 3) inverse of the reflex, with swapped args: negated value.
        oldopnum = opclasses[op.boolreflex].boolinverse
        if oldopnum != -1:
            top = ResOperation(oldopnum, [arg1, arg0])
            if self.try_boolinvers(op, top):
                return True
        return False
    def optimize_INT_AND(self, op):
        """Simplify AND: x & 0 -> 0; x & -1 -> x; and x & mask -> x when x is
        known to fit inside the mask's low contiguous run of set bits."""
        b1 = self.getintbound(op.getarg(0))
        b2 = self.getintbound(op.getarg(1))
        if b1.equal(0) or b2.equal(0):
            self.make_constant_int(op, 0)
            return
        elif b2.is_constant():
            val = b2.lower
            # val & ~(val + 1) keeps only the low contiguous set bits of val,
            # so the bound test only fires for effective low-bit masks.
            if val == -1 or (b1.bounded() and b1.lower >= 0
                and b1.upper <= val & ~(val + 1)):
                self.make_equal_to(op, op.getarg(0))
                return
        elif b1.is_constant():
            val = b1.lower
            if val == -1 or (b2.bounded() and b2.lower >= 0
                and b2.upper <= val & ~(val + 1)):
                self.make_equal_to(op, op.getarg(1))
                return
        return self.emit(op)
def optimize_INT_OR(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
if b1.equal(0):
self.make_equal_to(op, op.getarg(1))
elif b2.equal(0):
self.make_equal_to(op, op.getarg(0))
else:
return self.emit(op)
    def optimize_INT_SUB(self, op):
        """Simplify SUB: x - 0 -> x; 0 - x -> INT_NEG(x); x - x -> 0."""
        arg1 = self.get_box_replacement(op.getarg(0))
        arg2 = self.get_box_replacement(op.getarg(1))
        b1 = self.getintbound(arg1)
        b2 = self.getintbound(arg2)
        if b2.equal(0):
            self.make_equal_to(op, arg1)
        elif b1.equal(0):
            # 0 - x: replace with the cheaper unary negation.
            op = self.replace_op_with(op, rop.INT_NEG, args=[arg2])
            return self.emit(op)
        elif arg1 == arg2:
            self.make_constant_int(op, 0)
        else:
            return self.emit(op)
    def postprocess_INT_SUB(self, op):
        """Record algebraic aliases of op = arg0 - arg1 as 'pure' facts so
        CSE can reuse them (e.g. op + arg1 == arg0)."""
        import sys
        arg0 = op.getarg(0)
        arg1 = op.getarg(1)
        self.optimizer.pure_from_args(rop.INT_ADD, [op, arg1], arg0)
        self.optimizer.pure_from_args(rop.INT_SUB, [arg0, op], arg1)
        if isinstance(arg1, ConstInt):
            # invert the constant
            i1 = arg1.getint()
            if i1 == -sys.maxint - 1:
                # -(-maxint-1) overflows the machine word: skip.
                return
            inv_arg1 = ConstInt(-i1)
            # op = arg0 + (-i1), and the rearrangements thereof.
            self.optimizer.pure_from_args(rop.INT_ADD, [arg0, inv_arg1], op)
            self.optimizer.pure_from_args(rop.INT_ADD, [inv_arg1, arg0], op)
            self.optimizer.pure_from_args(rop.INT_SUB, [op, inv_arg1], arg0)
            self.optimizer.pure_from_args(rop.INT_SUB, [op, arg0], inv_arg1)
def optimize_INT_ADD(self, op):
if self.is_raw_ptr(op.getarg(0)) or self.is_raw_ptr(op.getarg(1)):
return self.emit(op)
arg1 = self.get_box_replacement(op.getarg(0))
b1 = self.getintbound(arg1)
arg2 = self.get_box_replacement(op.getarg(1))
b2 = self.getintbound(arg2)
# If one side of the op is 0 the result is the other side.
if b1.equal(0):
self.make_equal_to(op, arg2)
elif b2.equal(0):
self.make_equal_to(op, arg1)
else:
return self.emit(op)
    def postprocess_INT_ADD(self, op):
        """Record algebraic aliases of op = arg0 + arg1 (commuted form,
        reverse subtractions, and constant-negated forms) as pure facts."""
        import sys
        arg0 = op.getarg(0)
        arg1 = op.getarg(1)
        self.optimizer.pure_from_args(rop.INT_ADD, [arg1, arg0], op)
        # Synthesize the reverse op for optimize_default to reuse
        self.optimizer.pure_from_args(rop.INT_SUB, [op, arg1], arg0)
        self.optimizer.pure_from_args(rop.INT_SUB, [op, arg0], arg1)
        if isinstance(arg0, ConstInt):
            # invert the constant
            i0 = arg0.getint()
            if i0 == -sys.maxint - 1:
                # negation would overflow the machine word: skip.
                return
            inv_arg0 = ConstInt(-i0)
        elif isinstance(arg1, ConstInt):
            # commutative
            i0 = arg1.getint()
            if i0 == -sys.maxint - 1:
                return
            inv_arg0 = ConstInt(-i0)
            arg1 = arg0
        else:
            return
        # Here op = arg1 + (-inv_arg0); record the rearrangements.
        self.optimizer.pure_from_args(rop.INT_SUB, [arg1, inv_arg0], op)
        self.optimizer.pure_from_args(rop.INT_SUB, [arg1, op], inv_arg0)
        self.optimizer.pure_from_args(rop.INT_ADD, [op, inv_arg0], arg1)
        self.optimizer.pure_from_args(rop.INT_ADD, [inv_arg0, op], arg1)
    def optimize_INT_MUL(self, op):
        """Simplify MUL: x * 1 -> x; x * 0 -> 0; x * 2**n -> x << n."""
        arg1 = self.get_box_replacement(op.getarg(0))
        b1 = self.getintbound(arg1)
        arg2 = self.get_box_replacement(op.getarg(1))
        b2 = self.getintbound(arg2)
        # If one side of the op is 1 the result is the other side.
        if b1.equal(1):
            self.make_equal_to(op, arg2)
        elif b2.equal(1):
            self.make_equal_to(op, arg1)
        elif b1.equal(0) or b2.equal(0):
            self.make_constant_int(op, 0)
        else:
            # Try either side as a constant power-of-two multiplier.
            for lhs, rhs in [(arg1, arg2), (arg2, arg1)]:
                lh_info = self.getintbound(lhs)
                if lh_info.is_constant():
                    x = lh_info.getint()
                    # x & (x - 1) == 0 is a quick test for power of 2
                    # (x == 0 was already handled by the branch above)
                    if x & (x - 1) == 0:
                        new_rhs = ConstInt(highest_bit(lh_info.getint()))
                        op = self.replace_op_with(op, rop.INT_LSHIFT, args=[rhs, new_rhs])
                        break
            return self.emit(op)
def _optimize_CALL_INT_UDIV(self, op):
b2 = self.getintbound(op.getarg(2))
if b2.is_constant() and b2.getint() == 1:
self.make_equal_to(op, op.getarg(1))
self.last_emitted_operation = REMOVED
return True
return False
def optimize_INT_LSHIFT(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
if b2.is_constant() and b2.getint() == 0:
self.make_equal_to(op, op.getarg(0))
elif b1.is_constant() and b1.getint() == 0:
self.make_constant_int(op, 0)
else:
return self.emit(op)
def optimize_INT_RSHIFT(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
if b2.is_constant() and b2.getint() == 0:
self.make_equal_to(op, op.getarg(0))
elif b1.is_constant() and b1.getint() == 0:
self.make_constant_int(op, 0)
else:
return self.emit(op)
def optimize_INT_XOR(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
if b1.equal(0):
self.make_equal_to(op, op.getarg(1))
elif b2.equal(0):
self.make_equal_to(op, op.getarg(0))
else:
return self.emit(op)
    def optimize_FLOAT_MUL(self, op):
        """f * 1.0 -> f and f * -1.0 -> FLOAT_NEG(f), on either side."""
        arg1 = op.getarg(0)
        arg2 = op.getarg(1)
        # Constant fold f0 * 1.0 and turn f0 * -1.0 into a FLOAT_NEG, these
        # work in all cases, including NaN and inf
        for lhs, rhs in [(arg1, arg2), (arg2, arg1)]:
            v1 = self.get_box_replacement(lhs)
            v2 = self.get_box_replacement(rhs)
            if v1.is_constant():
                if v1.getfloat() == 1.0:
                    self.make_equal_to(op, v2)
                    return
                elif v1.getfloat() == -1.0:
                    newop = self.replace_op_with(op, rop.FLOAT_NEG, args=[rhs])
                    return self.emit(newop)
        return self.emit(op)
    def postprocess_FLOAT_MUL(self, op):
        # Record the commuted multiplication as a pure fact for CSE.
        self.optimizer.pure_from_args(rop.FLOAT_MUL,
                                      [op.getarg(1), op.getarg(0)], op)
    def optimize_FLOAT_TRUEDIV(self, op):
        """Rewrite x / const into x * (1/const) when const is a power of two,
        where the reciprocal is exact."""
        arg1 = op.getarg(0)
        arg2 = op.getarg(1)
        v2 = self.get_box_replacement(arg2)
        # replace "x / const" by "x * (1/const)" if possible
        newop = op
        if v2.is_constant():
            divisor = v2.getfloat()
            fraction = math.frexp(divisor)[0]
            # This optimization is valid for powers of two
            # but not for zeroes, some denormals and NaN:
            if fraction == 0.5 or fraction == -0.5:
                reciprocal = 1.0 / divisor
                rfraction = math.frexp(reciprocal)[0]
                # The reciprocal must itself be an exact power of two.
                if rfraction == 0.5 or rfraction == -0.5:
                    c = ConstFloat(longlong.getfloatstorage(reciprocal))
                    newop = self.replace_op_with(op, rop.FLOAT_MUL,
                                                 args=[arg1, c])
        return self.emit(newop)
    def optimize_FLOAT_NEG(self, op):
        # No rewrite here; the pure fact is recorded in the postprocess hook.
        return self.emit(op)
    def postprocess_FLOAT_NEG(self, op):
        # Negation is an involution: record NEG(op) == original argument.
        self.optimizer.pure_from_args(rop.FLOAT_NEG, [op], op.getarg(0))
    def optimize_guard(self, op, constbox):
        """Common guard logic: when the guarded box is already a known
        constant, the guard is either redundant (drop it) or provably
        contradictory (raise InvalidLoop)."""
        box = op.getarg(0)
        if box.type == 'i':
            intbound = self.getintbound(box)
            if intbound.is_constant():
                if not intbound.getint() == constbox.getint():
                    r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(
                        op)
                    raise InvalidLoop('A GUARD_{VALUE,TRUE,FALSE} (%s) '
                                      'was proven to always fail' % r)
                return
        elif box.type == 'r':
            box = self.get_box_replacement(box)
            if box.is_constant():
                if not box.same_constant(constbox):
                    r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(
                        op)
                    raise InvalidLoop('A GUARD_VALUE (%s) '
                                      'was proven to always fail' % r)
                return
        return self.emit(op)
    def optimize_GUARD_ISNULL(self, op):
        """Drop the guard when the pointer is already known null; abort the
        loop when it is known non-null (guard can never pass)."""
        info = self.getptrinfo(op.getarg(0))
        if info is not None:
            if info.is_null():
                return
            elif info.is_nonnull():
                r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op)
                raise InvalidLoop('A GUARD_ISNULL (%s) was proven to always '
                                  'fail' % r)
        return self.emit(op)
    def postprocess_GUARD_ISNULL(self, op):
        # After the guard passes, the argument is definitely NULL.
        self.make_constant(op.getarg(0), self.optimizer.cpu.ts.CONST_NULL)
    def optimize_GUARD_IS_OBJECT(self, op):
        """Drop the guard when the pointer is already known to be a GC
        object; abort on constants/known infos that contradict it."""
        info = self.getptrinfo(op.getarg(0))
        if info and info.is_constant():
            if info.is_null():
                raise InvalidLoop("A GUARD_IS_OBJECT(NULL) found")
            c = self.get_box_replacement(op.getarg(0))
            if self.optimizer.cpu.check_is_object(c.getref_base()):
                return
            raise InvalidLoop("A GUARD_IS_OBJECT(not-an-object) found")
        if info is not None:
            if info.is_about_object():
                return
            if info.is_precise():
                # precise type info that is not an object: cannot pass
                raise InvalidLoop()
        return self.emit(op)
    def optimize_GUARD_GC_TYPE(self, op):
        """Drop the guard when the type id is already known to match the
        expected one (arg 1); abort the loop on a proven mismatch."""
        info = self.getptrinfo(op.getarg(0))
        if info and info.is_constant():
            c = self.get_box_replacement(op.getarg(0))
            tid = self.optimizer.cpu.get_actual_typeid(c.getref_base())
            if tid != op.getarg(1).getint():
                raise InvalidLoop("wrong GC type ID found on a constant")
            return
        if info is not None and info.get_descr() is not None:
            if info.get_descr().get_type_id() != op.getarg(1).getint():
                raise InvalidLoop("wrong GC types passed around!")
            return
        return self.emit(op)
    def optimize_GUARD_SUBCLASS(self, op):
        """Drop the guard when the known class/vtable already proves the
        subclass relation with the expected vtable (arg 1)."""
        info = self.getptrinfo(op.getarg(0))
        optimizer = self.optimizer
        if info and info.is_constant():
            c = self.get_box_replacement(op.getarg(0))
            vtable = optimizer.cpu.ts.cls_of_box(c).getint()
            if optimizer._check_subclass(vtable, op.getarg(1).getint()):
                return
            raise InvalidLoop("GUARD_SUBCLASS(const) proven to always fail")
        if info is not None and info.is_about_object():
            known_class = info.get_known_class(optimizer.cpu)
            if known_class:
                # exact class known: check it directly
                if optimizer._check_subclass(known_class.getint(),
                                             op.getarg(1).getint()):
                    return
            elif info.get_descr() is not None:
                # otherwise fall back to the descr's vtable
                if optimizer._check_subclass(info.get_descr().get_vtable(),
                                             op.getarg(1).getint()):
                    return
        return self.emit(op)
def optimize_GUARD_NONNULL(self, op):
opinfo = self.getptrinfo(op.getarg(0))
if opinfo is not None:
if opinfo.is_nonnull():
return
elif opinfo.is_null():
r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op)
raise InvalidLoop('A GUARD_NONNULL (%s) was proven to always '
'fail' % r)
return self.emit(op)
    def postprocess_GUARD_NONNULL(self, op):
        # After the guard passes the argument is non-null; remember this
        # guard so later guards on the same box can be strengthened in place.
        self.make_nonnull(op.getarg(0))
        self.getptrinfo(op.getarg(0)).mark_last_guard(self.optimizer)
def optimize_GUARD_VALUE(self, op):
arg0 = op.getarg(0)
if arg0.type == 'r':
info = self.getptrinfo(arg0)
if info:
if info.is_virtual():
raise InvalidLoop("promote of a virtual")
old_guard_op = info.get_last_guard(self.optimizer)
if old_guard_op is not None:
op = self.replace_old_guard_with_guard_value(op, info,
old_guard_op)
elif arg0.type == 'f':
arg0 = self.get_box_replacement(arg0)
if arg0.is_constant():
return
constbox = | |
sessions
self.target_margin = pd.Series(np.array([2, 2, np.deg2rad(5), np.deg2rad(3), np.deg2rad(3), np.deg2rad(3), np.deg2rad(3)]), ismore_pos_states)
self.target_margin = self.target_margin[self.pos_states]
self.add_dtype('target_margin', 'f8', (len(self.target_margin),))
self.sounds_general_dir = os.path.expandvars('$HOME/code/ismore/sounds')
self.sounds_dir = os.path.join(self.sounds_general_dir, self.language)
self.sounds_dir_classical = os.path.expandvars('$HOME/code/ismore/sounds/classical')
self.reached_goal_position = False #If the goal_position is reached then give feedback to patient and start the movement back towards the rest_position
self.plant.enable()
[self.DoF_target_idx_init, self.DoF_target_idx_end, self.DoF_not_control_idx_init, self.DoF_not_control_idx_end] = check_plant_and_DoFs(self.plant_type, self.DoF_control, self.DoF_target)
self.subgoal_names = self.targets_matrix['subgoal_names']
# 4th order butterworth filter for command_vel
fs_synch = self.fps #Frequency at which the task is running
nyq = 0.5 * fs_synch
cuttoff_freq = 1.5 / nyq
bpf_kin_coeffs = butter(4, cuttoff_freq, btype='low')
self.command_lpfs = dict()
for state in self.vel_states:
self.command_lpfs[state] = Filter(bpf_kin_coeffs[0], bpf_kin_coeffs[1]) # low-pass filter to smooth out command velocities
pygame.mixer.init()
# import serial
# self.serial_trigger =serial.Serial(
# port='/dev/ttyUSB0',
# baudrate=9600,
# parity=serial.PARITY_NONE,
# stopbits=serial.STOPBITS_ONE,
# bytesize=serial.SEVENBITS
# )
    def init(self):
        """Build the LFC go-to-start assister for this plant type, then run
        the parent class initialization."""
        kwargs = {
            'call_rate': self.fps,
            'xy_cutoff': 2.,  # assumed xy distance cutoff for the assister -- TODO confirm units
        }
        # self.assister = ismore_bmi_lib.LFC_GO_TO_START_ASSISTER_CLS_DICT[self.plant_type](**kwargs)
        self.assister = ismore_bmi_lib.LFC_GO_TO_START_ASSISTER_CLS_DICT[self.plant_type](speed = self.speed,**kwargs)
        super(EndPointMovement, self).init()
def pos_diff(self, x1, x2):
'''
Calculate x1 - x2, but with a specialized definition of "-"
'''
# if self.plant_type == 'ArmAssist':
# sub_fns = [operator.sub, operator.sub, angle_subtract]
# elif self.plant_type == 'ReHand':
# sub_fns = [angle_subtract, angle_subtract, angle_subtract, angle_subtract]
# elif self.plant_type == 'IsMore':
# sub_fns = [operator.sub, operator.sub, angle_subtract, angle_subtract, angle_subtract, angle_subtract, angle_subtract]
# x1 = np.array(x1).ravel()
# x2 = np.array(x2).ravel()
# diff_ = []
# for sub_fn, i, j in izip(sub_fns, x1, x2):
# diff_.append(sub_fn(i, j))
# return np.array(diff_)
x1 = np.array(x1).ravel()
x2 = np.array(x2).ravel()
diff = []
for i, j in izip(x1, x2):
diff.append(i-j)
return np.array(diff)
# return task_type
# def _set_subgoals(self):
# self.goal_position = self.targets_matrix[self.trial_type]
    def _while_trial(self):
        """Runs every cycle while in 'trial': when all controlled DoFs are
        within target_margin of the current subgoal, either advance to the
        next subgoal (with audio cues) or mark the whole trial reached."""
        # fb_time = self.feedback_time[self.trial_type][self.subtrial_idx]
        #self.task_data['audio_feedback_start'] = 0
        if np.all(np.abs(self.pos_diff(self.goal_position[self.DoF_target_idx_init:self.DoF_target_idx_end],self.plant.get_pos()[self.DoF_target_idx_init:self.DoF_target_idx_end])) < self.target_margin[self.pos_states[self.DoF_target_idx_init:self.DoF_target_idx_end]]):
            #self.task_data['audio_feedback_start'] = 1
            # if self.give_feedback:
            #     # self._play_sound(os.path.join(self.sounds_general_dir, 'beep.wav')) # nerea
            #     self._play_sound(self.sounds_general_dir, ['beep'])
            if self.goal_idx < len(self.targets_matrix[self.trial_type].keys())-1:
                # More subgoals remain: announce and head to the next one.
                pygame.mixer.music.stop()
                self.parallel_sound.stop()
                self.goal_idx +=1
                print 'heading to next subtarget'
                self.goal_position = self.targets_matrix[self.trial_type][self.goal_idx][self.pos_states]
                self._play_sound(self.sounds_dir, self.subgoal_names[self.trial_type][self.goal_idx]) #nerea
                self.parallel_sound = pygame.mixer.Sound(os.path.join(self.sounds_dir_classical, self.subgoal_names[self.trial_type][self.goal_idx][0]+'.wav'))
                self.parallel_sound.play()
                # self._play_sound(self.sounds_dir_classical, [self.subgoal_names[self.trial_type][self.goal_idx][0]]) #nerea
            else:
                print 'all subtargets reached'
                self.reached_goal_position = True
        # Logged every cycle (also when the margin test fails).
        self.task_data['reached_goal_position'] = self.reached_goal_position
def _while_trial_return(self):
#fb_time = self.feedback_time[self.trial_type][self.subtrial_idx]
#self.task_data['audio_feedback_start'] = 0
if np.all(np.abs(self.pos_diff(self.goal_position[self.DoF_target_idx_init:self.DoF_target_idx_end],self.plant.get_pos()[self.DoF_target_idx_init:self.DoF_target_idx_end])) < self.target_margin[self.pos_states[self.DoF_target_idx_init:self.DoF_target_idx_end]]):
self.reached_goal_position = True
self.task_data['reached_goal_position'] = self.reached_goal_position
#self.goal_position = self.rest_position
#self.task_data['audio_feedback_start'] = 1
# if self.give_feedback:
# # self._play_sound(os.path.join(self.sounds_general_dir, 'beep.wav')) # nerea
# self._play_sound(self.sounds_general_dir, ['beep'])
    def move_plant(self):
        """Compute the assister (LQR) velocity command for the current state,
        low-pass filter it, zero it in non-moving states and for uncontrolled
        DoFs, send it to the exo, and log raw/filtered/final commands."""
        command_vel = pd.Series(0.0, self.vel_states)
        command_vel_raw = pd.Series(0.0, self.vel_states)
        command_vel_final = pd.Series(0.0, self.vel_states)
        #calculate the output of the LQR controller at all states
        current_pos = self.plant_pos[:].ravel()
        # State vector layout: [pos, vel(=0), 1] as a column vector.
        current_state = np.hstack([current_pos, np.zeros_like(current_pos), 1]).reshape(-1, 1)
        #print self.state
        if self.state in ['wait','rest','rest_return', 'instruct_rest', 'preparation', 'preparation_return', 'instruct_go','instruct_go_return', 'instruct_trial_type', 'instruct_rest_return']:
            #in return state and in the states where the exo does not move the target position is the rest position
            target_state = current_state
        elif self.state in ['trial_return', 'instruct_trial_return']:
            #in return state and in the states where the exo does not move the target position is the rest position
            target_state = np.hstack([self.targets_matrix['rest'][0][self.pos_states], np.zeros_like(current_pos),1]).reshape(-1,1)
        elif self.state == 'trial':
            target_state = np.hstack([self.targets_matrix[self.trial_type][self.goal_idx][self.pos_states], np.zeros_like(current_pos),1 ]).reshape(-1,1)
        # Third argument is the assist level (1. == full assistance).
        assist_output = self.assister(current_state, target_state, 1.)
        Bu = np.array(assist_output["x_assist"]).ravel()
        # command_vel[:] = Bu[len(current_pos):len(current_pos)*2]
        # The velocity block of the assist vector is the raw command.
        command_vel_raw[:] = Bu[len(current_pos):len(current_pos)*2]
        #copy the command_vel before fitlering
        # command_vel_raw[:] = command_vel[:]
        #filter command_vel
        for state in self.vel_states:
            # command_vel[state] = self.command_lpfs[state](command_vel[state])
            command_vel[state] = self.command_lpfs[state](command_vel_raw[state])
            if np.isnan(command_vel[state]):
                command_vel[state] = 0
        # Command zero velocity if the task is in a non-moving state
        if self.state in ['wait','rest', 'instruct_rest', 'instruct_trial_type','instruct_trial_return','preparation_return']:
            command_vel[:] = 0
            # we could also set the raw signal to 0 or just keep the output of the LQR as it is
            # command_vel_raw[:] = 0
        #testing nerea
        # # Command zero velocity if the task is in a non-moving state
        # if self.state in ['wait','rest', 'instruct_rest', 'instruct_trial_type']:
        #     command_vel[:] = 0
        # elif self.state in ['trial', 'trial_return']:
        #     current_pos = self.plant_pos[:].ravel()
        #     current_state = np.hstack([current_pos, np.zeros_like(current_pos), 1]).reshape(-1, 1)
        #     if self.state == 'trial_return':
        #         target_state = np.hstack([self.targets_matrix['rest'][0][self.pos_states], np.zeros_like(current_pos),1]).reshape(-1,1)
        #     elif self.state == 'trial':
        #         target_state = np.hstack([self.targets_matrix[self.trial_type][self.goal_idx][self.pos_states], np.zeros_like(current_pos),1 ]).reshape(-1,1)
        #     assist_output = self.assister(current_state, target_state, 1.)
        #     Bu = np.array(assist_output["x_assist"]).ravel()
        #     command_vel[:] = Bu[len(current_pos):len(current_pos)*2]
        #     command_vel_raw[:] = command_vel[:]
        #     # #Apply low-pass filter to command velocities
        #     for state in self.vel_states:
        #         command_vel[state] = self.command_lpfs[state](command_vel[state])
        #         if np.isnan(command_vel[state]):
        #             command_vel[state] = 0
        # do NOT use this kind of functions in cycle, it causes delays in the reception of position and vel data and the closed-loop control does not work in real time
        # rh_plant.get_enable_state()
        # motor_res = 1e-3
        # if any(command_vel.values < motor_res) and all(command_vel.values != 0):
        #     print command_vel.values
        self.task_data['command_vel'] = command_vel.values
        self.task_data['command_vel_raw'] = command_vel_raw.values
        # set the velocities of the DoFs that should not be controlled to zero.
        if np.isnan(self.DoF_not_control_idx_init) == False and np.isnan(self.DoF_not_control_idx_end) == False:
            command_vel[self.DoF_not_control_idx_init:self.DoF_not_control_idx_end] = 0
        # self.check_safety(command_vel) #nerea
        self.plant.send_vel(command_vel.values) #send velocity command to EXO
        self.task_data['command_vel_final'] = command_vel.values
    def _cycle(self):
        '''Runs self.fps times per second: refresh plant kinematics, drive
        the plant, update the display and log everything for this cycle.'''
        # get latest position/velocity information before calling move_plant()
        self.plant_pos_raw[:] = self.plant.get_pos_raw()
        self.plant_pos[:] = self.plant.get_pos()
        self.plant_vel_raw[:] = self.plant.get_vel_raw()
        self.plant_vel[:] = self.plant.get_vel()
        self.move_plant()
        self.update_plant_display()
        # print self.subtrial_idx
        if not self.state in ['trial','trial_return']:
            # Outside trial states there is no active goal: log NaNs.
            #self.task_data['audio_feedback_start'] = 0
            self.task_data['goal_pos'] = np.ones(len(self.pos_states))*np.nan
            self.task_data['goal_idx'] = np.nan
        else:
            self.task_data['goal_pos'] = self.goal_position
            self.task_data['goal_idx'] = self.goal_idx
        self.task_data['plant_pos'] = self.plant_pos_raw.values
        self.task_data['plant_pos_filt'] = self.plant_pos.values
        self.task_data['plant_vel'] = self.plant_vel_raw.values
        self.task_data['plant_vel_filt'] = self.plant_vel.values
        self.task_data['plant_type'] = self.plant_type
        self.task_data['trial_type'] = self.trial_type
        self.task_data['ts'] = time.time()
        self.task_data['target_margin'] = self.target_margin
        self.task_data['reached_goal_position'] = self.reached_goal_position
        self.task_data['speed'] = self.speed
        super(EndPointMovement, self)._cycle()
#### STATE AND TEST FUNCTIONS ####
def _start_wait(self):
# try:
# self.serial_trigger.setRTS(True)
# except IOError as e:
# print(e)
# determine the random length of time to stay in the rest state
min_time, max_time = self.rest_interval
self.rest_time = random.random() * (max_time - min_time) + min_time
min_time, max_time = self.preparation_interval
self.preparation_time = random.random() * (max_time - min_time) + min_time
super(EndPointMovement, self)._start_wait()
def _test_end_rest(self, ts):
return ts > self.rest_time
def _test_end_preparation(self, ts):
return ts > self.preparation_time
    def _parse_next_trial(self):
        # next_trial is supplied by the experiment's trial sequence generator.
        self.trial_type = self.next_trial
def _test_end_instruct(self, *args, **kwargs):
return not pygame.mixer.music.get_busy()
    def _start_instruct_rest(self):
        """Stop any parallel music and play the 'rest' instruction."""
        self.parallel_sound.stop()
        # try:
        #     self.serial_trigger.setRTS(False)
        # except IOError as e:
        #     print(e)
        self._play_sound(self.sounds_dir, ['rest'])
        print 'rest'
    def _start_instruct_trial_type(self):
        # Announce the first subgoal of the upcoming trial.
        self._play_sound(self.sounds_dir, self.subgoal_names[self.trial_type][0])
    def _end_instruct_trial_type(self):
        # Re-arm goal detection for the outward movement.
        self.reached_goal_position = False
    def _start_instruct_trial_return(self):
        # Announce the return-to-rest movement.
        self._play_sound(self.sounds_dir, ['back'])
    def _end_instruct_trial_return(self):
        # Re-arm goal detection for the return movement.
        self.reached_goal_position = False
    def _start_instruct_go(self):
        # Play the 'go' cue.
        self._play_sound(self.sounds_dir, ['go'])
    def _start_trial(self):
        """Initialize the first subgoal of the trial and, if enabled, start
        the classical-music feedback for it."""
        self.goal_position = self.targets_matrix[self.trial_type][0][self.pos_states]
        self.goal_idx = 0
        if self.music_feedback:
            self._play_sound(self.sounds_dir_classical, [self.subgoal_names[self.trial_type][self.goal_idx][0]])
    def _start_trial_return(self):
        """Target the rest position and restart music feedback if enabled."""
        print 'return trial'
        self.goal_position = self.targets_matrix['rest'][0][self.pos_states]
        if self.music_feedback:
            self._play_sound(self.sounds_dir_classical, [self.subgoal_names[self.trial_type][self.goal_idx][0]])
    def _test_end_trial(self, ts):
        # Trial ends when _while_trial flagged the final subgoal as reached.
        return self.reached_goal_position
    def _test_end_trial_return(self,ts):
        # Return phase ends when the rest position was reached.
        return self.reached_goal_position
def _end_trial(self):
if self.music_feedback:
self.parallel_sound.stop()
pygame.mixer.music.stop()
else:
pass
def _end_trial_return(self):
if self.music_feedback:
self.parallel_sound.stop()
pygame.mixer.music.stop()
else:
pass
# def _test_at_starting_config(self, *args, **kwargs):
# start_targ = self.targets_matrix['rest']
# diff_to_start = np.abs(self.plant.get_pos() - start_targ[self.pos_states])
# return np.all(diff_to_start < self.rest_rect[:len(self.pos_states)])
# def cleanup(self, database, saveid, **kwargs):
# self.serial_trigger.close()
class EndPointMovement_testing(NonInvasiveBase):
    '''
    Drives the exo towards previously recorded target positions.
    Class to make a position control based on target configurations / target positions
    rather than playing back a previously recorded trajectory.

    Variant of the endpoint-movement task without an explicit return phase:
    'trial' loops back to 'instruct_trial_type' until all trials are done.
    '''
    fps = 20  # task cycle rate in Hz
    # FSM: state name -> {event: next state}
    status = {
        'wait': {
            'start_trial': 'instruct_rest',
            'stop': None},
        'instruct_rest': {
            'end_instruct': 'rest',
            'stop': None},
        'rest': {
            'end_rest': 'instruct_trial_type',
            'stop': None},
        'instruct_trial_type': {
            'end_instruct': 'preparation',
            'stop': None},
        'preparation': {
            'end_preparation': 'instruct_go',
            'stop': None},
        'instruct_go': {
            'end_instruct': 'trial',
            'stop': None},
        'trial': {
            # 'end_trial' : 'instruct_rest',
            'end_trial' : 'instruct_trial_type',
            'end_alltrials' : 'wait',
            'stop': None},
    }
    state = 'wait'  # initial state
    # settable parameters on web interface
    preparation_time = traits.Float(2, desc='time to remain in the preparation state.')
    rest_interval = traits.Tuple((3., 4.), desc='Min and max time to remain in the rest state.')
    targets_matrix = traits.DataFile(object,desc='test', bmi3d_query_kwargs=dict(system__name='misc'))
    give_feedback = traits.Int((0), desc=' 0 if we do not give feedback, 1 if we give feedback about whether the patient reached the goal position')
    music_feedback = traits.Int((1), desc=' 0 if we do not want to include music, 1 if we want different classical music pieces with increasing intensity to be played')
    speed = traits.OptionsList(*speed_options, bmi3d_input_options= speed_options)
    debug = False
    #trial_end_states = ['rest']
| |
##############################################################################
# EVOLIFE www.dessalles.fr/Evolife <NAME> #
# Telecom ParisTech 2014 www.dessalles.fr #
##############################################################################
##############################################################################
# S_Signalling #
##############################################################################
""" EVOLIFE: 'Signalling' Scenario:
Individual A signals its competence
Other individuals may choose to join A based on that competence.
A benefits from their joining (e.g. protection)
They get a profit correlated with A's competence
(think of it as an ability to anticipate danger)
"""
#=============================================================#
# HOW TO MODIFY A SCENARIO: read Default_Scenario.py #
#=============================================================#
import random
import sys
if __name__ == '__main__': sys.path.append('../..') # for tests
from Evolife.Tools.Tools import percent, noise_add, error
from Evolife.Scenarii.Default_Scenario import Default_Scenario
######################################
# specific variables and functions #
######################################
class Interactions(object):
    """ A few functions used to rule interactions between agents.
    Agents negotiate mutual 'ranks' in each other's address book; the rank
    and the signalled competence determine the social time each one offers.
    (Python 2 code: uses tuple parameters and print statements.)
    """
    def __init__(self, Parameters, FCompetence):
        self.Parameters = Parameters	# dict of (parameter, value) pairs
        self.FCompetence = FCompetence	# Competence function that computes the competence of individuals
        self.RankEffects = []	# table of decreasing investments in friendship
        self.RankEffect(0)  # pre-computes the table
    def RankEffect(self, Rank):
        """ computes a decreasing coefficient depending on one's rank
        in another agent's address book.
        Returns the pair (Tn, tn): cumulative and marginal social time.
        """
        if self.RankEffects == []:
            # Initializing the table of social time given to friend
            # depending on friend's rank
            # T: total amount of available time
            # tn: social time devoted to nth friend
            # Tn: occupied social time with n friends
            # T1 = t1
            # Tn = Tn-1 + tn * (1 - Tn-1 / T)
            # Tn = Tn-1 (1 - tn / T) + tn
            # T controls overlap:
            # T= 1   ==> all social time is crowded within constant time
            #            much overlap, more friends does not decrease
            #            each friend's share significantly
            # T= 100 ==> no overlap, social times add to each other,
            #            shares diminish as the number of friends increases
            RkEffect = self.Parameters('RankEffect')/100.0
            if RkEffect == 0:
                RkEffect = 0.000001  # avoids a zero geometric ratio below
            if self.Parameters('SocialOverlap'):
                T = 100 * RkEffect / self.Parameters('SocialOverlap')
            else:
                T = 10000.0
            for n in range(self.Parameters('MaxGurus') + 2):
                tn = (RkEffect) ** (n+1)	# friend #0 gets time = RkEffect; friend #1 gets time = RkEffect**2;
                if n == 0:
                    Tn = RkEffect
                else:
                    Tn = self.RankEffects[n-1][0] * (1-tn/T) + tn
                self.RankEffects.append((Tn,tn))
        if Rank >= 0:
            try:
                return self.RankEffects[Rank]
            except IndexError:
                error('S_Signalling: RankEffect', str('Rank == %d' % Rank))
        else:
            return (0,0)
    def NegotiateOffer(self,Agent,Partner):
        """ returns the ranks Agent and Partner are ready to assign to each other
        in their respective address book. Agent's rank recursively depends on
        Partner's attitude towards Agent.
        """
        # non-recursive version: iterate mutual offers to a fixed point.
        MaxOffer = 100
        OldAgentOffer = OldPartnerOffer = (0,0)	# (Rank, Offer)
        AgentOffer = PartnerOffer = (0, MaxOffer)	# AgentOffer = Agent's offer to Partner
        while (OldAgentOffer, OldPartnerOffer) != (AgentOffer,PartnerOffer):
            (OldAgentOffer, OldPartnerOffer) = (AgentOffer, PartnerOffer)
            PartnerOffer = self.Offer(Partner, AgentOffer, Apparent=True)
            if PartnerOffer[0] < 0:
                # negative rank == refusal: no deal
                return (0,0)
            AgentOffer = self.Offer(Agent, PartnerOffer, Apparent=True)
            #print 'Negotiation2: %s offers %d and %s offers %d (at ranks %d, %d)' \
            #	% (Agent.id,AgentOffer[1],Partner.id,PartnerOffer[1],AgentOffer[0], PartnerOffer[0])
            if AgentOffer[0] < 0:
                return (0,0)
        return (AgentOffer[1], PartnerOffer[1])
    def SocialOffer(self, Competence, PartnerRank, nbFriends):
        """ An agent's social offer depends on its alleged or real competence,
        on the rank it offers in its address book, and on the number of friends already
        present in the address book (as it may influence available time)
        """
        if PartnerRank < 0:
            return 0	# refusal: nothing is offered
        rankEffect = self.RankEffect(PartnerRank)[1]	# rank in address book matters
        sizeEffect = self.RankEffect(1 + nbFriends)[0]	# size of address book matters
        ##		if abs(rankEffect - sizeEffect) > 0.0001:
        ##			print rankEffect, sizeEffect, Competence, PartnerRank, nbFriends
        return float(Competence * rankEffect) / sizeEffect
    def Offer(self, Agent, (PartnerRankOffer, PartnerSocialOffer), Apparent=True):
        """ Agent is going to make an offer to Partner, based on Partner's offer.
        Returns (OfferedRank, SocialOffer); OfferedRank == -1 means refusal.
        (Note the Python-2-only tuple parameter in the signature.)
        """
        if Agent.followers.accepts(PartnerSocialOffer) < 0:
            # I don't follow you if I even don't want you to follow me
            OfferedRank = -1
        else:
            OfferedRank = Agent.gurus.accepts(PartnerSocialOffer)
        if self.Parameters('SocialSymmetry') > 0 and OfferedRank >= 0:
            # Social symmetry supposes that friends put themselves at identical levels in their address book
            OfferedRank = max(PartnerRankOffer, OfferedRank)	# worst of the two ranks
        SocialOffer = self.SocialOffer(self.FCompetence(Agent, Apparent), OfferedRank, Agent.nbFriends())
        #print Agent.id, Agent.Signal, OfferedRank, SocialOffer
        return (OfferedRank, SocialOffer)
    def groom(self, Indiv, Partner):
        """ The two individuals negotiate partnership.
        First they signal their competence.
        Then, they make a "social offer" based on the rank
        (in their "address book") they are ready to assign to each other.
        Lastly, each one independently decides to join the other or not.
        cf. Dunbar's "grooming hypothesis"
        """
        ##		#if histogram[Indiv.gurus.performance(Friend)] > 1:
        ##		Ia = Indiv.gurus.signature()
        ##		Fa = Partner.gurus.signature()
        # new interaction puts previous ones into question
        if Indiv in Partner.gurus.names():
            ##			print '\nBefore talking: %d quits %d' % (Partner.Phene_value('Signal'),Indiv.Phene_value('Signal'))
            Partner.quit_(Indiv)
        if Partner in Indiv.gurus.names():
            ##			print 'Before talking: %d quits %d' % (Indiv.Phene_value('Signal'),Partner.Phene_value('Signal'))
            Indiv.quit_(Partner)
        ##		if Indiv.nbFriends() > 1:
        ##			print "%d has %d friends" % (Indiv.id, Indiv.nbFriends()), Indiv.friends()
        ##		if Partner.nbFriends() > 1:
        ##			print "%d has %d friends_" % (Partner.id, Partner.nbFriends()), Partner.friends()
        # Negotiation takes place
        (IndivOffer, PartnerOffer) = self.NegotiateOffer(Indiv, Partner)
        # social links are established accordingly
        if IndivOffer == 0 or PartnerOffer == 0:
            # One doesn't care about the other
            ##			print "\nNo deal: %s(%d)->%d, %s(%d)->%d" % \
            ##				  (Indiv.id, Indiv.Phene_value('Signal'), IndivOffer,\
            ##				   Partner.id, Partner.Phene_value('Signal'), PartnerOffer)
            return	# the deal is not made
        IndivFriends = Indiv.friends()
        if not Indiv.follows(IndivOffer, Partner, PartnerOffer):
            # this may occur if Partner has too many followers
            print ("***** Scenario Signalling:", "Negotiation not respected")
            print Indiv.id, Indiv.Phene_value('Signal'), 'was accepted by', Partner.id,
            print 'with offer-:', IndivOffer
            print Indiv.id, sorted(Indiv.gurus.performances()),
            print sorted(Indiv.followers.performances())
            print Partner.id, sorted(Partner.gurus.performances()),
            print sorted(Partner.followers.performances())
            error('S_Signalling', "Negotiation not respected")
            return	# the deal is not made
        PartnerFriends = Partner.friends()
        if not Partner.follows(PartnerOffer, Indiv, IndivOffer):
            # this may occur if Indiv has too many followers
            # undo the half-made link before aborting
            Indiv.quit_(Partner)
            error('S_Signalling', "Negotiation not respected")
            return	# the deal is not made
        ##		print '%d (%02.02f) is accepted by %d' % (Indiv.id, Indiv.Signal, Partner.id), '////',
        ##		print IndivFriends, 'becomes', Indiv.friends()
        ##		print '   %d (%02.02f) is accepted by %d' % (Partner.id, Partner.Signal, Indiv.id), '///',
        ##		print PartnerFriends, 'becomes', Partner.friends()
        # suppression of broken links
        for Friend in IndivFriends:
            ##			print 'launching symmetry checking for %d...' % Friend.id,
            Friend.restore_symmetry()
        for Friend in PartnerFriends:
            ##			print 'launching symmetry checking for %d_...' % Friend.id,
            Friend.restore_symmetry()
        ##		Ib = Indiv.gurus.signature()
        ##		Fb = Partner.gurus.signature()
        ##		I = zip(Ia+[('0',0)],Ib)
        ##		F = zip(Fa+[('0',0)],Fb)
        ##		print 'Indiv: ', Indiv.id, int(self.FCompetence(Indiv, Apparent=False)), int(self.FCompetence(Indiv,Apparent=True))
        ##		print '\n'.join([str(f[0])+'\t'+str(f[1]) for f in I])
        ##		print 'Friend:', Partner.id, int(self.FCompetence(Partner, Apparent=False)), int(self.FCompetence(Partner,Apparent=True))
        ##		print '\n'.join([str(f[0])+'\t'+str(f[1]) for f in F])
        ##		raw_input('________')
        ##		Indiv.follows(self.SocialOffer(Indiv, 0), Partner, self.SocialOffer(Partner, 0))
        ##		Partner.follows(self.SocialOffer(Partner, 0), Indiv, self.SocialOffer(Indiv, 0))
        ##		print "\nNegotiation result: %s(%d)->%d rank %d, %s(%d)->%d rank %d" % \
        ##			  (Indiv.id, Indiv.Phene_value('Signal'), IndivOffer, Indiv.gurus.rank(Partner),\
        ##			   Partner.id, Partner.Phene_value('Signal'), PartnerOffer, Partner.gurus.rank(Indiv))
        ##		raw_input('.')
        return
######################################
# Description of the scenario #
######################################
class Scenario(Default_Scenario):
######################################
# Most functions below overload some #
# functions of Default_Scenario #
######################################
def initialization(self):
self.CompetenceAvgDistance = 0 # average difference in competence between best friends
self.RiskByCompetence = [[0,0] for x in range(self.Parameter('ControlGeneNumber'))]
self.GlobalComm = 0 # Average investment in communication by actual individuals
self.CompetenceHistogram = [[0,0] for ii in range(self.Parameter('ControlGeneNumber'))]
# stores individual repartition by competence
self.PopSize = 0 # number of individuals currently present
self.Interactions = Interactions(self.Parameter, self.Competence)
def genemap(self):
""" Defines the name of genes and their position on the DNA.
Accepted syntax:
['genename1', 'genename2',...]: lengths and coding are retrieved from configuration
[('genename1', 8), ('genename2', 4),...]: numbers give lengths in bits; coding is retrieved from configuration
[('genename1', 8, 'Weighted'), ('genename2', 4, 'Unweighted'),...]: coding can be 'Weighted', 'Unweighted', 'Gray', 'NoCoding'
"""
Gmap = []
for ControlGene in range(self.Parameter('ControlGeneNumber')):
Gmap.append('ControlGene'+str(ControlGene))
return [(G,0) for G in Gmap]
def phenemap(self):
""" Defines the set of non inheritable characteristics
"""
return ['Competence', # (non heritable) ability to be relevant
'Strength', # non heritable quality, used to introduce an irrelevant criterion in the construction of social networks
'Signal', # stores the signal that the individual actually emits
'SignalInvestment', # stores the individual's propensity to communicate
'Risk'] # stores the individual's exposure to life hazards
def new_agent(self, Child, parents):
" initializes newborns "
# determination of the newborn's signal level
# Investment in communication is genetically controlled
# Genetic control may vary according to competence range
SignalInvestment = Child.gene_relative_value('ControlGene'
+str(self.CompetenceRange(Child)))
Child.Phene_value('SignalInvestment', int(SignalInvestment), Levelling=True)
# The signal is based on the signaller's competence
Signal = self.Competence(Child, Apparent=True)
Signal = noise_add(Signal,self.Parameter('Noise'))
Child.Phene_value('Signal', int(Signal), Levelling=True)
def CompetenceRange(self, Indiv):
" assigns an individual to a category depending on its competence "
return int((Indiv.Phene_relative_value('Competence') *
self.Parameter('ControlGeneNumber')) / 100.01)
def Competence(self, Indiv, Apparent=False):
" Adds a bottom value to competence "
BC = self.Parameter('BottomCompetence')
Comp = percent(100-BC) * Indiv.Phene_relative_value('Competence') + BC
VisibleCompetence = percent(Comp * Indiv.Phene_relative_value('SignalInvestment'))
CompSign = self.Parameter('CompetenceSignificance')
Attractiveness = percent(100-CompSign) * Indiv.Phene_relative_value('Strength') \
+ percent(CompSign) * VisibleCompetence
if Apparent:
return Attractiveness
## return VisibleCompetence
else:
return Comp
def season(self, year, members):
""" This function is called at the beginning of each year
"""
self.GlobalComm = 0
self.CompetenceHistogram = [[0,0] for ii in range(self.Parameter('ControlGeneNumber'))]
self.RiskByCompetence = [[0,0] for x in range(self.Parameter('ControlGeneNumber'))]
self.PopSize = 0 # number of individuals currently present
self.CompetenceAvgDistance = 0
def start_game(self,members):
""" defines what is to be done at the group level before interactions
occur
"""
for Indiv in members:
Indiv.score(self.Parameter('SignallingCost'), FlagSet=True) # resetting scores
SigInv = Indiv.Phene_value('SignalInvestment')
self.GlobalComm += SigInv
SignalCost = percent(SigInv * self.Parameter('SignallingCost'))
Indiv.score(-SignalCost)
# friendship links (lessening with time) are updated
#Indiv.lessening_friendship()
# links are reset
#Indiv.detach()
# Monitoring competence distribution
self.CompetenceHistogram[self.CompetenceRange(Indiv)][0] += SigInv
self.CompetenceHistogram[self.CompetenceRange(Indiv)][1] += 1
self.PopSize += len(members) # number of individuals currently present
# Individuals first interact one more time with their current friends
for Indiv in members:
## | |
import asyncio
from asyncio import ensure_future as aef
from odroid_factory_api import API_MANAGER
from functools import wraps
from utils.log import init_logger
from copy import deepcopy
from usb import USB
import ethernet
import aiohttp
import iperf
import os
from evtest import Evtest
from task import Component
from task import Task
from task import cancelled_exception
import configparser
#import criteria
# Module-level logger shared by the whole factory-test module
# (init_logger is a project helper; 'info' selects the verbosity).
LOG = init_logger('', testing_mode='info')
class M1():
    def __init__(self):
        """Wire up config sections, hardware helpers, result items and check tasks."""
        # Board identity and factory-API client.
        self.model = 'M1'
        self.api_manager = API_MANAGER(board='m1')
        # Per-subsystem pass/fail thresholds read from config.ini.
        self.config = configparser.ConfigParser()
        self.config.read('config.ini')
        self.cfg_sata = self.config['sata']
        self.cfg_nvme = self.config['nvme']
        self.cfg_usb2 = self.config['usb2']
        self.cfg_usb3 = self.config['usb3']
        self.cfg_iperf = self.config['iperf']
        # Hardware access helpers (USB/NVMe scanning, input events).
        self.usb = USB()
        self.ev = Evtest()
        self.items = None
        # Item names split by display flag: flag0 -> Component(flag_text=1),
        # flag1 -> Component(flag_text=0).
        self.flag0 = ['finish', 'usb2_up', 'usb2_down', 'spi_btn', 'usb3_up_speed',
                      'usb3_up_sdx', 'usb3_up_rw', 'usb3_down_speed', 'usb3_down_sdx',
                      'usb3_down_rw', 'sata_speed', 'sata_sdx', 'sata_rw', 'nvme_speed',
                      'nvme_sdx', 'nvme_rw', 'eth_speed', 'mac', 'iperf', 'ipaddr_printer', 'hp_det', 'ir']
        self.flag1 = ['ping', 'ping_printer', 'usb3_up_diff', 'usb3_down_diff',
                      'sata_diff', 'nvme_diff']
        self.items0 = {k:Component(flag_text=1) for k in self.flag0}
        self.items1 = {k:Component(flag_text=0) for k in self.flag1}
        # Single lookup table used by every check method.
        self.items = {**self.items0, **self.items1}
        # One long-running Task wrapper per check coroutine.
        self.task_spi_btn = Task(self.check_spi_btn)
        self.task_usb2 = Task(self.check_usb2)
        self.task_usb3 = Task(self.check_usb3)
        self.task_sata = Task(self.check_sata)
        self.task_nvme = Task(self.check_nvme)
        self.task_eth_speed = Task(self.check_eth_speed)
        self.task_iperf = Task(self.check_iperf)
        self.task_mac = Task(self.check_mac)
        self.task_ping = Task(self.check_ping)
        self.task_printer = Task(self.check_printer)
        self.task_hp_detect = Task(self.check_hp_detect)
        self.task_ir = Task(self.check_ir)
        self.task_play_music = Task(self.play_music)
        self.task_scan_iperf_server = Task(self.scan_iperf_server)
def init_item(self, item):
for k, v in self.items.items():
if k == item:
if k == 'finish':
v.okay = 2
continue
v.text = v.ack = v.ret = v.value = v.okay = None
v.update = 1
def init_variables(self):
for k, v in self.items.items():
if k == 'finish':
v.okay = 2
continue
v.text = v.ack = v.ret = v.value = v.okay = None
v.update = 1
async def cancel_tasks(self):
aef(self.task_usb2.cancelled())
aef(self.task_usb3.cancelled())
aef(self.task_sata.cancelled())
aef(self.task_nvme.cancelled())
aef(self.task_eth_speed.cancelled())
aef(self.task_printer.cancelled())
aef(self.task_mac.cancelled())
aef(self.task_ping.cancelled())
aef(self.task_hp_detect.cancelled())
aef(self.task_ir.cancelled())
aef(self.task_iperf.cancelled())
aef(self.task_play_music.cancelled())
async def finish(self):
ipaddr = self.cfg_iperf.get('ipaddr')
if self.items['mac'].value != None:
await iperf.control_external_iperf_server(ipaddr, 'mac,' + self.items['mac'].value)
_finish = deepcopy(self.items)
del(_finish['finish'])
err = set()
for k, v in _finish.items():
if v.okay != 1:
if 'usb3' in k:
err.add(k[:k.find('_', 5)])
elif 'nvme' in k or 'sata' in k:
err.add(k[:k.find('_', 4)])
else:
err.add(k)
if len(err) > 0:
self.fail_item('finish', 'FINISH')
ipaddr = self.cfg_iperf.get('ipaddr')
await iperf.control_external_iperf_server(ipaddr, "error," + ",".join(err))
return
self.okay_item('finish', 'FINISH')
    async def sequence_main(self):
        """Main state machine: launch all check tasks once, then poll for completion.

        States: 1 = start all checks, 2 = wait until every item passes,
        3 = done.
        NOTE(review): self.seq_main is never initialised in this class's
        visible code — presumably set by the caller before this coroutine
        runs; confirm against the startup code.
        """
        tasks = []
        while True:
            if self.seq_main == 1:
                #aef(ethernet.set_eth_mode('1000'))
                # prepare the USB test file, then fan out every check task
                await self.usb.create_file()
                self.seq_main = 2
                aef(self.task_spi_btn.run())
                tasks.append(aef(self.task_printer.run()))
                tasks.append(aef(self.task_iperf.run()))
                tasks.append(aef(self.task_mac.run()))
                tasks.append(aef(self.task_ping.run()))
                tasks.append(aef(self.task_eth_speed.run()))
                tasks.append(aef(self.task_hp_detect.run()))
                tasks.append(aef(self.task_ir.run()))
                tasks.append(aef(self.task_sata.run()))
                tasks.append(aef(self.task_nvme.run()))
                tasks.append(aef(self.task_usb3.run()))
                tasks.append(aef(self.task_usb2.run()))
                tasks.append(aef(self.task_play_music.run()))
            elif self.seq_main == 2:
                # poll until every item (except 'finish' itself) reports okay
                _finish = deepcopy(self.items)
                del(_finish['finish'])
                finish = all(v.okay == 1 for k, v in _finish.items())
                if finish == True:
                    await self.finish()
                    self.seq_main = 3
                    LOG.error("FINISH!!!!!!!!!!!!")
                '''
                if all([task.done() for task in tasks]):
                    await self.finish()
                    self.seq_main = 3
                '''
            await asyncio.sleep(1)
@cancelled_exception()
async def check_printer(self):
ipaddr = self.cfg_iperf.get('ipaddr')
self.okay_item('ipaddr_printer', ipaddr)
@cancelled_exception()
async def set_printer_ip(self, ipaddr):
self.init_item('ping_printer')
self.config.set('iperf', 'ipaddr', ipaddr)
with open('config.ini', 'w') as cfg:
self.config.write(cfg)
await self.check_printer()
@cancelled_exception()
async def play_music(self):
while True:
cmd = "amixer set 'Playback Path' 'HP'"
proc = await asyncio.create_subprocess_shell(cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
cmd = "aplay piano.wav"
proc = await asyncio.create_subprocess_shell(cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
await asyncio.sleep(1)
async def poweroff(self):
cmd = f"./run_poweroff.sh"
proc = await asyncio.create_subprocess_shell(cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if stderr != b'':
LOG.error(stderr)
if stderr == b'' and stdout == b'':
return 0
    @cancelled_exception()
    async def check_ir(self):
        """Consume IR-remote events; several keys double as test-control shortcuts.

        The 'ir' item passes once at least one 'enter' press, one 'enter'
        release and one 'eth_green' press have been observed.
        """
        cnt_press = 0
        cnt_release = 0
        cnt_eth = {'green':0, 'yellow':0}
        if self.ev.device_ir == None:
            LOG.error("No ir event available")
            # NOTE(review): no return here — the loop below still starts even
            # when no IR device was found; confirm read_ir() tolerates that.
            self.fail_item('ir')
        async for label, value in self.ev.read_ir():
            if label == 'enter':
                if value == 1:
                    cnt_press += 1
                elif value == 0:
                    cnt_release += 1
            elif label == 'eth_green' and value == 0:
                # green key: switch ethernet to 100 Mbit mode
                cnt_eth['green'] += 1
                aef(ethernet.set_eth_mode('100'))
            elif label == 'eth_yellow' and value == 0:
                # yellow key: switch ethernet to 1000 Mbit mode
                cnt_eth['yellow'] += 1
                aef(ethernet.set_eth_mode('1000'))
            elif label == 'poweroff' and value == 0:
                aef(self.poweroff())
            elif label == 'scan' and value == 0:
                aef(self.task_scan_iperf_server.run())
            elif label == 'print' and value == 0:
                await self.finish()
            elif label == 'mac_rewrite' and value == 0:
                print ("==> 2022.04.13 mac rewrite added for charles")
                await aef(self.task_mac.run())
            # refresh the 'ir' item after every event
            if cnt_press > 0 and cnt_release > 0 and cnt_eth['green'] > 0:
                self.okay_item('ir', f"press : {cnt_press}, out : {cnt_release}, eth_green : {cnt_eth['green']}, eth_yellow : {cnt_eth['yellow']}")
            else:
                self.ready_item('ir', f"press : {cnt_press}, out : {cnt_release}, eth_green : {cnt_eth['green']}, eth_yellow : {cnt_eth['yellow']}")
@cancelled_exception()
async def check_hp_detect(self):
cnt_in = 0
cnt_out = 0
if self.ev.device_hp == None:
LOG.error("No hp_det event available")
self.fail_item('hp_det')
async for value in self.ev.read_hp_det():
if value == 0:
cnt_out += 1
elif value == 1:
cnt_in += 1
if cnt_in > 0 and cnt_out > 0:
self.okay_item('hp_det', f"in : {cnt_in}, out : {cnt_out}")
else:
self.ready_item('hp_det', f"in : {cnt_in}, out : {cnt_out}")
@cancelled_exception()
async def check_ping(self):
while True:
loss = await ethernet.ping()
if loss == '0%':
if self.items['ping'].okay != 1:
self.okay_item('ping')
else:
self.fail_item('ping')
ipaddr = self.cfg_iperf.get('ipaddr')
if await iperf.control_external_iperf_server(ipaddr, 'bind'):
if self.items['ping_printer'].okay != 1:
self.okay_item('ping_printer')
else:
self.fail_item('ping_printer')
await asyncio.sleep(1)
@cancelled_exception()
async def check_mac(self):
uuid = await ethernet.read_uuid()
mac = uuid[-12:]
if mac.startswith('001e06'):
self.items['mac'].value = mac
self.api_manager.mac_addr = mac
self.api_manager.uuid_mac = uuid
await self.api_manager.update_record({
'uuid': uuid})
self.okay_item('mac', mac)
return
uuid = await self.api_manager.request_mac_addr()
await ethernet.write_uuid(uuid)
uuid = await ethernet.read_uuid()
mac = uuid[-12:]
if mac.startswith('001e06'):
self.items['mac'].value = mac
self.api_manager.mac_addr = mac
self.api_manager.uuid_mac = uuid
await self.api_manager.update_record({
'uuid': uuid})
self.okay_item('mac', mac)
return
self.fail_item('mac')
@cancelled_exception()
async def check_spi_btn(self):
count_err = 0
count_click = 0
while True:
cmd = "hexdump -C /dev/mtdblock0 -n 1000 | head -10 | grep EFI"
proc = await asyncio.create_subprocess_shell(cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if not b'EFI PART' in stdout:
count_err += 1
if count_err > 0:
self.okay_item('spi_btn', 'SPI_BTN')
return
await asyncio.sleep(0.3)
@cancelled_exception()
async def _check_usb2(self, idx, place):
node, speed = await self.usb.scan_usb2(idx)
if node == None:
self.ready_item(f'usb2_{place}', speed)
return
if speed != self.cfg_usb2.get('speed', '480'):
self.ready_item(f'usb2_{place}', speed)
return
self.okay_item(f'usb2_{place}', speed)
@cancelled_exception()
async def check_usb2(self):
items_usb2 = ['usb2_up', 'usb2_down']
while True:
await asyncio.sleep(2)
if self.items['usb2_up'].okay != 1:
await self._check_usb2(0, 'up')
if self.items['usb2_down'].okay != 1:
await self._check_usb2(1, 'down')
    @cancelled_exception()
    async def _check_usb3(self, idx, place):
        """Full test of one USB3 port: link speed, device node, R/W speed, diff.

        idx selects the port in self.usb.usb3_nodes; place ('up'/'down')
        selects the item-name prefix.  Early returns leave later sub-items
        untouched so the caller retries on the next pass.
        """
        self.init_usb3_items(place)
        self.usb.init_node(self.usb.usb3_nodes[idx])
        node, speed = await self.usb.scan_usb3(idx)
        if node == None:
            # nothing plugged in yet: stay in the 'ready' state
            self.ready_item(f'usb3_{place}_speed', speed)
            return
        if speed != self.cfg_usb3.get('speed', '5000'):
            self.fail_item(f'usb3_{place}_speed', speed)
            return
        self.okay_item(f'usb3_{place}_speed', speed)
        sdx = await self.usb.get_sdx(node)
        if sdx == None:
            return
        elif not sdx.startswith('sd'):
            self.fail_item(f'usb3_{place}_sdx', sdx)
            return
        self.okay_item(f'usb3_{place}_sdx', sdx)
        if self.usb.check_test_file() == None:
            LOG.debug(f"test file does not exists")
            return
        offset = await self.usb.determine_seek(sdx)
        if offset == None:
            return
        self.ready_item(f'usb3_{place}_rw')
        await asyncio.sleep(1)
        # measured throughput; pass thresholds come from config.ini [usb3]
        w = await self.usb.write_to_disk(sdx, offset)
        r = await self.usb.read_from_disk(sdx, offset)
        if r == None or w == None:
            self.fail_item(f'usb3_{place}_rw', str(r) + ',' + str(w))
            return
        else:
            norm_r = self.cfg_usb3.getfloat('r', 30)
            norm_w = self.cfg_usb3.getfloat('w', 20)
            if float(r) > norm_r and float(w) > norm_w:
                self.okay_item(f'usb3_{place}_rw', str(r) + ',' + str(w))
            else:
                self.fail_item(f'usb3_{place}_rw', str(r) + ',' + str(w))
        # verify that what was written matches what is read back
        if await self.usb.diff_files(sdx) == 0:
            self.okay_item(f'usb3_{place}_diff')
        else:
            self.fail_item(f'usb3_{place}_diff')
@cancelled_exception()
async def check_usb3(self):
items_usb3_up = ['usb3_up_speed', 'usb3_up_sdx', 'usb3_up_rw', 'usb3_up_diff']
items_usb3_down = ['usb3_down_speed', 'usb3_down_sdx', 'usb3_down_rw', 'usb3_down_diff']
await asyncio.sleep(2)
while True:
await asyncio.sleep(1)
if any([self.items[usb].okay != 1 for usb in items_usb3_up]):
await self._check_usb3(0, 'up')
if any([self.items[usb].okay != 1 for usb in items_usb3_down]):
await self._check_usb3(1, 'down')
    @cancelled_exception()
    async def check_nvme(self):
        """Repeatedly test the NVMe drive: link speed, device node, R/W, diff.

        Returns once the read/write test has completed 'retry' times
        (config.ini [nvme] retry, default 3).
        """
        items_nvme = ['nvme_speed', 'nvme_sdx', 'nvme_rw', 'nvme_diff']
        count = 0  # number of completed read/write rounds
        while True:
            await asyncio.sleep(2)
            if any([self.items[nvme].okay != 1 for nvme in items_nvme]):
                self.init_disk_items('nvme')
                self.usb.init_node(self.usb.nvme)
                speed = await self.usb.scan_nvme()
                if speed == None or speed == '<unknown>':
                    self.ready_item('nvme_speed', speed)
                    continue
                # speed has two tokens (rate + unit); compare each against config
                _speed = speed.split()
                norm_speeds = self.cfg_nvme['speed'].split()
                if _speed[1] != norm_speeds[1]:
                    self.fail_item('nvme_speed', speed)
                    continue
                if float(_speed[0]) < float(norm_speeds[0]):
                    self.fail_item('nvme_speed', speed)
                    continue
                self.okay_item('nvme_speed', speed)
                sdx = await self.usb.get_nvme()
                if sdx == None:
                    continue
                elif not sdx.startswith('nvme'):
                    self.fail_item(f'nvme_sdx', sdx)
                    continue
                self.okay_item(f'nvme_sdx', sdx)
                if self.usb.check_test_file() == None:
                    LOG.debug(f"test file does not exists")
                    continue
                offset = await self.usb.determine_seek(sdx)
                if offset == None:
                    continue
                await asyncio.sleep(2)
                # measured throughput; thresholds from config.ini [nvme]
                w = await self.usb.write_to_disk(sdx, offset)
                r = await self.usb.read_from_disk(sdx, offset)
                if r != None and w != None:
                    count += 1
                    self.ready_item('nvme_rw', str(r) + ',' + str(w))
                    norm_r = self.cfg_nvme.getfloat('r', 60)
                    norm_w = self.cfg_nvme.getfloat('w', 30)
                    if float(r) > norm_r and float(w) > norm_w:
                        self.okay_item('nvme_rw')
                    else:
                        self.fail_item('nvme_rw', str(r) + ',' + str(w))
                    # verify data integrity of the written region
                    if await self.usb.diff_files(sdx) == 0:
                        self.okay_item(f'nvme_diff')
                    else:
                        self.fail_item(f'nvme_diff')
            if count >= self.cfg_nvme.getint('retry', 3):
                return
@cancelled_exception()
async def check_sata(self):
items_sata = ['sata_speed', 'sata_sdx', 'sata_rw', 'sata_diff']
count = 0
await asyncio.sleep(2)
while True:
await asyncio.sleep(2)
if any([self.items[sata].okay != 1 | |
# Repository: andersop91/core
"""Test config flow."""
from ipaddress import IPv4Address
from unittest.mock import ANY, patch
from pyatv import exceptions
from pyatv.const import PairingRequirement, Protocol
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import zeroconf
from homeassistant.components.apple_tv import CONF_ADDRESS, config_flow
from homeassistant.components.apple_tv.const import (
CONF_IDENTIFIERS,
CONF_START_OFF,
DOMAIN,
)
from .common import airplay_service, create_conf, mrp_service, raop_service
from tests.common import MockConfigEntry
# Canned zeroconf discovery payload for a DMAP (legacy "_touch-able") device.
DMAP_SERVICE = zeroconf.ZeroconfServiceInfo(
    host="127.0.0.1",
    hostname="mock_hostname",
    port=None,
    type="_touch-able._tcp.local.",
    name="dmapid._touch-able._tcp.local.",
    properties={"CtlN": "Apple TV"},
)
# Canned zeroconf discovery payload for a RAOP (AirPlay audio) device.
RAOP_SERVICE = zeroconf.ZeroconfServiceInfo(
    host="127.0.0.1",
    hostname="mock_hostname",
    port=None,
    type="_raop._tcp.local.",
    name="AABBCCDDEEFF@Master Bed._raop._tcp.local.",
    properties={"am": "AppleTV11,1"},
)
@pytest.fixture(autouse=True)
def zero_aggregation_time():
    """Prevent the aggregation time from delaying the tests."""
    # Discoveries are normally aggregated for a short window before a flow
    # starts; zero it so flows start immediately.
    with patch.object(config_flow, "DISCOVERY_AGGREGATION_TIME", 0):
        yield
@pytest.fixture(autouse=True)
def use_mocked_zeroconf(mock_async_zeroconf):
    """Mock zeroconf in all tests."""
    # Requesting the mock_async_zeroconf fixture is the whole point;
    # no body is needed.
@pytest.fixture(autouse=True)
def mock_setup_entry():
    """Mock setting up a config entry."""
    # Keep real entry setup from running during flow tests.
    with patch(
        "homeassistant.components.apple_tv.async_setup_entry", return_value=True
    ):
        yield
# User Flows
async def test_user_input_device_not_found(hass, mrp_device):
    """Test when user specifies a non-existing device."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    # "none" matches neither a discovered device name nor an address
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "none"},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "no_devices_found"}
async def test_user_input_unexpected_error(hass, mock_scan):
    """Test that unexpected error yields an error message."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Any unexpected exception during scan must surface as "unknown"
    mock_scan.side_effect = Exception
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "dummy"},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "unknown"}
async def test_user_adds_full_device(hass, full_device, pairing):
    """Test adding device with all services."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "MRP Device"},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["description_placeholders"] == {
        "name": "MRP Device",
        "type": "Unknown",
    }
    # Confirm device, then pair MRP (enter PIN), DMAP (device shows PIN),
    # and AirPlay (enter PIN) in sequence
    result3 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["description_placeholders"] == {"protocol": "MRP"}
    result4 = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"pin": 1111}
    )
    assert result4["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result4["description_placeholders"] == {"protocol": "DMAP", "pin": 1111}
    result5 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result5["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result5["description_placeholders"] == {"protocol": "AirPlay"}
    result6 = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"pin": 1234}
    )
    # All three services paired: entry is created with all credentials
    assert result6["type"] == "create_entry"
    assert result6["data"] == {
        "address": "127.0.0.1",
        "credentials": {
            Protocol.DMAP.value: "dmap_creds",
            Protocol.MRP.value: "mrp_creds",
            Protocol.AirPlay.value: "airplay_creds",
        },
        "identifiers": ["mrpid", "dmapid", "airplayid"],
        "name": "MRP Device",
    }
async def test_user_adds_dmap_device(hass, dmap_device, dmap_pin, pairing):
    """Test adding device with only DMAP service."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "DMAP Device"},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["description_placeholders"] == {
        "name": "DMAP Device",
        "type": "Unknown",
    }
    # DMAP pairing displays the PIN on the device instead of asking for one
    result3 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["description_placeholders"] == {"pin": 1111, "protocol": "DMAP"}
    result6 = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"pin": 1234}
    )
    assert result6["type"] == "create_entry"
    assert result6["data"] == {
        "address": "127.0.0.1",
        "credentials": {Protocol.DMAP.value: "dmap_creds"},
        "identifiers": ["dmapid"],
        "name": "DMAP Device",
    }
async def test_user_adds_dmap_device_failed(hass, dmap_device, dmap_pin, pairing):
    """Test adding DMAP device where remote device did not attempt to pair."""
    # Device-side pairing never succeeds
    pairing.always_fail = True
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "DMAP Device"},
    )
    await hass.config_entries.flow.async_configure(result["flow_id"], {})
    result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "device_did_not_pair"
async def test_user_adds_device_with_ip_filter(
    hass, dmap_device_with_credentials, mock_scan
):
    """Test add device filtering by IP."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Entering an IP address filters the scan result by address
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "127.0.0.1"},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["description_placeholders"] == {
        "name": "DMAP Device",
        "type": "Unknown",
    }
@pytest.mark.parametrize("pairing_requirement", [(PairingRequirement.NotNeeded)])
async def test_user_pair_no_interaction(hass, dmap_with_requirement, pairing_mock):
    """Test pairing service without user interaction."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "DMAP Device"},
    )
    # PairingRequirement.NotNeeded: entry is created without a PIN step
    # and with no credentials stored
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result["data"] == {
        "address": "127.0.0.1",
        "credentials": {Protocol.DMAP.value: None},
        "identifiers": ["dmapid"],
        "name": "DMAP Device",
    }
async def test_user_adds_device_by_ip_uses_unicast_scan(hass, mock_scan):
    """Test add device by IP-address, verify unicast scan is used."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "127.0.0.1"},
    )
    # The scanner must have been asked to target that host directly
    assert str(mock_scan.hosts[0]) == "127.0.0.1"
async def test_user_adds_existing_device(hass, mrp_device):
    """Test that it is not possible to add existing device."""
    # An entry with the same unique_id already exists
    MockConfigEntry(domain="apple_tv", unique_id="mrpid").add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "127.0.0.1"},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "already_configured"}
async def test_user_connection_failed(hass, mrp_device, pairing_mock):
    """Test error message when connection to device fails."""
    pairing_mock.begin.side_effect = exceptions.ConnectionFailedError
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "MRP Device"},
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    # Connection failure while pairing aborts the whole flow
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "setup_failed"
async def test_user_start_pair_error_failed(hass, mrp_device, pairing_mock):
    """Test initiating pairing fails."""
    pairing_mock.begin.side_effect = exceptions.PairingError
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "MRP Device"},
    )
    # PairingError while starting pairing maps to an invalid_auth abort
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "invalid_auth"
async def test_user_pair_service_with_password(
    hass, airplay_device_with_password, pairing_mock
):
    """Test pairing with service requiring a password (not supported)."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "AirPlay Device"},
    )
    # A password-protected service first shows an informational step...
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == "password"
    # ...and then aborts, since password authentication is unsupported
    result3 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result3["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result3["reason"] == "setup_failed"
@pytest.mark.parametrize("pairing_requirement", [(PairingRequirement.Disabled)])
async def test_user_pair_disabled_service(hass, dmap_with_requirement, pairing_mock):
    """Test pairing with disabled service (is ignored with message)."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "DMAP Device"},
    )
    # A disabled protocol shows an explanatory step before aborting
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "protocol_disabled"
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "setup_failed"
@pytest.mark.parametrize("pairing_requirement", [(PairingRequirement.Unsupported)])
async def test_user_pair_ignore_unsupported(hass, dmap_with_requirement, pairing_mock):
    """Test pairing with disabled service (is ignored silently)."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "DMAP Device"},
    )
    # Unsupported services are skipped without any intermediate step
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "setup_failed"
async def test_user_pair_invalid_pin(hass, mrp_device, pairing_mock):
    """Test pairing with invalid pin."""
    pairing_mock.finish.side_effect = exceptions.PairingError
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "MRP Device"},
    )
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    # A PairingError when finishing keeps the form open with invalid_auth
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"pin": 1111},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_user_pair_unexpected_error(hass, mrp_device, pairing_mock):
    """Test unexpected error when entering PIN code."""
    pairing_mock.finish.side_effect = Exception
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "MRP Device"},
    )
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    # Any unexpected exception maps to the generic "unknown" error
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"pin": 1111},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "unknown"}
async def test_user_pair_backoff_error(hass, mrp_device, pairing_mock):
    """Test that backoff error is displayed in case device requests it."""
    pairing_mock.begin.side_effect = exceptions.BackOffError
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "MRP Device"},
    )
    # Device asked us to back off: flow aborts with a dedicated reason
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "backoff"
async def test_user_pair_begin_unexpected_error(hass, mrp_device, pairing_mock):
    """Test unexpected error during start of pairing."""
    pairing_mock.begin.side_effect = Exception
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "MRP Device"},
    )
    # Unexpected failure while starting pairing aborts with "unknown"
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "unknown"
async def test_ignores_disabled_service(hass, airplay_with_disabled_mrp, pairing):
    """Test adding device with only DMAP service."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Find based on mrpid (but do not pair that service since it's disabled)
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {"device_input": "mrpid"},
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["description_placeholders"] == {
        "name": "AirPlay Device",
        "type": "Unknown",
    }
    result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["description_placeholders"] == {"protocol": "AirPlay"}
    result3 = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"pin": 1111}
    )
    # Entry stores only AirPlay credentials; the disabled MRP service
    # still contributes its identifier
    assert result3["type"] == "create_entry"
    assert result3["data"] == {
        "address": "127.0.0.1",
        "credentials": {
            Protocol.AirPlay.value: "airplay_creds",
        },
        "identifiers": ["mrpid", "airplayid"],
        "name": "AirPlay Device",
    }
# Zeroconf
async def test_zeroconf_unsupported_service_aborts(hass):
    """Discovering an unsupported zeroconf service type aborts the flow."""
    discovery = zeroconf.ZeroconfServiceInfo(
        host="127.0.0.1",
        hostname="mock_hostname",
        name="mock_name",
        port=None,
        type="_dummy._tcp.local.",
        properties={},
    )

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_ZEROCONF},
        data=discovery,
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "unknown"
async def test_zeroconf_add_mrp_device(hass, mrp_device, pairing):
"""Test add MRP device discovered by zeroconf."""
unrelated_result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="127.0.0.2",
hostname="mock_hostname",
port=None,
name="Kitchen",
properties={"UniqueIdentifier": "unrelated", "Name": "Kitchen"},
type="_mediaremotetv._tcp.local.",
),
)
assert unrelated_result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="127.0.0.1",
hostname="mock_hostname",
port=None,
name="Kitchen",
properties={"UniqueIdentifier": "mrpid", "Name": "Kitchen"},
type="_mediaremotetv._tcp.local.",
),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["description_placeholders"] == {
"name": "MRP Device",
"type": "Unknown",
}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["description_placeholders"] == {"protocol": "MRP"}
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"pin": 1111}
)
assert | |
binding "
"%(binding)s for port %(port)s, deleting "
"DHCP binding on server",
{'binding': binding['id'], 'port': port['id']})
fake_db_binding = {
'port_id': port['id'],
'nsx_service_id': dhcp_service['nsx_service_id'],
'nsx_binding_id': binding['id']}
self._delete_dhcp_binding_on_server(context, fake_db_binding)
def _add_dhcp_binding_on_server(self, context, dhcp_service_id, subnet_id,
                                ip, port):
    """Create a static DHCP binding for *port*/*ip* on the NSX backend.

    :param context: neutron request context.
    :param dhcp_service_id: backend ID of the logical DHCP server.
    :param subnet_id: neutron subnet the IP address belongs to.
    :param ip: IPv4 address to bind to the port's MAC address.
    :param port: neutron port dict.
    :returns: the binding object returned by the NSX backend.
    :raises nsx_lib_exc.ManagerError: re-raised (after logging) when the
        backend call fails.
    """
    try:
        # Derive a DNS-safe hostname from the IP, e.g. 10.0.0.1 -> host-10-0-0-1.
        hostname = 'host-%s' % ip.replace('.', '-')
        subnet = self.get_subnet(context, subnet_id)
        gateway_ip = subnet.get('gateway_ip')
        # Merge the port's extra DHCP options with network/subnet options.
        options = self._get_dhcp_options(
            context, ip, port.get(ext_edo.EXTRADHCPOPTS),
            port['network_id'], subnet)
        binding = self.nsxlib.dhcp_server.create_binding(
            dhcp_service_id, port['mac_address'], ip, hostname,
            self._get_conf_attr('dhcp_lease_time'), options, gateway_ip)
        LOG.debug("Created static binding (mac: %(mac)s, ip: %(ip)s, "
                  "gateway: %(gateway)s, options: %(options)s) for port "
                  "%(port)s on logical DHCP server %(server)s",
                  {'mac': port['mac_address'], 'ip': ip,
                   'gateway': gateway_ip, 'options': options,
                   'port': port['id'],
                   'server': dhcp_service_id})
        return binding
    except nsx_lib_exc.ManagerError:
        # Log full details, then let the exception propagate to the caller.
        with excutils.save_and_reraise_exception():
            LOG.error("Unable to create static binding (mac: %(mac)s, "
                      "ip: %(ip)s, gateway: %(gateway)s, options: "
                      "%(options)s) for port %(port)s on logical DHCP "
                      "server %(server)s",
                      {'mac': port['mac_address'], 'ip': ip,
                       'gateway': gateway_ip, 'options': options,
                       'port': port['id'],
                       'server': dhcp_service_id})
def _delete_port_mp_dhcp_binding(self, context, port):
    """Remove all NSX static DHCP bindings associated with *port*.

    Deletes each binding on the backend first, then drops the neutron->NSX
    mapping row from the DB.
    """
    # Do not check device_owner here because Nova may have already
    # deleted that before Neutron's port deletion.
    bindings = nsx_db.get_nsx_dhcp_bindings(context.session, port['id'])
    for binding in bindings:
        self._delete_dhcp_binding_on_server(context, binding)
        try:
            nsx_db.delete_neutron_nsx_dhcp_binding(
                context.session, binding['port_id'],
                binding['nsx_binding_id'])
        except db_exc.DBError:
            # Best effort: a failed DB-mapping delete should not block
            # removal of the remaining bindings.
            LOG.error("Unable to delete mapping of DHCP binding "
                      "%(binding)s for port %(port)s",
                      {'binding': binding['nsx_binding_id'],
                       'port': binding['port_id']})
def _delete_dhcp_binding_on_server(self, context, binding):
    """Delete one static DHCP binding from its logical DHCP server.

    :param context: neutron request context (unused, kept for signature
        parity with the other binding helpers).
    :param binding: DB mapping dict with 'port_id', 'nsx_service_id' and
        'nsx_binding_id' keys.
    :raises nsx_lib_exc.ManagerError: re-raised (after logging) when the
        backend call fails.
    """
    try:
        self.nsxlib.dhcp_server.delete_binding(
            binding['nsx_service_id'], binding['nsx_binding_id'])
        # Fixed: both messages had a stray ')' after %(port)s.
        LOG.debug("Deleted static binding for port %(port)s on "
                  "logical DHCP server %(server)s",
                  {'port': binding['port_id'],
                   'server': binding['nsx_service_id']})
    except nsx_lib_exc.ManagerError:
        # Log, then propagate so callers can react to backend failures.
        with excutils.save_and_reraise_exception():
            LOG.error("Unable to delete static binding for port "
                      "%(port)s on logical DHCP server %(server)s",
                      {'port': binding['port_id'],
                       'server': binding['nsx_service_id']})
def _find_dhcp_binding(self, subnet_id, ip_address, bindings):
for binding in bindings:
if (subnet_id == binding['subnet_id'] and
ip_address == binding['ip_address']):
return binding
def _update_port_mp_dhcp_binding(self, context, old_port, new_port):
    """Sync NSX DHCP state after a neutron port update.

    First check if any IPv4 address in fixed_ips is changed. Then update
    the DHCP server setting (for a DHCP port) or the static DHCP bindings
    (for a DHCP-configurable compute port) depending on the port type.

    Note that Neutron allows a port with multiple IPs in the same subnet,
    but the backend DHCP server may not support that.
    """
    if (utils.is_port_dhcp_configurable(old_port) !=
            utils.is_port_dhcp_configurable(new_port)):
        # Note that the device_owner could be changed,
        # but still needs DHCP binding.
        if utils.is_port_dhcp_configurable(old_port):
            self._delete_port_mp_dhcp_binding(context, old_port)
        else:
            self._add_port_mp_dhcp_binding(context, new_port)
        return
    # Collect IPv4 DHCP addresses from original and updated fixed_ips
    # in the form of [(subnet_id, ip_address)].
    old_fixed_ips = set([(fixed_ip['subnet_id'], fixed_ip['ip_address'])
                         for fixed_ip in self._filter_ipv4_dhcp_fixed_ips(
                             context, old_port['fixed_ips'])])
    new_fixed_ips = set([(fixed_ip['subnet_id'], fixed_ip['ip_address'])
                         for fixed_ip in self._filter_ipv4_dhcp_fixed_ips(
                             context, new_port['fixed_ips'])])
    # Find out the subnet/IP differences before and after the update.
    ips_to_add = list(new_fixed_ips - old_fixed_ips)
    ips_to_delete = list(old_fixed_ips - new_fixed_ips)
    ip_change = (ips_to_add or ips_to_delete)
    if (old_port["device_owner"] == constants.DEVICE_OWNER_DHCP and
            ip_change):
        # Update backend DHCP server address if the IP address of a DHCP
        # port is changed.
        if len(new_fixed_ips) != 1:
            msg = _("Can only configure one IP address on a DHCP server")
            LOG.error(msg)
            raise n_exc.InvalidInput(error_message=msg)
        # Locate the backend DHCP server for this DHCP port.
        dhcp_service = nsx_db.get_nsx_service_binding(
            context.session, old_port['network_id'],
            nsxlib_consts.SERVICE_DHCP)
        if dhcp_service:
            new_ip = ips_to_add[0][1]
            try:
                self.nsxlib.dhcp_server.update(
                    dhcp_service['nsx_service_id'],
                    server_ip=new_ip)
                LOG.debug("Updated IP %(ip)s for logical DHCP server "
                          "%(server)s",
                          {'ip': new_ip,
                           'server': dhcp_service['nsx_service_id']})
            except nsx_lib_exc.ManagerError:
                # Log, then re-raise for the caller to handle.
                with excutils.save_and_reraise_exception():
                    LOG.error("Unable to update IP %(ip)s for logical "
                              "DHCP server %(server)s",
                              {'ip': new_ip,
                               'server': dhcp_service['nsx_service_id']})
    elif utils.is_port_dhcp_configurable(old_port):
        # Update static DHCP bindings for a compute port.
        bindings = nsx_db.get_nsx_dhcp_bindings(context.session,
                                                old_port['id'])
        dhcp_opts = new_port.get(ext_edo.EXTRADHCPOPTS)
        dhcp_opts_changed = (old_port[ext_edo.EXTRADHCPOPTS] !=
                             new_port[ext_edo.EXTRADHCPOPTS])
        if ip_change:
            # If IP address is changed, update associated DHCP bindings,
            # metadata route, and default hostname.
            # Mac address (if changed) will be updated at the same time.
            if ([subnet_id for (subnet_id, ip) in ips_to_add] ==
                    [subnet_id for (subnet_id, ip) in ips_to_delete]):
                # No change on subnet_id, just update corresponding IPs.
                # NOTE: relies on ips_to_add/ips_to_delete being aligned
                # index-by-index (ips_to_add[i] replaces ips_to_delete[i]).
                for i, (subnet_id, ip) in enumerate(ips_to_delete):
                    binding = self._find_dhcp_binding(subnet_id, ip,
                                                      bindings)
                    if binding:
                        subnet = self.get_subnet(context,
                                                 binding['subnet_id'])
                        self._update_dhcp_binding_on_server(
                            context, binding, new_port['mac_address'],
                            ips_to_add[i][1], old_port['network_id'],
                            dhcp_opts=dhcp_opts, subnet=subnet)
                        # Update DB IP
                        nsx_db.update_nsx_dhcp_bindings(context.session,
                                                        old_port['id'],
                                                        ip,
                                                        ips_to_add[i][1])
            else:
                # Subnet changed: drop old bindings, then create new ones.
                for (subnet_id, ip) in ips_to_delete:
                    binding = self._find_dhcp_binding(subnet_id, ip,
                                                      bindings)
                    if binding:
                        self._delete_dhcp_binding_on_server(context,
                                                            binding)
                if ips_to_add:
                    dhcp_service = nsx_db.get_nsx_service_binding(
                        context.session, new_port['network_id'],
                        nsxlib_consts.SERVICE_DHCP)
                    if dhcp_service:
                        for (subnet_id, ip) in ips_to_add:
                            self._add_dhcp_binding_on_server(
                                context, dhcp_service['nsx_service_id'],
                                subnet_id, ip, new_port)
        elif (old_port['mac_address'] != new_port['mac_address'] or
              dhcp_opts_changed):
            # If only Mac address/dhcp opts is changed,
            # update it in all associated DHCP bindings.
            for binding in bindings:
                subnet = self.get_subnet(context, binding['subnet_id'])
                self._update_dhcp_binding_on_server(
                    context, binding, new_port['mac_address'],
                    binding['ip_address'], old_port['network_id'],
                    dhcp_opts=dhcp_opts, subnet=subnet)
def _cleanup_port(self, context, port_id, nsx_port_id=None):
    """Delete a neutron port and, when given, its NSX manager port.

    Does not handle cleanup of policy ports.

    :param nsx_port_id: backend logical-port ID; only deleted when both it
        and the passthrough nsxlib client are available.
    """
    super(NsxPluginV3Base, self).delete_port(context, port_id)
    if nsx_port_id and self.nsxlib:
        self.nsxlib.logical_port.delete(nsx_port_id)
def _is_excluded_port(self, device_owner, port_security):
    """Return True when a port should be excluded from port security.

    Router interfaces are never excluded; DHCP ports are excluded only
    when native DHCP metadata is unavailable; all other ports are excluded
    when port security is disabled.
    """
    if device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF:
        return False
    if device_owner == constants.DEVICE_OWNER_DHCP:
        return not self._has_native_dhcp_metadata()
    return not port_security
def _validate_obj_az_on_creation(self, context, obj_data, obj_type):
    """Validate AZ hints on a new object and return its AZ object.

    :param obj_data: the network/router body being created.
    :param obj_type: resource type string used for validation.
    :returns: the availability-zone object resolved from the hints.
    """
    # validate the availability zone, and get the AZ object
    if az_def.AZ_HINTS in obj_data:
        self._validate_availability_zones_forced(
            context, obj_type, obj_data[az_def.AZ_HINTS])
    return self.get_obj_az_by_hints(obj_data)
def _add_az_to_net(self, context, net_id, net_data):
    """Persist the AZ hints from *net_data* onto the neutron network."""
    if az_def.AZ_HINTS in net_data:
        # Update the AZ hints in the neutron object (stored as a string).
        az_hints = az_validator.convert_az_list_to_string(
            net_data[az_def.AZ_HINTS])
        super(NsxPluginV3Base, self).update_network(
            context, net_id,
            {'network': {az_def.AZ_HINTS: az_hints}})
def _add_az_to_router(self, context, router_id, router_data):
    """Persist the AZ hints from *router_data* onto the neutron router."""
    if az_def.AZ_HINTS in router_data:
        # Update the AZ hints in the neutron object (stored as a string).
        az_hints = az_validator.convert_az_list_to_string(
            router_data[az_def.AZ_HINTS])
        super(NsxPluginV3Base, self).update_router(
            context, router_id,
            {'router': {az_def.AZ_HINTS: az_hints}})
def get_network_availability_zones(self, net_db):
    """Return the single availability zone this network belongs to.

    When using the configured AZs, the zone is always the first hint
    stored on the network, or the plugin default when no hints exist.
    """
    hints = az_validator.convert_az_string_to_list(
        net_db[az_def.AZ_HINTS])
    az_name = hints[0] if hints else self.get_default_az().name
    return [az_name]
def _get_router_az_obj(self, router):
    """Return the AZ object of *router*.

    Extends the router dict with its extra attributes first so that the
    AZ hints are available to get_router_az().
    """
    l3_attrs_db.ExtraAttributesMixin._extend_extra_router_dict(
        router, router)
    return self.get_router_az(router)
def get_router_availability_zones(self, router):
    """Return availability zones which a router belongs to.

    :returns: single-element list with the router's AZ name.
    """
    return [self._get_router_az_obj(router).name]
def _validate_availability_zones_forced(self, context, resource_type,
                                        availability_zones):
    """Validate AZs unconditionally (force=True bypasses the sub-plugin
    short-circuit in validate_availability_zones)."""
    return self.validate_availability_zones(context, resource_type,
                                            availability_zones,
                                            force=True)
def _list_availability_zones(self, context, filters=None):
result = {}
for az in self._availability_zones_data.list_availability_zones():
# Add this availability zone as a network & router resource
if filters:
if 'name' in filters and az not in filters['name']:
continue
for res in ['network', 'router']:
if 'resource' not in filters or res in filters['resource']:
result[(az, res)] = True
return result
def validate_availability_zones(self, context, resource_type,
                                availability_zones, force=False):
    """Validate requested AZ hints against the configured AZs.

    :param force: when True, validate even for a sub-plugin (used by
        _validate_availability_zones_forced).
    """
    # This method is called directly from this plugin but also from
    # registered callbacks
    if self._is_sub_plugin and not force:
        # validation should be done together for both plugins
        return
    # Validate against the configured AZs
    return self.validate_obj_azs(availability_zones)
def _ensure_nsxlib(self, feature):
    """Raise InvalidInput if the NSX passthrough API is unavailable.

    :param feature: human-readable feature name used in the error message.
    :raises n_exc.InvalidInput: when self.nsxlib is not configured.
    """
    if not self.nsxlib:
        # Typo fix in the user-facing message: "passthough" -> "passthrough".
        msg = (_("%s is not supported since passthrough API is disabled") %
               feature)
        LOG.error(msg)
        raise n_exc.InvalidInput(error_message=msg)
def _ensure_native_dhcp(self):
    """Raise InvalidInput unless native DHCP can be used.

    Requires both the passthrough nsxlib client and a configured
    dhcp_profile.
    """
    self._ensure_nsxlib("Native DHCP")
    if not self._native_dhcp_enabled:
        msg = (_("Native DHCP is not supported since dhcp_profile is not"
                 " provided in plugin configuration"))
        LOG.error(msg)
        raise n_exc.InvalidInput(error_message=msg)
def _get_net_dhcp_relay(self, context, net_id):
    """Return the DHCP relay service of a network, or None.

    Base implementation has no relay support; should be implemented by
    each plugin.
    """
    return None
def _get_ipv6_subnet(self, context, network):
for subnet in network.subnets:
if subnet.ip_version == 6:
return subnet
def _validate_single_ipv6_subnet(self, context, network, subnet):
    """Reject *subnet* if *network* already has an IPv6 subnet.

    :raises n_exc.InvalidInput: when adding a second IPv6 subnet.
    """
    if subnet.get('ip_version') != 6:
        return
    if not self._get_ipv6_subnet(context, network):
        return
    msg = (_("Only one ipv6 subnet per network is supported"))
    LOG.error(msg)
    raise n_exc.InvalidInput(error_message=msg)
def _subnet_with_native_dhcp(self, subnet, orig_subnet=None):
    """Return True when *subnet* should be served by native DHCP.

    :param subnet: subnet body (may be a partial update body).
    :param orig_subnet: on updates, the pre-update subnet; its
        'enable_dhcp' is used as the default when the update body omits
        the field.
    """
    native_metadata = self._has_native_dhcp_metadata()
    default_enable_dhcp = (orig_subnet.get('enable_dhcp', False)
                           if orig_subnet else False)
    # DHCPv6 is not yet supported, but slaac is.
    # When configuring slaac, neutron requires the user
    # to enable dhcp, however plugin code does not consider
    # slaac as dhcp.
    return (native_metadata and
            subnet.get('enable_dhcp', default_enable_dhcp) and
            subnet.get('ipv6_address_mode') != constants.IPV6_SLAAC)
def _validate_mp_subnet_ip_version(self, subnet):
    """Reject stateful DHCPv6 subnets (not supported by the MP backend).

    Only needs to be called at create time, since ip_version and the
    ipv6 mode attributes are read only.
    """
    if subnet.get('ip_version') == 4:
        # No dhcp restrictions for V4.
        return
    wants_dhcp = subnet.get('enable_dhcp', False)
    slaac_mode = subnet.get('ipv6_address_mode') == constants.IPV6_SLAAC
    if not wants_dhcp or slaac_mode:
        return
    msg = _("DHCPv6 is not supported")
    LOG.error(msg)
    raise n_exc.InvalidInput(error_message=msg)
def _validate_net_dhcp_profile(self, context, network, az):
"""Validate that the dhcp profile edge cluster match the one of
the network TZ
"""
if not self.nsxlib:
msg = (_("Native DHCP is not supported since "
"passthough API is disabled"))
LOG.error(msg)
raise n_exc.InvalidInput(error_message=msg)
net_tz = self._get_net_tz(context, network['id'])
dhcp_profile = az._native_dhcp_profile_uuid
dhcp_obj = self.nsxlib.native_dhcp_profile.get(dhcp_profile)
ec_id = dhcp_obj['edge_cluster_id']
if not ec_id:
LOG.warning("DHCP profile %s is missing an edge cluster",
dhcp_profile)
| |
# tools/militarytools/esri/toolboxes/scripts/VisTools.py
# coding: utf-8
'''
------------------------------------------------------------------------------
Copyright 2018 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
==================================================
VisTools.py
--------------------------------------------------
requirements: ArcGIS 10.3.1+, ArcGIS Pro 1.4+, Python 2.7 or Python 3.5+
author: ArcGIS Solutions
contact: <EMAIL>
company: Esri
==================================================
description:
Visibility Tool logic module.
==================================================
'''
import os
import arcpy
import sys
import traceback
try:
from . import Utilities
from . import VisibilityUtilities
except ImportError:
import Utilities
import VisibilityUtilities
class AddLinearLineOfSightFields(object):
    '''
    Adds an OFFSET field and user-defined/default value to
    an input Observer and input Target features.
    '''

    class ToolValidator(object):
        """Class for validating a tool's parameter values and controlling
        the behavior of the tool's dialog."""

        def __init__(self, parameters):
            """Setup arcpy and the list of tool parameters."""
            self.params = parameters

        def initializeParameters(self):
            """Refine the properties of a tool's parameters. This method is
            called when the tool is opened."""
            # Parameter index reference:
            # 0 - Input Observer Features
            # 1 - Observer Height Above Surface
            # 2 - Input Target Features
            # 3 - Target Height Above Surface
            # 4 - Output Observer Features
            # 5 - Output Target Features
            return

        def updateParameters(self):
            """Modify the values and properties of parameters before internal
            validation is performed. This method is called whenever a parameter
            has been changed."""
            return

        def updateMessages(self):
            """Modify the messages created by internal validation for each tool
            parameter. This method is called after internal validation."""
            # Negative heights are allowed but suspicious, so only warn
            # (do not block execution).
            msgWarnLessThanZero = r"Values less than zero may produce unexpected results."
            if self.params[1].altered:
                if self.params[1].value < 0.0:
                    self.params[1].setWarningMessage(msgWarnLessThanZero)
            if self.params[3].altered:
                if self.params[3].value < 0.0:
                    self.params[3].setWarningMessage(msgWarnLessThanZero)
            return

    def __init__(self):
        '''
        Add LLOS Fields tool constructor method.
        Sets the catalog metadata shown in the toolbox UI.
        '''
        self.label = "Add LLOS Fields"
        self.description = "Adds an OFFSET field and user-defined/default value to an input Observer and input Target features."
        self.category = "Visibility"
        self.canRunInBackground = False

    def getParameterInfo(self):
        '''
        Define parameter definitions.
        Returns the six tool parameters (two inputs + height for each,
        plus two derived outputs mirroring the inputs).
        '''
        # Input_Observer_Features
        param_1 = arcpy.Parameter()
        param_1.name = u'Input_Observer_Features'
        param_1.displayName = u'Input Observer Features'
        param_1.parameterType = 'Required'
        param_1.direction = 'Input'
        param_1.datatype = u'Feature Layer'

        # Observer_Height_Above_Surface
        param_2 = arcpy.Parameter()
        param_2.name = u'Observer_Height_Above_Surface'
        param_2.displayName = u'Observer Height Above Surface'
        param_2.parameterType = 'Required'
        param_2.direction = 'Input'
        param_2.datatype = u'Double'
        param_2.value = u'2.0'

        # Input_Target_Features
        param_3 = arcpy.Parameter()
        param_3.name = u'Input_Target_Features'
        param_3.displayName = u'Input Target Features'
        param_3.parameterType = 'Required'
        param_3.direction = 'Input'
        param_3.datatype = u'Feature Layer'

        # Target_Height_Above_Surface
        param_4 = arcpy.Parameter()
        param_4.name = u'Target_Height_Above_Surface'
        param_4.displayName = u'Target Height Above Surface'
        param_4.parameterType = 'Required'
        param_4.direction = 'Input'
        param_4.datatype = u'Double'
        param_4.value = u'0.0'

        # Output_Observer_Features (derived: same features, fields added)
        param_5 = arcpy.Parameter()
        param_5.name = u'Output_Observer_Features'
        param_5.displayName = u'Output Observer Features'
        param_5.parameterType = 'Derived'
        param_5.direction = 'Output'
        param_5.datatype = u'Feature Class'
        param_5.parameterDependencies = ['Input_Observer_Features']

        # Output_Target_Features (derived: same features, fields added)
        param_6 = arcpy.Parameter()
        param_6.name = u'Output_Target_Features'
        param_6.displayName = u'Output Target Features'
        param_6.parameterType = 'Derived'
        param_6.direction = 'Output'
        param_6.datatype = u'Feature Class'
        param_6.parameterDependencies = ['Input_Target_Features']
        return [param_1, param_2, param_3, param_4, param_5, param_6]

    def updateParameters(self, parameters):
        '''
        Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed. Delegates to the nested ToolValidator.
        '''
        validator = getattr(self, 'ToolValidator', None)
        if validator:
            return validator(parameters).updateParameters()

    def updateMessages(self, parameters):
        '''
        Modify validation messages. Delegates to the nested ToolValidator.
        '''
        validator = getattr(self, 'ToolValidator', None)
        if validator:
            return validator(parameters).updateMessages()

    def execute(self, parameters, messages):
        '''
        Execute for toolbox: add the OFFSET fields to both feature sets.
        Returns the (observer, target) outputs produced by
        VisibilityUtilities.addLLOSFields.
        '''
        inputObserverFeatures = parameters[0].valueAsText
        inputObserverHeight = parameters[1].value
        inputTargetFeatures = parameters[2].valueAsText
        inputTargetHeight = parameters[3].value
        out_LLOS_added = VisibilityUtilities.addLLOSFields(inputObserverFeatures,
                                                           inputObserverHeight,
                                                           inputTargetFeatures,
                                                           inputTargetHeight)
        return out_LLOS_added[0], out_LLOS_added[1]
class AddRadialLineOfSightObserverFields(object):
'''
Adds Observer fields and values to inputFeatures:
OFFSETA: observer offset height above surface, default is 2.0
OFFSETB: surface offset, default is 0.0
RADIUS1: Near distance, default is 0.0
RADIUS2: Farthest distance, default is 1000.0
AZIMUTH1: Left Azimuth in horizontal field of view, default is 0.0
AZIMUTH2: Right Azimuth in horizontal field of view, default is 360.0
VERT1: Top Angle in vertical field of view, default is 90.0
VERT2: Bottom Angle in vertical field of view, default is -90.0
'''
class ToolValidator(object):
    """Class for validating a tool's parameter values and controlling
    the behavior of the tool's dialog."""

    def __init__(self, parameters):
        """Setup arcpy and the list of tool parameters."""
        self.params = parameters

    def initializeParameters(self):
        """Refine the properties of a tool's parameters. This method is
        called when the tool is opened."""
        # Parameter index reference:
        # 0 - Input Observer Features
        # 1 - Observer Offset
        # 2 - Surface Offset
        # 3 - Minimum Distance Radius
        # 4 - Maximum Distance Radius
        # 5 - Left Bearing Azimuth
        # 6 - Right Bearing Azimuth
        # 7 - Top Vertical Angle
        # 8 - Bottom Vertical Angle
        # 9 - Output Observer Features
        return

    def updateParameters(self):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        # Parameter index reference (same order as initializeParameters):
        # 0 - Input Observer Features
        # 1 - Observer Offset
        # 2 - Surface Offset
        # 3 - Minimum Distance Radius
        # 4 - Maximum Distance Radius
        # 5 - Left Bearing Azimuth
        # 6 - Right Bearing Azimuth
        # 7 - Top Vertical Angle
        # 8 - Bottom Vertical Angle
        # 9 - Output Observer Features
        return

    def updateMessages(self):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        # NOTE(review): several checks below read a *sibling* parameter's
        # .value even when only one side was altered (e.g. params[4].value
        # inside the params[3] branch); if the sibling value is empty this
        # comparison may fail — confirm the dialog always supplies values.
        msgWarnLessThanZero = r"Value less than zero may produce unexpected results."
        msgErrorLessThanZero = r"Values must be greater than zero."
        msgErrorMaxDistGreater = r"Maximum Distance Radius value must be greater than Minimum Distance Radius value."
        msgErrorRightAzimGreater = r"Right Bearing Azimuth must be greater than Left Bearing Azimuth."
        msgErrorTopVertGreater = r"Top Vertical Angle must be greater than Bottom Vertical Angle."
        msgErrorHFOVRange = r"Azimuth/Bearing values must be between 0 and 360."
        msgErrorVFOVRange = r"Vertical Angles must be between 90 and -90."
        # 0 - Input Observer Features
        # 1 - Observer Offset: negative only warns (may be intentional).
        if self.params[1].altered:
            if self.params[1].value < 0.0:
                self.params[1].setWarningMessage(msgWarnLessThanZero)
        # 2 - Surface Offset: must be non-negative.
        if self.params[2].altered:
            if self.params[2].value < 0.0:
                self.params[2].setErrorMessage(msgErrorLessThanZero)
        # 3 - Minimum Distance Radius: non-negative and strictly below max.
        if self.params[3].altered:
            if self.params[3].value < 0.0:
                self.params[3].setErrorMessage(msgErrorLessThanZero)
            if self.params[3].value >= self.params[4].value:
                self.params[3].setErrorMessage(msgErrorMaxDistGreater)
        # 4 - Maximum Distance Radius: same pair check, error on this param.
        if self.params[4].altered:
            if self.params[4].value < 0.0:
                self.params[4].setErrorMessage(msgErrorLessThanZero)
            if self.params[3].value >= self.params[4].value:
                self.params[4].setErrorMessage(msgErrorMaxDistGreater)
        # 5 - Left Bearing Azimuth: 0..360 and strictly below the right one.
        if self.params[5].altered:
            if self.params[5].value < 0.0 or self.params[5].value > 360.0:
                self.params[5].setErrorMessage(msgErrorHFOVRange)
            if self.params[5].value >= self.params[6].value:
                self.params[5].setErrorMessage(msgErrorRightAzimGreater)
        # 6 - Right Bearing Azimuth: mirror of the left-azimuth checks.
        if self.params[6].altered:
            if self.params[6].value < 0.0 or self.params[6].value > 360.0:
                self.params[6].setErrorMessage(msgErrorHFOVRange)
            if self.params[5].value >= self.params[6].value:
                self.params[6].setErrorMessage(msgErrorRightAzimGreater)
        # 7 - Top Vertical Angle: -90..90 and strictly above the bottom one.
        if self.params[7].altered:
            if self.params[7].value < -90.0 or self.params[7].value > 90.0:
                self.params[7].setErrorMessage(msgErrorVFOVRange)
            if self.params[7].value <= self.params[8].value:
                self.params[7].setErrorMessage(msgErrorTopVertGreater)
        # 8 - Bottom Vertical Angle: mirror of the top-angle checks.
        if self.params[8].altered:
            if self.params[8].value < -90.0 or self.params[8].value > 90.0:
                self.params[8].setErrorMessage(msgErrorVFOVRange)
            if self.params[7].value <= self.params[8].value:
                self.params[8].setErrorMessage(msgErrorTopVertGreater)
        # 9 - Output Observer Features
        return
def __init__(self):
    '''Constructor: declare the Add RLOS Fields tool's catalog metadata.'''
    self.canRunInBackground = False
    self.category = "Visibility"
    self.label = "Add RLOS Fields"
    self.description = "Adds Observer fields and values to inputFeatures."
def getParameterInfo(self):
# Input_Observer_Features
input_observer_features = arcpy.Parameter()
input_observer_features.name = 'input_observer_features'
input_observer_features.displayName = 'Input Observer Features'
input_observer_features.parameterType = 'Required'
input_observer_features.direction = 'Input'
input_observer_features.datatype = 'Feature Layer'
# Observer_Offset
input_OFFSETA = arcpy.Parameter()
input_OFFSETA.name = 'Observer_Offset'
input_OFFSETA.displayName = 'Observer Offset'
input_OFFSETA.parameterType = 'Required'
input_OFFSETA.direction = 'Input'
input_OFFSETA.datatype = 'Double'
input_OFFSETA.value = '2'
# Surface_Offset
input_OFFSETB = arcpy.Parameter()
input_OFFSETB.name = 'Surface_Offset'
input_OFFSETB.displayName = 'Surface Offset'
input_OFFSETB.parameterType = 'Required'
input_OFFSETB.direction = 'Input'
input_OFFSETB.datatype = 'Double'
input_OFFSETB.value = '0'
# Minimum_Distance_Radius
input_RADIUS1 = arcpy.Parameter()
input_RADIUS1.name = 'Minimum_Distance_Radius'
input_RADIUS1.displayName = 'Minimum Distance Radius'
input_RADIUS1.parameterType = 'Required'
input_RADIUS1.direction = 'Input'
input_RADIUS1.datatype = 'Double'
input_RADIUS1.value = '0'
# Maximum_Distance_Radius
input_RADIUS2 = arcpy.Parameter()
input_RADIUS2.name = 'Maximum_Distance_Radius'
input_RADIUS2.displayName = 'Maximum Distance Radius'
input_RADIUS2.parameterType = 'Required'
input_RADIUS2.direction = 'Input'
input_RADIUS2.datatype = 'Double'
input_RADIUS2.value = '1000'
# Left_Bearing_Azimuth
input_AZIMUTH1 = arcpy.Parameter()
input_AZIMUTH1.name = 'Left_Bearing_Azimuth'
input_AZIMUTH1.displayName = 'Left Bearing Azimuth'
input_AZIMUTH1.parameterType | |
1, 0, 1, 0],
world=self.world)
no_14_vehicle_planner = WaypointFollower_FullMap(actor=self.no_14_vehicle,
target_speed=self.no_14_vehicle_speed,
actor_location=no_14_wp_location,
map=self._map, avoid_collision=True,
pattern_1=[1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
world=self.world)
self.vehicle_planners = [first_vehicle_planner, second_vehicle_planner, next_second_vehicle_planner,
third_vehicle_planner, fourth_vehicle_planner,
fifth_vehicle_planner, next_fifth_vehicle_planner, sixth_vehicle_planner,
next_sixth_vehicle_planner, seventh_vehicle_planner,
next_seventh_vehicle_planner, eighth_vehicle_planner, next_eighth_vehicle_planner,
no_12_vehicle_planner, no_13_vehicle_planner, no_14_vehicle_planner,
]
for planner in self.vehicle_planners:
planner.setup()
def generate_car(self):
    """Spawn additional zombie cars at fixed positions and attach planners.

    Positions [-1, -1, -1] and [-2, -2, -2] are placeholders: the car is
    placed 16 m ahead of the previous entry's position, on the left lane.
    The [-2, -2, -2] marker additionally requests target speed 26 (others
    get 25).  A spawn is skipped when any existing vehicle is within 30 m
    of the candidate location; failed spawns are logged and skipped.
    """
    additional_zombie_car = list()
    additional_zombie_car_speed = list()
    additional_pattern = list()
    additional_actor_location = list()
    all_car_pos = [[93.75690460205078, -132.76296997070312, 9.84310531616211],
                   [143.19349670410156, -204.4090118408203, 1.8431016206741333],
                   [-2, -2, -2],
                   [-100.46805572509766, 16.266956329345703, 1.8431016206741333],
                   [-74.38717651367188, 99.71611022949219, 1.8052573204040527],
                   [-2.410623788833618, 207.50567626953125, 1.8431040048599243],
                   [-2, -2, -2],
                   [244.31658935546875, 53.67372131347656, 1.8431016206741333],
                   [-2, -2, -2],
                   [245.8651123046875, -9.9967041015625, 1.8431016206741333],
                   [-2, -2, -2],
                   [-6.594831466674805, -208.17323303222656, 1.8431016206741333],
                   [-2, -2, -2],
                   [4.926102638244629, 91.77217864990234, 1.8432115316390991],
                   [4.926102638244629, 40.57860565185547, 1.8431016206741333],
                   [5.430785179138184, 122.2763442993164, 1.8431016206741333]
                   ]
    all_pattern = [[1, 0, 2],
                   [0, 1, 0],
                   [1, 0, 0, 0, 1],
                   [0, 0, 0],
                   [0, 0, 0, 0, 1],
                   [0, 1, 0],
                   [1, 1, 0, 0, 0],
                   [0, 1, 0],
                   [1, 0, 0, 0, 1],
                   [0, 1, 0],
                   [1, 0, 0, 0, 1],
                   [0, 1, 0],
                   [0, 0, 0, 1, 1],
                   [1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
                   [1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
                   [1, 1, 0, 3, 1, 1, 1, 1, 1, 0, 1, 0],
                   ]
    for i, car_pos in enumerate(all_car_pos):
        # Bug fix: remember the speed marker *before* car_pos is replaced
        # below.  The original compared car_pos == [-2, -2, -2] after the
        # rewrite, so the speed-26 branch could never trigger.
        wants_fast_speed = (car_pos == [-2, -2, -2])
        if car_pos == [-1, -1, -1] or car_pos == [-2, -2, -2]:
            # Placeholder: spawn 16 m ahead of the previous entry, left lane.
            car_pos = all_car_pos[i - 1]
            orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
            vehicle_waypoint = self._map.get_waypoint(orig_actor_location).next(16)[0].get_left_lane()
            actor_location = vehicle_waypoint.transform.location
        else:
            actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
            vehicle_waypoint = self._map.get_waypoint(actor_location)
        # Re-query each iteration so cars spawned earlier in this call are
        # counted in the 30 m clearance check.
        world_actors = self.world.get_actors().filter('vehicle.*')
        flag_spawn = True
        for adversary in world_actors:
            if actor_location.distance(adversary.get_location()) < 30:
                flag_spawn = False
        if flag_spawn:
            vehicle_transform = carla.Transform(vehicle_waypoint.transform.location,
                                                vehicle_waypoint.transform.rotation)
            try:
                vehicle = self.world.spawn_actor(
                    self.blueprints[np.random.randint(0, len(self.blueprints))],
                    vehicle_transform)
            except Exception:  # narrowed from a bare except
                print('generate_car() Failed!', actor_location)
            else:
                _vehicle_speed = 26 if wants_fast_speed else 25
                additional_zombie_car.append(vehicle)
                additional_zombie_car_speed.append(_vehicle_speed)
                additional_pattern.append(all_pattern[i])
                additional_actor_location.append(actor_location)
                self.zombie_cars.append(vehicle)
    # Attach a waypoint-follower planner to every car spawned above.
    for (one_zombie_car, one_zombie_car_speed, one_pattern, one_actor_location) in zip(
            additional_zombie_car, additional_zombie_car_speed, additional_pattern,
            additional_actor_location):
        vehicle_planner = WaypointFollower_FullMap(actor=one_zombie_car, map=self._map,
                                                   actor_location=one_actor_location,
                                                   target_speed=one_zombie_car_speed,
                                                   avoid_collision=True, pattern_1=one_pattern,
                                                   world=self.world)
        self.vehicle_planners.append(vehicle_planner)
        vehicle_planner.setup()
def _update(self):
    """Advance the scenario one step: top up zombie cars, then tick planners."""
    self.generate_car()
    for follower in self.vehicle_planners:
        follower.update()
def restart(self):
    """Reset the scenario.

    When only_reset_hero is set, respawn just the hero car at the first
    shuffled candidate position that is at least 10 m away from every
    existing vehicle.  Otherwise destroy all actors and rebuild the full
    scenario from scratch.
    """
    if self.only_reset_hero:
        random.shuffle(self.hero_car_pos_candidate)
        world_actors = self.world.get_actors().filter('vehicle.*')
        for hero_car_pos in self.hero_car_pos_candidate:
            wp_location = carla.Location(x=hero_car_pos[0], y=hero_car_pos[1], z=hero_car_pos[2])
            # Require 10 m clearance from all existing vehicles.
            flag_spawn = True
            for adversary in world_actors:
                if wp_location.distance(adversary.get_location()) < 10:
                    flag_spawn = False
            if flag_spawn:
                wp = self._map.get_waypoint(wp_location)
                hero_vehicle_transform = wp.transform
                hero_model = 'vehicle.lincoln.mkz2017'
                blueprint = random.choice(self.blueprint_library.filter(hero_model))
                blueprint.set_attribute('role_name', 'hero')
                # try_spawn_actor may still return None on failure;
                # only the first clear position is attempted.
                self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
                break
    else:
        self._remove_all_actors()
        self.zombie_cars = list()
        self.vehicle_planners = list()
        self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
# actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
class Cross_Join(object):
def __init__(self, name, map, world, only_reset_hero=False):
    """Build the Cross_Join scenario.

    :param name: scenario name.
    :param map: map object used for waypoint lookups (carla.Map —
        used via get_waypoint() in _scenario_init).
    :param world: world the actors are spawned into (carla.World).
    :param only_reset_hero: when True, restart() respawns only the
        hero car instead of rebuilding the whole scenario.
    """
    self.name = name
    self._map = map
    self.world = world
    # Chosen zombie-car target speed; set by _scenario_init.
    self.speed = 0
    self.only_reset_hero = only_reset_hero
    self.blueprint_library = self.world.get_blueprint_library()
    # Spawn hero and zombie cars immediately.
    self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
self.hero_car_pos = [-42.350990295410156, -2.835118293762207, 1.8431016206741333]
# self.hero_car_pos = [-74.38717651367188, 57.531620025634766, 1.805267095565796] # 13
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2]+10)
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
'vehicle.tesla.model3', 'vehicle.nissan.micra',
'vehicle.audi.a2',
]
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.blueprints = blueprints
self.models = models
# Not available: 135, 160
fourth_car_pos = [-74.38717651367188, 57.531620025634766, 1.805267095565796] # 15
fourth_wp_location = carla.Location(x=fourth_car_pos[0], y=fourth_car_pos[1], z=fourth_car_pos[2]+10)
fourth_vehicle_waypoint = self._map.get_waypoint(fourth_wp_location)
fourth_vehicle_transform = carla.Transform(fourth_vehicle_waypoint.transform.location,
fourth_vehicle_waypoint.transform.rotation)
self.fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fourth_vehicle_transform)
# setup local planners for zombie cars
# speed_list = [21, 25, 31]
# speed = random.choice([21, 25, 31])
speed = 21
#speed = random.choice([31])
# speed = random.choice([25])
# speed = random.choice([21])
self.speed = speed
print('Velocity: ', self.speed)
self._fourth_vehicle_speed = speed #random.choice([21, 27, 31])
fifth_car_pos = [-74.38717651367188, 77.64903259277344, 1.8052573204040527] # 25
fifth_wp_location = carla.Location(x=fifth_car_pos[0], y=fifth_car_pos[1], z=fifth_car_pos[2]+10)
fifth_vehicle_waypoint = self._map.get_waypoint(fifth_wp_location)
fifth_vehicle_transform = carla.Transform(fifth_vehicle_waypoint.transform.location,
fifth_vehicle_waypoint.transform.rotation)
self.fifth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fifth_vehicle_transform)
# setup local planners for zombie cars
self._fifth_vehicle_speed = speed-1 #random.choice([21, 27, 31])
sixth_car_pos = [-74.38717651367188, 97.71611022949219, 1.8052573204040527] # 27
sixth_wp_location = carla.Location(x=sixth_car_pos[0], y=sixth_car_pos[1], z=sixth_car_pos[2]+10)
sixth_vehicle_waypoint = self._map.get_waypoint(sixth_wp_location)
sixth_vehicle_transform = carla.Transform(sixth_vehicle_waypoint.transform.location,
sixth_vehicle_waypoint.transform.rotation)
self.sixth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], sixth_vehicle_transform)
# setup local planners for zombie cars
self._sixth_vehicle_speed = speed-1 #random.choice([21, 27, 31])
self.zombie_cars = [self.fourth_vehicle, self.fifth_vehicle, self.sixth_vehicle]
fourth_vehicle_planner = WaypointFollower_FullMap(actor=self.fourth_vehicle,
actor_location=fourth_wp_location,
target_speed=self._fourth_vehicle_speed,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 1, 1, 1, 1, 0, 0, 1, ],
world=self.world)
fifth_vehicle_planner = WaypointFollower_FullMap(actor=self.fifth_vehicle,
actor_location=fifth_wp_location,
target_speed=self._fifth_vehicle_speed,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 1, 1, 1, 1, 0, 0, 1, ],
world=self.world)
sixth_vehicle_planner = WaypointFollower_FullMap(actor=self.sixth_vehicle,
actor_location=sixth_wp_location,
target_speed=self._sixth_vehicle_speed,
map=self._map,
avoid_collision=False,
pattern_1=[1, 1, 1, 1, 1, 1, 0, 0, 1, ],
world=self.world)
self.vehicle_planners = [fourth_vehicle_planner, fifth_vehicle_planner, sixth_vehicle_planner]
for planner in self.vehicle_planners:
planner.setup()
def generate_car(self):
additional_zombie_car = list()
additional_zombie_car_speed = list()
additional_pattern = list()
additional_actor_location = list()
# all_car_pos = [[-74.38717651367188, 57.531620025634766, 1.805267095565796],
# [-74.38717651367188, 75.64903259277344, 1.8052573204040527],
# [-74.38717651367188, 99.71611022949219, 1.8052573204040527]]
all_car_pos = []
all_pattern = [[1, 1, 1, 1, 1, 1, 0, 0, 1, ], [1, 1, 1, 1, 1, 1, 0, 0, 1, ], [1, 1, 1, 1, 1, 1, 0, 0, 1, ], ]
for i, car_pos in enumerate(all_car_pos):
if car_pos == [-1, -1, -1] or car_pos == [-2, -2, -2]:
# car_pos == [-2, -2, -2]: get_left_lane(), speed=26
# car_pos == [-1, -1, -1]: get_left_lane()
car_pos = all_car_pos[i - 1]
orig_actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(orig_actor_location).next(16)[0].get_left_lane()
actor_location = vehicle_waypoint.transform.location
else:
actor_location = carla.Location(x=car_pos[0], y=car_pos[1], z=car_pos[2])
vehicle_waypoint = self._map.get_waypoint(actor_location)
world_actors = self.world.get_actors().filter('vehicle.*')
flag_spawn = True
for adversary in world_actors:
if actor_location.distance(adversary.get_location()) < 5:
flag_spawn = False
if flag_spawn:
vehicle_transform = carla.Transform(vehicle_waypoint.transform.location,
vehicle_waypoint.transform.rotation)
vehicle = self.world.try_spawn_actor(self.blueprints[np.random.randint(0, len(self.blueprints))],
vehicle_transform)
if car_pos == [-2, -2, -2]:
_vehicle_speed = 26
else:
_vehicle_speed = 25
self.speed = _vehicle_speed
additional_zombie_car.append(vehicle)
additional_zombie_car_speed.append(_vehicle_speed)
additional_pattern.append(all_pattern[i])
additional_actor_location.append(actor_location)
self.zombie_cars.append(vehicle)
for i, (one_zombie_car, one_zombie_car_speed, one_pattern, one_actor_location) in enumerate(
zip(additional_zombie_car, additional_zombie_car_speed, additional_pattern, additional_actor_location)):
vehicle_planner = WaypointFollower_FullMap(actor=one_zombie_car, map=self._map,
actor_location=one_actor_location,
target_speed=one_zombie_car_speed,
avoid_collision=True, pattern_1=one_pattern,
world=self.world)
self.vehicle_planners.append(vehicle_planner)
vehicle_planner.setup()
def _update(self):
self.generate_car()
for planner in self.vehicle_planners:
planner.update()
def restart(self):
if self.only_reset_hero:
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
else:
self._remove_all_actors()
# self.zombie_cars = list()
# self.vehicle_planners = list()
self._scenario_init()
def _remove_all_actors(self):
actors = [self.hero_car] + self.zombie_cars
# actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
def _remove_zombie_cars(self):
actors = self.zombie_cars
for actor in actors:
if actor.is_alive:
actor.destroy()
class Ring_Join(object):
def __init__(self, name, map, world, only_reset_hero=False):
self.name = name
self._map = map
self.world = world
self.only_reset_hero = only_reset_hero
self.speed = 0
self.blueprint_library = self.world.get_blueprint_library()
self._scenario_init()
def _scenario_init(self):
# init hero car
# --------------------------------------------------------
# setup cars on a given waypoint
self.hero_car_pos = [52.61453628540039, -7.843905448913574, 1.8431028127670288] # 55
wp_location = carla.Location(x=self.hero_car_pos[0], y=self.hero_car_pos[1], z=self.hero_car_pos[2])
wp = self._map.get_waypoint(wp_location)
hero_vehicle_transform = wp.transform
hero_model = 'vehicle.lincoln.mkz2017'
blueprint = random.choice(self.blueprint_library.filter(hero_model))
blueprint.set_attribute('role_name', 'hero')
self.hero_car = self.world.try_spawn_actor(blueprint, hero_vehicle_transform)
models = ['vehicle.nissan.patrol', 'vehicle.audi.tt',
'vehicle.lincoln.mkz2017', 'vehicle.volkswagen.t2',
'vehicle.tesla.model3', 'vehicle.nissan.micra',
'vehicle.audi.a2',
]
blueprints = [random.choice(self.world.get_blueprint_library().filter(model)) for model in models]
for blueprint in blueprints:
blueprint.set_attribute('role_name', 'scenario')
self.blueprints = blueprints
self.models = models
# Not available: 135, 160
fourth_car_pos = [4.926102638244629, 40.57860565185547, 1.8431016206741333] # 145
fourth_wp_location = carla.Location(x=fourth_car_pos[0], y=fourth_car_pos[1], z=fourth_car_pos[2])
fourth_vehicle_waypoint = self._map.get_waypoint(fourth_wp_location)
fourth_vehicle_transform = carla.Transform(fourth_vehicle_waypoint.transform.location,
fourth_vehicle_waypoint.transform.rotation)
self.fourth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fourth_vehicle_transform)
# setup local planners for zombie cars
# velocity = random.choice([21, 25, 31]) # default is 21, 27, 31
velocity = 25
print('velocity: ', velocity)
self._fourth_vehicle_speed = velocity
self.speed = velocity
fifth_car_pos = [4.926102638244629, 59.08685302734375, 1.8430894613265991] # 47
fifth_wp_location = carla.Location(x=fifth_car_pos[0], y=fifth_car_pos[1], z=fifth_car_pos[2])
fifth_vehicle_waypoint = self._map.get_waypoint(fifth_wp_location)
fifth_vehicle_transform = carla.Transform(fifth_vehicle_waypoint.transform.location,
fifth_vehicle_waypoint.transform.rotation)
self.fifth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], fifth_vehicle_transform)
# setup local planners for zombie cars
self._fifth_vehicle_speed = velocity-1
sixth_car_pos = [4.926102638244629, 72.03030395507812, 1.843079686164856] # 49
sixth_wp_location = carla.Location(x=sixth_car_pos[0], y=sixth_car_pos[1], z=sixth_car_pos[2])
sixth_vehicle_waypoint = self._map.get_waypoint(sixth_wp_location)
sixth_vehicle_transform = carla.Transform(sixth_vehicle_waypoint.transform.location,
sixth_vehicle_waypoint.transform.rotation)
self.sixth_vehicle = self.world.try_spawn_actor(blueprints[4 % len(models)], | |
<filename>libcloud/storage/drivers/local.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides storage driver for working with local filesystem
"""
from __future__ import with_statement
import errno
import os
import shutil
import sys
try:
import lockfile
from lockfile import LockTimeout, mkdirlockfile
except ImportError:
raise ImportError('Missing lockfile dependency, you can install it '
'using pip: pip install lockfile')
from libcloud.utils.files import read_in_chunks
from libcloud.utils.py3 import relpath
from libcloud.utils.py3 import u
from libcloud.common.base import Connection
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.common.types import LibcloudError
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ObjectError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import InvalidContainerNameError
IGNORE_FOLDERS = ['.lock', '.hash']
class LockLocalStorage(object):
    """
    A class to help in locking a local path before being updated.

    Used as a context manager: the lock is acquired on ``__enter__`` and
    released on ``__exit__``.
    """
    def __init__(self, path):
        self.path = path
        # One mkdir-based lock per path; threaded=True makes the lock safe
        # across threads of the same process as well as across processes.
        self.lock = mkdirlockfile.MkdirLockFile(self.path, threaded=True)

    def __enter__(self):
        try:
            self.lock.acquire(timeout=0.1)
        except LockTimeout:
            raise LibcloudError('Lock timeout')
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if self.lock.is_locked():
            self.lock.release()
        # Return a falsy value so any in-flight exception propagates with
        # its original traceback.  (The previous explicit ``raise value``
        # re-raised the instance from inside __exit__, obscuring the real
        # traceback; it also shadowed the ``type`` builtin.)
        return False
class LocalStorageDriver(StorageDriver):
"""
Implementation of local file-system based storage. This is helpful
where the user would want to use the same code (using libcloud) and
switch between cloud storage and local storage
"""
connectionCls = Connection
name = 'Local Storage'
website = 'http://example.com'
hash_type = 'md5'
def __init__(self, key, secret=None, secure=True, host=None, port=None,
**kwargs):
# Use the key as the path to the storage
self.base_path = key
if not os.path.isdir(self.base_path):
raise LibcloudError('The base path is not a directory')
super(LocalStorageDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port, **kwargs)
def _make_path(self, path, ignore_existing=True):
"""
Create a path by checking if it already exists
"""
try:
os.makedirs(path)
except OSError:
exp = sys.exc_info()[1]
if exp.errno == errno.EEXIST and not ignore_existing:
raise exp
def _check_container_name(self, container_name):
"""
Check if the container name is valid
:param container_name: Container name
:type container_name: ``str``
"""
if '/' in container_name or '\\' in container_name:
raise InvalidContainerNameError(value=None, driver=self,
container_name=container_name)
def _make_container(self, container_name):
"""
Create a container instance
:param container_name: Container name.
:type container_name: ``str``
:return: Container instance.
:rtype: :class:`Container`
"""
self._check_container_name(container_name)
full_path = os.path.join(self.base_path, container_name)
try:
stat = os.stat(full_path)
if not os.path.isdir(full_path):
raise OSError('Target path is not a directory')
except OSError:
raise ContainerDoesNotExistError(value=None, driver=self,
container_name=container_name)
extra = {}
extra['creation_time'] = stat.st_ctime
extra['access_time'] = stat.st_atime
extra['modify_time'] = stat.st_mtime
return Container(name=container_name, extra=extra, driver=self)
def _make_object(self, container, object_name):
"""
Create an object instance
:param container: Container.
:type container: :class:`Container`
:param object_name: Object name.
:type object_name: ``str``
:return: Object instance.
:rtype: :class:`Object`
"""
full_path = os.path.join(self.base_path, container.name, object_name)
if os.path.isdir(full_path):
raise ObjectError(value=None, driver=self, object_name=object_name)
try:
stat = os.stat(full_path)
except Exception:
raise ObjectDoesNotExistError(value=None, driver=self,
object_name=object_name)
# Make a hash for the file based on the metadata. We can safely
# use only the mtime attribute here. If the file contents change,
# the underlying file-system will change mtime
data_hash = self._get_hash_function()
data_hash.update(u(stat.st_mtime).encode('ascii'))
data_hash = data_hash.hexdigest()
extra = {}
extra['creation_time'] = stat.st_ctime
extra['access_time'] = stat.st_atime
extra['modify_time'] = stat.st_mtime
return Object(name=object_name, size=stat.st_size, extra=extra,
driver=self, container=container, hash=data_hash,
meta_data=None)
def iterate_containers(self):
"""
Return a generator of containers.
:return: A generator of Container instances.
:rtype: ``generator`` of :class:`Container`
"""
for container_name in os.listdir(self.base_path):
full_path = os.path.join(self.base_path, container_name)
if not os.path.isdir(full_path):
continue
yield self._make_container(container_name)
def _get_objects(self, container):
"""
Recursively iterate through the file-system and return the object names
"""
cpath = self.get_container_cdn_url(container, check=True)
for folder, subfolders, files in os.walk(cpath, topdown=True):
# Remove unwanted subfolders
for subf in IGNORE_FOLDERS:
if subf in subfolders:
subfolders.remove(subf)
for name in files:
full_path = os.path.join(folder, name)
object_name = relpath(full_path, start=cpath)
yield self._make_object(container, object_name)
def iterate_container_objects(self, container):
"""
Returns a generator of objects for the given container.
:param container: Container instance
:type container: :class:`Container`
:return: A generator of Object instances.
:rtype: ``generator`` of :class:`Object`
"""
return self._get_objects(container)
def get_container(self, container_name):
"""
Return a container instance.
:param container_name: Container name.
:type container_name: ``str``
:return: :class:`Container` instance.
:rtype: :class:`Container`
"""
return self._make_container(container_name)
def get_container_cdn_url(self, container, check=False):
"""
Return a container CDN URL.
:param container: Container instance
:type container: :class:`Container`
:param check: Indicates if the path's existence must be checked
:type check: ``bool``
:return: A CDN URL for this container.
:rtype: ``str``
"""
path = os.path.join(self.base_path, container.name)
if check and not os.path.isdir(path):
raise ContainerDoesNotExistError(value=None, driver=self,
container_name=container.name)
return path
def get_object(self, container_name, object_name):
"""
Return an object instance.
:param container_name: Container name.
:type container_name: ``str``
:param object_name: Object name.
:type object_name: ``str``
:return: :class:`Object` instance.
:rtype: :class:`Object`
"""
container = self._make_container(container_name)
return self._make_object(container, object_name)
def get_object_cdn_url(self, obj):
"""
Return an object CDN URL.
:param obj: Object instance
:type obj: :class:`Object`
:return: A CDN URL for this object.
:rtype: ``str``
"""
return os.path.join(self.base_path, obj.container.name, obj.name)
def enable_container_cdn(self, container):
"""
Enable container CDN.
:param container: Container instance
:type container: :class:`Container`
:rtype: ``bool``
"""
path = self.get_container_cdn_url(container)
lockfile.MkdirFileLock(path, threaded=True)
with LockLocalStorage(path):
self._make_path(path)
return True
def enable_object_cdn(self, obj):
"""
Enable object CDN.
:param obj: Object instance
:type obj: :class:`Object`
:rtype: ``bool``
"""
path = self.get_object_cdn_url(obj)
with LockLocalStorage(path):
if os.path.exists(path):
return False
try:
obj_file = open(path, 'w')
obj_file.close()
except:
return False
return True
def download_object(self, obj, destination_path, overwrite_existing=False,
delete_on_failure=True):
"""
Download an object to the specified destination path.
:param obj: Object instance.
:type obj: :class:`Object`
:param destination_path: Full path to a file or a directory where the
incoming file will be saved.
:type destination_path: ``str``
:param overwrite_existing: True to overwrite an existing file,
defaults to False.
:type overwrite_existing: ``bool``
:param delete_on_failure: True to delete a partially downloaded file if
the download was not successful (hash mismatch / file size).
:type delete_on_failure: ``bool``
:return: True if an object has been successfully downloaded, False
otherwise.
:rtype: ``bool``
"""
obj_path = self.get_object_cdn_url(obj)
base_name = os.path.basename(destination_path)
if not base_name and not os.path.exists(destination_path):
raise LibcloudError(
value='Path %s does not exist' % (destination_path),
driver=self)
if not base_name:
file_path = os.path.join(destination_path, obj.name)
else:
file_path = destination_path
if os.path.exists(file_path) and not overwrite_existing:
raise LibcloudError(
value='File %s already exists, but ' % (file_path) +
'overwrite_existing=False',
driver=self)
try:
shutil.copy(obj_path, file_path)
except IOError:
if delete_on_failure:
try:
os.unlink(file_path)
except Exception:
pass
return False
return True
def download_object_as_stream(self, obj, chunk_size=None):
"""
Return a generator which yields object data.
:param obj: Object instance
:type obj: :class:`Object`
:param chunk_size: Optional chunk size (in bytes).
:type chunk_size: ``int``
:rtype: ``object``
"""
path = self.get_object_cdn_url(obj)
with open(path) as obj_file:
for data in read_in_chunks(obj_file, chunk_size=chunk_size):
yield data
def upload_object(self, file_path, container, object_name, extra=None,
verify_hash=True):
"""
Upload an object currently located on a disk.
:param file_path: Path to the object on disk.
:type file_path: ``str``
:param container: Destination container.
:type container: :class:`Container`
:param object_name: Object name.
:type object_name: ``str``
:param verify_hash: Verify hast
:type verify_hash: ``bool``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: ``object``
"""
path = self.get_container_cdn_url(container, check=True)
obj_path = os.path.join(path, object_name)
base_path = os.path.dirname(obj_path)
self._make_path(base_path)
with LockLocalStorage(obj_path):
shutil.copy(file_path, obj_path)
os.chmod(obj_path, int('664', 8))
return self._make_object(container, object_name)
def upload_object_via_stream(self, iterator, container,
object_name,
extra=None):
"""
Upload an object using an iterator.
If a provider supports it, chunked transfer encoding is used and you
don't need to know in advance the amount of data to be uploaded.
Otherwise if a provider doesn't support it, iterator will be exhausted
so a total size for data to be uploaded can be determined.
Note: Exhausting the iterator means that the whole data must be
buffered in memory which might result in memory exhausting when
uploading a very large object.
If a file is located on a disk you are advised to use upload_object
function which uses fs.stat function to determine | |
self.build(context, token)
return 11
if self.match_TagLine(context, token):
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Rule')
self.start_rule(context, 'RuleHeader')
self.start_rule(context, 'Tags')
self.build(context, token)
return 22
if self.match_ExamplesLine(context, token):
self.start_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'Examples')
self.build(context, token)
return 18
if self.match_ScenarioLine(context, token):
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Scenario')
self.build(context, token)
return 12
if self.match_RuleLine(context, token):
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Rule')
self.start_rule(context, 'RuleHeader')
self.build(context, token)
return 23
if self.match_Empty(context, token):
self.build(context, token)
return 14
state_comment = "State: 14 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0"
token.detach
expected_tokens = ["#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"]
error = UnexpectedEOFException(token, expected_tokens, state_comment) if token.eof() else UnexpectedTokenException(token, expected_tokens, state_comment)
if (self.stop_at_first_error):
raise error
self.add_error(context, error)
return 14
    # GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0
    def match_token_at_15(self, token, context):
        """Generated state-machine transition for state 15 (inside a
        Scenario, just after a step line).

        Tries each acceptable token type in order; on a match it
        closes/opens the corresponding AST rules, records the token via
        ``build`` and returns the next state number.  If nothing matches,
        an error for the unexpected token is raised or collected and the
        parser stays in state 15.
        """
        if self.match_EOF(context, token):
            self.end_rule(context, 'Step')
            self.end_rule(context, 'Scenario')
            self.end_rule(context, 'ScenarioDefinition')
            self.end_rule(context, 'Feature')
            self.build(context, token)
            return 42
        if self.match_TableRow(context, token):
            self.start_rule(context, 'DataTable')
            self.build(context, token)
            return 16
        if self.match_DocStringSeparator(context, token):
            self.start_rule(context, 'DocString')
            self.build(context, token)
            return 47
        if self.match_StepLine(context, token):
            self.end_rule(context, 'Step')
            self.start_rule(context, 'Step')
            self.build(context, token)
            return 15
        # Three TagLine branches: lookahead decides whether the tags open
        # an Examples block, a new ScenarioDefinition, or a Rule.
        if self.match_TagLine(context, token):
            if self.lookahead_1(context, token):
                self.end_rule(context, 'Step')
                self.start_rule(context, 'ExamplesDefinition')
                self.start_rule(context, 'Tags')
                self.build(context, token)
                return 17
        if self.match_TagLine(context, token):
            if self.lookahead_0(context, token):
                self.end_rule(context, 'Step')
                self.end_rule(context, 'Scenario')
                self.end_rule(context, 'ScenarioDefinition')
                self.start_rule(context, 'ScenarioDefinition')
                self.start_rule(context, 'Tags')
                self.build(context, token)
                return 11
        if self.match_TagLine(context, token):
            self.end_rule(context, 'Step')
            self.end_rule(context, 'Scenario')
            self.end_rule(context, 'ScenarioDefinition')
            self.start_rule(context, 'Rule')
            self.start_rule(context, 'RuleHeader')
            self.start_rule(context, 'Tags')
            self.build(context, token)
            return 22
        if self.match_ExamplesLine(context, token):
            self.end_rule(context, 'Step')
            self.start_rule(context, 'ExamplesDefinition')
            self.start_rule(context, 'Examples')
            self.build(context, token)
            return 18
        if self.match_ScenarioLine(context, token):
            self.end_rule(context, 'Step')
            self.end_rule(context, 'Scenario')
            self.end_rule(context, 'ScenarioDefinition')
            self.start_rule(context, 'ScenarioDefinition')
            self.start_rule(context, 'Scenario')
            self.build(context, token)
            return 12
        if self.match_RuleLine(context, token):
            self.end_rule(context, 'Step')
            self.end_rule(context, 'Scenario')
            self.end_rule(context, 'ScenarioDefinition')
            self.start_rule(context, 'Rule')
            self.start_rule(context, 'RuleHeader')
            self.build(context, token)
            return 23
        if self.match_Comment(context, token):
            self.build(context, token)
            return 15
        if self.match_Empty(context, token):
            self.build(context, token)
            return 15
        # No token matched: report and stay in this state.
        state_comment = "State: 15 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0"
        # NOTE(review): "token.detach" is a bare attribute access, not a
        # call — as written it has no effect; confirm against the parser
        # generator template whether "token.detach()" was intended.
        token.detach
        expected_tokens = ["#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"]
        error = UnexpectedEOFException(token, expected_tokens, state_comment) if token.eof() else UnexpectedTokenException(token, expected_tokens, state_comment)
        if (self.stop_at_first_error):
            raise error
        self.add_error(context, error)
        return 15
    # GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0
    def match_token_at_16(self, token, context):
        """Generated state-machine transition for state 16 (inside a
        step's DataTable argument, after a table row).

        Tries each acceptable token type in order; on a match it
        closes/opens the corresponding AST rules, records the token via
        ``build`` and returns the next state number.  If nothing matches,
        an error for the unexpected token is raised or collected and the
        parser stays in state 16.
        """
        if self.match_EOF(context, token):
            self.end_rule(context, 'DataTable')
            self.end_rule(context, 'Step')
            self.end_rule(context, 'Scenario')
            self.end_rule(context, 'ScenarioDefinition')
            self.end_rule(context, 'Feature')
            self.build(context, token)
            return 42
        if self.match_TableRow(context, token):
            self.build(context, token)
            return 16
        if self.match_StepLine(context, token):
            self.end_rule(context, 'DataTable')
            self.end_rule(context, 'Step')
            self.start_rule(context, 'Step')
            self.build(context, token)
            return 15
        # Three TagLine branches: lookahead decides whether the tags open
        # an Examples block, a new ScenarioDefinition, or a Rule.
        if self.match_TagLine(context, token):
            if self.lookahead_1(context, token):
                self.end_rule(context, 'DataTable')
                self.end_rule(context, 'Step')
                self.start_rule(context, 'ExamplesDefinition')
                self.start_rule(context, 'Tags')
                self.build(context, token)
                return 17
        if self.match_TagLine(context, token):
            if self.lookahead_0(context, token):
                self.end_rule(context, 'DataTable')
                self.end_rule(context, 'Step')
                self.end_rule(context, 'Scenario')
                self.end_rule(context, 'ScenarioDefinition')
                self.start_rule(context, 'ScenarioDefinition')
                self.start_rule(context, 'Tags')
                self.build(context, token)
                return 11
        if self.match_TagLine(context, token):
            self.end_rule(context, 'DataTable')
            self.end_rule(context, 'Step')
            self.end_rule(context, 'Scenario')
            self.end_rule(context, 'ScenarioDefinition')
            self.start_rule(context, 'Rule')
            self.start_rule(context, 'RuleHeader')
            self.start_rule(context, 'Tags')
            self.build(context, token)
            return 22
        if self.match_ExamplesLine(context, token):
            self.end_rule(context, 'DataTable')
            self.end_rule(context, 'Step')
            self.start_rule(context, 'ExamplesDefinition')
            self.start_rule(context, 'Examples')
            self.build(context, token)
            return 18
        if self.match_ScenarioLine(context, token):
            self.end_rule(context, 'DataTable')
            self.end_rule(context, 'Step')
            self.end_rule(context, 'Scenario')
            self.end_rule(context, 'ScenarioDefinition')
            self.start_rule(context, 'ScenarioDefinition')
            self.start_rule(context, 'Scenario')
            self.build(context, token)
            return 12
        if self.match_RuleLine(context, token):
            self.end_rule(context, 'DataTable')
            self.end_rule(context, 'Step')
            self.end_rule(context, 'Scenario')
            self.end_rule(context, 'ScenarioDefinition')
            self.start_rule(context, 'Rule')
            self.start_rule(context, 'RuleHeader')
            self.build(context, token)
            return 23
        if self.match_Comment(context, token):
            self.build(context, token)
            return 16
        if self.match_Empty(context, token):
            self.build(context, token)
            return 16
        # No token matched: report and stay in this state.
        state_comment = "State: 16 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0"
        # NOTE(review): "token.detach" is a bare attribute access, not a
        # call — as written it has no effect; confirm against the parser
        # generator template whether "token.detach()" was intended.
        token.detach
        expected_tokens = ["#EOF", "#TableRow", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"]
        error = UnexpectedEOFException(token, expected_tokens, state_comment) if token.eof() else UnexpectedTokenException(token, expected_tokens, state_comment)
        if (self.stop_at_first_error):
            raise error
        self.add_error(context, error)
        return 16
    # GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0
    def match_token_at_17(self, token, context):
        """Generated state-machine transition for state 17 (collecting
        tags that precede an Examples block).

        Accepts further tags, comments and blank lines; an ExamplesLine
        closes the Tags rule and opens the Examples rule.  Anything else
        is reported as an unexpected token and the parser stays in
        state 17.
        """
        if self.match_TagLine(context, token):
            self.build(context, token)
            return 17
        if self.match_ExamplesLine(context, token):
            self.end_rule(context, 'Tags')
            self.start_rule(context, 'Examples')
            self.build(context, token)
            return 18
        if self.match_Comment(context, token):
            self.build(context, token)
            return 17
        if self.match_Empty(context, token):
            self.build(context, token)
            return 17
        # No token matched: report and stay in this state.
        state_comment = "State: 17 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0"
        # NOTE(review): "token.detach" is a bare attribute access, not a
        # call — as written it has no effect; confirm against the parser
        # generator template whether "token.detach()" was intended.
        token.detach
        expected_tokens = ["#TagLine", "#ExamplesLine", "#Comment", "#Empty"]
        error = UnexpectedEOFException(token, expected_tokens, state_comment) if token.eof() else UnexpectedTokenException(token, expected_tokens, state_comment)
        if (self.stop_at_first_error):
            raise error
        self.add_error(context, error)
        return 17
# GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0
def match_token_at_18(self, token, context):
if self.match_EOF(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.end_rule(context, 'Feature')
self.build(context, token)
return 42
if self.match_Empty(context, token):
self.build(context, token)
return 18
if self.match_Comment(context, token):
self.build(context, token)
return 20
if self.match_TableRow(context, token):
self.start_rule(context, 'ExamplesTable')
self.build(context, token)
return 21
if self.match_TagLine(context, token):
if self.lookahead_1(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'Tags')
self.build(context, token)
return 17
if self.match_TagLine(context, token):
if self.lookahead_0(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Tags')
self.build(context, token)
return 11
if self.match_TagLine(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Rule')
self.start_rule(context, 'RuleHeader')
self.start_rule(context, 'Tags')
self.build(context, token)
return 22
if self.match_ExamplesLine(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'Examples')
self.build(context, token)
return 18
if self.match_ScenarioLine(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Scenario')
self.build(context, token)
return 12
if self.match_RuleLine(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Rule')
self.start_rule(context, 'RuleHeader')
self.build(context, token)
return 23
if self.match_Other(context, token):
self.start_rule(context, 'Description')
self.build(context, token)
return 19
state_comment = "State: 18 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0"
token.detach
expected_tokens = ["#EOF", "#Empty", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"]
error = UnexpectedEOFException(token, expected_tokens, state_comment) if token.eof() else UnexpectedTokenException(token, expected_tokens, state_comment)
if (self.stop_at_first_error):
raise error
self.add_error(context, error)
return 18
# GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0
def match_token_at_19(self, token, context):
if self.match_EOF(context, token):
self.end_rule(context, 'Description')
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.end_rule(context, 'Feature')
self.build(context, token)
return 42
if self.match_Comment(context, token):
self.end_rule(context, 'Description')
self.build(context, token)
return 20
if self.match_TableRow(context, token):
self.end_rule(context, 'Description')
self.start_rule(context, 'ExamplesTable')
self.build(context, token)
return 21
if self.match_TagLine(context, token):
if self.lookahead_1(context, token):
self.end_rule(context, 'Description')
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'Tags')
self.build(context, token)
return 17
if self.match_TagLine(context, token):
if self.lookahead_0(context, token):
self.end_rule(context, 'Description')
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Tags')
self.build(context, token)
return 11
if self.match_TagLine(context, token):
self.end_rule(context, 'Description')
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Rule')
self.start_rule(context, 'RuleHeader')
self.start_rule(context, 'Tags')
self.build(context, token)
return 22
if self.match_ExamplesLine(context, token):
self.end_rule(context, 'Description')
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'Examples')
self.build(context, token)
return 18
if self.match_ScenarioLine(context, token):
self.end_rule(context, 'Description')
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Scenario')
self.build(context, token)
return 12
if self.match_RuleLine(context, token):
self.end_rule(context, 'Description')
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Rule')
self.start_rule(context, 'RuleHeader')
self.build(context, token)
return 23
if self.match_Other(context, token):
self.build(context, token)
return 19
state_comment = "State: 19 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0"
token.detach
expected_tokens = ["#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"]
error = UnexpectedEOFException(token, expected_tokens, state_comment) if token.eof() else UnexpectedTokenException(token, expected_tokens, state_comment)
if (self.stop_at_first_error):
raise error
self.add_error(context, error)
return 19
# GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0
def match_token_at_20(self, token, context):
if self.match_EOF(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.end_rule(context, 'Feature')
self.build(context, token)
return 42
if self.match_Comment(context, token):
self.build(context, token)
return 20
if self.match_TableRow(context, token):
self.start_rule(context, 'ExamplesTable')
self.build(context, token)
return 21
if self.match_TagLine(context, token):
if self.lookahead_1(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'Tags')
self.build(context, token)
return 17
if self.match_TagLine(context, token):
if self.lookahead_0(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Tags')
self.build(context, token)
return 11
if self.match_TagLine(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Rule')
self.start_rule(context, 'RuleHeader')
self.start_rule(context, 'Tags')
self.build(context, token)
return 22
if self.match_ExamplesLine(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'Examples')
self.build(context, token)
return 18
if self.match_ScenarioLine(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Scenario')
self.build(context, token)
return 12
if self.match_RuleLine(context, token):
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Rule')
self.start_rule(context, 'RuleHeader')
self.build(context, token)
return 23
if self.match_Empty(context, token):
self.build(context, token)
return 20
state_comment = "State: 20 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0"
token.detach
expected_tokens = ["#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"]
error = UnexpectedEOFException(token, expected_tokens, state_comment) if token.eof() else UnexpectedTokenException(token, expected_tokens, state_comment)
if (self.stop_at_first_error):
raise error
self.add_error(context, error)
return 20
# GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0
def match_token_at_21(self, token, context):
if self.match_EOF(context, token):
self.end_rule(context, 'ExamplesTable')
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.end_rule(context, 'Feature')
self.build(context, token)
return 42
if self.match_TableRow(context, token):
self.build(context, token)
return 21
if self.match_TagLine(context, token):
if self.lookahead_1(context, token):
self.end_rule(context, 'ExamplesTable')
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'ExamplesDefinition')
self.start_rule(context, 'Tags')
self.build(context, token)
return 17
if self.match_TagLine(context, token):
if self.lookahead_0(context, token):
self.end_rule(context, 'ExamplesTable')
self.end_rule(context, 'Examples')
self.end_rule(context, 'ExamplesDefinition')
self.end_rule(context, 'Scenario')
self.end_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'ScenarioDefinition')
self.start_rule(context, 'Tags')
self.build(context, token)
return 11
if | |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import defaultdict
from datetime import datetime
from dateutil import relativedelta
from itertools import groupby
from operator import itemgetter
from re import findall as regex_findall, split as regex_split
from odoo import api, fields, models, _, SUPERUSER_ID
from odoo.exceptions import UserError
from odoo.osv import expression
from odoo.tools.float_utils import float_compare, float_round, float_is_zero
# Shared selection values for procurement priority fields, ordered from least
# to most urgent; '1' (Normal) is the conventional default.
PROCUREMENT_PRIORITIES = [('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')]
class StockMove(models.Model):
_name = "stock.move"
_description = "Stock Move"
_order = 'sequence, id'
def _default_group_id(self):
if self.env.context.get('default_picking_id'):
return self.env['stock.picking'].browse(self.env.context['default_picking_id']).group_id.id
return False
name = fields.Char('Description', index=True, required=True)
sequence = fields.Integer('Sequence', default=10)
priority = fields.Selection(PROCUREMENT_PRIORITIES, 'Priority', default='1')
create_date = fields.Datetime('Creation Date', index=True, readonly=True)
date = fields.Datetime(
'Date', default=fields.Datetime.now, index=True, required=True,
states={'done': [('readonly', True)]},
help="Move date: scheduled date until move is done, then date of actual move processing")
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env.company,
index=True, required=True)
date_expected = fields.Datetime(
'Expected Date', default=fields.Datetime.now, index=True, required=True,
states={'done': [('readonly', True)]},
help="Scheduled date for the processing of this move")
product_id = fields.Many2one(
'product.product', 'Product',
check_company=True,
domain="[('type', 'in', ['product', 'consu']), '|', ('company_id', '=', False), ('company_id', '=', company_id)]", index=True, required=True,
states={'done': [('readonly', True)]})
description_picking = fields.Text('Description of Picking')
product_qty = fields.Float(
'Real Quantity', compute='_compute_product_qty', inverse='_set_product_qty',
digits=0, store=True, compute_sudo=True,
help='Quantity in the default UoM of the product')
product_uom_qty = fields.Float(
'Initial Demand',
digits='Product Unit of Measure',
default=0.0, required=True, states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care.")
product_uom = fields.Many2one('uom.uom', 'Unit of Measure', required=True, domain="[('category_id', '=', product_uom_category_id)]")
product_uom_category_id = fields.Many2one(related='product_id.uom_id.category_id')
# TDE FIXME: make it stored, otherwise group will not work
product_tmpl_id = fields.Many2one(
'product.template', 'Product Template',
related='product_id.product_tmpl_id', readonly=False,
help="Technical: used in views")
location_id = fields.Many2one(
'stock.location', 'Source Location',
auto_join=True, index=True, required=True,
check_company=True,
help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations.")
location_dest_id = fields.Many2one(
'stock.location', 'Destination Location',
auto_join=True, index=True, required=True,
check_company=True,
help="Location where the system will stock the finished products.")
partner_id = fields.Many2one(
'res.partner', 'Destination Address ',
states={'done': [('readonly', True)]},
help="Optional address where goods are to be delivered, specifically used for allotment")
move_dest_ids = fields.Many2many(
'stock.move', 'stock_move_move_rel', 'move_orig_id', 'move_dest_id', 'Destination Moves',
copy=False,
help="Optional: next stock move when chaining them")
move_orig_ids = fields.Many2many(
'stock.move', 'stock_move_move_rel', 'move_dest_id', 'move_orig_id', 'Original Move',
copy=False,
help="Optional: previous stock move when chaining them")
picking_id = fields.Many2one('stock.picking', 'Transfer Reference', index=True, states={'done': [('readonly', True)]}, check_company=True)
picking_partner_id = fields.Many2one('res.partner', 'Transfer Destination Address', related='picking_id.partner_id', readonly=False)
note = fields.Text('Notes')
state = fields.Selection([
('draft', 'New'), ('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('partially_available', 'Partially Available'),
('assigned', 'Available'),
('done', 'Done')], string='Status',
copy=False, default='draft', index=True, readonly=True,
help="* New: When the stock move is created and not yet confirmed.\n"
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to be manufactured...\n"
"* Available: When products are reserved, it is set to \'Available\'.\n"
"* Done: When the shipment is processed, the state is \'Done\'.")
price_unit = fields.Float(
'Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing "
"method used is 'average price' or 'real'). Value given in company currency and in product uom.", copy=False) # as it's a technical field, we intentionally don't provide the digits attribute
backorder_id = fields.Many2one('stock.picking', 'Back Order of', related='picking_id.backorder_id', index=True, readonly=False)
origin = fields.Char("Source Document")
procure_method = fields.Selection([
('make_to_stock', 'Default: Take From Stock'),
('make_to_order', 'Advanced: Apply Procurement Rules')], string='Supply Method',
default='make_to_stock', required=True,
help="By default, the system will take from the stock in the source location and passively wait for availability. "
"The other possibility allows you to directly create a procurement on the source location (and thus ignore "
"its current stock) to gather products. If we want to chain moves and have this one to wait for the previous, "
"this second option should be chosen.")
scrapped = fields.Boolean('Scrapped', related='location_dest_id.scrap_location', readonly=True, store=True)
scrap_ids = fields.One2many('stock.scrap', 'move_id')
group_id = fields.Many2one('procurement.group', 'Procurement Group', default=_default_group_id)
rule_id = fields.Many2one(
'stock.rule', 'Stock Rule', ondelete='restrict', help='The stock rule that created this stock move',
check_company=True)
propagate_cancel = fields.Boolean(
'Propagate cancel and split', default=True,
help='If checked, when this move is cancelled, cancel the linked move too')
propagate_date = fields.Boolean(string="Propagate Rescheduling",
help='The rescheduling is propagated to the next move.')
propagate_date_minimum_delta = fields.Integer(string='Reschedule if Higher Than',
help='The change must be higher than this value to be propagated')
delay_alert = fields.Boolean('Alert if Delay')
picking_type_id = fields.Many2one('stock.picking.type', 'Operation Type', check_company=True)
inventory_id = fields.Many2one('stock.inventory', 'Inventory', check_company=True)
move_line_ids = fields.One2many('stock.move.line', 'move_id')
move_line_nosuggest_ids = fields.One2many('stock.move.line', 'move_id', domain=[('product_qty', '=', 0.0)])
origin_returned_move_id = fields.Many2one(
'stock.move', 'Origin return move', copy=False, index=True,
help='Move that created the return move', check_company=True)
returned_move_ids = fields.One2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created from this move')
reserved_availability = fields.Float(
'Quantity Reserved', compute='_compute_reserved_availability',
digits='Product Unit of Measure',
readonly=True, help='Quantity that has already been reserved for this move')
availability = fields.Float(
'Forecasted Quantity', compute='_compute_product_availability',
readonly=True, help='Quantity in stock that can still be reserved for this move')
string_availability_info = fields.Text(
'Availability', compute='_compute_string_qty_information',
readonly=True, help='Show various information on stock availability for this move')
restrict_partner_id = fields.Many2one(
'res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'",
check_company=True)
route_ids = fields.Many2many(
'stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route",
check_company=True)
warehouse_id = fields.Many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any).")
has_tracking = fields.Selection(related='product_id.tracking', string='Product with Tracking')
quantity_done = fields.Float('Quantity Done', compute='_quantity_done_compute', digits='Product Unit of Measure', inverse='_quantity_done_set')
show_operations = fields.Boolean(related='picking_id.picking_type_id.show_operations', readonly=False)
show_details_visible = fields.Boolean('Details Visible', compute='_compute_show_details_visible')
show_reserved_availability = fields.Boolean('From Supplier', compute='_compute_show_reserved_availability')
picking_code = fields.Selection(related='picking_id.picking_type_id.code', readonly=True)
product_type = fields.Selection(related='product_id.type', readonly=True)
additional = fields.Boolean("Whether the move was added after the picking's confirmation", default=False)
is_locked = fields.Boolean(compute='_compute_is_locked', readonly=True)
is_initial_demand_editable = fields.Boolean('Is initial demand editable', compute='_compute_is_initial_demand_editable')
is_quantity_done_editable = fields.Boolean('Is quantity done editable', compute='_compute_is_quantity_done_editable')
reference = fields.Char(compute='_compute_reference', string="Reference", store=True)
has_move_lines = fields.Boolean(compute='_compute_has_move_lines')
package_level_id = fields.Many2one('stock.package_level', 'Package Level', check_company=True, copy=False)
picking_type_entire_packs = fields.Boolean(related='picking_type_id.show_entire_packs', readonly=True)
display_assign_serial = fields.Boolean(compute='_compute_display_assign_serial')
next_serial = fields.Char('First SN')
next_serial_count = fields.Integer('Number of SN')
@api.onchange('product_id', 'picking_type_id')
def onchange_product(self):
if self.product_id:
self.description_picking = self.product_id._get_description(self.picking_type_id)
@api.depends('has_tracking', 'picking_type_id.use_create_lots', 'picking_type_id.use_existing_lots', 'state')
def _compute_display_assign_serial(self):
for move in self:
move.display_assign_serial = (
move.has_tracking == 'serial' and
move.state in ('partially_available', 'assigned', 'confirmed') and
move.picking_type_id.use_create_lots and
not move.picking_type_id.use_existing_lots
)
@api.depends('picking_id.is_locked')
def _compute_is_locked(self):
for move in self:
if move.picking_id:
move.is_locked = move.picking_id.is_locked
else:
move.is_locked = False
    @api.depends('product_id', 'has_tracking')
    def _compute_show_details_visible(self):
        """ According to this field, the button that calls `action_show_details` will be displayed
        to work on a move from its picking form view, or not.
        """
        # Group memberships are per-user, hence identical for every move in
        # the recordset: fetch them once outside the loop.
        has_package = self.user_has_groups('stock.group_tracking_lot')
        multi_locations_enabled = self.user_has_groups('stock.group_stock_multi_locations')
        consignment_enabled = self.user_has_groups('stock.group_tracking_owner')
        show_details_visible = multi_locations_enabled or has_package
        for move in self:
            if not move.product_id:
                move.show_details_visible = False
            else:
                # Visible when (a) some detail matters: consignment on a
                # non-incoming picking, packages/multi-location groups, or a
                # tracked product; AND (b) the move is past draft (or draft on
                # an immediate transfer — NOTE(review): the second disjunct is
                # redundant with `state != 'draft'` covering everything else);
                # AND (c) the operation type does not already show operations.
                # NOTE(review): `is False` deliberately(?) excludes a falsy
                # empty picking_type_id — confirm before simplifying to `not`.
                move.show_details_visible = (((consignment_enabled and move.picking_id.picking_type_id.code != 'incoming') or
                                              show_details_visible or move.has_tracking != 'none') and
                                             (move.state != 'draft' or (move.picking_id.immediate_transfer and move.state == 'draft')) and
                                             move.picking_id.picking_type_id.show_operations is False)
def _compute_show_reserved_availability(self):
""" This field is only of use in an attrs in the picking view, in order to hide the
"available" column if the move is coming from a supplier.
"""
for move in self:
move.show_reserved_availability = not move.location_id.usage == 'supplier'
@api.depends('state', 'picking_id')
def _compute_is_initial_demand_editable(self):
for move in self:
if not move.picking_id.immediate_transfer and move.state == 'draft':
move.is_initial_demand_editable = True
elif not move.picking_id.is_locked and move.state != 'done' and move.picking_id:
move.is_initial_demand_editable = True
else:
move.is_initial_demand_editable = False
@api.depends('state', |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.