input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>Akatsuki96/falkon<filename>falkon/kernels/distance_kernel.py
import collections
import functools
import math
from abc import ABC, abstractmethod
from typing import Optional, Union, Tuple, Dict
import torch
from falkon.kernels import Kernel, KeopsKernelMixin
from falkon.options import BaseOptions, FalkonOptions
from falkon.sparse import sparse_ops
from falkon.sparse.sparse_tensor import SparseTensor
__all__ = (
'GaussianKernel',
'LaplacianKernel',
'MaternKernel',
)
DistKerContainer = collections.namedtuple('DistKerContainer', ['sq1', 'sq2'])
class L2DistanceKernel(Kernel, ABC):
    r"""Base class for L2-based kernels
    Such kernels are characterized by the squared norm of the difference between each input
    sample. This involves computing the squared norm in :meth:`Kernel._prepare`, and a simple matrix
    multiplication in :meth:`Kernel._apply`.
    In :meth:`Kernel._finalize` the squared norm and matrix multiplication are added together to form
    the kernel matrix.
    Subclasses should implement the :meth:`Kernel._transform` method which applies additional elementwise
    transformations to the kernel matrix. :meth:`Kernel._transform` is called after :meth:`Kernel._finalize`.
    This class supports sparse data.
    Parameters
    ----------
    name : str
        Descriptive name of the specialized kernel
    opt : CompOpt or dict or None
        Options which will be passed to the kernel operations
    Notes
    ------
    To efficiently compute kernels of the form k(x, x') = ||x - x'||^2 between two matrices of
    data-points we decompose the squared norm of the difference into 3 terms:
    ||X||^2 and -2*XX'^T and ||X'||^2
    The first and third term are calculated in the `_prepare` method while the second is
    calculated in the `_apply` method. Finally the three terms are combined in the `_finalize`
    method.
    """
    kernel_type = "l2distance"

    def __init__(self, name, opt: Optional[FalkonOptions] = None):
        super().__init__(name, self.kernel_type, opt)

    def _prepare(self, X1: torch.Tensor, X2: torch.Tensor) -> DistKerContainer:
        # Row-wise squared L2 norms of both inputs (column vectors).
        # `pow_` squares in place to avoid allocating a second tensor.
        return DistKerContainer(
            sq1=torch.norm(X1, p=2, dim=1, keepdim=True).pow_(2),
            sq2=torch.norm(X2, p=2, dim=1, keepdim=True).pow_(2)
        )

    def _prepare_sparse(self, X1: SparseTensor, X2: SparseTensor) -> DistKerContainer:
        # Same row-wise squared norms as `_prepare`, via the sparse helper ops
        # which fill a pre-allocated output buffer.
        sq1 = torch.empty(X1.size(0), dtype=X1.dtype, device=X1.device)
        sparse_ops.sparse_square_norm(X1, sq1)
        # NOTE(review): sq2 is allocated with X1's dtype/device — presumably X1
        # and X2 always share them; confirm upstream validation guarantees this.
        sq2 = torch.empty(X2.size(0), dtype=X1.dtype, device=X1.device)
        sparse_ops.sparse_square_norm(X2, sq2)
        return DistKerContainer(
            sq1=sq1.reshape(-1, 1), sq2=sq2.reshape(-1, 1)
        )

    def _apply(self, X1: torch.Tensor, X2: torch.Tensor, out: torch.Tensor) -> None:
        # Cross-term X1 @ X2 accumulated into `out` in place.
        # NOTE(review): X2 appears to arrive already transposed by the caller
        # (addmm_ needs conforming shapes) — confirm against the Kernel contract.
        out.addmm_(X1, X2)

    def _apply_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor) -> torch.Tensor:
        # Sparse counterpart of `_apply`: cross-term via sparse matmul.
        return sparse_ops.sparse_matmul(X1, X2, out)

    def _finalize(self, A: torch.Tensor, d: DistKerContainer) -> torch.Tensor:
        # Assemble ||x - x'||^2 = ||x||^2 - 2*x.x' + ||x'||^2 entirely in place
        # (A initially holds the cross-term from `_apply`).
        A.mul_(-2.0)
        A.add_(d.sq1.to(A))
        A.add_(d.sq2.to(A).t())
        # Floating-point cancellation can yield tiny negatives; clamp to zero
        # so subclass transforms (sqrt, exp, ...) stay well-defined.
        A.clamp_min_(0)
        return self._transform(A)

    @abstractmethod
    def _transform(self, A: torch.Tensor):
        # Subclass hook: element-wise map from squared distances to kernel values.
        pass
class GaussianKernel(L2DistanceKernel, KeopsKernelMixin):
    r"""Class for computing the Gaussian kernel and related kernel-vector products
    The Gaussian kernel is one of the most common and effective kernel embeddings
    since it is infinite dimensional, and governed by a single parameter. The kernel length-scale
    determines the width of the Gaussian distribution which is placed on top of each point.
    A larger sigma corresponds to a wide Gaussian, so that the relative influence of far away
    points will be high for computing the kernel at a given datum.
    On the opposite side of the spectrum, a small sigma means that only nearby points will
    influence the kernel.
    Parameters
    -----------
    sigma
        The length-scale of the kernel.
        This can be a scalar, and then it corresponds to the standard deviation
        of the Gaussian distribution from which the kernel is derived.
        If `sigma` is a vector of size `d` (where `d` is the dimensionality of the data), it is
        interpreted as the diagonal standard deviation of the Gaussian distribution.
        It can also be a matrix of size `d*d`, in which case sigma will be the precision
        matrix (inverse covariance).
    opt
        Additional options to be forwarded to the matrix-vector multiplication
        routines.
    Examples
    --------
    Creating a Gaussian kernel with a single length-scale. Operations on this kernel will not
    use KeOps.
    >>> K = GaussianKernel(sigma=3.0, opt=FalkonOptions(keops_active="no"))
    Creating a Gaussian kernel with a different length-scale per dimension
    >>> K = GaussianKernel(sigma=torch.tensor([1.0, 3.5, 7.0]))
    Creating a Gaussian kernel object with full covariance matrix (randomly chosen)
    >>> mat = torch.randn(3, 3, dtype=torch.float64)
    >>> sym_mat = mat @ mat.T
    >>> K = GaussianKernel(sigma=sym_mat)
    >>> K
    GaussianKernel(sigma=tensor([[ 2.0909,  0.0253, -0.2490],
            [ 0.0253,  0.3399, -0.5158],
            [-0.2490, -0.5158,  4.4922]], dtype=torch.float64))  #random
    Notes
    -----
    The Gaussian kernel with a single length-scale follows
    .. math::
        k(x, x') = \exp\left(-\dfrac{\lVert x - x' \rVert^2}{2\sigma^2}\right)
    When the length-scales are specified as a matrix :math:`\Sigma` (interpreted as a
    precision matrix, i.e. an inverse covariance), the RBF kernel is determined by
    .. math::
        k(x, x') = \exp\left(-\dfrac{1}{2} (x - x')^\top \Sigma (x - x')\right)
    In both cases, the actual computation follows a different path, working on the expanded
    norm.
    """
    kernel_name = "gaussian"

    def __init__(self, sigma: Union[float, torch.Tensor], opt: Optional[FalkonOptions] = None):
        super().__init__(self.kernel_name, opt)
        # `gaussian_type` is one of 'single' (scalar sigma), 'diag' (vector sigma)
        # or 'full' (precision matrix sigma).
        self.sigma, self.gaussian_type = self._get_sigma_kt(sigma)
        if self.gaussian_type in {'single', 'diag'}:
            # gamma is the exponent multiplier -1/(2 sigma^2), used in `_transform`.
            self.gamma = -0.5 / (self.sigma ** 2)
        else:  # self.gaussian_type == 'full'
            # For a full precision matrix, gamma holds its lower Cholesky factor L
            # (L L^T = sigma), so that ||x L||^2 = x sigma x^T.
            # NOTE(review): `torch.cholesky` is deprecated in newer torch in favor
            # of `torch.linalg.cholesky` — depends on the pinned torch version.
            self.gamma = torch.cholesky(self.sigma, upper=False)
        if self.gaussian_type != 'single':
            # Cannot use the distk variants
            self.kernel_type = "l2-multi-distance"

    @staticmethod
    def _get_sigma_kt(sigma: Union[float, torch.Tensor]) -> Tuple[torch.Tensor, str]:
        """Normalize `sigma` into a tensor and classify it as 'single', 'diag' or 'full'.

        Raises :class:`TypeError` for tensors of invalid shape or non-numeric scalars.
        """
        if isinstance(sigma, torch.Tensor):
            # Sigma is a 1-item tensor ('single')
            try:
                sigma.item()
                return sigma, "single"
            except ValueError:
                # NOTE(review): newer torch versions may raise RuntimeError here
                # instead of ValueError for multi-element tensors — confirm.
                pass
            # Sigma is a vector ('diag')
            if sigma.dim() == 1 or sigma.shape[1] == 1:
                return sigma.reshape(-1), "diag"
            # Check correctness for 'full' sigma
            if sigma.dim() != 2:
                raise TypeError("Sigma can be specified as a 1D or a 2D tensor. "
                                "Found %dD tensor" % (sigma.dim()))
            if sigma.shape[0] != sigma.shape[1]:
                raise TypeError("Sigma passed as a 2D matrix must be square. "
                                "Found dimensions %s" % (sigma.size()))
            return sigma, "full"
        else:
            try:
                return torch.tensor([float(sigma)], dtype=torch.float64), "single"
            except TypeError:
                raise TypeError("Sigma must be a scalar or a tensor.")

    def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
        # Build the KeOps Genred formula for the kernel-vector product K(X1, X2) @ v.
        if self.gaussian_type in {'single', 'diag'}:
            # Scalar/diagonal sigma: rescale inputs by 1/s inside SqDist.
            formula = 'Exp(IntInv(-2) * SqDist(x1 / s, x2 / s)) * v'
            aliases = [
                'x1 = Vi(%d)' % (X1.shape[1]),
                'x2 = Vj(%d)' % (X2.shape[1]),
                'v = Vj(%d)' % (v.shape[1]),
                's = Pm(%d)' % (self.sigma.shape[0])
            ]
            other_vars = [self.sigma.to(device=X1.device, dtype=X1.dtype)]
        elif self.gaussian_type == 'full':
            # Since the covariance is a full matrix we use a different formulation
            # Here sigma is the precision matrix (inverse covariance), and gamma is its Cholesky decomposition.
            dim = self.gamma.shape[0]
            formula = (
                'Exp(IntInv(-2) * SqDist('
                f'TensorDot(x1, g, Ind({dim}), Ind({dim}, {dim}), Ind(0), Ind(0)), '
                f'TensorDot(x2, g, Ind({dim}), Ind({dim}, {dim}), Ind(0), Ind(0)))) * v'
            )
            aliases = [
                'x1 = Vi(%d)' % (X1.shape[1]),
                'x2 = Vj(%d)' % (X2.shape[1]),
                'v = Vj(%d)' % (v.shape[1]),
                'g = Pm(%d)' % (dim ** 2)
            ]
            other_vars = [self.gamma.reshape(-1).to(device=X1.device, dtype=X1.dtype)]
        else:
            raise ValueError(f"Gaussian type '{self.gaussian_type}' invalid.")
        return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)

    def _decide_mmv_impl(self, X1, X2, v, opt: FalkonOptions):
        # Prefer the KeOps implementation when it can handle the inputs.
        if self.keops_can_handle_mmv(X1, X2, v, opt):
            return self._keops_mmv_impl
        else:
            return super()._decide_mmv_impl(X1, X2, v, opt)

    def _decide_dmmv_impl(self, X1, X2, v, w, opt: FalkonOptions):
        # Double kernel-vector products reuse the KeOps mmv via a helper.
        if self.keops_can_handle_dmmv(X1, X2, v, w, opt):
            return functools.partial(self.keops_dmmv_helper, mmv_fn=self._keops_mmv_impl)
        else:
            return super()._decide_dmmv_impl(X1, X2, v, w, opt)

    def _prepare(self, X1, X2):
        # Squared norms of the (sigma-rescaled) inputs; the rescaling mirrors
        # what `_apply` does to the cross-term so `_finalize` assembles the
        # correct Mahalanobis-style squared distance.
        if self.gaussian_type == "single":
            return super()._prepare(X1, X2)
        elif self.gaussian_type == "diag":
            sigma = self.sigma.to(X1)
            return DistKerContainer(
                sq1=torch.norm(X1 / sigma, p=2, dim=1, keepdim=True).pow_(2),
                sq2=torch.norm(X2 / sigma, p=2, dim=1, keepdim=True).pow_(2)
            )
        else:
            # 'full': ||x L||^2 = x sigma x^T with L the Cholesky factor in gamma.
            chol = self.gamma.to(X1)
            return DistKerContainer(
                sq1=torch.norm(X1 @ chol, p=2, dim=1, keepdim=True).pow_(2),
                sq2=torch.norm(X2 @ chol, p=2, dim=1, keepdim=True).pow_(2)
            )

    def _prepare_sparse(self, X1: SparseTensor, X2: SparseTensor):
        if self.gaussian_type == "single":
            return super()._prepare_sparse(X1, X2)
        else:
            raise NotImplementedError(
                "Sparse Gaussian kernel only implemented with scalar lengthscale.")

    def _apply(self, X1, X2, out):
        # Cross-term, rescaled consistently with `_prepare` for each sigma type.
        if self.gaussian_type == "single":
            return super()._apply(X1, X2, out)
        elif self.gaussian_type == "diag":
            sigma = self.sigma.to(X1)
            out.addmm_(X1 / (sigma ** 2), X2)
        else:
            # 'full': cross-term X1 @ sigma @ X2 (sigma is the precision matrix).
            sigma = self.sigma.to(X1)
            out.addmm_(X1 @ sigma, X2)

    def extra_mem(self) -> Dict[str, float]:
        # Per-block temporary-memory multipliers consumed by the memory manager
        # (keys name block dimensions: n/m rows, d columns) — interpreted upstream.
        if self.gaussian_type == 'single':
            # Only norms in prepare
            return {'m': 1, 'n': 1}
        elif self.gaussian_type in {'diag', 'full'}:
            return {
                # Data-matrix / sigma in prepare + Data-matrix / sigma in apply
                'nd': 2,
                'md': 1,
                # Norm results in prepare
                'm': 1,
                'n': 1,
            }
        else:
            raise ValueError(f"Gaussian type '{self.gaussian_type}' invalid.")

    def _apply_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor):
        if self.gaussian_type == "single":
            return super()._apply_sparse(X1, X2, out)
        else:
            raise NotImplementedError(
                "Sparse Gaussian kernel only implemented with scalar sigma.")

    def _transform(self, A) -> torch.Tensor:
        # Map squared distances to kernel values: exp(gamma * d^2) in place.
        if self.gaussian_type == "single":
            gamma = self.gamma.to(A)
            A.mul_(gamma)
            A.exp_()
            return A
        else:
            # diag/full: rescaling already happened in _prepare/_apply, so the
            # exponent multiplier is just -1/2.
            A.mul_(-0.5)
            A.exp_()
            return A

    def __repr__(self):
        return f"GaussianKernel(sigma={self.sigma})"

    def __str__(self):
        return f"Gaussian kernel<{self.sigma}>"
class LaplacianKernel(GaussianKernel):
r"""Class for computing the Laplacian kernel, and related kernel-vector products.
The Laplacian kernel is similar to the Gaussian kernel, but less sensitive to changes
in the parameter `sigma`.
Parameters
----------
sigma
The length-scale of the Laplacian kernel
Notes
-----
The Laplacian kernel | |
'cancel', 'cancel_run': True,
'msg': self.persister.secondary_database_fail}
else:
return True
else:
return True
# ===============================================================================
# setup
# ===============================================================================
def setup_persister(self):
    """Configure the persistence spec from the current run state.

    Collects the names/blobs of every configured script (extraction,
    measurement, post-equilibration, post-measurement), queue metadata and
    filtering defaults, and mirrors them onto the persister spec via
    ``_update_persister_spec``.
    """
    sens = self._get_extraction_parameter('sensitivity_multiplier', default=1)
    # setup persister. mirror a few of AutomatedRunsAttributes
    script_name, script_blob = self._assemble_script_blob()
    # NOTE(review): `eqb` is initialized but never reassigned — the experiment
    # queue blob is always persisted as an empty string; confirm intended.
    eqn, eqb = '', ''
    queue = self.experiment_queue
    eqn = queue.name
    auto_save_detector_ic = queue.auto_save_detector_ic
    self.debug('$$$$$$$$$$$$$$$ auto_save_detector_ic={}'.format(auto_save_detector_ic))
    # Each script contributes its name and serialized source only if configured.
    ext_name, ext_blob = '', ''
    if self.extraction_script:
        ext_name = self.extraction_script.name
        ext_blob = self._assemble_extraction_blob()
    ms_name, ms_blob, sfods, bsfods = '', '', {}, {}
    if self.measurement_script:
        ms_name = self.measurement_script.name
        ms_blob = self.measurement_script.toblob()
        # signal / baseline filter-outlier defaults come from the measurement script
        sfods, bsfods = self._get_default_fods()
    pe_name, pe_blob = '', ''
    if self.post_equilibration_script:
        pe_name = self.post_equilibration_script.name
        pe_blob = self.post_equilibration_script.toblob()
    pm_name, pm_blob = '', ''
    if self.post_measurement_script:
        pm_name = self.post_measurement_script.name
        pm_blob = self.post_measurement_script.toblob()
    ext_pos = []
    if self.extraction_script:
        ext_pos = self.extraction_script.get_extraction_positions()
    self._update_persister_spec(save_as_peak_hop=False,
                                run_spec=self.spec,
                                isotope_group=self.isotope_group,
                                positions=self.spec.get_position_list(),
                                auto_save_detector_ic=auto_save_detector_ic,
                                extraction_positions=ext_pos,
                                sensitivity_multiplier=sens,
                                experiment_type=self.experiment_type,
                                experiment_queue_name=eqn,
                                experiment_queue_blob=eqb,
                                extraction_name=ext_name,
                                extraction_blob=ext_blob,
                                measurement_name=ms_name,
                                measurement_blob=ms_blob,
                                post_measurement_name=pm_name,
                                post_measurement_blob=pm_blob,
                                post_equilibration_name=pe_name,
                                post_equilibration_blob=pe_blob,
                                runscript_name=script_name,
                                runscript_blob=script_blob,
                                signal_fods=sfods,
                                baseline_fods=bsfods,
                                intensity_scalar=self.intensity_scalar,
                                laboratory=self.laboratory,
                                instrument_name=self.instrument_name)
# ===============================================================================
# doers
# ===============================================================================
def start_extraction(self):
    """Start the extraction script; returns whatever ``_start_script`` returns."""
    return self._start_script('extraction')
def start_measurement(self):
    """Start the measurement script; returns whatever ``_start_script`` returns."""
    return self._start_script('measurement')
def do_extraction(self):
    """Execute the extraction script for this run.

    Performs the pre-extraction save, optionally starts a syn-extraction
    collector, runs the script, and on success persists the extraction
    results (power, blobs, snapshots, ...). On failure, runs the
    post-equilibration/post-measurement cleanup and finishes the run.

    Returns True on success, False on script failure, None on invalid
    script syntax.
    """
    self.debug('do extraction')
    self._persister_action('pre_extraction_save')
    self.info_color = EXTRACTION_COLOR
    script = self.extraction_script
    msg = 'Extraction Started {}'.format(script.name)
    self.heading('{}'.format(msg))
    self.spec.state = 'extraction'
    self.experiment_queue.refresh_table_needed = True
    self.debug('DO EXTRACTION {}'.format(self.runner))
    script.set_run_identifier(self.runid)
    queue = self.experiment_queue
    script.set_load_identifier(queue.load_name)
    syn_extractor = None
    if script.syntax_ok(warn=False):
        # Optionally collect data in parallel with extraction ("syn extraction"),
        # driven by a YAML configuration file.
        if self.use_syn_extraction and self.spec.syn_extraction:
            p = os.path.join(paths.scripts_dir, 'syn_extraction', self.spec.syn_extraction)
            p = add_extension(p, '.yaml')
            if os.path.isfile(p):
                from pychron.experiment.automated_run.syn_extraction import SynExtractionCollector
                dur = script.calculate_estimated_duration(force=True)
                # NOTE(review): `weakref.ref(self)()` immediately dereferences the
                # weakref, passing a strong reference — presumably intentional to
                # mirror an API expecting an object; confirm.
                syn_extractor = SynExtractionCollector(arun=weakref.ref(self)(),
                                                      path=p,
                                                      extraction_duration=dur)
                syn_extractor.start()
            else:
                self.warning(
                    'Cannot start syn extraction collection. Configuration file does not exist. {}'.format(p))
    else:
        self.warning('Invalid script syntax for "{}"'.format(script.name))
        return
    try:
        ex_result = script.execute()
    except ExtractionException as e:
        # An extraction exception is treated as a failed (not crashed) extraction.
        ex_result = False
        self.debug('extraction exception={}'.format(e))
    if ex_result:
        if syn_extractor:
            syn_extractor.stop()
        # report the extraction results
        ach, req = script.output_achieved()
        self.info('Requested Output= {:0.3f}'.format(req))
        self.info('Achieved Output= {:0.3f}'.format(ach))
        # Gather all extraction artifacts for persistence.
        rblob = script.get_response_blob()
        oblob = script.get_output_blob()
        sblob = script.get_setpoint_blob()
        snapshots = script.snapshots
        videos = script.videos
        grain_polygons = script.get_grain_polygons() or []
        self.debug('grain polygons n={}'.format(len(grain_polygons)))
        pid = script.get_active_pid_parameters()
        self._update_persister_spec(pid=pid or '',
                                    grain_polygons=grain_polygons,
                                    power_achieved=ach,
                                    response_blob=rblob,
                                    output_blob=oblob,
                                    setpoint_blob=sblob,
                                    snapshots=snapshots,
                                    videos=videos)
        self._persister_save_action('post_extraction_save')
        self.heading('Extraction Finished')
        self.info_color = None
        # if overlapping need to wait for previous runs min mass spec pump time
        self._wait_for_min_ms_pumptime()
        return True
    else:
        if syn_extractor:
            syn_extractor.stop()
        # Failed extraction: run cleanup scripts and finish the run.
        self.do_post_equilibration()
        self.do_post_measurement()
        self.finish()
        self.heading('Extraction Finished unsuccessfully', color='red')
        self.info_color = None
        return False
def do_measurement(self, script=None, use_post_on_fail=True):
    """Execute the measurement script for this run.

    Parameters
    ----------
    script : optional
        Explicit measurement script; defaults to ``self.measurement_script``.
    use_post_on_fail : bool
        When True, run post-equilibration/post-measurement cleanup if the
        script fails.

    Returns True on success, ``self._aborted`` on failure, None when the run
    is not alive or no script is available.
    """
    self.debug('do measurement')
    self.debug('L#={} analysis type={}'.format(self.spec.labnumber,
                                               self.spec.analysis_type))
    if not self._alive:
        self.warning('run is not alive')
        return
    if script is None:
        script = self.measurement_script
        if script is None:
            self.warning('no measurement script')
            return
    # use a measurement_script to explicitly define
    # measurement sequence
    self.info_color = MEASUREMENT_COLOR
    msg = 'Measurement Started {}'.format(script.name)
    self.heading('{}'.format(msg))
    self.spec.state = 'measurement'
    self.experiment_queue.refresh_table_needed = True
    # get current spectrometer values
    sm = self.spectrometer_manager
    if sm:
        self.debug('setting trap, emission, spec, defl, and gains')
        # Snapshot the spectrometer configuration so it is persisted with the run.
        self._update_persister_spec(spec_dict=sm.make_configuration_dict(),
                                    defl_dict=sm.make_deflections_dict(),
                                    gains=sm.make_gains_dict(),
                                    trap=sm.read_trap_current(),
                                    emission=sm.read_emission())
    self._persister_action('pre_measurement_save')
    self.measuring = True
    self._persister_action('trait_set', save_enabled=True)
    if script.execute():
        # mem_log('post measurement execute')
        self.heading('Measurement Finished')
        self.measuring = False
        self.info_color = None
        self._measured = True
        return True
    else:
        if use_post_on_fail:
            self.do_post_equilibration()
            self.do_post_measurement()
        self.finish()
        self.heading('Measurement Finished unsuccessfully. Aborted={}'.format(self._aborted), color='red')
        self.measuring = False
        self.info_color = None
        # An aborted run is reported as "success" of the abort, not a failure.
        return self._aborted
def do_post_measurement(self, script=None):
    """Execute the post-measurement script.

    Returns True when no script is configured or the script succeeds,
    False when it fails, None when the run is not alive.
    """
    if script is None:
        script = self.post_measurement_script
    if not script:
        # Nothing to do counts as success.
        return True
    if not self._alive:
        return
    msg = 'Post Measurement Started {}'.format(script.name)
    self.heading('{}'.format(msg))
    if script.execute():
        self.debug('setting _ms_pumptime')
        # Notify the executor that mass-spec pump time starts now; overlapping
        # runs wait on this timestamp.
        self.executor_event = {'kind': 'ms_pumptime_start', 'time': time.time()}
        self.heading('Post Measurement Finished')
        return True
    else:
        self.heading('Post Measurement Finished unsuccessfully')
        return False
def do_post_equilibration(self, block=False):
    """Run the post-equilibration step.

    Parameters
    ----------
    block : bool
        When True run ``_post_equilibration`` synchronously in the calling
        thread; otherwise run it in a background daemon thread so it cannot
        keep the process alive at interpreter shutdown.
    """
    if block:
        self._post_equilibration()
    else:
        t = Thread(target=self._post_equilibration,
                   name='post_equil')
        # `Thread.setDaemon` is deprecated (Python 3.10+); set the attribute.
        t.daemon = True
        t.start()
def do_post_termination(self, do_post_equilibration=True):
    """Clean up after a terminated run: optionally post-equilibrate, then
    run post-measurement and stop the run."""
    self.heading('Post Termination Started')
    if do_post_equilibration:
        self.do_post_equilibration()
    self.do_post_measurement()
    self.stop()
    self.heading('Post Termination Finished')
# ===============================================================================
# utilities
# ===============================================================================
def get_current_dac(self):
    """Return the spectrometer magnet's current DAC value."""
    return self.spectrometer_manager.spectrometer.magnet.dac
def assemble_report(self):
    """Build a human-readable summary of this run: runid, timestamp,
    per-detector baseline-corrected signals, and (if available) the age."""
    signal_string = ''
    signals = self.get_baseline_corrected_signals()
    if signals:
        # One line per active detector: "<detector> <isotope> <(det, value)>"
        signal_string = '\n'.join(['{} {} {}'.format(ai.name, ai.isotope,
                                                     signals[ai.isotope])
                                   for ai in self._active_detectors])
    age = ''
    if self.isotope_group:
        age = self.isotope_group.age
    age_string = 'age={}'.format(age)
    # NOTE(review): "anaylsis_type" below is a typo in the emitted report text;
    # left unchanged in case downstream tooling matches on it — confirm.
    return '''runid={} timestamp={} {}
anaylsis_type={}
# ===============================================================================
# signals
# ===============================================================================
{}
{}
'''.format(self.runid, self.persister.rundate, self.persister.runtime,
           self.spec.analysis_type,
           signal_string, age_string)
def get_baseline_corrected_signals(self):
    """Return ``{isotope_name: (detector, baseline-corrected value)}`` for the
    current isotope group, or None when no isotope group is loaded.
    """
    if self.isotope_group:
        return {k: (iso.detector, iso.get_baseline_corrected_value())
                for k, iso in self.isotope_group.items()}
    # Previously fell through implicitly; callers (e.g. assemble_report) treat
    # a falsy result as "no signals", so make the None return explicit.
    return None
def setup_context(self, *args, **kw):
    """Public wrapper around ``_setup_context``."""
    self._setup_context(*args, **kw)
def refresh_scripts(self):
    """Public wrapper around ``_refresh_scripts``."""
    self._refresh_scripts()
def update_detector_isotope_pairing(self, detectors, isotopes):
    """Re-pair detectors with isotopes.

    Clears the isotope assignment of every active detector first so no stale
    pairing survives, then assigns pairs from the two parallel sequences.
    """
    self.debug('update detector isotope pairing')
    self.debug('detectors={}'.format(detectors))
    self.debug('isotopes={}'.format(isotopes))
    # wipe existing assignments before applying the new pairing
    for active in self._active_detectors:
        active.isotope = ''
    for det_name, iso_name in zip(detectors, isotopes):
        self.debug('updating pairing {} - {}'.format(det_name, iso_name))
        self.get_detector(det_name).isotope = iso_name
# ===============================================================================
# private
# ===============================================================================
def _get_environmentals(self):
    """Fetch the latest lab environmental readings from the Labspy client.

    Returns a dict keyed by 'lab_temperatures' / 'lab_humiditys' /
    'lab_pneumatics'; entries are omitted on retrieval failure, and the dict
    is empty when the client is unavailable or cannot connect.
    """
    self.info('getting environmentals')
    env = {}
    lclient = self.labspy_client
    tst = time.time()
    if lclient:
        if lclient.connect():
            for tag in ('lab_temperatures', 'lab_humiditys', 'lab_pneumatics'):
                st = time.time()
                try:
                    # Dispatch to the client's get_latest_<tag>() accessor.
                    env[tag] = getattr(lclient, 'get_latest_{}'.format(tag))()
                    self.debug('Get latest {}. elapsed: {}'.format(tag, time.time() - st))
                except BaseException as e:
                    # Best-effort: a failing probe is logged but never fatal.
                    self.debug('Get Labspy Environmentals: {}'.format(e))
                    self.debug_exception()
        else:
            self.debug('failed to connect to labspy client. Could not retrieve environmentals')
        self.debug('Environmentals: {}'.format(pformat(env)))
    else:
        self.debug('LabspyClient not enabled. Could not retrieve environmentals')
    self.info('getting environmentals finished: total duration: {}'.format(time.time() - tst))
    return env
def _start(self):
    """Initialize the run before execution.

    Creates/loads the isotope group, loads the analysis backend (Ar/Ar),
    installs run/queue/system conditionals, resets collector and plot-panel
    state, wires up script contexts, and configures the persister.
    Returns True on success, None when any setup step fails.
    """
    # for testing only
    # self._get_environmentals()
    if self.isotope_group is None:
        # load arar_age object for age calculation
        if self.experiment_type == AR_AR:
            from pychron.processing.arar_age import ArArAge
            klass = ArArAge
        else:
            from pychron.processing.isotope_group import IsotopeGroup
            klass = IsotopeGroup
        self.isotope_group = klass()
    es = self.extraction_script
    if es is not None:
        # get sensitivity multiplier from extraction script
        v = self._get_yaml_parameter(es, 'sensitivity_multiplier', default=1)
        self.isotope_group.sensitivity_multiplier = v
    ln = self.spec.labnumber
    ln = convert_identifier(ln)
    self.debug('**************** Experiment Type: {}, {}'.format(self.experiment_type, AR_AR))
    if self.experiment_type == AR_AR:
        if not self.datahub.load_analysis_backend(ln, self.isotope_group):
            self.debug('failed load analysis backend')
            return
        self.isotope_group.calculate_decay_factors()
    self.py_clear_conditionals()
    # setup default/queue conditionals
    # clear the conditionals for good measure.
    # conditionals should be cleared during teardown.
    try:
        self._add_conditionals()
    except BaseException as e:
        self.warning('Failed adding conditionals {}'.format(e))
        return
    try:
        # add queue conditionals
        self._add_queue_conditionals()
    except BaseException as e:
        self.warning('Failed adding queue conditionals. err={}'.format(e))
        return
    try:
        # add default conditionals
        self._add_system_conditionals()
    except BaseException as e:
        self.warning('Failed adding system conditionals. err={}'.format(e))
        return
    self.info('Start automated run {}'.format(self.runid))
    # Reset per-run flags and collector/plot state.
    self.measuring = False
    self.truncated = False
    self._alive = True
    if self.plot_panel:
        self.plot_panel.total_counts = 0
        self.plot_panel.is_peak_hop = False
        self.plot_panel.is_baseline = False
        self.plot_panel.set_analysis_view(self.experiment_type)
    self.multi_collector.canceled = False
    self.multi_collector.is_baseline = False
    self.multi_collector.for_peak_hop = False
    self._equilibration_done = False
    # setup the scripts
    ip = self.spec.script_options
    if ip:
        ip = os.path.join(paths.scripts_dir, 'options', add_extension(ip, '.yaml'))
    if self.measurement_script:
        self.measurement_script.reset(self)
        # set the interpolation path
        self.measurement_script.interpolation_path = ip
    for si in ('extraction', 'post_measurement', 'post_equilibration'):
        script = getattr(self, '{}_script'.format(si))
        if script:
            self._setup_context(script)
            script.interpolation_path = ip
    # load extraction metadata
    self.eqtime = self._get_extraction_parameter('eqtime', -1)
    self.time_zero_offset = self.spec.collection_time_zero_offset
    # setup persister. mirror a few of AutomatedRunsAttributes
    self.setup_persister()
    return True
def _set_filtering(self):
    """Apply outlier-filter settings (fods) to every isotope and its baseline.

    Signal fods are keyed by isotope name, baseline fods by detector name;
    a default (filtering disabled) is used when no entry exists.
    """
    self.debug('Set filtering')

    def _get_filter_outlier_dict(iso, kind):
        # Look up the filter-outlier dict for `iso`, falling back to a
        # no-filtering default when the key is missing.
        if kind == 'baseline':
            fods = self.persistence_spec.baseline_fods
            key = iso.detector
        else:
            fods = self.persistence_spec.signal_fods
            key = iso.name
        try:
            fod = fods[key]
        except KeyError:
            fod = {'filter_outliers': False, 'iterations': 1, 'std_devs': 2}
        return fod

    for i in self.isotope_group.values():
        fod = _get_filter_outlier_dict(i, 'signal')
        self.debug('setting fod for {}= {}'.format(i.name, fod))
        i.set_filtering(fod)
        fod = _get_filter_outlier_dict(i, 'baseline')
        i.baseline.set_filtering(fod)
        self.debug('setting fod for {}= {}'.format(i.detector, fod))
def _update_persister_spec(self, **kw):
ps = self.persistence_spec
for k, v in kw.items():
try:
ps.trait_set(**{k: v})
except TraitError as e:
self.warning('failed setting persistence spec attr={}, value={} error={}'.format(k, v, e))
def _persister_save_action(self, func, *args, **kw):
    """Invoke `func` on each ENABLED persister backend (db, dvc, xls).

    Unlike ``_persister_action``, exceptions are not caught here and each
    backend is gated by its ``use_*_persistence`` flag.
    """
    self.debug('persistence save...')
    if self.use_db_persistence:
        self.debug('persistence save - db')
        getattr(self.persister, func)(*args, **kw)
    if self.use_dvc_persistence:
        self.debug('persistence save - dvc')
        getattr(self.dvc_persister, func)(*args, **kw)
    if self.use_xls_persistence:
        self.debug('persistence save - xls')
        getattr(self.xls_persister, func)(*args, **kw)
def _persister_action(self, func, *args, **kw):
    """Invoke `func` on the primary persister and, best-effort, on the
    xls/dvc persisters (failures there are logged, not raised)."""
    # Primary persister: exceptions propagate to the caller.
    getattr(self.persister, func)(*args, **kw)
    for i, p in enumerate((self.xls_persister, self.dvc_persister)):
        if p is None:
            continue
        try:
            getattr(p, func)(*args, **kw)
        except BaseException as e:
            self.warning('{} persister action failed. {} func={}, excp={}'.format(i, p.__class__.__name__,
                                                                                  func, e))
            import traceback
            traceback.print_exc()
def _post_equilibration(self):
    """Run the post-equilibration script exactly once per run.

    Guarded by ``_equilibration_done`` so concurrent/repeated invocations
    (blocking call vs. background thread) are no-ops.
    """
    if self._equilibration_done:
        return
    # Mark done BEFORE executing so a second caller cannot race in.
    self._equilibration_done = True
    if not self._alive:
        return
    if self.post_equilibration_script is None:
        return
    msg = 'Post Equilibration Started {}'.format(self.post_equilibration_script.name)
    self.heading('{}'.format(msg))
    if self.post_equilibration_script.execute():
        self.heading('Post Equilibration Finished')
    else:
        self.heading('Post Equilibration Finished unsuccessfully')
def _generate_ic_mftable(self, detectors, refiso, peak_center_config, n):
    """Generate an IC MFTable for the given detectors and reference isotope.

    Returns True when table generation succeeds, False otherwise.
    """
    from pychron.experiment.ic_mftable_generator import ICMFTableGenerator

    generator = ICMFTableGenerator()
    return bool(generator.make_mftable(self, detectors, refiso, peak_center_config, n))
def _add_system_conditionals(self):
    """Load system-level (default) conditionals from the spectrometer
    directory's conditionals YAML file, warning when none is found."""
    self.debug('add default conditionals')
    p = get_path(paths.spectrometer_dir, '.*conditionals', ('.yaml', '.yml'))
    if p is not None:
        self.info('adding default conditionals from {}'.format(p))
        self._add_conditionals_from_file(p, level=SYSTEM)
    else:
        self.warning('no Default Conditionals file. {}'.format(p))
def _add_queue_conditionals(self):
"""
load queue global conditionals (truncations, actions, terminations)
"""
self.debug('Add queue conditionals')
name = self.spec.queue_conditionals_name
if test_queue_conditionals_name(name):
| |
linewidth=linewidth
)
self.__bounding_boxes.append(visualise_bounding_box)
elif isinstance(bounding_box, (list, tuple, sitk.Image)):
# Use a default name if not specified
if name is None:
name = "Bounding box"
visualise_bounding_box = VisualiseBoundingBox(
bounding_box, name=name, color=color, linewidth=linewidth
)
self.__bounding_boxes.append(visualise_bounding_box)
else:
raise ValueError(
"Bounding boxes should be represented as a dict with bounding box name as key "
"and list or tuple as value"
)
def show(self, interact=False):
    """Render the image with all overlays"""
    # Base layer: either the plain image slice(s) or a comparison colormix.
    if len(self.__comparison_overlays) == 0:
        self._display_slice()
    else:
        self._overlay_comparison()
    # Overlay layers are drawn on top of the base, in this fixed order.
    self._overlay_scalar_field()
    self._overlay_vector_field()
    self._overlay_contours()
    self._overlay_bounding_boxes()
    self._adjust_view()
    if interact:
        logger.warning("Interactive mode not yet implemented")
        # self.interact_adjust_slice()
    self.__figure.canvas.draw()
    self._add_legend()
    self.__figure.set_facecolor("white")
    return self.__figure
def _display_slice(self):
    """Display the configured image slice"""
    image = self.__image
    nda = sitk.GetArrayFromImage(image)
    # sitk arrays are ordered (z, y, x) = (axial, coronal, sagittal).
    (ax_size, cor_size, sag_size) = nda.shape[:3]
    window = self.__window
    if window is None:
        # We will choose it ourselves!
        lower = nda.min()
        # Check if we *probably* have a CT
        if lower < -900:
            # Just set a decent CT window
            # Somewhere around soft tissue
            window = (-250, 600)
        # Otherwise just pick a reasonable upper limit
        else:
            upper = np.percentile(nda, 99)
            window = (lower, upper - lower)
    try:
        # If a 4th dimension exists, assume RGB; accessing nda.shape[3] raises
        # IndexError for plain 3D volumes, which is silently ignored below.
        logger.info(
            f"Found a (z,y,x,{nda.shape[3]}) dimensional array - assuming this is an RGB"
            "image."
        )
        # NOTE(review): in-place division fails for integer dtypes (raises a
        # numpy casting error not caught here) — presumably RGB inputs are
        # float; confirm.
        nda /= nda.max()
    except ValueError:
        logger.warning("Problem converting RGB image to np.ndarray.")
    except IndexError:
        pass
    # Aspect ratio between the slice spacing and in-plane spacing.
    sp_plane, _, sp_slice = image.GetSpacing()
    asp = (1.0 * sp_slice) / sp_plane
    if self.__projection is True:
        projection = "max"
    else:
        projection = self.__projection
    if self.__axis == "ortho":
        # Three-panel orthogonal layout: axial (top-left), coronal
        # (bottom-left), sagittal (bottom-right); top-right stays blank.
        figure_size = (
            self.__figure_size,
            self.__figure_size * (asp * ax_size + cor_size) / (1.0 * sag_size + cor_size),
        )
        self.__figure, ((ax_ax, blank), (ax_cor, ax_sag)) = plt.subplots(
            2,
            2,
            figsize=figure_size,
            gridspec_kw={
                "height_ratios": [(cor_size) / (asp * ax_size), 1],
                "width_ratios": [sag_size, cor_size],
            },
        )
        blank.axis("off")
        if self.__cut is None:
            # Default cut: the volume center in each axis.
            slice_ax = int(ax_size / 2.0)
            slice_cor = int(cor_size / 2.0)
            slice_sag = int(sag_size / 2.0)
            self.__cut = [slice_ax, slice_cor, slice_sag]
        if not self.__projection:
            s_ax = return_slice("z", self.__cut[0])
            s_cor = return_slice("y", self.__cut[1])
            s_sag = return_slice("x", self.__cut[2])
            ax_img = nda.__getitem__(s_ax)
            cor_img = nda.__getitem__(s_cor)
            sag_img = nda.__getitem__(s_sag)
        else:
            # Projection mode: collapse the volume along each axis instead of
            # extracting a single slice.
            ax_img_proj = project_onto_arbitrary_plane(
                image,
                projection_axis=2,
                projection_name=projection,
                default_value=int(nda.min()),
            )
            ax_img = sitk.GetArrayFromImage(ax_img_proj)
            # ax_img = (ax_img - ax_img.min()) / (ax_img.max() - ax_img.min())
            cor_img_proj = project_onto_arbitrary_plane(
                image,
                projection_axis=1,
                projection_name=projection,
                default_value=int(nda.min()),
            )
            cor_img = sitk.GetArrayFromImage(cor_img_proj)
            # cor_img = (cor_img - cor_img.min()) / (cor_img.max() - cor_img.min())
            sag_img_proj = project_onto_arbitrary_plane(
                image,
                projection_axis=0,
                projection_name=projection,
                default_value=int(nda.min()),
            )
            sag_img = sitk.GetArrayFromImage(sag_img_proj)
            # sag_img = (sag_img - sag_img.min()) / (sag_img.max() - sag_img.min())
        ax_view = ax_ax.imshow(
            ax_img,
            aspect=1.0,
            interpolation="none",
            origin={"normal": "upper", "reversed": "lower"}[self.__origin],
            cmap=self.__colormap,
            clim=(window[0], window[0] + window[1]),
        )
        cor_view = ax_cor.imshow(
            cor_img,
            origin="lower",
            aspect=asp,
            interpolation="none",
            cmap=self.__colormap,
            clim=(window[0], window[0] + window[1]),
        )
        sag_view = ax_sag.imshow(
            sag_img,
            origin="lower",
            aspect=asp,
            interpolation="none",
            cmap=self.__colormap,
            clim=(window[0], window[0] + window[1]),
        )
        ax_ax.axis("off")
        ax_cor.axis("off")
        ax_sag.axis("off")
        self.__figure.subplots_adjust(
            left=0, right=1, wspace=0.01, hspace=0.01, top=1, bottom=0
        )
        # Keep handles to the AxesImages so overlays can reuse clim/axes.
        self.__image_view = {
            "ax_view": ax_view,
            "cor_view": cor_view,
            "sag_view": sag_view,
        }
    else:
        # Single-axis layout.
        if hasattr(self.__cut, "__iter__"):
            warnings.warn(
                "You have selected a single axis and multiple slice locations, attempting to "
                "match."
            )
            # Pick the cut component corresponding to the selected axis.
            self.__cut = self.__cut[{"x": 2, "y": 1, "z": 0}[self.__axis]]
        # NOTE(review): `if not self.__cut` below also treats a legitimate cut
        # of 0 as "unset" and recenters — confirm intended.
        if self.__axis == "x" or self.__axis == "sag":
            axis_view_name_consistent = "sag_view"
            figure_size = (
                self.__figure_size,
                self.__figure_size * (asp * ax_size) / (1.0 * cor_size),
            )
            self.__figure, ax = plt.subplots(1, 1, figsize=(figure_size))
            org = "lower"
            if not self.__cut:
                self.__cut = int(sag_size / 2.0)
        if self.__axis == "y" or self.__axis == "cor":
            axis_view_name_consistent = "cor_view"
            figure_size = (
                self.__figure_size,
                self.__figure_size * (asp * ax_size) / (1.0 * sag_size),
            )
            self.__figure, ax = plt.subplots(1, 1, figsize=(figure_size))
            org = "lower"
            if not self.__cut:
                self.__cut = int(cor_size / 2.0)
        if self.__axis == "z" or self.__axis == "ax":
            axis_view_name_consistent = "ax_view"
            asp = 1
            figure_size = (
                self.__figure_size,
                self.__figure_size * (asp * cor_size) / (1.0 * sag_size),
            )
            self.__figure, ax = plt.subplots(1, 1, figsize=(figure_size))
            org = {"normal": "upper", "reversed": "lower"}[self.__origin]
            if not self.__cut:
                self.__cut = int(ax_size / 2.0)
        if not self.__projection:
            s = return_slice(self.__axis, self.__cut)
            disp_img = nda.__getitem__(s)
        else:
            disp_img_proj = project_onto_arbitrary_plane(
                image,
                projection_axis={"x": 0, "y": 1, "z": 2}[self.__axis],
                projection_name=projection,
                default_value=int(nda.min()),
            )
            disp_img = sitk.GetArrayFromImage(disp_img_proj)
            # disp_img = (disp_img - disp_img.min()) / (disp_img.max() - disp_img.min())
            s = return_slice(self.__axis, self.__cut)
        ax_indiv = ax.imshow(
            disp_img,
            aspect=asp,
            interpolation="none",
            origin=org,
            cmap=self.__colormap,
            clim=(window[0], window[0] + window[1]),
        )
        ax.axis("off")
        self.__figure.subplots_adjust(left=0, right=1, bottom=0, top=1)
        self.__image_view = {axis_view_name_consistent: ax_indiv}
    def _overlay_comparison(self):
        """Display an overlay comparison of the base image and the single
        registered comparison image.

        Exactly one comparison overlay must have been added. The hue used to
        color the original image (``color_rotation``, 0 - 0.5) is read from
        the comparison overlay object itself, not passed as an argument.

        Raises:
            ValueError: If more than one comparison overlay has been added.
        """
        if len(self.__comparison_overlays) > 1:
            raise ValueError("You can only display one comparison image.")
        else:
            comparison_overlay = self.__comparison_overlays[0]
        image_original = self.__image
        nda_original = sitk.GetArrayFromImage(image_original)
        image_new = comparison_overlay.image
        nda_new = sitk.GetArrayFromImage(image_new)
        color_rotation = comparison_overlay.color_rotation
        # Array axes are (axial, coronal, sagittal).
        (ax_size, cor_size, sag_size) = nda_original.shape
        sp_plane, _, sp_slice = image_original.GetSpacing()
        # Aspect ratio between through-plane and in-plane spacing.
        asp = (1.0 * sp_slice) / sp_plane
        window = self.__window
        if window is None:
            # We will choose it ourselves!
            lower = nda_original.min()
            # Check if we *probably* have a CT
            if lower < -900:
                # Just set a decent CT window
                # Somewhere around soft tissue
                window = (-250, 600)
            # Otherwise just pick a reasonable upper limit
            else:
                upper = np.percentile(nda_original, 99)
                window = (lower, upper - lower)
        if self.__axis == "ortho":
            # 2x2 grid: axial (top-left), blank (top-right),
            # coronal (bottom-left), sagittal (bottom-right).
            figure_size = (
                self.__figure_size,
                self.__figure_size * (asp * ax_size + cor_size) / (1.0 * sag_size + cor_size),
            )
            self.__figure, ((ax_ax, blank), (ax_cor, ax_sag)) = plt.subplots(
                2,
                2,
                figsize=figure_size,
                gridspec_kw={
                    "height_ratios": [(cor_size) / (asp * ax_size), 1],
                    "width_ratios": [sag_size, cor_size],
                },
            )
            blank.axis("off")
            # Default to the volume center when no cut locations were given.
            if self.__cut is None:
                slice_ax = int(ax_size / 2.0)
                slice_cor = int(cor_size / 2.0)
                slice_sag = int(sag_size / 2.0)
                self.__cut = [slice_ax, slice_cor, slice_sag]
            s_ax = return_slice("z", self.__cut[0])
            s_cor = return_slice("y", self.__cut[1])
            s_sag = return_slice("x", self.__cut[2])
            nda_colormix = generate_comparison_colormix(
                [nda_original, nda_new],
                arr_slice=s_ax,
                window=window,
                color_rotation=color_rotation,
            )
            ax_ax.imshow(
                nda_colormix,
                aspect=1.0,
                origin={"normal": "upper", "reversed": "lower"}[self.__origin],
                interpolation="none",
            )
            nda_colormix = generate_comparison_colormix(
                [nda_original, nda_new],
                arr_slice=s_cor,
                window=window,
                color_rotation=color_rotation,
            )
            ax_cor.imshow(
                nda_colormix,
                origin="lower",
                aspect=asp,
                interpolation="none",
            )
            nda_colormix = generate_comparison_colormix(
                [nda_original, nda_new],
                arr_slice=s_sag,
                window=window,
                color_rotation=color_rotation,
            )
            ax_sag.imshow(
                nda_colormix,
                origin="lower",
                aspect=asp,
                interpolation="none",
            )
            ax_ax.axis("off")
            ax_cor.axis("off")
            ax_sag.axis("off")
            self.__figure.subplots_adjust(
                left=0, right=1, wspace=0.01, hspace=0.01, top=1, bottom=0
            )
        else:
            # Single-axis display.
            if hasattr(self.__cut, "__iter__"):
                warnings.warn(
                    "You have selected a single axis and multiple slice locations, attempting to "
                    "match."
                )
                self.__cut = self.__cut[{"x": 2, "y": 1, "z": 0}[self.__axis]]
            if self.__axis == "x" or self.__axis == "sag":
                figure_size = (
                    self.__figure_size,
                    self.__figure_size * (asp * ax_size) / (1.0 * cor_size),
                )
                self.__figure, ax = plt.subplots(1, 1, figsize=(figure_size))
                org = "lower"
                # NOTE(review): `not self.__cut` also treats a cut of 0 as
                # "unset" and replaces it with the center slice — confirm
                # this is intended.
                if not self.__cut:
                    self.__cut = int(sag_size / 2.0)
            if self.__axis == "y" or self.__axis == "cor":
                figure_size = (
                    self.__figure_size,
                    self.__figure_size * (asp * ax_size) / (1.0 * sag_size),
                )
                self.__figure, ax = plt.subplots(1, 1, figsize=(figure_size))
                org = "lower"
                if not self.__cut:
                    self.__cut = int(cor_size / 2.0)
            if self.__axis == "z" or self.__axis == "ax":
                # Axial slices are in-plane, so the aspect ratio is 1.
                asp = 1
                figure_size = (
                    self.__figure_size,
                    self.__figure_size * (asp * cor_size) / (1.0 * sag_size),
                )
                self.__figure, ax = plt.subplots(1, 1, figsize=(figure_size))
                org = "upper"
                if not self.__cut:
                    self.__cut = int(ax_size / 2.0)
            s = return_slice(self.__axis, self.__cut)
            nda_colormix = generate_comparison_colormix(
                [nda_original, nda_new], arr_slice=s, window=window, color_rotation=color_rotation
            )
            ax.imshow(
                nda_colormix,
                aspect=asp,
                interpolation="none",
                origin=org,
            )
            ax.axis("off")
            self.__figure.subplots_adjust(left=0, right=1, bottom=0, top=1)
def _adjust_view(self):
"""adjust_view is a helper function for modifying axis limits.
Specify *limits* when initialising the ImageVisulaiser to use.
Alternatively, use set_limits_from_label to specify automatically.
"""
limits = self.__limits
origin = self.__origin
if limits is not None:
if self.__axis == "ortho":
ax_ax, ax_blank, ax_cor, ax_sag = self.__figure.axes[:4]
cax_list = self.__figure.axes[4:]
ax_orig_0, ax_orig_1 = ax_cor.get_ylim()
cor_orig_0, cor_orig_1 = ax_ax.get_ylim()
sag_orig_0, sag_orig_1 = ax_ax.get_xlim()
ax_0, ax_1, cor_0, cor_1, sag_0, sag_1 = limits
# Perform some corrections
ax_0, ax_1 = sorted([ax_0, ax_1])
cor_0, cor_1 = sorted([cor_0, cor_1])
sag_0, sag_1 = sorted([sag_0, sag_1])
ax_orig_0, ax_orig_1 = sorted([ax_orig_0, ax_orig_1])
cor_orig_0, cor_orig_1 = sorted([cor_orig_0, cor_orig_1])
sag_orig_0, sag_orig_1 = sorted([sag_orig_0, sag_orig_1])
ax_size = ax_1 - ax_0
cor_size = cor_1 - cor_0
sag_size = sag_1 - sag_0
asp = ax_cor.get_aspect()
ratio_x = ((cor_1 - cor_0) + (sag_1 - sag_0)) / (
(cor_orig_1 - cor_orig_0) + (sag_orig_1 - sag_orig_0)
)
ratio_y = (1 / asp * (cor_1 - cor_0) + (ax_1 - ax_0)) / (
1 / asp * (cor_orig_1 - cor_orig_0) + | |
path into the job
workspace corresponding to the state point specified in the manifest file.
Alternatively the schema argument may be a string, that is converted into a schema function,
for example: Providing ``foo/{foo:int}`` as schema argument means that all directories under
``foo/`` will be imported and their names will be interpreted as the value for ``foo``
within the state point.
.. tip::
Use ``copytree=os.replace`` or ``copytree=shutil.move`` to move dataspaces on import
instead of copying them.
Warning: Imports can fail due to conflicts. Moving data instead of copying may
therefore lead to inconsistent states and users are advised to apply caution.
See Also
--------
:meth:`~signac.Project.export_to` : Export the project data space.
Parameters
----------
origin :
The path to the data space origin, which is to be imported. This may be a path to
a directory, a zip file, or a tarball archive (Default value = None).
schema :
An optional schema function, which is either a string or a function that accepts a
path as its first and only argument and returns the corresponding state point as dict.
(Default value = None).
sync :
If ``True``, the project will be synchronized with the imported data space. If a
dict of keyword arguments is provided, the arguments will be used for
:meth:`~signac.Project.sync` (Default value = None).
copytree :
Specify which exact function to use for the actual copytree operation.
Defaults to :func:`shutil.copytree`.
Returns
-------
dict
A dict that maps the source directory paths to the target directory paths.
"""
from .import_export import import_into_project
if sync:
with self.temporary_project() as tmp_project:
ret = tmp_project.import_from(origin=origin, schema=schema)
if sync is True:
self.sync(other=tmp_project)
else:
self.sync(other=tmp_project, **sync)
return ret
paths = dict(import_into_project(
origin=origin, project=self, schema=schema, copytree=copytree))
return paths
def check(self):
"""Check the project's workspace for corruption.
Raises
------
JobsCorruptedError
When one or more jobs are identified as corrupted.
"""
corrupted = []
logger.info("Checking workspace for corruption...")
for job_id in self._find_job_ids():
try:
sp = self._get_statepoint(job_id)
if calc_id(sp) != job_id:
corrupted.append(job_id)
else:
self.open_job(sp).init()
except JobsCorruptedError as error:
corrupted.extend(error.job_ids)
if corrupted:
logger.error(
"At least one job appears to be corrupted. Call Project.repair() "
"to try to fix errors.")
raise JobsCorruptedError(corrupted)
    def repair(self, fn_statepoints=None, index=None, job_ids=None):
        """Attempt to repair the workspace after it got corrupted.

        This method will attempt to repair lost or corrupted job state point
        manifest files using a state points file or a document index or both.

        Parameters
        ----------
        fn_statepoints : str
            The filename of the file containing the state points, defaults
            to :attr:`~signac.Project.FN_STATEPOINTS`.
        index :
            A document index (Default value = None).
        job_ids :
            An iterable of job ids that should get repaired. Defaults to all jobs.

        Raises
        ------
        JobsCorruptedError
            When one or more corrupted job could not be repaired.
        """
        if job_ids is None:
            job_ids = self._find_job_ids()
        # Load internal cache from all available external sources.
        self._read_cache()
        try:
            self._sp_cache.update(self.read_statepoints(fn=fn_statepoints))
        except IOError as error:
            # A missing default state points file is tolerated, but a missing
            # explicitly-requested file is an error.
            if error.errno != errno.ENOENT or fn_statepoints is not None:
                raise
        if index is not None:
            # The index provides additional id -> state point mappings.
            for doc in index:
                self._sp_cache[doc['signac_id']] = doc['statepoint']
        corrupted = []
        for job_id in job_ids:
            try:
                # First, check if we can look up the state point.
                sp = self._get_statepoint(job_id)
                # Check if state point and id correspond.
                correct_id = calc_id(sp)
                if correct_id != job_id:
                    logger.warning(
                        "The job id of job '{}' is incorrect; "
                        "it should be '{}'.".format(job_id, correct_id))
                    invalid_wd = os.path.join(self.workspace(), job_id)
                    correct_wd = os.path.join(self.workspace(), correct_id)
                    try:
                        # Move the job directory to the directory matching the
                        # recomputed id.
                        os.replace(invalid_wd, correct_wd)
                    except OSError as error:
                        logger.critical(
                            "Unable to fix location of job with "
                            " id '{}': '{}'.".format(job_id, error))
                        corrupted.append(job_id)
                        continue
                    else:
                        logger.info("Moved job to correct workspace.")
                job = self.open_job(sp)
            except KeyError:
                # State point could not be recovered from any source.
                logger.critical("Unable to lookup state point for job with id '{}'.".format(job_id))
                corrupted.append(job_id)
            else:
                try:
                    # Try to reinit the job (triggers state point manifest file check).
                    job.init()
                except Exception as error:
                    logger.error(
                        "Error during initalization of job with "
                        "id '{}': '{}'.".format(job_id, error))
                    try:  # Attempt to fix the job manifest file.
                        job.init(force=True)
                    except Exception as error2:
                        logger.critical(
                            "Unable to force init job with id "
                            "'{}': '{}'.".format(job_id, error2))
                        corrupted.append(job_id)
        if corrupted:
            raise JobsCorruptedError(corrupted)
    def _sp_index(self):
        """Update and return the state point index cache.

        Returns
        -------
        dict_values
            A view of the cached index documents, one per job directory;
            each document is a dict with ``statepoint`` and ``_id`` keys.
            (Note: this is a values view over the cache, not a dict.)
        """
        # Reconcile the index cache with the set of job directories that
        # currently exist in the workspace.
        job_ids = set(self._job_dirs())
        to_add = job_ids.difference(self._index_cache)
        to_remove = set(self._index_cache).difference(job_ids)
        for _id in to_remove:
            del self._index_cache[_id]
        for _id in to_add:
            self._index_cache[_id] = dict(statepoint=self._get_statepoint(_id), _id=_id)
        return self._index_cache.values()
def _build_index(self, include_job_document=False):
"""Generate a basic state point index.
Parameters
----------
include_job_document :
Whether to include the job document in the index (Default value =
False).
"""
wd = self.workspace() if self.Job is Job else None
for _id in self._find_job_ids():
doc = dict(_id=_id, statepoint=self._get_statepoint(_id))
if include_job_document:
if wd is None:
doc.update(self.open_job(id=_id).document)
else: # use optimized path
try:
with open(os.path.join(wd, _id, self.Job.FN_DOCUMENT), 'rb') as file:
doc.update(json.loads(file.read().decode()))
except IOError as error:
if error.errno != errno.ENOENT:
raise
yield doc
    def _update_in_memory_cache(self):
        """Update the in-memory state point cache to reflect the workspace.

        Returns
        -------
        tuple or None
            The pair ``(to_add, to_remove)`` of job-id sets when the cache
            was out of date; ``None`` (implicitly) when no update was needed.
        """
        logger.debug("Updating in-memory cache...")
        start = time.time()
        job_ids = set(self._job_dirs())
        cached_ids = set(self._sp_cache)
        to_add = job_ids.difference(cached_ids)
        to_remove = cached_ids.difference(job_ids)
        if to_add or to_remove:
            for _id in to_remove:
                del self._sp_cache[_id]
            def _add(_id):
                # Read the state point for one job directly from its workspace
                # directory.
                self._sp_cache[_id] = self._get_statepoint_from_workspace(_id)
            to_add_chunks = split_and_print_progress(
                iterable=list(to_add),
                num_chunks=max(1, min(100, int(len(to_add) / 1000))),
                write=logger.info,
                desc="Read metadata: ")
            # Reading manifests is I/O bound, so a thread pool speeds it up.
            with ThreadPool() as pool:
                for chunk in to_add_chunks:
                    pool.map(_add, chunk)
            delta = time.time() - start
            logger.debug("Updated in-memory cache in {:.3f} seconds.".format(delta))
            return to_add, to_remove
        else:
            logger.debug("In-memory cache is up to date.")
def _remove_persistent_cache_file(self):
"""Remove the persistent cache file (if it exists)."""
try:
os.remove(self.fn(self.FN_CACHE))
except (OSError, IOError) as error:
if error.errno != errno.ENOENT:
raise error
    def update_cache(self):
        """Update the persistent state point cache.

        This function updates a persistent state point cache, which
        is stored in the project root directory. Most data space operations,
        including iteration and filtering or selection are expected
        to be significantly faster after calling this function, especially
        for large data spaces.

        Returns
        -------
        int or None
            The number of cached state points when the persistent cache was
            (re)written; ``None`` (implicitly) when it was already up to date.
        """
        logger.info('Update cache...')
        start = time.time()
        cache = self._read_cache()
        self._update_in_memory_cache()
        # Rewrite the persistent cache only when it is missing or its key set
        # differs from the in-memory cache.
        if cache is None or set(cache) != set(self._sp_cache):
            fn_cache = self.fn(self.FN_CACHE)
            # Write to a temporary file first so a failed write cannot leave a
            # truncated cache file behind.
            fn_cache_tmp = fn_cache + '~'
            try:
                with gzip.open(fn_cache_tmp, 'wb') as cachefile:
                    cachefile.write(json.dumps(self._sp_cache).encode())
            except OSError:  # clean-up
                try:
                    os.remove(fn_cache_tmp)
                except (OSError, IOError):
                    pass
                raise
            else:
                # Atomically replace the previous cache file.
                os.replace(fn_cache_tmp, fn_cache)
            delta = time.time() - start
            logger.info("Updated cache in {:.3f} seconds.".format(delta))
            return len(self._sp_cache)
        else:
            logger.info("Cache is up to date.")
def _read_cache(self):
"""Read the persistent state point cache (if available)."""
logger.debug("Reading cache...")
start = time.time()
try:
with gzip.open(self.fn(self.FN_CACHE), 'rb') as cachefile:
cache = json.loads(cachefile.read().decode())
self._sp_cache.update(cache)
except IOError as error:
if not error.errno == errno.ENOENT:
raise
logger.debug("No cache file found.")
else:
delta = time.time() - start
logger.debug("Read cache in {:.3f} seconds.".format(delta))
return cache
def index(self, formats=None, depth=0,
skip_errors=False, include_job_document=True):
r"""Generate an index of the project's workspace.
This generator function indexes every file in the project's
workspace until the specified `depth`.
The job document if it exists, is always indexed, other
files need to be specified with the formats argument.
.. code-block:: python
for doc in project.index({r'.*\.txt', 'TextFile'}):
print(doc)
Parameters
----------
formats : str, dict
The format definitions as a pattern string (e.g. ``r'.*\.txt'``)
or a mapping from pattern strings to formats (e.g.
``'TextFile'``). If None, only the job document is indexed
(Default value = None).
depth : int
Specifies the crawling depth. A value of 0 means no limit
(Default value = 0).
skip_errors : bool
Skip all errors which occur during indexing. This is useful when
trying to repair a broken workspace (Default value = False).
include_job_document : bool
Include the contents of job documents (Default value = True).
Yields
------
dict
Index document.
"""
if formats is None:
root = self.workspace()
def _full_doc(doc):
"""Add `signac_id` and `root` to the index document.
Parameters
----------
doc : dict
Index document.
Returns
-------
dict
Modified index document.
"""
doc['signac_id'] = doc['_id']
doc['root'] = root
return doc
docs = self._build_index(include_job_document=include_job_document)
docs = map(_full_doc, docs)
else:
if isinstance(formats, str):
formats = {formats: 'File'}
class Crawler(SignacProjectCrawler):
pass
for pattern, fmt in formats.items():
Crawler.define(pattern, fmt)
crawler = Crawler(self.root_directory())
docs = | |
<filename>text_processing.py<gh_stars>0
"""
This module is a part of system for the automatic enrichment
of a WordNet-like taxonomy.
Copyright 2020 <NAME>, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import copy
from functools import reduce
import gzip
import json
import os
import re
from typing import Dict, List, Set, Tuple, Union
from gensim.models.fasttext import FastText
from nltk import wordpunct_tokenize
import numpy as np
from rusenttokenize import ru_sent_tokenize
from scipy.spatial.distance import cdist
from trainset_preparing import calculate_sentence_matrix
def tokenize(source_text: str) -> List[str]:
    """ Prepare and tokenize a text.
    Replaces all kinds of Unicode dashes with a simple ASCII dash and the one- and two-dot
    Unicode ellipsis characters with their ASCII equivalents, tokenizes the transformed text
    using the `nltk.wordpunct_tokenize` function, lower-cases the tokens and removes all
    punctuation except ".", ",", "-", ":", ";", "(" and ")".
    :param source_text: an input text for processing and tokenization.
    :return: a result as a Python's list of strings.
    """
    # U+2010..U+2015 and U+2043 are the Unicode hyphen/dash characters.
    # (The previous version listed every character twice - once as an escape
    # sequence and once as a literal of the same codepoint - which made the
    # regex alternation redundant; a single character class is equivalent.)
    re_for_dashes = re.compile("[\u2010\u2011\u2012\u2013\u2014\u2015\u2043]")
    prepared_text = re_for_dashes.sub("-", source_text)
    # U+2026 is the horizontal ellipsis, U+2025 the two-dot leader.
    prepared_text = prepared_text.replace("\u2026", "...").replace("\u2025", "..")
    allowed_punctuation = {".", ",", "-", ":", ";", "(", ")"}
    return list(filter(
        lambda it2: (len(it2) > 0) and (it2.isalnum() or (it2 in allowed_punctuation)),
        map(lambda it1: it1.strip().lower(), wordpunct_tokenize(prepared_text))
    ))
def load_news(corpus_dir_name: str):
    """ Load the news corpus, prepared for the competition, and create a generator.
    Each corpus file is a gzipped file named `news_df_<N>.csv.gz` with a comma-separated
    header line `file_name,file_sentences`, followed by tab-separated data lines of the form
    `<url>\\t<JSON list of texts>`.
    :param corpus_dir_name: a directory with the news corpus files.
    :return: a generator yielding each news text.
    """
    re_for_filename = re.compile(r'^news_df_\d+.csv.gz$')
    normalized_dir = os.path.normpath(corpus_dir_name)
    data_files = [
        os.path.join(normalized_dir, file_name)
        for file_name in os.listdir(normalized_dir)
        if re_for_filename.match(file_name.lower()) is not None
    ]
    assert len(data_files) > 0
    true_header = ["file_name", "file_sentences"]
    for cur_file in data_files:
        with gzip.open(cur_file, mode="rt", encoding="utf-8") as fp:
            loaded_header = []
            for line_idx, cur_line in enumerate(fp, start=1):
                prep_line = cur_line.strip()
                if len(prep_line) == 0:
                    continue
                err_msg = 'File `{0}`: line {1} is wrong!'.format(cur_file, line_idx)
                # The header line is comma-separated; data lines are tab-separated.
                separator = "," if len(loaded_header) == 0 else "\t"
                line_parts = list(filter(
                    lambda it2: len(it2) > 0,
                    map(lambda it1: it1.strip(), prep_line.split(separator))
                ))
                if len(line_parts) > 2:
                    line_parts = [line_parts[0], " ".join(line_parts[1:])]
                assert len(line_parts) == 2, err_msg
                if len(loaded_header) == 0:
                    assert line_parts == true_header, err_msg
                    loaded_header = line_parts
                else:
                    url = line_parts[0].lower()
                    text_list = line_parts[1]
                    assert url.endswith(".htm") or url.endswith(".html"), err_msg
                    try:
                        # BUGFIX: the deprecated `encoding` keyword of json.loads was
                        # removed in Python 3.9; passing it raised a TypeError that the
                        # previous bare `except:` silently turned into a parse failure.
                        data = json.loads(text_list)
                    except ValueError:
                        # json.JSONDecodeError is a subclass of ValueError.
                        data = None
                    assert data is not None, err_msg
                    assert isinstance(data, list), err_msg
                    assert len(data) > 0, err_msg
                    yield from data
def load_wiki(file_name: str):
    """ Load the Wikipedia dump, tokenize all texts from this dump by sentences and create a generator.
    :param file_name: a gzipped UTF-8 text file with the Wikipedia dump (one text per line).
    :return: a generator for each sentence of each non-empty line (sentences are stripped of whitespace).
    """
    with gzip.open(file_name, mode="rt", encoding="utf-8") as fp:
        for raw_line in fp:
            stripped_line = raw_line.strip()
            if not stripped_line:
                continue
            for sentence in ru_sent_tokenize(stripped_line):
                sentence = sentence.strip()
                if sentence:
                    yield sentence
def prepare_senses_index_for_search(senses_dict: Dict[str, Dict[str, Tuple[tuple, Tuple[int, int]]]]) -> \
        Dict[str, Set[str]]:
    """ Build a search index for a fast selection of sentence candidates, which contain some sense from the RuWordNet.
    Exhaustively matching every RuWordNet term (sense) against every sentence would be O(n) per
    sentence in the number of terms. Instead, a hash table from the single words of the terms'
    main parts to the IDs of the terms containing them allows an O(1) pre-selection of candidate
    senses, after which a full match is only run on that small sub-set.
    :param senses_dict: a dictionary with inflected terms (see `ruwordnet_parsing.load_and_inflect_senses` function).
    :return: the created search index as a dictionary "word" -> "set of sense IDs".
    """
    index = dict()
    for sense_id, morpho_variants in senses_dict.items():
        # Each variant is a pair: (tokens, (main_word_start, main_word_end)).
        for tokens, (main_start, main_end) in morpho_variants.values():
            for main_token in tokens[main_start:main_end]:
                if main_token.isalnum():
                    index.setdefault(main_token, set()).add(sense_id)
    return index
def startswith(full_text: tuple, subphrase: tuple) -> int:
    """ Check that the specified text starts with the specified subphrase without considering of punctuation.
    Text and subphrase are tokenized, i.e. they are tuples of strings. Matching is realized recursively.
    :param full_text: a tokenized text (tuple of strings).
    :param subphrase: a tokenized subphrase (tuple of strings).
    :return: a number of text's words, which coincide with all subphrase's words (0 means "no match").
    """
    n_full = len(full_text)
    n_sub = len(subphrase)
    # An empty text or an empty subphrase can never match.
    if (n_sub == 0) or (n_full == 0):
        return 0
    if n_sub > n_full:
        return 0
    # Fast paths: the subphrase equals the whole text, or its literal prefix.
    if ' '.join(full_text) == ' '.join(subphrase):
        return n_full
    if ' '.join(full_text[0:n_sub]) == ' '.join(subphrase):
        return n_sub
    if full_text[0].isalnum() and subphrase[0].isalnum():
        # Both heads are words: they must be equal, then match the tails.
        if full_text[0] != subphrase[0]:
            return 0
        res = startswith(full_text[1:], subphrase[1:])
        if res == 0:
            return 0
        return res + 1
    if (not full_text[0].isalnum()) and (not subphrase[0].isalnum()):
        # Both heads are punctuation: skip one token on each side.
        if (n_full < 2) or (n_sub < 2):
            return 0
        res = startswith(full_text[1:], subphrase[1:])
        if res == 0:
            return 0
        return res + 1
    if full_text[0].isalnum():
        # Only the subphrase head is punctuation: drop it without consuming
        # a token of the text.
        return startswith(full_text, subphrase[1:])
    # Only the text head is punctuation: skip it, but count it in the result,
    # because the returned length indexes into the text.
    res = startswith(full_text[1:], subphrase)
    if res == 0:
        return 0
    return res + 1
def find_subphrase(full_text: tuple, subphrase: tuple) -> Union[Tuple[int, int], None]:
    """ Find bounds of the specified subphrase in the specified text without considering of punctuation.
    For example, if we want to find bounds of the subphrase ("hello", "how", "are", "you") in the text ("oh", ",",
    "hello", "!", "how", "are", "you", "doing", "today", "?"), then we expect the result (2, 7). The same result will be
    expected, if the above-mentioned subphrase contains a comma between "hello" and "how", for example. But if the text
    will be modified in the following way, e.g. ("oh", ",", "hello", "how", "are", "you", "doing", "today", "?"), then
    we expect another result (2, 6).
    :param full_text: a tokenized text (tuple of strings).
    :param subphrase: a tokenized subphrase (tuple of strings).
    :return: a two-element tuple, i.e. bounds of subphrase, if these bounds are found, and None in another case.
    """
    assert subphrase[0].isalnum() and subphrase[-1].isalnum(), \
        "The subphrase `{0}` is wrong! Any subphrase must be started and ended with alphabetic or " \
        "numeric words!".format(' '.join(subphrase))
    if len(subphrase) > len(full_text):
        return None
    for start_idx in range(len(full_text)):
        # Only try to match where the first subphrase word occurs.
        if full_text[start_idx] != subphrase[0]:
            continue
        matched_length = startswith(full_text[start_idx:], subphrase)
        if matched_length > 0:
            return start_idx, start_idx + matched_length
    return None
def find_senses_in_text(tokenized_text: tuple, senses_dict: Dict[str, Dict[str, Tuple[tuple, Tuple[int, int]]]],
search_index_for_senses: Dict[str, Set[str]]) -> \
Union[Dict[str, Dict[str, Tuple[int, int]]], None]:
""" Analyze an input sentence and find all admissible occurrences of the RuWordNet's terms (senses).
:param tokenized_text: an input sentence, which is tokenized using the `tokenize` function.
:param senses_dict: a dictionary with inflected terms (see `ruwordnet_parsing.load_and_inflect_senses` function).
:param search_index_for_senses: a search index, which is built using the `prepare_senses_index_for_search` function.
:return: None or the Python's dictionary "sense ID" -> "morphotag" -> "bounds in the sentence"
"""
filtered_sense_IDs = set()
for token in tokenized_text:
if token.isalnum():
filtered_sense_IDs |= search_index_for_senses.get(token, set())
if len(filtered_sense_IDs) == 0:
return None
res = dict()
for sense_ID in filtered_sense_IDs:
founds = dict()
for morpho_tag in senses_dict[sense_ID]:
sense_tokens = senses_dict[sense_ID][morpho_tag][0]
sense_bounds = find_subphrase(full_text=tokenized_text, subphrase=sense_tokens)
if sense_bounds is not None:
founds[morpho_tag] = sense_bounds
if len(founds) > 0:
res[sense_ID] = founds
del founds
| |
<reponame>onap/dcaegen2-deployments<filename>dcae-services-policy-sync/policysync/clients.py<gh_stars>0
# ============LICENSE_START=======================================================
# Copyright (c) 2021 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
"""Clients for communicating with both the post dublin and pre dublin APIs"""
import json
import re
import base64
import uuid
import asyncio
import aiohttp
import policysync.metrics as metrics
from .util import get_module_logger
logger = get_module_logger(__name__)
# Websocket config
WS_HEARTBEAT = 60
WS_NOTIFICATIONS_ENDPOINT = "pdp/notifications"
# REST config
V1_DECISION_ENDPOINT = "policy/pdpx/v1/decision"
V0_DECISION_ENDPOINT = "pdp/api"
APPLICATION_JSON = "application/json"
def get_single_regex(filters, ids):
    """Given a list of regex filters and a list of policy ids, build a single
    regex (an alternation) that matches any of them.

    :param filters: optional list of regex strings, used as-is
    :param ids: optional list of policy id strings; each id is expanded to
        match versioned policy file names of the form ``<id>.<version>.xml``
    :returns: the combined regex string ("" when both inputs are empty/None)
    """
    filters = [] if filters is None else filters
    ids = [] if ids is None else ["{}[.][0-9]+[.]xml".format(x) for x in ids]
    # `filters` has already been normalized above, so the previous
    # `if filters is not None` guard was dead code and has been removed.
    return "|".join(filters + ids)
class BasePolicyClient:
    """Base policy client that is pluggable into inventory.

    Holds the PDP URL and a lazily-created aiohttp REST session, and provides
    the common request plumbing; subclasses implement the actual policy API.
    """

    def __init__(self, pdp_url, headers=None):
        self.headers = headers if headers is not None else {}
        self.session = None
        self.pdp_url = pdp_url

    def _init_rest_session(self):
        """Create (on first use) and return the shared aiohttp REST session.

        :returns: an aiohttp rest session
        """
        if self.session is None:
            self.session = aiohttp.ClientSession(
                headers=self.headers, raise_for_status=True
            )
        return self.session

    async def _run_request(self, endpoint, request_data):
        """POST `request_data` as JSON to `<pdp_url>/<endpoint>`.

        :param endpoint: str rest endpoint to query
        :param request_data: dictionary request data
        :returns: dictionary response data
        """
        rest_session = self._init_rest_session()
        async with rest_session.post(
            f"{self.pdp_url}/{endpoint}", json=request_data
        ) as resp:
            raw_body = await resp.read()
        return json.loads(raw_body)

    def supports_notifications(self):
        """Whether this client supports real-time notifications.

        :returns: True
        """
        # in derived classes we may use self
        # pylint: disable=no-self-use
        return True

    async def list_policies(self, filters=None, ids=None):
        """Get a list of policy names matching the given filters or ids.

        :param filters: list of regex filter strings for matching
        :param ids: list of id strings for matching
        :returns: List of policies matching filters or ids
        """
        raise NotImplementedError

    async def get_config(self, filters=None, ids=None):
        """Get the policy configurations matching the given filters or ids.

        :returns: List of policies matching filters or ids
        """
        raise NotImplementedError

    async def notificationhandler(self, callback, ids=None, filters=None):
        """Subclasses implement this to support real-time notifications.

        :param callback: func to execute when a matching notification is found
        :param ids: list of id strings for matching
        """
        raise NotImplementedError

    async def close(self):
        """Close the policy client and its REST session."""
        logger.info("closing websocket clients...")
        if self.session:
            await self.session.close()
class PolicyClientV0(BasePolicyClient):
"""
Supports the legacy v0 policy API use prior to ONAP Dublin
"""
async def close(self):
""" close the policy client """
await super().close()
if self.ws_session is not None:
await self.ws_session.close()
def __init__(
self,
headers,
pdp_url,
decision_endpoint=V0_DECISION_ENDPOINT,
ws_endpoint=WS_NOTIFICATIONS_ENDPOINT
):
"""
Initialize a v0 policy client
:param headers: Headers to use for policy rest api
:param pdp_url: URL of the PDP
:param decision_endpoint: root for the decison API
:param websocket_endpoint: root of the websocket endpoint
"""
super().__init__(pdp_url, headers=headers)
self.ws_session = None
self.session = None
self.decision_endpoint = decision_endpoint
self.ws_endpoint = ws_endpoint
self._ws = None
def _init_ws_session(self):
"""initialize a websocket session for notifications"""
if self.ws_session is None:
self.ws_session = aiohttp.ClientSession()
return self.ws_session
@metrics.list_policy_exceptions.count_exceptions()
async def list_policies(self, filters=None, ids=None):
"""
used to get a list of policies matching a particular ID
:param filters: list of regex filter strings for matching
:param ids: list of id strings for matching
:returns: List of policies matching filters or ids
"""
request_data = self._prepare_request(filters, ids)
policies = await self._run_request(
f"{self.decision_endpoint}/listPolicy", request_data
)
return set(policies)
@classmethod
def _prepare_request(cls, filters, ids):
"""prepare the request body for the v0 api"""
regex = get_single_regex(filters, ids)
return {"policyName": regex}
@metrics.get_config_exceptions.count_exceptions()
async def get_config(self, filters=None, ids=None):
"""
Used to get the actual policy configuration from PDP
:return: the policy objects that are currently active
for the given set of filters
"""
request_data = self._prepare_request(filters, ids)
policies = await self._run_request(
f"{self.decision_endpoint}/getConfig", request_data)
for policy in policies:
try:
policy["config"] = json.loads(policy["config"])
except json.JSONDecodeError:
pass
return policies
@classmethod
def _needs_update(cls, update, ids=None, filters=None):
"""
Expect something like this
{
"removedPolicies": [{
"policyName": "xyz.45.xml",
"versionNo": "45"
}],
"loadedPolicies": [{
"policyName": "xyz.46.xml",
"versionNo": "46",
"matches": {
"ONAPName": "DCAE",
"ConfigName": "DCAE_HighlandPark_AgingConfig",
"service": "DCAE_HighlandPark_AgingConfig",
"guard": "false",
"location": " Edge",
"TTLDate": "NA",
"uuid": "TestUUID",
"RiskLevel": "5",
"RiskType": "default"
},
"updateType": "UPDATE"
}],
"notificationType": "BOTH"
}
"""
for policy in update.get("removedPolicies", []) + update.get(
"loadedPolicies", []
):
if (
re.match(get_single_regex(filters, ids), policy["policyName"])
is not None
):
return True
return False
async def notificationhandler(self, callback, ids=None, filters=None):
    """
    Websocket based notification handler.

    Connects to the PDP notification websocket and awaits ``callback``
    whenever a notification matching ``ids``/``filters`` arrives.

    :param callback: coroutine function to execute when
        a matching notification is found
    :param ids: list of id strings for matching
    :param filters: list of regex filter strings for matching
    """
    url = self.pdp_url.replace("https", "wss")
    # The websocket we start here will periodically
    # send heartbeat (ping frames) to policy
    # this ensures that we are never left hanging
    # with our communication with policy.
    session = self._init_ws_session()
    try:
        websocket = await session.ws_connect(
            "{0}/{1}".format(url, self.ws_endpoint), heartbeat=WS_HEARTBEAT
        )
        logger.info("websock with policy established")
        async for msg in websocket:
            # check for websocket errors
            # break out of this async for loop. to attempt reconnection
            if msg.type in (aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.ERROR):
                break
            # BUG FIX: compare enum members with ==, not `is` against a
            # parenthesized value (identity is not guaranteed for ints/enums)
            if msg.type == aiohttp.WSMsgType.TEXT:
                if self._needs_update(
                        json.loads(msg.data),
                        ids=ids,
                        filters=filters
                ):
                    logger.debug(
                        "notification received from pdp websocket -> %s", msg
                    )
                    await callback()
            else:
                logger.warning(
                    "unexpected websocket message type received %s", msg.type
                )
    except aiohttp.ClientError:
        logger.exception("Received connection error with websocket")
class PolicyClientV1(BasePolicyClient):
"""
Supports the v1 policy API introduced in ONAP's dublin release
"""
async def close(self):
    """Close the policy client and, if one was opened, the dmaap session."""
    await super().close()
    session = self.dmaap_session
    if session is not None:
        await session.close()
def _init_dmaap_session(self):
    """Lazily create (and cache) the aiohttp session used for dmaap polling."""
    if self.dmaap_session is None:
        session = aiohttp.ClientSession(
            headers=self.dmaap_headers,
            raise_for_status=True
        )
        self.dmaap_session = session
    return self.dmaap_session
def __init__(
    self,
    headers,
    pdp_url,
    **kwargs,
):
    """
    Create a v1 policy client.

    :param headers: HTTP headers for requests against the PDP
    :param pdp_url: base URL of the PDP
    :param kwargs: optional settings: ``dmaap_url``, ``dmaap_headers``
        and ``v1_decision`` (decision endpoint override)
    """
    super().__init__(pdp_url, headers=headers)
    self._ws = None
    # unique id identifying this client instance in decision requests
    self.audit_uuid = str(uuid.uuid4())
    self.decision = kwargs.get('v1_decision', V1_DECISION_ENDPOINT)
    # dmaap-based notification settings (all optional)
    self.dmaap_url = kwargs.get('dmaap_url')
    self.dmaap_headers = kwargs.get('dmaap_headers', {})
    self.dmaap_session = None
    self.dmaap_timeout = 15000
async def list_policies(self, filters=None, ids=None):
    """
    ONAP's v1 API has no real equivalent of the v0 listPolicy call.

    :returns: None
    """
    # in derived classes we may use self
    # pylint: disable=no-self-use
    return None
@classmethod
def convert_to_policy(cls, policy_body):
    """
    Convert raw policy to format expected by microservices.

    :param policy_body: raw dictionary output from pdp
    :returns: policy dict in the v0-compatible format, or None if the
        input is missing its policy-id or version metadata
    """
    pdp_metadata = policy_body.get("metadata", {})
    policy_id = pdp_metadata.get("policy-id")
    policy_version = policy_body.get("version")
    if not policy_id or policy_version is None:
        logger.warning("Malformed policy is missing policy-id and version")
        return None
    # BUG FIX: normalize the version to a string *before* .replace();
    # the pdp may deliver it as a number, which has no .replace method
    policy_version = str(policy_version)
    policy_body["policyName"] = "{}.{}.xml".format(
        policy_id, policy_version.replace(".", "-")
    )
    policy_body["policyVersion"] = policy_version
    # v1 exposes the configuration under "properties"; rename it to the
    # v0-style "config" key consumers expect
    if "properties" in policy_body:
        policy_body["config"] = policy_body["properties"]
        del policy_body["properties"]
    return policy_body
@metrics.get_config_exceptions.count_exceptions()
async def get_config(self, filters=None, ids=None):
    """
    Fetch the currently active policy configurations from the PDP.

    :param filters: unused by the v1 API
    :param ids: list of policy-id strings to fetch
    :returns: the policy objects that are currently active
        for the given set of filters
    """
    request_data = {
        "ONAPName": "DCAE",
        "ONAPComponent": "policy-sync",
        "ONAPInstance": self.audit_uuid,
        "action": "configure",
        "resource": {"policy-id": ids if ids is not None else []}
    }
    data = await self._run_request(self.decision, request_data)
    # convert each raw policy, dropping any malformed ones (None results)
    converted = (self.convert_to_policy(body) for body in data["policies"].values())
    return [policy for policy in converted if policy is not None]
def supports_notifications(self):
    """
    Does this policy client support real time notifications?

    :returns: True if the dmaap url is set, False otherwise
    """
    has_dmaap = self.dmaap_url is not None
    return has_dmaap
@classmethod
def _needs_update(cls, update, ids):
    """
    Decide whether a dmaap notification concerns any watched policy.

    The payload carries "deployed-policies" and "undeployed-policies"
    lists whose entries look like::

        {
            "policy-type": "onap.policies.monitoring.tcagen2",
            "policy-type-version": "1.0.0",
            "policy-id": "onap.scaleout.tca",
            "policy-version": "2.0.0",
            "success-count": 3,
            "failure-count": 0
        }

    :param update: parsed dmaap notification payload
    :param ids: collection of policy-id strings we care about
    :returns: True when any deployed or undeployed policy id is watched
    """
    touched = update.get("deployed-policies", []) + update.get("undeployed-policies", [])
    return any(policy.get("policy-id") in ids for policy in touched)
async def poll_dmaap(self, callback, ids=None):
"""
one GET request to dmaap
:param callback: function to execute when a
matching notification is found
:param ids: | |
),
)
# -- compound statements: case/assert/if/while/for/exec/try/with --
# NOTE(review): these are pyparsing combinators; `|` is ordered choice and
# `-` marks a point of no backtracking (a failure after it is a hard error).
case_stmt_py_syntax = (
    match_kwd + testlist_star_namedexpr + colon.suppress() + newline.suppress()
    + indent.suppress() + Group(OneOrMore(case_match_py_syntax))
    + dedent.suppress() + Optional(keyword("else").suppress() - suite)
)
case_stmt_ref = case_stmt_co_syntax | case_stmt_py_syntax
assert_stmt = addspace(keyword("assert") - testlist)
if_stmt = condense(
    addspace(keyword("if") + condense(namedexpr_test + suite))
    - ZeroOrMore(addspace(keyword("elif") - condense(namedexpr_test - suite)))
    - Optional(else_stmt),
)
while_stmt = addspace(keyword("while") - condense(namedexpr_test - suite - Optional(else_stmt)))
for_stmt = addspace(keyword("for") - assignlist - keyword("in") - condense(testlist - suite - Optional(else_stmt)))
# exec(...) with up to three comma-separated arguments
exec_stmt = Forward()
exec_stmt_ref = keyword("exec").suppress() + lparen.suppress() + test + Optional(
    comma.suppress() + test + Optional(
        comma.suppress() + test + Optional(
            comma.suppress(),
        ),
    ),
) + rparen.suppress()
# except clauses: `except A, B as e` / `except A as e`, plus except* groups
except_item = (
    testlist_has_comma("list")
    | test("test")
) - Optional(
    keyword("as").suppress() - name,
)
except_clause = attach(except_kwd + except_item, except_handle)
except_star_clause = Forward()
except_star_clause_ref = attach(except_star_kwd + except_item, except_handle)
try_stmt = condense(
    keyword("try") - suite + (
        keyword("finally") - suite
        | (
            OneOrMore(except_clause - suite) - Optional(except_kwd - suite)
            | except_kwd - suite
            | OneOrMore(except_star_clause - suite)
        ) - Optional(else_stmt) - Optional(keyword("finally") - suite)
    ),
)
with_item = addspace(test + Optional(keyword("as") + base_assign_item))
with_item_list = Group(maybeparens(lparen, tokenlist(with_item, comma), rparen))
with_stmt_ref = keyword("with").suppress() - with_item_list - suite
with_stmt = Forward()
# -- function definitions: standard `def`, operator (backtick) defs,
#    pattern-matching defs, and assignment-style ("math") defs --
return_typedef = Forward()
name_funcdef = trace(condense(dotted_name + parameters))
op_tfpdef = unsafe_typedef_default | condense(name + Optional(default))
op_funcdef_arg = name | condense(lparen.suppress() + op_tfpdef + rparen.suppress())
op_funcdef_name = unsafe_backtick.suppress() + dotted_name + unsafe_backtick.suppress()
# operator definition: optional left arg, backticked name, optional right arg
op_funcdef = trace(
    attach(
        Group(Optional(op_funcdef_arg))
        + op_funcdef_name
        + Group(Optional(op_funcdef_arg)),
        op_funcdef_handle,
    ),
)
return_typedef_ref = arrow.suppress() + typedef_test
end_func_colon = return_typedef + colon.suppress() | colon
base_funcdef = op_funcdef | name_funcdef
funcdef = trace(addspace(keyword("def") + condense(base_funcdef + end_func_colon + nocolon_suite)))
name_match_funcdef = Forward()
op_match_funcdef = Forward()
op_match_funcdef_arg = Group(
    Optional(
        lparen.suppress()
        + Group(match + Optional(equals.suppress() + test))
        + rparen.suppress(),
    ),
)
name_match_funcdef_ref = keyword("def").suppress() + dotted_name + lparen.suppress() + match_args_list + match_guard + rparen.suppress()
op_match_funcdef_ref = keyword("def").suppress() + op_match_funcdef_arg + op_funcdef_name + op_match_funcdef_arg + match_guard
base_match_funcdef = trace(op_match_funcdef | name_match_funcdef)
# pattern-matching def body: either a one-line simple stmt or a full suite
def_match_funcdef = trace(
    attach(
        base_match_funcdef
        + end_func_colon
        - (
            attach(simple_stmt, make_suite_handle)
            | (
                newline.suppress()
                - indent.suppress()
                - Optional(docstring)
                - attach(condense(OneOrMore(stmt)), make_suite_handle)
                - dedent.suppress()
            )
        ),
        join_match_funcdef,
    ),
)
match_def_modifiers = trace(
    Optional(
        # we don't suppress addpattern so its presence can be detected later
        match_kwd.suppress() + Optional(addpattern_kwd)
        | addpattern_kwd + Optional(match_kwd.suppress()),
    ),
)
match_funcdef = addspace(match_def_modifiers + def_match_funcdef)
# `<stmt> where: <suite>` statements
where_stmt = attach(
    unsafe_simple_stmt_item
    + where_kwd.suppress()
    - full_suite,
    where_handle,
)
implicit_return = (
    invalid_syntax(return_stmt, "expected expression but got return statement")
    | attach(return_testlist, implicit_return_handle)
)
implicit_return_where = attach(
    implicit_return
    + where_kwd.suppress()
    - full_suite,
    where_handle,
)
implicit_return_stmt = (
    condense(implicit_return + newline)
    | implicit_return_where
)
# assignment-style defs: body must end in an implicit return expression
math_funcdef_body = condense(ZeroOrMore(~(implicit_return_stmt + dedent) + stmt) - implicit_return_stmt)
math_funcdef_suite = (
    attach(implicit_return_stmt, make_suite_handle)
    | condense(newline - indent - math_funcdef_body - dedent)
)
end_func_equals = return_typedef + equals.suppress() | fixto(equals, ":")
math_funcdef = trace(
    attach(
        condense(addspace(keyword("def") + base_funcdef) + end_func_equals) - math_funcdef_suite,
        math_funcdef_handle,
    ),
)
math_match_funcdef = trace(
    addspace(
        match_def_modifiers
        + attach(
            base_match_funcdef
            + end_func_equals
            + (
                attach(implicit_return_stmt, make_suite_handle)
                | (
                    newline.suppress() - indent.suppress()
                    + Optional(docstring)
                    + attach(math_funcdef_body, make_suite_handle)
                    + dedent.suppress()
                )
            ),
            join_match_funcdef,
        ),
    ),
)
# -- async / yield function variants, `data` definitions, and decorators --
async_stmt = Forward()
async_stmt_ref = addspace(async_kwd + (with_stmt | for_stmt))
async_funcdef = async_kwd.suppress() + (funcdef | math_funcdef)
# all permutations of match/addpattern/async modifier orderings
async_match_funcdef = trace(
    addspace(
        (
            # we don't suppress addpattern so its presence can be detected later
            match_kwd.suppress() + addpattern_kwd + async_kwd.suppress()
            | addpattern_kwd + match_kwd.suppress() + async_kwd.suppress()
            | match_kwd.suppress() + async_kwd.suppress() + Optional(addpattern_kwd)
            | addpattern_kwd + async_kwd.suppress() + Optional(match_kwd.suppress())
            | async_kwd.suppress() + match_def_modifiers
        ) + (def_match_funcdef | math_match_funcdef),
    ),
)
yield_normal_funcdef = keyword("yield").suppress() + funcdef
yield_match_funcdef = trace(
    addspace(
        (
            # must match async_match_funcdef above with async_kwd -> keyword("yield")
            match_kwd.suppress() + addpattern_kwd + keyword("yield").suppress()
            | addpattern_kwd + match_kwd.suppress() + keyword("yield").suppress()
            | match_kwd.suppress() + keyword("yield").suppress() + Optional(addpattern_kwd)
            | addpattern_kwd + keyword("yield").suppress() + Optional(match_kwd.suppress())
            | keyword("yield").suppress() + match_def_modifiers
        ) + def_match_funcdef,
    ),
)
yield_funcdef = attach(yield_normal_funcdef | yield_match_funcdef, yield_funcdef_handle)
# `data` definitions (Coconut's algebraic-data-type syntax)
datadef = Forward()
data_args = Group(
    Optional(
        lparen.suppress() + ZeroOrMore(
            Group(
                # everything here must end with arg_comma
                (name + arg_comma.suppress())("name")
                | (name + equals.suppress() + test + arg_comma.suppress())("default")
                | (star.suppress() + name + arg_comma.suppress())("star")
                | (name + colon.suppress() + typedef_test + equals.suppress() + test + arg_comma.suppress())("type default")
                | (name + colon.suppress() + typedef_test + arg_comma.suppress())("type"),
            ),
        ) + rparen.suppress(),
    ),
) + Optional(keyword("from").suppress() + testlist)
data_suite = Group(
    colon.suppress() - (
        (newline.suppress() + indent.suppress() + Optional(docstring) + Group(OneOrMore(stmt)) - dedent.suppress())("complex")
        | (newline.suppress() + indent.suppress() + docstring - dedent.suppress() | docstring)("docstring")
        | simple_stmt("simple")
    ) | newline("empty"),
)
datadef_ref = data_kwd.suppress() + name + data_args + data_suite
match_datadef = Forward()
match_data_args = lparen.suppress() + Group(
    match_args_list + match_guard,
) + rparen.suppress() + Optional(keyword("from").suppress() + testlist)
match_datadef_ref = Optional(match_kwd.suppress()) + data_kwd.suppress() + name + match_data_args + data_suite
simple_decorator = condense(dotted_name + Optional(function_call))("simple")
complex_decorator = namedexpr_test("complex")
decorators_ref = OneOrMore(at.suppress() - Group(longest(simple_decorator, complex_decorator)) - newline.suppress())
# -- statement wiring: tie the Forward declarations together and build the
#    three top-level entry points (single/file/eval parsers) --
decorators = Forward()
decoratable_normal_funcdef_stmt = Forward()
normal_funcdef_stmt = (
    funcdef
    | math_funcdef
    | math_match_funcdef
    | match_funcdef
    | yield_funcdef
)
decoratable_normal_funcdef_stmt_ref = Optional(decorators) + normal_funcdef_stmt
decoratable_async_funcdef_stmt = Forward()
async_funcdef_stmt = async_funcdef | async_match_funcdef
decoratable_async_funcdef_stmt_ref = Optional(decorators) + async_funcdef_stmt
decoratable_func_stmt = decoratable_normal_funcdef_stmt | decoratable_async_funcdef_stmt
class_stmt = classdef | datadef | match_datadef
decoratable_class_stmt = trace(condense(Optional(decorators) + class_stmt))
passthrough_stmt = condense(passthrough_block - (base_suite | newline))
simple_compound_stmt = trace(
    if_stmt
    | try_stmt
    | match_stmt
    | passthrough_stmt,
)
compound_stmt = trace(
    decoratable_class_stmt
    | decoratable_func_stmt
    | for_stmt
    | while_stmt
    | with_stmt
    | async_stmt
    | simple_compound_stmt
    | where_stmt,
)
endline_semicolon = Forward()
endline_semicolon_ref = semicolon.suppress() + newline
keyword_stmt = trace(
    flow_stmt
    | import_stmt
    | assert_stmt
    | pass_stmt
    | del_stmt
    | global_stmt
    | nonlocal_stmt
    | exec_stmt,
)
special_stmt = (
    keyword_stmt
    | augassign_stmt
    | typed_assign_stmt
)
unsafe_simple_stmt_item <<= special_stmt | longest(basic_stmt, destructuring_stmt)
simple_stmt_item <<= (
    special_stmt
    | basic_stmt + end_simple_stmt_item
    | destructuring_stmt + end_simple_stmt_item
)
simple_stmt <<= condense(
    simple_stmt_item
    + ZeroOrMore(fixto(semicolon, "\n") + simple_stmt_item)
    + (newline | endline_semicolon),
)
stmt <<= final(
    compound_stmt
    | simple_stmt
    # must come at end due to ambiguity with destructuring
    | case_stmt,
)
base_suite <<= condense(newline + indent - OneOrMore(stmt) - dedent)
simple_suite = attach(stmt, make_suite_handle)
nocolon_suite <<= base_suite | simple_suite
suite <<= condense(colon + nocolon_suite)
line = trace(newline | stmt)
single_input = trace(condense(Optional(line) - ZeroOrMore(newline)))
file_input = trace(condense(moduledoc_marker - ZeroOrMore(line)))
eval_input = trace(condense(testlist - ZeroOrMore(newline)))
single_parser = start_marker - single_input - end_marker
file_parser = start_marker - file_input - end_marker
eval_parser = start_marker - eval_input - end_marker
# end: MAIN GRAMMAR
# -----------------------------------------------------------------------------------------------------------------------
# EXTRA GRAMMAR:
# -----------------------------------------------------------------------------------------------------------------------
just_non_none_atom = start_marker + ~keyword("None") + known_atom + end_marker
# raw-text matchers for balanced bracket groups (no inner structure kept)
parens = originalTextFor(nestedExpr("(", ")"))
brackets = originalTextFor(nestedExpr("[", "]"))
braces = originalTextFor(nestedExpr("{", "}"))
any_char = regex_item(r".", re.DOTALL)
original_function_call_tokens = lparen.suppress() + (
    rparen.suppress()
    # we need to add parens here, since f(x for x in y) is fine but tail_call(f, x for x in y) is not
    | attach(originalTextFor(test + comp_for), add_paren_handle) + rparen.suppress()
    | originalTextFor(tokenlist(call_item, comma)) + rparen.suppress()
)
def get_tre_return_grammar(self, func_name):
    """Build a grammar matching ``return func_name(...)`` — optionally
    wrapped in parentheses — for a specific function name; per the name,
    used to detect candidates for tail-recursion elimination."""
    return (
        self.start_marker
        + keyword("return").suppress()
        + maybeparens(
            self.lparen,
            keyword(func_name, explicit_prefix=False).suppress()
            + self.original_function_call_tokens,
            self.rparen,
        ) + self.end_marker
    )
# -- tail-call grammar and signature-splitting helpers used after parsing --
tco_return = attach(
    start_marker
    + keyword("return").suppress()
    + maybeparens(
        lparen,
        disallow_keywords(untcoable_funcs, with_suffix=lparen)
        + condense(
            (base_name | parens | brackets | braces | string)
            + ZeroOrMore(
                dot + base_name
                | brackets
                # don't match the last set of parentheses
                | parens + ~end_marker + ~rparen,
            ),
        )
        + original_function_call_tokens,
        rparen,
    ) + end_marker,
    tco_return_handle,
    # this is the root in what it's used for, so might as well evaluate greedily
    greedy=True,
)
# raw-token parameter scanning (used to split signatures without fully parsing)
rest_of_arg = ZeroOrMore(parens | brackets | braces | ~comma + ~rparen + any_char)
tfpdef_tokens = base_name - Optional(originalTextFor(colon - rest_of_arg))
tfpdef_default_tokens = base_name - Optional(originalTextFor((equals | colon) - rest_of_arg))
parameters_tokens = Group(
    Optional(
        tokenlist(
            Group(
                dubstar - tfpdef_tokens
                | star - Optional(tfpdef_tokens)
                | slash
                | tfpdef_default_tokens,
            ) + Optional(passthrough.suppress()),
            comma + Optional(passthrough), # implicitly suppressed
        ),
    ),
)
dotted_base_name = condense(base_name + ZeroOrMore(dot + base_name))
split_func = (
    start_marker
    - keyword("def").suppress()
    - dotted_base_name
    - lparen.suppress() - parameters_tokens - rparen.suppress()
)
stores_scope = (
    lambda_kwd
    # match comprehensions but not for loops
    | ~indent + ~dedent + any_char + keyword("for") + base_name + keyword("in")
)
just_a_string = start_marker + string + end_marker
import glob
from subprocess import Popen, PIPE, STDOUT
import os
from backend.smc2py import parseEngineStdout
import math
import copy
# validate data and proxy to real functions
def execute(data):
    """
    Validate a simulation request and dispatch it to the matching runner.

    :param data: request dict; must contain 'type' and, depending on the
        type, a 'model' name, grid-topology parameters, or GPS log data
    :returns: the runner's result dict, or a dict with an 'error' key
        describing the validation failure
    """
    models = list_models()['models']
    if 'type' not in data:
        return {'error': 'No type received'}
    # log-based runs need no model; every other type must name a known one
    if data['type'] not in ['log', 'log+rssi']:
        if 'model' not in data:
            return {'error': 'No Model received'}
        if data['model'] not in models:
            return {'error': 'Model not available: ' + data['model'] + " use one of " + str(models)}
    if data['type'] == 'static':
        if 'topology' not in data:
            return {'error': "No topology received"}
        if data['topology'] != 'grid':
            return {'error': 'Unknown topology'}
        if not ('number_of_nodes' in data and 'node_init_time' in data and 'duration' in data):
            return {'error': 'Missing arguments for simulation'}
        # BUG FIX: these error dicts were previously built but never returned,
        # so malformed numbers fell through with zeroed parameters
        try:
            nn = int(data['number_of_nodes'])
        except Exception:
            return {'error': "number_of_nodes is not a number"}
        try:
            it = int(data['node_init_time'])
        except Exception:
            return {'error': "node_init_time is not a number"}
        try:
            dur = int(data['duration'])
        except Exception:
            return {'error': "duration is not a number"}
        return run_static_grid(data['model'], nn, it, dur)
    error, parsed, edges = None, None, None
    if data['type'] in ['gps', 'log', 'log+rssi']:
        if 'gps_data' not in data:
            return {'error': "No GPS-log"}
        error, parsed, edges = parse_gps(data['gps_data'], with_rssi=data['type'] == 'log+rssi')
        if error is not None:
            return error
    if data['type'] in ['log', 'log+rssi']:
        return run_log(0, -1, parsed, edges)
    if data['type'] == 'gps':
        # optional time window; default is the whole trace (0 .. -1 = open end)
        fdur = 0
        tdur = -1
        if 'from_duration' in data and len(data['from_duration'].strip()) > 0:
            try:
                fdur = int(data['from_duration'])
            except Exception:
                return {'error': "from_duration is not a number"}
        if 'to_duration' in data and len(data['to_duration'].strip()) > 0:
            try:
                tdur = int(data['to_duration'])
            except Exception:
                return {'error': "to_duration is not a number"}
        return run_gps(fdur, tdur, parsed)
    return {'error': "Unknown type or topology"}
def parse_gps(data, with_rssi: bool = False):
    """
    Parse a GPS log into (id, lat, lng, timestamp) tuples.

    Each non-comment, non-blank line has the form
    ``id,lat,lng,timestamp[,peer,rssi]*`` (a trailing comma is tolerated).

    :param data: raw log file content
    :param with_rssi: when True, also collect per-edge RSSI readings from
        the trailing ``peer,rssi`` pairs of each line
    :returns: tuple (error, parsed, edges); on malformed input error is a
        dict with an 'error' key and the other two are None, otherwise
        error is None, parsed is the list of tuples and edges maps
        id -> timestamp -> peer id -> rssi (or None when with_rssi is False)
    """
    parsed = []
    edges = {} if with_rssi else None
    for lineno, line in enumerate(data.splitlines()):
        # skip comment and blank lines
        if '#' in line or not line.strip():
            continue
        entry = line.split(",")
        if entry[-1] == "":
            del entry[-1]
        if len(entry) < 4:
            return file_error(lineno, 'less than four entries'), None, None
        try:
            node_id = int(entry[0])
        except Exception:
            return file_error(lineno, 'entry 0 is not an id'), None, None
        if node_id < 0:
            return file_error(lineno, 'entry 0 is not an id'), None, None
        # BUG FIX: the latitude/longitude error messages below previously all
        # said "entry 0 is not a latitude" (copy-paste of the id message)
        try:
            lat = float(entry[1])
        except Exception:
            return file_error(lineno, 'entry 1 is not a latitude'), None, None
        if lat < -90 or lat > 90:
            return file_error(lineno, 'entry 1 is not a latitude'), None, None
        try:
            lng = float(entry[2])
        except Exception:
            return file_error(lineno, 'entry 2 is not a longitude'), None, None
        if lng < -180 or lng > 180:
            return file_error(lineno, 'entry 2 is not a longitude'), None, None
        try:
            ts = float(entry[3])
        except Exception:
            return file_error(lineno, 'entry 3 is not a timestamp'), None, None
        if ts < 0:
            return file_error(lineno, 'entry 3 is not a timestamp'), None, None
        # if the log contains rssi values they come as (peer, rssi) pairs
        if with_rssi:
            for j in range(5, len(entry), 2):
                edges.setdefault(node_id, {}).setdefault(ts, {})[int(entry[j - 1])] = entry[j]
        parsed.append((node_id, lat, lng, ts))
    return None, parsed, edges


def file_error(line, message):
    """Build an error dict pointing at a (0-based) line of the input file."""
    return {'error': 'Line ' + str(line) + ' - ' + message}
model_folder = "backend/models/"


def list_models():
    """Return the names (file stems) of all XML models in the model folder."""
    suffix = ".xml"
    names = [
        str(path)[len(model_folder):-len(suffix)]
        for path in glob.glob(model_folder + "/*" + suffix)
    ]
    return {"models": names}
def get_id(data, first):
    """
    Extract the id substring starting at index ``first`` and running up to
    (but not including) the next ']' character.

    :returns: tuple of (id string, index of the closing ']')
    """
    stop = first
    while data[stop] != ']':
        stop += 1
    return (data[first:stop], stop)
def run_static_grid(model, num_nodes, init_time, duration):
    """
    Simulate ``model`` on a square grid of nodes via the `verifyta` engine
    and convert its output into per-node and per-edge time series.

    :param model: model name (file stem inside the model folder)
    :param num_nodes: number of nodes to simulate (0 < n < 10000)
    :param init_time: per-node initialization time, must be < duration
    :param duration: total simulation time
    :returns: dict with 'nodes', 'edges', the geographic bounding box and
        the time range, or a dict with only an 'error' key on bad input
    """
    if num_nodes <= 0:
        return {'error': 'Expected at least one node'}
    if num_nodes >= 10000:
        return {'error': 'Expected less than 10000 nodes'}
    if init_time < 0:
        return {'error': 'Expected at least some init time'}
    if init_time >= duration:
        return {'error': 'Expected duration to be larger than init_time'}
    path = model_folder + "/" + model + ".xml"
    if not os.path.isfile(path):
        return {'error': 'Could not find ' + str(path)}
    # feed the simulation parameters to the engine on stdin
    p = Popen(['verifyta', "-W", "-s", path], stdout=PIPE, stdin=PIPE, stderr=PIPE, universal_newlines=True)
    lines = str(duration) + " " + str(num_nodes) + " "
    for n in range(num_nodes):
        lines += str(0) + " "
        lines += str(init_time) + " "
        lines += str(duration) + " "
    (stdout, stderr) = p.communicate(input=lines)
    data = parseEngineStdout(stdout)
    # nodes are laid out on a square grid inside a fixed bounding box
    minlat = 57.013219
    minlng = 9.991016
    maxlat = 57.017997
    maxlng = 10.001937
    nodes = {}
    square = int(math.sqrt(num_nodes))
    dlat = (maxlat - minlat) / square
    dlon = (maxlng - minlng) / square
    # the real extent is recomputed below from the nodes actually placed
    maxlat = minlat
    maxlng = minlng
    for i in range(num_nodes):
        lat = minlat + int(i / square) * dlat
        lng = minlng + int(i % square) * dlon
        maxlat = max(maxlat, lat)
        maxlng = max(maxlng, lng)
    fields = data[0].variables()
    no = 0
    edges = {}
    for field in fields:
        raw = data[0].raw(no)
        if field[7] == 'N':  # OUTPUT_NODES[
            (id, last) = get_id(field, 12)
            lat = minlat + int(int(id) / square) * dlat
            lng = minlng + int(int(id) % square) * dlon
            maxlat = max(maxlat, lat)
            maxlng = max(maxlng, lng)
            field = field[last + 2:]
            if id not in nodes:
                nodes[id] = []
            # merge this field's (timestamp, value) samples into the node's
            # timeline, carrying the previous value forward over gaps
            num = 0
            lastval = 0
            for (ts, val) in raw:
                while num < len(nodes[id]) and nodes[id][num]['timestamp'] < ts:
                    if num != 0:  # BUG FIX: was `num is not 0` (identity check on int)
                        nodes[id][num][field] = lastval
                    num += 1
                lastval = val
                if num == len(nodes[id]) and num == 0:
                    nodes[id].append({'lat': lat, 'lng': lng, 'timestamp': ts, field: val})
                elif num < len(nodes[id]) and nodes[id][num]['timestamp'] == ts:
                    nodes[id][num][field] = val
                else:
                    nodes[id].insert(num, copy.deepcopy(nodes[id][num - 1]))
                    nodes[id][num][field] = val
                    nodes[id][num]['timestamp'] = ts
        elif field[7] == 'E':  # OUTPUT_EDGE[
            (id, last) = get_id(field, 12)
            (oid, last) = get_id(field, last + 2)
            # self-loops carry no information; skip them
            if id == oid:
                no += 1
                continue
            field = field[last + 2:]
            if id not in edges:
                edges[id] = {}
            if oid not in edges[id]:
                edges[id][oid] = []
            # same merge as above, but on the (id, oid) edge timeline
            num = 0
            lastval = 0
            for (ts, val) in raw:
                while num < len(edges[id][oid]) and edges[id][oid][num]['timestamp'] < ts:
                    if num != 0:  # BUG FIX: was `num is not 0` (identity check on int)
                        edges[id][oid][num][field] = lastval
                    num += 1
                lastval = val
                if num == len(edges[id][oid]) and num == 0:
                    edges[id][oid].append({'timestamp': ts, field: val, 'dest': int(oid)})
                elif num < len(edges[id][oid]) and edges[id][oid][num]['timestamp'] == ts:
                    edges[id][oid][num][field] = val
                else:
                    edges[id][oid].insert(num, copy.deepcopy(edges[id][oid][num - 1]))
                    edges[id][oid][num][field] = val
                    edges[id][oid][num]['timestamp'] = ts
        no += 1
    # extend the last sample of every series to the end of the simulation
    for n in nodes:
        if nodes[n][-1]['timestamp'] != duration:
            nodes[n].append(copy.deepcopy(nodes[n][-1]))
            nodes[n][-1]['timestamp'] = duration
    for n in edges:
        for n2 in edges[n]:
            if edges[n][n2][-1]['timestamp'] != duration:
                edges[n][n2].append(copy.deepcopy(edges[n][n2][-1]))
                edges[n][n2][-1]['timestamp'] = duration
    return {'nodes': nodes, 'edges': edges, 'min_lat': minlat, 'max_lat': maxlat, 'min_lng': minlng, 'max_lng': maxlng,
            'first_time': 0, 'last_time': duration}
def run_gps(fdur, tdur, data):
    """
    Run a simulation over a GPS trace restricted to the window [fdur, tdur].

    :param fdur: start of the considered time window
    :param tdur: end of the window, -1 meaning "until the end"
    :param data: parsed GPS entries as produced by parse_gps
    :returns: dict with an 'error' key (feature not implemented yet)
    """
    # BUG FIX: use != instead of `is not` for comparing int values
    if tdur != -1 and tdur <= fdur:
        return {'error': 'From duration should be smaller than to duration (' + str(fdur) + ", " + str(tdur) + ")"}
    # BUG FIX: this previously was {'error', 'Not yet implemented'} — a set
    # literal, not the error dict callers expect
    return {'error': 'Not yet implemented'}
def run_log(fdur, tdur, data, log_edges):
if tdur is not -1 and tdur <= fdur:
return {'error': 'From duration should be smaller than to duration (' + str(fdur) + ", " + str(tdur) + ")"}
nodes = {}
edges = {}
minlat = math.inf
maxlat = -math.inf
minlng = math.inf
maxlng = -math.inf
mints = math.inf
maxts = -math.inf
for row in data:
nd = {'lat': row[1], 'lng': row[2], 'timestamp': row[3]}
if row[3] < fdur or (tdur is not -1 and row[3] > tdur):
continue
if row[0] not in nodes:
nodes[row[0]] = [nd]
else:
nodes[row[0]].append(nd)
minlat = min(minlat, row[1])
maxlat = max(maxlat, row[1])
minlng = min(minlng, row[2])
maxlng = max(maxlng, row[2])
mints = min(mints, row[3])
maxts = max(maxts, row[3])
if len(nodes) == 0:
return {'error': 'No nodes within the duration (or file is empty or could not be parsed)'}
for key in nodes:
nodes[key].sort(key=lambda el: el['timestamp'])
if log_edges is None:
for id in nodes:
for step in nodes[id]:
for other in nodes:
if (id == other
or nodes[other][0]['timestamp'] > step['timestamp']
or nodes[other][-1]['timestamp'] < step['timestamp']):
continue
nd = {'timestamp': step['timestamp'], 'dest': other}
if id not in | |
from!")
elif (sizeOfSets == 2): # if we have only 2 sets for intersection
methods = self.loadAnnotationFiles(inputDir, inputFiles)
colors = []
methodNames = []
for method in methods.keys():
colors.append(self.methodColors[method])
if len(method) > 20:
method = method.replace("_", "_\n")
methodNames.append(method)
venn2(methods.values(), set_labels = methodNames, set_colors = colors, alpha = 1.0)
matplots.pyplot.savefig(self.output + "overlaps" + fileSuffix + ".pdf")
matplots.pyplot.clf()
elif (sizeOfSets == 3): # if we have only 3 sets for intersection
methods = self.loadAnnotationFiles(inputDir, inputFiles)
colors = []
methodNames = []
for method in methods.keys():
colors.append(self.methodColors[method])
if len(method) > 20:
method = method.replace("_", "_\n")
methodNames.append(method)
venn3(methods.values(), set_labels = methodNames, set_colors = colors, alpha = 1.0)
matplots.pyplot.savefig(self.output + "overlaps" + fileSuffix + ".pdf")
matplots.pyplot.clf()
else: # for more sets use UpSets package
params = [self.output + "overlaps" + fileSuffix + ".pdf", self.evalConfig["topKmax"], inputDir]
colors = ""
filenames = []
for file in inputFiles:
method = "_".join(file.split("_")[1:-1]) # get method name from filename without ending (format: top5_APPROACH_annotation.txt)
colors += "_" + self.methodColors[method]
filenames.append(file)
params.append(colors)
params.extend(filenames)
benchutils.runRCommand(self.rConfig, "UpsetDiagramCreation.R", params)
def evaluate(self):
    """Runs the annotation/enrichment evaluation on the rankings.

    Depending on what was specified in the config file, annotate and/or enrich feature rankings and compute overlaps or percentages.
    Overlaps then can show a) if feature rankings represent the same underlying processes via annotation (maybe although having selected different features), or b) if the underlying processes are equally strongly represented by checking the enrichment (maybe although having selected different features).
    """
    # take the top-k genes of every approach's ranking (k from the config)
    geneLists = self.loadRankings(self.input, int(self.evalConfig["topKmax"]), False)
    outputPath = self.output + "top" + self.evalConfig["topKmax"] + "_"
    enrichr = knowledgebases.Enrichr()
    #if there is any measure mentioned related to annotation
    if "annotation" in self.metrics:
        # for every top k gene ranking, do enrichment analysis
        for approach, geneList in geneLists.items():
            outputFile = outputPath + approach
            enrichr.annotateGenes(geneList, outputFile)
        if "annotation_overlap" in self.metrics:
            # compute overlap of annotated genes
            self.computeOverlap(self.output, "_annotatedGenes")
        if "annotation_percentage" in self.metrics:
            self.countAnnotationPercentages(geneLists, outputPath)
    if "enrichment_overlap" in self.metrics:
        # for every top k gene ranking, do enrichment analysis
        # (empty rankings are skipped — Enrichr needs at least one gene)
        for approach, geneList in geneLists.items():
            if (len(geneList) > 0):
                outputFile = outputPath + approach
                enrichr.enrichGeneset(geneList, outputFile)
        # compute overlap of annotated terms
        self.computeOverlap(self.output, "_enrichedTerms")
####### Creates density plots for input data set (if available, also for crossvalidation data set) #######
class DatasetEvaluator(Evaluator):
    """Creates plots regarding data set quality, currently: MDS, density, and box plots.

    Wrapper class because the actual evaluation and plot creation is done in
    an R script (DataCharacteristicsPlotting.R).

    :param input: absolute path to the directory where the input data set is located (for which to create the plots).
    :type input: str
    :param output: absolute path to the directory to which to save plots.
    :type output: str
    :param separator: separator character in data set to read it correctly.
    :type separator: str
    :param options: what plots to create, a list of method names that must be specified in the config file.
    :type options: list of str
    """

    def __init__(self, input, output, separator, options):
        # options must be a list of strings
        self.options = options
        self.separator = separator
        super().__init__(input, output, None)

    def evaluate(self):
        """Triggers the actual evaluation/plot generation in R.

        If a second data set for cross-validation was provided, also run the
        corresponding R script on that data set.
        """
        benchutils.logInfo("######################## EVALUATE INPUT DATA... ########################")
        self._runPlottingScript(self.input)
        if benchutils.getConfigBoolean("Evaluation", "enableCrossEvaluation"):
            self._runPlottingScript(benchutils.getConfigValue("Evaluation", "crossEvaluationData"))
        benchutils.logInfo("######################## ... FINISHED ########################")

    def _runPlottingScript(self, dataFile):
        """Invoke DataCharacteristicsPlotting.R on a single data set file."""
        params = [dataFile, self.output, self.separator, "TRUE"]
        params.extend(self.options)
        benchutils.runRCommand(benchutils.getConfig("R"), "DataCharacteristicsPlotting.R", params)
class KnowledgeBaseEvaluator(Evaluator):
"""Creates plots to evaluate knowledge base coverage.
Queries the knowledge bases with the given search terms and checks how many genes or pathways are found.
:param output: absolute path to the directory to which to save plots.
:type output: str
:param knowledgebases: a list of knowledgebases to test.
:type knowledgebases: list of str
:param searchterms: list of search terms for which to check knowledge base coverage.
:type searchterms: list of str
:param javaConfig: configuration parameters for java code (as specified in the config file).
:type javaConfig: str
:param rConfig: configuration parameters for R code (as specified in the config file).
:type rConfig: str
:param evalConfig: configuration parameters for evaluation, e.g. how many features to select (as specified in the config file).
:type evalConfig: str
:param classificationConfig: configuration parameters for classification, e.g. which classifiers to use (as specified in the config file).
:type classificationConfig: str
"""
def __init__(self, output, knowledgebases, searchterms):
    # Instantiate knowledge base objects from their names before base-class
    # setup; this evaluator has no input data set or separator, hence None.
    self.knowledgebases = self.createKnowledgeBases(knowledgebases)
    self.searchterms = searchterms
    super().__init__(None, output, None)
def drawCombinedPlot(self, stats, colIndex, filename, title, ylabel1, ylabel2, colors):
    """Draw a per-search-term box plot overlaid with a bar plot and save it.

    The box plot (left axis) shows the distribution of the values in column
    *colIndex* grouped by search term; the bar plot (right axis) shows the
    number of rows per search term.

    :param stats: table whose first column is the search term.
    :param colIndex: index of the value column in *stats*.
    :param filename: output file name, appended to ``self.output``.
    :param title: plot title.
    :param ylabel1: y-axis label for the box plot (left axis).
    :param ylabel2: y-axis label for the bar plot (right axis).
    :param colors: one fill color per search term.
    """
    fig, ax = plt.subplots(figsize=(8, 6))
    # second y-axis sharing the same x-axis for the bar plot
    ax2 = ax.twinx()
    # row count per search term (drives the bar heights)
    df_gb = stats.groupby('search term').count()
    # create box plot
    pl = stats.boxplot(ax=ax, by=stats.columns[0], rot=90, column=stats.columns[colIndex], patch_artist=True,
                       return_type="both",
                       boxprops=dict(color="k"), medianprops=dict(color="k"), whiskerprops=dict(color="k"),
                       capprops=dict(color="k"), flierprops=dict(color="k", markeredgecolor="k"))  # fill with color
    bplot = pl.iloc[0]
    # color each box according to its search term's color
    for patch, color in zip(bplot[1]['boxes'], colors):
        patch.set_facecolor(color)
    bplot[0].set_ylabel(ylabel1)
    bplot[0].set_xlabel("")
    bplot[0].set_title(title)
    # drop pandas' automatic "Boxplot grouped by ..." super-title
    bplot[0].figure.texts = []
    ind = np.arange(1, len(colors) + 1)
    bplot[0].set_xticks(ind)
    bplot[0].set_xticklabels(bplot[0].get_xticklabels())
    plt.gca().autoscale()
    # create bar plot
    ax2.bar(range(1, len(df_gb.iloc[:, (colIndex - 1)]) + 1), height=df_gb.iloc[:, (colIndex - 1)], align='center', alpha=0.3,
            color=colors)
    ax2.set_ylabel(ylabel2)
    # NOTE(review): `matplots` is presumably an alias for matplotlib from
    # this module's imports — verify; plt.savefig would be the usual call.
    matplots.pyplot.savefig(self.output + filename, bbox_inches="tight")
    matplots.pyplot.clf()
def createKnowledgeBases(self, knowledgebaseList):
    """Instantiate a knowledge base object for every name in the list.

    :param knowledgebaseList: knowledge base names to instantiate.
    :type knowledgebaseList: :class:`List` of str.
    :return: the created knowledge base objects
    :rtype: :class:`List` of :class:`KnowledgeBase` or inheriting classes
    """
    factory = knowledgebases.KnowledgeBaseFactory()
    return [factory.createKnowledgeBase(name) for name in knowledgebaseList]
def checkCoverage(self, kb, colors, useIDs):
    """Check gene coverage of a knowledge base and create plots.

    Queries *kb* once per search term, labels every returned gene with its
    search term (or its 1-based index when *useIDs* is set), writes the
    combined table to ``<output>/<kbname>_GeneStats.csv`` and draws a
    combined box/bar plot unless no genes were found at all.

    :param kb: knowledge base object for which to check coverage.
    :type kb: :class:`knowledgebases.KnowledgeBase` or inheriting class
    :param colors: List of colors to use for plots.
    :type colors: :class:`List` of str
    :param useIDs: if True, label rows with the search term's 1-based index
        instead of the (possibly long) term itself.
    """
    frames = []
    for term in self.searchterms:
        # query knowledge base
        geneSet = kb.getGeneScores([term])
        if useIDs:
            geneSet.insert(0, "search term", [int(self.searchterms.index(term) + 1)] * len(geneSet.index), True)
        else:
            geneSet.insert(0, "search term", [term] * len(geneSet.index), True)
        frames.append(geneSet)
    # DataFrame.append was removed in pandas 2.0; build the table with a
    # single concat instead of growing it row-block by row-block.
    stats = pd.concat(frames) if frames else pd.DataFrame()
    # only rename when the expected three columns are present (an empty
    # zero-column frame would make this assignment raise)
    if len(stats.columns) == 3:
        stats.columns = ["search term", "gene", "score"]
    # write to file
    stats.to_csv(self.output + kb.getName() + "_GeneStats.csv", index=False)
    combinedplotfile = kb.getName() + "_GeneCoverage.pdf"
    if not stats.empty:
        self.drawCombinedPlot(stats, 2, combinedplotfile, kb.getName(),
                              "gene association scores", "number of genes per search term", colors)
    else:
        benchutils.logWarning("WARNING: No results for search terms, so no plots are generated.")
def checkPathwayCoverage(self, kb, colors, useIDs):
    """Checks the pathway coverage for a given knowledge base and creates corresponding plots.

    :param kb: knowledge base object for which to check pathway coverage.
    :type kb: :class:`knowledgebases.KnowledgeBase` or inheriting class
    :param colors: List of colors to use for plots.
    :type colors: :class:`List` of str
    :param useIDs: if True, label rows with the search term's 1-based index
        instead of the term itself (avoids overlong axis labels).
    """
    stats = []
    for term in self.searchterms:
        # query knowledge base
        pathways = kb.getRelevantPathways([term])
        for pathwayName, value in pathways.items():
            # NOTE(review): if `value` is an igraph Graph, vcount is a
            # *method* (`value.vcount()`), so this would store a bound
            # method rather than the vertex count — confirm the pathway
            # object's type.
            numGenes = value.vcount
            if useIDs:
                stats.append((int(self.searchterms.index(term) + 1), pathwayName, numGenes))
            else:
                stats.append((term, pathwayName, numGenes))
    # make dataframe from list
    df_stats = pd.DataFrame(stats, columns=["search term", "pathway", "#genes"])
    # write to file
    df_stats.to_csv(self.output + kb.getName() + "_PathwayStats.csv", index=False)
    combinedplotfile = kb.getName() + "_PathwayCoverage.pdf"
    if not df_stats.empty:
        self.drawCombinedPlot(df_stats, 2, combinedplotfile, kb.getName(),
                              "pathway sizes (#genes)", "number of pathways", colors)
    else:
        benchutils.logWarning("WARNING: No results for search terms, so no plots generated.")
def evaluate(self):
"""Evaluates every given knowledge base and checks how many genes and pathways (and how large they are) are in there for the given search terms.
Creates corresponding plots.
"""
benchutils.logInfo("######################## EVALUATE KNOWLEDGE BASES... ########################")
#set colors for every search term
colors = []
#check if the individual length of a search term is longer than 15
#map them to IDs then instead to avoid too long axis labels to be plotted
maxLength = len(max(self.searchterms, key=len))
useIDs = False
| |
from .models import Lap, QCAR, Accumulator
from .forms import dataForm, quarterCarForm, accumulatorForm
from flask import Blueprint,render_template, redirect, url_for, request, flash, send_file
from flask_login import LoginManager,login_user,current_user,logout_user, login_required
import datetime
from . import db
from werkzeug.utils import secure_filename
from werkzeug.security import generate_password_hash, check_password_hash
import os
import sys
from plotmass import PlotMassSimulation
from roadload import Roadload
from pypresence import Presence
from github import Github
from dotenv import load_dotenv, find_dotenv
from scipy.io import savemat
import pickle
# globals
window_w = window_h = 0
# counter used to generate unique names for uploaded .mat files
mat_upload_number = 0
# discord rich presence (best-effort: the app runs fine without it)
rpc_activated = False
try:
    client_id = '708329075546128437'
    RPC = Presence(client_id)
    RPC.connect()
    rpc_activated = True
except Exception:
    # Discord not running / connection refused — narrowed from a bare
    # `except:` which would also swallow KeyboardInterrupt and SystemExit.
    rpc_activated = False
# github client, authenticated via the GITHUB token from .env
load_dotenv(find_dotenv())
g = Github(os.getenv("GITHUB"))
# Initialise blueprint
bp = Blueprint('main', __name__)
# Helper functions
def check_upload_file(form):
    """Persist the uploaded .mat file from *form* and return its DB name.

    The file is stored under ``static/mat`` next to this module with a
    generated name ``track_<n>.mat``; only that relative file name is
    returned for storage in the database (HTML references are relative).
    """
    global mat_upload_number
    mat_data = form.mat.data
    # store files relative to this module's directory
    module_dir = os.path.dirname(__file__)
    mat_upload_number += 1
    generated_name = secure_filename('track_' + str(mat_upload_number) + '.mat')
    destination = os.path.join(module_dir, 'static/mat', generated_name)
    mat_data.save(destination)
    return generated_name
def fetch_mat_file(mat_name):
    """Return the absolute path of an uploaded .mat file.

    :param mat_name: file name as stored in the database (relative).
    :return: path under this module's ``static/mat`` directory.
    """
    return os.path.join(os.path.dirname(__file__), 'static/mat', mat_name)
def base10_round(x, base=5):
    """Round *x* to the nearest multiple of *base* (banker's rounding)."""
    multiples = round(x / base)
    return base * multiples

def stepped_values(max_value, step=5):
    """Generate values from 150% of *max_value* down to just above 50%.

    The start is 150% of *max_value* rounded to a multiple of *step*; values
    decrease by *step* while they stay strictly above half of *max_value*.
    """
    current = base10_round(max_value * 1.5, step)
    floor = max_value / 2
    out = []
    while current > floor:
        out.append(current)
        current -= step
    return out
# Home page
@bp.route('/')
def home():
    """Render the landing page, enriched with live GitHub repository data.

    Fetches open issues, topics and the six most recent commits of the
    QUTMS_VehicleSim repository; if the GitHub API call fails, the page is
    rendered without that data.
    """
    if rpc_activated:
        RPC.update(state="Our History", details="Browsing", large_image="qut-logo")
    try:
        repo = g.get_repo("QUT-Motorsport/QUTMS_VehicleSim")
        open_issues = repo.get_issues(state='open')
        labels = repo.get_topics()
        commits = repo.get_commits()
        latest_commits = []
        for commit in commits[:6]:
            # NOTE(review): in PyGithub, html_url normally lives on the
            # top-level Commit object, not on commit.commit — verify.
            latest_commit = {}
            latest_commit["title"] = commit.commit.message
            latest_commit["url"] = commit.commit.html_url
            latest_commit["author"] = commit.commit.author.name
            latest_commit["date"] = commit.commit.author.date
            latest_commits.append(latest_commit)
        return render_template('home.html', issues=open_issues, labels=labels, commits=latest_commits)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
        print("Github API Call was unsuccessful")
        return render_template('home.html')
# Telemetry Page
@bp.route('/telemetry')
def live_telemetry():
    """Render the live telemetry page (login required)."""
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if rpc_activated:
        RPC.update(state="Telemetry", details="Analyzing...", large_image="qut-logo")
    form = dataForm()
    return render_template('live_telemetry.html', title='QUTMS | Live Telemetry', dataform=form)
# Upload Lap Page
@bp.route('/upload/lap')
def upload():
    """Render the lap-upload form (login required)."""
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if rpc_activated:
        RPC.update(state="Simulation", details="Uploading Lap Time", large_image="qut-logo")
    form = dataForm()
    return render_template('upload.html', title='QUTMS | Upload - Lap', dataform=form)
# Analyse table for Plot Mass
@bp.route('/analysis/lap')
def analysis_lap():
    """List stored point-mass lap simulations, newest first (login required)."""
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if rpc_activated:
        RPC.update(state="Point Mass Lap Simulations", details="Analyzing...", large_image="qut-logo")
    laps = Lap.query.order_by(Lap.id.desc()).all()
    return render_template('analysis_lap.html', title='QUTMS | Analysis', data=laps)
# Analyse table for Quarter Car
@bp.route('/analysis/qcar')
def analysis_qcar():
    """List stored quarter-car models, newest first (login required)."""
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if rpc_activated:
        RPC.update(state="Quarter Car", details="Analyzing...", large_image="qut-logo")
    models = QCAR.query.order_by(QCAR.id.desc()).all()
    return render_template('analysis_qcar.html', title='QUTMS | Analysis', data=models)
# Standard Output for QCAR Model
@bp.route('/qcar/<id>', defaults={'width': None, 'height': None})
@bp.route('/qcar/<id>/<width>/<height>')
def qcar(id, width=None, height=None):
    """Show derived quarter-car metrics (frequencies, eigenvalues) for model <id>.

    NOTE(review): unlike the other routes, this one has no
    current_user.is_anonymous login check — confirm whether that is intended.
    """
    # Fetch Browser Height and Width: the first hit has no dimensions, so we
    # return a tiny script that reloads the page with the window size appended
    if not width or not height:
        return """
<script>
(() => window.location.href = window.location.href +
['', window.innerWidth, window.innerHeight].join('/'))()
</script>
"""
    # Fetch QCAR instance by ID (rebinds `id` to the DB row, shadowing the
    # route parameter — the original identifier is unavailable after this)
    id = QCAR.query.filter_by(id=id).first()
    qcar_m_s = id.sprungmass  # Sprung Mass (kg)
    qcar_m_u = id.unsprungmass  # Unsprung Mass (kg)
    qcar_s_l = id.linearspring  # Linear Spring Rate? (N/m)
    qcar_s_nl = id.nonlinearspring  # Non-Linear Spring? (?)
    qcar_d_c = id.damperscompression  # Dampering Ratio Comp? (ratio)
    qcar_d_r = id.dampersrebound  # Dampers Rebound? (?)
    qcar_t_l = id.tireslinear  # Tires Linear? (?)
    qcar_t_nl = id.tiresnonlinear  # Tires Non-Linear? (?)
    qcar_t_L = id.tireslift  # Tires Lift? (?)
    qcar_b_l = id.bumplinear  # Bump Linear? (?)
    qcar_b_nl = id.bumpnonlinear  # Bump Non-Linear? (?)
    qcar_b_h = id.bumphysteresis  # Bump Hysteresis? (?)
    # `primitives` is defined elsewhere; only sprung mass, linear spring rate
    # and compression damping of the fetched fields are actually used below
    qcar_primitive = primitives(qcar_m_s, qcar_s_l, qcar_d_c)
    headings = ["Sprung Mass Natural Frequency (Hz)",
                "Sprung Mass Damped Frequency (Hz)",
                "Unsprung Mass Natural Frequency",
                "Unsprung Mass Damped Frequency",
                "Eigen Values and Eigen Vectors of the Quarter Car"]
    values = [qcar_primitive.get_sprung_mass_natural_frequency(),
              qcar_primitive.get_sprung_mass_damped_frequency(),
              qcar_primitive.get_unsprung_mass_natural_frequency(),
              qcar_primitive.get_unsprung_mass_damped_frequency(),
              qcar_primitive.get_eigen_values()]
    # `load_template` (defined elsewhere) presumably pairs headings with
    # values as renderable HTML — TODO confirm
    data = load_template(headings, values)
    title = 'QUTMS | QCAR'
    return render_template('qcar_output.html', title=title, id=id, name=id.name, output_html=data)
# Analyse table for Editing entries in DB
@bp.route('/edit')
def edit():
    """Render the edit page listing all laps and quarter-car models (login required)."""
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if rpc_activated:
        RPC.update(state="Vehicle Simulations", details="Editing", large_image="qut-logo")
    laps = Lap.query.order_by(Lap.id.desc()).all()
    qcar_models = QCAR.query.order_by(QCAR.id.desc()).all()
    return render_template('edit.html', title='QUTMS | Edit', data=laps, qcar=qcar_models)
# View Help for VD Symbols
@bp.route('/help')
def help():  # NOTE: shadows builtin help(); name kept — url_for uses the endpoint name
    """Render the vehicle-dynamics symbols help page (login required)."""
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if rpc_activated:
        RPC.update(state="Vehicle Dynamics", details="Studying", large_image="qut-logo")
    return render_template('help.html', title='QUTMS | Help')
# Upload parameters for Quarter Car
@bp.route('/upload/qcar-upload')
def qcar_upload():
    """Render the quarter-car parameter upload form (login required)."""
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if rpc_activated:
        RPC.update(state="Quarter Car", details="Uploading", large_image="qut-logo")
    form = quarterCarForm()
    return render_template('qcar_upload.html', title='QUTMS | QCar', dataform=form)
# Standard Graph for Plot Mass
@bp.route('/graph/<id>', defaults={'width': None, 'height': None})
@bp.route('/graph/<id>/<width>/<height>')
def graph(id, width=None, height=None):
    """Run the point-mass lap simulation for lap <id> and render all plots.

    On the first hit (no dimensions in the URL) returns a script that
    reloads the page with the browser window size appended, so the plots
    can be sized to the client window.
    """
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if not width or not height:
        return """
<script>
(() => window.location.href = window.location.href +
['', window.innerWidth, window.innerHeight].join('/'))()
</script>
"""
    # Set tab title
    title = 'QUTMS | Graph'
    # Fetch lap instance from DB (rebinds `id` to the Lap row)
    id = Lap.query.filter_by(id=id).first()
    # Fetch mat file for simulation input
    matfile = fetch_mat_file(id.mat)
    # Initialise Simulation
    simulation = PlotMassSimulation(matfile, id.curvature, int(width), int(height), id.mass, id.power, id.air_density, id.reference_area, id.coefficient_of_drag, id.coefficient_of_friction, id.coefficient_of_lift)
    # Pickle graph & stats for download
    simulation.pickle(simulation.plot(), "graph_all.p")
    # Update discord rich presence
    if rpc_activated:
        RPC.update(state= str(int(id.mass)) + 'kg @ ' + str(int(id.power)) + 'W - ' + str(simulation.get_fastest_lap()), details=str(id.name) + ' - View Plots', large_image="qut-logo")
    # get_fastest_lap()[2:] presumably strips a fixed-width prefix from the
    # lap-time string — TODO confirm the string format
    return render_template('graph.html', graph_html=simulation.plot_html(), title=title, name=id.name, fastest_lap=simulation.get_fastest_lap()[2:], id=id)
# GG Only Diagram for Plot Mass
@bp.route('/gg/<id>', defaults={'width': None, 'height': None})
@bp.route('/gg/<id>/<width>/<height>')
def gg_diagram(id, width=None, height=None):
    """Run the point-mass simulation for lap <id> and render only the GG diagram.

    Same first-hit window-size redirect trick as the /graph route.
    """
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if not width or not height:
        return """
<script>
(() => window.location.href = window.location.href +
['', window.innerWidth, window.innerHeight].join('/'))()
</script>
"""
    # Set tab title
    title = 'QUTMS | GG Diagram'
    # Fetch lap instance from DB (rebinds `id` to the Lap row)
    id = Lap.query.filter_by(id=id).first()
    # Fetch mat file for simulation input
    matfile = fetch_mat_file(id.mat)
    # Initialise Simulation
    simulation = PlotMassSimulation(matfile, id.curvature, int(width), int(height), id.mass, id.power, id.air_density, id.reference_area, id.coefficient_of_drag, id.coefficient_of_friction, id.coefficient_of_lift)
    # Pickle graph & stats for download
    simulation.pickle(simulation.plot_gg(), "graph_gg.p")
    # Update discord rich presence
    if rpc_activated:
        RPC.update(state= str(int(id.mass)) + 'kg @ ' + str(int(id.power)) + 'W - ' + str(simulation.get_fastest_lap()), details=str(id.name), large_image="qut-logo")
    return render_template('gg_diagram.html', id=id, graph_html=simulation.plot_gg_html(), title=title, name=id.name, fastest_lap=simulation.get_fastest_lap()[2:])
# Standard Graph for Speed v Curvature
@bp.route('/speedcurvature/<id>', defaults={'width': None, 'height': None})
@bp.route('/speedcurvature/<id>/<width>/<height>')
def speedcurvature(id, width=None, height=None):
    """Run the point-mass simulation for lap <id> and render the speed-vs-curvature plot.

    Same first-hit window-size redirect trick as the /graph route.
    """
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if not width or not height:
        return """
<script>
(() => window.location.href = window.location.href +
['', window.innerWidth, window.innerHeight].join('/'))()
</script>
"""
    # Set tab title
    title = 'QUTMS | Speed v Curvature'
    # Fetch lap instance from DB (rebinds `id` to the Lap row)
    id = Lap.query.filter_by(id=id).first()
    # Fetch mat file for simulation input
    matfile = fetch_mat_file(id.mat)
    # Initialise Simulation
    simulation = PlotMassSimulation(matfile, id.curvature, int(width), int(height), id.mass, id.power, id.air_density, id.reference_area, id.coefficient_of_drag, id.coefficient_of_friction, id.coefficient_of_lift)
    # Pickle graph & stats for download
    simulation.pickle(simulation.plot_speed_curvature(), "graph_curvature.p")
    # Update discord rich presence
    if rpc_activated:
        RPC.update(state= str(int(id.mass)) + 'kg @ ' + str(int(id.power)) + 'W - ' + str(simulation.get_fastest_lap()), details=str(id.name) + ' - Speed v Curvature', large_image="qut-logo")
    return render_template('speedcurvature.html', graph_html=simulation.plot_speed_curvature_html(), title=title, name=id.name, fastest_lap=simulation.get_fastest_lap()[2:], id=id)
# Standard Input page for Roadload Modelling
@bp.route('/accumulator/<id>', defaults={'width': None, 'height': None})
@bp.route('/accumulator/<id>/<width>/<height>')
def accumulator(id, width=None, height=None):
    """Render the accumulator/roadload input page for lap <id> (login required).

    Same first-hit window-size redirect trick as the /graph route.
    """
    if current_user.is_anonymous:
        flash('Error: User must sign-in to access feature')
        return redirect('/login')
    if not width or not height:
        return """
<script>
(() => window.location.href = window.location.href +
['', window.innerWidth, window.innerHeight].join('/'))()
</script>
"""
    # Fetch lap simulation inputs from database (rebinds `id` to the Lap row)
    id = Lap.query.filter_by(id=id).first()
    # Update page title
    title = 'QUTMS | Accumulator'
    # Fetch pre-existing roadload inputs in database
    roadload_inputs = Accumulator.query.order_by(Accumulator.id.desc()).all()
    # Update discord rich presence
    if rpc_activated:
        RPC.update(state= "A Chunky Accumulator", details='Modelling', large_image="qut-logo")
    return render_template('accumulator.html', name=id.name, id=id, title=title, car_mass=int(id.mass), data=roadload_inputs, dataform=accumulatorForm())
# Graph roadload model
@bp.route('/roadload/<roadload_id>/<lap_id>', defaults={'width': None, 'height': None})
@bp.route('/roadload/<roadload_id>/<lap_id>/<width>/<height>')
def roadload(roadload_id, lap_id, width=None, height=None):
if not width or not height:
return """
<script>
(() => window.location.href = window.location.href +
['', window.innerWidth, window.innerHeight].join('/'))()
</script>
"""
# Fetch roadload simulation inputs from database
accumulator_spec = Accumulator.query.filter_by(id=roadload_id).first()
# Fetch lap simulation inputs from database
lap_spec = Lap.query.filter_by(id=lap_id).first()
# Update page title
title = 'QUTMS | Roadload'
# Fetch mat file for simulation input
matfile = fetch_mat_file(lap_spec.mat)
# Initialise Lap Simulation
lap_simulation = | |
self.covariance_factors()[:2]
new_A = self.linear_part.dot(sqrt_cov)
den = np.sqrt((new_A**2).sum(1))
new_b = self.offset - self.linear_part.dot(self.mean)
new_con = constraints(new_A / den[:,None], new_b / den)
mu = self.mean.copy()
def inverse_map(Z):
if Z.ndim == 2:
return sqrt_cov.dot(Z) + mu[:,None]
else:
return sqrt_cov.dot(Z) + mu
forward_map = lambda W: sqrt_inv.dot(W - mu)
return inverse_map, forward_map, new_con
def project_rowspace(self, direction):
    """Project *direction* onto the row space of the covariance.

    Uses the row-space factor R returned last by ``covariance_factors``
    and computes R (R^T direction).
    """
    R = self.covariance_factors()[-1]
    return R.dot(R.T.dot(direction))
def solve(self, direction):
    """Apply the inverse covariance to *direction*.

    With the inverse square root S^{-1/2} from ``covariance_factors``,
    returns (S^{-1/2})^T S^{-1/2} direction = S^{-1} direction.
    """
    half_inv = self.covariance_factors()[1]
    return half_inv.T.dot(half_inv.dot(direction))
def stack(*cons):
    """
    Combine several constraints into one large constraint by intersection.

    Parameters
    ----------
    cons : [`selection.affine.constraints`_]
        A sequence of constraints.

    Returns
    -------
    intersection : `selection.affine.constraints`_

    Notes
    -----
    Resulting constraint will have mean 0 and covariance $I$.
    """
    linear_parts = [con.linear_part for con in cons]
    offsets = [con.offset for con in cons]
    return constraints(np.vstack(linear_parts), np.hstack(offsets))
def interval_constraints(support_directions,
                         support_offsets,
                         covariance,
                         observed_data,
                         direction_of_interest,
                         tol = 1.e-4):
    r"""
    Compute $\eta^T Z$ and the truncation interval it lives in.

    Given an affine constraint $\{z: Az \leq b\}$ (elementwise) specified
    with $A$ as `support_directions` and $b$ as `support_offsets`, a
    direction of interest $\eta$, and `observed_data`, a Gaussian vector
    $Z \sim N(\mu,\Sigma)$ with `covariance` matrix $\Sigma$, this function
    returns $\eta^T Z$ together with an interval bounding this value.

    The interval is constructed so that its endpoints are independent of
    $\eta^T Z$, hence the $p$-value of `Kac Rice`_ can be used to form an
    exact pivot.

    Parameters
    ----------
    support_directions : np.float
        Matrix specifying constraint, $A$.
    support_offsets : np.float
        Offset in constraint, $b$.
    covariance : np.float
        Covariance matrix of `observed_data`.
    observed_data : np.float
        Observations.
    direction_of_interest : np.float
        Direction in which we're interested for the contrast.
    tol : float
        Relative tolerance parameter for deciding sign of $Az-b$.

    Returns
    -------
    lower_bound : float
    observed : float
    upper_bound : float
    sigma : float
    """
    A = support_directions
    b = support_offsets
    Sigma = covariance
    Z_obs = observed_data
    eta = direction_of_interest

    slack = A.dot(Z_obs) - b
    # warn (without failing) when the observed point violates the constraint;
    # the short-circuit keeps WARNINGS untouched when everything is satisfied
    if not np.all(slack < tol * np.fabs(slack).max()) and WARNINGS:
        warn('constraints not satisfied: %s' % repr(slack))

    Sigma_eta = Sigma.dot(eta)
    sigma = np.sqrt((eta * Sigma_eta).sum())
    alpha = A.dot(Sigma_eta) / sigma**2
    observed = (eta * Z_obs).sum()  # \eta^T Z

    # adding zero_coords to the denominator avoids divide-by-zero; those
    # coordinates are masked to NaN and never enter the bounds below
    zero_coords = alpha == 0
    bounds = (-slack + observed * alpha) / (alpha + zero_coords)
    bounds[zero_coords] = np.nan

    pos_coords = alpha > tol * np.fabs(alpha).max()
    upper_bound = bounds[pos_coords].min() if np.any(pos_coords) else np.inf
    neg_coords = alpha < -tol * np.fabs(alpha).max()
    lower_bound = bounds[neg_coords].max() if np.any(neg_coords) else -np.inf

    return lower_bound, observed, upper_bound, sigma
def selection_interval(support_directions,
                       support_offsets,
                       covariance,
                       observed_data,
                       direction_of_interest,
                       tol = 1.e-4,
                       alpha = 0.05,
                       UMAU=True):
    """
    Confidence interval for $\eta^T\mu$ under an affine selection event.

    Given an affine constraint $\{z: Az + b \leq 0\}$ (elementwise) specified
    with $A$ as `support_directions` and $b$ as `support_offsets`, a direction
    of interest $\eta$, and `observed_data`, a Gaussian vector
    $Z \sim N(\mu,\Sigma)$ with `covariance` matrix $\Sigma$.

    Parameters
    ----------
    support_directions : np.float
        Matrix specifying constraint, $A$.
    support_offsets : np.float
        Offset in constraint, $b$.
    covariance : np.float
        Covariance matrix of `observed_data`.
    observed_data : np.float
        Observations.
    direction_of_interest : np.float
        Direction in which we're interested for the contrast.
    tol : float
        Relative tolerance parameter for deciding sign of $Az-b$.
    alpha : float
        Coverage level is 1 - alpha.
    UMAU : bool
        Use the UMAU interval, or twosided pivot.

    Returns
    -------
    selection_interval : (float, float)
    """
    lower_bound, observed, upper_bound, sigma = interval_constraints(
        support_directions,
        support_offsets,
        covariance,
        observed_data,
        direction_of_interest,
        tol=tol)
    truncated = truncated_gaussian_old([(lower_bound, upper_bound)], scale=sigma)
    if UMAU:
        return truncated.UMAU_interval(observed, alpha)
    return truncated.equal_tailed_interval(observed, alpha)
def sample_from_constraints(con,
                            Y,
                            direction_of_interest=None,
                            how_often=-1,
                            ndraw=1000,
                            burnin=1000,
                            white=False,
                            use_constraint_directions=True,
                            use_random_directions=True,
                            accept_reject_params=()):
    r"""
    Use Gibbs sampler to simulate from `con`.

    Parameters
    ----------
    con : `selection.affine.constraints`_
    Y : np.float
        Point satisfying the constraint.
    direction_of_interest : np.float (optional)
        Which projection is of most interest?
    how_often : int (optional)
        How often should the sampler make a move along `direction_of_interest`?
        If negative, defaults to ndraw+burnin (so it will never be used).
    ndraw : int (optional)
        Defaults to 1000.
    burnin : int (optional)
        Defaults to 1000.
    white : bool (optional)
        Is con.covariance equal to identity?
    use_constraint_directions : bool (optional)
        Use the directions formed by the constraints as in the Gibbs scheme?
    use_random_directions : bool (optional)
        Use additional random directions in the Gibbs scheme?
    accept_reject_params : tuple
        If not () should be a tuple (num_trial, min_accept, num_draw).
        In this case, we first try num_trial accept-reject samples;
        if at least min_accept of them succeed, we keep drawing
        accept-reject samples until more than num_draw have accumulated.

    Returns
    -------
    Z : np.float((ndraw, n))
        Sample from the Gaussian distribution conditioned on the constraints.
    """
    if direction_of_interest is None:
        direction_of_interest = np.random.standard_normal(Y.shape)
    if how_often < 0:
        how_often = ndraw + burnin

    DEBUG = False
    if not white:
        inverse_map, forward_map, white_con = con.whiten()
        white_Y = forward_map(Y)
        white_direction_of_interest = forward_map(con.covariance.dot(direction_of_interest))
        if DEBUG:
            # fixed: this was a Python-2 print statement which under Python 3
            # parsed as ``print(x).sum()`` and crashed when DEBUG was enabled
            print((white_direction_of_interest * white_Y).sum(),
                  (Y * direction_of_interest).sum(), 'white')
    else:
        white_con = con
        inverse_map = lambda V: V
        # fixed: these were previously unassigned in the pre-whitened case,
        # so the hit-and-run fallback below raised NameError
        white_Y = Y
        white_direction_of_interest = direction_of_interest

    # Accept-reject strategy: take num_trial trial draws; if at least
    # min_accept satisfy the constraints, keep drawing chunks until more
    # than num_draw samples accumulate. Otherwise fall back to hit-and-run.
    if accept_reject_params:
        use_hit_and_run = False
        num_trial, min_accept, num_draw = accept_reject_params

        def _accept_reject(sample_size, linear_part, offset):
            # draw sample_size white Gaussians, keep those inside the cone;
            # fixed: the size was previously hard-coded to 100
            Z_sample = np.random.standard_normal((int(sample_size), linear_part.shape[1]))
            constraint_satisfied = (Z_sample.dot(linear_part.T) -
                                    offset[None, :]).max(1) < 0
            return Z_sample[constraint_satisfied]

        # fixed: pass num_trial instead of a hard-coded 100
        Z_sample = _accept_reject(num_trial,
                                  white_con.linear_part,
                                  white_con.offset)
        if Z_sample.shape[0] >= min_accept:
            while True:
                # fixed: integer chunk size (num_draw / 5 is a float under
                # Python 3 true division)
                Z_sample = np.vstack([Z_sample,
                                      _accept_reject(max(1, num_draw // 5),
                                                     white_con.linear_part,
                                                     white_con.offset)])
                if Z_sample.shape[0] > num_draw:
                    break
            white_samples = Z_sample
        else:
            use_hit_and_run = True
    else:
        use_hit_and_run = True

    if use_hit_and_run:
        white_samples = sample_truncnorm_white(
            white_con.linear_part,
            white_con.offset,
            white_Y,
            white_direction_of_interest,
            how_often=how_often,
            ndraw=ndraw,
            burnin=burnin,
            sigma=1.,
            use_constraint_directions=use_constraint_directions,
            use_random_directions=use_random_directions)
    # map white samples back to the original coordinates
    Z = inverse_map(white_samples.T).T
    return Z
def sample_from_sphere(con,
                       Y,
                       direction_of_interest=None,
                       how_often=-1,
                       ndraw=1000,
                       burnin=1000,
                       use_constraint_directions=True,
                       use_random_directions=True,
                       white=False):
    r"""
    Use Gibbs sampler to simulate from `con`
    intersected with (whitened) sphere of radius `np.linalg.norm(Y)`.
    When `con.covariance` is not $I$, it samples from the
    ellipse of constant Mahalanobis distance from `con.mean`.

    Parameters
    ----------
    con : `selection.affine.constraints`_
    Y : np.float
        Point satisfying the constraint.
    direction_of_interest : np.float (optional)
        Which projection is of most interest?
    how_often : int (optional)
        How often should the sampler make a move along `direction_of_interest`?
        If negative, defaults to ndraw+burnin (so it will never be used).
    ndraw : int (optional)
        Defaults to 1000.
    burnin : int (optional)
        Defaults to 1000.
    use_constraint_directions : bool (optional)
        Use the directions formed by the constraints as in the Gibbs scheme?
    use_random_directions : bool (optional)
        Use additional random directions in the Gibbs scheme?
    white : bool (optional)
        Is con.covariance equal to identity?

    Returns
    -------
    Z : np.float((ndraw, n))
        Sample from the sphere intersect the constraints.
    weights : np.float(ndraw)
        Importance weights for the sample.
    """
    if direction_of_interest is None:
        direction_of_interest = np.random.standard_normal(Y.shape)
    if how_often < 0:
        how_often = ndraw + burnin
    # NOTE: `white` is rebound below from a bool to the (whitened) constraint
    # object — confusing but long-standing; kept for interface stability.
    if not white:
        inverse_map, forward_map, white = con.whiten()
        white_Y = forward_map(Y)
        white_direction_of_interest = forward_map(direction_of_interest)
    else:
        white = con
        inverse_map = lambda V: V
        # fixed: previously unassigned on this branch, guaranteeing a
        # NameError at the sampler call below whenever white=True
        white_Y = Y
        white_direction_of_interest = direction_of_interest
    white_samples, weights = sample_truncnorm_white_sphere(white.linear_part,
                                                           white.offset,
                                                           white_Y,
                                                           white_direction_of_interest,
                                                           how_often=how_often,
                                                           ndraw=ndraw,
                                                           burnin=burnin,
                                                           use_constraint_directions=use_constraint_directions,
                                                           use_random_directions=use_random_directions)
    # map white samples back to the original coordinates
    Z = inverse_map(white_samples.T).T
    return Z, weights
def gibbs_test(affine_con, Y, direction_of_interest,
how_often=-1,
ndraw=5000,
burnin=2000,
white=False,
alternative='twosided',
UMPU=True,
sigma_known=False,
alpha=0.05,
pvalue=True,
use_constraint_directions=False,
use_random_directions=True,
tilt=None,
test_statistic=None,
accept_reject_params=(100, 15, 2000),
MLE_opts={'burnin':1000,
'ndraw':500,
'how_often':5,
'niter':10,
'step_size':0.2,
'hessian_min':1.,
'tol':1.e-6,
'startMLE':None}
):
"""
A Monte Carlo significance test for
a given function of `con.mean`.
Parameters
----------
affine_con : `selection.affine.constraints`_
Y : np.float
Point satisfying the constraint.
direction_of_interest: np.float
Which linear function of `con.mean` is of interest?
(a.k.a. $\eta$ in many of related papers)
how_often : int (optional)
How often should the sampler make a move along `direction_of_interest`?
If negative, defaults to ndraw+burnin (so it will never be used).
ndraw : int (optional)
Defaults to 5000.
burnin : int (optional)
Defaults to 2000.
white : bool (optional)
Is con.covariance equal to identity?
alternative : str
One of ['greater', 'less', 'twosided']
UMPU : bool
Perform the UMPU test?
sigma_known : bool
Is $\sigma$ assumed known?
alpha :
Level for UMPU test.
pvalue :
Return a pvalue or just | |
#!/usr/bin/env python3
#
# Copyright (c) 2004-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
import collections
import time
from fboss.cli.utils import utils
from fboss.cli.commands import commands as cmds
from math import log10
from neteng.fboss.optic import ttypes as optic_ttypes
from neteng.fboss.ttypes import FbossBaseError
from thrift.Thrift import TApplicationException
class PortDetailsCmd(cmds.FbossCmd):
    """Print detailed information for the given ports (or all up ports)."""

    def run(self, ports):
        try:
            self._client = self._create_ctrl_client()
            # No ports specified, get all ports
            if not ports:
                resp = self._client.getAllPortInfo()
        except FbossBaseError as e:
            # str(e) is required: concatenating str + exception raises
            # TypeError, masking the real error
            raise SystemExit('Fboss Error: ' + str(e))
        except Exception as e:
            raise Exception('Error: ' + str(e))

        if ports:
            for port in ports:
                self._print_port_details(port)
        elif resp:
            # only operationally-up ports, in natural port order
            all_ports = sorted(resp.values(), key=utils.port_sort_fn)
            all_ports = [port for port in all_ports if port.operState == 1]
            for port in all_ports:
                self._print_port_details(port.portId, port)
        else:
            print("No Ports Found")

    def _convert_bps(self, bps):
        ''' convert bps to human readable form

            :var bps int: port speed in bps
            :return bps_per_unit float: bps divided by factor of the unit found
            :return suffix string: human readable format
        '''
        bps_per_unit = suffix = None
        value = bps
        # expand to 'T' and beyond by adding in the proper unit
        for factor, unit in [(1, ''), (3, 'K'), (6, 'M'), (9, 'G')]:
            if value < 1000:
                bps_per_unit = bps / 10 ** factor
                suffix = '{}bps'.format(unit)
                break
            value /= 1000
        assert bps_per_unit is not None and suffix, (
            'Unable to convert bps to human readable format')
        return bps_per_unit, suffix

    def _print_port_details(self, port_id, port_info=None):
        ''' Print out port details

            :var port_id int: port identifier
            :var port_info PortInfoThrift: port information (fetched from the
                controller when not supplied)
        '''
        if not port_info:
            port_info = self._client.getPortInfo(port_id)
        admin_status = "ENABLED" if port_info.adminState else "DISABLED"
        oper_status = "UP" if port_info.operState else "DOWN"
        # speedMbps is in Mbps; convert to bps for pretty-printing
        speed, suffix = self._convert_bps(port_info.speedMbps * (10 ** 6))
        vlans = ' '.join(str(vlan) for vlan in (port_info.vlans or []))
        fmt = '{:.<50}{}'
        lines = [
            ('Name', port_info.name.strip()),
            ('Port ID', str(port_info.portId)),
            ('Admin State', admin_status),
            ('Link State', oper_status),
            ('Speed', '{:.0f} {}'.format(speed, suffix)),
            ('VLANs', vlans)
        ]
        print()
        print('\n'.join(fmt.format(*l) for l in lines))
        print('Description'.ljust(20, '.') + (port_info.description or ""))
class PortFlapCmd(cmds.FbossCmd):
    """Administratively flap (disable, wait, re-enable) a set of ports."""

    def run(self, ports):
        try:
            if not ports:
                print("Hmm, how did we get here?")
            else:
                self.flap_ports(ports)
        except FbossBaseError as e:
            # str(e): concatenating str + exception raises TypeError,
            # masking the real error
            raise SystemExit('Fboss Error: ' + str(e))

    def flap_ports(self, ports):
        """Disable every enabled port in *ports*, wait 1s, then re-enable.

        Ports disabled by configuration are reported and skipped.
        """
        self._client = self._create_ctrl_client()
        resp = self._client.getPortStatus(ports)
        for port, status in resp.items():
            if not status.enabled:
                print("Port %d is disabled by configuration, cannot flap" %
                      (port))
                continue
            print("Disabling port %d" % (port))
            self._client.setPortState(port, False)
        time.sleep(1)
        for port, status in resp.items():
            if status.enabled:
                print("Enabling port %d" % (port))
                self._client.setPortState(port, True)
class PortStatusCmd(cmds.FbossCmd):
    """Show port status, either as a summary table or in detail."""

    def run(self, detail, ports, verbose):
        self._client = self._create_ctrl_client()
        if detail or verbose:
            PortStatusDetailCmd(
                self._client, ports, verbose).get_detail_status()
        else:
            self.list_ports(ports)

    def list_ports(self, ports):
        """Print a one-line status summary for each enabled port."""
        row_fmt = '{:>10} {:>12} {}{:>10} {:>12} {:>6}'
        try:
            print(row_fmt.format('Port', 'Admin State', '', 'Link State',
                                 'Transceiver', 'Speed'))
            print('-' * 59)
            status_map = self._client.getPortStatus(ports)
            all_info = self._client.getAllPortInfo()
            for info in sorted(all_info.values(), key=utils.port_sort_fn):
                port_id = info.portId
                if port_id not in status_map:
                    continue
                status = status_map[port_id]
                attrs = utils.get_status_strs(status)
                if not status.enabled:
                    continue
                label = info.name if info.name else port_id
                print(row_fmt.format(
                    label, attrs['admin_status'], attrs['color_align'],
                    attrs['link_status'], attrs['present'], attrs['speed']))
        except KeyError as e:
            print("Invalid port", e)
class PortStatusDetailCmd(object):
''' Print detailed/verbose port status '''
def __init__(self, client, ports, verbose):
    """Gather the data needed for a detailed port status report.

    :param client: thrift client used for all controller queries
    :param ports: ports the user asked about
    :param verbose: whether to print extra detail
    """
    self._client = client
    self._ports = ports
    # port id -> speed in Mbps, for deriving channel counts later.
    self._port_speeds = self._get_port_speeds()
    # Filled in later; __init__ only reserves the attribute.
    self._info_resp = None
    self._status_resp = self._client.getPortStatus(ports)
    # map of { transceiver_id -> { channel_id -> port } }
    self._t_to_p = collections.defaultdict(dict)
    # Transceiver ids in the order first seen (no duplicates).
    self._transceiver = []
    self._verbose = verbose
def _get_port_speeds(self):
    """Return a mapping of port id -> configured speed in Mbps."""
    return {port: info.speedMbps
            for port, info in self._client.getAllPortInfo().items()}
def _get_port_channels(self, port, xcvr_info):
    """Derive the channel list for *port*.

    Handles older controllers that don't return full channel info:
    the lane count is inferred from the configured port speed.
    """
    first = xcvr_info.channelId
    # Lane count by speed; 1G and unrecognized speeds use one channel.
    width = {20000: 2, 40000: 4}.get(self._port_speeds[port], 1)
    if width == 1:
        return [first]
    return range(first, first + width)
def _get_channel_detail(self, port, status):
    """Record the channel -> port mapping for this port's transceiver."""
    idx = status.transceiverIdx
    channels = idx.channels or self._get_port_channels(port, idx)
    tid = idx.transceiverId
    for channel in channels:
        self._t_to_p[tid][channel] = port
    # Preserve first-seen order and avoid duplicate transceiver ids.
    if tid not in self._transceiver:
        self._transceiver.append(tid)
def _mw_to_dbm(self, mw):
    """Convert milliwatts to dBm; 0 mW is mapped to 0.0 (not -inf)."""
    return 0.0 if mw == 0 else 10 * log10(mw)
def _print_port_details_sfpdom(self, port, dom):
    """Print detailed status for one port from its SFP DOM data.

    :param port: integer port id (a key of self._status_resp)
    :param dom: SfpDom thrift struct for the port
    """
    status = self._status_resp[port]
    print("Port %d: %s" % (port, dom.name))

    # Admin/link state, formatted by the shared status helper.
    attrs = utils.get_status_strs(status)
    admin_status = attrs['admin_status']
    link_status = attrs['link_status']
    print(" Admin Status: %s" % admin_status)
    print(" Oper Status: %s" % link_status)
    print(" Module Present: %s" % dom.sfpPresent)

    # Vendor identification, only when a module is present and reported.
    if dom.sfpPresent and dom.vendor is not None:
        print(" Vendor Name: %s" % dom.vendor.name)
        print(" Part Number: %s" % dom.vendor.partNumber)
        print(" Revision: %s" % dom.vendor.rev)
        print(" Serial Number: %s" % dom.vendor.serialNumber)
        print(" Date Code: %s" % dom.vendor.dateCode)

    print(" Monitoring Information:")
    if not dom.domSupported:
        # Nothing further to show without DOM support.
        print(" DOM Not Supported")
        return

    # Instantaneous sensor values; power readings converted mW -> dBm.
    print(" Values:")
    print(" {:<15} {:0.4}".format("Temperature", dom.value.temp))
    print(" {:<15} {:0.4}".format("Vcc", dom.value.vcc))
    print(" {:<15} {:0.4}".format("Tx Bias", dom.value.txBias))
    print(" {:<15} {:0.4}".format("Tx Power(dBm)",
                                  self._mw_to_dbm(dom.value.txPwr)))
    print(" {:<15} {:0.4}".format("Rx Power(dBm)",
                                  self._mw_to_dbm(dom.value.rxPwr)))

    # Alarm/warning flag table: one row per sensor, low/high bounds.
    print(" {:<14} {:>15} {:>15} {:>15} {:>15}".format(
        'Flags:',
        'Alarm Low', 'Warning Low', 'Warning High', 'Alarm High'))
    print(" {:<14} {:>15} {:>15} {:>15} {:>15}".format(
        'Temperature:',
        dom.flags.tempAlarmLow, dom.flags.tempWarnLow,
        dom.flags.tempWarnHigh, dom.flags.tempAlarmHigh))
    print(" {:<14} {:>15} {:>15} {:>15} {:>15}".format(
        'Vcc:',
        dom.flags.vccAlarmLow, dom.flags.vccWarnLow,
        dom.flags.vccWarnHigh, dom.flags.vccAlarmHigh))
    print(" {:<14} {:>15} {:>15} {:>15} {:>15}".format(
        'Tx Bias:',
        dom.flags.txBiasAlarmLow, dom.flags.txBiasWarnLow,
        dom.flags.txBiasWarnHigh, dom.flags.txBiasAlarmHigh))
    print(" {:<14} {:>15} {:>15} {:>15} {:>15}".format(
        'Tx Power(dBm):',
        self._mw_to_dbm(dom.flags.txPwrAlarmLow),
        self._mw_to_dbm(dom.flags.txPwrWarnLow),
        self._mw_to_dbm(dom.flags.txPwrWarnHigh),
        self._mw_to_dbm(dom.flags.txPwrAlarmHigh)))
    print(" {:<14} {:>15} {:>15} {:>15} {:>15}".format(
        'Rx Power(dBm):',
        self._mw_to_dbm(dom.flags.rxPwrAlarmLow),
        self._mw_to_dbm(dom.flags.rxPwrWarnLow),
        self._mw_to_dbm(dom.flags.rxPwrWarnHigh),
        self._mw_to_dbm(dom.flags.rxPwrAlarmHigh)))

    # Threshold table: the configured bound values themselves.
    thresh = dom.threshValue
    print(" {:<16} {:>15} {:>15} {:>15} {:>15}".format(
        'Thresholds:',
        'Alarm Low', 'Warning Low', 'Warning High', 'Alarm High'))
    print(" {:<14} {:>15.4} {:>15.4} {:>15.4} {:>15.4}".format(
        'Temperature:',
        thresh.tempAlarmLow, thresh.tempWarnLow,
        thresh.tempWarnHigh, thresh.tempAlarmHigh))
    print(" {:<14} {:>15.4} {:>15.4} {:>15.4} {:>15.4}".format(
        'Vcc:',
        thresh.vccAlarmLow, thresh.vccWarnLow,
        thresh.vccWarnHigh, thresh.vccAlarmHigh))
    print(" {:<14} {:>15.4} {:>15.4} {:>15.4} {:>15.4}".format(
        'Tx Bias:',
        thresh.txBiasAlarmLow, thresh.txBiasWarnLow,
        thresh.txBiasWarnHigh, thresh.txBiasAlarmHigh))
    print(" {:<14} {:>15.4} {:>15.4} {:>15.4} {:>15.4}".format(
        'Tx Power(dBm):',
        self._mw_to_dbm(thresh.txPwrAlarmLow),
        self._mw_to_dbm(thresh.txPwrWarnLow),
        self._mw_to_dbm(thresh.txPwrWarnHigh),
        self._mw_to_dbm(thresh.txPwrAlarmHigh)))
    print(" {:<14} {:>15.4} {:>15.4} {:>15.4} {:>15.4}".format(
        'Rx Power(dBm):',
        self._mw_to_dbm(thresh.rxPwrAlarmLow),
        self._mw_to_dbm(thresh.rxPwrWarnLow),
        self._mw_to_dbm(thresh.rxPwrWarnHigh),
        self._mw_to_dbm(thresh.rxPwrAlarmHigh)))
def _list_ports_detail_sfpdom(self):
    """Print detailed port status based on Sfp DOM info."""
    dom_resp = self._client.getSfpDomInfo(self._ports)
    for port in self._status_resp.keys():
        dom = dom_resp.get(port)
        if dom is None:
            # No DOM data returned for this port: synthesize a placeholder
            # entry so every known port is still printed.
            dom = optic_ttypes.SfpDom()
            dom.name = 'Ethernet%d' % port
            dom.sfpPresent = False
            dom.domSupported = False
        self._print_port_details_sfpdom(port, dom)
def _get_dummy_status(self):
    """Fill in placeholder TransceiverInfo entries for ports whose
    transceiver returned no data."""
    for port, status in sorted(self._status_resp.items()):
        if status.transceiverIdx:
            tid = status.transceiverIdx.transceiverId
            if tid not in self._info_resp.keys():
                info = optic_ttypes.TransceiverInfo()
                info.port = port
                info.present = False
                # NOTE(review): the membership test above is keyed by
                # transceiver id, but the placeholder is stored under the
                # port number -- confirm whether
                # `self._info_resp[tid] = info` was intended.
                self._info_resp[port] = info
def _print_transceiver_ports(self, ch_to_port, info):
    """Print per-port status lines for a transceiver.

    *info* is accepted for signature parity with the caller but unused.
    """
    for port in ch_to_port.values():
        status_strs = utils.get_status_strs(self._status_resp[port])
        print("Port: {:>2} Status: {:<8} Link: {:<4} Transceiver: {}"
              .format(port, status_strs['admin_status'],
                      status_strs['link_status'], status_strs['present']))
def _print_vendor_details(self, info):
    """Print the transceiver's vendor identification block."""
    vendor = info.vendor
    print("Vendor: {:<16} Part Number: {:<16}".format(
        vendor.name, vendor.partNumber))
    # Serial and date code share one output line (end="").
    print("Serial: {:<16} ".format(vendor.serialNumber), end="")
    print("Date Code: {:<8} Revision: {:<2}".format(
        vendor.dateCode, vendor.rev))
def _print_settings_details(self, info):
    """Print the transceiver's feature/settings state."""
    settings = info.settings
    # Thrift enums are printed by name via their _VALUES_TO_NAMES maps.
    feature = optic_ttypes.FeatureState._VALUES_TO_NAMES
    print("CDR Tx: {}\tCDR Rx: {}".format(
        feature[settings.cdrTx], feature[settings.cdrRx]))
    print("Rate select: {}".format(
        optic_ttypes.RateSelectState._VALUES_TO_NAMES[
            settings.rateSelect]))
    print("\tOptimised for: {}".format(
        optic_ttypes.RateSelectSetting._VALUES_TO_NAMES[
            settings.rateSelectSetting]))
    print("Power measurement: {}".format(
        feature[settings.powerMeasurement]))
    print("Power control: {}".format(
        optic_ttypes.PowerControlState._VALUES_TO_NAMES[
            settings.powerControl]))
def _print_cable_details(self, info):
    """Print cable length details, one segment per populated field."""
    print("Cable:", end=""),
    if info.cable.singleModeKm:
        # NOTE(review): `% 1000` keeps the remainder; if singleModeKm is in
        # meters, an integer division (`// 1000`) to convert to km looks
        # intended -- confirm against the thrift field definition.
        print("Single Mode: {}km".format(
            info.cable.singleModeKm % 1000), end=""),
    if info.cable.singleMode:
        print("Single Mode: {}m".format(
            info.cable.singleMode), end=""),
    if info.cable.om3:
        print("OM3: {}m".format(info.cable.om3), end=""),
    if info.cable.om2:
        print("OM2: {}m".format(info.cable.om2), end=""),
    if info.cable.om1:
        print("OM1: {}m".format(info.cable.om1), end=""),
    if info.cable.copper:
        print("Copper: {}m".format(info.cable.copper), end="")
    # Terminate the single accumulated output line.
    print("")
def _print_thresholds(self, thresh):
    """Print the alarm/warning threshold table, one row per sensor."""
    # Header row: alarm/warning bounds, low then high.
    print(" {:<16} {:>10} {:>15} {:>15} {:>10}".format(
        'Thresholds:',
        'Alarm Low', 'Warning Low', 'Warning High', 'Alarm High'))
    print(" {:<14} {:>9.4}C {:>14.4}C {:>14.4}C {:>9.4}C".format(
        'Temp:',
        thresh.temp.alarm.low, thresh.temp.warn.low,
        thresh.temp.warn.high, thresh.temp.alarm.high))
    print(" {:<14} {:>10.4} {:>15.4} {:>15.4} {:>10.4}".format(
        'Vcc:',
        thresh.vcc.alarm.low, thresh.vcc.warn.low,
        thresh.vcc.warn.high, thresh.vcc.alarm.high))
    print(" {:<14} {:>10.4} {:>15.4} {:>15.4} {:>10.4}".format(
        'Tx Bias:',
        thresh.txBias.alarm.low, thresh.txBias.warn.low,
        thresh.txBias.warn.high, thresh.txBias.alarm.high))
    # txPwr may be unset; skip its row entirely in that case.
    if thresh.txPwr:
        print(" {:<14} {:>10.4} {:>15.4} {:>15.4} {:>10.4}".format(
            'Tx Power(dBm):',
            self._mw_to_dbm(thresh.txPwr.alarm.low),
            self._mw_to_dbm(thresh.txPwr.warn.low),
            self._mw_to_dbm(thresh.txPwr.warn.high),
            self._mw_to_dbm(thresh.txPwr.alarm.high)))
    print(" {:<14} {:>10.4} {:>15.4} {:>15.4} {:>10.4}".format(
        'Rx Power(dBm):',
        self._mw_to_dbm(thresh.rxPwr.alarm.low),
        self._mw_to_dbm(thresh.rxPwr.warn.low),
        self._mw_to_dbm(thresh.rxPwr.warn.high),
        self._mw_to_dbm(thresh.rxPwr.alarm.high)))
def _print_sensor_flags(self, sensor):
''' print details about sensor flags '''
# header
print(" {:<12} {:>10} {:>15} | |
"""
StarryPy Player Manager Plugin
Provides core player management features:
- implements roles
- implements bans
- manages player database
Original authors: AMorporkian
Updated for release: kharidiron
"""
import asyncio
import datetime
import pprint
import re
import json
from operator import attrgetter
from base_plugin import SimpleCommandPlugin
from data_parser import ConnectFailure, ServerDisconnect
from pparser import build_packet
from utilities import Command, DotDict, State, broadcast, send_message, \
WarpType, WarpWorldType, WarpAliasType, Cupboard
from packets import packets
class Player:
    """
    Prototype class for a player.

    Equality and hashing are both based on the player's UUID.
    """

    def __init__(self, uuid, species="unknown", name="", alias="",
                 last_seen=None, ranks=None, logged_in=False,
                 connection=None, client_id=-1, ip="", planet="",
                 muted=False, state=None, team_id=None):
        """
        Initialize a player object. Populate all the necessary details.

        :param uuid: Unique identifier; used for equality and hashing.
        :param species: Player species string.
        :param name: Raw character name.
        :param alias: Display alias.
        :param last_seen: Last-seen timestamp; defaults to now.
        :param ranks: Iterable of rank names; stored as a set.
        :param logged_in: Whether the player is currently logged in.
        :param connection: Active connection object (not pickled).
        :param client_id: Client id assigned by the server.
        :param ip: Client IP address.
        :param planet: Current location; also the initial last_location.
        :param muted: Whether the player is muted.
        :param state: Accepted for interface compatibility; not stored.
        :param team_id: Current team identifier.
        :return:
        """
        self.uuid = uuid
        self.species = species
        self.name = name
        self.alias = alias
        if last_seen is None:
            self.last_seen = datetime.datetime.now()
        else:
            self.last_seen = last_seen
        if ranks is None:
            self.ranks = set()
        else:
            self.ranks = set(ranks)
        # Per-player permission overrides on top of rank permissions.
        self.granted_perms = set()
        self.revoked_perms = set()
        self.permissions = set()
        self.chat_prefix = ""
        self.priority = 0
        self.logged_in = logged_in
        self.connection = connection
        self.client_id = client_id
        self.ip = ip
        self.location = planet
        self.last_location = planet
        self.muted = muted
        self.team_id = team_id

    def __str__(self):
        """
        Convenience method for peeking at the Player object.

        :return: Pretty-printed dictionary of Player object.
        """
        return pprint.pformat(self.__dict__)

    def __getstate__(self):
        """
        Strip unpicklable attributes when called for pickling.

        :return: The object's __dict__ with connection-related attributes
                 removed and login state reset.
        """
        res = self.__dict__.copy()
        if "connection" in res:
            del res["connection"]
        if res["logged_in"]:
            # A pickled player is by definition no longer connected.
            res["logged_in"] = False
            res["location"] = None
            res["last_seen"] = datetime.datetime.now()
        return res

    def __eq__(self, other):
        """
        Comparison function to check if this player object equals another.

        :param other: The other object being compared.
        :return: Boolean: True if the two objects are Players and their
                 UUID is identical, False otherwise.
        """
        if isinstance(other, Player):
            return self.uuid == other.uuid
        return False

    def __hash__(self):
        # Must agree with __eq__: players that compare equal (same UUID)
        # need identical hashes, otherwise set/dict membership breaks.
        # The previous id(self) hash violated that invariant.
        return hash(self.uuid)

    def update_ranks(self, ranks):
        """
        Update the player's info to match any changes made to their ranks.

        :param ranks: Mapping of rank name -> {'permissions', 'priority',
                      'prefix'} describing all configured ranks.
        :return: Null.
        """
        self.permissions = set()
        highest_rank = None
        # Rank names are stored/compared case-insensitively.
        self.ranks = {x.lower() for x in self.ranks}
        for r in self.ranks:
            if not highest_rank:
                highest_rank = r
            self.permissions |= ranks[r]['permissions']
            if ranks[r]['priority'] > ranks[highest_rank]['priority']:
                highest_rank = r
        # Apply per-player overrides after rank permissions.
        self.permissions |= self.granted_perms
        self.permissions -= self.revoked_perms
        if highest_rank:
            self.priority = ranks[highest_rank]['priority']
            self.chat_prefix = ranks[highest_rank]['prefix']
        else:
            self.priority = 0
            self.chat_prefix = ""

    def perm_check(self, perm):
        """
        Check whether this player holds a permission.

        :param perm: Permission string; falsy means "no permission needed".
        :return: Boolean.
        """
        if not perm:
            return True
        elif "special.allperms" in self.permissions:
            return True
        elif perm.lower() in self.revoked_perms:
            # Revocations win even if a rank still grants the permission.
            return False
        elif perm.lower() in self.permissions:
            return True
        else:
            return False
class Ship:
    """
    A player's personal ship world.
    """

    def __init__(self, uuid, player):
        self.uuid = uuid
        self.player = player

    def __str__(self):
        return "%s's ship" % self.player

    def locationtype(self):
        """Location type tag used in world id strings."""
        return "ShipWorld"
class Planet:
    """
    Prototype class for a planet.
    """

    def __init__(self, location=(0, 0, 0), planet=0,
                 satellite=0, name=""):
        """
        :param location: (x, y, z) celestial coordinates.
        :param planet: Planet number within the system.
        :param satellite: Satellite number; 0 means the planet itself.
        :param name: Human-readable planet name.
        """
        self.x, self.y, self.z = location
        self.planet = planet
        self.satellite = satellite
        self.name = name

    def _gen_planet_string(self):
        """Build the world id string, omitting the satellite component
        when the location is the planet itself (satellite <= 0)."""
        s = "CelestialWorld:{}:{}:{}:{}".format(
            self.x, self.y, self.z, self.planet)
        # Plain 0 replaces the original's redundant int(0).
        if self.satellite > 0:
            s += ":{}".format(self.satellite)
        return s

    def __str__(self):
        # Unlike _gen_planet_string, always includes the satellite field.
        return "CelestialWorld:{}:{}:{}:{}:{}".format(self.x, self.y, self.z,
                                                      self.planet,
                                                      self.satellite)

    def locationtype(self):
        """Location type tag used in world id strings."""
        return "CelestialWorld"
class IPBan:
    """
    Record of a ban applied to an IP address.
    """

    def __init__(self, ip, reason, banned_by, timeout=None):
        self.ip = ip
        self.reason = reason
        self.timeout = timeout
        self.banned_by = banned_by
        # Stamp the ban with its creation time.
        self.banned_at = datetime.datetime.now()
###
class PlayerManager(SimpleCommandPlugin):
name = "player_manager"
def __init__(self):
    """Load persistent player data, rank configuration, and start the
    plugin's background tasks."""
    # Defaults used when the plugin has no saved configuration yet.
    self.default_config = {"player_db": "config/player",
                           "owner_uuid": "!--REPLACE IN CONFIG FILE--!",
                           "owner_ranks": ["Owner"],
                           "new_user_ranks": ["Guest"],
                           "db_save_interval": 900}
    super().__init__()
    # Persistent storage for player, planet, and plugin data.
    self.shelf = Cupboard(self.plugin_config.player_db)
    self.sync()
    self.players = self.shelf["players"]
    self.planets = self.shelf["planets"]
    self.plugin_shelf = self.shelf["plugins"]
    self.players_online = []
    try:
        with open("config/permissions.json", "r") as file:
            self.rank_config = json.load(file)
    except IOError as e:
        # The permissions file is mandatory; abort server startup.
        self.logger.error("Fatal: Could not read permissions file!")
        self.logger.error(e)
        raise SystemExit
    except json.JSONDecodeError as e:
        self.logger.error("Fatal: Could not parse permissions.json!")
        self.logger.error(e)
        raise SystemExit
    self.ranks = self._rebuild_ranks(self.rank_config)
    # Kick off the background coroutines (_reap, _save_shelf).
    asyncio.ensure_future(self._reap())
    asyncio.ensure_future(self._save_shelf())
# Packet hooks - look for these packets and act on them
def on_protocol_request(self, data, connection):
    """
    Catch when a client first pings the server for a connection. Set the
    'state' variable to keep track of this.

    :param data: The packet containing the action (unused here).
    :param connection: The connection from which the packet came.
    :return: Boolean: True. Must be true, so that packets get passed on.
    """
    connection.state = State.VERSION_SENT
    return True
def on_handshake_challenge(self, data, connection):
    """
    Catch when a client tries to handshake with server. Update the 'state'
    variable to keep track of this. Note: This step only occurs when a
    server requires name/password authentication.

    :param data: The handshake challenge packet (unused here).
    :param connection: The connection from which the packet came.
    :return: Boolean: True. Must be true, so that packets get passed on.
    """
    connection.state = State.HANDSHAKE_CHALLENGE_SENT
    return True
def on_handshake_response(self, data, connection):
    """
    Catch when the server responds to a client's handshake. Update the
    'state' variable to keep track of this. Note: This step only occurs
    when a server requires name/password authentication.

    :param data: The handshake response packet (unused here).
    :param connection: The connection from which the packet came.
    :return: Boolean: True. Must be true, so that packets get passed on.
    """
    connection.state = State.HANDSHAKE_RESPONSE_RECEIVED
    return True
def on_client_connect(self, data, connection):
    """
    Catch when the client updates the server with its connection
    details. This is a key step to fingerprinting the client, and
    ensuring they stay in the wrapper. This is also where we apply our
    bans.

    :param data: The parsed connect packet (player details).
    :param connection: The connection from which the packet came.
    :return: Boolean: True on successful connection, False on a
             failed connection.
    """
    try:
        player = yield from self._add_or_get_player(**data["parsed"])
        # Raises on a banned client; handled with the same rejection path.
        self.check_bans(connection)
    except (NameError, ValueError) as e:
        # Reject the client with the error text, then drop the connection.
        yield from connection.raw_write(self.build_rejection(str(e)))
        self.logger.info("Player with IP {}'s connection was rejected. "
                         "Reason: {}".format(connection.client_ip, str(e)))
        connection.die()
        return False
    player.ip = connection.client_ip
    connection.player = player
    return True
def on_connect_success(self, data, connection):
    """
    Handle a successfully established connection: record the assigned
    client id, mark the player as logged in and online, and update the
    connection state.

    :param data: The parsed connect-success packet.
    :param connection: The connection from which the packet came.
    :return: Boolean: True. Must be true, so that packets get passed on.
    """
    player = connection.player
    player.connection = connection
    player.client_id = data["parsed"]["client_id"]
    player.logged_in = True
    player.last_seen = datetime.datetime.now()
    connection.state = State.CONNECTED
    self.players_online.append(player.uuid)
    return True
def on_client_disconnect_request(self, data, connection):
    """
    Catch when a client requests a disconnect from the server. At this
    point, we need to clean up the connection information we have for the
    client (logged_in state, location).

    :param data: The disconnect-request packet (unused here).
    :param connection: The connection from which the packet came.
    :return: Boolean: True. Must be true, so that packets get passed on.
    """
    # No cleanup is performed here; see on_server_disconnect, which
    # handles the actual offline bookkeeping.
    return True
def on_server_disconnect(self, data, connection):
    """
    Catch when the server disconnects a client. Similar to the client
    disconnect packet, use this as a cue to perform cleanup, if it wasn't
    done already.

    :param data: The disconnect packet (unused here).
    :param connection: The connection from which the packet came.
    :return: Boolean: True. Must be true, so that packets get passed on.
    """
    self._set_offline(connection)
    return True
def on_world_start(self, data, connection):
    """
    Hook when a new world instance is started. Use the details passed to
    determine the location of the world, and update the player's
    information accordingly.

    :param data: The parsed world-start packet (template data).
    :param connection: The connection from which the packet came.
    :return: Boolean: True. Don't stop the packet here.
    """
    planet = data["parsed"]["template_data"]
    # celestialParameters is only present for planetary worlds; ship and
    # instance worlds are handled elsewhere (on_player_warp_result).
    if planet["celestialParameters"] is not None:
        location = yield from self._add_or_get_planet(
            **planet["celestialParameters"]["coordinate"])
        connection.player.location = location
    self.logger.info("Player {} is now at location: {}".format(
        connection.player.alias,
        connection.player.location))
    return True
def on_player_warp_result(self, data, connection):
"""
Hook when a player warps to a world. This action is also used when
a player first logs in. Use the details passed to determine the
location of the world, and update the player's information accordingly.
:param data:
:param connection:
:return: Boolean: True. Don't stop the packet here.
"""
if data["parsed"]["warp_success"]:
warp_data = data["parsed"]["warp_action"]
p = connection.player
if warp_data["warp_type"] == WarpType.TO_ALIAS:
if warp_data["alias_id"] == WarpAliasType.ORBITED:
# down to planet, need coordinates from world_start
p.last_location = p.location
pass
elif warp_data["alias_id"] == WarpAliasType.SHIP:
# back on own ship
p.last_location = p.location
p.location = yield from self._add_or_get_ship(p.uuid)
elif warp_data["alias_id"] == WarpAliasType.RETURN:
p.location, p.last_location = p.last_location, p.location
elif warp_data["warp_type"] == WarpType.TO_PLAYER:
target = self.get_player_by_uuid(warp_data["player_id"]
.decode("utf-8"))
p.last_location = p.location
p.location = target.location
elif warp_data["warp_type"] == WarpType.TO_WORLD:
if warp_data["world_id"] == WarpWorldType.CELESTIAL_WORLD:
p.last_location = p.location
pass
elif warp_data["world_id"] == WarpWorldType.PLAYER_WORLD:
p.last_location = p.location
p.location = yield from self._add_or_get_ship(
warp_data["ship_id"])
elif warp_data["world_id"] == WarpWorldType.UNIQUE_WORLD:
p.last_location = p.location
p.location = yield from self._add_or_get_instance(warp_data)
elif warp_data["world_id"] | |
# /usr/bin/env python3
import datetime
import itertools
import json
import logging
import os
import os.path
import pathlib
import sys
import click
import osxmetadata
from ._version import __version__
from .attributes import _LONG_NAME_WIDTH, _SHORT_NAME_WIDTH, ATTRIBUTES
from .backup import load_backup_file, write_backup_file
from .classes import _AttributeList, _AttributeTagsList
from .constants import (
_BACKUP_FILENAME,
_COLORNAMES_LOWER,
_FINDERINFO_NAMES,
_TAGS_NAMES,
FINDER_COLOR_NONE,
)
from .findertags import Tag, tag_factory
# TODO: how is metadata on symlink handled?
# should symlink be resolved before gathering metadata?
# currently, symlinks are resolved before handling metadata but not sure this is the right behavior
# in 10.13.6: symlinks appear to inherit tags but not Finder Comments:
# e.g. changes to tags in a file change tags in the symlink, but a symlink can have its own Finder comment
# Finder aliases inherit neither
# TODO: add selective restore (e.g only restore files matching command line path)
# e.g osxmetadata -r meta.json *.pdf
# Click CLI object & context settings
class CLI_Obj:
    """Context object threaded through the click CLI."""

    def __init__(self, debug=False, files=None):
        self.debug = debug
        self.files = files
        if debug:
            # Enable library-level debug logging as early as possible.
            osxmetadata.debug._set_debug(True)
class MyClickCommand(click.Command):
    """ Custom click.Command that overrides get_help() to show additional info """

    def get_help(self, ctx):
        """Return the standard click help text followed by a formatted
        table of all supported metadata attributes."""
        help_text = super().get_help(ctx)
        formatter = click.HelpFormatter()
        # build help text from all the attribute names
        # get set of attribute names
        # (to eliminate the duplicate entries for short_constant and long constant)
        # then sort and get the short constant, long constant, and help text
        # passed to click.HelpFormatter.write_dl for formatting
        attr_tuples = [("Short Name", "Description")]
        attr_tuples.extend(
            (
                ATTRIBUTES[attr].name,
                f"{ATTRIBUTES[attr].short_constant}, "
                + f"{ATTRIBUTES[attr].constant}; {ATTRIBUTES[attr].help}",
            )
            for attr in sorted(
                [attr for attr in {attr.name for attr in ATTRIBUTES.values()}]
            )
        )
        # Explanatory prose sections, then the attribute table itself.
        formatter.write("\n\n")
        formatter.write_text(
            "Valid attributes for ATTRIBUTE: "
            + "Each attribute has a short name, a constant name, and a long constant name. "
            + "Any of these may be used for ATTRIBUTE"
        )
        formatter.write("\n")
        formatter.write_text('For example: --set findercomment "Hello world"')
        formatter.write_text('or: --set kMDItemFinderComment "Hello world"')
        formatter.write_text(
            'or: --set com.apple.metadata:kMDItemFinderComment "Hello world"'
        )
        formatter.write("\n")
        formatter.write_text(
            "Attributes that are strings can only take one value for --set; "
            + "--append will append to the existing value. "
            + "Attributes that are arrays can be set multiple times to add to the array: "
            + "e.g. --set keywords 'foo' --set keywords 'bar' will set keywords to ['foo', 'bar']"
        )
        formatter.write("\n")
        formatter.write_text(
            "Options are executed in the following order regardless of order "
            + "passed on the command line: "
            + "restore, wipe, copyfrom, clear, set, append, update, remove, mirror, get, list, backup. "
            + "--backup and --restore are mutually exclusive. "
            + "Other options may be combined or chained together."
        )
        formatter.write("\n")
        formatter.write_text(
            "Finder tags (tags attribute) contain both a name and an optional color. "
            + "To specify the color, append comma + color name (e.g. 'red') after the "
            + "tag name. For example --set tags Foo,red. "
            + "Valid color names are: "
            + f"{', '.join([color for color, colorid in _COLORNAMES_LOWER.items() if colorid != FINDER_COLOR_NONE])}. "
            + "If color is not specified but a tag of the same name has already been assigned a color "
            + "in the Finder, the same color will automatically be assigned. "
        )
        formatter.write("\n")
        formatter.write_dl(attr_tuples)
        help_text += formatter.getvalue()
        return help_text
# All the command line options defined here
# --- Click argument/option definitions shared by the `cli` command ---
FILES_ARGUMENT = click.argument(
    "files", metavar="FILE", nargs=-1, type=click.Path(exists=True)
)
HELP_OPTION = click.option(
    # add this only so I can show help text via echo_via_pager
    "--help",
    "-h",
    "help_",
    help="Show this message and exit.",
    is_flag=True,
    default=False,
    required=False,
)
WALK_OPTION = click.option(
    "--walk",
    "-w",
    is_flag=True,
    help="Walk directory tree, processing each file in the tree.",
    default=False,
)
JSON_OPTION = click.option(
    "--json",
    "-j",
    "json_",
    is_flag=True,
    help="Print output in JSON format, for use with --list and --get.",
    default=False,
)
DEBUG_OPTION = click.option(
    "--debug", required=False, is_flag=True, default=False, hidden=True
)
# --- Attribute read/write options ---
SET_OPTION = click.option(
    "--set",
    "-s",
    "set_",
    metavar="ATTRIBUTE VALUE",
    help="Set ATTRIBUTE to VALUE.",
    nargs=2,
    multiple=True,
    required=False,
)
GET_OPTION = click.option(
    "--get",
    "-g",
    help="Get value of ATTRIBUTE.",
    metavar="ATTRIBUTE",
    nargs=1,
    multiple=True,
    required=False,
)
LIST_OPTION = click.option(
    "--list",
    "-l",
    "list_",
    help="List all metadata attributes for FILE.",
    is_flag=True,
    default=False,
)
CLEAR_OPTION = click.option(
    "--clear",
    "-c",
    help="Remove attribute from FILE.",
    metavar="ATTRIBUTE",
    nargs=1,
    multiple=True,
    required=False,
)
WIPE_OPTION = click.option(
    "--wipe",
    "-X",
    help="Wipe all metadata attributes from FILE.",
    is_flag=True,
    default=False,
    required=False,
)
APPEND_OPTION = click.option(
    "--append",
    "-a",
    metavar="ATTRIBUTE VALUE",
    help="Append VALUE to ATTRIBUTE.",
    nargs=2,
    multiple=True,
    required=False,
)
UPDATE_OPTION = click.option(
    "--update",
    "-u",
    metavar="ATTRIBUTE VALUE",
    help="Update ATTRIBUTE with VALUE; for multi-valued attributes, "
    "this adds VALUE to the attribute if not already in the list.",
    nargs=2,
    multiple=True,
    required=False,
)
REMOVE_OPTION = click.option(
    "--remove",
    "-r",
    metavar="ATTRIBUTE VALUE",
    help="Remove VALUE from ATTRIBUTE; only applies to multi-valued attributes.",
    nargs=2,
    multiple=True,
    required=False,
)
MIRROR_OPTION = click.option(
    "--mirror",
    "-m",
    metavar="ATTRIBUTE1 ATTRIBUTE2",
    help="Mirror values between ATTRIBUTE1 and ATTRIBUTE2 so that ATTRIBUTE1 = ATTRIBUTE2; "
    "for multi-valued attributes, merges values; for string attributes, sets ATTRIBUTE1 = ATTRIBUTE2 "
    "overwriting any value in ATTRIBUTE1. "
    "For example: '--mirror keywords tags' sets tags and keywords to same values.",
    nargs=2,
    required=False,
    multiple=True,
)
# --- Backup/restore and miscellaneous options ---
BACKUP_OPTION = click.option(
    "--backup",
    "-B",
    help="Backup FILE attributes. "
    "Backup file '.osxmetadata.json' will be created in same folder as FILE.",
    is_flag=True,
    required=False,
    default=False,
)
RESTORE_OPTION = click.option(
    "--restore",
    "-R",
    help="Restore FILE attributes from backup file. "
    "Restore will look for backup file '.osxmetadata.json' in same folder as FILE.",
    is_flag=True,
    required=False,
    default=False,
)
VERBOSE_OPTION = click.option(
    "--verbose",
    "-V",
    help="Print verbose output.",
    is_flag=True,
    default=False,
    required=False,
)
COPY_FROM_OPTION = click.option(
    "--copyfrom",
    "-f",
    metavar="SOURCE_FILE",
    help="Copy attributes from file SOURCE_FILE.",
    type=click.Path(exists=True),
    nargs=1,
    multiple=False,
    required=False,
)
@click.command(cls=MyClickCommand)
@click.version_option(__version__, "--version", "-v")
@HELP_OPTION
@DEBUG_OPTION
@FILES_ARGUMENT
@WALK_OPTION
@JSON_OPTION
@WIPE_OPTION
@SET_OPTION
@LIST_OPTION
@CLEAR_OPTION
@APPEND_OPTION
@GET_OPTION
@REMOVE_OPTION
@UPDATE_OPTION
@MIRROR_OPTION
@BACKUP_OPTION
@RESTORE_OPTION
@VERBOSE_OPTION
@COPY_FROM_OPTION
@click.pass_context
def cli(
ctx,
help_,
debug,
files,
walk,
json_,
wipe,
set_,
list_,
clear,
append,
get,
remove,
update,
mirror,
backup,
restore,
verbose,
copyfrom,
):
""" Read/write metadata from file(s). """
if help_:
click.echo_via_pager(ctx.get_help())
ctx.exit(0)
if debug:
logging.disable(logging.NOTSET)
logging.debug(
f"ctx={ctx} debug={debug} files={files} walk={walk} json={json_} "
f"set={set_}, list={list_},clear={clear},append={append},get={get}, remove={remove} "
f"backup={backup}, restore={restore}, mirror={mirror}"
)
if not files:
click.echo(ctx.get_help())
ctx.exit()
# validate values for --set, --clear, --append, --get, --remove, --mirror
if any([set_, append, remove, clear, get, mirror]):
attributes = (
[a[0] for a in set_]
+ [a[0] for a in append]
+ list(clear)
+ list(get)
+ list(itertools.chain(*mirror))
)
invalid_attr = False
for attr in attributes:
if attr not in ATTRIBUTES:
click.echo(f"Invalid attribute {attr}", err=True)
invalid_attr = True
if invalid_attr:
# click.echo("") # add a new line before rest of help text
# click.echo(ctx.get_help())
ctx.exit(2)
# check that json_ only used with get or list_
if json_ and not any([get, list_]):
click.echo("--json can only be used with --get or --list", err=True)
# click.echo("") # add a new line before rest of help text
# click.echo(ctx.get_help())
ctx.exit(2)
# can't backup and restore at once
if backup and restore:
click.echo("--backup and --restore cannot be used together", err=True)
# click.echo("") # add a new line before rest of help text
# click.echo(ctx.get_help())
ctx.exit(2)
# check compatible types for mirror
if mirror:
for item in mirror:
attr1, attr2 = item
attribute1 = ATTRIBUTES[attr1]
attribute2 = ATTRIBUTES[attr2]
# avoid self mirroring
if attribute1 == attribute2:
click.echo(
f"cannot mirror the same attribute: {attribute1.name} {attribute2.name}",
err=True,
)
ctx.get_help()
ctx.exit(2)
# check type compatibility
if (
attribute1.list != attribute2.list
or attribute1.type_ != attribute2.type_
):
# can only mirror compatible attributes
click.echo(
f"Cannot mirror {attr1}, {attr2}: incompatible types", err=True
)
# click.echo("") # add a new line before rest of help text
# click.echo(ctx.get_help())
ctx.exit(2)
# loop through each file, process it, then do backup or restore if needed
for filename in files:
if walk and os.path.isdir(filename):
for root, _, filenames in os.walk(filename):
backup_file = pathlib.Path(root) / _BACKUP_FILENAME
if verbose:
click.echo(f"Processing directory {root}")
if restore:
try:
backup_data = load_backup_file(backup_file)
except FileNotFoundError:
click.echo(
f"Missing backup file {backup_file} for {root}, skipping restore",
err=True,
)
backup_data = {}
else:
backup_data = {}
for fname in filenames:
fpath = pathlib.Path(f"{root}/{fname}").resolve()
if restore and backup_data:
try:
attr_dict = backup_data[fname]
if verbose:
click.echo(f" Restoring attribute data for {fpath}")
md = osxmetadata.OSXMetaData(fpath)
md._restore_attributes(attr_dict)
except:
if verbose:
click.echo(
f" Skipping restore for file {fpath}: not in backup file"
)
if verbose:
click.echo(f" Processing file: {fpath}")
process_file(
ctx,
fpath,
json_,
set_,
append,
update,
remove,
clear,
get,
list_,
mirror,
wipe,
verbose,
copyfrom,
)
if backup:
if verbose:
click.echo(f" Backing up attribute data for {fpath}")
json_data = osxmetadata.OSXMetaData(fpath)._to_dict()
backup_data[fpath.name] = json_data
if backup:
# done walking through files in this folder, write the backup data
write_backup_file(backup_file, backup_data)
elif os.path.isdir(filename):
# skip directory
if verbose:
click.echo(
f"skipping directory: | |
<gh_stars>0
# pylint: skip-file
import cv2
import time
import collections
import enum
import math
import numpy as np
import os
import pathlib
import uuid
from collections import OrderedDict
from importlib import import_module
from darcyai.utils import validate_type, validate
from darcyai.config import Config
from .coral_perceptor_base import CoralPerceptorBase
from .people_perceptor_pom import PeoplePOM
class KeypointType(enum.IntEnum):
    """Pose keypoints.

    Indices of the 17 COCO-style body keypoints produced by PoseNet. The
    integer value of each member matches the keypoint's position in the
    model's output tensors, so members can be used directly as indices.
    """
    NOSE = 0
    LEFT_EYE = 1
    RIGHT_EYE = 2
    LEFT_EAR = 3
    RIGHT_EAR = 4
    LEFT_SHOULDER = 5
    RIGHT_SHOULDER = 6
    LEFT_ELBOW = 7
    RIGHT_ELBOW = 8
    LEFT_WRIST = 9
    RIGHT_WRIST = 10
    LEFT_HIP = 11
    RIGHT_HIP = 12
    LEFT_KNEE = 13
    RIGHT_KNEE = 14
    LEFT_ANKLE = 15
    RIGHT_ANKLE = 16
# Lightweight records used to represent decoded pose data.
Point = collections.namedtuple('Point', ['x', 'y'])
Keypoint = collections.namedtuple('Keypoint', ['point', 'score'])
Pose = collections.namedtuple('Pose', ['keypoints', 'score'])


def _point_distance(a, b):
    """Euclidean distance between two Point instances."""
    return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)


# Attach as a staticmethod so it can be called either via the class or via
# an instance without implicit self-binding: Point.distance(p1, p2).
Point.distance = staticmethod(_point_distance)
class PoseEngine():
    """Engine used for pose tasks.

    Wraps a TFLite PoseNet interpreter with both the Edge TPU delegate and
    the PoseNet decoder delegate attached, and decodes the interpreter's raw
    output tensors into ``Pose`` objects.
    """

    def __init__(self, model_path, mirror=False, arch=os.uname().machine):
        """Creates a PoseEngine with given model.

        Args:
            model_path: String, path to TF-Lite Flatbuffer file.
            mirror: Flip keypoints horizontally.
            arch: Machine architecture string used to select the matching
                posenet_decoder.so build (defaults to the host machine).

        Raises:
            ValueError: An error occurred when model output is invalid.
        """
        # Import lazily so this module can be imported on machines that do
        # not have the pycoral / tflite_runtime packages installed.
        self.__edgetpu = import_module("pycoral.utils.edgetpu")
        tflite_runtime = import_module("tflite_runtime.interpreter")
        load_delegate = tflite_runtime.load_delegate
        Interpreter = tflite_runtime.Interpreter
        script_dir = os.path.dirname(os.path.realpath(__file__))
        edgetpu_shared_lib = 'libedgetpu.so.1'
        posenet_shared_lib = os.path.join(
            script_dir, 'posenet_lib', arch, 'posenet_decoder.so')
        edgetpu_delegate = load_delegate(edgetpu_shared_lib)
        posenet_decoder_delegate = load_delegate(posenet_shared_lib)
        self._interpreter = Interpreter(
            model_path, experimental_delegates=[edgetpu_delegate, posenet_decoder_delegate])
        self._interpreter.allocate_tensors()
        self._mirror = mirror
        self._input_tensor_shape = self.get_input_tensor_shape()
        if (self._input_tensor_shape.size != 4 or
                self._input_tensor_shape[3] != 3 or
                self._input_tensor_shape[0] != 1):
            raise ValueError(
                ('Image model should have input shape [1, height, width, 3]!'
                 ' This model has {}.'.format(self._input_tensor_shape)))
        _, self._input_height, self._input_width, self._input_depth = self.get_input_tensor_shape()
        self._input_type = self._interpreter.get_input_details()[0]['dtype']
        self._inf_time = 0

    def run_inference(self, input_data):
        """Run inference using the zero copy feature from pycoral and returns inference time in ms.
        """
        start = time.monotonic()
        self.__edgetpu.run_inference(self._interpreter, input_data)
        self._inf_time = time.monotonic() - start
        return (self._inf_time * 1000)

    def DetectPosesInImage(self, img):
        """Detects poses in a given image.

        For ideal results make sure the image fed to this function is close to the
        expected input size - it is the caller's responsibility to resize the
        image accordingly.

        Args:
            img: numpy array containing image

        Returns:
            Tuple of (list of Pose objects, inference time in seconds).
        """
        # Extend or crop the input to match the input shape of the network.
        if img.shape[0] < self._input_height or img.shape[1] < self._input_width:
            img = np.pad(img, [[0, max(0, self._input_height - img.shape[0])],
                               [0, max(0, self._input_width - img.shape[1])], [0, 0]],
                         mode='constant')
        img = img[0:self._input_height, 0:self._input_width]
        assert (img.shape == tuple(self._input_tensor_shape[1:]))
        if self._input_type is np.float32:
            # Floating point versions of posenet take image data in [-1,1] range.
            input_data = np.float32(img) / 128.0 - 1.0
        else:
            # Assuming to be uint8
            input_data = np.asarray(img)
        self.run_inference(input_data.flatten())
        return self.ParseOutput()

    def get_input_tensor_shape(self):
        """Returns input tensor shape."""
        return self._interpreter.get_input_details()[0]['shape']

    def get_output_tensor(self, idx):
        """Returns output tensor view."""
        return np.squeeze(self._interpreter.tensor(
            self._interpreter.get_output_details()[idx]['index'])())

    def ParseOutput(self):
        """Parses interpreter output tensors and returns decoded poses.

        Returns:
            Tuple of (list of Pose objects, inference time in seconds).
        """
        keypoints = self.get_output_tensor(0)
        keypoint_scores = self.get_output_tensor(1)
        pose_scores = self.get_output_tensor(2)
        num_poses = self.get_output_tensor(3)
        poses = []
        for i in range(int(num_poses)):
            pose_score = pose_scores[i]
            pose_keypoints = {}
            for j, point in enumerate(keypoints[i]):
                y, x = point
                if self._mirror:
                    # NOTE(review): mirroring flips the first unpacked
                    # coordinate using the input *width* — confirm the
                    # decoder's per-keypoint ordering here is (x, y).
                    y = self._input_width - y
                pose_keypoints[KeypointType(j)] = Keypoint(
                    Point(x, y), keypoint_scores[i, j])
            poses.append(Pose(pose_keypoints, pose_score))
        return poses, self._inf_time
class PeoplePerceptor(CoralPerceptorBase):
    def __init__(self, **kwargs):
        """Set up configuration schema, event names and tracking state.

        # NOTE(review): **kwargs is accepted but not used here — presumably
        # kept for interface compatibility with other perceptors; confirm.
        """
        super().__init__(model_path="")
        # Tunable settings exposed to the framework; each Config entry is
        # (name, type, default, description).
        self.config_schema = [
            Config("minimum_face_threshold", "float", 0.4, "Minimum Face Threshold"),
            Config("minimum_body_threshold", "float", 0.2, "Minimum Body Threshold"),
            Config("minimum_face_height", "int", 20, "Minimum Face Height"),
            Config("minimum_body_height", "int", 120, "Minimum Body Height"),
            Config("show_pose_landmark_dots", "bool", False, "Show pose landmark dots"),
            Config("show_body_rectangle", "bool", False, "Show body rectangle"),
            Config("show_face_rectangle", "bool", False, "Show face rectangle"),
            Config("face_rectangle_color", "str", "255,0,0", "Face rectangle color"),
            Config("face_rectangle_thickness", "int", 1, "Face rectangle thickness"),
            Config("body_rectangle_color", "str", "0,255,0", "Body rectangle color"),
            Config("pose_landmark_dot_confidence_threshold", "float", 0.5, "Pose landmark dot confidence threshold"),
            Config("pose_landmark_dot_size", "int", 1, "Pose landmark dot size"),
            Config("pose_landmark_dot_color", "str", "255,255,255", "Pose landmark dot color"),
            Config("show_face_position_arrow", "bool", False, "Show face position arrow"),
            Config("face_position_arrow_color", "str", "255,255,255", "Face position arrow color"),
            Config("face_position_arrow_stroke", "int", 1, "Face position arrow stroke"),
            Config("face_position_arrow_offset_x", "int", 0, "Face position arrow offset x"),
            Config("face_position_arrow_offset_y", "int", -30, "Face position arrow offset y"),
            Config("face_position_arrow_length", "int", 20, "Face position arrow length"),
            Config("face_position_left_right_threshold", "float", 0.3, "Face position left/right threshold"),
            Config("face_position_straight_threshold", "float", 0.7, "Face position straight threshold"),
            Config("show_forehead_center_dot", "bool", False, "Show forehead center dot"),
            Config("forehead_center_dot_color", "str", "255,255,255", "Forehead center dot color"),
            Config("forehead_center_dot_size", "int", 1, "Forehead center dot size"),
            Config("body_rectangle_thickness", "int", 1, "Body rectangle thickness"),
            Config("face_rectangle_y_factor", "float", 1.0, "Face rectangle Y factor"),
            Config("show_centroid_dots", "bool", False, "Show centroid dots"),
            Config("centroid_dots_color", "str", "255,255,255", "Centroid dots color"),
            Config("centroid_dots_size", "int", 1, "Centroid dots size"),
            Config("object_tracking_allowed_missed_frames", "int", 50, "Object tracking allowed missed frames"),
            Config("object_tracking_color_sample_pixels", "int", 4, "Object tracking color sample pixels"),
            Config("object_tracking_info_history_count", "int", 3, "Object tracking info history count"),
            Config("object_tracking_removal_count", "int", 50, "Object tracking removal count"),
            Config("object_tracking_centroid_weight", "float", 0.25, "Object tracking centroid weight"),
            Config("object_tracking_color_weight", "float", 0.25, "Object tracking color weight"),
            Config("object_tracking_vector_weight", "float", 0.25, "Object tracking vector weight"),
            Config("object_tracking_size_weight", "float", 0.25, "Object tracking size weight"),
            Config("object_tracking_creation_m", "int", 10, "Object tracking creation M"),
            Config("object_tracking_creation_n", "int", 7, "Object tracking creation N"),
            Config("person_tracking_creation_m", "int", 20, "Person tracking creation M"),
            Config("person_tracking_creation_n", "int", 16, "Person tracking creation N"),
            Config("show_person_id", "bool", False, "Show person ID"),
            Config("person_data_line_color", "str", "255,255,255", "Person data line color"),
            Config("person_data_line_thickness", "int", 1, "Person data line thickness"),
            Config("person_data_identity_text_color", "str", "255,255,255", "Person data identity text color"),
            Config("person_data_identity_text_stroke", "int", 1, "Person data identity text stroke"),
            Config("person_data_identity_text_font_size", "float", 1.0, "Person data identity text font size"),
            Config("person_data_text_offset_x", "int", 30, "Person data text offset X"),
            Config("person_data_text_offset_y", "int", -40, "Person data text offset Y"),
            Config("identity_text_prefix", "str", "Person ID: ", "Identity text prefix"),
            Config("face_height_resize_factor", "float", 0.1, "Face height resize factor"),
            Config("rolling_video_storage_frame_count", "int", 100, "Rolling video storage frame count")
        ]
        # Events this perceptor can emit to subscribers.
        self.event_names = [
            "new_person_entered_scene",
            "person_facing_new_direction",
            "new_person_in_front",
            "person_left_scene",
            "identity_determined_for_person",
            "person_got_too_close",
            "person_went_too_far_away",
            "max_person_count_reached",
            "person_count_fell_below_maximum",
            "person_occluded"
        ]
        # Per-body tracking state, keyed by body id (insertion-ordered).
        self.__body_number = 0
        self.__body_history = OrderedDict()
        self.__body_missing = OrderedDict()
        self.__body_count = OrderedDict()
        self.__body_seen = OrderedDict()
        self.__person_event_history = OrderedDict()
        # Pose engine is created lazily in load().
        self.__primary_pose_engine = None
        self.__latest_frame = None
        self.__person_data = OrderedDict()
        # Person-of-interest (POI) selection state.
        self.__poi = None
        self.__prior_poi_id = 0
        self.__poiFaceHeight = 0
        # Rolling frame buffer state.
        self.__frame_number = 0
        self.__frame_history = OrderedDict()
        self.__frame_width = 0
        self.__frame_height = 0
def run(self, input_data, config):
#Start with basic frame operations
frame = input_data
self.__frame_number += 1
self.__set_frame_dimensions(frame)
self.__add_current_frame_to_rolling_history(frame, config)
#Perform posenet primary AI inference
poses, inference_time = self.__primary_pose_engine.DetectPosesInImage(frame)
#Create POM object
pom = PeoplePOM()
#Place raw frame into POM
pom.set_raw_frame(frame.copy())
#Pass detected poses through body qualifier
bodies = self.__get_qualified_body_detections(poses, config)
#Process the bodies for various attributes
bodies = self.__process_body_attributes(bodies, frame, config)
#Perform tracking on bodies
self.__apply_best_object_matches_to_body_tracking_info(self.__frame_number, bodies, config)
self.__mark_unmatched_body_ids_as_missing(self.__frame_number, config)
#Check if a new POI has been determined
if self.__poi != None and ('person_id' in self.__poi) and self.__poi['person_id'] != 0:
if self.__prior_poi_id != self.__poi['person_id']:
self.__set_person_as_poi(self.__poi['person_id'], self.__frame_number, self.__poi, config)
#Perform cleanup on people who are not detected
self.__process_cleanup_of_missing_bodies(frame, config)
#Annotate the frame as determined by configuration
annotated_frame = self.__annotate_frame(bodies, frame.copy(), config)
pom.set_annotated_frame(annotated_frame)
people = OrderedDict()
for body in bodies:
if self.__check_if_person_data_record_exists_for_body_id(body["body_id"]):
personIdValue = self.__person_data[body["person_id"]]["uuid"]
body["person_uuid"] = personIdValue
body["is_poi"] = self.__person_data[body["person_id"]]["is_poi"]
people[personIdValue] = body
pom.set_people(people)
return pom
def load(self, accelerator_idx:[int, None]) -> None:
script_dir = pathlib.Path(__file__).parent.absolute()
model_file = os.path.join(script_dir, "models/posenet.tflite")
if accelerator_idx is None:
self.__primary_pose_engine = PoseEngine(model_file)
else:
validate_type(accelerator_idx, int, "accelerator_idx must be an integer")
validate(accelerator_idx >= 0, "accelerator_idx must be greater than or equal to 0")
#TODO: implement accelerator index pass-through to PoseEngine class above
self.__primary_pose_engine = PoseEngine(model_file)
super().set_loaded(True)
def __set_frame_dimensions(self, frame):
self.__frame_width = frame.shape[1]
self.__frame_height = frame.shape[0]
def __add_current_frame_to_rolling_history(self, frame, config):
milltime = int(round(time.time() * 1000))
curFrame = {"timestamp" : milltime, "frame" : frame.copy()}
self.__frame_history[self.__frame_number] = curFrame
if len(self.__frame_history) > config.rolling_video_storage_frame_count:
self.__frame_history.popitem(last=False)
    def __process_body_attributes(self, bodies, frame, config):
        """Derive per-body attributes and update person-of-interest state.

        For each body, computes face position, forehead visibility/center,
        body/face rectangles and tracking info, storing them on the body
        dict. The body with the tallest face rectangle seen so far becomes
        the POI.

        NOTE(review): __poiFaceHeight is never reset here between frames,
        so the POI "sticks" to whichever body ever had the tallest face —
        confirm this accumulation across frames is intended.
        """
        for body in bodies:
            body["face_position"] = self.__determine_face_position(body, config)
            body["has_forehead"] = self.__determine_if_forehead_visible(body, config)
            body["forehead_center"] = self.__determine_forehead_center(body, config)
            body["body_rectangle"] = self.__find_body_rectangle(body, config)
            body["face_rectangle"] = self.__find_face_rectangle(body, config)
            body["tracking_info"] = self.__generate_tracking_info_for_body(frame, body, config)
            # Face height = bottom-right y minus top-left y of the face box.
            curFaceHeight = int(body["face_rectangle"][1][1] - body["face_rectangle"][0][1])
            if curFaceHeight > self.__poiFaceHeight:
                self.__poi = body
                self.__poiFaceHeight = curFaceHeight
        return bodies
def __annotate_frame(self, bodies, frame, config):
if config.show_pose_landmark_dots:
for body in bodies:
frame = self.__draw_landmark_points_on_body(frame, body, config)
if config.show_face_position_arrow:
for body in bodies:
frame = self.__draw_face_position_arrow_on_frame(frame, body, config)
if config.show_forehead_center_dot:
for body in bodies:
frame = self.__draw_forehead_center_dot_on_frame(frame, body, config)
if config.show_body_rectangle:
for body in bodies:
frame = self.__draw_body_rectangle_on_frame(frame, body, config)
if config.show_face_rectangle:
for body in bodies:
frame = self.__draw_face_rectangle_on_frame(frame, body, config)
if config.show_centroid_dots:
for body in bodies:
frame = self.__draw_centroid_circles_on_frame(frame, body, config)
if | |
Shortsword+8",
200809: "Fire Shortsword+9",
200810: "Fire Shortsword+10",
200900: "Chaos Shortsword",
200901: "Chaos Shortsword+1",
200902: "Chaos Shortsword+2",
200903: "Chaos Shortsword+3",
200904: "Chaos Shortsword+4",
200905: "Chaos Shortsword+5",
201000: "Longsword",
201001: "Longsword+1",
201002: "Longsword+2",
201003: "Longsword+3",
201004: "Longsword+4",
201005: "Longsword+5",
201006: "Longsword+6",
201007: "Longsword+7",
201008: "Longsword+8",
201009: "Longsword+9",
201010: "Longsword+10",
201011: "Longsword+11",
201012: "Longsword+12",
201013: "Longsword+13",
201014: "Longsword+14",
201015: "Longsword+15",
201100: "Crystal Longsword",
201101: "Crystal Longsword+1",
201102: "Crystal Longsword+2",
201103: "Crystal Longsword+3",
201104: "Crystal Longsword+4",
201105: "Crystal Longsword+5",
201200: "Lightning Longsword",
201201: "Lightning Longsword+1",
201202: "Lightning Longsword+2",
201203: "Lightning Longsword+3",
201204: "Lightning Longsword+4",
201205: "Lightning Longsword+5",
201300: "Raw Longsword",
201301: "Raw Longsword+1",
201302: "Raw Longsword+2",
201303: "Raw Longsword+3",
201304: "Raw Longsword+4",
201305: "Raw Longsword+5",
201400: "Magic Longsword",
201401: "Magic Longsword+1",
201402: "Magic Longsword+2",
201403: "Magic Longsword+3",
201404: "Magic Longsword+4",
201405: "Magic Longsword+5",
201406: "Magic Longsword+6",
201407: "Magic Longsword+7",
201408: "Magic Longsword+8",
201409: "Magic Longsword+9",
201410: "Magic Longsword+10",
201500: "Enchanted Longsword",
201501: "Enchanted Longsword+1",
201502: "Enchanted Longsword+2",
201503: "Enchanted Longsword+3",
201504: "Enchanted Longsword+4",
201505: "Enchanted Longsword+5",
201600: "Divine Longsword",
201601: "Divine Longsword+1",
201602: "Divine Longsword+2",
201603: "Divine Longsword+3",
201604: "Divine Longsword+4",
201605: "Divine Longsword+5",
201606: "Divine Longsword+6",
201607: "Divine Longsword+7",
201608: "Divine Longsword+8",
201609: "Divine Longsword+9",
201610: "Divine Longsword+10",
201700: "Occult Longsword",
201701: "Occult Longsword+1",
201702: "Occult Longsword+2",
201703: "Occult Longsword+3",
201704: "Occult Longsword+4",
201705: "Occult Longsword+5",
201800: "Fire Longsword",
201801: "Fire Longsword+1",
201802: "Fire Longsword+2",
201803: "Fire Longsword+3",
201804: "Fire Longsword+4",
201805: "Fire Longsword+5",
201806: "Fire Longsword+6",
201807: "Fire Longsword+7",
201808: "Fire Longsword+8",
201809: "Fire Longsword+9",
201810: "Fire Longsword+10",
201900: "Chaos Longsword",
201901: "Chaos Longsword+1",
201902: "Chaos Longsword+2",
201903: "Chaos Longsword+3",
201904: "Chaos Longsword+4",
201905: "Chaos Longsword+5",
202000: "Broadsword",
202001: "Broadsword+1",
202002: "Broadsword+2",
202003: "Broadsword+3",
202004: "Broadsword+4",
202005: "Broadsword+5",
202006: "Broadsword+6",
202007: "Broadsword+7",
202008: "Broadsword+8",
202009: "Broadsword+9",
202010: "Broadsword+10",
202011: "Broadsword+11",
202012: "Broadsword+12",
202013: "Broadsword+13",
202014: "Broadsword+14",
202015: "Broadsword+15",
202100: "Crystal Broadsword",
202101: "Crystal Broadsword+1",
202102: "Crystal Broadsword+2",
202103: "Crystal Broadsword+3",
202104: "Crystal Broadsword+4",
202105: "Crystal Broadsword+5",
202200: "Lightning Broadsword",
202201: "Lightning Broadsword+1",
202202: "Lightning Broadsword+2",
202203: "Lightning Broadsword+3",
202204: "Lightning Broadsword+4",
202205: "Lightning Broadsword+5",
202300: "Raw Broadsword",
202301: "Raw Broadsword+1",
202302: "Raw Broadsword+2",
202303: "Raw Broadsword+3",
202304: "Raw Broadsword+4",
202305: "Raw Broadsword+5",
202400: "Magic Broadsword",
202401: "Magic Broadsword+1",
202402: "Magic Broadsword+2",
202403: "Magic Broadsword+3",
202404: "Magic Broadsword+4",
202405: "Magic Broadsword+5",
202406: "Magic Broadsword+6",
202407: "Magic Broadsword+7",
202408: "Magic Broadsword+8",
202409: "Magic Broadsword+9",
202410: "Magic Broadsword+10",
202500: "Enchanted Broadsword",
202501: "Enchanted Broadsword+1",
202502: "Enchanted Broadsword+2",
202503: "Enchanted Broadsword+3",
202504: "Enchanted Broadsword+4",
202505: "Enchanted Broadsword+5",
202600: "Divine Broadsword",
202601: "Divine Broadsword+1",
202602: "Divine Broadsword+2",
202603: "Divine Broadsword+3",
202604: "Divine Broadsword+4",
202605: "Divine Broadsword+5",
202606: "Divine Broadsword+6",
202607: "Divine Broadsword+7",
202608: "Divine Broadsword+8",
202609: "Divine Broadsword+9",
202610: "Divine Broadsword+10",
202700: "Occult Broadsword",
202701: "Occult Broadsword+1",
202702: "Occult Broadsword+2",
202703: "Occult Broadsword+3",
202704: "Occult Broadsword+4",
202705: "Occult Broadsword+5",
202800: "Fire Broadsword",
202801: "Fire Broadsword+1",
202802: "Fire Broadsword+2",
202803: "Fire Broadsword+3",
202804: "Fire Broadsword+4",
202805: "Fire Broadsword+5",
202806: "Fire Broadsword+6",
202807: "Fire Broadsword+7",
202808: "Fire Broadsword+8",
202809: "Fire Broadsword+9",
202810: "Fire Broadsword+10",
202900: "Chaos Broadsword",
202901: "Chaos Broadsword+1",
202902: "Chaos Broadsword+2",
202903: "Chaos Broadsword+3",
202904: "Chaos Broadsword+4",
202905: "Chaos Broadsword+5",
203000: "Broken Straight Sword",
203001: "Broken Straight Sword+1",
203002: "Broken Straight Sword+2",
203003: "Broken Straight Sword+3",
203004: "Broken Straight Sword+4",
203005: "Broken Straight Sword+5",
203006: "Broken Straight Sword+6",
203007: "Broken Straight Sword+7",
203008: "Broken Straight Sword+8",
203009: "Broken Straight Sword+9",
203010: "Broken Straight Sword+10",
203011: "Broken Straight Sword+11",
203012: "Broken Straight Sword+12",
203013: "Broken Straight Sword+13",
203014: "Broken Straight Sword+14",
203015: "Broken Straight Sword+15",
203100: "Crys. Broken Str. Sword",
203101: "Crys. Broken Str. Sword+1",
203102: "Crys. Broken Str. Sword+2",
203103: "Crys. Broken Str. Sword+3",
203104: "Crys. Broken Str. Sword+4",
203105: "Crys. Broken Str. Sword+5",
203200: "Ltng. Broken Str. Sword",
203201: "Ltng. Broken Str. Sword+1",
203202: "Ltng. Broken Str. Sword+2",
203203: "Ltng. Broken Str. Sword+3",
203204: "Ltng. Broken Str. Sword+4",
203205: "Ltng. Broken Str. Sword+5",
203300: "Raw Broken Straight Sword",
203301: "Raw Broken Straight Sword+1",
203302: "Raw Broken Straight Sword+2",
203303: "Raw Broken Straight Sword+3",
203304: "Raw Broken Straight Sword+4",
203305: "Raw Broken Straight Sword+5",
203400: "Magic Broken Straight Sword",
203401: "Magic Broken Str. Sword+1",
203402: "Magic Broken Str. Sword+2",
203403: "Magic Broken Str. Sword+3",
203404: "Magic Broken Str. Sword+4",
203405: "Magic Broken Str. Sword+5",
203406: "Magic Broken Str. Sword+6",
203407: "Magic Broken Str. Sword+7",
203408: "Magic Broken Str. Sword+8",
203409: "Magic Broken Str. Sword+9",
203410: "Magic Broken Str. Sword+10",
203500: "Ench. Broken Str. Sword",
203501: "Ench. Broken Str. Sword+1",
203502: "Ench. Broken Str. Sword+2",
203503: "Ench. Broken Str. Sword+3",
203504: "Ench. Broken Str. Sword+4",
203505: "Ench. Broken Str. Sword+5",
203600: "Div. Broken Str. Sword",
203601: "Div. Broken Str. Sword+1",
203602: "Div. Broken Str. Sword+2",
203603: "Div. Broken Str. Sword+3",
203604: "Div. Broken Str. Sword+4",
203605: "Div. Broken Str. Sword+5",
203606: "Div. Broken Str. Sword+6",
203607: "Div. Broken Str. Sword+7",
203608: "Div. Broken Str. Sword+8",
203609: "Div. Broken Str. Sword+9",
203610: "Div. Broken Str. Sword+10",
203700: "Occ. Broken Str. Sword",
203701: "Occ. Broken Str. Sword+1",
203702: "Occ. Broken Str. Sword+2",
203703: "Occ. Broken Str. Sword+3",
203704: "Occ. Broken Str. Sword+4",
203705: "Occ. Broken Str. Sword+5",
203800: "Fire Broken Straight Sword",
203801: "Fire Broken Str. Sword+1",
203802: "Fire Broken Str. Sword+2",
203803: "Fire Broken Str. Sword+3",
203804: "Fire Broken Str. Sword+4",
203805: "Fire Broken Str. Sword+5",
203806: "Fire Broken Str. Sword+6",
203807: "Fire Broken Str. Sword+7",
203808: "Fire Broken Str. Sword+8",
203809: "Fire Broken Str. Sword+9",
203810: "Fire Broken Str. Sword+10",
203900: "Chaos Broken Straight Sword",
203901: "Chaos Broken Str. Sword+1",
203902: "Chaos Broken Str. Sword+2",
203903: "Chaos Broken Str. Sword+3",
203904: "Chaos Broken Str. Sword+4",
203905: "Chaos Broken Str. Sword+5",
204000: "Balder Side Sword",
204001: "Balder Side Sword+1",
204002: "Balder Side Sword+2",
204003: "Balder Side Sword+3",
204004: "Balder Side Sword+4",
204005: "Balder Side Sword+5",
204006: "Balder Side Sword+6",
204007: "Balder Side Sword+7",
204008: "Balder Side Sword+8",
204009: "Balder Side Sword+9",
204010: "Balder Side Sword+10",
204011: "Balder Side Sword+11",
204012: "Balder Side Sword+12",
204013: "Balder Side Sword+13",
204014: "Balder Side Sword+14",
204015: "Balder Side Sword+15",
204100: "Crystal Balder Side Sword",
204101: "Crystal Balder Side Sword+1",
204102: "Crystal Balder Side Sword+2",
204103: "Crystal Balder Side Sword+3",
| |
uk_145
+ 26640 * uk_146
+ 31248 * uk_147
+ 20016 * uk_148
+ 410700 * uk_149
+ 10275601 * uk_15
+ 481740 * uk_150
+ 308580 * uk_151
+ 565068 * uk_152
+ 361956 * uk_153
+ 231852 * uk_154
+ 6331625 * uk_155
+ 7426825 * uk_156
+ 4757275 * uk_157
+ 8711465 * uk_158
+ 5580155 * uk_159
+ 6582067 * uk_16
+ 3574385 * uk_160
+ 10218313 * uk_161
+ 6545371 * uk_162
+ 4192657 * uk_163
+ 2685619 * uk_164
+ 3969 * uk_17
+ 11214 * uk_18
+ 8757 * uk_19
+ 63 * uk_2
+ 756 * uk_20
+ 11655 * uk_21
+ 13671 * uk_22
+ 8757 * uk_23
+ 31684 * uk_24
+ 24742 * uk_25
+ 2136 * uk_26
+ 32930 * uk_27
+ 38626 * uk_28
+ 24742 * uk_29
+ 178 * uk_3
+ 19321 * uk_30
+ 1668 * uk_31
+ 25715 * uk_32
+ 30163 * uk_33
+ 19321 * uk_34
+ 144 * uk_35
+ 2220 * uk_36
+ 2604 * uk_37
+ 1668 * uk_38
+ 34225 * uk_39
+ 139 * uk_4
+ 40145 * uk_40
+ 25715 * uk_41
+ 47089 * uk_42
+ 30163 * uk_43
+ 19321 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 399130576402 * uk_47
+ 311680618651 * uk_48
+ 26907679308 * uk_49
+ 12 * uk_5
+ 414826722665 * uk_50
+ 486580534153 * uk_51
+ 311680618651 * uk_52
+ 187944057 * uk_53
+ 531016542 * uk_54
+ 414670221 * uk_55
+ 35798868 * uk_56
+ 551899215 * uk_57
+ 647362863 * uk_58
+ 414670221 * uk_59
+ 185 * uk_6
+ 1500332452 * uk_60
+ 1171607926 * uk_61
+ 101146008 * uk_62
+ 1559334290 * uk_63
+ 1829056978 * uk_64
+ 1171607926 * uk_65
+ 914907313 * uk_66
+ 78984804 * uk_67
+ 1217682395 * uk_68
+ 1428308539 * uk_69
+ 217 * uk_7
+ 914907313 * uk_70
+ 6818832 * uk_71
+ 105123660 * uk_72
+ 123307212 * uk_73
+ 78984804 * uk_74
+ 1620656425 * uk_75
+ 1900986185 * uk_76
+ 1217682395 * uk_77
+ 2229805417 * uk_78
+ 1428308539 * uk_79
+ 139 * uk_8
+ 914907313 * uk_80
+ 250047 * uk_81
+ 706482 * uk_82
+ 551691 * uk_83
+ 47628 * uk_84
+ 734265 * uk_85
+ 861273 * uk_86
+ 551691 * uk_87
+ 1996092 * uk_88
+ 1558746 * uk_89
+ 2242306609 * uk_9
+ 134568 * uk_90
+ 2074590 * uk_91
+ 2433438 * uk_92
+ 1558746 * uk_93
+ 1217223 * uk_94
+ 105084 * uk_95
+ 1620045 * uk_96
+ 1900269 * uk_97
+ 1217223 * uk_98
+ 9072 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 94248 * uk_100
+ 109368 * uk_101
+ 89712 * uk_102
+ 2203047 * uk_103
+ 2556477 * uk_104
+ 2097018 * uk_105
+ 2966607 * uk_106
+ 2433438 * uk_107
+ 1996092 * uk_108
+ 74088 * uk_109
+ 1988826 * uk_11
+ 313992 * uk_110
+ 14112 * uk_111
+ 329868 * uk_112
+ 382788 * uk_113
+ 313992 * uk_114
+ 1330728 * uk_115
+ 59808 * uk_116
+ 1398012 * uk_117
+ 1622292 * uk_118
+ 1330728 * uk_119
+ 8428834 * uk_12
+ 2688 * uk_120
+ 62832 * uk_121
+ 72912 * uk_122
+ 59808 * uk_123
+ 1468698 * uk_124
+ 1704318 * uk_125
+ 1398012 * uk_126
+ 1977738 * uk_127
+ 1622292 * uk_128
+ 1330728 * uk_129
+ 378824 * uk_13
+ 5639752 * uk_130
+ 253472 * uk_131
+ 5924908 * uk_132
+ 6875428 * uk_133
+ 5639752 * uk_134
+ 11392 * uk_135
+ 266288 * uk_136
+ 309008 * uk_137
+ 253472 * uk_138
+ 6224482 * uk_139
+ 8855011 * uk_14
+ 7223062 * uk_140
+ 5924908 * uk_141
+ 8381842 * uk_142
+ 6875428 * uk_143
+ 5639752 * uk_144
+ 512 * uk_145
+ 11968 * uk_146
+ 13888 * uk_147
+ 11392 * uk_148
+ 279752 * uk_149
+ 10275601 * uk_15
+ 324632 * uk_150
+ 266288 * uk_151
+ 376712 * uk_152
+ 309008 * uk_153
+ 253472 * uk_154
+ 6539203 * uk_155
+ 7588273 * uk_156
+ 6224482 * uk_157
+ 8805643 * uk_158
+ 7223062 * uk_159
+ 8428834 * uk_16
+ 5924908 * uk_160
+ 10218313 * uk_161
+ 8381842 * uk_162
+ 6875428 * uk_163
+ 5639752 * uk_164
+ 3969 * uk_17
+ 2646 * uk_18
+ 11214 * uk_19
+ 63 * uk_2
+ 504 * uk_20
+ 11781 * uk_21
+ 13671 * uk_22
+ 11214 * uk_23
+ 1764 * uk_24
+ 7476 * uk_25
+ 336 * uk_26
+ 7854 * uk_27
+ 9114 * uk_28
+ 7476 * uk_29
+ 42 * uk_3
+ 31684 * uk_30
+ 1424 * uk_31
+ 33286 * uk_32
+ 38626 * uk_33
+ 31684 * uk_34
+ 64 * uk_35
+ 1496 * uk_36
+ 1736 * uk_37
+ 1424 * uk_38
+ 34969 * uk_39
+ 178 * uk_4
+ 40579 * uk_40
+ 33286 * uk_41
+ 47089 * uk_42
+ 38626 * uk_43
+ 31684 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 94176877578 * uk_47
+ 399130576402 * uk_48
+ 17938452872 * uk_49
+ 8 * uk_5
+ 419311335883 * uk_50
+ 486580534153 * uk_51
+ 399130576402 * uk_52
+ 187944057 * uk_53
+ 125296038 * uk_54
+ 531016542 * uk_55
+ 23865912 * uk_56
+ 557865693 * uk_57
+ 647362863 * uk_58
+ 531016542 * uk_59
+ 187 * uk_6
+ 83530692 * uk_60
+ 354011028 * uk_61
+ 15910608 * uk_62
+ 371910462 * uk_63
+ 431575242 * uk_64
+ 354011028 * uk_65
+ 1500332452 * uk_66
+ 67430672 * uk_67
+ 1576191958 * uk_68
+ 1829056978 * uk_69
+ 217 * uk_7
+ 1500332452 * uk_70
+ 3030592 * uk_71
+ 70840088 * uk_72
+ 82204808 * uk_73
+ 67430672 * uk_74
+ 1655887057 * uk_75
+ 1921537387 * uk_76
+ 1576191958 * uk_77
+ 2229805417 * uk_78
+ 1829056978 * uk_79
+ 178 * uk_8
+ 1500332452 * uk_80
+ 250047 * uk_81
+ 166698 * uk_82
+ 706482 * uk_83
+ 31752 * uk_84
+ 742203 * uk_85
+ 861273 * uk_86
+ 706482 * uk_87
+ 111132 * uk_88
+ 470988 * uk_89
+ 2242306609 * uk_9
+ 21168 * uk_90
+ 494802 * uk_91
+ 574182 * uk_92
+ 470988 * uk_93
+ 1996092 * uk_94
+ 89712 * uk_95
+ 2097018 * uk_96
+ 2433438 * uk_97
+ 1996092 * uk_98
+ 4032 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 142884 * uk_100
+ 164052 * uk_101
+ 31752 * uk_102
+ 2250423 * uk_103
+ 2583819 * uk_104
+ 500094 * uk_105
+ 2966607 * uk_106
+ 574182 * uk_107
+ 111132 * uk_108
+ 1092727 * uk_109
+ 4877359 * uk_11
+ 445578 * uk_110
+ 127308 * uk_111
+ 2005101 * uk_112
+ 2302153 * uk_113
+ 445578 * uk_114
+ 181692 * uk_115
+ 51912 * uk_116
+ 817614 * uk_117
+ 938742 * uk_118
+ 181692 * uk_119
+ 1988826 * uk_12
+ 14832 * uk_120
+ 233604 * uk_121
+ 268212 * uk_122
+ 51912 * uk_123
+ 3679263 * uk_124
+ 4224339 * uk_125
+ 817614 * uk_126
+ 4850167 * uk_127
+ 938742 * uk_128
+ 181692 * uk_129
+ 568236 * uk_13
+ 74088 * uk_130
+ 21168 * uk_131
+ 333396 * uk_132
+ 382788 * uk_133
+ 74088 * uk_134
+ 6048 * uk_135
+ 95256 * uk_136
+ 109368 * uk_137
+ | |
csv
cmdlsamplemodel=None, #sampling of model data
cmdlsampleprediction=None, #sampling of prediction data
cmdlmodelscalefeatures=True, #scale model data
cmdlpredictioncolsrange=None,
cmdlpredictioncolselect=None,
cmdlpredictionscalefeatures=True, #scale predcition data
cmdlmodelscalesave=True, #save model scaler should be true
cmdlpredictionscalesave=False, #save prediction scaler should be false to use already saved scaler
cmdlkind='standard',
cmdlpredictionidcol=None, #id column for prediction csv
cmdlscaletarget=None,
cmdlminmaxscale=None,
cmdlscaleminmaxvalues=None,
cmdloutdir=None,
cmdlhideplot=False):
#scalesave has to be True to save scaler to be used later by predictiondata
modelcsv=prepcsv(modeldatacsv,idcols=cmdlmodelidcol,targetcol=cmdlmodeltargetcol,
colsrange=cmdlmodelcolsrange,colsselect=cmdlmodelcolselect,
scalefeatures=cmdlmodelscalefeatures,scaletype=cmdlkind,
scalesave=True,sample=cmdlsamplemodel)
#I hard coded scalesave to True for model data
#extract and scale columns of data only
X,colnames,dfin=modelcsv.extract_scale_cols()
y,ycolname=modelcsv.extract_target()
if cmdlmodelidcol:
dfin=modelcsv.addback_idcols(dfin)
if cmdlmodeltargetcol:
dfin,tcolnum=modelcsv.addback_targetcol(dfin)
#targetcol has to be None
#scalesave has to be False to read from saved modeldata scale
predictioncsv=prepcsv(predictiondatacsv,idcols=cmdlpredictionidcol,targetcol=None,
colsrange=cmdlpredictioncolsrange,colsselect=cmdlpredictioncolselect,
scalefeatures=cmdlpredictionscalefeatures,scaletype=cmdlkind,
scalesave=False,sample=cmdlsampleprediction)
#I hard coded prediction scale save to false to read already saved scaler file
#extract and scale columns of data only
Xpred,colnamespred,dfinpred=predictioncsv.extract_scale_cols()
dirsplit,fextsplit=os.path.split(modeldatacsv)
fname,fextn=os.path.splitext(fextsplit)
#plt.style.use('seaborn-whitegrid')
lm=LinearRegression()
lm.fit(X, y) # Fitting all predictors 'X' to the target 'y' using linear fit model
ypred=lm.predict(X)
# Print intercept and coefficients
print ('Intercept: ',lm.intercept_)
print ('Coefficients: ',lm.coef_)
print ('R2 Score:',lm.score(X, y))
# Calculating coefficients
cflst=lm.coef_.tolist()
#cflst.append(lm.intercept_)
cflst.insert(0,lm.intercept_)
cnameslst=colnames.tolist()
#cnameslst.append('Intercept')
cnameslst.insert(0,'Intercept')
coeff=pd.DataFrame(cnameslst,columns=['Attribute'])
coeff['Coefficient Estimate']=pd.Series(cflst)
pred=lm.predict(Xpred)
if cmdlscaletarget:
if cmdlscaleminmaxvalues:
ymin,ymax=cmdlscaleminmaxvalues[0],cmdlscaleminmaxvalues[1]
print('****Manual Scaling of output data to min: %10.4f, max: %10.4f'
%(cmdlscaleminmaxvalues[0],cmdlscaleminmaxvalues[1]))
else:
ymin,ymax=y.min(), y.max()
mmscale=MinMaxScaler((ymin,ymax))
#mmscale.fit(pred)
pred1=pred.reshape(-1,1)
predscaled=mmscale.fit_transform(pred1)
dfinpred['LRPred']=predscaled
else:
dfinpred['LRPred']=pred
# addback id column
if cmdlmodelidcol >=0:
dfspred=predictioncsv.addback_idcols(dfinpred)
#ax=plt.scatter(y,ypred)
ax=sns.regplot(x=y,y=ypred)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('Linear Regressor %s' %ycolname)
if not cmdlhideplot:
plt.show()
if cmdloutdir:
pdfcl=os.path.join(cmdloutdir,fname) +"_lreg.pdf"
xyplt=os.path.join(cmdloutdir,fname) +"_lregxplt.csv"
else:
pdfcl=os.path.join(dirsplit,fname) +"_lreg.pdf"
xyplt=os.path.join(dirsplit,fname) +"_lregxplt.csv"
fig=ax.get_figure()
fig.savefig(pdfcl)
xpltcols=['Actual','Predicted']
# xpltdf=dfin[dfin.columns[cmdlmodelidcol]].copy() #copy back model id col
xpltdf=dfin[dfin.columns[cmdlmodelidcol]].copy()
#copy back model id col assuming idcol was added back at column 0
xpltdf['Actual']=y
xpltdf['Predicted']=ypred
xpltdf.to_csv(xyplt,index=False)
print('Sucessfully generated xplot file %s' % xyplt)
if cmdlpredictionidcol:
dfinpred=predictioncsv.addback_idcols(dfinpred)
idtargetdf=predictioncsv.idtarget_merge(predicteddf=dfinpred,predicteddfcols=-1)
savefiles(seisf=predictiondatacsv,
sdf=dfinpred,
wellf=modeldatacsv,
wdf=coeff,
sxydf=idtargetdf,
outdir=cmdloutdir,
ssuffix='_lr',
wsuffix='_lrcf',name2merge=modeldatacsv)
def process_KNNtest(modeldatacsv,
                    cmdlmodelcolsrange=None,
                    cmdlmodelcolselect=None,
                    cmdlmodeltargetcol=None,  # target column for model csv
                    cmdlmodelidcol=None,  # id column for model csv
                    cmdlsamplemodel=None,  # sampling of model data
                    cmdlmodelscalefeatures=True,  # scale model data
                    cmdlmodelscalesave=True,  # save model scaler should be true
                    cmdlkind='standard',
                    cmdlcv=None,
                    cmdloutdir=None,
                    cmdlhideplot=False):
    """Cross-validate a KNN regressor for k = 1..20 and plot MSE versus k.

    For each neighbor count k the model is scored with k-fold cross
    validation (negative mean squared error); the per-k mean scores are
    printed and plotted, and the plot is saved to <model name>_knn.pdf.

    modeldatacsv : path to the model (training) csv file.
    cmdlcv : number of cross-validation folds; falls back to 10 when None.
    cmdlhideplot : when True the plot is only saved, not shown.
    """
    modelcsv = prepcsv(modeldatacsv, idcols=cmdlmodelidcol, targetcol=cmdlmodeltargetcol,
                       colsrange=cmdlmodelcolsrange, colsselect=cmdlmodelcolselect,
                       scalefeatures=cmdlmodelscalefeatures, scaletype=cmdlkind,
                       scalesave=True, sample=cmdlsamplemodel)
    # scalesave is hard coded to True for model data
    # returns X data, column names, and dataframe that is scaled
    X, colnames, dfin = modelcsv.extract_scale_cols()
    y, ycolname = modelcsv.extract_target()
    if cmdlmodelidcol:
        dfin = modelcsv.addback_idcols(dfin)
    if cmdlmodeltargetcol:
        dfin, tcolnum = modelcsv.addback_targetcol(dfin)
    k_values = np.arange(1, 21)
    mselist = []
    stdlist = []
    # BUG FIX: the original computed num_folds=cmdlcv but then hard-coded
    # KFold(n_splits=10), ignoring the cv argument, and rebuilt the KFold on
    # every loop iteration.  It also passed random_state without shuffle,
    # which recent scikit-learn releases reject with a ValueError.
    num_folds = cmdlcv if cmdlcv else 10
    kfold = KFold(n_splits=num_folds, shuffle=True, random_state=7)
    scoring = 'neg_mean_squared_error'
    for k in k_values:
        KNNmodel = KNeighborsRegressor(n_neighbors=k)
        results = cross_val_score(KNNmodel, X, y, cv=kfold, scoring=scoring)
        print("K value: %2d MSE: %.3f (%.3f)" % (k, results.mean(), results.std()))
        mselist.append(results.mean())
        stdlist.append(results.std())
    dirsplit, fextsplit = os.path.split(modeldatacsv)
    fname, fextn = os.path.splitext(fextsplit)
    if cmdloutdir:
        pdfcl = os.path.join(cmdloutdir, fname) + "_knn.pdf"
    else:
        pdfcl = os.path.join(dirsplit, fname) + "_knn.pdf"
    plt.plot(k_values, mselist)
    plt.xlabel('# of clusters')
    plt.ylabel('Neg Mean Sqr Error')
    plt.savefig(pdfcl)
    if not cmdlhideplot:
        plt.show()
def process_KNNfitpredict(modeldatacsv, predictiondatacsv,
                          cmdlmodelcolsrange=None,
                          cmdlmodelcolselect=None,
                          cmdlmodeltargetcol=None,  # target column for model csv
                          cmdlmodelidcol=None,  # id column for model csv
                          cmdlsamplemodel=None,  # sampling of model data
                          cmdlsampleprediction=None,  # sampling of prediction data
                          cmdlmodelscalefeatures=True,  # scale model data
                          cmdlpredictioncolsrange=None,
                          cmdlpredictioncolselect=None,
                          cmdlpredictionscalefeatures=True,  # scale prediction data
                          cmdlmodelscalesave=True,  # save model scaler should be true
                          cmdlpredictionscalesave=False,  # should be false to use already saved scaler
                          cmdlkind='standard',
                          cmdlpredictionidcol=None,  # id column for prediction csv
                          cmdltargetscale=None,
                          cmdlminmaxscale=None,
                          cmdlscaleminmaxvalues=None,
                          cmdloutdir=None,
                          cmdlhideplot=False,
                          cmdlkneighbors=10):
    """Fit a KNN regressor on the model csv and predict the prediction csv.

    The model data is scaled (scaler saved), a KNeighborsRegressor with
    cmdlkneighbors neighbors is fit, in-sample MSE/R2 are printed, and the
    predictions for the prediction csv are (optionally min/max rescaled and)
    stored in dfinpred['KNNPred'], cross-plotted, and written out via
    savefiles.
    """
    modelcsv = prepcsv(modeldatacsv, idcols=cmdlmodelidcol, targetcol=cmdlmodeltargetcol,
                       colsrange=cmdlmodelcolsrange, colsselect=cmdlmodelcolselect,
                       scalefeatures=cmdlmodelscalefeatures, scaletype=cmdlkind,
                       scalesave=True, sample=cmdlsamplemodel)
    # scalesave is hard coded to True for model data
    # returns X data, column names, and dataframe that is scaled
    X, colnames, dfin = modelcsv.extract_scale_cols()
    y, ycolname = modelcsv.extract_target()
    if cmdlmodelidcol:
        dfin = modelcsv.addback_idcols(dfin)
    if cmdlmodeltargetcol:
        dfin, tcolnum = modelcsv.addback_targetcol(dfin)
    # targetcol has to be None
    # scalesave has to be False to read from the saved modeldata scaler
    predictioncsv = prepcsv(predictiondatacsv, idcols=cmdlpredictionidcol, targetcol=None,
                            colsrange=cmdlpredictioncolsrange, colsselect=cmdlpredictioncolselect,
                            scalefeatures=cmdlpredictionscalefeatures, scaletype=cmdlkind,
                            scalesave=False, sample=cmdlsampleprediction)
    # extract and scale columns of data only
    Xpred, colnamespred, dfinpred = predictioncsv.extract_scale_cols()
    dirsplit, fextsplit = os.path.split(modeldatacsv)
    fname, fextn = os.path.splitext(fextsplit)
    KNNmodel = KNeighborsRegressor(n_neighbors=cmdlkneighbors)
    KNNmodel.fit(X, y)
    ypred = KNNmodel.predict(X)
    # Calculating Mean Squared Error
    mse = np.mean((ypred - y) ** 2)
    print('Metrics on input data: ')
    print('MSE: %.4f' % (mse))
    print('R2 Score: %.4f' % (KNNmodel.score(X, y)))
    pred = KNNmodel.predict(Xpred)
    if cmdlminmaxscale:
        if cmdlscaleminmaxvalues:
            ymin, ymax = cmdlscaleminmaxvalues[0], cmdlscaleminmaxvalues[1]
            print('****Manual Scaling of output data to min: %10.4f, max: %10.4f'
                  % (cmdlscaleminmaxvalues[0], cmdlscaleminmaxvalues[1]))
        else:
            ymin, ymax = y.min(), y.max()
        mmscale = MinMaxScaler((ymin, ymax))
        pred1 = pred.reshape(-1, 1)
        predscaled = mmscale.fit_transform(pred1)
        dfinpred['KNNPred'] = predscaled
    else:
        dfinpred['KNNPred'] = pred
    # Add the id column(s) back exactly once.
    # BUG FIX: the original tested "cmdlpredictionidcol >= 0", which raises
    # TypeError when the default None is used, and then called
    # addback_idcols a SECOND time just before idtarget_merge, duplicating
    # the id columns.
    if cmdlpredictionidcol is not None:
        dfinpred = predictioncsv.addback_idcols(dfinpred)
    sns.set(color_codes=True)
    ax = sns.regplot(x=y, y=ypred)
    plt.xlabel('Actual')
    plt.ylabel('Predicted')
    plt.title('KNN Regressor %s' % ycolname)
    if not cmdlhideplot:
        plt.show()
    if cmdloutdir:
        pdfcl = os.path.join(cmdloutdir, fname) + "_knnreg.pdf"
        xyplt = os.path.join(cmdloutdir, fname) + "_knnregxplt.csv"
    else:
        pdfcl = os.path.join(dirsplit, fname) + "_knnreg.pdf"
        xyplt = os.path.join(dirsplit, fname) + "_knnregxplt.csv"
    fig = ax.get_figure()
    fig.savefig(pdfcl)
    # copy back model id col
    # NOTE(review): assumes cmdlmodelidcol is a valid column index — confirm callers
    xpltdf = dfin[dfin.columns[cmdlmodelidcol]].copy()
    xpltdf['Actual'] = y
    xpltdf['Predicted'] = ypred
    xpltdf.to_csv(xyplt, index=False)
    print('Successfully generated xplot file %s' % xyplt)  # fixed message typo
    idtargetdf = predictioncsv.idtarget_merge(predicteddf=dfinpred, predicteddfcols=-1)
    savefiles(seisf=predictiondatacsv,
              sdf=dfinpred,
              sxydf=idtargetdf,
              outdir=cmdloutdir,
              ssuffix='_KNN',
              name2merge=modeldatacsv)
def process_TuneCatBoostRegressor(modeldatacsv, predictiondatacsv,
                                  cmdlmodelcolsrange=None,
                                  cmdlmodelcolselect=None,
                                  cmdlmodeltargetcol=None,  # target column for model csv
                                  cmdlmodelidcol=None,  # id column for model csv
                                  cmdlsamplemodel=None,  # sampling of model data
                                  cmdlsampleprediction=None,  # sampling of prediction data
                                  cmdlmodelscalefeatures=True,  # scale model data
                                  cmdlpredictioncolsrange=None,
                                  cmdlpredictioncolselect=None,
                                  cmdlpredictionscalefeatures=True,  # scale prediction data
                                  cmdlmodelscalesave=True,  # save model scaler should be true
                                  cmdlpredictionscalesave=False,  # should be false to use already saved scaler
                                  cmdlkind='standard',
                                  cmdlpredictionidcol=None,  # id column for prediction csv
                                  cmdltargetscale=None,
                                  cmdlminmaxscale=None,
                                  cmdliterations=None,
                                  cmdllearningrate=None,
                                  cmdldepth=None,
                                  cmdlcv=None,
                                  cmdlscaleminmaxvalues=None,
                                  cmdlfeatureimportance=None,
                                  cmdloutdir=None,
                                  cmdlhideplot=False,
                                  cmdlvalsize=0.3):
    """Grid-search CatBoostRegressor hyper-parameters, then predict.

    GridSearchCV explores cmdliterations x cmdllearningrate x cmdldepth with
    cmdlcv folds, reports the best estimator's in-sample MSE/R2, predicts
    the prediction csv (optionally min/max rescaled) into
    dfinpred['CatBoostPred'], cross-plots, and writes outputs via savefiles.

    NOTE(review): cmdltargetscale, cmdlfeatureimportance and cmdlvalsize are
    accepted for interface compatibility but unused here.
    """
    modelcsv = prepcsv(modeldatacsv, idcols=cmdlmodelidcol, targetcol=cmdlmodeltargetcol,
                       colsrange=cmdlmodelcolsrange, colsselect=cmdlmodelcolselect,
                       scalefeatures=cmdlmodelscalefeatures, scaletype=cmdlkind,
                       scalesave=True, sample=cmdlsamplemodel)
    # scalesave is hard coded to True for model data
    # returns X data, column names, and dataframe that is scaled
    X, colnames, dfin = modelcsv.extract_scale_cols()
    y, ycolname = modelcsv.extract_target()
    if cmdlmodelidcol:
        dfin = modelcsv.addback_idcols(dfin)
    if cmdlmodeltargetcol:
        dfin, tcolnum = modelcsv.addback_targetcol(dfin)
    # targetcol has to be None
    # scalesave has to be False to read from the saved modeldata scaler
    predictioncsv = prepcsv(predictiondatacsv, idcols=cmdlpredictionidcol, targetcol=None,
                            colsrange=cmdlpredictioncolsrange, colsselect=cmdlpredictioncolselect,
                            scalefeatures=cmdlpredictionscalefeatures, scaletype=cmdlkind,
                            scalesave=False, sample=cmdlsampleprediction)
    # extract and scale columns of data only
    Xpred, colnamespred, dfinpred = predictioncsv.extract_scale_cols()
    dirsplit, fextsplit = os.path.split(modeldatacsv)
    fname, fextn = os.path.splitext(fextsplit)
    params = {'iterations': cmdliterations,
              'learning_rate': cmdllearningrate,
              'depth': cmdldepth}
    grdcv = GridSearchCV(CatBoostRegressor(loss_function='RMSE'), params, cv=cmdlcv)
    # Fit model
    grdcv.fit(X, y)
    print(grdcv.best_params_)
    clf = grdcv.best_estimator_
    print(grdcv.best_estimator_)
    # Get predictions from the tuned estimator
    ypred = clf.predict(X)
    msev = np.mean((ypred - y) ** 2)
    print('Metrics on Well data: ')
    print('Well Data Best Estimator MSE: %.4f' % (msev))
    r2v = r2_score(y, ypred)
    print('Well Data Best Estimator R2 : %10.3f' % r2v)
    pred = clf.predict(Xpred)  # all seismic using optimum params
    if cmdlminmaxscale:
        # BUG FIX: cmdlscaleminmaxvalues was accepted but ignored here;
        # honor it the same way process_KNNfitpredict does.
        if cmdlscaleminmaxvalues:
            ymin, ymax = cmdlscaleminmaxvalues[0], cmdlscaleminmaxvalues[1]
            print('****Manual Scaling of output data to min: %10.4f, max: %10.4f'
                  % (cmdlscaleminmaxvalues[0], cmdlscaleminmaxvalues[1]))
        else:
            ymin, ymax = y.min(), y.max()
        mmscale = MinMaxScaler((ymin, ymax))
        pred1 = pred.reshape(-1, 1)
        predscaled = mmscale.fit_transform(pred1)
        dfinpred['CatBoostPred'] = predscaled
    else:
        dfinpred['CatBoostPred'] = pred
    sns.set(color_codes=True)
    ax = sns.regplot(x=y, y=ypred)
    plt.xlabel('Actual')
    plt.ylabel('Predicted')
    plt.title('CatBoostRegressor %s' % ycolname)
    if not cmdlhideplot:
        plt.show()
    if cmdloutdir:
        pdfcl = os.path.join(cmdloutdir, fname) + "_cbreg.pdf"
        xyplt = os.path.join(cmdloutdir, fname) + "_cbrxplt.csv"
    else:
        pdfcl = os.path.join(dirsplit, fname) + "_cbreg.pdf"
        xyplt = os.path.join(dirsplit, fname) + "_cbrxplt.csv"
    fig = ax.get_figure()
    fig.savefig(pdfcl)
    # copy back model id col
    # NOTE(review): assumes cmdlmodelidcol is a valid column index — confirm callers
    xpltdf = dfin[dfin.columns[cmdlmodelidcol]].copy()
    xpltdf['Actual'] = y
    xpltdf['Predicted'] = ypred
    xpltdf.to_csv(xyplt, index=False)
    print('Successfully generated xplot file %s' % xyplt)  # fixed message typo
    if cmdlpredictionidcol:
        dfinpred = predictioncsv.addback_idcols(dfinpred)
    idtargetdf = predictioncsv.idtarget_merge(predicteddf=dfinpred, predicteddfcols=-1)
    savefiles(seisf=predictiondatacsv,
              sdf=dfinpred,
              sxydf=idtargetdf,
              outdir=cmdloutdir,
              ssuffix='_stcbr', name2merge=modeldatacsv)
def process_feature_ranking(modeldatacsv,
                            cmdlmodelcolsrange=None,
                            cmdlmodelcolselect=None,
                            cmdlmodeltargetcol=None,  # target column for model csv
                            cmdlmodelidcol=None,  # id column for model csv
                            cmdlsamplemodel=None,  # sampling of model data
                            cmdlmodelscalefeatures=True,  # scale model data
                            cmdlkind='standard',
                            cmdlmodelscalesave=True,  # save model scaler should be true
                            cmdltestfeatures=None,
                            cmdlcv=3,  # cv for SVR
                            cmdltraintestsplit=.3,
                            cmdlfeatures2keep=None,
                            cmdllassoalpha=None):
    """Rank the model-csv features by the method named in cmdltestfeatures.

    Supported cmdltestfeatures values:
      'mutualinforeg' - mutual information regression scores (max-normalized)
      'rfe'           - recursive feature elimination + per-feature R2 CV
      'svrcv'         - RFECV with a linear SVR estimator
      'svr'           - RFE with a linear SVR estimator
      'rfregressor'   - random-forest importances + per-feature R2 CV
      'decisiontree'  - decision-tree importances
    Rankings are printed as sorted pandas DataFrames; nothing is returned.
    NOTE(review): cmdllassoalpha is accepted but unused in this function.
    """
    modelcsv = prepcsv(modeldatacsv, idcols=cmdlmodelidcol, targetcol=cmdlmodeltargetcol,
                       colsrange=cmdlmodelcolsrange, colsselect=cmdlmodelcolselect,
                       scalefeatures=cmdlmodelscalefeatures, scaletype=cmdlkind,
                       scalesave=True, sample=cmdlsamplemodel)
    # I hard coded scalesave to True for model data
    # returns X data, column names, and dataframe that is scaled
    X, colnames, dfin = modelcsv.extract_scale_cols()
    y, ycolname = modelcsv.extract_target()
    print(X.shape, len(colnames))
    if cmdltestfeatures == 'mutualinforeg':
        mi = mutual_info_regression(X, y)
        mi /= np.max(mi)  # normalize so the best-scoring feature is 1.0
        print("Features sorted by their score:")
        fimp = pd.DataFrame(sorted(zip(mi, colnames), reverse=True), columns=['MutualInfoRegression ', 'Attribute'])
        print('Feature Ranking by Mutual Info Regression: ')
        print(fimp)
    elif cmdltestfeatures == 'rfe':
        # rank all features, i.e continue the elimination until the last one
        lm = LinearRegression()
        rfe = RFE(lm, n_features_to_select=cmdlfeatures2keep)
        rfe.fit(X, y)
        scores = []
        # Score each feature on its own with cross-validated R2.
        # NOTE(review): ShuffleSplit(len(X), cmdlcv, cmdltraintestsplit) is the
        # pre-0.18 scikit-learn signature; modern versions expect
        # ShuffleSplit(n_splits=..., test_size=...) — confirm the pinned
        # scikit-learn version before changing this.
        for i in range(X.shape[1]):
            score = cross_val_score(lm, X[:, i:i+1], y, scoring="r2",
                                    cv=ShuffleSplit(len(X), cmdlcv, cmdltraintestsplit))
            scores.append(np.mean(score))
        r2fr = pd.DataFrame(sorted(zip(scores, colnames), reverse=True), columns=['R2 Score ', 'Attribute'])
        print('Feature Ranking by R2 scoring: ')
        print(r2fr)
    elif cmdltestfeatures == 'svrcv':
        # rank all features, i.e continue the elimination until the last one
        estimator = SVR(kernel="linear")
        selector = RFECV(estimator, step=1, cv=cmdlcv)
        selector = selector.fit(X, y)
        fr = pd.DataFrame(sorted(zip(selector.ranking_, colnames)), columns=['Importance', 'Attribute'])
        print('Feature Ranking with Cross Validated Recursive Feature Elimination Using SVR: ')
        print(fr)
    elif cmdltestfeatures == 'svr':
        estimator = SVR(kernel="linear")
        # NOTE(review): positional n_features_to_select was removed in newer
        # scikit-learn releases; this works only on older versions — confirm.
        selector = RFE(estimator, cmdlfeatures2keep, step=1)
        selector = selector.fit(X, y)
        fr = pd.DataFrame(sorted(zip(selector.ranking_, colnames)), columns=['Importance', 'Attribute'])
        print('Feature Ranking with Recursive Feature Elimination Using SVR: ')
        print(fr)
    elif cmdltestfeatures == 'rfregressor':
        rf = RandomForestRegressor(n_estimators=20, max_depth=4)
        rf.fit(X, y)
        fi = pd.DataFrame(sorted(zip(map(lambda x: round(x, 4), rf.feature_importances_), colnames), reverse=True), columns=['Importance', 'Attribute'])
        print(fi)
        scores = []
        # Per-feature cross-validated R2 (same legacy ShuffleSplit signature
        # as in the 'rfe' branch above).
        for i in range(X.shape[1]):
            score = cross_val_score(rf, X[:, i:i+1], y, scoring="r2",
                                    cv=ShuffleSplit(len(X), cmdlcv, cmdltraintestsplit))
            scores.append((round(np.mean(score), 3), colnames[i]))
        cvscoredf = pd.DataFrame(sorted(scores, reverse=True), columns=['Partial R2', 'Attribute'])
        print('\nCross Validation:')
        print(cvscoredf)
    elif cmdltestfeatures == 'decisiontree':
        regressor = DecisionTreeRegressor(random_state=0)
        regressor.fit(X, y)
        fr = pd.DataFrame(sorted(zip(map(lambda x: round(x, 4), regressor.feature_importances_), colnames), reverse=True), columns=['Importance', 'Attribute'])
        print('Feature Ranking with Decision Tree Regressor: ')
        print(fr)
#**********CatBoostRegressor
def process_CatBoostRegressor(modeldatacsv,predictiondatacsv,
cmdlmodelcolsrange=None,
cmdlmodelcolselect=None,
cmdlmodeltargetcol=None, #target column for model csv
cmdlmodelidcol=None, #idcolumn for model csv
cmdlsamplemodel=None, #sampling of model data
cmdlsampleprediction=None, #sampling of prediction data
cmdlmodelscalefeatures=True, #scale model data
cmdlpredictioncolsrange=None,
cmdlpredictioncolselect=None,
cmdlpredictionscalefeatures=True, #scale predcition data
cmdlmodelscalesave=True, #save model scaler should be true
cmdlpredictionscalesave=False, #save prediction | |
articles.article_id = lfind.article_id
AND lfind.article_link = '${aLink}'
"""
# [ LIMIT { number | ALL } ] [ OFFSET number ]
# article_id
tplWrk = string.Template(strTpl) # strTpl
strSelect = tplWrk.substitute(sId=str(spectatorId), aLink=article_link)
# logging.info( 'Article ::: get strSelect = ' + str(strSelect))
getRez = self.rowSelect(str(strSelect))
if len(getRez) == 0:
logging.info( 'Article ::: ARTICLE_NOT_FOUND get articleLink = ' + str(articleLink))
locEx = ArticleNotFound()
logging.info( 'Article ::: locEx = ' + str(locEx))
raise ArticleNotFound()
elif len(getRez) == 1:
outArt = self.articleDecode(getRez[0], spectatorAuthor)
# logging.info( 'Article ::: >>>>> get outArt = ' + str(outArt))
return outArt
def getById(self, articleId):
    """
    Return the single ACTIVE (published, actual_flag = 'A') article with
    the given id — a read-side accessor; editing fetches articles another
    way.  Raises ArticleNotFound when no active row matches.
    """
    fields = ('articles.article_id, articles.article_title, articles.article_link, '
              'articles.article_annotation, articles.article_source, articles.article_category_id, '
              'articles.revision_author_id, articles.article_template_id, articles.article_permissions')
    criteria = {
        'whereStr': ' articles.article_id = ' + str(articleId) + " AND articles.actual_flag = 'A' ",
    }
    rows = self.select(fields, '', criteria)
    if not rows:
        logging.info( 'Article ::: ARTICLE_NOT_FOUND getById articleId = ' + str(articleId))
        raise ArticleNotFound()
    if len(rows) == 1:
        return self.articleDecode(rows[0])
    # more than one row: fall through (implicitly returns None), matching
    # the historical behavior of this accessor
def getByUsingHash(self, spectatorId, hash):
    """
    Fetch one article REVISION by its sha hash.

    Everything from THAT exact revision (the one the user asked for) is
    returned.  The UNION covers two access paths: a revision matched
    directly by hash, and a 'grp'-permission revision reachable through the
    spectator's group memberships.

    NOTE(review): the parameter name 'hash' shadows the builtin — kept
    unchanged for caller compatibility.
    """
    strTpl = """
    SELECT
        lfind.article_id, lfind.article_title, lfind.article_link,
        lfind.article_annotation, lfind.article_source,
        lfind.article_category_id, lfind.revision_author_id,
        lfind.article_template_id, lfind.article_permissions, lfind.actual_flag
    FROM articles lfind
    WHERE lfind.sha_hash = '${aHash}'
    UNION
    SELECT
        lfind.article_id, lfind.article_title, lfind.article_link,
        lfind.article_annotation, lfind.article_source, lfind.article_category_id, lfind.revision_author_id,
        lfind.article_template_id, lfind.article_permissions, lfind.actual_flag
    FROM groups, librarys, articles lfind
    WHERE lfind.article_permissions = 'grp'
        AND groups.revision_author_id = lfind.revision_author_id
        AND groups.revision_author_id = $sId
        AND groups.dt_header_id = librarys.group_id
        AND librarys.article_id = lfind.article_id
        AND lfind.sha_hash = '${aHash}'
    """
    # substitute the spectator id and hash into the SQL template
    tplWrk = string.Template(strTpl)
    strSelect = tplWrk.substitute(sId=str(spectatorId), aHash=hash)
    getRez = self.rowSelect(str(strSelect))
    if len(getRez) == 0:
        logging.info( 'getByUsingHash ::: ARTICLE_NOT_FOUND getByUsingHash hash = ' + str(hash))
        raise ArticleNotFound()
    elif len(getRez) == 1:
        outArt = self.articleDecode(getRez[0])
        return outArt
    # more than one row: implicitly returns None (historical behavior)
def list(self, serchOptions = None):
    """
    Return the list of articles (ordering is applied later via 'orderStr').

    Only rows flagged ACTUAL ('A') are selected — the current revision ids.
    When serchOptions carries a positive categoryId attribute the list is
    additionally filtered by that category.
    Returns an empty list when nothing matches.
    """
    whereStr = ''
    if serchOptions != None and hasattr(serchOptions, 'categoryId') and serchOptions.categoryId > 0 :
        whereStr = ' articles.article_category_id = ' + str(serchOptions.categoryId)
    # always restrict to the active revision; prepend AND only when a
    # category condition is already present
    if whereStr == '':
        whereStr += " articles.actual_flag = 'A' "
    else:
        whereStr += " AND articles.actual_flag = 'A' "
    getRez = self.select(
        'articles.article_id, articles.article_title, articles.article_link, ' +
        'articles.article_annotation, articles.article_category_id, articles.revision_author_id, '+
        ' articles.article_template_id, articles.article_permissions ',
        '',
        {
            'whereStr': whereStr,  # string: set of row-filter conditions
            'orderStr': ' articles.article_title ',  # string: row ordering
        }
    )
    if len(getRez) == 0:
        # raise WikiException( ARTICLE_NOT_FOUND )
        return []
    return getRez
def listByAutorId(self, authorId = 0, spectatorId = 0):
    """
    Return the list of articles by a single author — all articles, all
    categories — restricted to the ACTUAL revisions (actual_flag = 'A').

    authorId    - id of the articles' author.
    spectatorId - id of the viewer; articles from "closed" groups should be
                  visible only to their members.  When authorId equals
                  spectatorId the author is browsing his own materials.

    Intended visibility: "public" articles, group articles from OPEN
    groups, and group articles from CLOSED groups the viewer belongs to;
    everything else stays hidden.  An anonymous viewer sees what an
    unregistered user would see, since all of a user's articles are shown
    on his own page anyway.

    NOTE(review): the SQL below does not take the spectator into account
    yet (original author's remark preserved).
    """
    if int(authorId) > 0 and int(spectatorId) == 0:
        # Author-only branch: the author's ACTUAL articles together with
        # their (optional) group attribution via LEFT JOINs.
        strTpl = """
        SELECT
            articles.article_id, articles.article_title, articles.article_link, articles.article_annotation,
            articles.article_category_id,
            articles.revision_author_id, articles.article_template_id, articles.article_permissions,
            groups.group_title, groups.group_annotation, groups.dt_header_id AS group_id
        FROM articles
        LEFT JOIN librarys ON librarys.article_id = articles.article_id
        LEFT JOIN groups ON groups.dt_header_id = librarys.group_id
        WHERE articles.article_id IN
            (SELECT DISTINCT articles.article_id FROM articles WHERE articles.revision_author_id = $aId)
            AND articles.actual_flag = 'A'
        ORDER BY 2
        """
        # AND articles.article_permissions = 'pbl'
        tplWrk = string.Template(strTpl)
        strSelect = tplWrk.substitute(aId=str(authorId), sId=str(spectatorId))
        getRez = self.rowSelect(str(strSelect))
    else:
        # Generic branch: build the WHERE clause piecewise.
        autorIdStr = '';
        if authorId > 0 :
            autorIdStr = ' articles.revision_author_id = ' + str(authorId)
        if autorIdStr == '':
            autorIdStr += " articles.actual_flag = 'A' "
        else:
            autorIdStr += " AND articles.actual_flag = 'A' "
        getRez = self.select(
            " articles.article_id, articles.article_title, articles.article_link, " +
            " articles.article_annotation, articles.article_category_id, articles.revision_author_id, "+
            " articles.article_template_id, articles.article_permissions, " +
            " groups.group_title, groups.group_annotation, groups.dt_header_id AS group_id " ,
            "",
            {
                "joinStr": "LEFT JOIN librarys ON librarys.article_id = articles.article_id LEFT JOIN groups ON groups.dt_header_id = librarys.group_id",
                "whereStr": autorIdStr ,  # string: set of row-filter conditions
                "orderStr": " 2 ",  # order by the 2nd select column (title)
            }
        )
    if len(getRez) == 0:
        # raise WikiException( ARTICLE_NOT_FOUND )
        return []
    return getRez
def getListArticlesAll(self, spectatorId=0):
    """
    Return the FULL list of accessible articles — not only the viewer's
    own, but everything the viewer is allowed to see.

    spectatorId - viewer id; when > 0 private ('sol') articles authored by
    the viewer are included, otherwise only non-private ACTUAL articles
    are returned.  Returns an empty list when nothing matches.
    """
    if int(spectatorId) > 0:
        strTpl = """
        SELECT
            articles.article_id, articles.article_title, articles.article_link, articles.article_annotation,
            articles.article_category_id,
            articles.revision_author_id, articles.article_template_id, articles.article_permissions,
            groups.group_title, groups.group_annotation, groups.dt_header_id AS group_id
        FROM articles
        LEFT JOIN librarys ON librarys.article_id = articles.article_id
        LEFT JOIN groups ON groups.revision_author_id = articles.revision_author_id
            AND groups.dt_header_id = librarys.group_id
        WHERE articles.actual_flag = 'A'
            AND ( articles.article_permissions != 'sol' OR
                articles.article_id IN
                (SELECT DISTINCT articles.article_id FROM articles WHERE articles.revision_author_id = $sId)
            )
        ORDER BY 2
        """
        tplWrk = string.Template(strTpl)
        strSelect = tplWrk.substitute(sId=str(spectatorId))
        getRez = self.rowSelect(str(strSelect))
    else:
        autorIdStr = " articles.article_permissions != 'sol' AND articles.actual_flag = 'A' "
        # BUG FIX: the original joinStr read
        #   "LEFT JOIN groups ON groups.dt_header_id AS group_id = librarys.group_id"
        # — an alias inside an ON clause is an SQL syntax error; the alias
        # belongs in the select list only.  The select list also said
        # "groups. group_id" and omitted articles.revision_author_id,
        # inconsistent with the templated branch above and with
        # listByAutorId; both are aligned here.
        getRez = self.select(
            " articles.article_id, articles.article_title, articles.article_link, " +
            " articles.article_annotation, articles.article_category_id, articles.revision_author_id, " +
            " articles.article_template_id, articles.article_permissions, " +
            " groups.group_title, groups.group_annotation, groups.dt_header_id AS group_id ",
            "",
            {
                "joinStr": "LEFT JOIN librarys ON librarys.article_id = articles.article_id " +
                           "LEFT JOIN groups ON groups.dt_header_id = librarys.group_id",
                "whereStr": autorIdStr,  # string: set of row-filter conditions
                "orderStr": " 2 ",  # order by the 2nd select column (title)
            }
        )
    if len(getRez) == 0:
        return []
    return getRez
def IsUniqueRevision(self, titleHash, annotationHash, articleHash):
    """
    Check whether this revision is unique: everything may have changed, or
    only the title, or only the text.  Delegates to the revision
    controller obtained from self.RevisionLoc().
    """
    return self.RevisionLoc().IsUniqueRevision(titleHash, annotationHash, articleHash)
# def select(self,
#     selectStr,      # string - what we want the select to return
#     addTables,      # string - list of ADDITIONAL tables (the object's main table is set at init time)
#     anyParams = {}  # all remaining sections of the select
#     ):
# TODO: extend this with whatever may come in from the inheriting model :-)
#
cmd = listTemplates.listTemplatesCmd()
cmd.templatefilter = template_filter
if domain_id is not None:
cmd.domainid = domain_id
if zone_id is not None:
cmd.zoneid = zone_id
if template_id is not None:
cmd.id = template_id
if template_name is not None:
cmd.name = template_name
if hypervisor is not None:
cmd.hypervisor = hypervisor
if project_id is not None:
cmd.projectid = project_id
if account is not None:
cmd.account = account
'''
Get the Templates pertaining to the inputs provided
'''
list_templatesout = apiclient.listTemplates(cmd)
#print("template result is %s"%(list_templatesout))
if list_templatesout is None:
return FAILED
if validateList(list_templatesout[0]) == FAIL :
return FAILED
for template in list_templatesout:
if template.isready and template.templatetype == "USER" and template.ostypename == ostype_desc:
return template
'''
Return default first template, if no template matched
'''
return FAILED
def download_systemplates_sec_storage(server, services):
    """Download System templates on sec storage.

    Opens an SSH session to the management server, mounts the secondary
    storage NFS export, runs the system-VM template installer, and unmounts.
    Raises Exception when SSH access or the installation fails.
    """
    try:
        # Login to management server
        ssh = SshClient(
            server["ipaddress"],
            server["port"],
            server["username"],
            server["password"]
        )
    except Exception:
        # BUG FIX: the original read server["ipaddess"] (typo), raising a
        # KeyError here instead of the intended message.
        raise Exception("SSH access failed for server with IP address: %s" %
                        server["ipaddress"])
    # Mount Secondary Storage on Management Server
    cmds = [
        "mkdir -p %s" % services["mnt_dir"],
        "mount -t nfs %s:/%s %s" % (
            services["sec_storage"],
            services["path"],
            services["mnt_dir"]
        ),
        "%s -m %s -u %s -h %s -F" % (
            services["command"],
            services["mnt_dir"],
            services["download_url"],
            services["hypervisor"]
        )
    ]
    res = ""
    for c in cmds:
        result = ssh.execute(c)
        # only the LAST command's output (the installer) is checked below
        res = str(result)
    # Unmount the Secondary storage
    ssh.execute("umount %s" % (services["mnt_dir"]))
    if res.count("Successfully installed system VM template") == 1:
        return
    raise Exception("Failed to download System Templates on Sec Storage")
def wait_for_ssvms(apiclient, zoneid, podid, interval=60):
    """After setup wait for SSVMs to come Up.

    Waits first for the secondary-storage VM, then the console proxy, each
    with up to 40 polls spaced `interval` seconds apart.
    Raises Exception when either system VM fails to reach Running.
    """
    time.sleep(interval)
    _wait_for_systemvm(apiclient, zoneid, podid, 'secondarystoragevm', 'SSVM', interval)
    _wait_for_systemvm(apiclient, zoneid, podid, 'consoleproxy', 'CPVM', interval)
    return


def _wait_for_systemvm(apiclient, zoneid, podid, systemvmtype, label, interval):
    """Poll until the given system VM type reports Running; raise on timeout."""
    timeout = 40
    while True:
        list_ssvm_response = list_ssvms(
            apiclient,
            systemvmtype=systemvmtype,
            zoneid=zoneid,
            podid=podid
        )
        vm = list_ssvm_response[0]
        if vm.state == 'Running':
            return
        # BUG FIX: the original tested "elif timeout == 0" after an
        # if/elif pair that already covered both state outcomes, so the
        # timeout branch was unreachable and the loop could spin forever.
        if timeout <= 0:
            raise Exception("%s failed to come up" % label)
        # Sleep to ensure system VMs are Up and Running
        time.sleep(interval)
        timeout = timeout - 1
def get_builtin_template_info(apiclient, zoneid):
    """Return (url, hypervisor, format) for a BUILTIN template in the zone.

    Lists the featured templates, picks the first BUILTIN one, and extracts
    its HTTP download info.  Raises Exception when no BUILTIN template
    exists (the original silently fell through and used the last featured
    template instead).
    """
    list_template_response = Template.list(
        apiclient,
        templatefilter='featured',
        zoneid=zoneid,
    )
    for b_template in list_template_response:
        if b_template.templatetype == 'BUILTIN':
            break
    else:
        raise Exception("No BUILTIN template found in zone %s" % zoneid)
    extract_response = Template.extract(apiclient,
                                        b_template.id,
                                        'HTTP_DOWNLOAD',
                                        zoneid)
    return extract_response.url, b_template.hypervisor, b_template.format
def download_builtin_templates(apiclient, zoneid, hypervisor, host,
                               linklocalip, interval=60):
    """After setup wait till builtin templates are downloaded.

    Opens iptables on the SSVM (via the host), locates the BUILTIN template
    for the zone/hypervisor, then polls its status until it reaches
    'Download Complete'.  Raises Exception on listing failure or when the
    status indicates a download error.
    """
    # Change IPTABLES Rules
    get_process_status(
        host["ipaddress"],
        host["port"],
        host["username"],
        host["password"],
        linklocalip,
        "iptables -P INPUT ACCEPT"
    )
    time.sleep(interval)
    # Find the BUILTIN Templates for given Zone, Hypervisor
    list_template_response = list_templates(
        apiclient,
        hypervisor=hypervisor,
        zoneid=zoneid,
        templatefilter='self'
    )
    if not isinstance(list_template_response, list):
        raise Exception("Failed to download BUILTIN templates")
    # Ensure all BUILTIN templates are downloaded
    templateid = None
    for template in list_template_response:
        if template.templatetype == "BUILTIN":
            templateid = template.id
    # NOTE(review): no break above — only the LAST BUILTIN template's id is
    # polled below, and templateid stays None when no BUILTIN template
    # exists (making the listing below unfiltered) — confirm intended.
    # Sleep to ensure that template is in downloading state after adding
    # Sec storage
    time.sleep(interval)
    while True:
        template_response = list_templates(
            apiclient,
            id=templateid,
            zoneid=zoneid,
            templatefilter='self'
        )
        template = template_response[0]
        # If template is ready,
        # template.status = Download Complete
        # Downloading - x% Downloaded
        # Error - Any other string
        if template.status == 'Download Complete':
            break
        elif 'Downloaded' in template.status:
            time.sleep(interval)
        elif 'Installing' not in template.status:
            raise Exception("ErrorInDownload")
    return
def update_resource_limit(apiclient, resourcetype, account=None,
                          domainid=None, max=None, projectid=None):
    """Updates the resource limit to 'max' for given account.

    resourcetype : resource type id being limited.
    account / domainid / projectid : optional scoping of the limit.
    max : new limit value (note: 'max' shadows the builtin, kept for
          caller compatibility).
    """
    cmd = updateResourceLimit.updateResourceLimitCmd()
    cmd.resourcetype = resourcetype
    if account:
        cmd.account = account
    if domainid:
        cmd.domainid = domainid
    # BUG FIX: "if max:" silently dropped an explicit limit of 0, which is
    # a valid value; compare against None instead of truthiness.
    if max is not None:
        cmd.max = max
    if projectid:
        cmd.projectid = projectid
    apiclient.updateResourceLimit(cmd)
    return
def list_os_types(apiclient, **kwargs):
    """List all os types matching criteria."""
    cmd = listOsTypes.listOsTypesCmd()
    # idiom fix: plain loop instead of a side-effect list comprehension
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # listall is implied when filtering by both account and domain
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listOsTypes(cmd)
def list_routers(apiclient, **kwargs):
    """List all Routers matching criteria."""
    cmd = listRouters.listRoutersCmd()
    # idiom fix: plain loop instead of a side-effect list comprehension
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # listall is implied when filtering by both account and domain
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listRouters(cmd)
def list_zones(apiclient, **kwargs):
    """List all Zones matching criteria."""
    cmd = listZones.listZonesCmd()
    # idiom fix: plain loop instead of a side-effect list comprehension
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # listall is implied when filtering by both account and domain
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listZones(cmd)
def list_networks(apiclient, **kwargs):
    """List all Networks matching criteria."""
    cmd = listNetworks.listNetworksCmd()
    # idiom fix: plain loop instead of a side-effect list comprehension
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # listall is implied when filtering by both account and domain
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listNetworks(cmd)
def list_clusters(apiclient, **kwargs):
    """List all Clusters matching criteria."""
    cmd = listClusters.listClustersCmd()
    # idiom fix: plain loop instead of a side-effect list comprehension
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # listall is implied when filtering by both account and domain
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listClusters(cmd)
def list_ssvms(apiclient, **kwargs):
    """List all SSVMs matching criteria."""
    cmd = listSystemVms.listSystemVmsCmd()
    # idiom fix: plain loop instead of a side-effect list comprehension
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # listall is implied when filtering by both account and domain
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listSystemVms(cmd)
def list_storage_pools(apiclient, **kwargs):
    """List all storage pools matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listStoragePools.listStoragePoolsCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listStoragePools(cmd)
def list_virtual_machines(apiclient, **kwargs):
    """List all VMs matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listVirtualMachines.listVirtualMachinesCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listVirtualMachines(cmd)
def list_hosts(apiclient, **kwargs):
    """List all Hosts matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listHosts.listHostsCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listHosts(cmd)
def list_configurations(apiclient, **kwargs):
    """List configuration with specified name (kwargs are copied onto the cmd as filters)."""
    cmd = listConfigurations.listConfigurationsCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listConfigurations(cmd)
def list_publicIP(apiclient, **kwargs):
    """List all Public IPs matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listPublicIpAddresses.listPublicIpAddressesCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listPublicIpAddresses(cmd)
def list_nat_rules(apiclient, **kwargs):
    """List all NAT rules matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listPortForwardingRules.listPortForwardingRulesCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listPortForwardingRules(cmd)
def list_lb_rules(apiclient, **kwargs):
    """List all Load balancing rules matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listLoadBalancerRules.listLoadBalancerRulesCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listLoadBalancerRules(cmd)
def list_lb_instances(apiclient, **kwargs):
    """List all Load balancing instances matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listLoadBalancerRuleInstances.listLoadBalancerRuleInstancesCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listLoadBalancerRuleInstances(cmd)
def list_firewall_rules(apiclient, **kwargs):
    """List all Firewall Rules matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listFirewallRules.listFirewallRulesCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listFirewallRules(cmd)
def list_volumes(apiclient, **kwargs):
    """List all volumes matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listVolumes.listVolumesCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listVolumes(cmd)
def list_isos(apiclient, **kwargs):
    """Lists all available ISO files (kwargs are copied onto the cmd as filters)."""
    cmd = listIsos.listIsosCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listIsos(cmd)
def list_snapshots(apiclient, **kwargs):
    """List all snapshots matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listSnapshots.listSnapshotsCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listSnapshots(cmd)
def list_templates(apiclient, **kwargs):
    """List all templates matching criteria (kwargs are copied onto the cmd as filters)."""
    cmd = listTemplates.listTemplatesCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listTemplates(cmd)
def list_domains(apiclient, **kwargs):
    """Lists domains (kwargs are copied onto the cmd as filters)."""
    cmd = listDomains.listDomainsCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listDomains(cmd)
def list_accounts(apiclient, **kwargs):
    """Lists accounts and provides detailed account information for
    listed accounts (kwargs are copied onto the cmd as filters)."""
    cmd = listAccounts.listAccountsCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listAccounts(cmd)
def list_users(apiclient, **kwargs):
    """Lists users and provides detailed account information for
    listed users (kwargs are copied onto the cmd as filters)."""
    cmd = listUsers.listUsersCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listUsers(cmd)
def list_snapshot_policy(apiclient, **kwargs):
    """Lists snapshot policies (kwargs are copied onto the cmd as filters)."""
    cmd = listSnapshotPolicies.listSnapshotPoliciesCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listSnapshotPolicies(cmd)
def list_events(apiclient, **kwargs):
    """Lists events (kwargs are copied onto the cmd as filters)."""
    cmd = listEvents.listEventsCmd()
    for key, value in kwargs.items():
        setattr(cmd, key, value)
    # Account-scoped queries need listall=True to also return owned resources.
    if 'account' in kwargs and 'domainid' in kwargs:
        cmd.listall = True
    return apiclient.listEvents(cmd)
def list_disk_offering(apiclient, **kwargs):
"""Lists all available disk offerings."""
cmd = listDiskOfferings.listDiskOfferingsCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
if 'account' | |
== 200:
if response.content:
deserialized = response.json()
else:
deserialized = None
if response.status_code == 201:
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
    @distributed_trace_async
    async def begin_put_non_retry201_creating400_invalid_json(
        self, product: JSONType = None, **kwargs: Any
    ) -> AsyncLROPoller[JSONType]:
        """Long running put request, service returns a Product with 'ProvisioningState' = 'Creating' and
        201 response code.

        :param product: Product to put.
        :type product: JSONType
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns JSON object
        :rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
        :raises: ~azure.core.exceptions.HttpResponseError

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                product = {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str"  # Optional. Possible values
                          # include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                          # "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }

                # The response body for status code(s) 200 and 201 has the same
                # shape as the input template above.
        """
        # Pop all poller-related keyword arguments before forwarding the rest
        # of **kwargs to the pipeline.
        content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop("cls", None)  # type: ClsType[JSONType]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial PUT. cls=lambda returns the raw
            # pipeline response so the poller can inspect status/headers itself.
            raw_result = await self._put_non_retry201_creating400_invalid_json_initial(
                product=product, content_type=content_type, cls=lambda x, y, z: x, **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialization callback applied to the final polling response.
            response = pipeline_response.http_response
            if response.content:
                deserialized = response.json()
            else:
                deserialized = None
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-supplied polling method object.
        if polling is True:
            polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False:
            polling_method = AsyncNoPolling()
        else:
            polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new one.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _put_async_relative_retry400_initial(self, product: JSONType = None, **kwargs: Any) -> JSONType:
cls = kwargs.pop("cls", None) # type: ClsType[JSONType]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
if product is not None:
_json = product
else:
_json = None
request = build_lrosads_put_async_relative_retry400_request_initial(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
    @distributed_trace_async
    async def begin_put_async_relative_retry400(
        self, product: JSONType = None, **kwargs: Any
    ) -> AsyncLROPoller[JSONType]:
        """Long running put request, service returns a 200 with ProvisioningState='Creating'. Poll the
        endpoint indicated in the Azure-AsyncOperation header for operation status.

        :param product: Product to put.
        :type product: JSONType
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns JSON object
        :rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
        :raises: ~azure.core.exceptions.HttpResponseError

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                product = {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str"  # Optional. Possible values
                          # include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                          # "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }

                # The response body for status code 200 has the same shape as
                # the input template above.
        """
        # Pop all poller-related keyword arguments before forwarding the rest
        # of **kwargs to the pipeline.
        content_type = kwargs.pop("content_type", "application/json")  # type: Optional[str]
        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop("cls", None)  # type: ClsType[JSONType]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial PUT. cls=lambda returns the raw
            # pipeline response so the poller can inspect status/headers itself.
            raw_result = await self._put_async_relative_retry400_initial(
                product=product, content_type=content_type, cls=lambda x, y, z: x, **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialization callback applied to the final polling response;
            # also extracts the polling-related headers for custom callbacks.
            response_headers = {}
            response = pipeline_response.http_response
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )
            response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
            response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
            if response.content:
                deserialized = response.json()
            else:
                deserialized = None
            if cls:
                return cls(pipeline_response, deserialized, response_headers)
            return deserialized

        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-supplied polling method object.
        if polling is True:
            polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False:
            polling_method = AsyncNoPolling()
        else:
            polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new one.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _delete_non_retry400_initial(self, **kwargs: Any) -> None:
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_lrosads_delete_non_retry400_request_initial()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, None, response_headers)
    @distributed_trace_async
    async def begin_delete_non_retry400(self, **kwargs: Any) -> AsyncLROPoller[None]:
        """Long running delete request, service returns a 400 with an error body.

        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns None
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop all poller-related keyword arguments before forwarding the rest
        # of **kwargs to the pipeline.
        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial DELETE. cls=lambda returns the
            # raw pipeline response so the poller can inspect status/headers.
            raw_result = await self._delete_non_retry400_initial(cls=lambda x, y, z: x, **kwargs)
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Operation has no body; implicitly returns None unless the caller
            # provided a custom callback.
            if cls:
                return cls(pipeline_response, None, {})

        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-supplied polling method object.
        if polling is True:
            polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False:
            polling_method = AsyncNoPolling()
        else:
            polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new one.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _delete202_non_retry400_initial(self, **kwargs: Any) -> None:
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_lrosads_delete202_non_retry400_request_initial()
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, None, response_headers)
@distributed_trace_async
async def begin_delete202_non_retry400(self, **kwargs: Any) -> AsyncLROPoller[None]:
| |
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert param3.val == param2
def test_exponentiate_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 ** 2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_exponentiate_integer_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 ** param1
assert isinstance(param3, iap.Power)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_exponentiate_string_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" ** param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_exponentiate_stochastic_param_by_string(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 ** "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
class TestBinomial(unittest.TestCase):
    """Tests for iap.Binomial (Bernoulli draws with probability p)."""

    def setUp(self):
        reseed()

    def test___init___p_is_zero(self):
        param = iap.Binomial(0)
        expected = "Binomial(Deterministic(int 0))"
        assert str(param) == expected
        assert repr(param) == expected

    def test___init___p_is_one(self):
        param = iap.Binomial(1.0)
        expected = "Binomial(Deterministic(float 1.00000000))"
        assert str(param) == expected
        assert repr(param) == expected

    def test_p_is_zero(self):
        param = iap.Binomial(0)
        single = param.draw_sample()
        drawn = param.draw_samples((10, 5))
        assert single.shape == ()
        assert drawn.shape == (10, 5)
        assert single == 0
        assert np.all(drawn == 0)

    def test_p_is_one(self):
        param = iap.Binomial(1.0)
        single = param.draw_sample()
        drawn = param.draw_samples((10, 5))
        assert single.shape == ()
        assert drawn.shape == (10, 5)
        assert single == 1
        assert np.all(drawn == 1)

    def test_p_is_50_percent(self):
        param = iap.Binomial(0.5)
        single = param.draw_sample()
        drawn = param.draw_samples((10000,))
        values, counts = np.unique(drawn, return_counts=True)
        assert single.shape == ()
        assert drawn.shape == (10000,)
        assert single in [0, 1]
        assert len(values) == 2
        for value, count in zip(values, counts):
            # Each outcome should occur ~5000 times (10% tolerance).
            assert value in (0, 1)
            assert 4500 < count < 5500

    def test_p_is_list(self):
        param = iap.Binomial(iap.Choice([0.25, 0.75]))
        for _ in sm.xrange(10):
            drawn = param.draw_samples((1000,))
            frac = np.sum(drawn) / drawn.size
            # The observed rate must match one of the two candidate p values.
            assert abs(frac - 0.25) < 0.05 or abs(frac - 0.75) < 0.05

    def test_p_is_tuple(self):
        param = iap.Binomial((0.0, 1.0))
        prev_frac = 0.5
        diffs = []
        for _ in sm.xrange(30):
            drawn = param.draw_samples((1000,))
            frac = np.sum(drawn).astype(np.float32) / drawn.size
            diffs.append(abs(frac - prev_frac))
            prev_frac = frac
        # p is re-sampled per call, so the observed rate should move often.
        nb_changed = sum(diff > 0.05 for diff in diffs)
        assert nb_changed > 15

    def test_samples_same_values_for_same_seeds(self):
        param = iap.Binomial(0.5)
        first = param.draw_samples((10, 5),
                                   random_state=iarandom.RNG(1234))
        second = param.draw_samples((10, 5),
                                    random_state=iarandom.RNG(1234))
        assert np.array_equal(first, second)
class TestChoice(unittest.TestCase):
    """Tests for iap.Choice (sampling from a fixed set of values)."""

    def setUp(self):
        reseed()

    def test___init__(self):
        param = iap.Choice([0, 1, 2])
        expected = "Choice(a=[0, 1, 2], replace=True, p=None)"
        assert str(param) == expected
        assert repr(param) == expected

    def test_value_is_list(self):
        param = iap.Choice([0, 1, 2])
        single = param.draw_sample()
        drawn = param.draw_samples((10, 5))
        assert single.shape == ()
        assert drawn.shape == (10, 5)
        assert single in [0, 1, 2]
        assert np.all((drawn == 0) | (drawn == 1) | (drawn == 2))

    def test_sampled_values_match_expected_counts(self):
        param = iap.Choice([0, 1, 2])
        drawn = param.draw_samples((10000,))
        expected = 10000 / 3
        tolerance = expected * 0.05
        for value in [0, 1, 2]:
            count = np.sum(drawn == value)
            assert expected - tolerance < count < expected + tolerance

    def test_value_is_list_containing_negative_number(self):
        param = iap.Choice([-1, 1])
        single = param.draw_sample()
        drawn = param.draw_samples((10, 5))
        assert single.shape == ()
        assert drawn.shape == (10, 5)
        assert single in [-1, 1]
        assert np.all((drawn == -1) | (drawn == 1))

    def test_value_is_list_of_floats(self):
        param = iap.Choice([-1.2, 1.7])
        single = param.draw_sample()
        drawn = param.draw_samples((10, 5))
        assert single.shape == ()
        assert drawn.shape == (10, 5)
        eps = _eps(single)
        # Every sampled value must be (within eps) one of the two candidates.
        assert abs(single - (-1.2)) < eps or abs(single - 1.7) < eps
        near_first = np.logical_and(-1.2 - eps < drawn, drawn < -1.2 + eps)
        near_second = np.logical_and(1.7 - eps < drawn, drawn < 1.7 + eps)
        assert np.all(np.logical_or(near_first, near_second))

    def test_value_is_list_of_strings(self):
        param = iap.Choice(["first", "second", "third"])
        single = param.draw_sample()
        drawn = param.draw_samples((10, 5))
        assert single.shape == ()
        assert drawn.shape == (10, 5)
        assert single in ["first", "second", "third"]
        assert np.all(
            (drawn == "first") | (drawn == "second") | (drawn == "third")
        )

    def test_sample_without_replacing(self):
        param = iap.Choice([1 + i for i in sm.xrange(100)], replace=False)
        drawn = param.draw_samples((50,))
        # Without replacement, no value may be sampled more than once.
        _values, counts = np.unique(drawn, return_counts=True)
        assert np.all(counts == 1)

    def test_non_uniform_probabilities_over_elements(self):
        param = iap.Choice([0, 1], p=[0.25, 0.75])
        drawn = param.draw_samples((10000,))
        values, counts = np.unique(drawn, return_counts=True)
        assert len(values) == 2
        expected = {0: 2500, 1: 7500}
        for value, count in zip(values, counts):
            assert value in expected
            assert expected[value] - 500 < count < expected[value] + 500

    def test_list_contains_stochastic_parameter(self):
        param = iap.Choice([iap.Choice([0, 1]), 2])
        drawn = param.draw_samples((10000,))
        values, counts = np.unique(drawn, return_counts=True)
        assert len(values) == 3
        for value, count in zip(values, counts):
            # 0 and 1 each get ~25% of the mass, 2 gets ~50%.
            assert value in [0, 1, 2]
            center = 5000 if value == 2 else 2500
            assert center - 500 < count < center + 500

    def test_samples_same_values_for_same_seeds(self):
        param = iap.Choice([-1, 0, 1, 2, 3])
        first = param.draw_samples((10, 5),
                                   random_state=iarandom.RNG(1234))
        second = param.draw_samples((10, 5),
                                    random_state=iarandom.RNG(1234))
        assert np.array_equal(first, second)

    def test_value_is_bad_datatype(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.Choice(123)
        assert "Expected a to be an iterable" in str(ctx.exception)

    def test_p_is_bad_datatype(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.Choice([1, 2], p=123)
        assert "Expected p to be" in str(ctx.exception)

    def test_value_and_p_have_unequal_lengths(self):
        with self.assertRaises(Exception) as ctx:
            _ = iap.Choice([1, 2], p=[1])
        assert "Expected lengths of" in str(ctx.exception)
class TestDiscreteUniform(unittest.TestCase):
    """Tests for iap.DiscreteUniform (uniform draws over the integers in [a, b])."""

    def setUp(self):
        reseed()

    def test___init__(self):
        param = iap.DiscreteUniform(0, 2)
        assert (
            param.__str__()
            == param.__repr__()
            == "DiscreteUniform(Deterministic(int 0), Deterministic(int 2))"
        )

    def test_bounds_are_ints(self):
        param = iap.DiscreteUniform(0, 2)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [0, 1, 2]
        assert np.all(
            np.logical_or(
                np.logical_or(samples == 0, samples == 1),
                samples == 2
            )
        )

    def test_samples_match_expected_counts(self):
        param = iap.DiscreteUniform(0, 2)
        samples = param.draw_samples((10000,))
        # Each of the three values should appear ~1/3 of the time (5% tolerance).
        expected = 10000/3
        expected_tolerance = expected * 0.05
        for v in [0, 1, 2]:
            count = np.sum(samples == v)
            assert (
                expected - expected_tolerance
                < count <
                expected + expected_tolerance
            )

    def test_lower_bound_is_negative(self):
        param = iap.DiscreteUniform(-1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(samples == -1, samples == 0),
                samples == 1
            )
        )

    def test_bounds_are_floats(self):
        # Float bounds are expected to be truncated to integers.
        param = iap.DiscreteUniform(-1.2, 1.2)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(
                    samples == -1, samples == 0
                ),
                samples == 1
            )
        )

    def test_lower_and_upper_bound_have_wrong_order(self):
        # Swapped bounds are expected to be re-ordered internally.
        param = iap.DiscreteUniform(1, -1)
        sample = param.draw_sample()
        samples = param.draw_samples((10, 5))
        assert sample.shape == tuple()
        assert samples.shape == (10, 5)
        assert sample in [-1, 0, 1]
        assert np.all(
            np.logical_or(
                np.logical_or(
                    samples == -1, samples == 0
                ),
                samples == 1
            )
        )

    def test_lower_and_upper_bound_are_the_same(self):
        param = iap.DiscreteUniform(1, 1)
        sample = param.draw_sample()
        samples = param.draw_samples((100,))
        assert sample == 1
        assert np.all(samples == 1)

    def test_samples_same_values_for_same_seeds(self):
        # BUG FIX: this previously instantiated iap.Uniform (the continuous
        # distribution), so DiscreteUniform's determinism was never tested.
        param = iap.DiscreteUniform(-1, 1)
        samples1 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        samples2 = param.draw_samples((10, 5),
                                      random_state=iarandom.RNG(1234))
        assert np.array_equal(samples1, samples2)
class TestPoisson(unittest.TestCase):
    """Tests for iap.Poisson (Poisson-distributed integer draws)."""

    def setUp(self):
        reseed()

    def test___init__(self):
        param = iap.Poisson(1)
        expected = "Poisson(Deterministic(int 1))"
        assert str(param) == expected
        assert repr(param) == expected

    def test_draw_sample(self):
        single = iap.Poisson(1).draw_sample()
        assert single.shape == ()
        assert single >= 0

    def test_via_comparison_to_np_poisson(self):
        param = iap.Poisson(1)
        drawn = param.draw_samples((100, 1000))
        reference = iarandom.RNG(1234).poisson(
            lam=1, size=(100, 1000))
        assert drawn.shape == (100, 1000)
        # Per-value frequencies should roughly match numpy's own sampler.
        for value in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
            count_reference = int(np.sum(reference == value))
            count = np.sum(drawn == value)
            tolerance = max(count_reference * 0.1, 250)
            assert count_reference - tolerance < count < count_reference + tolerance

    def test_samples_same_values_for_same_seeds(self):
        param = iap.Poisson(1)
        first = param.draw_samples((10, 5),
                                   random_state=iarandom.RNG(1234))
        second = param.draw_samples((10, 5),
                                    random_state=iarandom.RNG(1234))
        assert np.array_equal(first, second)
class TestNormal(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Normal(0, 1)
assert (
param.__str__()
== param.__repr__()
== "Normal(loc=Deterministic(int 0), scale=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Normal(0, 1)
sample = param.draw_sample()
assert sample.shape == tuple()
def test_via_comparison_to_np_normal(self):
param = iap.Normal(0, 1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).normal(loc=0, scale=1,
size=(100, 1000))
samples | |
which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
"""
return pulumi.get(self, "service_cidr")
    @service_cidr.setter
    def service_cidr(self, value: Optional[pulumi.Input[str]]):
        # Store the new service CIDR on the underlying pulumi args object.
        pulumi.set(self, "service_cidr", value)
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        Virtual network subnet resource ID the compute nodes belong to
        """
        # Maps the Python-style 'subnet_id' attribute to the API's 'subnetId' field.
        return pulumi.get(self, "subnet_id")
    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        # Store the new subnet resource ID on the underlying pulumi args object.
        pulumi.set(self, "subnet_id", value)
@pulumi.input_type
class AmlComputePropertiesArgs:
    # Autogenerated Pulumi input type for Azure ML AmlCompute properties.
    # Do not hand-edit logic; regenerate from the provider schema instead.
    def __init__(__self__, *,
                 enable_node_public_ip: Optional[pulumi.Input[bool]] = None,
                 isolated_network: Optional[pulumi.Input[bool]] = None,
                 os_type: Optional[pulumi.Input[Union[str, 'OsType']]] = None,
                 remote_login_port_public_access: Optional[pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']]] = None,
                 scale_settings: Optional[pulumi.Input['ScaleSettingsArgs']] = None,
                 subnet: Optional[pulumi.Input['ResourceIdArgs']] = None,
                 user_account_credentials: Optional[pulumi.Input['UserAccountCredentialsArgs']] = None,
                 virtual_machine_image: Optional[pulumi.Input['VirtualMachineImageArgs']] = None,
                 vm_priority: Optional[pulumi.Input[Union[str, 'VmPriority']]] = None,
                 vm_size: Optional[pulumi.Input[str]] = None):
        """
        AML Compute properties
        :param pulumi.Input[bool] enable_node_public_ip: Enable or disable node public IP address provisioning. Possible values are: true - Indicates that the compute nodes will have public IPs provisioned. false - Indicates that the compute nodes will have a private endpoint and no public IPs.
        :param pulumi.Input[bool] isolated_network: Network is isolated or not
        :param pulumi.Input[Union[str, 'OsType']] os_type: Compute OS Type
        :param pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']] remote_login_port_public_access: State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined, else is open all public nodes. It can be default only during cluster creation time, after creation it will be either enabled or disabled.
        :param pulumi.Input['ScaleSettingsArgs'] scale_settings: Scale settings for AML Compute
        :param pulumi.Input['ResourceIdArgs'] subnet: Virtual network subnet resource ID the compute nodes belong to.
        :param pulumi.Input['UserAccountCredentialsArgs'] user_account_credentials: Credentials for an administrator user account that will be created on each compute node.
        :param pulumi.Input['VirtualMachineImageArgs'] virtual_machine_image: Virtual Machine image for AML Compute - windows only
        :param pulumi.Input[Union[str, 'VmPriority']] vm_priority: Virtual Machine priority
        :param pulumi.Input[str] vm_size: Virtual Machine Size
        """
        # Codegen pattern: fields with API-level defaults are back-filled when
        # omitted, so the "is not None" guard that follows is then always true
        # and the field is always stored.
        if enable_node_public_ip is None:
            enable_node_public_ip = True
        if enable_node_public_ip is not None:
            pulumi.set(__self__, "enable_node_public_ip", enable_node_public_ip)
        if isolated_network is not None:
            pulumi.set(__self__, "isolated_network", isolated_network)
        if os_type is None:
            os_type = 'Linux'
        if os_type is not None:
            pulumi.set(__self__, "os_type", os_type)
        if remote_login_port_public_access is None:
            remote_login_port_public_access = 'NotSpecified'
        if remote_login_port_public_access is not None:
            pulumi.set(__self__, "remote_login_port_public_access", remote_login_port_public_access)
        if scale_settings is not None:
            pulumi.set(__self__, "scale_settings", scale_settings)
        if subnet is not None:
            pulumi.set(__self__, "subnet", subnet)
        if user_account_credentials is not None:
            pulumi.set(__self__, "user_account_credentials", user_account_credentials)
        if virtual_machine_image is not None:
            pulumi.set(__self__, "virtual_machine_image", virtual_machine_image)
        if vm_priority is not None:
            pulumi.set(__self__, "vm_priority", vm_priority)
        if vm_size is not None:
            pulumi.set(__self__, "vm_size", vm_size)

    @property
    @pulumi.getter(name="enableNodePublicIp")
    def enable_node_public_ip(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable or disable node public IP address provisioning. Possible values are: true - Indicates that the compute nodes will have public IPs provisioned. false - Indicates that the compute nodes will have a private endpoint and no public IPs.
        """
        return pulumi.get(self, "enable_node_public_ip")

    @enable_node_public_ip.setter
    def enable_node_public_ip(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_node_public_ip", value)

    @property
    @pulumi.getter(name="isolatedNetwork")
    def isolated_network(self) -> Optional[pulumi.Input[bool]]:
        """
        Network is isolated or not
        """
        return pulumi.get(self, "isolated_network")

    @isolated_network.setter
    def isolated_network(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "isolated_network", value)

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[pulumi.Input[Union[str, 'OsType']]]:
        """
        Compute OS Type
        """
        return pulumi.get(self, "os_type")

    @os_type.setter
    def os_type(self, value: Optional[pulumi.Input[Union[str, 'OsType']]]):
        pulumi.set(self, "os_type", value)

    @property
    @pulumi.getter(name="remoteLoginPortPublicAccess")
    def remote_login_port_public_access(self) -> Optional[pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']]]:
        """
        State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined, else is open all public nodes. It can be default only during cluster creation time, after creation it will be either enabled or disabled.
        """
        return pulumi.get(self, "remote_login_port_public_access")

    @remote_login_port_public_access.setter
    def remote_login_port_public_access(self, value: Optional[pulumi.Input[Union[str, 'RemoteLoginPortPublicAccess']]]):
        pulumi.set(self, "remote_login_port_public_access", value)

    @property
    @pulumi.getter(name="scaleSettings")
    def scale_settings(self) -> Optional[pulumi.Input['ScaleSettingsArgs']]:
        """
        Scale settings for AML Compute
        """
        return pulumi.get(self, "scale_settings")

    @scale_settings.setter
    def scale_settings(self, value: Optional[pulumi.Input['ScaleSettingsArgs']]):
        pulumi.set(self, "scale_settings", value)

    @property
    @pulumi.getter
    def subnet(self) -> Optional[pulumi.Input['ResourceIdArgs']]:
        """
        Virtual network subnet resource ID the compute nodes belong to.
        """
        return pulumi.get(self, "subnet")

    @subnet.setter
    def subnet(self, value: Optional[pulumi.Input['ResourceIdArgs']]):
        pulumi.set(self, "subnet", value)

    @property
    @pulumi.getter(name="userAccountCredentials")
    def user_account_credentials(self) -> Optional[pulumi.Input['UserAccountCredentialsArgs']]:
        """
        Credentials for an administrator user account that will be created on each compute node.
        """
        return pulumi.get(self, "user_account_credentials")

    @user_account_credentials.setter
    def user_account_credentials(self, value: Optional[pulumi.Input['UserAccountCredentialsArgs']]):
        pulumi.set(self, "user_account_credentials", value)

    @property
    @pulumi.getter(name="virtualMachineImage")
    def virtual_machine_image(self) -> Optional[pulumi.Input['VirtualMachineImageArgs']]:
        """
        Virtual Machine image for AML Compute - windows only
        """
        return pulumi.get(self, "virtual_machine_image")

    @virtual_machine_image.setter
    def virtual_machine_image(self, value: Optional[pulumi.Input['VirtualMachineImageArgs']]):
        pulumi.set(self, "virtual_machine_image", value)

    @property
    @pulumi.getter(name="vmPriority")
    def vm_priority(self) -> Optional[pulumi.Input[Union[str, 'VmPriority']]]:
        """
        Virtual Machine priority
        """
        return pulumi.get(self, "vm_priority")

    @vm_priority.setter
    def vm_priority(self, value: Optional[pulumi.Input[Union[str, 'VmPriority']]]):
        pulumi.set(self, "vm_priority", value)

    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> Optional[pulumi.Input[str]]:
        """
        Virtual Machine Size
        """
        return pulumi.get(self, "vm_size")

    @vm_size.setter
    def vm_size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vm_size", value)
@pulumi.input_type
class AmlComputeArgs:
    # Autogenerated Pulumi input type for an Azure Machine Learning compute.
    def __init__(__self__, *,
                 compute_type: pulumi.Input[str],
                 compute_location: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 disable_local_auth: Optional[pulumi.Input[bool]] = None,
                 properties: Optional[pulumi.Input['AmlComputePropertiesArgs']] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        An Azure Machine Learning compute.
        :param pulumi.Input[str] compute_type: The type of compute
               Expected value is 'AmlCompute'.
        :param pulumi.Input[str] compute_location: Location for the underlying compute
        :param pulumi.Input[str] description: The description of the Machine Learning compute.
        :param pulumi.Input[bool] disable_local_auth: Opt-out of local authentication and ensure customers can use only MSI and AAD exclusively for authentication.
        :param pulumi.Input['AmlComputePropertiesArgs'] properties: AML Compute properties
        :param pulumi.Input[str] resource_id: ARM resource id of the underlying compute
        """
        # NOTE: the discriminator is pinned — whatever value the caller passes
        # for compute_type is ignored and 'AmlCompute' is stored instead.
        pulumi.set(__self__, "compute_type", 'AmlCompute')
        if compute_location is not None:
            pulumi.set(__self__, "compute_location", compute_location)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if disable_local_auth is not None:
            pulumi.set(__self__, "disable_local_auth", disable_local_auth)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)

    @property
    @pulumi.getter(name="computeType")
    def compute_type(self) -> pulumi.Input[str]:
        """
        The type of compute
        Expected value is 'AmlCompute'.
        """
        return pulumi.get(self, "compute_type")

    @compute_type.setter
    def compute_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "compute_type", value)

    @property
    @pulumi.getter(name="computeLocation")
    def compute_location(self) -> Optional[pulumi.Input[str]]:
        """
        Location for the underlying compute
        """
        return pulumi.get(self, "compute_location")

    @compute_location.setter
    def compute_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "compute_location", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the Machine Learning compute.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="disableLocalAuth")
    def disable_local_auth(self) -> Optional[pulumi.Input[bool]]:
        """
        Opt-out of local authentication and ensure customers can use only MSI and AAD exclusively for authentication.
        """
        return pulumi.get(self, "disable_local_auth")

    @disable_local_auth.setter
    def disable_local_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disable_local_auth", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input['AmlComputePropertiesArgs']]:
        """
        AML Compute properties
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input['AmlComputePropertiesArgs']]):
        pulumi.set(self, "properties", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM resource id of the underlying compute
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
@pulumi.input_type
class AmlTokenArgs:
    # Autogenerated Pulumi input type; the identity_type discriminator is
    # pinned to 'AMLToken' regardless of the argument value.
    def __init__(__self__, *,
                 identity_type: pulumi.Input[str]):
        """
        AML Token identity configuration.
        :param pulumi.Input[str] identity_type: Enum to determine identity framework.
               Expected value is 'AMLToken'.
        """
        # The passed identity_type is ignored; 'AMLToken' is always stored.
        pulumi.set(__self__, "identity_type", 'AMLToken')

    @property
    @pulumi.getter(name="identityType")
    def identity_type(self) -> pulumi.Input[str]:
        """
        Enum to determine identity framework.
        Expected value is 'AMLToken'.
        """
        return pulumi.get(self, "identity_type")

    @identity_type.setter
    def identity_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "identity_type", value)
@pulumi.input_type
class AssignedUserArgs:
    # Autogenerated Pulumi input type; both fields are required.
    def __init__(__self__, *,
                 object_id: pulumi.Input[str],
                 tenant_id: pulumi.Input[str]):
        """
        A user that can be assigned to a compute instance.
        :param pulumi.Input[str] object_id: User's AAD Object Id.
        :param pulumi.Input[str] tenant_id: User's AAD Tenant Id.
        """
        pulumi.set(__self__, "object_id", object_id)
        pulumi.set(__self__, "tenant_id", tenant_id)

    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> pulumi.Input[str]:
        """
        User's AAD Object Id.
        """
        return pulumi.get(self, "object_id")

    @object_id.setter
    def object_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "object_id", value)

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> pulumi.Input[str]:
        """
        User's AAD Tenant Id.
        """
        return pulumi.get(self, "tenant_id")

    @tenant_id.setter
    def tenant_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class AutoPausePropertiesArgs:
def __init__(__self__, *,
             delay_in_minutes: Optional[pulumi.Input[int]] = None,
             enabled: Optional[pulumi.Input[bool]] = None):
    """
    Auto pause properties

    :param pulumi.Input[int] delay_in_minutes: Idle delay before auto-pause triggers.
    :param pulumi.Input[bool] enabled: Whether auto-pause is enabled.
    """
    if delay_in_minutes is not None:
        pulumi.set(__self__, "delay_in_minutes", delay_in_minutes)
    if enabled is not None:
        pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter(name="delayInMinutes")
def delay_in_minutes(self) -> Optional[pulumi.Input[int]]:
    """Idle delay in minutes before the resource auto-pauses."""
    return pulumi.get(self, "delay_in_minutes")
@delay_in_minutes.setter
| |
<reponame>rachaonline789/hellomod
#
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
    # Thrift-generated service interface (see DO-NOT-EDIT header above).
    # Server-side handlers implement these methods; Client below provides the
    # matching RPC stubs. All bodies are intentionally `pass` stubs.
    def findBuddyContactsByQuery(self, language, country, query, fromIndex, count, requestSource):
        """
        Parameters:
         - language
         - country
         - query
         - fromIndex
         - count
         - requestSource
        """
        pass

    def getBuddyContacts(self, language, country, classification, fromIndex, count):
        """
        Parameters:
         - language
         - country
         - classification
         - fromIndex
         - count
        """
        pass

    def getBuddyDetail(self, buddyMid):
        """
        Parameters:
         - buddyMid
        """
        pass

    def getBuddyOnAir(self, buddyMid):
        """
        Parameters:
         - buddyMid
        """
        pass

    def getCountriesHavingBuddy(self):
        pass

    def getNewlyReleasedBuddyIds(self, country):
        """
        Parameters:
         - country
        """
        pass

    def getPopularBuddyBanner(self, language, country, applicationType, resourceSpecification):
        """
        Parameters:
         - language
         - country
         - applicationType
         - resourceSpecification
        """
        pass

    def getPopularBuddyLists(self, language, country):
        """
        Parameters:
         - language
         - country
        """
        pass

    def getPromotedBuddyContacts(self, language, country):
        """
        Parameters:
         - language
         - country
        """
        pass
class Client(Iface):
    # Thrift-generated client: each RPC is a send_* (serialize request) /
    # recv_* (deserialize reply or raise) pair behind a blocking facade.
    def __init__(self, iprot, oprot=None):
        # With a single protocol argument the same protocol is used for both
        # directions; _seqid is never incremented by this generated code.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0

    def findBuddyContactsByQuery(self, language, country, query, fromIndex, count, requestSource):
        """
        Parameters:
         - language
         - country
         - query
         - fromIndex
         - count
         - requestSource
        """
        self.send_findBuddyContactsByQuery(language, country, query, fromIndex, count, requestSource)
        return self.recv_findBuddyContactsByQuery()

    def send_findBuddyContactsByQuery(self, language, country, query, fromIndex, count, requestSource):
        # Serialize the argument struct and flush it to the transport.
        self._oprot.writeMessageBegin('findBuddyContactsByQuery', TMessageType.CALL, self._seqid)
        args = findBuddyContactsByQuery_args()
        args.language = language
        args.country = country
        args.query = query
        args.fromIndex = fromIndex
        args.count = count
        args.requestSource = requestSource
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_findBuddyContactsByQuery(self):
        # Read the reply; re-raise wire-level or declared service exceptions.
        # NOTE(review): generated code does not verify rseqid against _seqid.
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = findBuddyContactsByQuery_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "findBuddyContactsByQuery failed: unknown result")

    def getBuddyContacts(self, language, country, classification, fromIndex, count):
        """
        Parameters:
         - language
         - country
         - classification
         - fromIndex
         - count
        """
        self.send_getBuddyContacts(language, country, classification, fromIndex, count)
        return self.recv_getBuddyContacts()

    def send_getBuddyContacts(self, language, country, classification, fromIndex, count):
        self._oprot.writeMessageBegin('getBuddyContacts', TMessageType.CALL, self._seqid)
        args = getBuddyContacts_args()
        args.language = language
        args.country = country
        args.classification = classification
        args.fromIndex = fromIndex
        args.count = count
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_getBuddyContacts(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getBuddyContacts_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getBuddyContacts failed: unknown result")

    def getBuddyDetail(self, buddyMid):
        """
        Parameters:
         - buddyMid
        """
        self.send_getBuddyDetail(buddyMid)
        return self.recv_getBuddyDetail()

    def send_getBuddyDetail(self, buddyMid):
        self._oprot.writeMessageBegin('getBuddyDetail', TMessageType.CALL, self._seqid)
        args = getBuddyDetail_args()
        args.buddyMid = buddyMid
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_getBuddyDetail(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getBuddyDetail_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getBuddyDetail failed: unknown result")

    def getBuddyOnAir(self, buddyMid):
        """
        Parameters:
         - buddyMid
        """
        self.send_getBuddyOnAir(buddyMid)
        return self.recv_getBuddyOnAir()

    def send_getBuddyOnAir(self, buddyMid):
        self._oprot.writeMessageBegin('getBuddyOnAir', TMessageType.CALL, self._seqid)
        args = getBuddyOnAir_args()
        args.buddyMid = buddyMid
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_getBuddyOnAir(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getBuddyOnAir_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getBuddyOnAir failed: unknown result")

    def getCountriesHavingBuddy(self):
        self.send_getCountriesHavingBuddy()
        return self.recv_getCountriesHavingBuddy()

    def send_getCountriesHavingBuddy(self):
        self._oprot.writeMessageBegin('getCountriesHavingBuddy', TMessageType.CALL, self._seqid)
        args = getCountriesHavingBuddy_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_getCountriesHavingBuddy(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getCountriesHavingBuddy_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getCountriesHavingBuddy failed: unknown result")

    def getNewlyReleasedBuddyIds(self, country):
        """
        Parameters:
         - country
        """
        self.send_getNewlyReleasedBuddyIds(country)
        return self.recv_getNewlyReleasedBuddyIds()

    def send_getNewlyReleasedBuddyIds(self, country):
        self._oprot.writeMessageBegin('getNewlyReleasedBuddyIds', TMessageType.CALL, self._seqid)
        args = getNewlyReleasedBuddyIds_args()
        args.country = country
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_getNewlyReleasedBuddyIds(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getNewlyReleasedBuddyIds_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getNewlyReleasedBuddyIds failed: unknown result")

    def getPopularBuddyBanner(self, language, country, applicationType, resourceSpecification):
        """
        Parameters:
         - language
         - country
         - applicationType
         - resourceSpecification
        """
        self.send_getPopularBuddyBanner(language, country, applicationType, resourceSpecification)
        return self.recv_getPopularBuddyBanner()

    def send_getPopularBuddyBanner(self, language, country, applicationType, resourceSpecification):
        self._oprot.writeMessageBegin('getPopularBuddyBanner', TMessageType.CALL, self._seqid)
        args = getPopularBuddyBanner_args()
        args.language = language
        args.country = country
        args.applicationType = applicationType
        args.resourceSpecification = resourceSpecification
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_getPopularBuddyBanner(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getPopularBuddyBanner_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getPopularBuddyBanner failed: unknown result")

    def getPopularBuddyLists(self, language, country):
        """
        Parameters:
         - language
         - country
        """
        self.send_getPopularBuddyLists(language, country)
        return self.recv_getPopularBuddyLists()

    def send_getPopularBuddyLists(self, language, country):
        self._oprot.writeMessageBegin('getPopularBuddyLists', TMessageType.CALL, self._seqid)
        args = getPopularBuddyLists_args()
        args.language = language
        args.country = country
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_getPopularBuddyLists(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getPopularBuddyLists_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getPopularBuddyLists failed: unknown result")

    def getPromotedBuddyContacts(self, language, country):
        """
        Parameters:
         - language
         - country
        """
        self.send_getPromotedBuddyContacts(language, country)
        return self.recv_getPromotedBuddyContacts()

    def send_getPromotedBuddyContacts(self, language, country):
        self._oprot.writeMessageBegin('getPromotedBuddyContacts', TMessageType.CALL, self._seqid)
        args = getPromotedBuddyContacts_args()
        args.language = language
        args.country = country
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_getPromotedBuddyContacts(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getPromotedBuddyContacts_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getPromotedBuddyContacts failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
    # handler implements Iface; _processMap maps wire-level method names to
    # unbound dispatch functions invoked by process().
    self._handler = handler
    self._processMap = {}
    self._processMap["findBuddyContactsByQuery"] = Processor.process_findBuddyContactsByQuery
    self._processMap["getBuddyContacts"] = Processor.process_getBuddyContacts
    self._processMap["getBuddyDetail"] = Processor.process_getBuddyDetail
    self._processMap["getBuddyOnAir"] = Processor.process_getBuddyOnAir
    self._processMap["getCountriesHavingBuddy"] = Processor.process_getCountriesHavingBuddy
    self._processMap["getNewlyReleasedBuddyIds"] = Processor.process_getNewlyReleasedBuddyIds
    self._processMap["getPopularBuddyBanner"] = Processor.process_getPopularBuddyBanner
    self._processMap["getPopularBuddyLists"] = Processor.process_getPopularBuddyLists
    self._processMap["getPromotedBuddyContacts"] = Processor.process_getPromotedBuddyContacts
def process(self, iprot, oprot):
    # Read one incoming message and dispatch it by method name.
    # NOTE(review): generated code returns None on the unknown-method path but
    # True on the dispatch path; Thrift servers ignore the return value.
    (name, type, seqid) = iprot.readMessageBegin()
    if name not in self._processMap:
        # Unknown RPC: drain the payload, then reply with UNKNOWN_METHOD.
        iprot.skip(TType.STRUCT)
        iprot.readMessageEnd()
        x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
        oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
        x.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
        return
    else:
        self._processMap[name](self, seqid, iprot, oprot)
        return True
def process_findBuddyContactsByQuery(self, seqid, iprot, oprot):
    # Decode the request, invoke the handler, and write the reply (or a
    # serialized exception) back under the caller's sequence id.
    args = findBuddyContactsByQuery_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = findBuddyContactsByQuery_result()
    try:
        result.success = self._handler.findBuddyContactsByQuery(args.language, args.country, args.query, args.fromIndex, args.count, args.requestSource)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        # Transport failure: the connection is unusable, so propagate.
        raise
    except TalkException as e:
        # Declared service exception — returned to the client in the result struct.
        msg_type = TMessageType.REPLY
        result.e = e
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        # Unexpected handler error: log server-side, send an opaque INTERNAL_ERROR.
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("findBuddyContactsByQuery", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getBuddyContacts(self, seqid, iprot, oprot):
    # Same decode → handler → encode pattern as process_findBuddyContactsByQuery.
    args = getBuddyContacts_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getBuddyContacts_result()
    try:
        result.success = self._handler.getBuddyContacts(args.language, args.country, args.classification, args.fromIndex, args.count)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        raise
    except TalkException as e:
        msg_type = TMessageType.REPLY
        result.e = e
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getBuddyContacts", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getBuddyDetail(self, seqid, iprot, oprot):
    # Same decode → handler → encode pattern as process_findBuddyContactsByQuery.
    args = getBuddyDetail_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getBuddyDetail_result()
    try:
        result.success = self._handler.getBuddyDetail(args.buddyMid)
        msg_type = TMessageType.REPLY
    except TTransport.TTransportException:
        raise
    except TalkException as e:
        msg_type = TMessageType.REPLY
        result.e = e
    except TApplicationException as ex:
        logging.exception('TApplication exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = ex
    except Exception:
        logging.exception('Unexpected exception in handler')
        msg_type = TMessageType.EXCEPTION
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getBuddyDetail", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getBuddyOnAir(self, seqid, iprot, oprot):
args = getBuddyOnAir_args()
args.read(iprot)
iprot.readMessageEnd()
result = getBuddyOnAir_result()
try:
result.success = self._handler.getBuddyOnAir(args.buddyMid)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TalkException as e:
msg_type = TMessageType.REPLY
result.e = e
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
| |
Optional[PostalCode] = field(
default=None,
metadata={
"name": "PostalCode",
"type": "Element",
}
)
other_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##other",
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class DepartmentName:
    """
    :ivar content: Mixed content (text plus any-namespace child elements).
    :ivar type:
    :ivar code: Used by postal services to encode the name of the
        element.
    :ivar other_attributes: Catch-all for attributes from other namespaces.
    """
    # Generated XML binding (xsdata-style): field metadata drives (de)serialization.
    content: List[object] = field(
        default_factory=list,
        metadata={
            "type": "Wildcard",
            "namespace": "##any",
            "mixed": True,
        }
    )
    type: Optional[str] = field(
        default=None,
        metadata={
            "name": "Type",
            "type": "Attribute",
        }
    )
    code: Optional[str] = field(
        default=None,
        metadata={
            "name": "Code",
            "type": "Attribute",
        }
    )
    other_attributes: Dict = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class FirmType:
    """
    :ivar address_line:
    :ivar firm_name: Name of the firm
    :ivar department:
    :ivar mail_stop: A MailStop is where the the mail is delivered to
        within a premise/subpremise/firm or a facility.
    :ivar postal_code:
    :ivar other_element: Catch-all for elements from other namespaces.
    :ivar type:
    :ivar other_attributes: Catch-all for attributes from other namespaces.
    """
    # Generated XML binding; elements are qualified with X_AL_NAMESPACE.
    address_line: List[AddressLine] = field(
        default_factory=list,
        metadata={
            "name": "AddressLine",
            "type": "Element",
            "namespace": X_AL_NAMESPACE,
        }
    )
    firm_name: List["FirmType.FirmName"] = field(
        default_factory=list,
        metadata={
            "name": "FirmName",
            "type": "Element",
            "namespace": X_AL_NAMESPACE,
        }
    )
    department: List[Department] = field(
        default_factory=list,
        metadata={
            "name": "Department",
            "type": "Element",
            "namespace": X_AL_NAMESPACE,
        }
    )
    mail_stop: Optional[MailStopType] = field(
        default=None,
        metadata={
            "name": "MailStop",
            "type": "Element",
            "namespace": X_AL_NAMESPACE,
        }
    )
    postal_code: Optional[PostalCode] = field(
        default=None,
        metadata={
            "name": "PostalCode",
            "type": "Element",
            "namespace": X_AL_NAMESPACE,
        }
    )
    other_element: List[object] = field(
        default_factory=list,
        metadata={
            "type": "Wildcard",
            "namespace": "##other",
        }
    )
    type: Optional[str] = field(
        default=None,
        metadata={
            "name": "Type",
            "type": "Attribute",
        }
    )
    other_attributes: Dict = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )

    @dataclass
    class FirmName:
        """
        :ivar content: Mixed content (text plus any-namespace child elements).
        :ivar type:
        :ivar code: Used by postal services to encode the name of the
            element.
        :ivar other_attributes: Catch-all for attributes from other namespaces.
        """
        content: List[object] = field(
            default_factory=list,
            metadata={
                "type": "Wildcard",
                "namespace": "##any",
                "mixed": True,
            }
        )
        type: Optional[str] = field(
            default=None,
            metadata={
                "name": "Type",
                "type": "Attribute",
            }
        )
        code: Optional[str] = field(
            default=None,
            metadata={
                "name": "Code",
                "type": "Attribute",
            }
        )
        other_attributes: Dict = field(
            default_factory=dict,
            metadata={
                "type": "Attributes",
                "namespace": "##other",
            }
        )
@dataclass
class PostBox:
    """Specification of a postbox like mail delivery point.

    Only a single postbox number can be specified. Examples of postboxes
    are POBox, free mail numbers, etc.

    :ivar address_line:
    :ivar post_box_number: Specification of the number of a postbox
    :ivar post_box_number_prefix: Specification of the prefix of the
        post box number. eg. A in POBox:A-123
    :ivar post_box_number_suffix: Specification of the suffix of the
        post box number. eg. A in POBox:123A
    :ivar post_box_number_extension: Some countries like USA have POBox
        as 12345-123
    :ivar firm: Specification of a firm, company, organization, etc. It
        can be specified as part of an address that contains a street or
        a postbox. It is therefore different from a large mail user
        address, which contains no street.
    :ivar postal_code:
    :ivar other_element:
    :ivar type: Possible values are, not limited to: POBox and Freepost.
    :ivar indicator: LOCKED BAG NO:1234 where the Indicator is NO: and
        Type is LOCKED BAG
    :ivar other_attributes:
    """
    class Meta:
        # Default namespace for the whole element; the per-field metadata
        # below can therefore omit an explicit "namespace" entry.
        namespace = X_AL_NAMESPACE
    address_line: List[AddressLine] = field(
        default_factory=list,
        metadata={
            "name": "AddressLine",
            "type": "Element",
        }
    )
    # The only mandatory child element ("required": True).
    post_box_number: Optional["PostBox.PostBoxNumber"] = field(
        default=None,
        metadata={
            "name": "PostBoxNumber",
            "type": "Element",
            "required": True,
        }
    )
    post_box_number_prefix: Optional["PostBox.PostBoxNumberPrefix"] = field(
        default=None,
        metadata={
            "name": "PostBoxNumberPrefix",
            "type": "Element",
        }
    )
    post_box_number_suffix: Optional["PostBox.PostBoxNumberSuffix"] = field(
        default=None,
        metadata={
            "name": "PostBoxNumberSuffix",
            "type": "Element",
        }
    )
    post_box_number_extension: Optional["PostBox.PostBoxNumberExtension"] = field(
        default=None,
        metadata={
            "name": "PostBoxNumberExtension",
            "type": "Element",
        }
    )
    firm: Optional[FirmType] = field(
        default=None,
        metadata={
            "name": "Firm",
            "type": "Element",
        }
    )
    postal_code: Optional[PostalCode] = field(
        default=None,
        metadata={
            "name": "PostalCode",
            "type": "Element",
        }
    )
    # Wildcard: collects child elements from any other namespace.
    other_element: List[object] = field(
        default_factory=list,
        metadata={
            "type": "Wildcard",
            "namespace": "##other",
        }
    )
    type: Optional[str] = field(
        default=None,
        metadata={
            "name": "Type",
            "type": "Attribute",
        }
    )
    indicator: Optional[str] = field(
        default=None,
        metadata={
            "name": "Indicator",
            "type": "Attribute",
        }
    )
    # Catch-all for XML attributes from other namespaces.
    other_attributes: Dict = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
    @dataclass
    class PostBoxNumber:
        """
        Mixed-content postbox number.

        :ivar content:
        :ivar code: Used by postal services to encode the name of the
            element.
        :ivar other_attributes:
        """
        content: List[object] = field(
            default_factory=list,
            metadata={
                "type": "Wildcard",
                "namespace": "##any",
                "mixed": True,
            }
        )
        code: Optional[str] = field(
            default=None,
            metadata={
                "name": "Code",
                "type": "Attribute",
            }
        )
        other_attributes: Dict = field(
            default_factory=dict,
            metadata={
                "type": "Attributes",
                "namespace": "##other",
            }
        )
    @dataclass
    class PostBoxNumberPrefix:
        """
        :ivar content:
        :ivar number_prefix_separator: A-12 where 12 is number and A is
            prefix and "-" is the separator
        :ivar code: Used by postal services to encode the name of the
            element.
        :ivar other_attributes:
        """
        content: List[object] = field(
            default_factory=list,
            metadata={
                "type": "Wildcard",
                "namespace": "##any",
                "mixed": True,
            }
        )
        number_prefix_separator: Optional[str] = field(
            default=None,
            metadata={
                "name": "NumberPrefixSeparator",
                "type": "Attribute",
            }
        )
        code: Optional[str] = field(
            default=None,
            metadata={
                "name": "Code",
                "type": "Attribute",
            }
        )
        other_attributes: Dict = field(
            default_factory=dict,
            metadata={
                "type": "Attributes",
                "namespace": "##other",
            }
        )
    @dataclass
    class PostBoxNumberSuffix:
        """
        :ivar content:
        :ivar number_suffix_separator: 12-A where 12 is number and A is
            suffix and "-" is the separator
        :ivar code: Used by postal services to encode the name of the
            element.
        :ivar other_attributes:
        """
        content: List[object] = field(
            default_factory=list,
            metadata={
                "type": "Wildcard",
                "namespace": "##any",
                "mixed": True,
            }
        )
        number_suffix_separator: Optional[str] = field(
            default=None,
            metadata={
                "name": "NumberSuffixSeparator",
                "type": "Attribute",
            }
        )
        code: Optional[str] = field(
            default=None,
            metadata={
                "name": "Code",
                "type": "Attribute",
            }
        )
        other_attributes: Dict = field(
            default_factory=dict,
            metadata={
                "type": "Attributes",
                "namespace": "##other",
            }
        )
    @dataclass
    class PostBoxNumberExtension:
        """
        :ivar content:
        :ivar number_extension_separator: "-" is the
            NumberExtensionSeparator in POBOX:12345-123
        :ivar other_attributes:
        """
        content: List[object] = field(
            default_factory=list,
            metadata={
                "type": "Wildcard",
                "namespace": "##any",
                "mixed": True,
            }
        )
        number_extension_separator: Optional[str] = field(
            default=None,
            metadata={
                "name": "NumberExtensionSeparator",
                "type": "Attribute",
            }
        )
        other_attributes: Dict = field(
            default_factory=dict,
            metadata={
                "type": "Attributes",
                "namespace": "##other",
            }
        )
@dataclass
class SubPremiseType:
"""
:ivar address_line:
:ivar sub_premise_name: Name of the SubPremise
:ivar sub_premise_location: Name of the SubPremise Location. eg.
LOBBY, BASEMENT, GROUND FLOOR, etc...
:ivar sub_premise_number: Specification of the identifier of a sub-
premise. Examples of sub-premises are apartments and suites.
sub-premises in a building are often uniquely identified by
means of consecutive identifiers. The identifier can be a
number, a letter or any combination of the two. In the latter
case, the identifier includes exactly one variable (range) part,
which is either a number or a single letter that is surrounded
by fixed parts at the left (prefix) or the right (postfix).
:ivar sub_premise_number_prefix: Prefix of the sub premise number.
eg. A in A-12
:ivar sub_premise_number_suffix: Suffix of the sub premise number.
eg. A in 12A
:ivar building_name: Name of the building
:ivar firm: Specification of a firm, company, organization, etc. It
can be specified as part of an address that contains a street or
a postbox. It is therefore different from a large mail user
address, which contains no street.
:ivar mail_stop: A MailStop is where the the mail is delivered to
within a premise/subpremise/firm or a facility.
:ivar postal_code:
:ivar sub_premise: Specification of a single sub-premise. Examples
of sub-premises are apartments and suites. Each sub-premise
should be uniquely identifiable. SubPremiseType: Specification
of the name of a sub-premise type. Possible values not limited
to: Suite, Appartment, Floor, Unknown Multiple levels within a
premise by recursively calling SubPremise Eg. Level 4, Suite 2,
Block C
:ivar other_element:
:ivar type:
:ivar other_attributes:
"""
address_line: List[AddressLine] = field(
default_factory=list,
metadata={
"name": "AddressLine",
"type": "Element",
"namespace": X_AL_NAMESPACE,
}
)
sub_premise_name: List["SubPremiseType.SubPremiseName"] = field(
default_factory=list,
metadata={
"name": "SubPremiseName",
"type": "Element",
"namespace": X_AL_NAMESPACE,
}
)
sub_premise_location: Optional["SubPremiseType.SubPremiseLocation"] = field(
default=None,
metadata={
"name": "SubPremiseLocation",
"type": "Element",
"namespace": X_AL_NAMESPACE,
}
)
sub_premise_number: List["SubPremiseType.SubPremiseNumber"] = field(
default_factory=list,
metadata={
"name": "SubPremiseNumber",
"type": "Element",
"namespace": X_AL_NAMESPACE,
}
)
sub_premise_number_prefix: List["SubPremiseType.SubPremiseNumberPrefix"] = field(
default_factory=list,
metadata={
"name": "SubPremiseNumberPrefix",
"type": "Element",
"namespace": X_AL_NAMESPACE,
}
)
sub_premise_number_suffix: List["SubPremiseType.SubPremiseNumberSuffix"] = field(
default_factory=list,
metadata={
"name": "SubPremiseNumberSuffix",
"type": "Element",
"namespace": X_AL_NAMESPACE,
}
)
building_name: List[BuildingNameType] = field(
default_factory=list,
metadata={
"name": "BuildingName",
"type": "Element",
"namespace": X_AL_NAMESPACE,
}
)
firm: Optional[FirmType] = field(
default=None,
metadata={
"name": "Firm",
"type": "Element",
"namespace": | |
(R_values[14] / (1 + w * 1j * t_values[14]))
+ (R_values[15] / (1 + w * 1j * t_values[15]))
+ (R_values[16] / (1 + w * 1j * t_values[16]))
+ (R_values[17] / (1 + w * 1j * t_values[17]))
+ (R_values[18] / (1 + w * 1j * t_values[18]))
+ (R_values[19] / (1 + w * 1j * t_values[19]))
+ (R_values[20] / (1 + w * 1j * t_values[20]))
+ (R_values[21] / (1 + w * 1j * t_values[21]))
+ (R_values[22] / (1 + w * 1j * t_values[22]))
+ (R_values[23] / (1 + w * 1j * t_values[23]))
+ (R_values[24] / (1 + w * 1j * t_values[24]))
+ (R_values[25] / (1 + w * 1j * t_values[25]))
+ (R_values[26] / (1 + w * 1j * t_values[26]))
+ (R_values[27] / (1 + w * 1j * t_values[27]))
+ (R_values[28] / (1 + w * 1j * t_values[28]))
+ (R_values[29] / (1 + w * 1j * t_values[29]))
+ (R_values[30] / (1 + w * 1j * t_values[30]))
+ (R_values[31] / (1 + w * 1j * t_values[31]))
+ (R_values[32] / (1 + w * 1j * t_values[32]))
+ (R_values[33] / (1 + w * 1j * t_values[33]))
+ (R_values[34] / (1 + w * 1j * t_values[34]))
+ (R_values[35] / (1 + w * 1j * t_values[35]))
+ (R_values[36] / (1 + w * 1j * t_values[36]))
+ (R_values[37] / (1 + w * 1j * t_values[37]))
+ (R_values[38] / (1 + w * 1j * t_values[38]))
+ (R_values[39] / (1 + w * 1j * t_values[39]))
+ (R_values[40] / (1 + w * 1j * t_values[40]))
+ (R_values[41] / (1 + w * 1j * t_values[41]))
+ (R_values[42] / (1 + w * 1j * t_values[42]))
+ (R_values[43] / (1 + w * 1j * t_values[43]))
+ (R_values[44] / (1 + w * 1j * t_values[44]))
+ (R_values[45] / (1 + w * 1j * t_values[45]))
+ (R_values[46] / (1 + w * 1j * t_values[46]))
+ (R_values[47] / (1 + w * 1j * t_values[47]))
+ (R_values[48] / (1 + w * 1j * t_values[48]))
+ (R_values[49] / (1 + w * 1j * t_values[49]))
+ (R_values[50] / (1 + w * 1j * t_values[50]))
+ (R_values[51] / (1 + w * 1j * t_values[51]))
+ (R_values[52] / (1 + w * 1j * t_values[52]))
+ (R_values[53] / (1 + w * 1j * t_values[53]))
+ (R_values[54] / (1 + w * 1j * t_values[54]))
+ (R_values[55] / (1 + w * 1j * t_values[55]))
+ (R_values[56] / (1 + w * 1j * t_values[56]))
+ (R_values[57] / (1 + w * 1j * t_values[57]))
+ (R_values[58] / (1 + w * 1j * t_values[58]))
+ (R_values[59] / (1 + w * 1j * t_values[59]))
+ (R_values[60] / (1 + w * 1j * t_values[60]))
+ (R_values[61] / (1 + w * 1j * t_values[61]))
+ (R_values[62] / (1 + w * 1j * t_values[62]))
+ (R_values[63] / (1 + w * 1j * t_values[63]))
+ (R_values[64] / (1 + w * 1j * t_values[64]))
+ (R_values[65] / (1 + w * 1j * t_values[65]))
+ (R_values[66] / (1 + w * 1j * t_values[66]))
+ (R_values[67] / (1 + w * 1j * t_values[67]))
+ (R_values[68] / (1 + w * 1j * t_values[68]))
+ (R_values[69] / (1 + w * 1j * t_values[69]))
+ (R_values[70] / (1 + w * 1j * t_values[70]))
+ (R_values[71] / (1 + w * 1j * t_values[71]))
+ (R_values[72] / (1 + w * 1j * t_values[72]))
+ (R_values[73] / (1 + w * 1j * t_values[73]))
+ (R_values[74] / (1 + w * 1j * t_values[74]))
+ (R_values[75] / (1 + w * 1j * t_values[75]))
+ (R_values[76] / (1 + w * 1j * t_values[76]))
)
def KK_RC78(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
+ (R_values[8] / (1 + w * 1j * t_values[8]))
+ (R_values[9] / (1 + w * 1j * t_values[9]))
+ (R_values[10] / (1 + w * 1j * t_values[10]))
+ (R_values[11] / (1 + w * 1j * t_values[11]))
+ (R_values[12] / (1 + w * 1j * t_values[12]))
+ (R_values[13] / (1 + w * 1j * t_values[13]))
+ (R_values[14] / (1 + w * 1j * t_values[14]))
+ (R_values[15] / (1 + w * 1j * t_values[15]))
+ (R_values[16] / (1 + w * 1j * t_values[16]))
+ (R_values[17] / (1 + w * 1j * t_values[17]))
+ (R_values[18] / (1 + w * 1j * t_values[18]))
+ (R_values[19] / (1 + w * 1j * t_values[19]))
+ (R_values[20] / (1 + w * 1j * t_values[20]))
+ (R_values[21] / (1 + w * 1j * t_values[21]))
+ (R_values[22] / (1 + w * 1j * t_values[22]))
+ (R_values[23] / (1 + w * 1j * t_values[23]))
+ (R_values[24] / (1 + w * 1j * t_values[24]))
+ (R_values[25] / (1 + w * 1j * t_values[25]))
+ (R_values[26] / (1 + w * 1j * t_values[26]))
+ (R_values[27] / (1 + w * 1j * t_values[27]))
+ (R_values[28] / (1 + w * 1j * t_values[28]))
+ (R_values[29] / (1 + w * 1j * t_values[29]))
+ (R_values[30] / (1 + w * 1j * t_values[30]))
+ (R_values[31] / (1 + w * 1j * t_values[31]))
+ (R_values[32] / (1 + w * 1j * t_values[32]))
+ (R_values[33] / (1 + w * 1j * t_values[33]))
+ (R_values[34] / (1 + w * 1j * t_values[34]))
+ (R_values[35] / (1 + w * 1j * t_values[35]))
+ (R_values[36] / (1 + w * 1j * t_values[36]))
+ (R_values[37] / (1 + w * 1j * t_values[37]))
+ (R_values[38] / (1 + w * 1j * t_values[38]))
+ (R_values[39] / (1 + w * 1j * t_values[39]))
+ (R_values[40] / (1 + w * 1j * t_values[40]))
+ (R_values[41] / (1 + w * 1j * t_values[41]))
+ (R_values[42] / (1 + w * 1j * t_values[42]))
+ (R_values[43] / (1 + w * 1j * t_values[43]))
+ (R_values[44] / (1 + w * 1j * t_values[44]))
+ (R_values[45] / (1 + w * 1j * t_values[45]))
+ (R_values[46] / (1 + w * 1j * t_values[46]))
+ (R_values[47] / (1 + w * 1j * t_values[47]))
+ (R_values[48] / (1 + w * 1j * t_values[48]))
+ (R_values[49] / (1 + w * 1j * t_values[49]))
+ (R_values[50] / (1 + w * 1j * t_values[50]))
+ (R_values[51] / (1 + w * 1j * t_values[51]))
+ (R_values[52] / (1 + w * 1j * t_values[52]))
+ (R_values[53] / (1 + w * 1j * t_values[53]))
+ (R_values[54] / (1 + w * 1j * t_values[54]))
+ (R_values[55] / (1 + w * 1j * t_values[55]))
+ (R_values[56] / (1 + w * 1j * t_values[56]))
+ (R_values[57] / (1 + w * 1j * t_values[57]))
+ (R_values[58] / (1 + w * 1j * t_values[58]))
+ (R_values[59] / (1 + w * 1j * t_values[59]))
+ (R_values[60] / (1 + w * 1j * t_values[60]))
+ (R_values[61] / (1 + w * 1j * t_values[61]))
+ (R_values[62] / (1 + w * 1j * t_values[62]))
+ (R_values[63] / (1 | |
<filename>src/defs/classes/lodash.py
from typing import Any, Callable, Dict, Generic, List, Optional, Tuple, Type, TypeVar, Union
_L1 = TypeVar('_L1')  # element type of the wrapped collection
_L2 = TypeVar('_L2')  # result type produced by an iteratee/callback
_L3 = TypeVar('_L3', int, float)  # numeric type, constrained for max/min keys
_L4 = TypeVar('_L4')  # spare type variable (unused by the visible definitions)
# noinspection PyPep8Naming
class _LodashChain(Generic[_L1]):
    """Fluent wrapper around a list or dict, mirroring lodash's ``_(value)``
    chaining API.

    Every method except ``__init__`` is a typing stub (its body is ``pass``):
    this class only supplies signatures for static analysis, not a runnable
    implementation.  Methods returning ``'_LodashChain'`` continue the chain;
    the remaining methods (``first``, ``size``, ``value``, ...) terminate it.
    """
    def __init__(self, value: Union[List[_L1], Dict[Any, _L1]]) -> None:
        # Wrapped collection; double-underscore name-mangles the attribute.
        self.__inner = value
    def concat(self, other: Union[List[_L1], Dict[Any, _L1]]) -> '_LodashChain[_L1]':
        pass
    def chunk(self, size: int = 1) -> '_LodashChain[List[_L1]]':
        pass
    def compact(self) -> '_LodashChain[_L1]':
        pass
    def difference(self, *other: List[_L1]) -> '_LodashChain[_L1]':
        pass
    def drop(self, n: int = 1) -> '_LodashChain[_L1]':
        pass
    def dropRight(self, n: int = 1) -> '_LodashChain[_L1]':
        pass
    def dropRightWhile(self,
                       predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
                       thisArg: Any = None) -> '_LodashChain[_L1]':
        pass
    def dropWhile(self,
                  predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
                  thisArg: Any = None) -> '_LodashChain[_L1]':
        pass
    def fill(self, value: _L1, start: int = 0, end: int = 0) -> '_LodashChain[_L1]':
        pass
    def first(self) -> Optional[_L1]:
        pass
    def flatten(self) -> '_LodashChain':
        pass
    def flattenDeep(self) -> '_LodashChain':
        pass
    def initial(self) -> List[_L1]:
        pass
    def intersection(self, arrays: List[List[_L1]]) -> '_LodashChain':
        pass
    def last(self) -> Optional[Any]:
        pass
    def lastIndexOf(self, value: _L1, fromIndex: Union[int, bool] = 0) -> int:
        pass
    def pull(self, values: List[_L1]) -> '_LodashChain':
        pass
    def pullAt(self, indices: List[int]) -> '_LodashChain':
        pass
    def remove(self, predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
               thisArg: Any = None) -> '_LodashChain':
        pass
    def rest(self) -> '_LodashChain':
        pass
    def slice(self, start: int = 0, end: int = 0) -> '_LodashChain':
        pass
    def sortedIndex(self,
                    value: _L1,
                    iteratee: Union[str, Callable[[_L1], _L2], None] = None,
                    thisArg: Any = None) -> int:
        pass
    def sortedLastIndex(self,
                        value: _L1,
                        iteratee: Union[str, Callable[[_L1], _L2], None] = None,
                        thisArg: Any = None) -> int:
        pass
    def take(self, n: int = 1) -> '_LodashChain':
        pass
    def takeRight(self, n: int = 1) -> '_LodashChain':
        pass
    def takeRightWhile(self,
                       predicate: Union[str, Callable[[_L1], _L2], None] = None,
                       thisArg: Any = None) -> '_LodashChain':
        pass
    def takeWhile(self,
                  predicate: Union[str, Callable[[_L1], _L2], None] = None,
                  thisArg: Any = None) -> '_LodashChain':
        pass
    def union(self, arrays: List[List[_L1]]) -> '_LodashChain':
        pass
    def unique(self,
               isSorted: bool = False,
               iteratee: Union[str, Callable[[_L1], _L2], None] = None,
               thisArg: Any = None) -> List[_L1]:
        pass
    def uniq(self,
             isSorted: bool = False,
             iteratee: Union[str, Callable[[_L1], _L2], None] = None,
             thisArg: Any = None) -> List[_L1]:
        pass
    def unzip(self) -> '_LodashChain':
        pass
    def unzipWith(self,
                  iteratee: Optional[Callable[[Any, Any, Any, Any], Any]] = None,
                  thisArg: Any = None) -> '_LodashChain':
        pass
    def without(self, values: List[_L1]) -> '_LodashChain':
        pass
    def xor(self, arrays: List[List[_L1]]) -> '_LodashChain':
        pass
    def zip(self) -> '_LodashChain':
        pass
    def zipObject(self, values: Optional[List[Any]] = None) -> '_LodashChain':
        pass
    def zipWith(self,
                iteratee: Optional[Callable[[Any, Any, Any, Any], None]] = None,
                thisArg: Any = None) -> '_LodashChain':
        pass
    def all(self, predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
            thisArg: Any = None) -> bool:
        pass
    def any(self, predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
            thisArg: Any = None) -> bool:
        pass
    def at(self, *props: Any) -> List[_L1]:
        pass
    def countBy(self, iteratee: Union[str, Callable[[_L1], _L2], None] = None, thisArg: Any = None) -> '_LodashChain':
        pass
    def every(self,
              predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
              thisArg: Any = None) -> bool:
        pass
    def filter(self,
               predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
               thisArg: Any = None) -> '_LodashChain[_L1]':
        pass
    def find(self,
             predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
             thisArg: Any = None) -> _L1:
        pass
    def findLast(self,
                 predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
                 thisArg: Any = None) -> _L1:
        pass
    def findWhere(self,
                  predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
                  thisArg: Any = None) -> _L1:
        pass
    def forEach(self, iteratee: Callable[[_L1], Optional[bool]] = None, thisArg: Any = None) -> '_LodashChain[_L1]':
        pass
    def forEachRight(self,
                     iteratee: Callable[[_L1], Optional[bool]] = None,
                     thisArg: Any = None) -> '_LodashChain[_L1]':
        pass
    def groupBy(self, iteratee: Union[str, Callable[[_L1], _L2], None] = None, thisArg: Any = None) -> '_LodashChain':
        pass
    def includes(self, value: _L1, fromIndex: int = 0) -> bool:
        pass
    def indexBy(self, iteratee: Union[str, Callable[[_L1], _L2], None] = None, thisArg: Any = None) -> Dict[str, _L1]:
        pass
    def invoke(self, path: str, *args: Any) -> '_LodashChain':
        pass
    def map(self, iteratee: Union[str, Callable[[_L1], _L2], None] = None, thisArg: Any = None) -> '_LodashChain':
        pass
    def partition(self,
                  predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
                  thisArg: Any = None) -> '_LodashChain':
        pass
    def pluck(self, path: Union[str, List[str]]) -> '_LodashChain':
        pass
    def reduce(self,
               iteratee: Callable[[_L2, _L1], _L2] = None, accumulator: _L2 = None,
               thisArg: Any = None) -> _L2:
        pass
    def reduceRight(self,
                    iteratee: Callable[[_L2, _L1], _L2] = None, accumulator: _L2 = None,
                    thisArg: Any = None) -> _L2:
        pass
    def reject(self,
               predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
               thisArg: Any = None) -> '_LodashChain':
        pass
    def sample(self) -> Any:
        pass
    def shuffle(self) -> '_LodashChain':
        pass
    def size(self) -> int:
        pass
    def some(self,
             predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
             thisArg: Any = None) -> bool:
        pass
    def sortBy(self, iteratee: Union[str, Callable[[_L1], _L2], None] = None, thisArg: Any = None) -> '_LodashChain':
        pass
    def sortByAll(self, *iteratee: Union[str, Callable[[_L1], _L2], None]) -> '_LodashChain':
        pass
    def sortByOrder(self, iteratees: List[Union[str, Callable[[_L1], _L2], None]], orders: List[str]) -> '_LodashChain':
        pass
    def where(self, source: Any) -> '_LodashChain':
        pass
    def toArray(self) -> '_LodashChain':
        pass
    def toPlainObject(self) -> '_LodashChain':
        pass
    def sum(self, iteratee: Union[str, Callable[[_L1], _L2], None] = None, thisArg: Any = None) -> _L2:
        pass
    def keys(self) -> '_LodashChain':
        pass
    def mapKeys(self, iteratee: Callable[[str], str] = None, thisArg: Any = None) -> '_LodashChain':
        pass
    def mapValues(self, iteratee: Callable[[Any], Any] = None, thisArg: Any = None) -> '_LodashChain':
        pass
    def omit(self,
             predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str],
             thisArg: Any = None) -> '_LodashChain':
        pass
    def pairs(self) -> '_LodashChain':
        pass
    def values(self) -> '_LodashChain':
        pass
    def value(self) -> Any:
        pass
    def max(self, iteratee: Callable[[_L1], _L3] = None, thisArg: Any = None) -> _L1:
        pass
    def min(self, iteratee: Callable[[_L1], _L3] = None, thisArg: Any = None) -> _L1:
        pass
# noinspection PyPep8Naming
class _:
def __new__(cls, value: Union[List[_L1], Dict[Any, _L1]]) -> _LodashChain[_L1]:
return _LodashChain(value)
@staticmethod
def chunk(array: List[_L1], size: int = 1) -> List[List[_L1]]:
pass
@staticmethod
def compact(array: List[_L1]) -> List[_L1]:
pass
@staticmethod
def difference(array: List[_L1], *other: List[_L1]) -> List[_L1]:
pass
@staticmethod
def drop(array: List[_L1], n: int = 1) -> List[_L1]:
pass
@staticmethod
def dropRight(array: List[_L1], n: int = 1) -> List[_L1]:
pass
@staticmethod
def dropRightWhile(array: List[_L1],
predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
thisArg: Any = None) -> List[_L1]:
pass
@staticmethod
def dropWhile(array: List[_L1],
predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
thisArg: Any = None) -> List[_L1]:
pass
@staticmethod
def fill(array: List[_L1], value: _L1, start: int = 0, end: int = 0) -> List[_L1]:
pass
@staticmethod
def findIndex(array: List[_L1],
predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
thisArg: Any = None) -> int:
pass
@staticmethod
def findLastIndex(array: List[_L1],
predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
thisArg: Any = None) -> int:
pass
@staticmethod
def first(array: List[_L1]) -> Optional[_L1]:
pass
@staticmethod
def flatten(array: List[List[_L1]]) -> List[_L1]:
pass
@staticmethod
def flattenDeep(array: List[Any]) -> List[Any]:
pass
@staticmethod
def indexOf(array: List[_L1], value: _L1, fromIndex: Union[int, bool] = 0) -> int:
pass
@staticmethod
def initial(array: List[_L1]) -> List[_L1]:
pass
@staticmethod
def intersection(array: List[List[_L1]]) -> List[_L1]:
pass
@staticmethod
def last(array: List[_L1]) -> Optional[_L1]:
pass
@staticmethod
def lastIndexOf(array: List[_L1], value: _L1, fromIndex: Union[int, bool] = 0) -> int:
pass
@staticmethod
def pull(array: List[_L1], *values: _L1) -> List[_L1]:
pass
@staticmethod
def pullAt(array: List[_L1], indices: List[int]) -> List[_L1]:
pass
@staticmethod
def remove(array: List[_L1],
predicate: Union[Dict[str, Any], Callable[[_L1], bool], None, str] = None,
thisArg: Any = None) -> List[_L1]:
pass
@staticmethod
def rest(array: List[_L1]) -> List[_L1]:
pass
@staticmethod
def slice(array: List[_L1], start: int = 0, end: int = 0) -> List[_L1]:
pass
@staticmethod
def sortedIndex(array: List[_L1],
value: _L1,
iteratee: Union[str, Callable[[_L1], _L2], None] = None,
thisArg: Any = None) -> int:
pass
@staticmethod
def sortedLastIndex(array: List[_L1],
value: _L1,
iteratee: Union[str, Callable[[_L1], _L2], None] = None,
thisArg: Any = None) | |
<gh_stars>0
import sys
from typing import List
from concurrent.futures import ThreadPoolExecutor, as_completed
import grpc
import pickle
#import structures
#from structures import Node
sys.path.append("../")
from net import net_pb2, net_pb2_grpc
from server.connect import Conndb
import etcd3
from etcd.etcd import Etcd_S
import client.structures
from client.structures import Node
# Shared etcd v3 client; used below as the catalog store for table/fragment
# metadata (keys like '/table/<name>/columns').
etcd = etcd3.client(host="10.77.70.61", port=2379)
def NodeEqual(node_x, node_y):
    """Return True iff every query-plan attribute of the two nodes matches.

    Compares the full set of Node fields: identity (id/type), tree links
    (parent/children), placement (tables/site/size), and the
    operator-specific payloads (select/projection/join/fragment fields).
    """
    # A field tuple replaces the original long `eq = eq and ...` chain;
    # it is easier to keep in sync with Node2dict/dict2Node below.
    compared_fields = (
        'id', 'type', 'parent', 'children', 'tables', 'site', 'size',
        'if_valid', 'columns', 'f_id', 'f_name', 'select_condi',
        'projection', 'join', 'top',
    )
    return all(getattr(node_x, f) == getattr(node_y, f) for f in compared_fields)
def nodesEqual(nodes_x, nodes_y):
    """Return True iff both node lists have equal length and pairwise-equal nodes."""
    if len(nodes_x) != len(nodes_y):
        return False
    # all() short-circuits on the first mismatch; the original loop kept
    # accumulating `eq = eq and ...` over every remaining pair.
    return all(NodeEqual(x, y) for x, y in zip(nodes_x, nodes_y))
def Node2dict(node_x):
    """Serialize a query-plan Node into a plain dict (inverse of dict2Node).

    Key order matches the original incremental construction, so repr/str
    output (used by nodes2str) is unchanged.
    """
    return {
        'id': node_x.id,                      # node id, starts from 0
        'type': node_x.type,                  # fragment/select/projection/join/union
        'parent': node_x.parent,              # parent node id, default -1
        'children': node_x.children,          # children node ids, default []
        'tables': node_x.tables,              # table names in this node
        'site': node_x.site,                  # which site this node is on
        'size': node_x.size,                  # total bytes of data in this node
        'if_valid': node_x.if_valid,          # whether this node has been pruned
        'columns': node_x.columns,
        'f_id': node_x.f_id,                  # fragment id (when type is 'fragment')
        'f_name': node_x.f_name,
        'select_condi': node_x.select_condi,  # select condition (type 'select')
        'projection': node_x.projection,      # projected attributes (type 'projection')
        'join': node_x.join,                  # join condition (type 'join')
        'top': node_x.top,                    # corresponding id for join (type 'fragment')
    }
def dict2Node(dict_x):
    """Rebuild a Node from the dict produced by Node2dict.

    The key tuple below is in lockstep with Node.__init__'s positional
    parameters -- note 'columns' precedes 'if_valid' here, unlike the
    dict ordering used by Node2dict.
    """
    ctor_keys = (
        'id', 'type', 'parent', 'children', 'tables', 'site', 'size',
        'columns', 'if_valid', 'f_id', 'f_name', 'select_condi',
        'projection', 'join', 'top',
    )
    return Node(*(dict_x[key] for key in ctor_keys))
def str2nodes(str_x):
    """Parse the string form produced by nodes2str back into a list of Nodes.

    WARNING(review): eval() executes arbitrary code. If str_x can arrive
    from an untrusted source (e.g. over the network), switch to
    ast.literal_eval, which is sufficient for the literal dicts that
    nodes2str emits.
    """
    return [dict2Node(node_dict) for node_dict in eval(str_x)]
def nodes2str(nodes_x):
    """Serialize a list of Nodes to a string: the repr of a list of plain dicts."""
    return str([Node2dict(node) for node in nodes_x])
# ---------------------------------------------------------------------------
# Hand-built example distributed query plan: Customer JOIN Orders across
# 4 sites.  Leaves scan fragments; each site projects Orders down to
# (customer_id, quantity), joins with the Customer fragment, projects the
# final (name, quantity) columns, and node 20 unions the per-site results.
# ---------------------------------------------------------------------------
nodes = []
# Nodes 0-3: Customer fragment scans (all f_id=7, site 1), one per join below.
nodes.append(Node(0,'fragment',
                  parent = 12,
                  children = [],
                  tables=['Customer'],
                  site = 1,
                  size=0,
                  columns=[],
                  f_id=7))
nodes.append(Node(1,'fragment',
                  parent = 13,
                  children = [],
                  tables=['Customer'],
                  site = 1,
                  size=0,
                  columns=[],
                  f_id=7))
nodes.append(Node(2,'fragment',
                  parent = 14,
                  children = [],
                  tables=['Customer'],
                  site = 1,
                  size=0,
                  columns=[],
                  f_id=7))
nodes.append(Node(3,'fragment',
                  parent = 15,
                  children = [],
                  tables=['Customer'],
                  site = 1,
                  size=0,
                  columns=[],
                  f_id=7))
# Nodes 4-7: Orders fragments (f_id 9-12), one per site 1-4.
nodes.append(Node(4,'fragment',
                  parent = 8,
                  children = [],
                  tables=['Orders'],
                  site = 1,
                  size=0,
                  columns=[],
                  f_id=9))
nodes.append(Node(5,'fragment',
                  parent = 9,
                  children = [],
                  tables=['Orders'],
                  site = 2,
                  size=0,
                  columns=[],
                  f_id=10))
nodes.append(Node(6,'fragment',
                  parent = 10,
                  children = [],
                  tables=['Orders'],
                  site = 3,
                  size=0,
                  columns=[],
                  f_id=11))
nodes.append(Node(7,'fragment',
                  parent = 11,
                  children = [],
                  tables=['Orders'],
                  site = 4,
                  size=0,
                  columns=[],
                  f_id=12))
# Nodes 8-11: per-site projection of Orders to (customer_id, quantity).
nodes.append(Node(8,'projection',
                  parent = 12,
                  children = [4],
                  tables=['Orders'],
                  site = 1,
                  size=0,
                  columns=[],
                  projection=['Orders.customer_id','Orders.quantity']))
nodes.append(Node(9,'projection',
                  parent = 13,
                  children = [5],
                  tables=['Orders'],
                  site = 2,
                  size=0,
                  columns=[],
                  projection=['Orders.customer_id','Orders.quantity']))
nodes.append(Node(10,'projection',
                  parent = 14,
                  children = [6],
                  tables=['Orders'],
                  site = 3,
                  size=0,
                  columns=[],
                  projection=['Orders.customer_id','Orders.quantity']))
nodes.append(Node(11,'projection',
                  parent = 15,
                  children = [7],
                  tables=['Orders'],
                  site = 4,
                  size=0,
                  columns=[],
                  projection=['Orders.customer_id','Orders.quantity']))
# Nodes 12-15: per-site equi-join on Customer.id = Orders.customer_id.
nodes.append(Node(12,'join',
                  parent = 16,
                  children = [0,8],
                  tables=['Customer','Orders'],
                  site = 1,
                  size=0,
                  columns=[],
                  join=['Customer.id','Orders.customer_id']))
nodes.append(Node(13,'join',
                  parent = 17,
                  children = [1,9],
                  tables=['Customer','Orders'],
                  site = 2,
                  size=0,
                  columns=[],
                  join=['Customer.id','Orders.customer_id']))
nodes.append(Node(14,'join',
                  parent = 18,
                  children = [2,10],
                  tables=['Customer','Orders'],
                  site = 3,
                  size=0,
                  columns=[],
                  join=['Customer.id','Orders.customer_id']))
nodes.append(Node(15,'join',
                  parent = 19,
                  children = [3,11],
                  tables=['Customer','Orders'],
                  site = 4,
                  size=0,
                  columns=[],
                  join=['Customer.id','Orders.customer_id']))
# Nodes 16-19: final per-site projection to (Customer.name, Orders.quantity).
nodes.append(Node(16,'projection',
                  parent = 20,
                  children = [12],
                  tables=['Customer','Orders'],
                  site = 1,
                  size=0,
                  columns=[],
                  projection=['Customer.name','Orders.quantity']))
nodes.append(Node(17,'projection',
                  parent = 20,
                  children = [13],
                  tables=['Customer','Orders'],
                  site = 2,
                  size=0,
                  columns=[],
                  projection=['Customer.name','Orders.quantity']))
nodes.append(Node(18,'projection',
                  parent = 20,
                  children = [14],
                  tables=['Customer','Orders'],
                  site = 3,
                  size=0,
                  columns=[],
                  projection=['Customer.name','Orders.quantity']))
nodes.append(Node(19,'projection',
                  parent = 20,
                  children = [15],
                  tables=['Customer','Orders'],
                  site = 4,
                  size=0,
                  columns=[],
                  projection=['Customer.name','Orders.quantity']))
# Node 20: root union of the four per-site results, executed at site 2.
nodes.append(Node(20,'union',
                  parent = -1,
                  children = [16,17,18,19],
                  tables=['Customer','Orders'],
                  site = 2,
                  size=0,
                  columns=[]))
### example above
def table_column_from_pj(pj):
    """Split qualified projection names 'Table.column' into parallel lists.

    Parameters
    ----------
    pj : iterable of str
        Projection entries such as 'Orders.customer_id'.

    Returns
    -------
    tuple of (list, list)
        ([table, ...], [column, ...]) split on the first '.'.
    """
    tables = []
    columns = []
    for entry in pj:
        # partition() splits on the first dot; unlike the original
        # find()-based slicing, it does not silently drop the last
        # character of an entry that contains no dot (find() == -1).
        table, _, column = entry.partition('.')
        tables.append(table)
        columns.append(column)
    return tables, columns
def table_column_type(table, column):
    """Look up (table, column, sql_type) for one column via the etcd catalog.

    Falls through (implicitly returning None) when the column is not listed.
    NOTE(review): eval() on the etcd payload assumes the catalog is trusted.
    """
    catalog = eval(etcd.get('/table/' + table + '/columns')[0])
    match = next((entry for entry in catalog if entry[0] == column), None)
    if match is not None:
        return table, column, match[1]
def fragment_columns(table, site):
    """Return the column list of `table`'s fragment stored at `site`, from etcd.

    NOTE(review): eval() on the etcd payload assumes the catalog is trusted.
    """
    key = '/table/' + table + '/fragment/' + str(site)
    fragment_meta = eval(etcd.get(key)[0])
    return fragment_meta['columns']
def sql_create(query_no, node_no, columns):
    """Build the CREATE TABLE statement for one plan node's temp table.

    Parameters
    ----------
    query_no, node_no : int
        Name the temp table Q<query_no>_N<node_no>.
    columns : list
        Descriptors whose first three items are (table, column, sql_type);
        each becomes a "<table>_<column> <sql_type>" column definition.
    """
    # join() replaces the original first-element-special-cased concatenation
    # and also tolerates an empty column list.
    col_defs = ', '.join(
        col[0] + '_' + col[1] + ' ' + col[2] for col in columns)
    return ('create table Q' + str(query_no) + '_N' + str(node_no)
            + ' (' + col_defs + ');')
def valueij(valueij):
    """Render one SQL value: strings are single-quoted, everything else via str().

    NOTE(review): quoting by concatenation does not escape embedded quotes --
    acceptable for trusted literals, unsafe for untrusted input.
    """
    # isinstance() replaces the fragile str(type(...)).find('str') >= 0 check,
    # which also matched any type whose name merely contains "str".
    if isinstance(valueij, str):
        return "'" + valueij + "'"
    return str(valueij)
def value_tuple(value):
    """Format one row of values as a SQL tuple literal, e.g. "(1, 'a')".

    Unlike the original, an empty row yields "()" instead of raising
    IndexError.
    """
    return '(' + ', '.join(valueij(item) for item in value) + ')'
def sql_insert(query_no, node_no, columns, values):
    """Build an INSERT statement for Q<query_no>_N<node_no>.

    Returns '' when there are no rows (`not values` also guards against
    None or an empty tuple, which the original `values == []` missed).
    columns are (table, column) pairs rendered as <table>_<column>;
    each row in `values` is rendered by value_tuple().
    """
    if not values:
        return ''
    col_list = ', '.join(col[0] + '_' + col[1] for col in columns)
    row_list = ', '.join(value_tuple(row) for row in values)
    return ('insert into Q' + str(query_no) + '_N' + str(node_no) + '('
            + col_list + ') values' + row_list + ';')
def sql_select_fragment(columns):
    """SELECT the raw columns of a fragment from its base table.

    columns : non-empty list of (table, column) pairs, all for the same
    table; the table name is taken from the first entry.
    """
    col_list = ', '.join(col[1] for col in columns)
    return 'select ' + col_list + ' from ' + columns[0][0] + ';'
def sql_select_star(query_no, node_no):
    """Return "select * from Q<query_no>_N<node_no>;" for a node's temp table."""
    temp_table = 'Q' + str(query_no) + '_N' + str(node_no)
    return 'select * from ' + temp_table + ';'
def sql_select(query_no, node_no, columns, select_condi):
    """SELECT * with a WHERE filter on the first column of the node's temp table.

    select_condi[1] is the operator and select_condi[2] the literal;
    select_condi[0] is unused here (presumably the column name) -- this
    matches the original behavior.
    """
    base = sql_select_star(query_no, node_no).rstrip(';')
    qualified_col = ('Q' + str(query_no) + '_N' + str(node_no) + '.'
                     + columns[0][0] + '_' + columns[0][1])
    return (base + ' where ' + qualified_col + ' '
            + select_condi[1] + ' ' + select_condi[2] + ';')
def sql_project(query_no, node_no, columns):
    """SELECT the prefixed <table>_<column> columns from a node's temp table."""
    # join() replaces the original first-element-special-cased concatenation.
    col_list = ', '.join(col[0] + '_' + col[1] for col in columns)
    return ('select ' + col_list + ' from Q' + str(query_no)
            + '_N' + str(node_no) + ';')
def sql_union(query_no, children):
    """Build the UNION ALL of the intermediate tables of all child nodes.

    Joins one `select * from Q<query>_N<child>` per child with
    ' UNION ALL ' instead of slicing sql_select_star's trailing ';'.
    """
    selects = ['select * from Q' + str(query_no) + '_N' + str(child)
               for child in children]
    return ' UNION ALL '.join(selects) + ';'
def sql_join(query_no, nodes_no, columns):
    """Build an equi-join between the intermediate tables of two nodes.

    nodes_no holds the two node ids; columns[0]/columns[1] give the
    (table, column) pair whose <table>_<column> names are equated.
    """
    left = 'Q{}_N{}'.format(query_no, nodes_no[0])
    right = 'Q{}_N{}'.format(query_no, nodes_no[1])
    left_col = columns[0][0] + '_' + columns[0][1]
    right_col = columns[1][0] + '_' + columns[1][1]
    return ('select * from ' + left + ', ' + right
            + ' where ' + left + '.' + left_col
            + ' = ' + right + '.' + right_col + ';')
def sql_drop_temp(query_no, children):
    """Print a DROP TABLE statement for each child's intermediate table.

    Note: the statements are only printed; a real run should execute the drop.
    """
    for child in children:
        print('drop table Q{}_N{};'.format(query_no, child))
# Per-site connection tables, indexed by site number 1..4 (slot 0 is unused).
# server_conn is filled with gRPC NetServiceStub handles by server_conn_init;
# db_conn is filled with this site's database connection by db_conn_init.
server_conn = [None, None, None, None, None]
db_conn = [None, None, None, None, None]
def server_conn_init(self_site):
    """Open gRPC stubs to every site other than self_site.

    The stubs are stored in the global server_conn table, indexed by site
    number; the local site's slot is left untouched.
    """
    global server_conn
    endpoints = {
        1: '10.77.70.61:8883',  # site 1 lives on server 61:8883
        2: '10.77.70.61:8885',  # site 2 lives on server 61:8885
        3: '10.77.70.62:8883',  # site 3 lives on server 62:8883
        4: '10.77.70.63:8883',  # site 4 lives on server 63:8883
    }
    for site, address in endpoints.items():
        if site != self_site:
            channel = grpc.insecure_channel(address)
            server_conn[site] = net_pb2_grpc.NetServiceStub(channel=channel)
def db_conn_init(self_site):
    """Open this site's MySQL connection into the global db_conn table.

    Fixes: the original contained the unparsable redacted placeholder
    `password=_<PASSWORD>` on the site-1/site-3 branches (a syntax error);
    all branches now use _MYSQL_PASSWORD, and the site-1 `if` joins the
    elif chain for consistency (behavior is unchanged).
    NOTE(review): sites 1, 3 and 4 all connect to "db1" while site 2 uses
    "db2" — confirm sites 3/4 are not meant to use db3/db4.
    """
    global db_conn
    _MYSQL_USER = "root"
    _MYSQL_PASSWORD = "<PASSWORD>$%^"
    _DB = "db1"
    if self_site == 1:
        db_conn[1] = Conndb(user=_MYSQL_USER, password=_MYSQL_PASSWORD, database=_DB)  # site1
    elif self_site == 2:
        _DB = "db2"
        db_conn[2] = Conndb(user=_MYSQL_USER, password=_MYSQL_PASSWORD, database=_DB)  # site2
    elif self_site == 3:
        db_conn[3] = Conndb(user=_MYSQL_USER, password=_MYSQL_PASSWORD, database=_DB)  # site3
    elif self_site == 4:
        db_conn[4] = Conndb(user=_MYSQL_USER, password=_MYSQL_PASSWORD, database=_DB)  # site4
def dfs_execute(query_no, node_no, str_nodes, self_site): #return node_no,columns,data,trans_vol data=tuple(tuple)
global server_conn
nodes = str2nodes(str_nodes)
#print(query_no, node_no)
if nodes[node_no].site != self_site: #不同站,grpc远程过程调用
print('[grpc from Site' + str(self_site) + ' to Site' + str(nodes[node_no].site), end=']')
print('Start Node' + str(node_no)) #同站
dfs_node_no, str_columns, str_values, trans_vol = dfs(query_no, node_no, str_nodes)
columns = eval(str_columns)
values = eval(str_values)
i = nodes[node_no]
print(sql_create(query_no, i.id, columns)) #创建中间结果表
| |
data DataContainer in hera_cal.DataContainer form
antpos : type=dictionary, antenna positions dictionary
keys are antenna integers, values are ndarray baseline vectors.
tol : type=float, redundant baseline distance tolerance in units of baseline vectors
weights : type=bool, if True, treat data as a wgts dictionary and multiply by redundant weighting.
Output: (red_data)
-------
red_data : type=DataContainer, data dictionary in AbsCal form, with unique baseline data
distributed to redundant baseline groups.
if weights == True:
red_data is a real-valued wgts dictionary with redundant baseline weighting muliplied in.
"""
# get data keys
keys = list(data.keys())
# get polarizations in data
pols = data.pols()
# get redundant baselines
reds = redcal.get_reds(antpos, bl_error_tol=tol, pols=pols)
# make red_data dictionary
red_data = odict()
# iterate over data keys
for i, k in enumerate(keys):
# find which bl_group this key belongs to
match = np.array(list(map(lambda r: k in r, reds)))
conj_match = np.array(list(map(lambda r: reverse_bl(k) in r, reds)))
# if no match, just copy data over to red_data
if True not in match and True not in conj_match:
red_data[k] = copy.copy(data[k])
else:
# iterate over matches
for j, (m, cm) in enumerate(zip(match, conj_match)):
if weights:
# if weight dictionary, add repeated baselines
if m:
if k not in red_data:
red_data[k] = copy.copy(data[k])
red_data[k][red_data[k].astype(np.bool)] = red_data[k][red_data[k].astype(np.bool)] + len(reds[j]) - 1
else:
red_data[k][red_data[k].astype(np.bool)] = red_data[k][red_data[k].astype(np.bool)] + len(reds[j])
elif cm:
if k not in red_data:
red_data[k] = copy.copy(data[k])
red_data[k][red_data[k].astype(np.bool)] = red_data[k][red_data[k].astype(np.bool)] + len(reds[j]) - 1
else:
red_data[k][red_data[k].astype(np.bool)] = red_data[k][red_data[k].astype(np.bool)] + len(reds[j])
else:
# if match, insert all bls in bl_group into red_data
if m:
for bl in reds[j]:
red_data[bl] = copy.copy(data[k])
elif cm:
for bl in reds[j]:
red_data[bl] = np.conj(data[k])
# re-sort, square if weights to match linsolve
if weights:
for i, k in enumerate(red_data):
red_data[k][red_data[k].astype(np.bool)] = red_data[k][red_data[k].astype(np.bool)]**(2.0)
else:
red_data = odict([(k, red_data[k]) for k in sorted(red_data)])
return DataContainer(red_data)
def match_times(datafile, modelfiles, filetype='uvh5', atol=1e-5):
    """
    Match start and end LST of datafile to modelfiles. Each file in modelfiles needs
    to have the same integration time.
    Args:
        datafile : type=str, path to data file
        modelfiles : type=list of str, list of filepaths to model files ordered according to file start time
        filetype : str, options=['uvh5', 'miriad']
        atol : type=float, absolute tolerance [radians] used when comparing LST edges
    Returns:
        matched_modelfiles : type=list, list of modelfiles that overlap w/ datafile in LST
    """
    # get lst arrays for the data file and for every model file
    data_dlst, data_dtime, data_lsts, data_times = io.get_file_times(datafile, filetype=filetype)
    model_dlsts, model_dtimes, model_lsts, model_times = io.get_file_times(modelfiles, filetype=filetype)
    # shift model files relative to first file & first index if needed
    # NOTE: `ml += 2 * np.pi` mutates each LST array in place, so the unwrapped
    # values are what the start/end computation below sees (assumes ndarrays).
    for ml in model_lsts:
        if ml[0] < model_lsts[0][0]:
            ml += 2 * np.pi
    # get model start and stop, buffering by dlst / 2 (half an LST bin each side)
    model_starts = np.asarray([ml[0] - md / 2.0 for ml, md in zip(model_lsts, model_dlsts)])
    model_ends = np.asarray([ml[-1] + md / 2.0 for ml, md in zip(model_lsts, model_dlsts)])
    # shift data relative to model if needed (handles the 2*pi LST wrap)
    if data_lsts[-1] < model_starts[0]:
        data_lsts += 2 * np.pi
    # select model files whose [start, end] window overlaps the data's LST range
    match = np.asarray(modelfiles)[(model_starts < data_lsts[-1] + atol)
                                   & (model_ends > data_lsts[0] - atol)]
    return match
def cut_bls(datacontainer, bls=None, min_bl_cut=None, max_bl_cut=None, inplace=False):
    """
    Cut visibility data based on min and max baseline length.

    Parameters
    ----------
    datacontainer : DataContainer object to perform baseline cut on
    bls : dictionary, holding baseline position vectors.
        keys are antenna-pair tuples and values are baseline vectors in meters.
        If bls is None, will look for antpos attr in datacontainer.
    min_bl_cut : float, minimum baseline separation [meters] to keep in data
    max_bl_cut : float, maximum baseline separation [meters] to keep in data
    inplace : bool, if True edit data in input object, else make a copy.

    Output
    ------
    datacontainer : DataContainer object with bl cut enacted
    """
    if not inplace:
        datacontainer = copy.deepcopy(datacontainer)
    # open bounds default to "keep everything"
    if min_bl_cut is None:
        min_bl_cut = 0.0
    if max_bl_cut is None:
        max_bl_cut = 1e10
    if bls is None:
        # look for antpos in dc and derive baseline vectors from it
        if not hasattr(datacontainer, 'antpos'):
            raise ValueError("If bls is not fed, datacontainer must have antpos attribute.")
        bls = odict()
        ap = datacontainer.antpos
        for bl in datacontainer.keys():
            if bl[0] not in ap or bl[1] not in ap:
                continue
            bls[bl] = ap[bl[1]] - ap[bl[0]]
    for k in list(datacontainer.keys()):
        # BUGFIX: test membership BEFORE indexing into bls — the original
        # computed bls[k] first, raising KeyError for keys with no vector.
        if k not in bls:
            continue
        bl_len = np.linalg.norm(bls[k])
        if bl_len > max_bl_cut or bl_len < min_bl_cut:
            del datacontainer[k]
    assert len(datacontainer) > 0, "no baselines were kept after baseline cut..."
    return datacontainer
class AbsCal(object):
"""
    AbsCal object used for phasing and scaling visibility data to an absolute reference model.
A few different calibration methods exist. These include:
1) per-antenna amplitude logarithmic calibration solves the equation:
ln[abs(V_ij^data / V_ij^model)] = eta_i + eta_j
2) per-antenna phase logarithmic calibration solves the equation:
angle(V_ij^data / V_ij^model) = phi_i - phi_j
3) delay linear calibration solves the equation:
delay(V_ij^data / V_ij^model) = delay(g_i) - delay(g_j)
= tau_i - tau_j
where tau is the delay that can be turned
into a complex gain via: g = exp(i * 2pi * tau * freqs).
4) delay slope linear calibration solves the equation:
delay(V_ij^data / V_ij^model) = dot(T_dly, B_ij)
where T_dly is a delay slope in [ns / meter]
and B_ij is the baseline vector between ant i and j.
5) frequency-independent phase slope calibration
median_over_freq(angle(V_ij^data / V_ij^model)) = dot(Phi, B_ji)
where Phi is a phase slope in [radians / meter]
and B_ij is the baseline vector between ant i and j.
6) Average amplitude linear calibration solves the equation:
log|V_ij^data / V_ij^model| = log|g_avg_i| + log|g_avg_j|
7) Tip-Tilt phase logarithmic calibration solves the equation
angle(V_ij^data / V_ij^model) = psi + dot(TT_Phi, B_ij)
where psi is an overall gain phase scalar,
TT_Phi is the gain phase slope vector [radians / meter]
and B_ij is the baseline vector between antenna i and j.
Methods (1), (2) and (3) can be thought of as general bandpass solvers, whereas
methods (4), (5), (6), and (7) are methods that would be used for data that has already
been redundantly calibrated.
Be warned that the linearizations of the phase solvers suffer from phase wrapping
pathologies, meaning that a delay calibration should generally precede a
phs_logcal or a TT_phs_logcal bandpass routine.
"""
def __init__(self, model, data, refant=None, wgts=None, antpos=None, freqs=None,
min_bl_cut=None, max_bl_cut=None, bl_taper_fwhm=None, verbose=True,
filetype='miriad', input_cal=None):
"""
        AbsCal object used for phasing and scaling visibility data to an absolute reference model.
The format of model, data and wgts is in a dictionary format, with the convention that
keys contain antennas-pairs + polarization, Ex. (1, 2, 'nn'), and values contain 2D complex
ndarrays with [0] axis indexing time and [1] axis frequency.
Parameters:
-----------
        model : Visibility data of reference model, type=dictionary or DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
Optionally, model can be a path to a pyuvdata-supported file, a
pyuvdata.UVData object or hera_cal.HERAData object,
or a list of either.
data : Visibility data, type=dictionary or DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
Optionally, data can be a path to a pyuvdata-supported file, a
pyuvdata.UVData object or hera_cal.HERAData object,
or a list of either. In this case, antpos, freqs
and wgts are overwritten from arrays in data.
refant : antenna number integer for reference antenna
            The reference antenna is used in the phase solvers, where an absolute phase is applied to all
antennas such that the refant's phase is set to identically zero.
wgts : weights of the data, type=dictionary or DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
antpos : type=dictionary, dict of antenna position vectors in ENU (topo) frame in meters.
origin of coordinates does not matter, but preferably are centered in the array.
keys are | |
copy.deepcopy(ampA0)
maxA = maxA*Afmean
minA = minA*Afmean
maxP = gammaP*1.0 + (1.0 - gammaP)*np.max(s.P[s.flagNode<0,:].flatten())
minP = (1.0 - gammaP)*np.min(s.P[s.flagNode<0,:].flatten())
pertPoint0 = pertPoint*np.mean(s.Lf)
pertPoint = copy.deepcopy(pertPoint0)
neighBoundPts = np.arange(s.np)[s.flagNode == -1]
nnbp = len(neighBoundPts)
np.random.shuffle(neighBoundPts)
neighBoundFibres = np.arange(s.nf)[s.flagFibres == 1]
nnbf = len(neighBoundFibres)
np.random.shuffle(neighBoundFibres)
kp = 0
kf = 0
ksmooth = 0
pointPertubation = False
# main loop
while(k<maxit and J0>Jtol):
status = True
pointPertubation = s.isPointPertubation(k)
if(pointPertubation):
#~ print 'pointPertubation' , kp
# selects randomly the points on the bonundary neighbourhood
ipp = neighBoundPts[kp%nnbp]
kp = kp + 1
dp, J = perturbPoint(funcCost,s,ampP,maxP,minP,ipp)
status, J0, ampP, knochange = updatePertubation(J0, ampP, knochange, J, ampP0, alphaP, maxnochange, dJmin)
if(status):
s.P[ipp,:] = s.P[ipp,:] + dp
s.set_af_Lf_Vf(s.InvConnect[ipp])
s.setNormalAndAbar(s.InvConnectPoint[ipp])
else:
#~ print 'areaPertubation' , kf
ipp = neighBoundFibres[kf%nnbp]
kf = kf + 1
dA, J = perturbArea(funcCost,s,ampA,maxA,minA,ipp)
status, J0, ampA, knochange = updatePertubation(J0, ampA, knochange, J, ampA0, alphaA, maxnochange, dJmin)
if(status):
s.Af[ipp] = s.Af[ipp] + dA
if(J0<Jtol2 and status and ksmooth < timesSmooth):
print('smooth')
s.smooth(omegaSmooth,True)
s.correctPoints(maxP,minP)
s.set_af_Lf_Vf()
s.setNormalAndAbar()
ksmooth = ksmooth + 1
if(not status and knochange == 0):
print('pertubation')
s.addPertubation(pertPoint)
#~ s.smooth(omegaSmooth,timesSmooth,True)
s.correctPoints(maxP,minP)
s.set_af_Lf_Vf()
s.setNormalAndAbar()
J0 = funcCost(s)
if(kpert < restartPert):
pertPoint = alphaPert*pertPoint
kpert = kpert + 1
else:
pertPoint = copy.deepcopy(pertPoint0)
kpert = 0
k = k + 1
print(J0)
#~ print s.Abar
#~ print J0
# ================= auxiliary functions ==========================
def updatePertubation(J0, amp, knochange, J, amp0, alpha, maxnochange, dJmin):
    """Accept or reject a perturbation step and adapt its amplitude.

    Returns (status, J0, amp, knochange).  status is True when the candidate
    cost J improves on the incumbent J0; on acceptance the amplitude resets
    to amp0 once relative progress falls below dJmin, and the stall counter
    clears.  On rejection the amplitude shrinks by alpha, and after more
    than maxnochange consecutive rejections it resets to amp0.
    """
    improved = J < J0
    if not improved:
        # rejected step: damp the amplitude and count the stall
        amp = alpha * amp
        knochange += 1
        if knochange > maxnochange:
            print(" knochange > maxnochange ")
            amp = amp0
            knochange = 0
        return False, J0, amp, knochange
    # accepted step: reset amplitude when the relative improvement is tiny
    if (J0 - J) / J0 < dJmin:
        print(" (J0-J)/J0 < dJmin ")
        amp = amp0
    return True, J, amp, 0
# computes functional for old BNC (\sum_{i\in setNodeGamma} \sum_f [f,i] A_f a_f )
# used in the optimization algorithm
def functionalNBCfib(net):
    """Norm of the signed sum of A_f * a_f over all boundary-node fibre incidences."""
    acc = np.zeros(2)
    for node in range(net.npb):
        for fib, sign in zip(net.InvConnect[node], net.InvConnectSign[node]):
            acc = acc + sign * net.Af[fib] * net.af[fib]
    return np.sqrt(np.dot(acc, acc))
# computes functional for new BNC ( (\sum_{i\in setNodeGamma} \bar{A}_i n_i ) ), todo
# used in the optimization algorithm
def functionalNBCnormal(net):
    """Norm of sum(Abar_i * normal_i) over nodes with positive flagNode.

    Fixed: removed a leftover debug `print(vacc)` — this functional is
    evaluated inside the optimizer's inner loop, so the print spammed
    stdout on every cost evaluation.
    """
    vacc = np.zeros(2)
    for i in range(net.np):
        if net.flagNode[i] > 0:
            vacc = vacc + net.Abar[i] * net.normal[i, :]
    return np.sqrt(np.dot(vacc, vacc))
def functionalNBCBoth(net):
    """Combined functional: Euclidean norm of the fibre and normal functionals."""
    j_normal = functionalNBCnormal(net)
    j_fib = functionalNBCfib(net)
    return np.sqrt(j_normal ** 2.0 + j_fib ** 2.0)
def perturbPoint(funcCost,net,amp,maxP,minP,ipp):
    """Trial-move point `ipp` along 8 sign/swap variants of a random vector.

    Each candidate move is applied to the network, scored with funcCost, and
    then undone, so the apply/refresh/undo ordering below is load-bearing.
    Returns (best_move, best_cost); moves that push either coordinate outside
    [minP, maxP] get the penalty cost 999.9 instead of being evaluated.
    """
    # selects the random pertubation: a random amplitude and direction
    ampEfect = amp*np.random.rand(1)
    thet = 2.0*np.pi*np.random.rand(1)
    r = ampEfect*np.array([np.cos(thet),np.sin(thet)])
    # get pertubation in all possible directions: the four sign combinations
    # of (r0, r1) plus the four of the swapped pair (r1, r0)
    p = np.zeros((8,2))
    p[0,0] = r[0] ; p[0,1] = r[1];
    p[1,0] = -r[0] ; p[1,1] = -r[1];
    p[2,0] = -r[0] ; p[2,1] = r[1];
    p[3,0] = r[0] ; p[3,1] = -r[1];
    p[4,0] = r[1] ; p[4,1] = r[0];
    p[5,0] = -r[1] ; p[5,1] = -r[0];
    p[6,0] = -r[1] ; p[6,1] = r[0];
    p[7,0] = r[1] ; p[7,1] = -r[0];
    # computes the corresponding cost functionals
    J = np.zeros(8)
    for j in range(8):
        # apply the trial move and refresh only the geometry touching this point
        net.P[ipp,:] = net.P[ipp,:] + p[j,:]
        net.set_af_Lf_Vf(net.InvConnect[ipp])
        net.setNormalAndAbar(net.InvConnectPoint[ipp])
        if( max(net.P[ipp,0],net.P[ipp,1]) > maxP or min(net.P[ipp,0],net.P[ipp,1]) < minP ): # penalises pertubations outside a range
            J[j] = 999.9
        else:
            J[j] = funcCost(net)
        # undo the trial move and restore the cached geometry
        net.P[ipp,:] = net.P[ipp,:] - p[j,:]
        net.set_af_Lf_Vf(net.InvConnect[ipp])
        net.setNormalAndAbar(net.InvConnectPoint[ipp])
    # select the minimum
    ipmin = np.argmin(J)
    J = np.min(J)
    return p[ipmin,:] , J
def perturbArea(funcCost,net,amp,maxA,minA,ipp):
    """Try +dA and -dA on fibre ipp's area; return the better (move, cost).

    The area is perturbed in place, scored, and restored, so the
    +dA / -2*dA / +dA sequence below must stay in this exact order.
    Areas outside [minA, maxA] get the penalty cost 999.9.
    """
    # selects the random pertubation (normally distributed area increment)
    dA = amp*np.random.randn(1)
    Jp = 0.0
    Jm = 0.0
    #~ print 'delta area = ', dA
    # evaluate the +dA candidate
    net.Af[ipp] = net.Af[ipp] + dA
    net.setNormalAndAbar(net.ElemFib[ipp])
    if( net.Af[ipp] > maxA ):
        Jp = 999.9
    else:
        Jp = funcCost(net)
    # step down by 2*dA to evaluate the -dA candidate
    net.Af[ipp] = net.Af[ipp] - 2.0*dA
    net.setNormalAndAbar(net.ElemFib[ipp])
    if( net.Af[ipp] < minA ):
        Jm = 999.9
    else:
        Jm = funcCost(net)
    # restore the original area
    net.Af[ipp] = net.Af[ipp] + dA
    net.setNormalAndAbar(net.ElemFib[ipp])
    if(Jp<Jm):
        return dA, Jp
    else:
        return -dA, Jm
# notice that nf = 4*(nx - 1)*(ny - 1) in regular grid excluding vertical and horizontal fibres
# computes actual number of fibres, given a prevision and asymetric factor (nx - ny = asymFac)
def estimateGridPointsByAsymetry(nfPrev,asymfac = 0):
    """Solve 4*(nx-1)*(ny-1) = nfPrev with nx - ny = asymfac for grid sizes.

    Solves the quadratic y^2 + asymfac*y - nfPrev/4 = 0 for y = ny - 1 and
    returns the rounded integer pair (nx, ny).
    """
    coeff_b = float(asymfac)
    coeff_c = -0.25*float(nfPrev)
    discriminant = np.sqrt(coeff_b*coeff_b - 4.0*coeff_c)
    y = 0.5*(discriminant - coeff_b)
    ny_real = y + 1.0
    nx_real = ny_real + float(asymfac)
    nx = np.round(nx_real).astype('int')
    ny = np.round(ny_real).astype('int')
    print('estimated nx,ny=',nx, ny)
    return nx , ny
# computes actual number of fibres, given a prevision and asymetric factor (nx - ny = asymFac)
def estimateGridPointsByTheta(nfPrev,theta = 0.25*np.pi):
    """Estimate grid sizes (nx, ny) for a target fibre count and aspect angle theta."""
    aspect = np.tan(theta)
    ny_real = 1.0 + np.sqrt(float(nfPrev)/(4.0*aspect))
    nx_real = 1 + aspect*(ny_real - 1)
    nx = np.round(nx_real).astype('int')
    ny = np.round(ny_real).astype('int')
    print('estimated nx,ny=',nx, ny)
    return nx , ny
# create structured points, with boundary structured
# used in createMatrixAndNetwork
def structuredPointsWithBoundary(nx,ny):
    """Build the point cloud of an nx-by-ny structured grid on the unit square:
    boundary points first (via borderPoints), then half-cell-shifted points,
    then interior grid points.  The fill order matters — downstream code
    treats the first npb rows as the boundary."""
    # NOTE(review): despite the name, nf here is the total POINT count of this
    # layout, not a fibre count — confirm the closed form 2*ny*nx - ny - nx + 1.
    nf = 2*ny*nx - ny - nx + 1
    npb = 2*(nx + ny - 2 )
    points = np.zeros((nf,2))
    points[0:npb,:] = borderPoints(0.0,0.0,1.0,1.0,nx,ny)
    # coordinates shifted by half a cell in x and y
    tx = 1.0/(2.0*(nx-1.0))
    pxa = tx + np.linspace(0.0,1.0,nx)[0:nx-1]
    ty = 1.0/(2.0*(ny-1.0))
    pya = ty + np.linspace(0.0,1.0,ny)[0:ny-1]
    # interior (non-boundary) grid line coordinates
    pxb = np.linspace(0.0,1.0,nx)[1:nx-1]
    pyb = np.linspace(0.0,1.0,ny)[1:ny-1]
    # fill the remaining rows after the boundary block
    k = npb
    for px in pxa:
        for py in pya:
            points[k,0] = px
            points[k,1] = py
            k = k + 1
    for px in pxb:
        for py in pyb:
            points[k,0] = px
            points[k,1] = py
            k = k + 1
    return points
def tri2Edges(tri):
    """Collect the unique undirected edges of a triangle list.

    Each edge is returned as [lo, hi] with endpoints sorted; first-seen
    order is preserved.  A `seen` set makes de-duplication O(1) per edge
    instead of the original O(n) `ei in e` list scan (O(n^2) overall).
    """
    edges = []
    seen = set()
    for t in tri:
        for a, b in ((t[0], t[1]), (t[1], t[2]), (t[2], t[0])):
            key = (a, b) if a < b else (b, a)
            if key not in seen:
                seen.add(key)
                edges.append([key[0], key[1]])
    return edges
# create structured boundary points
# used in structuredPointsWithBoundary
def borderPoints(a,b,c,d,nx,ny):
    """Walk the rectangle [a,c] x [b,d] and return its 2*(nx+ny-2) boundary
    points as an (N, 2) float array: bottom edge left-to-right, right edge
    upward, top edge right-to-left, then left edge downward — each corner
    emitted exactly once."""
    xs = np.linspace(a, c, nx)
    ys = np.linspace(b, d, ny)
    pts = []
    pts.extend((x, b) for x in xs)                              # bottom: all nx points
    pts.extend((c, ys[j]) for j in range(1, ny))                # right edge, corner skipped
    pts.extend((xs[nx - 1 - i], d) for i in range(1, nx))       # top, right-to-left
    pts.extend((a, ys[ny - 1 - j]) for j in range(1, ny - 1))   # left, downward, no corners
    return np.array(pts)
# get points and edges and convert into points on boundary, interior, fibres on boundary and interior, add as well dummy points
# used in createMatrixAndNetwork
def convertGeneric(p,e):
    """Normalize point coordinates to the unit square (in place) and pack the
    edge list into an ndarray.

    p is an (n, 2) float array and is MUTATED in place; e is an iterable of
    2-element index pairs.  Returns (p, ElemFib).  The original's unused
    point-count variable and O(n^2) repeated list concatenation were removed.
    """
    # min-max normalize each coordinate axis to [0, 1]
    xmin = np.min(p[:, 0])
    ymin = np.min(p[:, 1])
    xmax = np.max(p[:, 0])
    ymax = np.max(p[:, 1])
    p[:, 0] = (p[:, 0] - xmin) / (xmax - xmin)
    p[:, 1] = (p[:, 1] - ymin) / (ymax - ymin)
    ElemFib = np.array([ei for ei in e])
    return p, ElemFib
# Classify a coordinate on the unit segment: 0 = left extremity, 1 = right
# extremity, 2 = interior point.
# used in setFlagBound.
def classify(x):
    """Return 0 if x ~ 0, 1 if x ~ 1, else 2 (tolerance 1e-8)."""
    tol = 1.0e-8
    if abs(x) < tol:
        return 0        # left extreme
    if abs(x - 1.0) < tol:
        return 1        # right extreme
    return 2            # in the middle
def writeInifileDefault(Ncoord,Ndof,op = 2):
    """Write a default (all-zero) initial-condition file "IniFile000.txt".

    op selects the header style: 'Piola' writes a *Time header, 'SGP' writes
    *Initial Conditions; after the optional header, Ncoord rows of Ndof
    zeros are written.  The file is now managed by a with-block so the
    handle is closed even if a write fails.
    NOTE(review): the default op=2 matches neither branch, so no header is
    written — confirm that is intentional.
    """
    with open("IniFile000.txt", 'w') as inifile:
        if op == 'Piola':  # Piola style
            inifile.write("*Time\n0 0 0\n")
        elif op == 'SGP':  # SGP style
            inifile.write("*Initial Conditions\n")
        # Ncoord rows of Ndof zeros each
        for j in range(Ncoord):
            for k in range(Ndof):
                inifile.write("0.0 ")
            inifile.write("\n")
def writeParamDefault(Nfib,Nparam):
    """Write a default Param000.txt: Nfib groups of Nparam zero-valued real
    parameters (plus a leading empty group 0) and all-zero integer counts.

    Output layout matches writeParamCustom.  The file is managed by a
    with-block so the handle is always closed.
    """
    with open("Param000.txt", 'w') as paramfile:
        paramfile.write("*Parameter Groups\n")
        paramfile.write(str(Nfib + 1) + "\n")
        paramfile.write("*Real Parameters\n")
        # group 0 has no parameters; each fibre group has Nparam reals
        paramfile.write("0 " + "".join(str(Nparam) + " " for _ in range(Nfib)) + "\n")
        for _ in range(Nfib):
            paramfile.write("0.0 " * Nparam + "\n")
        paramfile.write("*Integer Parameters\n")
        # no trailing newline, matching the original output exactly
        paramfile.write("0 " + "".join("0 " for _ in range(Nfib)))
def writeParamCustom(Param):
    """Write Param000.txt from a 2-D array Param of shape (n_groups, n_params).

    Layout matches writeParamDefault: a leading empty group 0, one row of
    real parameters per group, then all-zero integer parameter counts.
    Requires 2-D numpy-style indexing (Param[j, k]).  The file is managed
    by a with-block so the handle is always closed.
    """
    NparamGroups = len(Param)
    Nparam = len(Param[0])
    with open("Param000.txt", 'w') as paramfile:
        paramfile.write("*Parameter Groups\n")
        paramfile.write(str(NparamGroups + 1) + "\n")
        paramfile.write("*Real Parameters\n")
        paramfile.write("0 " + "".join(str(Nparam) + " " for _ in range(NparamGroups)) + "\n")
        for j in range(NparamGroups):
            paramfile.write("".join(str(Param[j, k]) + " " for k in range(Nparam)) + "\n")
        paramfile.write("*Integer Parameters\n")
        # no trailing newline, matching the original output exactly
        paramfile.write("0 " + "".join("0 " for _ in range(NparamGroups)))
def writeFigNetwork(net,c=0,lw = 7.0,figNum = 1,filename = 'network.pdf'):
    """Plot every fibre of the network as a line segment and save the figure.

    With c == 0 (default) each fibre is drawn in a grayscale shade scaled
    from its area Af into [0, 0.8]; any other c is passed straight to
    matplotlib as a fixed color for all fibres.  lw is the line width,
    figNum the matplotlib figure number, filename the output path.
    """
    colorPallette = []
    if(c==0):
        nf = len(net.ElemFib)
        m = 0.8  # upper bound of the grayscale ramp
        maxAf = np.max(net.Af)
        minAf = np.min(net.Af)
        v = np.zeros(nf)
        if(maxAf>minAf):  # guard against divide-by-zero when all areas are equal
            v = (m/(maxAf - minAf))*(net.Af - minAf)
        for f in range(nf):
            colorPallette = colorPallette + [(v[f],v[f],v[f])]
    k = 0
    plt.figure(figNum,(6,6))
    for f in net.ElemFib:
        # segment endpoints of fibre f
        x = [net.P[f[0],0],net.P[f[1],0]]
        y = [net.P[f[0],1],net.P[f[1],1]]
        if(c == 0):
            plt.plot(x,y, linewidth = lw, color = colorPallette[k])
            k = k + 1
        else:
            plt.plot(x,y, linewidth = lw, color = c)
    plt.tight_layout()
    plt.savefig(filename)
def writeParamFibres(net,Nparam,fignum = -1):
    """Dump per-fibre parameters to Param000.txt and the homogenized B tensor
    (plus its inverse and the total fibre volume) to Bten.txt.

    net is the fibre-network object, Nparam the number of reals per fibre
    group; when fignum != -1 the parameter file is also archived as
    Param000_<fignum>.txt.  Fixes: Bten.txt is now written inside a
    with-block (the original leaked the handle), and the unix-only
    os.system("cp ...") was replaced by the portable shutil.copy.
    """
    print(Nparam)
    Param = np.zeros((net.nf, Nparam))
    # column layout of each fibre's parameter row (fortran -> python, 0-based)
    Ipos_flag1 = 0
    Ipos_flag2 = 1
    Ipos_Lf = 2
    Ipos_Areaf = 3
    Ipos_Vf = 4
    Ipos_lfa = 5
    Ipos_af = 6
    Ipos_yrel1 = 8
    Ipos_yrel2 = 10
    Ipos_Abar1 = 12
    Ipos_Abar2 = 13
    Ipos_normal1 = 14
    Ipos_normal2 = 16
    Ipos_nConnectFibre1 = 18
    Ipos_nConnectFibre2 = 19
    #~ Ipos_r0 = 20 # new compability
    Ipos_r0 = 24  # old compatibility
    # refresh the cached per-fibre geometry before dumping
    net.set_af_Lf_Vf()
    net.setNormalAndAbar()
    normal_bar = net.get_normal_bar()
    yG = net.get_yG()
    Bten = net.get_Bten()
    BtenInv = np.linalg.inv(Bten)
    with open("Bten.txt", 'w') as BtenFile:
        BtenFile.write(str(Bten[0, 0]) + ' ' + str(Bten[0, 1]) + ' ' + str(Bten[1, 0]) + ' ' + str(Bten[1, 1]) + '\n')
        BtenFile.write(str(BtenInv[0, 0]) + ' ' + str(BtenInv[0, 1]) + ' ' + str(BtenInv[1, 0]) + ' ' + str(BtenInv[1, 1]) + '\n')
        BtenFile.write(str(np.sum(net.Vf)))
    for f in range(net.nf):
        p1 = net.ElemFib[f, 0]
        p2 = net.ElemFib[f, 1]
        Param[f, Ipos_flag1] = net.flagNode[p1]
        Param[f, Ipos_flag2] = net.flagNode[p2]
        Param[f, Ipos_Lf] = net.Lf[f]
        Param[f, Ipos_Areaf] = net.Af[f]
        Param[f, Ipos_Vf] = net.Vf[f]
        Param[f, Ipos_lfa] = net.lfa[f]
        Param[f, Ipos_af:Ipos_af + 2] = net.af[f, :]
        Param[f, Ipos_yrel1:Ipos_yrel1 + 2] = net.P[p1, :] - yG
        Param[f, Ipos_yrel2:Ipos_yrel2 + 2] = net.P[p2, :] - yG
        Param[f, Ipos_Abar1] = net.Abar[p1]
        Param[f, Ipos_Abar2] = net.Abar[p2]
        Param[f, Ipos_normal1:Ipos_normal1 + 2] = net.normal[p1, :] - normal_bar
        Param[f, Ipos_normal2:Ipos_normal2 + 2] = net.normal[p2, :] - normal_bar
        Param[f, Ipos_nConnectFibre1] = len(net.InvConnect[p1])
        Param[f, Ipos_nConnectFibre2] = len(net.InvConnect[p2])
        Param[f, Ipos_r0] = net.r0[f]
    writeParamCustom(Param)
    if fignum != -1:
        # portable archive copy (replaces the unix-only `os.system("cp ...")`)
        import shutil
        shutil.copy("Param000.txt", "Param000_" + str(fignum) + ".txt")
def writeMesh(X,Elem,auxElem,Ndof,Nsubstep):
Ncoord=len(X)
ElemTotal = Elem + auxElem
NelemGroups= len(ElemTotal)
mesh=open("Mesh.txt",'w')
mesh.write("*NODAL DOFs\n")
mesh.write(str(Ndof) + "\n")
mesh.write("*DIMEN\n")
mesh.write("3\n")
mesh.write("*COORDINATES\n")
mesh.write(str(Ncoord) + "\n\n")
for i in range(Ncoord):
mesh.write(format(X[i,0],'.10e') + " " + format(X[i,1],'.10e') + " 0.0 \n")
mesh.write("\n")
mesh.write("*ELEMENT GROUPS\n")
mesh.write(str(NelemGroups) + "\n")
for i in range(NelemGroups):
mesh.write(str(i + 1) + " " + str(len(ElemTotal[i])) + " Generic\n")
mesh.write("\n")
for elemGroup in ElemTotal:
for e in elemGroup:
mesh.write(str(len(e)) + "\n")
mesh.write("\n")
mesh.write("*INCIDENCE\n")
for elemGroup in ElemTotal:
for e in elemGroup:
for v in e:
mesh.write(str(v+1) + " ") # fortran convention
mesh.write("\n")
mesh.write("\n")
mesh.write("*ELEMENT TYPE\n")
k = 0
for elemGroup in ElemTotal:
k = k + 1
for e in elemGroup:
mesh.write(str(k) + "\n")
mesh.write("\n")
mesh.write("*ELEMENT MAT\n")
# just the elements in Elem have material associated
k = 2
for elemGroup in Elem:
for e in elemGroup:
mesh.write(str(k) + "\n")
k = k + 1
for elemGroup in auxElem:
for e in elemGroup:
mesh.write(str(1) + "\n")
mesh.write("\n")
mesh.write("*DIRICHLET CONDITIONS\n")
for i | |
δρεπάνι δρεπάνισμα δρεπανοκυττάρωση δρεπανοκύτταρο
δριμόνι δριμύτητα δριστέλα δρογογνωσία δρολάπι δρομάδα δρομάκι δρομάκος
δρομίσκος δρομολόγηση δρομολόγησις δρομολόγιο δρομολόγιον δρομόμετρο
δροσέρα δροσερότητα δροσιά δροσοπηγή δροσοστάλα δροσοσταλίδα δροσοσταλιά
δροσό δροσόπαγο δροσόπαγος δρουγγάριος δρυΐδης δρυάδες δρυμός δρυμώνας
δρυοκόπος δρυς δρυόπτερις δρωτάρι δρωτσίλα δρόγη δρόλαπας δρόμος δρόμωνας
δρόσισμα δρόσος δρύφακτο δρώμα δρώμενο δυάδα δυάρα δυάρι δυάς δυαδικότητα
δυνάμωμα δυνάστης δυναμίτης δυναμίτιδα δυναμική δυναμικό δυναμικόν δυναμικότης
δυναμισμός δυναμιστής δυναμιτάκι δυναμιτιστής δυναμογονία δυναμογράφος
δυναμοσύνολο δυναμό δυναμόμετρο δυναστεία δυνατόν δυνατότης δυνατότητα
δυνητικότητα δυοσμαρίνι δυσίδρωση δυσαισθησία δυσανάγνωση δυσαναλογία
δυσανασχέτησις δυσανασχέτιση δυσανεξία δυσαρέσκεια δυσαρέστηση δυσαρέστησις
δυσαρμονία δυσαυτονομία δυσβαρισμός δυσβασία δυσβουλία δυσγενεσία δυσγνωσία
δυσενδοκρινία δυσενσυναίσθηση δυσεντερία δυσεντερικός δυσηκοΐα δυσθανασία
δυσιδρωσία δυσκαμψία δυσκαταποσία δυσκινησία δυσκοιλιότης δυσκοιλιότητα
δυσκολία δυσκρασία δυσλειτουργία δυσλεξία δυσλιπιδαιμία δυσμένεια δυσμαί
δυσμνησία δυσμορφία δυσοσμία δυσουρία δυσπαρευνία δυσπεψία δυσπιστία δυσπλασία
δυσπροσαρμοστία δυσπροφερσιμότητα δυσπρόσιο δυσταξινόμηση δυστοκία δυστονία
δυστροπία δυστροφία δυστυχία δυστύχημα δυσυχρονισμός δυσφήμηση δυσφήμησις
δυσφαγία δυσφασία δυσφημία δυσφημία δυσφορία δυσφράδεια δυσφρασία δυσφωνία
δυσχρηστία δυσχρωμία δυσχρωματοψία δυσωδία δυτικισμός δυφίο δυφιοαπεικόνιση
δυφιοδιαφάνεια δυφιονιάδα δυφιοοκτάδα δυφιόρρευμα δυχατέρα δυϊκός δυϊσμός
δωδεκάγωνο δωδεκάδα δωδεκάεδρο δωδεκάθεο δωδεκάθεον δωδεκάμερο δωδεκάορτο
δωδεκάς δωδεκάωρο δωδεκαήμερο δωδεκαήμερον δωδεκαδάκτυλο δωδεκαδάχτυλο
δωδεκαδακτυλίτιδα δωδεκαδακτυλογαστρεκτομή δωδεκαδακτυλονηστιδοστομία
δωδεκαδακτυλοπηξία δωδεκαδακτυλοπυλωρεκτομή δωδεκαδακτυλοσκόπηση
δωδεκαδακτυλοτομία δωδεκαετία δωδεκανήσιος δωδεκαριά δωδεκατημόριο δωδεκαωρία
δωματιάκι δωματιάρα δωρήτρια δωρεά δωρεοδότης δωρεοδόχος δωρητής δωροδοκία
δωροδόκος δωροεπιταγή δωρολήπτης δωροληψία δωρόσημο δωσίλογος δωσιδικία
δόγης δόγμα δόκανο δόκανον δόκιμος δόκτορας δόκτωρ δόλιχος δόλος δόλωμα δόλων
δόμησις δόμος δόνα δόνηση δόνησις δόντι δόξα δόρυ δόση δόσιμο δόσιμον δόσις
δότρια δύναμη δύνη δύση δύσις δύσπνοια δύτης δύτρια δώμα δώρημα δώρο δώρον
είδος είδωλο είκοσι είλωτας είναι είρων είρωνας είσδυση είσοδος είσπλους
είσπραξις εαρινοποίηση εαροσύνη εαυτοσκοπία εαυτούλης εβαπορίτης εβδομάδα
εβδομήντα εβδομηκονταετία εβδομηκονταετηρίδα εβδομηκοντούτης εβδομηκοντούτις
εβδομηντάρα εβδομηντάρης εβδομηντάχρονη εβδομηντάχρονος εβδομηνταριά
εβενουργική εβενουργός εβενούργημα εβολουσιονισμός εβονίτης εβραΐστρια εβραία
εβραίος εβραιο-ισπανικά εβραιοφοβία εβραϊκά εβραϊκή εβραϊσμός εβραϊστής εγίρα
εγγενώς ανώνυμο εγγλέζα εγγλέζος εγγονή εγγονός εγγραμματισμός εγγραμματοσύνη
εγγραφή εγγραφοφυλάκιο εγγραφοφυλακείο εγγυητής εγγυοδοσία εγγυοδότης εγγόνα
εγγύηση εγγύτητα εγελιανισμός εγερσιμότητα εγερτήριο εγκέφαλος εγκαίνια
εγκαθίδρυση εγκαινίαση εγκαινιασμός εγκαλλώπισμα εγκαρίτης εγκαρδίωση
εγκαρτέρηση εγκατάλειψη εγκατάσταση εγκατάστατος εγκαταβίωση εγκαταστάτης
εγκεντρισμός εγκεφαλίτιδα εγκεφαλικό εγκεφαλικότητα εγκεφαλογράφημα
εγκεφαλολόγος εγκεφαλομυελίτιδα εγκεφαλοπάθεια εγκιβωτισμός εγκλεισμός
εγκληματικότητα εγκληματολογία εγκληματολόγος εγκλητήριο εγκλιμάτιση
εγκλιτικό εγκλωβισμός εγκοίλιο εγκοινωνισμός εγκοπή εγκράτεια εγκρέτα εγκρεμός
εγκυκλοπαιδικότητα εγκυκλοπαιδισμός εγκυκλοπαιδιστές εγκυμοσύνη εγκυρότητα
εγκωμιαστής εγκόλληση εγκόλπιο εγκόσμια εγκύκλιος εγκύστωση εγκώμια εγκώμιο
εγχάραξη εγχείρημα εγχείρηση εγχείριση εγχειρίδιο εγχειρηματοποίηση
εγχυματόζωο εγχυτρισμός εγωίσταρος εγωίστρια εγωισμός εγωιστής εγωισταράς
εγωκεντρισμός εγωλάτρης εγωλάτρις εγωλάτρισσα εγωλατρία εγωπάθεια εγωτιστής
εγώ εδάφιο εδαφικότητα εδαφοκάλυψη εδαφοκτησία εδαφολογία εδαφομηχανική
εδεσσαίος εδραίωση εδραίωσις εδωδιμοπωλείο εδωδιμοπωλείον εδωδιμοπώλης εδώδιμα
εδώλιον εθελοδουλία εθελοθυσία εθελοντής εθελοντισμός εθελοτυφλία εθελόντρια
εθισμός εθνάριο εθνάρχης εθναπόστολος εθναρχία εθνεγέρτης εθνεγερσία
εθνική εθνικίστρια εθνικισμός εθνικιστής εθνικοποίηση εθνικοσοσιαλίστρια
εθνικοσοσιαλιστής εθνικοφροσύνη εθνικόν εθνικός ύμνος εθνικότητα εθνισμός
εθνογράφος εθνογραφία εθνοκάθαρση εθνοκεντρισμός εθνοκράτος εθνοκρατία
εθνοκρατοκεντρισμός εθνολογία εθνομάρτυρας εθνομάρτυς εθνομεθοδολογία
εθνομηδενιστής εθνομουσικολογία εθνοπατέρας εθνοσυνέλευση εθνοσφαγέας
εθνοσωτήρας εθνοφοβία εθνοφονία εθνοφονιάς εθνοφρουρά εθνοφρουρός εθνοφυλακή
εθνοψυχιατρική εθνοψυχολογία εθνόσημο εθνότητα εθολογία ειδή ειδήμονας ειδήμων
ειδημοσύνη ειδησάριο ειδησεογράφος ειδησεογραφία ειδησεολογία ειδησούλα
ειδισμός ειδοί ειδογένεση ειδογονία ειδογράφημα ειδοποίηση ειδοποιητήριο
ειδωλολάτρης ειδωλολάτρισσα ειδωλολατρία ειδωλοσκόπιο ειδωλόθυτα ειδωνυμία
ειδύλλιο ειδώλιο εικασία εικαστικός εικονίδιο εικονίτσα εικονικοποίηση
εικονικότητα εικονισμός εικονογράφημα εικονογράφηση εικονογράφος εικονογραφία
εικονοκλασία εικονολάτρης εικονολήπτης εικονολήπτρια εικονολατρία εικονολογία
εικονομάχος εικονομήνυμα εικονομαχία εικονοσκόπιο εικονοστάσι εικονοστάσιο
εικονοσύμβολο εικονοτυπία εικονοχαρακτήρας εικονόγραμμα εικοσάδα εικοσάδραχμο
εικοσάευρο εικοσάλεπτο εικοσάλεπτο εικοσάρι εικοσάρικο εικοσάχρονος εικοσαετία
εικοσιένα εικοσιπεντάευρο εικοσιτετράωρο εικοσιτετράωρον εικοτολογία εικόνα
εικός εικών ειλεός ειλητάριο ειλητάριον ειλητό ειλικρίνεια ειμαρμένη ειρήνεμα
ειρήνη ειρεσιώνη ειρηνίστρια ειρηνευτής ειρηνικά ειρηνισμός ειρηνιστής
ειρηνοδικείο ειρηνοποιός ειρηνοφιλία ειρκτή ειρμός ειρωνεία εισήγηση
εισαγγελία εισαγωγέας εισαγωγή εισαγωγικά εισαγωγούλα εισακτέος εισβολέας
εισδοχή εισηγήτρια εισηγητής εισιτήριο εισιτήριον εισιτηριοαποφυγή
εισοδηματίας εισοδισμός εισπήδηση εισπίεση εισπνευστήρ εισπνευστήρας εισπνοή
εισπράκτωρ εισπράχτορας εισπρακτορίνα εισπρακτόρισσα εισροή εισφορά
εισφοροδιαφυγή εισχώρηση εισχώρησις εισόδημα εισόδια εισόρμηση εισόρμησις
εκάρ εκατομμυριοστό εκατομμυριούχα εκατομμυριούχος εκατομμύριο εκατοντάδα
εκατοντάδραχμον εκατοντάς εκατοντάχρονα εκατονταετία εκατονταετηρίδα
εκατονταρχία εκατοντούτης εκατοντούτις εκατοστάρα εκατοστάρης εκατοστάρι
εκατοσταριά εκατοστημόριο εκατοστό εκατοστόγραμμο εκατοστόμετρο εκατοστόμετρον
εκατοχρονίτισσα εκατό εκατόγραμμο εκατόλιτρο εκατόλιτρον εκατόμβη εκατόνταρχος
εκατόφυλλον εκβάθυνση εκβάθυνσις εκβίαση εκβίασις εκβαρβάρωση εκβαρβάρωσις
εκβιασμός εκβιαστής εκβιομηχάνιση εκβιομηχάνισις εκβιομηχανισμός εκβλάστημα
εκβλάστησις εκβολή εκβραχισμός εκγηπέδωση εκγλύφανο εκγλύφανον
εκγύμναση εκγύμνασις εκδάσωση εκδάσωσις εκδήλωση εκδήλωσις εκδίκαση εκδίκηση
εκδίπλωση εκδίωξη εκδίωξις εκδημοκρατισμός εκδημοτικισμός εκδικήτρια εκδικητής
εκδορά εκδορέας εκδορεύς εκδοροσφαγέας εκδοτήριο εκδοχέας εκδοχή εκδούλευση
εκδούλεψη εκδρομέας εκδρομή εκδρομισμός εκδυσόζωα εκδυτικισμός εκδόσεις
εκδότρια εκεχειρία εκζήτηση εκζήτησις εκθέτης εκθέτις εκθέτρια εκθήλυνση
εκθαμβωτικότητα εκθείαση εκθειάστρια εκθειασμός εκθειαστής εκθεμελίωση
εκθετήριο εκθρονισμός εκθρόνιση εκθρόνισις εκκένωση εκκένωσις εκκίνηση
εκκαθάριση εκκαθάρισις εκκαθαρίστρια εκκαθαριστής εκκαθαριστικό εκκαμίνευση
εκκεντρικότητα εκκεντρότης εκκεντρότητα εκκενωτής εκκινητήρας εκκινητής
εκκλησάρης εκκλησάρισσα εκκλησία εκκλησίασμα εκκλησίδιο εκκλησίτσα εκκλησιά
εκκλησιάρισσα εκκλησιάρχης εκκλησιασμός εκκλησιαστήριο εκκλησιαστικός
εκκλησούλα εκκοκκισμός εκκοκκιστήριο εκκοκκιστήριον εκκολαπτήριο εκκρεμές
εκκρεμότης εκκρεμότητα εκκόκκιση εκκόλαψη εκκόλαψις εκκόλπωμα εκκύκλημα
εκλέκτωρ εκλέπτυνση εκλέπτυνσις εκλέρ εκλαΐκευση εκλαΐκευσις εκλαμπρότης
εκλαμπτήρας εκλαμψία εκλατόμηση εκλαϊκευτής εκλαϊκεύτρια εκλειπτική
εκλεκτικισμός εκλεκτικιστής εκλεκτικότης εκλεκτικότητα εκλεκτισμός εκλεξιμότης
εκλιπάρηση εκλογέας εκλογές εκλογή εκλογίκευση εκλογιμότης εκλογιμότητα
εκλογοδικείον εκλογολογία εκλογολόγος εκλογομάγειρας εκλογομάγειρος
εκλογομαγειρείο εκμάθηση εκμάθησις εκμέκ εκμίσθωση εκμίσθωσις εκμαίευση
εκμαυλίστρια εκμαυλισμός εκμαυλιστής εκμετάλλευση εκμετάλλευσις εκμεταλλευτής
εκμηδένιση εκμηδένισις εκμηδενισμός εκμηδενιστής εκμηχάνιση εκμηχανισμός
εκμισθώτρια εκμοντερνισμός εκμυζητής εκμυστήρευση εκμύζηση εκνέφωμα εκναυλωτής
εκναύλωση εκνευρισμός εκνεφωτής εκπίεση εκπίεσμα εκπαίδευση εκπαίδευσις
εκπαιδευτήριον εκπαιδευτής εκπαιδευτικοί εκπαιδευτικός εκπαιδεύτρια
εκπαραθύρωσις εκπαρθένευση εκπαρθένευσις εκπατρισμός εκπειρατισμός εκπεσμός
εκπλήρωσις εκπλειστηρίασμα εκπλειστηριασμός εκπλειστηριαστής εκπνοή εκποίηση
εκπολιτισμός εκπομπή εκπορθητής εκπροσώπευση εκπροσώπηση εκπροσώπησις
εκπρόσωπος τύπου εκπτώσεις εκπυρήνιση εκπυρσοκρότηση εκπυρσοκρότησις
εκπωματιστήρας εκπόνηση εκπόνησις εκπόρευση εκπόρευσις εκπόρθηση εκπόρθησις
εκπόρνευσις εκπώμαστρον εκράν εκρίζωση εκρίζωσις εκρηκτικό εκρηκτικότης
εκροή εκσκαφέας εκσκαφή εκσκαφεύς εκσλαβισμός εκσλαυισμός εκσπερμάτιση
εκσπερμάτωσις εκσπερματισμός εκσπλαχνισμός εκσπρέσο εκστρατεία εκστόμιση
εκσυγχρονισμός εκσφαλμάτωση εκσφενδονισμός εκσφενδόνιση εκσφενδόνισις εκτάριο
εκτέλεση εκτέλεσις εκτίμηση εκτίμησις εκτίναξη εκτίναξις εκτακτοσυστολή
εκταμίευσις εκτατικότητα εκταφή εκτελεστής εκτελεστικό εκτελωνίστρια
εκτελωνιστής εκτελώνιση εκτελώνισις εκτεχνίκευση εκτιμήτρια εκτιμητής
εκτοκισμός εκτομή εκτομίας εκτοξευτήρας εκτοξευτής εκτοπία εκτοπισμός
εκτράχυνση εκτράχυνσις εκτραχηλισμός εκτροπέας εκτροπή εκτροφέας εκτροφή
εκτροφείον εκτροχίαση εκτροχίασις εκτροχιασμός εκτροχιαστής εκτυπωτής εκτόνωση
εκτόξευσις εκτόπιση εκτόπισις εκτόπισμα εκτόπλασμα εκτύλιξη εκτύλιξις εκτύπωση
εκφασισμός εκφαυλισμός εκφοβισμός εκφοβιστής εκφορά εκφορτωτής εκφραστής
εκφυλισμός εκφυλόφατσα εκφωνήτρια εκφωνητής εκφόβηση εκφόβησις εκφόβιση
εκφόρτωσις εκφύλιση εκφύλισις εκφώνηση εκφώνησις εκχέρσωμα εκχέρσωση
εκχείλιση εκχείλισις εκχιονισμός εκχιονιστήρας εκχρηματισμός εκχριστιανισμός
εκχυλιστήρας εκχωμάτωση εκχωμάτωσις εκχωρήτρια εκχωρητήριο εκχωρητής εκχύλιση
εκχύλισμα εκχύμωση εκχύμωσις εκχώρηση εκχώρησις ελάτη ελάτι ελάττωμα ελάττωση
ελάφι ελάφρυνση ελάφρυνσις ελάφρωμα ελάχιστο κοινό πολλαπλάσιο ελέγκτρια
ελέφας ελίτ ελίτσα ελίφι ελαία ελαιογραφία ελαιοδιαχωριστήρας ελαιοδοχείο
ελαιοπαραγωγή ελαιοπαραγωγός ελαιοπιεστήριο ελαιοπολτός ελαιοπυρήνας
ελαιοτριβείο ελαιουργία ελαιουργείο ελαιουργός ελαιοφαγία ελαιοφυτεία
ελαιοχρωματιστής ελαιόδεντρο ελαιόθερμο ελαιόκαρπος ελαιόλαδο ελαιόμετρο
ελαιών ελαιώνας ελαμικά ελασματοποίηση ελασματοποίησις ελασματουργείο
ελαστίνη ελαστικό ελαστικότης ελαστικότητα ελαστογραφία ελατήριο
ελαττωματικότητα ελατόμελο ελατόπισσα ελαφάκι ελαφίδες ελαφίνα ελαφηβολία
ελαφοκρέας ελαφονησιώτης ελαφράδα ελαφρόνοια ελαφρόπετρα ελαφρότης ελαφρότητα
ελαφόπουλο ελαχιστοποίηση ελαχιστοποίησις ελβετίδα ελβετογερμανικά ελβετός
ελγίνεια ελεήτρα ελεήτρια ελεγεία ελεγείο ελεγκτήριο ελεγκτής ελεγχοσυνάρτηση
ελεεινολόγηση ελεεινολόγησις ελεεινότης ελεεινότητα ελεημοσύνη ελεητής
ελεμενταρισμός ελευθέρωση ελευθέρωσις ελευθερία ελευθεριά ελευθεριότης
ελευθεροκοινωνία ελευθεροπλοΐα ελευθεροστομία ελευθεροτέκτονας
ελευθεροτυπία ελευθεροφροσύνη ελευθερωτής ελευθερώτρια ελευσινιώτης ελεφαντάδα
ελεφαντίαση ελεφαντίασις ελεφαντίνα ελεφαντοκόκαλο ελεφαντοστούν ελεφαντοστό
ελεφαντόδοντο ελεφαντόδους ελεύθερο ελεύθερος ελεύθερος σκοπευτής ελιά ελιγμός
ελικοβακτηρίδιο ελικοδρόμιο ελικοπτερατζής ελικοπτεροφόρο ελικοπτερόσχημος
ελικόπτερο ελικόρρευμα ελιξήριο ελιξίριο ελιοκούκουτσο ελιτισμός ελιόδεντρο
ελκυσμός ελκυστήρας ελκυστής ελκυστικότητα ελλέβορος ελλαδίτης ελλανοδίκης
ελλειπτικότητα ελληνάδικο ελληνάρας ελληνίδα ελληνίς ελληναράς ελληνικά
ελληνική ελληνικούρα ελληνικό ελληνικός ελληνικότητα ελληνισμός ελληνιστής
ελληνοκεντρισμός ελληνολάτρης ελληνολατρία ελληνομάθεια ελληνομάχος
ελληνοπούλα ελληνοπρέπεια ελληνορθοδοξία ελληνοφοβία ελληνόπουλο ελλιμένιση
ελλογιμότητα ελλύχνιον ελμινθίαση ελμινθίασις ελονοσία ελπίδα ελπίς εμίρης
εμβάθυνσις εμβάπτιση εμβάς εμβέλεια εμβαδομέτρηση εμβαδομέτρησις εμβαδό
εμβαδόν εμβαπτισμός εμβατήριο εμβατίκια εμβληματολογία εμβοή εμβολή
εμβολιοθεραπεία εμβολιοθεραπευτική εμβολισμός εμβρίθεια εμβροντησία εμβροχή
εμβρυογονία εμβρυοθυλάκιο εμβρυοθύλακος εμβρυοκαρδία εμβρυοκτόνία εμβρυολογία
εμβρυοπλαστία εμβρυοσκόπηση εμβρυοτομή εμβρυοτόμος εμβρυουλκός εμβρυωρία
εμβόλιμο εμβόλιο εμετοδοχείο εμετοκαθαρτικά εμετολογία εμετός εμιγκρέ εμιγκρές
εμμέλεια εμμηναγωγό εμμηνοληξία εμμηνοπαυσία εμμηνορραγία εμμηνορρυσία
εμμηνόπαυση εμμηνόρροια εμμονή εμμονοκρατία εμορφάδα εμορφιά εμού εμπάθεια
εμπέδωση εμπέτασμα εμπαίκτης εμπαίκτρια εμπαιγμός εμπειρία εμπειρισμός
εμπειρογνώμονας εμπειρογνώμων εμπειροτεχνία εμπιστοσύνη εμπλεκόμενος εμπλοκή
εμπνευστής εμπνεύστρια εμπνοή εμποδίστρια εμποδισμός εμποδιστής εμποράκος
εμπορείο εμπορείον εμπορευματογνωσία εμπορευματοκιβωτιοφόρο εμπορευματοκιβώτιο
εμπορευματοποίηση εμπορευόμενος εμπορικάκι εμπορικολόγος εμπορικοποίηση
εμπορικόν εμπορικότης εμπορικότητα εμποριολογία εμποροδικείο εμποροκιβώτιο
εμποροκρατισμός εμπορομεσίτης εμποροπάζαρο εμποροπανήγυρη εμποροπανήγυρις
εμποροράπτης εμποροράφτης εμποροραφείο εμπορορράπτης εμπορορράφτης
εμποροϋπάλληλος εμποτισμός εμπρήστρια εμπρεσιονισμός εμπρεσιονιστής εμπρησμός
εμπριμέ εμπροσθοφυλακή εμπτυσμός εμπυρομαντεία εμπόδιο εμπόδιση εμπόδισμα
εμπόριο εμπόριον εμπόρισσα εμπότιση εμπότισις εμπύημα εμπύρευμα εμπύρωση
εμφάνισις εμφανιστήριο εμφανιστής εμφιάλωση εμφιάλωσις εμφιλοχωρώ εμφυτοκρατία
εμφύσημα εμφύσηση εμφύσησις εμφύτευμα εμφύτευση εμφύτευσις εμψυχωτής
εμψύχωση εμψύχωσις εμότζι ενάργεια ενάρθρωση ενάσκηση ενέδρα ενέργεια ενέργημα
ενέχυρον ενήλικας ενήλικος ενίδρυση ενίδρυσις ενίσχυση ενίσχυσις εναέριος
εναερίτης εναιώρημα εναλλάκτης εναλλαγή εναλφαβητισμός ενανθράκωση
ενανθρώπηση ενανθρώπιση ενανθρώπισις εναντίωση εναντιομορφία εναντιομορφισμός
εναντιότης εναντιότητα εναποθήκευση εναπόθεση εναρμόνιση ενασχόληση ενατένιση
ενδαγγειοχειρουργός ενδαρτηρεκτομή ενδείκτης ενδεικτικό ενδεκάγωνο ενδεκάδα
ενδεχομενικότητα ενδεχόμενο ενδημία ενδημικότητα ενδημισμός ενδημοεπιδημία
ενδιαφέρον ενδιαφέρουσα ενδοέκκριση ενδοβένθος ενδογένεια ενδογένεση ενδογαμία
ενδοδαπέδιο ενδοδοντία ενδοδοντιστής ενδοεπικοινωνία ενδοθήλιο ενδοθήλιον
ενδοθύλακας ενδοθύλακος ενδοιασμός ενδοκάρδιο ενδοκάρδιον ενδοκάρπιο
ενδοκαρδίτιδα ενδοκαρδίτις ενδοκρινικά ενδοκρινολογία ενδοκρινολόγος
ενδομήτριον ενδομεταφορά ενδομητρίτιδα ενδομητρίωση ενδοουρολογία
ενδοσκοπία | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import os
import re
import signal
import subprocess
import sys
def wait_for_output(proc):
    """Wait for a process to finish and return its decoded stdout and stderr.

    Args:
        proc: A process started by subprocess.Popen (with stdout/stderr
            captured via PIPE for the corresponding element to be non-None).

    Returns:
        A tuple (stdout, stderr) of the process's output decoded as ASCII
        strings. An element is None if that stream was not captured.

    Raises:
        Exception: If a captured stream cannot be decoded as ASCII.
    """
    stdout_data, stderr_data = proc.communicate()
    # The two streams previously had copy-pasted decode blocks; they now share
    # one helper so the error-reporting behavior cannot drift apart.
    stdout_data = _decode_stream(stdout_data, "stdout_data")
    stderr_data = _decode_stream(stderr_data, "stderr_data")
    return stdout_data, stderr_data


def _decode_stream(data, stream_name):
    """Decode one captured output stream as ASCII, passing None through.

    NOTE(rkn): The explicit try/except is here because a UnicodeDecodeError
    was once observed and we want to print more information if that happens
    again.
    """
    if data is None:
        return None
    try:
        return data.decode("ascii")
    except UnicodeDecodeError:
        raise Exception("Failed to decode {}:".format(stream_name), data)
class DockerRunner(object):
    """This class manages the logistics of running multiple nodes in Docker.

    This class is used for starting multiple Ray nodes within Docker, stopping
    Ray, running a workload, and determining the success or failure of the
    workload.

    Attributes:
        head_container_id: The ID of the docker container that runs the head
            node.
        worker_container_ids: A list of the docker container IDs of the Ray
            worker nodes.
        head_container_ip: The IP address of the docker container that runs the
            head node.
    """

    def __init__(self):
        """Initialize the DockerRunner."""
        self.head_container_id = None
        self.worker_container_ids = []
        self.head_container_ip = None

    def _get_container_id(self, stdout_data):
        """Parse the docker container ID from stdout_data.

        Args:
            stdout_data: This should be a string with the standard output of a
                call to a docker command.

        Returns:
            The container ID of the docker container (a 64-digit hex string),
            or None if the output does not start with one.
        """
        # Raw string so the backslash escape reaches the regex engine intact.
        p = re.compile(r"([0-9a-f]{64})\n")
        m = p.match(stdout_data)
        if m is None:
            return None
        else:
            return m.group(1)

    def _get_container_ip(self, container_id):
        """Get the IP address of a specific docker container.

        Args:
            container_id: The docker container ID of the relevant docker
                container.

        Returns:
            The IP address of the container.

        Raises:
            RuntimeError: If no IP address is found in the `docker inspect`
                output.
        """
        proc = subprocess.Popen(["docker", "inspect",
                                 "--format={{.NetworkSettings.Networks.bridge"
                                 ".IPAddress}}",
                                 container_id],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout_data, _ = wait_for_output(proc)
        # Raw string: "\." must be an escaped dot for the regex engine, not a
        # (deprecated) Python string escape.
        p = re.compile(r"([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})")
        m = p.match(stdout_data)
        if m is None:
            raise RuntimeError("Container IP not found.")
        else:
            return m.group(1)

    def _start_head_node(self, docker_image, mem_size, shm_size,
                         num_redis_shards, num_cpus, num_gpus,
                         development_mode):
        """Start the Ray head node inside a docker container."""
        mem_arg = ["--memory=" + mem_size] if mem_size else []
        shm_arg = ["--shm-size=" + shm_size] if shm_size else []
        volume_arg = (["-v",
                       "{}:{}".format(os.path.dirname(
                           os.path.realpath(__file__)),
                           "/ray/test/jenkins_tests")]
                      if development_mode else [])
        command = (["docker", "run", "-d"] + mem_arg + shm_arg + volume_arg +
                   [docker_image, "ray", "start", "--head", "--block",
                    "--redis-port=6379",
                    "--num-redis-shards={}".format(num_redis_shards),
                    "--num-cpus={}".format(num_cpus),
                    "--num-gpus={}".format(num_gpus),
                    "--no-ui"])
        print("Starting head node with command:{}".format(command))
        proc = subprocess.Popen(command,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout_data, _ = wait_for_output(proc)
        container_id = self._get_container_id(stdout_data)
        if container_id is None:
            raise RuntimeError("Failed to find container ID.")
        self.head_container_id = container_id
        self.head_container_ip = self._get_container_ip(container_id)

    def _start_worker_node(self, docker_image, mem_size, shm_size, num_cpus,
                           num_gpus, development_mode):
        """Start a Ray worker node inside a docker container."""
        mem_arg = ["--memory=" + mem_size] if mem_size else []
        shm_arg = ["--shm-size=" + shm_size] if shm_size else []
        volume_arg = (["-v",
                       "{}:{}".format(os.path.dirname(
                           os.path.realpath(__file__)),
                           "/ray/test/jenkins_tests")]
                      if development_mode else [])
        # BUG FIX: "--shm-size=" + shm_size was previously appended a second
        # time here, which duplicated the flag and raised a TypeError whenever
        # shm_size was None; shm_arg above already handles it (and its None
        # case), matching _start_head_node.
        command = (["docker", "run", "-d"] + mem_arg + shm_arg + volume_arg +
                   [docker_image, "ray", "start", "--block",
                    "--redis-address={:s}:6379".format(self.head_container_ip),
                    "--num-cpus={}".format(num_cpus),
                    "--num-gpus={}".format(num_gpus)])
        print("Starting worker node with command:{}".format(command))
        proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout_data, _ = wait_for_output(proc)
        container_id = self._get_container_id(stdout_data)
        if container_id is None:
            raise RuntimeError("Failed to find container id")
        self.worker_container_ids.append(container_id)

    def start_ray(self, docker_image=None, mem_size=None, shm_size=None,
                  num_nodes=None, num_redis_shards=1, num_cpus=None,
                  num_gpus=None, development_mode=None):
        """Start a Ray cluster within docker.

        This starts one docker container running the head node and
        num_nodes - 1 docker containers running the Ray worker nodes.

        Args:
            docker_image: The docker image to use for all of the nodes.
            mem_size: The amount of memory to start each docker container with.
                This will be passed into `docker run` as the --memory flag. If
                this is None, then no --memory flag will be used.
            shm_size: The amount of shared memory to start each docker
                container with. This will be passed into `docker run` as the
                `--shm-size` flag. If this is None, no flag is passed.
            num_nodes: The number of nodes to use in the cluster (this counts
                the head node as well).
            num_redis_shards: The number of Redis shards to use on the head
                node.
            num_cpus: A list of the number of CPUs to start each node with.
            num_gpus: A list of the number of GPUs to start each node with.
            development_mode: True if you want to mount the local copy of
                test/jenkins_test on the head node so we can avoid rebuilding
                docker images during development.
        """
        assert len(num_cpus) == num_nodes
        assert len(num_gpus) == num_nodes

        # Launch the head node.
        self._start_head_node(docker_image, mem_size, shm_size,
                              num_redis_shards, num_cpus[0], num_gpus[0],
                              development_mode)
        # Start the worker nodes.
        for i in range(num_nodes - 1):
            self._start_worker_node(docker_image, mem_size, shm_size,
                                    num_cpus[1 + i], num_gpus[1 + i],
                                    development_mode)

    def _stop_node(self, container_id):
        """Stop a node in the Ray cluster.

        Kills and then force-removes the container, verifying each docker
        command by matching the container ID it echoes back.

        Raises:
            Exception: If either the kill or the removal does not report the
                expected container ID.
        """
        proc = subprocess.Popen(["docker", "kill", container_id],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout_data, _ = wait_for_output(proc)
        stopped_container_id = self._get_container_id(stdout_data)
        if container_id != stopped_container_id:
            raise Exception("Failed to stop container {}."
                            .format(container_id))

        proc = subprocess.Popen(["docker", "rm", "-f", container_id],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout_data, _ = wait_for_output(proc)
        removed_container_id = self._get_container_id(stdout_data)
        if container_id != removed_container_id:
            raise Exception("Failed to remove container {}."
                            .format(container_id))

        print("stop_node", {"container_id": container_id,
                            "is_head": container_id == self.head_container_id})

    def stop_ray(self):
        """Stop the Ray cluster.

        Attempts to stop every container (head first, then workers) even if
        some of them fail to stop.

        Returns:
            True if all containers were stopped successfully, False otherwise.
        """
        success = True

        try:
            self._stop_node(self.head_container_id)
        except Exception:
            success = False

        for container_id in self.worker_container_ids:
            try:
                self._stop_node(container_id)
            except Exception:
                success = False

        return success

    def run_test(self, test_script, num_drivers, driver_locations=None,
                 timeout_seconds=600):
        """Run a test script.

        Run a test using the Ray cluster.

        Args:
            test_script: The test script to run.
            num_drivers: The number of copies of the test script to run.
            driver_locations: A list of the indices of the containers that the
                different copies of the test script should be run on. If this
                is None, then the containers will be chosen randomly.
            timeout_seconds: The amount of time in seconds to wait before
                considering the test to have failed. When the timeout expires,
                this will cause this function to raise an exception.

        Returns:
            A list of dictionaries (one per driver) with the keys "success"
            and "return_code" describing each driver process.

        Raises:
            Exception: An exception is raised if the timeout expires.
        """
        all_container_ids = ([self.head_container_id] +
                             self.worker_container_ids)
        if driver_locations is None:
            driver_locations = [np.random.randint(0, len(all_container_ids))
                                for _ in range(num_drivers)]

        # Define a signal handler and set an alarm to go off in
        # timeout_seconds.
        def handler(signum, frame):
            raise RuntimeError("This test timed out after {} seconds."
                               .format(timeout_seconds))

        signal.signal(signal.SIGALRM, handler)
        signal.alarm(timeout_seconds)

        # Start the different drivers.
        driver_processes = []
        for i in range(len(driver_locations)):
            # Get the container ID to run the ith driver in.
            container_id = all_container_ids[driver_locations[i]]
            command = ["docker", "exec", container_id, "/bin/bash", "-c",
                       ("RAY_REDIS_ADDRESS={}:6379 RAY_DRIVER_INDEX={} python "
                        "{}".format(self.head_container_ip, i, test_script))]
            # BUG FIX: this previously printed only test_script while claiming
            # to print the command.
            print("Starting driver with command {}.".format(command))
            # Start the driver.
            p = subprocess.Popen(command, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            driver_processes.append(p)

        # Wait for the drivers to finish.
        results = []
        for p in driver_processes:
            stdout_data, stderr_data = wait_for_output(p)
            print("STDOUT:")
            print(stdout_data)
            print("STDERR:")
            print(stderr_data)
            results.append({"success": p.returncode == 0,
                            "return_code": p.returncode})

        # Disable the alarm.
        signal.alarm(0)

        return results
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run multinode tests in Docker.")
parser.add_argument("--docker-image", default="ray-project/deploy",
help="docker image")
parser.add_argument("--mem-size", help="memory size")
parser.add_argument("--shm-size", default="1G", help="shared memory size")
parser.add_argument("--num-nodes", default=1, type=int,
help="number of nodes to use in the cluster")
parser.add_argument("--num-redis-shards", default=1, type=int,
help=("the number of Redis shards to start on the "
"head node"))
parser.add_argument("--num-cpus", type=str,
help=("a comma separated list of values representing "
"the number of CPUs to start each node with"))
parser.add_argument("--num-gpus", type=str,
help=("a comma separated list of values representing "
"the number of GPUs to start each node with"))
parser.add_argument("--num-drivers", default=1, type=int,
help="number of drivers to run")
parser.add_argument("--driver-locations", type=str,
help=("a comma separated list of indices of the "
"containers to run the drivers in"))
parser.add_argument("--test-script", required=True, help="test script")
parser.add_argument("--development-mode", action="store_true",
help="use local copies of the test scripts")
args = parser.parse_args()
# Parse the number of CPUs and GPUs to use for each worker.
num_nodes = args.num_nodes
num_cpus = ([int(i) for i in args.num_cpus.split(",")]
if | |
self.virtual_size is not None:
_dict['VirtualSize'] = self.virtual_size
if hasattr(self, 'vulnerability_count') and self.vulnerability_count is not None:
_dict['VulnerabilityCount'] = self.vulnerability_count
if hasattr(self, 'vulnerable') and self.vulnerable is not None:
_dict['Vulnerable'] = self.vulnerable
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this RemoteAPIImage object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'RemoteAPIImage') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'RemoteAPIImage') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class RestoreResult():
    """
    The result of restoring tags for a digest. In a successful request the digest is
    always restored, and zero or more of its tags may be restored.

    :attr List[str] successful: (optional) Successful is a list of tags that were
          restored.
    :attr List[str] unsuccessful: (optional) Unsuccessful is a list of tags that
          were not restored because of a conflict.
    """

    def __init__(self,
                 *,
                 successful: List[str] = None,
                 unsuccessful: List[str] = None) -> None:
        """
        Initialize a RestoreResult object.

        :param List[str] successful: (optional) Successful is a list of tags that
               were restored.
        :param List[str] unsuccessful: (optional) Unsuccessful is a list of tags
               that were not restored because of a conflict.
        """
        self.successful = successful
        self.unsuccessful = unsuccessful

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RestoreResult':
        """Initialize a RestoreResult object from a json dictionary."""
        kwargs = {}
        # JSON keys and model attributes share the same names for this model.
        for key in ('successful', 'unsuccessful'):
            if key in _dict:
                kwargs[key] = _dict.get(key)
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RestoreResult object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Only attributes that exist and are not None are serialized.
        for key in ('successful', 'unsuccessful'):
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RestoreResult object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RestoreResult') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'RestoreResult') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        equal = self == other
        return not equal
class RetentionPolicy():
    """
    A document that contains the image retention settings for a namespace.

    :attr int images_per_repo: (optional) Determines how many images will be
          retained for each repository when the retention policy is executed. The value -1
          denotes 'Unlimited' (all images are retained).
    :attr str namespace: The namespace to which the retention policy is attached.
    :attr bool retain_untagged: (optional) Determines if untagged images are
          retained when executing the retention policy. This is false by default meaning
          untagged images will be deleted when the policy is executed.
    """

    def __init__(self,
                 namespace: str,
                 *,
                 images_per_repo: int = None,
                 retain_untagged: bool = None) -> None:
        """
        Initialize a RetentionPolicy object.

        :param str namespace: The namespace to which the retention policy is
               attached.
        :param int images_per_repo: (optional) Determines how many images will be
               retained for each repository when the retention policy is executed. The
               value -1 denotes 'Unlimited' (all images are retained).
        :param bool retain_untagged: (optional) Determines if untagged images are
               retained when executing the retention policy. This is false by default
               meaning untagged images will be deleted when the policy is executed.
        """
        self.images_per_repo = images_per_repo
        self.namespace = namespace
        self.retain_untagged = retain_untagged

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RetentionPolicy':
        """Initialize a RetentionPolicy object from a json dictionary."""
        # 'namespace' is the only required property of this model.
        if 'namespace' not in _dict:
            raise ValueError('Required property \'namespace\' not present in RetentionPolicy JSON')
        args = {'namespace': _dict.get('namespace')}
        for key in ('images_per_repo', 'retain_untagged'):
            if key in _dict:
                args[key] = _dict.get(key)
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RetentionPolicy object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Only attributes that exist and are not None are serialized.
        for key in ('images_per_repo', 'namespace', 'retain_untagged'):
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RetentionPolicy object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RetentionPolicy') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'RetentionPolicy') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        equal = self == other
        return not equal
class RootFS():
    """
    RootFS contains information about the root filesystem of a container image.

    :attr str base_layer: (optional) Descriptor for the base layer in the image.
    :attr List[str] layers: (optional) Descriptors for each layer in the image.
    :attr str type: (optional) The type of filesystem.
    """

    def __init__(self,
                 *,
                 base_layer: str = None,
                 layers: List[str] = None,
                 type: str = None) -> None:
        """
        Initialize a RootFS object.

        :param str base_layer: (optional) Descriptor for the base layer in the
               image.
        :param List[str] layers: (optional) Descriptors for each layer in the
               image.
        :param str type: (optional) The type of filesystem.
        """
        self.base_layer = base_layer
        self.layers = layers
        self.type = type

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RootFS':
        """Initialize a RootFS object from a json dictionary."""
        args = {}
        # The JSON document uses PascalCase keys; the model uses snake_case.
        for json_key, attr in (('BaseLayer', 'base_layer'),
                               ('Layers', 'layers'),
                               ('Type', 'type')):
            if json_key in _dict:
                args[attr] = _dict.get(json_key)
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RootFS object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Only attributes that exist and are not None are serialized.
        for attr, json_key in (('base_layer', 'BaseLayer'),
                               ('layers', 'Layers'),
                               ('type', 'Type')):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[json_key] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RootFS object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RootFS') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'RootFS') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        equal = self == other
        return not equal
class Trash():
"""
Details of the tags and days until expiry.
:attr int days_until_expiry: (optional)
:attr List[str] tags: (optional)
"""
def __init__(self,
*,
days_until_expiry: int = None,
tags: List[str] = None) -> None:
"""
Initialize a Trash object.
:param int days_until_expiry: (optional)
:param List[str] tags: (optional)
"""
self.days_until_expiry = days_until_expiry
self.tags = tags
@classmethod
def from_dict(cls, _dict: Dict) -> 'Trash':
"""Initialize a Trash object from a json dictionary."""
args = {}
if 'daysUntilExpiry' in _dict:
args['days_until_expiry'] = _dict.get('daysUntilExpiry')
if 'tags' in _dict:
args['tags'] = _dict.get('tags')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a Trash object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'days_until_expiry') and self.days_until_expiry is not None:
_dict['daysUntilExpiry'] = self.days_until_expiry
if hasattr(self, 'tags') and self.tags is not None:
_dict['tags'] = self.tags
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this Trash object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'Trash') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'Trash') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class VAReport():
"""
The VA Report for a given image.
:attr int configuration_issue_count: (optional) Number of configuration issues
in the image.
:attr int exempt_issue_count: (optional) | |
not need_frontports:
return
all_rearports = {str(item): item for item in nb.dcim.rear_port_templates.filter(devicetype_id=deviceType)}
for port in need_frontports:
try:
rpGet = all_rearports[port["rear_port"]]
port["rear_port"] = rpGet.id
except KeyError:
logger.debug(f'Could not find Rear Port for Front Port: {port["name"]} - ' + f'{port["type"]} - {deviceType}')
created = False
count = 0
while created == False and count < 3:
try:
fpSuccess = nb.dcim.front_port_templates.create(need_frontports)
for fp in fpSuccess:
logger.debug(f"Front Port Created: {fp.name} - " + f"{fp.type} - {fp.device_type.id} - " + f"{fp.id}")
# counter.update({'updated': 1})
created = True
count = 3
except Exception as e:
logger.debug(e.error)
created = False
count = count + 1
sleep(0.5 * count)
# modified/sourced from from: https://github.com/minitriga/Netbox-Device-Type-Library-Import
def createRearPorts(self, rearports, deviceType, nb):
all_rearports = {str(item): item for item in nb.dcim.rear_port_templates.filter(devicetype_id=deviceType)}
need_rearports = []
for rearport in rearports:
try:
rpGet = all_rearports[rearport["name"]]
logger.debug(f"Rear Port Template Exists: {rpGet.name} - {rpGet.type}" + f" - {rpGet.device_type.id} - {rpGet.id}")
except KeyError:
rearport["device_type"] = deviceType
need_rearports.append(rearport)
if not need_rearports:
return
created = False
count = 0
while created == False and count < 3:
try:
rpSuccess = nb.dcim.rear_port_templates.create(need_rearports)
for rp in rpSuccess:
logger.debug(f"Rear Port Created: {rp.name} - {rp.type}" + f" - {rp.device_type.id} - {rp.id}")
# counter.update({'updated': 1})
created = True
count = 3
except Exception as e:
logger.debug(e.error)
created = False
count = count + 1
sleep(0.5 * count)
# modified/sourced from from: https://github.com/minitriga/Netbox-Device-Type-Library-Import
def createDeviceBays(self, devicebays, deviceType, nb):
all_devicebays = {str(item): item for item in nb.dcim.device_bay_templates.filter(devicetype_id=deviceType)}
need_devicebays = []
for devicebay in devicebays:
try:
dbGet = all_devicebays[devicebay["name"]]
logger.debug(f"Device Bay Template Exists: {dbGet.name} - " + f"{dbGet.device_type.id} - {dbGet.id}")
except KeyError:
devicebay["device_type"] = deviceType
need_devicebays.append(devicebay)
if not need_devicebays:
return
created = False
count = 0
while created == False and count < 3:
try:
dbSuccess = nb.dcim.device_bay_templates.create(need_devicebays)
for db in dbSuccess:
logger.debug(f"Device Bay Created: {db.name} - " + f"{db.device_type.id} - {db.id}")
# counter.update({'updated': 1})
created = True
count = 3
except Exception as e:
logger.debug(e.error)
created = False
count = count + 1
sleep(0.5 * count)
# modified/sourced from from: https://github.com/minitriga/Netbox-Device-Type-Library-Import
def createPowerOutlets(self, poweroutlets, deviceType, nb):
all_poweroutlets = {str(item): item for item in nb.dcim.power_outlet_templates.filter(devicetype_id=deviceType)}
need_poweroutlets = []
for poweroutlet in poweroutlets:
try:
poGet = all_poweroutlets[poweroutlet["name"]]
logger.debug(f"Power Outlet Template Exists: {poGet.name} - " + f"{poGet.type} - {poGet.device_type.id} - {poGet.id}")
except KeyError:
poweroutlet["device_type"] = deviceType
need_poweroutlets.append(poweroutlet)
if not need_poweroutlets:
return
all_power_ports = {str(item): item for item in nb.dcim.power_port_templates.filter(devicetype_id=deviceType)}
for outlet in need_poweroutlets:
try:
ppGet = all_power_ports[outlet["power_port"]]
outlet["power_port"] = ppGet.id
except KeyError:
pass
created = False
count = 0
while created == False and count < 3:
try:
poSuccess = nb.dcim.power_outlet_templates.create(need_poweroutlets)
for po in poSuccess:
logger.debug(f"Power Outlet Created: {po.name} - " + f"{po.type} - {po.device_type.id} - " + f"{po.id}")
# counter.update({'updated': 1})
created = True
count = 3
except Exception as e:
logger.debug(e.error)
created = False
count = count + 1
sleep(0.5 * count)
# modified/sourced from from: https://github.com/minitriga/Netbox-Device-Type-Library-Import
def createDeviceTypes(self, deviceTypes, nb=None):
    """Ensure each device type exists in NetBox, then sync its component
    templates (interfaces, ports, outlets, bays, ...).

    :param deviceTypes: iterable of device-type dicts (device-type-library
        style); each must contain at least a "model" key.
    :param nb: optional pynetbox API handle. Defaults to ``self.py_netbox``.
    """
    # BUG FIX: the nb parameter used to be unconditionally overwritten with
    # self.py_netbox, silently ignoring a caller-supplied handle.
    if nb is None:
        nb = self.py_netbox
    all_device_types = {str(item): item for item in nb.dcim.device_types.all()}
    for deviceType in deviceTypes:
        try:
            dt = all_device_types[deviceType["model"]]
            logger.debug(f"Device Type Exists: {dt.manufacturer.name} - " + f"{dt.model} - {dt.id}")
        except KeyError:
            try:
                dt = nb.dcim.device_types.create(deviceType)
                # counter.update({'added': 1})
                logger.debug(f"Device Type Created: {dt.manufacturer.name} - " + f"{dt.model} - {dt.id}")
            except Exception as e:
                # NOTE(review): if creation fails, dt stays unbound and the
                # component sync below raises NameError — pre-existing
                # behavior, left unchanged.
                logger.debug(e.error)
        # Sync every component-template section present in the definition.
        # Both "power-ports" and the singular "power-port" spelling occur
        # in the library data, so both are handled.
        if "interfaces" in deviceType:
            logger.debug("interfaces")
            self.createInterfaces(deviceType["interfaces"], dt.id, nb)
        if "power-ports" in deviceType:
            logger.debug("power-ports")
            self.createPowerPorts(deviceType["power-ports"], dt.id, nb)
        if "power-port" in deviceType:
            logger.debug("power-port")
            self.createPowerPorts(deviceType["power-port"], dt.id, nb)
        if "console-ports" in deviceType:
            logger.debug("console-port")
            self.createConsolePorts(deviceType["console-ports"], dt.id, nb)
        if "power-outlets" in deviceType:
            logger.debug("power-outlets")
            self.createPowerOutlets(deviceType["power-outlets"], dt.id, nb)
        if "console-server-ports" in deviceType:
            logger.debug("console-server-ports")
            self.createConsoleServerPorts(deviceType["console-server-ports"], dt.id, nb)
        if "rear-ports" in deviceType:
            logger.debug("rear-ports")
            self.createRearPorts(deviceType["rear-ports"], dt.id, nb)
        if "front-ports" in deviceType:
            logger.debug("front-ports")
            self.createFrontPorts(deviceType["front-ports"], dt.id, nb)
        if "device-bays" in deviceType:
            logger.debug("device-bays")
            self.createDeviceBays(deviceType["device-bays"], dt.id, nb)
def change_attrib_type(self, attrib):
    """Map a source attribute type name onto a NetBox custom-field type.

    Numeric and string-ish types collapse to "text", "bool" becomes
    "boolean"; anything unrecognized is passed through unchanged.
    """
    type_map = {
        "uint": "text",
        "int": "text",
        "float": "text",
        "bool": "boolean",
        "string": "text",
        "dict": "text",
    }
    return type_map.get(attrib, attrib)
def cleanup_attrib_value(self, attrib_val, attrib_type):
    """Normalize a raw attribute value for storage in a custom field.

    Booleans are coerced with bool(), unix timestamps of "date" fields are
    rendered as YYYY-MM-DD, and everything else (numeric, string, dict,
    unknown types) is stringified.
    """
    if attrib_type == "bool":
        return bool(attrib_val)
    if attrib_type == "date":
        # value arrives as a unix timestamp
        stamp = datetime.datetime.fromtimestamp(int(attrib_val))
        return stamp.strftime("%Y-%m-%d")
    # uint / int / float / string / dict / text and any unknown type
    return str(attrib_val)
def createCustomFields(self, attributes):
    """Ensure a NetBox custom field exists for every source attribute.

    Each attribute's original name is kept as the field label, while the
    field name is a slugified version of it. Existing fields whose stored
    name does not match the slug are updated in place. Missing fields are
    created and attached to (essentially) every NetBox content type.

    :param attributes: iterable of dicts with at least "name" and "type";
        the dicts are MUTATED in place (label/name/type/content_types).
    """
    logger.debug(attributes)
    nb = self.py_netbox
    # snapshot of existing custom fields, keyed by str(field)
    all_custom_fields = {str(item): item for item in nb.extras.custom_fields.all()}
    logger.debug(all_custom_fields)
    for custom_field in attributes:
        # keep the human-readable name as the label, slugify for the name
        custom_field["label"] = copy.copy(custom_field["name"])
        custom_field["name"] = str(slugify.slugify(custom_field["name"], separator="_", replacements=[["/", ""], ["-", "_"]]))
        try:
            # print(custom_field["name"])
            # print(all_custom_fields[custom_field["name"]])
            if custom_field["label"] in all_custom_fields.keys():
                # field exists under its label; repair its name if the
                # stored slug drifted from what we generate now
                dt = all_custom_fields[custom_field["label"]]
                if not str(dt.name) == custom_field["name"]:
                    logger.debug(f"name is not correctly set on custom field {custom_field['label']}, updating, this may take some time")
                    dt.update({"name": custom_field["name"], "label": custom_field["label"]})
                    # re-fetch so the lookup below sees the new name
                    all_custom_fields = {str(item): item for item in nb.extras.custom_fields.all()}
            # raises KeyError when the field does not exist yet
            dt = all_custom_fields[custom_field["name"]]
            logger.debug(f"Custom Field Exists: {dt.name} - " + f"{dt.type}")
        except KeyError:
            try:
                custom_field["type"] = self.change_attrib_type(custom_field["type"])
                # attach the new field to every content type we may sync
                custom_field["content_types"] = [
                    "circuits.circuit",
                    "circuits.circuittype",
                    "circuits.provider",
                    "circuits.providernetwork",
                    "dcim.cable",
                    "dcim.consoleport",
                    "dcim.consoleserverport",
                    "dcim.device",
                    "dcim.devicebay",
                    "dcim.devicerole",
                    "dcim.devicetype",
                    "dcim.frontport",
                    "dcim.interface",
                    "dcim.inventoryitem",
                    "dcim.location",
                    "dcim.manufacturer",
                    "dcim.platform",
                    "dcim.powerfeed",
                    "dcim.poweroutlet",
                    "dcim.powerpanel",
                    "dcim.powerport",
                    "dcim.rack",
                    "dcim.rackreservation",
                    "dcim.rackrole",
                    "dcim.rearport",
                    "dcim.region",
                    "dcim.site",
                    "dcim.sitegroup",
                    "dcim.virtualchassis",
                    "ipam.aggregate",
                    "ipam.ipaddress",
                    "ipam.prefix",
                    "ipam.rir",
                    "ipam.role",
                    "ipam.routetarget",
                    "ipam.vrf",
                    "ipam.vlangroup",
                    "ipam.vlan",
                    "ipam.service",
                    "ipam.iprange",
                    "tenancy.tenantgroup",
                    "tenancy.tenant",
                    "virtualization.cluster",
                    "virtualization.clustergroup",
                    "virtualization.clustertype",
                    "virtualization.virtualmachine",
                    "virtualization.vminterface",
                ]
                dt = nb.extras.custom_fields.create(custom_field)
                # counter.update({'added': 1})
                logger.debug(f"Device Type Created: {dt.name} - " + f"{dt.type} ")
                # print("test")
            except Exception as e:
                logger.error(f"failed to add custom field: {custom_field['name']}")
                logger.debug(e)
def get_rack_by_rt_id(self, rt_id):
    """Return the NetBox rack whose rt_id custom field matches, else None.

    On multiple filter hits, the custom field is re-checked exactly to
    disambiguate partial matches.
    """
    nb = self.py_netbox
    matches = [item for item in nb.dcim.racks.filter(cf_rt_id=rt_id)]
    logger.debug(matches)
    if len(matches) == 1:
        return matches[0]
    if len(matches) > 1:
        for candidate in matches:
            if candidate["custom_fields"]["rt_id"] == str(rt_id):
                return candidate
    return None
def get_site_by_rt_id(self, rt_id):
    """Return the NetBox site whose rt_id custom field matches, else None.

    On multiple filter hits, the custom field is re-checked exactly to
    disambiguate partial matches.
    """
    nb = self.py_netbox
    matches = [item for item in nb.dcim.sites.filter(cf_rt_id=rt_id)]
    logger.debug(matches)
    if len(matches) == 1:
        return matches[0]
    if len(matches) > 1:
        for candidate in matches:
            if candidate["custom_fields"]["rt_id"] == str(rt_id):
                return candidate
    return None
def manage_sites(self, rt_sites_map):
    """Create or update NetBox sites from a {racktables_id: name} map.

    With SITE_NAME_CLEANUP enabled, the portion of the name before " ("
    becomes the site name and the full original name the description.

    :param rt_sites_map: mapping of Racktables id -> site name.
    """
    nb = self.py_netbox
    current_sites = [str(item) for item in nb.dcim.sites.all()]
    for rt_id, name in rt_sites_map.items():
        # BUG FIX: description was only assigned inside the cleanup branch,
        # raising NameError whenever SITE_NAME_CLEANUP was disabled.
        description = copy.deepcopy(name)
        if config["Misc"]["SITE_NAME_CLEANUP"]:
            name = name.split(" (")[0]
        site_data = {"description": description, "name": name, "slug": slugify.slugify(name), "custom_fields": {"rt_id": str(rt_id)}}
        if name not in current_sites:
            pp.pprint(f"{name} not in netbox, adding")
            print(nb.dcim.sites.create(site_data))
        else:
            site = nb.dcim.sites.get(name=name)
            site.update(site_data)
def create_cable(self, int_1_id, int_2_id):
    """Create a cable between two interface ids; log-and-continue on failure.

    NetBox rejects duplicate terminations, so an exception here usually
    just means the cable already exists.
    """
    nb = self.py_netbox
    payload = {
        "termination_a_type": "dcim.interface",
        "termination_a_id": int_1_id,
        "termination_b_type": "dcim.interface",
        "termination_b_id": int_2_id,
    }
    try:
        new_cable = nb.dcim.cables.create(payload)
        pp.pprint(new_cable)
    except Exception as exc:
        logger.debug("unable to create cable, usually means a cable already exists...")
        logger.error(exc)
def create_cables_between_devices(self, connection_data):
    """Create a cable between two devices' interfaces, both looked up by
    their Racktables id custom field.

    :param connection_data: dict with keys "local_device_rt_id",
        "local_port", "remote_port" and "remote_device" (itself a dict
        containing at least "id").

    If either device or either named port is missing in NetBox, the cable
    is skipped (connections are retried when the device is later synced).
    """
    nb = self.py_netbox
    local_device_obj = nb.dcim.devices.filter(cf_rt_id=connection_data["local_device_rt_id"])
    local_device = {str(item): dict(item) for item in local_device_obj}
    if bool(local_device):
        # collapse the {name: record} dict to the single matching record
        local_device = list(local_device.values())[0]
        # local_device_dict = { str(local_device): dict(local_device) }
        # pp.pprint(local_device)
    remote_device_obj = nb.dcim.devices.filter(cf_rt_id=connection_data["remote_device"]["id"])
    remote_device = {str(item): dict(item) for item in remote_device_obj}
    if bool(remote_device):
        remote_device = list(remote_device.values())[0]
        # remote_device = nb.dcim.devices.filter(cf_rt_id=connection_data["remote_device"]["id"])
        # pp.pprint(remote_device)
    if bool(local_device) and bool(remote_device):
        # index both devices' interfaces by name for the port lookups below
        local_device_ints_objs = nb.dcim.interfaces.filter(device_id=local_device["id"])
        local_device_ints = {str(item): item for item in local_device_ints_objs}
        # pp.pprint(local_device_ints)
        remote_device_ints_objs = nb.dcim.interfaces.filter(device_id=remote_device["id"])
        remote_device_ints = {str(item): item for item in remote_device_ints_objs}
        # pp.pprint(remote_device_ints)
        local_port_found = False
        if connection_data["local_port"] in local_device_ints.keys():
            logger.debug("found local_port in netbox")
            local_port_found = True
            local_port = local_device_ints[connection_data["local_port"]]
            # local_port_dict = {str(item): item for item in local_port}
        else:
            logger.error(f"did not find local_port({connection_data['local_port']}) in netbox...")
        remote_port_found = False
        if connection_data["remote_port"] in remote_device_ints.keys():
            logger.debug("found remote_port in netbox")
            remote_port_found = True
            remote_port = remote_device_ints[connection_data["remote_port"]]
            # remote_port_dict = {str(item): item for item in remote_port}
        else:
            logger.error(f"did not find remote_port({connection_data['remote_port']}) in netbox for device {remote_device['name']}")
        if local_port_found and remote_port_found:
            # port may be set to Virtual if it didnt exist in device template when syned over. fix if needed
            if str(remote_port.type) == "Virtual":
                remote_port.update({"type": "other"})
            if str(local_port.type) == "Virtual":
                local_port.update({"type": "other"})
            # the actual meat of the function.... why did it take soo much to get here? definately monday code....
            self.create_cable(local_port.id, remote_port.id)
    else:
        logger.warning("remote device doesnt exist in nb yet. connections will be added when it gets added")
def manage_vm(self, vm_data):
    """Create or update a virtual machine in NetBox.

    The VM is matched by its rt_id custom field; when found it is updated
    in place, otherwise a new VM record is created. (A name-based fallback
    lookup exists in the code but is disabled as too risky.)
    """
    nb = self.py_netbox
    rt_id = vm_data["custom_fields"]["rt_id"]
    vm_data = self.get_vm_cluster_from_device(vm_data)
    pp.pprint(vm_data)
    match_by_rt_id = nb.virtualization.virtual_machines.get(cf_rt_id=rt_id)
    # device_check2 = nb.virtualization.virtual_machines.filter(name=vm_data["name"])
    match_by_name = None
    if match_by_rt_id:
        logger.debug("found existing vm in netbox, will update")
        match_by_rt_id.update(vm_data)
    elif match_by_name:
        logger.debug("found existing vm in netbox by name (dangerious)")
        match_by_name.update(vm_data)
    else:
        logger.debug("did not find an existing vm in nb, will add!")
        nb.virtualization.virtual_machines.create(vm_data)
def get_vm_cluster_from_device(self, | |
= {'first_name': 'Bill'}
self.assertRaises(redpipe.InvalidOperation,
lambda: self.User(data))
self.assertRaises(redpipe.InvalidOperation,
lambda: self.UserWithPk(data))
def test_incr(self):
    """incr/decr on an undeclared Struct field store stringified values,
    and a pipelined incr is not visible until the pipeline executes."""
    key = '1'

    class T(redpipe.Struct):
        keyspace = 'T'
        fields = {
        }

    field = 'arbitrary_field'
    t = T(key)
    t.incr(field)
    self.assertEqual(t[field], '1')
    with redpipe.autoexec() as pipe:
        t.incr(field, pipe=pipe)
        # still the old value: the pipe has not executed yet
        self.assertEqual(t[field], '1')
    self.assertEqual(t[field], '2')
    t.incr(field, 3)
    self.assertEqual(t[field], '5')
    t.decr(field)
    self.assertEqual(t[field], '4')
    t.decr(field, 2)
    self.assertEqual(t[field], '2')
def test_typed_incr(self):
    """incr/decr on an IntegerField round-trip as ints; pop removes the
    field and returns its last value."""
    key = '1'

    class T(redpipe.Struct):
        keyspace = 'T'
        fields = {
            'counter': redpipe.IntegerField
        }

    field = 'counter'
    t = T(key)
    t.incr(field)
    self.assertEqual(t[field], 1)
    with redpipe.autoexec() as pipe:
        t.incr(field, pipe=pipe)
        # still the old value: the pipe has not executed yet
        self.assertEqual(t[field], 1)
    self.assertEqual(t[field], 2)
    t.incr(field, 3)
    self.assertEqual(t[field], 5)
    t.decr(field)
    self.assertEqual(t[field], 4)
    t.decr(field, 2)
    self.assertEqual(t[field], 2)
    arbitrary_field = t.pop(field)
    self.assertEqual(arbitrary_field, 2)
    self.assertEqual(t.get(field), None)
def test_delete(self):
    """Struct.delete removes the persisted records for all given keys."""
    keys = ['1', '2', '3']
    for k in keys:
        data = self.fake_user_data(_key=k)
        self.User(data)
    for k in keys:
        u = self.User(k)
        self.assertTrue(u.persisted)
    self.User.delete(keys)
    for k in keys:
        u = self.User(k)
        self.assertFalse(u.persisted)
def test_indirect_overlap_of_pk(self):
    """A pk field written directly into the backing hash is ignored on
    load: the Struct key always wins over the stored pk value."""
    key = '1'
    other_key = '2'
    data = self.fake_user_data(user_id=key)
    u = self.UserWithPk(data)
    # sneak a conflicting pk value straight into the redis hash
    u.core().hset(key, 'user_id', other_key)
    u = self.UserWithPk(key)
    self.assertEqual(dict(u)['user_id'], key)
    self.assertNotIn('user_id', u._data)  # noqa
    self.assertEqual(u.key, key)
def test_update_with_none_future(self):
    """Updating a field with a Future that resolves to None removes the
    field, just like a literal None would."""
    f = redpipe.Future()
    f.set(None)
    data = self.fake_user_data(user_id='1')
    u = self.UserWithPk(data)
    u.update({'first_name': f})
    u = self.UserWithPk('1')
    self.assertRaises(KeyError, lambda: u['first_name'])
def test_with_empty_update(self):
    """update({}) is a no-op and leaves the stored data intact."""

    class Test(redpipe.Struct):
        keyspace = 'U'
        fields = {
            'a': redpipe.TextField,
        }
        key_name = 'k'

    data = {'k': '1', 'a': 'foo', 'b': 'bar'}
    t = Test(data)
    t.update({})
    self.assertEqual(t, data)
def test_fields_custom_default(self):
    """A list-valued default_fields limits what a key-only load fetches;
    load() and fields='all' retrieve the remaining fields."""

    class Test(redpipe.Struct):
        keyspace = 'U'
        fields = {
            'a': redpipe.TextField,
            'b': redpipe.TextField,
        }
        default_fields = ['a']
        key_name = 'k'

    data = {'k': '1', 'a': 'foo', 'b': 'bar'}
    t = Test(data)
    self.assertEqual(t, data)
    t = Test(data['k'])
    self.assertEqual(t, {'k': '1', 'a': 'foo'})
    t.load(['b'])
    self.assertEqual(t, data)
    t = Test(data['k'], fields='all')
    self.assertEqual(t, data)
def test_fields_custom_default_defined_only(self):
    """default_fields='defined' loads only the schema-declared fields by
    default; undeclared ones must be loaded explicitly or via 'all'."""

    class Test(redpipe.Struct):
        keyspace = 'U'
        fields = {
            'a': redpipe.TextField,
            'b': redpipe.TextField,
        }
        default_fields = 'defined'
        key_name = 'k'

    data = {'k': '1', 'a': 'foo', 'b': 'bar', 'c': 'bazz'}
    t = Test(data)
    self.assertEqual(t, data)
    t = Test(data['k'])
    self.assertEqual(t, {'k': '1', 'a': 'foo', 'b': 'bar'})
    t.load(['c'])
    self.assertEqual(t, data)
    t = Test(data['k'], fields='all')
    self.assertEqual(t, data)
def test_nx(self):
    """nx=True writes only fields that do not already exist."""

    class Test(redpipe.Struct):
        keyspace = 'U'
        fields = {
            'f1': redpipe.TextField,
            'f2': redpipe.TextField,
        }
        key_name = 'k'

    t = Test({'k': '1', 'f1': 'a'})
    self.assertEqual(t['f1'], 'a')
    t = Test({'k': '1', 'f1': 'b', 'f2': 'c'}, nx=True)
    # f1 existed, so nx skipped it; f2 was new, so it was written
    self.assertEqual(t['f1'], 'a')
    self.assertEqual(t['f2'], 'c')
def test_required_fields(self):
    """Creating or updating without a required field raises, and a
    rejected update does not partially apply."""

    class Test(redpipe.Struct):
        keyspace = 'U'
        fields = {
            'a': redpipe.IntegerField,
            'b': redpipe.IntegerField
        }
        required = set('b')

    t = Test({'_key': 'abc', 'b': 123})
    self.assertEqual(t['b'], 123)
    with self.assertRaises(redpipe.InvalidOperation):
        Test({'_key': 'abc', 'a': 123})  # Create obj w/o required field
    with self.assertRaises(redpipe.InvalidOperation):
        # Update obj removing a required field
        t.update({'a': 456, 'b': None})
    # Make sure the other fields did NOT update on the failed update
    self.assertIsNone(t.get('a', None))
    t.update({'a': 456, 'b': 789})  # Update required field of obj
    self.assertEqual(t['a'], 456)
    self.assertEqual(t['b'], 789)
    t.update({'a': None})  # Update non-required field of obj
    self.assertIsNone(t.get('a', None))
def test_required_adding_later(self):
    """A required field introduced by a later schema revision is enforced
    on updates of records created under the old schema."""

    class Test(redpipe.Struct):
        keyspace = 'U'
        fields = {
            'a': redpipe.IntegerField,
            'b': redpipe.IntegerField
        }

    class Test2(redpipe.Struct):
        required = {'new_required_field'}
        keyspace = 'U'
        fields = {
            'a': redpipe.IntegerField,
            'b': redpipe.IntegerField
        }

    Test({'_key': 'abc', 'b': 123})
    t = Test2('abc')
    t.update({'b': 456, 'random_field': 'hello_world'})
    self.assertEqual(t['b'], 456)
    self.assertEqual(t['random_field'], 'hello_world')
    self.assertIsNone(t.get('new_required_field', None))
    with self.assertRaises(redpipe.InvalidOperation):
        # Update obj removing a required field that didn't yet exist
        t.update({'a': 456, 'new_required_field': None})
class ConnectTestCase(unittest.TestCase):
    """Exercise redpipe connection management: connect/disconnect
    semantics, named connections, nested pipelines, multi-backend
    execution and failure behavior."""

    def tearDown(self):
        redpipe.reset()

    def incr_a(self, key, pipe=None):
        # helper: INCR routed to the connection named 'a'
        with redpipe.autoexec(pipe, name='a') as pipe:
            return pipe.incr(key)

    def incr_b(self, key, pipe=None):
        # helper: INCR routed to the connection named 'b'
        with redpipe.autoexec(pipe, name='b') as pipe:
            return pipe.incr(key)

    def test(self):
        """connect_redis is idempotent for the same underlying client but
        raises AlreadyConnected for a different one until disconnect()."""
        r = redislite.StrictRedis()
        redpipe.connect_redis(r)
        redpipe.connect_redis(r)
        self.assertRaises(
            redpipe.AlreadyConnected,
            lambda: redpipe.connect_redis(redislite.StrictRedis()))
        redpipe.disconnect()
        redpipe.connect_redis(redislite.StrictRedis())
        # tear down the connection
        redpipe.disconnect()
        # calling it multiple times doesn't hurt anything
        redpipe.disconnect()
        redpipe.connect_redis(r)
        # a different client object sharing the same pool also counts as
        # the same connection
        redpipe.connect_redis(
            redis.Redis(connection_pool=r.connection_pool))
        redpipe.connect_redis(r)
        self.assertRaises(
            redpipe.AlreadyConnected,
            lambda: redpipe.connect_redis(
                redislite.StrictRedis()))

    def test_with_decode_responses(self):
        """Clients configured with decode_responses are rejected."""
        def connect():
            redpipe.connect_redis(
                redislite.StrictRedis(decode_responses=True))

        self.assertRaises(redpipe.InvalidPipeline, connect)

    def test_single_nested(self):
        """Futures from nested pipelines resolve only after the outermost
        pipeline executes."""
        redpipe.connect_redis(redislite.StrictRedis(), 'a')

        def mid_level(pipe=None):
            with redpipe.autoexec(pipe, name='a') as pipe:
                return self.incr_a('foo', pipe=pipe)

        def top_level(pipe=None):
            with redpipe.autoexec(pipe, name='a') as pipe:
                return mid_level(pipe)

        with redpipe.autoexec(name='a') as pipe:
            ref = top_level(pipe)
            # nothing has hit redis yet inside the outermost context
            self.assertRaises(redpipe.ResultNotReady, lambda: ref.result)

        self.assertEqual(ref.result, 1)

    def test_sync(self):
        """Re-run a representative subset of the tests with background
        threads disabled to cover the synchronous execution path."""
        try:
            redpipe.disable_threads()
            self.test_single_nested()
            self.tearDown()
            self.test_pipeline_nested_mismatched_name()
            self.tearDown()
            self.test_multi_invalid_connection()
            self.tearDown()
            self.test_sleeping_cb()
        finally:
            redpipe.enable_threads()

    def test_sleeping_cb(self):
        """A blocking command (blpop) on one backend does not wedge a
        pipeline spanning two backends."""
        redpipe.connect_redis(redislite.Redis(), 'a')
        redpipe.connect_redis(redislite.Redis(), 'b')
        with redpipe.autoexec(name='a') as pipe:
            pipe.set('foo', '1')
            with redpipe.autoexec(pipe=pipe, name='b') as p:
                ref = p.blpop('1', timeout=1)
        self.assertEqual(ref.result, None)

    def test_multi(self):
        """One explicit pipeline fans out to two named backends; a raise
        inside an autoexec block leaves futures unresolved."""
        a_conn = redislite.StrictRedis()
        b_conn = redislite.StrictRedis()
        redpipe.connect_redis(a_conn, name='a')
        redpipe.connect_redis(b_conn, name='b')
        key = 'foo'
        verify_callback = []
        with redpipe.pipeline() as pipe:
            a = self.incr_a(key, pipe)
            b = self.incr_b(key, pipe)

            def cb():
                verify_callback.append(1)

            pipe.on_execute(cb)
            pipe.execute()
        self.assertEqual(a.result, 1)
        self.assertEqual(b.result, 1)
        self.assertEqual(verify_callback, [1])

        # test failure
        try:
            with redpipe.autoexec() as pipe:
                a = self.incr_a(key, pipe)
                raise Exception('boo')
        except Exception:
            pass

        self.assertRaises(redpipe.ResultNotReady, lambda: a.result)

    def test_multi_auto(self):
        """Same fan-out as test_multi, but autoexec executes on exit."""
        a_conn = redislite.StrictRedis()
        b_conn = redislite.StrictRedis()
        redpipe.connect_redis(a_conn)
        redpipe.connect_redis(a_conn, name='a')
        redpipe.connect_redis(b_conn, name='b')
        key = 'foo'
        verify_callback = []
        with redpipe.autoexec() as pipe:
            a = self.incr_a(key, pipe)
            b = self.incr_b(key, pipe)

            def cb():
                verify_callback.append(1)

            pipe.on_execute(cb)
        self.assertEqual(a.result, 1)
        self.assertEqual(b.result, 1)
        self.assertEqual(verify_callback, [1])

    def test_multi_invalid_connection(self):
        """If one backend is unreachable, execute raises and no futures
        resolve and no callbacks fire."""
        a_conn = redislite.StrictRedis()
        b_conn = redislite.StrictRedis(port=987654321)
        redpipe.connect_redis(a_conn, name='a')
        redpipe.connect_redis(b_conn, name='b')
        key = 'foo'
        verify_callback = []
        with redpipe.pipeline(name='a') as pipe:
            a = self.incr_a(key, pipe)
            b = self.incr_b(key, pipe)

            def cb():
                verify_callback.append(1)

            pipe.on_execute(cb)
            self.assertRaises(redis.ConnectionError, pipe.execute)

        # you can see here that it's not a 2-phase commit.
        # the goal is not tranactional integrity.
        # it is parallel execution of network tasks.
        self.assertRaises(redpipe.ResultNotReady, lambda: a.result)
        self.assertRaises(redpipe.ResultNotReady, lambda: b.result)
        self.assertEqual(verify_callback, [])

    def test_pipeline_mismatched_name(self):
        """A nested pipeline targeting another named backend still defers
        its futures to the explicit execute call."""
        a_conn = redislite.StrictRedis()
        b_conn = redislite.StrictRedis()
        redpipe.connect_redis(a_conn, name='a')
        redpipe.connect_redis(b_conn, name='b')
        with redpipe.pipeline(name='b') as pipe:
            ref = self.incr_a(key='foo', pipe=pipe)
            self.assertRaises(redpipe.ResultNotReady, lambda: ref.result)
            pipe.execute()

    def test_pipeline_nested_mismatched_name(self):
        """Nested mismatched-name pipelines attach to the outer pipeline;
        results appear only after the outer execute."""
        a_conn = redislite.StrictRedis()
        b_conn = redislite.StrictRedis()
        redpipe.connect_redis(a_conn, name='a')
        redpipe.connect_redis(b_conn, name='b')

        def my_function(pipe=None):
            with redpipe.pipeline(pipe=pipe, name='b') as pipe:
                ref = self.incr_a(key='foo', pipe=pipe)
                self.assertRaises(redpipe.ResultNotReady, lambda: ref.result)
                pipe.execute()
                return ref

        with redpipe.pipeline(name='a') as pipe:
            ref1 = my_function(pipe=pipe)
            ref2 = my_function(pipe=pipe)
            self.assertRaises(redpipe.ResultNotReady, lambda: ref1.result)
            self.assertRaises(redpipe.ResultNotReady, lambda: ref2.result)
            pipe.execute()

        self.assertEqual(ref1.result, 1)
        self.assertEqual(ref2.result, 2)

    def test_pipeline_invalid_object(self):
        """Passing a non-pipeline object as pipe raises InvalidPipeline."""
        a_conn = redislite.StrictRedis()
        b_conn = redislite.StrictRedis()
        redpipe.connect_redis(a_conn)
        redpipe.connect_redis(a_conn, name='a')
        redpipe.connect_redis(b_conn, name='b')

        def do_invalid():
            self.incr_a(key='foo', pipe='invalid')

        self.assertRaises(redpipe.InvalidPipeline, do_invalid)

    def test_unconfigured_pipeline(self):
        """Using a named connection that was never configured raises
        InvalidPipeline, directly and when nested."""
        def invalid():
            self.incr_a(key='foo')

        def nested_invalid():
            with redpipe.autoexec() as pipe:
                self.incr_a(key='foo', pipe=pipe)

        self.assertRaises(redpipe.InvalidPipeline, invalid)
        self.assertRaises(redpipe.InvalidPipeline, nested_invalid)
@unittest.skipIf(rediscluster is None, 'rediscluster is disabled')
class ConnectRedisClusterTestCase(unittest.TestCase):
    """Verify a cluster client can be wired into redpipe without a live
    cluster: connecting succeeds, executing fails."""

    def tearDown(self):
        redpipe.reset()

    def test(self):
        # i don't need to set up a full cluster to test. this.
        # it's enough to know I wired it into the code correctly for now.
        r = rediscluster.StrictRedisCluster(
            startup_nodes=[{'host': '0', 'port': 999999}],
            init_slot_cache=False
        )
        redpipe.connect_redis(r, 'test')
        with redpipe.pipeline(name='test') as pipe:
            pipe.set('foo', 'bar')
            self.assertRaises(Exception, pipe.execute)
@unittest.skipIf(rediscluster is None, 'rediscluster package failed to import')
class RedisClusterTestCase(unittest.TestCase):
    """Smoke-test the core keyspace types (String, List, Set, SortedSet,
    HyperLogLog) against a single-node redis cluster."""

    @classmethod
    def setUpClass(cls):
        # one shared cluster per test class; torn down in tearDownClass
        cls.c = SingleNodeRedisCluster(strict=False)
        cls.r = cls.c.client
        redpipe.connect_redis(cls.r)

    @classmethod
    def tearDownClass(cls):
        cls.r = None
        cls.c.shutdown()
        cls.c = None
        redpipe.reset()

    def tearDown(self):
        self.r.flushall()

    def test_basic(self):
        """Raw set/get through an autoexec pipeline round-trips bytes."""
        with redpipe.autoexec() as pipe:
            pipe.set('foo', 'bar')
            res = pipe.get('foo')
        self.assertEqual(res, b'bar')

    def test_list(self):
        """List keyspace: rpush/lrange/lpop in a single pipeline."""
        class Test(redpipe.List):
            keyspace = 'T'

        with redpipe.autoexec() as pipe:
            t = Test(pipe)
            append = t.rpush('1', 'a', 'b', 'c')
            lrange = t.lrange('1', 0, -1)
            lpop = t.lpop('1')
        self.assertEqual(append, 3)
        self.assertEqual(lrange, ['a', 'b', 'c'])
        self.assertEqual(lpop, 'a')

    def test_set(self):
        """Set keyspace: sadd/smembers/spop/scard in a single pipeline."""
        class Test(redpipe.Set):
            keyspace = 'T'

        with redpipe.autoexec() as pipe:
            t = Test(pipe)
            sadd = t.sadd('1', 'a', 'b', 'c')
            smembers = t.smembers('1')
            spop = t.spop('1')
            scard = t.scard('1')
        expected = {'a', 'b', 'c'}
        self.assertEqual(sadd, 3)
        self.assertEqual(smembers, expected)
        self.assertIn(spop, expected)
        self.assertEqual(scard, 2)

    def test_string(self):
        """String keyspace: set/get/delete in a single pipeline."""
        class Test(redpipe.String):
            keyspace = 'T'

        with redpipe.autoexec() as pipe:
            t = Test(pipe)
            set_result = t.set('1', 'a')
            get_result = t.get('1')
            delete_result = t.delete('1')
        self.assertEqual(set_result, 1)
        self.assertEqual(get_result, 'a')
        self.assertEqual(delete_result, 1)

    def test_sorted_sets(self):
        """SortedSet keyspace: zadd/zrange/zincrby in a single pipeline."""
        class Test(redpipe.SortedSet):
            keyspace = 'T'

        with redpipe.autoexec() as pipe:
            t = Test(pipe)
            t.zadd('1', 'a', 1)
            t.zadd('1', 'b', 2)
            zadd = t.zadd('1', 'c', 3)
            zrange = t.zrange('1', 0, -1)
            zincrby = t.zincrby('1', 'a', 1)
        self.assertEqual(zadd, 1)
        self.assertEqual(zrange, ['a', 'b', 'c'])
        self.assertEqual(zincrby, 2)

    def test_hll_commands(self):
        """HyperLogLog keyspace: pfadd only reports 1 on new elements and
        pfcount reflects the (approximate) cardinality."""
        class Test(redpipe.HyperLogLog):
            keyspace = 'T'

        with redpipe.autoexec() as pipe:
            t = Test(pipe)
            pfadd = t.pfadd('1', 'a', 'b', 'c')
            t.pfadd('1', 'a', 'b', 'c')
            t.pfadd('1', 'd')
            pfcount = t.pfcount('1')
        self.assertEqual(pfadd, 1)
        self.assertEqual(pfcount, 4)
@unittest.skipIf(rediscluster is None, 'rediscluster package failed to import')
class StrictRedisClusterTestCase(RedisClusterTestCase):
    """Repeat RedisClusterTestCase against the strict cluster client; the
    inherited tests run unchanged against this fixture."""

    @classmethod
    def setUpClass(cls):
        cls.c = SingleNodeRedisCluster(strict=True)
        cls.r = cls.c.client
        redpipe.connect_redis(cls.r)

    @classmethod
    def tearDownClass(cls):
        cls.r = None
        cls.c.shutdown()
        cls.c = None
        redpipe.reset()
class StrictStringTestCase(BaseTestCase):
class Data(redpipe.String):
keyspace = 'STRING'
def super_get(self, key):
f = redpipe.Future()
with self.super_pipe as pipe:
res = self.get(key)
def cb():
f.set(res.result)
pipe.on_execute(cb)
return f
| |
TODO: verify that this was indeed wrong
# w = self.node_weights
# indicator matrix that i,j are not neighboured or equal
not_adjacent_or_equal = (1 - A - np.identity(N)).astype('int8')
if mpi.available:
parts = max(1, int(np.ceil(min((mpi.size-1) * 10.0,
0.1 * N))))
step = int(np.ceil(1.0*N/(1.0*parts)))
if self.silence_level <= 0:
print (" parallelizing on " + str((mpi.size-1))
+ " slaves into " + str(parts) + " parts with "
+ str(step) + " nodes each...")
for idx in xrange(parts):
start_i = idx * step
end_i = min((idx+1)*step, N)
if start_i >= end_i:
break
this_A = A[start_i:end_i, :]
this_not_adjacent_or_equal = \
not_adjacent_or_equal[start_i:end_i, :]
mpi.submit_call(
"_cy_mpi_nsi_newman_betweenness",
(this_A.astype(int), V.astype(float), N,
w.astype(float),
this_not_adjacent_or_equal.astype(int), start_i,
end_i),
module="pyunicorn", id=idx)
# Retrieve results of all submited jobs
component_betweenness = np.zeros(N)
for idx in xrange(parts):
this_betweenness, start_i, end_i = mpi.get_result(idx)
component_betweenness[start_i:end_i] = this_betweenness
else:
component_betweenness, start_i, end_i = \
_cy_mpi_nsi_newman_betweenness(
A.astype(int), V.astype(float), N, w.astype(float),
not_adjacent_or_equal.astype(int), 0, N)
# Correction for the fact that we used only s,t not
# neighboured to i
if add_local_ends:
nsi_k = subnet.nsi_degree()
component_betweenness += (2.0 * w.sum() - nsi_k) * nsi_k
# Copy results into randomWalkBetweennessArray at the correct
# positions
for j in xrange(len(nodes)):
nsi_newman_betweenness[nodes[j]] = component_betweenness[j]
if self.silence_level <= 0:
print "...took", time.time()-t0, "seconds"
return nsi_newman_betweenness
#
# Efficiency measures
#
def global_efficiency(self, link_attribute=None):
    """
    Return the global (weighted) efficiency. (see [Costa2007]_)

    Efficiency is the mean of the inverse shortest path lengths over all
    ordered node pairs (i != j).

    **Example:**

    >>> r(Network.SmallTestNetwork().global_efficiency())
    Calculating all shortest path lengths...
    Calculating global (weighted) efficiency...
    0.7111

    :arg str link_attribute: Optional name of the link attribute to be used
        as the links' length. If None, links have length 1. (Default: None)
    :rtype: float
    """
    if link_attribute == "topological":
        print ("WARNING: link_attribute='topological' is deprecated.\n"
               + "Use link_attribute=None instead.")
        link_attribute = None

    path_lengths = self.path_lengths(link_attribute)

    if self.silence_level <= 1:
        print "Calculating global (weighted) efficiency..."

    # Set path lengths on diagonal to infinity to avoid summing over those
    # entries when calculating efficiency
    # NOTE(review): this mutates the matrix returned by path_lengths() in
    # place and restores it below — presumably it is a shared/cached
    # array; confirm before refactoring.
    np.fill_diagonal(path_lengths, np.inf)

    # Calculate global efficiency
    efficiency = (1/float(self.N * (self.N-1)) * (1/path_lengths).sum())

    # Restore path lengths on diagonal to zero
    np.fill_diagonal(path_lengths, 0)

    return efficiency
@cached_const('nsi', 'global eff', 'n.s.i. global efficiency')
def nsi_global_efficiency(self):
    """
    Return the n.s.i. global efficiency.

    This is the node-weighted analogue of global efficiency: the
    weight-weighted mean inverse path length, with self-distances set
    to 1.

    **Example:**

    >>> r(Network.SmallTestNetwork().nsi_global_efficiency())
    Calculating n.s.i. global efficiency...
    Calculating all shortest path lengths...
    0.7415

    :rtype: float
    """
    weights = self.node_weights
    # shift the diagonal to 1 so that 1/d stays finite for i == j
    distances = self.path_lengths() + np.identity(self.N)
    inverse_sum = weights.dot((1 / distances).dot(weights))
    return inverse_sum / self.total_node_weight**2
def distance_based_measures(self, replace_inf_by=None):
"""
Return a dictionary of local and global measures that are based on
shortest path lengths.
This is useful for large graphs for which the matrix of all shortest
path lengths cannot be stored.
EXPERIMENTAL!
:type replace_inf_by: float/inf/None
:arg replace_inf_by: If None, the number of nodes is used.
(Default: None)
:rtype: dictionary with keys "closeness", "harmonic_closeness",
"exponential_closeness", "average_path_length",
"global_efficiency", "nsi_closeness", "nsi_harmonic_closeness",
"nsi_exponential_closeness", "nsi_average_path_length",
"nsi_global_efficiency"
"""
N, w, W = self.N, self.node_weights, self.total_node_weight
if replace_inf_by is None:
replace_inf_by = N
closeness = np.zeros(N)
harmonic_closeness = np.zeros(N)
exponential_closeness = np.zeros(N)
average_path_length = 0
nsi_closeness = np.zeros(N)
nsi_harmonic_closeness = np.zeros(N)
nsi_exponential_closeness = np.zeros(N)
nsi_average_path_length = 0
for i in range(N):
if self.silence_level == 0:
print i
di = np.array(self.graph.shortest_paths(i), dtype=float).flatten()
di[np.where(di == np.inf)] = replace_inf_by
closeness[i] = 1.0 / di.sum()
average_path_length += di.sum()
di[i] = np.inf
harmonic_closeness[i] = (1.0/di).sum()
exponential_closeness[i] = (0.5**di).sum()
di[i] = 1
nsi_closeness[i] = 1.0 / (w*di).sum()
nsi_average_path_length += w[i] * (w*di).sum()
nsi_harmonic_closeness[i] = (w/di).sum()
nsi_exponential_closeness[i] = (w * 0.5**di).sum()
return {
"closeness": closeness * (N-1),
"harmonic_closeness": harmonic_closeness / (N-1),
"exponential_closeness": exponential_closeness / (N-1),
"average_path_length": average_path_length / N*(N-1),
"global_efficiency": harmonic_closeness.mean() / (N-1),
"nsi_closeness": nsi_closeness * W,
"nsi_harmonic_closeness": nsi_harmonic_closeness / W,
"nsi_exponential_closeness": nsi_exponential_closeness / W,
"nsi_average_path_length": nsi_average_path_length / W**2,
"nsi_global_efficiency": w.dot(nsi_harmonic_closeness) / W**2
}
#
# Vulnerability measures
#
def local_vulnerability(self, link_attribute=None):
    """
    For each node, return its vulnerability. (see [Costa2007]_)

    Vulnerability of node i is the relative drop in global efficiency
    when node i is removed from the network; computing it requires one
    full shortest-path pass per node, so this is O(N) efficiency
    calculations.

    **Example:**

    >>> r(Network.SmallTestNetwork().local_vulnerability())
    Calculating all shortest path lengths...
    Calculating global (weighted) efficiency...
    Calculating (weighted) node vulnerabilities...
    array([ 0.2969,  0.0625, -0.0313, -0.0078,  0.0977, -0.125 ])

    :arg str link_attribute: Optional name of the link attribute to be used
        as the links' length. If None, links have length 1. (Default: None)
    :rtype: 1d numpy array [node] of floats
    """
    if link_attribute == "topological":
        print ("WARNING: link_attribute='topological' is deprecated.\n"
               + "Use link_attribute=None instead.")
        link_attribute = None

    vulnerability = np.zeros(self.N)

    # Calculate global efficiency of complete network E
    global_efficiency = self.global_efficiency(link_attribute)

    if self.silence_level <= 1:
        print "Calculating (weighted) node vulnerabilities..."

    # Initialize progress bar
    if self.silence_level <= 1:
        progress = progressbar.ProgressBar(maxval=self.N).start()

    for i in xrange(self.N):
        # Update progress bar every 10 steps
        if self.silence_level <= 1:
            if (i % 10) == 0:
                progress.update(i)

        # Remove vertex i from graph
        graph = self.graph - i

        # Generate Network object from this reduced graph
        network = Network.FromIGraph(graph, 2)

        # Calculate global topological efficiency E_i after removal of
        # vertex i
        node_efficiency = network.global_efficiency(link_attribute)

        # Calculate local topological vulnerability of vertex i
        vulnerability[i] = ((global_efficiency - node_efficiency)
                            / global_efficiency)

        # Clean up
        del graph, network

    # Terminate progress bar
    if self.silence_level <= 1:
        progress.finish()

    return vulnerability
#
# Community measures
#
@cached_const('base', 'coreness', 'coreness')
def coreness(self):
    """
    For each node, return its coreness.

    The k-core of a network is a maximal subnetwork in which each node
    has at least degree k within the subnetwork. A node's coreness is
    the largest k for which it belongs to the k-core.

    **Example:**

    >>> Network.SmallTestNetwork().coreness()
    Calculating coreness...
    array([2, 2, 2, 2, 2, 1])

    :rtype: 1d numpy array [node] of floats
    """
    core_numbers = self.graph.coreness()
    return np.array(core_numbers)
#
# Synchronizability measures
#
@cached_const('base', 'msf sync',
              'master stability function synchronizability')
def msf_synchronizability(self):
    """
    Return the synchronizability in the master stability function
    framework.

    This is the ratio of the largest Laplacian eigenvalue to the smallest
    non-zero one (the Fiedler value); a smaller value indicates higher
    synchronizability and vice versa. This function makes sense for
    undirected climate networks (with symmetric laplacian matrix).
    For directed networks, the undirected laplacian matrix is used.
    (see [Pecora1998]_)

    .. note::
       Only defined for undirected networks.

    **Example:**

    >>> r(Network.SmallTestNetwork().msf_synchronizability())
    Calculating master stability function synchronizability...
    6.7784

    :rtype: float
    """
    # TODO: use sparse version to speed up!
    # Eigenvalues of the (undirected) graph Laplacian, ascending.
    spectrum = np.real(linalg.eigvals(self.laplacian()))
    spectrum.sort()

    # Limited numerical accuracy forces a threshold below which
    # eigenvalues are considered to be zero.
    zero_tol = 10**(-10)

    # Walk past the numerically-zero eigenvalues; the first one above the
    # threshold is the Fiedler value.
    pos = 0
    fiedler_value = 0
    while (spectrum[pos] <= zero_tol) and (pos < self.N - 1):
        fiedler_value = spectrum[pos+1]
        pos += 1

    # Synchronizability R = lambda_max / lambda_2
    return spectrum[-1] / fiedler_value
#
# Distance measures between two graphs
#
def hamming_distance_from(self, other_network):
    """
    Return the normalized hamming distance between this and another
    network.

    This is the fraction of links that would have to be changed to
    transform this network into the other. Hamming distance is only
    defined for networks with an equal number of nodes.

    :rtype: float between 0 and 1
    """
    # Hamming distance is undefined between differently sized networks.
    if self.N != other_network.N:
        raise NetworkError(
            "Only defined for networks with same number of nodes.")
    # Count the adjacency-matrix entries on which the two networks
    # disagree, then normalize by the number of possible links.
    mismatches = (self.adjacency != other_network.adjacency).sum()
    return mismatches / float(self.N * (self.N - 1))
def spreading(self, alpha=None):
    """
    For each node, return its "spreading" value.

    .. note::
       This is still EXPERIMENTAL!

    :arg alpha: coupling parameter; defaults to the inverse of the
        network's mean degree when None.
    :rtype: 1d numpy array [node] of floats
    """
    if alpha is None:
        # Default coupling: inverse of the mean degree.
        alpha = 1.0 / self.degree().mean()
    # Column sums of the matrix exponential 2**(alpha*A - I).
    # NOTE(review): matfuncs.expm2 was removed from SciPy (1.0+);
    # presumably scipy.linalg.expm is the intended replacement --
    # confirm before upgrading SciPy.
    return matfuncs.expm2(
        np.log(2.0) * (alpha * self.adjacency -
                       np.identity(self.N))).sum(axis=0).flatten()
def nsi_spreading(self, alpha=None):
"""
For each node, return its n.s.i. "spreading" value.
.. note::
This is still EXPERIMENTAL!
:rtype: 1d numpy array [node] of floats
"""
N, | |
<filename>tests/sat/Models/c710.160.UNSAT.dimacs.test.py
input = """
c num blocks = 1
c num vars = 160
c minblockids[0] = 1
c maxblockids[0] = 160
p cnf 160 710
-111 68 67 0
-144 28 -44 0
-152 -121 -142 0
-54 -39 -89 0
103 -114 -137 0
60 -125 127 0
-145 116 106 0
75 118 -99 0
-152 -63 95 0
-123 146 134 0
-103 -133 -77 0
-51 -65 -64 0
-23 117 -144 0
74 4 -49 0
2 -86 129 0
-77 54 -5 0
144 -41 -113 0
152 -34 -80 0
-39 32 -127 0
11 83 -88 0
139 96 -157 0
-82 10 126 0
-48 -20 -145 0
135 -88 128 0
-127 -122 -90 0
-5 142 -106 0
15 -32 -35 0
-76 14 -24 0
-12 -27 -105 0
-106 -38 -21 0
-119 73 135 0
103 -83 -94 0
-27 -52 -11 0
9 -142 121 0
-75 -57 105 0
-158 21 99 0
140 -148 -93 0
12 -143 -6 0
-42 -87 150 0
-148 160 -131 0
158 53 119 0
-131 97 154 0
-110 -62 -99 0
-153 -5 -19 0
-23 -10 -74 0
-91 -62 29 0
-72 49 -88 0
3 144 -137 0
-111 -45 -95 0
66 51 69 0
-31 -116 -127 0
148 150 62 0
45 106 -32 0
-133 -25 -69 0
144 72 99 0
-150 -74 -33 0
-28 54 -83 0
141 113 93 0
-55 74 -140 0
129 67 -145 0
83 -33 85 0
-40 -149 -122 0
44 94 -57 0
142 -61 -139 0
30 83 64 0
35 -111 130 0
-136 25 -60 0
-63 79 -5 0
117 131 -90 0
-115 -100 152 0
-83 112 64 0
-78 -90 45 0
139 -58 75 0
-89 46 6 0
-25 -126 148 0
130 44 34 0
-51 43 155 0
60 121 -120 0
13 -78 -81 0
-64 112 -58 0
-22 111 -97 0
-29 -9 -151 0
-101 18 -33 0
-121 -143 19 0
-128 24 75 0
-32 -83 -93 0
86 -134 -30 0
114 29 125 0
3 -145 -125 0
-47 145 -27 0
-100 152 20 0
36 -108 -92 0
-57 -148 -20 0
98 129 -27 0
83 -152 -89 0
145 -104 139 0
58 44 -115 0
72 -152 -87 0
-84 141 94 0
74 -91 83 0
98 137 -15 0
60 85 -142 0
-135 -31 -81 0
-72 -25 122 0
-133 158 -35 0
38 -125 -67 0
38 -17 -136 0
-90 -100 -137 0
105 100 -114 0
84 -34 127 0
19 -118 -7 0
154 79 -58 0
155 -75 -18 0
-70 92 -154 0
-113 -25 -71 0
-48 -53 -124 0
-53 135 60 0
-116 -55 29 0
143 72 116 0
106 -9 -100 0
83 -19 131 0
-42 -81 -54 0
67 139 -144 0
-82 126 -131 0
21 -80 144 0
105 -79 69 0
99 -91 -114 0
96 20 25 0
-136 86 -146 0
-33 -103 71 0
96 -111 73 0
113 -107 -81 0
108 -27 -5 0
112 151 60 0
-84 104 73 0
123 107 16 0
86 -65 140 0
47 9 -55 0
109 73 114 0
113 -16 89 0
103 34 -21 0
-34 57 118 0
-7 -31 107 0
-107 135 93 0
-26 -50 139 0
79 63 -20 0
135 2 105 0
-16 -156 -59 0
-93 -51 -121 0
137 122 -97 0
-86 17 135 0
138 156 131 0
-160 -113 -11 0
63 -42 154 0
-102 75 116 0
-158 -104 -145 0
4 -150 94 0
51 71 -100 0
-124 -123 26 0
146 -140 -108 0
-105 37 -42 0
64 -88 -125 0
7 63 77 0
147 -151 -102 0
-94 -103 -5 0
20 124 -7 0
110 -75 -84 0
143 43 -72 0
-107 -155 92 0
-98 62 -79 0
44 62 -114 0
-69 79 -130 0
-101 21 87 0
-155 51 -112 0
-26 133 157 0
-26 100 -53 0
-160 -14 -79 0
-89 110 14 0
130 -84 129 0
-138 75 83 0
158 -33 -157 0
130 -53 -92 0
96 -76 -126 0
118 -158 -33 0
-50 141 -38 0
130 135 -4 0
97 -140 81 0
108 126 -9 0
-141 9 151 0
-88 9 36 0
100 -62 31 0
111 155 -108 0
142 -135 -155 0
106 104 116 0
119 143 8 0
6 -3 8 0
140 -50 154 0
73 68 -89 0
88 -52 -43 0
-37 -159 158 0
154 32 110 0
111 121 -134 0
112 125 -113 0
45 44 -99 0
-25 -65 -85 0
50 92 105 0
-20 -27 109 0
5 -50 -121 0
-85 31 -60 0
16 35 39 0
143 -92 -8 0
-113 -125 48 0
88 -72 -55 0
104 105 99 0
-156 -104 92 0
57 -3 -125 0
47 60 32 0
-98 -1 136 0
-18 125 122 0
-55 -82 136 0
144 -143 -113 0
-4 138 7 0
-100 97 -48 0
-19 -47 -138 0
-148 -90 -114 0
76 34 -133 0
-128 -7 112 0
111 82 -159 0
-96 125 123 0
-125 61 52 0
31 32 110 0
73 -51 -11 0
-153 -74 2 0
-78 -103 108 0
-20 -49 -151 0
-127 44 5 0
58 -6 -26 0
131 152 58 0
14 -41 -130 0
-68 -132 -113 0
-63 60 -17 0
-1 99 25 0
-140 141 -101 0
50 60 -84 0
22 -27 91 0
140 -42 13 0
126 61 -108 0
-76 -92 159 0
138 -56 -108 0
27 -17 -70 0
-8 -146 129 0
1 -23 -136 0
-104 -79 -97 0
-157 137 57 0
-132 109 -1 0
-105 153 18 0
-20 -94 -9 0
-132 13 157 0
109 21 -61 0
32 154 -93 0
-13 137 58 0
138 110 -38 0
158 133 128 0
-114 -103 81 0
71 -37 -88 0
-118 120 -114 0
-137 85 -98 0
-17 -147 -7 0
111 89 -132 0
63 130 -149 0
-129 -91 33 0
90 102 79 0
-76 25 -66 0
27 90 -128 0
49 -85 -128 0
-90 -13 28 0
-55 141 108 0
89 101 65 0
154 145 -93 0
75 61 157 0
23 134 108 0
105 133 -1 0
-122 -72 -8 0
131 156 84 0
114 121 132 0
57 -2 -15 0
-122 160 124 0
37 -67 -119 0
28 -39 -57 0
-131 -7 -84 0
-159 52 135 0
-15 -135 47 0
121 20 146 0
-132 136 119 0
-48 -108 -99 0
-126 -132 -128 0
-15 -10 -5 0
56 -156 -80 0
-79 -94 -47 0
-104 -8 -6 0
5 -56 -85 0
-83 -117 45 0
-113 -63 152 0
-17 4 -19 0
-8 17 -36 0
-125 -100 106 0
7 72 -19 0
-130 -156 148 0
-67 -109 138 0
-74 133 -87 0
-34 51 15 0
89 -141 -59 0
56 85 -108 0
29 14 -154 0
149 57 110 0
39 -112 134 0
-72 -157 13 0
123 -58 -140 0
-115 -159 34 0
-123 -147 -82 0
146 -59 126 0
-132 102 24 0
117 -113 -112 0
-141 31 -13 0
62 152 -118 0
125 -69 137 0
-157 -100 -44 0
-52 122 -19 0
-86 41 66 0
32 -33 -146 0
-61 -3 69 0
146 -112 77 0
-101 -104 140 0
38 -30 -47 0
52 104 32 0
-74 76 118 0
41 19 126 0
-103 46 -96 0
-55 134 -127 0
-67 -104 153 0
-54 -49 -116 0
30 110 -135 0
-9 -157 141 0
154 146 111 0
-24 159 -136 0
26 129 1 0
-73 154 40 0
-46 -98 -33 0
-75 -62 148 0
39 54 -117 0
-108 -66 -104 0
97 -6 -61 0
30 110 -129 0
92 65 108 0
4 -26 -46 0
-5 45 134 0
-81 -74 144 0
-65 -134 -14 0
145 -90 -160 0
-107 -97 61 0
135 -131 70 0
-101 -1 76 0
-157 -31 -140 0
-99 85 -160 0
150 -141 124 0
128 153 66 0
96 113 -130 0
-115 -55 156 0
-45 150 -41 0
-35 -145 -72 0
-123 -54 -146 0
-129 92 -96 0
-145 -4 160 0
51 27 -121 0
-79 157 -41 0
111 39 41 0
-98 -138 -4 0
-103 -72 160 0
36 -90 135 0
-60 -104 107 0
112 77 98 0
73 47 155 0
21 8 -81 0
-118 -101 71 0
29 -160 -121 0
28 -149 52 0
-74 -103 -114 0
-135 100 76 0
80 -81 -112 0
157 -28 6 0
-61 -118 80 0
108 -100 38 0
-43 -160 -28 0
49 -142 42 0
24 142 2 0
-78 -87 19 0
-79 115 -150 0
152 -31 -74 0
-152 -38 -50 0
-148 2 -123 0
-53 46 97 0
-37 81 34 0
89 -36 -18 0
128 10 -57 0
21 -33 -112 0
-20 -69 84 0
110 63 -113 0
-73 85 -1 0
-24 -68 98 0
133 49 65 0
116 -21 78 0
-127 10 51 0
123 -67 145 0
-106 -49 -25 0
-93 -94 139 0
18 -139 -82 0
-1 111 117 0
135 -93 -41 0
-94 19 135 0
96 87 -128 0
-129 | |
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""This module provides functions to read fabio images as an HDF5 file.
>>> import silx.io.fabioh5
>>> f = silx.io.fabioh5.File("foobar.edf")
.. note:: This module has a dependency on the `h5py <http://www.h5py.org/>`_
and `fabio <https://github.com/silx-kit/fabio>`_ libraries,
which are not mandatory dependencies for `silx`. You might need
to install them if you don't already have them.
"""
import collections
import numpy
import numbers
import logging
_logger = logging.getLogger(__name__)
try:
from silx.third_party import six
except ImportError:
import six
try:
import fabio
except ImportError as e:
_logger.error("Module %s requires fabio", __name__)
raise e
try:
import h5py
except ImportError as e:
_logger.error("Module %s requires h5py", __name__)
raise e
class Node(object):
    """Base class of all fabioh5 objects; manages the name/parent tree."""

    def __init__(self, name, parent=None):
        self.__parent = parent
        self.__basename = name

    @property
    def h5py_class(self):
        """Returns the h5py class mimicked by this class: one of
        `h5py.File`, `h5py.Group` or `h5py.Dataset`.

        :rtype: Class
        """
        raise NotImplementedError()

    @property
    def parent(self):
        """Returns the parent of the node, or None for a root node.

        :rtype: Node
        """
        return self.__parent

    @property
    def file(self):
        """Returns the file node of this node: the root of the tree if
        it is a File, else None.

        :rtype: Node
        """
        root = self
        while root.__parent is not None:
            root = root.__parent
        return root if isinstance(root, File) else None

    def _set_parent(self, parent):
        """Set the parent of this node.

        It does not update the parent object.

        :param Node parent: New parent for this node
        """
        self.__parent = parent

    @property
    def attrs(self):
        """Returns the HDF5 attributes of this node.

        :rtype: dict
        """
        return {}

    @property
    def name(self):
        """Returns the absolute HDF5 name of this node."""
        if self.__parent is None:
            return "/"
        parent_path = self.__parent.name
        if parent_path == "/":
            return "/" + self.basename
        return parent_path + "/" + self.basename

    @property
    def basename(self):
        """Returns the HDF5 basename of this node."""
        return self.__basename
class Dataset(Node):
    """Mimic of :class:`h5py.Dataset` backed by an in-memory numpy value.

    The wrapped value is either a :class:`numpy.ndarray` or a scalar;
    scalar datasets follow h5py's conventions (empty shape, ``len()``
    forbidden, item access restricted to ``...`` and ``()``).
    """

    def __init__(self, name, data, parent=None, attrs=None):
        self.__data = data
        Node.__init__(self, name, parent)
        self.__attrs = {} if attrs is None else attrs

    def _set_data(self, data):
        """Set the data exposed by the dataset.

        It has to be called only once, before the data is used, and the
        data should not be edited afterwards.

        :param numpy.ndarray data: Data associated to the dataset
        """
        self.__data = data

    def _get_data(self):
        """Returns the exposed data.

        :rtype: numpy.ndarray
        """
        return self.__data

    @property
    def attrs(self):
        """Returns the HDF5 attributes of this node.

        :rtype: dict
        """
        return self.__attrs

    @property
    def h5py_class(self):
        """Returns the h5py class mimicked by this class.

        :rtype: Class
        """
        return h5py.Dataset

    @property
    def dtype(self):
        """Returns the numpy datatype of the exposed data.

        :rtype: numpy.dtype
        """
        return self._get_data().dtype

    @property
    def shape(self):
        """Returns the shape of the exposed data; scalars get an empty
        shape, as in h5py.

        :rtype: tuple
        """
        data = self._get_data()
        if not isinstance(data, numpy.ndarray):
            return tuple()
        return data.shape

    @property
    def size(self):
        """Returns the size of the exposed data.

        :rtype: int
        """
        data = self._get_data()
        if not isinstance(data, numpy.ndarray):
            # h5py reports the size of a scalar dataset as float64 1.0
            return numpy.float64(1.0)
        return data.size

    def __len__(self):
        """Returns the length of the exposed data.

        :rtype: int
        """
        data = self._get_data()
        if not isinstance(data, numpy.ndarray):
            # h5py forbids len() on scalar datasets
            raise TypeError("Attempt to take len() of scalar dataset")
        return len(data)

    def __getitem__(self, item):
        """Returns a slice of the exposed data.

        :rtype: numpy.ndarray
        """
        data = self._get_data()
        if isinstance(data, numpy.ndarray):
            return data.__getitem__(item)
        # Scalars accept only the two accessors h5py allows.
        if item == Ellipsis:
            return numpy.array(data)
        if item == tuple():
            return data
        raise ValueError("Scalar can only be reached with an ellipsis or an empty tuple")

    def __str__(self):
        basename = self.name.split("/")[-1]
        return '<FabIO dataset "%s": shape %s, type "%s">' % \
            (basename, self.shape, self.dtype.str)

    def __getslice__(self, i, j):
        """Returns a slice of the exposed data.

        Deprecated, but still used by Python 2.7.

        :rtype: numpy.ndarray
        """
        return self.__getitem__(slice(i, j, None))

    @property
    def value(self):
        """Returns the whole exposed data.

        Deprecated by h5py; indexing with ``[()]`` is preferred.

        :rtype: numpy.ndarray
        """
        return self._get_data()

    @property
    def compression(self):
        """Returns compression as provided by `h5py.Dataset`.

        There is no compression, so always None."""
        return None

    @property
    def compression_opts(self):
        """Returns compression options as provided by `h5py.Dataset`.

        There is no compression, so always None."""
        return None

    @property
    def chunks(self):
        """Returns chunks as provided by `h5py.Dataset`.

        There is no chunking, so always None."""
        return None
class LazyLoadableDataset(Dataset):
    """Abstract dataset whose data is created lazily, on first access.

    Subclasses must implement :meth:`_create_data`, which returns the
    numpy data exposed by the dataset. That factory is invoked exactly
    once, the first time the data is needed.
    """

    def __init__(self, name, parent=None, attrs=None):
        super(LazyLoadableDataset, self).__init__(name, None, parent, attrs=attrs)
        self.__is_initialized = False

    def _create_data(self):
        """Factory creating the data exposed by the dataset.

        Must be implemented by subclasses.

        :rtype: numpy.ndarray
        """
        raise NotImplementedError()

    def _get_data(self):
        """Returns the exposed data, creating it on first call.

        Overrides :meth:`Dataset._get_data` to implement lazy loading.

        :rtype: numpy.ndarray
        """
        if not self.__is_initialized:
            self._set_data(self._create_data())
            self.__is_initialized = True
        return super(LazyLoadableDataset, self)._get_data()
class Group(Node):
"""Class which mimic a `h5py.Group`."""
def __init__(self, name, parent=None, attrs=None):
    """Create a group node.

    :param str name: basename of the group
    :param Node parent: parent node, if any
    :param dict attrs: HDF5 attributes exposed by the group
    """
    Node.__init__(self, name, parent)
    # Children are kept in insertion order.
    self.__items = collections.OrderedDict()
    self.__attrs = {} if attrs is None else attrs
def _get_items(self):
    """Returns the child items as a name-node dictionary.

    Insertion order is preserved (OrderedDict).

    :rtype: dict
    """
    return self.__items
def add_node(self, node):
    """Add a child to this group.

    The child is registered under its basename and its parent
    back-reference is updated to this group.

    :param Node node: Child to add to this group
    """
    self._get_items()[node.basename] = node
    node._set_parent(self)
@property
def h5py_class(self):
    """Returns the h5py class mimicked by this class.

    It returns `h5py.Group`.

    :rtype: Class
    """
    return h5py.Group
@property
def attrs(self):
    """Returns the HDF5 attributes of this node.

    :rtype: dict
    """
    return self.__attrs
def items(self):
    """Returns an iterator over the (name, node) child pairs.

    :rtype: iterator
    """
    return self._get_items().items()
def get(self, name, default=None, getclass=False, getlink=False):
    """Retrieve an item, or information about an item.

    Since this implementation does not use links, when ``getlink`` is
    true the returned value is always a ``h5py.HardLink``, like the
    original h5py implementation.

    :param str name: name of the item
    :param object default: value returned when the name is not found
    :param bool getclass: if true, return the mimicked h5py class of
        the object found instead of the object itself
    :param bool getlink: if true, return a link object instead of the
        target
    :return: An object, else None
    :rtype: object
    """
    children = self._get_items()
    if name not in children:
        return default
    node = h5py.HardLink() if getlink else children[name]
    return node.h5py_class if getclass else node
def __len__(self):
    """Returns the number of children contained in this group.

    :rtype: int
    """
    return len(self._get_items())
def __iter__(self):
    """Iterate over member names."""
    for member_name in self._get_items():
        yield member_name
def __getitem__(self, name):
"""Return a child from is name.
:param name str: name of a member or a path throug members using '/'
separator. A '/' as a prefix access to the root item of the tree.
:rtype: Node
"""
if name is None or name == "":
raise | |
)
generate_test_file(test_file)
result = runner.invoke(
cli,
[
"--cache",
cache_minutes,
"validate",
"shcd-cabling",
"--csm",
csm,
"--architecture",
architecture,
"--ips",
ips,
"--username",
username,
"--password",
password,
"--shcd",
test_file,
"--tabs",
tabs,
"--corners",
corners_too_narrow,
],
)
assert result.exit_code == 1
assert "Not enough columns exist." in str(result.output)
remove_switch_from_cache(ip)
@patch("canu.report.switch.cabling.cabling.switch_vendor")
@patch("canu.report.switch.cabling.cabling.netmiko_command")
@responses.activate
def test_validate_shcd_cabling_corners_too_high(netmiko_command, switch_vendor):
    """Test that the `canu validate shcd-cabling` command fails on empty cells."""
    corners_too_high = "H16,S48"
    with runner.isolated_filesystem():
        switch_vendor.return_value = "aruba"
        netmiko_command.return_value = mac_address_table
        # Mock the Aruba REST session: login, three GETs, logout, in the
        # order the cabling report issues them.
        responses.add(responses.POST, f"https://{ip}/rest/v10.04/login")
        for endpoint, payload in (
            ("system?attributes=platform_name,hostname,system_mac", switch_info1),
            ("system/interfaces/*/lldp_neighbors?depth=2", lldp_neighbors_json1),
            ("system/vrfs/default/neighbors?depth=2", arp_neighbors_json1),
        ):
            responses.add(
                responses.GET,
                f"https://{ip}/rest/v10.04/{endpoint}",
                json=payload,
            )
        responses.add(responses.POST, f"https://{ip}/rest/v10.04/logout")
        generate_test_file(test_file)
        cli_args = [
            "--cache", cache_minutes,
            "validate", "shcd-cabling",
            "--csm", csm,
            "--architecture", architecture,
            "--ips", ips,
            "--username", username,
            "--password", password,
            "--shcd", test_file,
            "--tabs", tabs,
            "--corners", corners_too_high,
        ]
        result = runner.invoke(cli, cli_args)
        # Corners placed past the header row make header detection fail.
        assert result.exit_code == 1
        assert "On tab 25G_10G, header column Source not found." in str(result.output)
        assert "On tab 25G_10G, the header is formatted incorrectly." in str(
            result.output,
        )
        remove_switch_from_cache(ip)
@patch("canu.report.switch.cabling.cabling.switch_vendor")
@patch("canu.report.switch.cabling.cabling.netmiko_command")
@responses.activate
def test_validate_shcd_cabling_corners_bad_cell(netmiko_command, switch_vendor):
    """Test that the `canu validate shcd-cabling` command fails on bad cell."""
    corners_bad_cell = "16,S48"
    with runner.isolated_filesystem():
        switch_vendor.return_value = "aruba"
        netmiko_command.return_value = mac_address_table
        # Mock the Aruba REST session: login, three GETs, logout, in the
        # order the cabling report issues them.
        responses.add(responses.POST, f"https://{ip}/rest/v10.04/login")
        for endpoint, payload in (
            ("system?attributes=platform_name,hostname,system_mac", switch_info1),
            ("system/interfaces/*/lldp_neighbors?depth=2", lldp_neighbors_json1),
            ("system/vrfs/default/neighbors?depth=2", arp_neighbors_json1),
        ):
            responses.add(
                responses.GET,
                f"https://{ip}/rest/v10.04/{endpoint}",
                json=payload,
            )
        responses.add(responses.POST, f"https://{ip}/rest/v10.04/logout")
        generate_test_file(test_file)
        cli_args = [
            "--cache", cache_minutes,
            "validate", "shcd-cabling",
            "--csm", csm,
            "--architecture", architecture,
            "--ips", ips,
            "--username", username,
            "--password", password,
            "--shcd", test_file,
            "--tabs", tabs,
            "--corners", corners_bad_cell,
        ]
        result = runner.invoke(cli, cli_args)
        # "16" is not a valid spreadsheet cell reference.
        assert result.exit_code == 1
        assert "Bad range of cells entered for tab 25G_10G." in str(result.output)
        remove_switch_from_cache(ip)
def test_validate_shcd_cabling_not_enough_corners():
    """Test that the `canu validate shcd-cabling` command fails on not enough corners."""
    not_enough_corners = "H16"
    with runner.isolated_filesystem():
        generate_test_file(test_file)
        cli_args = [
            "--cache", cache_minutes,
            "validate", "shcd-cabling",
            "--csm", csm,
            "--architecture", architecture,
            "--ips", ips,
            "--username", username,
            "--password", password,
            "--shcd", test_file,
            "--tabs", tabs,
            "--corners", not_enough_corners,
        ]
        result = runner.invoke(cli, cli_args)
        # The command reports the corner-count problem but exits cleanly.
        assert result.exit_code == 0
        assert "There were 1 corners entered, but there should be 2." in str(
            result.output,
        )
        remove_switch_from_cache(ip)
@patch("canu.report.switch.cabling.cabling.switch_vendor")
@patch("canu.report.switch.cabling.cabling.netmiko_command")
@responses.activate
def test_validate_shcd_cabling_bad_headers(netmiko_command, switch_vendor):
    """Test that the `canu validate shcd-cabling` command fails on bad headers."""
    bad_header_tab = "Bad_Headers"
    with runner.isolated_filesystem():
        switch_vendor.return_value = "aruba"
        netmiko_command.return_value = mac_address_table
        # Mock the Aruba REST session: login, three GETs, logout, in the
        # order the cabling report issues them.
        responses.add(responses.POST, f"https://{ip}/rest/v10.04/login")
        for endpoint, payload in (
            ("system?attributes=platform_name,hostname,system_mac", switch_info1),
            ("system/interfaces/*/lldp_neighbors?depth=2", lldp_neighbors_json1),
            ("system/vrfs/default/neighbors?depth=2", arp_neighbors_json1),
        ):
            responses.add(
                responses.GET,
                f"https://{ip}/rest/v10.04/{endpoint}",
                json=payload,
            )
        responses.add(responses.POST, f"https://{ip}/rest/v10.04/logout")
        generate_test_file(test_file)
        cli_args = [
            "--cache", cache_minutes,
            "validate", "shcd-cabling",
            "--csm", csm,
            "--architecture", architecture,
            "--ips", ips,
            "--username", username,
            "--password", password,
            "--shcd", test_file,
            "--tabs", bad_header_tab,
            "--corners", corners,
        ]
        result = runner.invoke(cli, cli_args)
        # The Bad_Headers tab is generated without a Slot column.
        assert result.exit_code == 1
        assert "On tab Bad_Headers, header column Slot not found" in str(result.output)
        remove_switch_from_cache(ip)
@patch("canu.report.switch.cabling.cabling.switch_vendor")
@patch("canu.report.switch.cabling.cabling.netmiko_command")
@responses.activate
def test_validate_shcd_cabling_bad_architectural_definition(
    netmiko_command,
    switch_vendor,
):
    """Test that the `canu validate shcd-cabling` command fails with bad connections."""
    corners_bad_row = "I14,S31"
    with runner.isolated_filesystem():
        switch_vendor.return_value = "aruba"
        netmiko_command.return_value = mac_address_table
        # Mock the Aruba REST session: login, three GETs, logout, in the
        # order the cabling report issues them.
        responses.add(responses.POST, f"https://{ip}/rest/v10.04/login")
        for endpoint, payload in (
            ("system?attributes=platform_name,hostname,system_mac", switch_info1),
            ("system/interfaces/*/lldp_neighbors?depth=2", lldp_neighbors_json1),
            ("system/vrfs/default/neighbors?depth=2", arp_neighbors_json1),
        ):
            responses.add(
                responses.GET,
                f"https://{ip}/rest/v10.04/{endpoint}",
                json=payload,
            )
        responses.add(responses.POST, f"https://{ip}/rest/v10.04/logout")
        generate_test_file(test_file)
        cli_args = [
            "--cache", cache_minutes,
            "validate", "shcd-cabling",
            "--csm", csm,
            "--architecture", architecture,
            "--ips", ips,
            "--username", username,
            "--password", password,
            "--shcd", test_file,
            "--tabs", tabs,
            "--corners", corners_bad_row,
        ]
        result = runner.invoke(cli, cli_args)
        # The widened corner range pulls in the deliberately bad wiring row.
        assert result.exit_code == 1
        assert "No architectural definition found to allow connection between" in str(
            result.output,
        )
        remove_switch_from_cache(ip)
# Switch 1
# Mocked Aruba v10.04 REST payloads for sw-spine01: system info, LLDP
# neighbor table (keyed by URL-encoded port, e.g. "1%2F1%2F1" == "1/1/1",
# then by "<chassis id>,<port id>"), and ARP neighbor table.
switch_info1 = {
    "hostname": "sw-spine01",
    "platform_name": "X86-64",
    "system_mac": "aa:aa:aa:aa:aa:aa",
}
lldp_neighbors_json1 = {
    "1%2F1%2F1": {
        "bb:bb:bb:bb:bb:bb,1/1/1": {
            "chassis_id": "bb:bb:bb:bb:bb:bb",
            "mac_addr": "bb:bb:bb:bb:bb:cc",
            "neighbor_info": {
                "chassis_description": "Test switch description",
                "chassis_name": "sw-spine02",
                "port_description": "",
                "port_id_subtype": "if_name",
            },
            "port_id": "1/1/1",
        },
    },
    "1%2F1%2F2": {
        "aa:bb:cc:88:00:00,1/1/2": {
            "chassis_id": "aa:bb:cc:88:00:00",
            "mac_addr": "aa:bb:cc:88:00:03",
            "neighbor_info": {
                "chassis_description": "Test switch2 description",
                "chassis_name": "sw-spine02",
                "port_description": "1/1/2",
                "port_id_subtype": "if_name",
            },
            "port_id": "1/1/2",
        },
    },
    # Port 1/1/3 has two anonymous link-local neighbors.
    "1%2F1%2F3": {
        "00:00:00:00:00:00,00:00:00:00:00:00": {
            "chassis_id": "00:00:00:00:00:00",
            "mac_addr": "00:00:00:00:00:00",
            "neighbor_info": {
                "chassis_description": "",
                "chassis_name": "",
                "port_description": "",
                "port_id_subtype": "link_local_addr",
            },
            "port_id": "00:00:00:00:00:00",
        },
        "11:11:11:11:11:11,11:11:11:11:11:11": {
            "chassis_id": "11:11:11:11:11:11",
            "mac_addr": "11:11:11:11:11:11",
            "neighbor_info": {
                "chassis_description": "",
                "chassis_name": "",
                "port_description": "",
                "port_id_subtype": "link_local_addr",
            },
            "port_id": "11:11:11:11:11:11",
        },
    },
    # Port 1/1/4 sees an NCN management interface.
    "1%2F1%2F4": {
        "aa:aa:aa:aa:aa:aa,aa:aa:aa:aa:aa:aa": {
            "chassis_id": "aa:aa:aa:aa:aa:aa",
            "mac_addr": "aa:aa:aa:aa:aa:aa",
            "neighbor_info": {
                "chassis_description": "NCN description",
                "chassis_name": "ncn-m88",
                "port_description": "mgmt1",
                "port_id_subtype": "link_local_addr",
            },
            "port_id": "aa:aa:aa:aa:aa:aa",
        },
    },
    "1%2F1%2F5": {
        "99:99:99:99:99:99,1/1/5": {
            "chassis_id": "99:99:99:99:99:99",
            "mac_addr": "99:99:99:99:99:99",
            "neighbor_info": {
                "chassis_description": "sw-leaf-bmc-99",
                "chassis_name": "sw-leaf-bmc99",
                "port_description": "1/1/5",
                "port_id_subtype": "if_name",
            },
            "port_id": "1/1/5",
        },
    },
}
arp_neighbors_json1 = {
    "192.168.1.2,vlan1": {
        "mac": "00:00:00:00:00:00",
        "ip_address": "192.168.1.2",
        "port": {"vlan1": "/rest/v10.04/system/interfaces/vlan1"},
    },
    "192.168.1.3,vlan2": {
        "mac": "11:11:11:11:11:11",
        "ip_address": "192.168.1.3",
        "port": {"vlan2": "/rest/v10.04/system/interfaces/vlan2"},
    },
    "192.168.2.2,vlan3": {
        "mac": "00:00:00:00:00:00",
        "ip_address": "192.168.2.2",
        "port": {"vlan3": "/rest/v10.04/system/interfaces/vlan3"},
    },
}
# Switch 2
# Equivalent mocked payloads for the second switch (sw-leaf02).
switch_info2 = {
    "hostname": "sw-leaf02",
    "platform_name": "X86-64",
    "system_mac": "bb:bb:bb:bb:bb:bb",
}
lldp_neighbors_json2 = {
    "1%2F1%2F1": {
        "aa:aa:aa:aa:aa:aa,1/1/1": {
            "chassis_id": "aa:aa:aa:aa:aa:aa",
            "mac_addr": "aa:aa:aa:aa:aa:bb",
            "neighbor_info": {
                "chassis_description": "Test switch description",
                "chassis_name": "sw-leaf01",
                "port_description": "",
                "port_id_subtype": "if_name",
            },
            "port_id": "1/1/1",
        },
    },
}
arp_neighbors_json2 = {
    "192.168.1.2,vlan1": {
        "mac": "00:00:00:00:00:00",
        "ip_address": "192.168.1.2",
        "port": {"vlan1": "/rest/v10.04/system/interfaces/vlan1"},
    },
    "192.168.1.3,vlan2": {
        "mac": "11:11:11:11:11:11",
        "ip_address": "192.168.1.3",
        "port": {"vlan2": "/rest/v10.04/system/interfaces/vlan2"},
    },
    "192.168.2.2,vlan3": {
        "mac": "00:00:00:00:00:00",
        "ip_address": "192.168.2.2",
        "port": {"vlan3": "/rest/v10.04/system/interfaces/vlan3"},
    },
}
def generate_test_file(file_name):
    """Generate the SHCD xlsx workbook used by the tests.

    Three tabs are created:

    - ``25G_10G``: well-formed headers plus connection rows; the last
      row is deliberately bad wiring (mn99/mn98) that only widened
      corner ranges reach.
    - ``Bad_Headers``: header row with the ``Slot`` column missing and
      the remaining headers shifted one column left.
    - ``More_connections``: well-formed headers with invalid
      connections.

    :param file_name: path the workbook is saved to
    """
    wb = Workbook()

    # Standard SHCD header layout (columns M and R intentionally empty).
    standard_headers = (
        ("I14", "Source"),
        ("J14", "Rack"),
        ("K14", "Location"),
        ("L14", "Slot"),
        ("N14", "Port"),
        ("O14", "Destination"),
        ("P14", "Rack"),
        ("Q14", "Location"),
        ("S14", "Port"),
    )

    def _write_rows(sheet, rows):
        # Data starts at row 15, column I (9); values are written as text.
        # Iterating the data itself (instead of hard-coded range bounds)
        # keeps the loop in sync with the row lists.
        for row_offset, row in enumerate(rows):
            for col_offset, value in enumerate(row):
                sheet.cell(
                    column=col_offset + 9,
                    row=row_offset + 15,
                    value=f"{value}",
                )

    # Tab 1 "25G_10G": valid headers and connection data.
    ws1 = wb.active
    ws1.title = "25G_10G"
    for cell, header in standard_headers:
        ws1[cell] = header
    test_data = [
        ["sw-25g01", "x3000", "u12", "", "-", "1", "sw-25g02", "x3000", "u13", "-", "1"],
        ["sw-25g01", "x3000", "u12", "", "-", "2", "sw-25g02", "x3000", "u13", "-", "2"],
        ["sw-25g01", "x3000", "u12", "", "-", "3", "junk", "x3000", "u13", "-", "2"],
        ["sw-25g01", "x3000", "u12", "", "-", "4", "sw-smn99", "x3000", "u13", "-", "2"],
        ["sw-25g01", "x3000", "u12", "", "-", "5", "junk", "x3000", "u13", "-", "2"],
        ["sw-smn01", "x3000", "U14", "", "-", "49", "sw-25g01", "x3000", "u12", "-", "48"],
        ["sw-smn01", "x3000", "U14", "", "", "50", "sw-25g02", "x3000", "u13", "-", "48"],
        ["sw-25g01", "x3000", "u12", "", "-", "47", "sw-25g02", "x3000", "u13", "-", "47"],
        ["uan01", "x3000", "u19", "ocp", "-", "1", "sw-25g01", "x3000", "u12", "-", "16"],
        ["uan01", "x3000", "u19", "ocp", "-", "2", "sw-25g01", "x3000", "u12", "-", "17"],
        ["uan01", "x3000", "u19", "pcie-slot1", "-", "1", "sw-25g02", "x3000", "u13", "-", "16"],
        ["uan01", "x3000", "u19", "pcie-slot1", "-", "2", "sw-25g02", "x3000", "u13", "-", "17"],
        ["sn03", "x3000", "u09", "ocp", "-", "1", "sw-25g01", "x3000", "u12", "-", "15"],
        ["wn03", "x3000", "u06", "ocp", "-", "1", "sw-25g01", "x3000", "u12", "-", "9"],
        ["wn03", "x3000", "u06", "ocp", "-", "2", "sw-25g02", "x3000", "u13", "-", "9"],
        ["CAN switch", "cfcanb4s1", "", "", "-", "9", "sw-25g01", "x3000", "u12", "-", "36"],
        # BAD ROW: bad wiring, reached only by widened corner ranges.
        ["mn99", "x3000", "u12", "", "-", "1", "mn98", "x3000", "u13", "-", "1"],
    ]
    _write_rows(ws1, test_data)

    # Tab 2 "Bad_Headers": the Slot header is missing, so the headers
    # after Location are shifted one column to the left.
    ws2 = wb.create_sheet(title="Bad_Headers")
    for cell, header in (
        ("I14", "Source"),
        ("J14", "Rack"),
        ("K14", "Location"),
        ("M14", "Port"),
        ("N14", "Destination"),
        ("O14", "Rack"),
        ("P14", "Location"),
        ("R14", "Port"),
    ):
        ws2[cell] = header

    # Tab 3 "More_connections": valid headers, invalid connections.
    ws3 = wb.create_sheet(title="More_connections")
    for cell, header in standard_headers:
        ws3[cell] = header
    test_data3 = [
        ["sw-25g01", "x3000", "u12", "", "-", "51", "sw-25g02", "x3000", "u13", "-", "51"],
        ["sw-25g01", "x3000", "u12", "", "-", "52", "sw-25g02", "x3000", "u13", "-", "52"],
        ["sw-25g01", "x3000", "u12", "", "-", "52", "sw-25g02", "x3000", "u13", "-", "51"],
        ["sw-cdu01", "x3000", "u12", "", "-", "52", "sw-smn99", "x3000", "u13", "-", "52"],
        ["mn-99", "x3000", "u12", "", "-", "52", "sw-25g01", "x3000", "u13", "-", "52"],
        ["mn-99", "x3000", "u12", "", "-", "50", "sw-25g02", "x3000", "u13", "-", "52"],
        ["mn-99", "x3000", "u12", "", "-", "51", "sw-smn98", "x3000", "u13", "-", "52"],
        ["mn-99", "x3000", "u12", "", "-", "52", "sw-smn99", "x3000", "u13", "-", "52"],
        ["sw-100g01", "x3000", "u12", "", "-", "52", "sw-smn99", "x3000", "u13", "-", "52"],
    ]
    _write_rows(ws3, test_data3)

    wb.save(filename=file_name)
mac_address_table = (
"MAC age-time : 300 seconds\n"
+ "Number of MAC addresses : 90\n"
+ "\n"
+ "MAC Address VLAN Type | |
<reponame>stolarczyk/peppy
"""
Build a Project object.
"""
import os
from collections import Mapping
from logging import getLogger
import pandas as pd
from attmap import PathExAttMap
from ubiquerg import is_url
from .const import *
from .exceptions import *
from .sample import Sample
from .utils import copy, load_yaml, make_abs_via_cfg, make_list
_LOGGER = getLogger(PKG_NAME)
@copy
class Project(PathExAttMap):
"""
A class to model a Project (collection of samples and metadata).
:param str | Mapping cfg: Project config file (YAML), or appropriate
key-value mapping of data to constitute project
:param str | Iterable[str] sample_table_index: name of the columns to set
the sample_table index to
:param str | Iterable[str] subsample_table_index: name of the columns to set
the subsample_table index to
:param str | Iterable[str] amendments: names of the amendments to activate
    within the configuration file
:Example:
.. code-block:: python
from peppy import Project
prj = Project("ngs")
samples = prj.samples
"""
def __init__(
    self,
    cfg=None,
    amendments=None,
    sample_table_index=None,
    subsample_table_index=None,
    defer_samples_creation=False,
):
    """Initialize the project, optionally from a config file path."""
    _LOGGER.debug(
        "Creating {}{}".format(
            self.__class__.__name__, " from file {}".format(cfg) if cfg else ""
        )
    )
    super(Project, self).__init__()
    if isinstance(cfg, str):
        # Remember the config source, then parse it (parsing also
        # activates the requested amendments).
        self[CONFIG_FILE_KEY] = cfg
        self.parse_config_file(cfg, amendments)
    else:
        # NOTE(review): a Mapping cfg (advertised in the class docstring)
        # is not consumed here -- confirm whether mapping support is
        # handled elsewhere or missing.
        self[CONFIG_FILE_KEY] = None
    self._samples = []
    self[SAMPLE_EDIT_FLAG_KEY] = False
    # Index column(s) for the sample and subsample tables, with defaults.
    self.st_index = sample_table_index or SAMPLE_NAME_ATTR
    self.sst_index = subsample_table_index or [
        SAMPLE_NAME_ATTR,
        SUBSAMPLE_NAME_ATTR,
    ]
    self.name = self.infer_name()
    self.description = self.get_description()
    # Sample creation may be deferred by the caller; the sample table is
    # built from whatever samples exist at this point.
    if not defer_samples_creation:
        self.create_samples()
    self._sample_table = self._get_table_from_samples(index=self.st_index)
    def create_samples(self):
        """
        Populate Project with Sample objects

        Loads samples from the sample table, then applies the configured
        sample modifiers via modify_samples().
        """
        self._samples = self.load_samples()
        self.modify_samples()
def _reinit(self):
"""
Clear all object attributes and initialize again
"""
cfg_path = self[CONFIG_FILE_KEY] if CONFIG_FILE_KEY in self else None
for attr in self.keys():
del self[attr]
self.__init__(cfg=cfg_path)
def _get_table_from_samples(self, index):
"""
Generate a data frame from samples. Excludes private
attrs (prepended with an underscore)
:param str | Iterable[str] index: name of the columns to set the index to
:return pandas.DataFrame: a data frame with current samples attributes
"""
df = pd.DataFrame()
for sample in self.samples:
sd = sample.to_dict()
ser = pd.Series({k: v for (k, v) in sd.items() if not k.startswith("_")})
df = df.append(ser, ignore_index=True)
index = [index] if isinstance(index, str) else index
if not all([i in df.columns for i in index]):
_LOGGER.debug(
"Could not set {} index. At least one of the "
"requested columns does not exist: {}".format(
CFG_SAMPLE_TABLE_KEY, index
)
)
return df
_LOGGER.debug("Setting sample_table index to: {}".format(index))
df.set_index(keys=index, drop=False, inplace=True)
return df
    def parse_config_file(self, cfg_path, amendments=None):
        """
        Parse provided yaml config file and check required fields exist.

        :param str cfg_path: path to the config file to read and parse
        :param Iterable[str] amendments: Name of amendments to activate
        :raises KeyError: if config file lacks required section(s)
        """
        if CONFIG_KEY not in self:
            self[CONFIG_KEY] = PathExAttMap()
        # The config may live on disk or behind a URL.
        if not os.path.exists(cfg_path) and not is_url(cfg_path):
            raise OSError(f"Project config file path does not exist: {cfg_path}")
        config = load_yaml(cfg_path)
        assert isinstance(
            config, Mapping
        ), "Config file parse did not yield a Mapping; got {} ({})".format(
            config, type(config)
        )
        _LOGGER.debug("Raw ({}) config data: {}".format(cfg_path, config))
        # recursively import configs
        if (
            PROJ_MODS_KEY in config
            and CFG_IMPORTS_KEY in config[PROJ_MODS_KEY]
            and config[PROJ_MODS_KEY][CFG_IMPORTS_KEY]
        ):
            # Import paths are interpreted relative to this config file.
            _make_sections_absolute(config[PROJ_MODS_KEY], [CFG_IMPORTS_KEY], cfg_path)
            _LOGGER.info(
                "Importing external Project configurations: {}".format(
                    ", ".join(config[PROJ_MODS_KEY][CFG_IMPORTS_KEY])
                )
            )
            for i in config[PROJ_MODS_KEY][CFG_IMPORTS_KEY]:
                _LOGGER.debug("Processing external config: {}".format(i))
                if os.path.exists(i):
                    self.parse_config_file(cfg_path=i)
                else:
                    _LOGGER.warning(
                        "External Project configuration does not" " exist: {}".format(i)
                    )
        self[CONFIG_KEY].add_entries(config)
        # Parse yaml into the project.config attributes
        _LOGGER.debug("Adding attributes: {}".format(", ".join(config)))
        # Overwrite any config entries with entries in the amendments
        amendments = [amendments] if isinstance(amendments, str) else amendments
        if amendments:
            for amendment in amendments:
                c = self[CONFIG_KEY]
                if (
                    PROJ_MODS_KEY in c
                    and AMENDMENTS_KEY in c[PROJ_MODS_KEY]
                    and c[PROJ_MODS_KEY][AMENDMENTS_KEY] is not None
                ):
                    _LOGGER.debug("Adding entries for amendment '{}'".format(amendment))
                    try:
                        amends = c[PROJ_MODS_KEY][AMENDMENTS_KEY][amendment]
                    except KeyError:
                        # Named amendment not present among the declared ones.
                        raise MissingAmendmentError(
                            amendment, c[PROJ_MODS_KEY][AMENDMENTS_KEY]
                        )
                    _LOGGER.debug("Updating with: {}".format(amends))
                    self[CONFIG_KEY].add_entries(amends)
                    _LOGGER.info("Using amendments: {}".format(amendment))
                else:
                    raise MissingAmendmentError(amendment)
            self[ACTIVE_AMENDMENTS_KEY] = amendments
        # determine config version and reformat it, if needed
        self[CONFIG_KEY][CONFIG_VERSION_KEY] = ".".join(self._get_cfg_v())
        # here specify cfg sections that may need expansion
        relative_vars = [CFG_SAMPLE_TABLE_KEY, CFG_SUBSAMPLE_TABLE_KEY]
        _make_sections_absolute(self[CONFIG_KEY], relative_vars, cfg_path)
def load_samples(self):
self._read_sample_data()
samples_list = []
if SAMPLE_DF_KEY not in self:
return []
for _, r in self[SAMPLE_DF_KEY].iterrows():
samples_list.append(Sample(r.dropna(), prj=self))
return samples_list
    def modify_samples(self):
        """
        Apply all configured sample modifiers to the loaded samples.

        The call order matters: removal, constants, synonyms, implication,
        name assertion, subsample merge, and finally attribute derivation.
        """
        if self._modifier_exists():
            # Warn about unknown subsections rather than failing.
            mod_diff = set(self[CONFIG_KEY][SAMPLE_MODS_KEY].keys()) - set(
                SAMPLE_MODIFIERS
            )
            if len(mod_diff) > 0:
                _LOGGER.warning(
                    "Config '{}' section contains unrecognized "
                    "subsections: {}".format(SAMPLE_MODS_KEY, mod_diff)
                )
        self.attr_remove()
        self.attr_constants()
        self.attr_synonyms()
        self.attr_imply()
        self._assert_samples_have_names()
        self.attr_merge()
        self.attr_derive()
def _modifier_exists(self, modifier_key=None):
"""
Check whether a specified sample modifier is defined and can be applied
If no modifier is specified, only the sample_modifiers section's
existence is checked
:param str modifier_key: modifier key to be checked
:return bool: whether the requirements are met
"""
_LOGGER.debug("Checking existence: {}".format(modifier_key))
if CONFIG_KEY not in self or SAMPLE_MODS_KEY not in self[CONFIG_KEY]:
return False
if (
modifier_key is not None
and modifier_key not in self[CONFIG_KEY][SAMPLE_MODS_KEY]
):
return False
return True
def attr_remove(self):
"""
Remove declared attributes from all samples that have them defined
"""
def _del_if_in(obj, attr):
if attr in obj:
del obj[attr]
if self._modifier_exists(REMOVE_KEY):
to_remove = self[CONFIG_KEY][SAMPLE_MODS_KEY][REMOVE_KEY]
_LOGGER.debug("Removing attributes: {}".format(to_remove))
for attr in to_remove:
[_del_if_in(s, attr) for s in self.samples]
def attr_constants(self):
"""
Update each Sample with constants declared by a Project.
If Project does not declare constants, no update occurs.
"""
if self._modifier_exists(CONSTANT_KEY):
to_append = self[CONFIG_KEY][SAMPLE_MODS_KEY][CONSTANT_KEY]
_LOGGER.debug("Applying constant attributes: {}".format(to_append))
for attr, val in to_append.items():
[s.update({attr: val}) for s in self.samples if attr not in s]
def attr_synonyms(self):
"""
Copy attribute values for all samples to a new one
"""
if self._modifier_exists(DUPLICATED_KEY):
synonyms = self[CONFIG_KEY][SAMPLE_MODS_KEY][DUPLICATED_KEY]
_LOGGER.debug("Applying synonyms: {}".format(synonyms))
for sample in self.samples:
for attr, new in synonyms.items():
if attr in sample:
setattr(sample, new, getattr(sample, attr))
    def _assert_samples_have_names(self):
        """
        Make sure samples have sample_name attribute specified.
        Try to derive this attribute first.

        :raise InvalidSampleTableFileException: if names are not specified
        """
        try:
            # before merging, which is requires sample_name attribute to map
            # sample_table rows to subsample_table rows,
            # perform only sample_name attr derivation
            if (
                SAMPLE_NAME_ATTR
                in self[CONFIG_KEY][SAMPLE_MODS_KEY][DERIVED_KEY][DERIVED_ATTRS_KEY]
            ):
                self.attr_derive(attrs=[SAMPLE_NAME_ATTR])
        except KeyError:
            # No derived-attributes section configured; nothing to derive.
            pass
        for sample in self.samples:
            if SAMPLE_NAME_ATTR not in sample:
                msg_base = "{st} is missing '{sn}' column; ".format(
                    st=CFG_SAMPLE_TABLE_KEY, sn=SAMPLE_NAME_ATTR
                )
                msg = msg_base + "you must specify {sn}s in {st} or derive them".format(
                    st=CFG_SAMPLE_TABLE_KEY, sn=SAMPLE_NAME_ATTR
                )
                # When a custom sample_table index was requested, use it as the
                # name and only warn; otherwise missing names are a hard error.
                if self.st_index != SAMPLE_NAME_ATTR:
                    setattr(sample, SAMPLE_NAME_ATTR, getattr(sample, self.st_index))
                    _LOGGER.warning(
                        msg_base + "using specified {} index ({}) instead. "
                        "Setting name: {}".format(
                            CFG_SAMPLE_TABLE_KEY,
                            self.st_index,
                            getattr(sample, self.st_index),
                        )
                    )
                else:
                    raise InvalidSampleTableFileException(msg)
    def attr_merge(self):
        """
        Merge sample subannotations (from subsample table) with
        sample annotations (from sample_table)

        For each sample, the matching rows of every subsample table are
        collapsed so that each attribute becomes a list of per-row values.
        """
        if SUBSAMPLE_DF_KEY not in self or self[SUBSAMPLE_DF_KEY] is None:
            _LOGGER.debug("No {} found, skipping merge".format(CFG_SUBSAMPLE_TABLE_KEY))
            return
        for subsample_table in self[SUBSAMPLE_DF_KEY]:
            # Warn about subsample rows that reference no known sample.
            for n in list(subsample_table[self.sample_name_colname]):
                if n not in [s[SAMPLE_NAME_ATTR] for s in self.samples]:
                    _LOGGER.warning(
                        ("Couldn't find matching sample for " "subsample: {}").format(n)
                    )
            for sample in self.samples:
                sample_colname = self.sample_name_colname
                if sample_colname not in subsample_table.columns:
                    raise KeyError(
                        "Subannotation requires column '{}'.".format(sample_colname)
                    )
                _LOGGER.debug(
                    "Using '{}' as sample name column from "
                    "subannotation table".format(sample_colname)
                )
                # Select this sample's rows, dropping all-NaN columns.
                sample_indexer = (
                    subsample_table[sample_colname] == sample[SAMPLE_NAME_ATTR]
                )
                this_sample_rows = subsample_table[sample_indexer].dropna(
                    how="all", axis=1
                )
                if len(this_sample_rows) == 0:
                    _LOGGER.debug(
                        "No merge rows for sample '%s', skipping",
                        sample[SAMPLE_NAME_ATTR],
                    )
                    continue
                _LOGGER.debug("%d rows to merge", len(this_sample_rows))
                _LOGGER.debug(
                    "Merge rows dict: " "{}".format(this_sample_rows.to_dict())
                )
                merged_attrs = {key: list() for key in this_sample_rows.columns}
                _LOGGER.debug(this_sample_rows)
                for subsample_row_id, row in this_sample_rows.iterrows():
                    try:
                        # Ensure a subsample name exists; fall back to row id.
                        row[SUBSAMPLE_NAME_ATTR]
                    except KeyError:
                        row[SUBSAMPLE_NAME_ATTR] = str(subsample_row_id)
                    rowdata = row.to_dict()
                    # NOTE(review): this helper is re-defined on every row
                    # iteration; behavior is unchanged but it could be hoisted.
                    def _select_new_attval(merged_attrs, attname, attval):
                        """Select new attribute value for the merged columns
                        dictionary"""
                        if attname in merged_attrs:
                            return merged_attrs[attname] + [attval]
                        return [str(attval).rstrip()]
                    for attname, attval in rowdata.items():
                        if attname == sample_colname or not attval:
                            _LOGGER.debug("Skipping KV: {}={}".format(attname, attval))
                            continue
                        _LOGGER.debug(
                            "merge: sample '{}'; '{}'='{}'".format(
                                sample[SAMPLE_NAME_ATTR], attname, attval
                            )
                        )
                        merged_attrs[attname] = _select_new_attval(
                            merged_attrs, attname, attval
                        )
                # remove sample name from the data with which to update sample
                merged_attrs.pop(sample_colname, None)
                _LOGGER.debug(
                    "Updating Sample {}: {}".format(
                        sample[SAMPLE_NAME_ATTR], merged_attrs
                    )
                )
                sample.update(merged_attrs)
def attr_imply(self):
"""
Infer value for additional field(s) from other field(s).
Add columns/fields to the sample based on values in those already-set
that the sample's project defines as indicative of implications for
additional data elements for the sample.
"""
if not self._modifier_exists(IMPLIED_KEY):
return
implications = self[CONFIG_KEY][SAMPLE_MODS_KEY][IMPLIED_KEY]
if not isinstance(implications, list):
| |
<gh_stars>0
#! /usr/bin/env python
# Copyright: (c) 2019, <NAME>
# This software is made available to you under the terms of the Apache2.0 license.
# Apache License v2.0
# See LICENSE.txt for details.
# Ansible module metadata: community-supported module in preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation rendered by ansible-doc. Must be valid YAML: plain
# scalars that themselves contain ": " (e.g. 'Valid characters are: ...')
# must be quoted, otherwise YAML parsing of this block fails.
DOCUMENTATION = '''
---
module: kafka_topic
author:
  - <NAME> (@post.ch)
short_description: manage kafka-topics
version_added: "2.7"
description:
  - create, delete and modify kafka-topics
options:
  name:
    description:
      - Unique name for topic by which it will be identified.
      - 'Valid characters are: a-z, A-Z, ".", "-", "_"'
      - Also, the topic must be conform to the ISVC-Topic-Grammar.
    required: true
    type: str
  state:
    description:
      - If set to "absent", topic will be deleted if present.
      - If set to "present", topic will be created if not present.
    required: true
    type: str
    choices: [ absent, present ]
  partitions:
    description:
      - How many partitions are created for the topic.
      - Partitions can not be set to zero or negative.
    required: true
    type: int
  replication_factor:
    description:
      - How many times each partition for the topic is replicated.
      - The number of replicas can not be more than the number of brokers in the cluster.
      - Replicas can not be set to zero or negative.
      - Once the replicas are set, they can not be changed with Ansible.
    required: true
    type: int
  bootstrap_server:
    description:
      - Kafka-Broker which is a member of the Kafka-Cluster you want to create the topic on.
      - 'Use the following format: "host:port".'
    required: true
    type: list
  cleanup_policy:
    description:
      - Corresponds to the topic-config "cleanup.policy" from Apache Kafka.
      - If set to "delete", old segments will be deleted when their retention time or
        size limits have been reached.
      - If set to "compact", old segments will be compacted when their retention time
        or size limits have been reached.
    default: delete
    type: str
    choices: [ delete, compact ]
  retention:
    description:
      - Corresponds to the topic-config "retention.ms" from Apache Kafka.
      - How long a log will be retained before being discarded.
      - If set to "-1", no time limit is applied.
      - 'Else use the following format: "%d%h%m%s%ms".'
    default: 604800000ms (==7d)
    type: str
'''
EXAMPLES = '''
---
#create new topic
- name: create topic "foo"
kafka_topic:
name: foo
state: present
partitions: 2
replication_factor: 2
bootstrap_server:
- localhost:9092
- 10.10.4.5:5678
#modify topic
- name: modify topic "foo"
kafka_topic:
name: foo
state: present
partitions: 2
replication_factor: 2
bootstrap_server:
- 127.0.0.4:1234
retention: 2d12h
#delete topic
- name: delete topic "bar"
kafka_topic:
name: bar
state: absent
partitions: 1
replication_factor: 1
bootstrap_server:
- 172.16.31.10:45078
cleanup_policy: compact
'''
RETURN = '''
---
name:
description: name of the targeted topic
type: string
returned: always
state:
description: state of the targeted topic
type: string
returned: success
'''
from ansible.module_utils.basic import AnsibleModule
from confluent_kafka.admin import AdminClient, NewTopic, NewPartitions, ConfigResource
import re
import socket
##########################################
# #
# INPUT-VALIDATION-FUNCTIONS #
# #
##########################################
def validate_name(name):
    """Validate a topic name against the ISVC topic-name grammar.

    Fails the module when the name does not fully match the grammar or
    exceeds the maximum allowed length.

    :param name: topic name, type: str
    """
    max_length = 249
    # Anchored full-string match against the topic-name grammar defined by
    # the ISVC project.
    grammar_match = re.match(r"^([a-z][a-z\d-]+(\.[a-z][a-z\d-]+)*|app\.[a-z]{2,})(\.[A-Z][A-Za-z\d]+(\.v[1-9][0-9]*)?)?(-(state|command|event)(\.state|\.command|\.event)*)?(-[a-z][a-z0-9]*)?(-from\.(test|int|prod))?$", name)
    matched = grammar_match.group(0) if grammar_match else None
    if not matched or len(matched) > max_length:
        msg = ("Invalid name for topic." \
               " Valid characters are: a-z, A-Z, 0-9, \".\",\"-\",\"_\"" \
               " and a max-length of %s characters." \
               " Also check out the Topic-Grammar from the ISVC-Project." \
               %(max_length)
              )
        fail_module(msg)
def validate_factor(factor, part_or_rep):
    """Validate a partition count or replication factor.

    Fails the module unless the value is (convertible to) a positive int.

    :param factor: number for partitions or replication
    :param part_or_rep: which setting is validated, used in error messages
    """
    # Reject floats explicitly before the int() conversion below would
    # silently truncate them. isinstance (rather than type()==float) also
    # catches float subclasses such as numpy floating-point scalars.
    if isinstance(factor, float):
        msg = ("Value from %s must be an int." \
               " You tried to set %s as factor." \
               %(part_or_rep, factor)
              )
        fail_module(msg)
    try:
        factor = int(factor)
    except ValueError:
        msg = ("Value from %s must be an int." \
               " You tried to set %s as factor." \
               %(part_or_rep, factor)
              )
        fail_module(msg)
    if factor <= 0:
        msg = ("Value from %s must be a positive int." \
               " You tried to set %s as factor." \
               %(part_or_rep, factor)
              )
        fail_module(msg)
def validate_broker(broker_definition):
    """Validate a list of broker definitions.

    Each entry must follow the pattern 'host:port'; hosts are checked via
    validate_ipv4.

    :param broker_definition: broker entries, type: list
    :return: brokers joined as 'host:port,host:port', type: str
    """
    validated = []
    for broker in broker_definition:
        broker_parts = broker.split(":")
        if len(broker_parts) == 2:
            broker = validate_ipv4(broker_parts)
        elif len(broker_parts) > 2:
            # More than one colon: most likely an IPv6 address, unsupported.
            msg = ("It seems you tried so set an IPv6-Address: %s" \
                   " We do not support that so far - please set" \
                   " an IPv4-Address." \
                   %(broker)
                  )
            fail_module(msg)
        else:
            msg = ("Broker-Definition does not seem to be valid: %s" \
                   " Use following pattern per broker: host:port." \
                   %(broker)
                  )
            fail_module(msg)
        validated.append(broker)
    return ",".join(validated)
def validate_ipv4(broker):
    """Validate one IPv4 broker definition by opening a TCP connection.

    :param broker: one broker definition, type: list, pattern: [host, port]
    :return: broker, type: str, pattern: 'host:port'
    """
    port = validate_port(broker[1])
    ip = broker[0]
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # try to make a connection
        sock.connect((ip, port))
    except socket.error:
        msg = ("Can not connect to broker: %s" \
               " Please check if the definition is right." \
               %(broker)
              )
        fail_module(msg)
    finally:
        # The socket is closed on both the success and the failure path.
        sock.close()
    return "%s:%s" % (ip, port)
def validate_port(port):
    """Validate a port number.

    :param port: port, type: str or int
    :return: port, type: int
    """
    try:
        port = int(port)
    except ValueError:
        msg = ("Port needs to be int, but got: %s" \
               %(port)
              )
        fail_module(msg)
    # Ports outside the valid TCP range are rejected.
    if not 1 <= port <= 65535:
        msg = ("Valid Port-Range is: 1-65535." \
               " But given Port is: %s" \
               %(port)
              )
        fail_module(msg)
    return port
def validate_retention_ms(retention):
    """Validate the retention time and convert it to milliseconds.

    :param retention: retention time, type: str, pattern: %d%h%m%s%ms,
        or "-1" for unlimited retention
    :return: retention time in ms (int), or the string "-1" if unlimited
    """
    if retention == "-1":  # sets retention-time to unlimited
        return retention
    # Split the retention string into per-unit groups. The minutes group
    # carries a negative lookahead so that e.g. "5ms" is parsed as
    # 5 milliseconds and not as "5m" with a dangling "s" (the original
    # pattern's miliseconds group was "\d+m" and could never match "ms").
    rema = re.match(
        r"(?P<days>\d+d)?(?P<hours>\d+h)?(?P<minutes>\d+m(?!s))?"
        r"(?P<seconds>\d+s)?(?P<miliseconds>\d+ms)?",
        retention,
    )
    if rema.span()[1] == 0:  # nothing matched at all
        msg = ("Could not parse given retention-time: %s into ms." \
               " Please use the following pattern: %%d%%h%%m%%s%%ms." \
               %(retention)
              )
        fail_module(msg)
    # Milliseconds per unit, keyed by the regex group names above.
    multiplier = {
        "days": 86400000,
        "hours": 3600000,
        "minutes": 60000,
        "seconds": 1000,
        "miliseconds": 1,
    }
    ms_total = 0
    for unit, factor in multiplier.items():
        value = rema.group(unit)
        if value is not None:
            # Strip the unit suffix ("d", "h", "m", "s" or "ms"), keep digits.
            ms_total += int(value.rstrip("dhms")) * factor
    # Kafka stores retention.ms as a signed 64-bit value.
    if ms_total >= 2**63:
        msg = ("Your chosen retention-time is way too long." \
               " Retention-time can not be over 2^63ms." \
               " You set %s as retention, which results in %s ms." \
               %(retention, ms_total)
              )
        fail_module(msg)
    return ms_total
##########################################
# #
# KAFKA-FUNCTIONS #
# #
##########################################
def check_topic(topic):
    """Check whether a topic exists on the cluster.

    :param topic: topic name, type: str
    :return: True if the topic exists, False if not, type: bool
    """
    existing_topics = admin.list_topics(timeout=5).topics  # dict: name -> metadata
    return topic in existing_topics
def compare_part_rep(topic, partitions, replication_factor):
    """Compare the playbook's partitions and replication-factor with the
    values currently set on the cluster.

    Fails the module when partitions would shrink or the replication
    factor would change, since Kafka supports neither operation.

    :param topic: topic name, type: str
    :param partitions: requested partition count, type: int
    :param replication_factor: requested replication factor, type: int
    :return: True if the partition count must change, else False, type: bool
    """
    metadata = admin.list_topics()  # metadata.topics is a dict keyed by name
    old_part = len(metadata.topics[topic].partitions)
    old_rep = len(metadata.topics[topic].partitions[0].replicas)
    if partitions < old_part:
        msg = ("It is not possible to reduce the amount of partitions." \
               " At the moment, there are %s partitions for the topic %s." \
               " You tried to set %s as the new amount of partitions." \
               %(old_part,topic,partitions)
              )
        fail_module(msg)
    if replication_factor != old_rep:
        msg = ("It is not possible to modify the replication_factor." \
               " At the moment, it is set to %s and you tried to set it to %s." \
               %(old_rep,replication_factor)
              )
        fail_module(msg)
    return partitions != old_part
# compare the defined config in the playbook with the one set at the moment for this topic
# param: topic = topicname, type: str
# param: new_config = dictionary with new config and values, type: dict
# return: True if | |
with self._option():
self._token("Cornelius")
with self._option():
self._token("Cressida")
with self._option():
self._token("Cymberline")
with self._option():
self._token("Demetrius")
with self._option():
self._token("Desdemona")
with self._option():
self._token("Dionyza")
with self._option():
self._token("Doctor")
self._token("Caius")
with self._option():
self._token("Dogberry")
with self._option():
self._token("Don")
self._token("John")
with self._option():
self._token("Don")
self._token("Pedro")
with self._option():
self._token("Donalbain")
with self._option():
self._token("Dorcas")
with self._option():
self._token("Duncan")
with self._option():
self._token("Egeus")
with self._option():
self._token("Emilia")
with self._option():
self._token("Escalus")
with self._option():
self._token("Falstaff")
with self._option():
self._token("Fenton")
with self._option():
self._token("Ferdinand")
with self._option():
self._token("Ford")
with self._option():
self._token("Fortinbras")
with self._option():
self._token("Francisca")
with self._option():
self._token("Friar")
self._token("John")
with self._option():
self._token("Friar")
self._token("Laurence")
with self._option():
self._token("Gertrude")
with self._option():
self._token("Goneril")
with self._option():
self._token("Hamlet")
with self._option():
self._token("Hecate")
with self._option():
self._token("Hector")
with self._option():
self._token("Helen")
with self._option():
self._token("Helena")
with self._option():
self._token("Hermia")
with self._option():
self._token("Hermonie")
with self._option():
self._token("Hippolyta")
with self._option():
self._token("Horatio")
with self._option():
self._token("Imogen")
with self._option():
self._token("Isabella")
with self._option():
self._token("John")
self._token("of")
self._token("Gaunt")
with self._option():
self._token("John")
self._token("of")
self._token("Lancaster")
with self._option():
self._token("Julia")
with self._option():
self._token("Juliet")
with self._option():
self._token("Julius")
self._token("Caesar")
with self._option():
self._token("King")
self._token("Henry")
with self._option():
self._token("King")
self._token("John")
with self._option():
self._token("King")
self._token("Lear")
with self._option():
self._token("King")
self._token("Richard")
with self._option():
self._token("Lady")
self._token("Capulet")
with self._option():
self._token("Lady")
self._token("Macbeth")
with self._option():
self._token("Lady")
self._token("Macduff")
with self._option():
self._token("Lady")
self._token("Montague")
with self._option():
self._token("Lennox")
with self._option():
self._token("Leonato")
with self._option():
self._token("Luciana")
with self._option():
self._token("Lucio")
with self._option():
self._token("Lychorida")
with self._option():
self._token("Lysander")
with self._option():
self._token("Macbeth")
with self._option():
self._token("Macduff")
with self._option():
self._token("Malcolm")
with self._option():
self._token("Mariana")
with self._option():
self._token("Mark")
self._token("Antony")
with self._option():
self._token("Mercutio")
with self._option():
self._token("Miranda")
with self._option():
self._token("Mistress")
self._token("Ford")
with self._option():
self._token("Mistress")
self._token("Overdone")
with self._option():
self._token("Mistress")
self._token("Page")
with self._option():
self._token("Montague")
with self._option():
self._token("Mopsa")
with self._option():
self._token("Oberon")
with self._option():
self._token("Octavia")
with self._option():
self._token("Octavius")
self._token("Caesar")
with self._option():
self._token("Olivia")
with self._option():
self._token("Ophelia")
with self._option():
self._token("Orlando")
with self._option():
self._token("Orsino")
with self._option():
self._token("Othello")
with self._option():
self._token("Page")
with self._option():
self._token("Pantino")
with self._option():
self._token("Paris")
with self._option():
self._token("Pericles")
with self._option():
self._token("Pinch")
with self._option():
self._token("Polonius")
with self._option():
self._token("Pompeius")
with self._option():
self._token("Portia")
with self._option():
self._token("Priam")
with self._option():
self._token("Prince")
self._token("Henry")
with self._option():
self._token("Prospero")
with self._option():
self._token("Proteus")
with self._option():
self._token("Publius")
with self._option():
self._token("Puck")
with self._option():
self._token("Queen")
self._token("Elinor")
with self._option():
self._token("Regan")
with self._option():
self._token("Robin")
with self._option():
self._token("Romeo")
with self._option():
self._token("Rosalind")
with self._option():
self._token("Sebastian")
with self._option():
self._token("Shallow")
with self._option():
self._token("Shylock")
with self._option():
self._token("Slender")
with self._option():
self._token("Solinus")
with self._option():
self._token("Stephano")
with self._option():
self._token("Thaisa")
with self._option():
self._token("The")
self._token("Abbot")
self._token("of")
self._token("Westminster")
with self._option():
self._token("The")
self._token("Apothecary")
with self._option():
self._token("The")
self._token("Archbishop")
self._token("of")
self._token("Canterbury")
with self._option():
self._token("The")
self._token("Duke")
self._token("of")
self._token("Milan")
with self._option():
self._token("The")
self._token("Duke")
self._token("of")
self._token("Venice")
with self._option():
self._token("The")
self._token("Ghost")
with self._option():
self._token("Theseus")
with self._option():
self._token("Thurio")
with self._option():
self._token("Timon")
with self._option():
self._token("Titania")
with self._option():
self._token("Titus")
with self._option():
self._token("Troilus")
with self._option():
self._token("Tybalt")
with self._option():
self._token("Ulysses")
with self._option():
self._token("Valentine")
with self._option():
self._token("Venus")
with self._option():
self._token("Vincentio")
with self._option():
self._token("Viola")
self._error(
"expecting one of: "
"'Achilles' 'Adonis' 'Adriana' 'Aegeon'"
"'Aemilia' 'Agamemnon' 'Agrippa' 'Ajax'"
"'Alonso' 'Andromache' 'Angelo'"
"'Antiochus' 'Antonio' 'Arthur'"
"'Autolycus' 'Balthazar' 'Banquo'"
"'Beatrice' 'Benedick' 'Benvolio'"
"'Bianca' 'Brabantio' 'Brutus' 'Capulet'"
"'Cassandra' 'Cassius' 'Christopher'"
"'Cicero' 'Claudio' 'Claudius'"
"'Cleopatra' 'Cordelia' 'Cornelius'"
"'Cressida' 'Cymberline' 'Demetrius'"
"'Desdemona' 'Dionyza' 'Doctor'"
"'Dogberry' 'Don' 'Donalbain' 'Dorcas'"
"'Duncan' 'Egeus' 'Emilia' 'Escalus'"
"'Falstaff' 'Fenton' 'Ferdinand' 'Ford'"
"'Fortinbras' 'Francisca' 'Friar'"
"'Gertrude' 'Goneril' 'Hamlet' 'Hecate'"
"'Hector' 'Helen' 'Helena' 'Hermia'"
"'Hermonie' 'Hippolyta' 'Horatio'"
"'Imogen' 'Isabella' 'John' 'Julia'"
"'Juliet' 'Julius' 'King' 'Lady' 'Lennox'"
"'Leonato' 'Luciana' 'Lucio' 'Lychorida'"
"'Lysander' 'Macbeth' 'Macduff' 'Malcolm'"
"'Mariana' 'Mark' 'Mercutio' 'Miranda'"
"'Mistress' 'Montague' 'Mopsa' 'Oberon'"
"'Octavia' 'Octavius' 'Olivia' 'Ophelia'"
"'Orlando' 'Orsino' 'Othello' 'Page'"
"'Pantino' 'Paris' 'Pericles' 'Pinch'"
"'Polonius' 'Pompeius' 'Portia' 'Priam'"
"'Prince' 'Prospero' 'Proteus' 'Publius'"
"'Puck' 'Queen' 'Regan' 'Robin' 'Romeo'"
"'Rosalind' 'Sebastian' 'Shallow'"
"'Shylock' 'Slender' 'Solinus' 'Stephano'"
"'Thaisa' 'The' 'Theseus' 'Thurio'"
"'Timon' 'Titania' 'Titus' 'Troilus'"
"'Tybalt' 'Ulysses' 'Valentine' 'Venus'"
"'Vincentio' 'Viola'"
)
    @tatsumasu()
    def _nothing_(self): # noqa
        """Parse a zero literal: the word 'nothing' or 'zero'.

        TatSu-generated rule; the matched word is bound to 'nothing_word'.
        """
        with self._group():
            with self._choice():
                with self._option():
                    self._token("nothing")
                with self._option():
                    self._token("zero")
                self._error("expecting one of: " "'nothing' 'zero'")
        self.name_last_node("nothing_word")
        self._define(["nothing_word"], [])
    @tatsumasu()
    def _positive_or_neutral_adjective_(self): # noqa
        """Parse an adjective that is either positive or neutral.

        TatSu-generated rule: tries the positive_adjective rule first,
        then neutral_adjective.
        """
        with self._choice():
            with self._option():
                self._positive_adjective_()
            with self._option():
                self._neutral_adjective_()
            self._error(
                "expecting one of: "
                "'amazing' 'beautiful' 'blossoming'"
                "'bold' 'brave' 'charming' 'clearest'"
                "'cunning' 'cute' 'delicious'"
                "'embroidered' 'fair' 'fine' 'gentle'"
                "'golden' 'good' 'handsome' 'happy'"
                "'healthy' 'honest' 'lovely' 'loving'"
                "'mighty' 'noble' 'peaceful' 'pretty'"
                "'prompt' 'proud' 'reddest' 'rich'"
                "'smooth' 'sunny' 'sweet' 'sweetest'"
                "'trustworthy' 'warm'"
                "<positive_adjective> 'big' 'black'"
                "'blue' 'bluest' 'bottomless' 'furry'"
                "'green' 'hard' 'huge' 'large' 'little'"
                "'normal' 'old' 'purple' 'red' 'rural'"
                "'small' 'tiny' 'white' 'yellow'"
                "<neutral_adjective>"
            )
    @tatsumasu()
    def _positive_or_neutral_noun_(self): # noqa
        """Parse a noun that is either positive or neutral.

        TatSu-generated rule: tries the positive_noun rule first, then
        neutral_noun.
        """
        with self._choice():
            with self._option():
                self._positive_noun_()
            with self._option():
                self._neutral_noun_()
            self._error(
                "expecting one of: "
                "'Heaven' 'King' 'Lord' 'angel' 'flower'"
                "'happiness' 'joy' 'plum' \"summer's\""
                "'hero' 'rose' 'kingdom' 'pony'"
                "<positive_noun> 'animal' 'aunt'"
                "'brother' 'cat' 'chihuahua' 'cousin'"
                "'cow' 'daughter' 'door' 'face' 'father'"
                "'fellow' 'granddaughter' 'grandfather'"
                "'grandmother' 'grandson' 'hair'"
                "'hamster' 'horse' 'lamp' 'lantern'"
                "'mistletoe' 'moon' 'morning' 'mother'"
                "'nephew' 'niece' 'nose' 'purse' 'road'"
                "'roman' 'sister' 'sky' 'son' 'squirrel'"
                "'stone' 'thing' 'town' 'tree' 'uncle'"
                "'wind' <neutral_noun>"
            )
    @tatsumasu()
    def _neutral_comparative_(self): # noqa
        """Parse a comparison of the form 'as <adjective> as'.

        TatSu-generated rule; the adjective node is bound to 'comparison'.
        """
        self._token("as")
        with self._group():
            with self._choice():
                with self._option():
                    self._negative_adjective_()
                with self._option():
                    self._positive_or_neutral_adjective_()
                self._error(
                    "expecting one of: "
                    "<negative_adjective>"
                    "<positive_or_neutral_adjective>"
                )
        self.name_last_node("comparison")
        self._token("as")
        self._define(["comparison"], [])
    @tatsumasu()
    def _negative_noun_phrase_(self): # noqa
        """Parse a negative noun phrase.

        TatSu-generated rule: an optional article or possessive, any number
        of negative/neutral adjectives (bound to 'adjectives'), then a
        negative noun (bound to 'noun').
        """
        with self._optional():
            with self._choice():
                with self._option():
                    self._article_()
                with self._option():
                    self._possessive_()
                self._error("expecting one of: " "<article> <possessive>")
        # Closure body: one adjective per repetition.
        def block2():
            with self._group():
                with self._choice():
                    with self._option():
                        self._negative_adjective_()
                    with self._option():
                        self._neutral_adjective_()
                    self._error(
                        "expecting one of: " "<negative_adjective> <neutral_adjective>"
                    )
        self._closure(block2)
        self.name_last_node("adjectives")
        self._negative_noun_()
        self.name_last_node("noun")
        self._define(["adjectives", "noun"], [])
    @tatsumasu()
    def _positive_noun_phrase_(self): # noqa
        """Parse a positive noun phrase.

        TatSu-generated rule: an optional article or possessive, any number
        of positive-or-neutral adjectives (bound to 'adjectives'), then a
        positive-or-neutral noun (bound to 'noun').
        """
        with self._optional():
            with self._choice():
                with self._option():
                    self._article_()
                with self._option():
                    self._possessive_()
                self._error("expecting one of: " "<article> <possessive>")
        # Closure body: one adjective per repetition.
        def block2():
            self._positive_or_neutral_adjective_()
        self._closure(block2)
        self.name_last_node("adjectives")
        self._positive_or_neutral_noun_()
        self.name_last_node("noun")
        self._define(["adjectives", "noun"], [])
    @tatsumasu()
    def _noun_phrase_(self): # noqa
        """Parse any noun phrase: negative first, then positive.

        TatSu-generated rule; the error message enumerates every token the
        two alternatives can start with.
        """
        with self._choice():
            with self._option():
                self._negative_noun_phrase_()
            with self._option():
                self._positive_noun_phrase_()
            self._error(
                "expecting one of: "
                "'Hell' 'Microsoft' 'bastard' 'beggar'"
                "'blister' 'codpiece' 'coward' 'curse'"
                "'death' 'devil' 'draught' 'famine'"
                "'flirt-gill' 'goat' 'hate' 'hog' 'hound'"
                "'leech' 'lie' 'pig' 'plague'"
                "'starvation' 'toad' 'war' 'wolf'"
                "<negative_noun> 'bad' 'cowardly'"
                "'cursed' 'damned' 'dirty' 'disgusting'"
                "'distasteful' 'dusty' 'evil' 'fat-"
                "kidneyed' 'fatherless' 'fat' 'foul'"
                "'hairy' 'half-witted' 'horrible'"
                "'horrid' 'infected' 'lying' 'miserable'"
                "'misused' 'oozing' 'rotten' 'smelly'"
                "'snotty' 'sorry' 'stinking' 'stuffed'"
                "'stupid' 'vile' 'villainous' 'worried'"
                "<negative_adjective> 'big' 'black'"
                "'blue' 'bluest' 'bottomless' 'furry'"
                "'green' 'hard' 'huge' 'large' 'little'"
                "'normal' 'old' 'purple' 'red' 'rural'"
                "'small' 'tiny' 'white' 'yellow'"
                "<neutral_adjective> 'a' 'an' 'the'"
                "<article> 'mine' 'my'"
                "<first_person_possessive> 'thine' 'thy'"
                "'your' <second_person_possessive> 'his'"
                "'her' 'its' 'their'"
                "<third_person_possessive> <possessive>"
                "<negative_noun_phrase> 'Heaven' 'King'"
                "'Lord' 'angel' 'flower' 'happiness'"
                "'joy' 'plum' \"summer's\" 'hero' 'rose'"
                "'kingdom' 'pony' <positive_noun>"
                "'animal' 'aunt' 'brother' 'cat'"
                "'chihuahua' 'cousin' 'cow' 'daughter'"
                "'door' 'face' 'father' 'fellow'"
                "'granddaughter' 'grandfather'"
                "'grandmother' 'grandson' 'hair'"
                "'hamster' 'horse' 'lamp' 'lantern'"
                "'mistletoe' 'moon' 'morning' 'mother'"
                "'nephew' 'niece' 'nose' 'purse' 'road'"
                "'roman' 'sister' 'sky' 'son' 'squirrel'"
                "'stone' 'thing' 'town' 'tree' 'uncle'"
                "'wind' <neutral_noun>"
                "<positive_or_neutral_noun> 'amazing'"
                "'beautiful' 'blossoming' 'bold' 'brave'"
                "'charming' 'clearest' 'cunning' 'cute'"
                "'delicious' 'embroidered' 'fair' 'fine'"
                "'gentle' 'golden' 'good' 'handsome'"
                "'happy' 'healthy' 'honest' 'lovely'"
                "'loving' 'mighty' 'noble' 'peaceful'"
                "'pretty' 'prompt' 'proud' 'reddest'"
                "'rich' 'smooth' 'sunny' 'sweet'"
                "'sweetest' 'trustworthy' 'warm'"
                "<positive_adjective>"
                "<positive_or_neutral_adjective>"
                "<positive_noun_phrase>"
            )
    @tatsumasu()
    def _first_person_value_(self):  # noqa
        # Grammar rule: first_person_value = (first_person | first_person_reflexive)
        # Generated by the TatSu parser generator -- change the grammar, not this code.
        with self._group():
            with self._choice():
                with self._option():
                    self._first_person_()
                with self._option():
                    self._first_person_reflexive_()
                # Reached only if neither alternative matched: report expected tokens.
                self._error(
                    "expecting one of: "
                    "'I' 'me' <first_person> 'myself'"
                    "<first_person_reflexive>"
                )
        # Expose the matched node to semantic actions under "first_person_word".
        self.name_last_node("first_person_word")
        self._define(["first_person_word"], [])
    @tatsumasu()
    def _second_person_value_(self):  # noqa
        # Grammar rule: second_person_value = (second_person | second_person_reflexive)
        # Generated by the TatSu parser generator -- change the grammar, not this code.
        with self._group():
            with self._choice():
                with self._option():
                    self._second_person_()
                with self._option():
                    self._second_person_reflexive_()
                # Reached only if neither alternative matched: report expected tokens.
                self._error(
                    "expecting one of: "
                    "'thee' 'thou' 'you' <second_person>"
                    "'thyself' 'yourself'"
                    "<second_person_reflexive>"
                )
        # Expose the matched node to semantic actions under "second_person_word".
        self.name_last_node("second_person_word")
        self._define(["second_person_word"], [])
    @tatsumasu()
    def _character_name_(self):  # noqa
        # Grammar rule: character_name = character
        # Generated by the TatSu parser generator -- change the grammar, not this code.
        self._character_()
        # Expose the matched character token to semantic actions under "name".
        self.name_last_node("name")
        self._define(["name"], [])
@tatsumasu()
def _value_(self): # noqa
with self._choice():
with self._option():
self._expression_()
with self._option():
self._first_person_value_()
with self._option():
self._second_person_value_()
with self._option():
self._noun_phrase_()
with self._option():
self._character_name_()
with self._option():
self._nothing_()
self._error(
"expecting one of: "
"'the' <binary_operation>"
"<binary_expression> 'twice'"
"<unary_operation> <unary_expression>"
"<expression> 'I' 'me' <first_person>"
"'myself' <first_person_reflexive>"
"<first_person_value> 'thee' 'thou' 'you'"
"<second_person> 'thyself' 'yourself'"
"<second_person_reflexive>"
"<second_person_value> 'Hell' 'Microsoft'"
"'bastard' 'beggar' 'blister' 'codpiece'"
"'coward' 'curse' 'death' 'devil'"
"'draught' 'famine' 'flirt-gill' 'goat'"
"'hate' 'hog' 'hound' 'leech' 'lie' 'pig'"
"'plague' 'starvation' 'toad' 'war'"
"'wolf' <negative_noun> 'bad' 'cowardly'"
"'cursed' 'damned' 'dirty' 'disgusting'"
"'distasteful' 'dusty' 'evil' 'fat-"
"kidneyed' 'fatherless' 'fat' 'foul'"
"'hairy' 'half-witted' 'horrible'"
"'horrid' 'infected' 'lying' 'miserable'"
"'misused' 'oozing' 'rotten' 'smelly'"
"'snotty' 'sorry' 'stinking' 'stuffed'"
"'stupid' 'vile' 'villainous' 'worried'"
"<negative_adjective> 'big' 'black'"
"'blue' 'bluest' 'bottomless' 'furry'"
"'green' 'hard' 'huge' 'large' 'little'"
"'normal' 'old' 'purple' 'red' 'rural'"
"'small' 'tiny' 'white' 'yellow'"
"<neutral_adjective> 'a' 'an' <article>"
"'mine' 'my' <first_person_possessive>"
"'thine' 'thy' 'your'"
"<second_person_possessive> 'his' 'her'"
"'its' 'their' <third_person_possessive>"
"<possessive> <negative_noun_phrase>"
"'Heaven' 'King' 'Lord' 'angel' 'flower'"
"'happiness' 'joy' 'plum' \"summer's\""
"'hero' 'rose' 'kingdom' 'pony'"
"<positive_noun> 'animal' 'aunt'"
"'brother' 'cat' 'chihuahua' 'cousin'"
"'cow' 'daughter' 'door' 'face' | |
<filename>pipecaster/transform_wrappers.py<gh_stars>0
"""
Wrapper classes for internal ML models.
MultichannelPipelines treat all internal components as transformers (i.e.
invoking fit/transform/fit_transform). As a consequence, when predictors are
used internally (e.g. for voting or stacking) a transformer interface must be
added to the internal predictors. In practice, this means choosing a
prediction method to use when transforming, converting 1D outputs to 2D
outputs, and applying internal cross validation training when required.
:class:`SingleChannel` and :class:`Multichannel` classes add a transformer
interface to single channel and multichannel predictors respectively.
:class:`SingleChannelCV` and :class:`MultichannelCV` classes add a transformer
interface and internal cross validation training to single channel and
multichannel predictors respectively. Internal cross validation (internal cv)
training is typically used when outputs of a base predictor will be used to
train a meta-predictor. It guarantees that base predictors do not make
inferences on their own training samples (1). Internal cv training can improve
meta-predictor accuracy if overfitting is a limiting problem, or it can reduce
metapredictor accuracy if the number of training samples is limiting.
(1) Wolpert, David H. "Stacked generalization." Neural networks 5.2
(1992): 241-259.
"""
import functools
import numpy as np
from sklearn.metrics import log_loss
import pipecaster.utils as utils
import pipecaster.config as config
from pipecaster.utils import Cloneable, Saveable
from pipecaster.cross_validation import cross_val_predict, score_predictions
__all__ = ['make_transformer', 'make_cv_transformer', 'unwrap_predictor',
'unwrap_model']
def make_transformer(predictor, transform_method='auto'):
    """
    Wrap a predictor so that it also exposes a transformer interface.

    Parameters
    ----------
    predictor : scikit-learn predictor or multichannel predictor
        Predictor to wrap.
    transform_method : str, default='auto'
        - Name of the prediction method to call when transforming (e.g. when
          outputting meta-features).
        - If 'auto' :
            - If classifier : method picked using
              config.transform_method_precedence order (default:
              predict_proba->predict_log_proba->decision_function->predict).
            - If regressor : 'predict'

    Returns
    -------
    Predictor/transformer
        A wrapped predictor with both predictor and transformer interfaces.

    Examples
    --------
    ::

        from sklearn.ensemble import GradientBoostingClassifier
        import pipecaster as pc

        Xs, y, X_types = pc.make_multi_input_classification(n_informative_Xs=3,
                                                            n_random_Xs=2)
        clf = pc.MultichannelPipeline(n_channels=5)
        base_clf = pc.make_transformer(GradientBoostingClassifier())
        clf.add_layer(base_clf)
        clf.add_layer(pc.SoftVotingClassifier())
        pc.cross_val_score(clf, Xs, y, cv=3)
        # output: [0.8529411764705882, 0.9411764705882353, 0.96875]
    """
    # Multichannel predictors get the multichannel wrapper; everything else
    # gets the single-channel wrapper.
    wrapper_class = (Multichannel if utils.is_multichannel(predictor)
                     else SingleChannel)
    return wrapper_class(predictor, transform_method)
def make_cv_transformer(predictor, transform_method='auto', internal_cv=5,
                        score_method='auto', scorer='auto', cv_processes=1):
    """
    Wrap a predictor with a transformer interface plus internal cross
    validation training.

    Parameters
    ----------
    predictor : scikit-learn predictor or multichannel predictor
        Predictor to wrap.
    transform_method : str, default='auto'
        - Name of the prediction method to call when transforming (e.g. when
          outputting meta-features).
        - If 'auto' :
            - If classifier : method picked using
              config.transform_method_precedence order (default:
              predict_proba->predict_log_proba->decision_function->predict).
            - If regressor : 'predict'
    internal_cv : int, None, or callable, default=5
        - Function for train/test subdivision of the training data. Used to
          estimate performance of base classifiers and ensure they do not
          generate predictions from their training samples during
          meta-predictor training.
        - If int > 1: StratifiedKfold(n_splits=internal_cv) if classifier or
          KFold(n_splits=internal_cv) if regressor.
        - If {None, 1}: disable internal cv.
        - If callable: Assumed to be split generator like scikit-learn KFold.
    score_method : str, default='auto'
        - Name of prediction method used when scoring predictor performance.
        - If 'auto' :
            - If classifier : method picked using
              config.score_method_precedence order (default:
              predict_proba->predict_log_proba->decision_function->predict).
            - If regressor : 'predict'
    scorer : callable, default='auto'
        Callable that computes a figure of merit score for the internal_cv
        run.  The score is exposed as score_ attribute during fit_transform().
        - If 'auto':
            - explained_variance_score for regressors with predict()
            - roc_auc_score for classifiers with {predict_proba,
              predict_log_proba, decision_function}
            - balanced_accuracy_score for classifiers with only predict()
        - If callable: A scorer with signature: score = scorer(y_true, y_pred).
    cv_processes : int or 'max', default=1
        - The number of parallel processes to run for internal cross
          validation.
        - If int : Use up to cv_processes number of processes.
        - If 'max' : Use all available CPUs.

    Returns
    -------
    Predictor/transformer
        A wrapped predictor with both predictor and transformer interfaces.
        Internal cross_validation training occurs during calls to
        fit_transform().

    Examples
    --------
    ::

        from sklearn.ensemble import GradientBoostingClassifier
        import pipecaster as pc

        Xs, y, X_types = pc.make_multi_input_classification(n_informative_Xs=3,
                                                            n_random_Xs=2)
        clf = pc.MultichannelPipeline(n_channels=5)
        base_clf = pc.make_cv_transformer(GradientBoostingClassifier())
        clf.add_layer(base_clf)
        clf.add_layer(pc.MultichannelPredictor(GradientBoostingClassifier()))
        pc.cross_val_score(clf, Xs, y, cv=3)
        # output: [0.8529411764705882, 0.9080882352941176, 1.0]
    """
    # Dispatch on channel count, mirroring make_transformer().
    wrapper_class = (MultichannelCV if utils.is_multichannel(predictor)
                     else SingleChannelCV)
    return wrapper_class(predictor, transform_method, internal_cv,
                         score_method, scorer, cv_processes)
class SingleChannel(Cloneable, Saveable):
    """
    Add transformer interface to a scikit-learn predictor.

    Wrapper class that provides scikit-learn conformant predictors with
    transform() and fit_transform() methods.

    Parameters
    ----------
    predictor : predictor instance
        The scikit-learn conformant estimator/predictor to wrap.
    transform_method : str, default='auto'
        - Name of the prediction method to call when transforming (e.g. when
          outputting meta-features).
        - If 'auto' :
            - If classifier : method picked using
              config.transform_method_precedence order (default:
              predict_proba->predict_log_proba->decision_function->predict).
            - If regressor : 'predict'

    Examples
    --------
    Model stacking, classification:
    ::

        from sklearn.ensemble import GradientBoostingClassifier
        from sklearn.svm import SVC
        import pipecaster as pc

        Xs, y, _ = pc.make_multi_input_classification(n_informative_Xs=3,
                                                      n_random_Xs=7)
        clf = pc.MultichannelPipeline(n_channels=10)
        base_clf = GradientBoostingClassifier()
        base_clf = pc.transform_wrappers.SingleChannel(base_clf)
        clf.add_layer(base_clf, pipe_processes='max')
        clf.add_layer(pc.MultichannelPredictor(SVC()))
        pc.cross_val_score(clf, Xs, y, cv=3)
        # output: [0.8529411764705882, 0.8216911764705883, 0.9099264705882353]

    Model stacking, regression:
    ::

        from sklearn.ensemble import GradientBoostingRegressor
        from sklearn.svm import SVR
        import pipecaster as pc

        Xs, y, _ = pc.make_multi_input_regression(n_informative_Xs=7,
                                                  n_random_Xs=3)
        clf = pc.MultichannelPipeline(n_channels=10)
        base_clf = GradientBoostingRegressor()
        base_clf = pc.transform_wrappers.SingleChannel(base_clf)
        clf.add_layer(base_clf, pipe_processes=1)
        clf.add_layer(pc.MultichannelPredictor(SVR()))
        pc.cross_val_score(clf, Xs, y, cv=3)
        # output: [0.077183453, 0.067682880449, 0.07849665]

    Notes
    -----
    This class uses reflection to expose the predictor methods found in the
    object that it wraps, so the method attributes in a SingleChannel instance
    are not usually identical to the method attributes of the SingleChannel
    class.

    If a sample_weight parameter is sent to the fit() method but the wrapped
    predictor doesn't accept this argument, fit() will be called without the
    sample_weight parameter and no warning will be given.
    """

    def __init__(self, predictor, transform_method='auto'):
        self._params_to_attributes(SingleChannel.__init__, locals())
        utils.enforce_fit(predictor)
        utils.enforce_predict(predictor)
        self._add_predictor_interface(predictor)
        self._set_estimator_type(predictor)

    def _set_estimator_type(self, predictor):
        # Mirror scikit-learn's estimator-type tag so downstream tools treat
        # the wrapper like the wrapped predictor.
        if hasattr(predictor, '_estimator_type'):
            self._estimator_type = predictor._estimator_type

    def _add_predictor_interface(self, predictor):
        # Reflection: expose each recognized prediction method found on the
        # predictor as a bound attribute of this wrapper instance.
        for method_name in config.recognized_pred_methods:
            if hasattr(predictor, method_name):
                prediction_method = functools.partial(self.predict_with_method,
                                                      method_name=method_name)
                setattr(self, method_name, prediction_method)

    def _add_model_interface(self, model, X):
        # After fitting, expose only the prediction methods that the fitted
        # model actually supports for inputs like X.
        detected_methods = utils.detect_predict_methods(model, X)
        for method_name in detected_methods:
            prediction_method = functools.partial(self.predict_with_method,
                                                  method_name=method_name)
            setattr(self, method_name, prediction_method)

    def _remove_predictor_interface(self):
        # Drop any previously exposed prediction methods from the instance.
        for method_name in config.recognized_pred_methods:
            if hasattr(self, method_name):
                delattr(self, method_name)

    def set_transform_method(self, method_name):
        """Set the prediction method used by transform(); returns self."""
        self.transform_method = method_name
        return self

    def get_transform_method(self):
        """
        Resolve the name of the prediction method used by transform().

        Raises
        ------
        NameError
            If transform_method is 'auto' and no recognized prediction
            method is available.
        """
        if self.transform_method == 'auto':
            method_name = utils.get_transform_method(self)
            if method_name is None:
                raise NameError('model lacks a recognized method for '
                                'conversion to transformer')
        else:
            method_name = self.transform_method
        return method_name

    def fit(self, X, y=None, **fit_params):
        """
        Fit a clone of the wrapped predictor and expose its interface.

        Returns self to allow chaining.
        """
        self.model = utils.get_clone(self.predictor)
        is_classifier = utils.is_classifier(self.predictor)
        if y is None:
            try:
                self.model.fit(X, **fit_params)
            except TypeError:
                # The predictor does not accept the extra fit params (e.g.
                # sample_weight, per the class Notes); retry without them.
                # Previously a bare `except:` swallowed *all* errors here.
                self.model.fit(X)
        else:
            if is_classifier:
                # Encode labels as integer indices; decoded in
                # predict_with_method().
                self.classes_, y = np.unique(y, return_inverse=True)
            try:
                self.model.fit(X, y, **fit_params)
            except TypeError:
                # Same retry-without-fit-params fallback as above.
                self.model.fit(X, y)
        self._set_estimator_type(self.model)
        self._remove_predictor_interface()
        self._add_model_interface(self.model, X)
        return self

    def predict_with_method(self, X, method_name):
        """
        Call the named prediction method of the fitted model on X.

        Raises
        ------
        utils.FitError
            If called before fit().
        NameError
            If the fitted model lacks the requested method.
        """
        if not hasattr(self, 'model'):
            raise utils.FitError('prediction attempted before model fitting')
        if hasattr(self.model, method_name):
            predict_method = getattr(self.model, method_name)
            predictions = predict_method(X)
        else:
            raise NameError('prediction method {} not found in {} attributes'
                            .format(method_name, self.model))
        if utils.is_classifier(self) and method_name == 'predict':
            # Map integer predictions back to the original class labels.
            predictions = self.classes_[predictions]
        return predictions

    def transform(self, X):
        """
        Transform X into meta-features using the chosen prediction method.

        Always returns a 2D array (single-column for 1D outputs and for
        binary classifiers, whose redundant second probability column is
        dropped).
        """
        if hasattr(self, 'model'):
            transformer = getattr(self.model, self.get_transform_method())
            X_t = transformer(X)
            # convert output array to output matrix:
            if len(X_t.shape) == 1:
                X_t = X_t.reshape(-1, 1)
            # drop redundant prob output from binary classifiers:
            elif (len(X_t.shape) == 2 and X_t.shape[1] == 2 and
                  utils.is_classifier(self.model)):
                X_t = X_t[:, 1].reshape(-1, 1)
            return X_t
        else:
            raise utils.FitError('transform called before model fitting')

    def fit_transform(self, X, y=None, **fit_params):
        """Fit the wrapped predictor, then transform the training data."""
        self.fit(X, y, **fit_params)
        return self.transform(X)

    def _more_tags(self):
        # pipecaster channel-routing tag: this wrapper is single-channel.
        return {'multichannel': False}

    def get_clone(self):
        """
        Get a stateful clone.
        """
        clone = super().get_clone()
        if hasattr(self, 'classes_'):
            clone.classes_ = self.classes_.copy()
        if hasattr(self, 'model'):
            clone.model = utils.get_clone(self.model)
            clone._set_estimator_type(self.model)
            clone._remove_predictor_interface()
            # NOTE(review): the interface is rebuilt from `self` rather than
            # from the cloned model -- presumably to mirror the currently
            # exposed methods; confirm `clone.model` was not intended here.
            clone._add_predictor_interface(self)
        return clone

    def get_descriptor(self, verbose=1):
        """Return a compact text descriptor of the wrapped predictor."""
        return '{' + utils.get_descriptor(self.predictor, verbose,
                                          self.get_params()) + '}tr'
class SingleChannelCV(SingleChannel):
"""
Add transformer interface and internal cross validation training to
scikit-learn predictor.
Wrapper class that provides predictors with transform() and fit_transform()
methods, and internal cross validation training with performance scoring.
Parameters
----------
predictor : predictor instance
The scikit-learn conformant predictor to wrap.
transform_method : str, default='auto'
- Name of the prediction method to call when transforming (e.g. when
outputting meta-features).
- If 'auto' :
- If classifier : method picked using
config.transform_method_precedence order (default:
predict_proba->predict_log_proba->decision_function->predict).
- If regressor : 'predict'
internal_cv : int, None, or callable, default=5
- Function for train/test subdivision of the training data. | |
#!/usr/bin/env python
"""
Pydoc sub-class for generating documentation for entire packages.
Taken from: http://pyopengl.sourceforge.net/pydoc/OpenGLContext.pydoc.pydoc2.html
Author: <NAME>
"""
import logging
import pydoc, inspect, os, string, shutil
import sys, imp, os, stat, re, types, inspect
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
_log = logging.getLogger(python-logstash-logger)
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:

        0. The name (a string).

        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method
               'data'            not a method

        2. The class which defined this attribute (a class).

        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes:  C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.

    Note: This version is patched to work with Zope Interface-bearing objects
    """
    mro = inspect.getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        if name in cls.__dict__:
            obj = cls.__dict__[name]
        else:
            try:
                obj = getattr(cls, name)
            except AttributeError:
                # Some names reported by dir() may not be retrievable (e.g.
                # Zope Interface artifacts); skip them.  (The old
                # `except AttributeError, err:` form bound an unused name and
                # is Python-2-only syntax.)
                continue

        # Figure out where it was defined.
        homecls = getattr(obj, "__objclass__", None)
        if homecls is None:
            # search the dicts.
            for base in mro:
                if name in base.__dict__:
                    homecls = base
                    break

        # Get the object again, in order to get it from the defining
        # __dict__ instead of via getattr (if possible).
        if homecls is not None and name in homecls.__dict__:
            obj = homecls.__dict__[name]

        # Also get the object via getattr.
        obj_via_getattr = getattr(cls, name)

        # Classify the object.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif (inspect.ismethod(obj_via_getattr) or
              inspect.ismethoddescriptor(obj_via_getattr)):
            kind = "method"
        else:
            kind = "data"

        result.append((name, kind, homecls, obj))

    return result
# Monkey-patch the stdlib inspect module so pydoc (which calls
# inspect.classify_class_attrs) uses the Zope-Interface-tolerant version above.
inspect.classify_class_attrs = classify_class_attrs
class DefaultFormatter(pydoc.HTMLDoc):
    """HTML formatter for package documentation.

    Extends pydoc.HTMLDoc so that module pages link into the package-wide
    documentation set being built by a PackageDocumentationGenerator (passed
    in as ``packageContext``).  Written for Python 2 (string module functions,
    tuple-unpacking lambdas, has_key).
    """

    def docmodule(self, object, name=None, mod=None, packageContext = None, *ignored):
        """Produce HTML documentation for a module object."""
        name = object.__name__ # ignore the passed-in name
        # Build a breadcrumb of links for each dotted-name component.
        parts = split(name, '.')
        links = []
        for i in range(len(parts)-1):
            links.append(
                '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
                (join(parts[:i+1], '.'), parts[i]))
        linkedname = join(links + parts[-1:], '.')
        head = '<big><big><strong>%s</strong></big></big>' % linkedname
        try:
            path = inspect.getabsfile(object)
            url = path
            if sys.platform == 'win32':
                import nturl2path
                url = nturl2path.pathname2url(path)
            filelink = '<a href="file:%s">%s</a>' % (url, path)
        except TypeError:
            # inspect.getabsfile raises TypeError for built-in modules.
            filelink = '(built-in)'
        info = []
        if hasattr(object, '__version__'):
            version = str(object.__version__)
            # Strip CVS/RCS "$Revision: ...$" markers down to the bare number.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            info.append('version %s' % self.escape(version))
        if hasattr(object, '__date__'):
            info.append(self.escape(str(object.__date__)))
        if info:
            head = head + ' (%s)' % join(info, ', ')
        result = self.heading(
            head, '#ffffff', '#7799ee', '<a href=".">index</a><br>' + filelink)
        modules = inspect.getmembers(object, inspect.ismodule)
        # Collect classes defined in (or attributed to) this module, and a
        # dict mapping them to anchor/URL fragments for cross-linking.
        classes, cdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isclass):
            if (inspect.getmodule(value) or object) is object:
                classes.append((key, value))
                cdict[key] = cdict[value] = '#' + key
        # Also link base classes that live in other documented modules.
        for key, value in classes:
            for base in value.__bases__:
                key, modname = base.__name__, base.__module__
                module = sys.modules.get(modname)
                if modname != name and module and hasattr(module, key):
                    if getattr(module, key) is base:
                        if not cdict.has_key(key):
                            cdict[key] = cdict[base] = modname + '.html#' + key
        # Collect functions/routines defined here, with anchor fragments.
        funcs, fdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isroutine):
            if inspect.isbuiltin(value) or inspect.getmodule(value) is object:
                funcs.append((key, value))
                fdict[key] = '#-' + key
                if inspect.isfunction(value): fdict[value] = fdict[key]
        data = []
        for key, value in inspect.getmembers(object, pydoc.isdata):
            if key not in ['__builtins__', '__doc__']:
                data.append((key, value))
        doc = self.markup(pydoc.getdoc(object), self.preformat, fdict, cdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc
        # Let the generator filter out members imported from excluded modules.
        packageContext.clean ( classes, object )
        packageContext.clean ( funcs, object )
        packageContext.clean ( data, object )
        if hasattr(object, '__path__'):
            # This module is a package: list its sub-modules/sub-packages.
            modpkgs = []
            modnames = []
            for file in os.listdir(object.__path__[0]):
                path = os.path.join(object.__path__[0], file)
                modname = inspect.getmodulename(file)
                if modname and modname not in modnames:
                    modpkgs.append((modname, name, 0, 0))
                    modnames.append(modname)
                elif pydoc.ispackage(path):
                    modpkgs.append((file, name, 1, 0))
            modpkgs.sort()
            contents = self.multicolumn(modpkgs, self.modpkglink)
##            result = result + self.bigsection(
##                'Package Contents', '#ffffff', '#aa55cc', contents)
            result = result + self.moduleSection( object, packageContext)
        elif modules:
            contents = self.multicolumn(
                modules, lambda (key, value), s=self: s.modulelink(value))
            result = result + self.bigsection(
                'Modules', '#fffff', '#aa55cc', contents)

        if classes:
            classlist = map(lambda (key, value): value, classes)
            contents = [
                self.formattree(inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Classes', '#ffffff', '#ee77aa', join(contents))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Functions', '#ffffff', '#eeaa77', join(contents))
        if data:
            contents = []
            for key, value in data:
                try:
                    contents.append(self.document(value, key))
                except Exception, err:
                    # Best-effort: skip data items that cannot be documented.
                    pass
            result = result + self.bigsection(
                'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
        if hasattr(object, '__author__'):
            contents = self.markup(str(object.__author__), self.preformat)
            result = result + self.bigsection(
                'Author', '#ffffff', '#7799ee', contents)
        if hasattr(object, '__credits__'):
            contents = self.markup(str(object.__credits__), self.preformat)
            result = result + self.bigsection(
                'Credits', '#ffffff', '#7799ee', contents)
        return result

    def classlink(self, object, modname):
        """Make a link for a class."""
        name, module = object.__name__, sys.modules.get(object.__module__)
        if hasattr(module, name) and getattr(module, name) is object:
            return '<a href="%s.html#%s">%s</a>' % (
                module.__name__, name, name
            )
        # Fall back to pydoc's plain class-name rendering when the class is
        # not reachable through its declaring module.
        return pydoc.classname(object, modname)

    def moduleSection( self, object, packageContext ):
        """Create a module-links section for the given object (module)"""
        modules = inspect.getmembers(object, inspect.ismodule)
        packageContext.clean ( modules, object )
        # Queue referenced modules for documentation (recursion).
        packageContext.recurseScan( modules )

        if hasattr(object, '__path__'):
            # Package: enumerate sub-modules/sub-packages from the filesystem.
            modpkgs = []
            modnames = []
            for file in os.listdir(object.__path__[0]):
                path = os.path.join(object.__path__[0], file)
                modname = inspect.getmodulename(file)
                if modname and modname not in modnames:
                    modpkgs.append((modname, object.__name__, 0, 0))
                    modnames.append(modname)
                elif pydoc.ispackage(path):
                    modpkgs.append((file, object.__name__, 1, 0))
            modpkgs.sort()
            # do more recursion here...
            for (modname, name, ya,yo) in modpkgs:
                packageContext.addInteresting( join( (object.__name__, modname), '.'))
            items = []
            for (modname, name, ispackage,isshadowed) in modpkgs:
                try:
                    # get the actual module object...
##                    if modname == "events":
##                        import pdb
##                        pdb.set_trace()
                    module = pydoc.safeimport( "%s.%s"%(name,modname) )
                    description, documentation = pydoc.splitdoc( inspect.getdoc( module ))
                    if description:
                        items.append(
                            """%s -- %s"""% (
                                self.modpkglink( (modname, name, ispackage, isshadowed) ),
                                description,
                            )
                        )
                    else:
                        items.append(
                            self.modpkglink( (modname, name, ispackage, isshadowed) )
                        )
                except:
                    # Import/doc extraction failed: fall back to a bare link.
                    items.append(
                        self.modpkglink( (modname, name, ispackage, isshadowed) )
                    )
            contents = string.join( items, '<br>')
            result = self.bigsection(
                'Package Contents', '#ffffff', '#aa55cc', contents)
        elif modules:
            contents = self.multicolumn(
                modules, lambda (key, value), s=self: s.modulelink(value))
            result = self.bigsection(
                'Modules', '#fffff', '#aa55cc', contents)
        else:
            result = ""
        return result
class AlreadyDone(Exception):
    """Raised internally when a pending module has already been documented."""
class PackageDocumentationGenerator:
"""A package document generator creates documentation
for an entire package using pydoc's machinery.
baseModules -- modules which will be included
and whose included and children modules will be
considered fair game for documentation
destinationDirectory -- the directory into which
the HTML documentation will be written
recursion -- whether to add modules which are
referenced by and/or children of base modules
exclusions -- a list of modules whose contents will
not be shown in any other module, commonly
such modules as OpenGL.GL, wxPython.wx etc.
recursionStops -- a list of modules which will
explicitly stop recursion (i.e. they will never
be included), even if they are children of base
modules.
formatter -- allows for passing in a custom formatter
see DefaultFormatter for sample implementation.
"""
    def __init__ (
        self, baseModules, destinationDirectory = ".",
        recursion = 1, exclusions = (),
        recursionStops = (),
        formatter = None
    ):
        """Initialise the generator state and register the base modules.

        baseModules -- module specifiers whose children may be documented
        destinationDirectory -- where the HTML files will be written
        recursion -- whether referenced/child modules are followed
        exclusions -- module specifiers whose contents are hidden elsewhere
        recursionStops -- module specifiers that explicitly stop recursion
        formatter -- HTML formatter; defaults to DefaultFormatter()
        """
        self.destinationDirectory = os.path.abspath( destinationDirectory)
        self.exclusions = {}
        self.warnings = []
        self.baseSpecifiers = {}
        self.completed = {}
        self.recursionStops = {}
        self.recursion = recursion
        # recursionStops is stored as a membership dict for O(1) lookups.
        for stop in recursionStops:
            self.recursionStops[ stop ] = 1
        self.pending = []
        for exclusion in exclusions:
            try:
                # Resolve each exclusion specifier to a module object now.
                self.exclusions[ exclusion ]= pydoc.locate ( exclusion)
            except pydoc.ErrorDuringImport, value:
                self.warn( """Unable to import the module %s which was specified as an exclusion module"""% (repr(exclusion)))
        self.formatter = formatter or DefaultFormatter()
        for base in baseModules:
            self.addBase( base )
def warn( self, message ):
"""Warnings are used for recoverable, but not necessarily ignorable conditions"""
self.warnings.append (message)
def info (self, message):
"""Information/status report"""
_log.debug(message)
    def addBase(self, specifier):
        """Set the base of the documentation set, only children of these modules will be documented"""
        try:
            # Resolve the specifier to a module object and queue it for work.
            self.baseSpecifiers [specifier] = pydoc.locate ( specifier)
            self.pending.append (specifier)
        except pydoc.ErrorDuringImport, value:
            self.warn( """Unable to import the module %s which was specified as a base module"""% (repr(specifier)))
def addInteresting( self, specifier):
"""Add a module to the list of interesting modules"""
if self.checkScope( specifier):
self.pending.append (specifier)
else:
self.completed[ specifier] = 1
    def checkScope (self, specifier):
        """Check that the specifier is "in scope" for the recursion"""
        if not self.recursion:
            # Recursion disabled: nothing beyond the explicit bases is in scope.
            return 0
        items = string.split (specifier, ".")
        stopCheck = items [:]
        # Walk prefixes looking for an explicit stop or already-completed name.
        # NOTE(review): joins `items` (the full name) on every pass rather than
        # the shrinking `stopCheck` prefix, so the same full name is checked
        # len(items) times -- looks like a bug; confirm intended behaviour.
        while stopCheck:
            name = string.join(items, ".")
            if self.recursionStops.get( name):
                return 0
            elif self.completed.get (name):
                return 0
            del stopCheck[-1]
        # In scope only if some dotted prefix is a registered base specifier.
        while items:
            if self.baseSpecifiers.get( string.join(items, ".")):
                return 1
            del items[-1]
        # was not within any given scope
        return 0
    def process( self ):
        """Having added all of the base and/or interesting modules,
        proceed to generate the appropriate documentation for each
        module in the appropriate directory, doing the recursion
        as we go."""
        try:
            # Work queue loop: documenting one module may append children to
            # self.pending (via addInteresting from the formatter callbacks).
            while self.pending:
                try:
                    if self.completed.has_key( self.pending[0] ):
                        raise AlreadyDone( self.pending[0] )
                    self.info( """Start %s"""% (repr(self.pending[0])))
                    object = pydoc.locate ( self.pending[0] )
                    self.info( """ ... found %s"""% (repr(object.__name__)))
                except AlreadyDone:
                    pass
                except pydoc.ErrorDuringImport, value:
                    self.info( """ ... FAILED %s"""% (repr( value)))
                    self.warn( """Unable to import the module %s"""% (repr(self.pending[0])))
                except (SystemError, SystemExit), value:
                    # pydoc.locate can trigger module-level code that exits.
                    self.info( """ ... FAILED %s"""% (repr( value)))
                    self.warn( """Unable to import the module %s"""% (repr(self.pending[0])))
                except Exception, value:
                    self.info( """ ... FAILED %s"""% (repr( value)))
                    self.warn( """Unable to import the module %s"""% (repr(self.pending[0])))
                else:
                    # Render the module page and write it out as <name>.html.
                    page = self.formatter.page(
                        pydoc.describe(object),
                        self.formatter.docmodule(
                            object,
                            object.__name__,
                            packageContext = self,
                        )
                    )
                    file = open (
                        os.path.join(
                            self.destinationDirectory,
                            self.pending[0] + ".html",
                        ),
                        'w',
                    )
                    file.write(page)
                    file.close()
                    self.completed[ self.pending[0]] = object
                del self.pending[0]
        finally:
            # Always surface accumulated warnings, even on failure.
            for item in self.warnings:
                _log.info(item)
def clean (self, objectList, object):
"""callback from the formatter object asking us to remove
those items in the key, value pairs where the object is
imported from one of the excluded modules"""
for key, value in objectList[:]:
for excludeObject in self.exclusions.values():
if hasattr( excludeObject, key ) and excludeObject is not object:
if (
getattr( excludeObject, key) is value | |
systemic_vel_est(z,param_dict,burn_in,run_dir,plot_param_hist=plot_param_hist)
extra_dict = {**extra_dict, **z_dict}
if verbose:
print('\n > Saving Data...')
# Write all chains to a fits table
if (write_chain==True):
write_chains({**param_dict,**flux_dict,**lum_dict,**cont_lum_dict,**eqwidth_dict,**int_vel_disp_dict},run_dir)
# Plot and save the best fit model and all sub-components
comp_dict = plot_best_model(param_dict,
line_list,
combined_line_list,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
fit_stat,
velscale,
run_dir)
# Calculate some fit quality parameters which will be added to the dictionary
# These will be appended to result_dict and need to be in the same format {"med": , "std", "flag":}
fit_quality_dict = fit_quality_pars(param_dict,line_list,combined_line_list,comp_dict,fit_mask,fit_type="mcmc",fit_stat=fit_stat)
param_dict = {**param_dict,**fit_quality_dict}
# Write best fit parameters to fits table
# Header information
header_dict = {}
header_dict["Z_SDSS"] = z
header_dict["MED_NOISE"] = np.median(noise)
header_dict["VELSCALE"] = velscale
#
param_dict = {**param_dict,**flux_dict,**lum_dict,**eqwidth_dict,**cont_lum_dict,**int_vel_disp_dict,**extra_dict}
write_params(param_dict,header_dict,bounds,run_dir,binnum,spaxelx,spaxely)
# Make interactive HTML plot
if plot_HTML:
plotly_best_fit(fits_file.parent.name,line_list,fit_mask,run_dir)
if verbose:
print('\n Cleaning up...')
print('----------------------------------------------------------------------------------------------------')
# Delete redundant files to cut down on space
cleanup(run_dir)
# Total time
elap_time = (time.time() - start_time)
if verbose:
print("\n Total Runtime = %s" % (time_convert(elap_time)))
# Write to log
write_log(elap_time,'total_time',run_dir)
print(' - Done fitting %s! \n' % fits_file.stem)
sys.stdout.flush()
return
##################################################################################
def initialize_walkers(init_params, param_names, bounds, soft_cons, nwalkers, ndim):
    """
    Initialize MCMC walker positions in a small Gaussian ball around the
    best-fit parameters, redrawing any coordinate that falls outside its
    hard bounds.

    Parameters
    ----------
    init_params : array-like of shape (ndim,)
        Best-fit parameter values used as the center of the ball.
    param_names : list of str
        Parameter names (kept for interface compatibility; unused here --
        the previous implementation built a name->value dict that was
        never read).
    bounds : sequence of (low, high) pairs of length ndim
        Hard lower/upper limits for each parameter.
    soft_cons : list
        Soft constraints (kept for interface compatibility; unused here).
    nwalkers : int
        Number of MCMC walkers.
    ndim : int
        Number of free parameters.

    Returns
    -------
    numpy.ndarray of shape (nwalkers, ndim)
        Starting positions for every walker, all within their hard bounds.
    """
    # Gaussian ball (sigma = 1e-3) around the initial parameter vector.
    pos = init_params + 1.e-3 * np.random.randn(nwalkers, ndim)
    # Redraw any out-of-bounds coordinate until it satisfies its hard bounds.
    # NOTE: assumes init_params lies within (or very near) bounds; otherwise
    # this rejection loop could take many iterations.
    for j in range(ndim):          # parameter index
        low, high = bounds[j]
        for i in range(nwalkers):  # walker index
            while (pos[i, j] < low) | (pos[i, j] > high):
                # Scalar randn() avoids assigning a shape-(1,) array into a
                # scalar slot (deprecated NumPy behavior in the original).
                pos[i, j] = init_params[j] + 1.e-3 * np.random.randn()
    return pos
#### Calculate Sysetemic Velocity ################################################
def systemic_vel_est(z,param_dict,burn_in,run_dir,plot_param_hist=True):
	"""
	Estimates the systemic (stellar) velocity of the galaxy and corrects
	the SDSS redshift (which is based on emission lines).

	Parameters
	----------
	z : float
		Input (SDSS, emission-line based) redshift.
	param_dict : dict
		Fit-parameter dictionary; must contain 'stel_vel' with an MCMC 'chain'.
	burn_in : int
		Burn-in iterations to discard; if it exceeds the chain length,
		50% of the chain length is used instead.
	run_dir :
		Run directory (kept for interface compatibility; unused here).
	plot_param_hist : bool
		Kept for interface compatibility; unused here.

	Returns
	-------
	z_dict : dict
		Single-entry dict ('z_sys') with the best-fit systemic redshift,
		68%/95% HDI widths, summary statistics, the flattened chain, and a
		quality flag (1 = bad/undetermined).
	"""
	c = 299792.458 # speed of light (km/s)
	# Get measured stellar velocity
	stel_vel = np.array(param_dict['stel_vel']['chain'])
	# Calculate new redshift
	z_best = (z+1)*(1+stel_vel/c)-1
	# Burned-in + Flattened (along walker axis) chain
	# If burn_in is larger than the size of the chain, then
	# take 50% of the chain length instead.
	if (burn_in >= np.shape(z_best)[1]):
		burn_in = int(0.5*np.shape(z_best)[1])
		# print('\n Burn-in is larger than chain length! Using 50% of chain length for burn-in...\n')
	flat = z_best[:,burn_in:]
	# NOTE(review): .flat yields a numpy flatiter view rather than an array;
	# downstream boolean indexing works, but .flatten() would be clearer -- confirm.
	flat = flat.flat
	# Subsample the data into a manageable size for the kde and HDI
	if len(flat[np.isfinite(flat)]) > 0:
		subsampled = np.random.choice(flat[np.isfinite(flat)],size=10000)
		# Histogram; 'Doane' binning produces the best results from tests.
		hist, bin_edges = np.histogram(subsampled, bins='doane', density=False)
		# Generate pseudo-data on the ends of the histogram; this prevents the KDE
		# from weird edge behavior.
		n_pseudo = 3 # number of pseudo-bins
		bin_width=bin_edges[1]-bin_edges[0]
		lower_pseudo_data = np.random.uniform(low=bin_edges[0]-bin_width*n_pseudo, high=bin_edges[0], size=hist[0]*n_pseudo)
		upper_pseudo_data = np.random.uniform(low=bin_edges[-1], high=bin_edges[-1]+bin_width*n_pseudo, size=hist[-1]*n_pseudo)
		# Calculate bandwidth for KDE (Silverman method)
		h = kde_bandwidth(flat)
		# Create a subsampled grid for the KDE based on the subsampled data; by
		# default, we subsample by a factor of 10.
		xs = np.linspace(np.min(subsampled),np.max(subsampled),10*len(hist))
		# Calculate KDE
		kde = gauss_kde(xs,np.concatenate([subsampled,lower_pseudo_data,upper_pseudo_data]),h)
		# Highest-density intervals at 68% and 95% confidence
		p68 = compute_HDI(subsampled,0.68)
		p95 = compute_HDI(subsampled,0.95)
		post_max = xs[kde.argmax()] # posterior max estimated from KDE
		post_mean = np.mean(flat)
		post_med = np.median(flat)
		# Asymmetric uncertainties measured from the posterior max
		low_68 = post_max - p68[0]
		upp_68 = p68[1] - post_max
		low_95 = post_max - p95[0]
		upp_95 = p95[1] - post_max
		post_std = np.std(flat)
		post_mad = stats.median_abs_deviation(flat)
		# Flag the estimate if the lower 3-sigma bound crosses zero
		if ((post_max-(3.0*low_68))<0):
			flag = 1
		else: flag = 0
		z_dict = {}
		z_dict["z_sys"] = {}
		z_dict["z_sys"]["par_best"] = post_max
		z_dict["z_sys"]["ci_68_low"] = low_68
		z_dict["z_sys"]["ci_68_upp"] = upp_68
		z_dict["z_sys"]["ci_95_low"] = low_95
		z_dict["z_sys"]["ci_95_upp"] = upp_95
		z_dict["z_sys"]["mean"] = post_mean
		z_dict["z_sys"]["std_dev"] = post_std
		z_dict["z_sys"]["median"] = post_med
		z_dict["z_sys"]["med_abs_dev"] = post_mad
		z_dict["z_sys"]["flat_chain"] = flat
		z_dict["z_sys"]["flag"] = flag
	else:
		# No finite samples in the chain: return NaNs and flag as bad.
		z_dict = {}
		z_dict["z_sys"] = {}
		z_dict["z_sys"]["par_best"] = np.nan
		z_dict["z_sys"]["ci_68_low"] = np.nan
		z_dict["z_sys"]["ci_68_upp"] = np.nan
		z_dict["z_sys"]["ci_95_low"] = np.nan
		z_dict["z_sys"]["ci_95_upp"] = np.nan
		z_dict["z_sys"]["mean"] = np.nan
		z_dict["z_sys"]["std_dev"] = np.nan
		z_dict["z_sys"]["median"] = np.nan
		z_dict["z_sys"]["med_abs_dev"] = np.nan
		z_dict["z_sys"]["flat_chain"] = flat
		z_dict["z_sys"]["flag"] = 1
	return z_dict
##################################################################################
#### Find Nearest Function #######################################################
def find_nearest(array, value):
	"""
	Return the element of `array` closest to `value`, together with its index.

	Returns
	-------
	(closest_value, index) : tuple
		The nearest array element and its position.
	"""
	arr = np.asarray(array)
	idx = np.argmin(np.abs(arr - value))
	return arr[idx], idx
##################################################################################
#### Convert Seconds to Minutes ##################################################
# Python Program to Convert seconds
# into hours, minutes and seconds
def time_convert(seconds):
	"""
	Format a duration given in seconds as an H:MM:SS string.
	Durations wrap around at 24 hours.
	"""
	seconds = seconds % (24. * 3600.)
	hours, remainder = divmod(seconds, 3600.)
	minutes, secs = divmod(remainder, 60.)
	return "%d:%02d:%02d" % (hours, minutes, secs)
##################################################################################
#### Setup Directory Structure ###################################################
def setup_dirs(work_dir,verbose=True):
	"""
	Set up the BADASS output directory structure for a spectrum by creating
	a new, sequentially numbered "MCMC_output_#" folder inside `work_dir`.

	Parameters
	----------
	work_dir : str
		Working directory; must end with a path separator since folder names
		are appended by string concatenation.
	verbose : bool
		If True, print progress messages.

	Returns
	-------
	run_dir : str
		Path of the newly created MCMC_output folder.
	prev_dir : str or None
		Path of the previous MCMC_output folder, or None on the first run.
	"""
	def atoi(text):
		# Convert digit runs to int so folder suffixes sort numerically.
		return int(text) if text.isdigit() else text
	def natural_keys(text):
		'''
		alist.sort(key=natural_keys) sorts in human order
		http://nedbatchelder.com/blog/200712/human_sorting.html
		(See Toothy's implementation in the comments)
		'''
		# Bugfix: raw string for the regex; '(\d+)' is an invalid escape
		# sequence warning (and future error) in Python 3.
		return [atoi(c) for c in re.split(r'(\d+)', text)]
	# Get a naturally-sorted list of existing output folders in work_dir.
	folders = glob.glob(work_dir+'MCMC_output_*')
	folders.sort(key=natural_keys)
	if len(folders) == 0:
		if verbose:
			print(' Folder has not been created. Creating MCMC_output folder...')
		# First run: create MCMC_output_1; there is no previous directory.
		os.mkdir(work_dir+'MCMC_output_1')
		run_dir = os.path.join(work_dir,'MCMC_output_1/') # running directory
		prev_dir = None
	else:
		# Extract the index of the most recent folder and create the next one.
		result = re.search('MCMC_output_(.*)', folders[-1])
		fnum = str(int(result.group(1))+1)
		prev_num = str(int(result.group(1)))
		new_fold = work_dir+'MCMC_output_'+fnum+'/'
		prev_fold = work_dir+'MCMC_output_'+prev_num+'/'
		os.mkdir(new_fold)
		run_dir = new_fold
		# The original code branched on whether prev_fold contains
		# MCMC_chain.csv but assigned the same value in both branches;
		# the dead conditional is removed here (behavior preserved).
		prev_dir = prev_fold
	if verbose:
		print(' Storing MCMC_output in %s' % run_dir)
	return run_dir,prev_dir
##################################################################################
#### Determine fitting region ####################################################
# SDSS spectra
def determine_fit_reg_sdss(fits_file, run_dir, fit_reg, good_thresh, fit_losvd, losvd_options, verbose):
"""
Determines the fitting region for SDSS spectra.
"""
# Limits of the stellar template wavelength range
# The stellar templates packaged with BADASS are from the Indo-US Coude Feed Stellar Template Library
# with the below wavelength ranges.
if (losvd_options["library"]=="IndoUS"):
min_losvd, max_losvd = 3460, 9464
if (losvd_options["library"]=="Vazdekis2010"):
min_losvd, max_losvd = 3540.5, 7409.6
if (losvd_options["library"]=="eMILES"):
min_losvd, max_losvd = 1680.2, 49999.4
# Open spectrum file
hdu = fits.open(fits_file)
specobj = hdu[2].data
z = specobj['z'][0]
# t = hdu['COADD'].data
t = hdu[1].data
lam_gal = (10**(t['loglam']))/(1+z)
gal = t['flux']
ivar = t['ivar']
and_mask = t['and_mask']
# Edges of wavelength vector
first_good = lam_gal[0]
last_good = lam_gal[-1]
if ((fit_reg=='auto') or (fit_reg=='full')):
# The lower limit of the spectrum must be the lower limit of our stellar templates
if ((fit_losvd==True) & (first_good < min_losvd)) | ((fit_losvd==True) & (last_good > max_losvd)):
if verbose:
print("\n Warning: Fitting LOSVD requires wavelenth range between %d Å and %d Å for stellar templates. BADASS will adjust your fitting range to fit the LOSVD..." % (min_losvd,max_losvd))
print(" - Available wavelength range: (%d, %d)" % (first_good, last_good) )
auto_low = np.max([min_losvd,first_good]) # Indo-US Library of Stellar Templates has a lower limit of 3460
# auto_upp = determine_upper_bound(first_good,last_good)
auto_upp = np.min([max_losvd,last_good])
# if (auto_upp is not None):
new_fit_reg = (np.floor(auto_low),np.ceil(auto_upp))
if verbose:
print(" - New fitting region is (%d, %d). \n" % (new_fit_reg[0], new_fit_reg[1]) )
# elif (auto_upp is None):
# new_fit_reg = None
# return None, None
elif (fit_losvd==False):
new_fit_reg = (np.floor(first_good),np.ceil(last_good))
elif isinstance(fit_reg,(tuple,list)):
# Check to see if tuple/list makes sense
if ((fit_reg[0]>fit_reg[1]) | (fit_reg[1]<fit_reg[0])): # if boundaries overlap
if verbose:
print('\n Fitting boundaries overlap! \n')
new_fit_reg = None
return None, None
elif (fit_reg[0] > last_good) | (fit_reg[1] < first_good):
if verbose:
print('\n Fitting region not available! \n')
new_fit_reg = None
return None, None
elif ((fit_losvd==True) & (fit_reg[0]<min_losvd)) | ((fit_losvd==True) & (fit_reg[1]>max_losvd)):
if verbose:
print("\n Warning: Fitting LOSVD requires wavelenth range between 3460 A and 9464 A for stellar templates. BADASS will adjust your fitting range to fit the LOSVD...")
print(" - Input fitting range: (%d, %d)" % (fit_reg[0], fit_reg[1]) )
print(" - Available wavelength range: (%d, %d)" % (first_good, last_good) )
wave_low = np.max([min_losvd,fit_reg[0],first_good])
wave_upp = np.min([max_losvd,fit_reg[1],last_good])
new_fit_reg = (np.floor(wave_low),np.ceil(wave_upp))
if verbose:
print(" - New fitting region is (%d, %d). \n" % (new_fit_reg[0], new_fit_reg[1]) )
else:# (fit_losvd==False):
if (fit_reg[0] < first_good) | (fit_reg[1] > last_good):
if | |
helper_position1 = helper_position + len(Settings.CONTROL_MAPPING_DELIMITER)
helper_position2 = input_string.find(Settings.CONTROL_MAPPING_DELIMITER,helper_position1)
debug_log("loading control mapping")
settings_string = input_string[helper_position1:helper_position2].lstrip().rstrip()
self.player_key_maps.load_from_string(settings_string)
input_string = input_string[:helper_position] + input_string[helper_position2 + len(Settings.CONTROL_MAPPING_DELIMITER):]
lines = input_string.split("\n")
for line in lines:
helper_position = line.find(":")
if helper_position < 0:
continue
key_string = line[:helper_position]
value_string = line[helper_position + 1:].lstrip().rstrip()
if key_string == "sound volume":
self.sound_volume = float(value_string)
elif key_string == "music volume":
self.music_volume = float(value_string)
elif key_string == "screen resolution":
helper_tuple = value_string.split("x")
self.screen_resolution = (int(helper_tuple[0]),int(helper_tuple[1]))
elif key_string == "fullscreen":
self.fullscreen = True if value_string == "True" else False
elif key_string == "control by mouse":
self.control_by_mouse = True if value_string == "True" else False
#----------------------------------------------------------------------------
def sound_is_on(self):
return self.sound_volume > Settings.SOUND_VOLUME_THRESHOLD
#----------------------------------------------------------------------------
def music_is_on(self):
return self.music_volume > Settings.SOUND_VOLUME_THRESHOLD
#----------------------------------------------------------------------------
def current_resolution_index(self):
return next((i for i in xrange(len(Settings.POSSIBLE_SCREEN_RESOLUTIONS)) if self.screen_resolution == Settings.POSSIBLE_SCREEN_RESOLUTIONS[i]),0)
#==============================================================================
class Game(object):
  """Top-level application object: owns settings, menus, renderer, sound
  player and the game map, and runs the main loop state machine."""
  # colors used for players and teams
  COLOR_WHITE = 0
  COLOR_BLACK = 1
  COLOR_RED = 2
  COLOR_BLUE = 3
  COLOR_GREEN = 4
  COLOR_CYAN = 5
  COLOR_YELLOW = 6
  COLOR_ORANGE = 7
  COLOR_BROWN = 8
  COLOR_PURPLE = 9
  # human-readable names indexed by the COLOR_* constants above
  COLOR_NAMES = [
    "white",
    "black",
    "red",
    "blue",
    "green",
    "cyan",
    "yellow",
    "orange",
    "brown",
    "purple"
    ]
  # states of the top-level state machine (see manage_menus / run)
  STATE_PLAYING = 0
  STATE_EXIT = 1
  STATE_MENU_MAIN = 2
  STATE_MENU_SETTINGS = 3
  STATE_MENU_ABOUT = 4
  STATE_MENU_PLAY_SETUP = 5
  STATE_MENU_MAP_SELECT = 6
  STATE_MENU_CONTROL_SETTINGS = 7
  STATE_MENU_PLAY = 8
  STATE_MENU_RESULTS = 9
  STATE_GAME_STARTED = 10
  # cheat identifiers (activated by typed strings, see __check_cheat)
  CHEAT_PARTY = 0
  CHEAT_ALL_ITEMS = 1
  CHEAT_PLAYER_IMMORTAL = 2
  VERSION_STR = "0.95"
  NUMBER_OF_CONTROLLED_PLAYERS = 4 ##< maximum number of non-AI players on one PC
  RESOURCE_PATH = "resources"
  MAP_PATH = "maps"
  SETTINGS_FILE_PATH = "settings.txt"
#----------------------------------------------------------------------------
  def __init__(self):
    """Initialise pygame, load (or create) settings, build all menu
    screens and set the initial state to the main menu."""
    pygame.mixer.pre_init(22050,-16,2,512) # set smaller audio buffer size to prevent audio lag
    pygame.init()
    pygame.font.init()
    pygame.mixer.init()
    self.frame_number = 0
    self.player_key_maps = PlayerKeyMaps()
    self.settings = Settings(self.player_key_maps)
    self.game_number = 0
    # load persisted settings if the file exists
    if os.path.isfile(Game.SETTINGS_FILE_PATH):
      debug_log("loading settings from file " + Game.SETTINGS_FILE_PATH)
      self.settings.load_from_file(Game.SETTINGS_FILE_PATH)
    self.settings.save_to_file(Game.SETTINGS_FILE_PATH) # save the reformatted settings file (or create a new one)
    pygame.display.set_caption("Bombman")
    self.renderer = Renderer()
    self.apply_screen_settings()
    self.sound_player = SoundPlayer()
    self.sound_player.change_music()
    self.apply_sound_settings()
    self.apply_other_settings()
    self.map_name = ""
    self.random_map_selection = False
    self.game_map = None
    self.play_setup = PlaySetup()
    # construct all menu screens up front; manage_menus() switches between them
    self.menu_main = MainMenu(self.sound_player)
    self.menu_settings = SettingsMenu(self.sound_player,self.settings,self)
    self.menu_about = AboutMenu(self.sound_player)
    self.menu_play_setup = PlaySetupMenu(self.sound_player,self.play_setup)
    self.menu_map_select = MapSelectMenu(self.sound_player)
    self.menu_play = PlayMenu(self.sound_player)
    self.menu_controls = ControlsMenu(self.sound_player,self.player_key_maps,self)
    self.menu_results = ResultMenu(self.sound_player)
    self.ais = []
    self.state = Game.STATE_MENU_MAIN
    self.immortal_players_numbers = []
    self.active_cheats = set()
#----------------------------------------------------------------------------
  def deactivate_all_cheats(self):
    """Clear every active cheat."""
    self.active_cheats = set()
    debug_log("all cheats deactivated")
  #----------------------------------------------------------------------------
  def activate_cheat(self, what_cheat):
    """Add the given cheat constant (Game.CHEAT_*) to the active set."""
    self.active_cheats.add(what_cheat)
    debug_log("cheat activated")
#----------------------------------------------------------------------------
def deactivate_cheat(self, what_cheat):
if what_cheat in self.active_cheats:
self.active_cheats.remove(what_cheat)
#----------------------------------------------------------------------------
def cheat_is_active(self, what_cheat):
return what_cheat in self.active_cheats
#----------------------------------------------------------------------------
  def get_player_key_maps(self):
    """Return the PlayerKeyMaps object holding all player control mappings."""
    return self.player_key_maps
  #----------------------------------------------------------------------------
  def get_settings(self):
    """Return the current Settings object."""
    return self.settings
  #----------------------------------------------------------------------------
  def apply_screen_settings(self):
    """(Re)create the display surface according to the current settings."""
    display_flags = 0
    if self.settings.fullscreen:
      display_flags += pygame.FULLSCREEN
    self.screen = pygame.display.set_mode(self.settings.screen_resolution,display_flags)
    # center the mouse so mouse-controlled players start from a neutral position
    screen_center = (Renderer.get_screen_size()[0] / 2,Renderer.get_screen_size()[1] / 2)
    pygame.mouse.set_pos(screen_center)
    self.renderer.update_screen_info()
  #----------------------------------------------------------------------------
  def apply_sound_settings(self):
    """Push the configured music and sound volumes to the sound player."""
    self.sound_player.set_music_volume(self.settings.music_volume)
    self.sound_player.set_sound_volume(self.settings.sound_volume)
  #----------------------------------------------------------------------------
  def apply_other_settings(self):
    """Apply remaining settings (currently only control-by-mouse)."""
    self.player_key_maps.allow_control_by_mouse(self.settings.control_by_mouse)
  #----------------------------------------------------------------------------
  def save_settings(self):
    """Persist the current settings to the settings file."""
    self.settings.save_to_file(Game.SETTINGS_FILE_PATH)
#----------------------------------------------------------------------------
def __check_cheat(self, cheat_string, cheat = None):
if self.player_key_maps.string_was_typed(cheat_string):
if cheat != None:
self.activate_cheat(cheat)
else:
self.deactivate_all_cheats()
self.player_key_maps.clear_typing_buffer()
#----------------------------------------------------------------------------
## Manages the menu actions and sets self.active_menu.
def manage_menus(self):
new_state = self.state
prevent_input_processing = False
# cheack if any cheat was typed:
self.__check_cheat("party",game.CHEAT_PARTY)
self.__check_cheat("herecomedatboi",game.CHEAT_ALL_ITEMS)
self.__check_cheat("leeeroy",game.CHEAT_PLAYER_IMMORTAL)
self.__check_cheat("revert")
self.player_key_maps.get_current_actions() # this has to be called in order for player_key_maps to update mouse controls properly
# ================ MAIN MENU =================
if self.state == Game.STATE_MENU_MAIN:
self.active_menu = self.menu_main
if self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
new_state = [
Game.STATE_MENU_PLAY_SETUP,
Game.STATE_MENU_SETTINGS,
Game.STATE_MENU_ABOUT,
Game.STATE_EXIT
] [self.active_menu.get_selected_item()[0]]
# ================ PLAY MENU =================
elif self.state == Game.STATE_MENU_PLAY:
self.active_menu = self.menu_play
if self.active_menu.get_state() == Menu.MENU_STATE_CANCEL:
new_state = Game.STATE_PLAYING
elif self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
if self.active_menu.get_selected_item() == (0,0):
new_state = Game.STATE_PLAYING
for player in self.game_map.get_players():
player.wait_for_bomb_action_release()
elif self.active_menu.get_selected_item() == (1,0):
new_state = Game.STATE_MENU_MAIN
self.sound_player.change_music()
self.deactivate_all_cheats()
# ============== SETTINGS MENU ===============
elif self.state == Game.STATE_MENU_SETTINGS:
self.active_menu = self.menu_settings
if self.active_menu.get_state() == Menu.MENU_STATE_CANCEL:
new_state = Game.STATE_MENU_MAIN
elif self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
if self.active_menu.get_selected_item() == (5,0):
new_state = Game.STATE_MENU_CONTROL_SETTINGS
elif self.active_menu.get_selected_item() == (7,0):
new_state = Game.STATE_MENU_MAIN
# ========== CONTROL SETTINGS MENU ===========
elif self.state == Game.STATE_MENU_CONTROL_SETTINGS:
self.active_menu = self.menu_controls
self.active_menu.update(self.player_key_maps) # needs to be called to scan for pressed keys
if self.active_menu.get_state() == Menu.MENU_STATE_CANCEL:
new_state = Game.STATE_MENU_SETTINGS
elif self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
if self.active_menu.get_selected_item() == (0,0):
new_state = Game.STATE_MENU_SETTINGS
# ================ ABOUT MENU =================
elif self.state == Game.STATE_MENU_ABOUT:
self.active_menu = self.menu_about
if self.active_menu.get_state() in (Menu.MENU_STATE_CONFIRM,Menu.MENU_STATE_CANCEL):
new_state = Game.STATE_MENU_MAIN
# ============== PLAY SETUP MENU ==============
elif self.state == Game.STATE_MENU_PLAY_SETUP:
self.active_menu = self.menu_play_setup
if self.active_menu.get_state() == Menu.MENU_STATE_CANCEL:
new_state = Game.STATE_MENU_MAIN
elif self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
if self.active_menu.get_selected_item() == (0,1):
new_state = Game.STATE_MENU_MAP_SELECT
elif self.active_menu.get_selected_item() == (0,0):
new_state = Game.STATE_MENU_MAIN
# ============== MAP SELECT MENU ==============
elif self.state == Game.STATE_MENU_MAP_SELECT:
self.active_menu = self.menu_map_select
if self.active_menu.get_state() == Menu.MENU_STATE_CANCEL:
new_state = Game.STATE_MENU_PLAY_SETUP
elif self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
self.map_name = self.active_menu.get_selected_map_name()
self.random_map_selection = self.active_menu.random_was_selected()
self.game_number = 1 # first game
new_state = Game.STATE_GAME_STARTED
self.deactivate_cheat(Game.CHEAT_PARTY)
# ================ RESULT MENU ================
elif self.state == Game.STATE_MENU_RESULTS:
self.active_menu = self.menu_results
if self.active_menu.get_state() in (Menu.MENU_STATE_CONFIRM,Menu.MENU_STATE_CANCEL):
new_state = Game.STATE_MENU_MAIN
if new_state != self.state: # going to new state
self.state = new_state
self.active_menu.leaving()
self.active_menu.process_inputs(self.player_key_maps.get_current_actions())
#----------------------------------------------------------------------------
def acknowledge_wins(self, winner_team_number, players):
for player in players:
if player.get_team_number() == winner_team_number:
player.set_wins(player.get_wins() + 1)
#----------------------------------------------------------------------------
  def run(self):
    """Main loop: process pygame events, advance either the game simulation
    or the active menu depending on self.state, render, and keep frame
    timing until STATE_EXIT is reached."""
    time_before = pygame.time.get_ticks()
    show_fps_in = 0
    pygame_clock = pygame.time.Clock()
    while True: # main loop
      profiler.measure_start("main loop")
      # clamp dt to 100 ms so long stalls don't break the simulation
      dt = min(pygame.time.get_ticks() - time_before,100)
      time_before = pygame.time.get_ticks()
      pygame_events = []
      for event in pygame.event.get():
        if event.type == pygame.QUIT:
          self.state = Game.STATE_EXIT
        pygame_events.append(event)
      self.player_key_maps.process_pygame_events(pygame_events,self.frame_number)
      if self.state == Game.STATE_PLAYING:
        self.renderer.process_animation_events(self.game_map.get_and_clear_animation_events()) # play animations
        self.sound_player.process_events(self.game_map.get_and_clear_sound_events()) # play sounds
        profiler.measure_start("map rend.")
        self.screen.blit(self.renderer.render_map(self.game_map),(0,0))
        profiler.measure_stop("map rend.")
        profiler.measure_start("sim.")
        self.simulation_step(dt)
        profiler.measure_stop("sim.")
        if self.game_map.get_state() == GameMap.STATE_GAME_OVER:
          self.game_number += 1
          if self.game_number > self.play_setup.get_number_of_games():
            # last game of the match: credit the final winner and show results
            previous_winner = self.game_map.get_winner_team()
            self.acknowledge_wins(previous_winner,self.game_map.get_players())
            self.menu_results.set_results(self.game_map.get_players())
            self.game_map = None
            self.state = Game.STATE_MENU_RESULTS # show final results
            self.deactivate_all_cheats()
          else:
            self.state = Game.STATE_GAME_STARTED # new game
      elif self.state == Game.STATE_GAME_STARTED:
        debug_log("starting game " + str(self.game_number))
        previous_winner = -1
        if self.game_number != 1:
          previous_winner = self.game_map.get_winner_team()
        # carry kill/win counts over from the previous game of the match
        kill_counts = [0 for i in xrange(10)]
        win_counts = [0 for i in xrange(10)]
        if self.game_map != None:
          for player in self.game_map.get_players():
            kill_counts[player.get_number()] = player.get_kills()
            win_counts[player.get_number()] = player.get_wins()
        map_name_to_load = self.map_name if not self.random_map_selection else self.menu_map_select.get_random_map_name()
        with open(os.path.join(Game.MAP_PATH,map_name_to_load)) as map_file:
          map_data = map_file.read()
          self.game_map = GameMap(map_data,self.play_setup,self.game_number,self.play_setup.get_number_of_games(),self.cheat_is_active(Game.CHEAT_ALL_ITEMS))
        player_slots = self.play_setup.get_slots()
        if self.cheat_is_active(Game.CHEAT_PLAYER_IMMORTAL):
          self.immortal_players_numbers = []
          for i in xrange(len(player_slots)):
            if player_slots[i] != None and player_slots[i][0] >= 0: # cheat: if not AI
              self.immortal_players_numbers.append(i) # make the player immortal
        self.ais = []
        for i in xrange(len(player_slots)):
          if player_slots[i] != None and player_slots[i][0] < 0: # indicates AI
            self.ais.append(AI(self.game_map.get_players_by_numbers()[i],self.game_map))
        for player in self.game_map.get_players():
          player.set_kills(kill_counts[player.get_number()])
          player.set_wins(win_counts[player.get_number()])
        self.acknowledge_wins(previous_winner,self.game_map.get_players()) # add win counts
        self.sound_player.change_music()
        self.state = Game.STATE_PLAYING
      elif self.state == Game.STATE_EXIT:
        break
      else: # in menu
        self.manage_menus()
        profiler.measure_start("menu rend.")
        self.screen.blit(self.renderer.render_menu(self.active_menu,self),(0,0))
        profiler.measure_stop("menu rend.")
      pygame.display.flip()
      pygame_clock.tick()
      # periodically log the FPS when debugging is enabled
      if show_fps_in <= 0:
        if DEBUG_FPS:
          debug_log("fps: " + str(pygame_clock.get_fps()))
        show_fps_in = 255
      else:
        show_fps_in -= 1
      self.frame_number += 1
      profiler.measure_stop("main loop")
      if DEBUG_PROFILING:
        debug_log(profiler.get_profile_string())
        profiler.end_of_frame()
#----------------------------------------------------------------------------
## Filters a list of performed actions so that there are no actions of
# human players that are not participating in the game.
def filter_out_disallowed_actions(self, actions):
player_slots = self.play_setup.get_slots()
result = filter(lambda a: (player_slots[a[0]] != None and player_slots[a[0]] >=0) or (a[1] == PlayerKeyMaps.ACTION_MENU), actions)
return result
#----------------------------------------------------------------------------
  def simulation_step(self, dt):
    """Advance the running game by `dt` milliseconds: apply player inputs,
    let AIs act, and update the game map."""
    actions_being_performed = self.filter_out_disallowed_actions(self.player_key_maps.get_current_actions())
    # a menu action (player number -1) pauses the game and opens the play menu
    for action in actions_being_performed:
      if action[0] == -1: # menu key pressed
        self.state = Game.STATE_MENU_PLAY
        return
    profiler.measure_start("sim. AIs")
    # append AI-generated actions to the human ones
    for i in xrange(len(self.ais)):
      actions_being_performed = actions_being_performed + self.ais[i].play()
    profiler.measure_stop("sim. AIs")
    players = self.game_map.get_players()
    profiler.measure_start("sim. inputs")
    for player in players:
      player.react_to_inputs(actions_being_performed,dt,self.game_map)
    profiler.measure_stop("sim. inputs")
    profiler.measure_start("sim. map update")
    self.game_map.update(dt,self.immortal_players_numbers)
    profiler.measure_stop("sim. map update")
#----------------------------------------------------------------------------
## Sets up a test game for debugging, so that the menus can | |
<gh_stars>0
"""ANTs' utilities."""
import os
from ..base import traits, isdefined, TraitedSpec, File, Str, InputMultiObject
from ..mixins import CopyHeaderInterface
from .base import ANTSCommandInputSpec, ANTSCommand
class ImageMathInputSpec(ANTSCommandInputSpec):
    """Input specification for the ANTs ``ImageMath`` command-line tool."""
    # image dimensionality; first positional CLI argument
    dimension = traits.Int(
        3, usedefault=True, position=1, argstr="%d", desc="dimension of output image"
    )
    # auto-named from op1 with a "_maths" suffix when not set explicitly
    output_image = File(
        position=2,
        argstr="%s",
        name_source=["op1"],
        name_template="%s_maths",
        desc="output image file",
        keep_extension=True,
    )
    # closed set of ImageMath operations (arithmetic, morphology,
    # projections, intensity truncation, ...)
    operation = traits.Enum(
        "m",
        "vm",
        "+",
        "v+",
        "-",
        "v-",
        "/",
        "^",
        "max",
        "exp",
        "addtozero",
        "overadd",
        "abs",
        "total",
        "mean",
        "vtotal",
        "Decision",
        "Neg",
        "Project",
        "G",
        "MD",
        "ME",
        "MO",
        "MC",
        "GD",
        "GE",
        "GO",
        "GC",
        "TruncateImageIntensity",
        mandatory=True,
        position=3,
        argstr="%s",
        desc="mathematical operations",
    )
    op1 = File(
        exists=True, mandatory=True, position=-2, argstr="%s", desc="first operator"
    )
    # op2 is either a second image or a plain string of numeric arguments
    op2 = traits.Either(
        File(exists=True), Str, position=-1, argstr="%s", desc="second operator"
    )
    copy_header = traits.Bool(
        True,
        usedefault=True,
        desc="copy headers of the original image into the output (corrected) file",
    )
class ImageMathOuputSpec(TraitedSpec):
    # NOTE(review): class name misspells "Output"; kept as-is since it is
    # referenced as a public name (ImageMath.output_spec).
    output_image = File(exists=True, desc="output image file")
class ImageMath(ANTSCommand, CopyHeaderInterface):
    """
    Operations over images.
    Example
    -------
    >>> ImageMath(
    ...     op1='structural.nii',
    ...     operation='+',
    ...     op2='2').cmdline
    'ImageMath 3 structural_maths.nii + structural.nii 2'
    >>> ImageMath(
    ...     op1='structural.nii',
    ...     operation='Project',
    ...     op2='1 2').cmdline
    'ImageMath 3 structural_maths.nii Project structural.nii 1 2'
    >>> ImageMath(
    ...     op1='structural.nii',
    ...     operation='G',
    ...     op2='4').cmdline
    'ImageMath 3 structural_maths.nii G structural.nii 4'
    >>> ImageMath(
    ...     op1='structural.nii',
    ...     operation='TruncateImageIntensity',
    ...     op2='0.005 0.999 256').cmdline
    'ImageMath 3 structural_maths.nii TruncateImageIntensity structural.nii 0.005 0.999 256'
    """
    _cmd = "ImageMath"
    input_spec = ImageMathInputSpec
    output_spec = ImageMathOuputSpec
    # CopyHeaderInterface: copy the header of op1 into output_image
    _copy_header_map = {"output_image": "op1"}
class ResampleImageBySpacingInputSpec(ANTSCommandInputSpec):
    """Input specification for the ANTs ``ResampleImageBySpacing`` tool."""
    dimension = traits.Int(
        3, usedefault=True, position=1, argstr="%d", desc="dimension of output image"
    )
    input_image = File(
        exists=True, mandatory=True, position=2, argstr="%s", desc="input image file"
    )
    output_image = File(
        position=3,
        argstr="%s",
        name_source=["input_image"],
        name_template="%s_resampled",
        desc="output image file",
        keep_extension=True,
    )
    # 2- or 3-element spacing; serialized to space-separated values in _format_arg
    out_spacing = traits.Either(
        traits.List(traits.Float, minlen=2, maxlen=3),
        traits.Tuple(traits.Float, traits.Float, traits.Float),
        traits.Tuple(traits.Float, traits.Float),
        position=4,
        argstr="%s",
        mandatory=True,
        desc="output spacing",
    )
    apply_smoothing = traits.Bool(
        False, argstr="%d", position=5, desc="smooth before resampling"
    )
    addvox = traits.Int(
        argstr="%d",
        position=6,
        requires=["apply_smoothing"],
        desc="addvox pads each dimension by addvox",
    )
    nn_interp = traits.Bool(
        argstr="%d", desc="nn interpolation", position=-1, requires=["addvox"]
    )
    # NOTE(review): mandatory=True together with usedefault=True means the
    # trait is always set; presumably intentional -- confirm against nipype docs.
    copy_header = traits.Bool(
        True,
        mandatory=True,
        usedefault=True,
        desc="copy headers of the original image into the output (corrected) file",
    )
class ResampleImageBySpacingOutputSpec(TraitedSpec):
    output_image = File(exists=True, desc="resampled file")
class ResampleImageBySpacing(ANTSCommand, CopyHeaderInterface):
    """
    Resample an image with a given spacing.
    Examples
    --------
    >>> res = ResampleImageBySpacing(dimension=3)
    >>> res.inputs.input_image = 'structural.nii'
    >>> res.inputs.output_image = 'output.nii.gz'
    >>> res.inputs.out_spacing = (4, 4, 4)
    >>> res.cmdline  #doctest: +ELLIPSIS
    'ResampleImageBySpacing 3 structural.nii output.nii.gz 4 4 4'
    >>> res = ResampleImageBySpacing(dimension=3)
    >>> res.inputs.input_image = 'structural.nii'
    >>> res.inputs.output_image = 'output.nii.gz'
    >>> res.inputs.out_spacing = (4, 4, 4)
    >>> res.inputs.apply_smoothing = True
    >>> res.cmdline  #doctest: +ELLIPSIS
    'ResampleImageBySpacing 3 structural.nii output.nii.gz 4 4 4 1'
    >>> res = ResampleImageBySpacing(dimension=3)
    >>> res.inputs.input_image = 'structural.nii'
    >>> res.inputs.output_image = 'output.nii.gz'
    >>> res.inputs.out_spacing = (0.4, 0.4, 0.4)
    >>> res.inputs.apply_smoothing = True
    >>> res.inputs.addvox = 2
    >>> res.inputs.nn_interp = False
    >>> res.cmdline  #doctest: +ELLIPSIS
    'ResampleImageBySpacing 3 structural.nii output.nii.gz 0.4 0.4 0.4 1 2 0'
    """
    _cmd = "ResampleImageBySpacing"
    input_spec = ResampleImageBySpacingInputSpec
    output_spec = ResampleImageBySpacingOutputSpec
    _copy_header_map = {"output_image": "input_image"}
    def _format_arg(self, name, trait_spec, value):
        """Serialize out_spacing to space-separated numbers after validating
        that its length matches the image dimension."""
        if name == "out_spacing":
            if len(value) != self.inputs.dimension:
                raise ValueError("out_spacing dimensions should match dimension")
            value = " ".join(["%g" % d for d in value])
        return super(ResampleImageBySpacing, self)._format_arg(name, trait_spec, value)
class ThresholdImageInputSpec(ANTSCommandInputSpec):
    """Input specification for the ANTs ``ThresholdImage`` tool.

    Two mutually exclusive modes are supported: automatic (Otsu/Kmeans with
    ``num_thresholds``) and manual (``th_low``/``th_high`` with inside/outside
    values); the ``xor`` metadata enforces the exclusivity.
    """
    dimension = traits.Int(
        3, usedefault=True, position=1, argstr="%d", desc="dimension of output image"
    )
    input_image = File(
        exists=True, mandatory=True, position=2, argstr="%s", desc="input image file"
    )
    # NOTE(review): name_template reuses the "_resampled" suffix; presumably
    # copied from ResampleImageBySpacing -- confirm the intended suffix.
    output_image = File(
        position=3,
        argstr="%s",
        name_source=["input_image"],
        name_template="%s_resampled",
        desc="output image file",
        keep_extension=True,
    )
    mode = traits.Enum(
        "Otsu",
        "Kmeans",
        argstr="%s",
        position=4,
        requires=["num_thresholds"],
        xor=["th_low", "th_high"],
        desc="whether to run Otsu / Kmeans thresholding",
    )
    num_thresholds = traits.Int(position=5, argstr="%d", desc="number of thresholds")
    input_mask = File(
        exists=True,
        requires=["num_thresholds"],
        argstr="%s",
        desc="input mask for Otsu, Kmeans",
    )
    th_low = traits.Float(position=4, argstr="%f", xor=["mode"], desc="lower threshold")
    th_high = traits.Float(
        position=5, argstr="%f", xor=["mode"], desc="upper threshold"
    )
    inside_value = traits.Float(
        1, position=6, argstr="%f", requires=["th_low"], desc="inside value"
    )
    outside_value = traits.Float(
        0, position=7, argstr="%f", requires=["th_low"], desc="outside value"
    )
    copy_header = traits.Bool(
        True,
        mandatory=True,
        usedefault=True,
        desc="copy headers of the original image into the output (corrected) file",
    )
class ThresholdImageOutputSpec(TraitedSpec):
    output_image = File(exists=True, desc="resampled file")
class ThresholdImage(ANTSCommand, CopyHeaderInterface):
    """
    Apply thresholds on images.
    Examples
    --------
    >>> thres = ThresholdImage(dimension=3)
    >>> thres.inputs.input_image = 'structural.nii'
    >>> thres.inputs.output_image = 'output.nii.gz'
    >>> thres.inputs.th_low = 0.5
    >>> thres.inputs.th_high = 1.0
    >>> thres.inputs.inside_value = 1.0
    >>> thres.inputs.outside_value = 0.0
    >>> thres.cmdline  #doctest: +ELLIPSIS
    'ThresholdImage 3 structural.nii output.nii.gz 0.500000 1.000000 1.000000 0.000000'
    >>> thres = ThresholdImage(dimension=3)
    >>> thres.inputs.input_image = 'structural.nii'
    >>> thres.inputs.output_image = 'output.nii.gz'
    >>> thres.inputs.mode = 'Kmeans'
    >>> thres.inputs.num_thresholds = 4
    >>> thres.cmdline  #doctest: +ELLIPSIS
    'ThresholdImage 3 structural.nii output.nii.gz Kmeans 4'
    """
    _cmd = "ThresholdImage"
    input_spec = ThresholdImageInputSpec
    output_spec = ThresholdImageOutputSpec
    _copy_header_map = {"output_image": "input_image"}
class AIInputSpec(ANTSCommandInputSpec):
    """Input spec for the ``antsAI`` initializer (see :class:`AI`)."""
    dimension = traits.Enum(
        3, 2, usedefault=True, argstr="-d %d", desc="dimension of output image"
    )
    verbose = traits.Bool(
        False, usedefault=True, argstr="-v %d", desc="enable verbosity"
    )
    fixed_image = File(
        exists=True,
        mandatory=True,
        desc="Image to which the moving_image should be transformed",
    )
    moving_image = File(
        exists=True,
        mandatory=True,
        desc="Image that will be transformed to fixed_image",
    )
    # BUGFIX: help strings previously read "fixed mage mask"/"moving mage
    # mask" -- typo "mage" corrected to "image".
    fixed_image_mask = File(exists=True, argstr="-x %s", desc="fixed image mask")
    moving_image_mask = File(
        exists=True, requires=["fixed_image_mask"], desc="moving image mask"
    )
    # Components of the -m metric argument:
    # (metric name, number of bins, sampling strategy, sampling percentage).
    metric_trait = (
        traits.Enum("Mattes", "GC", "MI"),
        traits.Int(32),
        traits.Enum("Regular", "Random", "None"),
        traits.Range(value=0.2, low=0.0, high=1.0),
    )
    metric = traits.Tuple(
        *metric_trait, argstr="-m %s", mandatory=True, desc="the metric(s) to use."
    )
    # (transform type, gradient step); rendered as e.g. "Affine[0.1]".
    transform = traits.Tuple(
        traits.Enum("Affine", "Rigid", "Similarity"),
        traits.Range(value=0.1, low=0.0, exclude_low=True),
        argstr="-t %s[%g]",
        usedefault=True,
        desc="Several transform options are available",
    )
    principal_axes = traits.Bool(
        False,
        usedefault=True,
        argstr="-p %d",
        xor=["blobs"],
        desc="align using principal axes",
    )
    search_factor = traits.Tuple(
        traits.Float(20),
        traits.Range(value=0.12, low=0.0, high=1.0),
        usedefault=True,
        argstr="-s [%g,%g]",
        desc="search factor",
    )
    # Either (step, (x, y, z)) in 3D or (step, (x, y)) in 2D.
    search_grid = traits.Either(
        traits.Tuple(
            traits.Float, traits.Tuple(traits.Float, traits.Float, traits.Float)
        ),
        traits.Tuple(traits.Float, traits.Tuple(traits.Float, traits.Float)),
        argstr="-g %s",
        desc="Translation search grid in mm",
    )
    # (number of iterations, convergence threshold, convergence window).
    convergence = traits.Tuple(
        traits.Range(low=1, high=10000, value=10),
        traits.Float(1e-6),
        traits.Range(low=1, high=100, value=10),
        usedefault=True,
        argstr="-c [%d,%g,%d]",
        desc="convergence",
    )
    output_transform = File(
        "initialization.mat", usedefault=True, argstr="-o %s", desc="output file name"
    )
class AIOuputSpec(TraitedSpec):
    # NOTE(review): class name misspells "Output", but it is referenced as
    # AI.output_spec below, so it is kept for backward compatibility.
    output_transform = File(exists=True, desc="output file name")
class AI(ANTSCommand):
    """
    Calculate the optimal linear transform parameters for aligning two images.
    Examples
    --------
    >>> AI(
    ...     fixed_image='structural.nii',
    ...     moving_image='epi.nii',
    ...     metric=('Mattes', 32, 'Regular', 1),
    ... ).cmdline
    'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1]
    -o initialization.mat -p 0 -s [20,0.12] -t Affine[0.1] -v 0'
    >>> AI(fixed_image='structural.nii',
    ...    moving_image='epi.nii',
    ...    metric=('Mattes', 32, 'Regular', 1),
    ...    search_grid=(12, (1, 1, 1)),
    ...    ).cmdline
    'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1]
    -o initialization.mat -p 0 -s [20,0.12] -g [12.0,1x1x1] -t Affine[0.1] -v 0'
    """
    _cmd = "antsAI"
    input_spec = AIInputSpec
    output_spec = AIOuputSpec
    def _run_interface(self, runtime, correct_return_codes=(0,)):
        # Record where the output transform lands so _list_outputs can report
        # it without re-deriving the path after the command has run.
        runtime = super(AI, self)._run_interface(runtime, correct_return_codes)
        self._output = {
            "output_transform": os.path.join(
                runtime.cwd, os.path.basename(self.inputs.output_transform)
            )
        }
        return runtime
    def _format_arg(self, opt, spec, val):
        # metric tuple -> "Name[fixed,moving,bins,sampling,pct]".
        if opt == "metric":
            val = "%s[{fixed_image},{moving_image},%d,%s,%g]" % val
            val = val.format(
                fixed_image=self.inputs.fixed_image,
                moving_image=self.inputs.moving_image,
            )
            return spec.argstr % val
        # (step, (x, y[, z])) -> "[step,xxyxz]".
        if opt == "search_grid":
            fmtval = "[%s,%s]" % (val[0], "x".join("%g" % v for v in val[1]))
            return spec.argstr % fmtval
        # Pair the fixed mask with the moving mask when both are defined;
        # otherwise fall through to the default single-path formatting.
        if opt == "fixed_image_mask":
            if isdefined(self.inputs.moving_image_mask):
                return spec.argstr % ("[%s,%s]" % (val, self.inputs.moving_image_mask))
        return super(AI, self)._format_arg(opt, spec, val)
    def _list_outputs(self):
        # Populated by _run_interface; raises AttributeError if never run.
        return getattr(self, "_output")
class AverageAffineTransformInputSpec(ANTSCommandInputSpec):
    dimension = traits.Enum(
        3, 2, argstr="%d", mandatory=True, position=0, desc="image dimension (2 or 3)"
    )
    output_affine_transform = File(
        argstr="%s",
        mandatory=True,
        position=1,
        desc="Outputfname.txt: the name of the resulting transform.",
    )
    # NOTE(review): position jumps from 1 to 3; position 2 appears unused --
    # confirm against the AverageAffineTransform CLI before renumbering.
    transforms = InputMultiObject(
        File(exists=True),
        argstr="%s",
        mandatory=True,
        position=3,
        desc="transforms to average",
    )
class AverageAffineTransformOutputSpec(TraitedSpec):
    # Absolute path of the averaged transform (see _list_outputs).
    affine_transform = File(exists=True, desc="average transform file")
class AverageAffineTransform(ANTSCommand):
    """
    Examples
    --------
    >>> from nipype.interfaces.ants import AverageAffineTransform
    >>> avg = AverageAffineTransform()
    >>> avg.inputs.dimension = 3
    >>> avg.inputs.transforms = ['trans.mat', 'func_to_struct.mat']
    >>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat'
    >>> avg.cmdline
    'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat'
    """
    _cmd = "AverageAffineTransform"
    input_spec = AverageAffineTransformInputSpec
    output_spec = AverageAffineTransformOutputSpec
    # NOTE(review): a previous `_format_arg` override only delegated to the
    # base class; the no-op override has been removed.
    def _list_outputs(self):
        # Report the output transform as an absolute path so downstream
        # workflow nodes can find it regardless of working directory.
        outputs = self._outputs().get()
        outputs["affine_transform"] = os.path.abspath(
            self.inputs.output_affine_transform
        )
        return outputs
class AverageImagesInputSpec(ANTSCommandInputSpec):
    dimension = traits.Enum(
        3, 2, argstr="%d", mandatory=True, position=0, desc="image dimension (2 or 3)"
    )
    # hash_files=False: the output name does not contribute to the node hash.
    output_average_image = File(
        "average.nii",
        argstr="%s",
        position=1,
        usedefault=True,
        hash_files=False,
        desc="the name of the resulting image.",
    )
    normalize = traits.Bool(
        argstr="%d",
        mandatory=True,
        position=2,
        desc="Normalize: if true, the 2nd image is divided by its mean. "
        "This will select the largest image to average into.",
    )
    images = InputMultiObject(
        File(exists=True),
        argstr="%s",
        mandatory=True,
        position=3,
        desc="image to apply transformation to (generally a coregistered functional)",
    )
class AverageImagesOutputSpec(TraitedSpec):
    # The averaged image written by the AverageImages tool.
    output_average_image = File(exists=True, desc="average image file")
class AverageImages(ANTSCommand):
"""
Examples
--------
>>> from nipype.interfaces.ants import | |
"blipmale.ogg"
    def ssound(self,v):
        # Property setter: change the per-character "blip" sound file.
        self._clicksound = v
    # clicksound wraps _clicksound through the gsound/ssound accessors above.
    clicksound = property(gsound,ssound)
def can_continue(self):
"""If not blocking, player cannot make text continue
If skip mode is on (_debug mode) we can just skip the text
Otherwise, check to see if all the text has been written out"""
if not self.blocking:
return
if self.can_skip or self.nextline():
return True
def enter_down(self):
if not self.can_continue(): return
if not self.nextline():
while not self.nextline():
self.add_character()
#self.mwritten = self._markup._text
else:
self.forward()
def k_left(self):
if self.statement and self.nextline():
assets.cur_script.prev_statement()
self.forward()
def k_right(self):
if self.statement and self.nextline():
self.forward()
def k_z(self):
if self.statement and self.nextline():
assets.cur_script.cross = "pressed"
assets.cur_script.clear_arrows()
assets.cur_script.goto_result("press "+self.statement,backup=assets.variables.get("_court_fail_label",None))
self.delete()
def k_x(self):
if self.statement and self.nextline():
em = assets.addevmenu()
em.fail = assets.variables.get("_court_fail_label",None)
assets.cur_script.clear_arrows()
    def forward(self,sound=True):
        """Set last written text to the contents of the textbox
        turn off testimony blinking
        scroll to next 3 lines of text if they exist
        if there is no more text, delete textbox
        play the bloop sound

        sound -- when False, skip the "bloop" page-advance sound.
        """
        # Stash what was on screen so scripts can reference it later.
        t = textutil.markup_text()
        t._text = self.mwritten
        assets.variables["_last_written_text"] = t.fulltext()
        # Drop the lines already shown (nametag line + 3 text lines).
        lines = self.text.split("\n")
        lines = lines[4:]
        self.set_text("\n".join(lines))
        self.mwritten = []
        self.next = self.num_lines
        self.img = self.base.copy()
        if not self.text.strip():
            self.delete()
        if sound:
            assets.play_sound("bloop.ogg",volume=0.7)
    def draw(self,dest):
        """Blit the textbox, continue arrows and nametag onto `dest`.

        Position defaults to bottom-center of the screen and can be
        overridden with the _textbox_x/_textbox_y script variables; the
        nametag likewise honors _nt_x/_nt_y and _nt_text_x/_nt_text_y.
        """
        self.children = []
        if not self.go or self.kill:
            return
        #For the widget
        x = assets.variables.get("_textbox_x","")
        y = assets.variables.get("_textbox_y","")
        self.rpos1 = [(assets.sw-self.img.get_width())/2,
                      assets.sh-self.img.get_height()]
        if x!="":
            self.rpos1[0] = int(x)
        if y!="":
            self.rpos1[1] = int(y)
        self.width1 = self.img.get_width()
        self.height1 = self.img.get_height()
        dest.blit(self.img,
                  self.rpos1)
        # Continue arrows only blink once the page is fully written.
        if self.rightp and self.nextline():
            dest.blit(self.rpi.img,[self.rpos1[0]+self.width1-16,
                                    self.rpos1[1]+self.height1-16])
        if getattr(self,"showleft",False) and self.nextline():
            dest.blit(pygame.transform.flip(self.rpi.img,1,0),[self.rpos1[0],
                                    self.rpos1[1]+self.height1-16])
        #End
        x = assets.variables.get("_nt_x","")
        y = assets.variables.get("_nt_y","")
        if self.nt_full:
            # Single-image nametag background.
            nx,ny = self.rpos1[0],(self.rpos1[1]-self.nt_full.get_height())
            if x!="":
                nx = int(x)
            if y!="":
                ny = int(y)
            dest.blit(self.nt_full,[nx,ny])
            if self.nt_text_image:
                if assets.variables.get("_nt_text_x","")!="":
                    nx += int(assets.variables.get("_nt_text_x",0))
                if assets.variables.get("_nt_text_y","")!="":
                    ny += int(assets.variables.get("_nt_text_y",0))
                dest.blit(self.nt_text_image,[nx+5,ny])
        elif self.nt_left and self.nt_text_image:
            # Three-part (left/middle/right) stretchable nametag background.
            nx,ny = self.rpos1[0],(self.rpos1[1]-self.nt_left.get_height())
            if x!="":
                nx = int(x)
            if y!="":
                ny = int(y)
            dest.blit(self.nt_left,[nx,ny])
            for ii in range(self.nt_text_image.get_width()+8):
                dest.blit(self.nt_middle,[nx+3+ii,ny])
            dest.blit(self.nt_right,[nx+3+ii+1,ny])
            if assets.variables.get("_nt_text_x","")!="":
                nx += int(assets.variables.get("_nt_text_x",0))
            if assets.variables.get("_nt_text_y","")!="":
                ny += int(assets.variables.get("_nt_text_y",0))
            dest.blit(self.nt_text_image,[nx+5,ny])
def add_character(self):
command = None
next_char = 1
char = self._markup._text[len(self.mwritten)]
self.mwritten.append(char)
if isinstance(char,textutil.markup_command):
command,args = char.command,char.args
if assets.cur_script.macros.get(command,None):
print "RUNNING A MACRO"
assets.variables["_return"] = ""
this = assets.cur_script
ns = assets.cur_script.execute_macro(command,args)
old = ns._endscript
s = len(self.mwritten)-1
mt = self._markup._text
self._markup._text = self.mwritten
def back(*args):
old()
print "MWRIT",s,self.mwritten
t0=[]
t1=[]
for i,c in enumerate(mt):
if i<s and not isinstance(c,textutil.markup_command):
t0.append(c)
if i>s:
t1.append(c)
t2 = [textutil.markup_command("_fullspeed","")]+t0+[textutil.markup_command("_endfullspeed","")]+list(assets.variables["_return"])+t1[:-1]
print "t0","".join([str(x) for x in t0])
print "t1","".join([str(x) for x in t1])
t = textutil.markup_text()
t._text = t2
print repr(t.fulltext())
self.set_text(t.fulltext())
self.mwritten = []
self.next_char = 0
ns._endscript = back
else:
print "no macro for",command
commands = ["sfx","sound","delay","spd","_fullspeed","_endfullspeed",
"wait","center","type","next",
"tbon","tboff",
"e","f","s","p","c"]
commands.sort(key=lambda o:len(o))
commands.reverse()
for cm in commands:
if command.startswith(cm):
nargs = command.split(cm,1)[1]
if nargs and not nargs.startswith(" "):
command,args = cm,nargs
break
print "new command:",command,args
if command == "sfx":
assets.play_sound(args)
elif command == "sound":
self.clicksound = args
elif command == "delay":
self.delay = int(args)
self.wait = "manual"
elif command == "spd":
self.speed = float(args)
elif command == "_fullspeed":
self.last_speed = self.speed
self.speed = 0
elif command == "_endfullspeed":
self.speed = self.last_speed
elif command == "wait":
self.wait = args
elif command == "center":
pass
elif command == "type":
self.clicksound = "typewriter.ogg"
self.delay = 2
self.wait = "manual"
elif command == "next":
if assets.portrait:
assets.portrait.set_blinking()
del self.mwritten[-1]
self.forward(False)
return 0
elif command=="e":
try:
assets.set_emotion(args.strip())
except:
import traceback
traceback.print_exc()
raise markup_error("No character to apply emotion to")
elif command=="f":
assets.flash = 3
assets.flashcolor = [255,255,255]
command = args.split(" ")
if len(command)>0 and command[0]:
assets.flash = int(command[0])
if len(command)>1:
assets.flashcolor = color_str(command[1])
elif command=="s":
assets.shakeargs = [x for x in args.split(" ") if x.strip()]
elif command=="p":
next_char = int(args.strip())
elif command=="c":
pass
elif command=="tbon":
assets.cur_script.tbon()
elif command=="tboff":
assets.cur_script.tboff()
else:
raise markup_error("No macro or markup command valid for:"+command)
elif isinstance(char,textutil.markup):
pass
else:
if not hasattr(self,"_lc"):
self._lc = ""
self.go = 1
if self._lc in [".?"] and char == " ":
next_char = 6
if self._lc in ["!"] and char == " ":
next_char = 8
if self._lc in [","] and char == " ":
next_char = 4
if self._lc in ["-"] and (char.isalpha() or char.isdigit()):
next_char = 4
if char in ["("]:
self.in_paren = 1
if char in [")"]:
self.in_paren = 0
if assets.portrait:
punctuation = [x for x in assets.variables.get("_punctuation",u".,?!")]
if not self.in_paren and not char in punctuation:
assets.portrait.set_talking()
if self.in_paren:
assets.portrait.set_blinking()
if char.encode('utf-8').strip():
assets.play_sound(self.clicksound,volume=random.uniform(0.65,1.0))
next_char = int(next_char*self.delay)
if self.wait=="manual":
if char.strip():
next_char = 5*self.delay
else:
next_char = 2
self._lc = char
return next_char
def nextline(self):
"""Returns true if all the text waiting to be written into the textbox has been written"""
t = textutil.markup_text()
t._text = self.mwritten
return not len(self.mwritten)<len(self._markup._text) or len(t.fulltext().split("\n"))>=self.num_lines
    def update(self,dt=None):
        """Advance the typing animation by `dt` ticks and re-render the page.

        dt -- elapsed ticks; defaults to the global frame delta (assets.dt).
        Returns True while the box is blocking script execution.
        """
        #assets.play_sound(self.clicksound)
        self.rpi.update()
        if self.kill: return
        if dt is None:
            dt = assets.dt
        self.next_char -= dt
        #FIXME - logic is horrendously convoluted
        # Type as many characters as the elapsed time and speed allow;
        # speed == 0 means "instant" (write everything this frame).
        while (not self.nextline()) and self.next_char<=0:
            #self.next_char += 1
            num_chars = max(int(self.speed),1)
            next_char = 0
            cnum = num_chars
            while (not self.nextline()) and ((not self.speed) or cnum>0):
                cnum -= 1
                ac_next = self.add_character()
                if self.speed:
                    next_char += ac_next
            if self.speed:
                self.next_char += (next_char/float(self.speed))
        # Stop the portrait's talking animation during long pauses.
        if assets.portrait:
            if self.next_char>10 or self.nextline():
                assets.portrait.set_blinking()
        title = True
        self.next = 0
        if self.next==0:
            # Re-render the page: first line is the nametag, the rest are
            # the textbox body lines.
            self.img = self.base.copy()
            y, stx, inc = 6, 6, 18*(assets.sh/standard_sh)
            x = stx
            color = self.color
            center = False
            t = textutil.markup_text()
            t._text = self.mwritten
            lines = [self.nametag.replace("\n","")]+t.fulltext().split("\n")
            nlines = assets.variables["_textbox_lines"]
            if nlines == "auto":
                if len(lines)==4:
                    nlines = "3"
                else:
                    nlines = "2"
            if not nlines:
                nlines = "3"
            nlines = int(nlines)
            if nlines == 2:
                y,inc = 8,24
            for i,line in enumerate(lines[:nlines+1]):
                if title:
                    # Render the nametag with its optional override color.
                    if line.strip():
                        ncolor = assets.variables.get("_nt_text_color","")
                        if ncolor:
                            ncolor = color_str(ncolor)
                        else:
                            ncolor = color
                        nt_image = assets.get_font("nt").render(line.replace(u"_",u" "),1,ncolor)
                        self.nt_text_image = nt_image
                    title = False
                else:
                    img = assets.get_image_font("tb").render(line,color)
                    color = ImgFont.lastcolor
                    if "{center}" in line:
                        center = not center
                    if center:
                        x = (assets.sw-img.get_width())//2
                    # In _debug mode, flag lines that run off screen once.
                    if x+img.get_width()>assets.sw:
                        if not getattr(self,"OVERAGE",None) and vtrue(assets.variables.get("_debug","false")):
                            self.OVERAGE = x+img.get_width()-assets.sw
                            raise offscreen_text('Text Overflow:"%s" over by %s'%(line,self.OVERAGE))
                    self.img.blit(img,[x,y])
                    y+=inc
                    x = stx
            self.next = self.num_lines
        # Once a cross-examination statement finishes, show the buttons.
        if self.is_cross and self.nextline():
            self.is_cross = False
            subscript("show_press_button")
            subscript("show_present_button")
        if self.blocking:
            return True
        return
class uglyarrow(fadesprite):
    def __init__(self):
        """Second-screen widget: background, big press button, flick arrows."""
        fadesprite.__init__(self,x=0,y=assets.sh)
        self.load(assets.variables.get("_bigbutton_bg","bg/main"))
        self.arrow = sprite(0,0).load("general/arrow_big")
        self.scanlines = fadesprite(0,0).load("fg/scanlines")
        self.border_top = fadesprite(0,0).load(assets.variables.get("_screen2_letterbox_img","general/bigbutton/border"))
        self.border_bottom = fadesprite(0,0).load(assets.variables.get("_screen2_letterbox_img","general/bigbutton/border"))
        self.scanlines.fade = 50
        # button: single advance button; double: cross-exam two-button image.
        self.button = None
        self.double = None
        self.textbox = None
        # Draw priority from the module-level layer ordering.
        self.pri = ulayers.index(self.__class__.__name__)
        self.width = self.iwidth = assets.sw
        self.height = self.iheight = assets.sh
        # True while the big button is drawn highlighted (pressed).
        self.high = False
        self.showleft = True
        # Last image path loaded into `button` (avoids reloading each frame).
        self.last = None
        self.id_name = "_uglyarrow_"
def show_unclicked(self):
p = assets.variables.get("_bigbutton_img","general/buttonpress")
if self.last != p:
self.last = p
self.button = sprite(0,0).load(p)
def show_clicked(self):
p = assets.variables.get("_bigbutton_img","general/buttonpress")
high = noext(p)+"_high"+onlyext(p)
if self.last != high:
self.last = high
self.button = sprite(0,0).load(high)
def show_cross(self):
if not self.double:
self.double = sprite(0,0).load(assets.variables.get("_bigbutton_cross","general/cross_exam_buttons"))
self.button = None
def update(self):
self.pos[1] = assets.sh
if self.high:
self.show_clicked()
else:
self.show_unclicked()
if self.textbox and self.textbox.statement:
self.show_cross()
self.arrow.update()
return False
    def draw(self,dest):
        """Draw the background, centered button(s), arrows and overlays."""
        fadesprite.draw(self,dest)
        if self.button:
            # Single advance button, centered on the lower screen.
            self.button.pos[0] = (assets.sw-self.button.img.get_width())//2
            self.button.pos[1] = (assets.sh-self.button.img.get_height())//2+assets.sh
            self.button.draw(dest)
            self.iwidth = self.button.img.get_width()
            self.iheight = self.button.img.get_height()
            if self.can_click():
                self.arrow.pos[0] = (assets.sw-self.arrow.img.get_width())//2
                self.arrow.pos[1] = (assets.sh-self.arrow.img.get_height())//2+assets.sh
                self.arrow.draw(dest)
        elif self.double:
            # Cross-examination double button with left/right flick arrows.
            self.double.pos[0] = (assets.sw-self.double.img.get_width())//2
            self.double.pos[1] = (assets.sh-self.double.img.get_height())//2+assets.sh
            self.double.draw(dest)
            self.iwidth = self.double.img.get_width()
            self.iheight = self.double.img.get_height()
            if self.can_click():
                self.arrow.pos[0] = (assets.sw-self.arrow.img.get_width())//2-75
                self.arrow.pos[1] = (assets.sh-self.arrow.img.get_height())//2+assets.sh
                self.arrow.img = pygame.transform.flip(self.arrow.img,1,0)
                if self.showleft:
                    self.arrow.draw(dest)
                self.arrow.pos[0] = (assets.sw-self.arrow.img.get_width())//2+70
                self.arrow.pos[1] = (assets.sh-self.arrow.img.get_height())//2+assets.sh
                # Flip back so the right arrow (and next frame) is unflipped.
                self.arrow.img = pygame.transform.flip(self.arrow.img,1,0)
                self.arrow.draw(dest)
        # Optional CRT-style overlays controlled by script variables.
        if vtrue(assets.variables.get("_screen2_scanlines","off")):
            self.scanlines.pos = self.pos
            self.scanlines.draw(dest)
        if vtrue(assets.variables.get("_screen2_letterbox","on")):
            self.border_top.pos = self.pos
            self.border_top.draw(dest)
            self.border_bottom.pos[0] = self.pos[0]
            self.border_bottom.pos[1] = self.pos[1]+192-self.border_bottom.height
            self.border_bottom.draw(dest)
def over(self,mp):
if self.button:
if mp[0]>=self.button.pos[0] and mp[1]>=self.button.pos[1]\
and mp[0]<=self.button.pos[0]+self.iwidth\
and mp[1]<=self.button.pos[1]+self.iheight:
return True
if self.double:
if mp[0]>=self.double.pos[0] and mp[1]>=self.double.pos[1]\
and mp[0]<=self.double.pos[0]+self.iwidth/2\
and mp[1]<=self.double.pos[1]+self.iheight:
return "left"
if mp[0]>=self.double.pos[0]+self.iwidth/2 and mp[1]>=self.double.pos[1]\
and mp[0]<=self.double.pos[0]+self.iwidth\
and mp[1]<=self.double.pos[1]+self.iheight:
return "right"
def move_over(self,mp,rel,bt):
if not self.over(mp):
if self.high:
self.high = None
else:
if self.high == None:
self.high = True
    def click_down_over(self,mp):
        """Mouse press: highlight the big button, or route left/right flicks."""
        gui.window.focused = self
        over = self.over(mp)
        # Single button: just highlight; the release triggers the advance.
        if over == True and not self.high and self.can_click():
            self.high = True
        # Double-button layout: halves page through the testimony.
        if over == "left" and self.can_click() and self.showleft:
            self.textbox.k_left()
        if over == "right" and self.can_click():
            self.textbox.k_right()
def click_up_over(self,mp):
if self.high:
self.high = False
if self.can_click():
self.textbox.enter_down()
def can_click(self):
return self.textbox and | |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
import logging
import multiprocessing as mp
import queue
import time
import traceback
from typing import Callable, Dict, List, Optional, Set
import torch
import torch.distributed as td
import torch.multiprocessing
import torch.nn as nn
from torchbiggraph.distributed import Startable, init_process_group
from torchbiggraph.types import CharTensorType, ModuleStateDict, Rank
from torchbiggraph.util import tag_logs_with_process_name
logger = logging.getLogger("torchbiggraph")
################################################################################
# Generic parameter client-server protocol
################################################################################
# FIXME! This will be slow
def _tostring(t: CharTensorType) -> str:
    """Decode a 1-D int8 tensor of character codes back into a str.

    Converts the whole tensor to a Python list in a single `tolist()` call
    instead of one `.item()` tensor-scalar extraction per element, which is
    what the old FIXME ("This will be slow") complained about.
    """
    return "".join(map(chr, t.tolist()))
def _fromstring(s: str) -> CharTensorType:
    """Encode a Python string as a 1-D int8 tensor of character codes."""
    codes = [ord(ch) for ch in s]
    return torch.tensor(codes, dtype=torch.int8)
# Wire-protocol command codes, sent as the first element of each RPC header.
STORE_CMD = 1
GET_CMD = 2
JOIN_CMD = 3
SWAP_CMD = 4
# Tensor types allowed over the protocol; a tensor's type travels on the
# wire as its index in this list.
_tensor_types = [
    torch.FloatTensor,
    torch.DoubleTensor,
    torch.ByteTensor,
    torch.IntTensor,
    torch.LongTensor,
]
# Reverse map: type name (e.g. "torch.FloatTensor") -> index in _tensor_types.
_tensor_type_idx = {t().type(): i for i, t in enumerate(_tensor_types)}
class ParameterServer(Startable):
    """
    A simple parameter server. Clients can store tensors, accumulate, and
    get tensors by string key. Operations on the parameter server are globally
    synchronous.
    FIXME: torchbiggraph.rpc should be fixed to not require torch.serialization,
    then most of this code can be removed.
    FIXME: torch.distributed.recv should not require you to provide the
    tensor to write to; the type and size should be sent in the header.
    That would simplify this code a lot.
    """
    def __init__(self, num_clients: int, log_stats: bool = False) -> None:
        # num_clients: how many JOIN_CMDs must arrive before shutdown.
        self.num_clients = num_clients
        # All stored tensors, keyed by name.
        self.parameters: Dict[str, torch.Tensor] = {}
        self.log_stats = log_stats
    def start(self) -> None:
        """Serve requests until every client has sent JOIN_CMD."""
        join_count = 0
        while True:
            # 1. receive the command
            # Fixed-size header: [cmd, keylen, ndim, accum, overwrite, ttype].
            cmd_buffer = torch.full((6,), -1, dtype=torch.long)
            rank = td.recv(cmd_buffer)
            cmd = cmd_buffer[0].item()
            if cmd == STORE_CMD:
                key = self._recv_key(rank, cmd_buffer[1].item())
                self.handle_store(rank, key,
                                  cmd_buffer[2].item(),
                                  cmd_buffer[3].item(),
                                  cmd_buffer[4].item(),
                                  cmd_buffer[5].item())
            elif cmd == GET_CMD:
                key = self._recv_key(rank, cmd_buffer[1].item())
                self.handle_get(rank, key, cmd_buffer[2].item())
            elif cmd == SWAP_CMD:
                # Swap = store followed by a get of the (updated) value; the
                # client already knows the size, so send_size is False.
                key = self._recv_key(rank, cmd_buffer[1].item())
                self.handle_store(rank, key,
                                  cmd_buffer[2].item(),
                                  cmd_buffer[3].item(),
                                  cmd_buffer[4].item(),
                                  cmd_buffer[5].item())
                self.handle_get(rank, key, False)
            elif cmd == JOIN_CMD:
                join_count += 1
                if join_count == self.num_clients:
                    for r in range(self.num_clients):
                        # after sending the join cmd,
                        # each client waits on this ack to know everyone is done
                        # and it's safe to exit
                        td.send(torch.zeros((1,)), dst=r)
                    break
            else:
                raise RuntimeError("Command is unknown value %d from rank %d."
                                   % (cmd, rank))
    @staticmethod
    def _recv_key(rank: int, keylen: int) -> str:
        """Receive a string tensor key from a client node."""
        key_buffer = torch.zeros((keylen,), dtype=torch.int8)
        td.recv(key_buffer, src=rank)
        return _tostring(key_buffer)
    def handle_store(
        self,
        rank: int,
        key: str,
        ndim: int,
        accum: int,
        overwrite: int,
        ttype: int,
    ) -> None:
        """Receive a tensor from `rank` and store/accumulate it under `key`.

        ndim == -1 means "accumulate into the existing tensor's shape";
        otherwise the client sends the size vector next.
        """
        if ndim == -1:
            assert key in self.parameters
            size = self.parameters[key].size()
        else:
            size = torch.empty((ndim,), dtype=torch.long)
            td.recv(size, src=rank)
            size = size.tolist()
        tensor_type = _tensor_types[ttype]
        if not accum and overwrite and key in self.parameters:
            # avoid holding onto 2x the memory
            del self.parameters[key]
        data = tensor_type(*size)
        start_t = time.monotonic()
        td.recv(data, src=rank)
        end_t = time.monotonic()
        if self.log_stats:
            stats_size = data.numel() * data.element_size()
            stats_time = end_t - start_t
            logger.debug(
                f"Received tensor {key} from client {rank}: "
                f"{stats_size:,} bytes "
                f"in {stats_time:,g} seconds "
                f"=> {stats_size / stats_time:,.0f} B/s")
        if accum:
            self.parameters[key] += data
        elif (key not in self.parameters) or overwrite:
            self.parameters[key] = data
    def handle_get(self, rank: int, key: str, send_size: int) -> None:
        """Send the tensor stored under `key` to `rank`.

        When send_size is true, a [ndim, type] header plus the size vector
        precede the data; ndim == -1 signals a missing key.
        """
        if key not in self.parameters:
            assert send_size, "Key %s not found" % key
            td.send(torch.tensor([-1, -1], dtype=torch.long), rank)
            return
        data = self.parameters[key]
        if send_size:
            type_idx = _tensor_type_idx[data.type()]
            td.send(torch.tensor([data.ndimension(), type_idx], dtype=torch.long),
                    rank)
            td.send(torch.tensor(list(data.size()), dtype=torch.long), rank)
        start_t = time.monotonic()
        td.send(data, dst=rank)
        end_t = time.monotonic()
        if self.log_stats:
            stats_size = data.numel() * data.element_size()
            stats_time = end_t - start_t
            logger.debug(
                f"Sent tensor {key} to client {rank}: "
                f"{stats_size:,} bytes "
                f"in {stats_time:,g} seconds "
                f"=> {stats_size / stats_time:,.0f} B/s")
class ParameterClient:
    """Client for ParameterServer.
    Supports store, accumulate, swap, swap-accumulate, and get operations."""
    def __init__(self, server_rank: int, log_stats: bool = False) -> None:
        # Rank of the ParameterServer process this client talks to.
        self.server_rank = server_rank
        self.log_stats = log_stats
    def store(
        self,
        key: str,
        src: torch.Tensor,
        accum: bool = False,
        overwrite: bool = True,
    ) -> None:
        """Store or accumulate a tensor on the server.
        """
        # Header: [cmd, keylen, ndim (-1 => accumulate into existing shape),
        # accum flag, overwrite flag, tensor-type index].
        cmd_rpc = torch.tensor([STORE_CMD,
                                len(key),
                                -1 if accum else src.ndimension(),
                                int(accum),
                                int(overwrite),
                                _tensor_type_idx[src.type()]],
                               dtype=torch.long)
        td.send(cmd_rpc, self.server_rank)
        td.send(_fromstring(key), self.server_rank)
        if not accum:
            # Size vector only needed when the server may (re)allocate.
            td.send(torch.tensor(list(src.size()), dtype=torch.long), self.server_rank)
        start_t = time.monotonic()
        td.send(src, self.server_rank)
        end_t = time.monotonic()
        if self.log_stats:
            stats_size = src.numel() * src.element_size()
            stats_time = end_t - start_t
            logger.debug(
                f"Sent tensor {key} to server {self.server_rank}: "
                f"{stats_size:,} bytes "
                f"in {stats_time:,g} seconds "
                f"=> {stats_size / stats_time:,.0f} B/s")
    def get(
        self,
        key: str,
        dst: Optional[torch.Tensor] = None,
        shared: bool = False,
    ) -> Optional[torch.Tensor]:
        """Get a tensor from the server.
        """
        cmd_rpc = torch.tensor([GET_CMD, len(key), dst is None, 0, 0, 0], dtype=torch.long)
        td.send(cmd_rpc, self.server_rank)
        td.send(_fromstring(key), self.server_rank)
        if dst is None:
            # No destination provided: the server first sends [ndim, type];
            # ndim == -1 means the key does not exist.
            meta = torch.full((2,), -1, dtype=torch.long)
            td.recv(meta, src=self.server_rank)
            ndim, ttype = meta
            if ndim.item() == -1:
                return None
            size = torch.full((ndim.item(),), -1, dtype=torch.long)
            td.recv(size, src=self.server_rank)
            tensor_type = _tensor_types[ttype.item()]
            if shared:
                # Allocate in shared memory so other local processes can map it.
                dst_storage = tensor_type().storage_type()._new_shared(size.prod())
                dst = tensor_type(dst_storage).view(*size.tolist())
            else:
                dst = tensor_type(*size.tolist())
        start_t = time.monotonic()
        td.recv(dst, src=self.server_rank)
        end_t = time.monotonic()
        if self.log_stats:
            stats_size = dst.numel() * dst.element_size()
            stats_time = end_t - start_t
            logger.debug(
                f"Received tensor {key} from server {self.server_rank}: "
                f"{stats_size:,} bytes "
                f"in {stats_time:,g} seconds "
                f"=> {stats_size / stats_time:,.0f} B/s")
        return dst
    def swap(
        self,
        key: str,
        src: torch.Tensor,
        dst: Optional[torch.Tensor] = None,
        accum: bool = False,
        overwrite: bool = False,
    ) -> None:
        """Store or accumulate a tensor on the server,
        and then get its current value.
        """
        if dst is None:
            dst = torch.zeros_like(src)
        cmd_rpc = torch.tensor([SWAP_CMD,
                                len(key),
                                -1 if accum else src.ndimension(),
                                int(accum),
                                int(overwrite),
                                _tensor_type_idx[src.type()]],
                               dtype=torch.long)
        td.send(cmd_rpc, self.server_rank)
        td.send(_fromstring(key), self.server_rank)
        if not accum:
            td.send(torch.tensor(list(src.size()), dtype=torch.long),
                    self.server_rank)
        start_t = time.monotonic()
        td.send(src, self.server_rank)
        td.recv(dst, src=self.server_rank)
        end_t = time.monotonic()
        if self.log_stats:
            stats_size = \
                src.numel() * src.element_size() + dst.numel() * dst.element_size()
            stats_time = end_t - start_t
            logger.debug(
                f"Swapped tensor {key} with server {self.server_rank}: "
                f"{stats_size:,} bytes "
                f"in {stats_time:,g} seconds "
                f"=> {stats_size / stats_time:,.0f} B/s")
    def join(self) -> None:
        """All clients should call join at the end, which will allow the server
        to exit.
        """
        cmd_rpc = torch.tensor([JOIN_CMD, 0, 0, 0, 0, 0], dtype=torch.long)
        td.send(cmd_rpc, self.server_rank)
        # Wait for the server's ack, sent once all clients have joined.
        ack = torch.empty((1,))
        td.recv(ack, src=self.server_rank)
class GradientParameterClient:
    """Delta-based wrapper around ParameterClient.

    Remembers the last-known server value of every tensor; pushes send only
    the accumulated difference between the current local tensor and that
    cached copy, so concurrent clients' updates combine additively.
    """
    def __init__(self, server_rank: Rank) -> None:
        self._client = ParameterClient(server_rank)
        self._cache: Dict[str, torch.Tensor] = {}
    def push(self, key: str, tensor: torch.Tensor) -> None:
        """Send the local change in `tensor` to the server."""
        cached = self._cache.get(key)
        if cached is not None:
            delta = tensor - cached
            self._client.store(key, delta, accum=True)
            cached += delta
        else:
            # First push of this key: all clients race to set the initial
            # value; overwrite=False makes the first writer win.
            self._cache[key] = tensor.clone()
            self._client.store(key, self._cache[key], overwrite=False)
    def pull(self, key: str, dst: torch.Tensor) -> torch.Tensor:
        """Fetch the server's value into `dst` and refresh the cache."""
        self._client.get(key, dst)
        cached = self._cache.get(key)
        if cached is not None:
            cached.copy_(dst)
        else:
            self._cache[key] = dst.clone()
        return dst
    def update(self, key: str, tensor: torch.Tensor) -> None:
        """Push the local delta and pull the combined value in one swap."""
        cached = self._cache.get(key)
        if cached is not None:
            delta = tensor - cached
            self._client.swap(key, delta, cached, accum=True)
        else:
            # Same first-writer-wins race as in push().
            cached = self._cache[key] = tensor.clone()
            self._client.swap(key, cached, cached, overwrite=False)
        tensor.copy_(cached)
    def join(self) -> None:
        """Signal the server that this client is done."""
        self._client.join()
################################################################################
# Parameter sharer
################################################################################
# Parameters smaller than this are kept whole; larger ones may be sharded
# across parameter servers.
MIN_BYTES_TO_SHARD = 1e7  # only shard parameters above 10MB
def _client_thread_loop(
process_name: str,
client_rank: Rank,
all_server_ranks: List[Rank],
q: mp.Queue,
errq: mp.Queue,
init_method: Optional[str],
world_size: int,
groups: List[List[Rank]],
subprocess_init: Optional[Callable[[], None]] = None,
max_bandwidth: float = 1e8,
min_sleep_time: float = 0.01,
) -> None:
try:
tag_logs_with_process_name(process_name)
if subprocess_init is not None:
subprocess_init()
init_process_group(
rank=client_rank,
init_method=init_method,
world_size=world_size,
groups=groups,
)
params = {}
clients = [GradientParameterClient(server_rank)
for server_rank in all_server_ranks]
log_time, log_rounds, log_bytes = time.time(), 0, 0
# thread loop:
# 1. check for a command from the main process
# 2. update (push and pull) each parameter in my list of parameters
# 3. if we're going to fast, sleep for a while
while True:
tic = time.time()
bytes_transferred = 0
try:
data = q.get(timeout=0.01)
cmd, args = data
if cmd == "params":
| |
<reponame>rickyHong/Cirq-repl
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import math
from typing import List, cast
import numpy as np
import pytest
import cirq
def _make_qubits(n):
    """Return a list of n NamedQubits labeled q0 through q{n-1}."""
    names = ('q{}'.format(i) for i in range(n))
    return [cirq.NamedQubit(name) for name in names]
def _sample_qubit_pauli_maps():
    """Yield every qubit->Pauli assignment over 3 qubits.

    `None` choices are dropped from the mapping, so the yielded dicts range
    from empty up to all three qubits assigned.
    """
    qubits = _make_qubits(3)
    choices = (None, cirq.X, cirq.Y, cirq.Z)
    for assignment in itertools.product(choices, repeat=len(qubits)):
        yield {q: p for q, p in zip(qubits, assignment) if p is not None}
def test_eq_ne_hash():
    """PauliStrings compare equal iff qubit map and coefficient both match."""
    q0, q1, q2 = _make_qubits(3)
    eq = cirq.testing.EqualsTester()
    # Empty string with default coefficient equals an explicit +1.
    eq.make_equality_group(
        lambda: cirq.PauliString({}),
        lambda: cirq.PauliString({}, +1))
    eq.add_equality_group(cirq.PauliString({}, -1))
    # Each (qubit, Pauli, sign) combination is its own equality group.
    for q, pauli in itertools.product((q0, q1), (cirq.X, cirq.Y, cirq.Z)):
        eq.add_equality_group(cirq.PauliString({q: pauli}, +1))
        eq.add_equality_group(cirq.PauliString({q: pauli}, -1))
    # Two-qubit strings are distinguished by both Pauli assignments.
    for q, p0, p1 in itertools.product((q0, q1), (cirq.X, cirq.Y, cirq.Z),
                                       (cirq.X, cirq.Y, cirq.Z)):
        eq.add_equality_group(cirq.PauliString({q: p0, q2: p1}, +1))
def test_equal_up_to_coefficient():
    """equal_up_to_coefficient ignores the scalar coefficient (sign or
    complex factor) but still distinguishes the qubit->pauli mapping."""
    q0, = _make_qubits(1)
    assert cirq.PauliString({}, +1).equal_up_to_coefficient(
        cirq.PauliString({}, +1))
    assert cirq.PauliString({}, -1).equal_up_to_coefficient(
        cirq.PauliString({}, -1))
    assert cirq.PauliString({}, +1).equal_up_to_coefficient(
        cirq.PauliString({}, -1))
    assert cirq.PauliString({}, +1).equal_up_to_coefficient(
        cirq.PauliString({}, 2j))
    assert cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(
        cirq.PauliString({q0: cirq.X}, +1))
    assert cirq.PauliString({q0: cirq.X}, -1).equal_up_to_coefficient(
        cirq.PauliString({q0: cirq.X}, -1))
    assert cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(
        cirq.PauliString({q0: cirq.X}, -1))
    # Different paulis on the same qubit are never equal, whatever the sign.
    assert not cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(
        cirq.PauliString({q0: cirq.Y}, +1))
    assert not cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(
        cirq.PauliString({q0: cirq.Y}, 1j))
    assert not cirq.PauliString({q0: cirq.X}, -1).equal_up_to_coefficient(
        cirq.PauliString({q0: cirq.Y}, -1))
    assert not cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(
        cirq.PauliString({q0: cirq.Y}, -1))
    # A non-empty string never equals the identity string.
    assert not cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(
        cirq.PauliString({}, +1))
    assert not cirq.PauliString({q0: cirq.X}, -1).equal_up_to_coefficient(
        cirq.PauliString({}, -1))
    assert not cirq.PauliString({q0: cirq.X}, +1).equal_up_to_coefficient(
        cirq.PauliString({}, -1))
def test_exponentiation_as_exponent():
    """Scalars raised to a PauliString power (e**(i*t*P), 2**(...), np.exp)
    yield PauliStringPhasors; non-hermitian exponents and unsupported base
    types raise."""
    a, b = cirq.LineQubit.range(2)
    p = cirq.PauliString({a: cirq.X, b: cirq.Y})
    with pytest.raises(NotImplementedError, match='non-hermitian'):
        _ = math.e**(math.pi * p)
    with pytest.raises(TypeError, match='unsupported'):
        _ = 'test'**p
    assert cirq.approx_eq(
        math.e**(-1j * math.pi * p),
        cirq.PauliStringPhasor(p, exponent_neg=0.5, exponent_pos=-0.5))
    assert cirq.approx_eq(
        math.e**(0.5j * math.pi * p),
        cirq.PauliStringPhasor(p, exponent_neg=-0.25, exponent_pos=0.25))
    # Non-e bases pick up a log(base) factor in the exponents.
    assert cirq.approx_eq(
        2**(0.5j * math.pi * p),
        cirq.PauliStringPhasor(p,
                               exponent_neg=-0.25 * math.log(2),
                               exponent_pos=0.25 * math.log(2)))
    assert cirq.approx_eq(
        np.exp(0.5j * math.pi * p),
        cirq.PauliStringPhasor(p, exponent_neg=-0.25, exponent_pos=0.25))
def test_exponentiate_single_value_as_exponent():
    """Exponentiating a single-qubit Pauli matches the corresponding
    rotation gate (Rx/Ry/Rz) or PowGate."""
    q = cirq.LineQubit(0)
    assert cirq.approx_eq(math.e**(-0.25j * math.pi * cirq.X(q)),
                          cirq.Rx(0.25 * math.pi).on(q))
    assert cirq.approx_eq(math.e**(-0.25j * math.pi * cirq.Y(q)),
                          cirq.Ry(0.25 * math.pi).on(q))
    assert cirq.approx_eq(math.e**(-0.25j * math.pi * cirq.Z(q)),
                          cirq.Rz(0.25 * math.pi).on(q))
    assert cirq.approx_eq(np.exp(-0.3j * math.pi * cirq.X(q)),
                          cirq.Rx(0.3 * math.pi).on(q))
    assert cirq.approx_eq(cirq.X(q)**0.5, cirq.XPowGate(exponent=0.5).on(q))
    assert cirq.approx_eq(cirq.Y(q)**0.5, cirq.YPowGate(exponent=0.5).on(q))
    assert cirq.approx_eq(cirq.Z(q)**0.5, cirq.ZPowGate(exponent=0.5).on(q))
def test_exponentiation_as_base():
    """A PauliString raised to a power yields a PauliStringPhasor; scaled
    (non-unitary) strings and unsupported exponent types raise."""
    a, b = cirq.LineQubit.range(2)
    p = cirq.PauliString({a: cirq.X, b: cirq.Y})
    with pytest.raises(NotImplementedError, match='non-unitary'):
        _ = (2 * p)**5
    with pytest.raises(TypeError, match='unsupported'):
        _ = p**'test'
    with pytest.raises(TypeError, match='unsupported'):
        _ = p**1j
    # A PauliString is its own inverse (hermitian and unitary).
    assert p**-1 == p
    assert cirq.approx_eq(
        p**0.5, cirq.PauliStringPhasor(p, exponent_neg=0.5, exponent_pos=0))
    assert cirq.approx_eq(
        p**-0.5, cirq.PauliStringPhasor(p, exponent_neg=-0.5, exponent_pos=0))
    assert cirq.approx_eq(
        math.e**(0.5j * math.pi * p),
        cirq.PauliStringPhasor(p, exponent_neg=-0.25, exponent_pos=0.25))
    assert cirq.approx_eq(
        2**(0.5j * math.pi * p),
        cirq.PauliStringPhasor(p,
                               exponent_neg=-0.25 * math.log(2),
                               exponent_pos=0.25 * math.log(2)))
    assert cirq.approx_eq(
        np.exp(0.5j * math.pi * p),
        cirq.PauliStringPhasor(p, exponent_neg=-0.25, exponent_pos=0.25))
@pytest.mark.parametrize('pauli', (cirq.X, cirq.Y, cirq.Z))
def test_from_single(pauli):
    """from_single(q, p) builds the same object as the dict constructor."""
    q0, = _make_qubits(1)
    built = cirq.PauliString.from_single(q0, pauli)
    expected = cirq.PauliString({q0: pauli})
    assert built == expected
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_getitem(qubit_pauli_map):
    """Indexing a PauliString mirrors dict indexing, including KeyError."""
    other = cirq.NamedQubit('other')
    pauli_string = cirq.PauliString(qubit_pauli_map)
    for qubit, pauli in qubit_pauli_map.items():
        assert pauli_string[qubit] == pauli
    with pytest.raises(KeyError):
        _ = qubit_pauli_map[other]
    with pytest.raises(KeyError):
        _ = pauli_string[other]
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_get(qubit_pauli_map):
    """PauliString.get mirrors dict.get for present and absent qubits."""
    other = cirq.NamedQubit('other')
    pauli_string = cirq.PauliString(qubit_pauli_map)
    for key in qubit_pauli_map:
        assert qubit_pauli_map.get(key) == pauli_string.get(key)
    # Absent key without a default: both return None.  (PEP 8 / E711:
    # compare against None with `is`, not `==`.)
    assert qubit_pauli_map.get(other) is None
    assert pauli_string.get(other) is None
    # pylint: disable=too-many-function-args
    assert qubit_pauli_map.get(other, 5) == pauli_string.get(other, 5) == 5
    # pylint: enable=too-many-function-args
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_contains(qubit_pauli_map):
    """`in` on a PauliString matches membership in the source mapping."""
    other = cirq.NamedQubit('other')
    pauli_string = cirq.PauliString(qubit_pauli_map)
    assert all(qubit in pauli_string for qubit in qubit_pauli_map)
    assert other not in pauli_string
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_keys(qubit_pauli_map):
    """keys() and qubits agree with the source mapping in size and content."""
    pauli_string = cirq.PauliString(qubit_pauli_map)
    expected = set(qubit_pauli_map.keys())
    assert len(pauli_string.keys()) == len(expected)
    assert len(pauli_string.qubits) == len(expected)
    assert set(pauli_string.keys()) == expected
    assert set(pauli_string.qubits) == expected
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_items(qubit_pauli_map):
    """items() agrees with the source mapping in size and content."""
    pauli_string = cirq.PauliString(qubit_pauli_map)
    assert len(pauli_string.items()) == len(qubit_pauli_map.items())
    assert set(pauli_string.items()) == set(qubit_pauli_map.items())
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_values(qubit_pauli_map):
    """values() agrees with the source mapping in size and content."""
    pauli_string = cirq.PauliString(qubit_pauli_map)
    assert len(pauli_string.values()) == len(qubit_pauli_map.values())
    assert set(pauli_string.values()) == set(qubit_pauli_map.values())
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_len(qubit_pauli_map):
    """len() of a PauliString equals the number of mapped qubits."""
    pauli_string = cirq.PauliString(qubit_pauli_map)
    assert len(pauli_string) == len(qubit_pauli_map)
@pytest.mark.parametrize('qubit_pauli_map', _sample_qubit_pauli_maps())
def test_iter(qubit_pauli_map):
    """Iterating a PauliString yields the same qubits as the source mapping."""
    pauli_string = cirq.PauliString(qubit_pauli_map)
    from_map = tuple(qubit_pauli_map)
    from_string = tuple(pauli_string)
    assert len(from_map) == len(from_string)
    assert set(from_map) == set(from_string)
def test_repr():
    """repr() of PauliStrings (and scalar multiples) round-trips via eval."""
    q0, q1, q2 = _make_qubits(3)
    pauli_string = cirq.PauliString({q2: cirq.X, q1: cirq.Y, q0: cirq.Z})
    variants = (
        pauli_string,
        -pauli_string,
        1j * pauli_string,
        2 * pauli_string,
        cirq.PauliString(),
    )
    for variant in variants:
        cirq.testing.assert_equivalent_repr(variant)
def test_str():
    """str() lists factors in qubit order with a coefficient prefix; the
    empty string renders as the identity 'I'."""
    q0, q1, q2 = _make_qubits(3)
    pauli_string = cirq.PauliString({q2: cirq.X, q1: cirq.Y, q0: cirq.Z})
    assert str(cirq.PauliString({})) == 'I'
    assert str(-cirq.PauliString({})) == '-I'
    assert str(pauli_string) == 'Z(q0)*Y(q1)*X(q2)'
    assert str(-pauli_string) == '-Z(q0)*Y(q1)*X(q2)'
    assert str(1j*pauli_string) == '1j*Z(q0)*Y(q1)*X(q2)'
    assert str(pauli_string*-1j) == '-1j*Z(q0)*Y(q1)*X(q2)'
# The parametrize argument is built by immediately invoking a lambda with
# three fresh qubits, so each case can reference q0/q1/q2.
@pytest.mark.parametrize('map1,map2,out', (lambda q0, q1, q2: (
    ({}, {}, {}),
    ({q0: cirq.X}, {q0: cirq.Y}, {q0: (cirq.X, cirq.Y)}),
    ({q0: cirq.X}, {q1: cirq.X}, {}),
    ({q0: cirq.Y, q1: cirq.Z}, {q1: cirq.Y, q2: cirq.X},
     {q1: (cirq.Z, cirq.Y)}),
    ({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {}, {}),
    ({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {q0: cirq.Y, q1: cirq.Z},
     {q0: (cirq.X, cirq.Y), q1: (cirq.Y, cirq.Z)}),
))(*_make_qubits(3)))
def test_zip_items(map1, map2, out):
    """zip_items yields (qubit, (pauli1, pauli2)) only for shared qubits."""
    ps1 = cirq.PauliString(map1)
    ps2 = cirq.PauliString(map2)
    out_actual = tuple(ps1.zip_items(ps2))
    assert len(out_actual) == len(out)
    assert dict(out_actual) == out
# The parametrize argument is built by immediately invoking a lambda with
# three fresh qubits, so each case can reference q0/q1/q2.
@pytest.mark.parametrize('map1,map2,out', (lambda q0, q1, q2: (
    ({}, {}, ()),
    ({q0: cirq.X}, {q0: cirq.Y}, ((cirq.X, cirq.Y),)),
    ({q0: cirq.X}, {q1: cirq.X}, ()),
    ({q0: cirq.Y, q1: cirq.Z}, {q1: cirq.Y, q2: cirq.X},
     ((cirq.Z, cirq.Y),)),
    ({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {}, ()),
    ({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}, {q0: cirq.Y, q1: cirq.Z},
     # Order not necessary
     ((cirq.X, cirq.Y), (cirq.Y, cirq.Z)))
))(*_make_qubits(3)))
def test_zip_paulis(map1, map2, out):
    """zip_paulis yields (pauli1, pauli2) pairs for qubits in both strings."""
    ps1 = cirq.PauliString(map1)
    ps2 = cirq.PauliString(map2)
    out_actual = tuple(ps1.zip_paulis(ps2))
    assert len(out_actual) == len(out)
    # With at most one pair, order is determined and exact equality holds.
    if len(out) <= 1:
        assert out_actual == out
    assert set(out_actual) == set(out)  # Ignore output order
def test_commutes_with():
    """Commutation of PauliStrings: equal or disjoint single-qubit factors
    commute; an odd number of anticommuting overlaps makes the strings
    anticommute."""
    q0, q1, q2 = _make_qubits(3)
    # Single-qubit cases: same pauli or different qubits commute.
    assert cirq.PauliString.from_single(q0, cirq.X).commutes_with(
        cirq.PauliString.from_single(q0, cirq.X))
    assert not cirq.PauliString.from_single(q0, cirq.X).commutes_with(
        cirq.PauliString.from_single(q0, cirq.Y))
    assert cirq.PauliString.from_single(q0, cirq.X).commutes_with(
        cirq.PauliString.from_single(q1, cirq.X))
    assert cirq.PauliString.from_single(q0, cirq.X).commutes_with(
        cirq.PauliString.from_single(q1, cirq.Y))
    # Two-qubit strings with full overlap.
    assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q0: cirq.X, q1: cirq.Y}))
    assert not cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q0: cirq.X, q1: cirq.Z}))
    assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q0: cirq.Y, q1: cirq.X}))
    assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q0: cirq.Y, q1: cirq.Z}))
    # A third, non-overlapping qubit does not change the commutator.
    assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q0: cirq.X, q1: cirq.Y, q2: cirq.Z}))
    assert not cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q0: cirq.X, q1: cirq.Z, q2: cirq.Z}))
    assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q0: cirq.Y, q1: cirq.X, q2: cirq.Z}))
    assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q0: cirq.Y, q1: cirq.Z, q2: cirq.X}))
    # Partial overlap: only the shared qubit (q1) determines commutation.
    assert cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q2: cirq.X, q1: cirq.Y}))
    assert not cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q2: cirq.X, q1: cirq.Z}))
    assert not cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q2: cirq.Y, q1: cirq.X}))
    assert not cirq.PauliString({q0: cirq.X, q1: cirq.Y}).commutes_with(
        cirq.PauliString({q2: cirq.Y, q1: cirq.Z}))
def test_negate():
    """Unary minus flips the coefficient sign and is its own inverse."""
    q0, q1 = _make_qubits(2)
    mapping = {q0: cirq.X, q1: cirq.Y}
    positive = cirq.PauliString(mapping)
    negative = cirq.PauliString(mapping, -1)
    assert -positive == negative
    assert positive == -negative
    assert -(-positive) == positive
def test_mul_scalar():
    """Scalar multiplication commutes and accepts int/float/complex, but
    rejects non-numeric operands."""
    a, b = cirq.LineQubit.range(2)
    p = cirq.PauliString({a: cirq.X, b: cirq.Y})
    negated = -p
    assert negated == -1 * p
    assert negated == -1.0 * p
    assert negated == p * -1
    assert negated == p * complex(-1)
    assert negated != 1j * p
    assert +p == 1 * p
    with pytest.raises(TypeError):
        _ = p * 'test'
    with pytest.raises(TypeError):
        _ = 'test' * p
def test_mul_strings():
    """Multiplying PauliStrings composes factors qubit-wise (picking up
    phase from anticommuting pairs); a Pauli times itself is the identity."""
    a, b, c, d = cirq.LineQubit.range(4)
    p1 = cirq.PauliString({a: cirq.X, b: cirq.Y, c: cirq.Z})
    p2 = cirq.PauliString({b: cirq.X, c: cirq.Y, d: cirq.Z})
    assert p1 * p2 == -cirq.PauliString({
        a: cirq.X,
        b: cirq.Z,
        c: cirq.X,
        d: cirq.Z,
    })
    # X*X collapses to the identity string.
    assert cirq.X(a) * cirq.PauliString({a: cirq.X}) == cirq.PauliString()
    assert cirq.PauliString({a: cirq.X}) * cirq.X(a) == cirq.PauliString()
    assert cirq.X(a) * cirq.X(a) == cirq.PauliString()
    assert -cirq.X(a) * -cirq.X(a) == cirq.PauliString()
    with pytest.raises(TypeError, match='unsupported'):
        _ = cirq.X(a) * object()
    with pytest.raises(TypeError, match='unsupported'):
        _ = object() * cirq.X(a)
    assert -cirq.X(a) == -cirq.PauliString({a: cirq.X})
def test_op_equivalence():
    """The several spellings of 'X on qubit a' (gate call, PauliString,
    from_single, explicit operation classes) are all equal and repr-stable."""
    a, b = cirq.LineQubit.range(2)
    various_x = [
        cirq.X(a),
        cirq.PauliString({a: cirq.X}),
        cirq.PauliString.from_single(a, cirq.X),
        cirq.SingleQubitPauliStringGateOperation(cirq.X, a),
        cirq.GateOperation(cirq.X, [a]),
    ]
    for x in various_x:
        cirq.testing.assert_equivalent_repr(x)
    eq = cirq.testing.EqualsTester()
    eq.add_equality_group(*various_x)
    # Different pauli, sign, or qubit each form a distinct group.
    eq.add_equality_group(cirq.Y(a), cirq.PauliString({a: cirq.Y}))
    eq.add_equality_group(-cirq.PauliString({a: cirq.X}))
    eq.add_equality_group(cirq.Z(a), cirq.PauliString({a: cirq.Z}))
    eq.add_equality_group(cirq.Z(b), cirq.PauliString({b: cirq.Z}))
def test_op_product():
a, b = cirq.LineQubit.range(2)
assert cirq.X(a) * cirq.X(b) == cirq.PauliString({a: cirq.X, b: cirq.X})
assert cirq.X(a) * cirq.Y(b) == cirq.PauliString({a: cirq.X, b: cirq.Y})
assert cirq.Z(a) * cirq.Y(b) == cirq.PauliString({a: cirq.Z, b: cirq.Y})
assert cirq.X(a) * cirq.X(a) == cirq.PauliString()
assert cirq.X(a) * cirq.Y(a) == 1j * cirq.PauliString({a: cirq.Z})
assert cirq.Y(a) * cirq.Z(b) * cirq.X(a) == -1j * cirq.PauliString({
| |
# repo: claraElk/cc_saflow
import os
import numpy as np
import pandas as pd
import mne
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
from autoreject import AutoReject
from scipy.io import loadmat, savemat
#from brainpipe import feature
from src.saflow_params import BIDS_PATH, LOGS_DIR
from mne.io import read_raw_fif, read_raw_ctf
#from hytools.meg_utils import get_ch_pos
from src.utils import get_SAflow_bids
from src.behav import find_logfile, get_VTC_from_file
import random
from matplotlib.pyplot import close
from mne.time_frequency import psd_multitaper, psd_welch
import pickle
def find_rawfile(subj, bloc, BIDS_PATH):
    """Locate the raw MEG recording for a subject/bloc in a BIDS tree.

    Parameters
    ----------
    subj : str
        Subject identifier, used to build the 'sub-{subj}' directory name.
    bloc : str
        Single-character bloc/run label, matched against the 8th character
        from the end of each candidate filename.
    BIDS_PATH : str
        Root of the BIDS dataset (concatenated directly with the relative
        path, which starts with '/').

    Returns
    -------
    (filepath, filename) : tuple of str
        Relative directory and the matching file name.  If several files
        match, the last one listed wins (unchanged behaviour).

    Raises
    ------
    FileNotFoundError
        If no file matches; previously an unbound ``filename`` raised an
        opaque NameError here.
    """
    filepath = '/sub-{}/ses-recording/meg/'.format(subj)
    filename = None
    for file in os.listdir(BIDS_PATH + filepath):
        # Bloc id is encoded as the 8th-from-last character of the name.
        if file[-8] == bloc:
            filename = file
    if filename is None:
        raise FileNotFoundError(
            'No file matching bloc {} in {}'.format(bloc, BIDS_PATH + filepath))
    return filepath, filename
def saflow_preproc(filepath, savepath, reportpath, ica=True):
    """Preprocess a raw CTF MEG recording and write an HTML report.

    Pipeline: load + grade-3 gradient compensation, band-pass 0.5-200 Hz,
    60 Hz (and harmonics) notch, then optionally ICA-based removal of ECG
    and EOG components.  The filtered (and ICA-cleaned) data is saved to
    ``savepath`` and the figures collected along the way to ``reportpath``.

    Parameters
    ----------
    filepath : str
        Path of the raw CTF (.ds) recording.
    savepath : str
        Destination for the preprocessed raw file.
    reportpath : str
        Destination for the MNE HTML report.
    ica : bool
        If True, run ICA and remove ECG/EOG components before saving.
        NOTE(review): this parameter is later shadowed by the fitted ICA
        object inside the elif branch — works, but confusing to read.
    """
    report = mne.Report(verbose=True)
    raw_data = read_raw_ctf(filepath, preload=True)
    raw_data = raw_data.apply_gradient_compensation(grade=3) #required for source reconstruction
    picks = mne.pick_types(raw_data.info, meg=True, eog=True, exclude='bads')
    fig = raw_data.plot(show=False);
    report.add_figs_to_section(fig, captions='Time series', section='Raw data')
    close(fig)
    fig = raw_data.plot_psd(average=False, picks=picks, show=False);
    report.add_figs_to_section(fig, captions='PSD', section='Raw data')
    close(fig)
    ## Filtering
    high_cutoff = 200
    low_cutoff = 0.5
    raw_data.filter(low_cutoff, high_cutoff, fir_design="firwin")
    # Notch out line noise at 60 Hz and all harmonics up to the high cutoff.
    raw_data.notch_filter(np.arange(60, high_cutoff+1, 60), picks=picks, filter_length='auto',phase='zero', fir_design="firwin")
    fig = raw_data.plot_psd(average=False, picks=picks, fmax=120, show=False);
    report.add_figs_to_section(fig, captions='PSD', section='Filtered data')
    close(fig)
    if ica == False :
        # No ICA requested: save the filtered data and report as-is.
        report.save(reportpath, open_browser=False, overwrite=True);
        raw_data.save(savepath, overwrite=True)
        del report
        del raw_data
        del fig
    elif ica == True :
        ## ICA
        ica = ICA(n_components=20, random_state=0).fit(raw_data, decim=3)
        fig = ica.plot_sources(raw_data, show=False);
        report.add_figs_to_section(fig, captions='Independent Components', section='ICA')
        close(fig)
        ## FIND ECG COMPONENTS
        # ECG is recorded on channel EEG059 (per the ch_name arguments below).
        ecg_threshold = 0.50
        ecg_epochs = create_ecg_epochs(raw_data, ch_name='EEG059')
        ecg_inds, ecg_scores = ica.find_bads_ecg(ecg_epochs, ch_name='EEG059', method='ctps', threshold=ecg_threshold)
        fig = ica.plot_scores(ecg_scores, ecg_inds, show=False);
        report.add_figs_to_section(fig, captions='Correlation with ECG (EEG059)', section='ICA - ECG')
        close(fig)
        fig = list()
        try:
            fig = ica.plot_properties(ecg_epochs, picks=ecg_inds, image_args={'sigma': 1.}, show=False);
            for i, figure in enumerate(fig):
                report.add_figs_to_section(figure, captions='Detected component ' + str(i), section='ICA - ECG')
                close(figure)
        except:
            print('No component to remove')
        ## FIND EOG COMPONENTS
        # EOG is recorded on channel EEG057 (per the ch_name arguments below).
        eog_threshold = 4
        eog_epochs = create_eog_epochs(raw_data, ch_name='EEG057')
        eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name='EEG057', threshold=eog_threshold)
        #TODO : if eog_inds == [] then eog_inds = [index(max(abs(eog_scores)))]
        fig = ica.plot_scores(eog_scores, eog_inds, show=False);
        report.add_figs_to_section(fig, captions='Correlation with EOG (EEG057)', section='ICA - EOG')
        close(fig)
        fig = list()
        try:
            fig = ica.plot_properties(eog_epochs, picks=eog_inds, image_args={'sigma': 1.}, show=False);
            for i, figure in enumerate(fig):
                report.add_figs_to_section(figure, captions='Detected component ' + str(i), section='ICA - EOG')
                close(figure)
        except:
            print('No component to remove')
        ## EXCLUDE COMPONENTS
        # Apply ECG then EOG exclusions in two passes.
        ica.exclude = ecg_inds
        ica.apply(raw_data)
        ica.exclude = eog_inds
        ica.apply(raw_data)
        fig = raw_data.plot(show=False); # Plot the clean signal.
        report.add_figs_to_section(fig, captions='After filtering + ICA', section='Raw data')
        close(fig)
        ## SAVE PREPROCESSED FILE
        report.save(reportpath, open_browser=False, overwrite=True);
        raw_data.save(savepath, overwrite=True)
        del ica
        del report
        del raw_data
        del fig
def segment_files(bids_filepath, tmin=0, tmax=0.8):
    """Epoch a preprocessed raw file around stimulus events and run
    AutoReject.

    Returns the cleaned Epochs object together with the AutoReject log
    describing which epochs were dropped.
    """
    raw = read_raw_fif(bids_filepath, preload=True)
    picks = mne.pick_types(raw.info, meg=True, ref_meg=True, eeg=False,
                           eog=False, stim=False)
    baseline = None
    # Some recordings need a 2-sample minimum duration before events are
    # detected; retry with the longer duration when the first call fails.
    try:
        events = mne.find_events(raw, min_duration=1/raw.info['sfreq'],
                                 verbose=False)
    except ValueError:
        events = mne.find_events(raw, min_duration=2/raw.info['sfreq'],
                                 verbose=False)
    event_id = {'Freq': 21, 'Rare': 31}
    epochs = mne.Epochs(raw, events=events, event_id=event_id, tmin=tmin,
                        tmax=tmax, baseline=baseline, reject=None,
                        picks=picks, preload=True)
    cleaner = AutoReject(n_jobs=6)
    epochs_clean, autoreject_log = cleaner.fit_transform(epochs,
                                                         return_log=True)
    return epochs_clean, autoreject_log
def remove_errors(logfile, events):
    """Split stimulus events into correct, commission-error and
    omission-error trials using the behavioural log.

    A 'Freq' stimulus (code 21) is correct when it received a response
    (nonzero RT) and an omission error otherwise; a 'Rare' stimulus
    (code 31) is correct when it was NOT responded to, and a commission
    error otherwise.

    Parameters
    ----------
    logfile : str
        Path to the .mat behavioural log whose 'response' matrix holds
        reaction times in its 5th column.
    events : array-like
        Raw event rows [sample, _, code], including response events (99).

    Returns
    -------
    (events_noerr, events_comerr, events_omerr) : three np.ndarray
    """
    data = loadmat(logfile)
    rt_values = np.asarray(pd.DataFrame(data['response']).loc[:, 4])
    # Drop response events (code 99) so stimuli align with the RT rows.
    stim_events = np.array([ev for ev in events if ev[2] != 99])
    correct = []
    commission = []
    omission = []
    for trial_idx, event in enumerate(stim_events):
        responded = rt_values[trial_idx] != 0
        if event[2] == 21:
            (correct if responded else omission).append(event)
        if event[2] == 31:
            (commission if responded else correct).append(event)
    return np.array(correct), np.array(commission), np.array(omission)
def trim_events(events_noerr, events_artrej):
    """Intersect correct events with artifact-clean events.

    Keeps the rows of ``events_artrej`` whose sample time (column 0) also
    appears in ``events_noerr``, and records their positions within
    ``events_artrej``.

    Returns
    -------
    (events_trimmed, idx_trimmed) : np.ndarray, np.ndarray
    """
    correct_samples = events_noerr[:, 0]
    kept_rows = []
    kept_positions = []
    for position, event in enumerate(events_artrej):
        if event[0] in correct_samples:
            kept_rows.append(event)
            kept_positions.append(position)
    events_trimmed = np.array(kept_rows)
    idx_trimmed = np.array(kept_positions)
    print('N events in clean epochs : {}'.format(len(events_artrej)))
    print('N events in correct epochs : {}'.format(len(events_noerr)))
    print('N events in intersection : {}'.format(len(idx_trimmed)))
    return events_trimmed, idx_trimmed
def trim_INOUT_idx(INidx, OUTidx, events_trimmed, events):
    """Re-express IN/OUT trial indices as indices into the trimmed events.

    ``INidx``/``OUTidx`` index the stimulus-only event list derived from
    ``events``; the returned arrays index ``events_trimmed`` instead.
    """
    # Keep only stimulus events (drop response code 99) so indices line up.
    stim_events = np.array([ev for ev in events if ev[2] != 99])
    in_trimmed = []
    out_trimmed = []
    # Match rows by sample time (column 0) and translate the indices.
    for stim_pos, stim_ev in enumerate(stim_events):
        for trim_pos, trim_ev in enumerate(events_trimmed):
            if stim_ev[0] == trim_ev[0]:
                if stim_pos in INidx:
                    in_trimmed.append(trim_pos)
                if stim_pos in OUTidx:
                    out_trimmed.append(trim_pos)
    return np.array(in_trimmed), np.array(out_trimmed)
def get_odd_epochs(BIDS_PATH, LOGS_DIR, subj, bloc, stage='-epo'):
    '''
    Return clean-epoch indices split by stimulus type and correctness.

    Re-derives the original stimulus events from the raw file, removes
    error trials using the behavioural logfile, intersects with the epochs
    that survived artifact rejection, and returns four index arrays (into
    the clean epochs): Freq hits, Freq misses (omissions), Rare hits,
    Rare misses (commissions).
    '''
    ### Get events after artifact rejection have been performed
    epo_path, epo_filename = get_SAflow_bids(BIDS_PATH, subj, bloc, stage=stage, cond=None)
    events_artrej = mne.read_events(epo_filename, verbose=False) # get events from the epochs file (so no resp event)
    ### Get original events from the raw file, to compare them to the events left in the epochs file
    events_fname, events_fpath = get_SAflow_bids(BIDS_PATH, subj, bloc, stage='preproc_raw', cond=None)
    raw = read_raw_fif(events_fpath, preload=False, verbose=False)#, min_duration=2/epochs.info['sfreq'])
    # Retry with a longer minimum duration if event detection fails.
    try:
        events = mne.find_events(raw, min_duration=1/raw.info['sfreq'], verbose=False)
    except ValueError:
        events = mne.find_events(raw, min_duration=2/raw.info['sfreq'], verbose=False)
    # Get the list of hits/miss events
    log_file = LOGS_DIR + find_logfile(subj,bloc,os.listdir(LOGS_DIR))
    events_noerr, events_comerr, events_omerr = remove_errors(log_file, events)
    # Keep only events that are clean, and split them by condition
    # Start with correct events
    events_noerr_trimmed, idx_noerr_trimmed = trim_events(events_noerr, events_artrej)
    # Code 21 = 'Freq' stimuli, code 31 = 'Rare' stimuli.
    freqs_hits_idx = np.array([idx_noerr_trimmed[i] for i, x in enumerate(events_noerr_trimmed) if x[2] == 21])
    rares_hits_idx = np.array([idx_noerr_trimmed[i] for i, x in enumerate(events_noerr_trimmed) if x[2] == 31])
    # Then commission errors
    if events_comerr.size > 0:
        events_comerr_trimmed, idx_comerr_trimmed = trim_events(events_comerr, events_artrej)
        rares_miss_idx = np.array(idx_comerr_trimmed)
    else:
        rares_miss_idx = np.array([])
    # And finally ommission errors
    if events_omerr.size > 0:
        events_omerr_trimmed, idx_omerr_trimmed = trim_events(events_omerr, events_artrej)
        freqs_miss_idx = np.array(idx_omerr_trimmed)
    else:
        freqs_miss_idx = np.array([])
    return freqs_hits_idx, freqs_miss_idx, rares_hits_idx, rares_miss_idx
def get_VTC_epochs(BIDS_PATH, LOGS_DIR, subj, bloc, stage='-epo', lobound=None, hibound=None, save_epochs=False, filt_order=3, filt_cutoff=0.1):
    '''
    Split clean epochs into attentional IN/OUT classes using the VTC.

    Uses the behavioural logfile to compute the variance time course (VTC)
    and its IN/OUT classification, then matches those trials against the
    epochs that survived artifact rejection by comparing event timestamps.
    Returns IN and OUT index arrays (into the clean, correct epochs), the
    per-epoch VTC values, and the indices of the retained epochs; the IN and
    OUT outputs are meant for the split_PSD_data function.
    NOTE(review): ``save_epochs`` is accepted but never used here.
    '''
    ### Get events after artifact rejection have been performed
    epo_path, epo_filename = get_SAflow_bids(BIDS_PATH, subj, bloc, stage=stage, cond=None)
    events_artrej = mne.read_events(epo_filename, verbose=False) # get events from the epochs file (so no resp event)
    ### Find logfile to extract VTC
    log_file = LOGS_DIR + find_logfile(subj,bloc,os.listdir(LOGS_DIR))
    VTC, INbounds, OUTbounds, INidx, OUTidx, RT_array = get_VTC_from_file(log_file, lobound=lobound, hibound=hibound, filt=True, filt_order=filt_order, filt_cutoff=filt_cutoff)
    ### Get original events and split them using the VTC
    events_fname, events_fpath = get_SAflow_bids(BIDS_PATH, subj, bloc, stage='preproc_raw', cond=None)
    raw = read_raw_fif(events_fpath, preload=False, verbose=False)#, min_duration=2/epochs.info['sfreq'])
    # Retry with a longer minimum duration if event detection fails.
    try:
        events = mne.find_events(raw, min_duration=1/raw.info['sfreq'], verbose=False)
    except ValueError:
        events = mne.find_events(raw, min_duration=2/raw.info['sfreq'], verbose=False)
    events_noerr, events_comerr, events_omerr = remove_errors(log_file, events)
    # Keep only events that are correct and clean
    events_trimmed, idx_trimmed = trim_events(events_noerr, events_artrej)
    # Write INidx and OUTidx as indices of clean events
    INidx, OUTidx = trim_INOUT_idx(INidx, OUTidx, events_trimmed, events)
    VTC_epo = np.array([VTC[idx] for idx in idx_trimmed])
    return INidx, OUTidx, VTC_epo, idx_trimmed
def compute_PSD(epochs, freqlist=None, method='multitaper'):
    """Compute band-averaged power spectral densities for each epoch.

    Parameters
    ----------
    epochs : mne.Epochs
        Epoched data to compute PSDs from.
    freqlist : list of [low, high] pairs, optional
        Frequency bands in Hz.  Defaults to theta through high-gamma bands.
    method : {'multitaper', 'pwelch'}
        Spectral estimation method.

    Returns
    -------
    np.ndarray
        PSDs in dB, shape (n_epochs, n_channels, n_bands).

    Raises
    ------
    ValueError
        If ``method`` is not recognised.  (Previously an unknown method
        left ``psds`` unbound and raised an opaque NameError.)
    """
    if freqlist is None:  # `is None`, not `== None` (PEP 8)
        freqlist = [[4, 8], [8, 12], [12, 20], [20, 30], [30, 60], [60, 90],
                    [90, 120]]
    fmin = min(min(freqlist))
    fmax = max(max(freqlist))
    # Compute PSD
    if method == 'multitaper':
        psds, freqs = psd_multitaper(epochs, fmin=fmin, fmax=fmax, n_jobs=1)
    elif method == 'pwelch':
        psds, freqs = psd_welch(epochs, average='median', fmin=fmin,
                                fmax=fmax, n_jobs=1)
    else:
        raise ValueError("Unknown PSD method: {}".format(method))
    psds = 10. * np.log10(psds)  # Convert power to dB scale.
    # Average within each requested frequency band.
    epochs_psds = []
    for low, high in freqlist:
        freq_idx = [i for i, x in enumerate(freqs) if low <= x <= high]
        epochs_psds.append(np.mean(psds[:, :, freq_idx], axis=2))
    # (n_bands, n_epochs, n_chan) -> (n_epochs, n_chan, n_bands)
    return np.array(epochs_psds).swapaxes(2, 0).swapaxes(1, 0)
def compute_PSD_hilbert(raw, ARlog, freqlist=None, tmin=0, tmax=0.8):
epochs_envelopes = []
if freqlist == None:
freqlist = [ [4, 8], [8, 12], [12, 20], [20, 30], [30, 60], [60, 90], [90, 120] ]
for low, high in freqlist:
# | |
#!/usr/bin/python
# -*- coding: utf8 -*-
# cp936
"""
建立HDF5索引
"""
import datetime
import tables
class IndexRecord(tables.IsDescription):
    """Row schema for a period index table: one row per period, mapping the
    period-start datetime (encoded as a packed uint64) to the offset of the
    period's first row in the underlying data table."""
    # Encoded start-of-period datetime (see the getNewDate helpers below).
    datetime = tables.UInt64Col() #IGNORE:E1101
    # Row offset of the period's first record in the data table.
    start = tables.UInt64Col() #IGNORE:E1101
def UpdateWeekIndex(h5file):
    """Build/refresh the weekly index under '/week' for every data table.

    For each table under '/data', appends (week-start datetime, row offset)
    pairs for the periods added since the last run; already-indexed periods
    are left untouched.
    """
    try:
        group = h5file.getNode("/", "week")
    except tables.NoSuchNodeError:  # was a bare except: hides real errors
        group = h5file.createGroup("/", "week")
    def getNewDate(olddate):
        """Map an encoded datetime to the Monday of its week, at 00:00."""
        # Floor division (//): with `/` this silently breaks on Python 3,
        # where int/int yields a float and datetime.date() then raises.
        y = olddate // 100000000
        m = olddate // 1000000 - y * 100
        d = olddate // 10000 - (y * 10000 + m * 100)
        tempdate = datetime.date(y, m, d)
        # Step back to the Monday of the same week.
        tempweekdate = tempdate - datetime.timedelta(tempdate.weekday())
        newdate = (tempweekdate.year * 100000000 +
                   tempweekdate.month * 1000000 + tempweekdate.day * 10000)
        return newdate
    for table in h5file.walkNodes("/data"):
        if type(table) != tables.table.Table:
            continue
        try:
            index_table = h5file.getNode(group, table.name)
        except tables.NoSuchNodeError:
            index_table = h5file.createTable(group, table.name, IndexRecord)
        total = table.nrows
        if 0 == total:
            continue
        index_total = index_table.nrows
        index_row = index_table.row
        if index_total:
            # Incremental run: nothing to do if the last indexed period
            # already covers the table's last row.
            index_last_date = int(index_table[-1]['datetime'])
            last_date = getNewDate(int(table[-1]['datetime']))
            if index_last_date == last_date:
                continue
            startix = int(index_table[-1]['start'])
            pre_index_date = int(index_table[-1]['datetime'])
        else:
            # Fresh index: seed with the first row's period at offset 0.
            startix = 0
            pre_index_date = getNewDate(int(table[0]['datetime']))
            index_row['datetime'] = pre_index_date
            index_row['start'] = 0
            index_row.append()
        index = startix
        for row in table[startix:]:
            date = int(row['datetime'])
            cur_index_date = getNewDate(date)
            if cur_index_date != pre_index_date:
                # Period boundary: record where the new period starts.
                index_row['datetime'] = cur_index_date
                index_row['start'] = index
                index_row.append()
                pre_index_date = cur_index_date
            index += 1
        index_table.flush()
def UpdateMonthIndex(h5file):
    """Build/refresh the monthly index under '/month' for every data table.

    For each table under '/data', appends (month-start datetime, row offset)
    pairs for the periods added since the last run.
    """
    try:
        group = h5file.getNode("/", "month")
    except tables.NoSuchNodeError:  # was a bare except: hides real errors
        group = h5file.createGroup("/", "month")
    def getNewDate(olddate):
        """Map an encoded datetime to the first day of its month."""
        # Floor division (//): `/` yields floats on Python 3 and corrupts
        # the packed-integer encoding.
        y = olddate // 100000000
        m = olddate // 1000000 - y * 100
        return(y * 100000000 + m * 1000000 + 10000)
    for table in h5file.walkNodes("/data"):
        if type(table) != tables.table.Table:
            continue
        try:
            index_table = h5file.getNode(group, table.name)
        except tables.NoSuchNodeError:
            index_table = h5file.createTable(group, table.name, IndexRecord)
        total = table.nrows
        if 0 == total:
            continue
        index_total = index_table.nrows
        index_row = index_table.row
        if index_total:
            # Incremental run: nothing to do if the last indexed period
            # already covers the table's last row.
            index_last_date = int(index_table[-1]['datetime'])
            last_date = getNewDate(int(table[-1]['datetime']))
            if index_last_date == last_date:
                continue
            startix = int(index_table[-1]['start'])
            pre_index_date = int(index_table[-1]['datetime'])
        else:
            # Fresh index: seed with the first row's period at offset 0.
            startix = 0
            date = int(table[0]['datetime'])
            pre_index_date = getNewDate(date)
            index_row['datetime'] = pre_index_date
            index_row['start'] = 0
            index_row.append()
        index = startix
        for row in table[startix:]:
            date = int(row['datetime'])
            cur_index_date = getNewDate(date)
            if cur_index_date != pre_index_date:
                # Period boundary: record where the new period starts.
                index_row['datetime'] = cur_index_date
                index_row['start'] = index
                index_row.append()
                pre_index_date = cur_index_date
            index += 1
        index_table.flush()
def UpdateYearIndex(h5file):
    """Build/refresh the yearly index under '/year' for every data table.

    For each table under '/data', appends (year-start datetime, row offset)
    pairs for the periods added since the last run.
    """
    try:
        group = h5file.getNode("/", "year")
    except tables.NoSuchNodeError:  # was a bare except: hides real errors
        group = h5file.createGroup("/", "year")
    def getNewDate(olddate):
        """Map an encoded datetime to January 1st of its year."""
        # Floor division (//): `/` yields floats on Python 3 and corrupts
        # the packed-integer encoding.
        y = olddate // 100000000
        return(y * 100000000 + 1010000)
    for table in h5file.walkNodes("/data"):
        if type(table) != tables.table.Table:
            continue
        try:
            index_table = h5file.getNode(group, table.name)
        except tables.NoSuchNodeError:
            index_table = h5file.createTable(group, table.name, IndexRecord)
        total = table.nrows
        if 0 == total:
            continue
        index_total = index_table.nrows
        index_row = index_table.row
        if index_total:
            # Incremental run: nothing to do if the last indexed period
            # already covers the table's last row.
            index_last_date = int(index_table[-1]['datetime'])
            last_date = getNewDate(int(table[-1]['datetime']))
            if index_last_date == last_date:
                continue
            startix = int(index_table[-1]['start'])
            pre_index_date = int(index_table[-1]['datetime'])
        else:
            # Fresh index: seed with the first row's period at offset 0.
            startix = 0
            date = int(table[0]['datetime'])
            pre_index_date = getNewDate(date)
            index_row['datetime'] = pre_index_date
            index_row['start'] = 0
            index_row.append()
        index = startix
        for row in table[startix:]:
            date = int(row['datetime'])
            cur_index_date = getNewDate(date)
            if cur_index_date != pre_index_date:
                # Period boundary: record where the new period starts.
                index_row['datetime'] = cur_index_date
                index_row['start'] = index
                index_row.append()
                pre_index_date = cur_index_date
            index += 1
        index_table.flush()
def UpdateHalfYearIndex(h5file):
    """Build/refresh the half-yearly index under '/halfyear' for every data
    table.

    For each table under '/data', appends (half-year-start datetime, row
    offset) pairs for the periods added since the last run.
    """
    try:
        group = h5file.getNode("/", "halfyear")
    except tables.NoSuchNodeError:  # was a bare except: hides real errors
        group = h5file.createGroup("/", "halfyear")
    def getNewDate(olddate):
        """Map an encoded datetime to the start of its half-year
        (January 1st or July 1st)."""
        halfyearDict = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1,
                        7: 7, 8: 7, 9: 7, 10: 7, 11: 7, 12: 7}
        # Floor division (//): `/` yields floats on Python 3, which breaks
        # the dict lookup and the packed-integer encoding.
        y = olddate // 100000000
        m = olddate // 1000000 - y * 100
        return( y * 100000000 + halfyearDict[m] * 1000000 + 10000 )
    for table in h5file.walkNodes("/data"):
        if type(table) != tables.table.Table:
            continue
        try:
            index_table = h5file.getNode(group, table.name)
        except tables.NoSuchNodeError:
            index_table = h5file.createTable(group, table.name, IndexRecord)
        total = table.nrows
        if 0 == total:
            continue
        index_total = index_table.nrows
        index_row = index_table.row
        if index_total:
            # Incremental run: nothing to do if the last indexed period
            # already covers the table's last row.
            index_last_date = int(index_table[-1]['datetime'])
            last_date = getNewDate(int(table[-1]['datetime']))
            if index_last_date == last_date:
                continue
            startix = int(index_table[-1]['start'])
            pre_index_date = int(index_table[-1]['datetime'])
        else:
            # Fresh index: seed with the first row's period at offset 0.
            startix = 0
            date = int(table[0]['datetime'])
            pre_index_date = getNewDate(date)
            index_row['datetime'] = pre_index_date
            index_row['start'] = 0
            index_row.append()
        index = startix
        for row in table[startix:]:
            date = int(row['datetime'])
            cur_index_date = getNewDate(date)
            if cur_index_date != pre_index_date:
                # Period boundary: record where the new period starts.
                index_row['datetime'] = cur_index_date
                index_row['start'] = index
                index_row.append()
                pre_index_date = cur_index_date
            index += 1
        index_table.flush()
def UpdateQuarterIndex(h5file):
    """Build/refresh the quarterly index under '/quarter' for every data
    table.

    For each table under '/data', appends (quarter-start datetime, row
    offset) pairs for the periods added since the last run.
    """
    try:
        group = h5file.getNode("/", "quarter")
    except tables.NoSuchNodeError:  # was a bare except: hides real errors
        group = h5file.createGroup("/", "quarter")
    def getNewDate(olddate):
        """Map an encoded datetime to the first day of its quarter."""
        quarterDict = {1: 1, 2: 1, 3: 1, 4: 4, 5: 4, 6: 4,
                       7: 7, 8: 7, 9: 7, 10: 10, 11: 10, 12: 10}
        # Floor division (//): `/` yields floats on Python 3, which breaks
        # the dict lookup and the packed-integer encoding.
        y = olddate // 100000000
        m = olddate // 1000000 - y * 100
        return( y * 100000000 + quarterDict[m] * 1000000 + 10000 )
    for table in h5file.walkNodes("/data"):
        if type(table) != tables.table.Table:
            continue
        try:
            index_table = h5file.getNode(group, table.name)
        except tables.NoSuchNodeError:
            index_table = h5file.createTable(group, table.name, IndexRecord)
        total = table.nrows
        if 0 == total:
            continue
        index_total = index_table.nrows
        index_row = index_table.row
        if index_total:
            # Incremental run: nothing to do if the last indexed period
            # already covers the table's last row.
            index_last_date = int(index_table[-1]['datetime'])
            last_date = getNewDate(int(table[-1]['datetime']))
            if index_last_date == last_date:
                continue
            startix = int(index_table[-1]['start'])
            pre_index_date = int(index_table[-1]['datetime'])
        else:
            # Fresh index: seed with the first row's period at offset 0.
            startix = 0
            date = int(table[0]['datetime'])
            pre_index_date = getNewDate(date)
            index_row['datetime'] = pre_index_date
            index_row['start'] = 0
            index_row.append()
        index = startix
        for row in table[startix:]:
            date = int(row['datetime'])
            cur_index_date = getNewDate(date)
            if cur_index_date != pre_index_date:
                # Period boundary: record where the new period starts.
                index_row['datetime'] = cur_index_date
                index_row['start'] = index
                index_row.append()
                pre_index_date = cur_index_date
            index += 1
        index_table.flush()
def UpdateDayIndex(h5file):
    """Build/refresh the daily index under '/day' for every data table.

    For each table under '/data', appends (day-start datetime, row offset)
    pairs for the periods added since the last run.
    """
    try:
        group = h5file.getNode("/", "day")
    except tables.NoSuchNodeError:  # was a bare except: hides real errors
        group = h5file.createGroup("/", "day")
    def getNewDate(olddate):
        """Truncate an encoded datetime to midnight of the same day."""
        # Floor division (//): `/` yields floats on Python 3 and corrupts
        # the packed-integer encoding.
        return olddate // 10000 * 10000
    for table in h5file.walkNodes("/data"):
        if type(table) != tables.table.Table:
            continue
        try:
            index_table = h5file.getNode(group, table.name)
        except tables.NoSuchNodeError:
            index_table = h5file.createTable(group, table.name, IndexRecord)
        total = table.nrows
        if 0 == total:
            continue
        index_total = index_table.nrows
        index_row = index_table.row
        if index_total:
            # Incremental run: nothing to do if the last indexed period
            # already covers the table's last row.
            index_last_date = int(index_table[-1]['datetime'])
            last_date = getNewDate(int(table[-1]['datetime']))
            if index_last_date == last_date:
                continue
            startix = int(index_table[-1]['start'])
            pre_index_date = int(index_table[-1]['datetime'])
        else:
            # Fresh index: seed with the first row's period at offset 0.
            startix = 0
            date = int(table[0]['datetime'])
            pre_index_date = getNewDate(date)
            index_row['datetime'] = pre_index_date
            index_row['start'] = 0
            index_row.append()
        index = startix
        for row in table[startix:]:
            date = int(row['datetime'])
            cur_index_date = getNewDate(date)
            if cur_index_date != pre_index_date:
                # Period boundary: record where the new period starts.
                index_row['datetime'] = cur_index_date
                index_row['start'] = index
                index_row.append()
                pre_index_date = cur_index_date
            index += 1
        index_table.flush()
def UpdateHourIndex(h5file):
    """Build or incrementally refresh hourly-session index tables under "/min60".

    Rows are bucketed to session-hour end times 10:30, 11:30, 14:00 and 15:00
    (presumably China A-share trading sessions -- confirm against the data
    source).  Otherwise identical in structure to UpdateDayIndex.

    :param h5file: open PyTables file handle (PyTables 2.x camelCase API).
    """
    try:
        group = h5file.getNode("/", "min60")
    except tables.NoSuchNodeError:
        # Narrowed from a bare except: only create the group when missing.
        group = h5file.createGroup("/", "min60")

    def getNewDate(olddate):
        # Snap the HHMM part up to its session-hour boundary.  Floor division
        # keeps integer semantics on Python 3; 'minute' avoids shadowing the
        # builtin min().
        day = olddate // 10000 * 10000
        minute = olddate - day
        for boundary in (1030, 1130, 1400):
            if minute <= boundary:
                return day + boundary
        return day + 1500

    for table in h5file.walkNodes("/data"):
        # Only leaf Table nodes are indexed (exact type check kept).
        if type(table) != tables.table.Table:
            continue
        try:
            index_table = h5file.getNode(group, table.name)
        except tables.NoSuchNodeError:
            index_table = h5file.createTable(group, table.name, IndexRecord)
        total = table.nrows
        if 0 == total:
            continue
        index_total = index_table.nrows
        index_row = index_table.row
        if index_total:
            # Incremental update: skip tables whose index is already current.
            index_last_date = int(index_table[-1]['datetime'])
            last_date = getNewDate(int(table[-1]['datetime']))
            if index_last_date == last_date:
                continue
            startix = int(index_table[-1]['start'])
            pre_index_date = int(index_table[-1]['datetime'])
        else:
            # Fresh index: first bucket entry points at row 0.
            startix = 0
            date = int(table[0]['datetime'])
            pre_index_date = getNewDate(date)
            index_row['datetime'] = pre_index_date
            index_row['start'] = 0
            index_row.append()
        index = startix
        for row in table[startix:]:
            date = int(row['datetime'])
            cur_index_date = getNewDate(date)
            if cur_index_date != pre_index_date:
                index_row['datetime'] = cur_index_date
                index_row['start'] = index
                index_row.append()
                pre_index_date = cur_index_date
            index += 1
        index_table.flush()
def UpdateFifteenMinIndex(h5file):
    """Build or incrementally refresh 15-minute index tables under "/min15".

    Rows are bucketed to 15-minute session boundaries (09:45 .. 11:30, then
    13:15 .. 15:00 -- the midday gap matches a lunch break; confirm against
    the data source).  Otherwise identical in structure to UpdateDayIndex.

    :param h5file: open PyTables file handle (PyTables 2.x camelCase API).
    """
    try:
        group = h5file.getNode("/", "min15")
    except tables.NoSuchNodeError:
        # Narrowed from a bare except: only create the group when missing.
        group = h5file.createGroup("/", "min15")

    def getNewDate(olddate):
        # Snap the HHMM part up to the next 15-minute session boundary.
        # Table-driven replacement for the original 16-branch elif chain;
        # 'minute' avoids shadowing the builtin min().
        boundaries = (945, 1000, 1015, 1030, 1045, 1100, 1115, 1130,
                      1315, 1330, 1345, 1400, 1415, 1430, 1445)
        day = olddate // 10000 * 10000
        minute = olddate - day
        for boundary in boundaries:
            if minute <= boundary:
                return day + boundary
        return day + 1500

    for table in h5file.walkNodes("/data"):
        # Only leaf Table nodes are indexed (exact type check kept).
        if type(table) != tables.table.Table:
            continue
        try:
            index_table = h5file.getNode(group, table.name)
        except tables.NoSuchNodeError:
            index_table = h5file.createTable(group, table.name, IndexRecord)
        total = table.nrows
        if 0 == total:
            continue
        index_total = index_table.nrows
        index_row = index_table.row
        if index_total:
            # Incremental update: skip tables whose index is already current.
            index_last_date = int(index_table[-1]['datetime'])
            last_date = getNewDate(int(table[-1]['datetime']))
            if index_last_date == last_date:
                continue
            startix = int(index_table[-1]['start'])
            pre_index_date = int(index_table[-1]['datetime'])
        else:
            # Fresh index: first bucket entry points at row 0.
            startix = 0
            date = int(table[0]['datetime'])
            pre_index_date = getNewDate(date)
            index_row['datetime'] = pre_index_date
            index_row['start'] = 0
            index_row.append()
        index = startix
        for row in table[startix:]:
            date = int(row['datetime'])
            cur_index_date = getNewDate(date)
            if cur_index_date != pre_index_date:
                index_row['datetime'] = cur_index_date
                index_row['start'] = index
                index_row.append()
                pre_index_date = cur_index_date
            index += 1
        index_table.flush()
def UpdateHalfHourIndex(h5file):
try:
group = h5file.getNode("/","min30")
except:
group = h5file.createGroup("/","min30")
def getNewDate(olddate):
min = olddate-olddate/10000*10000
if min<=1000:
newdate = olddate/10000*10000 + 1000
elif min<=1030:
newdate = olddate/10000*10000 + 1030
elif min<=1100:
newdate = olddate/10000*10000 + 1100
elif min<=1130:
newdate = olddate/10000*10000 + 1130
elif min<=1330:
newdate = olddate/10000*10000 + 1330
elif min<=1400:
newdate = olddate/10000*10000 + 1400
elif min<=1430:
newdate = olddate/10000*10000 + 1430
else:
newdate = olddate/10000*10000 + 1500
return newdate
for table in h5file.walkNodes("/data"):
if type(table) != tables.table.Table:
continue
#print table.name
try:
index_table = h5file.getNode(group,table.name)
except:
index_table | |
# -*- coding: utf-8 -*-
from radish import world, given, when, then, step
from terraform_compliance.steps import property_match_list
from terraform_compliance.common.helper import check_sg_rules, convert_resource_type, find_root_by_key, seek_key_in_dict
from terraform_compliance.common.helper import seek_regex_key_in_dict_values, jsonify, Null, EmptyStash
from terraform_compliance.common.helper import get_resource_name_from_stash
from terraform_compliance.extensions.ext_radish_bdd import skip_step
from terraform_compliance.extensions.ext_radish_bdd import custom_type_any, custom_type_condition, custom_type_section
import re
from terraform_compliance.common.exceptions import Failure, TerraformComplianceNotImplemented
from terraform_compliance.common.exceptions import TerraformComplianceInternalFailure
# TODO: Figure out how the IAM policies/statements shown in the plan.out
# TODO: Implement an IAM Compliance via https://github.com/Netflix-Skunkworks/policyuniverse
# def i have_name_defined(_step_obj, name, _terraform_config=world):
# pass
@given(u'I have {name:ANY} defined')
@given(u'I have {name:ANY} {type_name:SECTION} configured')
def i_have_name_section_configured(_step_obj, name, type_name='resource', _terraform_config=world):
    '''
    Finds given resource or variable by name and returns it. Skips the step (and further steps) if it is not found.
    :param _step_obj: Internal, step object for radish.
    :param name: String of the name of the resource_type or variable.
    :param type_name: String of the type, either resource(s) or variable(s)
    :param _terraform_config: Internal, terraform configuration.
    :return: True when something was found and stashed; otherwise skips the scenario.
    '''
    assert (type_name in ['resource', 'resources',
                          'variable', 'variables',
                          'provider', 'providers',
                          'data', 'datas']), \
        '{} configuration type does not exist or not implemented yet. ' \
        'Use resource(s), provider(s), variable(s) or data(s) instead.'.format(type_name)
    # Normalise plural forms: "resources" -> "resource", etc.
    if type_name.endswith('s'):
        type_name = type_name[:-1]
    # Special pseudo-name: gathers every resource whose raw definition has a
    # 'tags' key anywhere, across all resource types.
    if name == 'resource that supports tags':
        resource_types_supports_tags = find_root_by_key(_terraform_config.config.terraform.resources_raw,
                                                        'tags',
                                                        return_key='type')
        resource_list = []
        for resource_type in resource_types_supports_tags:
            resource_list.extend(_terraform_config.config.terraform.find_resources_by_type(resource_type))
        if resource_list:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            _step_obj.context.stash = resource_list
            return True
    elif type_name == 'resource':
        # Human-readable names (e.g. "AWS S3 Bucket") become terraform types.
        name = convert_resource_type(name)
        resource_list = _terraform_config.config.terraform.find_resources_by_type(name)
        if resource_list:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            _step_obj.context.stash = resource_list
            return True
    elif type_name == 'variable':
        found_variable = _terraform_config.config.terraform.variables.get(name, None)
        if found_variable:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            _step_obj.context.stash = found_variable
            return True
    elif type_name == 'provider':
        found_provider = _terraform_config.config.terraform.configuration.get('providers', {}).get(name, None)
        if found_provider:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            _step_obj.context.stash = found_provider
            return True
    elif type_name == 'data':
        name = convert_resource_type(name)
        data_list = _terraform_config.config.terraform.find_data_by_type(name)
        if data_list:
            _step_obj.context.type = type_name
            _step_obj.context.name = name
            _step_obj.context.stash = data_list
            return True
    # Nothing matched: skip this and all remaining steps of the scenario.
    skip_step(_step_obj, name)
@when(u'its {key:ANY} is {value:ANY}')
def its_key_is_value(_step_obj, key, value):
    """Filter the stashed objects down to those whose `key` equals `value`.

    When nothing matches, the step (and the rest of the scenario) is skipped.

    :param _step_obj: Internal, step object for radish.
    :param key: attribute name to look up on each stashed object.
    :param value: value the attribute must equal (after stripping any
        index suffix such as "aws_s3_bucket.x[0]").
    """
    found_list = []
    for obj in _step_obj.context.stash:
        object_key = obj.get(key, Null)
        if object_key is not Null:
            # Drop any "[index]" suffix before comparing.
            object_key = object_key.split('[')[0]
            if object_key == value:
                found_list.append(obj)
    # Was `found_list is not []`, an identity check that is always True and
    # made the skip_step branch unreachable; truthiness is what was intended.
    if found_list:
        _step_obj.context.stash = found_list
    else:
        skip_step(_step_obj, value)
@when(u'it contain {something:ANY}')
@when(u'they have {something:ANY}')
@when(u'it has {something:ANY}')
@when(u'it contains {something:ANY}')
@then(u'it must contain {something:ANY}')
def it_condition_contain_something(_step_obj, something):
    """Narrow the stash to the `something` property of each stashed entity.

    For resources/data: searches each resource's 'values' (or 'expressions')
    for `something`, stashing {'address', 'values', 'type'} entries for every
    hit.  In a 'must contain' sentence a miss raises Failure; otherwise the
    step is skipped when nothing is found.  For providers: searches the raw
    stash dict.  Other context types always skip.
    """
    prop_list = []
    if _step_obj.context.type in ('resource', 'data'):
        for resource in _step_obj.context.stash:
            # Bare (non-dict) stash entries are wrapped into a resource-like
            # dict so the lookup below is uniform.
            if type(resource) is not dict:
                resource = {'values': resource,
                            'address': resource,
                            'type': _step_obj.context.name}
            values = resource.get('values', resource.get('expressions', {}))
            found_value = Null
            found_key = Null
            if type(values) is dict:
                # Direct key hit first, recursive search as fallback.
                found_key = values.get(something, seek_key_in_dict(values, something))
                if type(found_key) is not list:
                    found_key = [{something: found_key}]
                if len(found_key):
                    # Unwrap single-element results.
                    found_key = found_key[0] if len(found_key) == 1 else found_key
                    if type(found_key) is dict:
                        found_value = jsonify(found_key.get(something, found_key))
                    else:
                        found_value = found_key
            elif type(values) is list:
                found_value = []
                for value in values:
                    if type(value) is dict:
                        # First search in the keys
                        found_key = seek_key_in_dict(value, something)
                        # Then search in the values with 'key'
                        if not found_key:
                            found_key = seek_regex_key_in_dict_values(value, 'key', something)
                        if found_key:
                            found_key = found_key[0]
                            found_value = value.get('value')
                            break
                    # NOTE(review): found_key may still be Null from a prior
                    # iteration here; the is-not-Null guard covers that.
                    if found_key is not Null and len(found_key):
                        found_key = found_key[0] if len(found_key) == 1 else found_key
                        if type(found_key) is dict:
                            found_value.append(jsonify(found_key.get(something, found_key)))
            # terraform plans wrap literal values as {'constant_value': ...}.
            if type(found_value) is dict and 'constant_value' in found_value:
                found_value = found_value['constant_value']
            if found_value is not Null and found_value != [] and found_value != '' and found_value != {}:
                prop_list.append({'address': resource['address'],
                                  'values': found_value,
                                  'type': _step_obj.context.name})
            elif 'must' in _step_obj.context_sensitive_sentence:
                # 'it must contain ...' makes a missing property a failure.
                raise Failure('{} ({}) does not have {} property.'.format(resource['address'],
                                                                          resource.get('type', ''),
                                                                          something))
        if prop_list:
            _step_obj.context.stash = prop_list
            _step_obj.context.property_name = something
            return True
        skip_step(_step_obj,
                  resource=_step_obj.context.name,
                  message='Can not find any {} property for {} resource in '
                          'terraform plan.'.format(something, _step_obj.context.name))
    elif _step_obj.context.type == 'provider':
        values = seek_key_in_dict(_step_obj.context.stash, something)
        if values:
            _step_obj.context.stash = values
            _step_obj.context.property_name = something
            return True
    skip_step(_step_obj,
              resource=_step_obj.context.name,
              message='Skipping the step since {} type does not have {} property.'.format(_step_obj.context.type,
                                                                                          something))
@then(u'{something:ANY} is be enabled')
@then(u'{something:ANY} must be enabled')
def property_is_enabled(_step_obj, something):
    """Assert that every stashed resource has the `something` property truthy.

    Non-dict stash entries are ignored.  Raises Failure for the first
    resource whose property is missing, empty or falsy.
    """
    for resource in _step_obj.context.stash:
        if type(resource) is not dict:
            continue  # guard clause instead of nesting the whole body
        # Translate generic property names to per-resource-type equivalents.
        if something in property_match_list:
            something = property_match_list[something].get(resource['type'], something)
        property_value = seek_key_in_dict(resource.get('values', {}), something)
        if len(property_value):
            property_value = property_value[0]
            if type(property_value) is dict:
                property_value = property_value.get(something, Null)
        if not property_value:
            raise Failure('Resource {} does not have {} property enabled ({}={}).'.format(
                resource.get('address', "resource"),
                something,
                something,
                property_value))
    return True
@then(u'it must {condition:ANY} have {proto:ANY} protocol and port {port} for {cidr:ANY}')
def it_condition_have_proto_protocol_and_port_port_for_cidr(_step_obj, condition, proto, port, cidr):
    """Check stashed security groups against a proto/port/cidr expectation.

    :param condition: 'only' turns on exclusive matching in check_sg_rules.
    :param port: single port, "from-to" range, or comma-delimited list.
    """
    proto = str(proto)
    cidr = str(cidr)
    # Set to True only if the condition is 'only'
    condition = condition == 'only'
    # In case we have a range
    if '-' in port:
        if condition:
            raise Failure('"must only" scenario cases must be used either with individual port '
                          'or multiple ports separated with comma.')
        from_port, to_port = port.split('-')
        ports = [from_port, to_port]
    # In case we have comma delimited ports
    elif ',' in port:
        ports = port.split(',')
        # Compare numerically: min/max on the raw strings compared
        # lexicographically, e.g. min('443', '80') == '443'.
        from_port = min(ports, key=int)
        to_port = max(ports, key=int)
    else:
        from_port = to_port = int(port)
        ports = list(set([str(from_port), str(to_port)]))
    # Clamp non-positive ports to 1 (port 0 is not a valid rule target).
    from_port = int(from_port) if int(from_port) > 0 else 1
    to_port = int(to_port) if int(to_port) > 0 else 1
    ports[0] = ports[0] if int(ports[0]) > 0 else '1'
    looking_for = dict(proto=proto,
                       from_port=int(from_port),
                       to_port=int(to_port),
                       ports=ports,
                       cidr=cidr)
    for security_group in _step_obj.context.stash:
        if type(security_group['values']) is list:
            for sg in security_group['values']:
                check_sg_rules(plan_data=sg, security_group=looking_for, condition=condition)
        elif type(security_group['values']) is dict:
            check_sg_rules(plan_data=security_group['values'], security_group=looking_for, condition=condition)
        else:
            raise TerraformComplianceInternalFailure('Unexpected Security Group, '
                                                     'must be either list or a dict: '
                                                     '{}'.format(security_group['values']))
    return True
@when(u'I {action_type:ANY} it')
@when(u'I {action_type:ANY} them')
@when(u'I {action_type:ANY} the value')
@then(u'I {action_type:ANY} it')
@then(u'I {action_type:ANY} them')
@then(u'I {action_type:ANY} the value')
def i_action_them(_step_obj, action_type):
    """Apply an aggregate action to the stash; only 'count' is implemented."""
    if action_type == "count":
        # WARNING: Only case where we set stash as a dictionary, instead of a list.
        if type(_step_obj.context.stash) is list:
            # Was `type(...) is dict()` -- comparing a type to an empty dict
            # instance, which is always False, so the counting branch never
            # ran.  (The old branch also called .get() on the stash *list*,
            # which would have raised; that dead call is dropped.)
            if _step_obj.context.stash and type(_step_obj.context.stash[0]) is dict:
                count = 0
                for result in _step_obj.context.stash:
                    # A dict with real 'values' counts each value; anything
                    # else counts as a single entity.
                    count += len(result.get('values', {})) if result.get('values') else 1
                _step_obj.context.stash = {'values': count}
            else:
                _step_obj.context.stash = {'values': len(_step_obj.context.stash)}
        else:
            raise TerraformComplianceNotImplemented('Invalid action_type in the scenario: {}'.format(action_type))
@then(u'Its value must be {operator:ANY} than {number:d}')
@then(u'I expect the result is {operator:ANY} than {number:d}')
def i_expect_the_result_is_operator_than_number(_step_obj, operator, number, _stash=EmptyStash):
    """Recursively compare every stashed numeric value against `number`.

    Lists/dicts are walked recursively; leaf int/str values are compared with
    the requested operator via assert.
    """
    values = _step_obj.context.stash if _stash is EmptyStash else _stash
    if type(values) is list:
        for value_set in values:
            i_expect_the_result_is_operator_than_number(_step_obj, operator, number, _stash=value_set)
    elif type(values) is dict:
        i_expect_the_result_is_operator_than_number(_step_obj, operator, number, values.get('values', Null))
    elif type(values) is int or type(values) is str:
        values = int(values)
        if operator in ("more", "greater", "bigger"):
            assert values > number, "{} is not more than {}".format(values, number)
        elif operator in ("more and equal", "greater and equal", "bigger and equal"):
            assert values >= number, "{} is not more and equal than {}".format(values, number)
        elif operator in ("less", "lesser", "smaller"):
            assert values < number, "{} is not less than {}".format(values, number)
        elif operator in ("less and equal", "lesser and equal", "smaller and equal"):
            assert values <= number, "{} is not less and equal than {}".format(values, number)
        else:
            raise TerraformComplianceNotImplemented('Invalid operator: {}'.format(operator))
    elif values is Null:
        # Was `type(values) is Null`, which compares a type object against the
        # Null sentinel and never matched, silently swallowing empty values.
        raise TerraformComplianceNotImplemented('Null/Empty value found on {}'.format(_step_obj.context.type))
@step(u'its value {condition:ANY} match the "{search_regex}" regex')
def its_value_condition_match_the_search_regex_regex(_step_obj, condition, search_regex, _stash=EmptyStash):
    """Assert (condition='must') or deny ('must not') that every stashed value
    matches `search_regex` (case-insensitive); recurses into lists/dicts.
    """
    def fail(condition, name=None):
        text = 'matches' if condition == 'must not' else 'does not match'
        # Fall back to the context name when no usable name was supplied.
        # Was `... or name is not False`, a tautology that made the fallback
        # unreachable and could report None as the entity name.
        name = name if (name is not None and name is not False) else _step_obj.context.name
        pattern = 'Null/None' if regex == '\x00' else regex
        raise Failure('{} property in {} {} {} with {} regex. '
                      'It is set to {}.'.format(_step_obj.context.property_name,
                                                name,
                                                _step_obj.context.type,
                                                text,
                                                pattern,
                                                values))
    regex = r'{}'.format(search_regex)
    values = _step_obj.context.stash if _stash is EmptyStash else _stash
    if type(values) is str or type(values) is int or type(values) is bool:
        matches = re.match(regex, str(values), flags=re.IGNORECASE)
        if (condition == 'must' and matches is None) or (condition == "must not" and matches is not None):
            _stash = get_resource_name_from_stash(_step_obj.context.stash, _stash)
            fail(condition, name=_stash.get('address'))
    elif type(values) is list:
        for value in values:
            its_value_condition_match_the_search_regex_regex(_step_obj, condition, search_regex, value)
    elif type(values) is dict:
        if 'values' in values:
            # '\x00' is the sentinel regex meaning "property must not be None".
            if values['values'] is None and regex == '\x00' and condition == 'must not':
                values = values['values']
                fail(condition, name=_stash.get('address'))
            else:
                its_value_condition_match_the_search_regex_regex(_step_obj, condition, search_regex, values.get('values'))
        else:
            for key, value in values.items():
                its_value_condition_match_the_search_regex_regex(_step_obj, condition, search_regex, value)
@then(u'the scenario fails')
@then(u'the scenario should fail')
@then(u'it fails')
@then(u'it should fail')
@then(u'it must fail')
def | |
Array of document results that
match the query.
"""
self.matching_results = matching_results
self.notices = notices
@classmethod
def from_dict(cls, _dict: Dict) -> 'QueryNoticesResponse':
"""Initialize a QueryNoticesResponse object from a json dictionary."""
args = {}
valid_keys = ['matching_results', 'notices']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class QueryNoticesResponse: '
+ ', '.join(bad_keys))
if 'matching_results' in _dict:
args['matching_results'] = _dict.get('matching_results')
if 'notices' in _dict:
args['notices'] = [
Notice._from_dict(x) for x in (_dict.get('notices'))
]
return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryNoticesResponse object from a json dictionary."""
        # Internal alias kept for backwards compatibility; delegates to from_dict.
        return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'matching_results') and self.matching_results is not None:
_dict['matching_results'] = self.matching_results
if hasattr(self, 'notices') and self.notices is not None:
_dict['notices'] = [x._to_dict() for x in self.notices]
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Internal alias kept for backwards compatibility; delegates to to_dict.
        return self.to_dict()
    def __str__(self) -> str:
        """Return a `str` version of this QueryNoticesResponse object."""
        # Pretty-printed JSON serialization of the model.
        return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'QueryNoticesResponse') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other: 'QueryNoticesResponse') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        # Defined explicitly to mirror __eq__ (legacy Python-2-style models).
        return not self == other
class QueryResponse():
    """
    A response containing the documents and aggregations for the query.

    :attr int matching_results: (optional) Number of matching results.
    :attr List[QueryResult] results: (optional) Document results for the query.
    :attr List[QueryAggregation] aggregations: (optional) Aggregations for the query.
    :attr RetrievalDetails retrieval_details: (optional) Retrieval type information.
    :attr str suggested_query: (optional) Suggested correction to the submitted
          **natural_language_query** value.
    :attr List[QuerySuggestedRefinement] suggested_refinements: (optional)
          Suggested refinements.
    :attr List[QueryTableResult] table_results: (optional) Table results.
    """
    def __init__(self,
                 *,
                 matching_results: int = None,
                 results: List['QueryResult'] = None,
                 aggregations: List['QueryAggregation'] = None,
                 retrieval_details: 'RetrievalDetails' = None,
                 suggested_query: str = None,
                 suggested_refinements: List['QuerySuggestedRefinement'] = None,
                 table_results: List['QueryTableResult'] = None) -> None:
        """
        Initialize a QueryResponse object.

        All parameters are optional and keyword-only; see the class docstring
        for their meanings.
        """
        self.matching_results = matching_results
        self.results = results
        self.aggregations = aggregations
        self.retrieval_details = retrieval_details
        self.suggested_query = suggested_query
        self.suggested_refinements = suggested_refinements
        self.table_results = table_results
    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryResponse':
        """Initialize a QueryResponse object from a json dictionary."""
        allowed = {
            'matching_results', 'results', 'aggregations', 'retrieval_details',
            'suggested_query', 'suggested_refinements', 'table_results'
        }
        bad_keys = set(_dict.keys()) - allowed
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryResponse: '
                + ', '.join(bad_keys))
        args = {}
        if 'matching_results' in _dict:
            args['matching_results'] = _dict.get('matching_results')
        if 'results' in _dict:
            args['results'] = [QueryResult._from_dict(item) for item in _dict.get('results')]
        if 'aggregations' in _dict:
            args['aggregations'] = [
                QueryAggregation._from_dict(item) for item in _dict.get('aggregations')
            ]
        if 'retrieval_details' in _dict:
            args['retrieval_details'] = RetrievalDetails._from_dict(_dict.get('retrieval_details'))
        if 'suggested_query' in _dict:
            args['suggested_query'] = _dict.get('suggested_query')
        if 'suggested_refinements' in _dict:
            args['suggested_refinements'] = [
                QuerySuggestedRefinement._from_dict(item)
                for item in _dict.get('suggested_refinements')
            ]
        if 'table_results' in _dict:
            args['table_results'] = [
                QueryTableResult._from_dict(item) for item in _dict.get('table_results')
            ]
        return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryResponse object from a json dictionary."""
        # Internal alias; delegates to from_dict.
        return cls.from_dict(_dict)
    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'matching_results', None) is not None:
            serialized['matching_results'] = self.matching_results
        if getattr(self, 'results', None) is not None:
            serialized['results'] = [item._to_dict() for item in self.results]
        if getattr(self, 'aggregations', None) is not None:
            serialized['aggregations'] = [item._to_dict() for item in self.aggregations]
        if getattr(self, 'retrieval_details', None) is not None:
            serialized['retrieval_details'] = self.retrieval_details._to_dict()
        if getattr(self, 'suggested_query', None) is not None:
            serialized['suggested_query'] = self.suggested_query
        if getattr(self, 'suggested_refinements', None) is not None:
            serialized['suggested_refinements'] = [
                item._to_dict() for item in self.suggested_refinements
            ]
        if getattr(self, 'table_results', None) is not None:
            serialized['table_results'] = [item._to_dict() for item in self.table_results]
        return serialized
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Internal alias; delegates to to_dict.
        return self.to_dict()
    def __str__(self) -> str:
        """Return a `str` version of this QueryResponse object."""
        return json.dumps(self._to_dict(), indent=2)
    def __eq__(self, other: 'QueryResponse') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False
    def __ne__(self, other: 'QueryResponse') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryResult():
    """
    Result document for the specified query.
    :attr str document_id: The unique identifier of the document.
    :attr dict metadata: (optional) Metadata of the document.
    :attr QueryResultMetadata result_metadata: Metadata of a query result.
    :attr List[QueryResultPassage] document_passages: (optional) Passages returned
    by Discovery.
    """
    def __init__(self,
                 document_id: str,
                 result_metadata: 'QueryResultMetadata',
                 *,
                 metadata: dict = None,
                 document_passages: List['QueryResultPassage'] = None,
                 **kwargs) -> None:
        """
        Initialize a QueryResult object.
        :param str document_id: The unique identifier of the document.
        :param QueryResultMetadata result_metadata: Metadata of a query result.
        :param dict metadata: (optional) Metadata of the document.
        :param List[QueryResultPassage] document_passages: (optional) Passages
        returned by Discovery.
        :param **kwargs: (optional) Any additional properties.
        """
        self.document_id = document_id
        self.metadata = metadata
        self.result_metadata = result_metadata
        self.document_passages = document_passages
        # Unknown service fields become dynamic attributes; __setattr__ below
        # records them in _additionalProperties so to_dict can round-trip them.
        for _key, _value in kwargs.items():
            setattr(self, _key, _value)
    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryResult':
        """Initialize a QueryResult object from a json dictionary."""
        args = {}
        # Work on a copy: known keys are deleted from `xtra` so whatever
        # remains can be forwarded as additional (dynamic) properties.
        xtra = _dict.copy()
        if 'document_id' in _dict:
            args['document_id'] = _dict.get('document_id')
            del xtra['document_id']
        else:
            raise ValueError(
                'Required property \'document_id\' not present in QueryResult JSON'
            )
        if 'metadata' in _dict:
            args['metadata'] = _dict.get('metadata')
            del xtra['metadata']
        if 'result_metadata' in _dict:
            args['result_metadata'] = QueryResultMetadata._from_dict(
                _dict.get('result_metadata'))
            del xtra['result_metadata']
        else:
            raise ValueError(
                'Required property \'result_metadata\' not present in QueryResult JSON'
            )
        if 'document_passages' in _dict:
            args['document_passages'] = [
                QueryResultPassage._from_dict(x)
                for x in (_dict.get('document_passages'))
            ]
            del xtra['document_passages']
        # Remaining keys are passed through as **kwargs to __init__.
        args.update(xtra)
        return cls(**args)
    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryResult object from a json dictionary."""
        # Internal alias kept for backwards compatibility; delegates to from_dict.
        return cls.from_dict(_dict)
    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'document_id') and self.document_id is not None:
            _dict['document_id'] = self.document_id
        if hasattr(self, 'metadata') and self.metadata is not None:
            _dict['metadata'] = self.metadata
        if hasattr(self,
                   'result_metadata') and self.result_metadata is not None:
            _dict['result_metadata'] = self.result_metadata._to_dict()
        if hasattr(self,
                   'document_passages') and self.document_passages is not None:
            _dict['document_passages'] = [
                x._to_dict() for x in self.document_passages
            ]
        # Re-emit any dynamic properties captured by __setattr__.
        if hasattr(self, '_additionalProperties'):
            for _key in self._additionalProperties:
                _value = getattr(self, _key, None)
                if _value is not None:
                    _dict[_key] = _value
        return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Internal alias kept for backwards compatibility; delegates to to_dict.
        return self.to_dict()
    def __setattr__(self, name: str, value: object) -> None:
        # Any attribute outside the declared set is tracked as an additional
        # property so serialization can preserve unknown service fields.
        properties = {
            'document_id', 'metadata', 'result_metadata', 'document_passages'
        }
        if not hasattr(self, '_additionalProperties'):
            super(QueryResult, self).__setattr__('_additionalProperties', set())
        if name not in properties:
            self._additionalProperties.add(name)
        super(QueryResult, self).__setattr__(name, value)
    def __str__(self) -> str:
        """Return a `str` version of this QueryResult object."""
        return json.dumps(self._to_dict(), indent=2)
    def __eq__(self, other: 'QueryResult') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other: 'QueryResult') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryResultMetadata():
"""
Metadata of a query result.
:attr str document_retrieval_source: (optional) The document retrieval source
that produced this search result.
:attr str collection_id: The collection id associated with this training data
set.
:attr float confidence: (optional) The confidence score for the given result.
Calculated based on how relevant the result is estimated to be. confidence can
range from `0.0` to `1.0`. The higher the number, the more relevant the
document. The `confidence` value for a result was calculated using the model
specified in the `document_retrieval_strategy` field of the result set. This
field is only returned if the **natural_language_query** parameter is specified
in the query.
"""
def __init__(self,
collection_id: str,
| |
# <gh_stars>10-100
from enum import Enum
from pprint import pformat, pprint
from typing import Any, Dict, List, Optional, TypeVar, Union
from mstrio import config
from mstrio.api import subscriptions
from mstrio.connection import Connection
from mstrio.distribution_services.schedule import Schedule
from mstrio.distribution_services.subscription.content import Content
from mstrio.distribution_services.subscription.delivery import (CacheType, ClientType, Delivery,
Orientation, SendContentAs,
ShortcutCacheFormat, ZipSettings)
from mstrio.users_and_groups import User
from mstrio.server.project import Project
from mstrio.utils import helper, time_helper
from mstrio.utils.entity import EntityBase
class RecipientsTypes(Enum):
    """Recipient types accepted for a subscription, as REST API string codes."""
    CONTACT_GROUP = "CONTACT_GROUP"
    USER_GROUP = "USER_GROUP"
    CONTACT = "CONTACT"
    USER = "USER"
    PERSONAL_ADDRESS = "PERSONAL_ADDRESS"
    UNSUPPORTED = "UNSUPPORTED"
T = TypeVar("T")  # generic type parameter; presumably used by code outside this view
def list_subscriptions(connection: Connection, project_id: Optional[str] = None,
                       project_name: Optional[str] = None, to_dictionary: bool = False,
                       limit: Optional[int] = None,
                       **filters) -> Union[List["Subscription"], List[dict]]:
    """List subscriptions of a project as Subscription objects or dicts.

    Specify either `project_id` or `project_name`; when `project_id` is
    provided (not `None`), `project_name` is ignored.

    Args:
        connection(object): MicroStrategy connection object
        project_id: Project ID
        project_name: Project name
        to_dictionary: If True returns a list of subscription dicts,
            otherwise returns a list of subscription objects
        limit: limit the number of elements returned. If `None` (default),
            all objects are returned.
        **filters: Available filter parameters: ['id', 'name', 'editable',
            'allowDeliveryChanges', 'allowPersonalizationChanges',
            'allowUnsubscribe', 'dateCreated', 'dateModified', 'owner',
            'schedules', 'contents', 'recipients', 'delivery']
    """
    project_id = Subscription._project_id_check(connection, project_id, project_name)
    # NOTE DE208094: x-mstr-total-count is not working correctly for this
    # endpoint, so chunk_size is large enough to download everything at once.
    # Change to 1000 for async chunking once the header is fixed.
    fetched = helper.fetch_objects_async(
        connection=connection,
        api=subscriptions.list_subscriptions,
        async_api=subscriptions.list_subscriptions_async,
        limit=limit,
        chunk_size=100000,
        filters=filters,
        error_msg='Error getting subscription list.',
        dict_unpack_value="subscriptions",
        project_id=project_id,
    )
    if to_dictionary:
        return fetched
    return [
        Subscription.from_dict(
            source=entry,
            connection=connection,
            project_id=project_id,
        ) for entry in fetched
    ]
class Subscription(EntityBase):
"""Class representation of MicroStrategy Subscription object.
Attributes:
id: The ID of the Subscription
connection: The MicroStrategy connection object
project_id: The ID of the project the Subscription belongs to
"""
_API_GETTERS = {
("id", "name", "editable", "date_created", "date_modified", "owner", "schedules",
"contents", "recipients", "delivery"): subscriptions.get_subscription
}
_FROM_DICT_MAP = {
"owner": User.from_dict,
"contents": lambda source, connection:
[Content.from_dict(content, connection) for content in source], # noqa: E131
"delivery": Delivery.from_dict,
"schedules": lambda source, connection:
[Schedule.from_dict(content, connection) for content in source], # noqa: E131
"date_created": time_helper.DatetimeFormats.YMDHMS,
"date_modified": time_helper.DatetimeFormats.YMDHMS,
}
_API_PATCH = [subscriptions.update_subscription]
_RECIPIENTS_TYPES = [
'CONTACT_GROUP', 'USER_GROUP', 'CONTACT', 'USER', 'PERSONAL_ADDRESS', 'UNSUPPORTED'
]
_RECIPIENTS_INCLUDE = ['TO', 'CC', 'BCC', None]
def __init__(self, connection, subscription_id, project_id=None, project_name=None,
application_id=None, application_name=None):
"""Initialize Subscription object, populates it with I-Server data.
Specify either `project_id` or `project_name`.
When `project_id` is provided (not `None`), `project_name`
is omitted.
Args:
connection: MicroStrategy connection object returned
by `connection.Connection()`
subscription_id: ID of the subscription to be initialized
project_id: Project ID
project_name: Project name
application_id: deprecated. Use project_id instead.
application_name: deprecated. Use project_name instead.
"""
if application_id or application_name:
helper.deprecation_warning(
'`application`',
'`project`',
'172.16.31.10', # NOSONAR
False)
project_id = project_id or application_id
project_name = project_name or application_name
project_id = self._project_id_check(connection, project_id, project_name)
super().__init__(connection, subscription_id, project_id=project_id)
def _init_variables(self, project_id, **kwargs):
super()._init_variables(**kwargs)
self.subscription_id = kwargs.get('id')
self.editable = kwargs.get('editable')
self.allow_delivery_changes = kwargs.get('allow_delivery_changes')
self.allow_personalization_changes = kwargs.get('allow_personalization_changes')
self.allow_unsubscribe = kwargs.get('allow_unsubscribe')
self.date_created = time_helper.map_str_to_datetime("date_created",
kwargs.get("date_created"),
self._FROM_DICT_MAP)
self.date_modified = time_helper.map_str_to_datetime("date_modified",
kwargs.get("date_modified"),
self._FROM_DICT_MAP)
self.owner = User.from_dict(kwargs.get('owner'),
self.connection) if kwargs.get('owner') else None
self.schedules = [
Schedule.from_dict(schedule, self._connection) for schedule in kwargs.get('schedules')
] if kwargs.get('schedules') else None
self.contents = [
Content.from_dict(content, self._connection) for content in kwargs.get('contents')
] if kwargs.get('contents') else None
self.recipients = kwargs.get('recipients', None)
self.delivery = Delivery.from_dict(
kwargs.get('delivery')) if kwargs.get('delivery') else None
self.project_id = project_id
    def alter(
        self,
        name: Optional[str] = None,
        allow_delivery_changes: Optional[bool] = None,
        allow_personalization_changes: Optional[bool] = None,
        allow_unsubscribe: Optional[bool] = None,
        send_now: bool = False,
        owner_id: Optional[str] = None,
        schedules: Union[str, List[str], Schedule, List[Schedule]] = None,
        contents: Content = None,
        recipients: Union[List[str], List[dict]] = None,
        delivery: Union[Delivery, dict] = None,
        delivery_mode: Optional[str] = None,
        custom_msg=None,
        delivery_expiration_date: Optional[str] = None,
        contact_security: Optional[bool] = None,
        filename: Optional[str] = None,
        compress: Optional[bool] = None,
        space_delimiter: Optional[str] = None,
        email_subject: Optional[str] = None,
        email_message: Optional[str] = None,
        email_send_content_as: Optional[str] = None,
        overwrite_older_version: Optional[bool] = None,
        zip_filename: Optional[str] = None,
        zip_password_protect: Optional[bool] = None,
        zip_password: Optional[str] = None,
        file_burst_sub_folder: Optional[str] = None,
        printer_copies: Optional[int] = None,
        printer_range_start: Optional[int] = None,
        printer_range_end: Optional[int] = None,
        printer_collated: Optional[bool] = None,
        printer_orientation: Optional[str] = None,
        printer_use_print_range: Optional[bool] = None,
        cache_type: Optional[str] = None,
        shortcut_cache_format: Optional[str] = None,
        mobile_client_type: Optional[str] = None,
        device_id: Optional[str] = None,
        do_not_create_update_caches: Optional[bool] = None,
        re_run_hl: Optional[bool] = None,
    ):
        """
        Alter this subscription on the I-Server.

        Arguments left as `None` keep their current value; only changed
        fields are sent in the PATCH body.

        Args:
            name(str): name of the subscription,
            allow_delivery_changes(bool): whether the recipients can change
                the delivery of the subscription,
            allow_personalization_changes(bool): whether the recipients can
                personalize the subscription,
            allow_unsubscribe(bool): whether the recipients can unsubscribe
                from the subscription,
            send_now(bool): indicates whether to execute the subscription
                immediately,
            owner_id(str): ID of the subscription owner, by default logged in
                user ID,
            schedules (Union[str, List[str], Schedule, List[Schedule]]):
                Schedules IDs or Schedule objects,
            contents (Content): The content of the subscription.
            recipients (Union[List[str], List[dict]]): list of recipients IDs
                or dicts,
            delivery (Union[Delivery, dict]): delivery settings object or
                dict; when given, the individual delivery_* keyword
                arguments below are ignored,
            delivery_mode(str, enum): the subscription delivery mode [EMAIL,
                FILE, PRINTER, HISTORY_LIST, CACHE, MOBILE, FTP, SNAPSHOT,
                PERSONAL_VIEW, SHARED_LINK, UNSUPPORTED],
            custom_msg(str): optional message printed instead of the default
                success message,
            delivery_expiration_date(str): expiration date of the subscription,
                format should be yyyy-MM-dd,
            contact_security(bool): whether to use contact security for each
                contact group member,
            filename(str): the filename that will be delivered when
                the subscription is executed,
            compress(bool): whether to compress the file
            space_delimiter(str): space delimiter,
            email_subject(str): email subject associated with the subscription,
            email_message(str): email body of subscription,
            email_send_content_as(str,enum): [data, data_and_history_list,
                data_and_link_and_history_list, link_and_history_list],
            overwrite_older_version(bool): whether the current subscription
                will overwrite earlier versions of the same report or document
                in the history list,
            zip_filename(str): filename of the compressed content,
            zip_password_protect(bool): whether to password protect zip file,
            zip_password(str): optional password for the compressed file
            file_burst_sub_folder(str): burst sub folder,
            printer_copies(int): the number of copies that should be printed,
            printer_range_start(int): the number indicating the first report
                page that should be printed,
            printer_range_end(int): the number indicating the last report
                page that should be printed,
            printer_collated(bool): whether the printing should be collated,
            printer_orientation(str,enum): [ PORTRAIT, LANDSCAPE ]
            printer_use_print_range(bool): whether print range should be used,
            cache_type(str,enum): [RESERVED, SHORTCUT, BOOKMARK,
                SHORTCUTWITHBOOKMARK]
            shortcut_cache_format(str,enum): [RESERVED, JSON, BINARY, BOTH]
            mobile_client_type(str,enum): [RESERVED, BLACKBERRY, PHONE, TABLET,
                ANDROID]
            device_id(str): the mobile target project,
            do_not_create_update_caches(bool): whether the current subscription
                will overwrite earlier versions of the same report or document
                in the history list,
            re_run_hl(bool): whether subscription will re-run against warehouse
        """
        # Helper that resolves "new value or keep current". NOTE: it returns
        # inside the first loop iteration, so it must be called with exactly
        # one keyword argument (which is how it is used below). With `nested`
        # set, the comparison baseline is `nested` instead of self.__dict__.
        def is_changed(nested=None, **kwargs):
            for key, value in kwargs.items():
                if nested:
                    return value if value != nested and value is not None else nested
                else:
                    current_val = self.__dict__.get(key)
                    # if not current_val: we need to get
                    return value if value != current_val and value is not None else current_val
        # Schedules logic: fall back to the subscription's current schedules
        # when the caller supplied none.
        schedules = self.__validate_schedules(schedules=schedules)
        if not schedules:
            schedules = [{'id': sch.id} for sch in self.schedules]
        # Content logic: normalize to a list of camelCase dicts; raise
        # TypeError (via exception_handler) on any unsupported element type.
        if contents:
            contents = contents if isinstance(contents, list) else [contents]
            content_type_msg = "Contents must be dictionaries or Content objects."
            contents = [
                content.to_dict(
                    camel_case=True) if isinstance(content, Content) else content if isinstance(
                        content, dict) else helper.exception_handler(content_type_msg, TypeError)
                for content in contents
            ]
        else:
            contents = [cont.to_dict() for cont in self.contents]
        # Delivery logic: an explicit `delivery` argument wins outright;
        # otherwise merge the individual delivery_* keyword arguments into
        # the current delivery settings.
        if delivery:
            temp_delivery = (Delivery.from_dict(delivery)
                             if isinstance(delivery, dict) else delivery)
        else:
            temp_delivery = self.__change_delivery_properties(
                delivery_mode, delivery_expiration_date, contact_security, email_subject,
                email_message, filename, compress, None, zip_filename, zip_password,
                zip_password_protect, space_delimiter, email_send_content_as,
                overwrite_older_version, file_burst_sub_folder, printer_copies,
                printer_range_start, printer_range_end, printer_collated, printer_orientation,
                printer_use_print_range, cache_type, shortcut_cache_format, mobile_client_type,
                device_id, do_not_create_update_caches, re_run_hl)
        delivery = temp_delivery.to_dict(camel_case=True)
        # Recipients logic: must run after delivery is final, since
        # validation depends on delivery['mode'].
        recipients = is_changed(recipients=recipients)
        recipients = Subscription._validate_recipients(self.connection, contents, recipients,
                                                       self.project_id, delivery['mode'])
        # Assemble the PATCH body; None-valued entries are stripped below so
        # untouched fields are not sent to the server.
        body = {
            "name": is_changed(name=name),
            "allowDeliveryChanges": is_changed(allow_delivery_changes=allow_delivery_changes),
            "allowPersonalizationChanges":
                is_changed(allow_personalization_changes=allow_personalization_changes),
            "allowUnsubscribe": is_changed(allow_unsubscribe=allow_unsubscribe),
            "sendNow": send_now,
            'owner': {
                'id': is_changed(nested=self.owner.id, owner_id=owner_id)
            },
            "schedules": schedules,
            "contents": contents,
            "recipients": recipients,
            "delivery": delivery,
        }
        body = helper.delete_none_values(body)
        response = subscriptions.update_subscription(self.connection, self.id, self.project_id,
                                                     body)
        # On success, refresh this object from the server's response.
        if response.ok:
            response = response.json()
            response = helper.camel_to_snake(response)
            self._set_object(**response)
            if config.verbose:
                print(custom_msg if custom_msg else "Updated subscription '{}' with ID: {}."
                      .format(self.name, self.id))
@staticmethod
def __validate_schedules(schedules: Union[str, List[str], Schedule, List[Schedule]] = None):
tmp_schedules = []
schedules = schedules if isinstance(schedules, list) else [schedules]
schedules = [s for s in schedules if s is not None]
for schedule in schedules:
if isinstance(schedule, Schedule):
| |
stEvt:instan\
ceID=\x22xmp.iid:f2\
b9677e-2393-754d\
-9ae5-5eb8693c7b\
40\x22 stEvt:when=\x22\
2020-05-02T17:59\
:57-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>zh\
\x97\x9b\x00\x00\x01KIDAT8\x11\xa5\xc1\xb1j\
\x14A\x00\x00\xd07\xbb\xb3\xf1b\x11\xd1\xa4\xd0\x13\x04\
\x0d\x9a6\x08\x1a\x90\x88)\xac4\xbd\x1fa/\xd8\x08\
\x16\xd66jg-X\xa4\x91\x14V\xfe\x80`@\xac\
\xb5\xda+\xac\x12\xe2\xdd\xe66\xb7\xa30\xc5\xf4\xe6\xbd\
\x90Rr\x16q2\x99\x5c\xc1\x18A\xb6@\x8f\x91b\
\xc0\x1c#E\x8f_\x11\xaf\xb1\x8b\x1f\x88\xb8\x84k8\
@\x8dS\x5c\xc0:\xbec\xc0\x02\x9bx\x1ae\xcf\xf0\
N\xf6\x08/q\x07\xe71\xc5=\xbc\xc5\x0ef\xe8\xf1\
\x11\xe7\x22*\xcc\x15'H\xb2\xa9\xacC\xc2\x14\xbdl\
@U!\xa1VD\x04Y#\x8b\x08h\x10d\x15R\
D\xc0L\xf1\x07\x0bY/;\xc6\x80\xa9b\x8e\x10\xd1\
\xe1\x05v\x11q\x19\x1b\xd8CD\x8f5\xac\xe3\x13\x16\
\x18\xb0\x8d/\x115\xbea\x1f\x1561\xc6>\x960\
\xc3\x06\xae\xe33z\x9c\xe2\x06\x9a\x88\x06{\xf8 \xdb\
\xc2]\xbcW\xdc\xc4C\xbcQ\xec`T!\xe1\xa2b\
\x15\x8dlY\xb6\x86\x06+hd\xcb\x18\xa2\xecHq\
\x88\xb9l&;D\x8f#E\xe7\x9f\x88\x06\xdb\xf8\x89\
\x0a\xf7q\x15\x0f0\xc21nc\x8c\xc78A\x8f[\
\xf8\x1a\xda\xb6}\x85'\xf8\x8d\x1a5\x1at\x08H\xa8\
\xb1\x84N6`\x05\xcfC\xdb\xb6\xce\x22\x22\xf8\x7f\xe9\
/\x95.[\x86[\xd33\xc7\x00\x00\x00\x00IEN\
D\xaeB`\x82\
\x00\x00\x07\x96\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
42-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:54-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:54-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:56b09c1a-aaf6\
-114c-988f-0a3ca\
10c9f46\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:0b17749c-fecf-5\
24a-b107-45998b1\
0598f\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:6f357b6\
2-9eb4-1646-8b64\
-01badbead68f\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:6f357b62-9eb4\
-1646-8b64-01bad\
bead68f\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:42-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:56\
b09c1a-aaf6-114c\
-988f-0a3ca10c9f\
46\x22 stEvt:when=\x22\
2020-05-02T17:59\
:54-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xabg\
\x10\xf7\x00\x00\x01KIDAT8\x8d\x85\xd3=/\
DA\x14\xc6\xf1\xb5\xbbHP(\x10Q\x89\xc2Rx\
-$HhDT>\x80NB\xa2R 6\xa1\xa6\
V\xa8\x14j/[\xd2\xe8\xc5[\x10\x1a\x11\x89Jn\
\xa7\xf0\x09\xae\xffI\x9e\xd9\x1c\xd7,\xc5/sgr\
\xee\x993g\xee\xcd\xa5i\x9a3I\x92\xd4i\xec\xc4\
\x0e\x8e\xb0\x87>\xad\xe7C\xac\x97}\xb9\x1b/8\xc3\
2\x0e\xf1\x81I\x1f\x17K\x90\xd7X\xc1\x81\x0f`\xbe\
\x81\xdb\xd8\xee\xd5\x04\x0a,\xe0\x01c\x9a7hl\xc5\
=J\xb1*|\x82f\xdb\x09\xa3\x99\xaa\xa2\xeb\xb1\x04\
-\xb8\xc1H&A\x93\xd6\xffM`Gxr\x0d+\
hl\xc7#\xfak\x1e\xc1\xedvlW\xa7\xe7F\x8d\
+\xb8\xfe\xb3\x89\x04\x145v\xe1\x1d\xc3\x9a\xb7\xe1\x15\
3\x9a\xd7\x87*\xc2\xa6?J\xe2y\x0b_\x98\xd7|\
\x10\x9fXu1\xbf{\xc0b\x0f.\xadT\xb7{\xe8\
A/\xe60\x8eM\xadM`=T`\xcd\xbb\xc3~\
v\x97Lu%<\xeb\xf3\xb6k-\x87\x04S\xd6}\
\x17X\xf4/[2\xd7\xd0Y\xa4\xb8\xa8\x1eA\x8bW\
\xb5\xba\xec\x12[?\xde\xb0m\xf1\xd8\x0d\x09\xa6\xd5\xa8\
S\xa98'8\xc7\x02\x86\xb0\xa4d\x03\xf6\xb3\x85\x04\
\x1dX\xd4O\xb3f\xcdql^\x0e\xd7\xe8\x9b\x1b|\
\x03[\xc6\x07X\xf4\x8a\xfc8\x00\x00\x00\x00IEN\
D\xaeB`\x82\
\x00\x00\x07W\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
37-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:57:59-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:57:59-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:2d1bb855-51a4\
-9849-87b4-5fe50\
795959f\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:9ad96167-5b0d-4\
e4b-8b29-11c6ab0\
bd5b2\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:61cfdd3\
8-4097-b140-adcf\
-5ede4e586cbb\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:61cfdd38-4097\
-b140-adcf-5ede4\
e586cbb\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:37-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:2d\
1bb855-51a4-9849\
-87b4-5fe5079595\
9f\x22 stEvt:when=\x22\
2020-05-02T17:57\
:59-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\x02\x1f\
\x17\xe9\x00\x00\x01\x0cIDAT8\xcb\xad\xd3\xbf+\
\x85Q\x1c\xc7\xf1\xc3\x1d\x0cJ\x18\x0c\x0aY\x08\xabb\
\x22,\xfe\x04\xc9\xbdHa\x12\x7f\x81\xd5&\x8bA~\
]\x83B\x0cf\xa5nnve2{\x91\xbf\xc2r\
\x9e\xdb\xe3\xe9>7W\x86O\xef\xf3\xf3\xd3\xe7\x9c\xef\
9\x01\x01\xadM*$\x0a\xe9\xce_\x14\xd0\x8d)\xcc\
b\xa6\x01\x93\xf68\x0ai\x83}|\xe0\x01UT\xf0\
\x94a2^\xc1\x17\xe6\x93\xa3\x07\xdca\x05]\xe8D\
\x1f:\xea\xb0\x1fm\xb8\xc2j:\xc15J8B1\
r)\xc3b\x86o\xb8\xc4t\xc0M\x9c(\xc7$y\
<\xc7:\xc6\xb0\x86Wl%\x06%\x9cb9\x87%\
\x9c`3U\x81\x0bl\xa7\x0d\xce\xe2\x86z\xcc3\xf8\
\x91\xa0\x1c/\xa7\x1e\x93#l`\x12\xbbxO\x12\x5c\
c\x11\x87X\xf8\x05\x8f\xf1\x82\x03L\x04\xdcb\x07\xbd\
\x18\xc0P,Y\x96\xc3\xe8\xc1}\xacN\xad\x8c{\xf8\
\xc4#\x9e\xe3\xa3\xc9c5\xae\x9d\x8b\x06\x85\x80\xf6\xe8\
>\x8a\x91\xc8F\x1aD\xcb\xbf~\xa6\x10\x1d\x9bQ\xcd\
\xe0\x1b\xe2\xf5\x00\x81\xa3\x11o\x5c\x00\x00\x00\x00IE\
ND\xaeB`\x82\
\x00\x00\x07E\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
39-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:19-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:19-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:1919319f-9ab7\
-7248-b2fe-365e8\
65f46fa\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:e84372f6-a9ba-3\
b4e-95f2-20b054e\
a4986\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:51c8694\
1-a17b-d142-996b\
-f26f334c75e1\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:51c86941-a17b\
-d142-996b-f26f3\
34c75e1\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:39-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:19\
19319f-9ab7-7248\
-b2fe-365e865f46\
fa\x22 stEvt:when=\x22\
2020-05-02T17:58\
:19-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>Q\x81\
\xcb\xa1\x00\x00\x00\xfaIDAT8\x8d\x9d\xd1O+\
EQ\x14\x05\xf0\xeb\xc9\xa3\x0c$Q\xf4\x90\x91\x81/\
d\xeaK\x18\x98\xc9\x9f\x0c=\xc5G\x90R\xccM\x15\
J\x22%\x13D\xea72\xf1\x0509W\xbb\xe3\xdc\
W\xde\xe0\xb6\xefZ\xed\xb5\xd6\xdegW\xa8\xfa\xf8\x06\
\xea\xff~\xc4c\xa9\xb6\xfec\xd0Ju\x1d\x1fX\xaa\
\xf9\xa6\xc68j\xcdm\xe3.\xd5\xb7\xda\xe4\xcfN\x19\
\xae\xc5\x1bx\xc6B\xc2\xab\xf8\xc4Lljc%\x98\
\x0c\xa6\xba\x85\x17\xcc&<\x823\x1c\xa2\x1dSO\xf0\
\x85\x83\xc0m\xe2\x09\xf3\x09\x0f\xe3\x02\xc7\xf1\x0a\xa3\x89\
8M\x8d\xd7\xd8\xc7\x1a^\x0b\xe2\xa3\xb8j\x85e|\
c1\x91\xe3\xb8\xc1;:A|\x99\x8b\xe3#\xee\xe0\
\x1e\xd3\x09Ob.\xbcM.\xfe\xbdV|\x83]<\
b*pC\xa5\xb1\xe3\xc5r\xa2\x8b\x07L$|\xd5\
\x94\x9cO\x90\x9b\xdc\xe2\xbcWri\x85\xd8\xb0\x17O\
UJ.\x194\xa5\x14\x93\x9b\x0crQO1\xaa\x1f\
'\x04\xda\x03ts\xb6z\x00\x00\x00\x00IEND\
\xaeB`\x82\
\x00\x00\x07S\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
40-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:08-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:08-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:ff789fa1-c77b\
-ef4e-822b-8100c\
a564040\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:deed16e6-4c25-d\
54a-8c1b-b191bc9\
469a2\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:108884d\
0-c3f6-2d46-ad80\
-95cb9fd45b9e\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:108884d0-c3f6\
-2d46-ad80-95cb9\
fd45b9e\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:40-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:ff\
789fa1-c77b-ef4e\
-822b-8100ca5640\
40\x22 stEvt:when=\x22\
2020-05-02T17:59\
:08-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xca\xa8\
\x1f\xde\x00\x00\x01\x08IDAT8\x11\x9d\xc11J\
\x1ca\x18\x00\xd0\xf7\xcd\xfe\xc2Z\xe8\x95,\xc2B\x04\
\xc13\xe8\x01\xc4\xca\xc2J\x8cx\x13\x9b\x94\x01-\xbc\
\x81Z\xeb\x11\x16\xecDYq]w>\x85_\x18&\
#\x01\xf3^L\xa7S\x9f\x1a\xb4\xd8\xc6\x0e^\x11\x08\
}\x89\xc0\x0c\xc7E\xa7U\xed\x22\xf0\x07kH}\x81\
9\x0e\xb1Y\x10H\xec\xe3\x0a\x0f\xb8\xc4\x99\x7f\xdb@\
)H\xd5&\x9e\xf0\x86uUA\xab/\xb0\xc4*\xb2\
`\x0f7\xb8\xc7\x1c\x81V\xd5\xa2\xd5\x17:Q\xf0\x13\
\xcfx\xc3\xc875x\xc4\x0b\xc2\x7fh0B\xa3/\
|-\xfc\xa5\xe8K\xa4N \xf4\x05R\x95E_\x83\
\x06\xaf\xaa\xa5\xa1T-0*\xaaD`\x89g\x1ca\
\x82U\xa4N\xa8\x16\x98\xe0\xa2\xa8B\xe7\x00\xd7h\x90\
\x86\x12c\xfc\xc6yQ-1\xc3)&\xaa4\xd4b\
\x8c\x13\xdc\xfaPt\x0ep\x8d\x91\xa1@\xa2`\x8c\xb9\
*\x0af8\xc6\x0fU\x1aJ\x8cq\x8a;\x9d,\xf8\
\x85-\xac\xa8\xc2Pb\x05\xad*\x90>\xbc\x03\xbd.\
H\xf6LWD\x9f\x00\x00\x00\x00IEND\xaeB\
`\x82\
\x00\x00\x07\x08\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
41-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:17-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:17-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:2aa5434a-8082\
-294f-ab68-decda\
f66531b\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:70cb38c3-655a-8\
745-a62f-41fc7df\
d7c15\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:6952f7f\
1-e2b8-d143-8936\
-52153a744db5\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:6952f7f1-e2b8\
-d143-8936-52153\
a744db5\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:41-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:2a\
a5434a-8082-294f\
-ab68-decdaf6653\
1b\x22 stEvt:when=\x22\
2020-05-02T17:59\
:17-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>X\x82\
2o\x00\x00\x00\xbdIDAT8\xcb\xd5\xd3A\x0a\
\xc20\x10\x05\xd04u\xe3-<RA\xdc\x88 \x8a\
\xb5z.ED\xad\xa2\xde\xa7\xeer\x00O\xe0\x1f\x98\
\x94OH5\x88\x1b\x17\x0f\x06\xd2\xfeN\xa7S\xe3\x9c\
3*S\x5c\x87\xe4\xccRm\x0c\x05|\xc5PZ\x01\
w\xa8\xe1\xdc\xe1\x02[\x18\xf8N9\xe0\x0a7\x98\xc0\
\x1a*\xb2R3h`\xee_\x87\x03\x8e0Nh[\
:(c\x01'Xj\xdd\xd3a\xb1\x5c\xcf\xeaw\x01\
%M:|r\xf4\xba\xff\x09\xc8R\x03\xfc\x10\xf3\xc8\
\x16\xdaOC\x94\x83i\xc2g\xdc\xebn\xb4\x01>y\
\x03\x0f\xd8\xe9\xc6\x85\x1b(\x0e\xf0\x84a\xac\x03Y\xcf\
\x85n\x5c\xd5A6t\x04\xfd\x9f\xfeL<i\x9b\xa8\
\xbd\xef\x05U\x86\xdc\xcfyk\x90\x02\x00\x00\x00\x00I\
END\xaeB`\x82\
\x00\x00\x07\xa4\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
39-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:21-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:21-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:7bae3a42-9ea6\
-0d42-b89a-7c6a0\
e5369d8\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:181a5300-5a74-d\
842-8880-8152f42\
2d9c2\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:26d15ab\
6-f5a8-8c46-a5e6\
-13817d38179b\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:26d15ab6-f5a8\
-8c46-a5e6-13817\
d38179b\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:39-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:7b\
ae3a42-9ea6-0d42\
-b89a-7c6a0e5369\
d8\x22 stEvt:when=\x22\
2020-05-02T17:58\
:21-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>?\x03\
\xb8\x05\x00\x00\x01YIDAT8\xcbu\xd3\xcd+\
\x05Q\x18\xc7\xf1qq\xddr7(\x97\x15\x16\x94X\
\xb0Q\xb2\xf1\x0f\xdcR\x92R6JX\xc8\xc2\xca\x92\
lH)YP\xec\xb8\x0b\x7f\x85H)o\xc5F\x22\
n4I(e\xe9\xed{\xf4\x9bz\x8c3\x8bOs\
^\xe6<\xe79\xe7\x99\x09\xc20\x0c\x90\xd2\xb3\x01\x93\
j;%(S\xbb\x1b\xc3f\xfc\xf7\x1d\xdb\xa9\xc1\x01\
\xbe\xb0\x10\x0b\xdc\x89\x22\xde1b\x83D;\xa5q\x8e\
\x0ddq\x89U\xcdu\xe0Y\xbb\xb7\xe1\x03}\xd1\x06\
\x81Iw\x16\x17\xa8V\x10\x17p\x07\xb7\x18\xd7;\x8b\
8Fc\x94\x85\x0d\xe0\xcc\xe1AA*\x15dBs\
+\xca\xac\xce\x1e\xcf.N\x99L\xeeQo\xe6\x96q\
\x85\x9c\xfa\xa5\xf6\x12\x83\xf8\xa02)*\xc8\x92\x16g\
5Wn\xb3\x8e\xef\xee.l^\xed\x19U\xe4L\xa5\
\xec\xc5t\xec\xc8\x7f\xca\xd8\x8cC\xbc\xa9\x1anl\x14\
U\xe8RF\xaf\x98\xf2\x95\xb1\x02\xd7XS\xff\x14\xdb\
j\xf7(h\x1eM\xf8\xc6\x80\xaf\x8c\xaeTwhQ\
\xdfe\xb3\x8b\x1b\x0cjl\x13{\xfa\xe8\xbcetA\
^\xd0\xaa\xfe>\x86\xd4.\xe0\x04\x19\xdf\x11\xecE\xba\
\xba?\xa1\xdd\xccm\xe1H_lb\x19m\x901<\
\xea\x1fX\xd7\xceiO\xb9\xff\x05\xb0A\xdcO\xf3\xa9\
3g|\x8b\x93\x02\xd8\xd2\xf6\xa36i\xb1\xf3\x03\xf4\
q\xef\x08h\x03a\xd9\x00\x00\x00\x00IEND\xae\
B`\x82\
\x00\x00\x07\x22\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
40-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:07-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:07-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:66724da0-8283\
-d94f-95d9-ffe4e\
91214a0\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:a2649990-acdc-b\
944-bbba-4a4088d\
ce427\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:d8e3b2a\
d-b21e-5a48-bbd5\
-6177e54d8dae\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:d8e3b2ad-b21e\
-5a48-bbd5-6177e\
54d8dae\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:40-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:66\
724da0-8283-d94f\
-95d9-ffe4e91214\
a0\x22 stEvt:when=\x22\
2020-05-02T17:59\
:07-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xba\x82\
)\x15\x00\x00\x00\xd7IDAT8\x8d\xb5\xd3?\x0a\
\xc20\x1c\x86\xe1\xb6\x82\xe2\xbfEt\xd1Ep\x10u\
\x10D\xf0\x00\x1e\xc2\x1b\x09n\x8e\x0e\x8e.\xaez\x01\
A\x5c\x14T\xd0Yq\xe95\xea[\xf8\x15B\x9a\xd4\
,\x0e\x0f-i\xf2\xf1%\xa1^\x14E\x9e.\x0cC\
_y\x9f\xa1\x22\xef\x81>\xf7\xd7\xe2\x05\x22\xec\x95\xb1\
\xc0\x1a\xa0-^\xe2\x8d\x07\x9e\xd8\x99\xe6\x19\x1b`\x85\
\x0b\xba8b\x80-\x0e\xa8\x1a\x1b$\xa9<[\xd8\xa0\
\x81\x824\xe8\xc8\xb75&\xeaVR\x0d\xb46q\xc8\
\x15}\xdb\x1c\xdb\xc2\xa4MM\x02z\xce\xb7\xf0\xef\x00\
\xdf\x1a \xa7\x1f\x88\x9c\x8c\xd5qs\x0a\xb04\xc9\xe3\
\xec\xda\xa0\x1d_\x11F\x18c\x88)^\x99\x01\xca~\
\xe7\xf8\xe0.w\x9f8\xa1\xe9\xd2 \xae[BQS\
6-t:\x03\x17\xa6\xff %+\xe0\x0b:A\xfb\
\xba\xc0\x864\x8a\x00\x00\x00\x00IEND\xaeB`\
\x82\
\x00\x00\x07\x92\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
42-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T18:00:05-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T18:00:05-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:8b039da5-c65d\
-914e-b6d8-04de8\
6ce1411\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:57919ae5-b96a-c\
748-bbdb-bde6f4d\
d7730\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:060f576\
c-1a5c-304a-8f62\
-a956c6b0f45d\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:060f576c-1a5c\
-304a-8f62-a956c\
6b0f45d\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:42-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:8b\
039da5-c65d-914e\
-b6d8-04de86ce14\
11\x22 stEvt:when=\x22\
2020-05-02T18:00\
:05-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>=z\
=\xe4\x00\x00\x01GIDAT8\xcb\x8d\xd2\xbdJ\
\x03A\x14\x86\xe1\xc9\xc6\x1f,l\x8c\x8d.\x22(\xde\
A\xac\xf4\x06RX\xc5\xc2B\x10\x7f\x10\x09b!\x08\
\x82`'\x04A\xb0\xb3\x10\xb5\x10-E\x14\xb4\xd0\xde\
BP\x90h\xa3!\x85\xb0\x8d\x17\xa1\xef\xc87pX\
7\x89\xc5\x132;;\xdf\x9e\x993.I\x12g\x8c\
\xa3\x8aS\x1cc\x1d\x05\xcdE\xa9w\x7f\xd9\xc1.\x1a\
8\xc2\x1a6q\x83\x0f\x94\x9a\x85\xd8\xc5\xaf\x883\xbe\
R\xc6\x17&\xb3BB\xd9\x0d\xb3\xb8\x0by\xe9\xd4\xb3\
\x0a\x1e\x9am\xa1\xaa\xfd\xba\x16zPC1]\x85\xff\
9\xc32\x06q\xa1}_\xe3\x0a\xf7\xd8S5\x97\x98\
\xcd\x0a8\xc1\x06\xba1\x8f%,b\x01+\x98\xd2\x82\
;\x9d\xc7\x9f\x00\xdf\xaa\xdb6[\xf0\xd5\xd51\xa6q\
\xce\x06\x14\xd4\xaai3\xd9\xa1\xb2C\x80\xbf\x17\xe7f\
\x1c\xa5\xdbXR\xab*:\xb0\xf0<\xd6\xe2o\xbc\xe1\
\x00\x03\x9a\xcb\x87\x80\x906\xa1V\xd5\xcc\x01\xd6\x15\xe0\
\xef\xc8*\x0e\xd5\xf2\x91P\x89\xcb\xb8\x1cE\x9dv\xd9\
\xecy_A\xfe\xff6>1\x9a\xbe\xcaQ\x8bC\xec\
\xc7\xbb\xee\x8c\x1fo\xe1\x05}Y/GF\xce\x04\xc7\
*\x7fG\xe3'\xcc\xb96\xeds\xf6\xc00\x84g}\
\xfd\x11\xc3\xff\x0d\xb0[\xec\xc5\x8c\xc2\xdc\x0f\xd0\xf9\xe9\
\xac$\xe6}@\x00\x00\x00\x00IEND\xaeB`\
\x82\
\x00\x00\x07R\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
39-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:23-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:23-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:223a4064-00d8\
-6847-85b3-88431\
7836c0b\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:cac9afaf-9ae9-3\
541-b1d3-2a33bea\
0c498\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:c21aeab\
e-a43d-7849-9cf1\
-5ff5f84fe130\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:c21aeabe-a43d\
-7849-9cf1-5ff5f\
84fe130\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:39-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:22\
3a4064-00d8-6847\
-85b3-884317836c\
0b\x22 stEvt:when=\x22\
2020-05-02T17:58\
:23-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xd5\x99\
\xdf{\x00\x00\x01\x07IDAT8\x8d\x8d\xd3\xbd.\
DA\x18\x06\xe0\xdd\xb5T*\x09Q(D\x14j\xa2\
A\xe7\x02\x84Z\x22\x11\x8bJ+\x91\xa8\xdd\xc76\x1a\
\x5c\x81\xc2O\xad\xa2\xa0\x10\x09\xf2\xf8i$\xae@3\
#\xe3\xcb\x9ec\x8b/s2\xef\xcf\xbcygN\x03\
\x8d\x9ai\xa1Y\xc7\xf9O\xdc\xeb\xbb/\x83,\x98\xc1\
H\x9dI/\xf1@Zg\xf1\x85s\x0c\x05\xac\xd2 \
\x13\xe6\xf1\x86]tq]\x98\xb4\xaa\x0c\xa2x\xbb\xc0\
\xcep\x89\xc1\x98$\x13\xdai\x9d\xc3G\x10\xe79I\
&\x7f\x92\xc4\x93\xdf\xd1)D{X\x0eI.J\x93\
F\x10\xef\x14\xe4\xfdT\xe2#\x16\x8b\xfd\xd3\xd4I;\
'X\xc0+6\x0a\xd2\x01^0\x81u|&^\xc6\
\xbb\xb8\xc2p\x03+\xf8\xc6R\x02\x0f\xf1\x8c\xc9B\xb0\
\x99\x12\xe6$Gx\xc2X&\xac\xa6\x14\xc7\xb8\xc7T\
Qn\xee\xa8\x83\x87\xd4\xc3\x1dFc\x89k\xb8\xc5t\
\xb8\x99\x92\xb3\x85\x1b\x8c\xc7\x12\x9b\x81X\xf9\xf6\x0b\xec\
\xf7\x1a\xa3I\xed\xdf\x179\x95@\xbf\xf3\x0343\xda\
\x9b\xa6\x8b\xcc\xcd\x00\x00\x00\x00IEND\xaeB`\
\x82\
\x00\x00\x07\x16\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
40-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:05-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:05-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:f7d46de4-a69a\
-e747-94fd-9bd72\
388c73e\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:9ffa29c6-2977-1\
549-8946-360a9dd\
59f94\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:a82b017\
a-e5d7-3542-b8dd\
-f8e059b7d108\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:a82b017a-e5d7\
-3542-b8dd-f8e05\
9b7d108\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:40-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:f7\
d46de4-a69a-e747\
-94fd-9bd72388c7\
3e\x22 stEvt:when=\x22\
2020-05-02T17:59\
:05-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xdd\x5c\
~\xb7\x00\x00\x00\xcbIDAT8\x8d\xad\xd21\x0b\
\x81A\x1c\xc7\xf13X)Vd\xb0\x90\x8d\x88\xb0\xd8\
\xa4\xbc\x06J\xb1\x190\x18d\x90W\x82\xc5$&\x93\
U(!\x83Qz\xde\xc6\xe3{\xf5\x1f\x84G\x8f<\
\xc3\xe7\xee\xea\xee~\xfd\xff\xd7)\xd34\xd5?\x94a\
\x18a\xa4\x91@\xf2\x07)\xf8u\xc0\x107\x1cp\xb6\
\xe9\x88+*:\xe0/z(\xa1\x8f6\xba6u\xd0\
C\x5c\x0740\xc7\x18S\x9b&\x98\xa1\xe0H\x0b-\
\xac\xa5\x8a\xa5M\x0b\xacP\xd4\x01y4QE\xed\x07\
uD\x1cia\x84;N\xb8|q\x963o\xff\xc0\
\x87\x10\x02\x16\x822G\x10CT\xe8\xb5W\xc9F\x01\
\x19d-\xe4\xe0\xb1ja \xe5l\xb0\x7f\xb1\xc3V\
J.\xcb%7\x5cB9\xf2\x88\xea)\xf1\x9b\x8f\x01\
\x0f\xf1\xd1\x00S\xd5\x16\x7f\xc0\x00\x00\x00\x00IEN\
D\xaeB`\x82\
\x00\x00\x07=\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
38-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:14-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:14-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:11dd1d5b-b5d7\
-e143-b26f-19b76\
c6a2ef5\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:7883eca1-c148-2\
746-a9b0-df7332f\
2f7d0\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:dfad02d\
1-a46f-6c46-a1da\
-31e87cfbec42\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:dfad02d1-a46f\
-6c46-a1da-31e87\
cfbec42\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:38-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:11\
dd1d5b-b5d7-e143\
-b26f-19b76c6a2e\
f5\x22 stEvt:when=\x22\
2020-05-02T17:58\
:14-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>[\xf2\
\x00\xeb\x00\x00\x00\xf2IDAT8\x11c\xf8\xff\xff\
?\x03>\xfc\xec\xd93\x16(-\x0f\xc4\x8b\x80\xd8\x16\
\xcag\x02\xd1\xc4j6\x00\xe2\xeb@\xfc\x1f\x88S\x91\
\xe5\xf0if\x86\xd2\xce@\xfc\x0e\x88\xe3\x80x1\x10\
g\x22\xcbc\xd3\xc8\x08\xc2Pv$\x10\xbf\x01\xe2\x10\
(\x7f=\x92\x01\x98.\x80i\x84\xb2K\x81\xf8%\x10\
[!\x89\xe16\x00Ms/\x10\xdf\x05bu(\x9f\
\x15J\xaf\xc3\xeb\x02\xa8\xc4r \xbe\x00\xc4\xc20\x85\
H^ZG\xc8\x0b\xfb\x80\xf8\x08\x96X \xda\x80:\
\xbe\x01\xc4\xa6h1\xc1H\x8a\x17R\x81\xf8-\x10\
\x07\xc2\x0c\x81%\x1ab\x5c\x00\xb3\xd1\xfb\x19\x04\xa4\xa3\
\x89\x13\xe5\x02\x98\xbf\x8d\x81\xf8\x09\x10\xd7\x22\xc9m \
h\x00\x9a!r@|\x0b\x88g@\xf9\xab\x818\x8b\
\xd4\xa4\xcc\x07\xc4\xe7\x80x>\x10oF\xf2\x16\x0b1\
\x99\x89\x19)-l\x80f\xa6\x04\xa2\x0d@\xce\xb6P\
6(C\xa9!G-\x00\xe3S\x02\xf0f\xa7E,\
\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x07S\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
41-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:27-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:27-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:b788edbf-e465\
-954b-9a61-3b3a6\
3c0b17c\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:a08c0a17-f23c-1\
c40-a0bf-91622c0\
d6f60\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:e34016c\
6-78a9-ac42-ad55\
-386a18c8ed58\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:e34016c6-78a9\
-ac42-ad55-386a1\
8c8ed58\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:41-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:b7\
88edbf-e465-954b\
-9a61-3b3a63c0b1\
7c\x22 stEvt:when=\x22\
2020-05-02T17:59\
:27-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>o\xc9\
\x9c\xc4\x00\x00\x01\x08IDAT8\xcb\x95\xd2\xbd.\
\x84A\x14\xc6\xf1Y\xbb\xcbV\xb8\x04\xb7\xe0k\xd5\xae\
@B\xaf\x10+\x11\x17\xa0\xd2\xe8]\x83\x8a\x82\x10\x1f\
\xd5\x12\xef6:\x0a\x09\x09\x12\xa5\xfc\xc2%\x88F3\
o2\x99\xec\xbe\xbb\x8a\x93I\xces\xfe\xcf\x99sf\
\x02\xc2\x90\x18\x8bg\x13\xbb8\xc5>\xa6\x10\xfe\x03w\
\xf1\x88m\xf4p\x83\xc6(p\x1d\x05N2\xfd\x09+\
\xa3t\xbe\xc2/\x8e\xb2\x9a\x1e\xd6\xab\xe0F,:\x8f\
\xb9.n\xa3\xb6\x83\x0fLV\xc1\x05.\x12\xad\x8ec\
\xbc\xe2\x0d\xed|\x89\xf9\xcc\x97\x89\xd6\x8ag\x07?\x98\
-\x99~\xf0]\x9c\xbb\xd4\xc6\x13\xf8\x0b\xf3Im\x08\
\xa8%\x0b\x1b\x04oFx)\x85\xf3\x11\xae\xb3k\x97\
\xf0V\x84\x17r85X\xc6{\x5c^\xc0D\xd2\xf9\
{\x10\x9c\x1a\x1c\xe2 \xdbG'\xc2\xedApi\xd0\
\xc4\x03\x16cr\x1a{\xf8\x1c\x06\x97\x06kx\xc1*\
\xce\xf0\x8c\xfb\xaak\xe7\x06\x1b\xf1\xab\x16\xb1\xf3\x5c\x9f\
\xbfQi\xd0\xc2LV\x5cK\x9e\xb72\xfe\x00O\xa3\
\xe0\x80\xf6\xc9\x1dU\x00\x00\x00\x00IEND\xaeB\
`\x82\
\x00\x00\x07\x1d\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
39-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-07T19:00:59-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
7T19:00:59-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:629c6f97-efef\
-0743-bf78-faf00\
1161913\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:333c7819-ad90-9\
846-91de-cab6abf\
2ebab\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:a388297\
3-1e1d-a045-88c7\
-5e755708fd64\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:a3882973-1e1d\
-a045-88c7-5e755\
708fd64\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:39-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:62\
9c6f97-efef-0743\
-bf78-faf0011619\
13\x22 stEvt:when=\x22\
2020-05-07T19:00\
:59-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xacr\
\xe7\xb6\x00\x00\x00\xd2IDAT8\x8d\xc5\xd31J\
\x041\x14\x87\xf1_f#.\x82\xde\xc4R\x0f\xb0\xae\
'\xf0\x06\xb66\xdeD\xef`c\xad\x9d`+X\x8f\
\xe0^\xc0\xca\xc6~\xdd\x89\x85\x09\x86a\x06e\xa70\
M\xf8\x93|_\xde{\x90\x90R2e5\x93h\xc4\
\xb6m\x1btS*\xe80\xdb\x82\x0dE\x00\x9b-\x04\
\xa9\x16\xec\xe3\xf8\x8f\xe0A}\xb7\x08\xe6\xb8\xc1\xe5/\
p\xc0-.J\x8eY\xf2\x8e3<\xe4\x83\xeb\x0a\xda\
\xc1:\xc3\xf79\x9f\x976\xa2\x9f!\xbe\xe0\x04\x8f=\
\xc9:\xefw\xd8\xc5iUM\x8a9l\x10\xf1ZI\
\x02\xae\xaa\x97\xe7XV\xadw2T\xd6\xe7\x80\xa4\xc1\
\x11\xf6\xb0\xe8\xc3}A_\xb2\xc0\x13\x9e\xc7\xe0!A\
\x91\xcc\xb0\xc2!>\xc6\xe01\x01\xdf3i\xf0\x96s\
\x18\x82!\xfc\xfbo\xfc\x02d\x060\xdc\x02*\xb0\x83\
\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x07p\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
39-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:42-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:42-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:23a0a7c9-fa1c\
-2c42-96a2-79b36\
294488e\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:af68db94-569c-9\
643-bbe9-7588463\
856c5\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:080cea2\
c-631a-9144-8573\
-4aed99ee2fdb\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:080cea2c-631a\
-9144-8573-4aed9\
9ee2fdb\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:39-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:23\
a0a7c9-fa1c-2c42\
-96a2-79b3629448\
8e\x22 stEvt:when=\x22\
2020-05-02T17:58\
:42-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xff\x9f\
\xc9Y\x00\x00\x01%IDAT8\x8d\x95\xd3M+\
\xc5A\x14\xc7\xf1\xbf\xc7{\xc5B\x1e\xca\xc3F\xa2n\
,\x88\xf22,,\xece\xa5(\x0b\xa1\xa4l-<\
d\xa3D\xb1\xb2\xf0>\x90\x88R\xba\x22\x16\xeac\xe1\
\x05\xd8aan\x8d\x89\xdc;u\x9a~s\xe6\xf7m\
\xe6\xcc\x99\x0c\xf5\x98G72T\x87\xf9\xb7h\xc0\x22\
\xf6\xb0\x8b\xcd\x0cMx\xc0X\xb4\xb1*1\x96t+\
^\xb0\x8fU|fh\xc4\x19\x86\x90C\xcf/\xc6\xd2\
\xdc\x82Kt\x05],\x01\xaeP\xc0$>\xb0\x9c@\
b\xc0\x0d\x06\x82~\x8e\x01}\xa1\x16\xa78\xc7F\x04\
\xa9I\x00\x85\xa0\x9fb@/\x96\xb0\x85<\x1e\xb1\x93\
\xd4\xe2_\xc0\x02\x8eB\xb2\x1d\xb78\x88\x00\xad\xe5\x00\
bC.\x14\xed8\xe8\xb6J\x01=\xb8\xc3z%'\
8\x0c\xc9\x11\xdfc\xa5\x92+,b\x13\x83x\xc3\x5c\
\xd8X[N\x11\xfb1\x8bg\x141\x15=au9\
\x80\x02&\xf0\x8aq?\xdf?m\xa4AI#]`\
8,6'\xa6\x14p\x8d\xce\xa0\x1fJ\x80\x1b\x8cF\
\x86\xf4G\xc6\x80{\x9c\xf8n\xb2\xf7\x0cu\x98F\xc7\
\x1f\xe68\xf2\x98\x09\xe6m\xac}\x01\xfd\xfd\xfa\x8c\x1d\
\xd0N\xa2\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x07\xa4\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
41-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:33-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:33-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:4d4993ce-3759\
-7d47-941a-783e8\
51cb32c\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:bdc57426-9d1a-a\
d46-8439-7b0f160\
e4dd6\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:ae71151\
1-69d7-fb4b-b1ef\
-853f969075db\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:ae711511-69d7\
-fb4b-b1ef-853f9\
69075db\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:41-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:4d\
4993ce-3759-7d47\
-941a-783e851cb3\
2c\x22 stEvt:when=\x22\
2020-05-02T17:59\
:33-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?> v\
\xbc\xb5\x00\x00\x01YIDAT8\xcb\x85\xd3\xcb+\
GA\x14\xc0\xf1\xf9\xfd\x94W\xbfH\x9e\x0b\x14\x11I\
d#\x8a<\x96\xecX(K?YK!\x0bJJ\
Y))\x91\x0d\x85\x08e!Q\x16V\x8a\xf2\x08y\
DI\xba\x91\x7f\xc2w\xea\x8c\x8e\xc9\xfdY|\xba\xe7\
\xcc\x9d{\xee\xb9sgL\x10\x04\x06Y\x18B\x1cm\
H\x97q+\x82\xa8\xca\x7fqA>\xd6q\x803\x5c\
I\xde\xa8&'%*\xa0%\xa3\x093x\xc1\x0e\x0a\
\xc3\x8a\x98\xb0\xd6D6\x16\xf0\x8e\x8e\xbf\x8a\xb8\xc0\xbe\
\xe1\x10k\x18E\xb5W\xa8\x1b\x1f\xe8\xf5\x8b\xb8\x09i\
\xe8\xc20\xf6\xf0\x88U\xe4\xa9\x22\xad\xf8B\x8b.\x12\
\xd6z\x09V\xf0\x8az5\xde\x87g\xc4\xdc\x1fr7\
RP\x8b\x1c\xaf\xd0\x00>Q\xa7\xc6v1'q\xd4\
\x0d\x16\xe1\x02w\x98G\xaez`\x10\xb7\xea\xadv}\
\x9eP\xac?\xc1~O\x06j\xb0\x85{\x94\xab\x22G\
\x98T\xf9>\xc6\x12\xad\xc1\x94l\xa6T\xc9\x9b\xa5\xbb\
L\xc9\xe3\xb2\xe9~\x0a\x8c\xcb\xe2l\xabu8\x96\xed\
m\xa4\xd09\xda%\xaf\xc2\xb5\xdd'6\xe9\xc4\x8d\xb4\
o\x7f\xe1\xacL\xea\xc7\xa9\xeaj\x03\xd3\x12\x17\xe0\x01\
\x956\xe9\xc1\xa5\xdcX\xc6\x92\xc4\x0d\xd8\x94\xc3d\xf3\
\x11LH\x1c\x93n+\xdc\x02.\xe2\x0d'(\xfbg\
{\x87\x9e\x85Ru\x8c#\xde\xd5\xc5~n\xbe\x01\x08\
\x8e\xe9\xc2R\xfa\x06\x83\x00\x00\x00\x00IEND\xae\
B`\x82\
\x00\x00\x07k\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
39-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:24-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:24-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:34b3c532-3c9c\
-ec4d-8f36-38fb0\
91c7328\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:938564cf-9394-8\
845-b33c-225b3c9\
7ad9f\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:37b2195\
a-7373-6d4d-b917\
-cbbf6160523e\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:37b2195a-7373\
-6d4d-b917-cbbf6\
160523e\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:39-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:34\
b3c532-3c9c-ec4d\
-8f36-38fb091c73\
28\x22 stEvt:when=\x22\
2020-05-02T17:58\
:24-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xd5\xa7\
)F\x00\x00\x01 IDAT8\xcb\x8d\xd3\xbd.\
\x04a\x14\x06\xe0o\xed\x86j\x05\x95;\xa0\x92\x10*\
Q\xb8\x00\x85NTD\xe1\x1e\xa84\x1aQ\x8b\x9f+\
X\xd9\x04\x0d\x89;\xb0\x94\xa8(\x9f\xc4]h\xce\xc8\
1\xd9e\x8a73sf\xbe\xf3\xfe\x9c3\x05%0\
\x96\xee7p\x8c\x0b\x9c\xe1\x00\xcb\xc3\xbe\xad\x17\xd61\
\xc03.q\x88#\xf4\xf0\x8e>f\xf3\x99|x\x07\
_\xd8OL\x193\xa1\xe8\x03sU\x93\x92\x98\xbf\xb0\
R\x939\x86\x16\xda\xa9~\x887t\xb3\x85Ab\x1e\
\x1f\xa1\xa0\x95\xd4\xde\xe1\xa4j\xb0\x11\x9eK\x03T\x0d\
\x96\x22\x93\xa9\x12i_\x0d\x99\xc4(\x15\x05\x13x\xc1\
j\x89\xb4\x0f\x1a6\xc8\xb8\xc5n\x899\x1f\xd5\x18\x9a\
\xe0\x01[%\xd8{Ql7\xb4\xd0\xc5+\x16Jl\
\xd8{\xcc\xf9?\x15\x9d\xb8n\xe2\x09\x9d\xeaE\x1f\xe7\
C\xe6\x9f\x99;\xe9\xf9\x15{y\x0ff\xf1\x99\xc2\xfc\
\x0b\x8f\xc9\xf2\xafU\x9e\x0f+\xb7X\x8cQ\x95\xe4y\
36\xf0:\xa9i\xd5\x17d\x12\xa7\xd1\xe8\x057\xb8\
\x0f\xc9\x83Jv\xce\xaa\x8c\xf8\x9d\xa7\xb1\x16\x07\xb6\xb1\
P\xcb\xe0'\x9fohN\xe0y\x0c\xc8\xdd<\x00\x00\
\x00\x00IEND\xaeB`\x82\
\x00\x00\x07h\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
39-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:58:42-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:58:42-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:b0c637c2-599e\
-2c48-9f85-901a6\
0e2fb8e\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:e8ee2f64-1059-7\
048-ae3e-466e41a\
5d0d2\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:b2b5c98\
3-756d-e449-a359\
-6e1a33d2f574\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:b2b5c983-756d\
-e449-a359-6e1a3\
3d2f574\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:39-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:b0\
c637c2-599e-2c48\
-9f85-901a60e2fb\
8e\x22 stEvt:when=\x22\
2020-05-02T17:58\
:42-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xb9\xae\
\x02\xf7\x00\x00\x01\x1dIDAT8\xcb\x9d\xd3\xbd+\
\xc5a\x18\xc6\xf1\xdf\xf1\x92L\x14)+eT\xb2(\
\xc3\xf9\x1f\x8cH\x89\xe18%g\xf4\x1f\x98e&\x83\
\xe4\xfd5\x8b\x81\x89\x81\x88\x89\xc5`\xf9\xd8L\xfe\x00\
\xcb\xfd\xd3\xd3\xe9wt\x18\xbe\xdd\xddO\xf7u=w\
O\xd7\x93\xa1\x8a\x0b\xec\xe0\xa0Ivq\x8cr\x862\
\x161\x8biLa\xb2\x01S\x98\xc1<\x160\x94!\
e\x10#\x18\x8d\x9a2\x8aa\xf4\xa7\x9a\x0c\xad\xd1\x8c\
\xe1\x0bO\xb8\xc1\x03\xee\x82G\x5c\xe3\x03\xb7\xe8\x0cM\
)\xbd\xbd\x1dG8\xac\xdb*\xa7\x07\xafX\xce\xc5\xf9\
\x06?M\xd4k\xecG\xdf\x12\xb57\xc4+\xf5\xc6i\
\x93\x0fw\xe0\x1e\x1b\xd1w\xe1\x19\xab\xc9l\xa9\xc8 \
}\x8f\xee\x10\xad\xe1\x0a\x9bE\xe2\x22\x83\xd4\xa4\x0f\xef\
\xd8j$\xce\x0dJ\x05\xb4%\x0f\xd7\x91\x18\xa73-\
\x8d6\xf8\x13YD\xf92\xa2yZ\xc79\xce\x0a\xce\
O\xe2\xbc\x9cE\xb6O\x22\xa2\xd5\x88\xe8oT1\x87\
\x17,f\xf1\x89&\xfe\xb1\xfe:j\xb9\xc1\x1b\xb6#\
\x89\xcd\xb0\x87OT2\x8c\xa3\x86\xa5\xa0\xd6\x04K\xa8\
`\xe0\x1b\xdc\xaf\xfef\xc2\x91\xf6\x98\x00\x00\x00\x00I\
END\xaeB`\x82\
\x00\x00\x07p\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
40-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:10-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:10-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:2a2d633b-7d65\
-074c-897b-6be88\
1a7840a\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:a8690234-429f-0\
648-a947-0ec23c2\
a4939\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:62d9ef5\
5-bc66-1047-97f5\
-2303002ab930\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:62d9ef55-bc66\
-1047-97f5-23030\
02ab930\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:40-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:2a\
2d633b-7d65-074c\
-897b-6be881a784\
0a\x22 stEvt:when=\x22\
2020-05-02T17:59\
:10-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xf0I\
\x12}\x00\x00\x01%IDAT8\xcb\x8d\xd3\xbb.\
\x84Q\x14\x05\xe03.\x89\x11\x09*\x1e@x\x08\xdd\
\x14&D\x22L+\x12\xbdP\x12\xb5Bh\xd4*\x13\
\x09\xe2\x01$z\xb7N$\xae\xa5\xe6+x\x09\xcd\x9e\
\xe4\xf8sf(N\xce\xbfO\xf6\xda{\xaf\xb5\xd7\x9f\
\x90\xba\x9c\xbe\xec{\x1d'8\xc5\x0eF;9\xdd\xc0\
\xfdq\x0f\xe3\x0aO\xd8\xc5&\xae\xf1\x8ci\xa4\x12\xb8\
\x16w\x1dw\xb8,\xe4\xec\xe1\x1e\xb5nc\x8f\x07\xf8\
\xac\x07\xc5G\xcc\x95:'\x1c\x17:\xefc5\x8b\xdb\
\xd8\xaer\x9e\xc4F\xf0\xcb\x0b\x1e\xe2\xa3\xc3;\xce-\
Z\xf9\xd8C1\xf6y\xa5s\x07<\x91\xbd\xad\xe1\x15\
\xf5\x94\xa9}\x83\x8b\x7f\x80\xe7\xf1\x8dF\xbe\x85\xab\x02\
\xe7\x12\xb8\x19\xe0\x95\xdc\x07\xeb\xb1\xe7\xbf\xc0\xb3\xf8B\
+\xd7-\x85\xc3v\xb3M\x1c\xe1\xbd\x02\x9e\x8a\xee\x8d\
\x8a\xe8)\x85=\xb7\xe2a\x04\x07\x15\xb5\x17\xf1\x89\x99\
\x88\x07\xf3iSx\xfb\xba\x8bY\x16b\xec\xe5\x88\x07\
\xaa9\x09cx\x09{\xa6\xca\xaa\xbe\xb0T\xf8\xb9~\
\x15H1\xf2C\xd8\xb3\x1d&y+q.\x15\xe8\xcb\
\x04lb;\x94\xae\xf7\xea\xdc9?-,\xe7\x12y\
\xb6TO\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x07\xb6\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
35-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:57:43-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:57:43-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:617745c7-71e8\
-ad43-9150-b4ee2\
699f492\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:e6ef899c-e7bc-3\
e43-85f2-b251d3d\
e0a96\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:fd7cbab\
c-fe4e-fc44-9e97\
-052f9af8263e\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:fd7cbabc-fe4e\
-fc44-9e97-052f9\
af8263e\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:35-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:61\
7745c7-71e8-ad43\
-9150-b4ee2699f4\
92\x22 stEvt:when=\x22\
2020-05-02T17:57\
:43-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>a\x8e\
\x22U\x00\x00\x01kIDAT8\x8dm\xd3\xcd+\
Da\x14\xc7\xf1;\xc3`\x10Ey[\x11\x0b\xca\xc2\
\xd6H\x16,X[\xc9\x82\x22JD\x94\x0d\xcd\xd6\x94\
2)/k\x9a\x92\xb7\x92\x22o\xc9NJ\x22%)\
E2\xfe\x0d\xdfS\xe7\xe8\xb8\xcd\xe2\xd3\xf3<\xf7\xce\
\xfd=\xcf=gn\x90\xcdf\x03\xe4\xe9(\x86\xb1\xa0\
\xf3|\x1dk\xb1\x8eV]G\x10\x95\xb9-d,\xc1\
\x16>\xd1\xe5\x02\x8d\x84\xfe`\xc6]\x8b\xd8\xa4\x17\xef\
\xc8\xa0\x14\x09\xac`\x07\xdbXD\x15\x9a\xf1\x84s4\
\xd8\x09:\xf1\x86~\x94\xe1\x14/\xd8\xc08fq\x80\
\x0f$u\xc3M\x5c\xcb\xa9eQ\x8eb\x1d\xe5\x14\xab\
\xae&\xd5(\xd4y#\xee\xf55e]'5\xf2\xef\
(\x89\xe9\xd0{\xefa\xd4\xad\xa5\xa8\xcf\x98\x0e\xd7\xa0\
[\x8f\x1d\x0bU\xff\x18\x13:\x8f\xeb\xd8\xa7!1\xab\
\x81X\xc3\xb2{\xd8:s\xa8m\x0d\x5c\xb8\xb8C\x8f\
\x0f\xd8\xc7T\x8e\x80\x0b\xa4\xdc\x83Q\x1dO,\xd8n\
\xecb.G@B\x0b\x9b\x0a]\xbf\xc4\xa0\x0fHj\
\xbf\xfd\xbf\xd2v\xab\xc7-\xc6t]\x89G\xb4\xf9\x80\
&|i\xab|\x11-\xac\x005:\x9f\xc7\x8d\xef\x82\
\xed\xb4\xa4\xc5\xb1\x87\xc3\xef-:\xf0\x8d\xf6p\x1bM\
F[$\x15.r\xd7+\xb4F\xf2-\x0c\xf9\xe0\xbf\
\x8f\xc2\xfdxRC\x1ep\x843\xbc\xe2JO\xf0\xef\
T~w\x1f\x12\xd7\x0e\x8c`\x00-\xee\x9e\xff\xf4\x83\
_.+\xedi\x06K\x84\x14\x00\x00\x00\x00IEN\
D\xaeB`\x82\
\x00\x00\x07^\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
41-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:15-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:15-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:8d4ef758-54b4\
-154b-88d2-cf590\
eb85ab1\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:89527441-dcb9-3\
347-adb7-4edd289\
3dacd\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:a72a768\
d-ab78-fb43-912c\
-21f4d307b787\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:a72a768d-ab78\
-fb43-912c-21f4d\
307b787\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:41-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:8d\
4ef758-54b4-154b\
-88d2-cf590eb85a\
b1\x22 stEvt:when=\x22\
2020-05-02T17:59\
:15-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\x8cf\
(\xc6\x00\x00\x01\x13IDAT8\xcb\x9d\xd3\xbdJ\
\x03A\x10\xc0\xf1\xf3\xfc\x08X)\x04\xa3\x09\xf8\x18\x12\
Q\x04\xc5\x07H\x13$\x85b\x12,\xd5\xdaRDC\
\xf2\x0e~4\xa2\x85e*A\x22\x8a\x11\x14\x0b\x1b\xad\
\xac\xc4\xab|\x87\xf8_\x9c\x85ar\x17\x0f\x0f~\xec\
\xdd\xed\xee\xec\xec,\x1bDQ\x14\xa40\x94\xd4\xf7\xd7\
\xc4\x16\xca\xf2>\x9c6\x80\x1f\xb8\x8fO|`))\
\x88\x9d<\x22\xedz\xf4\xfbL\xa0\x82/\x14\xe2\x82\xc4\
M\x9e\xc77\x16T\xdf\x11^\xd5\x98\xd0\x06\xf0?f\
e\xe5\xaa|g\xd4\x8aWh\xc7m\xc1Wx\x14w\
\xd8\x1bP\xd4\x1b\x1c\xe8\x93\xd1\x9d\x17R4\xb7\x85e\
\xacH\xeb\xac\xa2\x88M\xf4\xb0\xe63\xd7\x95?\x94\x0c\
\xee\xd1\xc5\x83\xf1\x88[tP\xf2Y\xe8-\xb86\x8f\
\x1c\xa6\x12LK\xdbw\x0ac8\xc6\x1b\x9e\xf0\x92\xe0\
Y\xc6\xd4u\x06N\x16\xefX\xc4\xb8\xac\x925fd\
l\x03\x976\xc0\xa4Dnb\x0b;\xd86v\xb1\x81\
k\x9c\xda\x22:5\x9cK\xe7\xd9\x00'\x98\xb3\x19\xfc\
\x9b\xbd\xb2i\x84\xfaz\xff\x00R\xb4\xe7\xaaCq\xaf\
\x06\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x07x\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
40-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-02T17:59:04-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
2T17:59:04-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 | |
'size:9.8pt; color:#a5c261;">\'type\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; ' \
'color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'' \
'pyNodeMulti\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </spa' \
'n><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'label\'</span><span sty' \
'le=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'' \
'Consolas\'; font-size:9.8pt; color:#a5c261;">\'PyNode Multi:\'</span><span style=" font-family:\'Co' \
'nsolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-siz' \
'e:9.8pt; color:#a5c261;">\'toolTip\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; ' \
'color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'' \
'This is a tip\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </s' \
'pan><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'color\'</span><span s' \
'tyle=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: [</span><span style=" font-famil' \
'y:\'Consolas\'; font-size:9.8pt; color:#6897bb;">10</span><span style=" font-family:\'Consolas\'; fo' \
'nt-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; col' \
'or:#6897bb;">20</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </s' \
'pan><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">30</span><span style=" ' \
'font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">]</span><span style=" font-family:\'Conso' \
'las\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9' \
'.8pt; color:#a5c261;">\'share\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color' \
':#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</' \
'span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style="' \
' font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'save\'</span><span style=" font-family' \
':\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; fon' \
't-size:9.8pt; color:#8888c6;">False</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; c' \
'olor:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'t' \
'ext\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span ' \
'style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'\'</span><span style=" font-fam' \
'ily:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; ' \
'font-size:9.8pt; color:#a5c261;">\'placeholderText\'</span><span style=" font-family:\'Consolas\'; f' \
'ont-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; co' \
'lor:#a5c261;">\'Placeholder\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#' \
'cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'errorIf' \
'Empty\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><spa' \
'n style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</span><span style=" font-' \
'family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'' \
'; font-size:9.8pt; color:#a5c261;">\'buttonLabel\'</span><span style=" font-family:\'Consolas\'; fo' \
'nt-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; col' \
'or:#a5c261;">\' > \'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832' \
';">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'buttonToolTip' \
'\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span sty' \
'le=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'Get Selection\'</span><span style=' \
'" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Co' \
'nsolas\'; font-size:9.8pt; color:#a5c261;">\'checkExisting\'</span><span style=" font-family:\'Conso' \
'las\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9' \
'.8pt; color:#8888c6;">False</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9' \
'b7c6;">}<br /><br /></span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#808080;">' \
'# IntSpinner<br /></span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">{<' \
'/span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'type\'</span><span ' \
'style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-famil' \
'y:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'intSpinner\'</span><span style=" font-family:\'Co' \
'nsolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-siz' \
'e:9.8pt; color:#a5c261;">\'label\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; co' \
'lor:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'In' \
't Spinner:\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span' \
'><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'toolTip\'</span><span st' \
'yle=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:' \
'\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'This is a tip\'</span><span style=" font-family:\'C' \
'onsolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-si' \
'ze:9.8pt; color:#a5c261;">\'color\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; c' \
'olor:#a9b7c6;">: [</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">10' \
'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style' \
'=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">20</span><span style=" font-family:\'C' \
'onsolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-si' \
'ze:9.8pt; color:#6897bb;">30</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a' \
'9b7c6;">]</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><s' \
'pan style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'share\'</span><span style="' \
' font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Con' \
'solas\'; font-size:9.8pt; color:#8888c6;">False</span><span style=" font-family:\'Consolas\'; font-s' \
'ize:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#' \
'a5c261;">\'save\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: <' \
'/span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</span><span sty' \
'le=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'' \
'Consolas\'; font-size:9.8pt; color:#a5c261;">\'max\'</span><span style=" font-family:\'Consolas\'; ' \
'font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; c' \
'olor:#6897bb;">99</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, <' \
'/span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'min\'</span><span s' \
'tyle=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family' \
':\'Consolas\'; font-size:9.8pt; color:#6897bb;">0</span><span style=" font-family:\'Consolas\'; font' \
'-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color' \
':#a5c261;">\'value\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">' \
': </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">0</span><span styl' \
'e=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'' \
'Consolas\'; font-size:9.8pt; color:#a5c261;">\'step\'</span><span style=" font-family:\'Consolas\'; ' \
'font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; c' \
'olor:#6897bb;">1</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">}<br' \
' /><br /></span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#808080;"># FloatSpin' \
'ner<br /></span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">{</span><sp' \
'an style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'type\'</span><span style=" f' \
'ont-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Conso' \
'las\'; font-size:9.8pt; color:#a5c261;">\'floatSpinner\'</span><span style=" font-family:\'Consolas\'' \
'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt' \
'; color:#a5c261;">\'label\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9' \
'b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'Float Spi' \
'nner:\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><spa' \
'n style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'toolTip\'</span><span style="' \
' font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Con' \
'solas\'; font-size:9.8pt; color:#a5c261;">\'This is a tip\'</span><span style=" font-family:\'Consol' \
'as\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.' \
'8pt; color:#a5c261;">\'color\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:' \
'#a9b7c6;">: [</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">10</spa' \
'n><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" fo' \
'nt-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">20</span><span style=" font-family:\'Consol' \
'as\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.' \
'8pt; color:#6897bb;">30</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6' \
';">]</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span s' \
'tyle=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'share\'</span><span style=" font' \
'-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas' \
'\'; font-size:9.8pt; color:#8888c6;">False</span><span style=" font-family:\'Consolas\'; font-size:9' \
'.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c26' \
'1;">\'save\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span' \
'><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</span><span style=" ' \
'font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Cons' \
'olas\'; font-size:9.8pt; color:#a5c261;">\'max\'</span><span style=" font-family:\'Consolas\'; font-' \
'size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:' \
'#6897bb;">99</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span' \
'><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'min\'</span><span style=' \
'" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Co' \
'nsolas\'; font-size:9.8pt; color:#6897bb;">0</span><span style=" font-family:\'Consolas\'; font-size' \
':9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c' \
'261;">\'value\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </s' \
'pan><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">0</span><span style=" f' \
'ont-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Conso' \
'las\'; font-size:9.8pt; color:#a5c261;">\'step\'</span><span style=" font-family:\'Consolas\'; font-' \
'size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:' \
'#6897bb;">0.1</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">}<br />' \
'<br /></span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#808080;"># CheckBox<br ' \
'/></span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">{</span><span styl' \
'e=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'type\'</span><span style=" font-fam' \
'ily:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; ' \
'font-size:9.8pt; color:#a5c261;">\'check\'</span><span style=" font-family:\'Consolas\'; font-size:9' \
'.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c26' \
'1;">\'label\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </spa' \
'n><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'Check Box:\'</span><spa' \
'n style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-fam' \
'ily:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'toolTip\'</span><span style=" font-family:\'Con' \
'solas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size' \
':9.8pt; color:#a5c261;">\'This is a tip\'</span><span style=" font-family:\'Consolas\'; font-size:9.' \
'8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261' \
';">\'color\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: [</spa' \
'n><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">10</span><span style=" fo' \
'nt-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consol' \
'as\'; font-size:9.8pt; color:#6897bb;">20</span><span style=" font-family:\'Consolas\'; font-size:9.' \
'8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb' \
';">30</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">]</span><span s' \
'tyle=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family' \
':\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'share\'</span><span style=" font-family:\'Consolas' \
'\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8p' \
't; color:#8888c6;">False</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc783' \
'2;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'save\'</span' \
'><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" fon' \
't-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</span><span style=" font-family:\'Cons' \
'olas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:' \
'9.8pt; color:#a5c261;">\'value\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; colo' \
'r:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False<' \
'/span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">}</span></p></body></' \
'html>'
browser = QtWidgets.QTextBrowser()
browser.setWordWrapMode(QtGui.QTextOption.NoWrap)
browser.setText(txt)
browser.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding)
browser.setMinimumHeight(500)
body_lwt.addWidget(browser)
def wrapping_label(self, txt):
"""
Creates a label with rich text format that's properly aligned and set to wrap.
Args:
txt (str): The text you want to pass in.
"""
lbl = QtWidgets.QLabel(txt)
lbl.setTextFormat(QtCore.Qt.RichText)
lbl.setAlignment(QtCore.Qt.AlignTop)
lbl.setWordWrap(True)
return lbl
class EditScriptListDialog(QtWidgets.QDialog):
"""
Custom dialog for adjusting the scripts being sourced in the scratch paper window
"""
    def __init__(self, parent, data):
        """
        Initial call method.

        Args:
            parent (na_scratch_paper.ScratchPaperWidget): The parent widget
            data (dict): Data being passed in to the widget {(str) name: Tab Label, (str) path: Path to the script,
                (list) excluded: Buttons to Exclude from the UI}
        """
        super(EditScriptListDialog, self).__init__(parent)
        self.setWindowTitle('Edit Script List')
        # Script-list data this dialog edits; keys described in Args above.
        self.data = data
        # Build the static widgets first, then fill the table from self.data.
        self.create_base()
        self.populate_table()
def create_base(self):
"""
Creates the Main UI elements.
"""
conditional_enable_btns = []
main_lwt = QtWidgets.QVBoxLayout()
main_lwt.setContentsMargins(20, 20, | |
import tkinter as tk
import sys
from tkinter import filedialog
import random
import numpy as np
import pandas as pd
import math
import seaborn as sns
sys.path.append('Portplanering')
sys.path.append('Bilbokning/src')
from bilbokning import calculate_carriages
# Names of the supported heuristic search strategies (values offered in the
# GUI option menu; passed to run_heuristics via chosen_heuristic).
HEURISTICS = ['local_search',
              'simulated_annealing',
              'variable_neighbourhood_search',
              'tabu_search']
# Names of the neighbourhood moves a heuristic may switch between.
NEIGHBOURHOODS = ['swap_port',
                  'swap_time',
                  'move_port',
                  'move_time']
# Zone index -> display label. Trailing spaces/tabs align columns in the
# plain-text solution output — do not strip them.
zone_dict = {
    0: 'TÄLT ',
    1: 'FRIST ',
    2: 'MPFTG\t',
    3: 'MPBVV\t',
    4: 'MPJÄR\t',
    5: 'HPALL\t',
    6: 'ADR ',
    7: 'ENTEB\t',
    8: 'ENTST\t'
}
# Function to change orderfile- Not to be used during testing
def browse_files():
    """Open a file-selection dialog and display the chosen path in label `w`.

    NOTE(review): relies on the module-level label widget `w`; assumes the
    GUI has been built before this callback fires.
    """
    chosen_path = filedialog.askopenfilename(
        title="Select a File",
        filetypes=(("Text files", "*.csv*"),
                   ("all files", "*.*")))
    w.configure(text="File Opened: " + chosen_path)
#----------------------------------FUNCTIONS-----------------------------------
# Global state shared between the GUI callbacks below.
#
# NOTE: the original code had `global running` / `global best_cost` / etc.
# here, but a `global` statement at module scope is a no-op in Python, so
# those lines were removed. `best_cost`, `best_solution`, `COSTS1` and
# `COSTS2` only come into existence once portplanering() assigns them (it
# declares them `global` inside the function body).
running = False  # True while the heuristic search loop is allowed to run
# Function when portplanering() is running
def portplanering():
    """Run the selected heuristic search for the port-planning problem.

    Starts from a random feasible solution, then repeatedly calls
    run_heuristics() while the module-global `running` flag is set, updating
    the GUI text box `T` with the current cost after every iteration.
    Stops after 200 iterations or when `running` is cleared externally
    (see stop_program()).

    Side effects: rebinds the module globals best_cost, best_solution,
    COSTS1 and COSTS2.
    """
    global running
    global best_cost
    global best_solution
    global COSTS1
    global COSTS2
    COSTS1 = []
    COSTS2 = []
    # Imported lazily so the GUI can start even if the solver packages are
    # slow to load.
    from heuristic import run_heuristics
    from generate_solution import generate_random_solution
    from transportproblem_pulp import transportproblem
    # Generate a feasible solution
    y = generate_random_solution()
    # Calculate the current cost
    cost = transportproblem(y)[0]
    best_cost = cost
    best_solution = np.copy(y)
    # Initial constants for SA and Tabu search
    temp = 1000
    tabu_list_max_len = 10
    # Initial Tabu list for tabusearch
    tabu_list = []
    # Insert an initial word into the text
    T.insert(tk.END, 10)
    # Set neighbour to the chosen one through gui.
    neighbour = chosen_neighbour.get()
    local_opt = False
    # running == True whenever the search for a heuristic is on
    ctr = 0
    while running:
        ctr += 1
        # Start a heuristic iteration
        cost, y, local_opt, best_cost, best_solution, temp, COSTS1, COSTS2, tabu_list, tabu_list_max_len, neighbour = \
            run_heuristics(y, cost, chosen_heuristic.get(), neighbour, local_opt, best_cost, best_solution, temp, COSTS1, COSTS2, tabu_list, tabu_list_max_len)
        # Remove the previous output and insert the current cost
        T.delete("1.0", "end")
        T.insert(tk.END, cost)
        # Generate a new random neighbourhood if condition is fulfilled.
        if local_opt:
            # NOTE(review): randrange(2) only ever picks the first two
            # neighbourhoods ('swap_port'/'swap_time'); confirm whether
            # randrange(len(NEIGHBOURHOODS)) was intended.
            neighbour = NEIGHBOURHOODS[random.randrange(2)]
            local_opt = False
        m.update()
        if ctr == 200:
            # BUG FIX: was `running == False` — a comparison used as a
            # statement, which never cleared the flag. Assign it so later
            # callbacks see the search as stopped.
            running = False
            break
def save_pic(cos, colname, filename):
    """Plot the first 199 cost values and save the figure as PDF and PNG.

    Args:
        cos: Sequence of cost values to plot.
        colname: Column label(s) assigned to the plotted series.
        filename: Output path without extension; '.pdf' and '.png' are added.
    """
    frame = pd.DataFrame([cos]).T
    frame.columns = colname
    axes = sns.lineplot(data=frame[0:199])
    fig = axes.get_figure()
    for ext in ('.pdf', '.png'):
        fig.savefig(filename + ext)
# function destroys window
def destroy_window():
    """Close the main application window `m` (used as a Quit callback)."""
    m.destroy()
# If both Bilbokning and Portplanering is marked then, bilbokning will run first
# and then run Portplanering after.
def run_program():
    """Run the checked stages: carriage booking first, then port planning.

    Reads the check-box variables `bilv` and `portv`; when port planning is
    selected, sets the module-global `running` flag and enters the
    heuristic loop.
    """
    if bilv.get() == 1:
        # Bilbokning: read the date from the text box and book carriages.
        chosen_date = T.get("1.0", 'end-1c')
        calculate_carriages(slid.get(), chosen_date)
        d.configure(text="Date: " + chosen_date)
    if portv.get() == 1:
        # Portplanering: flag the heuristic as running and start it.
        global running
        running = True
        portplanering()
# Stop-button will not stop Bilbokning. Only heuristic search.
def stop_program():
    """Stop the heuristic search and report the best solution found so far.

    Clears the module-global `running` flag, re-solves the transport problem
    for `best_solution`, shows the resulting cost in the text box `T`, and
    dumps the solution to disk via solution_to_txt().
    """
    from transportproblem_pulp import transportproblem
    global running
    global best_solution
    if portv.get() != 1:
        # Port planning not selected — nothing to stop.
        return
    running = False
    T.delete("1.0", "end")
    # Calculate the cost of the best_solution found so far.
    cost, x = transportproblem(best_solution)
    # Print it in window and run solution_to_txt
    T.insert(tk.END, 'Best solution found: ' + str(cost))
    solution_to_txt(cost, x)
#------------------------------------------------------------------------------
# -------------------------------Window----------------------------------------
# Creates a window with every orders assigned ports
def view_solution():
    """Open a top-level window for browsing per-order port assignments.

    Shows a drop-down of order indices; pressing "Show solution" opens the
    selected order's CSV (written under solution/) rendered as a grid of
    labels.
    """
    orders = pd.read_csv('Portplanering/Lj.csv')
    order_indices = range(len(orders))
    import csv

    def showSol():
        # Render the selected order's solution CSV as a grid of labels.
        grid_win = tk.Toplevel()
        with open('solution/' + str(chosen_order_list.get()) + '.csv', newline='') as file:
            for r, row_values in enumerate(csv.reader(file)):
                for c, cell in enumerate(row_values):
                    tk.Label(grid_win,
                             width=10,
                             height=2,
                             text=cell,
                             relief=tk.RIDGE).grid(row=r, column=c)

    # Define buttons
    top = tk.Toplevel()
    top.title('Solution window')
    chosen_order_list = tk.StringVar(top)
    chosen_order_list.set(order_indices[0])
    op_menu_order = tk.OptionMenu(top, chosen_order_list, *order_indices)
    op_menu_order.pack()
    button_open_solution = tk.Button(top,
                                     text='Show solution',
                                     command=showSol)
    button_open_solution.pack()
# function creates a txtfile to view the current output in a textfile
def solution_to_txt(cost, x):
L = pd.read_csv('Portplanering/Lj.csv')
S = pd.read_csv('Portplanering/Sj.csv')
dij = pd.read_csv('Portplanering/dij.csv')
mj = pd.read_csv('Portplanering/mj.csv')
a = pd.read_csv('Portplanering/aip.csv')
a = np.array(a)
a = a.T
NUMBER_OF_PORTS = 40
list_of_vehicles = L+S
list_of_vehicles = list_of_vehicles.values.tolist()
list_of_vehicles = [val for sublist in list_of_vehicles for val in sublist]
number_of_orders = len(L)
# ------------------
# Functions for the solution window
# Sort x so its sorted for i(zone) -> p(port) -> j(order), from the LP-solver PuLP
x_sorted=[]
for i in range(9):
for p in range(40):
for j in range(number_of_orders):
this_index = np.where(x == 'x_'+str(i)+'_'+str(p)+'_'+str(j))[0]
x_sorted.append(int(float(x[this_index][0][1])))
# Getters for x_index
def get_zone(x_index):
return math.floor(x_index/(number_of_orders*NUMBER_OF_PORTS))
def get_port(x_index):
return math.floor((x_index % (number_of_orders*NUMBER_OF_PORTS)) / number_of_orders)
def get_order(x_index):
return (x_index % (number_of_orders*NUMBER_OF_PORTS)) % number_of_orders
x_sorted=np.array(x_sorted)
ny=[]
x_sorted_index = np.where(x_sorted != 0)[0]
for i in x_sorted_index:
ny.append([get_order(i), get_zone(i), get_port(i), x_sorted[i]])
# Creates CSV-files for each order, with port and transportation data.
for order in range(number_of_orders):
d = pd.DataFrame(np.zeros((9,0)))
for i in ny:
if i[0] == order:
d.at[i[1],i[2]] = i[3]
d.to_csv('solution/'+str(order)+'.csv', index=False)
# --------------------------TO TXT---------------------------
# Constants
ORDER_STEP_LENGTH = 160
TIME_STEP_LENGTH = 80
VEHICLE_STEP_LENGTH = 40
def get_order_yindex(vehicle_index):
order = math.floor(vehicle_index / ORDER_STEP_LENGTH)
return order
def get_time_yindex(vehicle_index):
time = math.floor((vehicle_index % ORDER_STEP_LENGTH) / TIME_STEP_LENGTH)
return time
def get_port_yindex(vehicle_index):
port = ((vehicle_index % ORDER_STEP_LENGTH) % TIME_STEP_LENGTH) % VEHICLE_STEP_LENGTH
return port
def get_vehicle_type_yindex(vehicle_index):
    """Return the vehicle type encoded in a flat y-index (0 -> '(18)', 1 -> '(30)' in the report below)."""
    # `//` replaces math.floor(float division): same value, exact for big ints.
    return ((vehicle_index % ORDER_STEP_LENGTH) % TIME_STEP_LENGTH) // VEHICLE_STEP_LENGTH
# Number of timeslot used for this order
num_of_times = int(max(np.array(mj)))+1
# Get y_index
# NOTE(review): `best_solution` is produced earlier in this file (outside this
# section) -- presumably the heuristic's best y-vector; confirm.
y_index = np.where(best_solution != 0)[0]
# y_index split for each timeslot
y_index_time = [[] for i in range(num_of_times)]
time_order = [[] for i in range(num_of_times)]
# time_order contains all the orders at a specific time.
# y_index_time contains the y_index at a specific time.
for i in y_index:
    for j in range(num_of_times):
        if get_time_yindex(i) == j:
            y_index_time[j].append(i)
            time_order[j].append(get_order_yindex(i))
# Deduplicate and sort the order lists per time slot.
for i in range(len(time_order)):
    time_order[i] = list(set(time_order[i]))
    time_order[i] = [int(x) for x in time_order[i]]
    time_order[i].sort()
# Make cost to real cost:
# total distance = sum over (zone, port, order) of distance a[i, p] times
# the number of pallets assigned in x_sorted (flat layout i -> p -> j).
cost = 0
for j in range(number_of_orders):
    for p in range(NUMBER_OF_PORTS):
        for i in range(9):
            cost += a[i,p] * x_sorted[i*NUMBER_OF_PORTS*number_of_orders + p*number_of_orders + j]
# Writes this data to a .txt
with open('solution.txt', 'w') as file:
    # This 'Datum' has to be set if you create for a certain date
    file.write('------------------------------------------------------------\n')
    file.write('Datum: XXXX-XX-XX Tidsintervall: '+str(num_of_times)+'\n')
    file.write('----------------------------------------------------------\n')
    # cost = best_cost found so far
    file.write('Total sträcka: '+str(cost)+'\n')
    file.write('Ordrar\n')
    # Shows on what time slot the orders have been set
    for t in range(num_of_times):
        file.write(str(t)+': ')
        for i in time_order[t]:
            file.write(str(i)+', ')
        file.write(' \n')
    file.write('------------------------------------------------------------\n\n')
    file.write('------------------------------------------------------------\n')
    file.write('Port\tT = 1\t\t\tT = 2\n')
    # Shows for each port where the orders are set for each timeslot
    for p in range(40):
        first_time='--------'
        second_time='--------'
        for i in y_index_time[0]:
            if get_time_yindex(i)==0 and get_port_yindex(i) == p:
                # vehicle type 0 carries 18 pallets, type 1 carries 30
                if get_vehicle_type_yindex(i) == 0:
                    amount = '(18)'
                else:
                    amount = '(30)'
                first_time = str(get_order_yindex(i))+' '+amount
        for i in y_index_time[1]:
            if get_time_yindex(i)==1 and get_port_yindex(i) == p:
                if get_vehicle_type_yindex(i) == 0:
                    amount = '(18)'
                else:
                    amount = '(30)'
                second_time = str(get_order_yindex(i))+' '+amount
        file.write(str(p+1)+'\t'+first_time+'\t\t'+ second_time+'\n')
    # Shows for each time slot where the orders are set for each port
    for t in range(num_of_times):
        file.write('\n\nTidsintervall: ' + str(t) + '\n')
        file.write('------------------------------------------------------------\n')
        file.write('ORDER\t\t TOT\t BIL+SLÄP\t\t PORT (#PALL)\n')
        order = -1
        for j in y_index_time[t]:
            # Consecutive y-indices of the same order are appended to the
            # same report line; a new order starts a new line.
            if order==get_order_yindex(j):
                port = get_port_yindex(j)
                num_of_pallets = 0
                for i in range(9):
                    num_of_pallets += x_sorted[order + number_of_orders*port + i*(40*number_of_orders)]
                file.write(' & '+str(port+1)+' ('+str(num_of_pallets)+')')
            else:
                order = get_order_yindex(j)
                tot = dij.sum(axis=0)[order]
                fordon = str(L.at[order,'0'])+' + '+str(S.at[order,'0'])
                port = get_port_yindex(j)
                num_of_pallets = 0
                for i in range(9):
                    num_of_pallets += x_sorted[order + number_of_orders * port + i*(40*number_of_orders)]
                file.write('\n'+str(order)+'\t\t'+str(tot)+'\t\t'+str(fordon)+'\t\t'+str(port+1)+' ('+str(num_of_pallets)+')')
    # Creates specific data for each orders.
    for j in range(number_of_orders):
        file.write('\n------------------------------------------------------------\n\n')
        file.write('------------------------------------------------------------\n')
        vehicles =[]
        for j2 in y_index:
            if get_order_yindex(j2) == j:
                vehicles.append(j2)
        #print(j)
        #print(y_index)
        file.write('Order\t'+str(j)+' '+'\tTidsintervall: '+str(get_time_yindex(vehicles[0]))+'\n\n')
        file.write('Bil')
        for v in vehicles:
            if get_vehicle_type_yindex(v) == 0:
                file.write('\t\t18')
            elif get_vehicle_type_yindex(v) == 1:
                if len(vehicles) == 2:
                    file.write('\t30')
                else:
                    file.write('\t\t30')
        file.write('\nPort\t\t')
        for v in vehicles:
            file.write(str(get_port_yindex(v))+'\t')
        file.write('\n------------------------------------------------------------')
        # Per-zone pallet counts for every vehicle of this order.
        # NOTE(review): `zone_dict` (zone index -> name) is defined earlier
        # in this file, outside this section.
        for i in range(9):
            file.write('\n'+zone_dict[i]+'\t')
            for v in vehicles:
                port = get_port_yindex(v)
                order = get_order_yindex(v)
                file.write(str(x_sorted[order + number_of_orders * port + i*(40*number_of_orders)])+'\t')
# ------------------------------------------------------------------------------
# Creates the gui window
m = tk.Tk()
m.geometry('600x400')
m.title(' xXx Bilbokning | Portplanering xXx')
# Define frames
top_frame = tk.Frame(m)
top_frame.pack(side=tk.TOP)
left_frame = tk.Frame(m)
left_frame.pack(side=tk.LEFT)
right_frame = tk.Frame(m)
right_frame.pack(side=tk.RIGHT)
bottom_frame=tk.Frame(m)
bottom_frame.pack(side=tk.BOTTOM)
w = tk.Label(top_frame, text='No file chosen', font = '100')
d = tk.Label(top_frame, text='No date chosen', font = '100')
#------------------------------------------------------------------------------
#----------------------------------Slider--------------------------------------
#Define a slider to change packing factor, DEFAULT=0.8
slid = tk.Scale(left_frame, from_=0.20, to=1.0, orient=tk.HORIZONTAL, resolution=0.05)
slid.set(0.80)
slid.pack()
#------------------------------------------------------------------------------
#---------------------------Options Meny for heuristics------------------------
# Option menu for heuristcs
chosen_heuristic = tk.StringVar(m)
chosen_heuristic.set(HEURISTICS[0])
opmenu = tk.OptionMenu(right_frame, chosen_heuristic, *HEURISTICS)
# Option | |
<reponame>benstear/manubot
"""Functions importable from manubot.cite submodule (submodule API):
standardize_citekey()
citekey_to_csl_item()
Helpers:
inspect_citekey()
is_valid_citekey() - also used in manubot.process
shorten_citekey() - used solely in manubot.process
infer_citekey_prefix()
"""
import functools
import logging
import re
from manubot.util import import_function
# Maps a citekey source prefix to the dotted path of the function that
# retrieves a CSL Item for that source (resolved lazily via import_function).
citeproc_retrievers = {
    "doi": "manubot.cite.doi.get_doi_csl_item",
    "pmid": "manubot.cite.pubmed.get_pubmed_csl_item",
    "pmcid": "manubot.cite.pubmed.get_pmc_csl_item",
    "arxiv": "manubot.cite.arxiv.get_arxiv_csl_item",
    "isbn": "manubot.cite.isbn.get_isbn_csl_item",
    "wikidata": "manubot.cite.wikidata.get_wikidata_csl_item",
    "url": "manubot.cite.url.get_url_csl_item",
}
"""
Regex to extract citation keys.
The leading '@' is omitted from the single match group.
Same rules as pandoc, except more permissive in the following ways:
1. the final character can be a slash because many URLs end in a slash.
2. underscores are allowed in internal characters because URLs, DOIs, and
citation tags often contain underscores.
If a citekey does not match this regex, it can be substituted for a
tag that does, as defined in citation-tags.tsv.
https://github.com/greenelab/manubot-rootstock/issues/2#issuecomment-312153192
Prototyped at https://regex101.com/r/s3Asz3/4
"""
citekey_pattern = re.compile(r"(?<!\w)@([a-zA-Z0-9][\w:.#$%&\-+?<>~/]*[a-zA-Z0-9/])")
@functools.lru_cache(maxsize=5_000)
def standardize_citekey(citekey, warn_if_changed=False):
    """
    Return the canonical form of *citekey* for its source:
    shortDOIs are expanded, DOIs lowercased, and ISBNs converted to ISBN-13.
    Other sources pass through unchanged.
    """
    source, identifier = citekey.split(":", 1)

    if source == "doi":
        if identifier.startswith("10/"):
            from manubot.cite.doi import expand_short_doi

            try:
                identifier = expand_short_doi(identifier)
            except Exception as error:
                # Fall back to the unshortened DOI: the later metadata lookup
                # will fail with proper error handling instead of here.
                logging.error(
                    f"Error in expand_short_doi for {identifier} "
                    f"due to a {error.__class__.__name__}:\n{error}"
                )
                logging.info(error, exc_info=True)
        identifier = identifier.lower()
    elif source == "isbn":
        from isbnlib import to_isbn13

        identifier = to_isbn13(identifier)

    standardized = f"{source}:{identifier}"
    if warn_if_changed and citekey != standardized:
        logging.warning(
            f"standardize_citekey expected citekey to already be standardized.\n"
            f"Instead citekey was changed from {citekey!r} to {standardized!r}"
        )
    return standardized
# Per-source identifier validation patterns, used with fullmatch() by
# inspect_citekey below.
regexes = {
    "arxiv": re.compile(
        r"(?P<versionless_id>[0-9]{4}\.[0-9]{4,5}|[a-z\-]+(\.[A-Z]{2})?/[0-9]{7})(?P<version>v[0-9]+)?"
    ),
    "pmid": re.compile(r"[1-9][0-9]{0,7}"),
    "pmcid": re.compile(r"PMC[0-9]+"),
    "doi": re.compile(r"10\.[0-9]{4,9}/\S+"),
    "shortdoi": re.compile(r"10/[a-zA-Z0-9]+"),
    "wikidata": re.compile(r"Q[0-9]+"),
}
def inspect_citekey(citekey):
    """
    Validate the identifier portion of *citekey* against the expected
    format for its source. Return a human-readable description of the
    problem when one is detected, otherwise None.
    """
    source, identifier = citekey.split(":", 1)

    if source == "arxiv":
        # https://arxiv.org/help/arxiv_identifier
        if not regexes["arxiv"].fullmatch(identifier):
            return "arXiv identifiers must conform to syntax described at https://arxiv.org/help/arxiv_identifier."
    elif source == "pmid":
        # https://www.nlm.nih.gov/bsd/mms/medlineelements.html#pmid
        if identifier.startswith("PMC"):
            return (
                "PubMed Identifiers should start with digits rather than PMC. "
                f"Should {citekey!r} switch the citation source to 'pmcid'?"
            )
        if not regexes["pmid"].fullmatch(identifier):
            return "PubMed Identifiers should be 1-8 digits with no leading zeros."
    elif source == "pmcid":
        # https://www.nlm.nih.gov/bsd/mms/medlineelements.html#pmc
        if not identifier.startswith("PMC"):
            return "PubMed Central Identifiers must start with 'PMC'."
        if not regexes["pmcid"].fullmatch(identifier):
            return (
                "Identifier does not conform to the PMCID regex. "
                "Double check the PMCID."
            )
    elif source == "doi":
        if identifier.startswith("10."):
            # https://www.crossref.org/blog/dois-and-matching-regular-expressions/
            if not regexes["doi"].fullmatch(identifier):
                return (
                    "Identifier does not conform to the DOI regex. "
                    "Double check the DOI."
                )
        elif identifier.startswith("10/"):
            # shortDOI, see http://shortdoi.org
            if not regexes["shortdoi"].fullmatch(identifier):
                return (
                    "Identifier does not conform to the shortDOI regex. "
                    "Double check the shortDOI."
                )
        else:
            return "DOIs must start with '10.' (or '10/' for shortDOIs)."
    elif source == "isbn":
        import isbnlib

        if isbnlib.notisbn(identifier, level="strict"):
            return f"identifier violates the ISBN syntax according to isbnlib v{isbnlib.__version__}"
    elif source == "wikidata":
        # https://www.wikidata.org/wiki/Wikidata:Identifiers
        if not identifier.startswith("Q"):
            return "Wikidata item IDs must start with 'Q'."
        if not regexes["wikidata"].fullmatch(identifier):
            return (
                "Identifier does not conform to the Wikidata regex. "
                "Double check the entity ID."
            )
    return None
def is_valid_citekey(
    citekey, allow_tag=False, allow_raw=False, allow_pandoc_xnos=False
):
    """
    Return True if citekey is a properly formatted string. Return False if
    citekey is not a citation or is an invalid citation.
    In the case citekey is invalid, an error is logged. This
    function does not catch all invalid citekeys, but instead performs cursory
    checks, such as ensuring citekeys adhere to the expected formats. No calls to
    external resources are used by these checks, so they will not detect
    citekeys to non-existent identifiers unless those identifiers violate
    their source's syntax.
    allow_tag=False, allow_raw=False, and allow_pandoc_xnos=False enable
    allowing citekey sources that are valid for Manubot manuscripts, but
    likely not elsewhere. allow_tag=True enables citekey tags (e.g.
    tag:citation-tag). allow_raw=True enables raw citekeys (e.g.
    raw:manual-reference). allow_pandoc_xnos=True still returns False for
    pandoc-xnos references (e.g. fig:figure-id), but does not log an error.
    With the default of False for these arguments, valid sources are restricted
    to those for which manubot can retrieve metadata based only on the
    standalone citekey.
    """
    # Basic shape checks: must be a string of the form "source:identifier".
    if not isinstance(citekey, str):
        logging.error(
            f"citekey should be type 'str' not "
            f"{type(citekey).__name__!r}: {citekey!r}"
        )
        return False
    if citekey.startswith("@"):
        logging.error(f"invalid citekey: {citekey!r}\nstarts with '@'")
        return False
    try:
        source, identifier = citekey.split(":", 1)
    except ValueError:
        logging.error(
            f"citekey not splittable via a single colon: {citekey}. "
            "Citekeys must be in the format of `source:identifier`."
        )
        return False
    if not source or not identifier:
        msg = f"invalid citekey: {citekey!r}\nblank source or identifier"
        logging.error(msg)
        return False
    if allow_pandoc_xnos:
        # Exempted non-citation sources used for pandoc-fignos,
        # pandoc-tablenos, and pandoc-eqnos
        pandoc_xnos_keys = {"fig", "tbl", "eq"}
        if source in pandoc_xnos_keys:
            return False
        if source.lower() in pandoc_xnos_keys:
            logging.error(
                f"pandoc-xnos reference types should be all lowercase.\n"
                f'Should {citekey!r} use {source.lower()!r} rather than "{source!r}"?'
            )
            return False
    # Check supported source type
    sources = set(citeproc_retrievers)
    if allow_raw:
        sources.add("raw")
    if allow_tag:
        sources.add("tag")
    if source not in sources:
        # Distinguish a casing mistake from a completely unknown source.
        if source.lower() in sources:
            logging.error(
                f"citekey sources should be all lowercase.\n"
                f'Should {citekey} use "{source.lower()}" rather than "{source}"?'
            )
        else:
            logging.error(
                f"invalid citekey: {citekey!r}\n"
                f"Source {source!r} is not valid.\n"
                f'Valid citation sources are {{{", ".join(sorted(sources))}}}'
            )
        return False
    # Finally, syntax-check the identifier for its source.
    inspection = inspect_citekey(citekey)
    if inspection:
        logging.error(f"invalid {source} citekey: {citekey}\n{inspection}")
        return False
    return True
def shorten_citekey(standard_citekey):
    """
    Derive a short citekey from a standardized citekey.

    The input is hashed to a 6-byte blake2b digest, which is then encoded
    as base62 ASCII (characters 0-9, a-z, A-Z). Standardize the citekey
    before calling: different inputs yield different short keys.
    """
    import hashlib

    import base62

    assert not standard_citekey.startswith("@")
    digest = hashlib.blake2b(standard_citekey.encode(), digest_size=6).digest()
    return base62.encodebytes(digest)
def citekey_to_csl_item(citekey, prune=True):
    """
    Generate a CSL Item (Python dictionary) for the input citekey.

    The citekey is standardized, dispatched to the retriever registered for
    its source in `citeproc_retrievers`, annotated with a provenance note,
    given a shortened id, and cleaned.
    """
    from manubot.cite.csl_item import CSL_Item
    from manubot import __version__ as manubot_version

    # BUG FIX: this line previously used `==` (a no-op comparison), so the
    # citekey was never actually standardized before retrieval.
    citekey = standardize_citekey(citekey, warn_if_changed=True)
    source, identifier = citekey.split(":", 1)
    if source not in citeproc_retrievers:
        msg = f"Unsupported citation source {source!r} in {citekey!r}"
        raise ValueError(msg)
    citeproc_retriever = import_function(citeproc_retrievers[source])
    csl_item = citeproc_retriever(identifier)
    csl_item = CSL_Item(csl_item)
    note_text = f"This CSL JSON Item was automatically generated by Manubot v{manubot_version} using citation-by-identifier."
    note_dict = {"standard_id": citekey}
    csl_item.note_append_text(note_text)
    csl_item.note_append_dict(note_dict)
    short_citekey = shorten_citekey(citekey)
    csl_item.set_id(short_citekey)
    csl_item.clean(prune=prune)
    return csl_item
def infer_citekey_prefix(citekey):
    """
    Pass the citekey through when it already carries a valid source prefix.
    When only the lowercased prefix is valid, lowercase the prefix.
    Otherwise treat the citekey as raw and prepend "raw:".
    """
    for source in list(citeproc_retrievers) + ["raw"]:
        prefix = f"{source}:"
        if citekey.startswith(prefix):
            return citekey
        if citekey.lower().startswith(prefix):
            return prefix + citekey[len(prefix):]
    return f"raw:{citekey}"
def url_to_citekey(url):
    """
    Convert a HTTP(s) URL into a citekey.
    For supported sources, convert from url citekey to an alternative source like doi.
    If citekeys fail inspection, revert alternative sources to URLs.
    """
    from urllib.parse import urlparse, unquote

    citekey = None
    parsed_url = urlparse(url)
    # NOTE(review): hostnames with fewer than two labels would make the
    # [-2] lookups below raise IndexError -- confirm callers pass full URLs.
    domain_levels = parsed_url.hostname.split(".")
    if domain_levels[-2:] == ["doi", "org"]:
        # DOI URLs
        doi = unquote(parsed_url.path.lstrip("/"))
        citekey = f"doi:{doi}"
    if domain_levels[-2] == "sci-hub":
        # Sci-Hub domains
        doi = parsed_url.path.lstrip("/")
        citekey = f"doi:{doi}"
    if domain_levels[-2:] == ["biorxiv", "org"]:
        # bioRxiv URL to DOI. See https://git.io/Je9Hq
        match = re.search(
            r"/(?P<biorxiv_id>([0-9]{4}\.[0-9]{2}\.[0-9]{2}\.)?[0-9]{6,})",
            parsed_url.path,
        )
        if match:
            citekey = f"doi:10.1101/{match.group('biorxiv_id')}"
    is_ncbi_url = parsed_url.hostname.endswith("ncbi.nlm.nih.gov")
    if is_ncbi_url and parsed_url.path.startswith("/pubmed/"):
        # PubMed URLs
        try:
            pmid = parsed_url.path.split("/")[2]
            citekey = f"pmid:{pmid}"
        except IndexError:
            pass
    if is_ncbi_url and parsed_url.path.startswith("/pmc/"):
        # PubMed Central URLs
        try:
            pmcid = parsed_url.path.split("/")[3]
            citekey = f"pmcid:{pmcid}"
        except IndexError:
            pass
    if domain_levels[-2:] == ["wikidata", "org"] and parsed_url.path.startswith(
        "/wiki/"
    ):
        # Wikidata URLs
        try:
            wikidata_id = parsed_url.path.split("/")[2]
            citekey = f"wikidata:{wikidata_id}"
        except IndexError:
            pass
    if domain_levels[-2:] == ["arxiv", "org"]:
        # arXiv identifiers. See https://arxiv.org/help/arxiv_identifier
        try:
            arxiv_id = parsed_url.path.split("/", maxsplit=2)[2]
            if arxiv_id.endswith(".pdf"):
                arxiv_id = arxiv_id[:-4]
            citekey = f"arxiv:{arxiv_id}"
        except IndexError:
            pass
    if citekey is None or inspect_citekey(citekey) is not None:
        citekey = f"url:{url}"
    # BUG FIX: the function previously fell off the end and implicitly
    # returned None, discarding the computed citekey.
    return citekey
| |
# -*- coding: utf-8 -*-
from settings import *
from messages import *
from functions import *
import time
import random
import sqlite3
from aiogram import asyncio
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
from aiogram.utils.helper import Helper, HelperMode, ListItem
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.contrib.middlewares.logging import LoggingMiddleware
from aiogram.types import ReplyKeyboardMarkup, \
KeyboardButton, InlineKeyboardMarkup, \
InlineKeyboardButton, ReplyKeyboardRemove
from aiogram.utils.exceptions import BotBlocked
import asyncio
from aiogram.utils.exceptions import Unauthorized
from aiogram.dispatcher import DEFAULT_RATE_LIMIT
from aiogram.dispatcher.handler import CancelHandler, current_handler
from aiogram.dispatcher.middlewares import BaseMiddleware
from aiogram.utils.exceptions import Throttled
# Event loop and bot wiring. NOTE(review): passing `loop` to Bot is the old
# aiogram 2.x style -- confirm the installed aiogram version accepts it.
loop = asyncio.get_event_loop()
bot = Bot(token = token, loop = loop)
# FSM state lives in memory only, so unfinished dialogs reset on restart.
dp = Dispatcher(bot, storage = MemoryStorage())
dp.middleware.setup(LoggingMiddleware())
class UserStates(Helper):
    # FSM states for the bot's multi-step dialogs (aiogram Helper list).
    GET_CHANNEL_TO_UP = ListItem()    # waiting for a channel link to promote
    GET_SUB_COUNT = ListItem()        # waiting for the desired subscriber count
    CONFIRMATION = ListItem()         # waiting for the promotion confirmation
    GET_MSG_FOR_MAIL = ListItem()     # admin: waiting for broadcast content
    GET_USER_FOR_UBAN = ListItem()    # admin: waiting for a user to ban/unban
    GET_USER_FOR_CHB = ListItem()     # admin: waiting for 'user_id delta' balance change
# Main reply keyboard: subscribe-to-channel, get-subscribers, profile,
# referral program (button captions are in Russian).
main_menu = ReplyKeyboardMarkup(resize_keyboard = True)
main_menu.add('✔️ Подписаться на канал', '➕ Получить подписчиков')
main_menu.add('👤 Профиль', '👣 Партнёрская программа')
# Inline admin panel: statistics, broadcast, ban/unban, change balance.
admin_menu = InlineKeyboardMarkup()
statistics_bt = InlineKeyboardButton(text = '📊 Статистика', callback_data = 'stat')
mail_bt = InlineKeyboardButton(text = '✉️ Рассылка', callback_data = 'mail')
give_uban_bt = InlineKeyboardButton(text = '🚷 Выдать бан/разбан', callback_data = 'uban')
change_balance_bt = InlineKeyboardButton(text = '💳 Изменить баланс', callback_data = 'chb')
admin_menu.add(statistics_bt, mail_bt)
admin_menu.add(give_uban_bt, change_balance_bt)
# Single-button inline keyboard that cancels the current dialog.
cancel_menu = InlineKeyboardMarkup()
cancel_bt = InlineKeyboardButton(text = '🚫 Отмена', callback_data = 'cancel')
cancel_menu.add(cancel_bt)
#==============
async def user_in_channel_checker():
    """
    Periodic job: verify subscribers are still members of promoted channels,
    fining users who unsubscribed early and dropping channels where the bot
    lost admin rights. Runs at most once per hour (see `set_last_check`).
    """
    last_check = get_last_check()
    if last_check == None and count_of_channels() >= 1:
        global check_user_in_ch
        async def check_user_in_ch():
            channels = get_channels_for_check()
            for x in channels:
                my_id = await bot.get_me()
                try:
                    status_bot_in_channel = await bot.get_chat_member(chat_id = x[1], user_id = my_id.id)
                    status_bot_in_channel = status_bot_in_channel.status
                except (Unauthorized, BotBlocked):
                    status_bot_in_channel = 'left'
                if status_bot_in_channel == 'administrator':
                    subs = x[2]
                    # NOTE(review): eval() on DB-stored text is dangerous --
                    # prefer json/ast.literal_eval; confirm x[-1] format.
                    checked_users = eval(x[-1])
                    for user in subs:
                        if user not in checked_users:
                            get_user = await bot.get_chat_member(chat_id = x[1], user_id = user)
                            time_from_subs = x[2][user]
                            # NOTE(review): (subscription_time - now).days is
                            # negative for past timestamps, so this comparison
                            # direction looks inverted -- verify the intent.
                            if get_user.status == 'left' and ((time_from_subs - datetime.datetime.now()).days < SUBSCRIPTION_TERM) and user_was_fine(x[0], user) == False:
                                add_user_to_fined(x[0], user)
                                change_balance(user, FINE_FOR_UNSUBSCRIBING)
                                increase_fine_count(user)
                                username = await bot.get_chat(chat_id = x[1])
                                await bot.send_message(user, SUBSCRIPTION_VIOLATION(username.username, SUBSCRIPTION_TERM, FINE_FOR_UNSUBSCRIBING))
                            elif get_user.status == 'left' and ((time_from_subs - datetime.datetime.now()).days >= SUBSCRIPTION_TERM) and user_was_fine(x[0], user) == False:
                                add_member_to_checked(x[0], user)
                else:
                    # Bot lost admin rights: stop the promotion and notify its owner.
                    writer = edit_promotion_status(x[0], 0)
                    id = x[1]
                    add_promotion_to_uncheck(x[0])
                    await bot.send_message(writer, CHANNEL_WAS_DEL_FROM_CHANNEL(id, LINK_TO_INTRODUCTION_AND_RULES), parse_mode = 'Markdown')
            set_last_check()
        await check_user_in_ch()
    # BUG FIX: `count_of_channels` was compared without being called, so the
    # function object (not the count) was tested against 1.
    elif last_check != None and count_of_channels() >= 1:
        now_time = datetime.datetime.now()
        # NOTE(review): last_check - now is negative; timedelta.seconds on a
        # negative delta wraps -- confirm the intended re-check interval.
        delta = last_check - now_time
        if delta.seconds >= 3600:
            await check_user_in_ch()
#==============
@dp.message_handler(lambda m: user_banned(m.from_user.id) == False, commands = ['start'])
async def start_commands_handle(m: types.Message):
    """/start: register a new user (optionally crediting a referrer passed
    as the deep-link argument) or greet a returning one."""
    if is_user_in_db(m.from_user.id) < 1:
        argument = m.get_args()
        # The /start payload is treated as the referrer's user id.
        if (argument is not None) and (argument.isdigit() == True) and (is_user_in_db(argument)) == 1:
            add_user_to_db(m.from_user.id, ref_father = argument)
            await m.reply(START, reply = False, parse_mode = 'Markdown', reply_markup = main_menu)
            await bot.send_message(text = NEW_REFERAL(argument), chat_id = argument)
        else:
            add_user_to_db(m.from_user.id)
            await m.reply(START, reply = False, parse_mode = 'Markdown', reply_markup = main_menu)
    else:
        await m.reply(UPDATE, reply = False, parse_mode = 'Markdown', reply_markup = main_menu)
@dp.message_handler(lambda m: m.from_user.id in admins, commands = ['admin'])
async def admin_command_handle(m: types.Message):
    """/admin (admins only): show the inline admin panel."""
    await m.reply(SELECT_ADMIN_MENU_BUTTON, reply = False, reply_markup = admin_menu)
@dp.message_handler(lambda m: m.from_user.id not in admins, commands = ['admin'])
async def handle_not_admin(m: types.Message):
    """/admin from a non-admin: send the rejection message."""
    await m.reply(YOU_WAS_HACK_ME, reply = False)
@dp.message_handler(lambda m: m.text == '👤 Профиль' and user_banned(m.from_user.id) == False)
async def profile_button_handle(m: types.Message):
    """'Profile' button: show the user's profile card."""
    await m.reply(PROFILE(m), reply = False, parse_mode = 'Markdown')
@dp.message_handler(lambda m: m.text == '➕ Получить подписчиков' and user_banned(m.from_user.id) == False)
async def add_channel_handle(m: types.Message):
    """'Get subscribers' button: start the add-channel dialog if the user's
    balance meets the minimum, otherwise report insufficient funds."""
    if user_balance(m.from_user.id) >= LITTLE_SUBCOIN_TO_GET_SUBS:
        state = dp.current_state(user = m.from_user.id)
        await state.set_state('GET_CHANNEL_TO_UP')
        await m.reply(GIVE_CHANNEL_LINK, reply = False, parse_mode = 'Markdown', reply_markup = cancel_menu)
    else:
        await m.reply(LITTLE_SUBCOIN_1, reply = False)
@dp.message_handler(state = 'GET_CHANNEL_TO_UP')
async def channel_to_up_handle(m: types.Message):
    """GET_CHANNEL_TO_UP state: validate the submitted channel link (must be
    a channel, unknown to the DB, with the bot as admin) and move on to
    asking for the subscriber count."""
    try:
        if m.content_type == 'text':
            my_id = await bot.get_me()
            get_channel= await bot.get_chat(m.text)
            if get_channel.type == 'channel':
                status_bot_in_channel = await bot.get_chat_member(chat_id = m.text, user_id = my_id.id)
                if check_channel_in_db(get_channel.id) == 1:
                    if status_bot_in_channel.status == 'administrator':
                        number = save_channel(channel_id = get_channel.id, writer = m.from_user.id)
                        cancel_promotion = InlineKeyboardMarkup()
                        cancel_promotion.add(InlineKeyboardButton(text = '🚫 Отмена', callback_data = 'cancel_' + str(number)))
                        # Delete the previous prompt to keep the chat tidy.
                        await bot.delete_message(message_id = m.message_id - 1, chat_id = m.from_user.id)
                        await m.reply(SEND_SUB_COUNT_1(m), reply = False, parse_mode = 'Markdown', reply_markup = cancel_promotion)
                        state = dp.current_state(user = m.from_user.id)
                        await state.set_state('GET_SUB_COUNT')
                    else:
                        await bot.delete_message(message_id = m.message_id - 1, chat_id = m.from_user.id)
                        await m.reply(BOT_NOT_IN_CHANNEL, parse_mode = 'Markdown', reply_markup = cancel_menu)
                elif check_channel_in_db(get_channel.id) == 0:
                    await m.reply(CHANNEL_ON_PROMOTION_2, reply = False, reply_markup = cancel_menu)
            else:
                await bot.delete_message(message_id = m.message_id - 1, chat_id = m.from_user.id)
                await m.reply(THIS_IS_NOT_CHANNEL, parse_mode = 'Markdown', reply_markup = cancel_menu)
        else:
            await m.reply(THIS_IS_NOT_TEXT, parse_mode = 'Markdown', reply_markup = cancel_menu)
    # NOTE(review): broad except that replies with the raw exception object --
    # leaks internal errors to the user; consider logging instead.
    except Exception as e:
        await m.reply(e, reply_markup = cancel_menu)
@dp.message_handler(state = 'GET_SUB_COUNT')
async def handle_get_sub_count(m: types.Message):
    """GET_SUB_COUNT state: accept the requested subscriber count if it is a
    number within the user's balance, then ask for confirmation."""
    if (m.content_type == 'text') and (m.text.isdigit() == True) and (int(m.text) >= LITTLE_SUBCOIN_TO_GET_SUBS) and user_balance(m.from_user.id) >= int(m.text):
        save_channel(subs_count = int(m.text), writer = m.from_user.id)
        channel_stat = get_channel_stat(m.from_user.id)
        username = await bot.get_chat(channel_stat[0][0][1])
        username = username.username
        confirmation_menu = InlineKeyboardMarkup()
        confirmation_menu.add(InlineKeyboardButton(text = '🚫 Отмена', callback_data = 'cancel_' + str(channel_stat[-1])), InlineKeyboardButton(text = '✅ Подтвердить', callback_data = 'confirm_' + str(channel_stat[-1])))
        state = dp.current_state(user = m.from_user.id)
        await state.set_state('CONFIRMATION')
        await bot.delete_message(message_id = m.message_id - 1, chat_id = m.from_user.id)
        await m.reply(CONFIRM_ADDING_CHANNEL(username, channel_stat[0][0][0], channel_stat[0][0][0]), reply = False, reply_markup = confirmation_menu)
    else:
        # Invalid amount: re-prompt with a cancel button for this promotion.
        channel_stat = get_channel_stat(m.from_user.id)
        username = await bot.get_chat(channel_stat[0][0][1])
        username = username.username
        cancel_wnum_menu= InlineKeyboardMarkup()
        cancel_wnum_menu.add(InlineKeyboardButton(text = '🚫 Отмена', callback_data = 'cancel_' + str(channel_stat[-1])))
        await m.reply(LITTLE_SUBCOIN_2, reply = False, reply_markup = cancel_wnum_menu)
@dp.message_handler(lambda m: m.text == '✔️ Подписаться на канал' and user_banned(m.from_user.id) == False)
async def sent_instruction_for_subscribe(m: types.Message):
    """'Subscribe to a channel' button: pick a random promoted channel the
    user has not joined yet and offer subscribe/verify buttons."""
    black_list = []
    while True:
        channels_list = channel_for_subscribe(m.from_user.id)
        if channels_list != 0 and len(channels_list) > len(black_list):
            channel_to_subscribe = random.choice(list(channels_list))
            # NOTE(review): only the already-subscribed branch appends to
            # black_list; repeated random picks of the same channel can spin
            # this loop for a while -- consider sampling without replacement.
            if channel_to_subscribe not in black_list:
                my_id = await bot.get_me()
                try:
                    bot_status = await bot.get_chat_member(chat_id = channel_to_subscribe, user_id = my_id.id)
                    bot_status = bot_status.status
                except (Unauthorized, BotBlocked):
                    bot_status = 'left'
                if bot_status == "administrator":
                    status_of_user = await bot.get_chat_member(chat_id = channel_to_subscribe, user_id = m.from_user.id)
                    if status_of_user.status == 'left':
                        username = await bot.get_chat(chat_id = channel_to_subscribe)
                        subscribe_menu = InlineKeyboardMarkup()
                        subscribe_menu.add(InlineKeyboardButton(text = 'Перейти к каналу', url = 'tg://resolve?domain=' + username.username))
                        subscribe_menu.add(InlineKeyboardButton(text = 'Проверить подписку', callback_data = 'sub_' + str(channels_list[channel_to_subscribe])))
                        await m.reply(SUBSCRIBE_ON_THIS_CHANNEL, reply_markup = subscribe_menu, reply = False)
                        break
                    else:
                        black_list.append(channel_to_subscribe)
                else:
                    # Bot lost admin rights in this channel: drop the promotion.
                    writer = edit_promotion_status(channels_list[channel_to_subscribe], 0)
                    id = channel_to_subscribe
                    await bot.send_message(writer, CHANNEL_WAS_DEL_FROM_CHANNEL(id, LINK_TO_INTRODUCTION_AND_RULES))
        else:
            await m.reply(NO_HAVE_CHANNELS_FOR_SUBSCRIBE, reply = False)
            break
@dp.message_handler(content_types = ['text', 'video', 'photo', 'document', 'animation'], state = 'GET_MSG_FOR_MAIL')
async def send_mail(m: types.Message):
    """
    GET_MSG_FOR_MAIL state (admin broadcast): forward the admin's message to
    every user, counting deliveries and blocks, then report the totals.

    The original had five copy-pasted send loops (one per content type);
    they are unified into a single loop with a per-type send helper.
    """
    state = dp.current_state(user = m.from_user.id)
    await state.reset_state()
    users = get_users_for_mailing()

    async def send_one(user_id):
        # One send call per supported content type; text-bearing types keep
        # the admin's HTML formatting, animation/document are sent as-is.
        if m.content_type == 'text':
            await bot.send_message(user_id, m.html_text, parse_mode = 'HTML')
        elif m.content_type == 'photo':
            await bot.send_photo(user_id, photo = m.photo[-1].file_id, caption = m.html_text, parse_mode = 'HTML')
        elif m.content_type == 'video':
            await bot.send_video(user_id, video = m.video.file_id, caption = m.html_text, parse_mode = 'HTML')
        elif m.content_type == 'animation':
            await bot.send_animation(user_id, animation = m.animation.file_id)
        elif m.content_type == 'document':
            await bot.send_document(user_id, document = m.document.file_id)

    all_users = 0
    blocked_users = 0
    for x in users:
        try:
            await send_one(x[0])
            all_users += 1
            await asyncio.sleep(0.3)  # throttle to stay under Telegram rate limits
        except BotBlocked:
            blocked_users += 1
    await m.reply(MAILING_END(all_users, blocked_users), reply = False)
@dp.message_handler(lambda m: m.text == '👣 Партнёрская программа' and user_banned(m.from_user.id) == False)
async def referal_button_handle(m: types.Message):
    """'Referral program' button: show the user's referral link and stats."""
    get_bot = await bot.get_me()
    await m.reply(PARTNER_PROGRAM(get_bot.username, m.from_user.id, referals(m.from_user.id)), reply = False, parse_mode = 'Markdown')
@dp.callback_query_handler(lambda c: c.data == 'cancel', state = UserStates.all())
async def cancel_button_handle(c: types.callback_query):
    """'Cancel' inline button (any dialog state): reset the FSM and replace
    the prompt with the cancellation text."""
    state = dp.current_state(user = c.from_user.id)
    await state.reset_state()
    await c.message.edit_text(CANCEL_TEXT)
@dp.message_handler(lambda m: m.from_user.id in admins, content_types = ['text'], state = 'GET_USER_FOR_CHB')
async def handle_user_for_chb(m: types.Message):
    """GET_USER_FOR_CHB state (admins only): parse '<user_id> <delta>' and
    adjust that user's balance, then reset the dialog state."""
    # Renamed locals: the originals shadowed the builtins `list` and `id`.
    parts = m.text.split(' ')
    if len(parts) == 2:
        user_id = parts[0]
        value = parts[1]
        # The delta may be negative, so strip a leading '-' before the digit check.
        if user_id.isdigit() and value.lstrip('-').isdigit():
            result = change_balance(user_id, value)
            await m.reply(result, reply = False)
        else:
            await m.reply(NOT_INTEGER, reply = False)
    else:
        await m.reply(LITTLE_VALUE, reply = False)
    state = dp.current_state(user = m.from_user.id)
    await state.reset_state()
@dp.message_handler(lambda m: m.from_user.id in admins, content_types = ['text'], state = 'GET_USER_FOR_UBAN')
async def handle_user_for_uban(m: types.Message):
list = m.text.split(' ')
if len(list) == 2:
id = | |
List[Dict[str, str]]
params = assign_params(category=category, search=search, sort=sort, created_by=created_by, offset=0,
limit=limit or 100)
if share_mode:
params["category.share_mode"] = share_mode
if alerted:
params["stats.alerted_agents"] = share_mode
# get all results
while len(indicators) < max_records:
indicators_partial_results = client.get_indicators_request(params)["data"]["entries"]
if not indicators_partial_results:
break
indicators.extend(indicators_partial_results)
params["offset"] = len(indicators)
# remove access results
if len(indicators) > max_records:
indicators[int(max_records) - 1: -1] = []
return indicators
def get_all_enabled_conditions(client: Client, indicator_category, indicator_name):
    """
    Page through the API and collect every enabled condition attached to the
    given indicator. Stops when a page comes back empty.
    """
    conditions = []  # type: List[Dict[str, str]]
    offset = 0
    while True:
        page = client.get_indicator_conditions_request(
            indicator_category,
            indicator_name,
            offset=offset,
            enabled=True,
        )['data']['entries']
        if not page:
            return conditions
        conditions.extend(page)
        offset = len(conditions)
def get_indicator_conditions(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Return the enabled conditions associated with a specific indicator,
    rendered as a markdown table for the war room.
    """
    indicator_name = args.get('name')
    conditions = get_all_enabled_conditions(client, args.get('category'), indicator_name)
    condition_rows = [get_condition_entry(condition) for condition in conditions]
    readable = tableToMarkdown(
        name=f"Indicator '{indicator_name}' Alerts on",
        t=condition_rows
    )
    return CommandResults(
        outputs_prefix="FireEyeHX.Conditions",
        outputs_key_field="_id",
        outputs=conditions,
        readable_output=readable
    )
"""helper fetch-incidents"""
def organize_reported_at(reported_at):
    """Advance an HX ``reported_at`` timestamp by one millisecond.

    Used by fetch-incidents to build a strictly-later lower bound so the last
    ingested alert is not fetched again.  When the bump rolls the milliseconds
    over to 1000, the timestamp is rebuilt one second later via epoch
    arithmetic; otherwise the three millisecond digits are re-padded in place.
    """
    bumped_ms = int(reported_at[-4:-1]) + 1
    if bumped_ms == 1000:
        # Roll over into the next second: convert to epoch millis, add one
        # second, and rebuild the string with zeroed milliseconds.
        epoch_ms = date_to_timestamp(reported_at[:-5], date_format=DATE_FORMAT) + 1000
        return timestamp_to_datestring(epoch_ms, date_format=DATE_FORMAT) + ".000Z"
    # Pad the bumped millisecond count back to exactly three digits, keeping
    # the trailing timezone character untouched.
    return reported_at[:-4] + str(bumped_ms).zfill(3) + reported_at[-1]
def query_fetch(reported_at=None, first_fetch: str = None):
    """Build the HX alert-polling JSON query on the ``reported_at`` field.

    The lower bound is ``reported_at`` when given, otherwise the start of the
    relative ``first_fetch`` range; the upper bound is the current time (end
    of a "1 days" range).
    """
    if reported_at:
        lower_bound = reported_at
    else:
        lower_bound = timestamp_to_datestring(
            parse_date_range(first_fetch, to_timestamp=True, utc=False)[0])
    upper_bound = timestamp_to_datestring(
        parse_date_range("1 days", to_timestamp=True, utc=False)[1])
    return ('{"operator":"between","arg":["' + lower_bound + '","'
            + upper_bound + '"],"field":"reported_at"}')
def parse_alert_to_incident(alert: Dict, pattern: Pattern) -> Dict:
    """Convert a raw HX alert into an XSOAR incident dict.

    Parameters
    ----------
    alert : Dict
        The raw alert as returned by the HX API.
    pattern : Pattern
        Compiled regex with two capture groups, used to split the camelCase
        event type into words for the incident name.

    Returns
    -------
    Dict
        Incident with ``name``, ``occurred`` and ``rawJSON`` keys.
    """
    event_type = alert.get('event_type') or 'NewEvent'
    # Map each known event type to the event_values key carrying the most
    # useful indicator for the incident title.
    event_indicators_map = {
        'fileWriteEvent': 'fileWriteEvent/fileName',
        'ipv4NetworkEvent': 'ipv4NetworkEvent/remoteIP',
        'dnsLookupEvent': 'dnsLookupEvent/hostname',
        'regKeyEvent': 'regKeyEvent/valueName'
    }
    event_indicator = event_indicators_map.get(event_type) or 'No Indicator'
    event_values = alert.get('event_values', {})
    indicator = ''
    if isinstance(event_values, dict):
        indicator = event_values.get(event_indicator, '')
    # BUG FIX: the replacement template must be a raw string -- "\g" is an
    # invalid escape sequence in a plain literal (SyntaxWarning on 3.12+).
    event_type_parsed = pattern.sub(r"\g<1> \g<2>", event_type).title()
    return {
        'name': f'{event_type_parsed}: {indicator}',
        'occurred': alert.get("event_at"),
        'rawJSON': json.dumps(alert)
    }
def run_commands_without_polling(client: Client, args: Dict[str, Any]):
    """Dispatch the single-shot variant of a polling command.

    Looks up the command named in ``args['cmd']`` and returns only the first
    CommandResults of the matching command function.  Returns ``None`` for an
    unrecognised command name (same fall-through as the original chain).
    """
    dispatch = {
        'fireeye-hx-search': start_search_command,
        'fireeye-hx-data-acquisition': data_acquisition_command,
        'fireeye-hx-file-acquisition': file_acquisition_command,
    }
    command = dispatch.get(args.get('cmd'))
    if command is not None:
        return command(client, args)[0]
''' COMMAND FUNCTIONS '''
"""
POLICIES
"""
def list_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """List HX policies, optionally filtered by name, id or enabled state.

    Raises
    ------
    ValueError
        When both ``policyName`` and ``policyId`` are supplied.
    """
    name = args.get('policyName')
    policy_id = args.get('policyId')
    if name and policy_id:
        raise ValueError("Enter a name or ID but not both")
    response = client.list_policy_request(
        offset=args.get('offset', 0),
        limit=args.get('limit', 50),
        policy_id=policy_id,
        name=name,
        enabled=args.get('enabled'),
    )
    table_rows = []
    for policy in response['data']['entries']:
        table_rows.append({
            "Policy Id": policy["_id"],
            "Policy Name": policy["name"],
            "Description": policy["description"],
            "Priority": policy["priority"],
            "Enabled": policy["enabled"],
        })
    readable = tableToMarkdown(
        name="FireEye HX List Policies",
        t=table_rows,
        headers=["Policy Name", "Policy Id", "Description", "Priority", "Enabled"],
    )
    return CommandResults(
        outputs_prefix='FireEyeHX.Policy',
        outputs_key_field='_id',
        outputs=response,
        raw_response=response,
        readable_output=readable,
    )
def list_host_set_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """List host-set/policy associations, or those of a single host set.

    Raises
    ------
    ValueError
        When both ``policyId`` and ``hostSetId`` are supplied.
    """
    host_set_id = args.get("hostSetId")
    policy_id = args.get("policyId", "")
    if host_set_id and policy_id:
        raise ValueError("Enter a Policy Id or Host Set Id but not both")
    if host_set_id:
        response = client.list_host_set_policy_by_hostSetId_request(host_set_id)
    else:
        response = client.list_host_set_policy_request(
            offset=args.get("offset", 0),
            limit=args.get("limit", 50),
            policy_id=policy_id,
        )
    entries = response["data"]["entries"]
    table_rows = [
        {"Policy Id": entry["policy_id"], "Host Set Id": entry["persist_id"]}
        for entry in entries
    ]
    readable = tableToMarkdown(
        name="FireEye HX Host Set Policies",
        t=table_rows,
        headers=["Policy Id", "Host Set Id"],
    )
    return CommandResults(
        outputs_prefix="FireEyeHX.HostSets.Policy",
        outputs_key_field="_id",
        outputs=entries,
        readable_output=readable,
    )
def assign_host_set_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Attach a policy to a host set.

    An HTTP 400 from the server is treated as "already assigned" and reported
    in the readable output instead of failing the command.

    Raises
    ------
    ValueError
        When ``policyId`` or ``hostSetId`` is missing, or on any server error
        other than HTTP 400.
    """
    host_set_id = args.get("hostSetId")
    policy_id = args.get("policyId")
    if not policy_id or not host_set_id:
        raise ValueError("policy ID and hostSetId are required")
    response = None
    try:
        response = client.assign_host_set_policy_request(
            {"persist_id": host_set_id, "policy_id": policy_id})
        message = "Success"
    except Exception as e:
        if '400' not in str(e):
            raise ValueError(e)
        demisto.debug(str(e))
        message = "This hostset may already be included in this policy"
    return CommandResults(
        readable_output=message,
        outputs_prefix="FireEyeHX.Policy",
        outputs=response,
    )
def delete_host_set_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Remove a policy from a host set.

    An HTTP 404 is reported as "Not Found" in the readable output instead of
    failing the command.

    Raises
    ------
    ValueError
        On any server error other than HTTP 404.
    """
    host_set_id = int(args.get('hostSetId', ''))
    policy_id = args.get('policyId')
    try:
        client.delete_host_set_policy_request(host_set_id, policy_id)
        message = 'Success'
    except Exception as e:
        if '404' in str(e):
            # BUG FIX: corrected "polisy" typo in the user-facing message.
            message = f'policy ID - {policy_id} or Host Set ID - {host_set_id} Not Found'
        else:
            raise ValueError(e)
    return CommandResults(readable_output=message)
"""
HOST INFORMAITION
"""
def get_all_hosts_information_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Collect information for all HX hosts and render a summary table.

    Pages through the hosts endpoint starting at ``offset`` until an empty
    page is returned; ``limit`` (capped at 1000) is also used as the page
    size for each request.
    """
    offset = int(args.get('offset', 0))
    hosts = []
    limit = int(args.get('limit', 1000))
    if limit > 1000:
        limit = 1000
    while True:
        hosts_partial = client.get_hosts_request(offset=offset, limit=limit)
        if not hosts_partial["data"]["entries"]:
            break
        hosts.extend(hosts_partial["data"]["entries"])
        offset = len(hosts)
    # NOTE(review): this slice deletes the *middle* of the list, keeping the
    # first limit-1 hosts plus the very last fetched host, rather than a
    # plain hosts[:limit] truncation. It mirrors the indicator pagination
    # elsewhere in this integration -- confirm it is intentional.
    if len(hosts) > limit:
        hosts[int(limit) - 1: -1] = []
    outputs = []
    for host in hosts:
        outputs.append({
            'Host Name': host.get('hostname'),
            'Last Poll': host.get('last_poll_timestamp'),
            'Agent ID': host.get('_id'),
            'Agent Version': host.get('agent_version'),
            'Host IP': host.get('primary_ip_address'),
            'OS': host.get('os', {}).get('platform'),
            'Containment State': host.get('containment_state'),
            'Domain': host.get('domain'),
            'Last Alert': host.get('last_alert')
        })
    headers_for_table = ['Host Name', 'Host IP', 'Agent ID', 'Agent Version',
                         'OS', 'Last Poll', 'Containment State', 'Domain', 'Last Alert']
    md = tableToMarkdown(
        name="FireEye HX Get Hosts Information",
        t=outputs,
        headers=headers_for_table
    )
    return CommandResults(
        outputs_prefix="FireEyeHX.Hosts",
        outputs_key_field="_id",
        outputs=outputs,
        raw_response=hosts,
        readable_output=md
    )
def get_host_information_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Fetch a single host's information by agent id or by host name.

    Raises
    ------
    ValueError
        When neither ``agentId`` nor ``hostName`` is given, or when the
        lookup fails for the supplied identifier.
    """
    agent_id = args.get("agentId")
    host_name = args.get("hostName")
    if not agent_id and not host_name:
        raise ValueError("Please provide either agentId or hostName")
    host: Dict
    if agent_id:
        try:
            host = client.get_hosts_by_agentId_request(agent_id)["data"]
        except Exception:
            raise ValueError(f"agentId {agent_id} is not correct")
    else:
        try:
            host = client.get_hosts_request(limit=1, host_name=host_name)["data"]["entries"][0]
        except Exception:
            raise ValueError(f"{host_name} is not found")
    summary_row = {
        'Host Name': host.get('hostname'),
        'Last Poll': host.get('last_poll_timestamp'),
        'Agent ID': host.get('_id'),
        'Agent Version': host.get('agent_version'),
        'Host IP': host.get('primary_ip_address'),
        'OS': host.get('os', {}).get('platform'),
        'Containment State': host.get('containment_state'),
        'Domain': host.get('domain'),
        'Last Alert': host.get('last_alert'),
    }
    readable = tableToMarkdown(
        name="FireEye HX Get Host Information",
        t=[summary_row],
        headers=['Host Name', 'Host IP', 'Agent ID', 'Agent Version',
                 'OS', 'Last Poll', 'Containment State', 'Domain', 'Last Alert'],
    )
    return CommandResults(
        outputs_prefix="FireEyeHX.Hosts",
        outputs_key_field="_id",
        outputs=host,
        readable_output=readable,
    )
def get_host_set_information_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Return host set information to the war room according to a given id or filters.

    Raises
    ------
    ValueError
        When the server response does not have the expected shape.
    """
    host_set_id = args.get('hostSetID')
    body = assign_params(
        limit=args.get('limit'),
        offset=args.get('offset'),
        search=args.get('search'),
        sort=args.get('sort'),
        name=args.get('name'),
        type=args.get('type')
    )
    response = client.get_host_set_information_request(body, host_set_id)
    host_set = []  # type: List[Dict[str, str]]
    try:
        data = response['data']
        # A single-id lookup returns the host set itself; a filtered query
        # returns a page with an "entries" list.
        if host_set_id:
            host_set = [data]
        else:
            host_set = data.get('entries', [])
    except Exception as e:
        demisto.debug(str(e))
        # BUG FIX: ``response`` is a parsed dict (it is subscripted above),
        # not an HTTP response object, so the old ``response.text`` raised
        # AttributeError and masked the real error; show the response itself.
        raise ValueError(
            'Failed to get host set information - unexpected response from the server.\n' + str(response))
    md_table = "No host sets found"
    if len(host_set) > 0:
        md_table = tableToMarkdown(
            name='FireEye HX Get Host Sets Information',
            t=host_set_entry(host_set),
            headers=['Name', 'ID', 'Type']
        )
    return CommandResults(
        outputs_prefix="FireEyeHX.HostSets",
        outputs_key_field="_id",
        outputs=host_set,
        readable_output=md_table
    )
"""
HOST CONTAINMENT
"""
def get_list_containment_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """List containment states, optionally filtered by last state update time."""
    entries = client.get_list_containment_request(
        offset=args.get("offset", 0),
        limit=args.get("limit", 50),
        state_update_time=args.get("state_update_time", ""),
    )["data"]["entries"]
    table_rows = [{
        "Id": entry["_id"],
        "State": entry["state"],
        "Request Origin": entry["requested_by_actor"],
        "Request Date": entry["requested_on"],
        "Containment Origin": entry["contained_by_actor"],
        "Containment Date": entry["contained_on"],
        "Last System information date": entry["last_sysinfo"],
    } for entry in entries]
    readable = tableToMarkdown(
        name="List Containment",
        t=table_rows,
        headers=["Id", "State", "Request Origin", "Request Date",
                 "Containment Origin", "Containment Date", "Last System information date"],
    )
    return CommandResults(
        outputs_prefix="FireEyeHX.Hosts",
        outputs_key_field="_id",
        outputs=entries,
        readable_output=readable,
    )
def host_containment_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
    """Request and (if permitted) approve containment for a host.

    Resolves the agent id from ``hostName`` when ``agentId`` is not given,
    sends the containment request, then attempts approval.  A 422 from the
    approval call is reported as missing permissions, a 409 as the host
    already being contained; any other approval error is re-raised.

    Returns
    -------
    List[CommandResults]
        Two results: the host outputs with the status message, and the
        standard Endpoint context for the host.

    Raises
    ------
    ValueError
        When neither agentId nor hostName is given, when the containment
        request fails, or on an unexpected approval error.
    """
    agent_id = args.get("agentId")
    host_name = args.get("hostName", "")
    if not agent_id and not host_name:
        raise ValueError("Please provide either agentId or hostName")
    if not agent_id:
        agent_id = get_agent_id_by_host_name(client, host_name)
    try:
        # ("containmet" is the client method's actual spelling.)
        client.host_containmet_request(agent_id)
    except Exception as e:
        raise ValueError(e)
    message = ""
    try:
        client.approve_containment_request(agent_id)
        message = "Containment request for the host was sent and approved successfully"
    except Exception as e:
        if '422' in str(e):
            message = "You do not have the required permissions for containment approve\n" \
                      "The containment request sent, but it is not approve."
        elif '409' in str(e):
            message = "This host may already in containment"
        else:
            raise ValueError(e)
    # Re-fetch the host so the returned context reflects the new state.
    host = client.get_hosts_by_agentId_request(agent_id)
    return [CommandResults(
        outputs_prefix="FireEyeHX.Hosts",
        outputs_key_field="_id",
        outputs=host['data'],
        readable_output=message),
        CommandResults(outputs_prefix="Endpoint", outputs=get_collect_endpoint_contxt(host["data"]))]
def approve_containment_command(client: Client, args: Dict[str, Any]) -> CommandResults:
agent_id = args.get("agentId")
if not agent_id:
raise ValueError("Agent ID is required")
message = "Containment for the host was approved successfully"
try:
client.approve_containment_request(agent_id)
except Exception as e:
if '409' in str(e):
message = "This host may already in containment"
else:
message = "Containment for the host failed, check if you have the necessary permissions"
return | |
# -*- coding: utf-8 -*-
import pickle, re, timeit
import unicodedata as ud
from nltk import trigrams, bigrams, FreqDist, sent_tokenize
from include.morph_analyzer import Analyzer, buckwalter
def talaa_pos_xml2talaa_pos_dic_phrase(annotated_corpus_path, annotated_corpus_name, path):
    """
    Build a per-sentence dictionary from an annotated XML corpus.

    Parameters
    ----------
    annotated_corpus_path : str
        Full path of the annotated corpus directory (in xml format),
        e.g. "/home/user/corpus/".
    annotated_corpus_name : str
        Name of the corpus, e.g. "corpus1" (without the extension).
    path : str
        Where to save the resulting dictionary
        ("talaa_pos_dic_phrase_" + annotated_corpus_name).

    Returns
    -------
    None
        Saves a dictionary mapping
        {phrase_number: [Text, Tokenisation, POS_Tag, Nb_Mot, Nb_Token]}
        via ``save_obj`` in the specified path.
    """
    # Read the corpus through a context manager so the file handle is always
    # released (the original ``open(...).read()`` leaked it).
    with open(annotated_corpus_path + annotated_corpus_name + ".xml", encoding="utf-8") as corpus_file:
        corpus_text = corpus_file.read()
    phrases = re.findall(r"<Phrase.+?</Phrase_\d+>", corpus_text, re.DOTALL)
    d = {}
    for phrase in phrases:
        try:
            phrase = re.sub(r"\n|\ufeff", "", phrase)
            Num_phrase = int(re.findall(r"<Num_phrase>(.*)</Num_phrase>", phrase)[0])
            Text = re.findall(r"<Text>(.+)</Text>", phrase)[0]
            Tokenisation = re.findall(r"<Tokenisation>(.*)</Tokenisation>", phrase)[0].split()
            POS_Tag = re.findall(r"<POS_Tag>(.*)</POS_Tag>", phrase)[0].split()
            Nb_Mot = int(re.findall(r"<Nb_Mot>(.*)</Nb_Mot>", phrase)[0])
            Nb_Token = int(re.findall(r"<Nb_Token>(.*)</Nb_Token>", phrase)[0])
            d[Num_phrase] = [Text, Tokenisation, POS_Tag, Nb_Mot, Nb_Token]
        except (IndexError, ValueError):
            # Malformed sentence entry (missing tag or non-numeric field):
            # skip it, narrowing the original bare ``except:``.
            continue
    save_obj(d, "talaa_pos_dic_phrase_" + annotated_corpus_name, path)
#********************************************************
def pos_lexicon_file2dict_morphem_setOfTags(pos_lexicon_file_path, path=None):
    """
    Build a {morpheme: {tag, ...}} dictionary from a POSLexicon text file.

    Parameters
    ----------
    pos_lexicon_file_path : str
        Full path of the POSLexicon file (.txt),
        e.g. /home/user/corpus/POSLexicon.txt.
    path : str, optional
        Where to save the generated "pos_lexicon" dictionary.  When omitted,
        ``pos_lexicon_file_path`` is passed as the destination instead
        (NOTE(review): that is a file path, not a directory -- presumably
        ``save_obj`` tolerates it; confirm).

    Returns
    -------
    None
        The dictionary is persisted via ``save_obj`` under "pos_lexicon".
    """
    d = {}
    # Context manager fixes the file-handle leak of the original code.
    with open(pos_lexicon_file_path, encoding="windows-1256") as lexicon_file:
        for line in lexicon_file:
            fields = line.split()
            if len(fields) > 1:
                d[fields[0]] = set(fields[1:])
    save_obj(d, "pos_lexicon", path if path else pos_lexicon_file_path)
#********************************************************
def update_pos_lexicon(path_sources, annotated_corpus_name, path__updates__, percent_phrases=100, path=None):
    """
    Extend the POS lexicon with the morphemes/tags seen in an annotated corpus.

    Parameters
    ----------
    path_sources : str
        Where the "pos_lexicon" and "talaa_pos_dic_phrase_..." dictionaries exist.
    annotated_corpus_name : str
        Name of the corpus, e.g. "corpus1".
    path__updates__ : str
        Where to save the text log of the updates made to the lexicon.
    percent_phrases : float
        Percentage of sentences used for the update (default: all of them).
    path : str
        Where to save the resulting dictionary; defaults to ``path_sources``.

    Returns
    -------
    None
        Saves "updated_pos_lexicon" (the original lexicon plus any new
        morphemes/tags) and writes a log file of the changes.
    """
    pos_lexicon = load_obj("pos_lexicon", path_sources)
    talaa_pos_dic_phrase = load_obj("talaa_pos_dic_phrase_" + annotated_corpus_name, path_sources)
    limit = len(talaa_pos_dic_phrase) * percent_phrases / 100
    updates = ""
    phrase = 0
    while phrase < limit:
        tokens = talaa_pos_dic_phrase[phrase][1]
        tags = talaa_pos_dic_phrase[phrase][2]
        for count_morpheme, morpheme in enumerate(tokens):
            # NOTE(review): the tag list appears offset by one relative to
            # the tokens (tags[0] presumably a sentence-start tag) -- confirm.
            tag = tags[count_morpheme + 1]
            # Explicit membership test replaces the original bare ``except:``
            # (which silently swallowed any error, not just missing keys).
            known_tags = pos_lexicon.get(morpheme)
            if known_tags is not None:
                known_tags.add(tag)
                updates += "\najout de " + tag + " à " + morpheme
            else:
                pos_lexicon[morpheme] = {tag}
                updates += "\najout d'un nouveau morpheme ( " + morpheme + " ) avec le tag : " + tag
        phrase += 1
    save_obj(pos_lexicon, "updated_pos_lexicon", path if path else path_sources)
    # "w" mode already truncates, and the context manager closes the file,
    # so the original seek(0)/truncate()/close() calls were redundant.
    with open(path__updates__ + "lesMajApportAvecLeCorpus_" + annotated_corpus_name + ".txt",
              "w", encoding="utf-8") as log_file:
        log_file.write(updates)
#********************************************************
def transition_prob(annotated_corpus_names, path_sources, path=None, learning_percentage=100):
    """
    Estimate tag-transition probabilities from annotated corpora.

    Builds a dictionary holding both second-order probabilities
    P(tg3 | tg1 tg2), keyed as (tg3, (tg1, tg2)), and first-order
    probabilities P(tg2 | tg1), keyed as (tg2, tg1), then saves it as
    "transition_prob_dict" (in ``path`` when given, otherwise in
    ``path_sources``).
    """
    tag_trigrams = []
    tag_bigrams = []
    tag_unigrams = []
    for corpus_name in annotated_corpus_names:
        sentences = load_obj("talaa_pos_dic_phrase_n_" + corpus_name, path_sources)
        limit = len(sentences) * learning_percentage / 100
        sent_idx = 0
        while sent_idx < limit:
            tag_seq = sentences[sent_idx][2]
            tag_trigrams += trigrams(tag_seq)
            tag_bigrams += bigrams(tag_seq)
            tag_unigrams += tag_seq
            sent_idx += 1
    # One frequency table serves all n-gram orders: tuple keys for the
    # tri/bigrams, plain string keys for the unigrams.
    freq = FreqDist(tag_trigrams + tag_bigrams + tag_unigrams)
    transition_prob_dict = {
        (t3, (t1, t2)): freq[(t1, t2, t3)] / freq[(t1, t2)]  # P(t3 | t1 t2)
        for (t1, t2, t3) in tag_trigrams
    }
    transition_prob_dict.update({
        (t2, t1): freq[(t1, t2)] / freq[t1]  # P(t2 | t1)
        for (t1, t2) in tag_bigrams
    })
    save_obj(transition_prob_dict, "transition_prob_dict", path if path else path_sources)
#********************************************************
def state_observation_prob(annotated_corpus_names, path_sources, path = None, learning_percentage = 100):
    """
    Estimate state-observation (emission) probabilities from annotated corpora.

    Parameters
    ----------
    annotated_corpus_names : list
        The list of all annotated corpus names used to generate the model.
    path_sources : str
        Where the "talaa_pos_dic_phrase_n_..." dictionar(y|ies) exist(s).
    path : str
        Where to save the resulting dictionary (defaults to ``path_sources``).
    learning_percentage : float
        Percentage of each corpus's sentences used for estimation.

    Returns
    -------
    None
        Saves "state_observation_dict" containing
        P(w2|w0 tg0 w1 tg1 tg2), P(w2|w1 tg1 tg2) and P(w2|tg2).
    """
    # Parallel count lists: *_c hold the full (word, tag) contexts, *_n hold
    # the same contexts with the last word removed (the conditioning events).
    tr_c = [] # w1t1 w2t2 w3t3
    tr_n = [] # w1t1 w2t2 t3
    bi_c = [] # w1t1 w2t2
    bi_n = [] # w1t1 t2
    un_c = [] # w1t1
    un_n = [] # t1
    for name in annotated_corpus_names:
        talaa_pos_dic_phrase = load_obj("talaa_pos_dic_phrase_n_"+name, path_sources)
        limit = len(talaa_pos_dic_phrase) * learning_percentage/100
        phrase = 0
        while phrase < limit:
            # Tokens are padded with a "" start symbol so that token and tag
            # n-grams align index-for-index below; tags are taken as-is
            # (unigram tags skip the first entry to match).
            tr_to = list(trigrams([""] + talaa_pos_dic_phrase[phrase][1]))
            bi_to = list(bigrams([""] + talaa_pos_dic_phrase[phrase][1]))
            un_to = talaa_pos_dic_phrase[phrase][1]
            tr_tg = list(trigrams(talaa_pos_dic_phrase[phrase][2]))
            bi_tg = list(bigrams(talaa_pos_dic_phrase[phrase][2]))
            un_tg = talaa_pos_dic_phrase[phrase][2][1:]
            i = 0
            while i < len(tr_to):
                # Interleave words and tags: (w1, t1, w2, t2, w3, t3).
                tr_c.append((tr_to[i][0], tr_tg[i][0], tr_to[i][1], tr_tg[i][1], tr_to[i][2], tr_tg[i][2]))
                tr_n.append((tr_to[i][0], tr_tg[i][0], tr_to[i][1], tr_tg[i][1], tr_tg[i][2]))
                i += 1
            i = 0
            while i < len(bi_to):
                bi_c.append((bi_to[i][0], bi_tg[i][0], bi_to[i][1], bi_tg[i][1]))
                bi_n.append((bi_to[i][0], bi_tg[i][0], bi_tg[i][1]))
                i += 1
            i = 0
            while i < len(un_to):
                un_c.append((un_to[i], un_tg[i]))
                un_n.append(un_tg[i])
                i += 1
            phrase += 1
    fd_state_observation_tr_c = FreqDist(tr_c)
    fd_state_observation_tr_n = FreqDist(tr_n)
    fd_state_observation_bi_c = FreqDist(bi_c)
    fd_state_observation_bi_n = FreqDist(bi_n)
    fd_state_observation_un_c = FreqDist(un_c)
    fd_state_observation_un_n = FreqDist(un_n)
    # Each probability is count(full context) / count(conditioning context).
    state_observation_tr = []
    for w1,t1,w2,t2,w3,t3 in fd_state_observation_tr_c.keys():
        state_observation_tr.append(((w3,(w1,t1,w2,t2,t3)),fd_state_observation_tr_c[(w1,t1,w2,t2,w3,t3)]/fd_state_observation_tr_n[(w1,t1,w2,t2,t3)]))
    state_observation_bi = []
    for w1,t1,w2,t2 in fd_state_observation_bi_c.keys():
        state_observation_bi.append(((w2,(w1,t1,t2)),fd_state_observation_bi_c[(w1,t1,w2,t2)]/fd_state_observation_bi_n[(w1,t1,t2)]))
    state_observation_un = []
    for w1,t1 in fd_state_observation_un_c.keys():
        state_observation_un.append(((w1,t1),fd_state_observation_un_c[(w1,t1)]/fd_state_observation_un_n[t1]))
    state_observation_dict = dict(state_observation_tr + state_observation_bi + state_observation_un)
    if path:
        save_obj(state_observation_dict, "state_observation_dict", path)
    else:
        save_obj(state_observation_dict, "state_observation_dict", path_sources)
#********************************************************
def viterbi(phrase, path_sources, return_list_of_tuples__token_tag = False):
"""
Parameters
----------
phrase : list
list of tokens.
path_sources : str
specify the path where state_observation_dict, transition_prob_dict and updated_pos_lexicon exist.
return_list_of_tuples__token_tag : boolean
Specify the type of result [tag1, tag2, ...] or [(token1,tag1), (token2,tag2), ...]
Returns
-------
list of tags
returns a list of tags of the correspond sentence.
"""
# input
state_observation_dict = load_obj("state_observation_dict", path_sources)
transition_prob_dict = load_obj("transition_prob_dict", path_sources)
updated_pos_lexicon = load_obj("updated_pos_lexicon", path_sources)
# initialization
## add of Start ("") to the phrase in input
words_sequence = [""] + phrase
## initialize the viterbi matrix
viterbi_matrix = { 0:[("NULL",1)] } # NULL <=> Start
w = 1
while w <= len(phrase):
viterbi_matrix[w] = []
for tg in updated_pos_lexicon[words_sequence[w]]:
viterbi_matrix[w].append((tg,0))
w += 1
## initialize the backtrace matrix
backtrace_matrix = {}
w = 1
while w <= len(phrase):
for tg in updated_pos_lexicon[words_sequence[w]]:
backtrace_matrix[(w,tg)] = ""
w += 1
# update both of viterbi and backtrace matrix
i = 1
while i <= len(phrase):
prob_transition_all_null = True
update = False
"""
tg2|tg0 tg1
w2|w0 tg0 w1 tg1 tg2
or ( i == 1 )
tg2|tg1
w2|w1 tg1 tg2
"""
tg_wi = 0
while tg_wi < len(viterbi_matrix[i]):
for tg1,value in viterbi_matrix[i-1]: # tg1 <=> tgi-1 value = viterbi[wi-1, tgi-1]
# calculate the new score
if value == 0: # test if viterbi_matrix[w-1][tgi-1] == 0 or not
continue
# P_trans we want p_trans(tg2|tg0 tg1) if not equal to 0, else p_trans(tg2|tg1)
tg2 = viterbi_matrix[i][tg_wi][0]
# now we don't have tg0
if i-1 > 0:
# in that case tg0 exist
tg0 = backtrace_matrix[(i-1,tg1)]
try:
P_trans = transition_prob_dict[(tg2,(tg0,tg1))]
except:
continue
else:
try:
P_trans = transition_prob_dict[(tg2,tg1)]
except:
# P_trans == 0
continue
prob_transition_all_null = False # P_trans != 0
# P_tag_observ we want P_tag_observ(w2|w0 tg0 w1 tg1 tg2) if != 0 , else P_tag_observ(w2|w1 tg1 tg2) if != 0, else P_tag_onbserv(w2|tg2)
w2 = words_sequence[i]
w1 = words_sequence[i-1]
if i-1 > 0:
# in that we already have tg0 from the previous step
w0 = words_sequence[i-2]
try:
P_tag_observ = state_observation_dict[(w2,(w0,tg0,w1,tg1,tg2))]
except:
continue
else:
try:
P_tag_observ = state_observation_dict[(w2,(w1,tg1,tg2))]
except:
continue
score = value * P_trans * P_tag_observ
# if score == 0:
# continue
if score > | |
<filename>utils/ERLplot_utils.py
#!/usr/bin/env python3
# Import modules
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import ticker, gridspec
from deprecated import deprecated
import utils.ERLexperiment_utils as exp
import numpy as np
from scipy import ndimage
import seaborn as sns
# BUG FIX: ``Iterable`` must come from collections.abc -- the
# ``collections`` alias was removed in Python 3.10.
from collections.abc import Iterable
from utils.tile_coder import TileCoder
import matplotlib as mpl
import pickle
# TODO: Add documentation for plot_updates and plot_updates_
# TODO: Update action plots
# TODO: state_update_heatmap should do 2D heatmap of multi-dimensional
# states, just give the two dimensions to plot in a heatmap. same for the
# surface plot!!
# TODO: separate state_update_heatmap into two functions, one of which will
# simply take a vector of update counts and create a heatmap of it given the
# dimensions to reduce it to (e.g. reshape(100, 100))
# TODO: make _pendulum_ticks more general to label any number of ticks
# TODO: Plot mean with runs episodic
# TODO: Plot mean with stderr episodic for multiple data files
# TODO: Change action plot to histogram
# TODO: Make the state surface plot use either state updates or states visited
# Like the new scheme in state heatmap plot
# TODO: State surface plot should take similar params to heatmap
# TODO: Improve documentation
# TODO: plot_mean_with_runs() should plot avg rewards for continuing
# Global variables
CMAP = "rocket"
DEFAULT_COLOURS = list(sns.color_palette(CMAP, 6).as_hex())
plt.rcParams["axes.prop_cycle"] = mpl.cycler(color=sns.color_palette(CMAP))
# sns.set_theme(palette=CMAP)
OFFSET = 0  # The offset to start in DEFAULT_COLOURS
def updates(data, type_, ind, smooth_over, names, fig=None, ax=None,
            figsize=(12, 6), last_ind=-1, colours=None, xlim=None, ylim=None,
            env_type="continuing", label="", alpha=0.2, xscale="log",
            yscale="linear", base=10, xlabel=None, ylabel=None,
            runs=False, keep_shape=False, bootstrap=False, significance=0.1):
    """
    Plot performance as a function of the number of samples used in updates.

    Parameters
    ----------
    data : list of dict
        The data dictionaries generated from running main.py, one per agent.
    type_ : str
        Which type of data to plot, one of "eval" or "train".
    ind : iter of iter of int
        Hyperparameter-setting indices to plot, one inner list per agent.
    smooth_over : list of int
        The number of previous data points to smooth over, one per data
        dictionary (data points, not timesteps).
    names : list of str
        The names of the agents, used for the legend.
    fig : plt.figure
        The figure to plot on; a new one is created when None.
    ax : plt.Axes
        The axis to plot on; a new one is created when None.
    figsize : tuple(int, int)
        The size of the figure to plot.
    colours : list of list of str
        The colours for each hyperparameter setting of each data dictionary;
        defaults are generated when None.
    xlim, ylim : float, optional
        Axis limits, by default None.
    env_type : str, optional
        One of 'continuing', 'episodic'; by default 'continuing'.
    alpha : float, optional
        Alpha channel for the shaded error regions.
    xscale, yscale : str, optional
        Axis scales; ``base`` is the base used for a log x axis.
    xlabel, ylabel : str, optional
        Axis labels.
    runs : bool, optional
        If True, plot individual runs instead of error bars.
    keep_shape : bool, optional
        Whether the input data keeps its shape after smoothing.
    bootstrap : bool, optional
        If True, plot bootstrap confidence intervals instead of standard error.
    significance : float, optional
        Significance level for the confidence interval.

    ``last_ind`` and ``label`` are accepted for interface compatibility and
    are not used here.

    Returns
    -------
    plt.Figure, plt.Axes
        The figure and axes plotted on.
    """
    if colours is None:
        colours = _get_default_colours(ind)
    fig, ax = _setup_fig(fig, ax, figsize, None, xlim, ylim, xlabel, ylabel,
                         xscale, yscale, base)
    legend_labels = []
    for agent_idx in range(len(data)):
        # One curve per hyperparameter setting of this agent.
        for hyper_idx in range(len(ind[agent_idx])):
            fig, ax = _updates(data[agent_idx], type_, ind[agent_idx][hyper_idx],
                               smooth_over[agent_idx], fig, ax, figsize,
                               colours[agent_idx][hyper_idx], xlim, ylim,
                               env_type, alpha, xscale, yscale, base, xlabel,
                               ylabel, runs, keep_shape, bootstrap,
                               significance)
        # One legend entry per agent.
        legend_labels.append(names[agent_idx])
    ax.legend(legend_labels)
    return fig, ax
def _updates(data, type_, ind, smooth_over, fig=None, ax=None,
             figsize=(12, 6), colour=None, xlim=None, ylim=None,
             env_type="continuing", alpha=0.2, xscale="log",
             yscale="linear", base=10, xlabel=None, ylabel=None, runs=False,
             keep_shape=False, bootstrap=False, significance=0.1):
    """Plot one hyperparameter setting's performance-vs-updates curve.

    Helper for ``updates``: draws the mean curve (as a step plot) plus either
    a shaded error region (standard error or bootstrap CI) or the individual
    runs on the given axis.

    NOTE(review): the figure-setup call below is commented out, so despite
    the ``fig=None, ax=None`` defaults this function appears to require a
    pre-created ``ax`` (e.g. from the public ``updates`` wrapper) -- confirm.
    """
    if colour is None:
        colour = _get_default_colours([ind])[0]
    # # Setup figure
    # fig, ax = _setup_fig(fig, ax, figsize, None, xlim, ylim, xlabel, ylabel,
    #                      xscale, yscale, base)
    hyperparams = data["experiment_data"][ind]["agent_hyperparams"]
    if "batch_size" in hyperparams:
        batch_size = hyperparams["batch_size"]
    else:
        batch_size = 1
    # Calculate updates to plot along x-axis
    steps_per_episode = data["experiment"]["environment"]["steps_per_episode"]
    total_steps = data["experiment"]["environment"]["total_timesteps"]
    # Include the first eval -- before any updates have been done
    start = 0
    stop = (total_steps + steps_per_episode) * batch_size
    step = steps_per_episode * batch_size
    updates = np.arange(start, stop, step)
    # Get the data to plot depending on the env type
    if env_type == "continuing":
        rewards, err, all_ = \
            rewards_per_update_continuing(data, ind, smooth_over, type_,
                                          keep_shape, bootstrap, significance)
    else:
        rewards, err, all_ = \
            rewards_per_update_episodic(data, ind, smooth_over, updates, type_,
                                        keep_shape, bootstrap, significance)
    # If the original shape is not kept, then the updates need to be adjusted
    # for smoothing
    if not keep_shape:
        updates = updates[:len(updates) - smooth_over + 1]
    # Plot the mean
    ax.step(updates, rewards, where="post", color=colour)
    if not runs:
        if not bootstrap:
            # Standard error
            ax.fill_between(updates, rewards-err, rewards+err, alpha=alpha,
                            step="post", color=colour)
        else:
            # Bootstrap confidence intervals
            ax.fill_between(updates, err[0], err[1], alpha=alpha, step="post",
                            color=colour)
    # Plot each run
    if runs:
        for i in range(all_.shape[0]):
            ax.step(updates, all_[i, :], linestyle="--", alpha=alpha,
                    color=colour, where="post")
    return fig, ax
def rewards_per_update_continuing(data, ind, smooth_over, type_="train",
                                  keep_shape=False, bootstrap=False,
                                  significance=0.1):
    """Compute average-reward-per-update curves for a continuing environment.

    Returns
    -------
    tuple
        ``(mean, err, all_runs)`` where ``err`` is the standard error across
        runs when ``bootstrap`` is False, otherwise a bootstrap confidence
        interval at the given ``significance``.
    """
    returns = []
    for run in data["experiment_data"][ind]["runs"]:
        if type_ == "train":
            # The episodic returns for each run
            run_returns = list(run["train_episode_rewards"])
            # Prepend the first eval episode's mean return, since it is the
            # only data available for 0 updates.
            # BUG FIX: ndarray has no ``Vmean`` method -- the original
            # ``.Vmean()`` raised AttributeError; use ``.mean()`` (matching
            # the ``.mean(axis=-1)`` call in the eval branch below).
            first_ep = run["eval_episode_rewards"][0].mean()
            run_returns.insert(0, first_ep)
            returns.append(run_returns)
        else:
            returns.append(run["eval_episode_rewards"].mean(axis=-1))
    # Convert episodic return into average reward per step.
    steps_per_ep = data["experiment"]["environment"]["steps_per_episode"]
    returns = np.array(returns) / steps_per_ep
    returns = exp.smooth(returns, smooth_over, keep_shape=keep_shape)
    if not bootstrap:
        # Standard error across runs
        runs = len(data["experiment_data"][ind]["runs"])
        stderr = np.std(np.array(returns), axis=0) / np.sqrt(runs)
        return returns.mean(axis=0), stderr, returns
    # Bootstrap confidence interval
    conf = exp.bootstrap_conf(returns, significance)
    return returns.mean(axis=0), conf, returns
def rewards_per_update_episodic(data, ind, smooth_over, updates, type_="train",
keep_shape=False, bootstrap=False,
significance=0.1):
returns = []
for run in data["experiment_data"][ind]["runs"]:
if type_ == "train":
# Determine the size of batches used in updates
hyperparams = data["experiment_data"][ind]["agent_hyperparams"]
if "batch_size" in hyperparams:
batch_size = hyperparams["batch_size"]
else:
batch_size = 1
# Calculate the cumulative number of updates over all previous
# episodes
run_updates = np.cumsum(run["train_episode_steps"]) * batch_size
# For each cumulative number of updates plotted on the x-axis,
# get the performance of the episode with number of updates closest
# to this value
run_returns = []
for i in range(1, len(updates)):
ep_gt_lower_bound = | |
from datetime import datetime
import sys
sys.path.insert(1, r'C:\Users\ASUS\Desktop\sources\Telegram\werewolf\Darkhelper\2\V2\Databases')
from Databases.Groups import GroupsPlayersBase , GroupsBase , GroupsControlBase
from Databases.Groups.Bet import BetBase
from Databases.Users import AdminsBase
from Databases.Users.AfksBase import Set_All_Group_AFK_Zero
from Databases.Stats import AdminStatsBase , GroupStatsBase
from Classes.Statics import Statics
from Databases.Users.UsersBase import Show_Group_ALL_User_Points , Show_All_user_Points
from Databases.Users.ShekarsBase import Delete_Shekar
class Group:
    def __init__(self,Chat_id : int):
        """Hydrate the group object from the database.

        Loads the feature row for ``Chat_id`` via GroupsBase and the control
        row via GroupsControlBase, then exposes every column as an attribute.
        NOTE(review): ``Details``/``Controls`` are assumed to be dict-like DB
        rows keyed by column name — confirm against the Databases package.
        """
        Details=GroupsBase.Show_Group_Features(int(Chat_id))
        self.All_Atrebeutes=Details
        self.chat_id=int(Chat_id)
        # Main game chat and its support chat ids.
        self.Main = int(Details['group_id'])
        self.Support = int(Details['support_id'])
        self.Subscription_Date=str(Details['tamdid_date'])
        self.Deadline=int(Details['davazdah'])
        self.Auto_Tag=int(Details['auto_tag'])
        self.Auto_DeleteTag=int(Details['auto_del'])
        self.Auto_Tag_Support=int(Details['auto_tag_sup'])
        self.Auto_DeleteTag_Sup=int(Details['auto_del_sup'])
        self.Alarm=int(Details['alarm'])
        self.Bet=int(Details['bet'])
        self.Least_State=int(Details['state'])
        self.State_Lock=int(Details['state_lock'])
        self.Warn=int(Details['warn'])
        #--------------------------------------|
        # 0 - onyx                             |
        # 1 - werewolf                         |
        # 2 - black                            |
        self.Bot_Kind=int(Details['bot_kind'])#|
        #--------------------------------------|
        self.Mute_Fun=int(Details['fun_mute'])
        self.Auto_nextGame=int(Details['auto_next_game'])
        self.NextGame_Response=int(Details['next_game_response'])
        self.emoji1=str(Details['emoji1'])
        self.emoji2=str(Details['emoji2'])
        self.emoji3=str(Details['emoji3'])
        self.Sooti=int(Details['sooti'])
        self.Admin_Alarm=int(Details['admin_Alarm'])
        self.Ghaleb=str(Details['ghaleb'])
        self.JoinTime_Alarm=int(Details['jointime_sup'])
        self.Dead_NextGame=int(Details['dead_next'])
        self.Shekar_Pin=int(Details['pin_shekar'])
        self.Nazer_pin=int(Details['pin_nazer'])
        self.List_Pin=int(Details['pin_list'])
        self.Role_Saver=int(Details['role_saver'])
        self.Questions=int(Details['questions'])
        self.Bors=int(Details['bors'])
        self.Message_State=int(Details['message_state'])
        self.Next_Message_Id=int(Details['auto_next_message_id'])
        self.is_Question_Sended=int(Details['question_sended'])
        self.Auto_Start=int(Details['auto_start'])
        self.Afk_Warn=int(Details['afk_warn'])
        self.Is_Join_Time=int(Details['join_time'])
        self.Is_Tagging=int(Details['is_tagging'])
        self.Is_Time_For_Question=bool(Details['Its_Question_Time'])
        self.Players_Lock_Only=int(Details['players_state_lock'])
        #-----------------------------------------------------------
        # Moderation/control settings live in a separate table.
        Controls=GroupsControlBase.Show_Group_Control_Features(self.Main)
        self.All_Controls=Controls
        self.Welcome_Turn=int(Controls['welcometurn'])
        self.Anti_Spam=int(Controls['anti_spam'])
        self.Anti_Robot=int(Controls['anti_robot'])
        self.Anti_NFSW=int(Controls['fosh_filter'])
        self.Anti_Tabchi=int(Controls['anti_tabchi'])
        self.Channel =str(Controls['channel'])
        self.Channel_Lock=int(Controls['channellock'])
        self.Group_Lock=int(Controls['lock'])
        self.Voice_Lock=int(Controls['voice_lock'])
        self.Sticker_Lock=int(Controls['sticker_lock'])
        self.Photo_Lock=int(Controls['photo_lock'])
        self.Link_Lock=int(Controls['link_lock'])
        self.Forward_Lock=int(Controls['forward_lock'])
        self.Video_Lock=int(Controls['video_lock'])
        self.Service_Lock=int(Controls['service_lock'])
        self.Spam_Count=int(Controls['spam_count'])
        self.Welcome=str(Controls['welcome'])
        self.Channel_Text=str(Controls['channel_text'])
        #-----------------------------------------porn
        # NOTE(review): stored as str, not int — any non-empty value
        # (including "0") is truthy; see Turn_Anti_Porn.
        self.Porn=str(Controls['porn'])
        #-----------------------------
        # Per-body-part NSFW filter flags (also stored as strings).
        Controls=Controls['Filters']
        self.Porn_All_Filters=Controls
        self.Porn_Dick_Filter=str(Controls['dick'])
        self.Porn_Pussy_Filter=str(Controls['pussy'])
        self.Porn_Coverd_Pussy_Filter=str(Controls['coveredpossy'])
        self.Porn_FBoobs_Filter=str(Controls['fboobs'])
        self.Porn_MBoobs_Filter=str(Controls['mboobs'])
        self.Porn_CoveredBoobs_Filter=str(Controls['coveredboobs'])
        self.Porn_Stomach_Filter=str(Controls['stomack'])
        self.Porn_ZirBaghal_Filter=str(Controls['baghal'])
        self.Porn_Ass_Filter=str(Controls['ass'])
        self.Porn_Feet_Filter=str(Controls['feet'])
        self.Porn_Covered_ASS_Filter=str(Controls['coveredass'])
#-----------------------------------------------------------------
@property
def All_Players(self):
return Show_All_user_Points()
@property
def All_Group_Players(self):
return Show_Group_ALL_User_Points(self.Main)
async def Get_Players_usernames(self,bot,lists):
for i in lists:
try:
user=await bot.get_users(i)
if user.username :
yield user.mention
except:pass
#-----------------------------------------------------------------
def __int__(self) -> int:
return int(self.Support)
def __str__(self) -> str:
return str(self.Main)
#-----------------------------------------------------------------
@property
def Show_Istagging(self):
return GroupsBase.Show_one_feature('is_tagging',self.chat_id)
@property
def Show_JoinTime(self):
return GroupsBase.Show_one_feature('join_time',self.chat_id)
@property
def Join_time_Started(self):
GroupsBase.Change_Group_Feature(self.Main , 'join_time' , 1)
return 1
@property
def Join_time_Finished(self):
GroupsBase.Change_Group_Feature(self.Main , 'join_time' , 0)
return 0
#-----------------------------------------------------------------
@property
def Show_All_Admins_Points(self):
return AdminStatsBase.Show_Gap_All_Admins_Points(self.Main)
@property
def Show_Today_Admins_Points(self):
return AdminStatsBase.Show_Gap_All_Admins_Points_Today(self.Main)
@property
def Admins(self):
admins=AdminsBase.Show_All_Admins(self.Main)
return [ admins , len(admins) ]
@property
def Show_Owner(self):
return int(AdminsBase.Show_Owner(self.Main))
#-----------------------------------------------------------------
@property
def Show_Emojis(self):
return [ self.emoji1 , self.emoji2 , self.emoji3 ]
@property
def Show_Welcome(self):
wel=self.Welcome
if wel == 'none':
return None
else:return wel
@property
def Show_Ghaleb(self):
ghlb=self.Ghaleb
if ghlb == 'none':
return None
else:return ghlb
@property
def Show_Channel(self):
chnl=GroupsControlBase.Show_Channel(self.Main)
if chnl == 'none':
return None
else:return chnl
@property
def Show_Next_Game_Text(self):
if self.Bot_Kind ==0:return ' /nextgame@OnyxWereBetaBot '
elif self.Bot_Kind ==1:return ' /nextgame@werewolfbot '
elif self.Bot_Kind ==2:return ' /nextgame@Blackwwrobot \n /nextgame@blackwerewolfbot '
#-----------------------------------------------------------------
def Turn_Welcome_Turn(self):
if self.Welcome_Turn:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'welcometurn' , x)
return x
def Turn_Covered_Ass_Filter_Lock(self):
if self.Porn_Covered_ASS_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'coveredass' , x)
return x
def Turn_Dick_Filter_Lock(self):
if self.Porn_Dick_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'dick' , x)
return x
def Turn_pussy_Filter_Lock(self):
if self.Porn_Pussy_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'pussy' , x)
return x
def Turn_CoveredPussy_Filter_Lock(self):
if self.Porn_CoveredBoobs_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'coveredpossy' , x)
return x
def Turn_FBoobs_Filter_Lock(self):
if self.Porn_FBoobs_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'fboobs' , x)
return x
def Turn_MBoobs_Filter_Lock(self):
if self.Porn_MBoobs_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'mboobs' , x)
return x
def Turn_Covers_Boobs_Filter_Lock(self):
if self.Porn_CoveredBoobs_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'coveredboobs' , x)
return x
def Turn_Stomach_Filter_Lock(self):
if self.Porn_Stomach_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'stomack' , x)
return x
def Turn_ZirBaghal_Filter_Lock(self):
if self.Porn_ZirBaghal_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'baghal' , x)
return x
def Turn_Ass_Filter_Lock(self):
if self.Porn_Ass_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'ass' , x)
return x
def Turn_Feet_Filter_Lock(self):
if self.Porn_Feet_Filter:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'feet' , x)
return x
#-----------------------------------------------------------------
def Turn_Video_Lock(self):
if self.Video_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'video_lock' , x)
return x
def Turn_Service_Lock(self):
if self.Service_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'service_lock' , x)
return x
def Turn_Voice_Lock(self):
if self.Voice_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'voice_lock' , x)
return x
def Turn_Sticker_Lock(self):
if self.Sticker_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'sticker_lock' , x)
return x
def Turn_Photo_Lock(self):
if self.Photo_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'photo_lock' , x)
return x
def Turn_Link_Lock(self):
if self.Link_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'link_lock' , x)
return x
def Turn_Forward_Lock(self):
if self.Forward_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'forward_lock' , x)
return x
def Set_Anti_Spam(self,x):
if self.Anti_Robot:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'anti_spam' , x)
return x
def Turn_Anti_Robot(self):
if self.Anti_Robot:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'anti_robot' , x)
return x
    def Turn_Anti_Porn(self):
        # Toggle the porn filter; returns the new 0/1 value.
        # NOTE(review): self.Porn is a *string* (set via str(...) in
        # __init__), so any non-empty value -- including "0" -- is truthy
        # here and this would always write 0. Confirm the stored
        # representation before changing.
        if self.Porn:
            x=0
        else:
            x=1
        GroupsControlBase.Change_Group_Control_Feature(self.Main , 'porn' , x)
        return x
def Turn_Anti_NFSW(self):
if self.Anti_NFSW:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'fosh_filter' , x)
return x
def Turn_Anti_Tabchi(self):
if self.Anti_Tabchi:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'anti_tabchi' , x)
return x
def Set_Channel(self , x):
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'channel' , x)
return x
def Set_Channel_text(self , x):
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'channel_text' , x)
return x
def Set_Welcome(self , x):
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'welcome' , x)
return x
def Set_Spam_Count(self , x):
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'spam_count' , x)
return x
def Turn_Channel_Lock(self):
if self.Channel_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'channellock' , x)
return x
def Turn_Lock(self):
if self.Group_Lock:
x=0
else:
x=1
GroupsControlBase.Change_Group_Control_Feature(self.Main , 'lock' , x)
return x
#--------------------------------------------------------------------------
def Change_Message_State(self):
if self.Message_State:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'message_state' , x)
return x
def Change_Bors(self):
if self.Bors:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'bors' , x)
return x
def Change_Questions(self):
if self.Questions:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'questions' , x)
return x
def Change_Role_Saver(self):
if self.Role_Saver:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'role_saver' , x)
return x
def Change_Nazer_pin(self):
if self.Nazer_pin:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'pin_nazer' , x)
return x
def Change_Shekar_Pin(self):
if self.Shekar_Pin:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'pin_shekar' , x)
return x
def Change_Dead_NextGame(self):
if self.Dead_NextGame:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'dead_next' , x)
return x
def Change_JoinTime_Alarm(self):
if self.JoinTime_Alarm:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'jointime_sup' , x)
return x
def Set_Ghaleb(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'ghaleb' , x)
return x
def Set_Next_Message_Id(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'auto_next_message_id' , x)
return x
def Change_Afk_Warn(self):
if self.Afk_Warn:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'afk_warn' , x)
return x
def Change_Admin_Alarm(self):
if self.Admin_Alarm:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'admin_Alarm' , x)
return x
def Change_Sooti(self):
if self.Sooti:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'sooti' , x)
return x
def Set_emoji1(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'emoji1' , x)
return x
def Set_emoji2(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'emoji2' , x)
return x
def Set_emoji3(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'emoji3' , x)
return x
def Change_NextGame_Response(self):
if self.NextGame_Response:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'next_game_response' , x)
return x
def DeadLine_Ends(self):
GroupsBase.Change_Group_Feature(self.Main , 'davazdah' , 0)
return True
def Change_Auto_NextGame(self):
if self.Auto_nextGame:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_next_game' , x)
return x
def Change_Mute_Fun(self):
if self.Mute_Fun:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'fun_mute' , x)
return x
def Change_Bot_Kind(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'bot_kind' , x)
return x
def Set_Warn(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'warn' , x)
return x
def Change_State_Lock(self,x):
GroupsBase.Change_Group_Feature(self.Main , 'state_lock' , x)
return x
def Set_State(self , x):
GroupsBase.Change_Group_Feature(self.Main , 'state' , x)
return x
def Change_Bet(self):
if self.Bet:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'bet' , x)
return x
def Change_Auto_Tag(self):
if self.Auto_Tag:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_tag' , x)
return x
def Change_Auto_DeleteTag(self):
if self.Auto_DeleteTag:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_del' , x)
return x
def Change_Auto_Tag_Support(self):
if self.Auto_Tag_Support:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_tag_sup' , x)
return x
def Change_Auto_DeleteTag_Sup(self):
if self.Auto_DeleteTag_Sup:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_del_sup' , x)
return x
def Change_Alarm(self):
if self.Alarm:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'alarm' , x)
return x
def Change_Auto_Start(self):
if self.Alarm:
x=0
else:
x=1
GroupsBase.Change_Group_Feature(self.Main , 'auto_start' , x)
return x
def Tag_Started(self):
GroupsBase.Change_Group_Feature(self.Main , 'is_tagging' , 1)
return
def Tag_Stopped(self):
GroupsBase.Change_Group_Feature(self.Main , 'is_tagging' , 0)
return
#------------------------------------------------------------------------|
def Manual_Control_Change(self,row,amnt): #|
if amnt: #|
x=0 #|
else: #|
x=1 #|
GroupsControlBase.Change_Group_Control_Feature(self.Main , row , x) #|
return x #|
#|
def Manual_Change(self,row,amnt): #|
if amnt: #|
x=0 #|
else: #|
x=1 #|
GroupsBase.Change_Group_Feature(self.Main , row , x) #|
return x #|
#------------------------------------------------------------------------|
def Reset_AFKS(self):
Set_All_Group_AFK_Zero( self.Main )
return True
def END_Bet(self , team : int ):
x=BetBase.win( team , self.Main )
return x
    def Game_Started(self,hash,Join_Time,players):
        """Record a newly started game in the group's stats table.

        The original docstring listed the columns
        'time,players,main,hour,afk,hash,date' but only five values are
        passed to Add_Game below -- NOTE(review): confirm Add_Game's
        signature. The ``hash`` parameter shadows the builtin of the same
        name.
        """
        GroupStatsBase.Add_Game(Join_Time,players,self.Main,int((datetime.now()).hour),hash)
        return True
def | |
'rho','sigma','tau2','beta'] # necessary params
if any(x not in param.keys() for x in lst_params):
raise ValueError('Missing parameter in param.\n Need {}.\n Got {} '.format(lst_params,list(param.keys())))
niter = param['niter']
eta_star = param['eta_star']
delta = param['delta']
tau = param['tau']
rho = param['rho']
sigma = param['sigma']
tau2 = param['tau2']
# === END check block ===
# Initialization
w_old = np.ones((d,k))
Z_old = np.ones((m,k))
mu_old = np.eye(k,k)
Ik = np.eye(k,k)
loss = np.zeros(niter)
# Main Block
for i in range(niter):
V = w_old + tau*np.matmul(X.T,Z_old)
# Nuclear constraint
L,S0,R = np.linalg.svd(V,full_matrices=False)
norm_nuclear = S0.sum()
vs1 = proj_l1ball(S0.reshape((-1,)),eta_star)
S1 = vs1.reshape(S0.shape)
w = np.matmul(L, S1[..., None] * R)
w = 2*w - w_old
mu_new = (mu_old + rho*tau2*Ik-tau2*np.matmul(Y.T,Z_old)) / (1 + tau2*rho)
mu = 2*mu_new - mu_old
Z = (Z_old + sigma*(np.matmul(Y,mu) - np.matmul(X,w))) / (1 + sigma*delta)
Z_new = np.maximum(np.minimum(Z,1),-1)
mu_old = mu_new
w_old = w
Z_old = Z_new
loss[i] = np.linalg.norm(np.matmul(Y,mu_new)-np.matmul(X,w),1) \
+ 0.5*(np.linalg.norm(Ik-mu_new,'fro')**2)
# End loop
Z = Z_old
mu = mu_new
nbGenes_fin,_ = nb_Genes(w)
loss = loss/loss[0]
return w,mu,nbGenes_fin,loss,Z
# ================================== Part 2 ====================================
# ===================== Base Launch functions (scripts) ========================
def basic_run_eta(func_algo, func_predict,
                  X,YR, k,
                  genenames=None,
                  clusternames=None,
                  niter=30,
                  rho=1,
                  tau=4,
                  beta=0.25,
                  delta=1.0,
                  eta = None,
                  eta_star = None,
                  gamma = 1,
                  nfold=4,
                  rng = 1,
                  showres=True,
                  keepfig = False,
                  saveres=False,
                  outputPath='../results/'):
    '''
    # =====================================================================
    # Basic function to launch the algorithm of some specific parameters.
    # - Input:
    #   - func_algo (necessary) : The function of the algorithm
    #   - func_predict (necessary) : The function to predict
    #   - X (necessary) : The data
    #   - YR (necessary) : The labels for the data
    #   - k (necessary) : The number of the clusters
    #
    #   - genenames (optional) : The names of the features of the data
    #                            if not given, it will be
    #                            ['Gene 1','Gene 2',...]
    #
    #   - clusternames (optional) : The clusternames of the data
    #                               if not given, it will be
    #                               ['Class 1', 'Class 2',...]
    #
    #   - niter (optional) : The number of iterations
    #
    #   - rho, tau, beta, delta, : The hyper-parameters for the algo
    #     eta, gamma, etc (optional)
    #
    #   - nfold (optional) : The number of the folds of the cross validation
    #
    #   - rng (optional) : The seed to control the random function
    #
    #   - showres (optional) : Boolean value. True if we want to show
    #                          the results, plot the figures etc.
    #
    #   - saveres (optional) : Boolean value. True to save the results
    #
    #   - outputPath (optional) : String value. The output path.
    #
    # - Output:
    #   - mu : The centroids
    #   - nbm : Number of genes
    #   - accG : Global accuracy
    #   - loss : Loss for each iterations
    #   - W_mean : Mean weight matrix for all folds
    #   - timeElapsed : Time elapsed for one fold
    #   - (And the tables) : df_topGenes, df_normW, df_topG_normW,
    #                        df_topGenes_mean, df_normW_mean,
    #                        df_topG_normW_mean, df_acctest
    # ======================================================================
    '''
    np.random.seed(rng)  # reproducible
    if not os.path.exists(outputPath):  # make the directory if it does not exist
        os.makedirs(outputPath)
    n, d = X.shape
    # Parameter checking: default feature / cluster names.
    if genenames is None:
        genenames = ['Gene {}'.format(i+1) for i in range(d)]
    if clusternames is None:
        clusternames = ['Class {}'.format(i+1) for i in range(k)]
    if YR.ndim == 1:  # OneHotEncoder raises a TypeError on 1D arrays
        YR = YR.reshape(-1, 1)
    Y = OneHotEncoder(categories='auto').fit_transform(YR).toarray()
    normY = normest(Y)
    normY2 = normY**2
    # Dropping the cells randomly if the n%d is not zero
    # For more details please see instructions in drop_cells
    X, YR = drop_cells(X, YR, nfold)
    # Derived step sizes, then pack everything for func_algo.
    tau2 = beta*(1/(np.sqrt(n)*normY))
    eps = 1/(1 + tau2*rho*0.25)
    sigma = 1.0/(tau + (tau2*eps*normY2))  # Converge until 2.6 for L1Nel
    param = {
        'niter': niter,
        'rho': rho,
        'tau': tau,
        'tau2': tau2,
        'sigma': sigma,
        'delta': delta,
        'beta': beta,
        'eta': eta,
        'eta_star': eta_star,
        'gamma': gamma,
    }
    # Initialization
    nbG = np.zeros(nfold, dtype=int)      # Number of genes for each fold
    accuracy_train = np.zeros((nfold, k+1))
    accuracy_test = np.zeros((nfold, k+1))
    W0 = np.zeros((d, k, nfold))          # w in each fold
    mu0 = np.zeros((k, k, nfold))
    # W_mean stores the mean of W0 along its third (fold) axis.
    W_mean = np.zeros((d, k))
    loss_iter0 = np.zeros((nfold, niter))  # loss per iteration of each fold
    # (Bug fix: a second, dtype-less `nbG = np.zeros(nfold)` used to clobber
    # the int-typed buffer above; removed.)
    # Parameters printing
    print('\nStarts trainning for')
    print('{:>6}:{:<6}'.format('niter', niter))
    algo_name = func_algo.__name__.lower()
    if 'fista' in algo_name:
        print('{:>6}:{:<6}'.format('eta', eta))
        # Bug fix: `delta` used to be printed under the 'gamma' label.
        print('{:>6}:{:<6}'.format('gamma', gamma))
    elif 'or' in algo_name:
        print('{:>6}:{:<6}'.format('eta', eta))
        print('{:>6}:{:<6}'.format('rho', rho))
        print('{:>6}:{:<6}'.format('tau', tau))
        print('{:>6}:{:<6}'.format('beta', beta))
        print('{:>6}:{:<6}'.format('tau_mu', tau2))
        print('{:>6}:{:<6}'.format('sigma', sigma))
        print('{:>6}:{:<6}'.format('delta', delta))
        # Bug fix: `delta` used to be printed under the 'gamma' label.
        print('{:>6}:{:<6}'.format('gamma', gamma))
    elif '_l2' in algo_name:
        print('{:>6}:{:<6}'.format('eta', eta))
        print('{:>6}:{:<6}'.format('rho', rho))
        print('{:>6}:{:<6}'.format('tau', tau))
        print('{:>6}:{:<6}'.format('beta', beta))
        print('{:>6}:{:<6}'.format('tau_mu', tau2))
        print('{:>6}:{:<6}'.format('sigma', sigma))
    elif 'nuclear' in algo_name:
        print('{:>6}:{:<6}'.format('eta_star', eta_star))
        print('{:>6}:{:<6}'.format('rho', rho))
        print('{:>6}:{:<6}'.format('tau', tau))
        print('{:>6}:{:<6}'.format('beta', beta))
        print('{:>6}:{:<6}'.format('tau_mu', tau2))
        print('{:>6}:{:<6}'.format('sigma', sigma))
        print('{:>6}:{:<6}'.format('delta', delta))
    else:
        print('{:>6}:{:<6}'.format('eta', eta))
        print('{:>6}:{:<6}'.format('rho', rho))
        print('{:>6}:{:<6}'.format('tau', tau))
        print('{:>6}:{:<6}'.format('beta', beta))
        print('{:>6}:{:<6}'.format('tau_mu', tau2))
        print('{:>6}:{:<6}'.format('sigma', sigma))
        print('{:>6}:{:<6}'.format('delta', delta))
    Y_PDS = np.zeros(YR.shape)
    meanclassi = np.zeros(nfold)
    kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
    # Weights learned on the full data set (only w_all is returned; the run
    # is repeated below for feature selection to keep behaviour unchanged).
    w_all, mu_all, nbGenes_all, loss_all = func_algo(X, YR, k, param)[0:4]
    for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
        print('{:-<30}'.format(''))
        print('{message:^6} {f1} / {f2}'.format(message='fold', f1=i+1, f2=nfold))
        print('-> {} classification...'.format(func_algo.__name__))
        # ========== Training =========
        Xtrain = X[train_ind]
        Xtest = X[test_ind]
        Ytrain = YR[train_ind]
        Ytest = YR[test_ind]
        startTime = time.perf_counter()
        w, mu, nbGenes, loss = func_algo(Xtrain, Ytrain, k, param)[0:4]
        endTime = time.perf_counter()
        timeElapsed = endTime - startTime
        print('-> Completed.\n-> Time Elapsed:{:.4}s'.format(timeElapsed))
        W0[:, :, i] = w
        mu0[:, :, i] = mu
        loss_iter0[i, :] = loss
        # ========== Accuracy =========
        Ytrain_pred = func_predict(Xtrain, w, mu)
        Ytest_pred = func_predict(Xtest, w, mu)
        accuracy_train[i, 0], accuracy_train[i, 1:k+1] = compute_accuracy(Ytrain, Ytrain_pred, k)
        accuracy_test[i, 0], accuracy_test[i, 1:k+1] = compute_accuracy(Ytest, Ytest_pred, k)
        meanclassi[i] = np.mean(accuracy_test[i, 1:k+1])
        nbG[i] = nbGenes
        Y_PDS[test_ind] = Ytest_pred
        print('{:-<30}'.format(''))
    # end kfold loop
    nbm = int(nbG.mean())
    accG = np.mean(accuracy_test[:, 0], axis=0)
    W_mean = np.mean(W0, axis=2)
    mu_mean = np.mean(mu0, axis=2)
    # (Unused locals `Meanclass` and `normfro` removed.)
    print('Training step ends.\n')
    # Class size (real vs estimated from the out-of-fold predictions)
    Ctab = []
    size_class = np.zeros(k)       # Size of each class (real)
    size_class_est = np.zeros(k)   # Size of each class (estimated)
    for j in range(k):
        size_class[j] = (YR == (j+1)).sum()
        size_class_est[j] = (Y_PDS == (j+1)).sum()
        Ctab.append('Class {}'.format(j+1))
    df_szclass = pd.DataFrame(size_class, index=Ctab, columns=['Class Size'])
    df_szclass_est = pd.DataFrame(size_class_est, index=Ctab, columns=['Class Size'])
    # Data accuracy: append the over-fold mean as a final row
    accuracy_train = np.vstack((accuracy_train, np.mean(accuracy_train, axis=0)))
    accuracy_test = np.vstack((accuracy_test, np.mean(accuracy_test, axis=0)))
    ind_df = ['Fold {}'.format(i_fold+1) for i_fold in range(nfold)]
    ind_df.append('Mean')
    # clusternames is always set at this point (defaulted above), so the old
    # `if clusternames is None` fallback to Ctab was dead code.
    columns = ['Global'] + clusternames
    df_accTrain = pd.DataFrame(accuracy_train, index=ind_df, columns=columns)
    df_acctest = pd.DataFrame(accuracy_test, index=ind_df, columns=columns)
    # Feature selection
    print('Selecting features from whole dataset...', end='')
    w, mu, nbGenes, loss = func_algo(X, YR, k, param)[0:4]
    topGenes, normW = select_feature_w(w, genenames)
    topGenes_mean, normW_mean = select_feature_w(W_mean, genenames)
    # Mean of each fold
    df_topGenes_mean = pd.DataFrame(topGenes_mean, columns=clusternames)
    df_normW_mean = pd.DataFrame(normW_mean, columns=clusternames)
    df_topG_normW_mean = merge_topGene_norm(topGenes_mean, normW_mean, clusternames)
    # All data
    df_topGenes = pd.DataFrame(topGenes, columns=clusternames)
    df_normW = pd.DataFrame(normW, columns=clusternames)
    df_topG_normW = merge_topGene_norm(topGenes, normW, clusternames)
    print('Completed.\n')
    # Two heatmaps
    M_heatmap_classification = heatmap_classification(Y_PDS, YR, clusternames, rotate=60)
    M_heatmap_signature = heatmap_normW(normW, clusternames, nbr_l=30, rotate=60)
    # Results
    if showres == True:
        print('Size class (real):')
        print(df_szclass)
        print('\nSize class (estimated):')
        print(df_szclass_est)
        print('\nAccuracy Train')
        print(df_accTrain)
        print('\nAccuracy Test')
        print(df_acctest)
        if keepfig == False:
            plt.close("all")
        fig_lossIter = plt.figure(figsize=(8, 6))
        plt.plot(np.arange(niter, dtype=int)+1, loss)
        # Raw strings: '\e' is an invalid escape sequence otherwise.
        msg_eta = r'$\eta$:%d' % eta if eta is not None else ''
        msg_etaS = r'$\eta*$:%d' % eta_star if eta_star is not None else ''
        plt.title('loss for each iteration {} {}\n ({})'.format(msg_eta, msg_etaS, func_algo.__name__), fontsize=18)
        plt.ylabel('Loss', fontsize=18)
        plt.xlabel('Iteration', fontsize=18)
        plt.xticks(np.linspace(1, niter, num=6, endpoint=True, dtype=int))
        plt.xlim(left=1, right=niter)
        plt.ylim((0, 1))
    # Saving Result
    if saveres == True:
        # define two nametags
        nametag_eta = '_eta-%d' % eta if eta is not None else ''
        nametag_etaS = '_etaStar-%d' % eta_star if eta_star is not None else ''
        # save loss
        filename_loss = 'loss_{}_beta-{}_delta-{}{}{}_niter-{}.txt'.format(func_algo.__name__,beta,delta, nametag_eta,nametag_etaS,niter)
        np.savetxt(outputPath + filename_loss, loss)
        # define function name tag for two heatmaps
        func_tag = func_algo.__name__ + nametag_eta + nametag_etaS
        # Save heatmaps
        filename_heat = '{}{}_Heatmap_of_confusion_Matrix.npy'.format(outputPath, func_tag)
        np.save(filename_heat, M_heatmap_classification)
        filename_heat = '{}{}_Heatmap_of_signature_Matrix.npy'.format(outputPath, func_tag)
        np.save(filename_heat, M_heatmap_signature)
        df_acctest.to_csv('{}{}{}{}_AccuracyTest.csv'.format(outputPath,func_algo.__name__,nametag_eta,nametag_etaS),sep=';')
        df_topG_normW.to_csv('{}{}{}{}_TopGenesAndNormW.csv'.format(outputPath,func_algo.__name__,nametag_eta,nametag_etaS),sep=';')
    return mu_mean, nbm, accG, loss, W_mean, timeElapsed, \
        df_topGenes, df_normW, df_topG_normW, \
        df_topGenes_mean, df_normW_mean, df_topG_normW_mean, \
        df_acctest, w_all
# ===================== ========================================================
def getPredLabel(Ypred):
    """Threshold predictions in place at 1.5: values > 1.5 become 2,
    everything else becomes 1. Mutates and returns the same array."""
    for idx in range(len(Ypred)):
        Ypred[idx] = 2 if Ypred[idx] > 1.5 else 1
    return Ypred
# =====================Functions used to compare different algorithms========================================================
def getCoefs(alg, model):
if (alg == 'RF'):
coef = model.feature_importances_
if (alg == 'svm'):
coef = model.coef_.transpose()
if (alg == 'plsda'):
coef | |
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by <NAME>
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
A fake XenAPI SDK.
"""
import base64
import pickle
import random
import uuid
from xml.sax import saxutils
import zlib
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from nova import exception
from nova.i18n import _
from nova.virt.xenapi.client import session as xenapi_session
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
_db_content = {}
LOG = logging.getLogger(__name__)
def reset():
    """Empty every table and re-seed the fake DB with a host and its dom0."""
    for table in _CLASSES:
        _db_content[table] = {}
    host_ref = create_host('fake')
    # The default control domain ("dom0") always exists on the fake host.
    create_vm('fake dom 0', 'Running',
              is_a_template=False,
              is_control_domain=True,
              resident_on=host_ref)
def reset_table(table):
    """Empty a single table; unknown table names are silently ignored."""
    if table in _CLASSES:
        _db_content[table] = {}
def _create_pool(name_label):
    """Insert a pool record and return its opaque reference."""
    pool_rec = {'name_label': name_label}
    return _create_object('pool', pool_rec)
def create_host(name_label, hostname='fake_name', address='fake_addr'):
    """Create a host record plus its local SRs and PIF.

    On the first call this also creates the pool, pointing its master
    and default/suspend-image SRs at this host.

    Returns the new host's opaque reference.
    """
    host_ref = _create_object('host',
                              {'name_label': name_label,
                               'hostname': hostname,
                               'address': address})
    host_default_sr_ref = _create_local_srs(host_ref)
    _create_local_pif(host_ref)
    # Create a pool if we don't have one already
    if len(_db_content['pool']) == 0:
        pool_ref = _create_pool('')
        _db_content['pool'][pool_ref]['master'] = host_ref
        _db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref
        _db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref
    # BUG FIX: reset() assigns this function's result and forwards it as
    # 'resident_on'; without an explicit return it always received None.
    return host_ref
def create_network(name_label, bridge):
    """Insert a network record with the given label and bridge name."""
    net_rec = {'name_label': name_label,
               'bridge': bridge}
    return _create_object('network', net_rec)
def create_vm(name_label, status, **kwargs):
    """Insert a VM record; running VMs get a domid and a resident host."""
    running = (status == 'Running')
    # A running domain needs a domain id and must reside on some host;
    # any other power state gets the -1 / '' placeholders.
    domid = random.randrange(1, 1 << 16) if running else -1
    resident_on = list(_db_content['host'])[0] if running else ''
    vm_rec = dict(kwargs)
    vm_rec.update({'name_label': name_label,
                   'domid': domid,
                   'power_state': status,
                   'blocked_operations': {},
                   'resident_on': resident_on})
    vm_ref = _create_object('VM', vm_rec)
    after_VM_create(vm_ref, vm_rec)
    return vm_ref
def destroy_vm(vm_ref):
    """Delete a VM record after destroying each of its VBDs."""
    vm_rec = _db_content['VM'][vm_ref]
    # Iterate over a copy: destroy_vbd() removes entries from this list.
    for vbd_ref in list(vm_rec['VBDs']):
        destroy_vbd(vbd_ref)
    del _db_content['VM'][vm_ref]
def destroy_vbd(vbd_ref):
    """Delete a VBD record, dropping the back-references from its VM and VDI."""
    vbd_rec = _db_content['VBD'][vbd_ref]
    _db_content['VM'][vbd_rec['VM']]['VBDs'].remove(vbd_ref)
    _db_content['VDI'][vbd_rec['VDI']]['VBDs'].remove(vbd_ref)
    del _db_content['VBD'][vbd_ref]
def destroy_vdi(vdi_ref):
    """Delete a VDI record after destroying every VBD that references it."""
    vdi_rec = _db_content['VDI'][vdi_ref]
    # Copy the list first: destroy_vbd() mutates it as it unlinks each VBD.
    for vbd_ref in list(vdi_rec['VBDs']):
        destroy_vbd(vbd_ref)
    del _db_content['VDI'][vdi_ref]
def create_vdi(name_label, sr_ref, **kwargs):
    """Insert a VDI record on the given SR; kwargs override the defaults."""
    vdi_rec = {
        'SR': sr_ref,
        'read_only': False,
        'type': '',
        'name_label': name_label,
        'name_description': '',
        'sharable': False,
        'other_config': {},
        'location': '',
        'xenstore_data': {},
        'sm_config': {'vhd-parent': None},
        'physical_utilisation': '123',
        'managed': True,
    }
    # Caller-supplied fields take precedence over the defaults above.
    for key, value in kwargs.items():
        vdi_rec[key] = value
    vdi_ref = _create_object('VDI', vdi_rec)
    after_VDI_create(vdi_ref, vdi_rec)
    return vdi_ref
def after_VDI_create(vdi_ref, vdi_rec):
    """Ensure a freshly created VDI record has a (possibly empty) VBD list."""
    if 'VBDs' not in vdi_rec:
        vdi_rec['VBDs'] = []
def create_vbd(vm_ref, vdi_ref, userdevice=0, other_config=None):
    """Insert a VBD linking vm_ref and vdi_ref; returns the new VBD ref."""
    vbd_rec = {'VM': vm_ref,
               'VDI': vdi_ref,
               'userdevice': str(userdevice),
               'currently_attached': False,
               # Avoid a shared mutable default for other_config
               'other_config': {} if other_config is None else other_config}
    vbd_ref = _create_object('VBD', vbd_rec)
    after_VBD_create(vbd_ref, vbd_rec)
    return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
    """Create read-only fields and backref from VM and VDI to VBD when VBD
    is created.
    """
    vbd_rec['currently_attached'] = False
    vbd_rec['device'] = ''
    vbd_rec.setdefault('other_config', {})
    vm_rec = _db_content['VM'][vbd_rec['VM']]
    vm_rec['VBDs'].append(vbd_ref)
    # Keep a denormalised copy of the VM's name on the VBD record.
    vbd_rec['vm_name_label'] = vm_rec['name_label']
    vdi_ref = vbd_rec['VDI']
    # A NULL VDI ref (e.g. an empty CD drive) gets no back-reference.
    if vdi_ref and vdi_ref != "OpaqueRef:NULL":
        _db_content['VDI'][vdi_ref]['VBDs'].append(vbd_ref)
def after_VIF_create(vif_ref, vif_rec):
    """Link the new VIF back from its owning VM record."""
    _db_content['VM'][vif_rec['VM']]['VIFs'].append(vif_ref)
def after_VM_create(vm_ref, vm_rec):
    """Create read-only fields in the VM record.

    Fields already supplied by the caller are left untouched; insertion
    order matches the original defaults.
    """
    for key, default in (('domid', -1),
                         ('is_control_domain', False),
                         ('is_a_template', False),
                         ('memory_static_max', str(8 * units.Gi)),
                         ('memory_dynamic_max', str(8 * units.Gi)),
                         ('VCPUs_max', str(4)),
                         ('VBDs', []),
                         ('VIFs', []),
                         ('resident_on', '')):
        vm_rec.setdefault(key, default)
def create_pbd(host_ref, sr_ref, attached):
    """Insert a PBD record connecting host_ref to sr_ref."""
    device_config = {'path': '/var/run/sr-mount/%s' % sr_ref}
    pbd_rec = {'device_config': device_config,
               'host': host_ref,
               'SR': sr_ref,
               'currently_attached': attached}
    return _create_object('PBD', pbd_rec)
def create_task(name_label):
    """Insert a task record in the 'pending' state; returns its ref."""
    task_rec = {'name_label': name_label,
                'status': 'pending'}
    return _create_object('task', task_rec)
def _create_local_srs(host_ref):
    """Create an SR that looks like the one created on the local disk by
    default by the XenServer installer. Also, fake the installation of
    an ISO SR.

    Returns the ref of the 'Local storage' SR (the ISO SR's ref is
    discarded); create_host() uses it as the pool's default SR.
    """
    # ISO library SR
    create_sr(name_label='Local storage ISO',
              type='iso',
              other_config={'i18n-original-value-name_label':
                            'Local storage ISO',
                            'i18n-key': 'local-storage-iso'},
              physical_size=80000,
              physical_utilisation=40000,
              virtual_allocation=80000,
              host_ref=host_ref)
    # Local-disk (ext) SR; only this ref is returned to the caller
    return create_sr(name_label='Local storage',
                     type='ext',
                     other_config={'i18n-original-value-name_label':
                                   'Local storage',
                                   'i18n-key': 'local-storage'},
                     physical_size=40000,
                     physical_utilisation=20000,
                     virtual_allocation=10000,
                     host_ref=host_ref)
def create_sr(**kwargs):
    """Insert an SR record built from kwargs, plus one attached PBD."""
    sr_rec = {
        'name_label': kwargs.get('name_label'),
        'type': kwargs.get('type'),
        # content_type mirrors the SR type, defaulting to 'user'
        'content_type': kwargs.get('type', 'user'),
        'shared': kwargs.get('shared', False),
        'physical_size': kwargs.get('physical_size', str(1 << 30)),
        'physical_utilisation': str(kwargs.get('physical_utilisation', 0)),
        'virtual_allocation': str(kwargs.get('virtual_allocation', 0)),
        'other_config': kwargs.get('other_config', {}),
        'VDIs': kwargs.get('VDIs', []),
    }
    sr_ref = _create_object('SR', sr_rec)
    # Every SR gets a single, currently-attached PBD on the given host.
    pbd_ref = create_pbd(kwargs.get('host_ref'), sr_ref, True)
    _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
    return sr_ref
def _create_local_pif(host_ref):
    """Insert a fake physical PIF for the host; its uuid equals its ref."""
    pif_rec = {'name-label': 'Fake PIF',
               'MAC': '00:11:22:33:44:55',
               'physical': True,
               'VLAN': -1,
               'device': 'fake0',
               'host_uuid': host_ref,
               'network': '',
               'IP': '10.1.1.1',
               'IPv6': '',
               'uuid': '',
               'management': 'true'}
    pif_ref = _create_object('PIF', pif_rec)
    # Overwrite the generated uuid so that, for PIFs, uuid == opaque ref.
    pif_rec['uuid'] = pif_ref
    return pif_ref
def _create_object(table, obj):
    """Store obj in the given table under a fresh opaque ref; return the ref."""
    ref = str(uuid.uuid4())
    # Records carry their own uuid, separate from the opaque reference.
    obj['uuid'] = str(uuid.uuid4())
    _db_content[table][ref] = obj
    return ref
def _create_sr(table, obj):
    """Create an SR from a positional XenAPI argument list.

    obj[2] is the record dict and obj[6] the SR type.  Only 'iscsi' and
    'nfs' are accepted; anything else raises SR_UNKNOWN_DRIVER, and only
    'iscsi' SRs get a VDI/PBD wired up.
    """
    sr_type = obj[6]
    # Forces fake to support iscsi only
    if sr_type not in ('iscsi', 'nfs'):
        raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
    host_ref = list(_db_content['host'])[0]
    sr_ref = _create_object(table, obj[2])
    if sr_type == 'iscsi':
        vdi_ref = create_vdi('', sr_ref)
        pbd_ref = create_pbd(host_ref, sr_ref, True)
        sr_rec = _db_content['SR'][sr_ref]
        sr_rec['VDIs'] = [vdi_ref]
        sr_rec['PBDs'] = [pbd_ref]
        _db_content['VDI'][vdi_ref]['SR'] = sr_ref
        _db_content['PBD'][pbd_ref]['SR'] = sr_ref
    return sr_ref
def _create_vlan(pif_ref, vlan_num, network_ref):
    """Create a VLAN and the untagged PIF carrying it; return the VLAN ref.

    NOTE(review): network_ref is accepted but unused, matching the
    existing call signature.
    """
    pif_rec = get_record('PIF', pif_ref)
    # The VLAN rides on a new fake PIF sharing the device/host of pif_ref.
    vlan_pif_rec = {'name-label': 'Fake VLAN PIF',
                    'MAC': '00:11:22:33:44:55',
                    'physical': True,
                    'VLAN': vlan_num,
                    'device': pif_rec['device'],
                    'host_uuid': pif_rec['host_uuid']}
    vlan_pif_ref = _create_object('PIF', vlan_pif_rec)
    vlan_rec = {'tagged-pif': pif_ref,
                'untagged-pif': vlan_pif_ref,
                'tag': vlan_num}
    return _create_object('VLAN', vlan_rec)
def get_all(table):
    """Return a list of every opaque ref in the given table."""
    return list(_db_content[table])
def get_all_records(table):
    """Return the ref -> record mapping for table (the live dict, not a copy)."""
    return _db_content[table]
def _query_matches(record, query):
# Simple support for the XenServer query language:
# 'field "host"="<uuid>" and field "SR"="<sr uuid>"'
# Tested through existing tests (e.g. calls to find_network_with_bridge)
and_clauses = query.split(" and ")
if len(and_clauses) > 1:
matches = True
for clause in and_clauses:
matches = matches and _query_matches(record, clause)
return matches
or_clauses = query.split(" or ")
if len(or_clauses) > 1:
matches = False
for clause in or_clauses:
matches = matches or _query_matches(record, clause)
return matches
if query.startswith('not '):
return not _query_matches(record, query[4:])
# Now it must be a single field - bad queries never match
if not query.startswith('field'):
return False
(field, value) = query[6:].split('=', 1)
# Some fields (e.g. name_label, memory_overhead) have double
# underscores in the DB, but only single underscores when querying
field = field.replace("__", "_").strip(" \"'")
value = value.strip(" \"'")
# Strings should be directly compared
if isinstance(record[field], str):
return record[field] == value
# But for all other value-checks, convert to a string first
# (Notably used for booleans - which can be lower or camel
# case and are interpreted/sanitised by XAPI)
return str(record[field]).lower() == value.lower()
def get_all_records_where(table_name, query):
    """Return {ref: record} for every record in table_name matching query."""
    table = _db_content[table_name]
    return {ref: rec
            for ref, rec in table.items()
            if _query_matches(rec, query)}
def get_record(table, ref):
    """Return the record for ref, raising HANDLE_INVALID when it is unknown."""
    records = _db_content[table]
    if ref not in records:
        raise Failure(['HANDLE_INVALID', table, ref])
    return records[ref]
def check_for_session_leaks():
    """Raise NovaException if any fake sessions were left open."""
    leaked = _db_content['session']
    if leaked:
        raise exception.NovaException('Sessions have leaked: %s' % leaked)
def as_value(s):
    """Helper function for simulating XenAPI plugin responses. It
    escapes and wraps the given argument.
    """
    escaped = saxutils.escape(s)
    return '<value>{0}</value>'.format(escaped)
def as_json(*args, **kwargs):
"""Helper function for simulating XenAPI plugin responses for those
that are returning JSON. If this function is given plain arguments,
then these are rendered as a JSON list. If it's given keyword
arguments then these are rendered as | |
# DO NOT MODIFY THIS FILE
# Run me via: python3 -m unittest test_max_heap
import unittest
import time
import random
from max_heap import MaxHeap
class TestMaxHeap(unittest.TestCase):
"""
Initialization
"""
    def test_instantiation(self):
        """
        A MaxHeap exists.
        """
        # A NameError here means the MaxHeap class is missing entirely;
        # convert it into an explicit test failure with a clear message.
        try:
            MaxHeap()
        except NameError:
            self.fail("Could not instantiate MaxHeap.")
    """
    A heap stores its data in an array, such as a Python list.
    """
    def test_internal_data(self):
        """
        A MaxHeap uses an array (a dynamic array / Python list) to store its data.
        """
        h = MaxHeap()
        # The backing store must be a plain list, and empty on creation.
        self.assertEqual(list, type(h._data))
        self.assertEqual(0, len(h._data))
    def test_size_initial(self):
        """
        The _size() of a new heap is 0.
        """
        h = MaxHeap()
        self.assertEqual(0, h._size())
    def test_size_data(self):
        """
        The _size() of a heap is equal to the number of values in its list.
        """
        # Drive _size() purely through the backing list, so this test does
        # not depend on insert/delete being implemented yet.
        h = MaxHeap()
        h._data.append('fake')
        self.assertEqual(1, h._size())
        h._data.append('fake')
        self.assertEqual(2, h._size())
        h._data.pop()
        self.assertEqual(1, h._size())
    """
    Emptiness. A warm-up. Good to know, and a handy abstraction that you might
    use elsewhere.
    """
    def test_empty_initial(self):
        """
        A new heap is empty.
        Hint: _size is a convenient abstraction, and helps avoid repetitive code.
        """
        h = MaxHeap()
        self.assertTrue(h._is_empty())
    def test_not_empty(self):
        """
        A heap is not empty if there are items in its data list.
        """
        h = MaxHeap()
        h._data.append('fake')
        self.assertFalse(h._is_empty())
        h._data.append('fake')
        self.assertFalse(h._is_empty())
    def test_empty(self):
        """
        A heap with no items in its data list is empty.
        """
        h = MaxHeap()
        h._data.append('fake')
        h._data.append('fake')
        # Emptying the backing list directly must make the heap empty again.
        h._data = []
        self.assertTrue(h._is_empty())
    """
    Last index. The index of the last element in the heap.
    Later, when deleting from a heap, the first step in the deletion algorithm
    moves the last element to the root position. So this will be handy.
    """
    def test_last_index_initial(self):
        """
        The 'last index' of an empty heap happens to be -1.
        Hint: Easy to calculate if you know its size.
        """
        h = MaxHeap()
        self.assertEqual(-1, h._last_index())
    def test_last_index_one(self):
        """
        The last index of a heap with one element is 0.
        Hint: Easy, if you know how to determine the last index of a list.
        """
        h = MaxHeap()
        h._data.append('fake')
        self.assertEqual(0, h._last_index())
    def test_last_index_two(self):
        """
        The last index of a heap with two elements is 1.
        """
        h = MaxHeap()
        h._data.append('fake')
        h._data.append('fake')
        self.assertEqual(1, h._last_index())
    def test_last_index_42(self):
        """
        The last index of a heap with forty-two elements is 41.
        """
        h = MaxHeap()
        # Bulk-fill the backing list; _last_index should track len - 1.
        for _ in range(42):
            h._data.append('fake')
        self.assertEqual(41, h._last_index())
    """
    Value at an index. It's handy to grab a value at a particular index, so lets
    encapsulate this work into a method.
    """
    def test_value_at_zero(self):
        """
        The value at index 0 is the value of the 0th item in the heap's data list.
        """
        h = MaxHeap()
        value = fake_value()
        h._data.append(value)
        self.assertEqual(value, h._value_at(0))
    def test_value_at(self):
        """
        The value at index i is the value of the i'th item in the heap's data list.
        """
        h = MaxHeap()
        value = fake_value()
        h._data.append(value)
        self.assertEqual(value, h._value_at(0))
        value = fake_value()
        h._data.append(value)
        self.assertEqual(value, h._value_at(1))
        # Each freshly appended value must be retrievable at its own index.
        for i in range(2, 9):
            value = fake_value()
            h._data.append(value)
            self.assertEqual(value, h._value_at(i))
    def test_value_at_invalid_index(self):
        """
        _value_at raises an IndexError when the index is out of bounds.
        """
        h = MaxHeap()
        # Out-of-bounds on an empty heap, then past the end of a 1-item heap.
        self.assertRaises(IndexError, h._value_at, 0)
        h._data.append('fake')
        self.assertRaises(IndexError, h._value_at, 1)
        self.assertRaises(IndexError, h._value_at, 2)
    """
    Indexes of left child, right child, and parent.
    A heap stores values linearly in a list, but it's really a tree. The root is
    at 0, and its left child is at index 1, and its right child is at index 2.
    The element at index 1 has a left child at index 3, and a right at index 4.
    The element at index 2 has a left child at index 5, and a right at index 6.
    What's the formula for this?
    Hint: Draw it out.
    """
# def test_left_child_index(self):
# """
# An element at index i has a left child at index ____.
# Hint: Know how the heap works. Look up and study the concept.
# """
# h = MaxHeap()
# # This method just calculates the index. It doesn't care about the data.
# self.assertEqual(1, h._left_child_index(0))
# self.assertEqual(3, h._left_child_index(1))
# self.assertEqual(5, h._left_child_index(2))
# self.assertEqual(7, h._left_child_index(3))
# self.assertEqual(8675309, h._left_child_index(4337654))
# def test_right_child_index(self):
# """
# An element at index i has a right child at index ____.
# Hint: Know how the heap works. Look up and study the concept.
# """
# h = MaxHeap()
# # This method just calculates the index. It doesn't care about the data.
# self.assertEqual(2, h._right_child_index(0))
# self.assertEqual(4, h._right_child_index(1))
# self.assertEqual(6, h._right_child_index(2))
# self.assertEqual(8, h._right_child_index(3))
# self.assertEqual(5446, h._right_child_index(2722))
# def test_parent_index(self):
# """
# An element at index i has a parent at index ___.
# Hints: Work this out instead of looking it up. Draw it.
# And, use integer division for natural flooring.
# Watch your order of operations.
# """
# h = MaxHeap()
# # This first one is nonsense, but is here for completeness.
# self.assertEqual(-1, h._parent_index(0))
# # The root's left child is at 1, so its parent is at index 0.
# self.assertEqual(0, h._parent_index(1))
# # The root's right child is at 2, so its parent is at index 0.
# self.assertEqual(0, h._parent_index(2))
# self.assertEqual(1, h._parent_index(3))
# self.assertEqual(1, h._parent_index(4))
# self.assertEqual(2, h._parent_index(5))
# self.assertEqual(2, h._parent_index(6))
# self.assertEqual(3, h._parent_index(7))
# self.assertEqual(4337654, h._parent_index(8675309))
# self.assertEqual(2722, h._parent_index(5446))
"""
Left child, right child, and parent _values_.
Now that we know that calculating the left, right and parent indexes given
any element's index, retrieving the values there is easy.
Hint: Use your previous abstractions. Don't repeat yourself.
"""
# def test_parent(self):
# """
# Given an index i, the parent is the value at the 'parent index' of i.
# Hint: The phrase above is nearly identical to the code, if you use your
# abstractions.
# """
# h = MaxHeap()
# fake_root = fake_value()
# fake_left_child = fake_value()
# fake_right_child = fake_value()
# fake_left_left_child = fake_value()
# fake_left_right_child = fake_value()
# h._data.append(fake_root)
# h._data.append(fake_left_child)
# h._data.append(fake_right_child)
# h._data.append(fake_left_left_child)
# h._data.append(fake_left_right_child)
# self.assertEqual(fake_root, h._parent(1))
# self.assertEqual(fake_root, h._parent(2))
# self.assertEqual(fake_left_child, h._parent(3))
# self.assertEqual(fake_left_child, h._parent(4))
# def test_parent_invalid(self):
# """
# Retrieving the parent value for an index without a parent is invalid.
# """
# h = MaxHeap()
# self.assertRaises(IndexError, h._parent, 0)
# self.assertRaises(IndexError, h._parent, 1)
# self.assertRaises(IndexError, h._parent, 2)
# h._data.append('fake')
# try:
# h._parent(1)
# h._parent(2)
# except IndexError:
# self.fail("Could not retrieve parent properly.")
# for i in range(3, 9):
# self.assertRaises(IndexError, h._parent, i)
# def test_left_child_none(self):
# """
# If the 'left child index' of an element at index i exceeds the bounds of
# the data list, just return None.
# Hint: Draw both a 5-element array and tree. What is the value of the left
# child of the third (index 2) element? And the fourth? And the fifth?
# """
# h = MaxHeap()
# h._data.append('fake')
# h._data.append('fake')
# h._data.append('fake')
# self.assertIsNone(h._left_child(1))
# self.assertIsNone(h._left_child(2))
# h._data.append('fake')
# h._data.append('fake')
# self.assertIsNone(h._left_child(2))
# self.assertIsNone(h._left_child(3))
# self.assertIsNone(h._left_child(4))
# def test_left_child(self):
# """
# Given an index i, the left child is the value at the 'left child index'
# of i.
# Hint: The phrase above is nearly identical to the code, if you use your
# abstractions.
# """
# h = MaxHeap()
# fake_root = fake_value()
# fake_left_child = fake_value()
# fake_right_child = fake_value()
# fake_left_left_child = fake_value()
# fake_left_right_child = fake_value()
# h._data.append(fake_root)
# h._data.append(fake_left_child)
# h._data.append(fake_right_child)
# h._data.append(fake_left_left_child)
# h._data.append(fake_left_right_child)
# self.assertEqual(fake_left_child, h._left_child(0))
# self.assertEqual(fake_left_left_child, h._left_child(1))
# self.assertIsNone(h._left_child(2))
# self.assertIsNone(h._left_child(3))
# self.assertIsNone(h._left_child(4))
# def test_right_child_none(self):
# """
# If the 'right child index' of an element at index i exceeds the bounds of
# the data list, just return None.
# Hint: Draw both a 5-element array and tree. What is the value of the right
# child of the third (index 2) element? And the fourth? And the fifth?
# """
# h = MaxHeap()
# h._data.append('fake')
# h._data.append('fake')
# | |
def setup(self):
self.output.string.set(str(self.args.s1) + str(self.args.s2))
# Build the pipeline
ppl = Pipeline()
task1 = AppendString("Append 1","This ","is ")
task2 = AppendString("Append 2",task1.output.string,"the full string")
ppl.add_task(task2,requires=(task1,))
# Define outputs
ppl.add_output('result',task2.output.string)
# Run the pipeline
exit_status = ppl.run(working_dir=self.working_dir,
poll_interval=0.1)
# Check the outputs
self.assertEqual(exit_status,0)
self.assertTrue(isinstance(task1.output.string,PipelineParam))
self.assertEqual(task1.output.string.value,"This is ")
self.assertTrue(isinstance(task2.output.string,PipelineParam))
self.assertEqual(task2.output.string.value,"This is the full string")
self.assertEqual(ppl.output.result,"This is the full string")
    def test_pipeline_dont_finalize_outputs(self):
        """
        Pipeline: test not finalizing pipeline outputs
        """
        # Define a reusable task
        # Appends item to a string
        class AppendString(PipelineTask):
            def init(self,s1,s2):
                self.add_output('string',PipelineParam(type=str))
            def setup(self):
                self.output.string.set(str(self.args.s1) + str(self.args.s2))
        # Build the pipeline
        ppl = Pipeline()
        task1 = AppendString("Append 1","This ","is ")
        task2 = AppendString("Append 2",task1.output.string,"the full string")
        ppl.add_task(task2,requires=(task1,))
        # Define outputs
        ppl.add_output('result',task2.output.string)
        # Run the pipeline with output finalization turned off
        exit_status = ppl.run(finalize_outputs=False,
                              working_dir=self.working_dir,
                              poll_interval=0.1)
        # Check the outputs
        self.assertEqual(exit_status,0)
        self.assertTrue(isinstance(task1.output.string,PipelineParam))
        self.assertEqual(task1.output.string.value,"This is ")
        self.assertTrue(isinstance(task2.output.string,PipelineParam))
        self.assertEqual(task2.output.string.value,"This is the full string")
        # With finalize_outputs=False the pipeline-level output stays a
        # PipelineParam instead of being resolved to its plain value
        self.assertTrue(isinstance(ppl.output.result,PipelineParam))
        self.assertEqual(ppl.output.result.value,"This is the full string")
    def test_pipeline_method_task_list(self):
        """
        Pipeline: test the 'task_list' method
        """
        # Define a reusable task
        # Appends item to a list
        class Append(PipelineTask):
            def init(self,l,s):
                self.add_output('list',list())
            def setup(self):
                for item in self.args.l:
                    self.output.list.append(item)
                self.output.list.append(self.args.s)
        # Make an empty pipeline
        ppl = Pipeline()
        self.assertEqual(ppl.task_list(),[])
        # Add a task
        task1 = Append("Append 1",(),"item1")
        task2 = Append("Append 2",task1.output.list,"item2")
        # Adding task2 with a requirement also brings task1 into the
        # pipeline (both ids appear in the list below)
        ppl.add_task(task2,requires=(task1,))
        # Check the task list
        task_list = ppl.task_list()
        self.assertEqual(len(task_list),2)
        self.assertTrue(task1.id() in task_list)
        self.assertTrue(task2.id() in task_list)
    def test_pipeline_method_get_task(self):
        """
        Pipeline: test the 'get_task' method
        """
        # Define a reusable task
        # Appends item to a list
        class Append(PipelineTask):
            def init(self,l,s):
                self.add_output('list',list())
            def setup(self):
                for item in self.args.l:
                    self.output.list.append(item)
                self.output.list.append(self.args.s)
        # Make a pipeline
        ppl = Pipeline()
        task1 = Append("Append 1",(),"item1")
        task2 = Append("Append 2",task1.output.list,"item2")
        ppl.add_task(task2,requires=(task1,))
        # Fetch task data
        # get_task returns a (task, requirements, kwargs) tuple
        task1_data = ppl.get_task(task1.id())
        self.assertEqual(task1_data[0],task1)
        self.assertEqual(task1_data[1],())
        self.assertEqual(task1_data[2],{})
        task2_data = ppl.get_task(task2.id())
        self.assertEqual(task2_data[0],task2)
        self.assertEqual(task2_data[1],(task1,))
        self.assertEqual(task2_data[2],{})
    def test_pipeline_method_rank_tasks(self):
        """
        Pipeline: test the 'rank_tasks' method
        """
        # Define a reusable task
        # Appends item to a list
        class Append(PipelineTask):
            def init(self,l,s):
                self.add_output('list',list())
            def setup(self):
                for item in self.args.l:
                    self.output.list.append(item)
                self.output.list.append(self.args.s)
        # Make a pipeline
        # Dependency graph: task1 -> (task2, task3); task3 -> task4
        ppl = Pipeline()
        task1 = Append("Append 1",(),"item1")
        task2 = Append("Append 2",task1.output.list,"item2")
        task3 = Append("Append 3",task1.output.list,"item3")
        task4 = Append("Append 4",task3.output.list,"item4")
        ppl.add_task(task2,requires=(task1,))
        ppl.add_task(task3,requires=(task1,))
        ppl.add_task(task4,requires=(task3,))
        # Rank the tasks
        ranked_tasks = ppl.rank_tasks()
        # Should be 3 ranks
        self.assertEqual(len(ranked_tasks),3)
        # Check the ranks
        self.assertEqual(ranked_tasks[0],[task1.id()])
        self.assertEqual(sorted(ranked_tasks[1]),
                         sorted([task2.id(),task3.id()]))
        self.assertEqual(ranked_tasks[2],[task4.id()])
    def test_pipeline_method_initial_tasks(self):
        """
        Pipeline: test the 'initial_tasks' method
        """
        # Define a reusable task
        # Appends item to a list
        class Append(PipelineTask):
            def init(self,l,s):
                self.add_output('list',list())
            def setup(self):
                for item in self.args.l:
                    self.output.list.append(item)
                self.output.list.append(self.args.s)
        # Make a pipeline
        ppl = Pipeline()
        task1 = Append("Append 1",(),"item1")
        task2 = Append("Append 2",task1.output.list,"item2")
        task3 = Append("Append 3",task1.output.list,"item3")
        task4 = Append("Append 4",task3.output.list,"item4")
        ppl.add_task(task2,requires=(task1,))
        ppl.add_task(task3,requires=(task1,))
        ppl.add_task(task4,requires=(task3,))
        # Check the initial tasks
        # task1 is the only task with no requirements of its own
        self.assertEqual(ppl.initial_tasks,[task1])
    def test_pipeline_method_final_tasks(self):
        """
        Pipeline: test the 'final_tasks' method
        """
        # Define a reusable task
        # Appends item to a list
        class Append(PipelineTask):
            def init(self,l,s):
                self.add_output('list',list())
            def setup(self):
                for item in self.args.l:
                    self.output.list.append(item)
                self.output.list.append(self.args.s)
        # Make a pipeline
        ppl = Pipeline()
        task1 = Append("Append 1",(),"item1")
        task2 = Append("Append 2",task1.output.list,"item2")
        task3 = Append("Append 3",task1.output.list,"item3")
        task4 = Append("Append 4",task3.output.list,"item4")
        ppl.add_task(task2,requires=(task1,))
        ppl.add_task(task3,requires=(task1,))
        ppl.add_task(task4,requires=(task3,))
        # Check the final tasks (task2 and task4 have no dependents)
        self.assertEqual(ppl.final_tasks,sorted([task2,task4],
                                                key=lambda x: x.id()))
    def test_pipeline_method_get_dependent_tasks(self):
        """
        Pipeline: test the 'get_dependent_tasks' method
        """
        # Define a reusable task
        # Appends item to a list
        class Append(PipelineTask):
            def init(self,l,s):
                self.add_output('list',list())
            def setup(self):
                for item in self.args.l:
                    self.output.list.append(item)
                self.output.list.append(self.args.s)
        # Make a pipeline
        ppl = Pipeline()
        task1 = Append("Append 1",(),"item1")
        task2 = Append("Append 2",task1.output.list,"item2")
        task3 = Append("Append 3",task1.output.list,"item3")
        task4 = Append("Append 4",task3.output.list,"item4")
        ppl.add_task(task2,requires=(task1,))
        ppl.add_task(task3,requires=(task1,))
        ppl.add_task(task4,requires=(task3,))
        # Check the dependent tasks
        # Dependents are transitive: task4 depends on task1 via task3
        self.assertEqual(sorted(ppl.get_dependent_tasks(task1.id())),
                         sorted([task2.id(),task3.id(),task4.id()]))
        self.assertEqual(ppl.get_dependent_tasks(task2.id()),[])
        self.assertEqual(ppl.get_dependent_tasks(task3.id()),[task4.id()])
        self.assertEqual(ppl.get_dependent_tasks(task4.id()),[])
    def test_pipeline_append_pipeline(self):
        """
        Pipeline: append one pipeline to another
        """
        # Define a reusable task
        # Appends item to a list
        class Append(PipelineTask):
            def init(self,l,s):
                self.add_output('list',list())
            def setup(self):
                for item in self.args.l:
                    self.output.list.append(item)
                self.output.list.append(self.args.s)
        # Make first pipeline
        ppl1 = Pipeline()
        ppl1.add_param("param1")
        ppl1.add_runner("runner1")
        task1 = Append("Append 1",(),"item1")
        task2 = Append("Append 2",task1.output.list,"item2")
        task3 = Append("Append 3",task1.output.list,"item3")
        task4 = Append("Append 4",task3.output.list,"item4")
        ppl1.add_task(task2,requires=(task1,))
        ppl1.add_task(task3,requires=(task1,))
        ppl1.add_task(task4,requires=(task3,))
        self.assertEqual(len(ppl1.task_list()),4)
        # Make second pipeline
        ppl2 = Pipeline()
        ppl2.add_param("param2")
        ppl2.add_runner("runner2")
        task5 = Append("Append 5",task1.output.list,"item5")
        task6 = Append("Append 6",task3.output.list,"item6")
        task7 = Append("Append 7",task3.output.list,"item7")
        ppl2.add_task(task6,requires=(task5,))
        ppl2.add_task(task7,requires=(task6,))
        self.assertEqual(len(ppl2.task_list()),3)
        # Append second pipeline to the first
        ppl1.append_pipeline(ppl2)
        self.assertEqual(len(ppl1.task_list()),7)
        # Check requirements on first task of pipeline 2
        # have been updated: appending makes pipeline 2's initial task
        # depend on pipeline 1's final tasks (task2 and task4)
        self.assertEqual(
            sorted(ppl1.get_task(task5.id())[1],key=lambda t: t.id()),
            sorted([task2,task4,],key=lambda t: t.id())
        )
        # Check params from both pipelines are defined
        self.assertTrue('param1' in ppl1.params)
        self.assertTrue('param2' in ppl1.params)
        # Check runners from both pipelines are defined
        self.assertTrue('runner1' in ppl1.runners)
        self.assertTrue('runner2' in ppl1.runners)
    def test_pipeline_merge_pipeline(self):
        """
        Pipeline: merge one pipeline into another
        """
        # Define a reusable task
        # Appends item to a list
        class Append(PipelineTask):
            def init(self,l,s):
                self.add_output('list',list())
            def setup(self):
                for item in self.args.l:
                    self.output.list.append(item)
                self.output.list.append(self.args.s)
        # Make first pipeline
        ppl1 = Pipeline()
        ppl1.add_param("param1")
        ppl1.add_runner("runner1")
        task1 = Append("Append 1",(),"item1")
        task2 = Append("Append 2",task1.output.list,"item2")
        task3 = Append("Append 3",task1.output.list,"item3")
        task4 = Append("Append 4",task3.output.list,"item4")
        ppl1.add_task(task2,requires=(task1,))
        ppl1.add_task(task3,requires=(task1,))
        ppl1.add_task(task4,requires=(task3,))
        self.assertEqual(len(ppl1.task_list()),4)
        # Make second pipeline
        ppl2 = Pipeline()
        ppl2.add_param("param2")
        ppl2.add_runner("runner2")
        task5 = Append("Append 5",task1.output.list,"item5")
        task6 = Append("Append 6",task3.output.list,"item6")
        task7 = Append("Append 7",task3.output.list,"item7")
        ppl2.add_task(task6,requires=(task5,))
        ppl2.add_task(task7,requires=(task6,))
        self.assertEqual(len(ppl2.task_list()),3)
        # Merge second pipeline into the first
        # (unlike append_pipeline, no extra dependencies are asserted here)
        ppl1.merge_pipeline(ppl2)
        self.assertEqual(len(ppl1.task_list()),7)
        # Check params from both pipelines are defined
        self.assertTrue('param1' in ppl1.params)
        self.assertTrue('param2' in ppl1.params)
        # Check runners from both pipelines are defined
        self.assertTrue('runner1' in ppl1.runners)
        self.assertTrue('runner2' in ppl1.runners)
    def test_pipeline_add_pipeline(self):
        """
        Pipeline: add one pipeline into another
        """
        # Define a reusable task
        # Appends item to a list
        class Append(PipelineTask):
            def init(self,l,s):
                self.add_output('list',list())
            def setup(self):
                for item in self.args.l:
                    self.output.list.append(item)
                self.output.list.append(self.args.s)
        # Make first pipeline
        ppl1 = Pipeline()
        ppl1.add_param("param1")
        ppl1.add_runner("runner1")
        task1 = Append("Append 1",(),"item1")
        task2 = Append("Append 2",task1.output.list,"item2")
        task3 = Append("Append 3",task1.output.list,"item3")
        task4 = Append("Append 4",task3.output.list,"item4")
        ppl1.add_task(task2,requires=(task1,))
        ppl1.add_task(task3,requires=(task1,))
        ppl1.add_task(task4,requires=(task3,))
        self.assertEqual(len(ppl1.task_list()),4)
        # Make second pipeline
        ppl2 = Pipeline()
        ppl2.add_param("param2")
        ppl2.add_runner("runner2")
        task5 = Append("Append 5",task1.output.list,"item5")
        task6 = Append("Append 6",task3.output.list,"item6")
        task7 = Append("Append 7",task3.output.list,"item7")
        ppl2.add_task(task6,requires=(task5,))
        ppl2.add_task(task7,requires=(task6,))
        self.assertEqual(len(ppl2.task_list()),3)
        # Add the second pipeline into the first
        ppl1.add_pipeline(ppl2)
        self.assertEqual(len(ppl1.task_list()),7)
        # Check params from both pipelines are defined
        self.assertTrue('param1' in ppl1.params)
        self.assertTrue('param2' in ppl1.params)
        # Check runners from both pipelines are defined
        self.assertTrue('runner1' in ppl1.runners)
        self.assertTrue('runner2' in ppl1.runners)
class TestPipelineTask(unittest.TestCase):
    def setUp(self):
        # Set up a scheduler (short poll interval keeps the tests fast)
        self.sched = SimpleScheduler(poll_interval=0.01)
        self.sched.start()
        # Make a temporary working dir
        self.working_dir = tempfile.mkdtemp(
            suffix='TestPipeline')
        # Store PATH so tearDown can restore any changes a test makes
        self.path = os.environ['PATH']
def tearDown(self):
# Stop the scheduler
if self.sched is not None:
self.sched.stop()
# Remove temp dir
if os.path.exists(self.working_dir):
shutil.rmtree(self.working_dir)
# Restore PATH
os.environ['PATH'] = self.path
def _user(self):
# Internal function to determine user
return getpass.getuser()
def _hostname(self):
# Internal function to determine hostname
try:
return os.environ['HOSTNAME']
except KeyError:
# HOSTNAME not defined in the
# environment, try 'platform'
# module instead
return platform.node()
    def test_pipelinetask_invocations(self):
        """
        PipelineTask: check task methods are invoked
        """
        # Define a simplistic task which does nothing
        class CheckInvocations(PipelineTask):
            def init(self):
                self.add_output('invocations',list())
                self.output.invocations.append("init")
            def setup(self):
                self.output.invocations.append("setup")
            def finish(self):
                self.output.invocations.append("finish")
        # Make a task instance
        task = CheckInvocations("Check method invocations")
        # Check initial state
        # 'init' runs at construction time, before the task is executed
        self.assertFalse(task.completed)
        self.assertEqual(task.exit_code,None)
        self.assertEqual(task.output.invocations,
                         ["init"])
        # Run the task
        task.run(sched=self.sched,
                 working_dir=self.working_dir,
                 asynchronous=False)
        # Check final state
        # 'setup' then 'finish' are invoked by run(), in that order
        self.assertTrue(task.completed)
        self.assertEqual(task.exit_code,0)
        self.assertEqual(task.output.invocations,
                         ["init","setup","finish"])
def test_pipelinetask_init(self):
"""
PipelineTask: check task 'init' invocations
"""
# Define a task for testing
class CheckInit(PipelineTask):
def init(self,a,b,c='hello',d=13,e=None):
self.add_output('results',list())
def setup(self):
result = "a=%s b=%s c=%s d=%s e=%s" \
% (self.args.a,
self.args.b,
self.args.c,
self.args.d,
self.args.e)
self.output.results.append(result)
# Make a task instance with minimal arglist
task = CheckInit("Minimal arglist","a","b")
self.assertEqual(task.args.a,"a")
self.assertEqual(task.args.b,"b")
self.assertEqual(task.args.c,"hello")
self.assertEqual(task.args.d,13)
self.assertEqual(task.args.e,None)
# Make a task instance with named minimal arglist
task = CheckInit("Named minimal arglist",a="a",b="b")
self.assertEqual(task.args.a,"a")
self.assertEqual(task.args.b,"b")
self.assertEqual(task.args.c,"hello")
self.assertEqual(task.args.d,13)
self.assertEqual(task.args.e,None)
# Make a task instance with named minimal arglist (reversed)
task = CheckInit("Named minimal arglist reversed",
b="a",a="b")
self.assertEqual(task.args.a,"b")
self.assertEqual(task.args.b,"a")
self.assertEqual(task.args.c,"hello")
self.assertEqual(task.args.d,13)
self.assertEqual(task.args.e,None)
# Make a task instance with args and subset of keywords
task = CheckInit("Args and subset of keywords",
"a","b",e=True,d=12)
self.assertEqual(task.args.a,"a")
self.assertEqual(task.args.b,"b")
self.assertEqual(task.args.c,"hello")
self.assertEqual(task.args.d,12)
self.assertEqual(task.args.e,True)
# Make a task instance with full arglist with keywords
task = CheckInit("Full arglist with keywords",
"a","b",c="goodbye",d=12,e=True)
self.assertEqual(task.args.a,"a")
self.assertEqual(task.args.b,"b")
self.assertEqual(task.args.c,"goodbye")
self.assertEqual(task.args.d,12)
self.assertEqual(task.args.e,True)
# Make a task instance with full arglist no keywords
task = CheckInit("Full arglist no keywords",
"a","b","goodbye",12,True)
self.assertEqual(task.args.a,"a")
self.assertEqual(task.args.b,"b")
self.assertEqual(task.args.c,"goodbye")
self.assertEqual(task.args.d,12)
self.assertEqual(task.args.e,True)
# Check task raises exception if | |
<filename>Websocket.py
import time, base64, hmac, hashlib, json, datetime
import pandas as pd
import numpy as np
from random import randint
from threading import Thread
from websocket import WebSocketApp#, WebSocketConnectionClosedException
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
from random import randint
import requests
class Client():
"""
@info:
Websocket client used to connect to the Coinbase exchange. Listening to the websocket for
    updates instead of polling the server via HTTP calls is highly recommended to decrease overhead and
improve performance.
- API Docs: https://docs.pro.coinbase.com/#websocket-feed
supported channels: - ticker, level2, user
supported products: - All available products through the Coinbase Pro exchange
@use:
ws = Client(production=True,
credentials={ YOUR CREDENTIALS },
user = True,
level2= [ 'BTC-USD', ... ],
ticker= [ 'BTC-USD','ETH-USD', ... ],
ohlc = [ [ 'BTC-USD','1min','15min' ],[ 'ETH-USD', '1hour' ], ... ] )
@params:
tickers : List of products which are subscribed to the tickers channel
level2 : List of products which are subscribed to the level2 channel
user : boolean to subscribe to the users channels. ( REQUIRES CREDENTIALS )
ohlc : List of products and candle increments to manage. Example [[ 'BTC-USD', '1min', '5min', '15min', '1hour', '6hour', '1day' ]]
credentials: Dictionary with the API credentials needed to connect to Coinbase
production : Boolean. if set to True the websocket will connect via url 'wss://ws-feed.pro.coinbase.com'
else if set to False the websocket will connect via url 'wss://ws-feed-public.sandbox.pro.coinbase.com'
@KEY METHODS:
self.orderbook('BTC-USD')
>>>
price size side
7037.95 0.000000 asks
7036.54 0.000000 bids
7036.16 0.000000 asks
self.ticker('BTC-USD')
>>>
{
'best_ask': 6423.08,
'best_bid': 6422.59,
'high_24h': 6485.76,
'last_size': 0.00511036,
'low_24h': 6003.0,
'open_24h': 6418.01,
'price': 6423.08,
'product_id': 'BTC-USD',
'sequence': 6555468983,
'side': 'buy',
'time': 1533828452.0009532,
'trade_id': 48603077,
'type': 'ticker',
'volume_24h': 14287.80656342,
'volume_30d': 307449.79720148}
}
self.ohlc('BTC-USD','1day')
>>>
time low high open close volume
1537465260 1537465260 6400.15 6402.96 6400.16 6402.95 20.687342
1537465320 1537465320 6402.96 6405.00 6402.96 6405.00 4.263147
@variables:
data : dictionary data variable stores the consumable websocket messages post processing. structure
'BTC-USD': {
'ticker': instance of Ticker class,
'orderbook': instance of OrderBookManagement class,
'ohlc': instance of OHLC class
},
'orders' : instance of OrderManagement class
example:
>>> ws.data['BTC-USD']['ticker'].history =
[
{'time': 1533828390.86529,'price': 4388.01 },
{'time': 1533828452.0009532,'price': 4385.01 },
...
]
>>> ws.data['BTC-USD']['ticker'].live = {
'best_ask': 6423.08,
'best_bid': 6422.59,
'high_24h': 6485.76,
'last_size': 0.00511036,
'low_24h': 6003.0,
'open_24h': 6418.01,
'price': 6423.08,
'product_id': 'BTC-USD',
'sequence': 6555468983,
'side': 'buy',
'time': 1533828452.0009532,
'trade_id': 48603077,
'type': 'ticker',
'volume_24h': 14287.80656342,
'volume_30d': 307449.79720148}
}
>>> ws.data['BTC-USD']['ohlc']['1day'].candles
time low high open close volume
1537465260 1537465260 6400.15 6402.96 6400.16 6402.95 20.687342
1537465320 1537465320 6402.96 6405.00 6402.96 6405.00 4.263147
...
>>> ws.data['BTC-USD']['orderbook'].book
DataFrame
Columns: [size, side]
Index: [price]
example:
price size side
7037.95 0.000000 asks
7036.54 0.000000 bids
7036.16 0.000000 asks
...
>>> ws.data['BTC-USD']['orderbook'].asks(remove_zeros=True)
price size
0 7032.33 2.576296
1 7033.00 0.030000
2 7033.06 0.026360
...
Note: remove_zeros=True will remove price levels with a size value of 0
>>> ws.data['BTC-USD']['orderbook'].bids(remove_zeros=True)
price size
0 7032.32 19.915242
1 7032.31 1.000000
2 7031.77 0.001000
...
Note: remove_zeros=True will remove price levels with a size value of 0
>>> ws.data['orders'].records
[
{ "type": "received", "time": "2014-11-07T08:19:27.028459Z", "product_id": "BTC-USD", "sequence": 10, "order_id": "d50ec984-77a8-460a-b958-66f114b0de9b", "size": "1.34", "price": "502.1", "side": "buy", "order_type": "limit" },
{ "type": "open", "time": "2014-11-07T08:19:27.028459Z", "product_id": "BTC-USD", "sequence": 10, "order_id": "d50ec984-77a8-460a-b958-66f114b0de9b", "price": "200.2", "remaining_size": "1.00", "side": "sell" },
...
]
>>> ws.data['orders'].orders
DataFrame
Columns: [sequence, order_id, create_time, update_time, product_id, order_type, side, stop_price, price, size, USD, BTC, LTC, ETH, BCH, ETC, taker_fee_rate, status]
Index: []
@methods:
open() : Opens the connection and subscribes to the given channels for the given products
close(): closes the connection to the websocket. This method does not clear out the data variable.
"""
def __init__(self, production=False, ticker=[], level2=[], user=[], ohlc=[], credentials=None ):
self.url = 'wss://ws-feed-public.sandbox.pro.coinbase.com'
self.production = production
self._ticker = ticker
self._level2 = level2
self._user = user
self._ohlc = ohlc
self._credentials = credentials
self.updated_time = time.time() + 30
if self.production:
self.url = 'wss://ws-feed.pro.coinbase.com'
self._subscription = self.subscription( self._ticker, self._level2, self._user, self._credentials )
self.data = self.set_data( self._subscription, self._ohlc, self.production )
self.messages = []
self.ws = None
self.conn_thread = None
self.terminated = False
self.error_count = 0
self.max_errors_allowed = 1000
self.PRODUCTS = ['BTC-USD','LTC-USD','ETH-USD','ETC-USD','LTC-BTC','ETH-BTC','ETC-BTC','BCH-USD','BCH-BTC','ZRX-USD','ZRX-BTC']
self.accepted_message_type = ["error","ticker","snapshot","l2update","received","open","done","match","change","activate"]
def on_message(self, ws, message):
"""Appends the message from the ws to the list of messages to process later"""
message = json.loads(message)
if message['type'] == 'error':
self.on_error(None, message['message'])
elif message['type'] == 'subscriptions':
print("Subscribed to {}".format(', '.join([ channel['name'] for channel in message['channels'] ])))
else:
if ((message['type']=='ticker' and message['product_id'] in self._ticker) or
(message['type'] in ["snapshot", "l2update"] and message['product_id'] in self._level2) or
(message['type'] in ["received","open","done","match","change","activate"] )):
self.messages.append(message)
elif message['type']=='heartbeat':
self.updated_time = time.time()
def on_error(self, ws, error):
"""Prints the errors"""
print(error)
if self.error_count == self.max_errors_allowed:
print("{}: Exceeded error count. Terminating connection".format(datetime.datetime.now()))
self.close()
else:
self.error_count += 1
def on_close(self, ws):
if self.terminated:
print("Connection closed")
else:
print("{}: Connection unexpectedly closed. Re-establishing a connection.".format(datetime.datetime.now()))
self._subscription = self.subscription( self._ticker, self._level2, self._user, self._credentials )
self.connect()
def on_open(self, ws):
"""Sends the initial subscription message to the server"""
self.terminated = False
ws.send(json.dumps(self._subscription))
print("Connected. Awaiting subscription message. {}".format(self.url))
# ==============================================================================
# The following methods handle creating a connection and monitoring of the feed
# ==============================================================================
def connect(self):
try:
self.terminated = False
monitor = Thread(target=self.monitor, name='Monitor method')
monitor.start()
self.ws = WebSocketApp(
url = self.url,
on_open = self.on_open,
on_message = self.on_message,
on_error = self.on_error,
on_close = self.on_close,
keep_running = True
)
self.ws.run_forever()
print("Disconnected")
monitor.join(timeout=30)
except Exception as e:
monitor.join()
raise Exception('Connection failed. Error {}'.format(e))
    def monitor(self):
        """Monitors the messages received and processes them individually.

        Runs on its own thread (started by connect()) until self.terminated
        is set. If no heartbeat has been seen for 5 seconds the websocket is
        closed, which triggers the reconnect logic in on_close().
        """
        while not self.terminated:
            try:
                if (time.time() - self.updated_time) < 5:
                    # Work from a snapshot so on_message can keep appending
                    # to self.messages while we iterate
                    messages = self.messages.copy()
                    # procs = np.min([ len(messages), 9 ]) + 1
                    # pool = ThreadPool(procs)
                    # pool.map(self.process, messages)
                    # pool.close()
                    # pool.join()
                    for message in messages:
                        self.process(message)
                elif self.ws:
                    # Feed looks stale: push the watchdog forward (so this
                    # branch is not re-entered immediately) and close the
                    # socket, prompting on_close() to reconnect
                    self.updated_time += 10
                    self.ws.close()
            except Exception as e:
                self.on_error(None, "Monitoring Error: {}".format(e))
                continue
            finally:
                # Throttle the polling loop
                time.sleep(0.1)
def process_tickers(self, message):
if 'ticker' in self.data[message['product_id']]:
self.data[message['product_id']]['ticker'].update( message )
if 'ohlc' in self.data[message['product_id']]:
for ohlc in self.data[message['product_id']]['ohlc']:
self.data[message['product_id']]['ohlc'][ohlc].update( self.data[message['product_id']]['ticker'].live )
def process_orderbook(self, message):
if 'orderbook' in self.data[message['product_id']]:
self.data[message['product_id']]['orderbook'].update( message )
def process_orders(self, message):
unprocessed_order = self.data['user'].update( message )
if unprocessed_order:
self.messages.append(unprocessed_order)
def process(self, message):
"""This method removes the message received from the list of messages, then routes \n the message to the appropriate function"""
try:
self.messages.remove(message)
except ValueError:
pass # nothing to see here, just a message that was already processed and is not on the list any more
except Exception as e:
print('error removing message from self.message:', e)
try:
if message['type'] in ["ticker"]:
self.process_tickers(message)
elif message['type'] in ["snapshot", "l2update"]:
self.process_orderbook(message)
elif message['type'] in ["received","open","done","match","change","activate"] and 'user' in self.data:
self.process_orders(message)
except Exception as e:
raise Exception("Process raised an error: {}\n\t{}".format(e,message))
# ==============================================================================
# Data exploration methods
# ==============================================================================
def orderbook(self, product):
return self.data[product.upper()]['orderbook'].book
def ticker(self, product):
return self.data[product.upper()]['ticker'].live
def ohlc(self, product, ohlc):
return self.data[product.upper()]['ohlc'][ohlc].candles
def orders(self, ids='*'):
orders = self.data['user'].orders
columns = self.data['user'].columns
if ids == '*':
return orders[columns]
else:
ids = ids if type(ids)==list else [ids]
return orders[ orders['order_id'].isin(ids) ][columns]
# ==============================================================================
# the following methods handle the creation of the subscription
# and managing connections
# ==============================================================================
def set_data(self, SUBSCRIPTION, OHLC_, PRODUCTION):
data = { **{ product: { } for product in self._subscription['product_ids'] } }
for channel in SUBSCRIPTION['channels']:
if not isinstance(channel, str):
if channel['name'] == 'ticker':
for product in channel['product_ids']:
data[ product ][ 'ticker' ] = Ticker()
if channel['name'] == 'level2':
for product in channel['product_ids']:
data[ product ][ 'orderbook' ] = OrderBookManagement()
elif channel == 'user':
data[ 'user' ] = OrderManagement()
for candles in OHLC_:
data[candles[0]]['ohlc'] = { increment: OHLC( candles[0], increment ) for increment in candles[1:] }
time.sleep(1)
return data
def subscription(self, ticker=None, level2=None, user=None, credentials=None):
subscription = {
'type': 'subscribe',
'product_ids': list(set(ticker + level2)),
'channels': ['heartbeat']
}
if user: subscription['channels'].append( 'user' )
if ticker: subscription['channels'].append( { 'name':'ticker', 'product_ids': list(set(ticker)) } | |
message to tell us that the panel has finished download mode, so we too should stop download mode
self.DownloadMode = False
self.pmExpectedResponse = []
#self.pmWaitingForAckFromPanel = False
if self.pmLastSentMessage is not None:
lastCommandData = self.pmLastSentMessage.command.data
log.debug("[handle_msgtype0B] last command {0}".format(self.toString(lastCommandData)))
if lastCommandData is not None:
if lastCommandData[0] == 0x0A:
log.info("[handle_msgtype0B] We're in powerlink mode *****************************************")
self.pmPowerlinkMode = True # INTERFACE set State to "PowerLink"
# We received a download exit message, restart timer
self.reset_watchdog_timeout()
self.ProcessSettings()
    def handle_msgtype25(self, data): # Download retry
        """ MsgType=25 - Download retry
        Unit is not ready to enter download mode; the panel tells us how
        long to wait before trying again.
        """
        # Format: <MsgType> <?> <?> <delay in sec>
        iDelay = data[2]
        log.info("[handle_msgtype25] Download Retry, have to wait {0} seconds data is {1}".format(iDelay, self.toString(data)))
        # self.loop.call_later(int(iDelay), self.download_retry())
        self.DownloadMode = False
        self.doneAutoEnroll = False
        # NOTE(review): this sleep blocks the current thread for the whole
        # delay rather than scheduling via self.loop.call_later (see the
        # commented line above) - confirm this is intentional.
        sleep(iDelay)
        ## don't bother with another download attempt as they never work, attempt to start again
        asyncio.ensure_future(self.coordinate_powerlink_startup(4), loop = self.loop)
def handle_msgtype33(self, data):
""" MsgType=33 - Settings
Message send after a MSG_START. We will store the information in an internal array/collection """
if len(data) != 10:
log.info("[handle_msgtype33] ERROR: MSGTYPE=0x33 Expected len=14, Received={0}".format(len(self.ReceiveData)))
log.info("[handle_msgtype33] " + self.toString(self.ReceiveData))
return
# Data Format is: <index> <page> <8 data bytes>
# Extract Page and Index information
iIndex = data[0]
iPage = data[1]
#log.debug("[handle_msgtype33] Getting Data " + self.toString(data) + " page " + hex(iPage) + " index " + hex(iIndex))
# Write to memory map structure, but remove the first 2 bytes from the data
self.pmWriteSettings(iPage, iIndex, data[2:])
    def handle_msgtype3C(self, data): # Panel Info Message when starting the download
        """ The panel information is in 4 & 5
        5=PanelType e.g. PowerMax, PowerMaster
        4=Sub model type of the panel - just informational, not used
        """
        self.ModelType = data[4]
        self.PanelType = data[5]
        # Panel types 7 and above are PowerMaster models
        self.PowerMaster = (self.PanelType >= 7)
        modelname = pmPanelType_t[self.PanelType] or "UNKNOWN"  # INTERFACE set this in the user interface
        PanelStatus["Model Type"] = self.ModelType
        PanelStatus["Power Master"] = 'Yes' if self.PowerMaster else 'No'
        log.debug("[handle_msgtype3C] PanelType={0} : {2} , Model={1} Powermaster {3}".format(self.PanelType, self.ModelType, modelname, self.PowerMaster))
        if not self.doneAutoEnroll:
            # when here, the first download did not get denied
            #     we did not get an 08 message back from the panel
            #     we did not get an AB 00 01 request from the panel to auto enroll
            # Remove anything else from the List, we need to restart
            self.pmExpectedResponse = []
            # Clear the list
            self.ClearList()
            if not self.ForceStandardMode:
                self.doneAutoEnroll = False
                log.info("[handle_msgtype3C] Attempt to auto-enroll")
                self.DownloadMode = False
                self.SendMsg_ENROLL()
            else:
                self.SendCommand("MSG_STATUS")
        # We got a first response, now we can continue enrollment of the PowerMax/Master PowerLink
        interval = self.getTimeFunction() - self.lastSendOfDownloadEprom
        td = timedelta(seconds=90)  # prevent multiple requests for the EPROM panel settings, at least 90 seconds
        if interval > td:
            self.lastSendOfDownloadEprom = self.getTimeFunction()
            self.pmPowerlinkEnrolled()
            if PanelSettings["AutoSyncTime"]:  # should we sync time between the HA and the Alarm Panel
                t = datetime.now()
                # Only sync if the local clock looks sane (year set)
                if t.year > 2000:
                    year = t.year - 2000
                    values = [t.second, t.minute, t.hour, t.day, t.month, year]
                    timePdu = bytearray(values)
                    #self.pmSyncTimeCheck = t
                    self.SendCommand("MSG_SETTIME", options = [3, timePdu] )
                else:
                    log.info("[Enrolling Powerlink] Please correct your local time.")
def handle_msgtype3F(self, data):
""" MsgType=3F - Download information
Multiple 3F can follow eachother, if we request more then &HFF bytes """
log.info("[handle_msgtype3F]")
# data format is normally: <index> <page> <length> <data ...>
# If the <index> <page> = FF, then it is an additional PowerMaster MemoryMap
iIndex = data[0]
iPage = data[1]
iLength = data[2]
# Check length and data-length
if iLength != len(data) - 3: # 3 because --> index & page & length
log.info("[handle_msgtype3F] ERROR: Type=3F has an invalid length, Received: {0}, Expected: {1}".format(len(data)-3, iLength))
log.info("[handle_msgtype3F] " + self.toString(self.ReceiveData))
return
# Write to memory map structure, but remove the first 4 bytes (3F/index/page/length) from the data
self.pmWriteSettings(iPage, iIndex, data[3:])
    def handle_msgtypeA0(self, data):
        """ MsgType=A0 - Event Log

        The first entry (eventNum == 1) only carries the total number of
        log events; each subsequent entry describes one logged event.
        """
        log.info("[handle_MsgTypeA0] Packet = {0}".format(self.toString(data)))
        eventNum = data[1]
        # Check for the first entry, it only contains the number of events
        if eventNum == 0x01:
            log.debug("[handle_msgtypeA0] Eventlog received")
            self.eventCount = data[0]
        else:
            # Decode the event timestamp and identification fields
            iSec = data[2]
            iMin = data[3]
            iHour = data[4]
            iDay = data[5]
            iMonth = data[6]
            iYear = int(data[7]) + 2000
            iEventZone = data[8]
            iLogEvent = data[9]
            zoneStr = pmLogUser_t[self.pmLang][iEventZone] or "UNKNOWN"
            eventStr = pmLogEvent_t[self.pmLang][iLogEvent] or "UNKNOWN"
            idx = eventNum - 1
            # Create an event log array
            self.pmEventLogDictionary[idx] = LogEvent()
            if pmPanelConfig_t["CFG_PARTITIONS"][self.PanelType] > 1:
                part = 0
                # NOTE(review): on multi-partition panels the partition looks
                # bit-encoded in the seconds byte (which is then not part of
                # the time) - confirm against protocol docs.
                for i in range(1, 4):
                    part = (iSec % (2 * i) >= i) and i or part
                self.pmEventLogDictionary[idx].partition = (part == 0) and "Panel" or part
                self.pmEventLogDictionary[idx].time = "{0:0>2}:{1:0>2}".format(iHour, iMin)
            else:
                # This alarm panel only has a single partition so it must either be panel or partition 1
                self.pmEventLogDictionary[idx].partition = (iEventZone == 0) and "Panel" or "1"
                self.pmEventLogDictionary[idx].time = "{0:0>2}:{1:0>2}:{2:0>2}".format(iHour, iMin, iSec)
            self.pmEventLogDictionary[idx].date = "{0:0>2}/{1:0>2}/{2}".format(iDay, iMonth, iYear)
            self.pmEventLogDictionary[idx].zone = zoneStr
            self.pmEventLogDictionary[idx].event = eventStr
            #self.pmEventLogDictionary.items = idx
            #self.pmEventLogDictionary.done = (eventNum == self.eventCount)
            log.debug("Log Event {0}".format(self.pmEventLogDictionary[idx]))
def handle_msgtypeA3(self, data):
""" MsgType=A3 - Zone Names """
log.info("[handle_MsgTypeA3] Wibble Packet = {0}".format(self.toString(data)))
msgCnt = int(data[0])
offset = 8 * (int(data[1]) - 1)
for i in range(0, 8):
zoneName = pmZoneName_t[int(data[2+i])]
log.info(" Zone name for {0} is {1}".format( offset+i+1, zoneName ))
if offset+i in self.pmSensorDev_t:
if not self.pmSensorDev_t[offset+i].zname: # if not already set
self.pmSensorDev_t[offset+i].zname = zoneName
self.pmSensorDev_t[offset+i].pushChange()
log.info(" Found Sensor")
    def handle_msgtypeA6(self, data):
        """ MsgType=A6 - Zone Types (presumed - see the analysis notes below;
        the decode is only applied outside powerlink mode and is currently
        log-only).
        """
        log.info("[handle_MsgTypeA6] Packet = {0}".format(self.toString(data)))
        # Commented Out
        #   I assumed that the 5 A6 messages were similar to the 5 A3 messages, giving the type and chime info (as per the EPROM download)
        #   It doesn't look like it so it's commented out until I can figure out what they are
        #     Example data streams from my alarm, 5 data packets (header, mgstype, checksum and footer removed)
        #        04 01 2a 2a 2a 25 25 25 25 25 43     # This is supposed to tell us the total message count i.e. 4
        #        04 01 2a 2a 2a 25 25 25 25 25 43     # Then this is message 1 of 4 (zones 1 to 8)
        #        04 02 25 25 24 24 25 25 24 25 43     # Then this is message 2 of 4 (zones 9 to 16)
        #        04 03 25 25 25 29 29 1f 1f 27 43     # Then this is message 3 of 4 (zones 17 to 24)
        #        04 04 27 28 28 1e 22 28 00 00 43     # Then this is message 4 of 4 (zones 25 to 32)
        #     e.g. If we decoded the same as the EPROM zone type for zone 1, 2 and 3 (showing 2a in my examples above):
        #                 2a & 0x0F would give 0x0A for the type "24 Hours Audible" which is wrong, mine should be "Interior" as they are PIRs
        if not self.pmPowerlinkMode:
            msgCnt = int(data[0])  # total message count; currently unused in the decode below
            offset = 8 * (int(data[1]) - 1)
            for i in range (0, 8):
                zoneInfo = int(data[2+i]) - 0x1E        #  in other code data[2+i] - 0x1E;
                zoneType = (zoneInfo & 0x0F) + 1        #  in other code add one
                zoneChime = ((zoneInfo >> 4) & 0x03)
                log.debug("Zone type for {0} is {1}   chime {2}".format( offset+i+1, pmZoneType_t[self.pmLang][zoneType], pmZoneChime_t[self.pmLang][zoneChime]))
                # Sensor updates disabled until the decode above is confirmed:
                # if offset+i in self.pmSensorDev_t:
                #     self.pmSensorDev_t[offset+i].ztype = zoneType
                #     self.pmSensorDev_t[offset+i].ztypeName = pmZoneType_t[self.pmLang][zoneType]
                #     self.pmSensorDev_t[offset+i].zchime = pmZoneChime_t[self.pmLang][zoneChime]
                #     self.pmSensorDev_t[offset+i].pushChange()
# def displaySensorBypass(self, sensor):
# armed = False
# if self.pmSensorShowBypass:
# armed = sensor.bypass
# else:
# zoneType = sensor.ztype
# mode = bitw.band(pmSysStatus, 0x0F) -- armed or not: 4=armed home; 5=armed away
# local alwaysOn = { [2] = "", [3] = "", [9] = "", [10] = "", [11] = "", [14] = "" }
# Show as armed if
# a) the sensor type always triggers an alarm: (2)flood, (3)gas, (11)fire, (14)temp, (9/10)24h (silent/audible)
# b) the system is armed away (mode = 4)
# c) the system is armed home (mode = 5) and the zone is | |
<gh_stars>0
'''
This module contains all of the panels for mcblend GUI.
'''
# Don't use "from __future__ import annotations" here - Blender's property
# system needs real (non-string) annotations to register properties.
from typing import List, Optional
from dataclasses import dataclass
from .custom_properties import EffectTypes
import bpy
from bpy.props import (
StringProperty, IntProperty, BoolProperty, FloatProperty,
FloatVectorProperty, CollectionProperty, EnumProperty, PointerProperty,
IntVectorProperty
)
from .operator_func.texture_generator import (
list_mask_types_as_blender_enum, UvMaskTypes,
list_mix_mask_modes_as_blender_enum)
# GUI
# UV-groups names list
class OBJECT_UL_NusiqMcblendUVGroupList(bpy.types.UIList):
    '''GUI item used for drawing list of names of UV-groups.'''
    def draw_item(
            self, context, layout, data, item, icon, active_data,
            active_propname):
        '''
        Draw one OBJECT_NusiqMcblendUvGroupProperties entry in the list.

        :param context: the contexts of operator
        :param layout: layout in which the object is drawn
        :param data: the RNA object containing the collection
        :param item: the item currently drawn in the collection
        :param icon: not used - "the "computed" icon for the item" (?)
        :param active_data: the RNA object containing the active property for the
          collection.
        :param active_propname: the name of the active property.

        For more info see the UI Template called: "UI List Simple".
        '''
        # pylint: disable=arguments-differ, unused-argument
        if self.layout_type not in {'DEFAULT', 'COMPACT', 'CENTER'}:
            return
        # Editable name field (emboss=False keeps the flat list look).
        # To disable renaming, use layout.label(text=item.name) instead.
        layout.prop(item, "name", text="", emboss=False)
# UV-group panel
@dataclass
class _UIStackItem():
    '''
    Object used in OBJECT_PT_NusiqMcblendUVGroupPanel for saving the
    information about nested UV-groups in stack data structure.
    '''
    # Layout that nested masks are drawn into; None means the parent mask
    # is collapsed, so children are counted but not drawn.
    ui: Optional[bpy.types.UILayout]  # None if parent is collapsed
    # Number of masks (the mix mask itself plus its children) still owned
    # by this stack entry; the entry is popped once it reaches zero.
    depth: int
class OBJECT_PT_NusiqMcblendUVGroupPanel(bpy.types.Panel):
'''Panel used for editing UV-groups.'''
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'scene'
bl_label = "Mcblend UV groups"
def draw_colors(self, mask, mask_index: int, col: bpy.types.UILayout):
'''Draws colors of UV-mask.'''
box = col.box()
row = box.row()
row.label(text='Colors')
op_props = row.operator(
"object.nusiq_mcblend_add_uv_mask_color", text="", icon='ADD')
op_props.mask_index = mask_index
colors_len = len(mask.colors)
for color_index, color in enumerate(mask.colors):
row = box.row()
row.prop(color, "color", text="")
up_down_row = row.row(align=True)
# Move down
if color_index - 1 >= 0:
op_props = up_down_row.operator(
"object.nusiq_mcblend_move_uv_mask_color", icon='TRIA_UP',
text='')
op_props.mask_index = mask_index
op_props.move_from = color_index
op_props.move_to = color_index - 1
# Move up
if color_index + 1 < colors_len:
op_props = up_down_row.operator(
"object.nusiq_mcblend_move_uv_mask_color", icon='TRIA_DOWN',
text='')
op_props.mask_index = mask_index
op_props.move_from = color_index
op_props.move_to = color_index + 1
# Delete button
op_props = row.operator(
"object.nusiq_mcblend_remove_uv_mask_color", icon='X', text='')
op_props.mask_index = mask_index
op_props.color_index = color_index
def draw_stripes(self, mask, mask_index: int, col: bpy.types.UILayout):
'''Draws stripes of UV-mask.'''
box = col.box()
row = box.row()
row.label(text='Stripes')
op_props = row.operator(
"object.nusiq_mcblend_add_uv_mask_stripe", text="", icon='ADD')
op_props.mask_index = mask_index
stripes_len = len(mask.stripes)
for stripe_index, stripe in enumerate(mask.stripes):
row = box.row()
if (
mask.relative_boundaries and
mask.mask_type != UvMaskTypes.GRADIENT_MASK.value):
# Gradient mask always uses absolute values
row.prop(stripe, "width_relative")
else:
row.prop(stripe, "width")
row.prop(stripe, "strength")
up_down_row = row.row(align=True)
# Move down
if stripe_index - 1 >= 0:
op_props = up_down_row.operator(
"object.nusiq_mcblend_move_uv_mask_stripe", icon='TRIA_UP',
text='')
op_props.mask_index = mask_index
op_props.move_from = stripe_index
op_props.move_to = stripe_index - 1
# Move up
if stripe_index + 1 < stripes_len:
op_props = up_down_row.operator(
"object.nusiq_mcblend_move_uv_mask_stripe", icon='TRIA_DOWN',
text='')
op_props.mask_index = mask_index
op_props.move_from = stripe_index
op_props.move_to = stripe_index + 1
# Delete button
op_props = row.operator(
"object.nusiq_mcblend_remove_uv_mask_stripe", icon='X',
text='')
op_props.mask_index = mask_index
op_props.stripe_index = stripe_index
def draw_mask_properties(
self, mask, index: int, col: bpy.types.UILayout, *,
colors=False, interpolate=False,
normalize=False, p1p2=False, stripes=False,
relative_boundaries=False, expotent=False, strength=False,
hard_edge=False, horizontal=False, seed=False,color=False,
children=False, mode=False):
'''Draws properties of UV-mask.'''
if colors:
self.draw_colors(mask, index, col) # colors
if interpolate:
col.prop(mask, "interpolate")
if normalize:
col.prop(mask, "normalize")
if p1p2:
row = col.row()
if mask.relative_boundaries:
row.prop(mask, "p1_relative")
row = col.row()
row.prop(mask, "p2_relative")
else:
row.prop(mask, "p1")
row = col.row()
row.prop(mask, "p2")
if relative_boundaries:
col.prop(mask, "relative_boundaries")
if stripes:
self.draw_stripes(mask, index, col) # stripes
if expotent:
col.prop(mask, "expotent")
if strength:
col.row().prop(mask, "strength")
if hard_edge:
col.prop(mask, "hard_edge")
if horizontal:
col.prop(mask, "horizontal")
if seed:
row = col.row()
row.prop(mask, "use_seed")
if mask.use_seed:
row.prop(mask, "seed")
if color:
col.prop(mask.color, "color")
if mode:
col.prop(mask, "mode")
if children:
col.prop(mask, "children")
def draw_mask(
self, mask, index: int, masks_len: int,
ui_stack: List[_UIStackItem]):
'''
Draws whole UV-mask gui with additional GUI items for navigation
between masks like buttons for moving and removing masks.
'''
col = None
# If parent is collapsed don't draw anything
if ui_stack[-1].ui is not None:
col = ui_stack[-1].ui
box = col.box()
# box.scale_x = True
col = box.column()
row = col.row()
if mask.ui_collapsed:
row.prop(
mask, "ui_collapsed", text="", icon='DISCLOSURE_TRI_RIGHT',
emboss=False)
else:
row.prop(
mask, "ui_collapsed", text="", icon='DISCLOSURE_TRI_DOWN',
emboss=False)
row.label(text=f'{mask.mask_type}')
up_down_row = row.row(align=True)
# Move down
if index - 1 >= 0:
op_props = up_down_row.operator(
"object.nusiq_mcblend_move_uv_mask", icon='TRIA_UP',
text='')
op_props.move_from = index
op_props.move_to = index - 1
# Move up
if index + 1 < masks_len:
op_props = up_down_row.operator(
"object.nusiq_mcblend_move_uv_mask", icon='TRIA_DOWN',
text='')
op_props.move_from = index
op_props.move_to = index + 1
# Hide button
if mask.ui_hidden:
row.prop(
mask, "ui_hidden", text="", icon='HIDE_ON',
emboss=False)
else:
row.prop(
mask, "ui_hidden", text="", icon='HIDE_OFF',
emboss=False)
# Delete button
op_props = row.operator(
"object.nusiq_mcblend_remove_uv_mask", icon='X', text='')
op_props.target = index
# Drawing the mask itself unless collapsed
if not mask.ui_collapsed:
if mask.mask_type == UvMaskTypes.COLOR_PALLETTE_MASK.value:
if len(ui_stack) > 1:
col.label(
text="This mask can't be put inside mix mask",
icon='ERROR')
else:
self.draw_mask_properties(
mask, index, col,
colors=True, interpolate=True, normalize=True)
if mask.mask_type == UvMaskTypes.GRADIENT_MASK.value:
self.draw_mask_properties(
mask, index, col,
p1p2=True, stripes=True, relative_boundaries=True,
expotent=True)
if mask.mask_type == UvMaskTypes.ELLIPSE_MASK.value:
self.draw_mask_properties(
mask, index, col,
p1p2=True, relative_boundaries=True, expotent=True,
strength=True, hard_edge=True)
if mask.mask_type == UvMaskTypes.RECTANGLE_MASK.value:
self.draw_mask_properties(
mask, index, col,
p1p2=True, relative_boundaries=True, expotent=True,
strength=True, hard_edge=True)
if mask.mask_type == UvMaskTypes.STRIPES_MASK.value:
self.draw_mask_properties(
mask, index, col,
stripes=True, relative_boundaries=True, horizontal=True)
if mask.mask_type == UvMaskTypes.RANDOM_MASK.value:
self.draw_mask_properties(
mask, index, col,
strength=True, expotent=True, seed=True)
if mask.mask_type == UvMaskTypes.COLOR_MASK.value:
self.draw_mask_properties(mask, index, col, color=True)
if mask.mask_type == UvMaskTypes.MIX_MASK.value:
self.draw_mask_properties(
mask, index, col,
children=True, strength=True, expotent=True,
mode=True)
if mask.mask_type == UvMaskTypes.MIX_MASK.value and col is not None:
# mask.children+1 because it counts itself as a member
if not mask.ui_collapsed:
ui_stack.append(_UIStackItem(
col.box(), mask.children+1))
else:
ui_stack.append(_UIStackItem(
None, mask.children+1))
    def draw(self, context):
        '''Draws whole UV-group panel.'''
        # Single aligned column hosts every widget of the panel.
        col = self.layout.column(align=True)
        # Add group
        row = col.row()
        row.operator(
            "object.nusiq_mcblend_add_uv_group", text="New UV group",
            icon='ADD'
        )
        row_import_export = col.row()
        row_import_export.operator(
            "object.nusiq_mcblend_import_uv_group_operator",
            text="Import UV group", icon='IMPORT'
        )
        active_uv_group_id = bpy.context.scene.nusiq_mcblend_active_uv_group
        uv_groups = bpy.context.scene.nusiq_mcblend_uv_groups
        col.template_list(
            listtype_name="OBJECT_UL_NusiqMcblendUVGroupList",
            list_id="", dataptr=context.scene,
            propname="nusiq_mcblend_uv_groups",
            active_dataptr=context.scene,
            active_propname="nusiq_mcblend_active_uv_group")
        # Group-specific widgets are drawn only when the active index is
        # valid (e.g. the list may be empty right after start-up).
        if active_uv_group_id < len(uv_groups):
            active_uv_group = uv_groups[active_uv_group_id]
            # Delete group
            row.operator(
                "object.nusiq_mcblend_remove_uv_group",
                text="Delete this UV group", icon='X')
            row_import_export.operator(
                "object.nusiq_mcblend_export_uv_group_operator",
                text="Export UV group", icon='EXPORT'
            )
            # Select side
            row = col.row()
            row.label(text='Side:')
            row.prop(
                context.scene, "nusiq_mcblend_active_uv_groups_side",
                text="")
            col.separator()
            col.operator(
                'object.nusiq_mcblend_copy_uv_group_side',
                text='Copy current UV face', icon='DUPLICATE')
            # Add mask
            col.operator_menu_enum(
                "object.nusiq_mcblend_add_uv_mask", "mask_type",
                text="Add mask", icon="ADD")
            # Draw selected side (the scene property holds the side index
            # as a string enum, hence the int() conversion below).
            sides = [
                active_uv_group.side1, active_uv_group.side2,
                active_uv_group.side3, active_uv_group.side4,
                active_uv_group.side5, active_uv_group.side6
            ]
            masks = sides[
                int(context.scene.nusiq_mcblend_active_uv_groups_side)]
            # Stack of UI items to draw in
            ui_stack: List[_UIStackItem] = [
                _UIStackItem(col, 0)]
            for i, mask in enumerate(masks):
                col.separator(factor=0.5)
                self.draw_mask(mask, i, len(masks), ui_stack)
                # Remove empty ui containers from top of ui_stack
                while len(ui_stack) > 1:  # Except the first one
                    ui_stack[-1].depth -= 1
                    if ui_stack[-1].depth <= 0:
                        ui_stack.pop()
                    else:
                        break
# Event group panel
class OBJECT_UL_NusiqMcblendEventsList(bpy.types.UIList):
    '''GUI item used for drawing list of names of events.'''
    def draw_item(
            self, context, layout, data, item, icon, active_data,
            active_propname):
        '''
        Draws a single OBJECT_NusiqMcblendEventGroupProperties entry.

        :param context: the contexts of operator
        :param layout: layout in which the object is drawn
        :param data: the RNA object containing the collection
        :param item: the item currently drawn in the collection
        :param icon: not used - "the "computed" icon for the item" (?)
        :param active_data: the RNA object containing the active property for
            the collection.
        :param active_propname: the name of the active property.
        '''
        # pylint: disable=arguments-differ, unused-argument
        # Only the list-style layouts get a widget; grid layouts draw nothing.
        if self.layout_type not in {'DEFAULT', 'COMPACT', 'CENTER'}:
            return
        # An editable name field (emboss=False keeps it looking like a label).
        layout.prop(item, "name", text="", emboss=False)
class OBJECT_PT_NusiqMcblendEventsPanel(bpy.types.Panel):
'''Panel used for editing events.'''
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'scene'
bl_label = "Mcblend events"
def draw_effect(self, effect, index: int, col: bpy.types.UILayout):
'''Draw single effect in the event'''
# If parent is collapsed don't draw anything
box = col.box()
col = box.column()
row = col.row()
row.label(text=f'{effect.effect_type}')
# Delete button
op_props = row.operator(
"object.nusiq_mcblend_remove_effect", icon='X', text='')
op_props.effect_index = index
if effect.effect_type == EffectTypes.PARTICLE_EFFECT.value:
col.prop(effect, "effect", text="Effect")
col.prop(effect, "locator", text="Locator")
col.prop(effect, "pre_effect_script", text="Pre effect script")
col.prop(effect, "bind_to_actor", text="Bind to actor")
elif effect.effect_type == EffectTypes.SOUND_EFFECT.value:
col.prop(effect, "effect", text="Effect")
def draw(self, context):
'''Draws whole event group panel.'''
col = self.layout.column(align=True)
row = col.row()
events = bpy.context.scene.nusiq_mcblend_events
active_event_id = | |
str was expected but a %s datatype variable was input.'
% type(path)
)
elif not os.path.isfile(path):
raise FileNotFoundError("No file named %s was found, aborting" % path)
if path.endswith(".h5"): # Assume SLC
"""
h5 has both the SLC images and metadata. gdal/rasterio seems to break a
lot of the metadata fields on the SLC case, so using h5py.
Note:: that the SLC reader is not closed unlike the GRD. This is by design, I'd rather
pass pointers than vectors with tens of thousands of elements. Only the datetimes
are converted from bytedata and read into the dict for compatability reasons.
"""
return read_SLC_metadata(h5py.File(path, "r"))
elif path.endswith(".tif") or path.endswith(".tiff"):
return read_GRD_metadata(path)
elif not isinstance(path, str):
raise TypeError(
'Could not understand input "path", a str was expected but a %s datatype variable was input.'
% type(path)
)
else:
raise ValueError(
'Could not understand input "path", either a .h5, .tif, .tiff or a .xml was expected but %s was input.'
% path
)
def _expected_keys(product_type):
"""
Aux function. Contains the most current lists of keys we expect there to be in the different forms of metadata.
"""
if product_type == "SLC":
expected_keys = (
"acquisition_end_utc",
"acquisition_mode",
"acquisition_prf",
"acquisition_start_utc",
"angX",
"angY",
"angZ",
"ant_elev_corr_flag",
"antenna_pattern_compensation",
"avg_scene_height",
"azimuth_ground_spacing",
"azimuth_looks",
"azimuth_time_interval",
"calibration_factor",
"carrier_frequency",
"chirp_bandwidth",
"chirp_duration",
"coord_center",
"coord_first_far",
"coord_first_near",
"coord_last_far",
"coord_last_near",
"dc_estimate_coeffs",
"dc_estimate_poly_order",
"dc_estimate_time_utc",
"doppler_rate_coeffs",
"doppler_rate_poly_order",
"first_pixel_time",
"fsl_compensation",
"geo_ref_system",
"local_incidence_angle",
"look_side",
"mean_earth_radius",
"mean_orbit_altitude",
"number_of_azimuth_samples",
"number_of_dc_estimations",
"number_of_range_samples",
"number_of_state_vectors",
"orbit_absolute_number",
"orbit_direction",
"orbit_relative_number",
"orbit_repeat_cycle",
"polarization",
"posX",
"posY",
"posZ",
"processing_prf",
"processing_time",
"processor_version",
"product_level",
"product_name",
"product_type",
"range_looks",
"range_sampling_rate",
"range_spread_comp_flag",
"sample_precision",
"satellite_name",
"slant_range_spacing",
"state_vector_time_utc",
"total_processed_bandwidth_azimuth",
"velX",
"velY",
"velZ",
"window_function_azimuth",
"window_function_range",
"zerodoppler_end_utc",
"zerodoppler_start_utc",
)
elif product_type == "GRD":
expected_keys = tuple(_expected_datatypes("GRD"))
elif product_type == "xml":
raise NotImplementedError(
"Ambiguous functionality, the .xml parsing structure does not work as expected. Fix the code before proceeding."
)
# A subset of it works, but some variables are missing and some are hidden under a hierarchical structure. You'll need to finish writing the
# wrapper if you want to use this.
expected_keys = (
"Orbit_State_Vectors",
"Doppler_Centroid_Coefficients",
"Doppler_Rate",
"product_name",
"product_type",
"product_level",
"satellite_name",
"acquisition_mode",
"look_side",
"processing_time",
"processor_version",
"acquisition_start_utc",
"acquisition_end_utc",
"zerodoppler_start_utc",
"zerodoppler_end_utc",
"first_pixel_time",
"number_of_azimuth_samples",
"number_of_range_samples",
"orbit_repeat_cycle",
"orbit_relative_number",
"orbit_absolute_number",
"orbit_direction",
"sample_precision",
"polarization",
"azimuth_looks",
"range_looks",
"slant_range_spacing",
"azimuth_ground_spacing",
"acquisition_prf",
"processing_prf",
"carrier_frequency",
"azimuth_time_interval",
"range_sampling_rate",
"chirp_bandwidth",
"chirp_duration",
"total_processed_bandwidth_azimuth",
"window_function_range",
"window_function_azimuth",
"range_spread_comp_flag",
"ant_elev_corr_flag",
"number_of_dc_estimations",
"dc_estimate_poly_order",
"doppler_rate_poly_order",
"geo_ref_system",
"avg_scene_height",
"mean_orbit_altitude",
"mean_earth_radius",
"coord_first_near",
"coord_first_far",
"coord_last_near",
"coord_last_far",
"coord_center",
"incidence_near",
"incidence_far",
"calibration_factor",
)
elif not isinstance(product_type, str):
raise TypeError(
'Did not understand input "product_type", a str was expected but a %s datatype variable was input.'
% type(product_type)
)
else:
raise ValueError(
'Did not understand input "product_type", either "SLC", "GRD" or "xml" was expected but %s was input.'
% product_type
)
return expected_keys
def _expected_datatypes(product_type):
"""
Aux function. Contains the most current lists of keys we expect there to be in the different forms of metadata.
"""
if product_type == "SLC":
# Only the datetimes need to be parsed.
expected_dtypes = {
"acquisition_start_utc": "parse_datetime_single",
"acquisition_end_utc": "parse_datetime_single",
"dc_estimate_time_utc": "parse_datetime_single",
"first_pixel_time_utc": "parse_datetime_single",
"state_vector_time_utc": "parse_datetime_vect",
"zerodoppler_start_utc": "parse_datetime_single",
"zerodoppler_end_utc": "parse_datetime_single",
}
elif product_type == "GRD":
# All the fields need to be parsed, so all the datatypes are input.
expected_dtypes = {
"acquisition_end_utc": "parse_datetime_single", # single datetime
"acquisition_mode": str,
"acquisition_prf": float,
"acquisition_start_utc": str,
"ant_elev_corr_flag": bool,
"area_or_point": str,
"avg_scene_height": float,
"azimuth_spacing": float,
"azimuth_look_bandwidth": float,
"azimuth_look_overlap": float,
"azimuth_looks": int,
"azimuth_time_interval": float,
"calibration_factor": float,
"carrier_frequency": float,
"chirp_bandwidth": float,
"chirp_duration": float,
"coord_center": "parse_float_vect", # 1d vect of floats, needs to be parsed
"coord_first_far": "parse_float_vect",
"coord_first_near": "parse_float_vect",
"coord_last_far": "parse_float_vect",
"coord_last_near": "parse_float_vect",
"dc_estimate_coeffs": "parse_float_vect",
"dc_estimate_poly_order": int,
"dc_estimate_time_utc": "parse_datetime_vect", # datetime vector
"dc_reference_pixel_time": float,
"doppler_rate_coeffs": "parse_float_vect",
"doppler_rate_poly_order": int,
"doppler_rate_reference_pixel_time": float,
"gcp_terrain_model": str,
"geo_ref_system": str,
"grsr_coefficients": "parse_float_vect",
"grsr_ground_range_origin": float,
"grsr_poly_order": int,
"grsr_zero_doppler_time": "parse_datetime_single", # single datetime
"heading": float,
"incidence_angle_coefficients": "parse_float_vect",
"incidence_angle_ground_range_origin": float,
"incidence_angle_poly_order": int,
"incidence_angle_zero_doppler_time": "parse_datetime_single", # single datetime
"incidence_center": float,
"incidence_far": float,
"incidence_near": float,
"look_side": str,
"mean_earth_radius": float,
"mean_orbit_altitude": float,
"number_of_azimuth_samples": int,
"number_of_dc_estimations": int,
"number_of_range_samples": int,
"number_of_state_vectors": int,
"orbit_absolute_number": int,
"orbit_direction": str,
"orbit_processing_level": str,
"orbit_relative_number": int,
"orbit_repeat_cycle": int,
"polarization": str,
"posX": "parse_float_vect",
"posY": "parse_float_vect",
"posZ": "parse_float_vect",
"processing_prf": float,
"processing_time": "parse_datetime_single", # single datetime
"processor_version": str,
"product_file": str,
"product_level": str,
"product_name": str,
"product_type": str,
"range_looks": int,
"range_sampling_rate": float,
"range_spacing": float,
"range_spread_comp_flag": bool,
"sample_precision": str,
"satellite_look_angle": str,
"satellite_name": str,
"slant_range_to_first_pixel": float,
"state_vector_time_utc": "parse_datetime_vect", # 1d vect of datetimes, need to be parsed.
"total_processed_bandwidth_azimuth": float,
"velX": "parse_float_vect",
"velY": "parse_float_vect",
"velZ": "parse_float_vect",
"window_function_azimuth": str,
"window_function_range": str,
"zerodoppler_end_utc": "parse_datetime_single", # single datetime
"zerodoppler_start_utc": "parse_datetime_single", # single datetime
}
elif product_type == "xml":
raise NotImplementedError
elif not isinstance(product_type, str):
raise TypeError(
'Did not understand input "product_type", a str was expected but a %s datatype variable was input.'
% type(product_type)
)
else:
raise ValueError(
'Did not understand input "product_type", either "SLC", "GRD" or "xml" was expected but %s was input.'
% product_type
)
return expected_dtypes
def _fix_GRD_metadata_datatypes(metadata, expected_dtypes):
"""
Attempt to convert all the metadata fields according to the formula specified
in expected_dtypes.
"""
def __parse_float_vect(str_of_vect):
"""
The 1D vectors are interpreted as strings by the rasterio.keys() reader.
This aux function splits them into numpy arrays of floating points.
"""
num_left_brackets = str_of_vect.count("[")
num_right_brackets = str_of_vect.count("[")
assert (
num_left_brackets == num_right_brackets
), 'The input was expected to be a str representation of a python list of floats. The number of left brackets "[" and right brackets "]" did not match. The parser will most likely break, aborting.'
str_of_vect = str_of_vect[1:-1] # starts and ends with a bracket. Remove them.
if num_left_brackets == 1: # single list
str_of_vect = str_of_vect.replace(",", "") # remove dots
str_of_vect = str_of_vect.split(" ")
while "" in str_of_vect:
str_of_vect.remove("")
floats = []
for i in range(0, len(str_of_vect)):
floats.append(float(str_of_vect[i]))
floats = np.array(floats)
elif num_left_brackets == 0:
raise ValueError(
"The input was expected to be a str representation of a python list of floats, but no brackets were found in the input str. The parser will most likely break, aborting."
)
else: # num_left_brackets > 1:
raise ValueError(
'The input was expected to be a str representation of a python list of floats, but %d left brackets "[" were found in the input str. The parser will most likely break, aborting.'
% num_left_brackets
)
# numpy array of floats
return floats
    def __parse_datetime_single(str_of_single):
        """
        Just a single datetime value. Turn it into a numpy array and return.

        The string is not decoded here; it is wrapped in a 0-d numpy array so
        the value indexes the same way as the other wrapped metadata fields
        produced by the main conversion loop.
        """
        return np.array(str_of_single)
def __parse_datetime_vect(str_of_vect):
"""
The datetime vectors are interpreted as strings by the rasterio.keys() reader.
This aux function splits them into lists of strings, which are further parsed in Zerodoppler.py.
"""
num_left_brackets = str_of_vect.count("[")
num_right_brackets = str_of_vect.count("]")
assert (
num_left_brackets == num_right_brackets
), 'The input was expected to be a str representation of a python list of str dates. The number of left brackets "[" and right brackets "]" did not match. The parser will most likely break, aborting.'
str_of_vect = str_of_vect[1:-1] # starts and ends with a bracket. Remove them.
if num_left_brackets == 1:
str_of_vect = str_of_vect.replace("'", "")
str_of_vect = str_of_vect.replace(" ", "")
vect_of_str = str_of_vect.split(",")
while "" in vect_of_str:
vect_of_str.remove("")
vect_of_str = np.array(vect_of_str)
elif num_left_brackets == 0:
raise ValueError(
"The input was expected to be a str representation of a python list of floats, but no brackets were found in the input str. The parser will most likely break, aborting."
)
else: # num_left_brackets > 1:
raise ValueError(
'The input was expected to be a str representation of a python list of floats, but %d left brackets "[" were found in the input str. The parser will most likely break, aborting.'
% num_left_brackets
)
# numpy array of chars
return vect_of_str
# Main loop. Go through each field in the metadata and parse the contents.
for key in metadata.keys():
if key in expected_dtypes:
var_type = expected_dtypes[key]
old_val = metadata[key]
if type(var_type) is type: # the field specifies a datatype
new_val = np.array(
var_type(old_val)
) # Everything is wrapped in a numpy array so that they index like a hdf5 dataset and everything works off the shelf.
elif type(var_type) is str:
if var_type == | |
<reponame>nimzco/Environment
# -*- coding: utf-8 -*-
import os
import platform
import fnmatch
from re import match, sub
from subprocess import PIPE
from subprocess import Popen
import sublime
import sublime_plugin
#
# Monkey patch `sublime.Region` so it can be iterable:
sublime.Region.totuple = lambda self: (self.a, self.b)
sublime.Region.__iter__ = lambda self: self.totuple().__iter__()
# Plugin identity and the on-disk location of this package.
PLUGIN_NAME = 'JsPrettier'
PLUGIN_PATH = os.path.join(sublime.packages_path(),
                           os.path.dirname(os.path.realpath(__file__)))
PLUGIN_CMD_NAME = 'js_prettier'
PROJECT_SETTINGS_KEY = PLUGIN_CMD_NAME
SETTINGS_FILE = '{0}.sublime-settings'.format(PLUGIN_NAME)
PRETTIER_OPTIONS_KEY = 'prettier_options'
# Maps each supported prettier option name to its CLI flag and the default
# used when the setting is missing or empty.
PRETTIER_OPTION_CLI_MAP = [
    {
        'option': 'printWidth',
        'cli': '--print-width',
        'default': '80'
    },
    {
        'option': 'singleQuote',
        'cli': '--single-quote',
        'default': 'false'
    },
    {
        'option': 'trailingComma',
        'cli': '--trailing-comma',
        'default': 'none'
    },
    {
        'option': 'bracketSpacing',
        'cli': '--bracket-spacing',
        'default': 'true'
    },
    {
        'option': 'jsxBracketSameLine',
        'cli': '--jsx-bracket-same-line',
        'default': 'false'
    },
    {
        'option': 'parser',
        'cli': '--parser',
        'default': 'babylon'
    },
    {
        'option': 'semi',
        'cli': '--semi',
        'default': 'true'
    }
]
# File extensions (without the dot) eligible for formatting by default; users
# can extend this via the 'custom_file_extensions' setting.
ALLOWED_FILE_EXTENSIONS = [
    'js',
    'jsx',
    'json',
    'graphql',
    'ts',
    'tsx',
    'css',
    'scss',
    'less'
]
# Sublime Text 3 reports build numbers >= 3000.
IS_SUBLIME_TEXT_LATEST = int(sublime.version()) >= 3000
class JsPrettierCommand(sublime_plugin.TextCommand):
_error_message = None
    @property
    def debug(self):
        """Whether debug logging is enabled via the 'debug' plugin setting."""
        return self.get_setting('debug', False)
@property
def has_error(self):
if not self._error_message:
return False
return True
    @property
    def error_message(self):
        """The last recorded error message, or None when there is none."""
        return self._error_message
    @error_message.setter
    def error_message(self, message=None):
        """Record (or clear, with None) the current error message."""
        self._error_message = message
    @property
    def proc_env(self):
        """Environment for the prettier subprocess.

        Returns None on Windows (child inherits the parent environment).
        Elsewhere, a copy of os.environ is returned with ':/usr/local/bin'
        appended to PATH when that directory exists but is not yet on PATH —
        npm/yarn-installed binaries commonly live there.
        """
        env = None
        if not self.is_windows():
            env = os.environ.copy()
            usr_path = ':/usr/local/bin'
            if not self.env_path_exists(usr_path) \
                    and self.path_exists(usr_path):
                env['PATH'] += usr_path
        return env
    @property
    def prettier_cli_path(self):
        """The prettier cli path.

        When the `prettier_cli_path` setting is empty (""),
        the path is resolved by searching locations in the following order,
        returning the first match of the prettier cli path...

        - Locally installed prettier, relative to a Sublime Text Project
          file's root directory, e.g.: `node_modules/.bin/prettier'.
        - User's $HOME/node_modules directory.
        - Look in the JsPrettier Sublime Text plug-in directory for
          `node_modules/.bin/prettier`.
        - Finally, check if prettier is installed globally,
          e.g.: `yarn global add prettier`
          or: `npm install -g prettier`

        :return: The prettier cli path (may be None when nothing resolves).
        """
        user_prettier_path = self.get_setting('prettier_cli_path', '')
        project_path = self.get_active_project_path()
        if self.is_str_none_or_empty(user_prettier_path):
            # No explicit setting: probe project, then plugin, then global.
            global_prettier_path = self.which('prettier')
            project_prettier_path = os.path.join(
                project_path, 'node_modules', '.bin', 'prettier')
            plugin_prettier_path = os.path.join(
                PLUGIN_PATH, 'node_modules', '.bin', 'prettier')
            if os.path.exists(project_prettier_path):
                return project_prettier_path
            if os.path.exists(plugin_prettier_path):
                return plugin_prettier_path
            return global_prettier_path
        # handle cases when the user specifies a prettier cli path that is
        # relative to the working file or project:
        if not os.path.isabs(user_prettier_path):
            user_prettier_path = os.path.join(project_path, user_prettier_path)
        return user_prettier_path
    @property
    def node_path(self):
        """User-configured node binary path, or None to rely on PATH."""
        return self.get_setting('node_path', None)
    @property
    def tab_size(self):
        """The current view's tab size (defaults to 2), as an int."""
        return int(self.view.settings().get('tab_size', 2))
@property
def use_tabs(self):
translate_tabs_to_spaces = self.view.settings().get(
'translate_tabs_to_spaces', True)
if not translate_tabs_to_spaces:
return True
return False
    @property
    def allow_inline_formatting(self):
        """Whether formatting is offered for any view, regardless of syntax."""
        return self.get_setting('allow_inline_formatting', False)
    @property
    def additional_cli_args(self):
        """Extra user-specified CLI arguments (mapping of flag -> value)."""
        return self.get_setting('additional_cli_args', {})
    @property
    def max_file_size_limit(self):
        """Max file size (bytes) to format; -1 disables the limit."""
        return int(self.get_setting('max_file_size_limit', -1))
def exceeds_max_file_size_limit(self, view):
if self.max_file_size_limit == -1:
return False
if os.path.getsize(view.file_name()) > self.max_file_size_limit:
return True
return False
def is_allowed_file_ext(self, view):
filename = view.file_name()
if not filename:
return False
file_ext = os.path.splitext(filename)[1][1:]
if file_ext in ALLOWED_FILE_EXTENSIONS:
return True
if file_ext in set(self.get_setting('custom_file_extensions', [])):
return True
return False
    def run(self, edit, force_entire_file=False):
        """Format the whole file, or each non-empty selection, with prettier.

        :param edit: the sublime.Edit token passed to this TextCommand.
        :param force_entire_file: when True, format the entire file even if
            there are selections.
        """
        view = self.view
        if view.file_name() is None:
            #
            # Handle file must first be saved:
            if not IS_SUBLIME_TEXT_LATEST:
                # sublime text 2x: limited dialog support, just show error:
                return sublime.error_message(
                    '{0} Error\n\n'
                    'File must first be saved.'.format(PLUGIN_NAME))
            else:
                #
                # sublime text 3+: show dialog that includes a save option:
                result = sublime.yes_no_cancel_dialog(
                    '{0}\n\n'
                    'File must first be Saved.'.format(PLUGIN_NAME),
                    'Save...', "Don't Save")
                if result == sublime.DIALOG_YES:
                    view.run_command('save')
        #
        # re-check if the file was saved here, incase user canceled or closed
        # the save dialog:
        if view.file_name() is None:
            return sublime.set_timeout(lambda: sublime.status_message(
                '{0}: File save canceled.'.format(PLUGIN_NAME)), 0)
        prettier_cli_path = self.prettier_cli_path
        if prettier_cli_path is None:
            return sublime.error_message(
                '{0} Error\n\n'
                'The path to the Prettier cli executable could '
                'not be found! Please ensure the path to prettier is '
                'set in your PATH environment variable.'.format(PLUGIN_NAME))
        if self.exceeds_max_file_size_limit(view):
            return sublime.set_timeout(lambda: sublime.status_message(
                '{0}: File ignored, max allowed file size '
                'limit reached.'.format(PLUGIN_NAME)), 0)
        prettier_args = self.parse_prettier_options(view)
        node_path = self.node_path
        # Format entire file:
        if not self.has_selection(view) or force_entire_file is True:
            region = sublime.Region(0, view.size())
            source = view.substr(region)
            if self.is_str_empty_or_whitespace_only(source):
                return sublime.set_timeout(lambda: sublime.status_message(
                    '{0}: Nothing to format in file.'.format(PLUGIN_NAME)), 0)
            transformed = self._exec_cmd(
                source, node_path, prettier_cli_path, prettier_args)
            if self.has_error:
                self.show_console_error()
                return self.show_status_bar_error()
            # sanity check to ensure textual content was returned from cmd
            # stdout, not necessarily caught in OSError try/catch
            # exception handler
            if self.is_str_empty_or_whitespace_only(transformed):
                self.error_message = 'Empty content returned to stdout'
                return self.show_status_bar_error()
            file_changed = False
            transformed = self.trim_trailing_ws_and_lines(transformed)
            if transformed:
                if transformed == self.trim_trailing_ws_and_lines(source):
                    if self.ensure_newline_at_eof(view, edit) is True:
                        # no formatting changes applied, however, a line
                        # break was needed/inserted at the end of the file:
                        file_changed = True
                else:
                    view.replace(edit, region, transformed)
                    self.ensure_newline_at_eof(view, edit)
                    file_changed = True
            else:
                view.replace(edit, region, transformed)
                self.ensure_newline_at_eof(view, edit)
                file_changed = True
            if file_changed is True:
                sublime.set_timeout(lambda: sublime.status_message(
                    '{0}: File formatted.'.format(PLUGIN_NAME)), 0)
            else:
                sublime.set_timeout(lambda: sublime.status_message(
                    '{0}: File already formatted.'.format(PLUGIN_NAME)), 0)
            return
        # Format each selection:
        for region in view.sel():
            if region.empty():
                continue
            source = view.substr(region)
            if self.is_str_empty_or_whitespace_only(source):
                sublime.set_timeout(lambda: sublime.status_message(
                    '{0}: Nothing to format in selection.'.format(
                        PLUGIN_NAME)), 0)
                continue
            transformed = self._exec_cmd(
                source, node_path, prettier_cli_path, prettier_args)
            if self.has_error:
                self.show_console_error()
                return self.show_status_bar_error()
            # sanity check to ensure textual content was returned from cmd
            # stdout, not necessarily caught in OSError try/catch
            # exception handler
            if self.is_str_empty_or_whitespace_only(transformed):
                self.error_message = 'Empty content returned to stdout'
                return self.show_status_bar_error()
            transformed = self.trim_trailing_ws_and_lines(transformed)
            if transformed \
                    and transformed == self.trim_trailing_ws_and_lines(source):
                sublime.set_timeout(lambda: sublime.status_message(
                    '{0}: Selection(s) already formatted.'.format(
                        PLUGIN_NAME)), 0)
            else:
                view.replace(edit, region, transformed)
                sublime.set_timeout(lambda: sublime.status_message(
                    '{0}: Selection(s) formatted.'.format(PLUGIN_NAME)), 0)
    def _exec_cmd(self, source, node_path, prettier_cli_path,
                  prettier_args):
        """Run prettier over *source* via stdin and return the formatted text.

        Returns None (and records self.error_message via
        format_error_message) when prettier exits non-zero or writes to
        stderr. Re-raises OSError after showing a dialog when the process
        cannot be spawned at all.
        """
        self._error_message = None
        # Invoke the cli directly, or through an explicit node binary when
        # one is configured.
        if self.is_str_none_or_empty(node_path):
            cmd = [prettier_cli_path] \
                + ['--stdin'] \
                + prettier_args
        else:
            cmd = [node_path] \
                + [prettier_cli_path] \
                + ['--stdin'] \
                + prettier_args
        try:
            self.show_debug_message(
                'Prettier CLI Command', self.list_to_str(cmd))
            # shell=True on Windows so .cmd/.bat shims resolve.
            proc = Popen(
                cmd, stdin=PIPE,
                stderr=PIPE,
                stdout=PIPE,
                env=self.proc_env,
                shell=self.is_windows())
            stdout, stderr = proc.communicate(input=source.encode('utf-8'))
            if stderr or proc.returncode != 0:
                self.format_error_message(
                    stderr.decode('utf-8'), str(proc.returncode))
                return None
            return stdout.decode('utf-8')
        except OSError as ex:
            sublime.error_message('{0} - {1}'.format(PLUGIN_NAME, ex))
            raise
def should_show_plugin(self):
view = self.view
if self.allow_inline_formatting is True:
return True
if self.is_source_js(view) is True:
return True
if self.is_css(view) is True:
return True
if self.is_allowed_file_ext(view) is True:
return True
return False
    def is_visible(self):
        """Sublime Text hook: controls menu/command-palette visibility."""
        return self.should_show_plugin()
    def is_enabled(self):
        """Sublime Text hook: controls whether the command can run."""
        return self.should_show_plugin()
    def get_setting(self, key, default_value=None):
        """Resolve a plugin setting for *key*.

        Lookup order: the view's per-view settings, then the plugin's
        settings file; a project-level override, when present, wins over
        both.
        """
        settings = self.view.settings().get(PLUGIN_NAME)
        if settings is None or settings.get(key) is None:
            settings = sublime.load_settings(SETTINGS_FILE)
        value = settings.get(key, default_value)
        # check for project-level overrides:
        project_value = self._get_project_setting(key)
        if project_value is None:
            return value
        return project_value
    def get_sub_setting(self, key=None):
        """Resolve *key* from the nested 'prettier_options' settings dict,
        with project-level overrides taking precedence.

        NOTE(review): assumes `settings.get(PRETTIER_OPTIONS_KEY)` always
        returns a dict; a missing 'prettier_options' entry would raise
        AttributeError here — confirm against the shipped settings file.
        """
        settings = self.view.settings().get(PLUGIN_NAME)
        if settings is None or settings.get(PRETTIER_OPTIONS_KEY).get(
                key) is None:
            settings = sublime.load_settings(SETTINGS_FILE)
        value = settings.get(PRETTIER_OPTIONS_KEY).get(key)
        # check for project-level overrides:
        project_value = self._get_project_sub_setting(key)
        if project_value is None:
            return value
        return project_value
def parse_prettier_options(self, view):
# TODO: optimize option parsing...
prettier_cli_args = []
is_css = self.is_css(view)
is_typescript = self.is_typescript(view)
is_json = self.is_json(view)
is_graphql = self.is_graphql(view)
for mapping in PRETTIER_OPTION_CLI_MAP:
option_name = mapping['option']
cli_option_name = mapping['cli']
option_value = self.get_sub_setting(option_name)
# internally override the 'parser' option for css
# and set the value to 'postcss':
if option_name == 'parser' and is_css:
prettier_cli_args.append(cli_option_name)
prettier_cli_args.append('postcss')
continue
# internally override the 'parser' for typescript
# and set the value to 'typescript':
if option_name == 'parser' and is_typescript:
prettier_cli_args.append(cli_option_name)
prettier_cli_args.append('typescript')
continue
# internally override the 'parser' for json
# and set the value to 'json':
if option_name == 'parser' and is_json:
prettier_cli_args.append(cli_option_name)
prettier_cli_args.append('json')
continue
# internally override the 'parser' for graphql
# and set the value to 'graphql':
if option_name == 'parser' and is_graphql:
prettier_cli_args.append(cli_option_name)
prettier_cli_args.append('graphql')
continue
if option_value is None or str(option_value) == '':
option_value = mapping['default']
option_value = str(option_value).strip()
if self.is_bool_str(option_value):
prettier_cli_args.append('{0}={1}'.format(
cli_option_name, option_value.lower()))
else:
prettier_cli_args.append(cli_option_name)
prettier_cli_args.append(option_value)
# set the `tabWidth` option based on the current view:
prettier_cli_args.append('--tab-width')
prettier_cli_args.append(str(self.tab_size))
# set the `useTabs` option based on the current view:
prettier_cli_args.append('{0}={1}'.format(
'--use-tabs', str(self.use_tabs).lower()))
# add the additional arguments from the settings file to the command:
if self.additional_cli_args and len(self.additional_cli_args) > 0:
for arg_key, arg_value in self.additional_cli_args.items():
arg_key = str(arg_key).strip()
arg_value = str(arg_value).strip()
# handle bool options
if arg_value != '' and self.is_bool_str(arg_value):
prettier_cli_args.append(
'{0}={1}'.format(arg_key, arg_value.lower()))
continue
prettier_cli_args.append(arg_key)
if arg_value != '':
prettier_cli_args.append(arg_value)
return prettier_cli_args
def which(self, executable, path=None):
if not self.is_str_none_or_empty(executable):
if os.path.isfile(executable):
return executable
if self.is_str_none_or_empty(path):
path = os.environ['PATH']
if not self.is_windows():
usr_path = ':/usr/local/bin'
if not self.env_path_exists(usr_path, path) \
and self.path_exists(usr_path):
| |
<filename>np_processor/processor/np_faster_rcnn_post.py
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
import numpy as np
from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, \
anchor_generator_builder, box_list_ops, box_list, ops, post_processing_builder, \
target_assigner, post_processing, visualization_utils as vis_util
from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields
from platformx.plat_tensorflow.tools.processor import model_config
import config
from PIL import Image
from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util
from scipy import misc
import os
import matplotlib
matplotlib.use('Agg')  # select a headless backend; must run before pyplot is imported
# Keys of the first-stage predictor outputs.
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
# Label map used when visualising detection results.
PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS
# Substrings used to recognise tensors by name in the intermediate-result dicts.
BOXES_NAME = "FirstStageBoxPredictor_BoxEncodingPredictor"
CLASSES_NAME = "FirstStageBoxPredictor_ClassPredictor"
FEATURES_NAME = "FirstStageFeatureExtractor"
BOX_PREDICTOR = "SecondStageBoxPredictor_Reshape_1"
def faster_rcnn_stage_one_post(preprocessed_inputs, result_middle=None):
    """Post-process the first (RPN) stage of a Faster R-CNN graph.

    Picks the box encodings, class predictions and feature maps out of
    ``result_middle`` by matching known name substrings, then delegates to
    ``crop_and_resize_to_input``.

    :param preprocessed_inputs: np.ndarray, the preprocessed image batch.
    :param result_middle: dict mapping tensor name -> np.ndarray with the
        intermediate network outputs.
    :return: the result of ``crop_and_resize_to_input``.
    :raises ValueError: if any expected tensor is missing from
        ``result_middle`` (previously this surfaced as a NameError).
    """
    print("========================== faster_rcnn_stage_one_post ========================== ")
    print("1 preprocessed_inputs:", preprocessed_inputs.shape)
    # Initialise to None so a missing tensor fails loudly below instead of
    # raising an obscure NameError at the call site.
    box_encodings = None
    class_predictions_with_background = None
    rpn_features_to_crop = None
    rpn_box_predictor_features = None
    for key, value in result_middle.items():
        if BOXES_NAME in key:
            box_encodings = value
            print("box_encodings:", value.shape)
        if CLASSES_NAME in key:
            class_predictions_with_background = value
            print("class_predictions_with_background:", value.shape)
        if FEATURES_NAME in key:
            rpn_features_to_crop = value
            print("rpn_features_to_crop:", value.shape)
        if BOXES_NAME not in key and CLASSES_NAME not in key and FEATURES_NAME not in key:
            # Any unrecognised tensor is assumed to be the RPN box-predictor
            # feature map (mirrors the original matching logic).
            rpn_box_predictor_features = value
            print("rpn_box_predictor_features:", value.shape)
    if any(v is None for v in (box_encodings, class_predictions_with_background,
                               rpn_features_to_crop, rpn_box_predictor_features)):
        raise ValueError(
            "result_middle is missing one of the expected first-stage tensors")
    fisrt_post_result = crop_and_resize_to_input(
        rpn_box_predictor_features, preprocessed_inputs, box_encodings,
        class_predictions_with_background, rpn_features_to_crop)
    return fisrt_post_result
def faster_rcnn_second_stage_post(preprocessed_inputs, result_middle=None, second_net_result=None):
    """Second-stage post-processing for Faster R-CNN.

    Collects the first-stage intermediates from ``result_middle`` and the
    second-stage predictor outputs from ``second_net_result``, normalizes
    their layouts, then runs ``second_stage_box_predictor`` and displays
    the detection result.

    :param preprocessed_inputs: preprocessed input image batch (NHWC
        assumed -- indexes 1..3 are used as the true image shape).
    :param result_middle: dict of first-stage tensors keyed by name.
    :param second_net_result: dict of second-stage tensors keyed by name.
    :return: the detection output dict.
    :raises ValueError: if any expected tensor is missing (previously a
        NameError would escape instead).
    """
    print("================= faster_rcnn_second_stage_post =================")
    print("preprocessed_inputs.shape:", preprocessed_inputs.shape)
    box_encodings = None
    class_predictions_with_background = None
    rpn_features_to_crop = None
    rpn_box_predictor_features = None
    for key, value in result_middle.items():
        if BOXES_NAME in key:
            box_encodings = value
            print("box_encodings:", value.shape)
        if CLASSES_NAME in key:
            class_predictions_with_background = value
            print("class_predictions_with_background:", value.shape)
        if FEATURES_NAME in key:
            rpn_features_to_crop = value
            print("rpn_features_to_crop:", value.shape)
        if BOXES_NAME not in key and CLASSES_NAME not in key and FEATURES_NAME not in key:
            rpn_box_predictor_features = value
            print("rpn_box_predictor_features:", value.shape)
    box_encoding_reshape = None
    class_prediction_reshape = None
    for key, value in second_net_result.items():
        if BOX_PREDICTOR in key:
            # Bring class predictions to NHWC and rank 4.
            if value.ndim == 4:
                value = np.transpose(value, axes=(0, 2, 3, 1))
            if value.ndim == 3:
                value = np.expand_dims(value, axis=3)
            class_prediction_reshape = value
            print("class_prediction_reshape.shape:", value.shape)
        else:
            print(" before box_encoding_reshape.shape:", value.shape)
            if value.ndim == 4 and value.shape[3] != 4:
                # NOTE(review): this uses reshape (not transpose) to move
                # the channel axis, which preserves the NCHW->NHWC shape
                # but NOT element order -- confirm against the producer's
                # actual layout (the class branch above uses transpose).
                value = value.reshape((value.shape[0], value.shape[2],
                                       value.shape[3], value.shape[1]))
            box_encoding_reshape = value
            print("box_encoding_reshape.shape:", box_encoding_reshape.shape)
    if (box_encodings is None or class_predictions_with_background is None
            or rpn_features_to_crop is None
            or rpn_box_predictor_features is None
            or box_encoding_reshape is None
            or class_prediction_reshape is None):
        raise ValueError("missing expected intermediate outputs in "
                         "result_middle/second_net_result")
    input_shape = preprocessed_inputs.shape
    true_image_shapes = np.array([[input_shape[1], input_shape[2], input_shape[3]]], dtype=np.int32)
    print("2 true_image_shapes:", true_image_shapes)
    result_output = second_stage_box_predictor(preprocessed_inputs, box_encoding_reshape, class_prediction_reshape,
                                               rpn_features_to_crop, box_encodings, class_predictions_with_background,
                                               true_image_shapes, rpn_box_predictor_features)
    show_detection_result(result_output)
    return result_output
def second_stage_box_predictor(preprocessed_inputs, box_encoding_reshape, class_prediction_reshape,
                               rpn_features_to_crop,
                               rpn_box_encodings,
                               rpn_objectness_predictions_with_background,
                               true_image_shapes,
                               rpn_box_predictor_features):
    """Rebuild first-stage proposals, then run second-stage post-processing.

    Regenerates anchors for the RPN feature map, reshapes/decodes/NMS-filters
    the RPN outputs into proposals, converts them to absolute coordinates,
    and hands everything to ``second_postprocess``.

    :param preprocessed_inputs: input image batch; shape indexes 1/2 are
        used as height/width (NHWC assumed -- TODO confirm).
    :param box_encoding_reshape: second-stage box encodings.
    :param class_prediction_reshape: second-stage class predictions.
    :param rpn_features_to_crop: RPN feature map (defines anchor layout).
    :param rpn_box_encodings: raw first-stage box encodings.
    :param rpn_objectness_predictions_with_background: raw RPN scores.
    :param true_image_shapes: per-image [H, W, C] array.
    :param rpn_box_predictor_features: RPN box-predictor feature map.
    :return: detection dict from ``second_postprocess``.
    """
    image_shape = shape_utils.combined_static_and_dynamic_shape(
        preprocessed_inputs)
    first_stage_anchor_generator = anchor_generator_builder.build()
    # Clip window spans the whole (padded) input image: [0, 0, H, W].
    clip_window = np.stack([0, 0, image_shape[1], image_shape[2]])
    feature_map_shape = rpn_features_to_crop.shape
    # Anchors for the single RPN feature map, clipped to the image window.
    anchors_boxlist = box_list_ops.concatenate(
        first_stage_anchor_generator.generate([(feature_map_shape[1],
                                                feature_map_shape[2])]))
    anchors_boxlist = box_list_ops.clip_to_window(
        anchors_boxlist, clip_window)
    _anchors = anchors_boxlist
    image_shape_2d = _image_batch_shape_2d(image_shape)
    num_anchors_per_location = (
        first_stage_anchor_generator.num_anchors_per_location())
    if len(num_anchors_per_location) != 1:
        raise RuntimeError('anchor_generator is expected to generate anchors '
                           'corresponding to a single feature map.')
    # Reshape raw RPN outputs to per-anchor form:
    # [batch, num_anchors, 1, code_size] / [batch, num_anchors, 2].
    box_predictions = _first_stage_box_predictor_predict([rpn_box_predictor_features], [rpn_box_encodings],
                                                         [rpn_objectness_predictions_with_background],
                                                         num_anchors_per_location)
    predictions_box_encodings = np.concatenate(
        box_predictions[BOX_ENCODINGS], axis=1)
    rpn_box_encodings = np.squeeze(predictions_box_encodings, axis=2)
    rpn_objectness_predictions_with_background = np.concatenate(
        box_predictions[CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    first_stage_max_proposals = config.cfg.POSTPROCESSOR.FIRST_STAGE_MAX_PROPOSALS
    # Decode + NMS the RPN outputs into normalized proposal boxes.
    proposal_boxes_normalized, _, num_proposals = _postprocess_rpn(
        rpn_box_encodings, rpn_objectness_predictions_with_background,
        _anchors.get(), image_shape_2d, first_stage_max_proposals=first_stage_max_proposals)
    print("proposal_boxes_normalized:", proposal_boxes_normalized.shape)
    prediction_dict = {
        'rpn_box_predictor_features': rpn_box_predictor_features,
        'rpn_features_to_crop': rpn_features_to_crop,
        'image_shape': image_shape,
        'rpn_box_encodings': rpn_box_encodings,
        'rpn_objectness_predictions_with_background':
            rpn_objectness_predictions_with_background,
    }
    print("=========== box_encoding_reshape", box_encoding_reshape.shape)
    # Drop the singleton axis 1 of the second-stage outputs.
    refined_box_encodings = np.squeeze(
        box_encoding_reshape,
        axis=1)
    print("=========== class_prediction_reshape", class_prediction_reshape.shape)
    class_predictions_with_background = np.squeeze(
        class_prediction_reshape,
        axis=1)
    _parallel_iterations = 16
    # Only the first image's proposals are used from here on.
    proposal_boxes_normalized = proposal_boxes_normalized[0]
    # Normalized [0, 1] proposals -> absolute image coordinates.
    absolute_proposal_boxes = ops.normalized_to_image_coordinates(
        proposal_boxes_normalized, image_shape, _parallel_iterations)
    prediction_dict1 = {
        'refined_box_encodings': refined_box_encodings,
        'class_predictions_with_background':
            class_predictions_with_background,
        'num_proposals': num_proposals,
        'proposal_boxes': absolute_proposal_boxes,
    }
    prediction_dict.update(prediction_dict1)
    result_output = second_postprocess(prediction_dict, true_image_shapes)
    return result_output
def second_postprocess(prediction_dict, true_image_shapes):
    """Run second-stage box-classifier post-processing and format outputs."""
    detections = _postprocess_box_classifier(
        prediction_dict['refined_box_encodings'],
        prediction_dict['class_predictions_with_background'],
        prediction_dict['proposal_boxes'],
        prediction_dict['num_proposals'],
        true_image_shapes,
        mask_predictions=None)
    # Pad/format the raw detections into the fixed-size output dict.
    return _add_output_tensor_nodes(detections)
def _postprocess_box_classifier(
        refined_box_encodings,
        class_predictions_with_background,
        proposal_boxes,
        num_proposals,
        image_shapes,
        mask_predictions=None):
    """Decode, score-convert and NMS the second-stage classifier outputs.

    :param refined_box_encodings: flat second-stage box encodings; reshaped
        below to [batch, max_proposals, num_box_classes, code_size].
    :param class_predictions_with_background: flat class logits including
        the background slot.
    :param proposal_boxes: first-stage proposal boxes (absolute coords).
    :param num_proposals: number of valid proposals per image.
    :param image_shapes: 2-D array of per-image [H, W, C] shapes.
    :param mask_predictions: optional instance-mask logits.
    :return: dict keyed by standard detection field names with boxes,
        scores, classes, num_detections (and masks when provided).
    """
    _first_stage_max_proposals = config.cfg.POSTPROCESSOR.FIRST_STAGE_MAX_PROPOSALS
    max_num_proposals = _first_stage_max_proposals
    num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES
    # The target assigner is only needed for its box coder (code size).
    _proposal_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN', 'proposal')
    _box_coder = _proposal_target_assigner.box_coder
    _second_stage_nms_fn, second_stage_score_conversion_fn = post_processing_builder.build(model_config.FASTER_RCNN)
    refined_box_encodings_batch = np.reshape(
        refined_box_encodings,
        [-1,
         max_num_proposals,
         refined_box_encodings.shape[1],
         _box_coder.code_size])
    class_predictions_with_background_batch = np.reshape(
        class_predictions_with_background,
        [-1, max_num_proposals, num_classes + 1]
    )
    # Decode box encodings against the proposals they refine.
    refined_decoded_boxes_batch = _batch_decode_boxes(
        refined_box_encodings_batch, proposal_boxes)
    # Convert logits to scores (e.g. softmax), per the model config.
    class_predictions_with_background_batch = (
        second_stage_score_conversion_fn(
            class_predictions_with_background_batch))
    # Strip the background column (slot 0) before NMS.
    new_shape = [-1, max_num_proposals, num_classes]
    sliced = class_predictions_with_background_batch[0:, 0:, 1:]
    class_predictions_batch = np.reshape(sliced, new_shape)
    clip_window = _compute_clip_window(image_shapes)
    mask_predictions_batch = None
    if mask_predictions is not None:
        # NOTE(review): `.value` is a tf.Dimension accessor; numpy shape
        # entries are plain ints, so this branch looks untested with numpy
        # inputs -- confirm before relying on mask post-processing.
        mask_height = mask_predictions.shape[2].value
        mask_width = mask_predictions.shape[3].value
        mask_predictions = ops.sigmoid(mask_predictions)
        mask_predictions_batch = np.reshape(
            mask_predictions, [-1, max_num_proposals,
                               num_classes, mask_height, mask_width])
    (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks, _,
     num_detections) = _second_stage_nms_fn(
        refined_decoded_boxes_batch,
        class_predictions_batch,
        clip_window=clip_window,
        change_coordinate_frame=True,
        num_valid_boxes=num_proposals,
        masks=mask_predictions_batch)
    print("========== nmsed_boxes:", nmsed_boxes.shape)
    print("========== nmsed_scores:", nmsed_scores.shape)
    print("========== nmsed_classes:", nmsed_classes.shape)
    print("========== num_detections:", num_detections)
    detections = {
        fields.DetectionResultFields.detection_boxes: nmsed_boxes,
        fields.DetectionResultFields.detection_scores: nmsed_scores,
        fields.DetectionResultFields.detection_classes: nmsed_classes,
        fields.DetectionResultFields.num_detections: num_detections
    }
    if nmsed_masks is not None:
        detections[fields.DetectionResultFields.detection_masks] = nmsed_masks
    return detections
def crop_and_resize_to_input(rpn_box_predictor_features, preprocessed_inputs, box_encodings,
                             class_predictions_with_background, rpn_features_to_crop):
    """First-stage pipeline: anchors -> RPN proposals -> ROI feature crops.

    :param rpn_box_predictor_features: RPN box-predictor feature map.
    :param preprocessed_inputs: input image batch; shape indexes 1/2 are
        used as height/width.
    :param box_encodings: raw first-stage box encodings.
    :param class_predictions_with_background: raw RPN scores.
    :param rpn_features_to_crop: feature map that proposals are cropped from.
    :return: cropped (and max-pooled) per-proposal feature regions.
    :raises RuntimeError: if the anchor generator is not single-feature-map.
    """
    image_shape = preprocessed_inputs.shape
    first_stage_anchor_generator = anchor_generator_builder.build()
    num_anchors_per_location = (
        first_stage_anchor_generator.num_anchors_per_location())
    # 12 num anchors
    print("num_anchors_per_location:", num_anchors_per_location)
    if len(num_anchors_per_location) != 1:
        raise RuntimeError('anchor_generator is expected to generate anchors '
                           'corresponding to a single feature map.')
    # Reshape raw RPN outputs to per-anchor form.
    box_predictions = _first_stage_box_predictor_predict([rpn_box_predictor_features], [box_encodings],
                                                         [class_predictions_with_background],
                                                         num_anchors_per_location)
    predictions_box_encodings = np.concatenate(
        box_predictions[BOX_ENCODINGS], axis=1)
    rpn_box_encodings = np.squeeze(predictions_box_encodings, axis=2)
    # -2.7135613
    rpn_objectness_predictions_with_background = np.concatenate(
        box_predictions[CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
    # The Faster R-CNN paper recommends pruning anchors that venture outside
    # the image window at training time and clipping at inference time.
    clip_window = np.stack([0, 0, image_shape[1], image_shape[2]])
    feature_map_shape = rpn_features_to_crop.shape
    anchors_boxlist = box_list_ops.concatenate(
        first_stage_anchor_generator.generate([(feature_map_shape[1],
                                                feature_map_shape[2])]))
    # [ 0 0 600 1002]
    anchors_boxlist = box_list_ops.clip_to_window(
        anchors_boxlist, clip_window)
    # clip anchors[0]: [ 0. 0. 45.254834 22.627417]
    _anchors = anchors_boxlist
    # Decode + NMS the RPN outputs, then ROI-crop the feature map.
    cropped_regions = _predict_second_stage_1(rpn_box_encodings, rpn_objectness_predictions_with_background,
                                              rpn_features_to_crop, _anchors.get(), image_shape)
    return cropped_regions
def _add_output_tensor_nodes(postprocessed_tensors):
    """Format post-processed detections into the final fixed-size outputs.

    Shifts class ids by the label-map offset, flattens scores/classes, and
    pads every output to a fixed length (at least 100): scores are padded
    with 0, classes with 1, boxes with zero rows.

    :param postprocessed_tensors: dict with detection boxes/scores/classes,
        num_detections and optional keypoints/masks.
    :return: dict of padded detection outputs keyed by standard field names.
    """
    detection_fields = fields.DetectionResultFields
    # Detected classes are 0-based; label map ids start at 1.
    label_id_offset = 1
    boxes = postprocessed_tensors.get(detection_fields.detection_boxes)
    scores = postprocessed_tensors.get(detection_fields.detection_scores)
    classes = postprocessed_tensors.get(
        detection_fields.detection_classes) + label_id_offset
    keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints)
    # NOTE(review): masks is fetched but never written to `outputs` below.
    masks = postprocessed_tensors.get(detection_fields.detection_masks)
    # TODO fixed
    # Normalize num_detections (may arrive as list/float/ndarray) to int.
    num_detections = postprocessed_tensors.get(detection_fields.num_detections)
    if isinstance(num_detections, list):
        num_detections = num_detections[0]
    elif isinstance(num_detections, float):
        num_detections = int(num_detections)
    elif isinstance(num_detections, np.ndarray):
        num_detections = int(num_detections[0])
    print("=============== num_detections :", num_detections)
    outputs = {}
    scores = scores.flatten()
    scores_1 = scores[0:num_detections]
    print("scores_1:", scores_1)
    # TODO: take the padded output size from the config file; the original
    # code pads scores with 0 and classes with 1.
    if scores.shape[0] < 100:
        raw_shape = 100
    else:
        raw_shape = scores.shape[0]
    scores_2 = np.zeros(shape=raw_shape - num_detections)
    scores = np.hstack((scores_1, scores_2))
    scores = np.expand_dims(scores, axis=0)
    outputs[detection_fields.detection_scores] = scores
    classes = classes.flatten()
    classes_1 = classes[0:num_detections]
    print("classes_1:", classes_1)
    classes_2 = np.ones(shape=raw_shape - num_detections)
    classes = np.hstack((classes_1, classes_2))
    classes = np.expand_dims(classes, axis=0)
    outputs[detection_fields.detection_classes] = classes
    # Boxes keep their leading batch axis; pad along the detection axis.
    boxes_1 = boxes[:, 0:num_detections]
    print("boxes_1:", boxes_1)
    boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4))
    boxes = np.hstack((boxes_1, boxes_2))
    outputs[detection_fields.detection_boxes] = boxes
    outputs[detection_fields.num_detections] = num_detections
    if keypoints is not None:
        outputs[detection_fields.detection_keypoints] = keypoints
    print("================= scores.shape :", scores.shape)
    print("================= boxes.shape :", boxes.shape)
    print("================= classes.shape :", classes.shape)
    return outputs
def _first_stage_box_predictor_predict(image_features, box_encodings, class_predictions_with_backgrounds,
                                       num_predictions_per_locations):
    """Reshape raw RPN predictor outputs into per-anchor form.

    For each feature map, box encodings are reshaped to
    [batch, h * w * anchors_per_location, 1, code_size] and class
    predictions to [batch, h * w * anchors_per_location, num_class_slots].

    :param image_features: list of feature maps (one per predictor head).
    :param box_encodings: list of raw box-encoding arrays.
    :param class_predictions_with_backgrounds: list of raw class arrays.
    :param num_predictions_per_locations: list of anchors-per-location ints.
    :return: dict with BOX_ENCODINGS and CLASS_PREDICTIONS_WITH_BACKGROUND
        lists of reshaped arrays.
    """
    box_encodings_list = []
    class_predictions_list = []
    # The RPN is a binary object-vs-background classifier.
    num_classes = 1
    num_class_slots = num_classes + 1
    # [12]
    print("num_predictions_per_locations:", num_predictions_per_locations)
    print("image_features:", image_features[0].shape)
    print("class_predictions_with_backgrounds:", class_predictions_with_backgrounds[0].shape)
    # The target assigner is only used for its box coder (code size).
    _proposal_target_assigner = target_assigner.create_target_assigner(
        'FasterRCNN', 'proposal')
    _box_coder = _proposal_target_assigner.box_coder
    # print("_box_coder:", _box_coder)
    _box_code_size = _box_coder.code_size
    for (image_feature, box_encoding, class_predictions_with_background,
         num_predictions_per_location) in zip(
            image_features, box_encodings, class_predictions_with_backgrounds,
            num_predictions_per_locations):
        combined_feature_map_shape = list(image_feature.shape)
        shapes = np.stack([combined_feature_map_shape[0],
                           combined_feature_map_shape[1] * combined_feature_map_shape[2] * num_predictions_per_location,
                           1,
                           _box_code_size])
        box_encoding_reshape = np.reshape(box_encoding, shapes)
        box_encodings_list.append(box_encoding_reshape)
        class_predictions_with_background = np.reshape(
            class_predictions_with_background,
            np.stack([combined_feature_map_shape[0],
                      combined_feature_map_shape[1] *
                      combined_feature_map_shape[2] *
                      num_predictions_per_location,
                      num_class_slots]))
        class_predictions_list.append(class_predictions_with_background)
    # print("box_encodings_list:", np.array(box_encodings_list).shape)
    # print("class_predictions_list:", np.array(class_predictions_list).shape)
    return {
        BOX_ENCODINGS: box_encodings_list,
        CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list
    }
def _predict_second_stage_1(rpn_box_encodings,
                            rpn_objectness_predictions_with_background,
                            rpn_features_to_crop,
                            anchors,
                            image_shape):
    """Decode + NMS the RPN outputs, then ROI-crop the feature map.

    :return: cropped (and max-pooled) per-proposal feature regions.
    """
    shapes_2d = _image_batch_shape_2d(image_shape)
    max_proposals = config.cfg.POSTPROCESSOR.FIRST_STAGE_MAX_PROPOSALS
    # Only the normalized boxes are needed here; scores and counts are
    # recomputed later in the second stage.
    proposals_normalized, _, _ = _postprocess_rpn(
        rpn_box_encodings, rpn_objectness_predictions_with_background,
        anchors, shapes_2d, first_stage_max_proposals=max_proposals)
    return _compute_second_stage_input_feature_maps(rpn_features_to_crop,
                                                    proposals_normalized)
def _flatten_first_two_dimensions(inputs):
    """Merge the two leading axes of *inputs*: [A, B, ...] -> [A * B, ...]."""
    dims = shape_utils.combined_static_and_dynamic_shape(inputs)
    merged = np.stack([dims[0] * dims[1]] + dims[2:])
    return np.reshape(inputs, merged)
def _compute_second_stage_input_feature_maps(features_to_crop,
                                             proposal_boxes_normalized):
    """ROI pooling: crop/resize a feature region per proposal, then max-pool.

    :param features_to_crop: RPN feature map to crop from.
    :param proposal_boxes_normalized: normalized proposal boxes, shape
        [batch, num_proposals, 4].
    :return: max-pooled cropped regions as a numpy array.
    """
    def get_box_inds(proposals):
        # Map each proposal to the batch index of its source image.
        proposals_shape = proposals.shape
        ones_mat = np.ones(proposals_shape[:2], dtype=np.int32)
        multiplier = np.expand_dims(range(proposals_shape[0]), 1)
        return np.reshape(ones_mat * multiplier, [-1])
    _initial_crop_size = config.cfg.POSTPROCESSOR.INITIAL_CROP_SIZE
    box_index = get_box_inds(proposal_boxes_normalized)
    # TODO
    # NOTE(review): drops into a TF1 session and tf.contrib.slim for
    # crop_and_resize/max_pool -- requires TensorFlow 1.x; the rest of this
    # module is pure numpy.
    import tensorflow as tf
    with tf.Session() as sess:
        cropped_regions = tf.image.crop_and_resize(
            features_to_crop,
            _flatten_first_two_dimensions(proposal_boxes_normalized),
            box_index,
            [_initial_crop_size, _initial_crop_size])
        ksize = config.cfg.POSTPROCESSOR.MAXPOOL_KERNEL_SIZE
        strides = config.cfg.POSTPROCESSOR.MAXPOOL_STRIDE
        slim = tf.contrib.slim
        max_pooled = slim.max_pool2d(
            cropped_regions,
            [ksize, ksize],
            stride=strides)
        # Evaluate the graph to get a concrete numpy result.
        max_pooled = sess.run(max_pooled)
        print("============== ROI Pooling result :", max_pooled.shape)
        return max_pooled
def _image_batch_shape_2d(image_batch_shape_1d):
return np.tile(np.expand_dims(image_batch_shape_1d[1:], 0),
[image_batch_shape_1d[0], 1])
def _postprocess_rpn(
rpn_box_encodings_batch,
rpn_objectness_predictions_with_background_batch,
anchors,
image_shapes, first_stage_max_proposals):
first_stage_nms_score_threshold = config.cfg.POSTPROCESSOR.FIRST_STAGE_NMS_SCORE_THRESHOLD
first_stage_nms_iou_threshold = config.cfg.POSTPROCESSOR.FIRST_STAGE_NMS_IOU_THRESHOLD
rpn_box_encodings_batch = np.expand_dims(rpn_box_encodings_batch, axis=2)
rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape(
rpn_box_encodings_batch)
# print("=== anchors:", anchors[0])
tiled_anchor_boxes = np.tile(
np.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1])
# print("=== tiled_anchor_boxes:", tiled_anchor_boxes[0][0])
proposal_boxes = _batch_decode_boxes(rpn_box_encodings_batch,
tiled_anchor_boxes)
proposal_boxes = np.squeeze(proposal_boxes, axis=2)
# (1, 28728, 4)
# 0 [11.60262919 3.12900102 41.31160688 18.96688846]
# rpn_objectness_predictions_with_background_batch (1, 28728, 2)
# rpn_objectness_predictions_with_background_batch[:, :, 1][0][0] -2.7135613
rpn_objectness_softmax_without_background = ops.softmax(rpn_objectness_predictions_with_background_batch)[:, :, 1]
# rpn_objectness_softmax_without_background: (1, 28728)
# ====== softmax score : 0.0032150035
print("====== softmax score :", rpn_objectness_softmax_without_background[0][0])
clip_window = _compute_clip_window(image_shapes)
# [[ 0 0 600 1002]]
print("clip_window:", clip_window)
(proposal_boxes, proposal_scores, _, _, _,
num_proposals) = post_processing.batch_multiclass_non_max_suppression(
np.expand_dims(proposal_boxes, axis=2),
np.expand_dims(rpn_objectness_softmax_without_background,
axis=2),
first_stage_nms_score_threshold,
first_stage_nms_iou_threshold,
first_stage_max_proposals,
first_stage_max_proposals,
clip_window=clip_window)
print("proposal_boxes:", proposal_boxes.shape)
print("proposal_boxes [0][0]:", proposal_boxes[0][0])
# import h5py
# with h5py.File('tf_proposal.h5', 'w') as f:
# f["tf_proposal"] = proposal_boxes[0]
print("proposal_scores:", proposal_scores.shape)
print("proposal_scores [0][0]:", proposal_scores[0][0])
# proposal_boxes [0][0]: [ 6.95569825 402.90691757 398.87478089 947.73357773]
# proposal_scores: (1, 100)
# proposal_scores [0][0]: 0.9992391
# caffe 'proposals final:', array([237.24371 , 18.908209, 561.04926 , 175.2929 ], dtype=float32))
| |
* Ii1I + oO0o
if 66 - 66: i1IIi . I1ii11iIi11i
if 86 - 86: Oo0Ooo
if 48 - 48: OoO0O00
if 55 - 55: OoO0O00 * i1IIi * I11i / iII111i
if ( ooOOOo0o0oo and ooOOOo0o0oo . accept_more_specifics == False ) :
if ( ooOOOo0o0oo . eid_record_matches ( iI1iii1IIIIi ) == False ) :
iiiIIIII1iIi = ooOOOo0o0oo . parent_for_more_specifics
if ( iiiIIIII1iIi ) : ooOOOo0o0oo = iiiIIIII1iIi
if 8 - 8: o0oOOo0O0Ooo * OoO0O00 % IiII / OoooooooOO * ooOoO0o - i11iIiiIii
if 14 - 14: Oo0Ooo . iII111i
if 50 - 50: iIii1I11I1II1
if 48 - 48: Ii1I - o0oOOo0O0Ooo - Oo0Ooo . iIii1I11I1II1
if 1 - 1: i1IIi % OoooooooOO
if 30 - 30: ooOoO0o % I11i
if 4 - 4: oO0o / OoO0O00
if 90 - 90: I11i . IiII / OoO0O00 . IiII
OoO0OOoooooOO = ( ooOOOo0o0oo and ooOOOo0o0oo . accept_more_specifics )
if ( OoO0OOoooooOO ) :
i1iIIiii = lisp_site_eid ( ooOOOo0o0oo . site )
i1iIIiii . dynamic = True
i1iIIiii . eid . copy_address ( iI1iii1IIIIi . eid )
i1iIIiii . group . copy_address ( iI1iii1IIIIi . group )
i1iIIiii . parent_for_more_specifics = ooOOOo0o0oo
i1iIIiii . add_cache ( )
i1iIIiii . inherit_from_ams_parent ( )
ooOOOo0o0oo . more_specific_registrations . append ( i1iIIiii )
ooOOOo0o0oo = i1iIIiii
else :
ooOOOo0o0oo = lisp_site_eid_lookup ( iI1iii1IIIIi . eid , iI1iii1IIIIi . group ,
True )
if 2 - 2: I11i + I1IiiI . IiII . OoOoOO00 * oO0o - ooOoO0o
if 29 - 29: OoO0O00
oOoo0OooOOo00 = iI1iii1IIIIi . print_eid_tuple ( )
if 78 - 78: iII111i * ooOoO0o + O0 % ooOoO0o + OoO0O00
if ( ooOOOo0o0oo == None ) :
OOOO0OOoO = bold ( "Site not found" , False )
lprint ( " {} for EID {}{}" . format ( OOOO0OOoO , green ( oOoo0OooOOo00 , False ) ,
", matched non-ams {}" . format ( green ( o0OOoOOoo0oo0 , False ) if o0OOoOOoo0oo0 else "" ) ) )
if 41 - 41: II111iiii . oO0o + O0 % i1IIi . Ii1I
if 90 - 90: ooOoO0o * I1IiiI / II111iiii % Oo0Ooo % OoooooooOO
if 78 - 78: OoooooooOO . IiII
if 55 - 55: I11i / I1ii11iIi11i * O0 + IiII % I11i
if 69 - 69: o0oOOo0O0Ooo % iIii1I11I1II1 . OoooooooOO - ooOoO0o
packet = oooO0oo00oOOoo0O . end_of_rlocs ( packet , iI1iii1IIIIi . rloc_count )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
if 94 - 94: iIii1I11I1II1 / Oo0Ooo % IiII * IiII
continue
if 62 - 62: I11i . IiII - OOooOOo - I1Ii111 / OoooooooOO . Ii1I
if 28 - 28: iII111i / I1ii11iIi11i - OoOoOO00 * Oo0Ooo + Ii1I * OoOoOO00
I1ii1I = ooOOOo0o0oo . site
if 94 - 94: oO0o
if ( OoO0OOoooooOO ) :
ooo0OO = ooOOOo0o0oo . parent_for_more_specifics . print_eid_tuple ( )
lprint ( " Found ams {} for site '{}' for registering prefix {}" . format ( green ( ooo0OO , False ) , I1ii1I . site_name , green ( oOoo0OooOOo00 , False ) ) )
if 95 - 95: ooOoO0o * O0 + OOooOOo
else :
ooo0OO = green ( ooOOOo0o0oo . print_eid_tuple ( ) , False )
lprint ( " Found {} for site '{}' for registering prefix {}" . format ( ooo0OO , I1ii1I . site_name , green ( oOoo0OooOOo00 , False ) ) )
if 11 - 11: i1IIi / OoOoOO00 + OoOoOO00 + I1ii11iIi11i + OOooOOo
if 21 - 21: ooOoO0o
if 28 - 28: OoOoOO00 + OoOoOO00 - OoOoOO00 / ooOoO0o
if 81 - 81: oO0o
if 34 - 34: o0oOOo0O0Ooo * OOooOOo - i1IIi * o0oOOo0O0Ooo * Oo0Ooo
if 59 - 59: iIii1I11I1II1 / Oo0Ooo % II111iiii
if ( I1ii1I . shutdown ) :
lprint ( ( " Rejecting registration for site '{}', configured in " +
"admin-shutdown state" ) . format ( I1ii1I . site_name ) )
packet = oooO0oo00oOOoo0O . end_of_rlocs ( packet , iI1iii1IIIIi . rloc_count )
continue
if 55 - 55: ooOoO0o - IiII + o0oOOo0O0Ooo
if 48 - 48: O0 - iIii1I11I1II1 * OOooOOo
if 33 - 33: I11i
if 63 - 63: Ii1I % II111iiii / OoOoOO00 + Oo0Ooo
if 28 - 28: OoO0O00 + I1IiiI . oO0o + II111iiii - O0
if 32 - 32: oO0o
if 62 - 62: i11iIiiIii + OoooooooOO + IiII - OoO0O00 / oO0o * iIii1I11I1II1
if 91 - 91: o0oOOo0O0Ooo - i11iIiiIii + Oo0Ooo % iIii1I11I1II1
o0OOOoO0O = oOOOoO0 . key_id
if ( I1ii1I . auth_key . has_key ( o0OOOoO0O ) == False ) : o0OOOoO0O = 0
O0O0 = I1ii1I . auth_key [ o0OOOoO0O ]
if 40 - 40: I1Ii111 * OoOoOO00 * Ii1I % iII111i % ooOoO0o . Ii1I
i111II = lisp_verify_auth ( iIiiII11 , oOOOoO0 . alg_id ,
oOOOoO0 . auth_data , O0O0 )
iiIi1i1i = "dynamic " if ooOOOo0o0oo . dynamic else ""
if 69 - 69: i11iIiiIii + Oo0Ooo / II111iiii % OoOoOO00
O0O0oooo = bold ( "passed" if i111II else "failed" , False )
o0OOOoO0O = "key-id {}" . format ( o0OOOoO0O ) if o0OOOoO0O == oOOOoO0 . key_id else "bad key-id {}" . format ( oOOOoO0 . key_id )
if 4 - 4: II111iiii + ooOoO0o
lprint ( " Authentication {} for {}EID-prefix {}, {}" . format ( O0O0oooo , iiIi1i1i , green ( oOoo0OooOOo00 , False ) , o0OOOoO0O ) )
if 25 - 25: I1IiiI - iIii1I11I1II1
if 11 - 11: I1Ii111 / iII111i - I11i
if 87 - 87: I1Ii111 * i11iIiiIii . OOooOOo . OoooooooOO
if 2 - 2: i11iIiiIii + oO0o
if 40 - 40: i11iIiiIii + oO0o * IiII
if 19 - 19: iII111i / II111iiii . I1Ii111 * I1IiiI - OOooOOo
oO0OoO0 = True
O0o0O0oooo0O = ( lisp_get_eid_hash ( iI1iii1IIIIi . eid ) != None )
if ( O0o0O0oooo0O or ooOOOo0o0oo . require_signature ) :
o000000oOooO = "Required " if ooOOOo0o0oo . require_signature else ""
oOoo0OooOOo00 = green ( oOoo0OooOOo00 , False )
oOOoo0O00 = lisp_find_sig_in_rloc_set ( packet , iI1iii1IIIIi . rloc_count )
if ( oOOoo0O00 == None ) :
oO0OoO0 = False
lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}, no signature found" ) . format ( o000000oOooO ,
# OOooOOo
bold ( "failed" , False ) , oOoo0OooOOo00 ) )
else :
oO0OoO0 = lisp_verify_cga_sig ( iI1iii1IIIIi . eid , oOOoo0O00 )
O0O0oooo = bold ( "passed" if oO0OoO0 else "failed" , False )
lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}" ) . format ( o000000oOooO , O0O0oooo , oOoo0OooOOo00 ) )
if 88 - 88: OoooooooOO / iII111i + i1IIi
if 64 - 64: IiII % I11i / iIii1I11I1II1
if 66 - 66: Ii1I
if 55 - 55: OOooOOo + I1IiiI + IiII . Ii1I * oO0o
if ( i111II == False or oO0OoO0 == False ) :
packet = oooO0oo00oOOoo0O . end_of_rlocs ( packet , iI1iii1IIIIi . rloc_count )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
if 71 - 71: IiII - iII111i % I1IiiI * iII111i
continue
if 27 - 27: ooOoO0o - OoO0O00
if 83 - 83: iII111i * OoOoOO00 - O0 * Ii1I
if 79 - 79: I11i / iII111i % Ii1I / OoOoOO00 % O0 / IiII
if 32 - 32: IiII * II111iiii . Ii1I
if 68 - 68: I11i / O0
if 6 - 6: oO0o - oO0o . I1IiiI % I1ii11iIi11i
if ( oOOOoO0 . merge_register_requested ) :
iiiIIIII1iIi = ooOOOo0o0oo
iiiIIIII1iIi . inconsistent_registration = False
if 22 - 22: Ii1I / I1IiiI / II111iiii
if 31 - 31: II111iiii - Ii1I * OOooOOo - i11iIiiIii | |
single (T1,T2) pair and configured as trunk for VLAN and VXLAN
is enabled.
:param conn: Connection object
:return: True or False
"""
#ToDo(Hareesh): Interfaces are hard coded for now. Make it dynamic.
interfaces = ['GigabitEthernet 2', 'GigabitEthernet 3']
try:
for i in interfaces:
conf_str = snippets.ENABLE_INTF % i
rpc_obj = conn.edit_config(target='running', config=conf_str)
if self._check_response(rpc_obj, 'ENABLE_INTF'):
LOG.info(_LI("Enabled interface %s "), i)
time.sleep(1)
except Exception:
return False
return True
def _get_vrfs(self):
    """Get the VRFs currently configured on the device.

    Parses ``vrf definition <vrf-name>`` lines out of the running config.

    :return: list of VRF names as strings
    """
    vrfs = []
    ios_cfg = self._get_running_config()
    parse = HTParser(ios_cfg)
    vrfs_raw = parse.find_lines("^vrf definition")
    for line in vrfs_raw:
        # raw format: 'vrf definition <vrf-name>'. Use split() (not
        # split(' ')) so repeated whitespace in the config line does not
        # shift the name out of index 2; the old comment claiming an
        # 'ip vrf <vrf-name>' format was stale.
        vrf_name = line.strip().split()[2]
        vrfs.append(vrf_name)
    LOG.info(_LI("VRFs:%s"), vrfs)
    return vrfs
def _get_capabilities(self):
    """Get the server's NETCONF capabilities.

    :return: list of capabilities advertised by the server
    """
    connection = self._get_connection()
    capabilities = list(connection.server_capabilities)
    LOG.debug("Server capabilities: %s", capabilities)
    return capabilities
def _get_running_config(self, split=True):
    """Get the CSR's current running config.

    :param split: when True, return the config as a list of lines;
        otherwise return the raw multiline string.
    :return: current IOS running config, or None if nothing was returned
    """
    connection = self._get_connection()
    reply = connection.get_config(source="running")
    if not reply:
        return None
    # The config text lives two levels below the RPC reply root.
    running_config = ET.fromstring(reply._raw)[0][0]
    if split is True:
        rgx = re.compile("\r*\n+")
        return rgx.split(running_config.text)
    return running_config.text
def _check_acl(self, acl_no, network, netmask):
    """Check whether the expected ACL stanza exists in the running config.

    :param acl_no: access control list (ACL) number
    :param network: network which this ACL permits
    :param netmask: netmask of the network
    :return: True iff the ACL and its permit line are both present
    """
    header = 'ip access-list standard ' + str(acl_no)
    permit_line = ' permit ' + str(network) + ' ' + str(netmask)
    parser = HTParser(self._get_running_config())
    children = parser.find_children(header)
    if not children:
        LOG.debug("%s is not present in config", acl_no)
        return False
    if permit_line in children:
        return True
    LOG.error(_LE("Mismatch in ACL configuration for %s"), acl_no)
    return False
def _cfg_exists(self, cfg_str):
    """Check a partial config string exists in the running config.

    :param cfg_str: config string to check (anchored at line start)
    :return: True or False
    """
    matches = HTParser(self._get_running_config()).find_lines("^" + cfg_str)
    LOG.debug("_cfg_exists(): Found lines %s", matches)
    return len(matches) > 0
def _set_interface(self, name, ip_address, mask):
    """Assign *ip_address*/*mask* to interface *name* on the device."""
    self._edit_running_config(snippets.SET_INTC % (name, ip_address, mask),
                              'SET_INTC')
def _do_create_vrf(self, vrf_name):
    """Create VRF *vrf_name* on the device."""
    self._edit_running_config(snippets.CREATE_VRF % vrf_name, 'CREATE_VRF')
def _do_remove_vrf(self, vrf_name):
    """Delete VRF *vrf_name* from the device, if it is configured."""
    if vrf_name not in self._get_vrfs():
        return
    self._edit_running_config(snippets.REMOVE_VRF % vrf_name, 'REMOVE_VRF')
def _do_create_sub_interface(self, sub_interface, vlan_id, vrf_name, ip,
                             mask):
    """Create a VLAN sub-interface bound to *vrf_name*.

    Logs an error when the VRF is absent but still proceeds, matching the
    original best-effort behavior.
    """
    if vrf_name not in self._get_vrfs():
        LOG.error(_LE("VRF %s not present"), vrf_name)
    self._edit_running_config(
        snippets.CREATE_SUBINTERFACE % (sub_interface, vlan_id,
                                        vrf_name, ip, mask),
        'CREATE_SUBINTERFACE')
def _do_remove_sub_interface(self, sub_interface):
    """Delete *sub_interface* from the device, if it exists."""
    # optional: verify this is the correct sub_interface
    if not self._interface_exists(sub_interface):
        return
    self._edit_running_config(snippets.REMOVE_SUBINTERFACE % sub_interface,
                              'REMOVE_SUBINTERFACE')
def _do_set_ha_hsrp(self, sub_interface, vrf_name, priority, group, ip):
    """Configure HSRP (HA) on *sub_interface* for the given VRF.

    Logs an error when the VRF is absent but still proceeds, matching the
    original best-effort behavior.
    """
    if vrf_name not in self._get_vrfs():
        LOG.error(_LE("VRF %s not present"), vrf_name)
    conf = snippets.SET_INTC_HSRP % (sub_interface, vrf_name, group,
                                     priority, group, ip)
    self._edit_running_config(
        conf, "SET_INTC_HSRP (Group: %s, Priority: % s)" % (group, priority))
def _do_remove_ha_hsrp(self, sub_interface, group):
    """Remove the HSRP group configuration from *sub_interface*."""
    label = ("REMOVE_INTC_HSRP (subinterface:%s, Group:%s)"
             % (sub_interface, group))
    self._edit_running_config(
        snippets.REMOVE_INTC_HSRP % (sub_interface, group), label)
def _get_interface_cfg(self, interface):
    """Return the running-config lines under ``interface <interface>``."""
    parser = HTParser(self._get_running_config())
    return parser.find_children('interface ' + interface)
def _nat_rules_for_internet_access(self, acl_no, network,
                                   netmask,
                                   inner_itfc,
                                   outer_itfc,
                                   vrf_name):
    """Configure the NAT rules for an internal network.

    Configuring NAT rules in the CSR1kv is a three step process. First
    create an ACL for the IP range of the internal network. Then enable
    dynamic source NATing on the external interface of the CSR for this
    ACL and VRF of the neutron router. Finally enable NAT on the
    interfaces of the CSR where the internal and external networks are
    connected.

    :param acl_no: ACL number of the internal network.
    :param network: internal network
    :param netmask: netmask of the internal network.
    :param inner_itfc: (name of) interface connected to the internal
        network
    :param outer_itfc: (name of) interface connected to the external
        network
    :param vrf_name: VRF corresponding to this virtual router
    :return: None; configuration failures surface through
        ``_edit_running_config`` / ``_check_response``, not a return value
    :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
        CSR1kvConfigException
    """
    # Duplicate ACL creation throws error, so checking
    # it first. Remove it in future as this is not common in production
    acl_present = self._check_acl(acl_no, network, netmask)
    if not acl_present:
        conf_str = snippets.CREATE_ACL % (acl_no, network, netmask)
        self._edit_running_config(conf_str, 'CREATE_ACL')
    # Step 2: dynamic source NAT on the external interface for ACL + VRF.
    conf_str = snippets.SET_DYN_SRC_TRL_INTFC % (acl_no, outer_itfc,
                                                 vrf_name)
    self._edit_running_config(conf_str, 'SET_DYN_SRC_TRL_INTFC')
    # Step 3: mark the internal/external interfaces NAT inside/outside.
    conf_str = snippets.SET_NAT % (inner_itfc, 'inside')
    self._edit_running_config(conf_str, 'SET_NAT_INSIDE')
    conf_str = snippets.SET_NAT % (outer_itfc, 'outside')
    self._edit_running_config(conf_str, 'SET_NAT_OUTSIDE')
def _add_interface_nat(self, itfc_name, itfc_type):
    """Mark interface *itfc_name* as NAT 'inside' or 'outside'."""
    self._edit_running_config(snippets.SET_NAT % (itfc_name, itfc_type),
                              'SET_NAT_' + itfc_type)
def _remove_interface_nat(self, itfc_name, itfc_type):
    """Remove the NAT designation from interface *itfc_name*.

    :param itfc_name: interface name
    :param itfc_type: 'inside' or 'outside'
    """
    conf_str = snippets.REMOVE_NAT % (itfc_name, itfc_type)
    # Fix: the action label previously said 'SET_NAT_' although this edit
    # removes the NAT designation; the label is used for logging and
    # response checking, so it should name the operation performed.
    self._edit_running_config(conf_str, 'REMOVE_NAT_' + itfc_type)
def _remove_dyn_nat_rule(self, acl_no, outer_itfc_name, vrf_name):
    """Remove the dynamic source-NAT rule and its ACL."""
    # Only push the NAT removal when the rule is actually configured.
    snat_line = snippets.SNAT_CFG % (acl_no, outer_itfc_name, vrf_name)
    if self._cfg_exists(snat_line):
        self._edit_running_config(
            snippets.REMOVE_DYN_SRC_TRL_INTFC % (acl_no, outer_itfc_name,
                                                 vrf_name),
            'REMOVE_DYN_SRC_TRL_INTFC')
    # The ACL is removed unconditionally.
    self._edit_running_config(snippets.REMOVE_ACL % acl_no, 'REMOVE_ACL')
def _remove_dyn_nat_translations(self):
    """Clear all dynamic NAT translations on the device."""
    self._edit_running_config(snippets.CLEAR_DYN_NAT_TRANS,
                              'CLEAR_DYN_NAT_TRANS')
def _do_add_floating_ip(self, floating_ip, fixed_ip, vrf):
    """Add a static source-NAT mapping fixed_ip -> floating_ip in *vrf*."""
    self._edit_running_config(
        snippets.SET_STATIC_SRC_TRL % (fixed_ip, floating_ip, vrf),
        'SET_STATIC_SRC_TRL')
def _do_remove_floating_ip(self, floating_ip, fixed_ip, vrf):
    """Remove the static source-NAT mapping fixed_ip -> floating_ip."""
    self._edit_running_config(
        snippets.REMOVE_STATIC_SRC_TRL % (fixed_ip, floating_ip, vrf),
        'REMOVE_STATIC_SRC_TRL')
def _get_floating_ip_cfg(self):
    """Return all static source-NAT lines from the running config."""
    parser = HTParser(self._get_running_config())
    return parser.find_lines('ip nat inside source static')
def _add_static_route(self, dest, dest_mask, next_hop, vrf):
    """Add a static route for *dest*/*dest_mask* via *next_hop* in *vrf*."""
    self._edit_running_config(
        snippets.SET_IP_ROUTE % (vrf, dest, dest_mask, next_hop),
        'SET_IP_ROUTE')
def _remove_static_route(self, dest, dest_mask, next_hop, vrf):
    """Remove the static route for *dest*/*dest_mask* via *next_hop*."""
    self._edit_running_config(
        snippets.REMOVE_IP_ROUTE % (vrf, dest, dest_mask, next_hop),
        'REMOVE_IP_ROUTE')
def _get_static_route_cfg(self):
    """Return all 'ip route' lines from the running config."""
    parser = HTParser(self._get_running_config())
    return parser.find_lines('ip route')
def caller_name(self, skip=2):
    """Return the caller's name in 'module.class.method' form.

    `skip` selects how far up the stack to look: skip=1 means "who calls
    me", skip=2 "who calls my caller", and so on.  Returns '' when the
    requested level is beyond the top of the stack.
    """
    frames = inspect.stack()
    if len(frames) <= skip:
        return ''
    frame = frames[skip][0]
    try:
        parts = []
        module = inspect.getmodule(frame)
        # `module` is None when the frame runs directly in a console.
        if module:
            parts.append(module.__name__)
        # A bound 'self' in the caller's locals reveals the class name.
        # NOTE(review): static-method calls look like plain functions here;
        # there is no reliable way to detect them.
        if 'self' in frame.f_locals:
            parts.append(frame.f_locals['self'].__class__.__name__)
        func_name = frame.f_code.co_name
        if func_name != '<module>':  # '<module>' means top-level code
            parts.append(func_name)
        return ".".join(parts)
    finally:
        # Drop the frame reference to avoid a reference cycle.
        del frame
def _edit_running_config(self, conf_str, snippet):
    """Apply *conf_str* to the device's running configuration via netconf.

    :param conf_str: the XML configuration snippet to push.
    :param snippet: label used for logging and error reporting; labels
        containing REMOVE_ or DELETE_ mark best-effort removals whose
        failures are tolerated.
    :raises cfg_exc.CSR1kvConfigException: when the device rejects a
        non-removal config with an RPC error.
    """
    conn = self._get_connection()
    LOG.info(_LI("Config generated for [%(device)s] %(snip)s is:%(conf)s "
                 "caller:%(caller)s"),
             {'device': self.hosting_device['id'],
              'snip': snippet,
              'conf': conf_str,
              'caller': self.caller_name()})
    try:
        rpc_obj = conn.edit_config(target='running', config=conf_str)
        self._check_response(rpc_obj, snippet, conf_str=conf_str)
    except Exception as e:
        # Swallow exceptions caused by REMOVE_/DELETE_ configs so the
        # config agent does not get stuck on removal failures.  This is
        # needed because ncclient 0.4.2 raises an opaque exception when
        # the device (ASR1K router) rejects a config, without reporting
        # the device's error details; ncclient >= 0.4.4 returns the
        # proper error, so this code can be revisited when the ncclient
        # requirement is raised.
        if re.search(r"REMOVE_|DELETE_", snippet):
            LOG.warning(_LW("Pass exception for %s"), snippet)
        elif isinstance(e, ncclient.operations.rpc.RPCError):
            e_tag = e.tag
            e_type = e.type
            params = {'snippet': snippet, 'type': e_type, 'tag': e_tag,
                      'dev_id': self.hosting_device['id'],
                      'ip': self._host_ip, 'confstr': conf_str}
            raise cfg_exc.CSR1kvConfigException(**params)
        else:
            # Previously, unexpected (non-RPCError) failures of non-removal
            # configs were silently swallowed here, contradicting the
            # intent stated above; re-raise so such errors surface.
            raise
def _check_response(self, rpc_obj, snippet_name, conf_str=None):
"""This function checks the rpc response object for status.
This function takes as input the response rpc_obj and the snippet name
that was executed. It parses it to see, if the last edit operation was
a success or not.
<?xml version="1.0" encoding="UTF-8"?>
<rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c"
xmlns="urn:ietf:params:netconf:base:1.0">
<ok />
</rpc-reply>
In case of error, CSR1kv sends a response as follows.
We take the error type and tag.
<?xml version="1.0" encoding="UTF-8"?>
<rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c"
xmlns="urn:ietf:params:netconf:base:1.0">
<rpc-error>
<error-type>protocol</error-type>
<error-tag>operation-failed</error-tag>
<error-severity>error</error-severity>
</rpc-error>
</rpc-reply>
:return: True if the config operation completed successfully
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
CSR1kvConfigException
"""
LOG.debug("RPCReply for %(snippet_name)s is %(rpc_obj)s",
{'snippet_name': snippet_name, | |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.338416,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.60691,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0286335,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.225179,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.152781,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.115481,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.186266,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0940207,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.395767,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.108652,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.30564,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0288637,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00484377,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0458215,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0358227,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0746852,
'Execution Unit/Register Files/Runtime Dynamic': 0.0406664,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.103693,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.262924,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.32991,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000880731,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000880731,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000794477,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00032252,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000514595,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00307053,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00746679,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0344372,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.1905,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.10306,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.116964,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.51533,
'Instruction Fetch Unit/Runtime Dynamic': 0.264999,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0399001,
'L2/Runtime Dynamic': 0.00623492,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.46585,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.600235,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0397524,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0397523,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.65357,
'Load Store Unit/Runtime Dynamic': 0.836032,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0980226,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.196045,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0347885,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0353388,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.136197,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0170399,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.352067,
'Memory Management Unit/Runtime Dynamic': 0.0523788,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.456,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0759268,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00613418,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.057547,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
TestGyp.py: a testing framework for GYP integration tests.
"""
from contextlib import contextmanager
import os
import shutil
import subprocess
import sys
import tempfile
import TestCommon
from TestCommon import __all__
# Extend TestCommon's export list so 'from TestGyp import *' also exposes
# the TestGyp factory defined later in this module.
__all__.extend([
    'TestGyp',
])
def remove_debug_line_numbers(contents):
    """Strip the line-number column from gyp's debug output.

    gyp debug lines have colon-separated columns, the third being the
    source line number; the first three columns are dropped so stdout
    comparison tests are not fragile against gyp-internal line-number
    changes.  Lines with fewer than three colons are kept unchanged —
    the original `len(l) > 3 and ... or l` expression left the split
    LIST in place for such lines (and for lines whose remaining text was
    empty), making the final join raise TypeError.
    """
    stripped = []
    for line in contents.splitlines():
        fields = line.split(":", 3)
        # Keep only the message column; fall back to the raw line.
        stripped.append(fields[3] if len(fields) > 3 else line)
    return "\n".join(stripped)
def match_modulo_line_numbers(contents_a, contents_b):
    """File-contents matcher that ignores gyp debug line numbers."""
    stripped_a = remove_debug_line_numbers(contents_a)
    stripped_b = remove_debug_line_numbers(contents_b)
    return TestCommon.match_exact(stripped_a, stripped_b)
@contextmanager
def LocalEnv(local_env):
    """Temporarily overlay *local_env* onto os.environ, restoring on exit."""
    saved_env = dict(os.environ)
    os.environ.update(local_env)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved_env)
class TestGypBase(TestCommon.TestCommon):
"""
Class for controlling end-to-end tests of gyp generators.
Instantiating this class will create a temporary directory and
arrange for its destruction (via the TestCmd superclass) and
copy all of the non-gyptest files in the directory hierarchy of the
executing script.
The default behavior is to test the 'gyp' or 'gyp.bat' file in the
current directory. An alternative may be specified explicitly on
instantiation, or by setting the TESTGYP_GYP environment variable.
This class should be subclassed for each supported gyp generator
(format). Various abstract methods below define calling signatures
used by the test scripts to invoke builds on the generated build
configuration and to run executables generated by those builds.
"""
# Supported generator formats; overridden by format-specific subclasses.
formats = []
# Resolved path of the build tool; set by initialize_build_tool().
build_tool = None
# Candidate build-tool names searched on $PATH, in order of preference.
build_tool_list = []
# Platform-dependent prefixes/suffixes for built-file names, from TestCommon.
_exe = TestCommon.exe_suffix
_obj = TestCommon.obj_suffix
shobj_ = TestCommon.shobj_prefix
_shobj = TestCommon.shobj_suffix
lib_ = TestCommon.lib_prefix
_lib = TestCommon.lib_suffix
dll_ = TestCommon.dll_prefix
_dll = TestCommon.dll_suffix
# Constants to represent different targets.
ALL = '__all__'
DEFAULT = '__default__'
# Constants for different target types.
EXECUTABLE = '__executable__'
STATIC_LIB = '__static_lib__'
SHARED_LIB = '__shared_lib__'
def __init__(self, gyp=None, *args, **kw):
    """Set up the temporary work area and the gyp invocation environment.

    Locates the gyp driver (argument, $TESTGYP_GYP, or the bundled
    gyn-run script), filters the test by the requested formats, copies
    the test's configuration into a temp workdir, and scrubs environment
    variables that could perturb gyp's output.
    """
    self.origin_cwd = os.path.abspath(os.path.dirname(sys.argv[0]))
    self.extra_args = sys.argv[1:]
    if not gyp:
        gyp = os.environ.get('TESTGYP_GYP')
        if not gyp:
            if sys.platform == 'win32':
                gyp = 'gyn-run.bat'
            else:
                gyp = 'gyn-run'
    self.gyp = os.path.abspath(gyp)
    self.no_parallel = False
    self.formats = [self.format]
    self.initialize_build_tool()
    kw.setdefault('match', TestCommon.match_exact)
    # Put test output in out/testworkarea by default.
    # Use temporary names so there are no collisions.
    workdir = os.path.join('out', kw.get('workdir', 'testworkarea'))
    # Create work area if it doesn't already exist.
    if not os.path.isdir(workdir):
        os.makedirs(workdir)
    # mktemp only generates a name; the TestCmd superclass creates the dir.
    kw['workdir'] = tempfile.mktemp(prefix='testgyp.', dir=workdir)
    formats = kw.pop('formats', [])
    super(TestGypBase, self).__init__(*args, **kw)
    # Formats prefixed with '!' are exclusions; skip the test when the
    # running format is excluded or absent from an explicit include list.
    real_format = self.format.split('-')[-1]
    excluded_formats = set([f for f in formats if f[0] == '!'])
    included_formats = set(formats) - excluded_formats
    if ('!'+real_format in excluded_formats or
        included_formats and real_format not in included_formats):
        msg = 'Invalid test for %r format; skipping test.\n'
        self.skip_test(msg % self.format)
    self.copy_test_configuration(self.origin_cwd, self.workdir)
    self.set_configuration(None)
    # Set $HOME so that gyp doesn't read the user's actual
    # ~/.gyp/include.gypi file, which may contain variables
    # and other settings that would change the output.
    os.environ['HOME'] = self.workpath()
    # Clear $GYP_DEFINES for the same reason.
    if 'GYP_DEFINES' in os.environ:
        del os.environ['GYP_DEFINES']
    # Override the user's language settings, which could
    # otherwise make the output vary from what is expected.
    os.environ['LC_ALL'] = 'C'
def built_file_must_exist(self, name, type=None, **kw):
    """Fail the test unless the specified built file exists."""
    path = self.built_file_path(name, type, **kw)
    return self.must_exist(path)
def built_file_must_not_exist(self, name, type=None, **kw):
    """Fail the test if the specified built file exists."""
    path = self.built_file_path(name, type, **kw)
    return self.must_not_exist(path)
def built_file_must_match(self, name, contents, **kw):
    """Fail the test unless the built file's contents equal *contents*."""
    path = self.built_file_path(name, **kw)
    return self.must_match(path, contents)
def built_file_must_not_match(self, name, contents, **kw):
    """Fail the test if the built file's contents equal *contents*."""
    path = self.built_file_path(name, **kw)
    return self.must_not_match(path, contents)
def built_file_must_not_contain(self, name, contents, **kw):
    """Fail the test if the built file contains *contents*."""
    path = self.built_file_path(name, **kw)
    return self.must_not_contain(path, contents)
def copy_test_configuration(self, source_dir, dest_dir):
    """
    Copies the test configuration from the specified source_dir
    (the directory in which the test script lives) to the
    specified dest_dir (a temporary working directory).

    This ignores all files and directories that begin with
    the string 'gyptest', and all '.svn' subdirectories.
    """
    for root, dirs, files in os.walk(source_dir):
        if '.svn' in dirs:
            dirs.remove('.svn')
        # Prune in place: os.walk only honors in-place modification of its
        # dirnames list.  The previous rebinding (dirs = [...]) did not
        # prune, so the walk still descended into 'gyptest*' directories.
        dirs[:] = [d for d in dirs if not d.startswith('gyptest')]
        files = [f for f in files if not f.startswith('gyptest')]
        for dirname in dirs:
            source = os.path.join(root, dirname)
            destination = source.replace(source_dir, dest_dir)
            os.mkdir(destination)
            if sys.platform != 'win32':
                shutil.copystat(source, destination)
        for filename in files:
            source = os.path.join(root, filename)
            destination = source.replace(source_dir, dest_dir)
            shutil.copy2(source, destination)
def initialize_build_tool(self):
    """
    Initializes the .build_tool attribute.

    Picks the first entry of .build_tool_list that is absolute or can be
    found on the user's $PATH; otherwise falls back to the first entry
    of the list as-is.
    """
    for candidate in self.build_tool_list:
        if not candidate:
            continue
        if os.path.isabs(candidate):
            self.build_tool = candidate
            return
        located = self.where_is(candidate)
        if located:
            self.build_tool = located
            return
    if self.build_tool_list:
        self.build_tool = self.build_tool_list[0]
def relocate(self, source, destination):
    """
    Renames (relocates) *source* (usually a directory) to *destination*,
    creating the destination's parent directory first if necessary.

    Note: Don't use this as a generic "rename" operation.  In the
    future, "relocating" parts of a GYP tree may affect the state of
    the test to modify the behavior of later method calls.
    """
    parent = os.path.dirname(destination)
    if not os.path.exists(parent):
        self.subdir(parent)
    os.rename(source, destination)
def report_not_up_to_date(self):
    """
    Reports that a build is not up-to-date.

    This provides common reporting for formats that have complicated
    conditions for checking whether a build is up-to-date. Formats
    that expect exact output from the command (make) can
    just set stdout= when they call the run_build() method.
    """
    # Python 2 print statements: this framework predates Python 3 support.
    print "Build is not up-to-date:"
    print self.banner('STDOUT ')
    print self.stdout()
    stderr = self.stderr()
    if stderr:
        print self.banner('STDERR ')
        print stderr
def run_gyp(self, gyp_file, *args, **kw):
    """
    Runs gyp against the specified gyp_file with the specified args.
    """
    # Comparisons of gyp's debug output must ignore the line numbers gyp
    # logs, which would otherwise make the tests extremely fragile.
    if kw.pop('ignore_line_numbers', False):
        kw.setdefault('match', match_modulo_line_numbers)
    # TODO: --depth=. works around Chromium-specific tree climbing.
    depth = kw.pop('depth', '.')
    arguments = ['--depth=' + depth, gyp_file]
    if self.no_parallel:
        arguments.append('--no-parallel')
    # TODO: if extra_args contains a '--build' flag
    # we really want that to only apply to the last format (self.format).
    arguments.extend(self.extra_args)
    # Default xcode_ninja_target_pattern to ^.*$ to fix xcode-ninja tests
    pattern = kw.pop('xcode_ninja_target_pattern', '.*')
    arguments.extend(['-G', 'xcode_ninja_target_pattern=%s' % pattern])
    arguments.extend(args)
    return self.run(program=self.gyp, arguments=arguments, **kw)
def run(self, *args, **kw):
    """
    Executes a program by calling the superclass .run() method.

    This exists to provide a common place to filter out keyword
    arguments implemented in this layer, without having to update
    the tool-specific subclasses or clutter the tests themselves
    with platform-specific code.
    """
    # dict.has_key() is deprecated (and removed in Python 3); pop() with a
    # default discards the (Xcode-only) SYMROOT keyword in a single lookup.
    kw.pop('SYMROOT', None)
    super(TestGypBase, self).run(*args, **kw)
def set_configuration(self, configuration):
    """Remember the configuration used for builds and built-file checks."""
    self.configuration = configuration
def configuration_dirname(self):
    """Directory name for the current configuration ('Default' if unset)."""
    if not self.configuration:
        return 'Default'
    return self.configuration.split('|')[0]
def configuration_buildname(self):
    """Build name of the current configuration ('Default' if unset)."""
    return self.configuration if self.configuration else 'Default'
#
# Abstract methods to be defined by format-specific subclasses.
#
def build(self, gyp_file, target=None, **kw):
    """
    Runs a build of the specified target against the configuration
    generated from the specified gyp_file.

    A 'target' argument of None or the special value TestGyp.DEFAULT
    specifies the default argument for the underlying build tool.
    A 'target' argument of TestGyp.ALL specifies the 'all' target
    (if any) of the underlying build tool.

    Abstract; format-specific subclasses must override.
    """
    raise NotImplementedError
def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type.

    Abstract; format-specific subclasses must override.
    """
    raise NotImplementedError
def built_file_basename(self, name, type=None, **kw):
"""
Returns the base name of the specified file name, of the | |
standard_val
if lot_attr in item[attributes_attr]:
sample_doc[SampleConstants.STANDARD_ATTRIBUTES] = {}
sample_doc[SampleConstants.STANDARD_ATTRIBUTES][SampleConstants.BEAD_MODEL] = DEFAULT_BEAD_MODEL
sample_doc[SampleConstants.STANDARD_ATTRIBUTES][SampleConstants.BEAD_BATCH] = item[attributes_attr][lot_attr]
# operation id aggregates across files for a single measurement, e.g.
"""
"operation_id": "92240",
"operation_type": {
"operation_type_id": "415",
"category": "Flow Cytometry",
"name": "Flow Cytometry 96 well"
},
"""
# generate a measurement id unique to this sample
# Biofab does not have additional measurements per file, can fix to 1
def add_measurement_id(measurement_doc, sample_doc, output_doc):
    """Attach a measurement id unique to this sample.

    Biofab has no additional measurements per file, so the local counter
    is fixed at "1"; the id is namespaced with the experiment because
    samples (e.g. bead samples) can be shared across experiments.
    """
    lab = output_doc[SampleConstants.LAB]
    measurement_doc[SampleConstants.MEASUREMENT_ID] = namespace_measurement_id(
        "1", lab, sample_doc, output_doc)
def add_measurement_group_id(measurement_doc, file, sample_doc, output_doc):
    """Record a measurement grouping id used to find linked samples/files."""
    if "generated_by" not in file:
        print("Warning, cannot find generated_by, skipping file: {}".format(file))
        return
    generated_by = file["generated_by"]
    # The grouping value comes from either the operation or the job id.
    if op_id in generated_by:
        group_value = generated_by[op_id]
    elif job_id in generated_by:
        group_value = generated_by[job_id]
    else:
        raise ValueError("Cannot find measurement group id: {}".format(file))
    measurement_doc[SampleConstants.MEASUREMENT_GROUP_ID] = namespace_measurement_id(
        group_value, output_doc[SampleConstants.LAB], sample_doc, output_doc)
def add_measurement_type(file, measurement_doc):
    """Set measurement_doc's MEASUREMENT_TYPE from the file's type attribute,
    falling back to the filename extension when no type is present.

    Flow (FCS) measurements also get default cytometer channels (sytox- or
    non-sytox-specific) and an instrument configuration when missing.

    :raises ValueError: when neither the type attribute nor the filename
        extension can be mapped to a measurement type.
    """
    global is_sytox
    if type_attr in file:
        assay_type = file[type_attr]
        if assay_type == "FCS":
            measurement_type = SampleConstants.MT_FLOW
            if SampleConstants.M_CHANNELS not in measurement_doc:
                if is_sytox:
                    measurement_doc[SampleConstants.M_CHANNELS] = SYTOX_DEFAULT_CYTOMETER_CHANNELS
                else:
                    measurement_doc[SampleConstants.M_CHANNELS] = NO_SYTOX_DEFAULT_CYTOMETER_CHANNELS
            if SampleConstants.M_INSTRUMENT_CONFIGURATION not in measurement_doc:
                measurement_doc[SampleConstants.M_INSTRUMENT_CONFIGURATION] = DEFAULT_CYTOMETER_CONFIGURATION
        elif assay_type == "CSV":
            measurement_type = SampleConstants.MT_PLATE_READER
        else:
            raise ValueError("Could not parse MT: {}".format(assay_type))
    else:
        # Workaround for biofab: files uploaded without a type attribute
        # (e.g. plate-reader .txt files) are classified by extension.
        fn = file['filename'].lower()
        if fn.endswith(".txt"):
            measurement_type = SampleConstants.MT_PLATE_READER
        elif fn.endswith(".fastq.gz"):
            measurement_type = SampleConstants.MT_RNA_SEQ
        elif fn.endswith(".ab1"):
            measurement_type = SampleConstants.MT_SEQUENCING_CHROMATOGRAM
        elif fn.endswith(".jpg"):
            # Was endswith("jpg") (no dot), which also matched any name
            # merely ending in the letters "jpg"; require the dotted
            # extension for consistency with the other branches.
            measurement_type = SampleConstants.MT_IMAGE
        else:
            raise ValueError("Could not parse FT: {}".format(file['filename']))
    measurement_doc[SampleConstants.MEASUREMENT_TYPE] = measurement_type
def add_measurement_doc(measurement_doc, sample_doc, output_doc):
    """Attach *measurement_doc* to *sample_doc*, apply Novel Chassis (NC)
    specific defaults, then record the sample on *output_doc*."""
    sample_doc.setdefault(SampleConstants.MEASUREMENTS, []).append(measurement_doc)
    is_nc = output_doc[SampleConstants.CHALLENGE_PROBLEM] == SampleConstants.CP_NOVEL_CHASSIS
    # NC specific channels for flow measurements.
    if is_nc and measurement_doc[SampleConstants.MEASUREMENT_TYPE] == SampleConstants.MT_FLOW:
        measurement_doc[SampleConstants.M_CHANNELS] = ["FSC-A", "SSC-A", "FL1-A"]
    # NC does not provide control mappings: use the default NC negative
    # strain when the CP matches, and do the same for the positive control.
    # Matching is done on the lab ID, which stays unambiguous even when
    # dictionary common names change.
    if is_nc and SampleConstants.CONTROL_TYPE not in sample_doc and SampleConstants.STRAIN in sample_doc:
        strain_lab_id = sample_doc[SampleConstants.STRAIN][SampleConstants.LAB_ID]
        if strain_lab_id == namespace_lab_id("8", output_doc[SampleConstants.LAB]):
            sample_doc[SampleConstants.CONTROL_TYPE] = SampleConstants.CONTROL_EMPTY_VECTOR
        elif strain_lab_id == namespace_lab_id("23382", output_doc[SampleConstants.LAB]):
            # ON without IPTG, OFF with IPTG, plasmid (high level).
            # The fluorescence control channel is typically not known by the
            # lab and has to be provided externally.
            contents = sample_doc.get(SampleConstants.CONTENTS, [])
            has_iptg = any(
                SampleConstants.NAME in content
                and SampleConstants.LABEL in content[SampleConstants.NAME]
                and content[SampleConstants.NAME][SampleConstants.LABEL] == "IPTG"
                for content in contents)
            if not has_iptg:
                sample_doc[SampleConstants.CONTROL_TYPE] = SampleConstants.CONTROL_HIGH_FITC
                sample_doc[SampleConstants.CONTROL_CHANNEL] = "FL1-A"
    output_doc[SampleConstants.SAMPLES].append(sample_doc)
def add_file_name(config, file, measurement_doc, original_experiment_id, lab, output_doc):
    """Append a file entry (safe name, inferred type, level, namespaced id)
    to the measurement's file list.

    Raises:
        ValueError: if the file record carries neither a ``file_id`` nor an
            ``id`` key.
    """
    if config.get('extend', False):
        file_name = extend_biofab_filename(
            file['filename'], original_experiment_id, file['generated_by'])
    else:
        file_name = file["filename"]
    # same logic as uploads manager
    file_name = safen_filename(file_name)
    file_id = file.get('file_id', None)
    # biofab stores this in multiple ways
    if file_id is None:
        file_id = file.get('id', None)
        if file_id is None:
            # bug fix: previously the '.'.join below ran first and crashed
            # with a TypeError on None, making this error unreachable
            raise ValueError("Could not parse file id? {}".format(file))
        # these are localized _per_ run, namespace using exp_id
        file_id = '.'.join([original_experiment_id, file_id])
    file_id = namespace_file_id(file_id, lab, measurement_doc, output_doc)
    file_type = SampleConstants.infer_file_type(file_name)
    measurement_doc[SampleConstants.FILES].append(
        {SampleConstants.M_NAME: file_name,
         SampleConstants.M_TYPE: file_type,
         SampleConstants.M_LAB_LABEL: [SampleConstants.M_LAB_LABEL_RAW],
         SampleConstants.FILE_LEVEL: SampleConstants.F_LEVEL_0,
         SampleConstants.FILE_ID: file_id})
def extend_biofab_filename(file_name, plan_id, generated_by):
    """Prefix *file_name* with its plan and generating op/job for context.

    Returns ``"<plan_id>/<gen_id>/<file_name>"`` where ``gen_id`` is
    ``op_<operation_id>`` or ``job_<job_id>`` from *generated_by*
    (operation takes precedence), or ``unknown`` when neither is present.
    """
    if 'operation_id' in generated_by:
        source = 'op_' + generated_by['operation_id']
    elif 'job_id' in generated_by:
        source = 'job_' + generated_by['job_id']
    else:
        source = 'unknown'
    return '{}/{}/{}'.format(plan_id, source, file_name)
def add_inducer_experimental_media(original_experiment_id, item, lab, sbh_query, reagents, biofab_doc):
    """Recover inducer/experimental-media reagents for *item* by looking at
    its last source item, and append them to *reagents*.

    The source's combined inducer string (e.g. ``"IPTG_0.25|arab_25.0"``) is
    split into name/concentration pairs; an experimental antibiotic on the
    source is only used when no inducer media was found.
    """
    # no media attribute, try to look up through the last source
    if source_attr in item:
        last_source_ = item[source_attr][0]
        # resolve the source item record by id within the full biofab trace
        last_source_lookup = jq(".items[] | select(.item_id==\"" + last_source_ + "\")").transform(biofab_doc)
        if attributes_attr in last_source_lookup:
            found_inducer_media = False
            # experimental media takes precedence over the plain inducer attribute
            for inducer_media_attr in [experimental_media_attr, inducer_attr]:
                if found_inducer_media:
                    continue
                if inducer_media_attr in last_source_lookup[attributes_attr]:
                    combined_inducer = last_source_lookup[attributes_attr][inducer_media_attr]
                    if combined_inducer != "None":
                        # "IPTG_0.25|arab_25.0"
                        combined_inducer_split = combined_inducer.split("|")
                        found_inducer_media = True
                        for inducer in combined_inducer_split:
                            inducer_split = inducer.split("_")
                            # there are a large number of edge cases here - nones appear everywhere in the latest 17016 trace
                            if len(inducer_split) == 2:
                                # simple "<name>_<concentration>" pair; a literal
                                # "None" in either position suppresses that part
                                if inducer_split[1] == "None":
                                    if inducer_split[0] != "None":
                                        reagents.append(create_media_component(original_experiment_id, inducer_split[0], inducer_split[0], lab, sbh_query))
                                else:
                                    if inducer_split[0] != "None":
                                        reagents.append(create_media_component(original_experiment_id, inducer_split[0], inducer_split[0], lab, sbh_query, inducer_split[1]))
                            else:
                                # now we have something unexpected like None_arab_25.0 or Kan_arab_25.0
                                # and have to carefully try and figure out the legal pairs
                                seen_index = set()
                                for index, sub_inducer_split in enumerate(inducer_split):
                                    if index in seen_index:
                                        continue
                                    if sub_inducer_split == "None":
                                        seen_index.add(index)
                                    else:
                                        if index + 1 < len(inducer_split):
                                            val1 = inducer_split[index]
                                            val2 = inducer_split[index + 1]
                                            # if the next token parses as a float it is
                                            # this token's concentration, else the token
                                            # stands alone (e.g. an antibiotic name)
                                            try:
                                                float(val2)
                                                # arab_25.0
                                                reagents.append(create_media_component(original_experiment_id, val1, val1, lab, sbh_query, val2))
                                                seen_index.add(index)
                                                seen_index.add(index + 1)
                                            except ValueError:
                                                # Kan
                                                reagents.append(create_media_component(original_experiment_id, val1, val1, lab, sbh_query))
                                                seen_index.add(index)
                                        else:
                                            # Kan
                                            val1 = inducer_split[index]
                                            reagents.append(create_media_component(original_experiment_id, val1, val1, lab, sbh_query))
                                            seen_index.add(index)
            # antibiotic is only recorded when no inducer media was found above
            if experimental_antibiotic_attr in last_source_lookup[attributes_attr]:
                if not found_inducer_media:
                    experimental_antibiotic = last_source_lookup[attributes_attr][experimental_antibiotic_attr]
                    if experimental_antibiotic != "None":
                        reagents.append(create_media_component(original_experiment_id, experimental_antibiotic, experimental_antibiotic, lab, sbh_query))
# new format December 2020
def parse_new_time_val(item):
    """Extract a timepoint string ("<qty>:<units>") from the new (December
    2020) attribute format, or None when no duration option is present.

    Durations expressed in minutes are normalized to hours.
    """
    # "Option(s)": {
    #     "duration": {
    #         "qty": 180,
    #         "units": "minute"
    #     }
    if attributes_attr not in item:
        return None
    options = item[attributes_attr].get(alt_options_attr)
    if options is None:
        return None
    time_val = None
    for option_key, option_value in options.items():
        if option_key != alt_duration_attr:
            continue
        time_qty = option_value[qty_attr]
        time_units = option_value[units_attr]
        if time_units == "minute":
            time_qty = float(time_qty) / 60.0
            time_units = "hour"
        time_val = str(time_qty) + ":" + time_units
    return time_val
# new format December 2020
def parse_new_attributes(original_experiment_id, lab, sbh_query, reagents, item, sample_doc):
    """Parse the new (December 2020) attribute format on *item*: media and
    inducer reagents are appended to *reagents*; a temperature option is
    recorded on *sample_doc*."""
    if attributes_attr not in item:
        return
    attributes = item[attributes_attr]
    # alternative media:
    # "Media": {
    #     "SC": {
    if alt_media_attr in attributes:
        for media_name in attributes[alt_media_attr]:
            reagents.append(create_media_component(
                original_experiment_id, media_name, media_name, lab, sbh_query))
    # alternative inducer:
    # "Inducer(s)": {
    #     "beta-estradiol": {
    if alt_inducer_attr in attributes:
        for inducer_name, inducer_value in attributes[alt_inducer_attr].items():
            # only inducers with an explicit final concentration are recorded
            if final_concentration_attr in inducer_value:
                concentration = inducer_value[final_concentration_attr]
                concentration_value_unit = str(concentration[qty_attr]) + ":" + str(concentration[units_attr])
                reagents.append(create_media_component(
                    original_experiment_id, inducer_name, inducer_name, lab, sbh_query, concentration_value_unit))
    # "Option(s)": {
    #     "temperature": {
    #         "qty": 30,
    #         "units": "C"
    #     }
    if alt_options_attr in attributes:
        for option_name, option_value in attributes[alt_options_attr].items():
            if option_name == alt_temperature_attr:
                temperature_qty = option_value[qty_attr]
                temperature_units = option_value[units_attr]
                if temperature_units == "C":
                    temperature_units = "celsius"
                sample_doc[SampleConstants.TEMPERATURE] = create_value_unit(
                    str(temperature_qty) + ":" + temperature_units)
def convert_biofab(schema, encoding, input_file, verbose=True, output=True, output_file=None, config={}, enforce_validation=True, reactor=None):
if reactor is not None:
helper = AgaveHelper(reactor.client)
print("Helper loaded")
else:
print("Helper not loaded")
# for SBH Librarian Mapping
sbh_query = SynBioHubQuery(SD2Constants.SD2_SERVER)
sbh_query.login(config["sbh"]["user"], config["sbh"]["password"])
biofab_doc = json.load(open(input_file, encoding=encoding))
output_doc = {}
lab = SampleConstants.LAB_UWBF
output_doc[SampleConstants.LAB] = biofab_doc.get("attributes").get("lab", lab)
# The UW_BIOFAB provenance dump from Aquarium is now being used by other labs.
# Use the lab value if provided, and validate it
lab_schema = { "$ref" : "https://schema.catalog.sd2e.org/schemas/lab.json"}
lab = output_doc[SampleConstants.LAB]
validate(lab, lab_schema)
original_experiment_id = None
if "plan_id" in biofab_doc:
original_experiment_id = biofab_doc["plan_id"]
elif "experiment_id" in biofab_doc:
original_experiment_id = biofab_doc["experiment_id"]
else:
raise ValueError("Cannot parse plan/experiment_id")
output_doc[SampleConstants.EXPERIMENT_ID] = namespace_experiment_id(original_experiment_id, lab)
output_doc[SampleConstants.CHALLENGE_PROBLEM] = biofab_doc.get("attributes", {}).get("challenge_problem")
output_doc[SampleConstants.EXPERIMENT_REFERENCE] = biofab_doc.get(
"attributes", {}).get("experiment_reference")
map_experiment_reference(config, output_doc)
output_doc[SampleConstants.SAMPLES] = []
# is this a Sytox plan? Per:
# Biofab has a mix of Sytox and Non-Sytox YS plans.
# We need to peek ahead, as this affects FCS channel mappings
global is_sytox
try:
sytox_found = jq(".items[] | select (.attributes.control == \"positive_sytox\")").transform(biofab_doc)
if sytox_found is not None and len(sytox_found) > 0:
print(sytox_found)
print("Sytox found for plan: {}".format(original_experiment_id))
is_sytox = True
else:
print("No Sytox found for plan: {}".format(original_experiment_id))
is_sytox = False
except StopIteration:
print("Warning, could not find sytox control for plan: {}".format(original_experiment_id))
missing_part_of_items = set()
missing_part_of_map = {}
# process bottom up from file -> sample
for biofab_sample in biofab_doc["files"]:
sample_doc = {}
if source_attr not in biofab_sample:
print("Warning, file is missing a source {}".format(biofab_sample))
# experimental design is a special case
if type_attr in biofab_sample and biofab_sample[type_attr] == "FCS":
print("Trying to resolve as | |
<filename>addons/account/tests/test_account_payment_register.py<gh_stars>0
# -*- coding: utf-8 -*-
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.exceptions import UserError
from odoo.tests import tagged
@tagged('post_install', '-at_install')
class TestAccountPaymentRegister(AccountTestInvoicingCommon):
    @classmethod
    def setUpClass(cls, chart_template_ref=None):
        """Shared fixtures for the payment-register tests.

        Creates an extra currency, copies of the bank journal's payment
        debit/credit accounts, custom in/outbound payment methods, and a set
        of posted customer invoices and vendor bills used by the tests below.
        """
        super().setUpClass(chart_template_ref=chart_template_ref)
        # A third currency (2017 rate 0.01) for multi-currency scenarios.
        cls.currency_data_3 = cls.setup_multi_currency_data({
            'name': "Umbrella",
            'symbol': '☂',
            'currency_unit_label': "Umbrella",
            'currency_subunit_label': "Broken Umbrella",
        }, rate2017=0.01)
        # Dedicated copies of the journal's payment accounts, written back
        # onto the journal below.
        cls.payment_debit_account_id = cls.company_data['default_journal_bank'].payment_debit_account_id.copy()
        cls.payment_credit_account_id = cls.company_data['default_journal_bank'].payment_credit_account_id.copy()
        # Custom payment methods in addition to the standard manual ones.
        cls.custom_payment_method_in = cls.env['account.payment.method'].create({
            'name': 'custom_payment_method_in',
            'code': 'CUSTOMIN',
            'payment_type': 'inbound',
        })
        cls.manual_payment_method_in = cls.env.ref('account.account_payment_method_manual_in')
        cls.custom_payment_method_out = cls.env['account.payment.method'].create({
            'name': 'custom_payment_method_out',
            'code': 'CUSTOMOUT',
            'payment_type': 'outbound',
        })
        cls.manual_payment_method_out = cls.env.ref('account.account_payment_method_manual_out')
        cls.company_data['default_journal_bank'].write({
            'payment_debit_account_id': cls.payment_debit_account_id.id,
            'payment_credit_account_id': cls.payment_credit_account_id.id,
            'inbound_payment_method_ids': [(6, 0, (
                cls.manual_payment_method_in.id,
                cls.custom_payment_method_in.id,
            ))],
            'outbound_payment_method_ids': [(6, 0, (
                cls.env.ref('account.account_payment_method_manual_out').id,
                cls.custom_payment_method_out.id,
                cls.manual_payment_method_out.id,
            ))],
        })
        # Customer invoices sharing the same batch.
        cls.out_invoice_1 = cls.env['account.move'].create({
            'move_type': 'out_invoice',
            'date': '2017-01-01',
            'invoice_date': '2017-01-01',
            'partner_id': cls.partner_a.id,
            'currency_id': cls.currency_data['currency'].id,
            'invoice_line_ids': [(0, 0, {'product_id': cls.product_a.id, 'price_unit': 1000.0})],
        })
        cls.out_invoice_2 = cls.env['account.move'].create({
            'move_type': 'out_invoice',
            'date': '2017-01-01',
            'invoice_date': '2017-01-01',
            'partner_id': cls.partner_a.id,
            'currency_id': cls.currency_data['currency'].id,
            'invoice_line_ids': [(0, 0, {'product_id': cls.product_a.id, 'price_unit': 2000.0})],
        })
        # Two small company-currency invoices (12.01 / 11.99).
        cls.out_invoice_3 = cls.env['account.move'].create({
            'move_type': 'out_invoice',
            'date': '2017-01-01',
            'invoice_date': '2017-01-01',
            'partner_id': cls.partner_a.id,
            'invoice_line_ids': [(0, 0, {'product_id': cls.product_a.id, 'price_unit': 12.01})],
        })
        cls.out_invoice_4 = cls.env['account.move'].create({
            'move_type': 'out_invoice',
            'date': '2017-01-01',
            'invoice_date': '2017-01-01',
            'partner_id': cls.partner_a.id,
            'invoice_line_ids': [(0, 0, {'product_id': cls.product_a.id, 'price_unit': 11.99})],
        })
        (cls.out_invoice_1 + cls.out_invoice_2 + cls.out_invoice_3 + cls.out_invoice_4).action_post()
        # Vendor bills, in_invoice_1 + in_invoice_2 are sharing the same batch but not in_invoice_3.
        cls.in_invoice_1 = cls.env['account.move'].create({
            'move_type': 'in_invoice',
            'date': '2017-01-01',
            'invoice_date': '2017-01-01',
            'partner_id': cls.partner_a.id,
            'invoice_line_ids': [(0, 0, {'product_id': cls.product_a.id, 'price_unit': 1000.0})],
        })
        cls.in_invoice_2 = cls.env['account.move'].create({
            'move_type': 'in_invoice',
            'date': '2017-01-01',
            'invoice_date': '2017-01-01',
            'partner_id': cls.partner_a.id,
            'invoice_line_ids': [(0, 0, {'product_id': cls.product_a.id, 'price_unit': 2000.0})],
        })
        # Different partner and foreign currency -> its own batch.
        cls.in_invoice_3 = cls.env['account.move'].create({
            'move_type': 'in_invoice',
            'date': '2017-01-01',
            'invoice_date': '2017-01-01',
            'partner_id': cls.partner_b.id,
            'currency_id': cls.currency_data['currency'].id,
            'invoice_line_ids': [(0, 0, {'product_id': cls.product_a.id, 'price_unit': 3000.0})],
        })
        (cls.in_invoice_1 + cls.in_invoice_2 + cls.in_invoice_3).action_post()
def test_register_payment_single_batch_grouped_keep_open_lower_amount(self):
''' Pay 800.0 with 'open' as payment difference handling on two customer invoices (1000 + 2000). '''
active_ids = (self.out_invoice_1 + self.out_invoice_2).ids
payments = self.env['account.payment.register'].with_context(active_model='account.move', active_ids=active_ids).create({
'amount': 800.0,
'group_payment': True,
'payment_difference_handling': 'open',
'currency_id': self.currency_data['currency'].id,
'payment_method_id': self.custom_payment_method_in.id,
})._create_payments()
self.assertRecordValues(payments, [{
'ref': 'INV/2017/01/0001 INV/2017/01/0002',
'payment_method_id': self.custom_payment_method_in.id,
}])
self.assertRecordValues(payments.line_ids.sorted('balance'), [
# Receivable line:
{
'debit': 0.0,
'credit': 400.0,
'currency_id': self.currency_data['currency'].id,
'amount_currency': -800.0,
'reconciled': True,
},
# Liquidity line:
{
'debit': 400.0,
'credit': 0.0,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 800.0,
'reconciled': False,
},
])
def test_register_payment_single_batch_grouped_keep_open_higher_amount(self):
''' Pay 3100.0 with 'open' as payment difference handling on two customer invoices (1000 + 2000). '''
active_ids = (self.out_invoice_1 + self.out_invoice_2).ids
payments = self.env['account.payment.register'].with_context(active_model='account.move', active_ids=active_ids).create({
'amount': 3100.0,
'group_payment': True,
'payment_difference_handling': 'open',
'currency_id': self.currency_data['currency'].id,
'payment_method_id': self.custom_payment_method_in.id,
})._create_payments()
self.assertRecordValues(payments, [{
'ref': 'INV/2017/01/0001 INV/2017/01/0002',
'payment_method_id': self.custom_payment_method_in.id,
}])
self.assertRecordValues(payments.line_ids.sorted('balance'), [
# Receivable line:
{
'debit': 0.0,
'credit': 1550.0,
'currency_id': self.currency_data['currency'].id,
'amount_currency': -3100.0,
'reconciled': False,
},
# Liquidity line:
{
'debit': 1550.0,
'credit': 0.0,
'currency_id': self.currency_data['currency'].id,
'amount_currency': 3100.0,
'reconciled': False,
},
])
    def test_register_payment_single_batch_grouped_writeoff_lower_amount_debit(self):
        ''' Pay 800.0 with 'reconcile' as payment difference handling on two customer invoices (1000 + 2000). '''
        active_ids = (self.out_invoice_1 + self.out_invoice_2).ids
        payments = self.env['account.payment.register'].with_context(active_model='account.move', active_ids=active_ids).create({
            'amount': 800.0,
            'group_payment': True,
            'payment_difference_handling': 'reconcile',
            'writeoff_account_id': self.company_data['default_account_revenue'].id,
            'writeoff_label': 'writeoff',
            'payment_method_id': self.custom_payment_method_in.id,
        })._create_payments()
        self.assertRecordValues(payments, [{
            'ref': 'INV/2017/01/0001 INV/2017/01/0002',
            'payment_method_id': self.custom_payment_method_in.id,
        }])
        # The 2200.0 difference is written off, so the receivable covers the
        # full 3000.0 and is reconciled.
        self.assertRecordValues(payments.line_ids.sorted('balance'), [
            # Receivable line:
            {
                'debit': 0.0,
                'credit': 1500.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': -3000.0,
                'reconciled': True,
            },
            # Liquidity line:
            {
                'debit': 400.0,
                'credit': 0.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': 800.0,
                'reconciled': False,
            },
            # Writeoff line:
            {
                'debit': 1100.0,
                'credit': 0.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': 2200.0,
                'reconciled': False,
            },
        ])
    def test_register_payment_single_batch_grouped_writeoff_higher_amount_debit(self):
        ''' Pay 3100.0 with 'reconcile' as payment difference handling on two customer invoices (1000 + 2000). '''
        active_ids = (self.out_invoice_1 + self.out_invoice_2).ids
        payments = self.env['account.payment.register'].with_context(active_model='account.move', active_ids=active_ids).create({
            'amount': 3100.0,
            'group_payment': True,
            'payment_difference_handling': 'reconcile',
            'writeoff_account_id': self.company_data['default_account_revenue'].id,
            'writeoff_label': 'writeoff',
            'payment_method_id': self.custom_payment_method_in.id,
        })._create_payments()
        self.assertRecordValues(payments, [{
            'ref': 'INV/2017/01/0001 INV/2017/01/0002',
            'payment_method_id': self.custom_payment_method_in.id,
        }])
        # The 100.0 overpayment goes to the writeoff account; the receivable
        # still matches the invoices' 3000.0 and is reconciled.
        self.assertRecordValues(payments.line_ids.sorted('balance'), [
            # Receivable line:
            {
                'debit': 0.0,
                'credit': 1500.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': -3000.0,
                'reconciled': True,
            },
            # Writeoff line:
            {
                'debit': 0.0,
                'credit': 50.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': -100.0,
                'reconciled': False,
            },
            # Liquidity line:
            {
                'debit': 1550.0,
                'credit': 0.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': 3100.0,
                'reconciled': False,
            },
        ])
    def test_register_payment_single_batch_grouped_writeoff_lower_amount_credit(self):
        ''' Pay 800.0 with 'reconcile' as payment difference handling on two vendor bills (1000 + 2000). '''
        active_ids = (self.in_invoice_1 + self.in_invoice_2).ids
        payments = self.env['account.payment.register'].with_context(active_model='account.move', active_ids=active_ids).create({
            'amount': 800.0,
            'group_payment': True,
            'payment_difference_handling': 'reconcile',
            'writeoff_account_id': self.company_data['default_account_revenue'].id,
            'writeoff_label': 'writeoff',
            'payment_method_id': self.custom_payment_method_in.id,
        })._create_payments()
        self.assertRecordValues(payments, [{
            'ref': 'BILL/2017/01/0001 BILL/2017/01/0002',
            'payment_method_id': self.custom_payment_method_in.id,
        }])
        # Mirror of the customer case: the 2200.0 difference is written off
        # on the credit side and the payable is reconciled in full.
        self.assertRecordValues(payments.line_ids.sorted('balance'), [
            # Writeoff line:
            {
                'debit': 0.0,
                'credit': 2200.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': -2200.0,
                'reconciled': False,
            },
            # Liquidity line:
            {
                'debit': 0.0,
                'credit': 800.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': -800.0,
                'reconciled': False,
            },
            # Payable line:
            {
                'debit': 3000.0,
                'credit': 0.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': 3000.0,
                'reconciled': True,
            },
        ])
    def test_register_payment_single_batch_grouped_writeoff_higher_amount_credit(self):
        ''' Pay 3100.0 with 'reconcile' as payment difference handling on two vendor bills (1000 + 2000). '''
        active_ids = (self.in_invoice_1 + self.in_invoice_2).ids
        payments = self.env['account.payment.register'].with_context(active_model='account.move', active_ids=active_ids).create({
            'amount': 3100.0,
            'group_payment': True,
            'payment_difference_handling': 'reconcile',
            'writeoff_account_id': self.company_data['default_account_revenue'].id,
            'writeoff_label': 'writeoff',
            'payment_method_id': self.custom_payment_method_in.id,
        })._create_payments()
        self.assertRecordValues(payments, [{
            'ref': 'BILL/2017/01/0001 BILL/2017/01/0002',
            'payment_method_id': self.custom_payment_method_in.id,
        }])
        # The 100.0 overpayment is written off on the debit side; the payable
        # matches the bills' 3000.0 and is reconciled.
        self.assertRecordValues(payments.line_ids.sorted('balance'), [
            # Liquidity line:
            {
                'debit': 0.0,
                'credit': 3100.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': -3100.0,
                'reconciled': False,
            },
            # Writeoff line:
            {
                'debit': 100.0,
                'credit': 0.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': 100.0,
                'reconciled': False,
            },
            # Payable line:
            {
                'debit': 3000.0,
                'credit': 0.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': 3000.0,
                'reconciled': True,
            },
        ])
    def test_register_payment_single_batch_not_grouped(self):
        ''' Choose to pay two customer invoices with separated payments (1000 + 2000). '''
        active_ids = (self.out_invoice_1 + self.out_invoice_2).ids
        payments = self.env['account.payment.register'].with_context(active_model='account.move', active_ids=active_ids).create({
            'group_payment': False,
        })._create_payments()
        # One payment per invoice, each with the default manual inbound method.
        self.assertRecordValues(payments, [
            {
                'ref': 'INV/2017/01/0001',
                'payment_method_id': self.manual_payment_method_in.id,
            },
            {
                'ref': 'INV/2017/01/0002',
                'payment_method_id': self.manual_payment_method_in.id,
            },
        ])
        self.assertRecordValues(payments[0].line_ids.sorted('balance') + payments[1].line_ids.sorted('balance'), [
            # == Payment 1: to pay out_invoice_1 ==
            # Receivable line:
            {
                'debit': 0.0,
                'credit': 500.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': -1000.0,
                'reconciled': True,
            },
            # Liquidity line:
            {
                'debit': 500.0,
                'credit': 0.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': 1000.0,
                'reconciled': False,
            },
            # == Payment 2: to pay out_invoice_2 ==
            # Receivable line:
            {
                'debit': 0.0,
                'credit': 1000.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': -2000.0,
                'reconciled': True,
            },
            # Liquidity line:
            {
                'debit': 1000.0,
                'credit': 0.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': 2000.0,
                'reconciled': False,
            },
        ])
    def test_register_payment_multi_batches_grouped(self):
        ''' Choose to pay multiple batches: one with two vendor bills (1000 + 2000)
        and one with a foreign-currency vendor bill of 3000, by grouping payments.
        '''
        active_ids = (self.in_invoice_1 + self.in_invoice_2 + self.in_invoice_3).ids
        payments = self.env['account.payment.register'].with_context(active_model='account.move', active_ids=active_ids).create({
            'group_payment': True,
        })._create_payments()
        # Grouping applies within a batch: bills 1 and 2 share one payment,
        # bill 3 (different partner/currency) gets its own.
        self.assertRecordValues(payments, [
            {
                'ref': 'BILL/2017/01/0001 BILL/2017/01/0002',
                'payment_method_id': self.manual_payment_method_out.id,
            },
            {
                'ref': 'BILL/2017/01/0003',
                'payment_method_id': self.manual_payment_method_out.id,
            },
        ])
        self.assertRecordValues(payments[0].line_ids.sorted('balance') + payments[1].line_ids.sorted('balance'), [
            # == Payment 1: to pay in_invoice_1 & in_invoice_2 ==
            # Liquidity line:
            {
                'debit': 0.0,
                'credit': 3000.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': -3000.0,
                'reconciled': False,
            },
            # Payable line:
            {
                'debit': 3000.0,
                'credit': 0.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': 3000.0,
                'reconciled': True,
            },
            # == Payment 2: to pay in_invoice_3 ==
            # Liquidity line:
            {
                'debit': 0.0,
                'credit': 1500.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': -3000.0,
                'reconciled': False,
            },
            # Payable line:
            {
                'debit': 1500.0,
                'credit': 0.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': 3000.0,
                'reconciled': True,
            },
        ])
    def test_register_payment_multi_batches_not_grouped(self):
        ''' Choose to pay multiple batches: one with two vendor bills (1000 + 2000)
        and one with a foreign-currency vendor bill of 3000, by splitting payments.
        '''
        active_ids = (self.in_invoice_1 + self.in_invoice_2 + self.in_invoice_3).ids
        payments = self.env['account.payment.register'].with_context(active_model='account.move', active_ids=active_ids).create({
            'group_payment': False,
        })._create_payments()
        # Without grouping, every bill gets its own payment.
        self.assertRecordValues(payments, [
            {
                'ref': 'BILL/2017/01/0001',
                'payment_method_id': self.manual_payment_method_out.id,
            },
            {
                'ref': 'BILL/2017/01/0002',
                'payment_method_id': self.manual_payment_method_out.id,
            },
            {
                'ref': 'BILL/2017/01/0003',
                'payment_method_id': self.manual_payment_method_out.id,
            },
        ])
        self.assertRecordValues(payments[0].line_ids.sorted('balance') + payments[1].line_ids.sorted('balance') + payments[2].line_ids.sorted('balance'), [
            # == Payment 1: to pay in_invoice_1 ==
            # Liquidity line:
            {
                'debit': 0.0,
                'credit': 1000.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': -1000.0,
                'reconciled': False,
            },
            # Payable line:
            {
                'debit': 1000.0,
                'credit': 0.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': 1000.0,
                'reconciled': True,
            },
            # == Payment 2: to pay in_invoice_2 ==
            # Liquidity line:
            {
                'debit': 0.0,
                'credit': 2000.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': -2000.0,
                'reconciled': False,
            },
            # Payable line:
            {
                'debit': 2000.0,
                'credit': 0.0,
                'currency_id': self.company_data['currency'].id,
                'amount_currency': 2000.0,
                'reconciled': True,
            },
            # == Payment 3: to pay in_invoice_3 ==
            # Liquidity line:
            {
                'debit': 0.0,
                'credit': 1500.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': -3000.0,
                'reconciled': False,
            },
            # Payable line:
            {
                'debit': 1500.0,
                'credit': 0.0,
                'currency_id': self.currency_data['currency'].id,
                'amount_currency': 3000.0,
                'reconciled': True,
            },
        ])
def test_register_payment_constraints(self):
# Test to register a payment for a draft journal entry.
self.out_invoice_1.button_draft()
with self.assertRaises(UserError), self.cr.savepoint():
self.env['account.payment.register']\
.with_context(active_model='account.move', active_ids=self.out_invoice_1.ids)\
.create({})
# | |
"objc-archive",
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = [
"-static",
"-filelist",
"%{obj_list_path}",
"-arch_only",
"<architecture>",
"-syslibroot",
"%{sdk_dir}",
"-o",
"%{archive_path}",
],
),
],
),
],
implies = ["apple_env"],
tools = [
tool(
path = "<tool_dir>/libtool",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "ios_arm64"):
objc_archive_action = action_config(
action_name = "objc-archive",
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = [
"-static",
"-filelist",
"%{obj_list_path}",
"-arch_only",
"arm64",
"-syslibroot",
"%{sdk_dir}",
"-o",
"%{archive_path}",
],
),
],
),
],
implies = ["apple_env"],
tools = [
tool(
path = "ios/libtool",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "tvos_arm64"):
objc_archive_action = action_config(
action_name = "objc-archive",
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = [
"-static",
"-filelist",
"%{obj_list_path}",
"-arch_only",
"arm64",
"-syslibroot",
"%{sdk_dir}",
"-o",
"%{archive_path}",
],
),
],
),
],
implies = ["apple_env"],
tools = [
tool(
path = "tvos/libtool",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "ios_armv7"):
objc_archive_action = action_config(
action_name = "objc-archive",
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = [
"-static",
"-filelist",
"%{obj_list_path}",
"-arch_only",
"armv7",
"-syslibroot",
"%{sdk_dir}",
"-o",
"%{archive_path}",
],
),
],
),
],
implies = ["apple_env"],
tools = [
tool(
path = "ios/libtool",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "watchos_armv7k"):
objc_archive_action = action_config(
action_name = "objc-archive",
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = [
"-static",
"-filelist",
"%{obj_list_path}",
"-arch_only",
"armv7k",
"-syslibroot",
"%{sdk_dir}",
"-o",
"%{archive_path}",
],
),
],
),
],
implies = ["apple_env"],
tools = [
tool(
path = "watchos/libtool",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "ios_i386"):
objc_archive_action = action_config(
action_name = "objc-archive",
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = [
"-static",
"-filelist",
"%{obj_list_path}",
"-arch_only",
"i386",
"-syslibroot",
"%{sdk_dir}",
"-o",
"%{archive_path}",
],
),
],
),
],
implies = ["apple_env"],
tools = [
tool(
path = "iossim/libtool",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "watchos_i386"):
objc_archive_action = action_config(
action_name = "objc-archive",
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = [
"-static",
"-filelist",
"%{obj_list_path}",
"-arch_only",
"i386",
"-syslibroot",
"%{sdk_dir}",
"-o",
"%{archive_path}",
],
),
],
),
],
implies = ["apple_env"],
tools = [
tool(
path = "watchsim/libtool",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "ios_x86_64"):
objc_archive_action = action_config(
action_name = "objc-archive",
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = [
"-static",
"-filelist",
"%{obj_list_path}",
"-arch_only",
"x86_64",
"-syslibroot",
"%{sdk_dir}",
"-o",
"%{archive_path}",
],
),
],
),
],
implies = ["apple_env"],
tools = [
tool(
path = "iossim/libtool",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "darwin_x86_64"):
objc_archive_action = action_config(
action_name = "objc-archive",
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = [
"-static",
"-filelist",
"%{obj_list_path}",
"-arch_only",
"x86_64",
"-syslibroot",
"%{sdk_dir}",
"-o",
"%{archive_path}",
],
),
],
),
],
implies = ["apple_env"],
tools = [
tool(
path = "mac/libtool",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "tvos_x86_64"):
objc_archive_action = action_config(
action_name = "objc-archive",
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = [
"-static",
"-filelist",
"%{obj_list_path}",
"-arch_only",
"x86_64",
"-syslibroot",
"%{sdk_dir}",
"-o",
"%{archive_path}",
],
),
],
),
],
implies = ["apple_env"],
tools = [
tool(
path = "tvsim/libtool",
execution_requirements = xcode_execution_requirements,
),
],
)
else:
objc_archive_action = None
if (ctx.attr.cpu == "x64_windows"):
objcpp_compile_action = action_config(
action_name = ACTION_NAMES.objcpp_compile,
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = ["-arch <architecture>", "-stdlib=libc++", "-std=gnu++11"],
),
],
),
],
implies = [
"apply_default_compiler_flags",
"apply_default_warnings",
"framework_paths",
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
],
tools = [
tool(
path = "<tool_dir>/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "ios_arm64"):
objcpp_compile_action = action_config(
action_name = ACTION_NAMES.objcpp_compile,
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = ["-arch arm64", "-stdlib=libc++", "-std=gnu++11"],
),
],
),
],
implies = [
"apply_default_compiler_flags",
"apply_default_warnings",
"framework_paths",
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
],
tools = [
tool(
path = "ios/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "tvos_arm64"):
objcpp_compile_action = action_config(
action_name = ACTION_NAMES.objcpp_compile,
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = ["-arch arm64", "-stdlib=libc++", "-std=gnu++11"],
),
],
),
],
implies = [
"apply_default_compiler_flags",
"apply_default_warnings",
"framework_paths",
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
],
tools = [
tool(
path = "tvos/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "ios_armv7"):
objcpp_compile_action = action_config(
action_name = ACTION_NAMES.objcpp_compile,
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = ["-arch armv7", "-stdlib=libc++", "-std=gnu++11"],
),
],
),
],
implies = [
"apply_default_compiler_flags",
"apply_default_warnings",
"framework_paths",
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
],
tools = [
tool(
path = "ios/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "watchos_armv7k"):
objcpp_compile_action = action_config(
action_name = ACTION_NAMES.objcpp_compile,
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = ["-arch armv7k", "-stdlib=libc++", "-std=gnu++11"],
),
],
),
],
implies = [
"apply_default_compiler_flags",
"apply_default_warnings",
"framework_paths",
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
],
tools = [
tool(
path = "watchos/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "ios_i386"):
objcpp_compile_action = action_config(
action_name = ACTION_NAMES.objcpp_compile,
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = ["-arch i386", "-stdlib=libc++", "-std=gnu++11"],
),
],
),
],
implies = [
"apply_default_compiler_flags",
"apply_default_warnings",
"framework_paths",
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
"apply_simulator_compiler_flags",
],
tools = [
tool(
path = "iossim/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "watchos_i386"):
objcpp_compile_action = action_config(
action_name = ACTION_NAMES.objcpp_compile,
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = ["-arch i386", "-stdlib=libc++", "-std=gnu++11"],
),
],
),
],
implies = [
"apply_default_compiler_flags",
"apply_default_warnings",
"framework_paths",
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
"apply_simulator_compiler_flags",
],
tools = [
tool(
path = "watchsim/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "ios_x86_64"):
objcpp_compile_action = action_config(
action_name = ACTION_NAMES.objcpp_compile,
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = ["-arch x86_64", "-stdlib=libc++", "-std=gnu++11"],
),
],
),
],
implies = [
"apply_default_compiler_flags",
"apply_default_warnings",
"framework_paths",
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
"apply_simulator_compiler_flags",
],
tools = [
tool(
path = "iossim/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "tvos_x86_64"):
objcpp_compile_action = action_config(
action_name = ACTION_NAMES.objcpp_compile,
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = ["-arch x86_64", "-stdlib=libc++", "-std=gnu++11"],
),
],
),
],
implies = [
"apply_default_compiler_flags",
"apply_default_warnings",
"framework_paths",
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
"apply_simulator_compiler_flags",
],
tools = [
tool(
path = "tvsim/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "darwin_x86_64"):
objcpp_compile_action = action_config(
action_name = ACTION_NAMES.objcpp_compile,
flag_sets = [
flag_set(
flag_groups = [
flag_group(
flags = ["-arch x86_64", "-stdlib=libc++", "-std=gnu++11"],
),
],
),
],
implies = [
"apply_default_compiler_flags",
"apply_default_warnings",
"framework_paths",
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
],
tools = [
tool(
path = "mac/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
else:
objcpp_compile_action = None
if (ctx.attr.cpu == "tvos_arm64"):
cpp_header_parsing_action = action_config(
action_name = ACTION_NAMES.cpp_header_parsing,
implies = [
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
"unfiltered_cxx_flags",
],
tools = [
tool(
path = "tvos/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "tvos_x86_64"):
cpp_header_parsing_action = action_config(
action_name = ACTION_NAMES.cpp_header_parsing,
implies = [
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
"unfiltered_cxx_flags",
],
tools = [
tool(
path = "tvsim/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "x64_windows"):
cpp_header_parsing_action = action_config(
action_name = ACTION_NAMES.cpp_header_parsing,
implies = [
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
],
tools = [
tool(
path = "<tool_dir>/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "ios_arm64" or
ctx.attr.cpu == "ios_armv7"):
cpp_header_parsing_action = action_config(
action_name = ACTION_NAMES.cpp_header_parsing,
implies = [
"preprocessor_defines",
"include_system_dirs",
"version_min",
"objc_arc",
"no_objc_arc",
"apple_env",
"user_compile_flags",
"sysroot",
"unfiltered_compile_flags",
"compiler_input_flags",
"compiler_output_flags",
],
tools = [
tool(
path = "ios/wrapped_clang",
execution_requirements = xcode_execution_requirements,
),
],
)
elif (ctx.attr.cpu == "ios_i386" or
ctx.attr.cpu == "ios_x86_64"):
cpp_header_parsing_action = action_config(
action_name = ACTION_NAMES.cpp_header_parsing,
implies = [
"preprocessor_defines",
"include_system_dirs",
| |
and gets outcomes [6, 6].
End scores = (48, 61)
>>> print(turns[6])
Start scores = (48, 61).
Player 0 rolls 0 dice and gets outcomes [].
End scores = (53, 61)
>>> print(turns[7])
Start scores = (53, 61).
Player 1 rolls 0 dice and gets outcomes [].
End scores = (53, 65)
>>> print(turns[8])
Start scores = (53, 65).
Player 0 rolls 6 dice and gets outcomes [2, 3, 3, 4, 1, 3].
End scores = (54, 65)
>>> print(turns[9])
Start scores = (54, 65).
Player 1 rolls 1 dice and gets outcomes [1].
End scores = (66, 54)
>>> print(turns[10])
Start scores = (66, 54).
Player 0 rolls 7 dice and gets outcomes [2, 2, 5, 3, 2, 4, 5].
End scores = (89, 54)
>>> print(turns[11])
Start scores = (89, 54).
Player 1 rolls 9 dice and gets outcomes [4, 6, 6, 3, 5, 4, 3, 1, 5].
End scores = (89, 55)
>>> print(turns[12])
Start scores = (89, 55).
Player 0 rolls 8 dice and gets outcomes [2, 2, 6, 4, 2, 6, 5, 6].
End scores = (122, 55)
>>> print(turns[13])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=49015, score0=12, score1=5, goal=82, feral_hogs=False)
>>> print(turns[0])
Start scores = (12, 5).
Player 0 rolls 8 dice and gets outcomes [1, 3, 2, 1, 5, 1, 4, 1].
End scores = (13, 5)
>>> print(turns[1])
Start scores = (13, 5).
Player 1 rolls 10 dice and gets outcomes [1, 6, 2, 5, 5, 6, 5, 4, 1, 2].
End scores = (13, 6)
>>> print(turns[2])
Start scores = (13, 6).
Player 0 rolls 6 dice and gets outcomes [2, 4, 2, 4, 2, 5].
End scores = (32, 6)
>>> print(turns[3])
Start scores = (32, 6).
Player 1 rolls 4 dice and gets outcomes [3, 5, 6, 6].
End scores = (32, 26)
>>> print(turns[4])
Start scores = (32, 26).
Player 0 rolls 7 dice and gets outcomes [3, 5, 4, 5, 3, 2, 1].
End scores = (33, 26)
>>> print(turns[5])
Start scores = (33, 26).
Player 1 rolls 5 dice and gets outcomes [6, 3, 1, 4, 1].
End scores = (33, 27)
>>> print(turns[6])
Start scores = (33, 27).
Player 0 rolls 6 dice and gets outcomes [6, 4, 6, 2, 2, 4].
End scores = (27, 57)
>>> print(turns[7])
Start scores = (27, 57).
Player 1 rolls 7 dice and gets outcomes [6, 3, 4, 6, 2, 6, 2].
End scores = (27, 86)
>>> print(turns[8])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=50497, score0=46, score1=5, goal=51, feral_hogs=True)
>>> print(turns[0])
Start scores = (46, 5).
Player 0 rolls 6 dice and gets outcomes [3, 3, 1, 2, 5, 2].
End scores = (47, 5)
>>> print(turns[1])
Start scores = (47, 5).
Player 1 rolls 3 dice and gets outcomes [4, 5, 6].
End scores = (47, 20)
>>> print(turns[2])
Start scores = (47, 20).
Player 0 rolls 8 dice and gets outcomes [6, 2, 3, 3, 3, 4, 2, 6].
End scores = (76, 20)
>>> print(turns[3])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=42297, score0=6, score1=22, goal=25, feral_hogs=False)
>>> print(turns[0])
Start scores = (6, 22).
Player 0 rolls 2 dice and gets outcomes [6, 1].
End scores = (7, 22)
>>> print(turns[1])
Start scores = (7, 22).
Player 1 rolls 8 dice and gets outcomes [1, 2, 5, 1, 2, 2, 3, 4].
End scores = (7, 23)
>>> print(turns[2])
Start scores = (7, 23).
Player 0 rolls 10 dice and gets outcomes [3, 6, 4, 2, 1, 5, 2, 1, 2, 1].
End scores = (8, 23)
>>> print(turns[3])
Start scores = (8, 23).
Player 1 rolls 10 dice and gets outcomes [1, 1, 2, 5, 6, 5, 6, 4, 6, 4].
End scores = (24, 8)
>>> print(turns[4])
Start scores = (24, 8).
Player 0 rolls 3 dice and gets outcomes [3, 1, 6].
End scores = (25, 8)
>>> print(turns[5])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=1726, score0=19, score1=5, goal=52, feral_hogs=True)
>>> print(turns[0])
Start scores = (19, 5).
Player 0 rolls 10 dice and gets outcomes [5, 1, 3, 4, 3, 1, 5, 1, 5, 3].
End scores = (20, 5)
>>> print(turns[1])
Start scores = (20, 5).
Player 1 rolls 1 dice and gets outcomes [2].
End scores = (7, 20)
>>> print(turns[2])
Start scores = (7, 20).
Player 0 rolls 2 dice and gets outcomes [2, 3].
End scores = (20, 12)
>>> print(turns[3])
Start scores = (20, 12).
Player 1 rolls 1 dice and gets outcomes [3].
End scores = (20, 15)
>>> print(turns[4])
Start scores = (20, 15).
Player 0 rolls 10 dice and gets outcomes [5, 6, 2, 6, 4, 6, 6, 4, 4, 4].
End scores = (67, 15)
>>> print(turns[5])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=17218, score0=19, score1=10, goal=50, feral_hogs=False)
>>> print(turns[0])
Start scores = (19, 10).
Player 0 rolls 0 dice and gets outcomes [].
End scores = (21, 10)
>>> print(turns[1])
Start scores = (21, 10).
Player 1 rolls 8 dice and gets outcomes [3, 1, 5, 2, 3, 3, 5, 1].
End scores = (11, 21)
>>> print(turns[2])
Start scores = (11, 21).
Player 0 rolls 4 dice and gets outcomes [1, 4, 6, 3].
End scores = (12, 21)
>>> print(turns[3])
Start scores = (12, 21).
Player 1 rolls 5 dice and gets outcomes [2, 6, 2, 2, 5].
End scores = (12, 38)
>>> print(turns[4])
Start scores = (12, 38).
Player 0 rolls 6 dice and gets outcomes [3, 2, 5, 3, 5, 6].
End scores = (36, 38)
>>> print(turns[5])
Start scores = (36, 38).
Player 1 rolls 4 dice and gets outcomes [1, 2, 6, 6].
End scores = (36, 39)
>>> print(turns[6])
Start scores = (36, 39).
Player 0 rolls 1 dice and gets outcomes [5].
End scores = (39, 41)
>>> print(turns[7])
Start scores = (39, 41).
Player 1 rolls 0 dice and gets outcomes [].
End scores = (39, 49)
>>> print(turns[8])
Start scores = (39, 49).
Player 0 rolls 6 dice and gets outcomes [3, 4, 6, 1, 5, 2].
End scores = (40, 49)
>>> print(turns[9])
Start scores = (40, 49).
Player 1 rolls 8 dice and gets outcomes [2, 3, 3, 2, 2, 5, 5, 5].
End scores = (40, 76)
>>> print(turns[10])
Game Over
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> turns = tests.play_utils.describe_game(hog, hog_gui, test_number=88988, score0=15, score1=95, goal=100, feral_hogs=False)
>>> print(turns[0])
Start scores = (15, 95).
Player 0 rolls 9 dice and gets outcomes [5, 3, 2, 4, 2, 1, 3, 2, 5].
End scores = (16, 95)
>>> print(turns[1])
Start scores = (16, 95).
Player 1 rolls 10 dice and gets outcomes [6, 4, 3, 2, 6, 4, 6, 1, 2, 1].
End scores = (16, 96)
>>> print(turns[2])
Start scores = (16, 96).
Player 0 rolls 1 dice and gets outcomes [6].
End scores = (22, 96)
>>> print(turns[3])
Start scores = (22, 96).
Player 1 rolls 7 dice and gets outcomes [5, 1, 3, 1, 5, 6, 2].
End scores = (22, 97)
>>> print(turns[4])
Start scores = (22, 97).
Player 0 rolls 6 dice and gets outcomes [4, 2, 2, 3, 6, 1].
End scores = (97, 23)
>>> print(turns[5])
Start scores = (97, 23).
Player 1 rolls 8 dice and gets outcomes [4, 4, 1, 2, 3, 1, 4, 5].
End scores = (97, 24)
>>> print(turns[6])
Start scores = (97, 24).
| |
# -*- coding: utf-8 -*-
"""
This module bundles all the tools of the SNN conversion toolbox.
Important functions:
.. autosummary::
:nosignatures:
test_full
update_setup
@author: rbodo
"""
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import os
from importlib import import_module
from future import standard_library
standard_library.install_aliases()
def test_full(config, queue=None):
    """Convert an analog network to a spiking network and simulate it.

    Complete pipeline of
    1. loading and testing a pretrained ANN,
    2. normalizing parameters,
    3. converting it to SNN,
    4. running it on a simulator,
    5. given a specified hyperparameter range ``params``,
       repeating simulations with modified parameters.

    Parameters
    ----------
    config: configparser.ConfigParser
        ConfigParser containing the user settings.
    queue: Optional[Queue.Queue]
        Results are added to the queue to be displayed in the GUI.

    Returns
    -------
    results: Optional[list]
        List of the accuracies obtained after simulating with each parameter
        value in config.get('parameter_sweep', 'param_values'), or ``None``
        when the 'simulate' tool is disabled.
    """
    from snntoolbox.datasets.utils import get_dataset
    from snntoolbox.conversion.utils import normalize_parameters
    num_to_test = config.getint('simulation', 'num_to_test')
    # Instantiate an empty spiking network of the simulator-specific type.
    target_sim = import_target_sim(config)
    spiking_model = target_sim.SNN(config, queue)
    # ____________________________ LOAD DATASET ______________________________ #
    normset, testset = get_dataset(config)
    # Bug fix: 'results' was previously assigned only inside the 'simulate'
    # branch, so running with tools.simulate = False raised a NameError at
    # 'queue.put(results)' / 'return results' below. Initialize it up front.
    results = None
    if config.getboolean('tools', 'convert') and not is_stop(queue):
        # ___________________________ LOAD MODEL _____________________________ #
        model_lib = import_module('snntoolbox.parsing.model_libs.' +
                                  config.get('input', 'model_lib') +
                                  '_input_lib')
        input_model = model_lib.load(config.get('paths', 'path_wd'),
                                     config.get('paths', 'filename_ann'))
        # Evaluate input model.
        if config.getboolean('tools', 'evaluate_ann') and not is_stop(queue):
            print("Evaluating input model on {} samples...".format(num_to_test))
            model_lib.evaluate(input_model['val_fn'],
                               config.getint('simulation', 'batch_size'),
                               num_to_test, **testset)
        # _____________________________ PARSE ________________________________ #
        print("Parsing input model...")
        model_parser = model_lib.ModelParser(input_model['model'], config)
        model_parser.parse()
        parsed_model = model_parser.build_parsed_model()
        # ____________________________ NORMALIZE _____________________________ #
        if config.getboolean('tools', 'normalize') and not is_stop(queue):
            normalize_parameters(parsed_model, config, **normset)
        # Evaluate parsed model.
        if config.getboolean('tools', 'evaluate_ann') and not is_stop(queue):
            print("Evaluating parsed model on {} samples...".format(
                num_to_test))
            model_parser.evaluate(config.getint(
                'simulation', 'batch_size'), num_to_test, **testset)
        # Write parsed model to disk.
        parsed_model.save(
            os.path.join(config.get('paths', 'path_wd'),
                         config.get('paths', 'filename_parsed_model') + '.h5'))
        # ____________________________ CONVERT _______________________________ #
        spiking_model.build(parsed_model)
        # Export network in a format specific to the simulator with which it
        # will be tested later.
        spiking_model.save(config.get('paths', 'path_wd'),
                           config.get('paths', 'filename_snn'))
    # _______________________________ SIMULATE _______________________________ #
    if config.getboolean('tools', 'simulate') and not is_stop(queue):
        # Decorate the 'run' function of the spiking model with a parameter
        # sweep function.
        @run_parameter_sweep(config, queue)
        def run(snn, **test_set):
            return snn.run(**test_set)
        # Simulate network.
        results = run(spiking_model, **testset)
        # Clean up simulator resources.
        spiking_model.end_sim()
    # Add results to queue to be displayed in GUI.
    if queue:
        queue.put(results)
    return results
def is_stop(queue):
    """Determine if the user pressed 'stop' in the GUI.

    Parameters
    ----------
    queue: Optional[Queue.Queue]
        Event queue polled for a 'stop' message. May be ``None`` or empty
        when running without a GUI.

    Returns
    -------
    : bool
        ``True`` if user pressed 'stop' in GUI, ``False`` otherwise.
    """
    if not queue:
        return False
    if queue.empty():
        return False
    if queue.get_nowait() == 'stop':
        print("Skipped step after user interrupt")
        # Put the message back so that subsequent polls also see the stop
        # request and every remaining pipeline step gets skipped.
        queue.put('stop')
        return True
    # Bug fix: the original fell off the end here and returned None instead
    # of a bool for non-'stop' messages; return False explicitly.
    # NOTE(review): a non-'stop' message is consumed and discarded on this
    # path; presumably the GUI only ever sends 'stop' — verify against the
    # GUI code before relying on other message types.
    return False
def run_parameter_sweep(config, queue):
    """Build a decorator that repeats a simulation over a hyperparameter sweep.

    The returned decorator wraps a ``run_single(snn, **testset)`` function and
    calls it once per value listed in ``config['parameter_sweep']``, writing
    each value into the 'cell' section before the run. The extra wrapping
    layer exists so that decorator arguments (``config``, ``queue``) can be
    passed in.
    """
    def decorator(run_single):
        from functools import wraps
        @wraps(run_single)
        def wrapper(snn, **testset):
            # NOTE(review): the parameter list is eval'd straight from the
            # config file — the config must come from a trusted source.
            sweep_values = eval(config.get('parameter_sweep', 'param_values'))
            sweep_param = config.get('parameter_sweep', 'param_name')
            logscale = config.getboolean('parameter_sweep',
                                         'param_logscale')
            verbose = len(sweep_values) > 1
            if verbose:
                print("Testing SNN for parameter values {} = ".format(
                    sweep_param))
                print(['{:.2f}'.format(i) for i in sweep_values])
                print('\n')
            accuracies = []
            # Loop over the parameter values, honoring GUI stop requests.
            for value in sweep_values:
                if is_stop(queue):
                    break
                # Write the current value into the config before running.
                config.set('cell', sweep_param, str(value))
                if verbose:
                    print("\nCurrent value of parameter to sweep: "
                          "{} = {:.2f}\n".format(sweep_param, value))
                accuracies.append(run_single(snn, **testset))
            # Plot the sweep results if the plotting module is available.
            try:
                from snntoolbox.simulation.plotting import plot_param_sweep
            except ImportError:
                plot_param_sweep = None
            if plot_param_sweep is not None:
                plot_param_sweep(
                    accuracies, config.getint('simulation', 'num_to_test'),
                    sweep_values, sweep_param, logscale)
            return accuracies
        return wrapper
    return decorator
def import_target_sim(config):
    """Import and return the module implementing the configured simulator.

    The target module name is assembled from the ``simulator`` setting; for
    the built-in 'INI' simulator the configured spike code is appended so
    that e.g. 'INI' + 'temporal_mean_rate' resolves to
    ``INI_temporal_mean_rate_target_sim``.
    """
    simulator = config.get('simulation', 'simulator')
    if simulator == 'INI':
        suffix = '_' + config.get('conversion', 'spike_code')
    else:
        suffix = ''
    module_name = ('snntoolbox.simulation.target_simulators.'
                   '{}{}_target_sim'.format(simulator, suffix))
    return import_module(module_name)
def load_config(filepath):
    """Read and return a ``ConfigParser`` loaded from ``filepath``.

    Raises an ``AssertionError`` if no file exists at ``filepath``.
    """
    try:
        import configparser
    except ImportError:
        # Python 2 fallback: the module was called ConfigParser there.
        # noinspection PyPep8Naming
        import ConfigParser as configparser
    assert os.path.isfile(filepath), \
        "Configuration file not found at {}.".format(filepath)
    parser = configparser.ConfigParser()
    parser.read(filepath)
    return parser
def update_setup(config_filepath):
"""Update default settings with user settings and check they are valid.
Load settings from configuration file at ``config_filepath``, and check that
parameter choices are valid. Non-specified settings are filled in with
defaults.
"""
from textwrap import dedent
# Load defaults.
config = load_config(os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', 'config_defaults')))
# Overwrite with user settings.
config.read(config_filepath)
keras_backend = config.get('simulation', 'keras_backend')
keras_backends = config_string_to_set_of_strings(
config.get('restrictions', 'keras_backends'))
assert keras_backend in keras_backends, \
"Keras backend {} not supported. Choose from {}.".format(keras_backend,
keras_backends)
os.environ['KERAS_BACKEND'] = keras_backend
# The keras import has to happen after setting the backend environment
# variable!
import keras.backend as k
assert k.backend() == keras_backend, \
"Keras backend set to {} in snntoolbox config file, but has already " \
"been set to {} by a previous keras import. Set backend " \
"appropriately in the keras config file.".format(keras_backend,
k.backend())
if keras_backend == 'tensorflow':
# Limit GPU usage of tensorflow.
tf_config = k.tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
k.tensorflow_backend.set_session(k.tf.Session(config=tf_config))
# Name of input file must be given.
filename_ann = config.get('paths', 'filename_ann')
assert filename_ann != '', "Filename of input model not specified."
# Check that simulator choice is valid.
simulator = config.get('simulation', 'simulator')
simulators = config_string_to_set_of_strings(config.get('restrictions',
'simulators'))
assert simulator in simulators, \
"Simulator '{}' not supported. Choose from {}".format(simulator,
simulators)
# Warn user that it is not possible to use Brian2 simulator by loading a
# pre-converted network from disk.
if simulator == 'brian2' and not config.getboolean('tools', 'convert'):
print(dedent("""\ \n
SNN toolbox Warning: When using Brian 2 simulator, you need to
convert the network each time you start a new session. (No
saving/reloading methods implemented.) Setting convert = True.
\n"""))
config.set('tools', 'convert', str(True))
# Set default path if user did not specify it.
if config.get('paths', 'path_wd') == '':
config.set('paths', 'path_wd', os.path.dirname(config_filepath))
# Check specified working directory exists.
path_wd = config.get('paths', 'path_wd')
assert os.path.exists(path_wd), \
"Working directory {} does not exist.".format(path_wd)
# Check that choice of input model library is valid.
model_lib = config.get('input', 'model_lib')
model_libs = config_string_to_set_of_strings(config.get('restrictions',
'model_libs'))
assert model_lib in model_libs, "ERROR: Input model library '{}' ".format(
model_lib) + "not supported yet. Possible values: {}".format(model_libs)
# Check input model is found and has the right format for the specified
# model library.
if model_lib == 'caffe':
caffemodel_filepath = os.path.join(path_wd,
filename_ann + '.caffemodel')
caffemodel_h5_filepath = os.path.join(path_wd,
filename_ann + '.caffemodel.h5')
assert os.path.isfile(caffemodel_filepath) or os.path.isfile(
caffemodel_h5_filepath), "File {} or {} not found.".format(
caffemodel_filepath, caffemodel_h5_filepath)
prototxt_filepath = os.path.join(path_wd, filename_ann + '.prototxt')
assert os.path.isfile(prototxt_filepath), \
"File {} not found.".format(prototxt_filepath)
elif model_lib == 'keras':
h5_filepath = os.path.join(path_wd, filename_ann + '.h5')
assert os.path.isfile(h5_filepath), \
"File {} not found.".format(h5_filepath)
json_file = filename_ann + '.json'
if not os.path.isfile(os.path.join(path_wd, json_file)):
import keras
import h5py
from snntoolbox.parsing.utils import get_custom_activations_dict
# Remove optimizer_weights here, because they may cause the
# load_model method to fail if the network was trained on a
# different platform or keras version
# (see https://github.com/fchollet/keras/issues/4044).
# with h5py.File(h5_filepath, 'a') as f:
# if 'optimizer_weights' in f.keys():
# del f['optimizer_weights']
# Try loading the model.
keras.models.load_model(str(h5_filepath), get_custom_activations_dict())
elif model_lib == 'lasagne':
h5_filepath = os.path.join(path_wd, filename_ann + '.h5')
pkl_filepath = os.path.join(path_wd, filename_ann + '.pkl')
assert os.path.isfile(h5_filepath) or os.path.isfile(pkl_filepath), \
"File {} not found.".format('.h5 or .pkl')
py_filepath = os.path.join(path_wd, filename_ann + '.py')
assert os.path.isfile(py_filepath), \
"File {} not found.".format(py_filepath)
else:
print("For the specified input model library {}, ".format(model_lib) +
"no test is implemented to check if input model files exist in "
"the specified working directory!")
# Set default path if user did not specify it.
if config.get('paths', 'dataset_path') == '':
config.set('paths', 'dataset_path', os.path.dirname(__file__))
# Check that the data set path is valid.
dataset_path = os.path.abspath(config.get('paths', 'dataset_path'))
config.set('paths', 'dataset_path', dataset_path)
assert os.path.exists(dataset_path), "Path to data set does not exist: " \
"{}".format(dataset_path)
# Check that data set path contains the data in the specified format.
assert os.listdir(dataset_path), "Data set directory is empty."
normalize = config.getboolean('tools', 'normalize')
dataset_format = config.get('input', 'dataset_format')
if dataset_format == 'npz' and normalize and not os.path.exists(
os.path.join(dataset_path, 'x_norm.npz')):
raise RuntimeWarning(
"No data set file 'x_norm.npz' found in specified data set path " +
"{}. Add it, or disable | |
<gh_stars>0
#
# Copyright (c) 2021 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
# util.py
#
# Cleaning utilities for finding errors in varied corpora
#
import numpy as np
import pandas as pd
import sklearn.random_projection
import sklearn.pipeline
import sklearn.linear_model
import sklearn.metrics
import transformers
import text_extensions_for_pandas as tp
# Always run with the latest version of Text Extensions for Pandas
import importlib
tp = importlib.reload(tp)
from typing import *
def train_reduced_model(
    x_values: np.ndarray,
    y_values: np.ndarray,
    n_components: int,
    seed: int,
    max_iter: int = 10000,
) -> sklearn.base.BaseEstimator:
    """
    Fit a deliberately reduced-quality model: a Gaussian random projection
    placed in front of a multinomial logistic regression classifier.

    :param x_values: input embeddings for training set
    :param y_values: integer labels corresponding to embeddings
    :param n_components: Number of dimensions to reduce the embeddings to
    :param seed: Random seed to drive Gaussian random projection
    :param max_iter: Maximum number of iterations of L-BGFS to run. The
        default value of 10000 will achieve a tight fit but takes a while.
    :returns A model (Python object with a `predict()` method) fit on the
     input training data with the specified level of dimension reduction
     by random projection.
    """
    # Stage 1: project the embeddings down to n_components dimensions.
    dimension_reducer = sklearn.random_projection.GaussianRandomProjection(
        n_components=n_components, random_state=seed
    )
    # Stage 2: multinomial logistic regression over the reduced features.
    classifier = sklearn.linear_model.LogisticRegression(
        multi_class="multinomial", max_iter=max_iter
    )
    reduce_pipeline = sklearn.pipeline.Pipeline(
        [("dimred", dimension_reducer), ("mlogreg", classifier)]
    )
    print(f"Training model with n_components={n_components} and seed={seed}.")
    return reduce_pipeline.fit(x_values, y_values)
def train_model_ensemble(
    training_data: pd.DataFrame,
    labels_col: str,
    x_feats_col: str = "embedding",
    model_sizes=None,
    model_seeds=None,
    max_iters=10000,
):
    """
    Train an ensemble of reduced-quality models by putting a Gaussian
    random projection in front of the multinomial logistic regression
    stage of the pipelines for a set of models.

    Two lists are given of model sizes and seeds, and the cross product
    of the two is the complete set of parameters used to train the models.
    Uses Ray to parallelize model training; Ray is (re)initialized at the
    start of this call and shut down before returning.

    :param training_data: a dataframe containing the bert embeddings and
     labels for the models to train on.
    :param labels_col: the name of the column containing the labels for the
     model to train on
    :param x_feats_col: the name of the column containing the BERT embeddings
     for each token, off which the model trains
    :param model_sizes: the number of components that the gaussian random
     projection reduces the BERT embedding to. Defaults to
     [32, 64, 128, 256] (trained largest-first).
    :param model_seeds: seeds for the random initialization of the model.
     Defaults to [1, 2, 3].
    :param max_iters: the upper bound on the number of iterations to allow
     the models to train. 100 is fast and 10,000 typically means full
     convergence
    :returns: A dictionary mapping model names (``"<size>_<seed>"``) to models
     (Python objects with a `predict()` method) fit on the input training
     data with the specified level of dimension reduction by random
     projection.
    """
    import ray  # TODO: put a note about this in the docstring
    # Fill in default sweep parameters. NOTE(review): the reverse() (train
    # largest models first) is only applied to the default list, not to a
    # caller-supplied model_sizes — confirm that asymmetry is intentional.
    if model_sizes is None:
        model_sizes = [32, 64, 128, 256]
        model_sizes.reverse()
    if model_seeds is None:
        model_seeds = [1, 2, 3]
    # Cross product of sizes and seeds; the dict key doubles as model name.
    model_params = {
        f"{size}_{seed}": (size, seed) for size in model_sizes for seed in model_seeds
    }
    # Extract feature and label columns for training.
    X_train = training_data[x_feats_col].values
    Y_train = training_data[labels_col]
    # Restart Ray so we get a clean session regardless of prior state.
    if ray.is_initialized():
        ray.shutdown()
    ray.init()
    # Remote wrapper around train_reduced_model so Ray can fan the
    # (size, seed) combinations out across workers.
    @ray.remote
    def train_reduced_model_task(
        x_values: np.ndarray,
        y_values: np.ndarray,
        n_components: int,
        seed: int,
        max_iter: int = max_iters,
    ) -> sklearn.base.BaseEstimator:
        return train_reduced_model(x_values, y_values, n_components, seed, max_iter)
    # Put the training arrays into the Plasma object store once so every
    # task shares them instead of re-serializing per task.
    # NOTE(review): X_train comes from `.values` above; this assumes that
    # yields a TensorArray-like object exposing `.to_numpy()` — verify.
    X_id = ray.put(X_train.to_numpy())
    Y_id = ray.put(Y_train.to_numpy())
    # Launch one training task per (size, seed) combination.
    futures = [
        train_reduced_model_task.remote(
            X_id, Y_id, components, seed, max_iter=max_iters
        )
        for components, seed in model_params.values()
    ]
    results = ray.get(futures)
    # Clean up items we've added to Plasma and shut down ray
    del X_id
    del Y_id
    ray.shutdown()
    # `results` preserves submission order, so zipping against the ordered
    # keys of model_params pairs each model with its name.
    models = {name: model for name, model in zip(model_params.keys(), results)}
    return models
def infer_on_df(
    df: pd.DataFrame, id_to_class_dict, predictor, iob=False, embeddings_col="embedding"
):
    """
    Run a trained embeddings model over a DataFrame of BERT embeddings.

    Adds ``predicted_id``, ``predicted_class`` and ``raw_output`` columns to a
    copy of ``df``; when ``iob`` is True, the predicted classes are
    additionally decoded into ``predicted_iob``/``predicted_type`` columns.

    :param df: the document on which to perform inference; of the form output by the
        `preprocess_documents` method of this module, and containing BERT embeddings,
        references to fold and document numbers, as well as some column containing unique
        identifiers for the raw tokenization of the document (i.e. `'raw_token_id'` field in
        output DataFrames from `preprocess_documents`)
    :param id_to_class_dict: Mapping from class ID to class name, as returned by
        :func:`text_extensions_for_pandas.make_iob_tag_categories`
    :param predictor: Python object with a `predict_proba` method that accepts a
        numpy array of embeddings.
    :param iob: a boolean value; when set to True, additional logic for IOB-formatted
        classes is activated
    :param embeddings_col: the column in `df` that contains BERT embeddings for that document
    :return: a copy of `df` with the prediction columns added
    """
    predictions_df = df.copy()
    class_probs = tp.TensorArray(
        predictor.predict_proba(predictions_df[embeddings_col])
    )
    predictions_df["predicted_id"] = np.argmax(class_probs, axis=1)
    predictions_df["predicted_class"] = predictions_df["predicted_id"].apply(
        lambda class_id: id_to_class_dict[class_id]
    )
    if iob:
        iob_tags, entity_types = tp.io.conll.decode_class_labels(
            predictions_df["predicted_class"].values
        )
        predictions_df["predicted_iob"] = iob_tags
        predictions_df["predicted_type"] = entity_types
    predictions_df["raw_output"] = class_probs
    return predictions_df
def infer_and_extract_raw_entites(
    doc: pd.DataFrame,
    id_to_class_dict,
    predictor,
    raw_span_id_col="raw_span_id",
    fold_col="fold",
    doc_col="doc_num",
    agg_func=None,
    keep_cols: List[str] = None,
):
    """
    Takes a dataframe containing bert embeddings and a model trained on bert embeddings, and
    runs inference on the dataframe. Then, using references to the original spans, reconstructs
    the predicted value of each token of the original tokenization.

    :param doc: the document on which to perform inference; of the form output by the
        `preprocess_documents` method of this module, and containing BERT embeddings, references to
        fold and document numbers, as well as some column containing unique identifiers for the raw
        tokenization of the document
    :param id_to_class_dict: Mapping from class ID to class name, as returned by
        :func:`text_extensions_for_pandas.make_iob_tag_categories`
    :param predictor: Python object with a `predict_proba` method that accepts a
        numpy array of embeddings.
    :param raw_span_id_col: the name of the column of `doc` containing some identifier of the raw
        token that each bert token came from.
    :param fold_col: the name of the column of `doc` containing the fold of each token
    :param doc_col: the name of the column of `doc` containing the document number of each token
    :param agg_func: if specified, a function that takes in a series of TensorArrays and returns a
        pandas-compatible type; used to aggregate the predictions of multiple subtokens when
        multiple subtokens all describe the same original token.
    :param keep_cols: any column that you wish to be carried over to the output dataframe; by
        default 'fold', 'doc_num', 'token_id' and 'raw_span' are carried over if present.
    :return: a dataframe with one row per raw token, containing the aggregated raw model
        output plus `predicted_id` and `predicted_class` columns.
    """
    if agg_func is None:
        def agg_func(series: pd.Series):
            # util function for predicting the probabilities of each class when multiple
            # sub-tokens are combined. This method assumes independence between subtoken
            # classes and calculates the probability of all subtokens being the same class,
            # then re-normalizes so the vector components sum to one again.
            vec = series.to_numpy().prod(axis=0)
            if (
                np.sum(vec) == 0
            ):  # if we underflow, (only happens in rare cases) log everything and continue
                mat = np.log2(series.to_numpy())
                vec = mat.sum(axis=0)
                # subtract the log-sum to normalize in log space
                vec -= np.logaddexp2.reduce(vec)
                # BUG FIX: wrap in TensorArray exactly like the normal path below so the
                # aggregated column has a consistent element type on both code paths.
                return tp.TensorArray(np.exp2(vec))
            return tp.TensorArray(vec / np.sum(vec))
    # build aggregation fields
    keep_cols = (
        keep_cols
        if keep_cols is not None
        else [
            "fold",
            "doc_num",
            "token_id",
            "raw_span",
        ]
    )
    sort_cols = [
        col for col in [fold_col, doc_col, raw_span_id_col] if col in doc.columns
    ]
    keep_cols = [
        c for c in keep_cols if c in doc.columns and c not in sort_cols
    ]  # filter out cols not in df
    aggby = {k: "first" for k in keep_cols}
    aggby["raw_output"] = agg_func
    df = doc[["embedding"] + keep_cols + sort_cols].copy()
    # first, run inference
    df.loc[:, "raw_output"] = tp.TensorArray(predictor.predict_proba(df["embedding"]))
    # group by original (raw) token and aggregate the subtoken predictions
    groupby = df.groupby(sort_cols)
    results_df = groupby.agg(aggby).reset_index().sort_values(sort_cols)
    # translate aggregated probabilities back into class ids and names
    results_df["predicted_id"] = results_df.raw_output.apply(
        lambda s: np.array(s).argmax()
    )
    results_df["predicted_class"] = results_df["predicted_id"].apply(
        lambda p_id: id_to_class_dict[p_id]
    )
    return results_df
def infer_and_extract_entities_iob(
| |
= fluence_seq[:final_step+1] + [fluence_seq[final_step] + extra_fluence]
return fluence_seq_until_time
def get_fluence_subseq_until_time(path, cell, final_time):
    """Fluence sub-sequence of *cell* truncated at *final_time*, with the
    partial fluence accumulated during the final sub-step appended."""
    final_substep = find_substep_from_time(path, cell, final_time)
    partial_fluence = get_extra_subfluence_from_time(path, cell, final_time)
    subseq = get_fluence_subseq(path, cell)
    return subseq[:final_substep + 1] + [subseq[final_substep] + partial_fluence]
# This function calculates the additional fluence from the previous time point
# to where the final time is set
def get_extra_fluence_from_time(path, cell, time):
    """Fluence accumulated between the last macro-step time point and *time*."""
    step = find_step_from_time(path, cell, time)
    dens_file = path + '/output_summary/{}_dens'.format(cell)
    # flux seq has an added zero at the beginning of the array, hence step+1
    step_flux = read_flux(dens_file)[step + 1]
    elapsed_days = time - read_time_seq(dens_file)[step]
    # convert the day interval to seconds: fluence = flux * time
    return step_flux * elapsed_days * 24 * 3600
# This function calculates the additional fluence from the previous time point
# to where the final time is set
# From subdens file
def get_extra_subfluence_from_time(path, cell, time):
    """Fluence accumulated between the last sub-step time point and *time*."""
    substep = find_substep_from_time(path, cell, time)
    subdens_file = path + '/{}_subdens'.format(cell)
    # flux seq has an added zero at the beginning of the array
    substep_flux = read_flux_subseq(subdens_file)[substep]
    elapsed_days = time - read_time_seq(subdens_file)[substep]
    # convert the day interval to seconds: fluence = flux * time
    return substep_flux * elapsed_days * 24 * 3600
def find_step_from_time(path, cell, time):
    """Index of the first macro step whose end time is >= *time*."""
    dens_file = path + '/output_summary/{}_dens'.format(cell)
    step = 0
    # skip the leading time point; compare against each step's end time
    for end_time in read_time_seq(dens_file)[1:]:
        if time <= end_time:
            break
        step += 1
    return step
def find_substep_from_time(path, cell, time):
    """Index of the first sub-step whose end time is >= *time*."""
    subdens_file = path + '/{}_subdens'.format(cell)
    substep = 0
    # skip the leading time point; compare against each sub-step's end time
    for end_time in read_time_seq(subdens_file)[1:]:
        if time <= end_time:
            break
        substep += 1
    return substep
def get_step_fluence_length(path, cell):
    """Per-step fluence increments (successive differences of the cumulative
    fluence sequence)."""
    fluence_seq = get_fluence_seq(path, cell)
    return [after - before for before, after in zip(fluence_seq[:-1], fluence_seq[1:])]
def read_flux_spectrum(path, steps_list):
    """Read the flux spectra of selected steps from a flux-spectrum file.

    :param path: path to the flux spectrum file
    :param steps_list: step indices (0-based, counted from the first data line)
        whose spectra should be returned
    :return: list of spectra (each a list of floats), in file order

    The file handle is now closed deterministically (the previous version
    leaked it).
    """
    with open(path, 'r') as flux_spectrum_file:
        lines = flux_spectrum_file.readlines()
    flux_spectrum_list = []
    # The flux spectrum data always start after the 6 header lines
    for step_count, line in enumerate(lines[6:]):
        if step_count in steps_list:
            # the first three columns are metadata; the rest are flux values
            flux_spectrum_list.append([float(x) for x in line.split()[3:]])
    return flux_spectrum_list
def read_energy_mid_points(path):
    """Return the energy-bin mid points (third line of the flux spectrum file).

    :param path: path to the flux spectrum file
    :return: list of floats (the first column of the line is a label and is skipped)

    The file handle is now closed deterministically (the previous version
    leaked it).
    """
    with open(path, 'r') as flux_spectrum_file:
        lines = flux_spectrum_file.readlines()
    return [float(x) for x in lines[2].split()[1:]]
def read_energy_bin_length(path):
    """Return the energy-bin widths (second line of the flux spectrum file).

    :param path: path to the flux spectrum file
    :return: list of floats (the first column of the line is a label and is skipped)

    The file handle is now closed deterministically (the previous version
    leaked it).
    """
    with open(path, 'r') as flux_spectrum_file:
        lines = flux_spectrum_file.readlines()
    return [float(x) for x in lines[1].split()[1:]]
def read_dens(nuclide, path):
    """Return the density sequence of *nuclide* from a dens file.

    :param nuclide: nuclide name (converted to a zamid for the lookup)
    :param path: path to the dens file
    :return: list of floats, or [] if the nuclide is not in the file

    The file handle is now closed deterministically (the previous version
    leaked it).
    """
    zamid = name_to_zamid(nuclide)
    dens_seq = []
    with open(path, 'r') as dens_file:
        for line in dens_file:
            # skip blank lines; the first column is the zamid
            if line != '\n' and line.split()[0] == zamid:
                dens_seq = [float(x) for x in line.split()[1:]]
                break
    return dens_seq
# cumulative dens
def get_cum_dens(nuclide, path):
    """Running (cumulative) sum of the density sequence of *nuclide*."""
    dens_seq = read_dens(nuclide, path)
    cum_dens = [dens_seq[0]]
    for dens in dens_seq[1:]:
        cum_dens.append(cum_dens[-1] + dens)
    return cum_dens
def convert_dens_seq_to_cum_dens_seq(dens_seq):
    """Convert a density sequence into its cumulative (running-sum) sequence.

    :param dens_seq: sequence of densities
    :return: list of the same length holding the running sums; an empty input
        now yields an empty list (the previous version raised IndexError on [])
    """
    cum_dens_seq = []
    for dens in dens_seq:
        previous = cum_dens_seq[-1] if cum_dens_seq else 0
        cum_dens_seq.append(previous + dens)
    return cum_dens_seq
def get_nucl_atomic_mass(nucl):
    """Atomic mass of *nucl*: the library value when available, otherwise the
    mass number A extracted from the zamid."""
    zamid = name_to_zamid(nucl)
    zaid = zamid[:-1]
    if zaid in d.default_atm_mass_lib:
        return d.default_atm_mass_lib[zaid]
    return int(get_zamid_a(zamid))
# calculate total mass density at certain step
def get_total_mass_density(path, cell, step):
    """Total mass density of *cell* at *step*: sum over all nuclides of
    number density times atomic mass over Avogadro's number.

    :param path: run directory containing the cell dens file
    :param cell: cell name
    :param step: macro-step index into each density sequence

    BUG FIX: the previous version opened the dens file here without ever
    using or closing the handle (resource leak); read_dens opens the file
    itself, so the extra open is simply removed.
    """
    nucl_name_list = read_dens_nucl(path, cell)
    dens_path = path + '/{}_dens'.format(cell)
    NA = d.NA
    total_mass_density = 0
    for nucl in nucl_name_list:
        dens = read_dens(nucl, dens_path)[step]
        M = get_nucl_atomic_mass(nucl)
        total_mass_density += dens * (M / NA)
    return total_mass_density
def get_pu_subseq_mat(path, cell, EFPD):
    """Density sub-sequences of the Pu isotopes of *cell* truncated at time
    *EFPD*, with a final point linearly interpolated at EFPD."""
    final_substep = find_substep_from_time(path, cell, EFPD)
    subdens_path = path + '/{}_subdens'.format(cell)
    time_subseq = read_time_seq(subdens_path)
    t_lo = time_subseq[final_substep]
    t_hi = time_subseq[final_substep + 1]
    pu_subseq_mat = []
    for name in d.Pu_isotopes_name:
        dens_subseq = read_dens(name, subdens_path)
        dens_lo = dens_subseq[final_substep]
        dens_hi = dens_subseq[final_substep + 1]
        # interpolate the density at exactly EFPD between the bracketing points
        dens_at_time = interpolation_between_two_points(
            [t_lo, dens_lo], [t_hi, dens_hi], EFPD)
        pu_subseq_mat.append(dens_subseq[:final_substep + 1] + [dens_at_time])
    return pu_subseq_mat
# cumulative plutonium production
def get_cum_pu_subseq_mat(path, cell, EFPD):
    """Cumulative density sub-sequences of the Pu isotopes of *cell* truncated
    at time *EFPD*, with a final point linearly interpolated at EFPD.

    :param path: run directory containing the subdens file
    :param cell: cell name
    :param EFPD: time (effective full-power days) at which to truncate

    BUG FIXES versus the previous version:
    - find_substep_from_time appends '/{cell}_subdens' to the path it is
      given (see get_pu_subseq_mat, which calls it first); the old code
      extended the path *before* the call, pointing it at a wrong file.
    - 'dens_sbuseq' was a misspelling of 'dens_subseq' (NameError at runtime).
    """
    name_list = ['Pu-238', 'Pu-239', 'Pu-240', 'Pu-241', 'Pu-242', 'Pu-243']
    final_substep = find_substep_from_time(path, cell, EFPD)
    subdens_path = path + '/{}_subdens'.format(cell)
    time_subseq = read_time_seq(subdens_path)
    t_before = time_subseq[final_substep]
    t_after = time_subseq[final_substep + 1]
    cum_pu_subseq_mat = []
    for name in name_list:
        dens_subseq = read_dens(name, subdens_path)
        dens_before = dens_subseq[final_substep]
        dens_after = dens_subseq[final_substep + 1]
        # interpolate the density at exactly EFPD between the bracketing points
        interpolated_dens = interpolation_between_two_points(
            [t_before, dens_before], [t_after, dens_after], EFPD)
        dens_subseq = dens_subseq[:final_substep + 1] + [interpolated_dens]
        cum_dens_subseq = convert_dens_seq_to_cum_dens_seq(dens_subseq)
        cum_pu_subseq_mat.append(cum_dens_subseq)
    return cum_pu_subseq_mat
# linear interpolation between two points
def interpolation_between_two_points(pair1, pair2, x):
    """Evaluate at *x* the straight line through pair1 and pair2 (each [x, y])."""
    x1, y1 = pair1
    x2, y2 = pair2
    slope = (y2 - y1) / (x2 - x1)
    intercept = (y1 * x2 - x1 * y2) / (x2 - x1)
    return slope * x + intercept
def read_xs_seq(nuclide, xs_name, path, cell):
    """Return the per-step sequence of cross section *xs_name* for *nuclide*.

    Reads the cell's xs library file under ``<path>/output_summary``.

    :param nuclide: nuclide name (e.g. ``'U-235'``)
    :param xs_name: reaction name (e.g. ``'fission'``)
    :param path: run directory containing the output_summary folder
    :param cell: cell name used to build the xs library file name
    :raises xs_name_not_found: if the nuclide has no entry for *xs_name*
    """
    path = path + '/output_summary/{}_xs_lib'.format(cell)
    zamid = name_to_zamid(nuclide)
    xs_name_found = 'no'
    # NOTE(review): the file handle is never closed, and the search assumes
    # the nuclide line exists -- for an unknown nuclide, line_index runs past
    # the end and lines[line_index] raises IndexError. Confirm whether
    # callers rely on that behavior before changing it.
    xs_file = open(path, 'r')
    lines = xs_file.readlines()
    # Search for the line
    line_index = 0
    for line in lines:
        if line != '\n':
            if line.split()[0] == nuclide:
                break
        line_index += 1
    # First line needs to be treated differently
    # (the nuclide header line carries its first reaction at column 2,
    # values from column 3 on)
    if lines[line_index].split()[2] == xs_name:
        xs_name_found = 'yes'
        xs_seq= [float(x) for x in lines[line_index].split()[3:]]
    # Continuation lines start with the zamid; reaction name is at column 1,
    # values from column 2 on
    xs_loop = line_index+1
    while lines[xs_loop].split()[0] == zamid:
        if lines[xs_loop].split()[1] == xs_name:
            xs_name_found = 'yes'
            xs_seq = [float(x) for x in lines[xs_loop].split()[2:]]
        xs_loop += 1
    if xs_name_found == 'no':
        raise xs_name_not_found("nuclide {} has no data for cross section {}".format(nuclide, xs_name))
    else:
        return xs_seq
def get_time_averaged_xs(nuclide, xs_name, path, cell):
    """Average of cross section *xs_name* for *nuclide*, weighted by the
    duration of each burnup step relative to the total irradiation time.

    :param nuclide: nuclide name
    :param xs_name: reaction name
    :param path: run directory
    :param cell: cell name
    """
    # NOTE(review): this builds a '<cell>_dens' path, but read_xs_seq appends
    # '/output_summary/<cell>_xs_lib' to whatever path it receives, so the xs
    # file ends up looked for under '<path>/<cell>_dens/output_summary/...'.
    # This looks wrong -- confirm against the on-disk layout.
    xs_lib_path = path + '/{}_dens'.format(cell)
    xs_seq = read_xs_seq(nuclide, xs_name, xs_lib_path, cell)
    dens_path = path + '/{}_dens'.format(cell)
    time_seq = read_time_seq(dens_path)
    tot_time = time_seq[-1]
    av_xs = 0
    # Weight each step's xs by the fraction of total time that step covers
    # (assumes len(time_seq) == len(xs_seq) + 1 -- TODO confirm).
    for i in range(len(xs_seq)):
        xs = xs_seq[i]
        time_bos = time_seq[i]
        time_eos = time_seq[i+1]
        time_coeff = (time_eos - time_bos)/tot_time
        av_xs += xs*time_coeff
    return av_xs
def get_time_averaged_flux(path, cell):
    """Average flux of *cell* weighted by the duration of each burnup step
    relative to the total irradiation time."""
    dens_path = path + '/{}_dens'.format(cell)
    flux_seq = read_flux(dens_path)
    time_seq = read_time_seq(dens_path)
    tot_time = time_seq[-1]
    av_flux = 0
    # flux_seq carries a leading zero entry, hence the i+1 offset
    for i in range(len(flux_seq) - 1):
        time_coeff = (time_seq[i + 1] - time_seq[i]) / tot_time
        av_flux += flux_seq[i + 1] * time_coeff
    return av_flux
def get_tot_xs(nuclide, path, cell):
    """Element-wise sum over steps of all available reaction cross sections
    of *nuclide*; reactions absent from the library are skipped."""
    xs_name_list = ['fission','(n,gamma)','(n,2n)','(n,3n)','(n,p)','(n,a)','(n,gamma)X']
    tot_xs_seq = []
    first_found = True
    for xs_name in xs_name_list:
        try:
            xs_seq = read_xs_seq(nuclide, xs_name, path, cell)
        except xs_name_not_found:
            # this nuclide has no data for this reaction; skip it
            continue
        if first_found:
            tot_xs_seq = xs_seq
            first_found = False
        else:
            tot_xs_seq = [a + b for a, b in zip(tot_xs_seq, xs_seq)]
    return tot_xs_seq
# This method lists all nuclides that are present in the xs lib
def read_xs_nucl(path, bucell):
    """Names of the nuclides appearing in the cell's xs library file."""
    xs_lib_path = path + '/output_summary/{}_xs_lib'.format(bucell)
    nucl_name_list = []
    with open(xs_lib_path) as xs_lib_file:
        for line in xs_lib_file:
            if line == '\n':
                continue
            tokens = line.split()
            # nuclide header lines start with a known element symbol
            if tokens[0].split('-')[0] in d.nuc_name_dic:
                nucl_name_list.append(tokens[0])
    return nucl_name_list
# make a list of all nuclides present in the dens_file
def read_dens_nucl(path, cell):
    """Names of the nuclides present in the cell's dens file."""
    dens_path = path + '/{}_dens'.format(cell)
    with open(dens_path, 'r') as dens_file:
        lines = dens_file.readlines()
    # density data starts on the 8th line; first column is the zamid
    nucl_zamid_list = [line.split()[0] for line in lines[7:]]
    return zamid_list_to_name_list(nucl_zamid_list)
def rank_nuclide_per_dens(bucell, step_list, path):
    """Write a 'ranked dens' file listing, for each requested step, the
    nuclides of *bucell* sorted by decreasing density.

    :param bucell: cell name
    :param step_list: macro-step indices to rank
    :param path: run directory containing output_summary/<bucell>_dens

    BUG FIX: the output loop indexed dens_list_per_step by the step *value*
    instead of its position in step_list, which crashed (IndexError) or
    ranked the wrong steps whenever step_list was not exactly [0, 1, 2, ...]
    (the sibling rank_nuclide_per_reac_rate already indexes by position).
    """
    dens_path = path + '/output_summary' + '/{}_dens'.format(bucell)
    with open(dens_path, 'r') as dens_file:
        lines = dens_file.readlines()
    dens_list_per_step = []
    for step in step_list:
        dens_dict = {}
        # Data starts at 8th line; column step+1 holds the density at 'step'
        for line in lines[7:]:
            tokens = line.split()
            dens_dict[tokens[0]] = float(tokens[step + 1])
        dens_list_per_step.append(
            sorted(dens_dict.items(), key=lambda kv: kv[1], reverse=True))
    txt = ''
    for step in step_list:
        txt += '{:<20}'.format(step)
    txt += '\n\n'
    for i in range(len(dens_list_per_step[0])):
        # index by position in step_list, not by the step value itself
        for pos in range(len(step_list)):
            dens_list = dens_list_per_step[pos]
            txt += '{:<8}{:<12.2E}'.format(dens_list[i][0], dens_list[i][1])
        txt += '\n'
    with open('ranked dens', 'w') as sorted_dens_file:
        sorted_dens_file.write(txt)
def rank_nuclide_per_reac_rate(bucell, step_list, path, file_name):
    """Write a '<file_name> ranked react' file ranking the nuclides of
    *bucell* by decreasing absorption reaction rate (density * absorption xs)
    for each requested step, together with each step's flux and the total
    absorption rate.

    :param bucell: cell name
    :param step_list: macro-step indices to rank
    :param path: run directory containing the output_summary folder
    :param file_name: prefix of the produced text file

    BUG FIX: the xs parsing stored a nuclide's accumulated absorption xs only
    when the *next* nuclide header line was reached, so the last nuclide in
    the file was always dropped from the ranking; it is now flushed after the
    loop.
    """
    dens_path = path + '/output_summary' + '/{}_dens'.format(bucell)
    xs_path = path + '/output_summary' + '/{}_xs_lib'.format(bucell)
    # Read densities (data starts at the 8th line of the dens file)
    with open(dens_path, 'r') as dens_file:
        lines = dens_file.readlines()
    dens_dict_per_step = []
    for step in step_list:
        dens_dict = {}
        for line in lines[7:]:
            tokens = line.split()
            dens_dict[zamid_to_name(tokens[0])] = float(tokens[step + 1])
        dens_dict_per_step.append(dens_dict)
    # Read absorption cross sections (data starts at the 8th line)
    with open(xs_path, 'r') as xs_file:
        lines = xs_file.readlines()
    xs_dict_per_step = []
    for step in step_list:
        xs_dict = {}
        count = 0
        for line in lines[7:]:
            if line == '\n':
                continue
            tokens = line.split()
            if tokens[0].split('-')[0] in d.nuc_name_dic:
                # Nuclide header line: flush the previous nuclide (if any)
                # and start accumulating the new one.
                if count != 0:
                    xs_dict[nucl_name] = abs_xs
                nucl_name = tokens[0]
                abs_xs = float(tokens[step + 3])
            else:
                # Continuation line: add this reaction's xs to the total.
                abs_xs += float(tokens[step + 2])
            count += 1
        # BUG FIX: flush the last nuclide read from the file.
        if count != 0:
            xs_dict[nucl_name] = abs_xs
        xs_dict_per_step.append(xs_dict)
    # Flux for each requested step (flux_seq starts with a leading zero)
    flux_seq = read_flux(dens_path)
    flux_per_step = [flux_seq[step + 1] for step in step_list]
    # Reaction rate = density * absorption xs, ranked in decreasing order
    sorted_reac_dict_per_step = []
    total_abs_per_step = []
    for pos in range(len(step_list)):
        dens_dict = dens_dict_per_step[pos]
        xs_dict = xs_dict_per_step[pos]
        reac_dict = {}
        for nucl in xs_dict:
            if nucl in dens_dict:
                reac_dict[nucl] = dens_dict[nucl] * xs_dict[nucl]
        sorted_reac_tuple = sorted(reac_dict.items(), key=lambda kv: kv[1], reverse=True)
        sorted_reac_dict_per_step.append(sorted_reac_tuple)
        total_abs_per_step.append(sum(rate for _, rate in sorted_reac_tuple))
    # Write the report
    txt = ''
    for step in step_list:
        txt += '{:<20}'.format(step)
    txt += '\n\n'
    for pos in range(len(step_list)):
        txt += 'flux={:<10.5E}'.format(flux_per_step[pos])
        txt += '\n'
    for pos in range(len(step_list)):
        txt += 'tot-abs={:<10.5E}'.format(total_abs_per_step[pos])
        txt += '\n\n'
    for i in range(len(sorted_reac_dict_per_step[0])):
        for pos in range(len(step_list)):
            reac_tuple_list = sorted_reac_dict_per_step[pos]
            txt += '{:<8}{:<12.2E}'.format(reac_tuple_list[i][0], reac_tuple_list[i][1])
        txt += '\n'
    with open('{} ranked react'.format(file_name), 'w') as sorted_reac_file:
        sorted_reac_file.write(txt)
def plot_matrix_from_compressed_matrix(path, step, cell):
    """Scatter-plot the sparsity patterns of the compressed xs and decay
    matrices of *cell* at *step* (black squares for xs, red crosses for decay)."""
    matrix_dir = path + '/step_{}'.format(step) + '/{}_cell'.format(cell) + '/matrix'
    with open(matrix_dir + '/xs_mat', 'r') as file_xs:
        lines_xs = file_xs.readlines()
    with open(matrix_dir + '/decay_mat', 'r') as file_decay:
        lines_decay = file_decay.readlines()
    plt.figure(1)
    last_row = len(lines_xs) - 1
    for row in range(len(lines_xs)):
        # Each line is '<zamid>|... : <col> <val>, <col> <val>, ...'
        # The last element after the final comma is empty and is dropped.
        xs_entries = lines_xs[row].split(':')[1].split(',')[:-1]
        xs_cols = [int(entry.split()[0]) for entry in xs_entries]
        # Rows are plotted top-down: value is the index counted from the last.
        xs_rows = [len(lines_xs) - row] * len(xs_cols)
        if row == last_row:
            plt.scatter(xs_cols, xs_rows, marker='s', color='k', s=4, label='cross section')
        else:
            plt.scatter(xs_cols, xs_rows, marker='s', color='k', s=4)
        decay_entries = lines_decay[row].split(':')[1].split(',')[:-1]
        decay_cols = [int(entry.split()[0]) for entry in decay_entries]
        decay_rows = [len(lines_decay) - row] * len(decay_cols)
        if row == last_row:
            plt.scatter(decay_cols, decay_rows, marker='+', color='r', s=4, label='decay')
        else:
            plt.scatter(decay_cols, decay_rows, marker='+', color='r', s=4)
    plt.legend()
    plt.show()
def plot_matrix_bysign_from_compressed_matrix(path, step, cell):
plt.style.use('dark_background')
path_to_xs = path +'/step_{}'.format(step) +'/{}_cell'.format(cell) +'/matrix/xs_mat'
path_to_decay = path +'/step_{}'.format(step) +'/{}_cell'.format(cell) +'/matrix/decay_mat'
file_xs = open(path_to_xs, 'r')
file_decay = open(path_to_decay, 'r')
lines_xs = file_xs.readlines()
lines_decay = file_decay.readlines()
size = 0.8
plt.figure(1)
count = 0
# x_vect = [i for i in range(len(lines))]
for i in range(len(lines_xs)):
line_xs = lines_xs[i]
line_decay = lines_decay[i]
zamid = line_xs.split('|')[0]
line_elt_xs = line_xs.split(':')[1]
elts_xs = line_elt_xs.split(',')[:-1] # Last element empty because of last coma in each line
current_line_xs = []
current_x_vect_xs = []
#elt_val_xs = len(lines_xs) - count # The value assigned is the index of the nuclide starting from the last
elt_val_xs = | |
<filename>gym_highway/modell/modell.py
import gym
import numpy as np
import matplotlib.pyplot as plt
import math
import copy
from gym_highway.modell.ego_vehicle import Egovehicle
from gym_highway.modell.environment_vehicle import Envvehicle, env_add_entry
class Modell:
def __init__(self, envdict):
self.envdict = envdict
self.egovehicle = None
self.highwaylength = self.envdict['length_forward'] + self.envdict['length_backward']
self.lanes = []
self.nextvehicle = []
for i in range(self.envdict['lane_count']):
self.lanes.append([])
self.prev_data = []
self.actual_data = []
self.id = 0
"""
self.log_list = []
self.log_cnt = 0
self.logs_in_file = 15
"""
self.log = Envvehicle(self.envdict)
def onestep(self, action):
"""
:param action: takes action for egovehicle
:return: success, cause
"""
# 5. NewBorn vehicles If lane density is smaller than desired
self.generate_new_vehicles()
self.random_new_des_speed()
# 1. Stepping the ego vehicle
vehiclecnt = len(self.lanes[self.egovehicle.laneindex])
egoveh_lane = self.lanes[self.egovehicle.laneindex]
vnext = None
for i in range(vehiclecnt):
if isinstance(egoveh_lane[i], Egovehicle):
pos = i
break
if (i + 1) <vehiclecnt:
vnext = egoveh_lane[i + 1]
self.egovehicle.step(action, vnext)
if self.egovehicle.vx < 10:
return False, 'Low speed'
# # perform lane change and collision check
# fine, cause = self.check_position()
# if not fine:
# return False, cause
# 2. Transpose everyone to set x of egovehicle to 0
offs = -self.egovehicle.x
for j in range(self.envdict['lane_count']):
lane = self.lanes[j]
for i in range(len(lane)):
lane[i].x = lane[i].x + offs
# 3. Stepping every other vehicle,
for i in range(self.envdict['lane_count']):
lane = self.lanes[i]
for j in range(len(lane)):
vehiclecnt = len(lane)
if j < vehiclecnt:
veh = lane[j]
if isinstance(veh, Envvehicle):
if (veh.skip == 0):
if j + 1 < vehiclecnt:
vnext = lane[j + 1]
else:
vnext = None
if j - 1 >= 0:
vbehind = lane[j - 1]
else:
vbehind = None
vright_a = None
vright_b = None
if i > 0:
lane_right = self.lanes[i - 1]
for k in range(len(lane_right)):
if lane_right[k].x > veh.x:
vright_a = lane_right[k]
break
for k in range(len(lane_right)-1, -1, -1): # from len(lane_right) to -1 with -1, 0 is the last!
if lane_right[k].x < veh.x:
vright_b = lane_right[k]
break
vleft_a = None
vleft_b = None
if i < self.envdict['lane_count'] - 1:
lane_left = self.lanes[i + 1]
for k in range(len(lane_left)):
if lane_left[k].x > veh.x:
vleft_a = lane_left[k]
break
for k in range(len(lane_left)-1, -1, -1):
if lane_left[k].x < veh.x:
vleft_b = lane_left[k]
break
veh.step(vnext, vbehind, vright_a, vright_b, vleft_a, vleft_b)
if veh.state == 'switch_lane_right':
oldlane = self.lanes[veh.oldlane]
for l in range(len(oldlane)):
if oldlane[l].ID == veh.ID:
oldlane[l].x = veh.x
oldlane[l].vx = veh.vx
if (veh.change_finished == 1):
veh.change_finished = 0
oldlane = self.lanes[veh.oldlane]
veh.state = 'in_lane'
for vehicle in oldlane:
if vehicle.ID == veh.ID:
oldlane.remove(vehicle)
#print('Removed in Step ID: ' + str(veh.ID))
break
#3.5 Insert vehicle in the other lane if lane switch has occurred
for i in range(self.envdict['lane_count']):
lane = self.lanes[i]
for j in range(len(lane)):
if j < len(lane):
veh = lane[j]
if isinstance(veh, Envvehicle):
if (veh.state == 'switch_lane_right') and (veh.change_needed == 1):
newlane = self.lanes[i - 1]
veh.skip = 1
ev = Envvehicle(self.envdict)
inserted = 0
vright_a = None
vright_b = None
if i > 0:
lane_right = self.lanes[i - 1]
for k in range(len(lane_right)): # search the vehicle ahead in the right lane
if lane_right[k].x > veh.x:
vright_a = lane_right[k]
break
for k in range(len(lane_right) - 1, -1, -1): # from len(lane_right) to -1 with -1, 0 is the last!
if lane_right[k].x < veh.x: # search the vehicle behind in the right lane
vright_b = lane_right[k]
break
if (vright_a is None) or ((vright_a.state != 'switch_lane_left') and
(vright_a.state != 'acceleration')):
if (vright_b is None) or ((vright_b.state != 'switch_lane_left') and
(vright_b.state != 'acceleration')):
for k in range(len(newlane)):
if (newlane[k].x > veh.x):
veh.change_needed = 0
ev=copy.copy(veh)
ev.skip = 0
newlane.insert(k, ev)
inserted = 1
break
if inserted == 0:
veh.change_needed = 0
ev = copy.copy(veh)
ev.skip = 0
newlane.insert(len(newlane), ev)
else:
veh.state = 'in_lane'
veh.change_needed = 0
veh.skip = 0
else:
veh.state = 'in_lane'
veh.change_needed = 0
veh.skip = 0
elif (veh.state == 'switch_lane_left') and (veh.change_needed == 1):
newlane = self.lanes[i + 1]
vleft_a = None
if i < self.envdict['lane_count']:
lane_left = self.lanes[i + 1]
for k in range(len(lane_left)):
if lane_left[k].x > veh.x:
vleft_a = lane_left[k]
break
if (vleft_a is None) or ((vleft_a.state != 'switch_lane_right')):
for k in range(len(newlane)):
if (newlane[k].x > veh.x):
veh.change_needed = 0
newlane.insert(k, veh)
break
oldlane = self.lanes[veh.oldlane]
for vehicle in oldlane:
if vehicle.ID == veh.ID:
oldlane.remove(vehicle)
break
elif not (vleft_a is None) and ((vleft_a.state == 'switch_lane_right')):
if ((vleft_a.x - vleft_a.length - veh.x) / 4) > veh.length:
for k in range(len(newlane)):
if (newlane[k].x > veh.x):
veh.change_needed = 0
newlane.insert(k, veh)
break
oldlane = self.lanes[veh.oldlane]
for vehicle in oldlane:
if vehicle.ID == veh.ID:
oldlane.remove(vehicle)
break
else:
veh.state = 'in_lane'
print('Acceleration left in MODEL ID: ' + str(veh.ID))
veh.change_needed = 0
elif (veh.change_finished == 1):
veh.change_finished = 0
oldlane = self.lanes[veh.oldlane]
veh.state = 'in_lane'
removed = 0
for vehicle in oldlane:
if vehicle.ID == veh.ID:
oldlane.remove(vehicle)
removed = 1
break
if removed == 0:
for m in range(self.envdict['lane_count']):
lane = self.lanes[m]
for n in range(len(lane)):
if (lane[n].ID == veh.ID) and (lane[n].state != 'in_lane'):
lane.remove(lane[n])
removed = 1
print('Removed for 2nd try ID: ' + str(veh.ID))
if removed == 0:
print('NOT removed ID: ' + str(veh.ID))
elif isinstance(veh, Egovehicle) and (veh.change_needed == 1):
inserted = 0
if veh.cmd == 'switch_lane_left':
if (i + 1) < self.envdict['lane_count']:
newlane = self.lanes[i + 1]
for k in range(len(newlane)):
if (newlane[k].x > veh.x):
newlane.insert(k, veh)
inserted = 1
break
if inserted == 0:
newlane.insert(len(newlane), veh)
oldlane = self.lanes[veh.oldlane]
for vehicle in oldlane:
if vehicle.ID == veh.ID:
oldlane.remove(vehicle)
break
veh.laneindex = veh.laneindex + 1
elif veh.cmd == 'switch_lane_right':
newlane = self.lanes[i - 1]
for k in range(len(newlane)):
if (newlane[k].x > veh.x):
newlane.insert(k, veh)
inserted = 1
break
if inserted == 0:
newlane.insert(len(newlane), veh)
oldlane = self.lanes[veh.oldlane]
for vehicle in oldlane:
if vehicle.ID == veh.ID:
oldlane.remove(vehicle)
break
veh.laneindex = veh.laneindex - 1
veh.change_needed = 0
# 4. Deleting vehicles out of range
for j in range(self.envdict['lane_count']):
lane = self.lanes[j]
for veh in lane:
if veh.x < -self.envdict['length_backward'] or veh.x > self.envdict['length_forward']:
if veh.state == 'switch_lane_right':
oldlane = self.lanes[veh.oldlane]
for vehicle in oldlane:
if vehicle.ID == veh.ID:
oldlane.remove(vehicle)
#print("ID: " + str(veh.ID) + " laneindex: " + str(veh.laneindex) + " oldlane: " + str(veh.oldlane))
try:
lane.remove(veh) #ValueError: list.remove(x): x not in list
except ValueError:
print("x not in list")
# 4.5 Recheck position
fine, cause = self.check_position()
if not fine:
return False, cause
""""
# 5. NewBorn vehicles If lane density is smaller than desired
self.generate_new_vehicles()
self.random_new_des_speed()
"""
"""
# 6. Save data
file = open(r'D:\file.txt', 'a')
file.write('New cycle\n')
for i in range(self.envdict['lane_count']):
lane = self.lanes[i]
for j in range(len(lane)):
lane_num = i
index = lane[j].laneindex
x = lane[j].x
y = lane[j].y
vx = lane[j].vx
id = lane[j].ID
if lane[j].x != 0:
st = lane[j].state
text = 'lane: '+str(lane_num)+' idx: '+str(index)+' ID: ' +str(id)+' x: '+ str(x)+\
' y: '+str(y)+' vx: '+str(vx)+' '+st+' skipped: '+ str(lane[j].skip) + '\n'
else:
text = 'lane: '+str(lane_num)+' idx: '+str(index)+' ID: '+str(id)+' x: '+str(x)+\
' y: '+str(y)+' vx: '+str(vx)+'\n'
file.write(text)
file.close()
"""
# 6. Save data
"""
self.log_cnt += 1
write = 'Step ' + str(self.log_cnt) + '\n'
for i in range(self.envdict['lane_count']):
lane = self.lanes[i]
for j in range(len(lane)):
lane_num = i
index = lane[j].laneindex
x = lane[j].x
y = lane[j].y
vx = lane[j].vx
id = lane[j].ID
if lane[j].x != 0:
st = lane[j].state
text = 'lane: '+str(lane_num)+' idx: '+str(index)+' ID: ' +str(id)+' x: '+ str(x)+\
' y: '+str(y)+' vx: '+str(vx)+' '+st+' skipped: '+ str(lane[j].skip) + '\n'
else:
text = 'lane: '+str(lane_num)+' idx: '+str(index)+' ID: '+str(id)+' x: '+str(x)+\
' y: '+str(y)+' vx: '+str(vx)+'\n'
write += text
self.log_list.append(write)
if self.log_cnt > self.logs_in_file:
self.log_list.pop(0)
self.save_log()
"""
write = ""
for i in range(self.envdict['lane_count']):
lane = self.lanes[i]
for j in range(len(lane)):
lane_num = i
index = lane[j].laneindex
x = lane[j].x
y = lane[j].y
vx = lane[j].vx
id = lane[j].ID
if lane[j].x != 0:
st = lane[j].state
text = 'lane: '+str(lane_num)+' idx: '+str(index)+' ID: ' +str(id)+' x: '+ str(x)+\
' y: '+str(y)+' vx: | |
# Copyright (c) 2012 <NAME> http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from math import ceil
import time
import boto
from boto.compat import json
import requests
class SearchServiceException(Exception):
    """Raised when the CloudSearch search service reports an error."""
    pass
class CommitMismatchError(Exception):
    """Raised when the result of a commit does not match what was submitted."""
    pass
class SearchResults(object):
    """One page of results returned by the Amazon CloudSearch search service.

    Wraps the raw response attributes: ``rid``, timing info, the matching
    documents (``docs``), the total hit count (``hits``) and, when requested,
    per-field facet counts flattened into ``facets``.
    """

    def __init__(self, **attrs):
        self.rid = attrs['info']['rid']
        # self.doc_coverage_pct = attrs['info']['doc-coverage-pct']
        self.cpu_time_ms = attrs['info']['cpu-time-ms']
        self.time_ms = attrs['info']['time-ms']
        self.hits = attrs['hits']['found']
        self.docs = attrs['hits']['hit']
        self.start = attrs['hits']['start']
        self.rank = attrs['rank']
        self.match_expression = attrs['match-expr']
        self.query = attrs['query']
        self.search_service = attrs['search_service']
        self.facets = {}
        if 'facets' in attrs:
            # Flatten each facet's constraint list into a {value: count} dict.
            # BUG FIX: use items() instead of the Python-2-only iteritems()
            # so the class also works on Python 3.
            for facet, values in attrs['facets'].items():
                self.facets[facet] = dict(
                    (x['value'], x['count']) for x in values['constraints'])
        # BUG FIX: force float division -- under Python 2, hits / real_size
        # was integer division, so ceil() could no longer round up and the
        # page count came out one too low.
        self.num_pages_needed = ceil(self.hits / float(self.query.real_size))

    def __len__(self):
        return len(self.docs)

    def __iter__(self):
        return iter(self.docs)

    def next_page(self):
        """Call Cloudsearch to get the next page of search results

        :rtype: :class:`boto.cloudsearch.search.SearchResults`
        :return: the following page of search results
        :raises StopIteration: if the last page has already been returned
        """
        if self.query.page <= self.num_pages_needed:
            self.query.start += self.query.real_size
            self.query.page += 1
            return self.search_service(self.query)
        else:
            raise StopIteration
class Query(object):
    """Container for the parameters of a single CloudSearch query.

    Holds the search expressions (``q``/``bq``), paging state and facet
    options, and serializes them to the flat request-parameter dict that the
    2011-02-01 search API expects (see :meth:`to_params`).
    """
    # Hard API limit on the number of results one request may return.
    RESULTS_PER_PAGE = 500
    def __init__(self, q=None, bq=None, rank=None,
                 return_fields=None, size=10,
                 start=0, facet=None, facet_constraints=None,
                 facet_sort=None, facet_top_n=None, t=None):
        self.q = q
        self.bq = bq
        self.rank = rank or []
        self.return_fields = return_fields or []
        self.start = start
        self.facet = facet or []
        self.facet_constraints = facet_constraints or {}
        self.facet_sort = facet_sort or {}
        self.facet_top_n = facet_top_n or {}
        self.t = t or {}
        self.page = 0
        self.update_size(size)
    def update_size(self, new_size):
        """Set the requested result count and derive the per-request page size.

        ``real_size`` is capped at RESULTS_PER_PAGE; a requested size of 0
        means "as many as possible", i.e. a full page.
        """
        self.size = new_size
        self.real_size = Query.RESULTS_PER_PAGE if (self.size >
            Query.RESULTS_PER_PAGE or self.size == 0) else self.size
    def to_params(self):
        """Transform search parameters from instance properties to a dictionary

        :rtype: dict
        :return: search parameters
        """
        params = {'start': self.start, 'size': self.real_size}
        if self.q:
            params['q'] = self.q
        if self.bq:
            params['bq'] = self.bq
        if self.rank:
            params['rank'] = ','.join(self.rank)
        if self.return_fields:
            params['return-fields'] = ','.join(self.return_fields)
        if self.facet:
            params['facet'] = ','.join(self.facet)
        # .items() instead of the Python-2-only .iteritems(): this module is
        # otherwise Python-3 compatible and iteritems() raises AttributeError
        # there.  Iterating an empty dict is a no-op, so the old truthiness
        # guards are unnecessary.
        for k, v in self.facet_constraints.items():
            params['facet-%s-constraints' % k] = v
        for k, v in self.facet_sort.items():
            params['facet-%s-sort' % k] = v
        for k, v in self.facet_top_n.items():
            params['facet-%s-top-n' % k] = v
        for k, v in self.t.items():
            params['t-%s' % k] = v
        return params
class SearchConnection(object):
def __init__(self, domain=None, endpoint=None):
self.domain = domain
self.endpoint = endpoint
if not endpoint:
self.endpoint = domain.search_service_endpoint
def build_query(self, q=None, bq=None, rank=None, return_fields=None,
size=10, start=0, facet=None, facet_constraints=None,
facet_sort=None, facet_top_n=None, t=None):
return Query(q=q, bq=bq, rank=rank, return_fields=return_fields,
size=size, start=start, facet=facet,
facet_constraints=facet_constraints,
facet_sort=facet_sort, facet_top_n=facet_top_n, t=t)
    def search(self, q=None, bq=None, rank=None, return_fields=None,
               size=10, start=0, facet=None, facet_constraints=None,
               facet_sort=None, facet_top_n=None, t=None):
        """
        Send a query to CloudSearch

        Each search query should use at least the q or bq argument to specify
        the search parameter. The other options are used to specify the
        criteria of the search.

        :type q: string
        :param q: A string to search the default search fields for.

        :type bq: string
        :param bq: A string to perform a Boolean search. This can be used to
            create advanced searches.

        :type rank: List of strings
        :param rank: A list of fields or rank expressions used to order the
            search results. A field can be reversed by using the - operator.
            ``['-year', 'author']``

        :type return_fields: List of strings
        :param return_fields: A list of fields which should be returned by the
            search. If this field is not specified, only IDs will be returned.
            ``['headline']``

        :type size: int
        :param size: Number of search results to specify

        :type start: int
        :param start: Offset of the first search result to return (can be used
            for paging)

        :type facet: list
        :param facet: List of fields for which facets should be returned
            ``['colour', 'size']``

        :type facet_constraints: dict
        :param facet_constraints: Use to limit facets to specific values
            specified as comma-delimited strings in a Dictionary of facets
            ``{'colour': "'blue','white','red'", 'size': "big"}``

        :type facet_sort: dict
        :param facet_sort: Rules used to specify the order in which facet
            values should be returned. Allowed values are *alpha*, *count*,
            *max*, *sum*. Use *alpha* to sort alphabetical, and *count* to sort
            the facet by number of available result.
            ``{'color': 'alpha', 'size': 'count'}``

        :type facet_top_n: dict
        :param facet_top_n: Dictionary of facets and number of facets to
            return.
            ``{'colour': 2}``

        :type t: dict
        :param t: Specify ranges for specific fields
            ``{'year': '2000..2005'}``

        :rtype: :class:`boto.cloudsearch.search.SearchResults`
        :return: Returns the results of this search

        The following examples all assume we have indexed a set of documents
        with fields: *author*, *date*, *headline*

        A simple search will look for documents whose default text search
        fields will contain the search word exactly:

        >>> search(q='Tim') # Return documents with the word Tim in them (but not Timothy)

        A simple search with more keywords will return documents whose default
        text search fields contain the search strings together or separately.

        >>> search(q='Tim apple') # Will match "tim" and "apple"

        More complex searches require the boolean search operator.

        Wildcard searches can be used to search for any words that start with
        the search string.

        >>> search(bq="'Tim*'") # Return documents with words like Tim or Timothy)

        Search terms can also be combined. Allowed operators are "and", "or",
        "not", "field", "optional", "token", "phrase", or "filter"

        >>> search(bq="(and 'Tim' (field author '<NAME>'))")

        Facets allow you to show classification information about the search
        results. For example, you can retrieve the authors who have written
        about Tim:

        >>> search(q='Tim', facet=['Author'])

        With facet_constraints, facet_top_n and facet_sort more complicated
        constraints can be specified such as returning the top author out of
        <NAME> and <NAME> who have a document with the word Tim in it.

        >>> search(q='Tim',
        ...     facet=['author'],
        ...     facet_constraints={'author': "'<NAME>','<NAME>'"},
        ...     facet_top_n={'author': 1},
        ...     facet_sort={'author': 'count'})
        """
        query = self.build_query(q=q, bq=bq, rank=rank,
                                 return_fields=return_fields,
                                 size=size, start=start, facet=facet,
                                 facet_constraints=facet_constraints,
                                 facet_sort=facet_sort,
                                 facet_top_n=facet_top_n, t=t)
        return self(query)
    def __call__(self, query):
        """Make a call to CloudSearch

        :type query: :class:`boto.cloudsearch.search.Query`
        :param query: A group of search criteria

        :rtype: :class:`boto.cloudsearch.search.SearchResults`
        :return: search results
        """
        url = "http://%s/2011-02-01/search" % (self.endpoint)
        params = query.to_params()
        r = requests.get(url, params=params)
        data = json.loads(r.content)
        # Thread the query and this connection through the payload so the
        # SearchResults can fetch subsequent pages via next_page().
        data['query'] = query
        data['search_service'] = self
        # NOTE(review): when 'messages' is present but contains no 'fatal'
        # entry, any 'error' key is ignored and results are returned anyway --
        # presumably intentional (non-fatal warnings), but worth confirming.
        if 'messages' in data and 'error' in data:
            for m in data['messages']:
                if m['severity'] == 'fatal':
                    raise SearchServiceException("Error processing search %s "
                                                 "=> %s" % (params, m['message']), query)
        elif 'error' in data:
            raise SearchServiceException("Unknown error processing search %s"
                                         % (params), query)
        return SearchResults(**data)
    def get_all_paged(self, query, per_page):
        """Get a generator to iterate over all pages of search results

        :type query: :class:`boto.cloudsearch.search.Query`
        :param query: A group of search criteria

        :type per_page: int
        :param per_page: Number of docs in each :class:`boto.cloudsearch.search.SearchResults` object.

        :rtype: generator
        :return: Generator containing :class:`boto.cloudsearch.search.SearchResults`
        """
        query.update_size(per_page)
        page = 0
        num_pages_needed = 0
        # The first iteration always runs (0 <= 0); the real page count comes
        # back with the first response and bounds the loop from then on.
        while page <= num_pages_needed:
            results = self(query)
            num_pages_needed = results.num_pages_needed
            yield results
            # Advance the result window for the next request.
            query.start += query.real_size
            page += 1
def get_all_hits(self, query):
"""Get a generator to iterate over all search results
Transparently handles the results paging from Cloudsearch
search results so even if you have many thousands of results
you can iterate over all results in a reasonably efficient
manner.
:type query: :class:`boto.cloudsearch.search.Query`
:param query: A group of search criteria
:rtype: generator
:return: All docs matching query
"""
| |
DQR) + r_rh * DHR - r_re_R*R - r_v * (1-zeta)
D1 = D + r_dth * (AD + DQD + DHD)
# Helper states
TH1 = TH + r_d * p_d * p_h * I
DVR1 = DVR + r_d * (1 - p_dth) * p_d * p_h * p_v * I - r_rv * DVR
DVD1 = DVD + r_d * p_dth * p_d * p_h * p_v * I - r_dth * DVD
DD1 = DD + r_dth * (DHD + DQD)
DT1 = DT + r_d * p_d * I
V1 = V + r_v -r_re_V*V
x1 = [S1, E1, I1, AR1, DHR1, DQR1, AD1, DHD1, DQD1,
R1, D1, TH1, DVR1, DVD1, DD1, DT1, V1]
return x1
# ------------------ X. Construct initial conditions
    def initial_states_func(self,k):
        """Build the 17-element initial state vector of the SEIR-variant model.

        ``k`` scales the initial exposed (E_0) and infectious (I_0) pools
        relative to detected active cases; it is later calibrated by
        ``fit_gamma0``.  ``p_dth0`` is chosen so that the first simulated
        death increment matches ``newdeaths_data_fit[0]``.

        NOTE(review): relies on module-level rate constants (``r_dth``) and on
        ``self.GLOBAL_PARAMS`` -- both defined outside this excerpt.
        """
        N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v = self.GLOBAL_PARAMS
        p_dth0 = self.newdeaths_data_fit[0]/(r_dth*PopulationCI) # Set p_dth0 to match D1-D0 to newdeaths_data_fit
        E_0 = PopulationCI / p_d * k
        I_0 = PopulationCI / p_d * k
        # Undetected compartments: total implied cases (CI/p_d) minus detected.
        UR_0 = (PopulationCI / p_d - PopulationCI) * (1 - p_dth0)
        DHR_0 = (PopulationCI * p_h) * (1 - p_dth0)
        DQR_0 = PopulationCI * (1 - p_h) * (1 - p_dth0)
        UD_0 = (PopulationCI / p_d - PopulationCI) * p_dth0
        DHD_0 = PopulationCI * p_h * p_dth0
        DQD_0 = PopulationCI * (1 - p_h) * p_dth0
        R_0 = PopulationR / p_d
        D_0 = PopulationD / p_d
        # Susceptibles are whatever remains of the population N.
        S_0 = N - (E_0 +I_0 +UR_0 +DHR_0 +DQR_0 +UD_0 +DHD_0 +DQD_0 +R_0 +D_0)
        # Helper (bookkeeping) states: hospitalised, ventilated, cumulative.
        TH_0 = PopulationCI * p_h
        DVR_0 = (PopulationCI * p_h * p_v) * (1 - p_dth0)
        DVD_0 = (PopulationCI * p_h * p_v) * p_dth0
        DD_0 = PopulationD
        DT_0 = PopulationI
        V_0 = 0
        x_init = [
            S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
            D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0
        ]
        return x_init
# Find k=k1,k2 that matches gamma_0 to 2.08 (R0=6 equivalent)
    def loss_gamma0(self,k):
        """Squared gap between the implied early transmission rate and the
        R0=6-equivalent target (gamma_0 = r_d*6 = 2.08).

        Runs the SEIR model forward over the first ``gamma_0_days`` days with
        gamma_t/p_dth backed out from smoothed observed cases/deaths, then
        penalizes the distance of mean gamma_t from the target.

        NOTE(review): unlike gamma_t_compute, the states unpacked before the
        loop (S_0, E_0, I_0, DHD_0, DQD_0) are never refreshed from x_1 inside
        the loop -- presumably intentional for targeting only the first days,
        but worth confirming.
        """
        newcases = np.array(self.newcases_data_fit)
        newdeaths = np.array(self.newdeaths_data_fit)
        # 21-day moving average to suppress weekday reporting artifacts.
        newcases_sm = uniform_filter1d(newcases, size=21, mode='nearest')
        newdeaths_sm = uniform_filter1d(newdeaths, size=21, mode='nearest')
        gamma_t_vec = []
        x_init = self.initial_states_func(k)
        (S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
         D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0) = x_init
        newcases_sm2 = np.append(newcases_sm, newcases_sm[-2:]) # Extend the list for forward projection below
        newdeaths_sm2 = np.append(newdeaths_sm, newdeaths_sm[-1])
        x_0 = x_init.copy()
        for t in range(self.gamma_0_days): # Target first n days
            # Invert the discrete case/death equations to recover gamma_t, p_dth.
            gamma_t = (newcases_sm2[t+2]/(r_d*p_d) - (1-r_d)**2 *I_0 - r_i*(2-r_d-r_i)*E_0 )*self.N/(r_i*S_0*I_0)
            p_dth = (newdeaths_sm2[t+1] - r_dth*(1-r_dth)*(DHD_0 + DQD_0))/(r_dth*r_d*p_d*I_0)
            gamma_t = np.clip(gamma_t, 0.01, 10)
            p_dth = np.clip(p_dth,0,1) # Probability limit [0,1]
            x_1 = self.step_seir(t, x_0, gamma_t, p_dth)
            x_0 = x_1
            gamma_t_vec.append(gamma_t)
        gamma_0 = np.mean(gamma_t_vec)
        loss = (gamma_0 - (r_d*6) )**2 # gamma_0 equivalent to R0=6 is 2.08
        return loss
def fit_gamma0(self):
output = dual_annealing(
self.loss_gamma0,
x0 = [5],
bounds = [(1,50)],
)
k_star = output.x
return k_star
    def get_initial_conditions(self):
        """Return the initial SEIR state vector, using a cached k* if available.

        Prefers a pre-computed k* from ``../params/param_fixed/kstar.csv``
        (column = this country's ISO2 code); otherwise runs the expensive
        :meth:`fit_gamma0` calibration.
        """
        if Path(f'../params/param_fixed/kstar.csv').exists():
            df = pd.read_csv(f'../params/param_fixed/kstar.csv')
            kstar = df[self.iso2].values[0]
        else:
            kstar = self.fit_gamma0()[0] # find kstar that matches gamma_0 to target
        x_init = self.initial_states_func(kstar)
        return x_init
# -------------------- x. Implied gamma_t and pdth_t in-sample -------------------
    def gamma_t_compute(self):
        """Back out the in-sample transmission rate gamma_t and death
        probability pdth_t implied by observed (smoothed) cases and deaths.

        Walks the SEIR model forward one day at a time, at each step inverting
        the discrete case/death update equations to recover the gamma_t and
        p_dth that reproduce the next observation, then stepping the model
        with those values.  Results and selected state paths are stored on
        ``self.df2`` / ``self`` for later fitting.

        :return: tuple ``(gamma_t_vec, p_dth_vec)`` of in-sample estimates.
        """
        newcases = np.array(self.newcases_data_fit)
        newdeaths = np.array(self.newdeaths_data_fit)
        # 21-day moving average to suppress weekday reporting artifacts.
        newcases_sm = uniform_filter1d(newcases, size=21, mode='nearest')
        newdeaths_sm = uniform_filter1d(newdeaths, size=21, mode='nearest')
        gamma_t_vec = []
        p_dth_vec = []
        x_init = self.get_initial_conditions()
        S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0, R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0 = x_init
        S_vec = [S_0]
        E_vec = [E_0]
        I_vec = [I_0]
        DT_vec = [DT_0]
        DD_vec = [DD_0]
        DHR_vec = [DHR_0]
        DHD_vec = [DHD_0]
        newcases_sm2 = np.append(newcases_sm, newcases_sm[-2:]) # Extend the list for forward projection below
        newdeaths_sm2 = np.append(newdeaths_sm, newdeaths_sm[-1])
        x_0 = x_init.copy()
        for t in range(len(newcases)):
            # Work backwards to compute 'exact' gamma_t and p_dth
            gamma_t = (newcases_sm2[t+2]/(r_d*p_d) - (1-r_d)**2 *I_0 - r_i*(2-r_d-r_i)*E_0 )*self.N/(r_i*S_0*I_0)
            p_dth = (newdeaths_sm2[t+1] - r_dth*(1-r_dth)*(DHD_0 + DQD_0))/(r_dth*r_d*p_d*I_0)
            gamma_t = np.clip(gamma_t, 0.01, 10)
            p_dth = np.clip(p_dth,0,1) # Probability limit [0,1]
            x_1 = self.step_seir(t, x_0, gamma_t, p_dth)
            # Refresh the unpacked state so the next inversion uses t+1 values.
            S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0, R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0 = x_1
            x_0 = x_1
            gamma_t_vec.append(gamma_t)
            p_dth_vec.append(p_dth)
            S_vec.append(S_0)
            I_vec.append(I_0)
            E_vec.append(E_0)
            DT_vec.append(DT_0)
            DD_vec.append(DD_0)
            DHR_vec.append(DHR_0)
            DHD_vec.append(DHD_0)
        self.df2['gamma_t'] = gamma_t_vec
        self.df2['pdth_t'] = p_dth_vec
        self.S_vec = S_vec # In-sample estimates, useful for phi calculation later on
        self.I_vec = I_vec
        self.DHR_vec = DHR_vec # For fitting death probability
        self.DHD_vec = DHD_vec
        # Hospitalised share of population (in %), aligned to df2's length.
        HD_HR = np.array(self.DHR_vec) + np.array(self.DHD_vec)
        self.df2['HD_HR'] = 100*HD_HR[:-1]/self.N
        # gamma_t_sm = uniform_filter1d(gamma_t_vec, size=6, mode='nearest')
        # self.df2['gamma_sm'] = gamma_t_sm
        return gamma_t_vec, p_dth_vec
# -------------------- x. Estimating the model -----------
def gamma_func(self, params):
m_t = self.df2['google_smooth'].values
tvec = np.arange(len(m_t))
beta0, beta1 = params
gamma_vec = beta0*np.exp(beta1* m_t)
return gamma_vec
def loss_betas(self, params) -> float:
gamma_model = self.gamma_func(params)
loss = sum( (self.df2['gamma_t'].values[:len(gamma_model)] - gamma_model)**2 )
return loss
    def fitmodel(self):
        """Estimate the full transmission model in three stages.

        A. Fit the mobility coefficients (beta0, beta1) by simulated annealing.
        B. Fit AR(1)/AR(2) models to the smoothed residual gamma_tilde.
        C. Calibrate the policy weight phi, either empirically from the last
           observation or from an exogenous value.

        :return: numpy array [beta0, beta1, rho1, rho2_1, rho2_2]; also stores
            best_betas / best_rho1 / best_rho2 / best_params / phi on self.
        """
        # A. Fit beta0 and beta1
        x0 = self.default_init_single
        bounds_0 = self.default_bounds_single
        output = dual_annealing(
            self.loss_betas,
            x0 = x0,
            bounds = bounds_0,
        )
        best_betas = output.x
        self.best_betas = best_betas
        # B. Fit the residual (gamma_tilde) to AR models
        m_t = self.df2['google_smooth'].values
        tvec = np.arange(len(self.df2))
        beta0, beta1 = self.best_betas
        self.df2['gamma_mob'] = beta0*np.exp(beta1* m_t)
        self.df2['gamma_tilde'] = self.df2['gamma_t'] - self.df2['gamma_mob']
        # Smooth the residual; the AR models are fit to the smoothed series.
        self.df2['gamma_tilde_sm'] = uniform_filter1d(self.df2['gamma_tilde'],
                                                      size=21, mode='reflect')
        self.df2['gamma_tilde_resid'] = self.df2['gamma_tilde'] - self.df2['gamma_tilde_sm']
        y = self.df2['gamma_tilde_sm']
        self.df2['gamma_tilde_sm_lag1'] = self.df2['gamma_tilde_sm'].shift(1) # No constant term
        self.df2['gamma_tilde_sm_lag2'] = self.df2['gamma_tilde_sm'].shift(2)
        reg_AR1 = sm.OLS(y,self.df2['gamma_tilde_sm_lag1'],missing='drop').fit()
        reg_AR2 = sm.OLS(y,self.df2[['gamma_tilde_sm_lag1','gamma_tilde_sm_lag2']],missing='drop').fit()
        best_rho1 = reg_AR1.params[0]
        best_rho1 = np.clip(best_rho1, 0.1, 0.99) #Assume stationarity
        # NOTE(review): the AR(2) coefficients are not clipped for
        # stationarity like rho1 -- confirm this is intentional.
        best_rho2 = reg_AR2.params[:]
        best_params = np.array([beta0, beta1, best_rho1, best_rho2[0], best_rho2[1]])
        self.best_rho1 = best_rho1
        self.best_rho2 = best_rho2
        self.best_params = best_params
        # C. Empirically fit phi for optimal policy to last observation
        if self.phi_option == 'fit':
            m = self.df2['google_smooth'][-15:].mean() # Take average of last 15 days to smooth volatility
            s = self.S_vec[-1]/self.N
            i = self.I_vec[-1]/self.N
            gamma_tilde = self.df2['gamma_tilde'][-1]
            pdth = self.df2['pdth_t'][-1]
            pdth = max(pdth, self.pdth_min) # Get around cases where pdth=0 for countries with very few cases
            # First-order condition of the quadratic policy problem, evaluated
            # at the observed mobility level m.
            LHS1 = pdth*r_d*i*s*(beta0*beta1*np.exp(beta1*m))
            LHS2 = pdth*r_d*i*(1 - r_d + s*(gamma_tilde + beta0*np.exp(beta1*m)))
            phi = -(LHS1 * LHS2)/m
            self.phi = max(phi, self.phi_min)
        elif self.phi_option == 'exo':
            self.phi = self.phi_exo
        return best_params
# ------------------ x. Forecasts ---------------------------
def step_gamma_tilde(self, gamma_tilde_lag1, gamma_tilde_lag2, model='AR1'):
if model =='AR1':
return self.best_rho1*gamma_tilde_lag1
elif model =='AR2':
return self.best_rho2[0]*gamma_tilde_lag1 + self.best_rho2[1]*gamma_tilde_lag2
    def mobility_choice(self,x,gamma_tilde,pdth):
        """Choose the mobility level under the configured policy rule.

        :param x: current SEIR state vector (x[0]=S, x[1]=E, x[2]=I, ...).
        :param gamma_tilde: current transmission residual.
        :param pdth: current death probability.
        :return: mobility level (<= 0 means reduction vs. baseline).

        NOTE(review): if ``self.policy`` matches none of the four branches,
        ``mob`` is unbound and the final return raises NameError -- confirm
        the policy value is validated upstream.
        """
        if self.policy == 'constant':
            mob = self.poparam_constant
        elif self.policy == 'linear-I': # Respond linearly to infection level
            mob = self.poparam_linear_I[0] + self.poparam_linear_I[1]*x[2]
        elif self.policy == 'linear-dI': # Respond to new infections
            dI = r_i*x[1] - r_d*x[2] # x[1]=E, x[2]=I
            mob = self.poparam_linear_dI[0] + self.poparam_linear_dI[1]*dI
        elif self.policy == 'optim': # Analytical optimal policy based on simplified model and quadratic losses
            beta0 = self.best_params[0]
            beta1 = self.best_params[1]
            phi = self.phi
            s = x[0]/self.N
            i = x[2]/self.N
            # Grid search over mobility in [-1, 0] for the point where the
            # marginal death cost (LHS) equals the marginal mobility cost (RHS).
            m_set = np.linspace(-1,0,101)
            RHS = -phi*m_set
            LHS1 = pdth*r_d*i*s*(beta0*beta1*np.exp(beta1*m_set))
            LHS2 = pdth*r_d*i*(1 - r_d + s*(gamma_tilde + beta0*np.exp(beta1*m_set)))
            LHS = LHS1 * LHS2
            m_id = np.argmin(np.abs(RHS-LHS))
            mob = m_set[m_id]
        return mob
def fatality_factor(self,V): # Factor to adjust 'base' fatality prob
idx = (f_table[self.iso2]['vaccine_%'] - V/self.N).abs().argmin() # Find idx to look up in fatality table
factor = f_table[self.iso2]['fatality_ratio'][idx]
return factor
def sim_seir(self):
df2 = self.df2
ix = pd.date_range(start=df2.index[0], end=default_maxT, freq='D') # Expand time-sample, to include forecast later
df3 = df2.reindex(ix)
x_init = self.get_initial_conditions()
x_data = np.array(x_init)
gamma_tilde_fc = self.df2['gamma_tilde'].values
gamma_tilde_sm_fc = self.df2['gamma_tilde_sm'].values
pdth_t_targ = [] # Death prob when vaccines are targeted
pdth_t_base = [] # Base death prob if vaccines are given randomly
pdth_t_fc = self.df2['pdth_t'].values
pdth_t_base_fc = pdth_t_fc.copy()
gamma_mob_fc = self.df2['gamma_mob'].values
mob_fc = self.df2['google_smooth'].values
# Load parameters
if hasattr(self, 'best_params'):
beta0, beta1, rho, rhos_1, rhos_2 = self.best_params
else:
df_param = pd.read_csv(f'../params/{param_load_folder}/param_est.csv')
beta0, beta1, rho, rhos_1, rhos_2 = df_param[self.iso2]
for t in range(self.maxT):
factor = self.fatality_factor(x_init[-1])
eta = self.target_weight
if t<len(self.df2): # In sample
pdth_t = pdth_t_fc[t]
pdth_base = pdth_t/(eta*factor + 1-eta)
pdth_targ = factor*pdth_base
# if t==len(self.df2): # Parse pdth_base of hospitalised/N
# y = pdth_t_base
# | |
import re
import traceback
from pathlib import Path
from unittest.mock import Mock, patch, call
from math import sqrt, isclose
import pytest
import sys
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QColor, QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QLabel
from PyQt5.QtWidgets import QHBoxLayout
import pyqt_test_sandals # for patching
from pyqt_test_sandals import check_widget_snapshot, ImgDiffer, cleanup_check_files
def non_existent_path(name: str):
    """Return a Path for *name* that is guaranteed not to exist, deleting any
    existing file of that name first."""
    target = Path(name)
    if target.exists():
        target.unlink()
    assert not target.exists()
    return target
def eq_portions(actual: str, expected: str):
    """
    Compare whether actual matches portions of expected. The portions to ignore are of two types:
    - ***: ignore anything in between the left and right portions, including empty
    - +++: ignore anything in between left and right, but non-empty

    :param actual: string to test
    :param expected: expected string, containing at least one of the two patterns
    :return: a list of the portions ignored; if empty, it means there is no match.

    >>> eq_portions('', '+++aaaaaa***ccccc+++eeeeeee+++')
    ()
    >>> eq_portions('_1__aaaaaa__2__ccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++')
    ('_1__', '__2__', '_3__', '_4_')
    >>> eq_portions('_1__aaaaaaccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++')
    ('_1__', '', '_3__', '_4_')
    >>> eq_portions('_1__aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee+++')
    ()
    >>> eq_portions('aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee')
    ()
    >>> eq_portions('aaaaaa_1__ccccc__2_eeeeeee', '***aaaaaa***ccccc+++eeeeeee***')
    ('', '_1__', '__2_', '')
    >>> eq_portions('aaaaaa___ccccc___eeeeeee', '***aaaaaa')
    ()
    >>> eq_portions('aaaaaa___ccccc___eeeeeee', 'aaaaaa')
    Traceback (most recent call last):
    ...
    ValueError: The 'expected' argument must contain at least one *** OR +++
    """
    # Escape the whole template, then look for the (doubly escaped) wildcard
    # markers inside the escaped text and swap them for real capture groups.
    pattern = re.escape(expected)
    token_any = re.escape('\\*' * 3)       # '***' after re.escape
    token_nonempty = re.escape('\\+' * 3)  # '+++' after re.escape
    if not (re.search(token_any, pattern) or re.search(token_nonempty, pattern)):
        raise ValueError("The 'expected' argument must contain at least one *** OR +++")
    pattern = re.sub(token_nonempty, '(.+)', pattern)
    pattern = re.sub(token_any, '(.*)', pattern)
    match = re.fullmatch(pattern, actual)
    return match.groups() if match else ()
def check_log_format(log_all_args, expected):
    """
    Assert that log arguments would form the expected message
    :param log_all_args: the arguments given to the log.info/warn/debug/error
        call (a mock ``call`` object; ``log_all_args[0]`` is its positional
        argument tuple: format string followed by %-args)
    :param expected: the expected output of the log for those arguments; may
        contain the *** / +++ wildcards understood by :func:`eq_portions`
    """
    # NOTE(review): partial_eq below is never called -- eq_portions now does
    # the wildcard comparison; this looks like dead code left over from an
    # earlier implementation and could be deleted.
    def partial_eq(actual, expect, ignore='***'):
        # consume `actual` piece by piece, asserting each literal part of
        # `expect` (split on the wildcard) appears in order
        expected_parts = expect.split(ignore)
        exp_part = expected_parts.pop(0)
        if exp_part:
            assert actual.startswith(exp_part)
            actual = actual[len(exp_part):]
        while expected_parts:
            exp_part = expected_parts.pop(0)
            if not exp_part:
                break
            assert exp_part
            found = actual.find(exp_part)
            assert found > 0
            actual = actual[found + len(exp_part):]
    # Render the message exactly as the logging module would (%-interpolation).
    log_args = log_all_args[0]
    actual_msg = log_args[0] % log_args[1:]
    # NOTE(review): eq_portions raises ValueError when `expected` has no
    # wildcard, so the fallback branch only works for wildcarded patterns.
    assert actual_msg == expected or eq_portions(actual_msg, expected)
# from doctest import run_docstring_examples
# run_docstring_examples(eq_portions, globals())
class TestCaseChecker:
    """Behavioral tests for check_widget_snapshot: reference generation,
    stale-result cleanup, tolerance handling, custom differs and retrying
    on slow widgets."""
    @pytest.fixture(autouse=True)
    def qt_app(self):
        """Provide a QApplication for every test and surface exceptions
        raised inside Qt slots (Qt otherwise swallows them)."""
        self.app = QApplication.instance()
        if self.app is None:
            self.app = QApplication([])
        def except_hook(*args):
            traceback.print_exception(*args)
        prev_hook = sys.excepthook
        sys.excepthook = except_hook
        yield self.app
        # restore the original hook even if the test failed
        sys.excepthook = prev_hook
    def test1_gen_first_image(self):
        """First run with no reference image creates one and nothing else."""
        ref_image_path = non_existent_path('test1_gen_first_image.png')
        files_before = set(Path(__file__).parent.glob('*'))
        widget = QLabel('test')
        check_widget_snapshot(widget, __file__, str(ref_image_path))
        assert ref_image_path.exists()
        files_after = set(Path(__file__).parent.glob('*'))
        assert files_after.difference(files_before) == set([ref_image_path.resolve()])
        ref_image_path.unlink()
    def test_log_and_ref_folder_instead_of_file(self):
        """A folder (instead of a module path) is accepted, and reference
        generation is logged with the expected message."""
        ref_image_path = non_existent_path('test_ref_folder_instead_of_file.png')
        folder = Path(__file__).parent
        files_before = set(folder.glob('*'))
        widget = QLabel('test')
        log = Mock()
        check_widget_snapshot(widget, str(folder), str(ref_image_path), log=log)
        assert ref_image_path.exists()
        assert log.info.call_args == call('Generating ref snapshot %s in %s for widget %s',
                                          ref_image_path.name,
                                          folder, widget)
        expected_log = 'Generating ref snapshot test_ref_folder_instead_of_file.png in +++\\tests for widget ***'
        check_log_format(log.info.call_args, expected_log)
        files_after = set(folder.glob('*'))
        assert files_after.difference(files_before) == set([ref_image_path.resolve()])
        assert files_after.issuperset(files_before)
        ref_image_path.unlink()
    def test2_old_results(self):
        """Stale *_actual/*_diff files survive delete_old_results=False and
        are removed on the default (True) pass."""
        ref_image_path = non_existent_path('test2_old_results.png')
        # create two bogus files that pretend to be previous results:
        actual_img_path = Path(ref_image_path.stem + '_actual.png')
        actual_img_path.write_text('')
        diff_img_path = Path(ref_image_path.stem + '_diff.png')
        diff_img_path.write_text('')
        assert actual_img_path.exists()
        assert diff_img_path.exists()
        # check widget snapshot, with delete-old = False, verify results files still there:
        files_before = set(Path(__file__).parent.glob('*'))
        widget = QLabel('test')
        assert check_widget_snapshot(widget, __file__, ref_image_path.stem, delete_old_results=False)
        ref_image_path.unlink()
        assert actual_img_path.exists()
        assert diff_img_path.exists()
        files_after = set(Path(__file__).parent.glob('*'))
        assert files_after == files_before
        # check it again, this time results removed:
        actual_img_path_str = actual_img_path.resolve()
        diff_img_path_str = diff_img_path.resolve()
        assert check_widget_snapshot(widget, __file__, ref_image_path.stem)
        ref_image_path.unlink()
        assert not actual_img_path.exists()
        assert not diff_img_path.exists()
        files_after = set(Path(__file__).parent.glob('*'))
        assert files_before.difference(files_after) == set([actual_img_path_str, diff_img_path_str])
        # NOTE(review): the next assert compares files_before with itself and
        # is always true -- presumably meant files_before.issuperset(files_after).
        assert files_before.issuperset(files_before)
    def test_equal_images(self):
        """Re-checking an unchanged widget succeeds and leaves no new files."""
        ref_image_path = non_existent_path('test_equal_images.png')
        # generate reference:
        files_before = set(Path(__file__).parent.glob('*'))
        widget = QLabel('test')
        assert check_widget_snapshot(widget, __file__, ref_image_path.stem)
        # re-check: should find images are identical:
        assert check_widget_snapshot(widget, __file__, ref_image_path.stem)
        assert check_widget_snapshot(widget, __file__, ref_image_path.stem)
        ref_image_path.unlink()
        files_after = set(Path(__file__).parent.glob('*'))
        assert files_before == files_after
    def test_unequal_images_diff_less_than_tol(self, mocker):
        """A differ that reports no diff (within tolerance) passes and only logs."""
        ref_image_path = non_existent_path('test_unequal_images_diff_less_than_tol.png')
        class ImgDiffer_SameWithinTol:
            # stub differ: get_diff() returning None means "within tolerance"
            def get_diff(self, image, ref_image):
                return None
            def report(self):
                return "report"
        # generate reference:
        files_before = set(Path(__file__).parent.glob('*'))
        widget = QLabel('test')
        assert check_widget_snapshot(widget, __file__, ref_image_path.stem)
        # pretend label has changed, but less than tolerance (get_diff() returns None):
        widget = QLabel('test2')
        widget.setObjectName('label')
        mock_log = Mock()
        # freeze perf_counter so the logged elapsed time is a deterministic 123 s
        mock_timer = mocker.patch.object(pyqt_test_sandals, 'perf_counter', side_effect=[0, 123])
        assert check_widget_snapshot(widget, __file__, ref_image_path.stem,
                                     img_differ=ImgDiffer_SameWithinTol(), log=mock_log)
        ref_image_path.unlink()
        assert mock_log.info.mock_calls == [
            call('Widget %s vs ref %s in %s (%.2f sec):',
                 'label', ref_image_path.name, ref_image_path.parent.resolve(), 123),
            call('    report')
        ]
        expect = 'Widget label vs ref test_unequal_images_diff_less_than_tol.png in +++\\tests (123.00 sec):'
        check_log_format(mock_log.info.call_args_list[0], expect)
        # confirm that no results files were created:
        files_after = set(Path(__file__).parent.glob('*'))
        assert files_after == files_before
    def test_unequal_images(self, mocker):
        """A differ that returns a diff image fails the check and saves the
        actual and diff images next to the reference."""
        ref_image_path = non_existent_path('test_unequal_images.png')
        class ImgDiffer:
            # stub differ: a non-None return means "beyond tolerance";
            # intentionally shadows the imported ImgDiffer in this method
            def get_diff(self, image, ref_image):
                return QImage(image)
            def report(self):
                return "report"
        # generate reference:
        widget = QLabel('test')
        assert check_widget_snapshot(widget, __file__, ref_image_path.stem)
        # pretend label has changed, but less than tolerance (get_diff() returns None):
        widget = QLabel('test2')
        widget.setObjectName('label')
        mock_log = Mock()
        files_before = set(Path(__file__).parent.glob('*'))
        # freeze perf_counter so the logged elapsed time is deterministic
        mock_timer = mocker.patch.object(pyqt_test_sandals, 'perf_counter', side_effect=[0, 123])
        assert not check_widget_snapshot(widget, __file__, ref_image_path.stem,
                                         img_differ=ImgDiffer(), log=mock_log)
        assert mock_log.method_calls == [
            call.info('Widget %s vs ref %s in %s (%.2f sec):', 'label', ref_image_path.name,
                      ref_image_path.parent.resolve(), 123),
            call.info('    report'),
            call.warn('    Snapshot has changed beyond tolerances, saving actual and diff images to folder %s:',
                      ref_image_path.parent.resolve()),
            call.warn('    Saving actual image to %s', 'test_unequal_images_actual.png'),
            call.warn('    Saving diff image (White - |ref - widget|) to %s', 'test_unequal_images_diff.png')
        ]
        check_log_format(mock_log.info.call_args_list[0], 'Widget label vs ref test_unequal_images.png in ***\\tests (123.00 sec):')
        # confirm that the two result files (and nothing else) were created:
        files_after = set(Path(__file__).parent.glob('*'))
        ref_image_path.unlink()
        assert files_after.issuperset(files_before)
        actual_img_path = Path('test_unequal_images_actual.png')
        diff_img_path = Path('test_unequal_images_diff.png')
        assert files_after.difference(files_before) == set([actual_img_path.resolve(), diff_img_path.resolve()])
        actual_img_path.unlink()
        diff_img_path.unlink()
    def test_custom_differ(self):
        """Custom differ instances are used as-is; extra kwargs are only valid
        with the default differ class."""
        ref_image_path = non_existent_path('test_custom_differ.png')
        # generate reference:
        widget = QLabel('test')
        assert check_widget_snapshot(widget, __file__, ref_image_path.stem)
        # pretend label has changed, but less than tolerance (get_diff() returns None):
        widget = QLabel('test2')
        widget.setObjectName('label')
        # first check that kwargs when custom differ given causes exception:
        img_differ = Mock()
        img_differ.get_diff.return_value = None
        pytest.raises(ValueError,
                      check_widget_snapshot, widget, __file__, ref_image_path.stem,
                      img_differ=img_differ, kwarg1=1, kwarg2=2)
        assert check_widget_snapshot(widget, __file__, ref_image_path.stem, img_differ=img_differ)
        assert len(img_differ.method_calls) == 1
        assert img_differ.get_diff.call_count == 1
        # without img_differ, the default ImgDiffer class is instantiated with
        # no arguments:
        img_differ_class = Mock()
        img_differ_class.return_value.get_diff.return_value = None
        with patch.object(pyqt_test_sandals, 'ImgDiffer', img_differ_class) as mock_default_differ:
            assert check_widget_snapshot(widget, __file__, ref_image_path.stem)
            assert mock_default_differ.call_args_list == [call()]
        ref_image_path.unlink()
    def test_slow_widget_elapses(self, qt_app):
        """If the widget only matches after try_sec has expired, the check fails."""
        ref_image_path = non_existent_path('test_slow_widget_elapses.png')
        widget_ref = QLabel('test')
        widget_actual = QLabel('test2')
        assert check_widget_snapshot(widget_ref, __file__, ref_image_path.stem)
        def check_fails_on_elapse():
            assert not check_widget_snapshot(widget_actual, __file__, ref_image_path.stem, try_sec=1)
        def change_actual():
            # makes the widget match only after the 1-second budget is spent
            widget_actual.setText('test')
        QTimer.singleShot(0, check_fails_on_elapse)
        QTimer.singleShot(1000, change_actual)
        QTimer.singleShot(1100, qt_app.quit)
        qt_app.exec()
        cleanup_check_files(fig_name=ref_image_path.stem)
        ref_image_path.unlink()
    def test_slow_widget_ok(self, qt_app, mocker):
        """If the widget changes to match within try_sec, the retry loop
        eventually succeeds (sleep is patched so the Qt loop keeps running)."""
        ref_image_path = non_existent_path('test_slow_widget_ok.png')
        widget_ref = QLabel('test123')
        widget_ref.setLayout(QHBoxLayout())
        widget_actual = QLabel('test')
        widget_actual.setLayout(QHBoxLayout())
        def show():
            widget_ref.show()
            widget_actual.show()
        def create_ref():
            assert check_widget_snapshot(widget_ref, __file__, ref_image_path.stem)
        def check_ok_before_elapse():
            mock_sleep = mocker.patch.object(pyqt_test_sandals, 'sleep')
            assert check_widget_snapshot(widget_actual, __file__, ref_image_path.stem, try_sec=3)
            assert mock_sleep.call_count > 0
        def change_actual():
            widget_actual.setText('test123')
        QTimer.singleShot(0, show)
        QTimer.singleShot(10, create_ref)
        QTimer.singleShot(20, check_ok_before_elapse)
        QTimer.singleShot(1000, change_actual)
        QTimer.singleShot(2100, qt_app.quit)
        qt_app.exec()
        ref_image_path.unlink()
class TestCaseImgDiffer:
    @pytest.fixture(autouse=True)
    def setup_class(self):
        """Provide a QApplication for each test and surface exceptions raised
        inside Qt slots (Qt otherwise swallows them)."""
        self.app = QApplication.instance()
        if self.app is None:
            self.app = QApplication([])
        def except_hook(*args):
            traceback.print_exception(*args)
        prev_hook = sys.excepthook
        sys.excepthook = except_hook
        yield self.app
        # restore the original hook even if the test failed
        sys.excepthook = prev_hook
def test_same_img(self):
widget1 = QLabel('test1')
widget2 = QLabel('test1')
ref_img = widget1.grab().toImage()
img = widget2.grab().toImage()
assert img == ref_img
differ = ImgDiffer()
assert differ.get_diff(img, ref_img) is None
expect = 'RMS diff=0.00% (rms_tol_perc=0.00%), number of pixels changed=0.00% (num_tol_perc=None), max pix diff=0 (max_pix_diff_tol=None)'
assert differ.report() == expect
    def test_actual_wider(self):
        """Diffing against a wider actual widget matches the checked-in
        reference diff image (actual_wider_diff.png must exist on disk)."""
        widget_ref = QLabel('test1')
        widget_actual = QLabel('test23456')
        def test():
            # grab only after show() so the widgets have their final geometry
            widget_ref.show()
            widget_actual.show()
            ref_img = widget_ref.grab().toImage()
            img = widget_actual.grab().toImage()
            assert img != ref_img
            assert img.width() > ref_img.width()
            assert img.height() == ref_img.height()
            differ = ImgDiffer()
            diff = differ.get_diff(img, ref_img)
            # diff.save('actual_wider_diff.png')
            expect = QPixmap('actual_wider_diff.png')
            assert expect.toImage() == diff
            self.app.closeAllWindows()
        # run inside the Qt event loop; exec() returns when windows close
        QTimer.singleShot(0, test)
        self.app.exec()
    def test_actual_higher(self):
        """Diffing against a taller actual widget matches the checked-in
        reference diff image (actual_higher_diff.png must exist on disk)."""
        widget_ref = QLabel('test1')
        widget_actual = QLabel('test1\n123')
        def test():
            # grab only after show() so the widgets have their final geometry
            widget_ref.show()
            widget_actual.show()
            ref_img = widget_ref.grab().toImage()
            img = widget_actual.grab().toImage()
            assert img != ref_img
            assert img.width() == ref_img.width()
            assert img.height() > ref_img.height()
            differ = ImgDiffer()
            diff = differ.get_diff(img, ref_img)
            # diff.save('actual_higher_diff.png')
            expect = QPixmap('actual_higher_diff.png')
            assert expect.toImage() == diff
            self.app.closeAllWindows()
        # run inside the Qt event loop; exec() returns when windows close
        QTimer.singleShot(0, test)
        self.app.exec()
    def test_actual_higher_thinner(self):
        """Diffing against a taller-but-narrower actual widget matches the
        checked-in reference diff image (actual_higher_thinner_diff.png)."""
        widget_ref = QLabel('test1')
        widget_actual = QLabel('tes\n123')
        def test():
            # grab only after show() so the widgets have their final geometry
            widget_ref.show()
            widget_actual.show()
            ref_img = widget_ref.grab().toImage()
            img = widget_actual.grab().toImage()
            assert img != ref_img
            assert img.width() < ref_img.width()
            assert img.height() > ref_img.height()
            differ = ImgDiffer()
            diff = differ.get_diff(img, ref_img)
            # diff.save('actual_higher_thinner_diff.png')
            expect = QPixmap('actual_higher_thinner_diff.png')
            assert expect.toImage() == diff
            self.app.closeAllWindows()
        # run inside the Qt event loop; exec() returns when windows close
        QTimer.singleShot(0, test)
        self.app.exec()
def test_same_size_img_not_eq(self):
widget_ref = QLabel('test1')
widget_actual = QLabel('test2')
def test():
widget_actual.show()
widget_ref.setFixedSize(widget_actual.width(), widget_actual.height())
widget_ref.show()
ref_img = widget_ref.grab().toImage()
img | |
from scipy.optimize import OptimizeResult
from scipy.optimize.optimize import _status_message
from scipy.sparse.linalg import eigsh
from torch import Tensor
import torch
from .function import ScalarFunction
from .line_search import strong_wolfe
# Extend scipy's shared status-message table with a Newton-CG-specific entry;
# _minimize_newton_cg reports it when the CG sub-solver fails to converge.
_status_message['cg_warn'] = "Warning: CG iterations didn't converge. The " \
                             "Hessian is not positive definite."
def _cg_iters(grad, hess, max_iter, normp=1):
"""A CG solver specialized for the NewtonCG sub-problem.
Derived from Algorithm 7.1 of "Numerical Optimization (2nd Ed.)"
(Nocedal & Wright, 2006; pp. 169)
"""
# generalized dot product that supports batch inputs
# TODO: let the user specify dot fn?
dot = lambda u,v: u.mul(v).sum(-1, keepdim=True)
g_norm = grad.norm(p=normp)
tol = g_norm * g_norm.sqrt().clamp(0, 0.5)
eps = torch.finfo(grad.dtype).eps
n_iter = 0 # TODO: remove?
maxiter_reached = False
# initialize state and iterate
x = torch.zeros_like(grad)
r = grad.clone()
p = grad.neg()
rs = dot(r, r)
for n_iter in range(max_iter):
if r.norm(p=normp) < tol:
break
Bp = hess.mv(p)
curv = dot(p, Bp)
curv_sum = curv.sum()
if curv_sum < 0:
# hessian is not positive-definite
if n_iter == 0:
# if first step, fall back to steepest descent direction
# (scaled by Rayleigh quotient)
x = grad.mul(rs / curv)
#x = grad.neg()
break
elif curv_sum <= 3 * eps:
break
alpha = rs / curv
x.addcmul_(alpha, p)
r.addcmul_(alpha, Bp)
rs_new = dot(r, r)
p.mul_(rs_new / rs).sub_(r)
rs = rs_new
else:
# curvature keeps increasing; bail
maxiter_reached = True
return x, n_iter, maxiter_reached
@torch.no_grad()
def _minimize_newton_cg(
        fun, x0, lr=1., max_iter=None, cg_max_iter=None,
        twice_diffable=True, line_search='strong-wolfe', xtol=1e-5,
        normp=1, callback=None, disp=0, return_all=False):
    """Minimize a scalar function of one or more variables using the
    Newton-Raphson method, with Conjugate Gradient for the linear inverse
    sub-problem.

    Parameters
    ----------
    fun : callable
        Scalar objective function to minimize.
    x0 : Tensor
        Initialization point.
    lr : float
        Step size for parameter updates. If using line search, this will be
        used as the initial step size for the search.
    max_iter : int, optional
        Maximum number of iterations to perform. Defaults to
        ``200 * x0.numel()``.
    cg_max_iter : int, optional
        Maximum number of iterations for CG subproblem. Recommended to
        leave this at the default of ``20 * x0.numel()``.
    twice_diffable : bool
        Whether to assume the function is twice continuously differentiable.
        If True, hessian-vector products will be much faster.
    line_search : str
        Line search specifier. Currently the available options are
        {'none', 'strong-wolfe'}.
    xtol : float
        Average relative error in solution `xopt` acceptable for
        convergence.
    normp : Number or str
        The norm type to use for termination conditions. Can be any value
        supported by :func:`torch.norm`.
    callback : callable, optional
        Function to call after each iteration with the current parameter
        state, e.g. ``callback(x)``.
    disp : int or bool
        Display (verbosity) level. Set to >0 to print status messages.
    return_all : bool
        Set to True to return a list of the best solution at each of the
        iterations.

    Returns
    -------
    result : OptimizeResult
        Result of the optimization routine. ``status`` is 0 on success,
        1 when ``max_iter`` is reached, and 3 on CG failure or a
        non-finite objective value.
    """
    lr = float(lr)
    disp = int(disp)
    # xtol is interpreted as an average per-element tolerance
    xtol = x0.numel() * xtol
    if max_iter is None:
        max_iter = x0.numel() * 200
    if cg_max_iter is None:
        cg_max_iter = x0.numel() * 20
    # construct scalar objective function; hessp=True requests a
    # hessian-vector-product operator rather than an explicit Hessian
    sf = ScalarFunction(fun, x0.shape, hessp=True, twice_diffable=twice_diffable)
    closure = sf.closure
    if line_search == 'strong-wolfe':
        dir_evaluate = sf.dir_evaluate
    # initial settings
    x = x0.detach().clone(memory_format=torch.contiguous_format)
    f, g, hessp, _ = closure(x)
    if disp > 1:
        print('initial fval: %0.4f' % f)
    if return_all:
        allvecs = [x]
    ncg = 0  # number of cg iterations
    n_iter = 0
    # begin optimization loop
    for n_iter in range(1, max_iter + 1):
        # ============================================================
        #  Compute a search direction pk by applying the CG method to
        #  H_f(xk) p = - J_f(xk) starting from 0.
        # ============================================================
        # Compute search direction with conjugate gradient (CG)
        d, cg_iters, cg_fail = _cg_iters(g, hessp, cg_max_iter, normp)
        ncg += cg_iters
        if cg_fail:
            # CG exhausted its iteration budget without converging
            warnflag = 3
            msg = _status_message['cg_warn']
            break
        # =====================================================
        #  Perform variable update (with optional line search)
        # =====================================================
        if line_search == 'none':
            update = d.mul(lr)
            x = x + update
        elif line_search == 'strong-wolfe':
            # strong-wolfe line search; t is the accepted step length
            _, _, t, ls_nevals = strong_wolfe(dir_evaluate, x, lr, d, f, g)
            update = d.mul(t)
            x = x + update
        else:
            raise ValueError('invalid line_search option {}.'.format(line_search))
        # re-evaluate function
        f, g, hessp, _ = closure(x)
        if disp > 1:
            print('iter %3d - fval: %0.4f' % (n_iter, f))
        if callback is not None:
            callback(x)
        if return_all:
            allvecs.append(x)
        # ==========================
        #  check for convergence
        # ==========================
        # converged when the last step was small enough in the chosen norm
        if update.norm(p=normp) <= xtol:
            warnflag = 0
            msg = _status_message['success']
            break
        if not f.isfinite():
            warnflag = 3
            msg = _status_message['nan']
            break
    else:
        # if we get to the end, the maximum num. iterations was reached
        warnflag = 1
        msg = _status_message['maxiter']
    if disp:
        print(msg)
        print(" Current function value: %f" % f)
        print(" Iterations: %d" % n_iter)
        print(" Function evaluations: %d" % sf.nfev)
        print(" CG iterations: %d" % ncg)
    result = OptimizeResult(fun=f, x=x.view_as(x0), grad=g.view_as(x0),
                            status=warnflag, success=(warnflag==0),
                            message=msg, nit=n_iter, nfev=sf.nfev, ncg=ncg)
    if return_all:
        result['allvecs'] = allvecs
    return result
@torch.no_grad()
def _minimize_newton_exact(
fun, x0, lr=1., max_iter=None, line_search='strong-wolfe', xtol=1e-5,
normp=1, tikhonov=0., handle_npd='grad', callback=None, disp=0,
return_all=False):
"""Minimize a scalar function of one or more variables using the
Newton-Raphson method.
This variant uses an "exact" Newton routine based on Cholesky factorization
of the explicit Hessian matrix.
Parameters
----------
fun : callable
Scalar objective function to minimize.
x0 : Tensor
Initialization point.
lr : float
Step size for parameter updates. If using line search, this will be
used as the initial step size for the search.
max_iter : int, optional
Maximum number of iterations to perform. Defaults to
``200 * x0.numel()``.
line_search : str
Line search specifier. Currently the available options are
{'none', 'strong_wolfe'}.
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
normp : Number or str
The norm type to use for termination conditions. Can be any value
supported by :func:`torch.norm`.
tikhonov : float
Optional diagonal regularization (Tikhonov) parameter for the Hessian.
handle_npd : str
Mode for handling non-positive definite hessian matrices. Can be one
of the following:
* 'grad' : use steepest descent direction (gradient)
* 'lu' : solve the inverse hessian with LU factorization
* 'eig' : use symmetric eigendecomposition to determine a
diagonal regularization parameter
callback : callable, optional
Function to call after each iteration with the current parameter
state, e.g. ``callback(x)``.
disp : int or bool
Display (verbosity) level. Set to >0 to print status messages.
return_all : bool
Set to True to return a list of the best solution at each of the
iterations.
Returns
-------
result : OptimizeResult
Result of the optimization routine.
"""
lr = float(lr)
disp = int(disp)
xtol = x0.numel() * xtol
if max_iter is None:
max_iter = x0.numel() * 200
# Construct scalar objective function
sf = ScalarFunction(fun, x0.shape, hess=True)
closure = sf.closure
if line_search == 'strong-wolfe':
dir_evaluate = sf.dir_evaluate
# initial settings
x = x0.detach().view(-1).clone(memory_format=torch.contiguous_format)
f, g, _, hess = closure(x)
if tikhonov > 0:
hess.diagonal().add_(tikhonov)
if disp > 1:
print('initial fval: %0.4f' % f)
if return_all:
allvecs = [x]
nfail = 0
n_iter = 0
# begin optimization loop
for n_iter in range(1, max_iter + 1):
# ==================================================
# Compute a search direction d by solving
# H_f(x) d = - J_f(x)
# with the true Hessian and Cholesky factorization
# ===================================================
# Compute search direction with Cholesky solve
try:
d = torch.cholesky_solve(g.neg().unsqueeze(1),
torch.linalg.cholesky(hess)).squeeze(1)
chol_fail = False
except:
chol_fail = True
nfail += 1
if handle_npd == 'lu':
d = torch.linalg.solve(hess, g.neg())
elif handle_npd == 'grad':
d = g.neg()
elif handle_npd == 'cauchy':
gnorm = g.norm(p=2)
scale = 1 / gnorm
gHg = g.dot(hess.mv(g))
if gHg > 0:
scale *= torch.clamp_max_(gnorm.pow(3) / gHg, max=1)
d = scale * g.neg()
elif handle_npd == 'eig':
# this setting is experimental! use with caution
# TODO: why chose the factor 1.5 here? Seems to work best
eig0 = eigsh(hess.cpu().numpy(), k=1, which="SA", tol=1e-4,
return_eigenvectors=False).item()
tau = max(1e-3 - 1.5 * eig0, 0)
hess.diagonal().add_(tau)
d = torch.cholesky_solve(g.neg().unsqueeze(1),
torch.linalg.cholesky(hess)).squeeze(1)
else:
raise RuntimeError('invalid handle_npd encountered.')
# =====================================================
# | |
the default
reaction.add_metabolites({accoa_HSFc: -1.0,
pi_HSFc: -1.0,
actp_HSFc: 1.0,
coa_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#accoa_HSFc + h2o_HSFc -> ac_HSFc + coa_HSFc + h_HSFc
# Acetyl-CoA hydrolysis: irreversible (lower_bound 0), consumes water,
# releases free acetate, CoA and a proton.
reaction = Reaction('HSF_ACOAH')
reaction.name = 'Acetyl-CoA hydrolase'  # fixed typo: was 'Acteyl-CoA hydrolase'
reaction.subsystem = 'Acetate Metabolism'
reaction.lower_bound = 0.  # This is the default
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({accoa_HSFc: -1.0,
                          h2o_HSFc: -1.0,
                          ac_HSFc: 1.0,
                          coa_HSFc: 1.0,
                          h_HSFc: 1.0})
model.add_reactions([reaction])
# print the mass-balance check result for manual inspection ({} means balanced)
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Pyruvate Oxidation
#coa_HSFc + nad_HSFc + pyr_HSFc <-> accoa_HSFc + co2_HSFc + nadh_HSFc
reaction = Reaction('HSF_PDH')
# NOTE(review): co2_HSFc is created again with the same id in the PFOR block
# below; reusing one Metabolite object per id would be safer in cobrapy.
co2_HSFc = Metabolite('co2_HSFc', formula='CO2', name='CO2', compartment='HSFc', charge= 0)
# NAD-dependent pyruvate dehydrogenase (no ferredoxin involved; the
# ferredoxin variant is the separate HSF_PFOR reaction below)
reaction.name = 'Pyruvate dehydrogenase'  # fixed typo: was 'Pyruvate dehdyrogenase'
reaction.subsystem = 'Pyruvate Oxidation'
reaction.lower_bound = 0.  # This is the default
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({coa_HSFc: -1.0,
                          pyr_HSFc: -1.0,
                          nad_HSFc: -1.0,
                          accoa_HSFc: 1.0,
                          co2_HSFc: 1.0,
                          nadh_HSFc: 1.0})
model.add_reactions([reaction])
# print the mass-balance check result for manual inspection ({} means balanced)
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#PFOR
#coa_HSFc + pyr_HSFc + fdox_HSFc <-> accoa_HSFc + co2_HSFc + fdred_HSFc + h_HSFc
# Ferredoxin redox pair: the reduced form carries two extra electrons
# (charge -2 vs 0), which keeps the reaction charge-balanced.
fdred_HSFc = Metabolite('fdred_HSFc', formula='Fe8S8X', name='Ferredoxin (reduced) 2[4Fe-4S]', compartment='HSFc', charge= -2)
fdox_HSFc = Metabolite('fdox_HSFc', formula='Fe8S8X', name='Ferredoxin (oxidized) 2[4Fe-4S]', compartment='HSFc', charge= 0)
# NOTE(review): co2_HSFc was already created for HSF_PDH above; constructing a
# second Metabolite object with the same id may duplicate it when reactions
# are added -- confirm, or reuse the earlier object.
co2_HSFc = Metabolite('co2_HSFc', formula='CO2', name='CO2', compartment='HSFc', charge= 0)
reaction = Reaction('HSF_PFOR')
#This reaction differs from BiGG database because a different ferredoxin is used and H+ is a product for mass and charge balance
reaction.name = '*Pyruvate flavodoxin oxidoreductase'
reaction.subsystem = 'Pyruvate Oxidation'
reaction.lower_bound = 0.  # This is the default
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({coa_HSFc: -1.0,
                          pyr_HSFc: -1.0,
                          fdox_HSFc: -1.0,
                          accoa_HSFc: 1.0,
                          co2_HSFc: 1.0,
                          fdred_HSFc: 1.0,
                          h_HSFc: 1.0})
model.add_reactions([reaction])
# print the mass-balance check result for manual inspection ({} means balanced)
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Ethanol Metabolism
#etoh_HSFc + nad_HSFc <-> acald_HSFc + h_HSFc + nadh_HSFc
etoh_HSFc = Metabolite('etoh_HSFc', formula='C2H6O', name='Ethanol',compartment='HSFc', charge=0)
acald_HSFc = Metabolite('acald_HSFc',formula='C2H4O', name='Acetaldehyde',compartment='HSFc', charge=0)
reaction = Reaction('HSF_ALCD2x')
reaction.name = 'Alcohol dehydrogenase (ethanol)'
reaction.subsystem = 'Ethanol Utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({etoh_HSFc: -1.0,
nad_HSFc: -1.0,
acald_HSFc: 1.0,
h_HSFc: 1.0,
nadh_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#acald_HSFc + coa_HSFc + nad_HSFc <-> accoa_HSFc + h_HSFc + nadh_HSFc
reaction = Reaction('HSF_ACALD')
reaction.name = 'Acetaldehyde dehydrogenase (acetylating)'
reaction.subsystem = 'Ethanol Utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({acald_HSFc: -1.0,
coa_HSFc: -1.0,
nad_HSFc: -1.0,
accoa_HSFc: 1.0,
h_HSFc: 1.0,
nadh_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Glycerol Utilization
#glyc_HSFe <-> glyc_e
glyc_HSFe = Metabolite('glyc_HSFe', formula='C3H8O3', name='Glycerol', compartment='HSFe', charge= 0)
reaction = Reaction('HSF_EX_glyc')
reaction.name = 'Glycerol exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({glyc_e: HSF_Abnd,
glyc_HSFe: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#glyc_HSFe <-> glyc_HSFc
glyc_HSFc = Metabolite('glyc_HSFc', formula='C3H8O3', name='Glycerol', compartment='HSFc', charge= 0)
reaction = Reaction('HSF_glyct')
reaction.name = 'Glycerol transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({glyc_HSFe: -1.0,
glyc_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#atp_HSFc + glyc_HSFc <-> adp_HSFc + glyc3p_HSFc + h_HSFc
glyc3p_HSFc = Metabolite('glyc3p_HSFc', formula='C3H7O6P', name='Glycerol 3-phosphate', compartment='HSFc', charge= -2)
reaction = Reaction('HSF_GLYK')
reaction.name = 'Glycerol kinase'
reaction.subsystem = 'Glycerol utilization'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({glyc_HSFc: -1.0,
atp_HSFc: -1.0,
adp_HSFc: 1.0,
glyc3p_HSFc: 1.0,
h_HSFc: 1.0,
ATP_SLP_HSF: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#dhap_HSFc + h_HSFc + nadh_HSFc <-> glyc3p_HSFc + nad_HSFc
reaction = Reaction('HSF_G3PD1')
reaction.name = 'Glycerol-3-phosphate dehydrogenase (NAD)'
reaction.subsystem = 'Glycerol utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({dhap_HSFc: -1.0,
h_HSFc: -1.0,
nadh_HSFc: -1.0,
glyc3p_HSFc: 1.0,
nad_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
###Energy Generation
#adp_HSFc + pi_HSFc + 4.0 h_HSFi <-> atp_HSFc + 3.0 h_HSFc + h2o_HSFc
h_HSFi = Metabolite('h_HSFi', formula='H', name='H+', compartment='HSFi', charge=1)
reaction = Reaction('HSF_ATPS4r')
#This reaction differs from the BiGG reaction because this model assumes a different compartment for ion motive force generation
reaction.name = '*ATP Synthase'
reaction.subsystem = 'Energy Generation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({adp_HSFc: -1.0,
pi_HSFc: -1.0,
h_HSFi: -4.0,
atp_HSFc: 1.0,
h_HSFc: 3.0,
h2o_HSFc: 1.0,
ATP_IMF_HSF: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Hydrogen Generation
#fdred_HSFc + 2.0 h_HSFc <-> h2_HSFc + fdox_HSFc
h2_HSFc = Metabolite('h2_HSFc', formula='H2', name='Hydrogen', compartment='HSFc', charge= 0)
reaction = Reaction('HSF_HYD1')
#The reaction in BiGG uses a different ferredoxin
#BiGG reaction is not balanced for H
reaction.name = '(FeFe)-hydrogenase, cytoplasm'
reaction.subsystem = 'Hydrogen Generation'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({fdred_HSFc: -1.0,
h_HSFc: -2.0,
h2_HSFc: 1.0,
fdox_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h2_HSFe <-> h2_e
h2_HSFe = Metabolite('h2_HSFe', formula='H2', name='Hydrogen', compartment='HSFe', charge= 0)
reaction = Reaction('HSF_EX_h2')
reaction.name = 'HSF h2 exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h2_e: HSF_Abnd,
h2_HSFe: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h2_HSFe <-> h2_HSFc
reaction = Reaction('HSF_H2t')
reaction.name = 'Hydrogen transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h2_HSFe: -1.0,
h2_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Other
#h2o_HSFe <-> h2o_e
h2o_HSFe = Metabolite('h2o_HSFe', formula='H2O', name='H2O', compartment='HSFe', charge=0)
reaction = Reaction('HSF_EX_h2o')
reaction.name = 'HSF h2o exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h2o_e: HSF_Abnd,
h2o_HSFe: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h2o_HSFe <-> h2o_HSFc
reaction = Reaction('HSF_H2Ot')
reaction.name = 'H2O transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h2o_HSFe: -1.0,
h2o_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#co2_HSFe <-> co2_e
co2_HSFe = Metabolite('co2_HSFe', formula='CO2', name='CO2', compartment='HSFe', charge= 0)
reaction = Reaction('HSF_EX_co2')
reaction.name = 'HSF co2 exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({co2_e: HSF_Abnd,
co2_HSFe: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#co2_HSFe <-> co2_HSFc
reaction = Reaction('HSF_co2t')
reaction.name = 'CO2 transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({co2_HSFe: -1.0,
co2_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#ATP Hydrolysis
#atp_HSFc + h2o_HSFc <-> adp_HSFc + pi_HSFc + h_HSFc + ATP_COMM_e
reaction = Reaction('HSF_ATP_Hydrolysis')
reaction.name = 'ATP Hydrolysis'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_HSFc: -1.0,
h2o_HSFc: -1.0,
adp_HSFc: 1.0,
pi_HSFc: 1.0,
h_HSFc: 1.0,
ATP_HYDR_HSF: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Import and Export Reactions For Energy Calculations
h_HSFe = Metabolite('h_HSFe', formula='H', name='Proton', compartment='HSFe', charge= 1)
# Formate Transport
# for_HSFe <-> for_e
# FIX: compartment was 'c', inconsistent with every other *_HSFc metabolite in
# this section (all use compartment='HSFc'); formula normalized to 'CHO2' to
# match for_HSFe (chemically identical to the previous 'C1H1O2').
for_HSFc = Metabolite('for_HSFc', formula='CHO2', name='Formate', compartment='HSFc', charge=-1)
for_HSFe = Metabolite('for_HSFe', formula='CHO2', name='Formate', compartment='HSFe', charge=-1)
reaction = Reaction('HSF_EX_for')
reaction.name = 'HSF for exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000.  # This is the default
reaction.upper_bound = 1000.  # This is the default
reaction.add_metabolites({for_e: HSF_Abnd,
                          for_HSFe: -1})
model.add_reactions([reaction])
# print the mass-balance check result for manual inspection ({} means balanced)
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# for_HSFe + h_HSFe <-> for_HSFc + h_HSFc
reaction = Reaction('HSF_Formate_import')
reaction.name = 'Formate import'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({for_HSFe: -1.0,
h_HSFe: -1.0,
for_HSFc: 1.0,
h_HSFc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# for_HSFc + h_HSFc <-> for_HSFe + h_HSFe
reaction = Reaction('HSF_Formate_export')
reaction.name = 'Formate_export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({for_HSFc: -1.0,
h_HSFc: -1.0,
for_HSFe: 1.0,
h_HSFe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Acetate Transport
#ac_HSFe <-> ac_e
ac_HSFe | |
Admin settings for OfficeBasedVisits18 model"""
def has_change_permission(self, request, obj=None):
""" Do not allow users to edit model instances """
return False
def has_delete_permission(self, request, obj=None):
""" Do not allow users to delete model instances """
return False
model = OfficeBasedVisits18
# Column names to display on table
field_names = [field.name for field in model._meta.get_fields()]
list_display = field_names
# Navigation
search_fields = ["DUPERSID"]
list_per_page = 15
class AdminOfficeBasedVisits17(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits17: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits17
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits16(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits16: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits16
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits15(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits15: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits15
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits14(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits14: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits14
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits13(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits13: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits13
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits12(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits12: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits12
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits11(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits11: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits11
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits10(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits10: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits10
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits09(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits09: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits09
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits08(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits08: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits08
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits07(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits07: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits07
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits06(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits06: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits06
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminOfficeBasedVisits05(admin.ModelAdmin):
    """Read-only admin for OfficeBasedVisits05: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = OfficeBasedVisits05
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminHomeHealth18(admin.ModelAdmin):
    """Read-only admin for HomeHealth18: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = HomeHealth18
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminHomeHealth17(admin.ModelAdmin):
    """Read-only admin for HomeHealth17: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = HomeHealth17
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminHomeHealth16(admin.ModelAdmin):
    """Read-only admin for HomeHealth16: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = HomeHealth16
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminHomeHealth15(admin.ModelAdmin):
    """Read-only admin for HomeHealth15: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = HomeHealth15
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminHomeHealth14(admin.ModelAdmin):
    """Read-only admin for HomeHealth14: all fields shown, searchable by DUPERSID."""
    def has_change_permission(self, request, obj=None):
        """ Do not allow users to edit model instances """
        return False
    def has_delete_permission(self, request, obj=None):
        """ Do not allow users to delete model instances """
        return False
    model = HomeHealth14
    # Column names to display on table
    # NOTE(review): _meta.get_fields() also returns reverse relations, which
    # list_display cannot render -- confirm the model has none, or switch to
    # _meta.concrete_fields.
    field_names = [field.name for field in model._meta.get_fields()]
    list_display = field_names
    # Navigation
    search_fields = ["DUPERSID"]
    list_per_page = 15
class AdminHomeHealth13(admin.ModelAdmin):
""" Admin settings for HomeHealth13 model"""
def has_change_permission(self, | |
+ m.x310
+ m.x311 + m.x312 + m.x313 + m.x314 + m.x315 + m.x316 + m.x317 + m.x318 + m.x319 + m.x320
+ m.x321 + m.x322 + m.x323 + m.x324 + m.x325 + m.x326 + m.x327 + m.x328 + m.x329 + m.x330
+ m.x331 + m.x332 + m.x333 + m.x334 + m.x335 + m.x336 + m.x337 + m.x338 + m.x339 + m.x340
+ m.x341 + m.x342 + m.x1102 + m.x1103 + m.x1104 + m.x1105 <= 304)
# ---------------------------------------------------------------------------
# Machine-generated capacity constraints (presumably emitted by a model
# writer / converter — TODO confirm origin): each constraint bounds the sum
# of a contiguous block of decision variables, plus a few trailing slack-like
# variables, by a scalar capacity limit.
# NOTE(review): do not hand-edit the variable lists; regenerate instead.
# ---------------------------------------------------------------------------
m.c7 = Constraint(expr= m.x343 + m.x344 + m.x345 + m.x346 + m.x347 + m.x348 + m.x349 + m.x350 + m.x351 + m.x352
    + m.x353 + m.x354 + m.x355 + m.x356 + m.x357 + m.x358 + m.x359 + m.x360 + m.x361 + m.x362
    + m.x363 + m.x364 + m.x365 + m.x366 + m.x367 + m.x368 + m.x369 + m.x370 + m.x371 + m.x372
    + m.x373 + m.x374 + m.x375 + m.x376 + m.x377 + m.x378 + m.x379 + m.x380 + m.x381 + m.x382
    + m.x383 + m.x384 + m.x385 + m.x386 + m.x387 + m.x388 + m.x389 + m.x390 + m.x391 + m.x392
    + m.x393 + m.x394 + m.x395 + m.x396 + m.x397 + m.x398 + m.x399 + m.x400 + m.x401 + m.x402
    + m.x1113 + m.x1114 + m.x1115 + m.x1116 + m.x1117 <= 91)
m.c8 = Constraint(expr= m.x403 + m.x404 + m.x405 + m.x406 + m.x407 + m.x408 + m.x409 + m.x410 + m.x411 + m.x412
    + m.x413 + m.x414 + m.x415 + m.x416 + m.x417 + m.x418 + m.x419 + m.x420 + m.x421 + m.x422
    + m.x423 + m.x424 + m.x425 + m.x426 + m.x427 + m.x428 + m.x429 + m.x430 + m.x431 + m.x432
    + m.x433 + m.x434 + m.x435 + m.x436 + m.x437 + m.x438 + m.x439 + m.x440 + m.x441 + m.x442
    + m.x443 + m.x444 + m.x445 + m.x446 + m.x447 + m.x448 + m.x449 + m.x450 + m.x451 + m.x452
    + m.x453 + m.x454 + m.x455 + m.x456 + m.x1124 + m.x1125 + m.x1126 <= 64)
m.c9 = Constraint(expr= m.x457 + m.x458 + m.x459 + m.x460 + m.x461 + m.x462 + m.x463 + m.x464 + m.x465 + m.x466
    + m.x467 + m.x468 + m.x469 + m.x470 + m.x471 + m.x472 + m.x473 + m.x474 + m.x475 + m.x476
    + m.x477 + m.x478 + m.x479 + m.x480 + m.x481 + m.x482 + m.x483 + m.x484 + m.x485 + m.x486
    + m.x487 + m.x488 + m.x489 + m.x490 + m.x491 + m.x492 + m.x493 + m.x494 + m.x495 + m.x496
    + m.x497 + m.x498 + m.x499 + m.x500 + m.x501 + m.x502 + m.x503 + m.x504 + m.x505 + m.x506
    + m.x507 + m.x508 + m.x509 + m.x510 + m.x511 + m.x512 + m.x513 + m.x514 + m.x515 + m.x516
    + m.x517 + m.x518 + m.x519 + m.x520 + m.x521 + m.x522 + m.x523 + m.x524 + m.x525 + m.x526
    + m.x527 + m.x528 + m.x529 + m.x1135 + m.x1136 + m.x1137 + m.x1138 <= 33)
m.c10 = Constraint(expr= m.x530 + m.x531 + m.x532 + m.x533 + m.x534 + m.x535 + m.x536 + m.x537 + m.x538 + m.x539
    + m.x540 + m.x541 + m.x542 + m.x543 + m.x544 + m.x545 + m.x546 + m.x547 + m.x548 + m.x549
    + m.x550 + m.x551 + m.x552 + m.x553 + m.x554 + m.x555 + m.x556 + m.x557 + m.x558 + m.x559
    + m.x560 + m.x561 + m.x562 + m.x563 + m.x564 + m.x565 + m.x566 + m.x567 + m.x568 + m.x569
    + m.x570 + m.x571 + m.x572 + m.x573 + m.x574 + m.x575 + m.x576 + m.x577 + m.x578 + m.x579
    + m.x580 + m.x581 + m.x582 + m.x583 + m.x584 + m.x585 + m.x586 + m.x587 + m.x588 + m.x589
    + m.x590 + m.x1146 + m.x1147 + m.x1148 <= 294)
m.c11 = Constraint(expr= m.x591 + m.x592 + m.x593 + m.x594 + m.x595 + m.x596 + m.x597 + m.x598 + m.x599 + m.x600
    + m.x601 + m.x602 + m.x603 + m.x604 + m.x605 + m.x606 + m.x607 + m.x608 + m.x609 + m.x610
    + m.x611 + m.x612 + m.x613 + m.x614 + m.x615 + m.x616 + m.x617 + m.x618 + m.x619 + m.x620
    + m.x621 + m.x622 + m.x623 + m.x624 + m.x625 + m.x626 + m.x627 + m.x628 + m.x629 + m.x630
    + m.x631 + m.x632 + m.x633 + m.x634 + m.x635 + m.x636 + m.x637 + m.x638 + m.x639 + m.x640
    + m.x641 + m.x642 + m.x643 + m.x644 + m.x645 + m.x646 + m.x647 + m.x648 + m.x649 + m.x650
    + m.x651 + m.x652 + m.x653 + m.x654 + m.x655 + m.x656 + m.x657 + m.x658 + m.x659 + m.x660
    + m.x661 + m.x662 + m.x1157 + m.x1158 <= 163)
m.c12 = Constraint(expr= m.x663 + m.x664 + m.x665 + m.x666 + m.x667 + m.x668 + m.x669 + m.x670 + m.x671 + m.x672
    + m.x673 + m.x674 + m.x675 + m.x676 + m.x677 + m.x678 + m.x679 + m.x680 + m.x681 + m.x682
    + m.x683 + m.x684 + m.x685 + m.x686 + m.x687 + m.x688 + m.x689 + m.x690 + m.x691 + m.x692
    + m.x693 + m.x694 + m.x695 + m.x696 + m.x697 + m.x698 + m.x699 + m.x700 + m.x701 + m.x702
    + m.x703 + m.x704 + m.x705 + m.x706 + m.x707 + m.x708 + m.x1164 + m.x1165 + m.x1166 + m.x1167
    + m.x1168 + m.x1169 + m.x1170 <= 213)
m.c13 = Constraint(expr= m.x709 + m.x710 + m.x711 + m.x712 + m.x713 + m.x714 + m.x715 + m.x716 + m.x717 + m.x718
    + m.x719 + m.x720 + m.x721 + m.x722 + m.x723 + m.x724 + m.x725 + m.x726 + m.x727 + m.x728
    + m.x729 + m.x730 + m.x731 + m.x732 + m.x733 + m.x734 + m.x735 + m.x736 + m.x737 + m.x738
    + m.x739 + m.x740 + m.x741 + m.x742 + m.x743 + m.x744 + m.x745 + m.x746 + m.x747 + m.x748
    + m.x1176 + m.x1177 + m.x1178 + m.x1179 + m.x1180 <= 219)
m.c14 = Constraint(expr= m.x749 + m.x750 + m.x751 + m.x752 + m.x753 + m.x754 + m.x755 + m.x756 + m.x757 + m.x758
    + m.x759 + m.x760 + m.x761 + m.x762 + m.x763 + m.x764 + m.x765 + m.x766 + m.x767 + m.x768
    + m.x769 + m.x770 + m.x771 + m.x772 + m.x773 + m.x774 + m.x775 + m.x776 + m.x777 + m.x778
    + m.x779 + m.x780 + m.x781 + m.x782 + m.x783 + m.x784 + m.x785 + m.x786 + m.x787 + m.x788
    + m.x789 + m.x790 + m.x1186 + m.x1187 + m.x1188 + m.x1189 <= 276)
m.c15 = Constraint(expr= m.x791 + m.x792 + m.x793 + m.x794 + m.x795 + m.x796 + m.x797 + m.x798 + m.x799 + m.x800
    + m.x801 + m.x802 + m.x803 + m.x804 + m.x805 + m.x806 + m.x807 + m.x808 + m.x809 + m.x810
    + m.x811 + m.x812 + m.x813 + m.x814 + m.x815 + m.x816 + m.x817 + m.x818 + m.x819 + m.x820
    + m.x821 + m.x822 + m.x823 + m.x824 + m.x825 + m.x826 + m.x827 + m.x828 + m.x829 + m.x830
    + m.x831 + m.x832 + m.x833 + m.x834 + m.x835 + m.x836 + m.x837 + m.x838 + m.x839 + m.x840
    + m.x841 + m.x842 + m.x843 + m.x844 + m.x845 + m.x846 + m.x1196 + m.x1197 + m.x1198 <= 142)
m.c16 = Constraint(expr= m.x847 + m.x848 + m.x849 + m.x850 + m.x851 + m.x852 + m.x853 + m.x854 + m.x855 + m.x856
    + m.x857 + m.x858 + m.x1200 + m.x1201 + m.x1202 + m.x1203 + m.x1204 <= 242)
m.c17 = Constraint(expr= m.x859 + m.x860 + m.x861 + m.x862 + m.x863 + m.x864 + m.x865 + m.x866 + m.x867 + m.x868
+ m.x869 + m.x870 + m.x871 + m.x872 + m.x873 + m.x874 + m.x875 + m.x876 + m.x877 + m.x878
+ m.x879 + m.x880 + m.x881 + m.x882 + m.x883 + m.x884 + m.x885 + m.x886 + m.x887 + | |
"$qst_bring_back_runaway_serfs_num_parties_fleed"),
(ge, ":sum_removed", 3),
(try_begin),
(ge, "$qst_bring_back_runaway_serfs_num_parties_returned", 3),
(call_script, "script_succeed_quest", "qst_bring_back_runaway_serfs"),
(else_try),
(eq, "$qst_bring_back_runaway_serfs_num_parties_returned", 0),
(call_script, "script_fail_quest", "qst_bring_back_runaway_serfs"),
(else_try),
(call_script, "script_conclude_quest", "qst_bring_back_runaway_serfs"),
(try_end),
],
[]
),
### Defend Nobles Against Peasants quest
## (0.2, 0.0, 0.0,
## [
## (check_quest_active, "qst_defend_nobles_against_peasants"),
## (neg|check_quest_succeeded, "qst_defend_nobles_against_peasants"),
## (neg|check_quest_failed, "qst_defend_nobles_against_peasants"),
## (quest_get_slot, ":quest_target_center", "qst_defend_nobles_against_peasants", slot_quest_target_center),
## (assign, ":num_active_parties", 0),
## (try_begin),
## (gt, "$qst_defend_nobles_against_peasants_noble_party_1", 0),
## (party_is_active, "$qst_defend_nobles_against_peasants_noble_party_1"),
## (val_add, ":num_active_parties", 1),
## (party_is_in_town, "$qst_defend_nobles_against_peasants_noble_party_1", ":quest_target_center"),
## (remove_party, "$qst_defend_nobles_against_peasants_noble_party_1"),
## (party_get_num_companions, ":num_companions", "$qst_defend_nobles_against_peasants_noble_party_1"),
## (val_add, "$qst_defend_nobles_against_peasants_num_nobles_saved", ":num_companions"),
## (try_end),
## (try_begin),
## (gt, "$qst_defend_nobles_against_peasants_noble_party_2", 0),
## (party_is_active, "$qst_defend_nobles_against_peasants_noble_party_2"),
## (val_add, ":num_active_parties", 1),
## (party_is_in_town, "$qst_defend_nobles_against_peasants_noble_party_2", ":quest_target_center"),
## (remove_party, "$qst_defend_nobles_against_peasants_noble_party_2"),
## (party_get_num_companions, ":num_companions", "$qst_defend_nobles_against_peasants_noble_party_2"),
## (val_add, "$qst_defend_nobles_against_peasants_num_nobles_saved", ":num_companions"),
## (try_end),
## (try_begin),
## (gt, "$qst_defend_nobles_against_peasants_noble_party_3", 0),
## (party_is_active, "$qst_defend_nobles_against_peasants_noble_party_3"),
## (val_add, ":num_active_parties", 1),
## (party_is_in_town, "$qst_defend_nobles_against_peasants_noble_party_3", ":quest_target_center"),
## (remove_party, "$qst_defend_nobles_against_peasants_noble_party_3"),
## (party_get_num_companions, ":num_companions", "$qst_defend_nobles_against_peasants_noble_party_3"),
## (val_add, "$qst_defend_nobles_against_peasants_num_nobles_saved", ":num_companions"),
## (try_end),
## (try_begin),
## (gt, "$qst_defend_nobles_against_peasants_noble_party_4", 0),
## (party_is_active, "$qst_defend_nobles_against_peasants_noble_party_4"),
## (val_add, ":num_active_parties", 1),
## (party_is_in_town, "$qst_defend_nobles_against_peasants_noble_party_4", ":quest_target_center"),
## (remove_party, "$qst_defend_nobles_against_peasants_noble_party_4"),
## (party_get_num_companions, ":num_companions", "$qst_defend_nobles_against_peasants_noble_party_4"),
## (val_add, "$qst_defend_nobles_against_peasants_num_nobles_saved", ":num_companions"),
## (try_end),
## (try_begin),
## (gt, "$qst_defend_nobles_against_peasants_noble_party_5", 0),
## (party_is_active, "$qst_defend_nobles_against_peasants_noble_party_5"),
## (val_add, ":num_active_parties", 1),
## (party_is_in_town, "$qst_defend_nobles_against_peasants_noble_party_5", ":quest_target_center"),
## (remove_party, "$qst_defend_nobles_against_peasants_noble_party_5"),
## (party_get_num_companions, ":num_companions", "$qst_defend_nobles_against_peasants_noble_party_5"),
## (val_add, "$qst_defend_nobles_against_peasants_num_nobles_saved", ":num_companions"),
## (try_end),
## (try_begin),
## (gt, "$qst_defend_nobles_against_peasants_noble_party_6", 0),
## (party_is_active, "$qst_defend_nobles_against_peasants_noble_party_6"),
## (val_add, ":num_active_parties", 1),
## (party_is_in_town, "$qst_defend_nobles_against_peasants_noble_party_6", ":quest_target_center"),
## (remove_party, "$qst_defend_nobles_against_peasants_noble_party_6"),
## (party_get_num_companions, ":num_companions", "$qst_defend_nobles_against_peasants_noble_party_6"),
## (val_add, "$qst_defend_nobles_against_peasants_num_nobles_saved", ":num_companions"),
## (try_end),
## (try_begin),
## (gt, "$qst_defend_nobles_against_peasants_noble_party_7", 0),
## (party_is_active, "$qst_defend_nobles_against_peasants_noble_party_7"),
## (val_add, ":num_active_parties", 1),
## (party_is_in_town, "$qst_defend_nobles_against_peasants_noble_party_7", ":quest_target_center"),
## (remove_party, "$qst_defend_nobles_against_peasants_noble_party_7"),
## (party_get_num_companions, ":num_companions", "$qst_defend_nobles_against_peasants_noble_party_7"),
## (val_add, "$qst_defend_nobles_against_peasants_num_nobles_saved", ":num_companions"),
## (try_end),
## (try_begin),
## (gt, "$qst_defend_nobles_against_peasants_noble_party_8", 0),
## (party_is_active, "$qst_defend_nobles_against_peasants_noble_party_8"),
## (val_add, ":num_active_parties", 1),
## (party_is_in_town, "$qst_defend_nobles_against_peasants_noble_party_8", ":quest_target_center"),
## (remove_party, "$qst_defend_nobles_against_peasants_noble_party_8"),
## (party_get_num_companions, ":num_companions", "$qst_defend_nobles_against_peasants_noble_party_8"),
## (val_add, "$qst_defend_nobles_against_peasants_num_nobles_saved", ":num_companions"),
## (try_end),
## (eq, ":num_active_parties", 0),
## (try_begin),
## (store_div, ":limit", "$qst_defend_nobles_against_peasants_num_nobles_to_save", 2),
## (ge, "$qst_defend_nobles_against_peasants_num_nobles_saved", ":limit"),
## (call_script, "script_succeed_quest", "qst_defend_nobles_against_peasants"),
## (else_try),
## (call_script, "script_fail_quest", "qst_defend_nobles_against_peasants"),
## (try_end),
## ],
## []
## ),
### Capture Conspirators quest
## (0.15, 0.0, 0.0,
## [
## (check_quest_active, "qst_capture_conspirators"),
## (neg|check_quest_succeeded, "qst_capture_conspirators"),
## (neg|check_quest_failed, "qst_capture_conspirators"),
## (quest_get_slot, ":quest_target_center", "qst_capture_conspirators", slot_quest_target_center),
## (quest_get_slot, ":faction_no", "qst_capture_conspirators", slot_quest_target_faction),
## (try_begin),
## (gt, "$qst_capture_conspirators_num_parties_to_spawn", "$qst_capture_conspirators_num_parties_spawned"),
## (store_random_in_range, ":random_no", 0, 100),
## (lt, ":random_no", 20),
## (set_spawn_radius, 3),
## (spawn_around_party,":quest_target_center","pt_conspirator"),
## (val_add, "$qst_capture_conspirators_num_parties_spawned", 1),
## (party_get_num_companions, ":num_companions", reg0),
## (val_add, "$qst_capture_conspirators_num_troops_to_capture", ":num_companions"),
## (party_set_ai_behavior, reg0, ai_bhvr_travel_to_party),
## (party_set_ai_object, reg0, "$qst_capture_conspirators_party_1"),
## (party_set_flags, reg0, pf_default_behavior, 0),
## (try_begin),
## (le, "$qst_capture_conspirators_party_2", 0),
## (assign, "$qst_capture_conspirators_party_2", reg0),
## (else_try),
## (le, "$qst_capture_conspirators_party_3", 0),
## (assign, "$qst_capture_conspirators_party_3", reg0),
## (else_try),
## (le, "$qst_capture_conspirators_party_4", 0),
## (assign, "$qst_capture_conspirators_party_4", reg0),
## (else_try),
## (le, "$qst_capture_conspirators_party_5", 0),
## (assign, "$qst_capture_conspirators_party_5", reg0),
## (else_try),
## (le, "$qst_capture_conspirators_party_6", 0),
## (assign, "$qst_capture_conspirators_party_6", reg0),
## (else_try),
## (le, "$qst_capture_conspirators_party_7", 0),
## (assign, "$qst_capture_conspirators_party_7", reg0),
## (try_end),
## (try_end),
##
## (assign, ":num_active_parties", 0),
##
## (try_begin),
## (gt, "$qst_capture_conspirators_party_1", 0),
## (party_is_active, "$qst_capture_conspirators_party_1"),
## (val_add, ":num_active_parties", 1),
## (try_begin),
## (party_is_in_any_town, "$qst_capture_conspirators_party_1"),
## (remove_party, "$qst_capture_conspirators_party_1"),
## (else_try),
## (party_get_num_attached_parties, ":num_attachments", "$qst_capture_conspirators_party_1"),
## (gt, ":num_attachments", 0),
## (assign, ":leave_meeting", 0),
## (try_begin),
## (store_sub, ":required_attachments", "$qst_capture_conspirators_num_parties_to_spawn", 1),
## (eq, ":num_attachments", ":required_attachments"),
## (val_add, "$qst_capture_conspirators_leave_meeting_counter", 1),
## (ge, "$qst_capture_conspirators_leave_meeting_counter", 15),
## (assign, ":leave_meeting", 1),
## (try_end),
## (try_begin),
## (eq, "$qst_capture_conspirators_num_parties_to_spawn", "$qst_capture_conspirators_num_parties_spawned"),
## (store_distance_to_party_from_party, ":cur_distance", "p_main_party", "$qst_capture_conspirators_party_1"),
## (assign, ":min_distance", 3),
## (try_begin),
## (is_currently_night),
## (assign, ":min_distance", 2),
## (try_end),
## (lt, ":cur_distance", ":min_distance"),
## (assign, "$qst_capture_conspirators_leave_meeting_counter", 15),
## (assign, ":leave_meeting", 1),
## (try_end),
## (eq, ":leave_meeting", 1),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_1", ai_bhvr_travel_to_point),
## (party_set_flags, "$qst_capture_conspirators_party_1", pf_default_behavior, 0),
## (party_get_position, pos1, "$qst_capture_conspirators_party_1"),
## (call_script, "script_map_get_random_position_around_position_within_range", 15, 17),
## (party_set_ai_target_position, "$qst_capture_conspirators_party_1", pos2),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_2", 0),
## (party_detach, "$qst_capture_conspirators_party_2"),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_2", ai_bhvr_travel_to_point),
## (party_set_flags, "$qst_capture_conspirators_party_2", pf_default_behavior, 0),
## (call_script, "script_map_get_random_position_around_position_within_range", 15, 17),
## (party_set_ai_target_position, "$qst_capture_conspirators_party_2", pos2),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_3", 0),
## (party_detach, "$qst_capture_conspirators_party_3"),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_3", ai_bhvr_travel_to_point),
## (party_set_flags, "$qst_capture_conspirators_party_3", pf_default_behavior, 0),
## (call_script, "script_map_get_random_position_around_position_within_range", 15, 17),
## (party_set_ai_target_position, "$qst_capture_conspirators_party_3", pos2),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_4", 0),
## (party_detach, "$qst_capture_conspirators_party_4"),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_4", ai_bhvr_travel_to_point),
## (party_set_flags, "$qst_capture_conspirators_party_4", pf_default_behavior, 0),
## (call_script, "script_map_get_random_position_around_position_within_range", 15, 17),
## (party_set_ai_target_position, "$qst_capture_conspirators_party_4", pos2),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_5", 0),
## (party_detach, "$qst_capture_conspirators_party_5"),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_5", ai_bhvr_travel_to_point),
## (party_set_flags, "$qst_capture_conspirators_party_5", pf_default_behavior, 0),
## (call_script, "script_map_get_random_position_around_position_within_range", 15, 17),
## (party_set_ai_target_position, "$qst_capture_conspirators_party_5", pos2),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_6", 0),
## (party_detach, "$qst_capture_conspirators_party_6"),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_6", ai_bhvr_travel_to_point),
## (party_set_flags, "$qst_capture_conspirators_party_6", pf_default_behavior, 0),
## (call_script, "script_map_get_random_position_around_position_within_range", 15, 17),
## (party_set_ai_target_position, "$qst_capture_conspirators_party_6", pos2),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_7", 0),
## (party_detach, "$qst_capture_conspirators_party_7"),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_7", ai_bhvr_travel_to_point),
## (party_set_flags, "$qst_capture_conspirators_party_7", pf_default_behavior, 0),
## (call_script, "script_map_get_random_position_around_position_within_range", 15, 17),
## (party_set_ai_target_position, "$qst_capture_conspirators_party_7", pos2),
## (try_end),
## (try_end),
## (try_begin),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_1"),
## (eq, ":ai_behavior", ai_bhvr_travel_to_point),
## (party_get_ai_target_position, pos2, "$qst_capture_conspirators_party_1"),
## (party_get_position, pos1, "$qst_capture_conspirators_party_1"),
## (get_distance_between_positions, ":distance", pos2, pos1),
## (lt, ":distance", 200),
## (call_script, "script_get_closest_walled_center_of_faction", "$qst_capture_conspirators_party_1", ":faction_no"),#Can fail
## (ge, reg0, 0),
## (party_set_ai_object, "$qst_capture_conspirators_party_1", reg0),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_1", ai_bhvr_travel_to_party),
## (party_set_flags, "$qst_capture_conspirators_party_1", pf_default_behavior, 0),
## (try_end),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_2", 0),
## (party_is_active, "$qst_capture_conspirators_party_2"),
## (val_add, ":num_active_parties", 1),
## (try_begin),
## (party_is_in_any_town, "$qst_capture_conspirators_party_2"),
## (try_begin),
## (neg|party_is_in_town, "$qst_capture_conspirators_party_2", "$qst_capture_conspirators_party_1"),
## (remove_party, "$qst_capture_conspirators_party_2"),
## (else_try),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_2"),
## (neq, ":ai_behavior", ai_bhvr_hold),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_2", ai_bhvr_hold),
## (party_attach_to_party, "$qst_capture_conspirators_party_2", "$qst_capture_conspirators_party_1"),
## (party_set_flags, "$qst_capture_conspirators_party_2", pf_default_behavior, 0),
## (try_end),
## (try_end),
## (try_begin),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_2"),
## (eq, ":ai_behavior", ai_bhvr_travel_to_point),
## (party_get_ai_target_position, pos2, "$qst_capture_conspirators_party_2"),
## (party_get_position, pos1, "$qst_capture_conspirators_party_2"),
## (get_distance_between_positions, ":distance", pos2, pos1),
## (lt, ":distance", 200),
## (call_script, "script_get_closest_walled_center_of_faction", "$qst_capture_conspirators_party_2", ":faction_no"),#Can fail
## (ge, reg0, 0),
## (party_set_ai_object, "$qst_capture_conspirators_party_2", reg0),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_2", ai_bhvr_travel_to_party),
## (party_set_flags, "$qst_capture_conspirators_party_2", pf_default_behavior, 0),
## (try_end),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_3", 0),
## (party_is_active, "$qst_capture_conspirators_party_3"),
## (val_add, ":num_active_parties", 1),
## (try_begin),
## (party_is_in_any_town, "$qst_capture_conspirators_party_3"),
## (try_begin),
## (neg|party_is_in_town, "$qst_capture_conspirators_party_3", "$qst_capture_conspirators_party_1"),
## (remove_party, "$qst_capture_conspirators_party_3"),
## (else_try),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_3"),
## (neq, ":ai_behavior", ai_bhvr_hold),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_3", ai_bhvr_hold),
## (party_attach_to_party, "$qst_capture_conspirators_party_3", "$qst_capture_conspirators_party_1"),
## (party_set_flags, "$qst_capture_conspirators_party_3", pf_default_behavior, 0),
## (try_end),
## (try_end),
## (try_begin),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_3"),
## (eq, ":ai_behavior", ai_bhvr_travel_to_point),
## (party_get_ai_target_position, pos2, "$qst_capture_conspirators_party_3"),
## (party_get_position, pos1, "$qst_capture_conspirators_party_3"),
## (get_distance_between_positions, ":distance", pos2, pos1),
## (lt, ":distance", 200),
## (call_script, "script_get_closest_walled_center_of_faction", "$qst_capture_conspirators_party_3", ":faction_no"),#Can fail
## (ge, reg0, 0),
## (party_set_ai_object, "$qst_capture_conspirators_party_3", reg0),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_3", ai_bhvr_travel_to_party),
## (party_set_flags, "$qst_capture_conspirators_party_3", pf_default_behavior, 0),
## (try_end),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_4", 0),
## (party_is_active, "$qst_capture_conspirators_party_4"),
## (val_add, ":num_active_parties", 1),
## (try_begin),
## (party_is_in_any_town, "$qst_capture_conspirators_party_4"),
## (try_begin),
## (neg|party_is_in_town, "$qst_capture_conspirators_party_4", "$qst_capture_conspirators_party_1"),
## (remove_party, "$qst_capture_conspirators_party_4"),
## (else_try),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_4"),
## (neq, ":ai_behavior", ai_bhvr_hold),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_4", ai_bhvr_hold),
## (party_set_flags, "$qst_capture_conspirators_party_4", pf_default_behavior, 0),
## (party_attach_to_party, "$qst_capture_conspirators_party_4", "$qst_capture_conspirators_party_1"),
## (try_end),
## (try_end),
## (try_begin),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_4"),
## (eq, ":ai_behavior", ai_bhvr_travel_to_point),
## (party_get_ai_target_position, pos2, "$qst_capture_conspirators_party_4"),
## (party_get_position, pos1, "$qst_capture_conspirators_party_4"),
## (get_distance_between_positions, ":distance", pos2, pos1),
## (lt, ":distance", 200),
## (call_script, "script_get_closest_walled_center_of_faction", "$qst_capture_conspirators_party_4", ":faction_no"),#Can fail
## (ge, reg0, 0),
## (party_set_ai_object, "$qst_capture_conspirators_party_4", reg0),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_4", ai_bhvr_travel_to_party),
## (party_set_flags, "$qst_capture_conspirators_party_4", pf_default_behavior, 0),
## (try_end),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_5", 0),
## (party_is_active, "$qst_capture_conspirators_party_5"),
## (val_add, ":num_active_parties", 1),
## (try_begin),
## (party_is_in_any_town, "$qst_capture_conspirators_party_5"),
## (try_begin),
## (neg|party_is_in_town, "$qst_capture_conspirators_party_5", "$qst_capture_conspirators_party_1"),
## (remove_party, "$qst_capture_conspirators_party_5"),
## (else_try),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_5"),
## (neq, ":ai_behavior", ai_bhvr_hold),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_5", ai_bhvr_hold),
## (party_set_flags, "$qst_capture_conspirators_party_5", pf_default_behavior, 0),
## (party_attach_to_party, "$qst_capture_conspirators_party_5", "$qst_capture_conspirators_party_1"),
## (try_end),
## (try_end),
## (try_begin),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_5"),
## (eq, ":ai_behavior", ai_bhvr_travel_to_point),
## (party_get_ai_target_position, pos2, "$qst_capture_conspirators_party_5"),
## (party_get_position, pos1, "$qst_capture_conspirators_party_5"),
## (get_distance_between_positions, ":distance", pos2, pos1),
## (lt, ":distance", 200),
## (call_script, "script_get_closest_walled_center_of_faction", "$qst_capture_conspirators_party_5", ":faction_no"),#Can fail
## (ge, reg0, 0),
## (party_set_ai_object, "$qst_capture_conspirators_party_5", reg0),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_5", ai_bhvr_travel_to_party),
## (party_set_flags, "$qst_capture_conspirators_party_5", pf_default_behavior, 0),
## (try_end),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_6", 0),
## (party_is_active, "$qst_capture_conspirators_party_6"),
## (val_add, ":num_active_parties", 1),
## (try_begin),
## (party_is_in_any_town, "$qst_capture_conspirators_party_6"),
## (try_begin),
## (neg|party_is_in_town, "$qst_capture_conspirators_party_6", "$qst_capture_conspirators_party_1"),
## (remove_party, "$qst_capture_conspirators_party_6"),
## (else_try),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_6"),
## (neq, ":ai_behavior", ai_bhvr_hold),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_6", ai_bhvr_hold),
## (party_set_flags, "$qst_capture_conspirators_party_6", pf_default_behavior, 0),
## (party_attach_to_party, "$qst_capture_conspirators_party_6", "$qst_capture_conspirators_party_1"),
## (try_end),
## (try_end),
## (try_begin),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_6"),
## (eq, ":ai_behavior", ai_bhvr_travel_to_point),
## (party_get_ai_target_position, pos2, "$qst_capture_conspirators_party_6"),
## (party_get_position, pos1, "$qst_capture_conspirators_party_6"),
## (get_distance_between_positions, ":distance", pos2, pos1),
## (lt, ":distance", 200),
## (call_script, "script_get_closest_walled_center_of_faction", "$qst_capture_conspirators_party_6", ":faction_no"),#Can fail
## (ge, reg0, 0),
## (party_set_ai_object, "$qst_capture_conspirators_party_6", reg0),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_6", ai_bhvr_travel_to_party),
## (party_set_flags, "$qst_capture_conspirators_party_6", pf_default_behavior, 0),
## (try_end),
## (try_end),
## (try_begin),
## (gt, "$qst_capture_conspirators_party_7", 0),
## (party_is_active, "$qst_capture_conspirators_party_7"),
## (val_add, ":num_active_parties", 1),
## (try_begin),
## (party_is_in_any_town, "$qst_capture_conspirators_party_7"),
## (try_begin),
## (neg|party_is_in_town, "$qst_capture_conspirators_party_7", "$qst_capture_conspirators_party_1"),
## (remove_party, "$qst_capture_conspirators_party_7"),
## (else_try),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_7"),
## (neq, ":ai_behavior", ai_bhvr_hold),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_7", ai_bhvr_hold),
## (party_set_flags, "$qst_capture_conspirators_party_7", pf_default_behavior, 0),
## (party_attach_to_party, "$qst_capture_conspirators_party_7", "$qst_capture_conspirators_party_1"),
## (try_end),
## (try_end),
## (try_begin),
## (get_party_ai_behavior, ":ai_behavior", "$qst_capture_conspirators_party_7"),
## (eq, ":ai_behavior", ai_bhvr_travel_to_point),
## (party_get_ai_target_position, pos2, "$qst_capture_conspirators_party_7"),
## (party_get_position, pos1, "$qst_capture_conspirators_party_7"),
## (get_distance_between_positions, ":distance", pos2, pos1),
## (lt, ":distance", 200),
## (call_script, "script_get_closest_walled_center_of_faction", "$qst_capture_conspirators_party_7", ":faction_no"),#Can fail
## (ge, reg0, 0),
## (party_set_ai_object, "$qst_capture_conspirators_party_7", reg0),
## (party_set_ai_behavior, "$qst_capture_conspirators_party_7", ai_bhvr_travel_to_party),
## (party_set_flags, "$qst_capture_conspirators_party_7", pf_default_behavior, 0),
## (try_end),
## (try_end),
##
## (eq, ":num_active_parties", 0),
## (party_count_prisoners_of_type, ":count_captured_conspirators", "p_main_party", "trp_conspirator"),
## (party_count_prisoners_of_type, ":count_captured_conspirator_leaders", "p_main_party", "trp_conspirator_leader"),
## (val_add, ":count_captured_conspirators", ":count_captured_conspirator_leaders"),
## (try_begin),
## (store_div, ":limit", "$qst_capture_conspirators_num_troops_to_capture", 2),
## (gt, ":count_captured_conspirators", ":limit"),
## (call_script, "script_succeed_quest", "qst_capture_conspirators"),
## (else_try),
## (call_script, "script_fail_quest", "qst_capture_conspirators"),
## (try_end),
## ],
## []
## ),
# Follow Spy quest
(0.5, 0.0, 0.0,
[
(check_quest_active, "qst_follow_spy"),
(eq, "$qst_follow_spy_no_active_parties", 0),
(quest_get_slot, ":quest_giver_center", "qst_follow_spy", slot_quest_giver_center),
(quest_get_slot, ":quest_object_center", "qst_follow_spy", slot_quest_object_center),
(assign, ":abort_meeting", 0),
(try_begin),
(this_or_next|ge, "$qst_follow_spy_run_away", 2),
(this_or_next|neg|party_is_active, "$qst_follow_spy_spy_party"),
(neg|party_is_active, "$qst_follow_spy_spy_partners_party"),
(else_try),
(eq, "$qst_follow_spy_meeting_state", 0),
(store_distance_to_party_from_party, ":cur_distance", "p_main_party", "$qst_follow_spy_spy_party"),
(try_begin),
(assign, ":min_distance", 3),
(try_begin),
(is_currently_night),
(assign, ":min_distance", 1),
(try_end),
(le, ":cur_distance", ":min_distance"),
(store_distance_to_party_from_party, ":player_distance_to_quest_giver_center", "p_main_party", ":quest_giver_center"),
(gt, ":player_distance_to_quest_giver_center", 1),
(val_add, "$qst_follow_spy_run_away", 1),
(try_begin),
(eq, "$qst_follow_spy_run_away", 2),
(assign, ":abort_meeting", 1),
(display_message, "str_qst_follow_spy_noticed_you"),
(try_end),
(else_try),
(store_distance_to_party_from_party, ":cur_distance", "$qst_follow_spy_spy_partners_party", "$qst_follow_spy_spy_party"),
(le, ":cur_distance", 1),
(party_attach_to_party, "$qst_follow_spy_spy_party", "$qst_follow_spy_spy_partners_party"),
(assign, "$qst_follow_spy_meeting_state", 1),
(assign, "$qst_follow_spy_meeting_counter", 0),
(try_end),
(else_try),
(eq, "$qst_follow_spy_meeting_state", 1),
(store_distance_to_party_from_party, ":cur_distance", "p_main_party", "$qst_follow_spy_spy_partners_party"),
(try_begin),
(le, ":cur_distance", 1),
(party_detach, "$qst_follow_spy_spy_party"),
(val_add, "$qst_follow_spy_run_away", 1),
(try_begin),
(eq, "$qst_follow_spy_run_away", 2),
(assign, ":abort_meeting", 1),
(display_message, "str_qst_follow_spy_noticed_you"),
(try_end),
(else_try),
(val_add, "$qst_follow_spy_meeting_counter", 1),
(gt, "$qst_follow_spy_meeting_counter", 4),
(party_detach, "$qst_follow_spy_spy_party"),
(assign, ":abort_meeting", 1),
(assign, "$qst_follow_spy_meeting_state", 2),
(try_end),
(try_end),
(try_begin),
(eq, ":abort_meeting", 1),
(party_set_ai_object, "$qst_follow_spy_spy_party", ":quest_giver_center"),
(party_set_ai_object, "$qst_follow_spy_spy_partners_party", ":quest_object_center"),
(party_set_ai_behavior, "$qst_follow_spy_spy_party", ai_bhvr_travel_to_party),
(party_set_ai_behavior, "$qst_follow_spy_spy_partners_party", ai_bhvr_travel_to_party),
(party_set_flags, "$qst_follow_spy_spy_party", pf_default_behavior, 0),
(party_set_flags, "$qst_follow_spy_spy_partners_party", pf_default_behavior, 0),
(try_end),
(assign, ":num_active", 0),
(try_begin),
(party_is_active, "$qst_follow_spy_spy_party"),
(val_add, ":num_active", 1),
(party_is_in_town, "$qst_follow_spy_spy_party", ":quest_giver_center"),
(remove_party, "$qst_follow_spy_spy_party"),
(assign, "$qst_follow_spy_spy_back_in_town", 1),
(val_sub, ":num_active", 1),
(try_end),
(try_begin),
(party_is_active, "$qst_follow_spy_spy_partners_party"),
(val_add, ":num_active", 1),
(party_is_in_town, "$qst_follow_spy_spy_partners_party", ":quest_object_center"),
(remove_party, | |
# repo: YBZh/AuxSelfTrain
import torch
import os
import math
import torch.nn as nn
import time
import numpy as np
from .base_solver import BaseSolver
import torch.nn.functional as F
from utils.utils import AverageMeter, to_cuda, accuracy, weight_ema, to_onehot, EMA_fixmatch, LabelGuessor, fix_bn, \
release_bn, get_labels_from_classifier_prediction, get_labels_from_Sphericalkmeans, get_labels_from_kmeans, get_labels_from_lp
import ipdb
from torch.distributions import Categorical
from sklearn import svm
import ot
import ot.plot
import random
def accuracy_for_each_class(output, target, total_vector, correct_vector):
    """Accumulate per-class sample counts and correct top-1 prediction counts.

    Args:
        output: score/logit tensor of shape (batch, num_classes).
        target: ground-truth class indices, shape (batch,).
        total_vector: running per-class sample counts, updated in place.
        correct_vector: running per-class correct-prediction counts, updated
            in place.

    Returns:
        The (total_vector, correct_vector) pair, for caller convenience.
    """
    batch_size = target.size(0)
    _, pred = output.topk(1, 1, True, True)
    pred = pred.t()
    # view(-1) instead of squeeze(): squeeze() yields a 0-dim tensor when
    # batch_size == 1, which would make the correct[i] indexing below fail.
    correct = pred.eq(target.view(1, -1)).float().cpu().view(-1)
    for i in range(batch_size):
        total_vector[target[i]] += 1
        correct_vector[target[i]] += correct[i]
    return total_vector, correct_vector
def proxy_a_distance(source_X, target_X, verbose=False):
    """
    Compute the Proxy-A-Distance of a source/target representation
    """
    n_src = np.shape(source_X)[0]
    n_tgt = np.shape(target_X)[0]
    if verbose:
        print('PAD on', (n_src, n_tgt), 'examples')

    # First half of each domain trains a linear domain classifier, second
    # half evaluates it; labels: 0 = source sample, 1 = target sample.
    half_src, half_tgt = n_src // 2, n_tgt // 2
    train_X = np.vstack((source_X[:half_src, :], target_X[:half_tgt, :]))
    train_Y = np.hstack((np.zeros(half_src, dtype=int),
                         np.ones(half_tgt, dtype=int)))
    test_X = np.vstack((source_X[half_src:, :], target_X[half_tgt:, :]))
    test_Y = np.hstack((np.zeros(n_src - half_src, dtype=int),
                        np.ones(n_tgt - half_tgt, dtype=int)))

    # Keep the lowest held-out risk across the regularization sweep.
    best_risk = 1.0
    for C in np.logspace(-5, -1, 5):
        clf = svm.SVC(C=C, kernel='linear', verbose=False)
        clf.fit(train_X, train_Y)
        train_risk = np.mean(clf.predict(train_X) != train_Y)
        test_risk = np.mean(clf.predict(test_X) != test_Y)
        if verbose:
            print('[ PAD C = %f ] train risk: %f test risk: %f' % (C, train_risk, test_risk))
        # A classifier below chance is as informative as its complement.
        if test_risk > .5:
            test_risk = 1. - test_risk
        best_risk = min(best_risk, test_risk)
    return 2 * (1. - 2 * best_risk)
def wasserstein_infinity_calculation(s_cate_i, t_cate_i, power=10):
    """Approximate the Wasserstein-infinity distance between two feature sets.

    Computes a p-Wasserstein-style optimal-transport cost with a large
    exponent ``power`` (default 10) as a proxy for the infinity case.
    Returns the integer 0 when either set is empty, otherwise the transport
    cost from ``ot.emd2`` raised to 1/power.

    NOTE(review): both sets are truncated to the smaller cardinality in the
    order given (no shuffling) — assumes input order carries no bias; confirm
    with callers.
    """
    nb_source = np.shape(s_cate_i)[0]
    nb_target = np.shape(t_cate_i)[0]
    if nb_source == 0 or nb_target == 0:
        return 0
    else:
        # Truncate both sets to a common size so the cost matrix is square.
        num_min = min(nb_target, nb_source)
        s_cate_i = s_cate_i[: num_min]
        t_cate_i = t_cate_i[: num_min]
        s_cate_i = s_cate_i.reshape(num_min, 1, -1)
        t_cate_i = t_cate_i.reshape(num_min, -1)
        # Pairwise Euclidean distances via broadcasting, then raised to
        # ``power`` so that the largest pairwise distance dominates the cost.
        M = ((s_cate_i - t_cate_i) ** 2).sum(2) ** 0.5 #### the pair wise distance
        M = M ** power
        # Uniform (unnormalized) weights on both sides.
        a = np.ones(num_min)
        b = np.ones(num_min)
        ot_cost = ot.emd2(a, b, M, numItermax=2000000)
        ot_cost = ot_cost ** (1/power)
        return ot_cost
class Solver(BaseSolver):
    def __init__(self, G, F, dataloaders, args, **kwargs):
        """Set up the solver: EMA teacher, losses, pseudo-label loaders, resume.

        Args:
            G: feature-extractor network.
            F: classifier head applied to G's features.
            dataloaders: training/validation dataloaders handed to BaseSolver.
            args: parsed options (reads ema_decay, thr, resume, ...).
        """
        super(Solver, self).__init__(G, F, dataloaders, args, **kwargs)
        # EMA ("teacher") copy of G/F, updated alongside training.
        self.ema = EMA_fixmatch(G, F, args.ema_decay)
        # Thresholded pseudo-label guesser (FixMatch-style).
        self.lb_guessor = LabelGuessor(thresh=args.thr)
        self.CELoss = nn.CrossEntropyLoss(reduction='none')
        # Separate dataloaders used only for pseudo-label generation;
        # imported here at call time, presumably to avoid an import cycle
        # with the data package — TODO confirm.
        from data.prepare_data_da import generate_dataloader_pseudo_label as Dataloader
        dataloaders_pseudo = Dataloader(args)
        self.init_data_pseudo(dataloaders_pseudo)
        self.selected_index = None          # target samples kept after filtering (set later)
        self.selected_index_source = None   # source samples kept after filtering (set later)
        if args.resume != '':
            # Restore weights and training progress from a checkpoint file.
            resume_dict = torch.load(args.resume)
            self.G.load_state_dict(resume_dict['G_state_dict'])
            self.F.load_state_dict(resume_dict['F_state_dict'])
            self.best_prec1 = resume_dict['best_prec1']
            self.iter = resume_dict['iter']
def init_data_pseudo(self, dataloaders):
self.pseudo_data = {key: dict() for key in dataloaders}
for key in self.pseudo_data.keys():
if key not in dataloaders:
continue
cur_dataloader = dataloaders[key]
self.pseudo_data[key]['loader'] = cur_dataloader
    def pre_train_classifier(self):
        """Pre-train G/F with cross-entropy on labelled source data.

        Runs ``args.pre_epoch`` epochs over the source loader only, updates
        the EMA teacher after every optimizer step, prints progress every 10
        iterations, appends an epoch summary to log.txt, and evaluates once
        per epoch. When ``args.fixbn`` is set, BatchNorm running statistics
        are frozen for the whole pre-training phase.

        NOTE(review): ``losses_s`` / ``losses_t`` are created but never
        updated, so the LossL/LossU fields in the printed log are always 0;
        only LossAll is meaningful.
        """
        if self.args.fixbn:
            print('fix the running mean and var for BN')
            self.G.apply(fix_bn)
            self.F.apply(fix_bn)
        initial_lr = self.args.base_lr
        for i in range(self.args.pre_epoch):
            # Constant LR across pre-training epochs (decay is commented out).
            new_lr = initial_lr #/ (10 ** i)
            print('new lr for classifier training is: %3f' % (new_lr))
            self.G.train()
            self.F.train()
            # Fresh iterators each epoch; target iterator is prepared even
            # though only source batches are consumed below.
            self.train_data['source']['iterator'] = iter(self.train_data['source']['loader'])
            self.train_data['target']['iterator'] = iter(self.train_data['target']['loader'])
            self.update_lr(given_lr=new_lr)
            self.iters_per_epoch = len(self.train_data['source']['loader'])
            print('iters in each epoch is: %d' % (self.iters_per_epoch))
            iters_counter_within_epoch = 0
            data_time = AverageMeter()
            batch_time = AverageMeter()
            losses_all = AverageMeter()
            losses_s = AverageMeter()
            losses_t = AverageMeter()
            stop = False
            end = time.time()
            while not stop:
                source_data, _, source_gt, _, _ = self.get_samples('source')
                source_data = to_cuda(source_data)
                source_gt = to_cuda(source_gt)
                data_time.update(time.time() - end)
                logit = self.F(self.G(source_data))
                loss = self.CELoss(logit, source_gt).mean()
                self.optimizer_G.zero_grad()
                self.optimizer_F.zero_grad()
                loss.backward()
                # The feature extractor is only fine-tuned when it started
                # from pre-trained weights; otherwise only F is stepped.
                if self.args.pre_trained_G:
                    self.optimizer_G.step()
                self.optimizer_F.step()
                self.ema.update_params()
                losses_all.update(loss.item(), source_data.size(0))
                batch_time.update(time.time() - end)
                end = time.time()
                self.iters += 1
                iters_counter_within_epoch += 1
                if self.iters % 10 == 0:
                    print(
                        " Pre-Train:epoch: %d:[%d/%d], Tdata: %3f, Tbatch: %3f, LossL: %3f, LossU: %3f, LossAll:%3f" % \
                        (i, self.iters, self.args.max_iters, data_time.avg, batch_time.avg, losses_s.avg,
                         losses_t.avg, losses_all.avg))
                if iters_counter_within_epoch >= self.iters_per_epoch:
                    log = open(os.path.join(self.args.save_dir, 'log.txt'), 'a')
                    log.write("\n")
                    log.write(
                        " Pre-Train:epoch: %d:[%d/%d], Tdata: %3f, Tbatch: %3f, LossL: %3f, LossU: %3f, LossAll:%3f" % \
                        (i, self.iters, self.args.max_iters, data_time.avg, batch_time.avg, losses_s.avg,
                         losses_t.avg, losses_all.avg))
                    log.close()
                    stop = True
            # Sync EMA buffers (e.g. BN statistics) once per epoch, then test.
            self.ema.update_buffer()
            acc, acc_val = self.test()
            log = open(os.path.join(self.args.save_dir, 'log.txt'), 'a')
            log.write(" Best acc by far:%3f" % (acc))
            log.close()
def Get_pseudo_labels_with_classifiers_consistency(self):
# self.G.apply(fix_bn)
# self.F.apply(fix_bn)
if self.args.feat_type_pseudo == 'train':
self.G.train()
self.F.train()
elif self.args.feat_type_pseudo == 'eval':
self.G.eval()
self.F.eval()
else:
raise NotImplementedError
################## prepare all features and other ###############################################
target_u_feature_list = []
target_u_prediction_list = []
target_u_index_list = []
target_u_label_list = []
target_u_path_list = []
print('prepare feature of target unlabeled data')
for i, (input, _, target_for_visual, index, path) in enumerate(self.pseudo_data['target']['loader']):
if i % 100 == 0:
print(i)
input = to_cuda(input)
target_for_visual = to_cuda(target_for_visual)
if self.args.feat_type_pseudo == 'train':
org_state_G = {
k: v.clone().detach()
for k, v in self.G.state_dict().items()
}
org_state_F = {
k: v.clone().detach()
for k, v in self.F.state_dict().items()
}
with torch.no_grad():
target_u_feature_iter = self.G(input)
target_u_prediction_itre = self.F(target_u_feature_iter)
if self.args.feat_type_pseudo == 'train':
self.G.load_state_dict(org_state_G)
self.F.load_state_dict(org_state_F)
target_u_feature_list.append(target_u_feature_iter)
target_u_prediction_list.append(target_u_prediction_itre)
target_u_index_list.append(index)
# ipdb.set_trace()
# target_u_path_list+=path
target_u_label_list.append(target_for_visual)
target_u_feature_matrix = torch.cat(target_u_feature_list, dim=0)
target_u_prediction_matrix = torch.cat(target_u_prediction_list, dim=0)
target_u_index = torch.cat(target_u_index_list, dim=0)
target_u_gt_label_for_visual = torch.cat(target_u_label_list)
source_feature_list = []
source_label_list = []
source_index_list = []
source_cate_feature_list = []
for i in range(self.args.num_class):
source_cate_feature_list.append([])
print('prepare features of source data')
for i, (input, _, target, index, path) in enumerate(self.pseudo_data['source']['loader']):
input = to_cuda(input)
if self.args.feat_type_pseudo == 'train':
org_state_G = {
k: v.clone().detach()
for k, v in self.G.state_dict().items()
}
org_state_F = {
k: v.clone().detach()
for k, v in self.F.state_dict().items()
}
with torch.no_grad():
source_feature_iter = self.G(input)
if self.args.feat_type_pseudo == 'train':
self.G.load_state_dict(org_state_G)
self.F.load_state_dict(org_state_F)
source_feature_list.append(source_feature_iter)
source_label_list.append(target)
source_index_list.append(index)
for j in range(input.size(0)):
img_label = target[j]
source_cate_feature_list[img_label].append(source_feature_iter[j].view(1, source_feature_iter.size(1)))
source_feature_matrix = torch.cat(source_feature_list, dim=0)
hard_label_s = torch.cat(source_label_list, dim=0)
source_index = torch.cat(source_index_list, dim=0)
soft_label_s = to_onehot(hard_label_s, self.args.num_class)
############################################################################################
################# get the prediction with the discriminative clustering#######
soft_label_fc, soft_label_uniform_fc, hard_label_fc, hard_label_uniform_fc, acc_fc, acc_uniform_fc = get_labels_from_classifier_prediction(target_u_prediction_matrix, self.args.T, target_u_gt_label_for_visual)
if self.args.no_uniform:
soft_label_uniform_fc = soft_label_fc
hard_label_uniform_fc = hard_label_fc
acc_uniform_fc = acc_fc
log = open(os.path.join(self.args.save_dir, 'log.txt'), 'a')
log.write("\n")
log.write('FC Prediction without and with uniform prior are: %3f and %3f' % (acc_fc, acc_uniform_fc))
log.close()
scores_fc, hard_label_fc = torch.max(soft_label_uniform_fc, dim=1)
if self.args.filter_type == 'cluster' or self.args.filter_type == 'cluster_lp' or self.args.filter_type == 'all':
if self.args.pseudo_label_generator == 'spheticalkmeans':
for i in range(self.args.num_class):
source_cate_feature_list[i] = torch.cat(source_cate_feature_list[i], dim=0)
source_cate_feature_list[i] = source_cate_feature_list[i].mean(0)
source_cate_feature_list[i] = F.normalize(source_cate_feature_list[i], dim=0, p=2)
source_cate_feature_list[i] = source_cate_feature_list[i].cpu().numpy()
source_cate_feature_list = np.array(source_cate_feature_list)
target_u_feature_matrix_norm = F.normalize(target_u_feature_matrix, dim=1, p=2)
soft_label_kmean, soft_label_uniform_kmean, hard_label_kmean, hard_label_uniform_kmean, acc_kmean, acc_uniform_kmean = \
get_labels_from_Sphericalkmeans(initial_centers_array=source_cate_feature_list, target_u_feature=target_u_feature_matrix_norm.cpu(),
num_class=self.args.num_class, gt_label=target_u_gt_label_for_visual.cpu(), T=0.05, max_iter=100, target_l_feature=None)
if self.args.no_uniform:
soft_label_uniform_kmean = soft_label_kmean
hard_label_uniform_kmean = hard_label_kmean
acc_uniform_kmean = acc_kmean
log = open(os.path.join(self.args.save_dir, 'log.txt'), 'a')
log.write("\n")
log.write('sphetical Kmeans Prediction without and with uniform prior are: %3f and %3f' % (acc_kmean, acc_uniform_kmean))
log.close()
# scores, hard_label_fc = torch.max(soft_label_uniform_kmean, dim=1)
# idx = target_u_index[scores > self.args.thr]
# self.all_hard_pseudo_label = hard_label_uniform_kmean
# self.all_soft_pseudo_label = soft_label_uniform_kmean
elif self.args.pseudo_label_generator == 'kmeans':
for i in range(self.args.num_class):
## only one option here, adopt the source data to initial centers
source_cate_feature_list[i] = torch.cat(source_cate_feature_list[i], dim=0)
source_cate_feature_list[i] = source_cate_feature_list[i].mean(0)
source_cate_feature_list[i] = source_cate_feature_list[i].cpu().numpy()
target_l_cate_feature_list = np.array(source_cate_feature_list)
# target_u_feature_matrix_norm = F.normalize(target_u_feature_matrix, dim=1, p=2)
# target_l_feature_matrix_norm = F.normalize(target_l_feature_matrix, dim=1, p=2)
soft_label_kmean, soft_label_uniform_kmean, hard_label_kmean, hard_label_uniform_kmean, acc_kmean, acc_uniform_kmean = \
get_labels_from_kmeans(initial_centers_array=target_l_cate_feature_list, target_u_feature=target_u_feature_matrix.cpu(),
num_class=self.args.num_class, gt_label=target_u_gt_label_for_visual.cpu(), T=0.05, max_iter=100, target_l_feature=None)
if self.args.no_uniform:
soft_label_uniform_kmean = soft_label_kmean
hard_label_uniform_kmean = hard_label_kmean
acc_uniform_kmean = acc_kmean
log = open(os.path.join(self.args.save_dir, 'log.txt'), 'a')
log.write("\n")
log.write('Kmeans Prediction without and with uniform prior are: %3f and %3f' % (acc_kmean, acc_uniform_kmean))
log.close()
# scores, hard_label_fc = torch.max(soft_label_uniform_kmean, dim=1)
# idx = target_u_index[scores > self.args.thr]
# self.all_hard_pseudo_label = hard_label_uniform_kmean
# self.all_soft_pseudo_label = soft_label_uniform_kmean
if self.args.filter_type == 'lp' or self.args.filter_type == 'fc_lp' or self.args.filter_type == 'cluster_lp' or self.args.filter_type == 'all':
if self.args.lp_labeled == 's':
labeled_feature_matrix = source_feature_matrix
labeled_onehot = soft_label_s
else:
raise NotImplementedError
soft_label_lp, soft_label_uniform_lp, hard_label_lp, hard_label_uniform_lp, acc_lp, acc_uniform_lp = \
get_labels_from_lp(labeled_features=labeled_feature_matrix.cpu(), labeled_onehot_gt=labeled_onehot.cpu(),
unlabeled_features = target_u_feature_matrix.cpu(), gt_label=target_u_gt_label_for_visual.cpu(),
num_class=self.args.num_class, dis=self.args.lp_dis, solver=self.args.lp_solver, graphk = self.args.lp_graphk, alpha=self.args.lp_alpha)
if self.args.no_uniform:
soft_label_uniform_lp = soft_label_lp
hard_label_uniform_lp = hard_label_lp
acc_uniform_lp = acc_lp
log = open(os.path.join(self.args.save_dir, 'log.txt'), 'a')
log.write("\n")
log.write('LP Prediction without and with uniform prior are: %3f and %3f' % (acc_lp, acc_uniform_lp))
log.close()
# scores, hard_label_fc = torch.max(soft_label_uniform_lp, dim=1)
###################################### norm all scores,
#idx = target_u_index[scores > self.args.thr]
soft_label_uniform_fc = soft_label_uniform_fc.cpu()
if not self.args.noprogressive:
percent = self.iters / (self.args.max_iters * 0.9) ### the last 0.1 * epoch train with all labeled data.
if percent > 1.0:
percent = 1.0
num_unl = | |
import numpy as np
import pandas as pd
from datetime import datetime
from .pandas_functions import unique_values_from_column
from bokeh.models import ColumnDataSource, Select, DataTable, TableColumn, DateFormatter, NumberFormatter, Circle, Label
from bokeh.models import NumeralTickFormatter
from bokeh.layouts import column, row
from bokeh.plotting import figure
from bokeh.models.widgets import Div
class Category(object):
"""Category Object that provides methods to generate gridplot used in Category View in flask_app.
Object expects:
- appropriate column names for the dataframe that will be provided to other methods;
- month_format string, which represents in what string format date in monthyear column was saved;
- color_map ColorMap object, which exposes attributes for specific colors.
Main methods are:
- gridplot() : workhorse of the Object, creates elements gridplot, creates callbacks and updates
appropriate elements of the grid. Returns grid that can be served in the Bokeh Server.
- initalize_grid_elements() : creates all elements and data sources of gridplot and assigns them
appropriately to self.grid_elem_dict and self.grid_source_dict
- update_grid_on_chosen_category_change() and update_grid_on_month_selection_change() : functions that are
called when user changes selected category or selected months on a gridplot; they are responsible for
appropriate updating gridplot elements.
Attributes of the instance Object are described as single-line comments in __init__() method;
Attributes of the class are HTML templates used in Div Elements creation; they are described in corresponding
functions that update those Divs.
"""
category_title = "{category}"
total_from_category = "<span>{total_from_category:.2f}</span> - total Money spent"
category_fraction = "It makes <span>{category_fraction:.2%}</span> of all Expenses"
total_products_from_category = "<span>{total_products_from_category:.0f}</span> - number of Products bought"
category_products_fraction = "This is <span>{category_products_fraction:.2%}</span> of all Products"
statistics_table = """<table>
<caption>Details</caption>
<thead>
<tr>
<th scope="col"></th>
<th scope="col"></th>
</tr>
</thead>
<tbody>
<tr>
<th scope="row">Last Month</th>
<td>{last:.2f}</td>
</tr>
<tr>
<th scope="row">Average</th>
<td>{mean:.2f}</td>
</tr>
<tr>
<th scope="row">Median</th>
<td>{median:.2f}</td>
</tr>
<tr>
<th scope="row">Minimum</th>
<td>{min:.2f}</td>
</tr>
<tr>
<th scope="row">Maximum</th>
<td>{max:.2f}</td>
</tr>
<tr>
<th scope="row">Standard Deviation</th>
<td>{std:.2f}</td>
</tr>
</tbody>
<tfoot>
<tr>
<th scope="row"></th>
<td>[{curr}]</td>
</tr>
</tfoot>
</table>"""
line_plot_tooltip = """
<div class="hover_tooltip">
<div>
<span>Month: </span>
<span>@x</span>
</div>
<div>
<span>Value: </span>
<span>@y{0.00}</y>
</div>
</div>
"""
interaction_message = "Select MonthPoints on the Plot to interact with the Dashboard"
    def __init__(self, category_colname, monthyear_colname, price_colname, product_colname,
                 date_colname, currency_colname, shop_colname, month_format, color_mapping):
        """Remember column names, date format and color map; zero out state.

        Args:
            category_colname, monthyear_colname, price_colname,
            product_colname, date_colname, currency_colname, shop_colname:
                names of the corresponding columns in the expense dataframe
                later passed to gridplot().
            month_format: strftime format in which dates are stored in the
                monthyear column.
            color_mapping: ColorMap object exposing specific color attributes.
        """
        # Column Names
        self.category = category_colname
        self.monthyear = monthyear_colname
        self.price = price_colname
        self.product = product_colname
        self.date = date_colname
        self.currency = currency_colname
        self.shop = shop_colname
        # MonthYear Formatting
        self.monthyear_format = month_format  # formatting of date in MonthYear column
        # ColorMap
        self.color_map = color_mapping  # ColorMap object exposing attributes with specific colors
        # DataFrames
        self.original_df = None  # original dataframe passed to the gridplot function
        self.chosen_category_df = None  # original dataframe filtered only to the chosen category
        self.chosen_months_and_category_df = None  # original dataframe filtered only to the chosen category and months
        # State Variables
        self.categories = None       # all selectable category names
        self.months = None           # all months present in the dataframe
        self.chosen_category = None  # category currently shown in the grid
        self.chosen_months = None    # months currently selected on the line plot
        # Identifiers for Grid Elements and DataSources
        self.g_category_title = "Category Title"
        self.g_dropdown = "Dropdown"
        self.g_statistics_table = "Statistics Table"
        self.g_total_from_category = "Total Category"
        self.g_category_fraction = "Category Fraction"
        self.g_total_products_from_category = "Total Products From Category"
        self.g_category_products_fraction = "Category Products Fraction"
        self.g_line_plot = "Line Plot"
        self.g_product_histogram = "Product Histogram"
        self.g_transactions = "Transactions"
        # Dicts of Elements and DataSources (filled by initialize_grid_elements)
        self.grid_elem_dict = None
        self.grid_source_dict = None
    def gridplot(self, dataframe, current_categories):
        """Create and return the Category View gridplot.

        Args:
            dataframe: expense DataFrame with the columns named in __init__.
            current_categories: list of category names the user can pick from.

        Initializes all grid elements, selects the first category of
        current_categories as the starting view, refreshes every element for
        it, wires the dropdown and line-plot-selection callbacks, and returns
        the assembled bokeh layout, ready to be served in a Bokeh Server.

        NOTE: the category list is currently supplied by the caller; its
        extraction may later be delegated to another object (see TODO below).
        """
        self.original_df = dataframe
        self.chosen_category_df = dataframe
        self.chosen_months_and_category_df = dataframe
        # TODO: categories will be extracted depending on settings
        self.categories = current_categories
        self.months = unique_values_from_column(dataframe, self.monthyear)
        self.chosen_months = self.months  # during initialization, all months are selected
        self.__update_chosen_category(self.categories[0])
        self.initialize_grid_elements()
        self.update_grid_on_chosen_category_change()
        # Setting up the Callbacks
        # Fires when the user picks a different category in the Select widget.
        def dropdown_callback(attr, old, new):
            if new != old:
                self.__update_chosen_category(new)
                self.update_grid_on_chosen_category_change()
        self.grid_elem_dict[self.g_dropdown].on_change("value", dropdown_callback)
        # Fires when the user (de)selects month points on the line plot.
        def selection_callback(attr, old, new):
            new_indices = set(new)
            old_indices = set(old)
            if new_indices != old_indices:
                self.update_grid_on_month_selection_change(new_indices)
        self.grid_source_dict[self.g_line_plot].selected.on_change("indices", selection_callback)
        # Gridplot layout: title row, info/statistics/plot row, tables row.
        output = column(
            row(self.grid_elem_dict[self.g_category_title], css_classes=["first_row"]),
            row(
                column(
                    self.grid_elem_dict[self.g_total_from_category],
                    self.grid_elem_dict[self.g_category_fraction],
                    self.grid_elem_dict[self.g_total_products_from_category],
                    self.grid_elem_dict[self.g_category_products_fraction],
                    css_classes=["info_column"]),
                column(self.grid_elem_dict[self.g_statistics_table]),
                column(
                    self.grid_elem_dict[self.g_dropdown],
                    self.grid_elem_dict[self.g_line_plot]),
                css_classes=["second_row"]),
            row(
                self.grid_elem_dict[self.g_product_histogram],
                self.grid_elem_dict[self.g_transactions],
                css_classes=["third_row"]),
            sizing_mode="stretch_width"
        )
        return output
    def initialize_grid_elements(self):
        """Create every grid Element and DataSource and store them on self.

        Elements: title Div, statistics-table Div, four "headline" Divs,
        category Select dropdown, the Line Plot and two DataTables.

        Separate ColumnDataSources are kept only for the Line Plot and the
        two tables: those widgets re-render automatically when their source
        data changes, while the Divs are updated through their .text
        property instead.

        Results are stored in .grid_elem_dict (elements) and
        .grid_source_dict (data sources), keyed by the g_* identifiers
        defined in __init__.
        """
        elem_dict = {}
        source_dict = {}
        # Category Title and Statistics Table
        elem_dict[self.g_category_title] = Div(text="", css_classes=["category_title"], )
        elem_dict[self.g_statistics_table] = Div(text="", css_classes=["statistics_table"], )
        # 4 Headline Divs (all start empty; filled on first category update)
        info_element_class = "info_element"
        elem_dict[self.g_total_from_category] = Div(text="", css_classes=[info_element_class])
        elem_dict[self.g_category_fraction] = Div(text="", css_classes=[info_element_class])
        elem_dict[self.g_total_products_from_category] = Div(text="", css_classes=[info_element_class])
        elem_dict[self.g_category_products_fraction] = Div(text="", css_classes=[info_element_class])
        # Line Plot (source created first, then the figure bound to it)
        source_dict[self.g_line_plot] = self.__create_line_plot_source()
        elem_dict[self.g_line_plot] = self.__create_line_plot(source_dict[self.g_line_plot])
        # DataTables
        source_dict[self.g_product_histogram] = self.__create_product_histogram_source()
        elem_dict[self.g_product_histogram] = self.__create_product_histogram_table(
            source_dict[self.g_product_histogram])
        source_dict[self.g_transactions] = self.__create_transactions_source()
        elem_dict[self.g_transactions] = self.__create_transactions_table(source_dict[self.g_transactions])
        # Select Dropdown (starts on the currently chosen category)
        elem_dict[self.g_dropdown] = Select(value=self.chosen_category, options=self.categories,
                                            css_classes=["category_dropdown"])
        self.grid_elem_dict = elem_dict
        self.grid_source_dict = source_dict
def update_grid_on_chosen_category_change(self):
"""Helper function that calls specific updates for specified elements of the grid."""
self.__update_chosen_category_dataframe()
self.__update_chosen_months_and_category_dataframe()
self.__update_category_title()
self.__update_statistics_table()
self.__update_total_from_category()
self.__update_category_fraction()
self.__update_total_products_from_category()
self.__update_category_products_fraction()
self.__update_line_plot()
self.__update_product_histogram_table()
self.__update_transactions_table()
def update_grid_on_month_selection_change(self, new_indices):
"""Helper function that calls specific updates for specified elements of the grid."""
self.__update_chosen_months(new_indices)
self.__update_chosen_months_and_category_dataframe()
self.__update_transactions_table()
self.__update_product_histogram_table()
    def change_category_column(self, new_col):
        """Replace the category column name (.category) with ``new_col``.

        Affects all subsequent filtering/aggregation; existing grid elements
        are not refreshed by this call.
        """
        self.category = new_col
# ========== Creation of Grid Elements ========== #
def __create_line_plot_source(self):
"""Creation of Line Plot DataSource for Gridplot.
ColumnDataSource consist of two keys:
- x : contains months for the X-axis, formatted in "%b-%y" format (e.g. Jan-19)
- y : temp values of the same length as x; they will be replaced when the update function for the
line plot is called
Returns created ColumnDataSource.
"""
temp_values = [1] * len(self.months) # done to ensure that the shape of y values is the same as x
formatted_months = [datetime.strftime(datetime.strptime(month, self.monthyear_format), "%b-%y")
for month in self.months]
source = ColumnDataSource(
data={
"x": formatted_months,
"y": temp_values
}
)
return source
def __create_line_plot(self, cds):
"""Creates Line Plot showing trends of different amounts of money spent on chosen category.
Function accept argument:
- cds
which should be ColumnDataSource with "x" and "y" keys and corresponding collections of values associated
with them. Cds will be then used as a source for a created Plot.
Created figure will have bokeh toolbar with only "box_select" and "hover_tool" options enabled;
hover tooltip is defined as HTML in .line_plot_tooltip property.
Figure will plot two models: Line and Scatter (Circles). This is done so that user can freely select
points on the graph and have visual cues (decreased alpha) that the selection works.
Figure itself will plot "x" values from CDS on X-axis and "y" values from CDS on Y-axis.
Additionally, message is added at the bottom of the plot, informing user about possibility of
selecting points on the graph.
Returns created Plot p.
"""
# | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2018, <NAME>
# Distributed under the MIT License. See LICENSE.md for more info.
# http://pymiescatt.readthedocs.io/en/latest/inverse.html
from PyMieScatt.Mie import Mie_ab
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.contour import QuadContourSet
from matplotlib.collections import LineCollection
from scipy.ndimage import zoom
from scipy.integrate import trapz
from shapely import geometry
def coerceDType(d):
    """Return *d* as a numpy array, passing ndarray inputs through unchanged.

    ``np.asarray`` is the idiomatic equivalent of the original
    type-check-then-convert: ndarray inputs are returned as-is (no copy),
    anything else (list, tuple, scalar) is converted with ``np.array``.
    """
    return np.asarray(d)
def Inversion(Qsca, Qabs, wavelength, diameter, nMin=1, nMax=3, kMin=0.001,
              kMax=1, scatteringPrecision=0.010, absorptionPrecision=0.010,
              spaceSize=120, interp=2):
    """Invert measured (Qsca, Qabs) to candidate complex refractive indices.

    Sweeps a grid of m = n + ik with n linear in [nMin, nMax] and k
    log-spaced in [kMin, kMax], evaluates the forward Mie model (fastMieQ)
    at every grid point, and returns the complex indices where both the
    scattering and absorption efficiencies match the measurements within the
    given relative precisions.

    Returns:
        np.ndarray of complex solutions (intersection of the scattering and
        absorption matches); may be empty if no grid point satisfies both.
    """
    nRange = np.linspace(nMin, nMax, spaceSize)
    kRange = np.logspace(np.log10(kMin), np.log10(kMax), spaceSize)
    scaSpace = np.zeros((spaceSize, spaceSize))
    absSpace = np.zeros((spaceSize, spaceSize))
    # Forward model over the full (n, k) grid.
    for ni, n in enumerate(nRange):
        for ki, k in enumerate(kRange):
            _derp = fastMieQ(n + (1j * k), wavelength, diameter)
            scaSpace[ni][ki] = _derp[0]
            absSpace[ni][ki] = _derp[1]
    if interp is not None:
        # Up-sample axes and grids to refine the solution bands without
        # re-running the (expensive) Mie computation.
        nRange = zoom(nRange, interp)
        kRange = zoom(kRange, interp)
        scaSpace = zoom(scaSpace, interp)
        absSpace = zoom(absSpace, interp)
    # Grid indices whose computed efficiency lies within ±precision
    # (relative) of the measured value.
    scaSolutions = np.where(
        np.logical_and(Qsca * (1 - scatteringPrecision) < scaSpace,
                       scaSpace < Qsca * (1 + scatteringPrecision)))
    absSolutions = np.where(
        np.logical_and(Qabs * (1 - absorptionPrecision) < absSpace,
                       absSpace < Qabs * (1 + absorptionPrecision)))
    validScattering = nRange[scaSolutions[0]] + 1j * kRange[scaSolutions[1]]
    validAbsorption = nRange[absSolutions[0]] + 1j * kRange[absSolutions[1]]
    solution = np.intersect1d(validScattering, validAbsorption)
    return solution
def Inversion_SD(Bsca, Babs, wavelength, dp, ndp, nMin=1, nMax=3, kMin=0,
                 kMax=1, scatteringPrecision=0.001, absorptionPrecision=0.001,
                 spaceSize=40, interp=2):
    """Invert size-distribution coefficients (Bsca, Babs) to candidate m = n + ik.

    Same grid-search strategy as Inversion(), but the forward model is
    fastMie_SD over the size distribution (dp, ndp) and the imaginary axis
    is sampled linearly rather than logarithmically.
    """
    dp = coerceDType(dp)
    ndp = coerceDType(ndp)
    real_axis = np.linspace(nMin, nMax, spaceSize)
    imag_axis = np.linspace(kMin, kMax, spaceSize)
    sca_grid = np.zeros((spaceSize, spaceSize))
    abs_grid = np.zeros((spaceSize, spaceSize))
    # Evaluate the forward model at every (n, k) grid point.
    for row, n_val in enumerate(real_axis):
        for col, k_val in enumerate(imag_axis):
            result = fastMie_SD(n_val + (1j * k_val), wavelength, dp, ndp)
            sca_grid[row, col] = result[0]
            abs_grid[row, col] = result[1]
    if interp is not None:
        # Refine axes and grids by spline interpolation.
        real_axis = zoom(real_axis, interp)
        imag_axis = zoom(imag_axis, interp)
        sca_grid = zoom(sca_grid, interp)
        abs_grid = zoom(abs_grid, interp)
    # Indices where the computed coefficient falls within the relative band.
    sca_hits = np.where((Bsca * (1 - scatteringPrecision) < sca_grid) &
                        (sca_grid < Bsca * (1 + scatteringPrecision)))
    abs_hits = np.where((Babs * (1 - absorptionPrecision) < abs_grid) &
                        (abs_grid < Babs * (1 + absorptionPrecision)))
    m_from_sca = real_axis[sca_hits[0]] + 1j * imag_axis[sca_hits[1]]
    m_from_abs = real_axis[abs_hits[0]] + 1j * imag_axis[abs_hits[1]]
    return np.intersect1d(m_from_sca, m_from_abs)
def ContourIntersection(Qsca, Qabs, wavelength, diameter, Qback=None, n=None,
k=None, nMin=1, nMax=3, kMin=0.00001, kMax=1,
gridPoints=100, interpolationFactor=2, maxError=0.005,
fig=None, ax=None, axisOption=0):
# http://pymiescatt.readthedocs.io/en/latest/inverse.html#ContourIntersectio
if (type(Qabs) == np.float64 and Qabs == 0.0) or (
type(Qabs) in [list, tuple, np.ndarray] and Qabs[0] == 0):
k = 0.0
if k == 0.0:
kMin = -0.1
axisOption = 1
error = lambda measured, calculated: np.abs(
(calculated - measured) / measured)
if Qback is not None:
if gridPoints * interpolationFactor < 400:
gridPoints = 2 * gridPoints
labels = []
incErrors = False
if type(Qsca) in [list, tuple, np.ndarray]:
incErrors = True
scaError = Qsca[1]
Qsca = Qsca[0]
labels.append("Qsca = {b:1.3f}±{e:1.3f}".format(b=Qsca, e=scaError))
else:
scaError = None
labels.append("Qsca = {b:1.3f}".format(b=Qsca))
if type(Qabs) in [list, tuple, np.ndarray]:
incErrors = True
absError = Qabs[1]
Qabs = Qabs[0]
labels.append("Qabs = {b:1.3f}±{e:1.3f}".format(b=Qabs, e=absError))
else:
absError = None
labels.append("Qabs = {b:1.3f}".format(b=Qabs))
if type(Qback) in [list, tuple, np.ndarray]:
backError = Qback[1]
Qback = Qback[0]
labels.append("Qback = {b:1.3f}±{e:1.3f}".format(b=Qback, e=backError))
elif Qback is not None:
backError = None
labels.append("Qback - {b:1.3f}".format(b=Qback))
else:
backError = None
nRange = np.linspace(nMin, nMax, gridPoints)
if k == 0.0:
kRange = np.linspace(kMin, kMax, gridPoints)
else:
kRange = np.logspace(np.log10(kMin), np.log10(kMax), gridPoints)
QscaList, QabsList, QbackList = [], [], []
for _n in nRange:
s, a, b = [], [], []
for _k in kRange:
m = _n + _k * 1.0j
_Qsca, _Qabs, _Qback = fastMieQ(m, wavelength, diameter)
s.append(_Qsca)
a.append(_Qabs)
b.append(_Qback)
QscaList.append(s)
QabsList.append(a)
QbackList.append(b)
QscaList = zoom(np.transpose(np.array(QscaList)), interpolationFactor)
QabsList = zoom(np.transpose(np.array(QabsList)), interpolationFactor)
QbackList = zoom(np.transpose(np.array(QbackList)), interpolationFactor)
_n = zoom(nRange, interpolationFactor)
_k = zoom(kRange, interpolationFactor)
if fig is None and ax is None:
fig, ax = plt.subplots()
elif fig is None:
fig = ax.get_figure()
elif ax is None:
ax = fig.gca()
scaLevels = np.array([Qsca])
absLevels = np.array([Qabs])
if Qback is not None:
backLevels = np.array([Qback])
if backError is not None:
backErrorLevels = np.array(
[Qback + x for x in [-backError, backError]])
if n is None:
scaChart = ax.contour(_n, _k, QscaList, scaLevels, origin='lower',
linestyles='dashdot', linewidths=1.5,
colors=('red'))
if scaError is not None:
scaErrorLevels = np.array([Qsca + x for x in [-scaError, scaError]])
ax.contourf(_n, _k, QscaList, scaErrorLevels, origin='lower',
colors=('red'), alpha=0.15)
ax.contour(_n, _k, QscaList, scaErrorLevels, origin='lower',
linewidths=0.5, colors=('red'), alpha=0.5)
else:
if type(n) in [list, tuple, np.ndarray]:
scaErrorLevels = [n[0] * (1 + x) for x in [-n[1], n[1]]]
scaChart = ax.vlines(n[0], kMin, kMax, linestyle='dashdot',
linewidth=1.5, color='r')
else:
scaChart = ax.vlines(n, kMin, kMax, linestyle='dashdot',
linewidth=1.5, color='r')
if k is None:
absChart = ax.contour(_n, _k, QabsList, absLevels, origin='lower',
linewidths=1.5, colors=('blue'))
if absError is not None:
absErrorLevels = np.array([Qabs + x for x in [-absError, absError]])
ax.contourf(_n, _k, QabsList, absErrorLevels, origin='lower',
colors=('blue'), alpha=0.15)
ax.contour(_n, _k, QabsList, absErrorLevels, origin='lower',
linewidths=0.5, colors=('blue'), alpha=0.5)
else:
if type(k) in [list, tuple, np.ndarray]:
absErrorLevels = [k[0] * (1 + x) for x in [-k[1], k[1]]]
absChart = ax.hlines(k[0], nMin, nMax, linestyle='solid',
linewidth=1.5, color='b')
else:
absChart = ax.hlines(k, nMin, nMax, linestyle='solid',
linewidth=1.5, color='b')
if Qback is not None:
backChart = ax.contour(_n, _k, QbackList, backLevels, origin='lower',
linestyles='dotted', linewidths=1.5,
colors=('green'))
if backError is not None:
backErrorLevels = np.array(
[Qback + x for x in [-backError, backError]])
ax.contourf(_n, _k, QbackList, backErrorLevels, origin='lower',
colors=('green'), alpha=0.15)
ax.contour(_n, _k, QbackList, backErrorLevels, origin='lower',
linewidths=0.5, colors=('green'), alpha=0.5)
m1 = find_intersections(scaChart, absChart)
if n is not None and type(n) in [list, tuple, np.ndarray]:
scaChart = ax.vlines(scaErrorLevels, kMin, kMax, linestyle='dashdot',
linewidth=0.5, color='r', alpha=0.5)
ax.axvspan(scaErrorLevels[0], scaErrorLevels[1], alpha=0.15, color='r')
if k is not None and type(k) in [list, tuple, np.ndarray]:
absChart = ax.hlines(absErrorLevels, nMin, nMax, linestyle='solid',
linewidth=0.5, color='b', alpha=0.5)
ax.axhspan(absErrorLevels[0], absErrorLevels[1], alpha=0.15, color='b')
if Qback is not None:
m2 = find_intersections(scaChart, backChart)
r1 = [np.round(x + y * 1j, 2) for x, y in zip(m1[0], m1[1])]
r2 = [np.round(x + y * 1j, 2) for x, y in zip(m2[0], m2[1])]
m_sol = list(set(r1).intersection(r2))
nSolution, kSolution = [xx.real for xx in m_sol], [xx.imag for xx in
m_sol]
else:
nSolution, kSolution = m1[0], m1[1]
if type(nSolution) == np.float64:
solutionSet = [nSolution + (0 + 1j) * kSolution]
else:
solutionSet = [(x + y * 1j) for x, y in zip(nSolution, kSolution)]
forwardCalculations = []
for s in solutionSet:
_s, _a, _ = fastMieQ(s, wavelength, diameter)
forwardCalculations.append([_s, _a])
solutionErrors = []
for f in forwardCalculations:
solutionErrors.append([error(f[0], Qsca), error(f[1], Qabs)])
solutionSet = np.array(solutionSet)
forwardCalculations = np.array(forwardCalculations)
solutionErrors = np.array(solutionErrors)
if n is None and k is None:
proper = solutionErrors <= maxError
solution = []
for r, c in proper:
if r and c:
solution.append(True)
else:
solution.append(False)
solutionSet = solutionSet[solution]
forwardCalculations = forwardCalculations[solution]
solutionErrors = solutionErrors[solution]
nSolutionsToPlot, kSolutionsToPlot = [x.real for x in solutionSet], [
x.imag for x in solutionSet]
else:
nSolutionsToPlot, kSolutionsToPlot = m1[0], m1[1]
ax.scatter(nSolutionsToPlot, kSolutionsToPlot, marker='o', s=128,
linewidth=1.5, edgecolor='k', facecolor='none', zorder=3)
ax.scatter(nSolutionsToPlot, kSolutionsToPlot, marker='o', s=128,
linewidth=0, edgecolor='none', facecolor='c', zorder=1,
alpha=0.25)
for x, y, s in zip(nSolutionsToPlot, kSolutionsToPlot, solutionErrors):
if n is not None:
ax.axhline(y, linewidth=0.5, alpha=0.5, zorder=0)
if k is not None:
ax.axvline(x, linewidth=0.5, alpha=0.5, zorder=0)
ax.set_xlabel('n', fontsize=16)
ax.set_ylabel('k', fontsize=16)
ax.set_xlim((np.min(nRange), np.max(nRange)))
ax.set_ylim((np.min(kRange), np.max(kRange)))
ax.tick_params(which='both', direction='in')
if axisOption == 0:
if max(kSolutionsToPlot) <= 0.5 or kMax <= 1:
ax.set_yscale('log')
else:
ax.set_yscale('linear')
elif axisOption == 1:
ax.set_xscale('linear')
ax.set_yscale('linear')
elif axisOption == 2:
ax.set_yscale('log')
elif axisOption == 3:
ax.set_xscale('log')
elif axisOption == 4:
ax.set_xscale('log')
ax.set_yscale('log')
else:
pass
_c = ax.get_children()
if Qback is None:
if incErrors:
# no Qback, with error bounds
graphElements = {'Qsca': _c[0], 'Qabs': _c[1], # contours
'QscaErrFill': _c[2], 'QscaErrOutline1': _c[3],
'QscaErrOutline2': _c[4],
'QabsErrFill': _c[5], 'QabsErrOutline1': _c[6],
'QabsErrOutline2': _c[7],
'SolMark': _c[8], 'SolFill': _c[9],
# the circly thingies at each solutions
'CrosshairsH': _c[10:-10:2],
'CrosshairsV': _c[11:-10:2], # solution crosshairs
'LeftSpine': _c[-10], 'RightSpine': _c[-9],
'BottomSpine': _c[-8], 'TopSpine': _c[-7],
# spines
'XAxis': _c[-6], 'YAxis': _c[-5]} # the axes
else:
# no Qback, no error bounds
graphElements = {'Qsca': _c[0], 'Qabs': _c[1], # contours
'SolFill': _c[2], 'SolMark': _c[3],
# the circly thingies at each solutions
'CrosshairsH': _c[4:-10:2],
'CrosshairsV': _c[5:-10:2], # solution crosshairs
'LeftSpine': _c[-10], 'RightSpine': _c[-9],
'BottomSpine': _c[-8], 'TopSpine': _c[-7],
# spines
'XAxis': _c[-6], 'YAxis': _c[-5]} # the axes
else:
if | |
"""Implementation of vpype's data model
"""
import logging
import math
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, cast
import numpy as np
from shapely.geometry import LinearRing, LineString, MultiLineString
from .geometry import crop, reloop
from .line_index import LineIndex
# REMINDER: anything added here must be added to docs/api.rst
__all__ = [
"LineCollection",
"Document",
"LineLike",
"LineCollectionLike",
"as_vector",
# deprecated:
"VectorData",
]
# Type alias: anything accepted as a single line -- a Shapely geometry or an
# iterable of complex numbers encoding points as x + 1j*y.
LineLike = Union[LineString, LinearRing, Iterable[complex]]
# We accept LineString and LinearRing as line collection because MultiLineString are regularly
# converted to LineString/LinearRing when operation reduce them to single-line construct.
LineCollectionLike = Union[
    Iterable[LineLike], MultiLineString, "LineCollection", LineString, LinearRing
]
def as_vector(a: np.ndarray):
    """Reinterpret a 1D complex line array as an Nx2 real array (zero-copy view)."""
    return a.view(dtype=float).reshape(-1, 2)
# noinspection PyShadowingNames
class LineCollection:
"""
:py:class:`LineCollection` encapsulate a list of piecewise linear lines (or paths). Lines
are implemented as 1D numpy arrays of complex numbers whose real and imaginary parts
represent the X, respectively Y, coordinates of point in the paths.
An instance of :py:class:`LineCollection` is used to model a single layer in vpype's
:ref:`pipeline <fundamentals_pipeline>`. The complete pipeline is modelled by a
:py:class:`Document` instance, which essentially is a mapping of ``int`` (layer ID) to
:py:class:`LineCollection`.
Although the actual ``list`` is stored as private data member in :py:class:`LineCollection`
instances, the class provides a sequence API similar to ``list``::
>>> import vpype, numpy as np
>>> lc = vpype.LineCollection()
>>> lc.append(np.array([0, 10. + 10.j]))
>>> lc.append(np.array([10.j, 5. + 5.j]))
>>> len(lc)
2
>>> lc[0]
array([ 0. +0.j, 10.+10.j])
>>> for line in lc:
... print(repr(line))
...
array([ 0. +0.j, 10.+10.j])
array([0.+10.j, 5. +5.j])
In addition to Numpy arrays, the class accepts paths expressed in a variety of format
including Python ``list`` or Shapely objects::
>>> from shapely.geometry import LineString, LinearRing, MultiLineString
>>> lc = vpype.LineCollection()
>>> lc.append([5, 5+5j])
>>> lc.append(LineString([(1, 1), (3, 2)]))
>>> lc.append(LinearRing([(0, 0), (1, 0), (1, 1), (0, 1)]))
>>> lc.extend(MultiLineString([[(0, 0), (10, 0)], [(4, 4), (0, 4)]]))
>>> lc
LineCollection([array([5.+0.j, 5.+5.j]), array([1.+1.j, 3.+2.j]), array([0.+0.j,
1.+0.j, 1.+1.j, 0.+1.j, 0.+0.j]), array([ 0.+0.j, 10.+0.j]), array([4.+4.j, 0.+4.j])])
Instances can also be converted to Shapely's MultiLineString:
>>> mls = lc.as_mls()
>>> print(mls)
MULTILINESTRING ((5 0, 5 5), (1 1, 3 2), (0 0, 1 0, 1 1, 0 1, 0 0), (0 0, 10 0),
(4 4, 0 4))
Finally, :py:class:`LineCollection` implements a number of operations such as geometrical
transformation, cropping, merging, etc. (see member function documentation for details).
"""
def __init__(self, lines: LineCollectionLike = ()):
"""Create a LineCollection instance from an iterable of lines.
Args:
lines (LineCollectionLike): iterable of line (accepts the same input as
:func:`~LineCollection.append`).
"""
self._lines: List[np.ndarray] = []
self.extend(lines)
@property
def lines(self) -> List[np.ndarray]:
"""Returns the list of line.
Returns:
list of line
"""
return self._lines
def append(self, line: LineLike) -> None:
"""Append a single line.
This function accepts an iterable of complex or a Shapely geometry
(:py:class:`LineString` or :py:class:`LinearRing`).
Args:
line (LineLike): line to append
"""
if isinstance(line, LineString) or isinstance(line, LinearRing):
# noinspection PyTypeChecker
self._lines.append(np.array(line).view(dtype=complex).reshape(-1))
else:
line = np.array(line, dtype=complex).reshape(-1)
if len(line) > 1:
self._lines.append(line)
def extend(self, lines: LineCollectionLike) -> None:
"""Append lines from a collection.
This function accepts an iterable of iterable of complex, another
:py:class:`LineCollection` instance, or a Shapely geometry
(:py:class:`MultiLineString`, :py:class:`LineString` or :py:class:`LinearRing`).
Shapely's LineString and LinearRing are occasionally obtained when a MultiLineString is
actually expected. As a result, they are accepted as input even though they are not,
strictly speaking, a line collection.
Args:
lines (LineCollectionLike): lines to append
"""
if hasattr(lines, "geom_type") and lines.is_empty: # type: ignore
return
# sometimes, mls end up actually being ls
if isinstance(lines, LineString) or isinstance(lines, LinearRing):
lines = [lines]
for line in lines:
self.append(line)
def is_empty(self) -> bool:
"""Check for emptiness.
Returns:
True if the instance does not contain any line, False otherwise.
"""
return len(self) == 0
def reverse(self) -> None:
"""Reverse order of the lines."""
self._lines = list(reversed(self._lines))
def __iter__(self):
return self._lines.__iter__()
    def __len__(self) -> int:
        """Return the number of lines in the collection."""
        return len(self._lines)
    def __getitem__(self, item: Union[int, slice]):
        """Return the line at *item* (or a list of lines for a slice)."""
        return self._lines[item]
def __repr__(self):
return f"LineCollection({self._lines})"
def as_mls(self) -> MultiLineString:
"""Converts the LineCollection to a :py:class:`MultiLineString`.
Returns:
a MultiLineString Shapely object
"""
return MultiLineString([as_vector(line) for line in self.lines])
def translate(self, dx: float, dy: float) -> None:
"""Translates all line by a given offset.
Args:
dx: offset along X axis
dy: offset along Y axis
"""
c = complex(dx, dy)
for line in self._lines:
line += c
def scale(self, sx: float, sy: Optional[float] = None) -> None:
"""Scale the geometry.
The scaling is performed about the coordinates origin (0, 0). To scale around a
specific location, appropriate translations must be performed before and after the
scaling::
>>> import vpype
>>> lc = vpype.LineCollection([(-1+1j, 1+1j)])
>>> lc.translate(0, -1)
>>> lc.scale(1.2)
>>> lc.translate(0, 1)
>>> lc
LineCollection([array([-1.2+1.j, 1.2+1.j])])
Args:
sx: scale factor along x
sy: scale factor along y (if None, then sx is used)
"""
if sy is None:
sy = sx
for line in self._lines:
line.real *= sx
line.imag *= sy
def rotate(self, angle: float) -> None:
"""Rotates the geometry by ``angle`` amount.
The angle is expressed in radian. Positive value rotate clockwise.
The rotation is performed about the coordinates origin (0, 0). To rotate around a
specific location, appropriate translations must be performed before and after the
scaling::
>>> import vpype
>>> lc = vpype.LineCollection([(-1+1j, 1+1j)])
>>> lc.translate(0, -1)
>>> lc.rotate(1.2)
>>> lc.translate(0, 1)
Args:
angle: rotation angle in rad
"""
c = complex(math.cos(angle), math.sin(angle))
for line in self._lines:
line *= c
def skew(self, ax: float, ay: float) -> None:
"""Skew the geometry by some angular amounts along X and Y axes.
The angle is expressed in radians.
The skew is performed about the coordinates origin (0, 0). To rotate around a
specific location, appropriate translations must be performed before and after the
scaling::
>>> import vpype
>>> lc = vpype.LineCollection([(-1+1j, 1+1j)])
>>> lc.translate(0, -1)
>>> lc.skew(0., 1.2)
>>> lc.translate(0, 1)
Args:
ax: skew angle in rad along X axis
ay: skew angle in rad along Y axis
"""
tx, ty = math.tan(ax), math.tan(ay)
for line in self._lines:
line += tx * line.imag + 1j * ty * line.real
def reloop(self, tolerance: float) -> None:
"""Randomizes the seam of closed paths. Paths are considered closed when their first
and last point are closer than *tolerance*.
:param tolerance: tolerance to determine if a path is closed
"""
for i, line in enumerate(self._lines):
delta = line[-1] - line[0]
if np.hypot(delta.real, delta.imag) <= tolerance:
self._lines[i] = reloop(line)
def crop(self, x1: float, y1: float, x2: float, y2: float) -> None:
"""Crop all lines to a rectangular area.
Args:
x1, y1: first corner of the crop area
x2, y2: second corner of the crop area
"""
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
if x1 == x2 or y1 == y2:
self._lines = []
else:
new_lines = []
for line in self._lines:
new_lines.extend(crop(line, x1, y1, x2, y2))
self._lines = new_lines
def filter(self, key: Callable[[np.ndarray], bool]) -> None:
"""Remove lines from the :class:`LineCollection` for which key returns False.
Args:
key: filter (returns True if the line should be kept or False otherwise)
"""
self._lines = [line for line in self._lines if key(line)]
    def merge(self, tolerance: float, flip: bool = True) -> None:
        """Merge lines whose endings overlap or are very close.

        A spatial index (:class:`LineIndex`) over all lines is consumed
        front-to-back: each popped line is repeatedly grown by attaching the
        nearest remaining line whose endpoint lies within *tolerance* of the
        current endpoint. When *flip* is True, candidate lines may be
        reversed (and the growing line itself flipped) to enable additional
        merges.

        Args:
            tolerance: max distance between line ending that may be merged
            flip: allow flipping line direction for further merging
        """
        if len(self) < 2:
            # nothing to merge
            return
        index = LineIndex(self.lines, reverse=flip)
        new_lines = LineCollection()
        while len(index) > 0:
            line = index.pop_front()
            # we append to `line` until we dont find anything to add
            while True:
                # look for a mergeable line near our current end point
                idx, reverse = index.find_nearest_within(line[-1], tolerance)
                if idx is None and flip:
                    # nothing at the tail: try the head, flipping ourselves so
                    # the merge below still happens at line[-1]
                    idx, reverse = index.find_nearest_within(line[0], tolerance)
                    line = np.flip(line)
                if idx is None:
                    break
                new_line = cast(np.ndarray, index.pop(idx))
                if reverse:
                    new_line = np.flip(new_line)
                line = np.hstack([line, new_line])
            new_lines.append(line)
        self._lines = new_lines._lines
def bounds(self) -> Optional[Tuple[float, float, float, | |
<reponame>spxiwh/pegasus<filename>packages/pegasus-python/src/Pegasus/monitoring/event_output.py
"""
Functions for output pegasus-monitord events to various destinations.
"""
##
# Copyright 2007-2011 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import logging
import queue
import re
import socket
import ssl
import time
import traceback
import urllib.parse
from threading import Thread
from Pegasus import json
from Pegasus.db import connection, expunge
from Pegasus.db.dashboard_loader import DashboardLoader
from Pegasus.db.workflow_loader import WorkflowLoader
from Pegasus.netlogger import nlapi
from Pegasus.tools import properties, utils
log = logging.getLogger(__name__)
# Optional imports, only generate 'warnings' if they fail
bson = None
try:
import bson
except Exception:
log.info("cannot import BSON library, 'bson'")
amqp = None
try:
import pika as amqp
except Exception:
log.info("cannot import AMQP library")
# Event name-spaces
STAMPEDE_NS = "stampede."
DASHBOARD_NS = "dashboard."
def purge_wf_uuid_from_database(rundir, output_db):
    """
    Purge the workflow whose uuid is recorded in *rundir*'s braindump file
    from the output (stampede) database.
    """
    # PM-652 do nothing for sqlite
    # DB is already rotated in pegasus-monitord
    if output_db.lower().startswith("sqlite"):
        return

    # The braindump file in the run directory records the workflow uuid.
    wf_uuid = utils.slurp_braindb(rundir).get("wf_uuid")
    if wf_uuid is not None:
        expunge.delete_workflow(output_db, wf_uuid)
def purge_wf_uuid_from_dashboard_database(rundir, output_db):
    """
    Purge the workflow whose uuid is recorded in *rundir*'s braindump file
    from the dashboard database.
    """
    # The braindump file in the run directory records the workflow uuid.
    wf_uuid = utils.slurp_braindb(rundir).get("wf_uuid")
    if wf_uuid is not None:
        expunge.delete_dashboard_workflow(output_db, wf_uuid)
class OutputURL:
    """
    Break output URL into named parts for easier handling.

    Attributes: scheme, netloc, path, params, host, port (None when absent),
    user and password (None when the URL carries no credentials).
    """

    def __init__(self, url):
        (
            self.scheme,
            self.netloc,
            self.path,
            self.params,
            query,
            frag,
        ) = urllib.parse.urlparse(url)

        # Split an optional "user:password@" prefix from "host[:port]".
        if "@" in self.netloc:
            user_pass, host_port = self.netloc.split("@", 1)
        else:
            user_pass, host_port = "", self.netloc

        if ":" in host_port:
            self.host, portstr = host_port.split(":", 1)
            self.port = int(portstr)
        else:
            # BUGFIX: previously fell back to the full netloc, which wrongly
            # included any "user:password@" prefix when no port was given.
            self.host = host_port
            self.port = None

        # Default credentials to None so attribute access is always safe
        # (previously user/password were only set when ":" was present).
        self.user = self.password = None
        if ":" in user_pass:
            self.user, self.password = user_pass.split(":", 1)
class EventSink:
    """
    Base class for an Event Sink.

    Subclasses override :meth:`send`, :meth:`close` and :meth:`flush`; the
    base implementations are no-ops so concrete sinks only implement what
    they need.
    """

    def __init__(self):
        # per-subclass logger, e.g. "<module>.FileEventSink"
        self._log = logging.getLogger(f"{self.__module__}.{type(self).__name__}")
        # Set listing events handled to be kept consistent with dict in workflow loader
        self._acceptedEvents = (
            "stampede.wf.plan",
            "stampede.wf.map.task_job",
            "stampede.static.start",
            "stampede.static.end",
            "stampede.xwf.start",
            "stampede.xwf.end",
            "stampede.xwf.map.subwf_job",
            "stampede.task.info",
            "stampede.task.edge",
            "stampede.job.info",
            "stampede.job.edge",
            "stampede.job_inst.pre.start",
            "stampede.job_inst.pre.term",
            "stampede.job_inst.pre.end",
            "stampede.job_inst.submit.start",
            "stampede.job_inst.submit.end",
            "stampede.job_inst.held.start",
            "stampede.job_inst.held.end",
            "stampede.job_inst.main.start",
            "stampede.job_inst.main.term",
            "stampede.job_inst.main.end",
            "stampede.job_inst.post.start",
            "stampede.job_inst.post.term",
            "stampede.job_inst.post.end",
            "stampede.job_inst.host.info",
            "stampede.job_inst.image.info",
            "stampede.job_inst.abort.info",
            "stampede.job_inst.grid.submit.start",
            "stampede.job_inst.grid.submit.end",
            "stampede.job_inst.globus.submit.start",
            "stampede.job_inst.globus.submit.end",
            "stampede.job_inst.tag",
            "stampede.job_inst.composite",
            "stampede.inv.start",
            "stampede.inv.end",
            "stampede.static.meta.start",
            "stampede.xwf.meta",
            "stampede.task.meta",
            "stampede.rc.meta",
            "stampede.int.metric",
            "stampede.rc.pfn",
            "stampede.wf.map.file",
            "stampede.static.meta.end",
            "stampede.task.monitoring",
        )

    def send(self, event, kw):
        """
        Clients call this function to send an event to the sink.
        """

    def close(self):
        """
        Clients call this function to close the output to this sink.
        """

    def flush(self):
        """Clients call this to flush events to the sink."""
class DBEventSink(EventSink):
    """
    Write wflow event logs to database via loader.

    The event namespace selects the loader: STAMPEDE_NS uses the workflow
    loader, DASHBOARD_NS the dashboard loader.
    """

    def __init__(
        self,
        dest,
        db_stats=False,
        namespace=STAMPEDE_NS,
        props=None,
        db_type=None,
        backup=False,
        **kw
    ):
        self._namespace = namespace
        # pick the right database loader based on the namespace prefix
        if namespace == STAMPEDE_NS:
            loader_cls = WorkflowLoader
        elif namespace == DASHBOARD_NS:
            loader_cls = DashboardLoader
        else:
            raise ValueError("Unknown namespace specified '%s'" % (namespace))
        self._db = loader_cls(
            dest,
            perf=db_stats,
            batch=True,
            props=props,
            db_type=db_type,
            backup=backup,
        )
        super().__init__()

    def send(self, event, kw):
        """Namespace the event, de-mangle '__' keys to '.', and load it."""
        self._log.trace("send.start event=%s", event)
        record = {"event": self._namespace + event}
        for key, value in kw.items():
            record[key.replace("__", ".")] = value
        self._db.process(record)
        self._log.trace("send.end event=%s", event)

    def close(self):
        """Finalize the underlying loader."""
        self._log.trace("close.start")
        self._db.finish()
        self._log.trace("close.end")

    def flush(self):
        """Flush pending batched events to the database."""
        self._db.flush()
class FileEventSink(EventSink):
    """
    Write wflow event logs to a file.
    """

    def __init__(self, path, restart=False, encoder=None, **kw):
        super().__init__()
        # line-buffered output; truncate on restart, append otherwise
        mode = "w" if restart else "a"
        self._output = open(path, mode, 1)
        self._encoder = encoder

    def send(self, event, kw):
        self._log.trace("send.start event=%s", event)
        self._output.write(self._encoder(event=event, **kw))
        # the JSON encoder does not newline-terminate its records itself
        if self._encoder == json_encode:
            self._output.write("\n")
        self._log.trace("send.end event=%s", event)

    def close(self):
        self._log.trace("close.start")
        self._output.close()
        self._log.trace("close.end")
class TCPEventSink(EventSink):
    """
    Write wflow event logs to a host:port.

    A single TCP connection is opened at construction time and reused for
    every event; the encoder must produce bytes suitable for socket.send().
    """

    def __init__(self, host, port, encoder=None, **kw):
        super().__init__()
        self._encoder = encoder
        # connect eagerly: a failure surfaces at sink creation, not at send()
        self._sock = socket.socket()
        self._sock.connect((host, port))

    def send(self, event, kw):
        """Encode the event and write it to the socket."""
        self._log.trace("send.start event=%s", event)
        self._sock.send(self._encoder(event=event, **kw))
        self._log.trace("send.end event=%s", event)

    def close(self):
        """Close the TCP connection."""
        self._log.trace("close.start")
        self._sock.close()
        self._log.trace("close.end")
class AMQPEventSink(EventSink):
    """
    Write wflow event logs to an AMQP server.

    Events accepted by the configured filters are pushed onto an internal
    queue by send() and published to a topic exchange by a daemon worker
    thread (event_publisher) that owns the AMQP connection and reconnects
    with backoff on recoverable connection errors.
    """

    # topic exchange so consumers can bind with routing-key patterns
    EXCH_OPTS = {"exchange_type": "topic", "durable": True, "auto_delete": False}
    DEFAULT_AMQP_VIRTUAL_HOST = "pegasus"  # should be /

    def __init__(
        self,
        host,
        port,
        exch=None,
        encoder=None,
        userid="guest",
        password="<PASSWORD>",
        virtual_host=DEFAULT_AMQP_VIRTUAL_HOST,
        ssl_enabled=False,
        props=None,
        connect_timeout=None,
        **kw
    ):
        super().__init__()
        self._log.info("Encoder used {} Properties received {}".format(encoder, props))
        self._encoder = encoder
        # events explicitly accepted by the filters (unless all are handled)
        self._handled_events = set()
        self._handle_all_events = False
        self.configure_filters(props.property("events"))
        # unbounded hand-off queue between send() and the publisher thread
        self._msg_queue = queue.Queue()
        self._stopping = False
        self._exch = exch
        if connect_timeout is None:
            # pick timeout from properties
            connect_timeout = props.property("timeout")
            if connect_timeout:
                connect_timeout = float(connect_timeout)
        # insecure ssl: hostname and certificate verification are disabled
        SSLOptions = None
        if ssl_enabled:
            context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
            SSLOptions = amqp.SSLOptions(context)
        creds = amqp.PlainCredentials(userid, password)
        self._params = amqp.ConnectionParameters(
            host=host,
            port=port,
            ssl_options=SSLOptions,
            virtual_host=virtual_host,
            credentials=creds,
            blocked_connection_timeout=connect_timeout,
            heartbeat=None,
        )  # None -> negotiate heartbeat with the AMQP server
        # initialize worker thread in daemon and start it
        self._worker_thread = Thread(target=self.event_publisher, daemon=True)
        self._worker_thread.start()

    def event_publisher(self):
        """
        Worker-thread loop: connect to the broker, drain the message queue
        and publish each event. Reconnects with exponential backoff on
        recoverable connection errors; stops on broker-initiated closes,
        channel errors, or when close() sets the stopping flag.
        """
        # a not-yet-published event is carried across a reconnect in these
        full_event, event, data = (None, None, None)
        reconnect_attempts = 0
        while not self._stopping:
            try:
                self._log.info(
                    "Connecting to host: %s:%s virtual host: %s exchange: %s with user: %s ssl: %s"
                    % (
                        self._params.host,
                        self._params.port,
                        self._params.virtual_host,
                        self._exch,
                        self._params.credentials.username,
                        not self._params.ssl_options is None,
                    )
                )
                self._conn = amqp.BlockingConnection(self._params)
                self._channel = self._conn.channel()
                self._channel.exchange_declare(self._exch, **self.EXCH_OPTS)
                reconnect_attempts = 0
                while not self._stopping:
                    try:
                        # if variables are initialized we haven't sent them yet.
                        # don't retrieve a new event, send the old one
                        if (full_event is None) and (event is None) and (data is None):
                            full_event, event, data = self._msg_queue.get(timeout=5)
                        self._log.trace("send.start event=%s", full_event)
                        self._channel.basic_publish(
                            body=data, exchange=self._exch, routing_key=full_event
                        )
                        self._log.trace("send.end event=%s", event)
                        # reset vars
                        full_event, event, data = (None, None, None)
                        # mark item as processed
                        self._msg_queue.task_done()
                    except queue.Empty:
                        self._conn.process_data_events()  # keep up with the AMQP heartbeats
                        continue
            # Do not recover if connection was closed by broker
            except amqp.exceptions.ConnectionClosedByBroker as err:
                self._log.error(
                    "Connection to %s:%s was closed by Broker - Not Recovering"
                    % (self._params.host, self._params.port)
                )
                self._log.error("Broker closed connection with: %s, stopping..." % err)
                self._conn = None
                break
            # Do not recover on channel errors
            except amqp.exceptions.AMQPChannelError as err:
                self._log.error(
                    "Channel error at %s:%s - Not Recovering"
                    % (self._params.host, self._params.port)
                )
                self._log.error("Channel error: %s, stopping..." % err)
                self._conn = None
                break
            # Recover on all other connection errors if reconnect attempts is less than 5
            except amqp.exceptions.AMQPConnectionError:
                reconnect_attempts += 1
                if reconnect_attempts > 5:
                    self._log.info(
                        "Connection to %s:%s was closed - Not Recovering"
                        % (self._params.host, self._params.port)
                    )
                    break
                else:
                    self._log.info(
                        "Connection to %s:%s was closed - Will try to recover the connection"
                        % (self._params.host, self._params.port)
                    )
                    # exponential backoff: 20s, 40s, 80s, ...
                    time.sleep((2 ** reconnect_attempts) * 10)
                    continue
        if not self._conn is None:
            self._log.trace("connection - close.start")
            self._conn.close()
            self._log.trace("connection - close.end")

    def configure_filters(self, events):
        """
        Build the set of handled events from the comma-separated *events*
        property, treating each entry as a regex matched against the accepted
        events. None selects a default subset; "*" handles everything.
        """
        event_regexes = set()
        if events is None:
            # add pre-configured specific events
            event_regexes.add(re.compile(STAMPEDE_NS + "job_inst.tag"))
            event_regexes.add(re.compile(STAMPEDE_NS + "job_inst.composite"))
            event_regexes.add(re.compile(STAMPEDE_NS + "inv.end"))
            event_regexes.add(re.compile(STAMPEDE_NS + "wf.plan"))
        else:
            for exp in events.split(","):
                if exp == "*":
                    # short circuit
                    self._handle_all_events = True
                    self._log.debug("Events Handled: All")
                    return
                else:
                    event_regexes.add(re.compile(exp))
        # go through each regex and match against accepted events once
        for regex in event_regexes:
            # go through each list of accepted events to check match
            for event in self._acceptedEvents:
                if regex.search(event) is not None:
                    self._handled_events.add(event)
        self._log.debug("Events Handled: %s", self._handled_events)

    def send(self, event, kw):
        """
        Encode the event and enqueue it for the publisher thread. Raises if
        the publisher thread has died; filtered events are silently dropped.
        """
        if not self._worker_thread.is_alive():
            raise Exception("AMQP publisher thread is dead. Cannot send amqp events.")
        full_event = STAMPEDE_NS + event
        if self.ignore(full_event):
            return
        data = self._encoder(event=event, **kw)
        self._msg_queue.put((full_event, event, data))

    def ignore(self, event):
        """Return True when the (namespaced) event is filtered out."""
        if self._handle_all_events:
            # we want all events
            return False
        return event not in self._handled_events

    def close(self):
        """
        Drain the queue, then signal the publisher thread to stop and wait
        for it to exit.
        """
        if self._worker_thread.is_alive():
            self._log.trace("Waiting for queue to emtpy.")
            self._msg_queue.join()  # wait for queue to empty if worker is alive
        self._stopping = True
        self._log.trace("Waiting for publisher thread to exit.")
        self._worker_thread.join()
        self._log.trace("Publisher thread exited.")
class MultiplexEventSink(EventSink):
"""
Sends events to multiple end points
"""
def __init__(self, dest, enc, prefix=STAMPEDE_NS, props=None, **kw):
super().__init__()
self._endpoints = {}
self._log.info("Multiplexed Event Sink Connection Properties %s", props)
for key in props.keyset():
if key.endswith(".url"):
sink_name = key[0 : key.rfind(".url")]
# remove from our copy sink_name properties if they exist
| |
the dimension to downsample
downsampling_factor : int
downsampling factor
Returns
-------
ndarray
a downsampled image
'''
if downsampling_factor == 1:
logger.error("downsampling with a factor = 1 means no downsampling, thereby ignoring...")
return self
if self.metadata['dimensions'] is None:
logger.error("Image dimensions not specified!!!")
return self
idx = None
for dim in self.metadata['dimensions']:
if dim in dimensions_to_downsample:
if idx is None:
idx = np.index_exp[::downsampling_factor]
else:
idx += np.index_exp[::downsampling_factor]
else:
if idx is None:
idx = np.index_exp[:]
else:
idx += np.index_exp[:]
if idx is None:
return self
return self[idx]
def rescale(self, factor=2):
'''rescales an image (using scipy)
Parameters
----------
factor : int
rescaling factor
Returns
-------
ndarray
a rescaled image
'''
return skimage.transform.rescale(self, 1. / factor, preserve_range=True, anti_aliasing=False, multichannel=True)
# ideally should make it return an image but maybe too complicated --> ok for now let's wait for my python skills to improve
def convolve(self, kernel=np.array([[-1, -1, -1],
[-1, 8, -1],
[-1, -1, -1]])):
'''convolves an image (using scipy)
Parameters
----------
kernel : np.array
a convolution kernel
Returns
-------
ndarray
a convolved image
'''
convolved = scipy.signal.convolve2d(self, kernel, 'valid')
return convolved
def has_dimension(self, dim):
'''Returns True if image has the specified dimension, False otherwise
Parameters
----------
dim : single char string
dimension of interest
Returns
-------
boolean
True if dimension of interest exist in image
'''
# use dimension synonyms
if dim == 'x':
dim = 'w'
if dim == 'y':
dim = 'h'
if dim == 'z':
dim = 'd'
if dim in self.meta_data['dimensions']:
return True
return False
    def is_stack(self):
        '''Returns True if the image has a z/d (depth) dimension, False otherwise.'''
        return self.has_d()
def has_channels(self):
    '''Returns True when the image has a channel (c) axis, False otherwise.
    '''
    return self.has_c()
def get_t(self, t):
    '''Returns the image at time point ``t``, or None when unavailable.

    Parameters
    ----------
    t : int
        time point of interest

    Returns
    -------
    ndarray
        the image at time t, or None when the image is not a time
        series or t is out of range
    '''
    if not self.is_time_series():
        return None
    if t >= self.get_dimension('t'):  # TODO check code
        return None
    return self.imCopy(t=t)
# set the current time frame
def set_t(self, t):
    '''Stores ``t`` as the current time frame in the image metadata.'''
    self.metadata['cur_t'] = t
def get_d_scaling(self):
    '''Returns the z/d (depth) scaling factor of the current image.

    Returns
    -------
    float
        the depth scaling factor
    '''
    return self.z_scale
def set_d_scaling(self, scaling_factor):
    '''Sets the z/d (depth) scaling factor of the current image.

    Parameters
    ----------
    scaling_factor : float
        the new image scaling factor
    '''
    self.z_scale = scaling_factor
def has_t(self):
    '''Returns True when the image has a time (t) axis, False otherwise.
    '''
    return self.has_dimension('t')
def is_time_series(self):
    '''Returns True if the image is a time series, False otherwise.
    '''
    return self.has_t()
def has_d(self):
    '''Returns True if the image is a Z-stack (has a 'd' or 'z' axis), False otherwise.
    '''
    return any(self.has_dimension(axis) for axis in ('d', 'z'))
def has_dimension(self, d):
    '''Returns True if the image has the specified dimension, False otherwise.

    Parameters
    ----------
    d : single char string
        dimension of interest

    Returns
    -------
    boolean
        True if d exists in the image dimensions
    '''
    dims = self.metadata['dimensions']
    return d in dims
# check for the presence of LUTs
def has_LUTs(self):
    '''Returns True when the metadata carries a non-None 'LUTs' entry.'''
    return self.metadata.get('LUTs') is not None
# get LUTs
def get_LUTs(self):
    '''Returns the 'LUTs' metadata entry, or None when absent.'''
    return self.metadata.get('LUTs')
# set LUTs
def set_LUTs(self, LUTs):
    '''Stores ``LUTs`` in the image metadata.'''
    self.metadata['LUTs'] = LUTs
def has_c(self):
    '''Returns True if the image has color channels, False otherwise.
    '''
    dims = self.metadata['dimensions']
    return 'c' in dims
def _create_dir(self, output_name):
# create dir if does not exist
if output_name is None:
return
output_folder, filename = os.path.split(output_name)
# bug fix in case just a filename and no parent folder
if output_folder:
os.makedirs(output_folder, exist_ok=True)
@staticmethod
def img2Base64(img):
    '''Encodes ``img`` as a base64 PNG string.

    When ``img`` is None the current matplotlib figure is encoded instead
    (must be called before plt.show(), otherwise the figure is blank).
    '''
    buf = io.BytesIO()
    if img is not None:
        # assume image
        Image.fromarray(img).save(buf, format='png')
    else:
        # assume pyplot image then
        print('Please call this before plt.show() to avoid getting a blank output')
        plt.savefig(buf, format='png', bbox_inches='tight')  # TO REMOVE UNNECESSARY WHITE SPACE AROUND GRAPH...
    buf.seek(0)  # rewind file
    figdata_png = base64.b64encode(buf.getvalue()).decode("utf-8")
    buf.close()
    return figdata_png
# mode can be IJ or raw --> if raw --> set IJ to false and save directly TODO clean the mode and mode is only for tif so far --> find a way to make it better and more optimal --> check also how mode would behave with z stacks, etc...
def save(self, output_name, print_file_name=False, ijmetadata='copy', mode='IJ'):
    '''Saves the current image to disk.

    Only '.tif'/'.tiff' output names without '*' wildcards are handled;
    any other name is silently ignored.

    Parameters
    ----------
    output_name : string
        name of the file to save
    print_file_name : bool
        when True, print the output file name before saving
    ijmetadata : string
        'copy' propagates the image's stored ImageJ Overlays/ROI metadata
        into the written file
    mode : string
        'IJ' writes an ImageJ-compatible TIFF; any other value writes the
        raw array as-is
    '''
    if print_file_name:
        print('saving', output_name)
    if output_name is None:
        logger.error("No output name specified... ignoring...")
        return
    # TODO maybe handle tif with stars in their name here to avoid loss of data but ok for now...
    if '*' not in output_name and (output_name.lower().endswith('.tif') or output_name.lower().endswith('.tiff')):
        self._create_dir(output_name)
        if mode != 'IJ':  # TODO maybe do a TA mode or alike instead...
            tifffile.imwrite(output_name, self)
        else:
            out = self
            # ImageJ does not support int32/int64 pixel types
            if out.dtype == np.int32:
                out = out.astype(np.float32)  # TODO check if correct with real image but should be
            if out.dtype == np.int64:
                out = out.astype(np.float64)  # TODO check if correct with real image but should be
            # IJ does not support bool type too
            # bug fix: the `np.bool` alias was removed in NumPy 1.24 and
            # raised AttributeError -- `np.bool_` is the actual dtype
            if out.dtype == np.bool_:
                out = out.astype(np.uint8) * 255
            if out.dtype == np.double:
                out = out.astype(np.float32)
            # pad any missing axes so the data reaches the expected t,z,y,x,c
            # rank before reordering for ImageJ
            if self.metadata['dimensions'] is not None:
                if not self.has_c():
                    out = out[..., np.newaxis]
                if not self.has_d():
                    out = out[np.newaxis, ...]
                if not self.has_t():
                    out = out[np.newaxis, ...]
            else:
                # no dimension specified --> assume always the same order that is tzyxc --> TODO maybe ...tzyxc
                if out.ndim < 3:
                    out = out[..., np.newaxis]
                if out.ndim < 4:
                    out = out[np.newaxis, ...]
                if out.ndim < 5:
                    out = out[np.newaxis, ...]
            # ImageJ expects TZCYXS order: move the trailing c axis before yx
            out = np.moveaxis(out, -1, -3)
            # TODO maybe offer compression at some point to gain space ???
            # TODO add the possibility to save ROIs if needed...
            # collect the ImageJ ROI/Overlay metadata to embed in the file
            rois = {}
            if ijmetadata == 'copy' and self.metadata['Overlays']:
                rois['Overlays'] = self.metadata['Overlays']
            if ijmetadata == 'copy' and self.metadata['ROI']:
                rois['ROI'] = self.metadata['ROI']
            if not rois:
                rois = None
            # quick hack to force images to display as composite in IJ if they have channels -> probably needs be improved at some point
            # NOTE(review): the `ijmetadata` kwarg was deprecated in recent
            # tifffile releases in favor of `metadata` -- confirm the pinned
            # tifffile version supports it
            tifffile.imwrite(output_name, out, imagej=True, ijmetadata=rois,
                             metadata={'mode': 'composite'} if self.metadata[
                                 'dimensions'] is not None and self.has_c() else {})  # small hack to keep only non RGB images as composite and self.get_dimension('c')!=3
    # TODO at some point handle support for RGB 24-32 bits images saving as IJ compatible but skip for now
# |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.