text stringlengths 957 885k |
|---|
<gh_stars>0
import math
from abc import ABC, abstractmethod
from copy import copy
from functools import reduce
from inspect import getfullargspec
from itertools import count
from numbers import Real
from operator import and_
from typing import Dict, Tuple, Iterator
from typing import Optional, Union, Type, List
import numpy as np
from autoconf import cached_property
from .transform import AbstractDensityTransform, LinearShiftTransform
from ..mapper.variable import Variable
enforce_id_match = True
def update_array(arr1, ind, arr2):
    """Return a copy of ``arr1`` with the slot ``ind`` replaced by ``arr2``.

    Scalars (0-d inputs) cannot be indexed, so in that case ``arr2``
    simply replaces the value wholesale. ``arr1`` is never mutated.
    """
    if not np.shape(arr1):
        return arr2
    updated = arr1.copy()
    updated[ind] = arr2
    return updated
class AbstractMessage(ABC):
log_base_measure: float
_Base_class: Optional[Type["AbstractMessage"]] = None
_projection_class: Optional[Type["AbstractMessage"]] = None
_multivariate: bool = False
_parameter_support: Optional[Tuple[Tuple[float, float], ...]] = None
_support: Optional[Tuple[Tuple[float, float], ...]] = None
ids = count()
def __init__(
self,
*parameters: Union[np.ndarray, float],
log_norm=0.0,
lower_limit=-math.inf,
upper_limit=math.inf,
id_=None
):
self.lower_limit = lower_limit
self.upper_limit = upper_limit
self.id = next(self.ids) if id_ is None else id_
self.log_norm = log_norm
self._broadcast = np.broadcast(*parameters)
if self.shape:
self.parameters = tuple(np.asanyarray(p) for p in parameters)
else:
self.parameters = tuple(parameters)
def __eq__(self, other):
return self.id == other.id
    def copy(self):
        """Return a new message with copied parameter arrays.

        The clone shares ``log_norm`` and the support limits; ``id`` is
        reassigned after construction so the copy compares equal to (and
        hashes like) the original under the id-based ``__eq__``/``__hash__``.
        """
        cls = self._Base_class or type(self)
        result = cls(
            *(copy(params) for params in self.parameters),
            log_norm=self.log_norm,
            lower_limit=self.lower_limit,
            upper_limit=self.upper_limit,
        )
        # Overwrite the freshly drawn id so the copy is "the same" message.
        result.id = self.id
        return result
def __bool__(self):
return True
@cached_property
@abstractmethod
def natural_parameters(self):
pass
@abstractmethod
def sample(self, n_samples: Optional[int] = None):
pass
@staticmethod
@abstractmethod
def invert_natural_parameters(
natural_parameters: np.ndarray,
) -> Tuple[np.ndarray, ...]:
pass
@staticmethod
@abstractmethod
def to_canonical_form(x: np.ndarray) -> np.ndarray:
pass
@cached_property
@abstractmethod
def log_partition(self) -> np.ndarray:
pass
@cached_property
@abstractmethod
def variance(self) -> np.ndarray:
pass
@cached_property
def scale(self) -> np.ndarray:
return self.std
@cached_property
def std(self) -> np.ndarray:
return self.variance ** 0.5
def __hash__(self):
return self.id
@classmethod
def calc_log_base_measure(cls, x):
return cls.log_base_measure
def __iter__(self) -> Iterator[np.ndarray]:
return iter(self.parameters)
@property
def shape(self) -> Tuple[int, ...]:
return self._broadcast.shape
@property
def size(self) -> int:
return self._broadcast.size
@property
def ndim(self) -> int:
return self._broadcast.ndim
@classmethod
def _cached_attrs(cls):
for n in dir(cls):
attr = getattr(cls, n)
if isinstance(attr, cached_property):
yield n
def _reset_cache(self):
for attr in self._cached_attrs():
self.__dict__.pop(attr, None)
def __getitem__(self, index) -> "AbstractMessage":
cls = self._Base_class or type(self)
if index == ():
return self
else:
return cls(*(param[index] for param in self.parameters))
def __setitem__(self, index, value):
self._reset_cache()
for param0, param1 in zip(self.parameters, value.parameters):
param0[index] = param1
def merge(self, index, value):
cls = self._Base_class or type(self)
return cls(
*(
update_array(param0, index, param1)
for param0, param1 in zip(self.parameters, value.parameters)
)
)
@classmethod
def from_natural_parameters(
cls, natural_parameters: np.ndarray, **kwargs
) -> "AbstractMessage":
cls_ = cls._projection_class or cls._Base_class or cls
args = cls_.invert_natural_parameters(natural_parameters)
return cls_(*args, **kwargs)
@classmethod
@abstractmethod
def invert_sufficient_statistics(
cls, sufficient_statistics: np.ndarray
) -> np.ndarray:
pass
@classmethod
def from_sufficient_statistics(
cls, suff_stats: np.ndarray, **kwargs
) -> "AbstractMessage":
natural_params = cls.invert_sufficient_statistics(suff_stats)
cls_ = cls._projection_class or cls._Base_class or cls
return cls_.from_natural_parameters(natural_params, **kwargs)
def sum_natural_parameters(self, *dists: "AbstractMessage") -> "AbstractMessage":
"""return the unnormalised result of multiplying the pdf
of this distribution with another distribution of the same
type
"""
new_params = sum(
(
dist.natural_parameters
for dist in self._iter_dists(dists)
if isinstance(dist, AbstractMessage)
),
self.natural_parameters,
)
return self.from_natural_parameters(
new_params,
id_=self.id,
lower_limit=self.lower_limit,
upper_limit=self.upper_limit,
)
def sub_natural_parameters(self, other: "AbstractMessage") -> "AbstractMessage":
"""return the unnormalised result of dividing the pdf
of this distribution with another distribution of the same
type"""
log_norm = self.log_norm - other.log_norm
new_params = self.natural_parameters - other.natural_parameters
return self.from_natural_parameters(
new_params,
log_norm=log_norm,
id_=self.id,
lower_limit=self.lower_limit,
upper_limit=self.upper_limit,
)
_multiply = sum_natural_parameters
_divide = sub_natural_parameters
def __mul__(self, other: Union["AbstractMessage", Real]) -> "AbstractMessage":
if isinstance(other, AbstractMessage):
return self._multiply(other)
else:
cls = self._Base_class or type(self)
log_norm = self.log_norm + np.log(other)
return cls(
*self.parameters,
log_norm=log_norm,
id_=self.id,
lower_limit=self.lower_limit,
upper_limit=self.upper_limit,
)
def __rmul__(self, other: "AbstractMessage") -> "AbstractMessage":
return self * other
def __truediv__(self, other: Union["AbstractMessage", Real]) -> "AbstractMessage":
if isinstance(other, AbstractMessage):
return self._divide(other)
else:
cls = self._Base_class or type(self)
log_norm = self.log_norm - np.log(other)
return cls(
*self.parameters,
log_norm=log_norm,
id_=self.id,
lower_limit=self.lower_limit,
upper_limit=self.upper_limit,
)
def __pow__(self, other: Real) -> "AbstractMessage":
natural = self.natural_parameters
new_params = other * natural
log_norm = other * self.log_norm
new = self.from_natural_parameters(
new_params,
log_norm=log_norm,
id_=self.id,
lower_limit=self.lower_limit,
upper_limit=self.upper_limit,
)
return new
@classmethod
def parameter_names(cls):
return getfullargspec(cls.__init__).args[1:-1]
def __str__(self) -> str:
param_attrs = [
(attr, np.asanyarray(getattr(self, attr)))
for attr in self.parameter_names()
]
if self.shape:
pad = max(len(attr) for attr, _ in param_attrs)
attr_str = " {:<%d}={}" % pad
param_strs = ",\n".join(
attr_str.format(attr, np.array2string(val, prefix=" " * (pad + 5)))
for attr, val in param_attrs
)
return f"{type(self).__name__}(\n{param_strs})"
else:
param_strs = ", ".join(
attr + "=" + np.array2string(val, prefix=" " * (len(attr) + 1))
for attr, val in param_attrs
)
return f"{type(self).__name__}({param_strs})"
__repr__ = __str__
def pdf(self, x: np.ndarray) -> np.ndarray:
return np.exp(self.logpdf(x))
def _broadcast_natural_parameters(self, x):
shape = np.shape(x)
if shape == self.shape:
return self.natural_parameters
elif shape[1:] == self.shape:
return self.natural_parameters[:, None, ...]
else:
raise ValueError(
f"shape of passed value {shape} does not "
f"match message shape {self.shape}"
)
def factor(self, x):
# self.assert_within_limits(x)
return self.logpdf(x)
def logpdf(self, x: np.ndarray) -> np.ndarray:
eta = self._broadcast_natural_parameters(x)
t = self.to_canonical_form(x)
log_base = self.calc_log_base_measure(x)
return self.natural_logpdf(eta, t, log_base, self.log_partition)
@classmethod
def natural_logpdf(cls, eta, t, log_base, log_partition):
eta_t = np.multiply(eta, t).sum(0)
return np.nan_to_num(log_base + eta_t - log_partition, nan=-np.inf)
def numerical_logpdf_gradient(
self, x: np.ndarray, eps: float = 1e-6
) -> Tuple[np.ndarray, np.ndarray]:
shape = np.shape(x)
if shape:
x0 = np.array(x, dtype=np.float64)
logl0 = self.logpdf(x0)
if self._multivariate:
grad_logl = np.empty(logl0.shape + x0.shape)
sl = tuple(slice(None) for _ in range(logl0.ndim))
with np.nditer(x0, flags=["multi_index"], op_flags=["readwrite"]) as it:
for xv in it:
xv += eps
logl = self.logpdf(x0)
grad_logl[sl + it.multi_index] = (logl - logl0) / eps
xv -= eps
else:
l0 = logl0.sum()
grad_logl = np.empty_like(x0)
with np.nditer(x0, flags=["multi_index"], op_flags=["readwrite"]) as it:
for xv in it:
xv += eps
logl = self.logpdf(x0).sum() # type: ignore
grad_logl[it.multi_index] = (logl - l0) / eps
xv -= eps
else:
logl0 = self.logpdf(x)
grad_logl = (self.logpdf(x + eps) - logl0) / eps
return logl0, grad_logl
def numerical_logpdf_gradient_hessian(
self, x: np.ndarray, eps: float = 1e-6
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
shape = np.shape(x)
if shape:
x0 = np.array(x, dtype=np.float64)
if self._multivariate:
logl0, gradl0 = self.numerical_logpdf_gradient(x0)
hess_logl = np.empty(gradl0.shape + x0.shape)
sl = tuple(slice(None) for _ in range(gradl0.ndim))
with np.nditer(x0, flags=["multi_index"], op_flags=["readwrite"]) as it:
for xv in it:
xv += eps
_, gradl = self.numerical_logpdf_gradient(x0)
hess_logl[sl + it.multi_index] = (gradl - gradl0) / eps
xv -= eps
else:
logl0 = self.logpdf(x0)
l0 = logl0.sum()
grad_logl = np.empty_like(x0)
hess_logl = np.empty_like(x0)
with np.nditer(x0, flags=["multi_index"], op_flags=["readwrite"]) as it:
for xv in it:
xv += eps
l1 = self.logpdf(x0).sum()
xv -= 2 * eps
l2 = self.logpdf(x0).sum()
g1 = (l1 - l0) / eps
g2 = (l0 - l2) / eps
grad_logl[it.multi_index] = g1
hess_logl[it.multi_index] = (g1 - g2) / eps
xv += eps
gradl0 = grad_logl
else:
logl0 = self.logpdf(x)
logl1 = self.logpdf(x + eps)
logl2 = self.logpdf(x - eps)
gradl0 = (logl1 - logl0) / eps
gradl1 = (logl0 - logl2) / eps
hess_logl = (gradl0 - gradl1) / eps
return logl0, gradl0, hess_logl
logpdf_gradient = numerical_logpdf_gradient
logpdf_gradient_hessian = numerical_logpdf_gradient_hessian
    @classmethod
    def project(
        cls, samples: np.ndarray, log_weight_list: Optional[np.ndarray] = None, **kwargs
    ) -> "AbstractMessage":
        """Calculates the sufficient statistics of a set of samples
        and returns the distribution with the appropriate parameters
        that match the sufficient statistics
        """
        # If log weights aren't passed then equally weight all samples.
        if log_weight_list is None:
            log_weight_list = np.zeros_like(samples)
        # Numerically stable weighting for very small/large weights:
        # factor out the per-sample maximum before exponentiating
        # (log-sum-exp trick), then restore it when taking the log.
        log_w_max = np.max(log_weight_list, axis=0, keepdims=True)
        w = np.exp(log_weight_list - log_w_max)
        norm = w.mean(0)
        log_norm = np.log(norm) + log_w_max[0]
        # Rescale coordinates to 'natural parameter space'.
        tx = cls.to_canonical_form(samples)
        w /= norm
        # Weighted mean of the canonical statistics over the sample axis.
        suff_stats = (tx * w[None, ...]).mean(1)
        assert np.isfinite(suff_stats).all()
        cls_ = cls._projection_class or cls._Base_class or cls
        return cls_.from_sufficient_statistics(suff_stats, log_norm=log_norm, **kwargs)
@classmethod
def from_mode(
cls, mode: np.ndarray, covariance: np.ndarray, **kwargs
) -> "AbstractMessage":
pass
def log_normalisation(self, *elems: Union["AbstractMessage", float]) -> np.ndarray:
"""
Calculates the log of the integral of the product of a
set of distributions
NOTE: ignores log normalisation
"""
# Remove floats from messages passed
dists: List[AbstractMessage] = [
dist
for dist in self._iter_dists(elems)
if isinstance(dist, AbstractMessage)
]
# Calculate log product of message normalisation
log_norm = self.log_base_measure - self.log_partition
log_norm += sum(dist.log_base_measure - dist.log_partition for dist in dists)
# Calculate log normalisation of product of messages
prod_dist = self.sum_natural_parameters(*dists)
log_norm -= prod_dist.log_base_measure - prod_dist.log_partition
return log_norm
def instance(self):
return self
@staticmethod
def _iter_dists(dists) -> Iterator[Union["AbstractMessage", float]]:
for elem in dists:
from autofit.mapper.prior.wrapped_instance import WrappedInstance
if isinstance(elem, (AbstractMessage, WrappedInstance)):
yield elem
elif np.isscalar(elem):
yield elem
else:
for dist in elem:
yield dist
def update_invalid(self, other: "AbstractMessage") -> "AbstractMessage":
valid = self.check_valid()
if self.ndim:
valid_parameters: Iterator[np.ndarray] = (
np.where(valid, p, p_safe) for p, p_safe in zip(self, other)
)
else:
# TODO: Fairly certain this would not work
valid_parameters = iter(self if valid else other)
cls = self._Base_class or type(self)
new = cls(
*valid_parameters,
log_norm=self.log_norm,
id_=self.id,
lower_limit=self.lower_limit,
upper_limit=self.upper_limit,
)
return new
def check_support(self) -> np.ndarray:
if self._parameter_support is not None:
return reduce(
and_,
(
(p >= support[0]) & (p <= support[1])
for p, support in zip(self.parameters, self._parameter_support)
),
)
elif self.ndim:
return np.array(True, dtype=bool, ndmin=self.ndim)
return np.array([True])
def check_finite(self) -> np.ndarray:
return np.isfinite(self.natural_parameters).all(0)
def check_valid(self) -> np.ndarray:
return self.check_finite() & self.check_support()
@cached_property
def is_valid(self) -> Union[np.ndarray, np.bool_]:
return np.all(self.check_finite()) and np.all(self.check_support())
@staticmethod
def _get_mean_variance(
mean: np.ndarray, covariance: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
mean, covariance = np.asanyarray(mean), np.asanyarray(covariance)
if not covariance.shape:
# If variance is float simply pass through
variance = covariance * np.ones_like(mean)
if not variance.shape:
variance = variance.item()
elif mean.shape == covariance.shape:
variance = np.asanyarray(covariance)
elif covariance.shape == mean.shape * 2:
# If 2D covariance matrix passed get diagonal
inds = tuple(np.indices(mean.shape))
variance = np.asanyarray(covariance)[inds * 2]
else:
raise ValueError(
f"shape of covariance {covariance.shape} is invalid "
f"must be (), {mean.shape}, or {mean.shape * 2}"
)
return mean, variance
def __call__(self, x):
return np.sum(self.logpdf(x))
def factor_jacobian(
self, x: np.ndarray, _variables: Optional[Tuple[str]] = ("x",)
) -> Union[np.ndarray, Tuple[np.ndarray, Tuple[np.ndarray, ...]]]:
loglike, g = self.logpdf_gradient(x)
g = np.expand_dims(g, list(range(loglike.ndim)))
return loglike.sum(), (g,)
def as_factor(self, variable: "Variable", name: Optional[str] = None):
from autofit.graphical.factor_graphs import Factor
if name is None:
shape = self.shape
clsname = type(self).__name__
family = clsname[:-7] if clsname.endswith("Message") else clsname
name = f"{family}Likelihood" + (str(shape) if shape else "")
return Factor(
self,
variable,
name=name,
factor_jacobian=self.factor_jacobian,
plates=variable.plates,
arg_names=["x"],
)
def calc_exact_update(self, x: "AbstractMessage") -> "AbstractMessage":
return self,
def has_exact_projection(self, x: "AbstractMessage") -> bool:
return type(self) is type(x)
@classmethod
def transformed(
cls,
transform: Union[AbstractDensityTransform, Type[AbstractDensityTransform]],
clsname: Optional[str] = None,
support: Optional[Tuple[Tuple[float, float], ...]] = None,
wrapper_cls=None,
):
# noinspection PyUnresolvedReferences
"""
transforms the distribution according the passed transform,
returns a newly created class that encodes the transformation.
Parameters
----------
wrapper_cls
transform: AbstractDensityTransform
object that transforms the density
clsname: str, optional
the class name of the newly created class.
defaults to "Transformed<OriginalClassName>"
support: Tuple[Tuple[float, float], optional
the support of the new class. Generally this can be
automatically calculated from the parent class
Examples
--------
>>> from autofit.messages.normal import NormalMessage
Normal distributions have infinite univariate support
>>> NormalMessage._support
((-inf, inf),)
We can tranform the NormalMessage to the unit interval
using `transform.phi_transform`
>>> UnitNormal = NormalMessage.transformed(transform.phi_transform)
>>> message = UnitNormal(1.2, 0.8)
>>> message._support
((0.0, 1.0),)
Samples from the UnitNormal will exist in the Unit interval
>>> samples = message.sample(1000)
>>> samples.min(), samples.mean(), samples.max()
(0.06631750944045942, 0.8183189295040845, 0.9999056316923468)
Projections still work for the transformed class
>>> UnitNormal.project(samples, samples*0)
TransformedNormalMessage(mu=1.20273342, sigma=0.80929032)
Can specify the name of the new transformed class
>>> NormalMessage.transformed(transform.phi_transform, 'UnitNormal')(0, 1.)
UnitNormal(mu=0, sigma=1.)
The transformed objects are pickleable
>>> import pickle
>>> pickle.loads(pickle.dumps(message))
TransformedNormalMessage(mu=1.2, sigma=0.8)
The transformed objects also are normalised,
>>> from scipy.integrate import quad
>>> # noinspection PyTypeChecker
>>> quad(message.pdf, 0, 1)
(1.0000000000114622, 3.977073226302252e-09)
Can also nest transforms
>>> WeirdNormal = NormalMessage.transformed(
transform.log_transform).transformed(
transform.exp_transform)
This transformation is equivalent to the identity transform!
>>> WeirdNormal.project(NormalMessage(0.3, 0.8).sample(1000))
Transformed2NormalMessage(mu=0.31663248, sigma=0.79426984)
This functionality is more useful for applying linear shifts
e.g.
>>> ShiftedUnitNormal = NormalMessage.transformed(
transform.phi_transform
).shifted(shift=0.7, scale=2.3)
>>> ShiftedUnitNormal._support
((0.7, 3.0),)
>>> samples = ShiftedUnitNormal(0.2, 0.8).sample(1000)
>>> samples.min(), samples.mean(), samples.max()
"""
from .transform_wrapper import TransformedWrapper
wrapper_cls = wrapper_cls or TransformedWrapper
return wrapper_cls(
cls=cls,
transform=transform,
clsname=clsname,
support=support,
)
@classmethod
def shifted(
cls,
shift: float = 0,
scale: float = 1,
wrapper_cls=None,
):
return cls.transformed(
LinearShiftTransform(shift=shift, scale=scale),
clsname=f"Shifted{cls.__name__}",
wrapper_cls=wrapper_cls,
)
@classmethod
def _reconstruct(
cls,
parameters: Tuple[np.ndarray, ...],
log_norm: float,
id_,
lower_limit,
upper_limit,
*args,
):
return cls(
*parameters,
log_norm=log_norm,
id_=id_,
lower_limit=lower_limit,
upper_limit=upper_limit,
)
def __reduce__(self):
# serialises TransformedMessage during pickling
return (
self._reconstruct,
(
self.parameters,
self.log_norm,
self.id,
self.lower_limit,
self.upper_limit,
),
)
def _sample(self, n_samples):
# Needed for nested TransformedMessage method resolution
return self.sample(n_samples)
@classmethod
def _logpdf_gradient(cls, self, x):
# Needed for nested TransformedMessage method resolution
return cls.logpdf_gradient(self, x)
@classmethod
def _logpdf_gradient_hessian(cls, self, x):
# Needed for nested TransformedMessage method resolution
return cls.logpdf_gradient_hessian(self, x)
def map_dists(
    dists: Dict[str, AbstractMessage],
    values: Dict[str, np.ndarray],
    _call: str = "logpdf",
) -> Iterator[Tuple[str, np.ndarray]]:
    """
    Calls a method (default: logpdf) for each Message in dists
    on the corresponding value in values
    """
    # Only keys present in both mappings are evaluated.
    shared_keys = dists.keys() & values.keys()
    for key in shared_keys:
        dist = dists[key]
        if isinstance(dist, AbstractMessage):
            yield key, getattr(dist, _call)(values[key])
|
<filename>eda_plugin/examples/main.py
"""Main functions that assemble a full EDA pipeline."""
import sys
import eda_plugin.utility.settings
from eda_plugin.actuators.micro_manager import TimerMMAcquisition
from eda_plugin.interpreters.frame_rate import BinaryFrameRateInterpreter
from eda_plugin.utility.eda_gui import EDAMainGUI
from eda_plugin.utility.event_bus import EventBus
from PyQt5 import QtWidgets
def basic():
    """EDA loop that can be used to test without a microscope and without CUDA installation."""
    # Local imports keep optional actuator/analyser dependencies out of module import time.
    from eda_plugin.actuators.micro_manager import MMActuator, TimerMMAcquisition
    from eda_plugin.analysers.image import ImageAnalyser
    eda_plugin.utility.settings.setup_logging()
    # Construct the QApplication environment, that the GUIs and event loop run in.
    app = QtWidgets.QApplication(sys.argv)
    # Start an additional zmq server that works together with the PythonEventServer Plugin
    # for both communication between the EDA components and Micro-Manager.
    event_bus = EventBus()
    # Call the main components of the EDA loop (TimerMMAcquisition is also the default).
    # Each component subscribes itself to the shared event bus, so the bus must exist first.
    actuator = MMActuator(event_bus, TimerMMAcquisition)
    analyser = ImageAnalyser(event_bus)
    interpreter = BinaryFrameRateInterpreter(event_bus)
    # Start the main GUI showing the EDA plot and the controls for the specific components
    gui = EDAMainGUI(event_bus, viewer=False)
    gui.show()
    actuator.gui.show()
    interpreter.gui.show()
    # Start the Qt event loop; blocks until all windows are closed.
    sys.exit(app.exec_())
# This could also be used with a Napari Viewer:
# viewer = NapariImageViewer()
# event_bus.new_network_image.connect(viewer.add_network_image)
def pyro():
    """EDA loop that can be used to test without a microscope and without CUDA installation."""
    from eda_plugin.actuators.micro_manager import MMActuator
    from eda_plugin.actuators.pycromanager import PycroAcquisition
    from eda_plugin.analysers.image import PycroImageAnalyser
    eda_plugin.utility.settings.setup_logging()
    app = QtWidgets.QApplication(sys.argv)
    event_bus = EventBus()
    gui = EDAMainGUI(event_bus, viewer=False)
    # Pycro-Manager based acquisition/analysis instead of the default timer version.
    actuator = MMActuator(event_bus, PycroAcquisition)
    analyser = PycroImageAnalyser(event_bus)
    interpreter = BinaryFrameRateInterpreter(event_bus)
    gui.show()
    actuator.gui.show()
    interpreter.gui.show()
    sys.exit(app.exec_())
def keras():
    """EDA loop using a neural network analyser that can be used for testing."""
    from eda_plugin.actuators.micro_manager import MMActuator
    from eda_plugin.analysers.keras import KerasAnalyser
    eda_plugin.utility.settings.setup_logging()
    app = QtWidgets.QApplication(sys.argv)
    event_bus = EventBus()
    gui = EDAMainGUI(event_bus, viewer=True)
    actuator = MMActuator(event_bus)
    analyser = KerasAnalyser(event_bus)
    interpreter = BinaryFrameRateInterpreter(event_bus)
    # Dock the component GUIs inside the main window instead of showing
    # them as separate windows (cf. the commented-out calls below).
    gui.add_dock_widget(actuator.gui)
    gui.add_dock_widget(interpreter.gui)
    gui.add_dock_widget(analyser.gui)
    gui.show()
    # actuator.gui.show()
    # interpreter.gui.show()
    # analyser.gui.show()
    sys.exit(app.exec_())
def pyro_keras():
    """EDA loop that can be used to test without a microscope and without CUDA installation."""
    from eda_plugin.actuators.micro_manager import MMActuator
    from eda_plugin.analysers.keras import KerasAnalyser
    from .actuators import InjectedPycroAcquisition
    eda_plugin.utility.settings.setup_logging()
    app = QtWidgets.QApplication(sys.argv)
    event_bus = EventBus()
    gui = EDAMainGUI(event_bus, viewer=True)
    # Acquisition that injects pre-recorded frames via Pycro-Manager,
    # so the Keras analyser can be exercised without hardware.
    actuator = MMActuator(event_bus, InjectedPycroAcquisition)
    analyser = KerasAnalyser(event_bus)
    interpreter = BinaryFrameRateInterpreter(event_bus)
    gui.show()
    actuator.gui.show()
    analyser.gui.show()
    interpreter.gui.show()
    sys.exit(app.exec_())
def main_isim():
    """EDA loop used on the iSIM."""
    from eda_plugin.actuators.daq import DAQActuator
    from eda_plugin.analysers.image import ImageAnalyser
    eda_plugin.utility.settings.setup_logging()
    app = QtWidgets.QApplication(sys.argv)
    event_bus = EventBus()
    gui = EDAMainGUI(event_bus, viewer=True)
    # The DAQ actuator drives the hardware directly; its GUI is not shown here.
    actuator = DAQActuator(event_bus)
    analyser = ImageAnalyser(event_bus)
    interpreter = BinaryFrameRateInterpreter(event_bus)
    gui.show()
    # actuator.gui.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
    # Default entry point: run the neural-network (Keras) EDA loop.
    keras()
|
# Copyright 2018 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from openstack import exceptions
from openstack import resource2 as resource
from openstack.vpc import vpc_service
class PrivateIP(resource.Resource):
    """A VPC private IP address resource.

    Listing uses a subnet-scoped URI (``list_base_path``) while
    create/update/delete use ``base_path``; creation additionally wraps
    the request body in a list (see ``create``).
    """
    resource_key = 'privateip'
    resources_key = 'privateips'
    base_path = '/privateips'
    # Note: The uri for listing is different from base_path
    list_base_path = '/subnets/%(subnet_id)s/privateips'
    service = vpc_service.VpcServiceV1()
    # capabilities
    allow_create = True
    allow_get = True
    allow_update = True
    allow_delete = True
    allow_list = True
    #: The status of the private IP address.
    status = resource.Body('status')
    #: The ID of the subnet from which the IP address is allocated.
    subnet_id = resource.Body('subnet_id')
    #: The project(tenant) ID of the operator.
    project_id = resource.Body('tenant_id')
    #: The VM or network device using the private IP address.
    device_owner = resource.Body('device_owner')
    #: The private IP address obtained.
    ip_address = resource.Body('ip_address')
    @classmethod
    def get_list_uri(cls, params):
        """Build the subnet-scoped listing URI; ``params`` must contain 'subnet_id'."""
        return cls.list_base_path % params
    def create(self, session, prepend_key=True):
        """Create a remote resource based on this instance.

        :param session: The session to use for making this request.
        :type session: :class:`~openstack.session.Session`
        :param prepend_key: A boolean indicating whether the resource_key
                            should be prepended in a resource creation
                            request. Default to True.
        :return: This :class:`Resource` instance.
        :raises: :exc:`~openstack.exceptions.MethodNotSupported` if
                 :data:`Resource.allow_create` is not set to ``True``.
        """
        if not self.allow_create:
            raise exceptions.MethodNotSupported(self, "create")
        endpoint_override = self.service.get_endpoint_override()
        request = self._prepare_request(requires_id=False,
                                        prepend_key=prepend_key)
        # Note: The creation request body is a list of private
        # ip address objects with 'privateips' as the key. This is not
        # identical to the normal API request.
        # The 'subnet_id' attribute is mandatory in the creation request Body
        # but is an URI attribute in the updating and deleting requests. So
        # we add it into the creation request body manually.
        if prepend_key:
            body = request.body[self.resource_key]
            body['subnet_id'] = self.subnet_id
            request.body = {'privateips': [body]}
        else:
            request.body['subnet_id'] = self.subnet_id
            request.body = {'privateips': [request.body]}
        response = session.post(request.uri, endpoint_filter=self.service,
                                endpoint_override=endpoint_override,
                                json=request.body, headers=request.headers)
        body = response.json()
        # Unwrap the response envelope down to the single created object.
        if self.resource_key and self.resource_key in body:
            body = body[self.resource_key]
        if self.resources_key and self.resources_key in body:
            private_ips = body[self.resources_key]
            # Note: We only support to create private ips one
            # by one.
            body = private_ips[0] if len(private_ips) > 0 else {}
        # Sync the returned attributes back onto this instance and mark
        # them clean so they are not re-sent on a subsequent update.
        body = self._filter_component(body, self._body_mapping())
        self._body.attributes.update(body)
        self._body.clean()
        headers = self._filter_component(response.headers,
                                         self._header_mapping())
        self._header.attributes.update(headers)
        self._header.clean()
        return self
    @classmethod
    def batch_create(cls, session, private_ips):
        """Create the given private ips in batch

        :param session: The session to use for making this request.
        :type session: :class:`~openstack.session.Session`
        :param private_ips: A list of dict defined private ip.
        :returns: A list of the created private ips.
        """
        uri = cls.base_path
        body = {'privateips': private_ips}
        endpoint_override = cls.service.get_endpoint_override()
        response = session.post(uri, endpoint_filter=cls.service,
                                endpoint_override=endpoint_override,
                                json=body)
        body = response.json()
        # Wrap each returned dict in a PrivateIP instance.
        result = []
        for each in body[cls.resources_key]:
            result.append(cls.new(**each))
        return result
|
<filename>manimlib/utils/color.py
import random
from colour import Color
import numpy as np
from manimlib.constants import WHITE
from manimlib.utils.bezier import interpolate
from manimlib.utils.simple_functions import clip_in_place
from manimlib.utils.space_ops import normalize
def color_to_rgb(color):
    """Convert a hex string or ``colour.Color`` into an rgb array of floats in [0, 1]."""
    if isinstance(color, str):
        return hex_to_rgb(color)
    if isinstance(color, Color):
        return np.array(color.get_rgb())
    raise Exception("Invalid color type")
def color_to_rgba(color, alpha=1):
    """Convert a color to an rgba array, appending the given alpha."""
    return np.array([*color_to_rgb(color), alpha])
def rgb_to_color(rgb):
    """Convert an rgb triple to a ``colour.Color``; invalid values fall back to white."""
    try:
        return Color(rgb=rgb)
    except ValueError:
        return Color(WHITE)
def rgba_to_color(rgba):
    """Convert an rgba sequence to a Color, discarding the alpha channel."""
    return rgb_to_color(rgba[:3])
def rgb_to_hex(rgb):
    """Convert an iterable of rgb floats in [0, 1] to a '#rrggbb' hex string."""
    return "#" + "".join("%02x" % int(255 * channel) for channel in rgb)
def hex_to_rgb(hex_code):
    """Convert a '#rgb' or '#rrggbb' hex string to an rgb array of floats in [0, 1]."""
    digits = hex_code[1:]
    if len(digits) == 3:
        # Expand shorthand like 'f80' to 'ff8800'.
        digits = "".join(c + c for c in digits)
    channels = [int(digits[i:i + 2], 16) / 255 for i in range(0, 6, 2)]
    return np.array(channels)
def invert_color(color):
    """Return the rgb complement (1 - rgb) of the given color."""
    return rgb_to_color(1.0 - color_to_rgb(color))
def color_to_int_rgb(color):
    """Convert a color to integer rgb channels in [0, 255] (uint8)."""
    return (255 * color_to_rgb(color)).astype('uint8')
def color_to_int_rgba(color, opacity=1.0):
    """Convert a color to integer rgba, scaling opacity to [0, 255]."""
    alpha = int(255 * opacity)
    return np.array([*color_to_int_rgb(color), alpha])
def color_gradient(reference_colors, length_of_output):
    """Return ``length_of_output`` colors interpolated through ``reference_colors``.

    NOTE(review): for ``length_of_output == 0`` this returns a single
    color rather than an empty list — callers may rely on it; confirm
    before changing.
    """
    if length_of_output == 0:
        return reference_colors[0]
    rgbs = list(map(color_to_rgb, reference_colors))
    # Fractional positions of each output color along the reference list.
    alphas = np.linspace(0, (len(rgbs) - 1), length_of_output)
    floors = alphas.astype('int')
    alphas_mod1 = alphas % 1
    # End edge case: pin the final color exactly onto the last reference
    # (otherwise floor/mod would index one segment past the end).
    alphas_mod1[-1] = 1
    floors[-1] = len(rgbs) - 2
    return [
        rgb_to_color(interpolate(rgbs[i], rgbs[i + 1], alpha))
        for i, alpha in zip(floors, alphas_mod1)
    ]
def interpolate_color(color1, color2, alpha):
    """Linearly blend two colors in rgb space; alpha=0 gives color1, alpha=1 gives color2."""
    rgb = interpolate(color_to_rgb(color1), color_to_rgb(color2), alpha)
    return rgb_to_color(rgb)
def average_color(*colors):
    """Return the componentwise rgb mean of the given colors."""
    rgbs = np.array(list(map(color_to_rgb, colors)))
    return rgb_to_color(rgbs.mean(0))
def random_bright_color():
    """Return a random color brightened by mixing halfway toward white."""
    color = random_color()
    curr_rgb = color_to_rgb(color)
    new_rgb = interpolate(
        curr_rgb, np.ones(len(curr_rgb)), 0.5
    )
    return Color(rgb=new_rgb)
def random_color():
    """Return a Color with each rgb channel drawn uniformly from [0, 1)."""
    # Use a tuple rather than a generator expression: a generator can
    # only be consumed once, which breaks consumers that iterate the
    # rgb value more than once.
    return Color(rgb=tuple(random.random() for _ in range(3)))
def get_shaded_rgb(rgb, point, unit_normal_vect, light_source):
    """Shade ``rgb`` according to how directly the surface at ``point``
    (with normal ``unit_normal_vect``) faces ``light_source``.

    Returns a new rgb array clipped to [0, 1]; inputs are not modified.
    """
    to_sun = normalize(light_source - point)
    # Cubing the cosine sharpens the highlight falloff.
    factor = 0.5 * np.dot(unit_normal_vect, to_sun) ** 3
    if factor < 0:
        # Faces turned away from the light are darkened more gently.
        factor *= 0.5
    result = rgb + factor
    # BUG FIX: clip the array we actually return. Previously
    # ``clip_in_place(rgb + factor, 0, 1)`` clipped a throwaway
    # temporary, so out-of-range channel values leaked into the result.
    clip_in_place(result, 0, 1)
    return result
def get_colormap_list(map_name="viridis", n_colors=9):
    """Sample ``n_colors`` evenly spaced rgb triples from a matplotlib colormap.

    NOTE(review): ``.colors`` only exists on ListedColormap instances
    (e.g. 'viridis'); segmented colormaps such as 'jet' would raise
    AttributeError here — confirm the intended set of map names.
    """
    from matplotlib.cm import get_cmap
    rgbs = get_cmap(map_name).colors  # Make more general?
    return [
        rgbs[int(n)]
        for n in np.linspace(0, len(rgbs) - 1, n_colors)
    ]
|
<reponame>banjin/FluentPython-example
"""
A multi-dimensional ``Vector`` class, take 4
A ``Vector`` is built from an iterable of numbers::
>>> Vector([3.1, 4.2])
Vector([3.1, 4.2])
>>> Vector((3, 4, 5))
Vector([3.0, 4.0, 5.0])
>>> Vector(range(10))
Vector([0.0, 1.0, 2.0, 3.0, 4.0, ...])
Tests with 2-dimensions (same results as ``vector2d_v1.py``)::
>>> v1 = Vector([3, 4])
>>> x, y = v1
>>> x, y
(3.0, 4.0)
>>> v1
Vector([3.0, 4.0])
>>> v1_clone = eval(repr(v1))
>>> v1 == v1_clone
True
>>> print(v1)
(3.0, 4.0)
>>> octets = bytes(v1)
>>> octets
b'd\\x00\\x00\\x00\\x00\\x00\\x00\\x08@\\x00\\x00\\x00\\x00\\x00\\x00\\x10@'
>>> abs(v1)
5.0
>>> bool(v1), bool(Vector([0, 0]))
(True, False)
Test of ``.frombytes()`` class method:
>>> v1_clone = Vector.frombytes(bytes(v1))
>>> v1_clone
Vector([3.0, 4.0])
>>> v1 == v1_clone
True
Tests with 3-dimensions::
>>> v1 = Vector([3, 4, 5])
>>> x, y, z = v1
>>> x, y, z
(3.0, 4.0, 5.0)
>>> v1
Vector([3.0, 4.0, 5.0])
>>> v1_clone = eval(repr(v1))
>>> v1 == v1_clone
True
>>> print(v1)
(3.0, 4.0, 5.0)
>>> abs(v1) # doctest:+ELLIPSIS
7.071067811...
>>> bool(v1), bool(Vector([0, 0, 0]))
(True, False)
Tests with many dimensions::
>>> v7 = Vector(range(7))
>>> v7
Vector([0.0, 1.0, 2.0, 3.0, 4.0, ...])
>>> abs(v7) # doctest:+ELLIPSIS
9.53939201...
Test of ``.__bytes__`` and ``.frombytes()`` methods::
>>> v1 = Vector([3, 4, 5])
>>> v1_clone = Vector.frombytes(bytes(v1))
>>> v1_clone
Vector([3.0, 4.0, 5.0])
>>> v1 == v1_clone
True
Tests of sequence behavior::
>>> v1 = Vector([3, 4, 5])
>>> len(v1)
3
>>> v1[0], v1[len(v1)-1], v1[-1]
(3.0, 5.0, 5.0)
Test of slicing::
>>> v7 = Vector(range(7))
>>> v7[-1]
6.0
>>> v7[1:4]
Vector([1.0, 2.0, 3.0])
>>> v7[-1:]
Vector([6.0])
>>> v7[1,2]
Traceback (most recent call last):
...
TypeError: Vector indices must be integers
Tests of dynamic attribute access::
>>> v7 = Vector(range(10))
>>> v7.x
0.0
>>> v7.y, v7.z, v7.t
(1.0, 2.0, 3.0)
Dynamic attribute lookup failures::
>>> v7.k
Traceback (most recent call last):
...
AttributeError: 'Vector' object has no attribute 'k'
>>> v3 = Vector(range(3))
>>> v3.t
Traceback (most recent call last):
...
AttributeError: 'Vector' object has no attribute 't'
>>> v3.spam
Traceback (most recent call last):
...
AttributeError: 'Vector' object has no attribute 'spam'
Tests of hashing::
>>> v1 = Vector([3, 4])
>>> v2 = Vector([3.1, 4.2])
>>> v3 = Vector([3, 4, 5])
>>> v6 = Vector(range(6))
>>> hash(v1), hash(v3), hash(v6)
(7, 2, 1)
Most hash values of non-integers vary from a 32-bit to 64-bit CPython build::
>>> import sys
>>> hash(v2) == (384307168202284039 if sys.maxsize > 2**32 else 357915986)
True
"""
from array import array
import reprlib
import math
import numbers
import functools
import operator
class Vector:
    """A multidimensional vector backed by a compact array of C doubles."""

    typecode = 'd'

    def __init__(self, components):
        # Any iterable of numbers is accepted; values are coerced to float.
        self._components = array(self.typecode, components)

    def __iter__(self):
        return iter(self._components)

    def __repr__(self):
        # reprlib abbreviates long arrays, e.g. Vector([0.0, 1.0, ..., ...]).
        body = reprlib.repr(self._components)
        body = body[body.find('['):-1]
        return 'Vector({})'.format(body)

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        # One byte for the typecode, then the raw component bytes.
        return bytes([ord(self.typecode)]) + bytes(self._components)

    def __eq__(self, other):
        if len(self) != len(other):
            return False
        return all(a == b for a, b in zip(self, other))

    def __hash__(self):
        # XOR of the component hashes; 0 is the identity for the empty vector.
        return functools.reduce(operator.xor, (hash(c) for c in self), 0)

    def __abs__(self):
        return math.sqrt(sum(c * c for c in self))

    def __bool__(self):
        return bool(abs(self))

    def __len__(self):
        return len(self._components)

    def __getitem__(self, index):
        cls = type(self)
        if isinstance(index, slice):
            # Slicing returns a new Vector, not a bare array slice.
            return cls(self._components[index])
        if isinstance(index, numbers.Integral):
            return self._components[index]
        msg = '{cls.__name__} indices must be integers'
        raise TypeError(msg.format(cls=cls))

    # Single-letter read-only aliases for the first four components.
    shortcut_names = 'xyzt'

    def __getattr__(self, name):
        cls = type(self)
        if len(name) == 1:
            pos = cls.shortcut_names.find(name)
            if 0 <= pos < len(self._components):
                return self._components[pos]
        msg = '{.__name__!r} object has no attribute {!r}'
        raise AttributeError(msg.format(cls, name))

    @classmethod
    def frombytes(cls, octets):
        """Rebuild a Vector from the output of ``bytes(v)``."""
        typecode = chr(octets[0])
        memv = memoryview(octets[1:]).cast(typecode)
        return cls(memv)
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import long
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close
)
def assert_all(x):
    """Assert that ``x`` is entirely truthy, reporting ``x`` itself on failure."""
    assert_(np.all(x), x)
class TestCommonType(object):
    """Spot-check `common_type` for int, half/single/double and complex inputs."""

    def test_basic(self):
        cases = [
            (np.int32, np.float64),     # integers promote to double
            (np.float16, np.float16),   # floats map to themselves
            (np.float32, np.float32),
            (np.float64, np.float64),
            (np.csingle, np.csingle),   # complex types map to themselves
            (np.cdouble, np.cdouble),
        ]
        for dtype, expected in cases:
            data = np.array([[1, 2], [3, 4]], dtype=dtype)
            assert_(common_type(data) == expected)
class TestMintypecode(object):
    """Checks for `mintypecode`, which selects the smallest typecode that can
    safely represent a combination of input typecodes."""

    def test_default_1(self):
        # Any integer-ish typecode alone promotes to double precision.
        for itype in '1bcsuwil':
            assert_equal(mintypecode(itype), 'd')
        # Float/complex typecodes map to themselves.
        for tc in 'fdFD':
            assert_equal(mintypecode(tc), tc)

    def test_default_2(self):
        # Integer-ish codes never dominate a float/complex code.
        for itype in '1bcsuwil':
            for tc in 'fdFD':
                assert_equal(mintypecode(itype + tc), tc)
        # Pairwise combinations of float/complex codes.
        # (Historical note: with the removed ``savespace`` flag, 'dF' and
        # 'Fd' used to collapse to 'F'.)
        expected = {
            'ff': 'f', 'fd': 'd', 'fF': 'F', 'fD': 'D',
            'df': 'd', 'dd': 'd', 'dF': 'D', 'dD': 'D',
            'Ff': 'F', 'Fd': 'D', 'FF': 'F', 'FD': 'D',
            'Df': 'D', 'Dd': 'D', 'DF': 'D', 'DD': 'D',
        }
        for codes, result in expected.items():
            assert_equal(mintypecode(codes), result)

    def test_default_3(self):
        # Triples: complex + double anywhere forces 'D'.
        expected = {
            'fdF': 'D', 'fdD': 'D', 'fFD': 'D', 'dFD': 'D',
            'ifd': 'd', 'ifF': 'F', 'ifD': 'D', 'idF': 'D', 'idD': 'D',
        }
        for codes, result in expected.items():
            assert_equal(mintypecode(codes), result)
class TestIsscalar(object):
    """Sanity checks for np.isscalar on scalars versus containers."""

    def test_basic(self):
        assert_(np.isscalar(3))
        assert_(not np.isscalar([3]))
        assert_(not np.isscalar((3,)))
        assert_(np.isscalar(3j))
        # `long` here was the numpy.compat Python-2 shim, which is simply
        # `int` on Python 3 and was removed along with numpy.compat in
        # numpy 2.0 -- test a plain int and drop the compat dependency.
        assert_(np.isscalar(int(10)))
        assert_(np.isscalar(4.0))
class TestReal(object):
    """np.real passes real data through and preserves array-ness/scalar-ness."""

    def test_real(self):
        data = np.random.rand(10,)
        assert_array_equal(data, np.real(data))

        # A zero-d array stays an ndarray.
        zero_dim = np.array(1)
        result = np.real(zero_dim)
        assert_array_equal(zero_dim, result)
        assert_(isinstance(result, np.ndarray))

        # A plain Python scalar stays a scalar.
        result = np.real(1)
        assert_equal(1, result)
        assert_(not isinstance(result, np.ndarray))

    def test_cmplx(self):
        data = np.random.rand(10,) + 1j*np.random.rand(10,)
        assert_array_equal(data.real, np.real(data))

        zero_dim = np.array(1 + 1j)
        result = np.real(zero_dim)
        assert_array_equal(zero_dim.real, result)
        assert_(isinstance(result, np.ndarray))

        result = np.real(1 + 1j)
        assert_equal(1.0, result)
        assert_(not isinstance(result, np.ndarray))
class TestImag(object):
    """np.imag is zero for real input and preserves array-ness/scalar-ness."""

    def test_real(self):
        data = np.random.rand(10,)
        assert_array_equal(0, np.imag(data))

        # A zero-d array stays an ndarray.
        zero_dim = np.array(1)
        result = np.imag(zero_dim)
        assert_array_equal(0, result)
        assert_(isinstance(result, np.ndarray))

        # A plain Python scalar stays a scalar.
        result = np.imag(1)
        assert_equal(0, result)
        assert_(not isinstance(result, np.ndarray))

    def test_cmplx(self):
        data = np.random.rand(10,) + 1j*np.random.rand(10,)
        assert_array_equal(data.imag, np.imag(data))

        zero_dim = np.array(1 + 1j)
        result = np.imag(zero_dim)
        assert_array_equal(zero_dim.imag, result)
        assert_(isinstance(result, np.ndarray))

        result = np.imag(1 + 1j)
        assert_equal(1.0, result)
        assert_(not isinstance(result, np.ndarray))
class TestIscomplex(object):
    """iscomplex is elementwise: True only where the imaginary part is nonzero."""

    def test_fail(self):
        z = np.array([-1, 0, 1])
        res = iscomplex(z)
        # BUG FIX: np.sometrue is a long-deprecated alias of np.any and was
        # removed in numpy 2.0; use the ndarray.any method instead.
        assert_(not res.any(axis=0))

    def test_pass(self):
        z = np.array([-1j, 1, 0])
        res = iscomplex(z)
        assert_array_equal(res, [1, 0, 0])
class TestIsreal(object):
    """isreal flags elements whose imaginary part is exactly zero."""

    def test_pass(self):
        values = np.array([-1, 0, 1j])
        assert_array_equal(isreal(values), [1, 1, 0])

    def test_fail(self):
        values = np.array([-1j, 1, 0])
        assert_array_equal(isreal(values), [0, 1, 1])
class TestIscomplexobj(object):
    """iscomplexobj inspects the *dtype* of the whole object (array, scalar,
    list, or anything exposing a ``dtype`` attribute), not individual values."""
    def test_basic(self):
        z = np.array([-1, 0, 1])
        assert_(not iscomplexobj(z))
        z = np.array([-1j, 0, -1])
        assert_(iscomplexobj(z))
    def test_scalar(self):
        # Python scalars are classified by type, not value: 1+0j is complex.
        assert_(not iscomplexobj(1.0))
        assert_(iscomplexobj(1+0j))
    def test_list(self):
        # A single complex element makes the inferred dtype of the list complex.
        assert_(iscomplexobj([3, 1+0j, True]))
        assert_(not iscomplexobj([3, 1, True]))
    def test_duck(self):
        # Any object exposing a complex np.dtype attribute counts as complex.
        class DummyComplexArray:
            @property
            def dtype(self):
                return np.dtype(complex)
        dummy = DummyComplexArray()
        assert_(iscomplexobj(dummy))
    def test_pandas_duck(self):
        # This tests a custom np.dtype duck-typed class, such as used by pandas
        # (pandas.core.dtypes)
        class PdComplex(np.complex128):
            pass
        class PdDtype(object):
            name = 'category'
            names = None
            type = PdComplex
            kind = 'c'
            str = '<c16'
            base = np.dtype('complex128')
        class DummyPd:
            @property
            def dtype(self):
                return PdDtype
        dummy = DummyPd()
        assert_(iscomplexobj(dummy))
    def test_custom_dtype_duck(self):
        # A `dtype` attribute that is a plain Python type (not an np.dtype
        # instance) must also be recognized.
        class MyArray(list):
            @property
            def dtype(self):
                return complex
        a = MyArray([1+0j, 2+0j, 3+0j])
        assert_(iscomplexobj(a))
class TestIsrealobj(object):
    """isrealobj is a whole-array predicate on the dtype, not elementwise."""

    def test_basic(self):
        assert_(isrealobj(np.array([-1, 0, 1])))
        assert_(not isrealobj(np.array([-1j, 0, -1])))
class TestIsnan(object):
    """np.isnan: True only for NaN, never for infinities or finite values."""
    def test_goodvalues(self):
        z = np.array((-1., 0., 1.))
        res = np.isnan(z) == 0
        assert_all(np.all(res, axis=0))
    def test_posinf(self):
        # 1/0 -> +inf, which is not NaN.
        with np.errstate(divide='ignore'):
            assert_all(np.isnan(np.array((1.,))/0.) == 0)
    def test_neginf(self):
        # -1/0 -> -inf, also not NaN.
        with np.errstate(divide='ignore'):
            assert_all(np.isnan(np.array((-1.,))/0.) == 0)
    def test_ind(self):
        # 0/0 is indeterminate and produces NaN.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isnan(np.array((0.,))/0.) == 1)
    def test_integer(self):
        assert_all(np.isnan(1) == 0)
    def test_complex(self):
        assert_all(np.isnan(1+1j) == 0)
    def test_complex1(self):
        # Complex 0/0 -> nan+nanj, which is NaN.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isnan(np.array(0+0j)/0.) == 1)
class TestIsfinite(object):
    """np.isfinite: False for +inf, -inf and NaN; True otherwise."""
    # Fixme, wrong place, isfinite now ufunc
    def test_goodvalues(self):
        z = np.array((-1., 0., 1.))
        res = np.isfinite(z) == 1
        assert_all(np.all(res, axis=0))
    def test_posinf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((1.,))/0.) == 0)
    def test_neginf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((-1.,))/0.) == 0)
    def test_ind(self):
        # 0/0 -> NaN, which is not finite.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((0.,))/0.) == 0)
    def test_integer(self):
        assert_all(np.isfinite(1) == 1)
    def test_complex(self):
        assert_all(np.isfinite(1+1j) == 1)
    def test_complex1(self):
        # Complex division by zero yields an inf/NaN mix: not finite.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
class TestIsinf(object):
    """np.isinf: True for +/-inf (1-d or zero-d input), False for NaN."""
    # Fixme, wrong place, isinf now ufunc
    def test_goodvalues(self):
        z = np.array((-1., 0., 1.))
        res = np.isinf(z) == 0
        assert_all(np.all(res, axis=0))
    def test_posinf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((1.,))/0.) == 1)
    def test_posinf_scalar(self):
        # Same check through a zero-d array.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array(1.,)/0.) == 1)
    def test_neginf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((-1.,))/0.) == 1)
    def test_neginf_scalar(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array(-1.)/0.) == 1)
    def test_ind(self):
        # 0/0 -> NaN, which is not infinite.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((0.,))/0.) == 0)
class TestIsposinf(object):
    """isposinf marks only the +inf entries."""

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            # (-1, 0, 1)/0 -> (-inf, nan, +inf); only the last is +inf.
            flags = isposinf(np.array((-1., 0, 1))/0.)
        assert_(flags[0] == 0)
        assert_(flags[1] == 0)
        assert_(flags[2] == 1)
class TestIsneginf(object):
    """isneginf marks only the -inf entries."""

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            # (-1, 0, 1)/0 -> (-inf, nan, +inf); only the first is -inf.
            flags = isneginf(np.array((-1., 0, 1))/0.)
        assert_(flags[0] == 1)
        assert_(flags[1] == 0)
        assert_(flags[2] == 0)
class TestNanToNum(object):
    """nan_to_num maps nan->0 and +/-inf to huge finite values, preserving
    the input's type (ndarray in/ndarray out, scalar in/scalar out)."""

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1))/0.)
        # BUG FIX: these checks were written as
        #     assert_all(x) and assert_all(y)
        # but assert_all returns None, so `and` short-circuited and the
        # right-hand assertion never executed. Run each one on its own line.
        assert_all(vals[0] < -1e10)
        assert_all(np.isfinite(vals[0]))
        assert_(vals[1] == 0)
        assert_all(vals[2] > 1e10)
        assert_all(np.isfinite(vals[2]))
        assert_equal(type(vals), np.ndarray)

        # perform the same test but in-place
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = np.array((-1., 0, 1))/0.
            result = nan_to_num(vals, copy=False)
        assert_(result is vals)
        assert_all(vals[0] < -1e10)
        assert_all(np.isfinite(vals[0]))
        assert_(vals[1] == 0)
        assert_all(vals[2] > 1e10)
        assert_all(np.isfinite(vals[2]))
        assert_equal(type(vals), np.ndarray)

    def test_array(self):
        vals = nan_to_num([1])
        assert_array_equal(vals, np.array([1], int))
        assert_equal(type(vals), np.ndarray)

    def test_integer(self):
        vals = nan_to_num(1)
        assert_all(vals == 1)
        assert_equal(type(vals), np.int_)

    def test_float(self):
        vals = nan_to_num(1.0)
        assert_all(vals == 1.0)
        # np.float_ was an alias of np.float64, removed in numpy 2.0.
        assert_equal(type(vals), np.float64)

    def test_complex_good(self):
        vals = nan_to_num(1+1j)
        assert_all(vals == 1+1j)
        # np.complex_ was an alias of np.complex128, removed in numpy 2.0.
        assert_equal(type(vals), np.complex128)

    def test_complex_bad(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(0+1.j)/0.
        vals = nan_to_num(v)
        # !! This is actually (unexpectedly) zero
        assert_all(np.isfinite(vals))
        assert_equal(type(vals), np.complex128)

    def test_complex_bad2(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(-1+1.j)/0.
        vals = nan_to_num(v)
        assert_all(np.isfinite(vals))
        assert_equal(type(vals), np.complex128)
        # Fixme
        #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
        # !! This is actually (unexpectedly) positive
        # !! inf. Comment out for now, and see if it
        # !! changes
        #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
class TestRealIfClose(object):
    """real_if_close drops tiny imaginary parts, subject to `tol`."""

    def test_basic(self):
        base = np.random.rand(10)
        # An imaginary part of 1e-15 is within the default tolerance: drop it.
        converted = real_if_close(base + 1e-15j)
        assert_all(isrealobj(converted))
        assert_array_equal(base, converted)
        # 1e-7 is too large by default, so the result stays complex...
        converted = real_if_close(base + 1e-7j)
        assert_all(iscomplexobj(converted))
        # ...unless the tolerance is loosened explicitly.
        converted = real_if_close(base + 1e-7j, tol=1e-6)
        assert_all(isrealobj(converted))
class TestArrayConversion(object):
    """asfarray coerces input to a floating ndarray and rejects array-valued
    dtype arguments."""

    def test_asfarray(self):
        converted = asfarray(np.array([1, 2, 3]))
        assert_equal(converted.__class__, np.ndarray)
        assert_(np.issubdtype(converted.dtype, np.floating))

        # previously this would infer dtypes from arrays, unlike every single
        # other numpy function
        assert_raises(TypeError,
                      asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
|
# Copyright 2019 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from newrelic_telemetry_sdk.metric import (
Metric,
GaugeMetric,
CountMetric,
SummaryMetric,
)
from utils import CustomMapping
def test_gauge_metric_defaults(freeze_time):
    """A gauge built from name+value alone carries only name/value/timestamp."""
    metric = GaugeMetric("name", 0)
    assert metric["name"] == "name"
    assert metric["value"] == 0
    # The freeze_time fixture pins the clock, making the timestamp deterministic.
    assert metric["timestamp"] == 2000
    assert type(metric["timestamp"]) is int
    for absent_key in ("type", "interval.ms", "attributes"):
        assert absent_key not in metric
def test_count_metric_defaults(freeze_time):
    """A count metric records its type and integer interval/timestamp."""
    metric = CountMetric("name", 0, 0)
    expected = {
        "type": "count",
        "name": "name",
        "value": 0,
        "interval.ms": 0,
        "timestamp": 2000,  # pinned by the freeze_time fixture
    }
    for key, value in expected.items():
        assert metric[key] == value
    assert type(metric["interval.ms"]) is int
    assert type(metric["timestamp"]) is int
    assert "attributes" not in metric
def test_summary_metric_defaults(freeze_time):
    """A summary metric aggregates count/sum/min/max under its value key."""
    metric = SummaryMetric("name", 0, 0, 0, 0, 0)
    expected = {
        "type": "summary",
        "name": "name",
        "value": {"count": 0, "sum": 0, "min": 0, "max": 0},
        "interval.ms": 0,
        "timestamp": 2000,  # pinned by the freeze_time fixture
    }
    for key, value in expected.items():
        assert metric[key] == value
    assert type(metric["interval.ms"]) is int
    assert type(metric["timestamp"]) is int
    assert "attributes" not in metric
@pytest.mark.parametrize(
    "arg_name,arg_value,metric_key,metric_value",
    (
        ("tags", {"foo": "bar"}, "attributes", {"foo": "bar"}),
        ("tags", CustomMapping(), "attributes", {"foo": "bar"}),
        ("end_time_ms", 1000, "timestamp", 1000),
    ),
)
def test_metric_optional(arg_name, arg_value, metric_key, metric_value):
    # Each optional constructor kwarg must surface under its documented metric
    # key with the expected value AND concrete type (so a CustomMapping passed
    # as tags must come back as a plain dict).
    kwargs = {arg_name: arg_value}
    metric = Metric("foo", 3, 0, **kwargs)
    assert metric.name == "foo"
    assert metric.value == 3
    assert metric.interval_ms == 0
    assert metric[metric_key] == metric_value
    assert type(metric[metric_key]) is type(metric_value)
@pytest.mark.parametrize(
    "kwargs, metric_key, attribute_name",
    (
        ({"name": "a", "value": 0, "interval_ms": None}, "interval.ms", "interval_ms"),
        (
            {"name": "a", "value": 0, "interval_ms": 0, "end_time_ms": None},
            "timestamp",
            "start_time_ms",
        ),
        (
            {"name": "a", "value": 0, "interval_ms": 0, "end_time_ms": None},
            "timestamp",
            "end_time_ms",
        ),
    ),
)
def test_metric_none(kwargs, metric_key, attribute_name):
    # Passing None for an optional argument must suppress the corresponding
    # key entirely and surface as None through the matching accessor.
    metric = Metric(**kwargs)
    assert metric_key not in metric
    assert getattr(metric, attribute_name) is None
@pytest.mark.parametrize(
    "attribute_name,attribute_value",
    (
        ("name", "foo"),
        ("value", 8),
        ("tags", {"tag": "value"}),
        ("interval_ms", 1000),
        ("interval_ms", None),
        ("start_time_ms", 1000),
        ("end_time_ms", 2000),
    ),
)
def test_metric_accessors(attribute_name, attribute_value):
    # The interval_ms=None row needs a metric constructed without an interval;
    # every other row uses a fixed 1000ms interval.
    if attribute_name == "interval_ms" and attribute_value is None:
        interval = None
    else:
        interval = 1000
    metric = CountMetric(
        "foo", 8, tags={"tag": "value"}, interval_ms=interval, end_time_ms=2000
    )
    value = getattr(metric, attribute_name)
    assert value == attribute_value
    assert type(value) is type(attribute_value)
    # Verify that end_time_ms uses default interval of 0
    if interval is None:
        assert metric.end_time_ms == 2000
def test_metric_copy():
    """copy() must return an equal but distinct metric object."""
    original = GaugeMetric("foo", "bar")
    duplicate = original.copy()
    assert duplicate == original
    assert duplicate is not original
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
'''
cls : aeroplane|| Recall: 0.9473684210526315 || Precison: 0.0006199030196164867|| AP: 0.826992691184208
____________________
cls : cow|| Recall: 0.9631147540983607 || Precison: 0.0005354526625668462|| AP: 0.8344652186720717
____________________
cls : dog|| Recall: 0.9652351738241309 || Precison: 0.0010528593384385115|| AP: 0.8848104631457077
____________________
cls : pottedplant|| Recall: 0.7708333333333334 || Precison: 0.000823124000293655|| AP: 0.4527288299945802
____________________
cls : diningtable|| Recall: 0.8980582524271845 || Precison: 0.00042887810125232407|| AP: 0.6700510019755388
____________________
cls : bird|| Recall: 0.9237472766884531 || Precison: 0.0009519832235409285|| AP: 0.7858394634006082
____________________
cls : tvmonitor|| Recall: 0.9415584415584416 || Precison: 0.0006423247726945524|| AP: 0.7532342429791412
____________________
cls : chair|| Recall: 0.8452380952380952 || Precison: 0.0014212159291838572|| AP: 0.5629849133883229
____________________
cls : train|| Recall: 0.925531914893617 || Precison: 0.0006022581218315107|| AP: 0.81368729431196
____________________
cls : horse|| Recall: 0.9454022988505747 || Precison: 0.0007562505602920185|| AP: 0.8603450848286776
____________________
cls : cat|| Recall: 0.9608938547486033 || Precison: 0.0007696817008175631|| AP: 0.8780370107529119
____________________
cls : sofa|| Recall: 0.9623430962343096 || Precison: 0.0005431753558979397|| AP: 0.748024582610825
____________________
cls : bottle|| Recall: 0.8571428571428571 || Precison: 0.0008744472166693132|| AP: 0.6253912291817303
____________________
cls : person|| Recall: 0.9149734982332155 || Precison: 0.009253199981238986|| AP: 0.8351147684067881
____________________
cls : car|| Recall: 0.9533721898417985 || Precison: 0.00259228066362385|| AP: 0.8841614814276471
____________________
cls : boat|| Recall: 0.8821292775665399 || Precison: 0.0005342347777629379|| AP: 0.6106671555293245
____________________
cls : motorbike|| Recall: 0.9323076923076923 || Precison: 0.0006731029825348658|| AP: 0.8421918380864666
____________________
cls : bicycle|| Recall: 0.9317507418397626 || Precison: 0.0007036524942688176|| AP: 0.8552669093308443
____________________
cls : bus|| Recall: 0.9765258215962441 || Precison: 0.00047651993823568495|| AP: 0.8420876315549962
____________________
cls : sheep|| Recall: 0.9049586776859504 || Precison: 0.000502333902950925|| AP: 0.7647489734437813
____________________
mAP is : 0.7665415392103065 USE_12_METRIC
cls : bicycle|| Recall: 0.9317507418397626 || Precison: 0.0007036524942688176|| AP: 0.8298982119397122
____________________
cls : sofa|| Recall: 0.9623430962343096 || Precison: 0.0005431753558979397|| AP: 0.7272523895735249
____________________
cls : bus|| Recall: 0.9765258215962441 || Precison: 0.00047651993823568495|| AP: 0.8137027123104137
____________________
cls : diningtable|| Recall: 0.8980582524271845 || Precison: 0.00042887810125232407|| AP: 0.6530525394835751
____________________
cls : person|| Recall: 0.9149734982332155 || Precison: 0.009253199981238986|| AP: 0.803256081733613
____________________
cls : car|| Recall: 0.9533721898417985 || Precison: 0.00259228066362385|| AP: 0.8577825832291308
____________________
cls : boat|| Recall: 0.8821292775665399 || Precison: 0.0005342347777629379|| AP: 0.5979719282542533
____________________
cls : chair|| Recall: 0.8452380952380952 || Precison: 0.0014212159291838572|| AP: 0.5599343653732526
____________________
cls : aeroplane|| Recall: 0.9473684210526315 || Precison: 0.0006199030196164867|| AP: 0.7917730109896329
____________________
cls : cat|| Recall: 0.9608938547486033 || Precison: 0.0007696817008175631|| AP: 0.8475644227001603
____________________
cls : sheep|| Recall: 0.9049586776859504 || Precison: 0.000502333902950925|| AP: 0.7327379110779253
____________________
cls : train|| Recall: 0.925531914893617 || Precison: 0.0006022581218315107|| AP: 0.7743045860493956
____________________
cls : horse|| Recall: 0.9454022988505747 || Precison: 0.0007562505602920185|| AP: 0.8223412836194737
____________________
cls : cow|| Recall: 0.9631147540983607 || Precison: 0.0005354526625668462|| AP: 0.8058877343148467
____________________
cls : tvmonitor|| Recall: 0.9415584415584416 || Precison: 0.0006423247726945524|| AP: 0.7310441973657807
____________________
cls : pottedplant|| Recall: 0.7708333333333334 || Precison: 0.000823124000293655|| AP: 0.4646864671975241
____________________
cls : dog|| Recall: 0.9652351738241309 || Precison: 0.0010528593384385115|| AP: 0.8525619478862897
____________________
cls : bird|| Recall: 0.9237472766884531 || Precison: 0.0009519832235409285|| AP: 0.7610720209528306
____________________
cls : bottle|| Recall: 0.8571428571428571 || Precison: 0.0008744472166693132|| AP: 0.6127328834288011
____________________
cls : motorbike|| Recall: 0.9323076923076923 || Precison: 0.0006731029825348658|| AP: 0.8119378019468331
____________________
mAP is : 0.7425747539713485 USE_07_METRIC
'''
# ------------------------------------------------
# Experiment identity: snapshot name and backbone network for this run.
VERSION = 'FPN_Res50_20181201'
NET_NAME = 'resnet_v1_50'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
# Repo root, resolved relative to the current working directory.
ROOT_PATH = os.path.abspath('../')
print (20*"++--")
print (ROOT_PATH)
GPU_GROUP = "1"  # presumably a CUDA_VISIBLE_DEVICES-style GPU id string -- TODO confirm
SHOW_TRAIN_INFO_INTE = 10  # presumably iterations between training-info prints -- TODO confirm
SMRY_ITER = 100  # presumably iterations between summary writes -- TODO confirm
SAVE_WEIGHTS_INTE = 10000  # presumably iterations between checkpoint saves -- TODO confirm
# Output / result directories, all rooted at ROOT_PATH.
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'
# Only ResNet and MobilenetV2 backbones have pretrained weights wired up.
if NET_NAME.startswith("resnet"):
    weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
    weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise NotImplementedError
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# NOTE(review): machine-specific absolute path; will not exist on other hosts.
test_annotate_path = '/home/yjr/DataSet/VOC/VOC_test/VOC2007/Annotations'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
IS_FILTER_OUTSIDE_BOXES = False
FIXED_BLOCKS = 0  # allow 0~3 (number of frozen backbone blocks -- presumably; TODO confirm)
USE_07_METRIC = False  # False -> the 12-point VOC metric (see the result log above)
# Loss weights for the two stages; all 1.0, i.e. an unweighted sum.
RPN_LOCATION_LOSS_WEIGHT = 1.
RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0
FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0
FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0
# Presumably smooth-L1 sigma values for RPN / Fast R-CNN box losses -- TODO confirm.
RPN_SIGMA = 3.0
FASTRCNN_SIGMA = 1.0
MUTILPY_BIAS_GRADIENT = None   # 2.0  # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = None    # 10.0  if None, will not clip
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 0.001  # 0.001  # 0.0003
DECAY_STEP = [60000, 80000]  # 50000, 70000
MAX_ITERATION = 150000
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'pascal'  # 'ship', 'spacenet', 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 600  # 600  # 600
IMG_MAX_LENGTH = 1000  # 1000  # 1000
CLASS_NUM = 20  # PASCAL VOC has 20 foreground classes
# --------------------------------------------- Network_config
BATCH_SIZE = 1
INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01)
BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001)
WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001
# ---------------------------------------------Anchor config
USE_CENTER_OFFSET = False
# NOTE(review): "LEVLES" is a misspelling of LEVELS; kept as-is because other
# modules import this name. One pyramid level per entry in the lists below.
LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]  # addjust the base anchor size for voc.
ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64]
ANCHOR_SCALES = [1.0]
ANCHOR_RATIOS = [0.5, 1., 2.0]
ROI_SCALE_FACTORS = [10., 10., 5.0, 5.0]
ANCHOR_SCALE_FACTORS = None
# --------------------------------------------FPN config
SHARE_HEADS = True
KERNEL_SIZE = 3
RPN_IOU_POSITIVE_THRESHOLD = 0.7
RPN_IOU_NEGATIVE_THRESHOLD = 0.3
TRAIN_RPN_CLOOBER_POSITIVES = False
RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
RPN_NMS_IOU_THRESHOLD = 0.7
RPN_TOP_K_NMS_TRAIN = 12000
# NOTE(review): "TARIN" is a typo for TRAIN; kept for import compatibility.
RPN_MAXIMUM_PROPOSAL_TARIN = 2000
RPN_TOP_K_NMS_TEST = 6000
RPN_MAXIMUM_PROPOSAL_TEST = 1000
# specific settings for FPN
# FPN_TOP_K_PER_LEVEL_TRAIN = 2000
# FPN_TOP_K_PER_LEVEL_TEST = 1000
# -------------------------------------------Fast-RCNN config
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 1.0
SHOW_SCORE_THRSHOLD = 0.5  # only show in tensorboard
FAST_RCNN_NMS_IOU_THRESHOLD = 0.3  # 0.6
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5
FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0  # 0.1 < IOU < 0.5 is negative
FAST_RCNN_MINIBATCH_SIZE = 256  # if is -1, that is train with OHEM
FAST_RCNN_POSITIVE_RATE = 0.25
ADD_GTBOXES_TO_TRAIN = False
|
<reponame>paledger/CSS
from django import forms
from django.core.mail import send_mail
from css.models import CUser, Room, Course, SectionType, Schedule, Section, Availability, FacultyCoursePreferences
from django.http import HttpResponseRedirect
from settings import DEPARTMENT_SETTINGS, HOSTNAME
import re
from django.forms import ModelChoiceField
from django.contrib.auth.models import User
# Login Form
class LoginForm(forms.Form):
    """Simple email/password login form."""
    email = forms.EmailField()
    password = forms.CharField(label='Password', widget=forms.PasswordInput)

    @staticmethod
    def validate_password(password):
        """Reject passwords that are not 8-32 chars containing a letter, a
        digit and one of the allowed special characters.

        Raises:
            forms.ValidationError: if the password fails the policy.
        """
        # BUG FIX: `ValidationError` was referenced unqualified but never
        # imported, so a bad password raised NameError instead of a
        # validation error. django.forms re-exports ValidationError.
        if re.match(r'^(?=.*\d)(?=.*[A-Za-z])(?=.*[-._!@#$%^&*?+])[A-Za-z0-9-._!@#$%^&*?+]{8,32}$', password) is None:
            raise forms.ValidationError("Attempted CUser creation with invalid password")
# Invite Form
class InviteUserForm(forms.Form):
    """Collects a name and email, then mails a pre-filled registration link."""
    email = forms.EmailField()
    first_name = forms.CharField()
    last_name = forms.CharField()
    def send_invite(self, usertype, request):
        """Email a /register link carrying name, email and user type as
        query parameters."""
        first_name = self.cleaned_data['first_name']
        last_name = self.cleaned_data['last_name']
        name = first_name + ' ' + last_name
        email = self.cleaned_data['email']
        # Build an absolute link from the request's Host header, defaulting
        # the scheme to http:// when none is present.
        host = request.META['HTTP_HOST']
        if not re.search(r'http', host):
            host = 'http://' + host
        # NOTE(review): the query-string values are not URL-encoded; a name
        # containing spaces or '&' would corrupt the link. Consider
        # urllib.parse.urlencode.
        link = host + '/register?first_name=' + first_name +'&last_name=' + last_name +'&user_type='+ usertype + '&email=' + email
        # NOTE(review): '<EMAIL>' is an anonymized placeholder for the real
        # sender address -- restore before deploying.
        send_mail('Invite to register for CSS', name + ", you have been invited to register for CSS. Please register using the following link:\n\n "
            + link, '<EMAIL>', [self.cleaned_data['email']])
# Registration Form
# @TODO on load, pull fields from query string -> show failure if field not able to be loaded:
# Fields to pull: email, first_name, last_name, user_type
class RegisterUserForm(forms.Form):
    """Registration form; GET requests pre-fill the fields from the invite link."""
    first_name = forms.CharField()
    last_name = forms.CharField()
    email = forms.EmailField()
    user_type = forms.CharField()
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Confirm Password', widget=forms.PasswordInput)

    def __init__(self, *args, **kwargs):
        # BUG FIX: the request method was compared with `is`, which tests
        # object identity and only happens to work when CPython interns the
        # strings; use `==` for string equality.
        if kwargs.pop('request') == "GET":
            self.first_name = kwargs.pop('first_name')
            self.last_name = kwargs.pop('last_name')
            self.user_type = kwargs.pop('user_type')
            self.email = kwargs.pop('email')
            self.declared_fields['first_name'].initial = self.first_name
            self.declared_fields['last_name'].initial = self.last_name
            self.declared_fields['email'].initial = self.email
            # The invite fixes the user type; the field is shown but locked.
            self.declared_fields['user_type'].initial = self.user_type
            self.declared_fields['user_type'].disabled = True
        super(RegisterUserForm, self).__init__(*args, **kwargs)

    def save(self):
        """Create and persist a CUser from the cleaned form data."""
        # BUG FIX: the password kwarg had been mangled into an invalid
        # expression; take the value from the 'password1' field.
        # NOTE(review): password2 is never compared against password1 --
        # confirm whether a match check belongs in clean().
        user = CUser.create(email=self.cleaned_data['email'],
                            password=self.cleaned_data['password1'],
                            user_type=self.cleaned_data['user_type'],
                            first_name=self.cleaned_data['first_name'],
                            last_name=self.cleaned_data['last_name'])
        user.save()
        return user
# Edit User Form
class EditUserForm(forms.Form):
    """Update name and password for the user identified by the hidden email."""
    user_email = forms.CharField(widget=forms.HiddenInput(), initial='<EMAIL>')
    first_name = forms.CharField()
    last_name = forms.CharField()
    password = forms.CharField()

    def save(self):
        data = self.cleaned_data
        user = CUser.get_user(email=data['user_email'])
        user.set_first_name(data['first_name'])
        user.set_last_name(data['last_name'])
        user.set_password(data['password'])
        user.save()
        return user
# Delete Form
class DeleteUserForm(forms.Form):
    """Delete the auth User whose username matches the confirmed email."""
    email = forms.CharField(label='Confirm email')

    def delete_user(self):
        # The previous version first bound the email to an unused local;
        # filter().delete() is simply a no-op when nothing matches.
        User.objects.filter(username=self.cleaned_data['email']).delete()
class AddRoomForm(forms.Form):
    """Create a Room from its descriptive fields."""
    name = forms.CharField()
    description = forms.CharField()
    capacity = forms.IntegerField()
    notes = forms.CharField()
    equipment = forms.CharField()

    def save(self):
        field_names = ('name', 'description', 'capacity', 'notes', 'equipment')
        values = {field: self.cleaned_data[field] for field in field_names}
        room = Room.objects.create(**values)
        room.save()
        return room
class EditRoomForm(forms.Form):
    """Edit an existing Room, looked up via the hidden name field."""
    name = forms.CharField(widget=forms.HiddenInput(), initial='defaultRoom')
    description = forms.CharField()
    capacity = forms.IntegerField()
    notes = forms.CharField()
    equipment = forms.CharField()

    def save(self):
        room = Room.get_room(self.cleaned_data['name'])
        for attr in ('name', 'description', 'capacity', 'notes', 'equipment'):
            setattr(room, attr, self.cleaned_data[attr])
        room.save()
class DeleteRoomForm(forms.Form):
    """Delete the Room named by the hidden field."""
    roomName = forms.CharField(widget=forms.HiddenInput(), initial='defaultRoom')

    def deleteRoom(self):
        Room.objects.filter(name=self.cleaned_data['roomName']).delete()
class EditCourseSectionTypeForm(forms.Form):
    """Edit the work units/hours of a course's section type."""
    work_units = forms.IntegerField()
    work_hours = forms.IntegerField()
    def save(self):
        # NOTE(review): this form declares no 'name' field, so the lookup
        # below raises KeyError, and the values are only bound to locals --
        # nothing is ever persisted. Confirm the intended behavior before
        # relying on save().
        name = self.cleaned_data['name']
        work_units = self.cleaned_data['work_units']
        work_hours = self.cleaned_data['work_hours']
class AddCourseSectionTypeForm(forms.Form):
    """Attach a section type (with workload numbers) to a course."""
    course = forms.CharField(widget=forms.HiddenInput(), initial='defaultCourse')
    # NOTE(review): `choices` is handed the method object itself, not a call
    # result. Django accepts a zero-argument callable here -- confirm
    # get_all_section_types_list can be called with no arguments.
    name = forms. MultipleChoiceField(
        required = True,
        widget = forms.RadioSelect,
        choices = SectionType.get_all_section_types_list
    )
    work_units = forms.IntegerField()
    work_hours = forms.IntegerField()
class AddCourseForm(forms.Form):
    """Create a Course record from name/description/equipment requirements."""
    course_name = forms.CharField()
    description = forms.CharField()
    equipment_req = forms.CharField()

    def save(self):
        data = self.cleaned_data
        course = Course(name=data['course_name'],
                        description=data['description'],
                        equipment_req=data['equipment_req'])
        course.save()
class DeleteCourseForm(forms.Form):
    """Delete the Course named by the hidden field."""
    course_name = forms.CharField(widget=forms.HiddenInput(), initial='defaultCourse')

    def save(self):
        Course.get_course(name=self.cleaned_data['course_name']).delete()
# Edit Course Form
class EditCourseForm(forms.Form):
    """Update description and equipment requirements of an existing Course."""
    course_name = forms.CharField(widget=forms.HiddenInput(), initial='defaultcourse')
    equipment_req = forms.CharField()
    description = forms.CharField()

    def save(self):
        data = self.cleaned_data
        course = Course.get_course(name=data['course_name'])
        course.set_equipment_req(data['equipment_req'])
        course.set_description(data['description'])
class AddSectionTypeForm(forms.Form):
    """Register a new SectionType by name."""
    section_type_name = forms.CharField()

    def save(self):
        SectionType.create(name=self.cleaned_data['section_type_name'])
# Custom ModelChoiceField for faculty full names
class FacultyModelChoiceField(ModelChoiceField):
    """ModelChoiceField that labels each faculty option with the full name."""

    def label_from_instance(self, obj):
        return " ".join((obj.user.first_name, obj.user.last_name))
class AddSectionForm(forms.Form):
    """Create a Section for a given term/course/time slot.

    Fixes over the previous version:
    * ``input_formats=('%I:%M %p')`` passed a bare string (parentheses
      without a trailing comma do not make a tuple), so the format would be
      iterated character by character; it is now a one-element list.
    * A ``days = forms.CharField(...)`` line was immediately shadowed by the
      ChoiceField on the next line; the dead definition is removed.
    * ``conflict_reason``/``fault_reason`` were assigned the undefined name
      ``null`` (a NameError at runtime); use ``None``.
    * The course lookup filtered on a non-existent ``course`` field; Course
      rows are keyed by ``name`` (see AddCourseForm / DeleteCourseForm).
    """
    academic_term = forms.ModelChoiceField(label='Term', queryset=Schedule.objects.values_list('academic_term', flat=True), empty_label=" ")
    course = forms.ModelChoiceField(label='Course', queryset=Course.objects.values_list('name', flat=True), empty_label=" ")
    start_time = forms.TimeField(label='Start Time', input_formats=['%I:%M %p'])
    end_time = forms.TimeField(label='End Time', input_formats=['%I:%M %p'])
    days = forms.ChoiceField(label='Days', choices=[('MWF', 'MWF'), ('TR', 'TR')])
    faculty = FacultyModelChoiceField(label='Faculty', queryset=CUser.objects.filter(user_type='faculty'))
    room = forms.ModelChoiceField(label='Room', queryset=Room.objects.values_list('name', flat=True), empty_label=" ")
    capacity = forms.IntegerField()
    section_type = forms.ModelChoiceField(label='Section Type', queryset=SectionType.objects.values_list('name', flat=True), empty_label=" ")

    def save(self):
        """Persist a new, empty, conflict-free Section."""
        section = Section.create(schedule=Schedule.objects.get(academic_term=self.cleaned_data['academic_term']),
                                 course=Course.objects.get(name=self.cleaned_data['course']),
                                 start_time=self.cleaned_data['start_time'],
                                 end_time=self.cleaned_data['end_time'],
                                 days=self.cleaned_data['days'],
                                 faculty=CUser.get_cuser_by_full_name(self.cleaned_data['faculty']),
                                 room=Room.objects.get(name=self.cleaned_data['room']),
                                 capacity=self.cleaned_data['capacity'],
                                 students_enrolled=0,
                                 students_waitlisted=0,
                                 conflict='n',
                                 conflict_reason=None,
                                 fault='n',
                                 fault_reason=None)
        section.save()
class CoursePrefModelChoiceField(forms.ModelChoiceField):
    """ModelChoiceField that labels each course option with its name."""

    def label_from_instance(self, obj):
        return obj.name
def _course_name_choices():
    """Build the course dropdown choices at render time.

    Computed lazily (via a callable) so that courses added after server
    start-up still appear; the original evaluated the queryset once at
    class-definition (import) time, freezing the choice list.
    """
    names = Course.objects.values_list('name', flat=True)
    return [('', 'None')] + [(name, name) for name in names]


class CoursePrefForm(forms.Form):
    """Form recording a faculty member's preference for a course."""
    # ChoiceField accepts a callable for choices; it is re-evaluated per render.
    course = forms.ChoiceField(choices=_course_name_choices,
                               required=False, widget=forms.Select())
    comments = forms.CharField()
    rank = forms.IntegerField()

    def save(self, email):
        """Persist this preference for the faculty member identified by email."""
        course_pref = FacultyCoursePreferences.create(
            faculty=email,
            course=self.cleaned_data['course'],
            comments=self.cleaned_data['comments'],
            rank=self.cleaned_data['rank'])
        course_pref.save()
class AddAvailabilityForm(forms.Form):
    """Form recording a faculty member's availability over a day/time range."""
    DAYS = ('Mon/Wed/Fri', 'Mon/Wed/Fri',), ('Tue/Thu', 'Tue/Thu')
    day = forms.ChoiceField(label='Day', choices=DAYS)
    start_time = forms.TimeField(label='Start Time')
    end_time = forms.TimeField(label='End Time')
    level = forms.ChoiceField(label='Type', choices=[('Preferred', 'Preferred'), ('Unavailable', 'Unavailable'), ('Available', 'Available')])

    def save(self, email):
        """Store the availability range for the faculty member with this email."""
        # BUG FIX: removed the duplicated 'faculty = faculty =' assignment typo.
        faculty = CUser.get_faculty(email=email)
        Availability.setRange(faculty=faculty,
                              day_of_week=self.cleaned_data['day'],
                              start_time=self.cleaned_data['start_time'],
                              end_time=self.cleaned_data['end_time'],
                              level=self.cleaned_data['level'])
class AddScheduleForm(forms.Form):
    """Form that creates a new active Schedule for an academic term."""
    academic_term = forms.CharField(max_length=16)

    def save(self):
        term = self.cleaned_data['academic_term']
        new_schedule = Schedule.create(academic_term=term, state="active")
        new_schedule.save()
class UploadForm(forms.Form):
    # Minimal form wrapping a single uploaded file.
    file = forms.FileField()
|
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch
import torch.optim as optim
class Net(nn.Module):
    """Policy-value network for an 8x8 board game (AlphaZero-style).

    Input: (N, 4, 8, 8) state planes. Returns a (N, 64) move-probability
    distribution (policy head) and an (N, 1) value in [-1, 1] (value head).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Shared convolutional trunk.
        self.conv1 = nn.Conv2d(4, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        # Policy head: 1x1 conv, then fully connected over the flattened board.
        self.p_fc1 = nn.Conv2d(128, 4, 1)
        self.p_fc2 = nn.Linear(4 * 8 * 8, 64)
        # Value head: 1x1 conv, then fully connected to a single scalar.
        self.v_fc1 = nn.Conv2d(128, 2, 1)
        self.v_fc2 = nn.Linear(2 * 8 * 8, 1)

    def forward(self, x):
        a1 = F.relu(self.conv1(x))
        a2 = F.relu(self.conv2(a1))
        a3 = F.relu(self.conv3(a2))
        p1 = F.relu(self.p_fc1(a3))
        p_act = p1.view(-1, 4 * 8 * 8)
        # BUG FIX: softmax must normalize over the move dimension (dim=1),
        # not across the batch (dim=0). With dim=0, each sample's
        # "probabilities" depended on the other samples in the batch and did
        # not sum to 1 per sample.
        p_out = F.softmax(self.p_fc2(p_act), dim=1)
        v1 = F.relu(self.v_fc1(a3))
        v_act = v1.view(-1, 2 * 8 * 8)
        v_out = torch.tanh(self.v_fc2(v_act))
        return p_out, v_out
class PolicyValueNet():
    """Wraps Net with inference helpers and a single-step training routine."""

    def __init__(self, board_width=8, board_height=8, model_file=None):
        self.board_width = board_width
        self.board_height = board_height
        self.model = Net()
        self.v_loss = nn.MSELoss()
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.01)
        if model_file:
            self.model.load_state_dict(torch.load(model_file))

    def policy_value_fn(self, board):
        """Evaluate a board position.

        Returns (iterable of (move, prob) over legal moves, value array).
        NOTE(review): assumes board exposes .available (legal move indices)
        and .current_state() reshapable to (1, 4, W, H) -- confirm against
        the board implementation.
        """
        legal_positions = board.available
        current_state = board.current_state()
        x = torch.FloatTensor(
            current_state.reshape(-1, 4, self.board_width, self.board_height))
        # Inference only: no autograd graph needed.
        with torch.no_grad():
            probs, value = self.model(x)
        probs = probs.numpy()
        value = value.numpy()
        probs = zip(legal_positions, probs.flatten()[legal_positions])
        return probs, value

    def policy_value(self, x):
        """Evaluate a batch of raw states; returns (probs, values) as numpy."""
        x = torch.FloatTensor(np.reshape(
            x, (-1, 4, self.board_width, self.board_height)))
        with torch.no_grad():
            probs, value = self.model(x)
        return probs.numpy(), value.numpy()

    def train_step(self, state_batch, probs_batch, winner_batch, lr):
        """Run one optimization step; returns (policy_loss, value_loss) floats."""
        state_batch = torch.FloatTensor(state_batch)
        # BUG FIX: the target probabilities are reals in [0, 1]; converting
        # them with LongTensor truncated every entry to 0, zeroing the policy
        # loss. They must stay floating point.
        probs_batch = torch.FloatTensor(probs_batch)
        winner_batch = torch.FloatTensor(winner_batch)
        # Apply the caller-supplied learning rate before stepping.
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
        self.optimizer.zero_grad()
        p_out, v_out = self.model(state_batch)
        # Cross-entropy of the network policy against the target distribution.
        p_loss = -torch.mean(torch.sum(probs_batch * torch.log(p_out), 1))
        v_loss = self.v_loss(v_out, winner_batch)
        loss = p_loss + v_loss
        loss.backward()
        self.optimizer.step()
        return p_loss.item(), v_loss.item()

    def save_model(self, dir):
        """Serialize the network weights to the given file path.

        (Parameter name kept as 'dir' for caller compatibility, though it
        shadows the builtin.)
        """
        torch.save(self.model.state_dict(), dir)
if __name__ == '__main__':
    # Smoke test: build an untrained network and write its weights to disk.
    net = PolicyValueNet()
    net.save_model("./testmodel")
|
#%%
import argparse
import itertools as it
import os
import requests
import yaml
from bs4 import BeautifulSoup
from bokeh.models import ColorBar, ColumnDataSource, HoverTool, LabelSet, Legend
from bokeh.models import LinearColorMapper, FactorRange
from bokeh.palettes import Magma, d3
from bokeh.plotting import figure, output_file, show
import numpy as np
import pandas as pd
from tqdm import tqdm
from utils import bokeh_spikein_lineplot, bokeh_xprs_distribution, bokeh_num_nonexpressed_genes
#%%
def query_spikeins(accession):
    """Scrape the spike-ins ID for an ENCODE experiment accession.

    Fetches the experiment page and returns the linked spike-ins
    identifier, or None when the page carries no 'spikeins' entry.
    """
    url = f'https://www.encodeproject.org/experiments/{accession}/'
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    for div in soup.find_all('div'):
        if div.get('data-test') == 'spikeins':
            return div.find('a').get_text()
    return None
def process_meta_data(dataset_dir):
    """
    Process Meta Data

    Walks every tissue subdirectory of dataset_dir, reads each non-ENCFF
    metadata TSV, annotates it with the spike-ins ID scraped from the ENCODE
    website, keeps only samples using spike-in set ENCSR884LPM, and writes the
    combined table to meta.tsv / meta_tissue.tsv. Returns the combined frame.
    """
    meta = []
    tissue_dirs = os.listdir(dataset_dir)
    for tissue in tqdm(tissue_dirs):
        files = os.listdir(os.path.join(dataset_dir, tissue))
        for file_name in files:
            # ENCFF* files are the data files themselves; only the remaining
            # files are per-tissue metadata tables.
            if file_name[0:5] == 'ENCFF':
                continue
            tmp_file = os.path.join(dataset_dir, tissue, file_name)
            tmp_meta = pd.read_csv(tmp_file, sep='\t')
            # One web query per unique experiment accession (network-bound).
            tmp_accessions = pd.Series(tmp_meta.accession.unique())
            tmp_spikeins = tmp_accessions.apply(query_spikeins)
            tmp_spikeins = pd.concat(
                [tmp_accessions, tmp_spikeins],
                axis=1
            )
            tmp_spikeins.columns = ['accession', 'spikeins']
            tmp_meta = tmp_meta.merge(tmp_spikeins, on='accession')
            # NOTE(review): 'ENCSR884LPM' hard-codes one spike-in reference set;
            # presumably the only set comparable across these samples — confirm.
            meta.append(tmp_meta.loc[tmp_meta.spikeins == 'ENCSR884LPM'])
    meta = pd.concat(meta)
    meta.index = meta['file_accession']
    meta.to_csv(os.path.join(dataset_dir, 'meta.tsv'), sep='\t')
    tissue = meta['biosample_name']
    tissue.to_csv(os.path.join(dataset_dir, 'meta_tissue.tsv'), sep='\t', header=False)
    return meta
def read_xprs(dataset_dir, meta):
    """Load per-sample expression tables and split genes from spike-ins.

    For each (file_accession, biosample_name) row of meta, reads the sample's
    TSV, keeps mouse genes (ENSMUSG*) and spike-ins (gSpikein*), and returns
    (xprs_genes, xprs_spikeins) with one column per sample. Spike-ins whose
    median absolute cross-sample correlation is <= 0.9 are dropped.
    """
    xprs = []
    for sample, tissue in tqdm(list(zip(meta.file_accession, meta.biosample_name))):
        file = os.path.join(
            dataset_dir,
            tissue.replace(' ', '_'),
            f'{sample}.tsv'
        )
        tmp_xprs = pd.read_csv(file, sep='\t', index_col=0)
        selected_genes = tmp_xprs.index.str.match('gSpikein|ENSMUSG')
        tmp_xprs = tmp_xprs.loc[selected_genes, 'expected_count']
        # Strip transcript-version suffixes (e.g. ENSMUSG...\.1) from the index.
        tmp_xprs.index = tmp_xprs.index.str.replace('\.\S+', '', regex=True)
        xprs.append(tmp_xprs)
    xprs = pd.concat(xprs, axis=1)
    xprs.columns = meta.file_accession
    xprs_spikeins = xprs.loc[xprs.index.str.match('gSpikein')]
    xprs_genes = xprs.loc[xprs.index.str.match('ENSMUSG')].astype(np.uint32)
    # Keep only spike-ins that behave consistently across samples.
    selected_spikeins = xprs_spikeins.T.corr().fillna(0).abs().median(axis=1) > 0.9
    xprs_spikeins = xprs_spikeins.loc[selected_spikeins]
    return xprs_genes, xprs_spikeins
def create_validation_dataset(outdir, xprs_spikeins, xprs_genes):
    """Write raw counts and spike-in-normalized expression tables to outdir.

    The per-sample scaling factor is that sample's total spike-in count
    divided by the mean total, so the corrected matrix is depth-normalized
    across samples.
    """
    totals = xprs_spikeins.sum()
    factors = totals / totals.mean()
    normalized = (xprs_genes / factors).astype(np.float32)
    xprs_genes.to_csv(os.path.join(outdir, 'xprs_count.tsv'), sep='\t')
    normalized.to_csv(os.path.join(outdir, 'xprs_validation.tsv'), sep='\t')
    return
#%%
def main():
    """Parse CLI args, build meta/expression tables, and emit QC plots."""
    description = """Extract the Count Data, Meta Data of Samples and Create Validation
    Dataset for ENCODE Data"""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        '-d', '--dataset',
        metavar='[path]',
        type=str,
        default=None,
        help='Path to the directory of count data.',
        required=True,
    )
    parser.add_argument(
        '-c', '--config',
        metavar='[config.yaml]',
        type=str,
        default=None,
        help='Path to the config data.',
        required=True,
    )
    args = parser.parse_args()
    # args.config is replaced in-place by the parsed YAML dictionary.
    args.config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
    dataset_dir = args.dataset
    out_dir = args.config['out_dir']
    os.makedirs(out_dir, exist_ok=True)
    meta = process_meta_data(dataset_dir)
    xprs_genes, xprs_spikeins = read_xprs(dataset_dir, meta)
    # Column order of samples sorted by total spike-in counts (for the lineplot).
    ordered = xprs_spikeins.sum().argsort().values
    # NOTE(review): the validation dataset is written into dataset_dir, not the
    # configured out_dir used for the plots below -- possibly intentional
    # (meta.tsv also lives in dataset_dir), but worth confirming.
    create_validation_dataset(dataset_dir, xprs_spikeins, xprs_genes)
    bokeh_spikein_lineplot(out_dir, xprs_spikeins.iloc[:, ordered], 'ENCODE Spikeins Expression')
    bokeh_xprs_distribution('ENCODE', out_dir, xprs_genes, meta.biosample_name.iloc[::-1])
    bokeh_num_nonexpressed_genes('ENCODE', out_dir, xprs_genes, meta.biosample_name.iloc[::-1])
if __name__ == '__main__':
main() |
<gh_stars>1-10
# -*- coding: utf-8 -*-
import numpy as np
from ..patch.pint import ureg
from . import compound
from . import types
from ..sources import emspectrum
from ..utils import instance
from ..simulation.classfactory import with_metaclass
from ..math import noisepropagation
class Scintillator(with_metaclass()):
    """
    Class representing a scintillator: an X-ray absorbing layer that converts
    absorbed X-rays into visible-light photons.
    """
    def __init__(
        self, thickness=None, material=None, nvisperkeV=None, visspectrum=None
    ):
        """
        Args:
            thickness(num): thickness in micron
            material(Compound|Mixture): scintillator composition
            nvisperkeV(num): number of VIS photons generated per keV
            visspectrum(visspectrum.discrete): VIS visspectrum
        """
        self.thickness = thickness
        self.nvisperkeV = nvisperkeV
        self.material = material
        self.visspectrum = visspectrum
    # Explicit pickling state (mirrors the constructor arguments).
    def __getstate__(self):
        return {
            "thickness": self.thickness,
            "nvisperkeV": self.nvisperkeV,
            "material": self.material,
            "visspectrum": self.visspectrum,
        }
    def __setstate__(self, state):
        self.thickness = state["thickness"]
        self.nvisperkeV = state["nvisperkeV"]
        self.material = state["material"]
        self.visspectrum = state["visspectrum"]
    # Value-based equality over all four attributes.
    # NOTE(review): no __hash__ is defined alongside __eq__; in Python 3 this
    # normally makes instances unhashable unless the metaclass provides one --
    # confirm against with_metaclass().
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return (
                self.thickness == other.thickness
                and self.nvisperkeV == other.nvisperkeV
                and self.material == other.material
                and self.visspectrum == other.visspectrum
            )
        else:
            return False
    def __ne__(self, other):
        return not self.__eq__(other)
    @staticmethod
    def doping(material, dopants, ftype):
        """Add dopant elements to the material in-place.

        Args:
            material(Compound|Mixture): host material, modified in-place
            dopants(dict): element symbol -> fraction
            ftype: fraction type (e.g. types.fraction.mass)
        """
        material.addelements(list(dopants.keys()), list(dopants.values()), ftype)
    def absorption(self, energy):
        """Fraction of X-rays absorbed at the given energy (Beer-Lambert).

        The 1e-4 factor converts the micron thickness to cm (assumes density
        in g/cm^3 and mass coefficients in cm^2/g -- TODO confirm units).
        """
        return 1 - np.exp(
            -self.material.density
            * self.thickness
            * 1e-4
            * self.material.mass_abs_coeff(energy)
        )
    def attenuation(self, energy):
        """Fraction of X-rays attenuated (absorbed or scattered) at energy."""
        return 1 - self.transmission(energy)
    def transmission(self, energy):
        """Fraction of X-rays transmitted at energy (Beer-Lambert, same units
        convention as absorption())."""
        return np.exp(
            -self.material.density
            * self.thickness
            * 1e-4
            * self.material.mass_att_coeff(energy)
        )
    def propagate(self, N, energy, forward=True):
        """Error propagation of a number of photons.
        Args:
            N(num|array): incomming number of photons with uncertainties
            energy(num|array): associated energies
            forward(bool): propagate X-rays -> VIS photons when True,
                invert the chain when False
        Returns:
            numpy.array,dict
        """
        # Absorption of X-rays
        probsuccess = self.absorption(energy)
        # Fluorescence of visible photons
        # https://doi.org/10.1088/0031-9155/57/15/4885
        # http://arizona.openrepository.com/arizona/handle/10150/577317
        # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4669903/
        gain = energy * self.nvisperkeV
        N, probsuccess, gain = self.propagate_broadcast(N, probsuccess, gain)
        if instance.isuscalar(N):
            # Full noise propagation: Bernoulli absorption compounded with a
            # Poisson photon-generation process; the order of the two
            # processes is reversed for the inverse (backward) direction.
            if forward:
                proc1 = noisepropagation.bernouilli(probsuccess)
                proc2 = noisepropagation.poisson(gain)
            else:
                proc2 = noisepropagation.bernouilli(probsuccess)
                proc1 = noisepropagation.poisson(gain)
            Nout = noisepropagation.compound(N, proc1, forward=forward)
            Nout = noisepropagation.compound(Nout, proc2, forward=forward)
        else:
            # Plain scalars/arrays without uncertainties: simple mean scaling.
            if forward:
                Nout = N * (probsuccess * gain)
            else:
                Nout = N / (probsuccess * gain)
        return Nout
    def get_nrefrac(self):
        """Refractive index of the scintillator material."""
        return self.material.nrefrac
class GGG_ID21(Scintillator):
    """
    Eu doped GGG
    """

    aliases = ["GGG ID21"]

    def __init__(self, thickness=None):
        """
        Args:
            thickness(num): thickness in micron
        """
        # Gd3Ga5O12 garnet host, doped with 3 wt% Eu.
        host = compound.Compound(
            ["Gd", "Ga", "O"],
            [3, 5, 12],
            types.fraction.mole,
            7.08,
            nrefrac=1.8,
            name="GGG",
        )
        Scintillator.doping(host, {"Eu": 0.03}, types.fraction.mass)
        emission = emspectrum.Discrete(
            ureg.Quantity([595, 610, 715], "nm"), [1, 1, 1]
        )
        # Light yield from:
        # http://www.esrf.eu/files/live/sites/www/files/Industry/documentation/F2_Scintillators.pdf
        super(GGG_ID21, self).__init__(
            thickness=thickness,
            material=host,
            nvisperkeV=32,
            visspectrum=emission,
        )
class LSO_ID21(Scintillator):
    """
    Tb doped LSO
    """

    aliases = ["LSO ID21"]

    def __init__(self, thickness=None):
        """
        Args:
            thickness(num): thickness in micron
        """
        # Lu2SiO5 host, doped with 3 wt% Tb.
        host = compound.Compound(
            ["Lu", "Si", "O"],
            [2, 1, 5],
            types.fraction.mole,
            7.4,
            nrefrac=1.82,
            name="LSO",
        )
        Scintillator.doping(host, {"Tb": 0.03}, types.fraction.mass)
        emission = emspectrum.Discrete(ureg.Quantity(550, "nm"), 1)
        # Light yield from:
        # http://www.esrf.eu/files/live/sites/www/files/Industry/documentation/F2_Scintillators.pdf
        super(LSO_ID21, self).__init__(
            thickness=thickness,
            material=host,
            nvisperkeV=40,
            visspectrum=emission,
        )
# Module-level aliases for the class-factory hooks exposed on Scintillator
# (provided by with_metaclass from simulation.classfactory).
factory = Scintillator.factory
registry = Scintillator.clsregistry
|
<reponame>henrystoldt/MAPLEAF
#Created by: <NAME>
# August 2020
import math
import unittest
from MAPLEAF.SimulationRunners import Simulation
from MAPLEAF.ENV import (FlatEarth, NoEarth, SphericalEarth,
WGS84)
from MAPLEAF.Motion import Vector
from test.testUtilities import assertVectorsAlmostEqual
from MAPLEAF.IO import SimDefinition
from MAPLEAF.SimulationRunners import Simulation
from test.testUtilities import assertVectorsAlmostEqual
class TestEarthModels(unittest.TestCase):
    """Regression tests for the MAPLEAF earth models (coordinate transforms,
    rotation rate, gravity, and launch-state initialization)."""
    def setUp(self):
        self.WGS84 = WGS84()
        self.roundEarth = SphericalEarth()
        self.noEarth = NoEarth()
        self.flatEarth = FlatEarth()
    def checkCoordinateTransforms(self, lat, lon, h, x, y, z, earthModel, n=6):
        '''
            Provide a correct pair of lat/lon/h and x/y/z coordinates. This function checks that conversions
            work in both directions using the given earth model. n is the number
            of decimal places required to match.
        '''
        x2, y2, z2 = earthModel.geodeticToCartesian(lat, lon, h)
        # Compare to results obtained from pyproj
        self.assertAlmostEqual(x2, x, n)
        self.assertAlmostEqual(y2, y, n)
        self.assertAlmostEqual(z2, z, n)
        # Check conversion back to latitude and longitude
        lat2, lon2, h2 = earthModel.cartesianToGeodetic(x, y, z)
        self.assertAlmostEqual(lat, lat2, n)
        self.assertAlmostEqual(lon, lon2, n)
        self.assertAlmostEqual(h, h2, n)
    def test_WGS84coordinateConversions(self):
        '''
            Check that we can convert to cartesian, and back, getting the same parameters we started with
            Comparison x, y, z results obtained from PROJ, through pyproj
            pyproj is not used in simulations because it is very slow for single requests compared the method currently implemented
        '''
        # University of Calgary
        lat, lon, h = 51.075339, -114.131767, 0
        x, y, z = -1641688.5729170945, -3664588.431914631, 4938814.914854015
        self.checkCoordinateTransforms(lat, lon, h, x, y, z, self.WGS84)
        # Greenland
        lat, lon, h = 81.136427, -34.544050, 500
        x, y, z = 812209.7821296552, -559136.2029726238, 6280831.753924148
        self.checkCoordinateTransforms(lat, lon, h, x, y, z, self.WGS84)
        # Antarctica
        lat, lon, h = -86.618022, 38.395443, 1000
        x, y, z = 295927.0631906039, 234510.58046597126, -6346605.447459776
        self.checkCoordinateTransforms(lat, lon, h, x, y, z, self.WGS84)
        # Netherlands
        lat, lon, h = 51.624678, 5.261977, -500
        x, y, z = 3950752.939746138, 363856.04428183084, 4976593.902843423
        self.checkCoordinateTransforms(lat, lon, h, x, y, z, self.WGS84)
    def test_RoundEarthCoordinateConversions(self):
        # Looser tolerance (n=3): the spherical model is not expected to match
        # geodetic references to micrometer precision.
        lat, lon, h = 50.887093455, -114.131767, -5762.965219
        # Position radius = 6365244.216
        x, y, z = -1641690.425, -3664592.566, 4938820.487
        self.checkCoordinateTransforms(lat, lon, h, x, y, z, self.roundEarth, n=3)
    def test_rotationRate(self):
        # One full revolution should take one sidereal day for both models.
        secondsInOneDay = 60 * 60 * 24 - 235.9095 # Sidereal day - not exactly right but close
        secondsToRotate = 2*math.pi / self.WGS84.rotationRate
        self.assertAlmostEqual(secondsToRotate, secondsInOneDay, 1)
        secondsToRotate = 2*math.pi / self.roundEarth.rotationRate
        self.assertAlmostEqual(secondsToRotate, secondsInOneDay, 1)
    def test_getGravity_noEarth(self):
        # NoEarth ignores its arguments and always returns zero gravity.
        self.assertEqual(Vector(0,0,0), self.noEarth.getGravityForce("asdf", "adf"))
    def test_Initialization_NASASphere(self):
        simRunner = Simulation("./MAPLEAF/Examples/Simulations/NASASphere.mapleaf")
        sphere = simRunner.createRocket()
        # Should be at an altitude of 9144 m above earth's surface
        distanceFromEarthCenter = sphere.rigidBody.state.position.length()
        expected = sphere.environment.earthModel.a + 9144
        self.assertAlmostEqual(distanceFromEarthCenter, expected)
        # Should have zero angular velocity in inertial frame
        angVel = sphere.rigidBody.state.angularVelocity.length()
        self.assertAlmostEqual(angVel, 0.0)
        # Velocity should be zero in earth-fixed frame
        rotationVel = distanceFromEarthCenter * sphere.environment.earthModel.rotationRate
        inertialVel = sphere.rigidBody.state.velocity
        assertVectorsAlmostEqual(self, inertialVel, Vector(0, rotationVel, 0))
    def test_Initialization_Velocity(self):
        # Each case below sets a launch-tower-frame velocity and checks the
        # resulting inertial-frame velocity (earth surface rotation + offset).
        # Zero velocity in launch tower frame
        simDef = SimDefinition("MAPLEAF/Examples/Simulations/NASATwoStageOrbitalRocket.mapleaf", silent=True)
        simDef.setValue("Rocket.velocity", "(0 0 0)")
        simRunner = Simulation(simDefinition=simDef, silent=True)
        rocket = simRunner.createRocket()
        computedInitGlobalFrameVel = rocket.rigidBody.state.velocity
        expectdedVel = Vector(0, 465.1020982258931, 0) # Earth's surface velocity at 0 lat, 0 long
        assertVectorsAlmostEqual(self, computedInitGlobalFrameVel, expectdedVel)
        # Velocity in the +x (East) direction in the launch tower frame
        simDef.setValue("Rocket.velocity", "(1 0 0)")
        simRunner = Simulation(simDefinition=simDef, silent=True)
        rocket = simRunner.createRocket()
        computedInitGlobalFrameVel = rocket.rigidBody.state.velocity
        expectdedVel = Vector(0, 466.1020982258931, 0) # Earth's surface velocity at 0 lat, 0 long + 1m/s east
        assertVectorsAlmostEqual(self, computedInitGlobalFrameVel, expectdedVel)
        # Velocity in the +y (North) direction in the launch tower frame
        simDef.setValue("Rocket.velocity", "(0 1 0)")
        simRunner = Simulation(simDefinition=simDef, silent=True)
        rocket = simRunner.createRocket()
        computedInitGlobalFrameVel = rocket.rigidBody.state.velocity
        expectdedVel = Vector(0, 465.1020982258931, 1) # Earth's surface velocity at 0 lat, 0 long + 1m/s north
        assertVectorsAlmostEqual(self, computedInitGlobalFrameVel, expectdedVel)
        # Velocity in the +z (Up) direction in the launch tower frame
        simDef.setValue("Rocket.velocity", "(0 0 1)")
        simRunner = Simulation(simDefinition=simDef, silent=True)
        rocket = simRunner.createRocket()
        computedInitGlobalFrameVel = rocket.rigidBody.state.velocity
        expectdedVel = Vector(1, 465.1020982258931, 0) # Earth's surface velocity at 0 lat, 0 long + 1m/s up
        assertVectorsAlmostEqual(self, computedInitGlobalFrameVel, expectdedVel)
# If this file is run by itself, run the tests above.
if __name__ == '__main__':
    unittest.main()
|
<reponame>Zholistic/Lithium6<filename>kristian_python_virial_bisection_fit.py
def virial_fit_residuals2(beta_eb_lower, beta_eb_upper, eb, density, potential):
    """Bisection search for the self-consistent beta*Eb of a 2D Fermi gas.

    Fits a third-order virial expansion of the density to (potential, density)
    data, bracketing beta*Eb between beta_eb_lower and beta_eb_upper and
    bisecting on the mismatch between the assumed and fitted beta*Eb.
    Returns (fitted density curve, lmfit result).

    NOTE(review): relies on module-level globals not imported here (np, unp,
    lmfit, kb, hbar, pi, mass_li) and on a hard-coded path to an external
    'virial' binary that computes the b2/b3 coefficients -- confirm both
    exist in the calling environment.
    """
    import subprocess
    # Three bracket points: [lower, midpoint, upper] of beta*Eb.
    beta_eb = np.zeros(3)
    beta_eb[0] = beta_eb_lower
    beta_eb[2] = beta_eb_upper
    beta_eb[1] = beta_eb[0] + ((beta_eb_upper - beta_eb_lower) / 2)
    b2 = np.zeros(3)
    b3 = np.zeros(3)
    # Use virial coeffcients to find T and mu0 with virial coeffcients
    def v_residuals(params, x, b2, b3, eps_data, y=None):
        # Residuals (or model curve when y is None) of the third-order
        # virial-expansion density vs. the local potential x.
        T = params['T'].value
        mu0 = params['mu0'].value
        #alpha = params['alpha'].value
        #dens_fit = ((2/((2*pi*hbar**2)/((mass_li*kb*T)))) *
        #                (np.exp((1/(kb*T)) * (mu0 - x))) +
        #                (2*b2*np.exp(2*(1/(kb*T))*(mu0 -x))) +
        #                (3*b3*np.exp(3*(1/(kb*T))*(mu0 - x))))
        dens_fit = ((2/((2*pi*hbar**2)/((mass_li*kb*T)))) *
                    (np.log( 1 + np.exp((1/(kb*T)) * (mu0 - x))) +
                        (2*b2*np.exp(2*(1/(kb*T))*(mu0 -x))) +
                        (3*b3*np.exp(3*(1/(kb*T))*(mu0 - x)))))
        if y is None:
            return dens_fit
        return (y - dens_fit)
    eps_data = 1e-4
    # Bisection procedure, repeated for 25 iterations.
    # NOTE(review): the outer loop variable i is immediately shadowed by the
    # inner loops below (harmless since the outer i is unused, but confusing).
    for i in range(25):
        for i, beb in enumerate(beta_eb):
            # Run the external virial code to get the b2 and b3 coefficients
            # for this candidate beta*Eb.
            b2_proc = subprocess.Popen(["/home/kristian/dev/c/virial/virial",
                str(beta_eb[i]), "1"], stdout=subprocess.PIPE)
            b3_proc = subprocess.Popen(["/home/kristian/dev/c/virial/virial",
                str(beta_eb[i]), "2"], stdout=subprocess.PIPE)
            b2_out = b2_proc.communicate()[0]
            b3_out = b3_proc.communicate()[0]
            b2[i] = float(b2_out)
            b3[i] = float(b3_out)
        beta_eb_fit = unp.uarray(np.zeros(3),np.zeros(3))
        beta_eb_diff = unp.uarray(np.zeros(3),np.zeros(3))
        # fit for upper, mid and lower beta_eb
        for i, beb in enumerate(beta_eb):
            params = lmfit.Parameters()
            params.add('T', value = 50e-9, min=5e-9, max=70e-9)
            params.add('mu0', value = 1.7e-30, min=1.e-30, max=4e-30)
            #params.add('alpha', value = 1.25, min = 1, max=2)
            fit_output = lmfit.minimize(v_residuals, params,
                              args=(potential, b2[i], b3[i], eps_data, density))
            beta_fit = 1/(kb * params['T'].value)
            beta_eb_fit[i] = eb * beta_fit
            beta_eb_diff[i] = np.abs(beta_eb_fit[i] - beb)
            #print(lmfit.fit_report(fit_output))
            #print(beta_eb_diff[i]*100)
        # Narrow the bracket toward the point of smallest self-consistency error.
        if (beta_eb_diff[0] - beta_eb_diff[1]) < (beta_eb_diff[2] -
                beta_eb_diff[1]):
            beta_eb[2] = beta_eb[1]
            beta_eb[1] = beta_eb[0] + ((beta_eb[2] - beta_eb[0]) / 2)
        else:
            beta_eb[0] = beta_eb[1]
            beta_eb[1] = beta_eb[0] + ((beta_eb[2] - beta_eb[0]) / 2)
    # NOTE(review): 'params' below is whatever the last inner-loop iteration
    # left behind (the upper bracket point), not necessarily the midpoint fit
    # that is re-run two lines later -- verify this is intended.
    beta_eb_max = eb * 1/(kb * (params['T'].value+params['T'].stderr))
    beta_eb_err = np.abs(beta_eb_max - beta_eb[1])
    beta_eb_final = eb * 1/(kb * (params['T'].value))
    fit_output = lmfit.minimize(v_residuals, params,
                    args=(potential, b2[1], b3[1], eps_data, density))
    print('Final betaEb: {0:f} +/- {1:f}'.format(beta_eb_final, beta_eb_err))
    print(lmfit.fit_report(fit_output))
    fit = v_residuals(params, potential, b2[1], b3[1], 1)
    return fit, fit_output#, beta_eb[1]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
'''
Stage 7 parser. The following parser is designed to accept a stage 6 parser,
whose output it consumes. This parser's purpose is to resolve hierarchical
systems into a flat, top-level assembly.
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
from camkes.internal.seven import cmp, filter, map, zip
from .base import Transformer
from .exception import ParseError
from camkes.ast import Assembly, ASTObject, AttributeReference, \
Component, Composition, Configuration, Consumes, Dataport, Emits, \
Instance, Interface, Provides, Setting, Uses
import copy, six
# The pre-condition of this stage is simply the post-condition of the previous
# stage; that only a single assembly remains.
from .stage6 import postcondition as precondition
class InterfacePointer(object):
    '''
    A representation of a connection end that we will potentially relabel. See
    usage of this below.
    '''
    def __init__(self, instance, interface):
        assert isinstance(instance, Instance)
        assert isinstance(interface, Interface)
        self.instance = instance
        self.interface = interface

    # Hashing and equality are identity-based on the referenced objects so
    # that pointers behave sensibly as dict keys during relabelling.
    def __hash__(self):
        return hash(self.instance) ^ hash(self.interface)

    def __eq__(self, other):
        return isinstance(other, InterfacePointer) and \
            self.instance is other.instance and \
            self.interface is other.interface

    def __ne__(self, other):
        return not self.__eq__(other)
def derive(obj, namespace):
    '''
    Make a copy of the given object, mangling the new object's name such that
    it will not conflict with existing objects.

    Args:
        obj: the ASTObject to copy
        namespace: string prefix to prepend to names, or None for no copy
    Returns the original object when namespace is None, otherwise a shallow
    copy with its name (or, for Settings, its instance/reference) prefixed.
    '''
    assert isinstance(obj, ASTObject)
    assert namespace is None or isinstance(namespace, six.string_types)
    if namespace is None:
        # No replication necessary.
        return obj
    new = copy.copy(obj)
    if isinstance(new, Setting):
        # Settings are addressed by instance name, not their own name.
        new.instance = '%s.%s' % (namespace, new.instance)
        if isinstance(new.value, AttributeReference):
            # The referenced attribute lives in the same namespace.
            new.value = copy.copy(new.value)
            new.value.reference = '%s.%s' % (namespace, new.value.reference)
    else:
        new.name = '%s.%s' % (namespace, new.name)
    # If this is a component instance, we need to name-mangle its address
    # space as well. If we don't do this, their address space (custom or
    # implicit) can collide with another entity in the hierarchy and users'
    # components can accidentally be combined into a single address space.
    if isinstance(new, Instance):
        new.address_space = '%s.%s' % (namespace, new.address_space)
    return new
def infer_all(item, parent=None):
    '''
    Infer all relevant objects that are direct or indirect children of this
    item. We do this by recursively "hoisting" things from nested compositions
    and configurations. Though the lifted AST has a built-in traversal
    mechanism, we do not use it here because we want to only visit certain
    entities and we want to propagate learned information upwards.

    Args:
        item: the Assembly or Component being flattened
        parent: the Instance whose type is `item`, or None at the top level
    Returns a 4-tuple of (instances, connections, interface aliases,
    settings), all name-mangled relative to `parent`.
    '''
    assert isinstance(item, (Assembly, Component))
    assert parent is None or isinstance(parent, Instance)
    # A prefix we'll use to name-mangle children of the current item.
    if parent is None:
        prefix = None
    else:
        prefix = parent.name
    # Below we'll discover and then derive two types of instances: our
    # immediate children and indirect children, which will be accumulated in
    # these lists, respectively. We use two separate lists because we want to
    # maintain the ordering of immediate children first.
    immediate_instances = []
    child_instances = []
    # Indirect settings we'll accumulate.
    child_settings = []
    # Final connections and settings we'll return.
    connections = []
    final_settings = []
    # In the context of connections, we may need to adjust their ends based on
    # export statements. Rather than trying to do this as we go, we just track
    # which interfaces are just aliases for others. These will be eventually
    # used by our outermost caller.
    aliases = {}
    if item.composition is not None:
        # As we go through the AST, we'll derive new instances. These will have
        # new names (and addresses), but will potentially be referred to later
        # under their old pointers. We track this derivation in a mapping from
        # old instances to new instances in order to adjust these references.
        derived = {}
        # We'll accumulate indirect child connections here.
        child_connections = []
        for i in item.composition.instances:
            assert i.name is not None, 'unnamed instance in AST (bug in ' \
                'stage 3 parser?)'
            # Hoist everything from this instance.
            insts, conns, alias, settings = infer_all(i.type, i)
            # Name-mangle the instances.
            for i2 in insts:
                n = derive(i2, prefix)
                child_instances.append(n)
                derived[i2] = n
            n = derive(i, prefix)
            immediate_instances.append(n)
            derived[i] = n
            child_connections.extend(conns)
            # Adjust the connection aliases for the name-mangling we just
            # performed.
            for k, v in alias.items():
                assert k.instance in derived
                assert v.instance in derived
                k = InterfacePointer(derived[k.instance], k.interface)
                v = InterfacePointer(derived[v.instance], v.interface)
                aliases[k] = v
            child_settings.extend(settings)
        for c in item.composition.connections + child_connections:
            # Derive and then re-adjust the ends of each connection.
            n = derive(c, prefix)
            from_ends = []
            for f in n.from_ends:
                e = copy.copy(f)
                if e.instance is None:
                    # An instance-less end refers to the enclosing component;
                    # only legal below the top level.
                    if isinstance(item, Assembly):
                        raise ParseError('top-level connection end with no '
                            'instance', e.location)
                    e.instance = parent
                else:
                    assert e.instance in derived
                    e.instance = derived[e.instance]
                from_ends.append(e)
            n.from_ends = from_ends
            to_ends = []
            for t in n.to_ends:
                e = copy.copy(t)
                if e.instance is None:
                    if isinstance(item, Assembly):
                        raise ParseError('top-level connection end with no '
                            'instance', e.location)
                    e.instance = parent
                else:
                    assert e.instance in derived
                    e.instance = derived[e.instance]
                to_ends.append(e)
            n.to_ends = to_ends
            n.claim_children()
            connections.append(n)
        assert len(item.composition.exports) == 0 or not \
            isinstance(item, Assembly), 'export statement in assembly block ' \
            '(bug in stage 4 parser?)'
        # Accrue any new aliases we have.
        for e in item.composition.exports:
            p = InterfacePointer(parent, e.destination)
            assert e.source_instance in derived
            d = InterfacePointer(derived[e.source_instance], e.source_interface)
            aliases[p] = d
    # Accrue any settings we have.
    for s in (item.configuration.settings if item.configuration is not None
            else []) + child_settings:
        n = derive(s, prefix)
        final_settings.append(n)
    return immediate_instances + child_instances, connections, aliases, \
        final_settings
class Parse7(Transformer):
    """Stage 7 transformer: flattens hierarchical systems into a single
    top-level assembly (see the module docstring)."""
    def precondition(self, ast_lifted, _):
        # Same as the stage 6 post-condition: exactly one assembly remains.
        return precondition(ast_lifted)
    def postcondition(self, ast_lifted, _):
        '''
        There is no natural post-condition for this transformation because the
        action taken has been to augment the top-level assembly with
        information that still remains in the AST. This could be formulated
        with an expensive and complex traversal, but it is not worth it.
        '''
        return True
    def transform(self, ast_lifted, read):
        """Rewrite the assembly in-place with hoisted instances, connections
        and settings, resolving export aliases on connection ends."""
        assembly = ast_lifted.assembly
        # Hoist everything relevant from the assembly and its children.
        instances, connections, aliases, settings = infer_all(assembly)
        assembly.composition.instances = instances
        # Replace the connections. Now we take into account the interface
        # aliases.
        assembly.composition.connections = []
        for c in connections:
            for e in c.from_ends + c.to_ends:
                # Follow alias chains to the ultimate concrete interface.
                p = InterfacePointer(e.instance, e.interface)
                while p in aliases:
                    p = aliases[p]
                e.instance = p.instance
                e.interface = p.interface
            assembly.composition.connections.append(c)
        # Replace the settings.
        assembly.configuration.settings = settings
        assembly.claim_children()
        return ast_lifted, read
|
#!/usr/bin/env python3
# Copyright (c) 2021 oatsu
"""
eval.list と dev.list と train.list を生成する。
utt_list.txtは作らなくていい気がする。
data/list/eval.list
data/list/dev.list
data/list/train.list
全ファイルから12個おきにevalとdevに入れる。dev以外の全ファイルをtrainに入れる。
"""
from glob import glob
from os import makedirs
from os.path import basename, expanduser, join, splitext
from sys import argv
from typing import Union
import yaml
from natsort import natsorted
def generate_train_list(out_dir, interval: Union[int, None] = None):
    """
    Generate the song-name split lists under <out_dir>/list:
        utt_list.txt     - all songs
        eval.list        - every interval-th song (offset 0)
        dev.list         - every interval-th song (offset 5)
        train_no_dev.list - everything not in eval or dev
    (The module docstring mentions 'train.list', but the file actually
    written is 'train_no_dev.list'.)
    """
    # Collect the list of files to train on.
    utt_list = glob(f'{join(out_dir)}/acoustic/wav/*.wav')
    utt_list = natsorted([splitext(basename(path))[0] for path in utt_list])
    len_utt_list = len(utt_list)
    if len_utt_list == 0:
        raise Exception(f'There are no wav files in "{join(out_dir)}/acoustic/wav".')
    if interval is None:
        # Pick the first candidate interval that does not evenly divide the
        # corpus size; fall back to 13 when none qualifies.
        # NOTE(review): the guard (i < len_utt_list + 5) is nearly always true
        # for i <= 23; if the intent was "eval must not exceed 1/5 of the
        # corpus" (see the comment below), this condition does not enforce it
        # -- confirm against the author's intent.
        for i in (23, 19, 17, 13, 11):
            if (i < len_utt_list + 5) and (len_utt_list % i != 0):
                interval = i
                break
        else:
            interval = 13
    # Because it would be a problem if the eval set exceeded one fifth.
    elif interval <= 5:
        raise ValueError('Argument "interval" must be larger than 5.')
    makedirs(join(out_dir, 'list'), exist_ok=True)
    print(f'generate_train_list.py: interval = {interval}')
    # Build each song-name list.
    eval_list = [songname for idx, songname in enumerate(utt_list) if idx % interval == 0]
    dev_list = [songname for idx, songname in enumerate(utt_list) if idx % interval == 5]
    train_list = [songname for idx, songname in enumerate(utt_list)
                  if (idx % interval != 0 and idx % interval != 5)]
    # Output file paths.
    path_utt_list = join(out_dir, 'list', 'utt_list.txt')
    path_eval_list = join(out_dir, 'list', 'eval.list')
    path_dev_list = join(out_dir, 'list', 'dev.list')
    path_train_list = join(out_dir, 'list', 'train_no_dev.list')
    # Write the files.
    with open(path_utt_list, mode='w', encoding='utf-8', newline='\n') as f_utt:
        f_utt.write('\n'.join(utt_list))
    with open(path_eval_list, mode='w', encoding='utf-8', newline='\n') as f_utt:
        f_utt.write('\n'.join(eval_list))
    with open(path_dev_list, mode='w', encoding='utf-8', newline='\n') as f_utt:
        f_utt.write('\n'.join(dev_list))
    with open(path_train_list, mode='w', encoding='utf-8', newline='\n') as f_utt:
        f_utt.write('\n'.join(train_list))
def main(path_config_yaml):
    """Load the YAML config and generate the split lists under its out_dir."""
    with open(path_config_yaml, 'r') as config_file:
        config = yaml.load(config_file, Loader=yaml.FullLoader)
    target_dir = expanduser(config['out_dir'])
    generate_train_list(target_dir)
if __name__ == '__main__':
    # CLI: first argument is the path to config.yaml (surrounding quotes stripped).
    main(argv[1].strip('"'))
|
import numpy as np
from sklearn.tree import DecisionTreeClassifier
import unittest as ut
import nnetsauce as ns
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer, load_wine
from sklearn.linear_model import LogisticRegression
class TestRandomBag(ut.TestCase):
    """Regression tests for nnetsauce.RandomBagClassifier.

    The expected probabilities and scores below are pinned values produced
    by the fixed seeds in each test; they will change if any library
    default or seed changes.
    """

    def test_RandomBag(self):
        # Fit ensembles on two datasets (breast cancer and wine) and check
        # predicted probabilities against pinned reference values.
        breast_cancer = load_breast_cancer()
        X = breast_cancer.data
        y = breast_cancer.target
        np.random.seed(123)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=19823
        )
        wine = load_wine()
        Z = wine.data
        t = wine.target
        Z_train, Z_test, t_train, t_test = train_test_split(
            Z, t, test_size=0.2, random_state=12736
        )
        clf = DecisionTreeClassifier(max_depth=1, random_state=14235)
        fit_obj = ns.RandomBagClassifier(
            clf,
            n_estimators=3,
            n_hidden_features=2,
            activation_name="relu",
            a=0.01,
            nodes_sim="sobol",
            bias=True,
            dropout=0,
            direct_link=False,
            n_clusters=2,
            type_clust="kmeans",
            type_scaling=("std", "std", "std"),
            col_sample=0.9,
            row_sample=0.9,
            n_jobs=None,
            seed=14253,
            verbose=0,
        )
        clf2 = DecisionTreeClassifier(max_depth=1, random_state=15243)
        fit_obj2 = ns.RandomBagClassifier(
            clf2,
            n_estimators=3,
            n_hidden_features=2,
            activation_name="relu",
            a=0.01,
            nodes_sim="sobol",
            bias=True,
            dropout=0,
            direct_link=False,
            n_clusters=2,
            type_clust="kmeans",
            type_scaling=("std", "std", "std"),
            col_sample=0.9,
            row_sample=0.9,
            n_jobs=None,
            seed=19237,
            verbose=0,
        )
        # fit_obj3 differs from fit_obj2 only in cluster_encode=False; the
        # assertions below expect it to yield the same first probability.
        clf3 = DecisionTreeClassifier(max_depth=1, random_state=15243)
        fit_obj3 = ns.RandomBagClassifier(
            clf3,
            n_estimators=3,
            n_hidden_features=2,
            activation_name="relu",
            a=0.01,
            nodes_sim="sobol",
            bias=True,
            dropout=0,
            direct_link=False,
            n_clusters=2,
            cluster_encode=False,
            type_clust="kmeans",
            type_scaling=("std", "std", "std"),
            col_sample=0.9,
            row_sample=0.9,
            n_jobs=None,
            seed=19237,
            verbose=0,
        )
        fit_obj.fit(X_train, y_train)
        preds1 = fit_obj.predict_proba(X_test)
        fit_obj2.fit(Z_train, t_train)
        preds2 = fit_obj2.predict_proba(Z_test)
        fit_obj3.fit(Z_train, t_train)
        preds3 = fit_obj3.predict_proba(Z_test)
        self.assertTrue(np.allclose(preds1[0, 0], 0.043789295499125226))
        self.assertTrue(np.allclose(preds1[0, 1], 0.9562107045008749))
        self.assertTrue(np.allclose(preds2[0, 0], 0.04650031359722393))
        self.assertTrue(np.allclose(preds3[0, 0], 0.04650031359722393))
        # NOTE(review): fit_obj2 was fitted on the wine data (Z) but is
        # queried here with X_test from the breast-cancer split — possibly
        # intentional as a pinned regression value, but verify; Z_test
        # seems the natural argument.
        self.assertTrue(
            np.allclose(fit_obj.predict(X_test)[0], 1)
            & np.allclose(fit_obj2.predict(X_test)[0], 0)
        )
    def test_score(self):
        # Check accuracy scores for kmeans- vs gmm-clustered ensembles
        # against pinned reference values.
        breast_cancer = load_breast_cancer()
        X = breast_cancer.data
        y = breast_cancer.target
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, random_state=123
        )
        clf = DecisionTreeClassifier(max_depth=1, random_state=14235)
        fit_obj = ns.RandomBagClassifier(
            clf,
            n_estimators=3,
            n_hidden_features=2,
            activation_name="relu",
            a=0.01,
            nodes_sim="sobol",
            bias=True,
            dropout=0,
            direct_link=False,
            n_clusters=2,
            type_clust="kmeans",
            type_scaling=("std", "std", "std"),
            col_sample=0.9,
            row_sample=0.9,
            n_jobs=None,
            seed=14253,
            verbose=0,
        )
        clf2 = DecisionTreeClassifier(max_depth=1, random_state=15243)
        fit_obj2 = ns.RandomBagClassifier(
            clf2,
            n_estimators=3,
            n_hidden_features=2,
            activation_name="relu",
            a=0.01,
            nodes_sim="sobol",
            bias=True,
            dropout=0,
            direct_link=False,
            n_clusters=2,
            type_clust="gmm",
            type_scaling=("std", "std", "std"),
            col_sample=0.9,
            row_sample=0.9,
            n_jobs=None,
            seed=19237,
            verbose=0,
        )
        fit_obj.fit(X_train, y_train)
        score1 = fit_obj.score(X_test, y_test)
        fit_obj2.fit(X_train, y_train)
        score2 = fit_obj2.score(X_test, y_test)
        self.assertTrue(
            np.allclose(score1, 0.9385964912280702)
            & np.allclose(score2, 0.9298245614035088)
        )
if __name__ == "__main__":
    # Allow running this test module directly.
    ut.main()
|
<reponame>nizz009/pywikibot
"""Bot tests."""
#
# (C) Pywikibot team, 2015-2021
#
# Distributed under the terms of the MIT license.
#
import sys
from contextlib import suppress
import pywikibot
import pywikibot.bot
from pywikibot import i18n
from pywikibot.tools import suppress_warnings
from tests.aspects import (
DefaultSiteTestCase,
SiteAttributeTestCase,
TestCase,
unittest,
)
class TWNBotTestCase(TestCase):
    """Verify that i18n is available."""
    @classmethod
    def setUpClass(cls):
        """Skip the whole test class unless translations can be loaded."""
        if i18n.messages_available():
            super().setUpClass()
            return
        raise unittest.SkipTest("i18n messages package '{}' not available."
                                .format(i18n._messages_package_name))
class FakeSaveBotTestCase(TestCase):
    """
    An abstract test case which patches the bot class to not actually write.
    It redirects the bot's _save_page to it's own C{bot_save} method. Currently
    userPut, put_current and user_edit_entity call it. By default it'll call
    the original method but replace the function called to actually save the
    page by C{page_save}. It patches the bot class as soon as this class'
    attribute bot is defined. It also sets the bot's 'always' option to True to
    avoid user interaction.
    The C{bot_save} method compares the save counter before the call and
    asserts that it has increased by one after the call. It also stores
    locally in C{save_called} if C{page_save} has been called. If C{bot_save}
    or C{page_save} are implemented they should call super's method at some
    point to make sure these assertions work. At C{tearDown} it checks that
    the pages are saved often enough. The attribute C{default_assert_saves}
    defines the number of saves which must happen and compares it to the
    difference using the save counter. It is possible to define C{assert_saves}
    after C{setUp} to overwrite the default value for certain tests. By default
    the number of saves it asserts are 1. Additionally C{save_called} increases
    by 1 on each call of C{page_save} and should be equal to C{assert_saves}.
    This means if the bot class actually does other writes, like using
    L{pywikibot.page.Page.save} manually, it'll still write.
    """
    @property
    def bot(self):
        """Get the current bot."""
        return self._bot
    @bot.setter
    def bot(self, value):
        """Set and patch the current bot."""
        # Guard against double-patching: the bot's _save_page must not
        # already be our bot_save hook.
        assert value._save_page != self.bot_save, 'bot may not be patched.'
        self._bot = value
        self._bot.opt.always = True
        # Keep the original _save_page so bot_save can delegate to it, and
        # remember the counter baseline for the tearDown assertion.
        self._original = self._bot._save_page
        self._bot._save_page = self.bot_save
        self._old_counter = self._bot._save_counter
    def setUp(self):
        """Set up test by resetting the counters."""
        super().setUp()
        self.assert_saves = getattr(self, 'default_assert_saves', 1)
        self.save_called = 0
    def tearDown(self):
        """Tear down by asserting the counters."""
        self.assertEqual(self._bot._save_counter,
                         self._old_counter + self.assert_saves)
        self.assertEqual(self.save_called, self.assert_saves)
        super().tearDown()
    def bot_save(self, page, func, *args, **kwargs):
        """Handle when bot's userPut was called."""
        self.assertGreaterEqual(self._bot._save_counter, 0)
        old_counter = self._bot._save_counter
        old_local_cnt = self.save_called
        # Delegate to the original _save_page, but substitute page_save for
        # the function that would actually write (the passed `func` is
        # intentionally ignored).
        result = self._original(page, self.page_save, *args, **kwargs)
        self.assertEqual(self._bot._save_counter, old_counter + 1)
        self.assertEqual(self.save_called, old_local_cnt + 1)
        self.assertGreater(self._bot._save_counter, self._old_counter)
        return result
    def page_save(self, *args, **kwargs):
        """Handle when bot calls the page's save method."""
        self.save_called += 1
class TestBotTreatExit:
    """Mixin to provide handling for treat and exit."""
    def _treat(self, pages, post_treat=None):
        """
        Get tests which are executed on each treat.
        It uses pages as an iterator and compares the page given to the page
        returned by pages iterator. It checks that the bot's _site and site
        attributes are set to the page's site. If _treat_site is set with a
        Site it compares it to that one too.
        Afterwards it calls post_treat so it's possible to do additional
        checks.
        """
        def treat(page):
            # Each treated page must match the next expected page.
            self.assertEqual(page, next(self._page_iter))
            if self._treat_site is None:
                self.assertFalse(hasattr(self.bot, 'site'))
                self.assertFalse(hasattr(self.bot, '_site'))
            else:
                self.assertIsNotNone(self.bot._site)
                self.assertEqual(self.bot.site, self.bot._site)
                if self._treat_site:
                    self.assertEqual(self.bot._site, self._treat_site)
                self.assertEqual(page.site, self.bot.site)
            if post_treat:
                post_treat(page)
        self._page_iter = iter(pages)
        return treat
    def _treat_page(self, pages=True, post_treat=None):
        """
        Adjust to CurrentPageBot signature.
        It uses almost the same logic as _treat but returns a wrapper function
        which itself calls the function returned by _treat.
        The pages may be set to True which will use _treat_generator as the
        source for the pages.
        """
        def treat_page():
            # `treat` is bound below, before this closure is ever called.
            treat(self.bot.current_page)
        if pages is True:
            pages = self._treat_generator()
        treat = self._treat(pages, post_treat)
        return treat_page
    def _exit(self, treated, written=0, exception=None):
        """Get tests which are executed on exit."""
        def exit():
            exc = sys.exc_info()[0]
            if exc is AssertionError:
                # When an AssertionError happened we shouldn't do these
                # assertions as they are invalid anyway and hide the actual
                # failed assertion
                return
            self.assertEqual(self.bot._treat_counter, treated)
            self.assertEqual(self.bot._save_counter, written)
            if exception:
                self.assertIs(exc, exception)
            else:
                self.assertIsNone(exc)
                # On a clean exit the page iterator must be exhausted.
                with self.assertRaisesRegex(StopIteration, '^$'):
                    next(self._page_iter)
        return exit
class TestDrySiteBot(TestBotTreatExit, SiteAttributeTestCase):
    """Tests for the BaseBot subclasses."""
    CANT_SET_ATTRIBUTE_RE = "can't set attribute"
    NOT_IN_TREAT_RE = 'Requesting the site not while in treat is not allowed.'
    dry = True
    sites = {
        'de': {
            'family': 'wikipedia',
            'code': 'de'
        },
        'en': {
            'family': 'wikipedia',
            'code': 'en'
        }
    }
    def _generator(self):
        """Generic generator."""
        # Alternates between the two configured sites.
        yield pywikibot.Page(self.de, 'Page 1')
        yield pywikibot.Page(self.en, 'Page 2')
        yield pywikibot.Page(self.de, 'Page 3')
        yield pywikibot.Page(self.en, 'Page 4')
    def test_SingleSiteBot_automatic(self):
        """Test SingleSiteBot class with no predefined site."""
        # With site=None the bot adopts the first page's site (de) and
        # only treats pages from that site.
        self._treat_site = self.de
        self.bot = pywikibot.bot.SingleSiteBot(site=None,
                                               generator=self._generator())
        self.bot.treat = self._treat([pywikibot.Page(self.de, 'Page 1'),
                                      pywikibot.Page(self.de, 'Page 3')])
        self.bot.exit = self._exit(2)
        self.bot.run()
        self.assertEqual(self.bot.site, self._treat_site)
    def test_SingleSiteBot_specific(self):
        """Test SingleSiteBot class with predefined site."""
        self._treat_site = self.en
        self.bot = pywikibot.bot.SingleSiteBot(site=self.en,
                                               generator=self._generator())
        self.bot.treat = self._treat([pywikibot.Page(self.en, 'Page 2'),
                                      pywikibot.Page(self.en, 'Page 4')])
        self.bot.exit = self._exit(2)
        self.bot.run()
        self.assertEqual(self.bot.site, self._treat_site)
    @suppress_warnings('pywikibot.bot.MultipleSitesBot.site is deprecated')
    def test_MultipleSitesBot(self):
        """Test MultipleSitesBot class."""
        # Assert no specific site
        self._treat_site = False
        self.bot = pywikibot.bot.MultipleSitesBot(generator=self._generator())
        # site may not be assigned, and may only be read while in treat.
        with self.assertRaisesRegex(AttributeError,
                                    self.CANT_SET_ATTRIBUTE_RE):
            self.bot.site = self.de
        with self.assertRaisesRegex(ValueError, self.NOT_IN_TREAT_RE):
            self.bot.site
        self.bot.treat = self._treat(self._generator())
        self.bot.exit = self._exit(4)
        self.bot.run()
        with self.assertRaisesRegex(ValueError, self.NOT_IN_TREAT_RE):
            self.bot.site
    def test_Bot(self):
        """Test normal Bot class."""
        # Assert no specific site
        self._treat_site = False
        self.bot = pywikibot.bot.Bot(generator=self._generator())
        self.bot.treat = self._treat(self._generator())
        self.bot.exit = self._exit(4)
        self.bot.run()
    def test_CurrentPageBot(self):
        """Test normal Bot class."""
        def post_treat(page):
            self.assertIs(self.bot.current_page, page)
        # Assert no specific site
        self._treat_site = None
        self.bot = pywikibot.bot.CurrentPageBot(generator=self._generator())
        self.bot.treat_page = self._treat_page(self._generator(), post_treat)
        self.bot.exit = self._exit(4)
        self.bot.run()
    def test_Bot_ValueError(self):
        """Test normal Bot class with a ValueError in treat."""
        def post_treat(page):
            if page.title() == 'Page 3':
                raise ValueError('Whatever')
        self._treat_site = False
        self.bot = pywikibot.bot.Bot(generator=self._generator())
        self.bot.treat = self._treat([pywikibot.Page(self.de, 'Page 1'),
                                      pywikibot.Page(self.en, 'Page 2'),
                                      pywikibot.Page(self.de, 'Page 3')],
                                     post_treat)
        # Page 3 raises, so only 2 pages count as treated.
        self.bot.exit = self._exit(2, exception=ValueError)
        with self.assertRaisesRegex(ValueError, 'Whatever'):
            self.bot.run()
    def test_Bot_KeyboardInterrupt(self):
        """Test normal Bot class with a KeyboardInterrupt in treat."""
        def post_treat(page):
            if page.title() == 'Page 3':
                raise KeyboardInterrupt('Whatever')
        self._treat_site = False
        self.bot = pywikibot.bot.Bot(generator=self._generator())
        self.bot.treat = self._treat([pywikibot.Page(self.de, 'Page 1'),
                                      pywikibot.Page(self.en, 'Page 2'),
                                      pywikibot.Page(self.de, 'Page 3')],
                                     post_treat)
        # KeyboardInterrupt is swallowed by run(), hence exception=None.
        self.bot.exit = self._exit(2, exception=None)
        self.bot.run()
# TODO: This could be written as dry tests probably by faking the important
# properties
class LiveBotTestCase(TestBotTreatExit, DefaultSiteTestCase):
    """Test bot classes which need to check the Page object live."""
    def _treat_generator(self):
        """Yield the current page until it's None."""
        while self._current_page:
            yield self._current_page
    def _missing_generator(self):
        """Yield pages and the last one does not exist."""
        self._count = 0  # skip_page skips one page
        self._current_page = list(self.site.allpages(total=1))[0]
        yield self._current_page
        # Append 'X' to the title until the page no longer exists; the
        # final (missing) page is still yielded before stopping.
        while self._current_page.exists():
            self._count += 1
            self._current_page = pywikibot.Page(
                self.site, self._current_page.title() + 'X')
            yield self._current_page
        self._current_page = None
    def _exit(self, treated=None, written=0, exception=None):
        """Set the number of treated pages to _count."""
        def exit():
            t = self._count if treated is None else treated
            # Due to PEP 3135 super()._exit(...)() would raise
            # RuntimeError: super(): no arguments
            super(LiveBotTestCase, self)._exit(t, written, exception)()
        return exit
    def test_ExistingPageBot(self):
        """Test ExistingPageBot class."""
        def post_treat(page):
            """Verify the page exists."""
            self.assertTrue(page.exists())
        self._treat_site = None
        self.bot = pywikibot.bot.ExistingPageBot(
            generator=self._missing_generator())
        self.bot.treat_page = self._treat_page(post_treat=post_treat)
        self.bot.exit = self._exit()
        self.bot.run()
    def test_CreatingPageBot(self):
        """Test CreatingPageBot class."""
        # This doesn't verify much (e.g. it could yield the first existing
        # page) but the assertion in post_treat should verify that the page
        # is valid
        def treat_generator():
            """Yield just one current page (the last one)."""
            yield self._current_page
        def post_treat(page):
            """Verify the page is missing."""
            self.assertFalse(page.exists())
        self._treat_site = None
        self.bot = pywikibot.bot.CreatingPageBot(
            generator=self._missing_generator())
        self.bot.treat_page = self._treat_page(treat_generator(), post_treat)
        self.bot.exit = self._exit()
        self.bot.run()
class Options(pywikibot.bot.OptionHandler):
    """A derived OptionHandler class."""
    # Same defaults as before, expressed via the dict() constructor.
    available_options = dict(
        foo='bar',
        bar=42,
        baz=False,
    )
class TestOptionHandler(TestCase):
    """OptionHandler test class."""
    dry = True
    def setUp(self):
        """Create the handler with one overridden option."""
        self.option_handler = Options(baz=True)
        super().setUp()
    def test_opt_values(self):
        """Check attribute and item access of OptionHandler options."""
        opts = self.option_handler.opt
        # Defaults and the overridden value.
        self.assertEqual(opts.foo, 'bar')
        self.assertEqual(opts.bar, 42)
        self.assertTrue(opts.baz)
        # Attribute and item access are equivalent.
        self.assertEqual(opts.foo, opts['foo'])
        # Assigning an option keeps both access paths in sync without
        # polluting the instance __dict__.
        opts.baz = 'Hey'
        self.assertEqual(opts.baz, 'Hey')
        self.assertEqual(opts['baz'], 'Hey')
        self.assertNotIn('baz', opts.__dict__)
if __name__ == '__main__':  # pragma: no cover
    # suppress(SystemExit) lets unittest.main() return instead of exiting.
    with suppress(SystemExit):
        unittest.main()
|
<filename>ansible/library/helm_toolbox.py<gh_stars>100-1000
#!/usr/bin/env python
#
# Copyright 2020 Caoyingjun
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import traceback
from ansible.module_utils.basic import AnsibleModule
# Ansible module documentation (parsed by ansible-doc).
# Fixed typo: "targerting" -> "targeting".
DOCUMENTATION = '''
---
module: helm_toolbox
short_description: >
    Module for invoking ansible module in helm_toolbox.
description:
    - A module targeting at invoking ansible module in helm_toolbox
      as used by Kubez-ansible project.
author: Caoyingjun
'''
# Usage examples; note the uninstall example omits `chart`.
EXAMPLES = '''
- hosts: all
  tasks:
    - name: Install harbor applications by helm3
      helm_toolbox:
        name: harbor
        namespace: default
        chart: chart
        chart_extra_vars:
          setkey1: setvalue1
          setkey2: setvalue2
        ...
- hosts: all
  tasks:
    - name: Uninstall harbor applications by helm3
      helm_toolbox:
        name: harbor
        namespace: default
        action: uninstall
'''
# Kubeconfig used for every helm invocation.
KUBECONFIG = '/etc/kubernetes/admin.conf'
class Helm3Worker(object):
    """Thin wrapper around the helm3 CLI used by the helm_toolbox module.

    Actions are dispatched by name from main(): install, uninstall.
    """

    def __init__(self, params):
        self.params = params
        self.name = self.params.get('name')
        self.namespace = self.params.get('namespace')
        self.chart = self.params.get('chart')
        self.changed = False
        self.result = {}

    def run_cmd(self, cmd):
        """Run *cmd* through the shell and return its stripped stdout.

        Raises subprocess.CalledProcessError (carrying the combined output)
        when the command exits with a non-zero status.
        """
        # universal_newlines=True yields `str` output on Python 3; without
        # it stdout would be bytes and the str operations below and in
        # is_installed (rstrip, split('\n'), substring tests) would raise
        # TypeError. (Kept instead of text= for Python 2 compatibility.)
        # NOTE(review): shell=True with interpolated params assumes the
        # ansible inputs are trusted.
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=True,
                                universal_newlines=True)
        stdout, stderr = proc.communicate()
        retcode = proc.poll()
        if retcode != 0:
            output = 'stdout: "%s", stderr: "%s"' % (stdout, stderr)
            raise subprocess.CalledProcessError(retcode, cmd, output)
        return stdout.rstrip()

    def install(self):
        """Install the chart unless a matching release already exists."""
        if not self.is_installed:
            cmd = ['helm', 'install', self.name, self.chart,
                   '-n', self.namespace, '--kubeconfig', KUBECONFIG]
            if self.params.get('chart_extra_vars'):
                chart_extra_vars = self.params.get('chart_extra_vars')
                if isinstance(chart_extra_vars, dict):
                    # Render each truthy key/value pair as a --set flag.
                    chart_extra_cmd = ' '.join('--set {}={}'.format(key, value)  # noqa
                                               for key, value in chart_extra_vars.items() if value)  # noqa
                    cmd.append(chart_extra_cmd)
            self.run_cmd(' '.join(cmd))
            self.changed = True

    def uninstall(self):
        """Uninstall the release if it is currently installed."""
        if self.is_installed:
            cmd = ['helm', 'uninstall', self.name,
                   '-n', self.namespace, '--kubeconfig', KUBECONFIG]
            self.run_cmd(' '.join(cmd))
            self.changed = True

    @property
    def is_installed(self):
        """True when `helm list` shows a release matching name + namespace."""
        charts = self.run_cmd(
            ' '.join(['helm', 'list', '-n', self.namespace, '--kubeconfig', KUBECONFIG])).split('\n')
        for chart in charts:
            if self.name in chart and self.namespace in chart:
                return True
        return False
def main():
    """Module entry point: parse params and dispatch to Helm3Worker."""
    specs = dict(
        name=dict(required=True, type='str'),
        namespace=dict(required=False, type='str', default='default'),
        action=dict(type='str', default='install', choices=['install',
                                                            'uninstall']),
        # `chart` is only meaningful for install; the documented uninstall
        # example omits it, so it must not be globally required.
        chart=dict(required=False, type='str'),
        chart_extra_vars=dict(type='json')
    )
    module = AnsibleModule(argument_spec=specs, bypass_checks=True)
    params = module.params
    # Enforce the install-only requirement explicitly.
    if params.get('action') == 'install' and not params.get('chart'):
        module.fail_json(changed=False,
                         msg='chart is required when action is install')
    hw = None
    try:
        hw = Helm3Worker(params)
        # Dispatch to Helm3Worker.install / Helm3Worker.uninstall.
        getattr(hw, params.get('action'))()
        module.exit_json(changed=hw.changed, result=hw.result)
    except Exception:
        module.fail_json(changed=True, msg=repr(traceback.format_exc()),
                         **getattr(hw, 'result', {}))


if __name__ == '__main__':
    main()
|
<gh_stars>1-10
#!/usr/bin/env python
# Filename tools.py
__author__ = '<EMAIL> (duanqz)'
### Import blocks
import os
import shutil
import commands
import tempfile
import signal
import subprocess
import time
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
### Class blocks
class Toolkit:
    """ Toolkit including all tools

    Loads unpack/pack tool pairs per boot-image type from toolkit.xml and
    exposes lookup by type plus (de)serialization of the detected type.
    (Python 2 source: uses print statements elsewhere in this module.)
    """
    TOOLS_ROOT = os.path.dirname(os.path.abspath(__file__))
    TOOLKIT_XML = os.path.join(TOOLS_ROOT, "toolkit.xml")
    TYPE_CONFIG = "type.config"
    def __init__(self):
        """ Initialize tools factory from config.xml
        """
        # allTools: bootType -> {"UNPACK": path, "PACK": path}
        # sequence: seq attribute (string) -> bootType, used as try order
        self.allTools = {}
        self.sequence = {}
        tree = ET.parse(Toolkit.TOOLKIT_XML)
        for tool in tree.findall("tool"):
            seq = tool.attrib["seq"]
            bootType = tool.attrib["type"]
            unpackTool = tool.find("unpack").text
            packTool = tool.find("pack").text
            self.allTools[bootType] = { "UNPACK" : os.path.join(Toolkit.TOOLS_ROOT, unpackTool),
                                        "PACK"   : os.path.join(Toolkit.TOOLS_ROOT, packTool) }
            self.sequence[seq] = bootType
    def parseType(self, bootfile):
        """ Match appropriate tools for the boot image file.
        """
        tryType = None
        # Try to unpack boot image for each type,
        # choose the appropriate one.
        # NOTE(review): seq keys are strings, so sorted() orders them
        # lexicographically ("10" < "2"); confirm seq values stay single
        # digits in toolkit.xml.
        for seq in sorted(self.sequence.keys()):
            bootType = self.sequence.get(seq)
            # Try to unpack the boot image by unpack tool
            unpackTool = self.getTools(bootType, "UNPACK")
            if BootimgParser.tryUnpack(unpackTool, bootfile) == True:
                tryType = bootType
                break
        BootimgParser.clearTempDir()
        return tryType
    def getTools(self, bootType, attrib=None):
        """ Get tools by type of boot.img

        Returns the whole {"UNPACK", "PACK"} dict when attrib is None,
        otherwise just the requested tool path.
        """
        tools = self.allTools.get(bootType)
        if attrib == None :
            return tools
        else:
            return tools[attrib]
    @staticmethod
    def storeType(bootType, bootout):
        # Serialize the detected type into <bootout>/type.config.
        fileHandle = open(os.path.join(bootout, Toolkit.TYPE_CONFIG), "w")
        fileHandle.write(bootType)
        fileHandle.close()
    @staticmethod
    def retrieveType(bootout):
        # De-serialize; fall back to "COMMON" when type.config is missing.
        try:
            fileHandle = open(os.path.join(bootout, Toolkit.TYPE_CONFIG), "r")
            bootType = fileHandle.read().rstrip()
            fileHandle.close()
        except:
            print ">>> Can not find type.config, use COMMON as image type by default"
            bootType = "COMMON"
        return bootType
### End of class Toolkit
class BootimgParser:
    """ Match out appropriate tools

    Runs a candidate unpack tool against a boot image in a temporary
    directory, with a polling-based timeout.
    """
    # Directory for temporary data storage (created once at class
    # definition time and reused across tryUnpack calls).
    TEMP_DIR = tempfile.mkdtemp()
    TIME_OUT = 10  # 10 polls x 0.5 s sleep = 5.0 seconds total
    @staticmethod
    def tryUnpack(unpackTool, bootimg):
        """ Try to unpack the boot image into TEMP_DIR.
            Return True: unpack successfully. False: otherwise.
        """
        BootimgParser.clearTempDir()
        cmd = "%s %s %s" %(unpackTool, bootimg, BootimgParser.TEMP_DIR)
        # setsid puts the child in its own process group so the whole
        # group can be killed on timeout below.
        p = subprocess.Popen(cmd, shell=True, preexec_fn=os.setsid, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        timeout=BootimgParser.TIME_OUT
        while True:
            if p.poll() != None:
                status = p.poll()
                break
            timeout-=1
            time.sleep(0.5)
            if timeout <= 0:
                # Give up and kill the whole process group.
                status = -1
                os.killpg(p.pid, signal.SIGTERM)
                break
        (output, erroutput) = p.communicate()
        # Debug code. Useless for release version
        BootimgParser.__debug("\nTry: %s" %cmd)
        BootimgParser.__debug(output)
        return status == 0
    @staticmethod
    def clearTempDir():
        """ Clear the temporary directory
        """
        if os.path.exists(BootimgParser.TEMP_DIR) == True:
            shutil.rmtree(BootimgParser.TEMP_DIR)
    @staticmethod
    def __debug(msg):
        # Flip to True to trace unpack attempts during development.
        if False: print msg
### End of class ToolsMatcher
|
<gh_stars>0
import argparse
import datetime
import json
import os
import os.path
import praw
import requests as r
# Post fields whose archived (pushshift) values go stale; they are
# refreshed from the live Reddit API when the --update flag is passed.
FIELDS_TO_UPDATE = [
    'locked',
    'num_comments',
    'num_crossposts',
    'over_18',
    'pinned',
    'score',
    'selftext',
    'spoiler',
    'stickied',
    'subreddit_subscribers',
]
class DateAction(argparse.Action):
    """argparse action that parses a YYYY-MM-DD string into a datetime."""
    def __call__(self, parser, namespace, values, option_string):
        try:
            parsed = datetime.datetime.strptime(values, '%Y-%m-%d')
        except ValueError:
            raise ValueError(f'{values} is not a valid YYYY-MM-DD date')
        setattr(namespace, self.dest, parsed)
def main():
    """Parse CLI arguments and scrape each requested subreddit."""
    parser = argparse.ArgumentParser(description='Chronologically scrape reddit')
    parser.add_argument('--sub', help='Subreddit to scrape (e.g. "me_irl")')
    parser.add_argument('--sub_list', help='Path to a .txt file with one subreddit name per line to scrape')
    parser.add_argument('--field_list', help='Path to a .txt file with one field per line to keep from posts. If not supplied, keep all fields.')
    parser.add_argument('--start_date', help='Start date in YYYY-MM-DD format (starts at 00:00 UTC)', action=DateAction)
    parser.add_argument('--end_date', help='End date in YYYY-MM-DD format ends at (23:59:59 UTC)', action=DateAction)
    parser.add_argument('--count', type=int, default=1000, help='A maximum amount of posts to download for each provided subreddit')
    parser.add_argument('--update', help='Update the data from the Reddit API after downloading it. Recommended for anything that requires up-to-date karma counts.', action='store_true')
    args = parser.parse_args()
    if not args.sub_list and not args.sub:
        print('You must provide either --sub or --sub_list.')
        exit(1)
    """
    Things that are okay:
    - Supplying a start_date, end_date, and count
    - Supplying a start_date and an end_date
    - Supplying a start_date and a count
    - Supplying only a count
    """
    # NOTE(review): --count defaults to 1000, so `args.count is not None`
    # is always true and this combined check can never fire; confirm the
    # intended validation.
    if not ((args.start_date is not None and args.end_date is not None) or (args.start_date is not None and args.count is not None) or (args.count is not None)):
        print('Invalid combination of --start_date, --end_date, and --count. Make sure these arguments make sense.')
        exit(1)
    if args.sub_list:
        subreddits = read_lines_of_file(args.sub_list)
    else:
        subreddits = [args.sub]
    if args.field_list:
        fields = read_lines_of_file(args.field_list)
    else:
        # Keep every field. (Fixes a NameError: `fields` was previously
        # unbound here but always passed to scrape_subreddit below.)
        fields = None
    for subreddit in subreddits:
        scrape_subreddit(
            subreddit,
            args.update,
            args.count,
            fields,
            args.start_date if args.start_date else datetime.datetime(2000, 1, 1),
            args.end_date if args.end_date else datetime.datetime.now(),
        )
def read_lines_of_file(path):
    """Return the whitespace-stripped lines of the text file at *path*."""
    with open(path, 'r') as handle:
        return [entry.strip() for entry in handle]
def scrape_subreddit(subreddit, update, count, fields, start_date, end_date):
    """Download posts for one subreddit from pushshift, newest first.

    :param subreddit: subreddit name without the /r/ prefix
    :param update: when True, refresh FIELDS_TO_UPDATE from the Reddit API
    :param count: maximum number of posts to collect, or None for no limit
    :param fields: whitelist of fields to keep per post, or None to keep all
    :param start_date: inclusive window start (datetime)
    :param end_date: inclusive window end (datetime)

    Writes the collected posts as JSON to the working directory.
    """
    print(f'Scraping {subreddit}')
    if update:
        reddit = login_to_reddit()
    # The *_epoch names look inverted because pushshift is queried in
    # descending order: the scan starts at the window's end (start_epoch)
    # and walks backwards toward end_epoch.
    end_epoch = int(start_date.timestamp())
    # add 23 hours, 59 minutes, 59 seconds to include the posts made on end_date
    start_epoch = int((end_date + datetime.timedelta(days=1) - datetime.timedelta(seconds=1)).timestamp())
    current_epoch = start_epoch
    output = []
    # Test `count is None` first: the previous operand order evaluated
    # `len(output) < None`, which raises TypeError on Python 3.
    while count is None or len(output) < count:
        payload = {
            'subreddit': subreddit,
            'sort': 'desc',
            'size': 500,
            'before': current_epoch,
            'after': end_epoch,
            'sort_type': 'created_utc',
        }
        posts = query_pushshift(payload)
        ids = [f't3_{post["id"]}' for post in posts]
        if len(posts) == 0:
            break
        # Zip with some reddit info if we plan on updating stuff
        if update:
            zipped_posts = zip(posts, reddit.info(ids))
        else:
            zipped_posts = zip(posts, [None] * len(posts))
        for post, submission in zipped_posts:
            # Update the time of the last post we downloaded
            current_epoch = int(post['created_utc'])
            # Lose the fields we don't care about
            if fields:
                post = keep_whitelisted_fields(post, fields)
            # Query reddit for up-to-date info on fields we need to update
            if update:
                for field in FIELDS_TO_UPDATE:
                    if field in post:
                        post[field] = getattr(submission, field)
            # Stick it on the end
            output.append(post)
            if len(output) % 500 == 0:
                print(f'Downloaded {len(output)} posts so far')
    output_path = os.path.abspath(f'{subreddit}-{datetime.datetime.fromtimestamp(start_epoch).strftime("%Y-%m-%d")}-{datetime.datetime.fromtimestamp(end_epoch).strftime("%Y-%m-%d")}.json')
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(output, f, ensure_ascii=False)
    print(f'Downloaded {len(output)} posts from {subreddit}.')
def keep_whitelisted_fields(post, whitelist):
    """Return a copy of *post* containing only the whitelisted fields.

    Fields listed in *whitelist* but absent from *post* are skipped instead
    of raising KeyError — pushshift records do not all carry the same
    fields, so a missing key previously aborted the whole scrape.
    """
    return {field: post[field] for field in whitelist if field in post}
def login_to_reddit():
    """Create an authenticated praw Reddit client from ./praw.ini.

    Exits the process with status 1 when praw.ini is missing from the
    current working directory.
    """
    if not os.path.isfile('praw.ini'):
        print("Couldn't find praw.ini in this directory! Try making one if this breaks: https://praw.readthedocs.io/en/latest/getting_started/configuration/prawini.html")
        exit(1)
    # Credentials come from the [DEFAULT] section of praw.ini.
    reddit = praw.Reddit('DEFAULT', user_agent='python:reddit-scraper:v0.2.0 (by /u/notverycreative1)')
    print('Logged in!')
    return reddit
def query_pushshift(payload):
    """
    Query the pushshift API.
    :param payload: Payload of params to query with
    :returns: list of post objects
    :raises requests.HTTPError: on a non-2xx response
    """
    response = r.get('https://api.pushshift.io/reddit/search/submission', params=payload)
    # Fail loudly on HTTP errors instead of raising an opaque KeyError on
    # the missing 'data' key below.
    response.raise_for_status()
    # The JSON 'data' entry is already a list; no copy needed.
    return response.json()['data']
if __name__ == '__main__':
    # Script entry point.
    main()
|
<filename>gtsfm/scene_optimizer.py
"""The main class which integrates all the modules.
Authors: <NAME>, <NAME>
"""
from gtsfm.common.gtsfm_data import GtsfmData
import logging
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import dask
import matplotlib
from gtsam import Pose3, Similarity3
matplotlib.use("Agg")
from dask.delayed import Delayed
import gtsfm.averaging.rotation.cycle_consistency as cycle_consistency
import gtsfm.evaluation.metrics_report as metrics_report
import gtsfm.two_view_estimator as two_view_estimator
import gtsfm.utils.ellipsoid as ellipsoid_utils
import gtsfm.utils.io as io_utils
import gtsfm.utils.logger as logger_utils
import gtsfm.utils.metrics as metrics_utils
import gtsfm.utils.viz as viz_utils
from gtsfm.averaging.rotation.cycle_consistency import EdgeErrorAggregationCriterion
from gtsfm.common.image import Image
from gtsfm.feature_extractor import FeatureExtractor
from gtsfm.multi_view_optimizer import MultiViewOptimizer
from gtsfm.two_view_estimator import TwoViewEstimator, TwoViewEstimationReport
# base paths for storage
PLOT_BASE_PATH = Path(__file__).resolve().parent.parent / "plots"
METRICS_PATH = Path(__file__).resolve().parent.parent / "result_metrics"
RESULTS_PATH = Path(__file__).resolve().parent.parent / "results"
# plot paths
PLOT_CORRESPONDENCE_PATH = PLOT_BASE_PATH / "correspondences"
PLOT_BA_INPUT_PATH = PLOT_BASE_PATH / "ba_input"
PLOT_RESULTS_PATH = PLOT_BASE_PATH / "results"
# Paths to Save Output in React Folders.
REACT_METRICS_PATH = Path(__file__).resolve().parent.parent / "rtf_vis_tool" / "src" / "result_metrics"
REACT_RESULTS_PATH = Path(__file__).resolve().parent.parent / "rtf_vis_tool" / "public" / "results"
logger = logger_utils.get_logger()
# Silence noisy third-party loggers (matplotlib debug, PIL debug spam).
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
pil_logger = logging.getLogger("PIL")
pil_logger.setLevel(logging.INFO)
# number of digits (significant figures) to include in each entry of error metrics
PRINT_NUM_SIG_FIGS = 2
class SceneOptimizer:
    """Wrapper combining different modules to run the whole pipeline on a
    loader.

    Builds a single dask computation graph spanning feature extraction,
    two-view geometry estimation, cycle-consistency filtering, and
    multi-view optimization, plus optional visualization/persistence tasks.
    """
    def __init__(
        self,
        feature_extractor: FeatureExtractor,
        two_view_estimator: TwoViewEstimator,
        multiview_optimizer: MultiViewOptimizer,
        save_two_view_correspondences_viz: bool,
        save_3d_viz: bool,
        save_gtsfm_data: bool,
        pose_angular_error_thresh: float,
    ) -> None:
        """pose_angular_error_thresh is given in degrees"""
        self.feature_extractor = feature_extractor
        self.two_view_estimator = two_view_estimator
        self.multiview_optimizer = multiview_optimizer
        # Flags controlling which optional artifacts are written to disk.
        self._save_two_view_correspondences_viz = save_two_view_correspondences_viz
        self._save_3d_viz = save_3d_viz
        self._save_gtsfm_data = save_gtsfm_data
        self._pose_angular_error_thresh = pose_angular_error_thresh
        # make directories for persisting data
        os.makedirs(PLOT_BASE_PATH, exist_ok=True)
        os.makedirs(METRICS_PATH, exist_ok=True)
        os.makedirs(RESULTS_PATH, exist_ok=True)
        os.makedirs(PLOT_CORRESPONDENCE_PATH, exist_ok=True)
        os.makedirs(PLOT_BA_INPUT_PATH, exist_ok=True)
        os.makedirs(PLOT_RESULTS_PATH, exist_ok=True)
        # Save duplicate directories within React folders.
        os.makedirs(REACT_RESULTS_PATH, exist_ok=True)
        os.makedirs(REACT_METRICS_PATH, exist_ok=True)
    def create_computation_graph(
        self,
        num_images: int,
        image_pair_indices: List[Tuple[int, int]],
        image_graph: List[Delayed],
        camera_intrinsics_graph: List[Delayed],
        image_shape_graph: List[Delayed],
        gt_cameras_graph: Optional[List[Delayed]] = None,
    ) -> Delayed:
        """The SceneOptimizer plate calls the FeatureExtractor and TwoViewEstimator plates several times.

        Nothing is computed here; this only wires up a dask graph. The returned
        Delayed yields the final bundle-adjustment output (GtsfmData) when computed.
        """
        # auxiliary graph elements for visualizations and saving intermediate
        # data for analysis, not returned to the user.
        auxiliary_graph_list = []
        metrics_graph_list = []
        # detection and description graph
        keypoints_graph_list = []
        descriptors_graph_list = []
        for delayed_image in image_graph:
            (delayed_dets, delayed_descs) = self.feature_extractor.create_computation_graph(delayed_image)
            keypoints_graph_list += [delayed_dets]
            descriptors_graph_list += [delayed_descs]
        # estimate two-view geometry and get indices of verified correspondences.
        i2Ri1_graph_dict = {}
        i2Ui1_graph_dict = {}
        v_corr_idxs_graph_dict = {}
        two_view_reports_dict = {}
        two_view_reports_pp_dict = {}
        for (i1, i2) in image_pair_indices:
            # TODO(johnwlambert): decompose this method -- name it as "calling_the_plate()"
            if gt_cameras_graph is not None:
                # compute GT relative pose
                gt_i2Ti1 = dask.delayed(lambda x, y: x.pose().between(y.pose()))(
                    gt_cameras_graph[i2], gt_cameras_graph[i1]
                )
            else:
                gt_i2Ti1 = None
            # TODO(johnwlambert): decompose this so what happens in the loop is a separate method
            (
                i2Ri1,
                i2Ui1,
                v_corr_idxs,
                two_view_report,
                two_view_report_pp,
            ) = self.two_view_estimator.create_computation_graph(
                keypoints_graph_list[i1],
                keypoints_graph_list[i2],
                descriptors_graph_list[i1],
                descriptors_graph_list[i2],
                camera_intrinsics_graph[i1],
                camera_intrinsics_graph[i2],
                image_shape_graph[i1],
                image_shape_graph[i2],
                gt_i2Ti1,
            )
            # Index all two-view results by the (i1, i2) pair they belong to.
            i2Ri1_graph_dict[(i1, i2)] = i2Ri1
            i2Ui1_graph_dict[(i1, i2)] = i2Ui1
            v_corr_idxs_graph_dict[(i1, i2)] = v_corr_idxs
            two_view_reports_dict[(i1, i2)] = two_view_report
            two_view_reports_pp_dict[(i1, i2)] = two_view_report_pp
            if self._save_two_view_correspondences_viz:
                auxiliary_graph_list.append(
                    dask.delayed(viz_utils.save_twoview_correspondences_viz)(
                        image_graph[i1],
                        image_graph[i2],
                        keypoints_graph_list[i1],
                        keypoints_graph_list[i2],
                        v_corr_idxs,
                        two_view_report=two_view_report,
                        file_path=os.path.join(PLOT_CORRESPONDENCE_PATH, f"{i1}_{i2}.jpg"),
                    )
                )
        # persist all front-end metrics and its summary
        auxiliary_graph_list.append(
            dask.delayed(save_full_frontend_metrics)(two_view_reports_dict, image_graph, filename="verifier_full.json")
        )
        # Front-end summary metrics are only meaningful when GT poses are available.
        if gt_cameras_graph is not None:
            metrics_graph_list.append(
                dask.delayed(two_view_estimator.aggregate_frontend_metrics)(
                    two_view_reports_dict, self._pose_angular_error_thresh, metric_group_name="verifier_summary"
                )
            )
            metrics_graph_list.append(
                dask.delayed(two_view_estimator.aggregate_frontend_metrics)(
                    two_view_reports_pp_dict,
                    self._pose_angular_error_thresh,
                    metric_group_name="inlier_support_processor_summary",
                )
            )
        # as visualization tasks are not to be provided to the user, we create a
        # dummy computation of concatenating viz tasks with the output graph,
        # forcing computation of viz tasks. Doing this here forces the
        # frontend's auxiliary tasks to be computed before the multi-view stage.
        keypoints_graph_list = dask.delayed(lambda x, y: (x, y))(keypoints_graph_list, auxiliary_graph_list)[0]
        auxiliary_graph_list = []
        # ensure cycle consistency in triplets
        # TODO: add a get_computational_graph() method to ViewGraphOptimizer
        # TODO(johnwlambert): use a different name for variable, since this is something different
        i2Ri1_graph_dict, i2Ui1_graph_dict, v_corr_idxs_graph_dict, rcc_metrics_graph = dask.delayed(
            cycle_consistency.filter_to_cycle_consistent_edges, nout=4
        )(
            i2Ri1_graph_dict,
            i2Ui1_graph_dict,
            v_corr_idxs_graph_dict,
            two_view_reports_dict,
            EdgeErrorAggregationCriterion.MEDIAN_EDGE_ERROR,
        )
        metrics_graph_list.append(rcc_metrics_graph)
        def _filter_dict_keys(dict: Dict[Any, Any], ref_dict: Dict[Any, Any]) -> Dict[Any, Any]:
            """Return a subset of a dictionary based on keys present in the reference dictionary."""
            valid_keys = list(ref_dict.keys())
            return {k: v for k, v in dict.items() if k in valid_keys}
        if gt_cameras_graph is not None:
            # Restrict reports to the edges that survived cycle-consistency filtering.
            two_view_reports_dict_cycle_consistent = dask.delayed(_filter_dict_keys)(
                dict=two_view_reports_dict, ref_dict=i2Ri1_graph_dict
            )
            metrics_graph_list.append(
                dask.delayed(two_view_estimator.aggregate_frontend_metrics)(
                    two_view_reports_dict_cycle_consistent,
                    self._pose_angular_error_thresh,
                    metric_group_name="cycle_consistent_frontend_summary",
                )
            )
            auxiliary_graph_list.append(
                dask.delayed(save_full_frontend_metrics)(
                    two_view_reports_dict_cycle_consistent,
                    image_graph,
                    filename="cycle_consistent_frontend_full.json",
                )
            )
        # Note: the MultiviewOptimizer returns BA input and BA output that are aligned to GT via Sim(3).
        (ba_input_graph, ba_output_graph, optimizer_metrics_graph) = self.multiview_optimizer.create_computation_graph(
            image_graph,
            num_images,
            keypoints_graph_list,
            i2Ri1_graph_dict,
            i2Ui1_graph_dict,
            v_corr_idxs_graph_dict,
            camera_intrinsics_graph,
            gt_cameras_graph,
        )
        # aggregate metrics for multiview optimizer
        if optimizer_metrics_graph is not None:
            metrics_graph_list.extend(optimizer_metrics_graph)
        # Save metrics to JSON and generate HTML report.
        auxiliary_graph_list.extend(save_metrics_reports(metrics_graph_list))
        # Modify BA input, BA output, and GT poses to have point clouds and frustums aligned with x,y,z axes.
        gt_poses_graph = (
            [dask.delayed(lambda x: x.pose())(cam) for cam in gt_cameras_graph] if gt_cameras_graph else None
        )
        ba_input_graph, ba_output_graph, gt_poses_graph = dask.delayed(align_estimated_gtsfm_data, nout=3)(
            ba_input_graph, ba_output_graph, gt_poses_graph
        )
        if self._save_3d_viz:
            auxiliary_graph_list.extend(save_visualizations(ba_input_graph, ba_output_graph, gt_poses_graph))
        if self._save_gtsfm_data:
            auxiliary_graph_list.extend(save_gtsfm_data(image_graph, ba_input_graph, ba_output_graph))
        # as visualization tasks are not to be provided to the user, we create a
        # dummy computation of concatenating viz tasks with the output graph,
        # forcing computation of viz tasks
        output_graph = dask.delayed(lambda x, y: (x, y))(ba_output_graph, auxiliary_graph_list)
        # return the entry with just the sfm result
        return output_graph[0]
def align_estimated_gtsfm_data(
    ba_input: GtsfmData, ba_output: GtsfmData, gt_pose_graph: List[Pose3]
) -> Tuple[GtsfmData, GtsfmData, List[Pose3]]:
    """Axis-align the BA input, BA output, and GT poses for visualization.

    The alignment transform is estimated from the BA output's point cloud and
    applied, as a unit-scale Sim(3), to all three inputs so that point clouds
    and camera frustums line up with the x,y,z axes.

    Args:
        ba_input: GtsfmData input to bundle adjustment.
        ba_output: GtsfmData output from bundle adjustment.
        gt_pose_graph: list of GT camera poses.

    Returns:
        Axis-aligned copies of ba_input, ba_output, and the GT pose list.
    """
    alignment_pose = ellipsoid_utils.get_ortho_axis_alignment_transform(ba_output)
    # Wrap the rigid alignment as a Sim(3) with unit scale so it can be applied
    # uniformly to GtsfmData objects and individual poses.
    alignment_sim3 = Similarity3(R=alignment_pose.rotation(), t=alignment_pose.translation(), s=1.0)
    aligned_input = ba_input.apply_Sim3(alignment_sim3)
    aligned_output = ba_output.apply_Sim3(alignment_sim3)
    aligned_gt_poses = [alignment_sim3.transformFrom(wTi) for wTi in gt_pose_graph]
    return aligned_input, aligned_output, aligned_gt_poses
def save_visualizations(
    ba_input_graph: Delayed, ba_output_graph: Delayed, gt_pose_graph: Optional[List[Delayed]]
) -> List[Delayed]:
    """Wrap visualization-saving tasks for SfmData and camera poses as delayed nodes.

    Args:
        ba_input_graph: Delayed GtsfmData input to bundle adjustment.
        ba_output_graph: Delayed GtsfmData output from bundle adjustment.
        gt_pose_graph: Delayed ground truth poses.

    Returns:
        A list of Delayed objects, one per saved visualization.
    """
    # Three independent plots: pre-BA scene, post-BA scene, and the pose comparison.
    return [
        dask.delayed(viz_utils.save_sfm_data_viz)(ba_input_graph, PLOT_BA_INPUT_PATH),
        dask.delayed(viz_utils.save_sfm_data_viz)(ba_output_graph, PLOT_RESULTS_PATH),
        dask.delayed(viz_utils.save_camera_poses_viz)(
            ba_input_graph, ba_output_graph, gt_pose_graph, PLOT_RESULTS_PATH
        ),
    ]
def save_gtsfm_data(image_graph: Delayed, ba_input_graph: Delayed, ba_output_graph: Delayed) -> List[Delayed]:
    """Wrap COLMAP-text export of the BA input and BA output as delayed nodes.

    Args:
        image_graph: input images wrapped as Delayed objects.
        ba_input_graph: GtsfmData input to bundle adjustment wrapped as Delayed.
        ba_output_graph: GtsfmData output of bundle adjustment wrapped as Delayed.

    Returns:
        A list of delayed export tasks (two per destination directory).
    """
    delayed_saves = []
    # Write into both the canonical results folder and its React duplicate.
    for base_dir in [RESULTS_PATH, REACT_RESULTS_PATH]:
        # "ba_input" holds the data-association result; "ba_output" the optimized scene.
        for delayed_data, subdir in [(ba_input_graph, "ba_input"), (ba_output_graph, "ba_output")]:
            delayed_saves.append(
                dask.delayed(io_utils.export_model_as_colmap_text)(
                    delayed_data, image_graph, save_dir=os.path.join(base_dir, subdir)
                )
            )
    return delayed_saves
def save_metrics_reports(metrics_graph_list: Delayed) -> List[Delayed]:
    """Wrap metrics persistence (JSON dumps + HTML report) as delayed nodes.

    Args:
        metrics_graph_list: list of GtsfmMetricsGroup from different modules wrapped as Delayed.

    Returns:
        List of delayed save tasks (empty when there are no metrics).
    """
    if not metrics_graph_list:
        return []
    # JSON copies go to the canonical metrics folder and the React duplicate.
    delayed_saves = [
        dask.delayed(metrics_utils.save_metrics_as_json)(metrics_graph_list, path)
        for path in (METRICS_PATH, REACT_METRICS_PATH)
    ]
    # Single human-readable HTML report alongside the canonical JSON.
    delayed_saves.append(
        dask.delayed(metrics_report.generate_metrics_report_html)(
            metrics_graph_list, os.path.join(METRICS_PATH, "gtsfm_metrics_report.html")
        )
    )
    return delayed_saves
def save_full_frontend_metrics(
    two_view_report_dict: Dict[Tuple[int, int], TwoViewEstimationReport], images: List[Image], filename: str
) -> None:
    """Converts the TwoViewEstimationReports for all image pairs to a Dict and saves it as JSON.

    GT-dependent metrics (R_error_deg, U_error_deg, num_inliers_gt_model,
    inlier_ratio_gt_model) are None in the report when GT is unknown and are
    serialized as None in that case.

    Args:
        two_view_report_dict: front-end metrics for pairs of images.
        images: list of all images for this scene, in order of image/frame index.
        filename: file name to use when saving report to JSON.
    """
    metrics_list = []
    for (i1, i2), report in two_view_report_dict.items():
        # Bugfix: use explicit `is not None` checks instead of truthiness. A value
        # of exactly 0 (perfect rotation/translation estimate, or zero GT inliers)
        # is falsy and was previously serialized as None, silently discarding
        # valid measurements.
        metrics_list.append(
            {
                "i1": i1,
                "i2": i2,
                "i1_filename": images[i1].file_name,
                "i2_filename": images[i2].file_name,
                "rotation_angular_error": round(report.R_error_deg, PRINT_NUM_SIG_FIGS)
                if report.R_error_deg is not None
                else None,
                "translation_angular_error": round(report.U_error_deg, PRINT_NUM_SIG_FIGS)
                if report.U_error_deg is not None
                else None,
                "num_inliers_gt_model": report.num_inliers_gt_model,
                "inlier_ratio_gt_model": round(report.inlier_ratio_gt_model, PRINT_NUM_SIG_FIGS)
                if report.inlier_ratio_gt_model is not None
                else None,
                "inlier_ratio_est_model": round(report.inlier_ratio_est_model, PRINT_NUM_SIG_FIGS),
                "num_inliers_est_model": report.num_inliers_est_model,
            }
        )
    io_utils.save_json_file(os.path.join(METRICS_PATH, filename), metrics_list)
    # Save duplicate copy within the React folder so the web viewer can read it.
    io_utils.save_json_file(os.path.join(REACT_METRICS_PATH, filename), metrics_list)
|
#Code based on Andres code from November 2017
import numpy as np
from six.moves import xrange
def part2dens3d(part_pos, box_l, bin_x=128):
    """
    Bin particle positions onto a 3D density grid via np.histogramdd.
    :param part_pos: particle positions, shape (N, D) with N particles and D >= 3 dimensions
    :param box_l: box length in comoving Mpc/h; each axis is binned over [0, box_l]
    :param bin_x: number of bins per axis for the histogram
    :return: density field (3D array of particle counts per cell)
    """
    axis_range = [0, box_l]
    # Only the first three coordinate columns enter the histogram.
    coords = np.vstack((part_pos[:, 0], part_pos[:, 1], part_pos[:, 2])).T
    counts, _ = np.histogramdd(coords, bins=bin_x, range=[axis_range, axis_range, axis_range])
    return counts
def part2dens2d(part_pos, box_l, bin_x=128):
    """
    Calculate 2D matter density using numpy histograms.
    :param part_pos: particle positions in the shape (N, D), where N is particle number and D >= 2
    :param box_l: box length in comoving Mpc/h; both axes are binned over [0, box_l]
    :param bin_x: desired bins per axis for the histogram
    :return: density field (2D array of particle counts per cell)
    """
    # Bugfix: the original passed three per-axis ranges to np.histogram2d, but a
    # 2D histogram takes exactly two; numpy raises
    # "range argument must have one entry per dimension", so this function
    # always crashed.
    hist, _edgex, _edgey = np.histogram2d(
        part_pos[:, 0], part_pos[:, 1], bins=bin_x, range=[[0, box_l], [0, box_l]]
    )
    return hist
def dens2overdens(density, mean_density=None):
    """
    Convert a density field to the corresponding overdensity (delta) field.
    :param density: input density field
    :param mean_density: if given (and non-zero), delta = (density - mean(density)) / mean_density;
                         otherwise the field's own mean is used: delta = density / mean - 1
    :return: overdensity field
    """
    #assert np.ndim(density) == 3, 'density is not 3D'
    if mean_density:
        # Caller supplied an explicit (non-zero) normalisation.
        return (density - np.mean(density)) / mean_density
    sample_mean = np.mean(density)
    if sample_mean == 0.:
        # An all-zero field has no meaningful overdensity; return zeros.
        return np.zeros(shape=density.shape)
    return density / sample_mean - 1.
def power_spectrum(field_x, box_l, bin_k, field_y=None, log_sampling=True):
"""
Measures the mass power spectrum of a 2D or 3D input field for a given number of bins in Fourier space.
:param field_x: 3D input field to compute the power spectrum of (typically the overdensity field), dimensionless
:param box_l: box length of image/cube/box or whatever, units of Mpc or Mpc/h
:param bin_k: number of bins in Fourier space
:return: power_k, k: 1D mass power spectrum of field_x, same units as [box_l]**3 and corresponding k values
"""
# assert np.ndim(field_x) == 3, 'field_x is not 3D'
box_pix = np.size(field_x, axis=0) # pixel number per axis
box_dim = np.ndim(field_x) # dimension
# This first 'paragraph' is to create masks of indices corresponding to one Fourier bin each
_freq = np.fft.fftfreq(n=box_pix, d=box_l / box_pix) * 2 * np.pi
_rfreq = np.fft.rfftfreq(n=box_pix, d=box_l / box_pix) * 2 * np.pi
if box_dim == 2:
_kx, _ky = np.meshgrid(_freq, _rfreq, indexing='ij')
_k_abs = np.sqrt(_kx ** 2. + _ky ** 2.)
elif box_dim == 3:
_kx, _ky, _kz = np.meshgrid(_freq, _freq, _rfreq, indexing='ij')
_k_abs = np.sqrt(_kx ** 2. + _ky ** 2. + _kz ** 2.)
else:
raise ValueError('field_x is not 2D or 3D')
# The following complicated line is actually only creating a 1D array spanning k-space logarithmically from minimum _k_abs to maximum.
# To start slightly below the minimum and finish slightly above the maximum I use ceil and floor.
# To ceil and floor not to the next integer but to the next 15th digit, I multiply by 1e15 before flooring and divide afterwards.
# Since the ceiled/floored value is actually the exponent used for the logspace, going to the next integer would be way too much.
if log_sampling:
_k_log = np.logspace(np.floor(np.log10(np.min(_k_abs[1:])) * 1.e15) / 1.e15,
np.ceil(np.log10(np.max(_k_abs[1:])) * 1.e15) / 1.e15, bin_k)
else:
_k_log = np.linspace(np.floor(np.min(_k_abs[1:]) * 1.e15) / 1.e15,
np.ceil(np.max(_k_abs[1:]) * 1.e15) / 1.e15, bin_k)
X = np.fft.rfftn(np.fft.fftshift(field_x)) * (box_l / box_pix) ** box_dim
if field_y is not None:
Y = np.conj(np.fft.rfftn(np.fft.fftshift(field_y))) * (box_l / box_pix) ** box_dim
power_k = np.empty(np.size(_k_log) - 1)
mask_tot = np.zeros(_k_abs.shape)
for i in xrange(np.size(_k_log) - 1):
mask = (_k_abs >= _k_log[i]) & (_k_abs < _k_log[i + 1])
if field_y is None:
if np.sum(mask):
power_k[i] = np.mean(np.abs(X[mask] ** 2)) / box_l ** box_dim
else:
power_k[i] = np.nan
else:
if np.sum(mask):
power_k[i] = np.mean(np.real(X[mask] * np.conj(Y[mask]))) / box_l ** box_dim
else:
power_k[i] = np.nan
# mask_tot = mask_tot+mask
# print(np.sum(mask_tot>1))
# print(np.sum(mask_tot==0))
# print(_k_log)
# print(np.min(_k_abs), np.max(_k_abs))
# print(np.sum(mask_tot==0)/np.prod(mask_tot.shape))
k = (_k_log[1:] + _k_log[:-1]) / 2
return power_k, k |
<gh_stars>0
# -*- coding: utf-8 -*-
import pygame
from src.scenes.Stage import *
from src.scenes.stage.StageState import *
from src.scenes.stage.OnBossRoomState import *
# -------------------------------------------------
# Clase OnTransitionState
class OnTransitionState(StageState):
    """Stage state that scrolls the viewport (and nudges the player) from the
    current room into an adjacent one, then hands control back to the
    appropriate in-room state (boss / small / normal room)."""
    def __init__(self, connection, player):
        # `connection` describes the room change: "to" is the destination room
        # key and "direction" is one of left/right/up/down.
        self.connection = connection
        # Remaining scroll distance for the viewport, in pixels (one full screen).
        self.scrollX = SCREEN_WIDTH
        self.scrollY = SCREEN_HEIGHT
        # Remaining distance the player is moved so they end up just inside the
        # doorway of the new room (sprite size plus a 24 px margin).
        self.scrollPlayerX = player.rect.width+24
        self.scrollPlayerY = player.rect.height+24
        # Viewport scroll speed (pixels per time unit); the player moves slower.
        self.speed = 0.5
        self.speedPlayer = self.speed*3/5
    def update(self, time, stage):
        # Horizontal transition: scroll along the X axis.
        if self.connection["direction"] == "left" or self.connection["direction"] == "right":
            shiftX = int(self.speed*time)
            shiftPlayerX = int(self.speedPlayer*time)
            self.scrollX -= shiftX
            # Move the viewport according to the transition direction.
            if self.connection["direction"] == "right":
                stage.viewport = stage.viewport.move(shiftX, 0)
            else:
                stage.viewport = stage.viewport.move(-shiftX, 0)
            # Nudge the player so they end up at the entrance of the new room.
            self.scrollPlayerX -= shiftPlayerX
            if self.scrollPlayerX > 0:
                if self.connection["direction"] == "right":
                    stage.player.increment_position((shiftPlayerX, 0))
                else:
                    stage.player.increment_position((-shiftPlayerX, 0))
            # Once the map has finished scrolling, switch the current room and
            # return to the matching in-room state.
            if self.scrollX <= 0:
                dstRoom = self.connection["to"]
                stage.currentRoom = dstRoom
                if hasattr(stage.rooms[dstRoom], 'boss'):
                    stage.set_state(OnBossRoomState(stage))
                else:
                    if stage.rooms[dstRoom].small:
                        stage.set_state(stage.smallRoomState)
                    else:
                        stage.set_state(stage.inRoomState)
        else:
            # Vertical transition (up/down): same logic along the Y axis.
            shiftY = int(self.speed*time)
            shiftPlayerY = int(self.speedPlayer*time)
            self.scrollY -= shiftY
            # Move the viewport according to the transition direction.
            if self.connection["direction"] == "down":
                stage.viewport = stage.viewport.move(0, shiftY)
            else:
                stage.viewport = stage.viewport.move(0, -shiftY)
            # Nudge the player so they end up at the entrance of the new room.
            self.scrollPlayerY -= shiftPlayerY
            if self.scrollPlayerY > 0:
                if self.connection["direction"] == "down":
                    stage.player.increment_position((0, shiftPlayerY))
                else:
                    stage.player.increment_position((0, -shiftPlayerY))
            # Once the map has finished scrolling, switch the current room and
            # return to the matching in-room state.
            if self.scrollY <= 0:
                dstRoom = self.connection["to"]
                stage.currentRoom = dstRoom
                if hasattr(stage.rooms[dstRoom], 'boss'):
                    stage.set_state(OnBossRoomState(stage))
                else:
                    if stage.rooms[dstRoom].small:
                        stage.set_state(stage.smallRoomState)
                    else:
                        stage.set_state(stage.inRoomState)
    def events(self, time, stage):
        # Player input is ignored while the transition animation runs.
        pass
    def draw(self, screen, stage):
        currentRoom = stage.rooms[stage.currentRoom]
        nextRoom = stage.rooms[self.connection["to"]]
        # Background colour.
        screen.fill((0, 0, 0))
        # Draw all sprites onto a copy of the stage map so both rooms are visible
        # while the viewport slides between them.
        newImage = stage.image.copy()
        # Magic window.
        currentRoom.magicWindowGroup.draw(newImage)
        nextRoom.magicWindowGroup.draw(newImage)
        # Doors.
        currentRoom.doors.draw(newImage)
        nextRoom.doors.draw(newImage)
        # Collectables.
        currentRoom.collectables.draw(newImage)
        nextRoom.collectables.draw(newImage)
        # Enemies.
        currentRoom.enemies.draw(newImage)
        nextRoom.enemies.draw(newImage)
        # Interactive sprites.
        currentRoom.unlockedDoorsGroup.draw(newImage)
        nextRoom.unlockedDoorsGroup.draw(newImage)
        currentRoom.upgradesGroup.draw(newImage)
        nextRoom.upgradesGroup.draw(newImage)
        # Player.
        stage.player.draw(newImage)
        # Blit only the portion of the map that falls inside the viewport.
        screen.blit(newImage, (0,0), stage.viewport)
|
import tweepy
from tweepy import OAuthHandler
import json
import datetime as dt
import time
import os
import sys
'''
In order to use this script you should register a data-mining application
with Twitter. Good instructions for doing so can be found here:
http://marcobonzanini.com/2015/03/02/mining-twitter-data-with-python-part-1/
After doing this you can copy and paste your unique consumer key,
consumer secret, access token, and access secret into the load_api()
function below.
The main() function can be run by executing the command:
python twitter_search.py
I used Python 3 and tweepy version 3.5.0. You will also need the other
packages imported above.
'''
def load_api():
    '''Authenticate with Twitter and return a tweepy API handle.

    Fill in the four credential strings below; see the module docstring for
    instructions on registering an application to obtain them.
    '''
    credentials = {
        'consumer_key': '',
        'consumer_secret': '',
        'access_token': '',
        'access_secret': '',
    }
    auth = OAuthHandler(credentials['consumer_key'], credentials['consumer_secret'])
    auth.set_access_token(credentials['access_token'], credentials['access_secret'])
    # load the twitter API via tweepy
    return tweepy.API(auth)
def tweet_search(api, query, max_tweets, max_id, since_id, geocode):
    '''Collect up to `max_tweets` statuses matching `query`, paging backwards
    from `max_id` (exclusive) down to `since_id`. Returns the list of
    tweepy.models.Status objects and the updated max_id. On a rate-limit
    error, sleeps 15 minutes and stops. Note: `geocode` is currently unused
    (the filter is commented out below).'''
    collected = []
    while len(collected) < max_tweets:
        batch_size = max_tweets - len(collected)
        try:
            batch = api.search(q=query, count=batch_size,
                               since_id=str(since_id),
                               max_id=str(max_id - 1))
            # geocode=geocode)
            print('found', len(batch), 'tweets')
            if not batch:
                print('no tweets found')
                break
            collected.extend(batch)
            # Continue paging from the oldest tweet seen so far.
            max_id = batch[-1].id
        except tweepy.TweepError:
            print('exception raised, waiting 15 minutes')
            print('(until:', dt.datetime.now()+dt.timedelta(minutes=15), ')')
            time.sleep(15*60)
            break  # stop the loop
    return collected, max_id
def get_tweet_id(api, date='', days_ago=9, query='a'):
    '''Return the ID of a tweet from a given day, usable as a search boundary.

    If `date` (a datetime) is given, the tweet comes from the start of that
    day; otherwise it comes from `days_ago` days before now (Twitter's search
    window only reaches back ~9 days). `query` must be non-empty and defaults
    to a very common word.'''
    if date:
        # Search "until" the next day, so results start at the given day.
        target = date + dt.timedelta(days=1)
    else:
        target = dt.datetime.now() - dt.timedelta(days=days_ago)
    day_str = '{0}-{1:0>2}-{2:0>2}'.format(target.year, target.month, target.day)
    if date:
        results = api.search(q=query, count=1, until=day_str)
    else:
        # Fetch a small batch and report the boundary timestamp.
        results = api.search(q=query, count=10, until=day_str)
        print('search limit (start/stop):', results[0].created_at)
    return results[0].id
def write_tweets(tweets, filename):
    '''Append each tweet's raw JSON payload to `filename`, one object per line.'''
    with open(filename, 'a') as out:
        for status in tweets:
            out.write(json.dumps(status._json))
            out.write('\n')
def main():
    '''Continuously search for tweets matching each phrase in
    `search_phrases`, created within the configured day window, and append
    the raw JSON to one file per phrase/day. Runs each phrase until the
    time limit is reached or three empty result batches arrive in a row.'''
    ''' search variables: '''
    search_phrases = ['Pavelski', 'Lucic',
                      'Ovechkin', 'Giroux',
                      'Jagr', '<NAME>',
                      'Kucherov', 'Mrazek',
                      'Seguin', 'Pominville',
                      'Crosby', 'Lundqvist',
                      'Tarasenko', '<NAME>',
                      '<NAME>', 'Forsberg']
    time_limit = 1.5                         # runtime limit in hours (per search phrase)
    max_tweets = 100                         # number of tweets per search (will be
                                             # iterated over) - maximum is 100
    min_days_old, max_days_old = 6, 7        # search limits e.g., from 7 to 8
                                             # gives current weekday from last week,
                                             # min_days_old=0 will search from right now
    USA = '39.8,-95.583068847656,2500km'     # this geocode includes nearly all American
                                             # states (and a large portion of Canada)
                                             # NOTE(review): tweet_search currently
                                             # ignores geocode (filter commented out there)
    # loop over search items,
    # creating a new file for each
    for search_phrase in search_phrases:
        print('Search phrase =', search_phrase)
        ''' other variables '''
        name = search_phrase.split()[0]
        # files are stored as <name>/<name>_<date-range>.json
        json_file_root = name + '/' + name
        os.makedirs(os.path.dirname(json_file_root), exist_ok=True)
        read_IDs = False
        # open a file in which to store the tweets
        if max_days_old - min_days_old == 1:
            # single-day window: file is named after that one day
            d = dt.datetime.now() - dt.timedelta(days=min_days_old)
            day = '{0}-{1:0>2}-{2:0>2}'.format(d.year, d.month, d.day)
        else:
            # multi-day window: file name spans the date range
            d1 = dt.datetime.now() - dt.timedelta(days=max_days_old-1)
            d2 = dt.datetime.now() - dt.timedelta(days=min_days_old)
            day = '{0}-{1:0>2}-{2:0>2}_to_{3}-{4:0>2}-{5:0>2}'.format(
                  d1.year, d1.month, d1.day, d2.year, d2.month, d2.day)
        json_file = json_file_root + '_' + day + '.json'
        if os.path.isfile(json_file):
            print('Appending tweets to file named: ',json_file)
            read_IDs = True
        # authorize and load the twitter API
        api = load_api()
        # set the 'starting point' ID for tweet collection
        if read_IDs:
            # open the json file and get the latest tweet ID
            with open(json_file, 'r') as f:
                lines = f.readlines()
                max_id = json.loads(lines[-1])['id']
                print('Searching from the bottom ID in file')
        else:
            # get the ID of a tweet that is min_days_old
            if min_days_old == 0:
                max_id = -1
            else:
                max_id = get_tweet_id(api, days_ago=(min_days_old-1))
        # set the smallest ID to search for
        since_id = get_tweet_id(api, days_ago=(max_days_old-1))
        print('max id (starting point) =', max_id)
        print('since id (ending point) =', since_id)
        ''' tweet gathering loop '''
        start = dt.datetime.now()
        end = start + dt.timedelta(hours=time_limit)
        count, exitcount = 0, 0
        while dt.datetime.now() < end:
            count += 1
            print('count =',count)
            # collect tweets and update max_id
            tweets, max_id = tweet_search(api, search_phrase, max_tweets,
                                          max_id=max_id, since_id=since_id,
                                          geocode=USA)
            # write tweets to file in JSON format
            if tweets:
                write_tweets(tweets, json_file)
                exitcount = 0
            else:
                # three consecutive empty batches => this phrase is exhausted
                exitcount += 1
                if exitcount == 3:
                    if search_phrase == search_phrases[-1]:
                        sys.exit('Maximum number of empty tweet strings reached - exiting')
                    else:
                        print('Maximum number of empty tweet strings reached - breaking')
                        break
if __name__ == "__main__":
    main()
|
import tkinter as tk
from tkinter import ttk, INSERT, DISABLED, GROOVE, CURRENT, Radiobutton, \
NORMAL, ACTIVE, messagebox, Menu, IntVar, Checkbutton, FLAT, PhotoImage, Label,\
SOLID, N, S, W, E, END, LEFT, Scrollbar, RIGHT, Y, BOTH
import Globals
import re
import CoMet_functions, intro_tab_functions, Map_Dose
import Dose_response_functions
from PIL import Image, ImageTk
import os
import sys
# ---- Main window configuration ----
Globals.form.title("FIDORA")
#lobals.form.geometry("1250x600")
Globals.form.configure(bg='#ffffff')
Globals.form.state('zoomed')
# Window/taskbar icon (both calls used for broader platform coverage).
Globals.form.tk.call('wm', 'iconphoto', Globals.form._w, PhotoImage(file='logo_fidora.png'))
Globals.form.iconbitmap(default='logo_fidora.png')
# Application logo displayed at the top-left of the window.
load = Image.open("fidora_logo.png")
render = ImageTk.PhotoImage(load)
label = Label(Globals.form, image=render)
label.image = render  # keep a reference so Tk does not garbage-collect the image
label.grid(row = 0, column = 0, sticky=W)# place(relwidt=0.61,relheight=0.15, relx=0.02, rely=0.0)
label.config(bg='#FFFFFF')
# Register the application tabs (one per tool) on the shared notebook.
Globals.tab_parent.add(Globals.intro_tab, text='FIDORA')
Globals.tab_parent.add(Globals.tab1, text='CoMet')
Globals.tab_parent.add(Globals.tab2, text='Dose-response')
Globals.tab_parent.add(Globals.tab3, text='Map dose')
Globals.tab_parent.add(Globals.tab4, text='Profiles')
# ---- Custom ttk theme: white background, teal left-hand tab buttons ----
style = ttk.Style()
style.theme_create('MyStyle', parent= 'classic', settings={
    ".": {
        "configure": {
            "background": '#FFFFFF', # All colors except for active tab-button
            "font": 'red'
        }
    },
    "Horizontal.TProgressbar":{
        "configure": {
            "background": '#2C8EAD',
            "bordercolor": '#32A9CE',
            "troughcolor": "#ffffff",
        }
    },
    "TNotebook": {
        "configure": {
            "background":'#ffffff', # color behind the notebook
            "tabmargins": [5, 5, 10, 10], # [left margin, upper margin, right margin, margin beetwen tab and frames]
            "tabposition": 'wn', # tabs stacked vertically on the west side
            "borderwidth": 0,
        }
    },
    "TNotebook.Tab": {
        "configure": {
            "background": '#0A7D76', # Color of non selected tab-button
            "foreground": '#ffffff',
            "padding": [30,35, 20,35], # [space beetwen text and horizontal tab-button border, space between text and vertical tab_button border]
            "font": ('#FFFFFF', '15'),
            "borderwidth": 1,
            "equalTabs": True,
            "width": 13
        },
        "map": {
            "background": [("selected", '#02B9A5')], # Color of active tab
            "expand": [("selected", [1, 1, 1, 0])] # [expanse of text]
        }
    }
})
style.theme_use('MyStyle')
# ---- Menu bar: File and Help menus (most entries are placeholders for now) ----
menubar = Menu(Globals.form)
filemenu = Menu(menubar, tearoff=0)
# NOTE(review): "Restart" and "Open" are wired to a no-op placeholder callback.
filemenu.add_command(label="Restart", command=CoMet_functions.nothingButton)
filemenu.add_command(label="Open", command=CoMet_functions.nothingButton)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=Globals.form.quit)
menubar.add_cascade(label="File", menu=filemenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="Help", command=CoMet_functions.nothingButton)
helpmenu.add_command(label="About", command=CoMet_functions.nothingButton)
menubar.add_cascade(label="Help", menu=helpmenu)
Globals.form.config(menu=menubar)
# ---- Pre-loaded button/border images ----
# Kept at module scope so Tk retains references for the lifetime of the app.
upload_button_file = "uploadbutton2.png"
upload_button_image = ImageTk.PhotoImage(file=upload_button_file)
select_folder_button_file = "select_folder.png"
select_folder_image = ImageTk.PhotoImage(file=select_folder_button_file)
CoMet_border_dark_file = "border.png"
CoMet_border_dark = ImageTk.PhotoImage(file=CoMet_border_dark_file)
CoMet_border_light_file = "border_light.png"
CoMet_border_light = ImageTk.PhotoImage(file=CoMet_border_light_file)
CoMet_save_button_file = "save.png"
CoMet_save_button = ImageTk.PhotoImage(file=CoMet_save_button_file)
CoMet_correct_button_file = "correct_button.png"
CoMet_correct_button_image= ImageTk.PhotoImage(file=CoMet_correct_button_file)
###################################### INTRO TAB #################################################
#scrollbar = Scrollbar(Globals.intro_tab)
#scrollbar.pack(side=RIGHT, fill=Y)#grid(row=0, column=1, sticky=N+S+E)#pack(side=RIGHT, fill=Y)
#Globals.intro_tab.grid_columnconfigure(0, weight=0)
#Globals.intro_tab.grid_rowconfigure(0, weight=0)
# Canvas hosting the intro-tab content (scrollbar support currently disabled above).
intro_tab_canvas = tk.Canvas(Globals.intro_tab)#, yscrollcommand=scrollbar.set)
intro_tab_canvas.config(bg='#ffffff', bd = 0, relief=FLAT, highlightthickness=0)
# Card describing the CoMet tool: title, blurb, "read more" link and icon.
tab1_text_box = tk.Frame(intro_tab_canvas, height=230, width=400)
tab1_text_box.grid(row=0, column=0, pady=(30,30), padx=(95,0))
tab1_text_box.config(bd=0, bg='#E5f9ff')
# Card title (read-only Text widget styled as a label).
tab1_title_text = tk.Text(tab1_text_box, height=1, width=6)
tab1_title_text.insert(END, "CoMet")
tab1_title_text.grid(in_=tab1_text_box, row=0, column = 0, pady=(15,5), padx=(10,10))
tab1_title_text.config(state=DISABLED, bd=0, bg ='#E5f9ff', fg='#130e07', font=('calibri', '25', 'bold'))
tab1_text_box.grid_columnconfigure(0,weight=1)
tab1_text_box.grid_rowconfigure(0,weight=1)
# Card description text.
tab1_text = tk.Text(tab1_text_box, height=4, width=43)
tab1_text.grid(in_=tab1_text_box, row=1, column=0, sticky=N+S+W+E, pady=(0,0), padx=(20,20))
tab1_text.insert(INSERT,"Correct your scanned images using CoMet. A method \ndeveloped to correct for non-uniformity introduced\n\
by the scanner. The correction is based on absolute \nsubtraction.")
tab1_text.config(state=DISABLED, bd=0, bg='#E5f9ff', fg='#130E07', font=('calibri', '13'))
tab1_text_box.grid_columnconfigure(1,weight=1)
tab1_text_box.grid_rowconfigure(1,weight=1)
# "Read more..." link overlaid on the description area.
tab1_readmore_text = tk.Text(tab1_text_box, height=1, width=1)
tab1_readmore_text.grid(row=1, column=0, sticky = N+S+W+E, pady=(65,0), padx = (110,0))
tab1_readmore_text.insert(INSERT,"Read more...")
tab1_readmore_text.config(state=DISABLED, bd=0, bg='#E5f9ff', fg='#130E07', font=('calibri', '12', 'bold'))
tab1_text_box.grid_columnconfigure(2,weight=1)
tab1_text_box.grid_rowconfigure(2,weight=1)
# CoMet icon at the bottom of the card.
tab1_box_figure = Image.open("CoMet_ikon.PNG")
tab1_figure = ImageTk.PhotoImage(tab1_box_figure)
tab1_figure_label = Label(tab1_text_box, image=tab1_figure)
tab1_figure_label.image = tab1_figure  # keep a reference so Tk does not garbage-collect the image
tab1_figure_label.grid(row=3, sticky=N+S+W+E, pady=(0,10))
tab1_figure_label.config(bg='#E5f9ff')
tab1_text_box.grid_columnconfigure(3, weight=1)
tab1_text_box.grid_rowconfigure(3, weight=1)
"""
tab1_readmore = tk.Button(tab1_text_box, text='Read more',cursor='hand2',font=('calibri', '12', 'bold'),\
relief=FLAT, state=tk.ACTIVE, width = 15, command=intro_tab_functions.readMore)
tab1_readmore.place(relwidth=0.25, relheight=0.13, relx=0.27, rely=0.054)
"""
tab2_text_box = tk.Frame(intro_tab_canvas, height=230, width=400)
tab2_text_box.grid(row=0, column=1, pady=(30,30), padx=(85,40))
tab2_text_box.config(bd=0, bg='#E5f9ff')
tab2_title = tk.Text(tab2_text_box, height=1, width=12)
tab2_title.grid(in_=tab2_text_box, row=0, column = 0, pady=(15,5), padx=(10,10))
tab2_title.insert(INSERT, "Dose response")
tab2_title.config(state=DISABLED, bd=0, bg = '#E5f9ff', fg='#130e07', font=('calibri', '25', 'bold'))
tab2_text_box.grid_columnconfigure(0, weight=1)
tab2_text_box.grid_rowconfigure(0, weight=1)
tab2_text = tk.Text(tab2_text_box, height=4, width=43)
tab2_text.grid(in_=tab2_text_box, row=1, column=0, sticky=N+S+W+E, pady=(0,0), padx=(20,20))
tab2_text.insert(INSERT,"Make a calibration curve and read the dose response \nfunction. For every new batch of GafChromic film\
\nthere is a need to update the dose response. All three \nchannels (RGB) are read and calculated.")
tab2_text.config(state=DISABLED, bd=0, bg='#E5f9ff', fg='#130E07', font=('calibri', '13'))
tab2_text_box.grid_columnconfigure(1, weight=1)
tab2_text_box.grid_rowconfigure(1, weight=1)
tab2_readmore_text = tk.Text(tab2_text_box, height=1, width=1)
tab2_readmore_text.grid(row=1, column=0, sticky = N+S+W+E, pady=(65,0), padx = (300,0))
tab2_readmore_text.insert(INSERT,"Read more...")
tab2_readmore_text.config(state=DISABLED, bd=0, bg='#E5f9ff', fg='#130E07', font=('calibri', '12', 'bold'))
tab2_text_box.grid_columnconfigure(2, weight=1)
tab2_text_box.grid_rowconfigure(2, weight=1)
tab2_box_figure = Image.open("kalibrering_ikon.PNG")
tab2_figure = ImageTk.PhotoImage(tab2_box_figure)
tab2_figure_label = Label(tab2_text_box, image=tab2_figure)
tab2_figure_label.image = tab2_figure
tab2_figure_label.grid(row=3, sticky=N+S+W+E, pady=(0,10))
tab2_figure_label.config(bg='#E5f9ff')
tab2_text_box.grid_columnconfigure(3, weight=1)
tab2_text_box.grid_rowconfigure(3, weight=1)
tab3_text_box = tk.Frame(intro_tab_canvas, height=230, width=400)
tab3_text_box.grid(row=1, column=0, pady=(0,30), padx=(95,0))
tab3_text_box.config(bd=0, bg='#E5f9ff')
tab3_title = tk.Text(tab3_text_box, height=1, width=8)
tab3_title.grid(in_=tab3_text_box, row=0, column = 0, pady=(15,5), padx=(10,10))
tab3_title.insert(INSERT, "Map dose")
tab3_title.config(state=DISABLED, bd=0, bg = '#E5f9ff', fg='#130e07', font=('calibri', '25', 'bold'))
tab3_text_box.grid_columnconfigure(0, weight=1)
tab3_text_box.grid_rowconfigure(0, weight=1)
tab3_text = tk.Text(tab3_text_box, height=4, width=43)
tab3_text.grid(in_=tab3_text_box, row=1, column=0, sticky=N+S+W+E, pady=(0,0), padx=(20,20))
tab3_text.insert(INSERT,"Compare dose distribution in your treatment plan \nwith the measures distribution by the Gafchromic \nfilm.\
Using the gamma evaluation index a map of \npass/fail and variations is visualised.")
tab3_text.config(state=DISABLED, bd=0, bg='#E5f9ff', fg='#130E07', font=('calibri', '13'))
tab3_text_box.grid_columnconfigure(1, weight=1)
tab3_text_box.grid_rowconfigure(1, weight=1)
tab3_readmore_text = tk.Text(tab3_text_box, height=1, width=1)
tab3_readmore_text.grid(row=1, column=0, sticky = N+S+W+E, pady=(65,0), padx = (285,0))
tab3_readmore_text.insert(INSERT,"Read more...")
tab3_readmore_text.config(state=DISABLED, bd=0, bg='#E5f9ff', fg='#130E07', font=('calibri', '12', 'bold'))
tab3_text_box.grid_columnconfigure(2, weight=1)
tab3_text_box.grid_rowconfigure(2, weight=1)
tab3_box_figure = Image.open("gammaTest_ikon.PNG")
tab3_figure = ImageTk.PhotoImage(tab3_box_figure)
tab3_figure_label = Label(tab3_text_box, image=tab3_figure)
tab3_figure_label.image = tab3_figure
tab3_figure_label.grid(row=3, sticky=N+S+W+E, pady=(0,10))
tab3_figure_label.config(bg='#E5f9ff')
tab3_text_box.grid_columnconfigure(3, weight=1)
tab3_text_box.grid_rowconfigure(3, weight=1)
tab4_text_box = tk.Frame(intro_tab_canvas, height=230, width=400)
tab4_text_box.grid(row=1, column=1, pady=(0,30), padx=(85,40))
tab4_text_box.config(bd=0, bg='#E5f9ff')
tab4_title = tk.Text(tab4_text_box, height=1, width=7)
tab4_title.grid(in_=tab4_text_box, row=0, column = 0, pady=(15,5), padx=(10,10))
tab4_title.insert(INSERT, "Profiles")
tab4_title.config(state=DISABLED, bd=0, bg = '#E5f9ff', fg='#130e07', font=('calibri', '25', 'bold'))
tab4_text_box.grid_columnconfigure(0,weight=1)
tab4_text_box.grid_rowconfigure(0, weight=1)
tab4_text = tk.Text(tab4_text_box, height=4, width=43)
tab4_text.grid(in_=tab4_text_box, row=1, column=0, sticky=N+S+W+E, pady=(0,0), padx=(20,20))
tab4_text.insert(INSERT,"Investigate the profiles measured using GafChromic \nfilm and compare with the profiles in your treatment \nplan.\
Using gamma evaluation an acceptance tube \ncan be places over the profile.")
tab4_text.config(state=DISABLED, bd=0, bg='#E5f9ff', fg='#130E07', font=('calibri', '13'))
tab4_text_box.grid_columnconfigure(1, weight=1)
tab4_text_box.grid_rowconfigure(1, weight=1)
tab4_readmore_text = tk.Text(tab4_text_box, height=1, width=1)
tab4_readmore_text.grid(row=1, column=0, sticky = N+S+W+E, pady=(65,0), padx = (235,0))
tab4_readmore_text.insert(INSERT,"Read more...")
tab4_readmore_text.config(state=DISABLED, bd=0, bg='#E5f9ff', fg='#130E07', font=('calibri', '12', 'bold'))
tab4_text_box.grid_columnconfigure(2, weight=1)
tab4_text_box.grid_rowconfigure(2, weight=1)
tab4_box_figure = Image.open("profil_ikon.PNG")
tab4_figure = ImageTk.PhotoImage(tab4_box_figure)
tab4_figure_label = Label(tab4_text_box, image=tab4_figure)
tab4_figure_label.image = tab4_figure
tab4_figure_label.grid(row=3, sticky=N+S+W+E, pady=(0,10))
tab4_figure_label.config(bg='#E5f9ff')
tab4_text_box.grid_columnconfigure(3, weight=1)
tab4_text_box.grid_rowconfigure(3, weight=1)
#intro_tab_canvas.configure(scrollregion = intro_tab_canvas.bbox("all"))
intro_tab_canvas.grid(row=0, column=0, sticky=N+S+W)#pack(side=LEFT, fill=BOTH)
#Globals.intro_tab.grid_columnconfigure(1, weight=2)
#Globals.intro_tab.grid_rowconfigure(1, weight=2)
#scrollbar.config(command=intro_tab_canvas.yview)
##################################### TAB 1 - CoMet ############################################
# CoMet tab: an explanatory text, then three bordered rows (image upload,
# export-folder selection, filename entry). Each row is a border image Label
# hosting a read-only Text, plus a Button in the adjacent grid column.
Globals.tab1_canvas.config(bg='#ffffff', bd = 0, relief=FLAT, highlightthickness=0)
CoMet_explained = tk.Text(Globals.tab1_canvas, height=4)#, width=200)
CoMet_explained.insert(INSERT, \
"A linear accelerator is a tool used to generate ionizing radiation, which can be used in radiotherapy\n\
treatment. Using a modulator, electron gun and RF power source electrons are released and \n\
accelerated through a waveguide. The modulator provide high voltage pulses to the RF pulse which\n\
leads to a propagating electromagnetic field inside the waveguide.")
CoMet_explained.grid(row=0, column = 0, columnspan=4, sticky=N+S+E+W, padx=(20,20), pady=(10,20))
Globals.tab1_canvas.grid_columnconfigure(0, weight=0)
Globals.tab1_canvas.grid_rowconfigure(0, weight=0)
CoMet_explained.config(state=DISABLED, bg='#ffffff', font=('calibri', '13'), relief=FLAT)
# Row 1: image upload (border + Text prompt + Browse button).
Globals.CoMet_border_1_label = Label(Globals.tab1_canvas, image = CoMet_border_dark,width=50)
Globals.CoMet_border_1_label.image=CoMet_border_dark
Globals.CoMet_border_1_label.grid(row=1, column=0, columnspan=3, sticky = W+E, padx = (0, 50), pady=(10,15))
Globals.tab1_canvas.grid_columnconfigure(1, weight=0)
Globals.tab1_canvas.grid_rowconfigure(1, weight=0)
Globals.CoMet_border_1_label.config(bg='#ffffff', borderwidth=0)
CoMet_upload_button_frame = tk.Frame(Globals.tab1_canvas)
CoMet_upload_button_frame.grid(row=1, column = 2, padx = (60, 0), pady=(10,15))
Globals.tab1_canvas.grid_columnconfigure(2, weight=0)
Globals.tab1_canvas.grid_rowconfigure(2, weight=0)
CoMet_upload_button_frame.config(bg = '#ffffff')
CoMet_upload_button = tk.Button(CoMet_upload_button_frame, text='Browse', image = upload_button_image ,cursor='hand2',font=('calibri', '14'),\
    relief=FLAT, state=ACTIVE, command=CoMet_functions.UploadAction)
CoMet_upload_button.grid(row=0,column=5,rowspan=5, sticky=N+S)#pack(expand=True, fill=BOTH)
CoMet_upload_button.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
CoMet_upload_button.image = upload_button_image  # keep a reference so the image is not GC'd
Globals.CoMet_uploaded_file_text = tk.Text(Globals.CoMet_border_1_label, height=1, width=31)
Globals.CoMet_uploaded_file_text.grid(row=0, column=0, columnspan=3, sticky=E+W, pady=(20,20), padx=(80,0))
Globals.CoMet_uploaded_file_text.insert(INSERT, "Upload the image you want to correct")
Globals.CoMet_uploaded_file_text.config(state=DISABLED, bd=0, font=('calibri', '12'), fg='gray', bg='#ffffff')
# Row 2: export folder selection (border + Text prompt + folder button).
Globals.CoMet_border_2_label = Label(Globals.tab1_canvas, image = CoMet_border_dark)
Globals.CoMet_border_2_label.image=CoMet_border_dark
Globals.CoMet_border_2_label.grid(row=2, column=0, columnspan=3, sticky=N+S+E+W, padx = (0, 50), pady=(10,15))
Globals.tab1_canvas.grid_columnconfigure(3, weight=0)
Globals.tab1_canvas.grid_rowconfigure(3, weight=0)
Globals.CoMet_border_2_label.config(bg='#ffffff', borderwidth=0)
CoMet_folder_button_frame = tk.Frame(Globals.tab1_canvas)
CoMet_folder_button_frame.grid(row=2, column = 2, padx = (60, 0), pady=(10,15))
Globals.tab1_canvas.grid_columnconfigure(4, weight=0)
Globals.tab1_canvas.grid_rowconfigure(4, weight=0)
CoMet_folder_button_frame.config(bg = '#ffffff')
CoMet_folder_button = tk.Button(CoMet_folder_button_frame, text='Browse', image = select_folder_image ,cursor='hand2',font=('calibri', '14'),\
    relief=FLAT, state=ACTIVE, command=CoMet_functions.setCoMet_export_folder)
CoMet_folder_button.grid(row=0,column=5,rowspan=5, sticky=N+S)#pack(expand=True, fill=BOTH)
CoMet_folder_button.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
CoMet_folder_button.image=select_folder_image  # keep a reference so the image is not GC'd
CoMet_save_to_folder = tk.Text(Globals.CoMet_border_2_label, height=1, width=32)
CoMet_save_to_folder.grid(row=0, column=0, columnspan=3, sticky=E+W, pady=(20,20), padx=(80,0))
CoMet_save_to_folder.insert(INSERT,"Folder to save the corrected image:")
CoMet_save_to_folder.config(state=DISABLED, bd=0, font=('calibri', '12'), fg='gray', bg='#ffffff')
## Function to test the filename the user chooses for the corrected image.
## Fixes vs. original: an empty box yields "" (not " "), so the old
## `== " "` check let empty filenames through; the placeholder text is
## "Filename (will be saved as *.dcm)", which the old `== "Filename"` check
## never matched; and the length limit now matches the error message
## ("under 20 characters" => reject len > 20, not > 21).
def testFilename():
    """Validate the typed filename; on success disable the entry and advance the progress bar."""
    filename = CoMet_save_filename.get("1.0",'end-1c')
    Globals.CoMet_corrected_image_filename.set(filename)
    # Reject blank input and the untouched placeholder.
    if(filename.strip() == "" or filename == "Filename (will be saved as *.dcm)" or filename == "Filename"):
        Globals.CoMet_corrected_image_filename.set("Error!")
    elif(len(filename) > 20):
        messagebox.showerror("Error", "The filename must be under 20 characters")
        Globals.CoMet_corrected_image_filename.set("Error!")
    elif(re.match("^[A-Za-z0-9_]*$", filename.lstrip())==None):
        messagebox.showerror("Error","Filename can only contain letters and/or numbers")
        Globals.CoMet_corrected_image_filename.set("Error!")
    else:
        # Lock the entry and mark this step done on the progress bar (4 steps * 25%).
        CoMet_save_button_1.config(state=DISABLED)
        CoMet_save_filename.config(state=DISABLED)
        Globals.CoMet_progressbar_counter += 1
        Globals.CoMet_progressbar["value"] = Globals.CoMet_progressbar_counter*25
# Row 3: filename entry (light border + editable Text + Save button that
# triggers testFilename above).
Globals.CoMet_border_3_label = Label(Globals.tab1_canvas, image = CoMet_border_light)
Globals.CoMet_border_3_label.image=CoMet_border_light
Globals.CoMet_border_3_label.grid(row=3, column=0, columnspan=3, sticky=N+S+E+W, padx = (0, 50), pady=(10,0))
Globals.tab1_canvas.grid_columnconfigure(5, weight=0)
Globals.tab1_canvas.grid_rowconfigure(5, weight=0)
Globals.CoMet_border_3_label.config(bg='#ffffff', borderwidth=0)
CoMet_save_button_frame_1 = tk.Frame(Globals.tab1_canvas)
CoMet_save_button_frame_1.grid(row=3, column = 2, padx = (60, 0), pady=(10,0))
Globals.tab1_canvas.grid_columnconfigure(6, weight=0)
Globals.tab1_canvas.grid_rowconfigure(6, weight=0)
CoMet_save_button_frame_1.config(bg = '#ffffff')
CoMet_save_button_1 = tk.Button(CoMet_save_button_frame_1, text='Save', image = CoMet_save_button ,cursor='hand2',font=('calibri', '14'),\
    relief=FLAT, state=ACTIVE, command=testFilename)
CoMet_save_button_1.grid(row=0,column=5,rowspan=5, sticky=N+S)#pack(expand=True, fill=BOTH)
CoMet_save_button_1.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
CoMet_save_button_1.image = CoMet_save_button  # keep a reference so the image is not GC'd
# NORMAL state keeps this Text editable, unlike the label-style Texts above.
CoMet_save_filename = tk.Text(Globals.CoMet_border_3_label, height=1, width=30)
CoMet_save_filename.grid(row=0, column=0, columnspan=3, sticky=E+W, pady=(20,20), padx=(80,0))
CoMet_save_filename.insert(END,"Filename (will be saved as *.dcm)")
CoMet_save_filename.config(state=NORMAL, bd=0, font=('calibri', '12'), fg='gray', bg='#ffffff')
# Placeholder behaviour for the filename box. Fix vs. original: the old
# else-branch inserted the placeholder at "1.0" on EVERY focus event where the
# box did not contain exactly the placeholder, prepending it in front of any
# text the user had typed. Now the placeholder is only restored when the box
# is empty.
def writeFilename(event):
    """Clear the placeholder on focus-in; restore it on focus-out if the box is empty."""
    current = CoMet_save_filename.get("1.0", tk.END)
    if(current == "Filename (will be saved as *.dcm)\n"):
        CoMet_save_filename.delete("1.0", tk.END)
    elif(current.strip() == ""):
        CoMet_save_filename.insert("1.0", "Filename (will be saved as *.dcm)")
CoMet_save_filename.bind("<FocusIn>", writeFilename)
CoMet_save_filename.bind("<FocusOut>", writeFilename)
# Function to validate the patient name written in by the user.
# Fixes vs. original: an empty box yields "" (not " "), so the old `== " "`
# check let empty names through; the length limit now matches the error
# message ("under 30 characters" => reject len > 30, not > 31).
def testName():
    """Validate the typed patient name; on success lock the entry."""
    name = CoMet_save_patientName.get("1.0",'end-1c')
    Globals.CoMet_patientName.set(name)
    # Reject blank input and the untouched placeholder.
    if(name.strip() == "" or name == "Patient name"):
        Globals.CoMet_patientName.set("Error!")
    elif(len(name) > 30):
        messagebox.showerror("Error", "The Name must be under 30 characters")
        Globals.CoMet_patientName.set("Error!")
    elif(re.match("^[A-Za-z0-9_]*$", name.lstrip())==None):
        messagebox.showerror("Error","Name can only contain letters (not æ,ø,å) and no spaces")
        Globals.CoMet_patientName.set("Error!")
    else:
        CoMet_save_button_2.config(state=DISABLED)
        CoMet_save_patientName.config(state=DISABLED)
# Row 4: patient name entry (light border + editable Text + Save button that
# triggers testName above).
Globals.CoMet_border_4_label = Label(Globals.tab1_canvas, image = CoMet_border_light)
Globals.CoMet_border_4_label.image=CoMet_border_light
Globals.CoMet_border_4_label.grid(row=4, column=0, columnspan=3, sticky=E+W, padx = (0, 50), pady=(0,0))
Globals.tab1_canvas.grid_columnconfigure(7, weight=0)
Globals.tab1_canvas.grid_rowconfigure(7, weight=0)
Globals.CoMet_border_4_label.config(bg='#ffffff', borderwidth=0)
CoMet_save_button_frame_2 = tk.Frame(Globals.tab1_canvas)
CoMet_save_button_frame_2.grid(row=4, column = 2, padx = (60, 0), pady=(0,0))
Globals.tab1_canvas.grid_columnconfigure(8, weight=0)
Globals.tab1_canvas.grid_rowconfigure(8, weight=0)
CoMet_save_button_frame_2.config(bg = '#ffffff')
CoMet_save_button_2 = tk.Button(CoMet_save_button_frame_2, text='Save', image = CoMet_save_button ,cursor='hand2',font=('calibri', '14'),\
    relief=FLAT, state=ACTIVE, command=testName)
CoMet_save_button_2.grid(row=0,column=5,rowspan=5, sticky=N+S)#pack(expand=True, fill=BOTH)
CoMet_save_button_2.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
CoMet_save_button_2.image = CoMet_save_button  # keep a reference so the image is not GC'd
CoMet_save_patientName = tk.Text(Globals.CoMet_border_4_label, height=1, width=30)
CoMet_save_patientName.grid(row=0, column=0, columnspan=3, sticky=E+W, pady=(20,20), padx=(80,0))
CoMet_save_patientName.insert(END,"Patient name")
CoMet_save_patientName.config(state=NORMAL, bd=0, font=('calibri', '12'), fg='gray', bg='#ffffff')
# Placeholder behaviour for the patient-name box. Same fix as writeFilename:
# the old else-branch prepended "Patient name" in front of user-typed text on
# every focus event; the placeholder is now only restored when the box is
# empty.
def writePname(event):
    """Clear the placeholder on focus-in; restore it on focus-out if the box is empty."""
    current = CoMet_save_patientName.get("1.0", tk.END)
    if(current == "Patient name\n"):
        CoMet_save_patientName.delete("1.0", tk.END)
    elif(current.strip() == ""):
        CoMet_save_patientName.insert("1.0", "Patient name")
CoMet_save_patientName.bind("<FocusIn>", writePname)
CoMet_save_patientName.bind("<FocusOut>", writePname)
# "Correct" button (runs the correction) and the canvas that will display the
# corrected image.
CoMet_correct_button_frame = tk.Frame(Globals.tab1_canvas)
CoMet_correct_button_frame.grid(row=4, column = 4,rowspan=2, padx = (0, 0), pady=(0,0))
Globals.tab1_canvas.grid_columnconfigure(9, weight=0)
Globals.tab1_canvas.grid_rowconfigure(9, weight=0)
CoMet_correct_button_frame.config(bg = '#ffffff')
CoMet_correct_button = tk.Button(CoMet_correct_button_frame, text='Save', image = CoMet_correct_button_image ,cursor='hand2',font=('calibri', '14'),\
    relief=FLAT, state=ACTIVE, command=CoMet_functions.Correct)
CoMet_correct_button.grid(row=0,column=5,rowspan=5, sticky=N+S)#pack(expand=True, fill=BOTH)
CoMet_correct_button.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
CoMet_correct_button.image = CoMet_correct_button_image  # keep a reference so the image is not GC'd
Globals.CoMet_print_corrected_image = tk.Canvas(Globals.tab1_canvas , width=240, height=290)
Globals.CoMet_print_corrected_image.grid(row=0, column=4, rowspan=3, sticky=N+W+S+E, pady=(20,0))
Globals.CoMet_print_corrected_image.config(bg='#ffffff', bd = 0, relief=FLAT)
Globals.tab1_canvas.grid_columnconfigure(11,weight=0)
Globals.tab1_canvas.grid_rowconfigure(11, weight=0)
Globals.tab1_canvas.grid(row=0,column=5,rowspan=5, sticky=N+S)#pack(expand=True, fill=BOTH)
##### teste scrollbar ##########
# Experimental scrollbar wiring for tab1 (section title is Norwegian for
# "test scrollbar"). NOTE(review): create_window() below embeds Globals.form
# (apparently the root window) inside its own child canvas, which looks
# unintended — presumably the scrollable content frame was meant; verify.
def onFrameConfigure(canvas):
    '''Reset the scroll region to encompass the inner frame'''
    canvas.configure(scrollregion=canvas.bbox("all"))
vsb = tk.Scrollbar(Globals.form, orient="vertical", command=Globals.tab1_canvas.yview)
Globals.tab1_canvas.configure(yscrollcommand=vsb.set)
vsb.grid(row=0,column=5,rowspan=5, sticky=N+S)#pack(side="right", fill="y")
Globals.tab1_canvas.grid(row=0,column=5,rowspan=5, sticky=N+S) #(side="left", fill="both", expand=True)
Globals.tab1_canvas.create_window((4,4), window=Globals.form, anchor="nw")
Globals.form.bind("<Configure>", lambda event, canvas=Globals.tab1_canvas: onFrameConfigure(Globals.tab1_canvas))
#populate(Globals.form)
##################################### TAB 2 - Dose response ############################################
# Dose-response tab: two explanatory text panes ("why" and "how").
# Fix vs. original: both place() calls below used the misspelled option
# `relwidt`; Tk's place manager rejects unknown options with a TclError, so
# the misspelling is corrected to `relwidth`.
#img_file_name="default.png"
#path_img=db_config.photo_directory + img_file_name
## Text and buttons for the user to choose DPI
#choose_doseResponse_dpi = tk.Text(Globals.tab2, height=1, width=1)
#choose_doseResponse_dpi.place(relwidth=0.35, relheight=0.5, relx=0.07, rely=0.61)
#choose_doseResponse_dpi.insert(tk.CURRENT,"Dots per inch (dpi) used during scanning: ")
#choose_doseResponse_dpi.config(state=DISABLED, bd=0, font=('calibri', '15'))
#Radiobutton(Globals.tab2, text='72 dpi',cursor='hand2',font=('calibri', '14'), \
#    variable=Globals.doseResponse_dpi, value=72, command=CoMet_functions.nothingButton).place(relwidth=0.075, relheight=0.05, relx=0.13, rely=0.66)
#Radiobutton(Globals.tab2, text='127 dpi',cursor='hand2',font=('calibri', '14'), \
#    variable=Globals.doseResponse_dpi, value=127, command=CoMet_functions.nothingButton).place(relwidth=0.077, relheight=0.05, relx= 0.23, rely=0.66)
#openImageTabOne=Image.open(path_img)
#imgTabOne=ImageTk.PhotoImage(openImageTabOne)
#imgLabelTabOne=tk.Label(tab2,image=imgTabOne)
why_dose_response_text = tk.Text(Globals.tab2, height=1, width=1)
why_dose_response_text.place(relwidth=0.48, relheight=0.4, relx=0.004, rely=0.005)
why_dose_response_text.insert(INSERT,\
"To be able to perform an accurate dose caluclations using GafChromic film EBT3 \n\
it is necessary to create a dose-respons curve for each batch of film, in addition\n\
to a calibration scan before/along every use. The respons of GafChromic film \n\
EBT3 is modelled using a rational function, X(D,n) = a + b/(D-c), as this has \n\
proven to fit well with the film behavior. In the model X(D,n) is the scanner \n\
respons in color channel n and a, b and c are constants. Because of the nature \n\
of asymptotic fitting functions a good fit will be achieved by using doses in \n\
geomteric progression, D, nD, nnD, etc.. Also, to avoid scanner uncertainties\n\
each dose should be scannet three times and uploaded here where an average will be used." )
why_dose_response_text.config(state=DISABLED, bd=0, font=('calibri', '12'))
how_dose_response_text = tk.Text(Globals.tab2, height=1, width=1)
how_dose_response_text.place(relwidth=0.48, relheight=0.4, relx=0.51, rely=0.005)
how_dose_response_text.insert(INSERT,\
"Irradiate film piece of size (Bestemt med maske?) with known doses. Place one and one\n\
film piece in the center of the scanner and perfom three scans per dose. " )
how_dose_response_text.config(state=DISABLED, bd=0, font=('calibri', '12'))
# Upload button plus RGB channel headers/checkboxes for the dose-response
# table (checkboxes re-plot the curve when toggled).
upload_button1 = tk.Button(Globals.dose_response_scroll_window_1, text='Upload file', cursor='hand2', font=('calibri', '12'), highlightthickness=7, \
    overrelief=GROOVE, state=ACTIVE, width=12, command=Dose_response_functions.create_window)
upload_button1.place(relwidth=0.5, relheight=0.1, relx=0.3, rely=0.03)
red = tk.Text(Globals.dose_response_scroll_window_1, height=1, width=1)
red.place(relwidth=0.1, relheight=0.08, relx=0.3, rely=0.2)
red.insert(INSERT, "Red")
red.config(state=DISABLED, bd=0, font=('calibri', '12'))
green = tk.Text(Globals.dose_response_scroll_window_1, height=1, width=1)
green.place(relwidth=0.1, relheight=0.08, relx=0.5, rely=0.2)
green.insert(INSERT, "Green")
green.config(state=DISABLED, bd=0, font=('calibri', '12'))
blue = tk.Text(Globals.dose_response_scroll_window_1, height=1, width=1)
blue.place(relwidth=0.1, relheight=0.08, relx=0.75, rely=0.2)
blue.insert(INSERT, "Blue")
blue.config(state=DISABLED, bd=0, font=('calibri', '12'))
dose_title = tk.Text(Globals.dose_response_scroll_window_1, height=1, width=1)
dose_title.place(relheight=0.08, relwidth=0.15, relx= 0.05, rely=0.2)
dose_title.insert(INSERT, "Dose (cGy)")
dose_title.config(state=DISABLED, bd=0, font=('calibri', '12'))
check1 = Checkbutton(Globals.dose_response_scroll_window_1, variable=Globals.dose_response_var1, command=Dose_response_functions.plot_dose_response)
check1.place(relx=0.4, rely=0.19)
check2 = Checkbutton(Globals.dose_response_scroll_window_1, variable=Globals.dose_response_var2, command=Dose_response_functions.plot_dose_response)
check2.place(relx=0.6, rely=0.19)
check3 = Checkbutton(Globals.dose_response_scroll_window_1, variable=Globals.dose_response_var3, command=Dose_response_functions.plot_dose_response)
check3.place(relx=0.85, rely=0.19)
# Starts DISABLED; presumably enabled elsewhere once a calibration exists — verify.
Globals.dose_response_save_calibration_button = tk.Button(Globals.tab2, text='Save calibration', cursor='hand2', font=('calibri', '12'), highlightthickness=7, \
    overrelief=GROOVE, state=DISABLED, width=12, command=Dose_response_functions.saveCalibration)
Globals.dose_response_save_calibration_button.place(relwidth=0.1, relheight=0.05, relx=0.4, rely=0.4)
##################################### TAB 3 - Map dose ############################################
#path = os.path.dirname(sys.argv[0])
#path= "upload.png"
#upload_button_image = ImageTk.PhotoImage(file=path)
upload_film_data = tk.Button(Globals.tab3, text='Upload',image=upload_button_image, cursor='hand2', font=('calibri', '12'), \
    relief=FLAT, state=ACTIVE, width=12, command=lambda: Map_Dose.UploadAction("FILM"))
upload_film_data.place(relwidth=0.17, relheight=0.11, relx=0.3, rely=0.03)
upload_film_data.image = upload_button_image  # keep a reference so the image is not GC'd
##################################### TAB 4 - Profiles ###########################################
##################################### End statements ############################################
#Globals.tab_parent.place(relwidth=1, relheight=0.9, relx=0, rely=0.15)
# Enter the Tk main event loop (blocks until the window is closed).
Globals.form.mainloop()
<reponame>yasirkose/Hand-Gesture
import PyQt5,sys
from PyQt5 import QtGui,QtWidgets,uic,QtCore,Qt
from PyQt5.QtWidgets import *
import cv2
import time
import numpy as np
import HandTrackingModule as htm
import math
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
from pyautogui import screenshot
import screen_brightness_control as sbc
from PyQt5.QtWidgets import QFileDialog
import os
class anaSayfa(QMainWindow):
    """Main window ("anaSayfa" = Turkish for "home page") that starts a
    webcam loop and maps hand gestures to brightness, volume and screenshot
    actions.

    Fix vs. original: in ekranResmiAl(), cancelling the directory dialog left
    self.dosyaPath == "" — a value that is not None — so every later
    screenshot was silently saved to a path rooted at "/" instead of asking
    for a directory again. The cancelled choice is now reset to None.
    The two pycaw volume methods were also deduplicated into _masterVolume().
    """

    def __init__(self):
        super(anaSayfa, self).__init__()
        # Load the Qt Designer layout; requires 'form.ui' next to the script.
        uic.loadUi('form.ui', self)
        self.buttonBaslat.clicked.connect(self.programBaslat)
        self.show()

    def programBaslat(self):
        """Hide the window and run the webcam gesture-recognition loop.

        durum (Turkish: "state") toggles between 0 and 1 via the hand
        gestures below; gestures "Bir".."Bes" (one..five) trigger brightness,
        volume and screenshot actions. The loop exits on Esc (keycode 27).
        """
        self.dosyaPath = None   # screenshot directory, chosen lazily
        self.sayac = 0          # screenshot counter used in file names
        self.setVisible(False)
        durum = 0
        cap = cv2.VideoCapture(0)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        detector = htm.handDetector(detectionCon=0.7)
        while True:
            success, img = cap.read()
            img = detector.findHands(img)
            lmList = detector.findPosition(img, draw=False)
            # On-screen label showing the current mode.
            if durum == 0:
                cv2.putText(img, "<NAME>", (width - 180, height - 20), cv2.FONT_HERSHEY_COMPLEX,
                            0.6, (0, 0, 255), 3)
            elif durum == 1:
                cv2.putText(img, "<NAME>", (width - 150, height - 20), cv2.FONT_HERSHEY_COMPLEX,
                            0.6, (3, 252, 23), 3)
            if len(lmList) != 0:
                # The conditions below compare hand-landmark coordinates
                # (lmList entries look like [id, x, y]; presumably MediaPipe
                # landmarks via HandTrackingModule — verify). They are kept
                # byte-identical to the original tuned thresholds.
                if durum == 0:
                    # Mode-switch gesture: 0 -> 1.
                    if ((lmList[8][1] + 20 < lmList[6][1]) and (lmList[12][1] + 20 < lmList[10][1]) and (
                            lmList[16][1] + 20 < lmList[14][1]) and (lmList[20][1] + 20 < lmList[18][1]) and (
                            lmList[4][2] < lmList[2][2]) and (lmList[8][2] < lmList[12][2]) and (
                            lmList[3][2] < lmList[7][2]) and (lmList[1][1] < lmList[5][1]) and (
                            lmList[3][2] < lmList[5][2])) or (
                            (lmList[8][1] > lmList[6][1] + 20) and (lmList[12][1] > lmList[10][1] + 20) and (
                            lmList[16][1] > lmList[14][1] + 20) and (lmList[20][1] > lmList[18][1] + 20) and (
                            lmList[4][2] < lmList[2][2]) and (lmList[8][2] < lmList[12][2]) and (
                            lmList[3][2] < lmList[7][2]) and (lmList[1][1] > lmList[5][1]) and (
                            lmList[3][2] < lmList[5][2])):
                        durum = 1
                elif durum == 1:
                    # Mode-switch gesture: 1 -> 0.
                    if ((lmList[8][1] + 20 < lmList[6][1]) and (lmList[12][1] + 20 < lmList[10][1]) and (
                            lmList[16][1] + 20 < lmList[14][1]) and (lmList[20][1] + 20 < lmList[18][1]) and (
                            lmList[4][2] > lmList[2][2]) and (lmList[8][2] > lmList[12][2]) and (
                            lmList[3][2] > lmList[7][2]) and (lmList[1][1] < lmList[5][1]) and (
                            lmList[3][2] > lmList[5][2])) or (
                            (lmList[8][1] > lmList[6][1] + 20) and (lmList[12][1] > lmList[10][1] + 20) and (
                            lmList[16][1] > lmList[14][1] + 20) and (lmList[20][1] > lmList[18][1] + 20) and (
                            lmList[4][2] > lmList[2][2]) and (lmList[8][2] > lmList[12][2]) and (
                            lmList[3][2] > lmList[7][2]) and (lmList[1][1] > lmList[5][1]) and (
                            lmList[3][2] > lmList[5][2])):
                        durum = 0
                    # "Bir" (one finger): maximise brightness.
                    elif ((lmList[8][2] < lmList[7][2]) and (lmList[12][2] > lmList[9][2]) and (
                            lmList[16][2] > lmList[13][2]) and (lmList[20][2] > lmList[17][2]) and (
                            lmList[0][2] > lmList[17][2]) and (
                            ((lmList[4][1] < lmList[20][1]) and (lmList[4][1] > lmList[8][1])) or (
                            (lmList[4][1] > lmList[20][1]) and (lmList[4][1] < lmList[8][1]))) and (
                            lmList[4][2] > lmList[8][2]) and (lmList[4][2] < lmList[1][2])):
                        cv2.putText(img, "Bir", (20, 30), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, (255, 0, 0), 3)
                        self.parlaklikArttir()
                    # "Iki" (two fingers): minimise brightness.
                    elif ((lmList[8][2] < lmList[7][2]) and (lmList[12][2] < lmList[9][2]) and (
                            lmList[16][2] > lmList[13][2]) and (lmList[20][2] > lmList[17][2]) and (
                            lmList[0][2] > lmList[17][2]) and (
                            ((lmList[4][1] < lmList[20][1]) and (lmList[4][1] > lmList[8][1])) or (
                            (lmList[4][1] > lmList[20][1]) and (lmList[4][1] < lmList[8][1]))) and (
                            lmList[4][2] > lmList[12][2]) and (lmList[4][2] < lmList[1][2])):
                        cv2.putText(img, "Iki", (20, 30), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, (255, 0, 0), 3)
                        self.parlaklikAzalt()
                    # "Uc" (three fingers): mute (minimum volume).
                    elif ((lmList[8][2] < lmList[7][2]) and (lmList[12][2] < lmList[9][2]) and (
                            lmList[16][2] < lmList[13][2]) and (lmList[20][2] > lmList[17][2]) and (
                            lmList[0][2] > lmList[17][2]) and (
                            ((lmList[4][1] < lmList[20][1]) and (lmList[4][1] > lmList[8][1])) or (
                            (lmList[4][1] > lmList[20][1]) and (lmList[4][1] < lmList[8][1]))) and (
                            lmList[4][2] > lmList[16][2]) and (lmList[4][2] < lmList[1][2])):
                        cv2.putText(img, "Uc", (20, 30), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, (255, 0, 0), 3)
                        self.sesSeviyesiSifirla()
                    # "Dort" (four fingers): maximum volume.
                    elif ((lmList[8][2] < lmList[7][2]) and (lmList[12][2] < lmList[9][2]) and (
                            lmList[16][2] < lmList[13][2]) and (lmList[20][2] < lmList[17][2]) and (
                            lmList[0][2] > lmList[17][2]) and (
                            ((lmList[4][1] < lmList[20][1]) and (lmList[4][1] > lmList[8][1])) or (
                            (lmList[4][1] > lmList[20][1]) and (lmList[4][1] < lmList[8][1]))) and (
                            lmList[4][2] > lmList[16][2]) and (lmList[4][2] < lmList[1][2])):
                        cv2.putText(img, "Dort", (20, 30), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, (255, 0, 0), 3)
                        self.sesSeviyesiMaxla()
                    # "Bes" (five fingers, open hand): take a screenshot.
                    elif ((lmList[8][2] < lmList[5][2]) and (lmList[12][2] < lmList[9][2]) and (
                            lmList[16][2] < lmList[13][2]) and (lmList[20][2] < lmList[17][2]) and (
                            lmList[4][1] < lmList[8][1]) and (lmList[8][1] < lmList[12][1]) and (
                            lmList[12][1] < lmList[16][1]) and (lmList[16][1] < lmList[20][1])) or (
                            (lmList[8][2] < lmList[5][2]) and (lmList[12][2] < lmList[9][2]) and (
                            lmList[16][2] < lmList[13][2]) and (lmList[20][2] < lmList[17][2]) and (
                            lmList[4][1] > lmList[8][1]) and (lmList[8][1] > lmList[12][1]) and (
                            lmList[12][1] > lmList[16][1]) and (lmList[16][1] > lmList[20][1])):
                        cv2.putText(img, "Bes", (20, 30), cv2.FONT_HERSHEY_COMPLEX,
                                    0.6, (255, 0, 0), 3)
                        self.ekranResmiAl()
            cv2.imshow("Ekran", img)
            k = cv2.waitKey(5) & 0xFF
            if k == 27:  # Esc
                cv2.destroyAllWindows()
                quit()
                break

    def ekranResmiAl(self):
        """Capture a screenshot and save it to the user-chosen directory.

        The directory is asked for once via QFileDialog and cached; numbered
        file names avoid overwriting earlier screenshots.
        """
        ekranResmi = screenshot()
        if self.dosyaPath is None:
            self.dosyaPath = QFileDialog.getExistingDirectory(caption='Choose Directory', directory=os.getcwd())
            if self.dosyaPath == "":
                # Dialog cancelled: forget the choice so the dialog is shown
                # again next time instead of saving under "" (filesystem root).
                self.dosyaPath = None
                return
        path = self.dosyaPath + "/elHareketiyleAlinanSS" + str(self.sayac) + ".png"
        self.sayac = self.sayac + 1
        ekranResmi.save(path)

    def parlaklikArttir(self):
        """Set screen brightness to 100%."""
        sbc.set_brightness(100)

    def parlaklikAzalt(self):
        """Set screen brightness to 0%."""
        sbc.set_brightness(0)

    def _masterVolume(self, index):
        """Set the master volume to one end of the device range.

        index 0 selects the minimum of GetVolumeRange(), index 1 the maximum.
        """
        devices = AudioUtilities.GetSpeakers()
        interface = devices.Activate(
            IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
        volume = cast(interface, POINTER(IAudioEndpointVolume))
        volRange = volume.GetVolumeRange()
        volume.SetMasterVolumeLevel(volRange[index], None)

    def sesSeviyesiSifirla(self):
        """Drop the master volume to the minimum of the device range."""
        self._masterVolume(0)

    def sesSeviyesiMaxla(self):
        """Raise the master volume to the maximum of the device range."""
        self._masterVolume(1)
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    # Keep a reference so the window is not garbage collected while running.
    window = anaSayfa()
    # Propagate Qt's exit status to the shell instead of discarding it.
    sys.exit(app.exec_())
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import retrying
from os_vif import objects as obj_vif
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from kuryr_kubernetes import clients
from kuryr_kubernetes.cni.binding import base as b_base
from kuryr_kubernetes.cni.plugins import base as base_cni
from kuryr_kubernetes.cni import utils
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes import exceptions
from kuryr_kubernetes import utils as k_utils
# Module-level logger for the CNI registry plugin.
LOG = logging.getLogger(__name__)
# Global oslo.config handle; cni_daemon options are read through this.
CONF = cfg.CONF
RETRY_DELAY = 1000  # 1 second in milliseconds
# TODO(dulek, gryf): Another corner case is (and was) when pod is deleted
# before it's corresponding CRD was created and populated by vifs by
# controller or even noticed by any watcher. Kubelet will try to delete such
# vif, but we will have no data about it. This is currently worked around by
# returning successfully in case of timing out in delete. To solve this
# properly we need to watch for pod deletes as well, or perhaps create
# finalizer for the pod as soon, as we know, that kuryrport CRD will be
# created.
class K8sCNIRegistryPlugin(base_cni.CNIPlugin):
    """CNI plugin backed by a shared KuryrPort registry.

    ``registry`` is a shared dict (multiprocessing.Manager proxy) keyed by
    "<namespace>/<pod name>"; a separate watcher fills it with the
    KuryrPort CRD ('kp') and its VIFs ('vifs'). ADD waits for that data
    and plugs the interfaces; DEL unplugs them.
    """

    def __init__(self, registry, healthy):
        # healthy: shared value used to report daemon health (see
        # report_drivers_health); registry: shared KuryrPort data per pod.
        self.healthy = healthy
        self.registry = registry
        self.k8s = clients.get_kubernetes_client()

    def _get_obj_name(self, params):
        """Return the registry key for the request: "<namespace>/<pod name>"."""
        return "%(namespace)s/%(name)s" % {
            'namespace': params.args.K8S_POD_NAMESPACE,
            'name': params.args.K8S_POD_NAME}

    def add(self, params):
        """Handle CNI ADD: plug the pod's VIFs and wait until they are ACTIVE.

        :param params: CNI request parameters
        :return: the VIF of the default interface
        :raises exceptions.ResourceNotReady: cached KuryrPort can't be re-read
        :raises exceptions.CNINeutronPortActivationTimeout: ports did not
            become ACTIVE within the configured timeout
        """
        kp_name = self._get_obj_name(params)
        timeout = CONF.cni_daemon.vif_annotation_timeout

        # Try to confirm if CRD in the registry is not stale cache. If it is,
        # remove it.
        with lockutils.lock(kp_name, external=True):
            if kp_name in self.registry:
                cached_kp = self.registry[kp_name]['kp']
                try:
                    kp = self.k8s.get(k_utils.get_res_link(cached_kp))
                except Exception:
                    LOG.exception('Error when getting KuryrPort %s', kp_name)
                    raise exceptions.ResourceNotReady(kp_name)

                if kp['metadata']['uid'] != cached_kp['metadata']['uid']:
                    LOG.warning('Stale KuryrPort %s detected in cache. (API '
                                'uid=%s, cached uid=%s). Removing it from '
                                'cache.', kp_name, kp['metadata']['uid'],
                                cached_kp['metadata']['uid'])
                    del self.registry[kp_name]

        vifs = self._do_work(params, b_base.connect, timeout)

        # NOTE(dulek): Saving containerid to be able to distinguish old DEL
        #              requests that we should ignore. We need a lock to
        #              prevent race conditions and replace whole object in the
        #              dict for multiprocessing.Manager to notice that.
        with lockutils.lock(kp_name, external=True):
            d = self.registry[kp_name]
            d['containerid'] = params.CNI_CONTAINERID
            self.registry[kp_name] = d
            LOG.debug('Saved containerid = %s for CRD %s',
                      params.CNI_CONTAINERID, kp_name)

        # Wait for timeout sec, 1 sec between tries, retry when even one
        # vif is not active.
        @retrying.retry(stop_max_delay=timeout * 1000, wait_fixed=RETRY_DELAY,
                        retry_on_result=utils.any_vif_inactive)
        def wait_for_active(kp_name):
            # Shadows the outer kp_name on purpose; re-reads the freshest
            # vifs dict from the shared registry on every retry.
            return self.registry[kp_name]['vifs']

        data = {'metadata': {'name': params.args.K8S_POD_NAME,
                             'namespace': params.args.K8S_POD_NAMESPACE}}
        pod = k_utils.get_referenced_object(data, 'Pod')

        try:
            self.k8s.add_event(pod, 'CNIWaitingForVIFs',
                               f'Waiting for Neutron ports of {kp_name} to '
                               f'become ACTIVE after binding.',
                               component='kuryr-daemon')
            vifs = wait_for_active(kp_name)
        except retrying.RetryError:
            self.k8s.add_event(pod, 'CNITimedOutWaitingForVIFs',
                               f'Timed out waiting for Neutron ports of '
                               f'{kp_name} to become ACTIVE after binding.',
                               'Warning', 'kuryr-daemon')
            raise exceptions.CNINeutronPortActivationTimeout(
                kp_name, self.registry[kp_name]['vifs'])

        return vifs[k_const.DEFAULT_IFNAME]

    def delete(self, params):
        """Handle CNI DEL: unplug the pod's VIFs and update the registry.

        DEL requests whose container ID does not match the recorded ADD are
        ignored; a registry timeout is treated as best-effort cleanup.
        """
        kp_name = self._get_obj_name(params)
        try:
            reg_ci = self.registry[kp_name]['containerid']
            LOG.debug('Read containerid = %s for KuryrPort %s', reg_ci,
                      kp_name)
            if reg_ci and reg_ci != params.CNI_CONTAINERID:
                # NOTE(dulek): This is a DEL request for some older (probably
                #              failed) ADD call. We should ignore it or we'll
                #              unplug a running pod.
                LOG.warning('Received DEL request for unknown ADD call for '
                            'Kuryrport %s (CNI_CONTAINERID=%s). Ignoring.',
                            kp_name, params.CNI_CONTAINERID)
                return
        except KeyError:
            # No registry entry yet -- fall through and try the disconnect.
            pass

        # Passing arbitrary 5 seconds as timeout, as it does not make any sense
        # to wait on CNI DEL. If kuryrport got deleted from API - VIF info is
        # gone. If kuryrport got the vif info removed - it is now gone too.
        # The number's not 0, because we need to anticipate for restarts and
        # delay before registry is populated by watcher.
        try:
            self._do_work(params, b_base.disconnect, 5)
        except exceptions.CNIKuryrPortTimeout:
            # So the VIF info seems to be lost at this point, we don't even
            # know what binding driver was used to plug it. Let's at least
            # try to remove the interface we created from the netns to prevent
            # possible VLAN ID conflicts.
            b_base.cleanup(params.CNI_IFNAME, params.CNI_NETNS)
            raise

        # NOTE(ndesh): We need to lock here to avoid race condition
        #              with the deletion code in the watcher to ensure that
        #              we delete the registry entry exactly once
        try:
            with lockutils.lock(kp_name, external=True):
                if self.registry[kp_name]['del_received']:
                    del self.registry[kp_name]
                else:
                    kp_dict = self.registry[kp_name]
                    kp_dict['vif_unplugged'] = True
                    self.registry[kp_name] = kp_dict
        except KeyError:
            # This means the kuryrport was removed before vif was unplugged.
            # This shouldn't happen, but we can't do anything about it now
            LOG.debug('KuryrPort %s not found registry while handling DEL '
                      'request. Ignoring.', kp_name)
            pass

    def report_drivers_health(self, driver_healthy):
        """Flip the shared health flag when a binding driver reports failure."""
        if not driver_healthy:
            with self.healthy.get_lock():
                LOG.debug("Reporting CNI driver not healthy.")
                self.healthy.value = driver_healthy

    def _do_work(self, params, fn, timeout):
        """Wait for the KuryrPort registry entry, then run *fn* on each VIF.

        :param params: CNI request parameters
        :param fn: binding callback (b_base.connect or b_base.disconnect)
        :param timeout: seconds to wait for the registry entry to appear
        :return: the dict of VIFs that were processed
        :raises exceptions.CNIKuryrPortTimeout: entry never appeared
        """
        kp_name = self._get_obj_name(params)

        # In case of KeyError retry for `timeout` s, wait 1 s between tries.
        @retrying.retry(stop_max_delay=timeout * 1000, wait_fixed=RETRY_DELAY,
                        retry_on_exception=lambda e: isinstance(e, KeyError))
        def find():
            return self.registry[kp_name]

        try:
            d = find()
            kp = d['kp']
            vifs = d['vifs']
        except KeyError:
            data = {'metadata': {'name': params.args.K8S_POD_NAME,
                                 'namespace': params.args.K8S_POD_NAMESPACE}}
            pod = k_utils.get_referenced_object(data, 'Pod')
            self.k8s.add_event(pod, 'CNITimeoutKuryrPortRegistry',
                               f'Timed out waiting for Neutron ports to be '
                               f'created for {kp_name}. Check '
                               f'kuryr-controller logs.', 'Warning',
                               'kuryr-daemon')
            raise exceptions.CNIKuryrPortTimeout(kp_name)

        for ifname, vif in vifs.items():
            is_default_gateway = (ifname == k_const.DEFAULT_IFNAME)
            if is_default_gateway:
                # NOTE(ygupta): if this is the default interface, we should
                # use the ifname supplied in the CNI ADD request
                ifname = params.CNI_IFNAME

            fn(vif, self._get_inst(kp), ifname, params.CNI_NETNS,
               report_health=self.report_drivers_health,
               is_default_gateway=is_default_gateway,
               container_id=params.CNI_CONTAINERID)
        return vifs

    def _get_inst(self, kp):
        """Build the os-vif InstanceInfo (pod UID + name) used by binding calls."""
        return (obj_vif.instance_info
                .InstanceInfo(uuid=kp['spec']['podUid'],
                              name=kp['metadata']['name']))
|
from typer.testing import CliRunner
from manifestoo.commands.check_dev_status import (
CORE_DEV_STATUS,
check_dev_status_command,
)
from manifestoo.main import app
from manifestoo.odoo_series import OdooSeries
from .common import mock_addons_selection, mock_addons_set, populate_addons_dir
def test_missing_dev_status():
    """A selected addon with no development_status and no default is flagged."""
    addons_set = mock_addons_set({"a": {}})
    selection = mock_addons_selection("a")
    result = check_dev_status_command(
        selection,
        addons_set,
        default_dev_status=None,
        transitive=False,
        odoo_series=OdooSeries.v12_0,
    )
    assert result == ["a has missing development_status"]


def test_missing_dev_status_depends():
    """A direct dependency with no development_status is flagged too."""
    addons_set = mock_addons_set(
        {
            "a": {"depends": ["b"], "development_status": "Stable"},
            "b": {},
        }
    )
    selection = mock_addons_selection("a")
    result = check_dev_status_command(
        selection,
        addons_set,
        default_dev_status=None,
        transitive=False,
        odoo_series=OdooSeries.v14_0,
    )
    assert result == ["b has missing development_status"]
def test_invalid_dev_status():
    """Unknown statuses are reported; the core status is invalid for addons."""
    addons_set = mock_addons_set(
        {
            "a": {"development_status": "bad"},
            "b": {"development_status": CORE_DEV_STATUS},
        }
    )
    selection = mock_addons_selection("a,b")
    result = check_dev_status_command(
        selection,
        addons_set,
        default_dev_status=None,
        transitive=False,
        odoo_series=OdooSeries.v13_0,
    )
    expected = [
        "a has invalid development_status 'bad'",
        "b has invalid development_status 'core'",
    ]
    assert sorted(result) == expected


def test_invalid_dev_status_depends():
    """An invalid status on a dependency is reported too."""
    addons_set = mock_addons_set(
        {
            "a": {"depends": ["b"], "development_status": "Stable"},
            "b": {"development_status": "bad"},
        }
    )
    selection = mock_addons_selection("a")
    result = check_dev_status_command(
        selection,
        addons_set,
        default_dev_status=None,
        transitive=False,
        odoo_series=OdooSeries.v13_0,
    )
    assert result == ["b has invalid development_status 'bad'"]
def test_missing_selection():
    """Selecting an addon absent from the addons set reports 'not found'."""
    result = check_dev_status_command(
        mock_addons_selection("a"),
        mock_addons_set({}),
        default_dev_status="Beta",
        transitive=False,
        odoo_series=OdooSeries.v13_0,
    )
    assert result == ["a not found"]


def test_missing_depend():
    """A dependency absent from the addons set reports 'not found'."""
    addons_set = mock_addons_set({"a": {"depends": ["b"]}})
    result = check_dev_status_command(
        mock_addons_selection("a"),
        addons_set,
        default_dev_status="Beta",
        transitive=False,
        odoo_series=OdooSeries.v13_0,
    )
    assert result == ["b not found"]
def test_ok():
    """Both addons fall back to the same default status: nothing to report."""
    addons_set = mock_addons_set(
        {
            "a": {"depends": ["b"]},
            "b": {},
        }
    )
    result = check_dev_status_command(
        mock_addons_selection("a"),
        addons_set,
        default_dev_status="Beta",
        transitive=False,
        odoo_series=OdooSeries.v13_0,
    )
    assert result == []


def test_basic():
    """A Beta (defaulted) addon depending on an Alpha addon is reported."""
    addons_set = mock_addons_set(
        {
            "a": {"depends": ["b"]},
            "b": {"development_status": "Alpha"},
        }
    )
    result = check_dev_status_command(
        mock_addons_selection("a"),
        addons_set,
        default_dev_status="Beta",
        transitive=False,
        odoo_series=OdooSeries.v13_0,
    )
    assert result == ["a (Beta) depends on b (Alpha)"]
def test_double():
    """Every lower-status edge along the dependency chain is reported."""
    addons_set = mock_addons_set(
        {
            "a": {"depends": ["b"], "development_status": "Stable"},
            "b": {"depends": ["c"], "development_status": "Beta"},
            "c": {"depends": [], "development_status": "Alpha"},
        }
    )
    selection = mock_addons_selection("a,b")
    result = check_dev_status_command(
        sorted(selection),
        addons_set,
        default_dev_status="Beta",
        transitive=True,
        odoo_series=OdooSeries.v13_0,
    )
    expected = [
        "a (Stable) depends on b (Beta)",
        "b (Beta) depends on c (Alpha)",
    ]
    assert sorted(result) == expected


def test_transitive():
    """An indirect Alpha dependency is only reported with transitive=True."""
    addons_set = mock_addons_set(
        {
            "a": {"depends": ["b"]},
            "b": {"depends": ["c"]},
            "c": {"development_status": "Alpha"},
        }
    )
    selection = mock_addons_selection("a")
    cases = (
        (False, []),
        (True, ["b (Beta) depends on c (Alpha)"]),
    )
    for transitive, expected in cases:
        result = check_dev_status_command(
            selection,
            addons_set,
            default_dev_status="Beta",
            transitive=transitive,
            odoo_series=OdooSeries.v13_0,
        )
        assert result == expected
def test_core_addon():
    """Odoo-shipped (core) dependencies are never flagged."""
    addons_set = mock_addons_set(
        {
            "a": {"depends": ["base"], "development_status": "Stable"},
            "base": {},
        }
    )
    result = check_dev_status_command(
        mock_addons_selection("a"),
        addons_set,
        default_dev_status="Beta",
        transitive=True,
        odoo_series=OdooSeries.v13_0,
    )
    assert result == []


def test_integration(tmp_path):
    """End-to-end: the CLI exits 1 and prints the offending dependency."""
    populate_addons_dir(
        tmp_path,
        {
            "a": {"depends": ["b"], "development_status": "Beta"},
            "b": {},
        },
    )
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        app,
        [
            f"--addons-path={tmp_path}",
            "--select-include=a",
            "--odoo-series=13.0",
            "check-dev-status",
            "--default-dev-status=Alpha",
        ],
        catch_exceptions=False,
    )
    assert result.exit_code == 1
    assert result.stdout == "a (Beta) depends on b (Alpha)\n"
|
<gh_stars>0
# -*- coding=utf8 -*-
"""
V1.
一、查询功能:当期开出的号,可以计算(显示)出遗漏的总和,篮球的遗漏不计算在内。
如本期开:1 2 3 4 5 6 遗漏和为3+1+0+4+16+6=30
二、选号功能:两个条件(1.遗漏和,2.号段内出的个数)
将号码分为三段 1-11;12-22;23-33(此号段先暂定,最好以后可修改,根据实际情况划分三个或四个号段)
选号程序设计为:2+3+1;3+2+1;2+2+2,(这个先定这个三个模式,后期最好可以随时调整)
2+3+1即号段1为2个数,号段2为3个数,号段3为1个数
举例:本期买遗漏和30,号段1内出6个数,一种可能就是1 2 3 4 5 6+X
V2
条件1: 选号区间设定:1-3-2;2-2-2(如果此设置不是很复杂,可以加上1-2-3和2-3-1)
条件2:遗漏和值区间
条件3:与上期开奖号最大重复数(0、1、2)
条件4:允许出现连号的号码0/1组,1组即2个号,3连号以上排除。
条件5:奇偶比设定(2:4,3:3,4:2)
"""
import json
import os
import random
import sys
from itertools import combinations
import collector
import tkinter as tk
from tkinter import ttk
from tkinter import *
class LotteryQurey(object):
    """Loads, caches and exposes double-color-ball lottery draw data.

    Draw history is cached as JSON in DB_FILE; the user's saved code
    selections are persisted separately in DB_SELECTED_FILE.
    """

    # APPDATA only exists on Windows; fall back to the home directory so the
    # class can also be imported (and tested) on other platforms instead of
    # raising KeyError at class-definition time.
    _BASE_DIR = os.path.dirname(os.environ.get("APPDATA", os.path.expanduser("~")))
    DB_FILE = os.path.join(_BASE_DIR, 'lottery.json')
    # Fixes an AttributeError: save_selected_codes() referenced this constant
    # but it was never defined anywhere.
    DB_SELECTED_FILE = os.path.join(_BASE_DIR, 'lottery_selected.json')
    DEFAULT_CODE = [num for num in range(1, 34)]

    def __init__(self):
        self._data = {}            # {date: draw record}
        self._selected_codes = []  # last saved selection strings
        self.__load()
        # Restore the last saved selection so the UI list is repopulated
        # after a restart (previously it always started empty).
        self.__load_selected()

    def parse_raw_lottery(self, raw_data):
        """Normalize raw collector records into the internal draw format.

        :param raw_data: iterable of dicts with 'date', 'red'
            (comma-separated string) and 'blue' (string) fields
        :return: list of dicts with 'date', 'redcode', 'bluecode', 'code'
        """
        data = []
        for e in raw_data:
            item = {}
            item['date'] = e['date'][0:10]
            item['redcode'] = [int(num) for num in e['red'].strip().split(',')]
            item['bluecode'] = int(e['blue'])
            item['code'] = item['redcode'] + [item['bluecode']]
            data.append(item)
        return data

    def refresh(self):
        """Fetch draws newer than the cached ones and persist the cache."""
        try:
            start_date = None
            if len(self._data) > 0:
                start_date = self.edition[0]
            raw_data = collector.get_all_lottery_codes_by_date(start_date, None)
            if len(raw_data) > 0:
                data = self.parse_raw_lottery(raw_data)
                self.__parse_data(data)
                self.__save()
        except Exception as e:
            print('ERROR: cannot get the new info from internet.[{err}]'.format(err=e))

    def __get_data_163(self):
        # NOTE(review): dead Python 2 code -- urllib2, re and
        # self.QUERY_URL_163 are not defined anywhere in this module, so
        # calling this raises NameError/AttributeError. Kept unchanged for
        # reference; port to urllib.request before using it.
        try:
            html = urllib2.urlopen(self.QUERY_URL_163).read().decode('utf-8')
            p = re.compile('data-period=\"(.*)\" data-award=\"(.*)\"')
            codes = p.findall(html)
            data = []
            for code in codes:
                expect = code[0]
                red_code = code[1].split(':')[0]
                blue_code = code[1].split(':')[1]
                item = {'expect': expect, 'redcode': red_code, 'bluecode': blue_code}
                data.append(item)
            return data
        except Exception as e:
            print('ERROR: cannot get the data from 163.com. {}'.format(e))

    def save_selected_codes(self, seleced_codes):
        """Persist the user's chosen codes and keep them in memory."""
        try:
            save_data = {'last_selected': seleced_codes}
            self._selected_codes = list(seleced_codes)
            with open(self.DB_SELECTED_FILE, 'w') as fd:
                json.dump(save_data, fd)
        except Exception as e:
            print('ERROR: Cannot save seleted codes! {}'.format(e))

    def __parse_data(self, data):
        """Index parsed draws by date and add the per-draw 'lostcode' list."""
        lotteries = {}
        for item in data:
            lottery = {}
            lottery['date'] = item['date']
            lottery['code'] = item['code']
            lottery['redcode'] = item['redcode']
            lottery['bluecode'] = item['bluecode']
            # Numbers 1..33 that did NOT appear among the red codes.
            lottery['lostcode'] = list(set(self.DEFAULT_CODE) - set(lottery['redcode']))
            lotteries[lottery['date']] = lottery
        self._data.update(lotteries)

    def __load(self):
        """Load the cached draw history, then refresh it from the network."""
        try:
            if os.path.exists(self.DB_FILE):
                with open(self.DB_FILE, 'r') as fd:
                    self._data = json.load(fd)
            self.refresh()
        except Exception as e:
            try:
                self.refresh()
            except Exception as e:
                raise e

    def __load_selected(self):
        """Restore the previously saved selection from disk (best effort)."""
        try:
            if os.path.exists(self.DB_SELECTED_FILE):
                with open(self.DB_SELECTED_FILE, 'r') as fd:
                    self._selected_codes = json.load(fd).get('last_selected', [])
        except Exception as e:
            print('ERROR: cannot load selected codes. [{err}]'.format(err=e))

    def __save(self):
        """Write the draw cache to disk, keeping one .bk backup of the old file."""
        try:
            if os.path.exists(self.DB_FILE):
                back_file = '{file}.bk'.format(file=self.DB_FILE)
                if os.path.exists(back_file):
                    os.remove(back_file)
                os.rename(self.DB_FILE, back_file)
            with open(self.DB_FILE, 'w') as fd:
                json.dump(self._data, fd, indent=4, separators=(',', ': '))
        except Exception as e:
            raise e

    @property
    def data(self):
        """All cached draws keyed by date."""
        return self._data

    @property
    def edition(self):
        """Cached draw dates, newest first."""
        tmp = [elem for elem in self._data]
        tmp.sort(reverse=True)
        return tmp

    @property
    def allcode(self):
        """Every possible red-ball number (1..33)."""
        return self.DEFAULT_CODE

    @property
    def selectedcodes(self):
        """The most recently saved selection strings."""
        return self._selected_codes
class CombinationsTool(object):
@classmethod
def get(cls, select_mode, open_code, all_lost_code, min_lost, max_lost, repeat_cnt, repeat_team_max_cnt, odd_cnt,
min_opensum, max_opensum, contains_codes):
"""
:param select_mode:
:param open_code:
:param all_lost_code:
:param min_lost:
:param max_lost:
:param repeat_cnt:
:param repeat_team_max_cnt:
:param odd_cnt:
:param min_opensum:
:param max_opensum:
:param contains_codes:
:return:
"""
tmp = select_mode.split('-')
low_code_cnt, mid_code_cnt, high_code_cnt = int(tmp[0]), int(tmp[1]), int(tmp[2])
low_code = [num[0] for num in all_lost_code if num[1] < max_lost and num[0] in range(1, 12)]
mid_code = [num[0] for num in all_lost_code if num[1] < max_lost and num[0] in range(12, 23)]
high_code = [num[0] for num in all_lost_code if num[1] < max_lost and num[0] in range(23, 34)]
low_code_ret = cls.__calculate(low_code, low_code_cnt)
mid_code_ret = cls.__calculate(mid_code, mid_code_cnt)
high_code_ret = cls.__calculate(high_code, high_code_cnt)
results = []
for high in high_code_ret:
for low in low_code_ret:
for mid in mid_code_ret:
tmp_high = list(high)
tmp_low = list(low)
tmp_mid = list(mid)
calculate_arr = []
calculate_arr.extend(tmp_low)
calculate_arr.extend(tmp_mid)
calculate_arr.extend(tmp_high)
lost_sum = sum([all_lost_code[elem - 1][1] for elem in calculate_arr])
calculate_arr.sort()
if len(contains_codes) > 0:
if len(set(contains_codes) & set(calculate_arr)) != len(contains_codes):
continue
is_continue = False
repeat_team_cnt = 0
for index in range(5):
serial_arr = list(range(calculate_arr[index], calculate_arr[index] + 3))
repeat_count = len(set(serial_arr) & set(calculate_arr))
if repeat_count > 2:
# ignore more than 3 chains number
is_continue = True
break
elif repeat_count > 1:
serial_arr = list(range(calculate_arr[index], calculate_arr[index] + 2))
repeat_count = len(set(serial_arr) & set(calculate_arr))
if repeat_count == 2:
repeat_team_cnt += 1
if is_continue is True or repeat_team_cnt != repeat_team_max_cnt:
continue
if odd_cnt > -1:
odd_numbers = [num for num in calculate_arr if num % 2 != 0]
if len(odd_numbers) != odd_cnt:
continue
if max_opensum != 0 and (sum(calculate_arr) < min_opensum or sum(calculate_arr) > max_opensum):
continue
if min_lost <= lost_sum <= max_lost and \
len(set(calculate_arr) & set(open_code)) <= repeat_cnt:
results.append([calculate_arr, lost_sum])
return results
@classmethod
def __calculate(cls, arr, cnt):
return list(combinations(arr, cnt))
class App(object):
DB_SELECTED_CONF_FILE = os.path.join(os.path.dirname(__file__), 'select_conf.json')  # last-used filter settings

@classmethod
def main(cls):
    """Build the Tk root window, apply a random ttk theme and run the app."""
    root = tk.Tk()
    root.grid_columnconfigure(0, weight=1)
    style = ttk.Style()
    available_themes = style.theme_names()
    random_theme = random.choice(available_themes)
    style.theme_use(random_theme)
    root.title('遗漏选号谦哥版')
    w = 1200
    h = 780
    root.geometry("%dx%d" % (w, h))
    app = cls(root)
    # NOTE(review): handler is defined later in the class (outside this
    # excerpt); presumably it saves state before exit -- confirm.
    root.protocol("WM_DELETE_WINDOW", app.__on_closing_window)
    root.mainloop()

def __init__(self, root):
    """Wire the lottery model to the Tk widgets and restore saved config."""
    self.root = root
    self._lottery = LotteryQurey()
    self.__initialize_components()
    # __load_select_conf is defined outside this excerpt.
    self.__load_select_conf()
def __initialize_components(self):
    """Create all widgets and render the initial view."""
    # The previous try/except only did `raise e`, which re-raises the same
    # exception while rewriting its traceback origin -- removed.
    self.__initialize_left()
    self.__initialize_right()
    self.__initialize_right_bottom()
    self.__load_local_data()
    self.__refresh_view()
def __initialize_left(self):
    """Build the left control panel: draw picker, filter inputs, action buttons."""
    self.frm_left = tk.LabelFrame(self.root)
    self.frm_left.pack(side=LEFT, fill=Y, padx=4)
    # Draw (edition) picker, newest first; changing it re-renders the stats.
    self.cbb_lottery_edition = StringVar()
    self.cbb_edition = ttk.Combobox(self.frm_left, textvariable=self.cbb_lottery_edition)
    self.cbb_edition['value'] = self._lottery.edition
    self.cbb_edition['state'] = 'readonly'
    self.cbb_edition.bind('<<ComboboxSelected>>', self.__edition_change)
    # self.cbb_edition.grid(row=0, column=0, padx=8, sticky=W)
    self.cbb_edition.pack(fill=X, pady=8)
    self.cbb_edition.current(0)
    # Manual refresh button (pulls new draws from the network).
    self.btn_refresh_data = tk.Button(self.frm_left, text='手 动 刷 新', font='16', bg='PaleGreen',
                                      command=self.__refresh_raw_data)
    self.btn_refresh_data.pack(fill=X, pady=8)
    # Zone-ratio selector (low-mid-high pick counts); free text is allowed.
    self.lb_select_mode = tk.Label(self.frm_left, anchor=W, text='区间比(可输入 低区-中区-高区)')
    self.lb_select_mode.pack(fill=X)
    self.cbb_select_mode_var = StringVar()
    self.cbb_select_mode = ttk.Combobox(self.frm_left, textvariable=self.cbb_select_mode_var)
    self.cbb_select_mode['value'] = ['2-3-1', '3-2-1', '2-2-2', '1-2-3', '1-3-2', '3-1-2']
    self.cbb_select_mode.pack(fill=X, pady=8)
    self.cbb_select_mode.current(0)
    # Odd/even ratio selector plus the checkbox enabling that filter.
    # NOTE: self.lb_select_mode is intentionally reused for this second label.
    self.lb_select_mode = tk.Label(self.frm_left, anchor=W, text='奇偶比(可输入 奇数-偶数)')
    self.lb_select_mode.pack(fill=X)
    self.cbb_select_odd_var = StringVar()
    self.cbb_select_odd = ttk.Combobox(self.frm_left, textvariable=self.cbb_select_odd_var)
    self.cbb_select_odd['value'] = ['2-4', '4-2', '1-5', '5-1', '3-3']
    self.cbb_select_odd.pack(fill=X, pady=8)
    self.cbb_select_odd.current(0)
    self.ckb_is_allow_odd_var = BooleanVar()
    self.ckb_is_allow_odd = tk.Checkbutton(
        self.frm_left,
        text='是否允许奇偶比',
        font='12',
        anchor=W,
        variable=self.ckb_is_allow_odd_var,
        onvalue=True,
        offvalue=False)
    self.ckb_is_allow_odd.pack(fill=X, pady=8)
    # Lost-sum range (min ~ max), defaulting to 25..35.
    self.lb_opencode = tk.Label(self.frm_left, text='遗漏区间(25~35)')
    self.lb_opencode.pack()
    self.frm_left_lost_range = tk.LabelFrame(self.frm_left)
    self.frm_left_lost_range.pack()
    self.tf_range_min_var = StringVar()
    self.tf_range_min = tk.Entry(self.frm_left_lost_range, justify=CENTER, textvariable=self.tf_range_min_var)
    self.tf_range_min.pack(side=LEFT)
    self.lb_opencode = tk.Label(self.frm_left_lost_range, text='~~~')
    self.lb_opencode.pack(side=LEFT)
    self.tf_range_max_var = StringVar()
    self.tf_range_max = tk.Entry(self.frm_left_lost_range, justify=CENTER, textvariable=self.tf_range_max_var)
    self.tf_range_max.pack(side=LEFT)
    self.tf_range_min_var.set('25')
    self.tf_range_max_var.set('35')
    # Code-sum range; 0~0 disables this filter.
    self.lb_opencode_sum = tk.Label(self.frm_left, text='开奖和值区间(0~0表示不启用))')
    self.lb_opencode_sum.pack()
    self.frm_left_opensum_range = tk.LabelFrame(self.frm_left)
    self.frm_left_opensum_range.pack()
    self.tf_range_sum_min_var = IntVar()
    self.tf_range_sum_min = tk.Entry(self.frm_left_opensum_range, justify=CENTER,
                                     textvariable=self.tf_range_sum_min_var)
    self.tf_range_sum_min.pack(side=LEFT)
    self.lb_opencode_sum = tk.Label(self.frm_left_opensum_range, text='~~~')
    self.lb_opencode_sum.pack(side=LEFT)
    self.tf_range_sum_max_var = IntVar()
    self.tf_range_sum_max = tk.Entry(self.frm_left_opensum_range, justify=CENTER,
                                     textvariable=self.tf_range_sum_max_var)
    self.tf_range_sum_max.pack(side=LEFT)
    self.tf_range_sum_min_var.set('0')
    self.tf_range_sum_max_var.set('0')
    # Maximum overlap with the current draw's red codes.
    self.lb_repeat_cnt = tk.Label(self.frm_left, text='和本期开奖号码最大重复数:')
    self.lb_repeat_cnt.pack()
    self.tf_repeat_cnt_var = StringVar()
    self.tf_repeat_cnt = tk.Entry(self.frm_left, justify=CENTER, textvariable=self.tf_repeat_cnt_var)
    self.tf_repeat_cnt.pack(fill=X)
    self.tf_repeat_cnt_var.set('2')
    # Space-separated numbers that must appear in every combination.
    self.lb_contains_codes = tk.Label(self.frm_left, text='必含号码(多个用空格区分 1 2 3)')
    self.lb_contains_codes.pack()
    self.tf_contains_codes_var = StringVar()
    self.tf_contains_codes = tk.Entry(self.frm_left, justify=CENTER, textvariable=self.tf_contains_codes_var)
    self.tf_contains_codes.pack(fill=X)
    # Allowed number of 2-long consecutive runs (0..3).
    self.lb_opencode = tk.Label(self.frm_left, text='允许连号组数(3个以上连号排除)')
    self.lb_opencode.pack()
    self.frm_left_lost_repeat = tk.LabelFrame(self.frm_left)
    self.frm_left_lost_repeat.pack(fill=X, padx=8)
    self.rb_lost_repeat_cnt = IntVar()
    self.rb_lost_repeat_0 = tk.Radiobutton(self.frm_left_lost_repeat, text="0组", variable=self.rb_lost_repeat_cnt,
                                           value=0)
    self.rb_lost_repeat_1 = tk.Radiobutton(self.frm_left_lost_repeat, text="1组", variable=self.rb_lost_repeat_cnt,
                                           value=1)
    self.rb_lost_repeat_2 = tk.Radiobutton(self.frm_left_lost_repeat, text="2组", variable=self.rb_lost_repeat_cnt,
                                           value=2)
    self.rb_lost_repeat_3 = tk.Radiobutton(self.frm_left_lost_repeat, text="3组", variable=self.rb_lost_repeat_cnt,
                                           value=3)
    self.rb_lost_repeat_0.pack(side=LEFT, fill=X, expand=True)
    self.rb_lost_repeat_1.pack(side=LEFT, fill=X, expand=True)
    self.rb_lost_repeat_2.pack(side=LEFT, fill=X, expand=True)
    self.rb_lost_repeat_3.pack(side=LEFT, fill=X, expand=True)
    # Action buttons: clear saved list / generate candidates / save results.
    self.btn_select_code = tk.Button(self.frm_left, text='清空已选记录', font='16', fg='DarkOrange', bg='RoyalBlue',
                                     command=self.__clear_select_code)
    self.btn_select_code.pack(fill=X, pady=8)
    self.btn_select_code = tk.Button(self.frm_left, text='开 始 选 号', font='16', bg='Orange',
                                     command=self.__select_code)
    self.btn_select_code.pack(fill=X, pady=8)
    self.btn_save_select_code = tk.Button(self.frm_left, text='保 存 结 果', font='16', bg='FireBrick',
                                          command=self.__save_select_code)
    self.btn_save_select_code.pack(fill=X, pady=8)
def __initialize_right(self):
    """Build the right panel: read-only stats fields and summary labels."""
    self.frm_right = tk.LabelFrame(self.root)
    self.frm_right.pack(fill=BOTH, expand=True)
    # Current draw's codes with per-number lost streaks.
    self.lb_opencode = tk.Label(self.frm_right, text='开奖号码:')
    self.lb_opencode.pack()
    self.tf_opencode_var = StringVar()
    self.tf_oepncode = tk.Entry(self.frm_right, justify=CENTER, font='Arial 16', fg='OrangeRed2', bg='ghost white',
                                textvariable=self.tf_opencode_var)
    self.tf_oepncode['state'] = 'readonly'
    self.tf_oepncode.pack(fill=X)
    # Lost streaks for every number 1..33 as of the current draw.
    self.lb_curr_all_lostcode = tk.Label(self.frm_right, text='本期全号遗漏:')
    self.lb_curr_all_lostcode.pack()
    self.tf_curr_all_lostcode_var = StringVar()
    self.tf_curr_all_lostcode = tk.Entry(self.frm_right, justify=CENTER, font='Arial 10', fg='HotPink',
                                         bg='ghost white', textvariable=self.tf_curr_all_lostcode_var)
    self.tf_curr_all_lostcode['state'] = 'readonly'
    self.tf_curr_all_lostcode.pack(fill=X)
    # Winning summary of the saved selections against the previous draw.
    self.lb_lostcode_var = StringVar()
    self.lb_lostcode = tk.Label(self.frm_right, textvariable=self.lb_lostcode_var)
    self.lb_lostcode.pack(fill=X)
    self.tf_lostcode_var = StringVar()
    self.tf_lostcode = tk.Entry(self.frm_right, justify=CENTER, font='Arial 16', fg='SteelBlue', bg='ghost white',
                                textvariable=self.tf_lostcode_var)
    self.tf_lostcode['state'] = 'readonly'
    self.tf_lostcode.pack(fill=X)
    # Hint label above the two result lists.
    self.lb_selectcode_var = StringVar()
    self.lb_selectcode = tk.Label(self.frm_right, anchor=W, textvariable=self.lb_selectcode_var)
    self.lb_selectcode.pack(fill=X)
    self.lb_selectcode_var.set('选号结果: ')
def __initialize_right_bottom(self):
    """Build the two scrollable lists: generated candidates and chosen codes."""
    self.frm_right_bottom = tk.LabelFrame(self.frm_right)
    self.frm_right_bottom.pack(fill=BOTH, expand=True)
    # Left list: candidate combinations produced by the generator.
    self.list_selectcode = tk.Listbox(self.frm_right_bottom, font='Times 16', fg='forest green',
                                      selectmode=EXTENDED)
    self.sb_selectcode = tk.Scrollbar(self.frm_right_bottom, orient=VERTICAL)
    self.list_selectcode.config(yscrollcommand=self.sb_selectcode.set)
    self.sb_selectcode.config(command=self.list_selectcode.yview)
    self.list_selectcode.pack(side=LEFT, fill=BOTH, expand=True)
    self.sb_selectcode.pack(side=LEFT, fill=Y)
    self.list_selectcode.bind('<Control-a>', self.__select_all_electcodes)
    self.list_selectcode.bind('<Double-Button-1>', self.__doubleclick_on_selectcods_list)
    self.list_selectcode.bind('<space>', self.__doubleclick_on_selectcods_list)
    # Right list: codes the user kept; supports delete, paste and
    # Ctrl+d pruning (handlers defined later in the class).
    self.list_selectcode_out = tk.Listbox(self.frm_right_bottom, font='Arial 14', fg='goldenrod1',
                                          bg='white smoke', selectmode=EXTENDED)
    self.sb_selectcode_out = tk.Scrollbar(self.frm_right_bottom, orient=VERTICAL)
    self.list_selectcode_out.config(yscrollcommand=self.sb_selectcode_out.set)
    self.sb_selectcode_out.config(command=self.list_selectcode_out.yview)
    self.list_selectcode_out.pack(side=LEFT, fill=BOTH, expand=True)
    self.sb_selectcode_out.pack(side=LEFT, fill=Y)
    self.list_selectcode_out.bind('<Control-a>', self.__select_all_electcodes_out)
    self.list_selectcode_out.bind('<Double-Button-1>', self.__doubleclick_on_selectcodes_out_list)
    self.list_selectcode_out.bind('<space>', self.__doubleclick_on_selectcodes_out_list)
    self.list_selectcode_out.bind('<BackSpace>', self.__delete_on_selectcodes_out_list)
    self.list_selectcode_out.bind('<Control-v>', self.__paste_selectedcodes_out)
    self.list_selectcode_out.bind('<Control-d>', self.__delete_bingo_selected_codes)
def __edition_change(self, event):
    """Combobox handler: re-render the stats for the newly selected draw."""
    self.__refresh_view()

def __get_current_lottery(self):
    """Return the full draw record for the edition picked in the combobox."""
    edition = self.cbb_edition.get()
    lottery = self._lottery.data[edition]
    return lottery

def __refresh_raw_data(self):
    """Manually pull the newest draws from the network into the cache."""
    self._lottery.refresh()

def __load_local_data(self):
    """Populate the chosen-codes list with previously saved selections."""
    for item in self._lottery.selectedcodes:
        self.list_selectcode_out.insert(END, item)
def __refresh_view(self):
    """Re-render every statistic for the currently selected draw."""
    lostcode = self.__calculate_lost_current()
    # "num(lost)" for each drawn number, plus the code sum and lost-streak sum.
    lost_code_str = ' '.join(['{num}({cnt})'.format(num=elem[0], cnt=elem[1]) for elem in lostcode])
    lost_code_sum_str = '合={sum}({lostsum})'.format(sum=sum([elem[0] for elem in lostcode]),
                                                    lostsum=sum([elem[1] for elem in lostcode]))
    self.tf_opencode_var.set(','.join([lost_code_str, lost_code_sum_str]))
    # show lottery
    self.__refresh_bingo_selectedcodes_tips()
    self.__refresh_selection_results_tips()
def __refresh_bingo_selectedcodes_tips(self):
    """Color saved selections by how many balls they hit in the current draw
    and refresh the two summary labels.

    6/5/4/3 matches are highlighted (red/gold/blue/green); any other row
    is reset to the default colors. The original recomputed the same set
    intersection up to four times per row and kept a manual index; this
    computes the hit count once and uses enumerate.
    """
    # Highlight style per number of matched balls.
    bingo_styles = {
        6: {'bg': 'FireBrick', "fg": 'Black'},
        5: {'bg': 'DarkGoldenrod', "fg": 'Black'},
        4: {'bg': 'SteelBlue', "fg": 'Black'},
        3: {'bg': 'SeaGreen', "fg": 'Black'},
    }
    bingo_counts = {6: 0, 5: 0, 4: 0, 3: 0}
    cur_lottery_codes = set(self.__get_current_opencode())
    for index, selected_code_str in enumerate(self._lottery.selectedcodes):
        selected_code = {int(num) for num in selected_code_str.split()}
        # Red balls this saved line shares with the current draw.
        hits = len(cur_lottery_codes & selected_code)
        if hits in bingo_styles:
            bingo_counts[hits] += 1
            self.list_selectcode_out.itemconfig(index, bingo_styles[hits])
        else:
            self.list_selectcode_out.itemconfig(index, {'bg': 'white smoke', "fg": 'goldenrod1'})
    self.lb_lostcode_var.set('上期中奖结果(共 {cnt} 注)'.format(cnt=sum(bingo_counts.values())))
    show_str = '中6球 [{cnt_6}] 注<红色>, 中5球 [{cnt_5}] 注<黄色>, 中4球 [{cnt_4}] 注<蓝色>, 中3球 [{cnt_3}] 注<绿色>'.format(
        cnt_6=bingo_counts[6],
        cnt_5=bingo_counts[5],
        cnt_4=bingo_counts[4],
        cnt_3=bingo_counts[3])
    self.tf_lostcode_var.set(show_str)
    # Full-board lost streaks shown in the read-only entry below.
    lostcode = self.__calculate_lost_all()
    lost_code_str = ' '.join(['{num}({cnt})'.format(num=elem[0], cnt=elem[1]) for elem in lostcode])
    lost_code_sum_str = '合={sum}'.format(sum=sum([elem[1] for elem in lostcode]))
    self.tf_curr_all_lostcode_var.set(','.join([lost_code_str, lost_code_sum_str]))
def __get_last_20_repeat_selected_lottery(self):
bingo_codes_indexs = []
editions = self._lottery.edition[:20]
lotteries = [self._lottery.data[item]['redcode'] for item in editions]
index = 0
for selected_code_str in self._lottery.selectedcodes:
selected_code = [int(num) for num in (selected_code_str.split())]
if any(len(set(selected_code) & set(lottery)) > 2 for lottery in lotteries):
bingo_codes_indexs.append(index)
index += 1
return bingo_codes_indexs
def __refresh_selection_results_tips(self):
    """Update the hint label with candidate/chosen counts and key bindings."""
    cnt = self.list_selectcode.size()
    selected_cnt = self.list_selectcode_out.size()
    self.lb_selectcode_var.set(
        '选号结果: 共 {count} 注, 已选取 {selectedcnt} 注.(双击选中或选中+空格,可以(反)选取号码), 退格键(BackSpace)可删除已选, Ctrl+d删除与近20期重复3个号码以上'.format(
            count=cnt, selectedcnt=selected_cnt))

def __calculate_lost_current(self):
    """Lost streaks of the drawn numbers, excluding the draw itself."""
    return self.__calculate_lost_code(self.__get_current_opencode(), is_cur_lost=True)

def __get_current_opencode(self):
    """Red codes of the draw currently picked in the combobox."""
    cur_edition = self.cbb_edition.get()
    lottery = self._lottery.data[cur_edition]
    return lottery['redcode']

def __calculate_lost_all(self):
    """Lost streaks for every number 1..33 as of the current draw."""
    return self.__calculate_lost_code(self._lottery.allcode)
def __calculate_lost_code(self, lost_code, spec_editon=None, is_cur_lost=False):
    """Compute the "lost" streak for each number in *lost_code*.

    Starting at *spec_editon* (or the draw picked in the combobox) and
    walking towards older draws, count for every number how many draws
    passed before it appeared among the red codes.

    :param lost_code: numbers to evaluate
    :param spec_editon: edition to start from; defaults to the UI choice
    :param is_cur_lost: when True, skip the starting draw itself so the
        streak is measured from the previous draw onwards
    :return: list of [number, lost_count, found_flag] triples
    """
    if spec_editon is None:
        cur_edition = self.cbb_edition.get()
    else:
        cur_edition = spec_editon
    all_edtion = self._lottery.edition
    # [number, consecutive misses counted so far, already seen?]
    lost_lottery_code = [[num, 0, False] for num in lost_code]
    is_find = False
    for item in all_edtion:
        if cur_edition == item:
            is_find = True
            if is_cur_lost:
                # Exclude the starting draw from its own streak.
                continue
        if is_find is True:
            for lostcode in lost_lottery_code:
                if lostcode[2] is True:
                    continue
                if lostcode[0] in self._lottery.data[item]['redcode']:
                    lostcode[2] = True
                else:
                    lostcode[1] += 1
        else:
            # Still before the starting edition (editions are newest-first).
            continue
        # Stop early once every number has been seen at least once.
        can_break = True
        for lostcode in lost_lottery_code:
            if lostcode[2] is False:
                can_break = False
                break
        if can_break is True:
            break
    return lost_lottery_code
@staticmethod
def __codes_to_lottery(codes):
return ' '.join(['{0:0>2}'.format(elem) for elem in codes])
    def __select_code(self):
        """Regenerate the candidate-combination list from the filter widgets.

        Reads every filter control (mode, number range, repeat count, odd
        count, sum range, required numbers) and asks ``CombinationsTool`` for
        matching combinations, then repopulates the candidate listbox.
        """
        self.list_selectcode.delete(0, self.list_selectcode.size() - 1)
        lost_select_code = CombinationsTool.get(
            self.cbb_select_mode_var.get(),
            self.__get_current_opencode(),
            self.__calculate_lost_all(),
            int(self.tf_range_min_var.get()),
            int(self.tf_range_max_var.get()),
            int(self.tf_repeat_cnt_var.get()),
            self.rb_lost_repeat_cnt.get(),
            # -1 disables the odd-count filter; otherwise take the leading
            # number of the "odd-even" combobox value.
            int(self.cbb_select_odd_var.get().split('-')[0]) if self.ckb_is_allow_odd_var.get() else -1,
            self.tf_range_sum_min_var.get(),
            self.tf_range_sum_max_var.get(),
            [int(num) for num in
             self.tf_contains_codes_var.get().strip().split(' ')] if self.tf_contains_codes_var.get().strip() else [])
        for code in lost_select_code:
            self.list_selectcode.insert(END, self.__codes_to_lottery(code[0]))
        self.__refresh_selection_results_tips()
    def __save_select_code(self):
        """Persist the picked codes and refresh dependent UI state."""
        selected_out_codes = self.list_selectcode_out.get(0, END)
        self._lottery.save_selected_codes(selected_out_codes)
        self.__refresh_bingo_selectedcodes_tips()
        self.__save_select_conf()
    def __clear_select_code(self):
        """Empty the picked-codes list and persist the now-empty state."""
        self.list_selectcode_out.delete(0, END)
        self.__save_select_code()
    def __select_all_electcodes(self, event):
        # Select-all handler for the candidate listbox.
        self.list_selectcode.select_set(0, END)
    def __select_all_electcodes_out(self, event):
        # Select-all handler for the picked listbox.
        self.list_selectcode_out.select_set(0, END)
    def __paste_selectedcodes_out(self, event):
        """Paste clipboard lines (one ticket per line) into the picked list.

        Any line that is not whitespace-separated integers aborts the paste
        and shows an error dialog with a format example.
        """
        selected_codes = self.root.clipboard_get()
        lotteries = selected_codes.split('\n')
        try:
            for lottery in lotteries:
                lottery = lottery.strip()
                if lottery == '':
                    continue
                self.list_selectcode_out.insert(END, self.__codes_to_lottery([int(elem) for elem in lottery.split()]))
            self.__save_select_code()
        except Exception as e:
            print('ERROR DATA: {data}'.format(data=selected_codes))
            print('ERROR: {err}'.format(err=e))
            tkmsgbox.showerror('不符合粘贴格式', '示例:\n{err}'.format(err='01 02 03 04 05 06\n01 02 03 04 04 06'))
    def __doubleclick_on_selectcods_list(self, event):
        """Move the highlighted candidate rows into the picked list."""
        indexs = list(self.list_selectcode.curselection())
        if len(indexs) > 0:
            selected_items = [self.list_selectcode.get(index) for index in indexs]
            for item in selected_items:
                self.list_selectcode_out.insert(END, item)
            if len(indexs) == self.list_selectcode.size():
                self.list_selectcode.delete(0, END)
            else:
                self.__delete_selected_codes(self.list_selectcode, indexs)
            self.__refresh_selection_results_tips()
    def __doubleclick_on_selectcodes_out_list(self, event):
        """Move the highlighted picked rows back into the candidate list."""
        indexs = list(self.list_selectcode_out.curselection())
        if len(indexs) > 0:
            selected_items = [self.list_selectcode_out.get(index) for index in indexs]
            for item in selected_items:
                self.list_selectcode.insert(END, item)
            if len(indexs) == self.list_selectcode_out.size():
                self.list_selectcode_out.delete(0, END)
            else:
                self.__delete_selected_codes(self.list_selectcode_out, indexs)
            self.__refresh_selection_results_tips()
    def __delete_on_selectcodes_out_list(self, event):
        """BackSpace handler: drop the highlighted rows from the picked list."""
        indexs = list(self.list_selectcode_out.curselection())
        self.__delete_selected_codes(self.list_selectcode_out, indexs)
        self.__refresh_selection_results_tips()
    def __delete_bingo_selected_codes(self, event):
        """Ctrl+d handler: drop picks overlapping recent draws by 3+ numbers."""
        indexs = self.__get_last_20_repeat_selected_lottery()
        self.__delete_selected_codes(self.list_selectcode_out, indexs)
        self.__save_select_code()
        self.__refresh_selection_results_tips()
    def __delete_selected_codes(self, listbox, indexs):
        """Delete the rows at ``indexs`` (assumed ascending) from ``listbox``,
        choosing the cheapest Tk delete strategy for the shape of the set."""
        if not indexs:
            return
        elif len(indexs) == 1:
            listbox.delete(indexs[0])
        elif len(indexs) == listbox.size():
            listbox.delete(0, END)
        elif len(indexs) == len(range(indexs[0], indexs[-1] + 1)):
            # contiguous run: one ranged delete suffices
            listbox.delete(indexs[0], indexs[-1])
        else:
            # scattered rows: rebuild the listbox without them
            all_codes = listbox.get(0, END)
            new_codes = [all_codes[index] for index in range(listbox.size()) if index not in indexs]
            listbox.delete(0, END)
            for codes in new_codes:
                listbox.insert(END, codes)
def __has_unsaved_selected_codes(self):
if self.list_selectcode_out.size() > 0:
saved_selected_codes = set(self._lottery.selectedcodes)
unsaved_selected_codes = set(self.list_selectcode_out.get(0, END))
if len(saved_selected_codes | unsaved_selected_codes) != len(saved_selected_codes):
return True
return False
def __on_closing_window(self):
if self.__has_unsaved_selected_codes() > 0:
if tkmsgbox.askokcancel('退出', "有未保存的选注结果, 确认要退出?"):
self.root.destroy()
else:
self.root.destroy()
    def __load_select_conf(self):
        """Restore all filter widgets from the saved JSON config, if present.

        NOTE(review): keys are read with direct indexing, so a config file
        written by an older version that lacks a key raises KeyError —
        confirm whether per-key defaults should be used instead.
        """
        if not os.path.exists(self.DB_SELECTED_CONF_FILE):
            return
        with open(self.DB_SELECTED_CONF_FILE, 'r') as fd:
            save_conf = json.load(fd)
        if not save_conf:
            return
        self.cbb_select_mode_var.set(save_conf['cbb_select_mode_var'])
        self.ckb_is_allow_odd_var.set(save_conf['ckb_is_allow_odd_var'])
        self.cbb_select_odd_var.set(save_conf['cbb_select_odd_var'])
        self.tf_range_min_var.set(save_conf['tf_range_min_var'])
        self.tf_range_max_var.set(save_conf['tf_range_max_var'])
        self.tf_repeat_cnt_var.set(save_conf['tf_repeat_cnt_var'])
        self.rb_lost_repeat_cnt.set(save_conf['rb_lost_repeat_cnt'])
        self.tf_range_sum_min_var.set(save_conf['tf_range_sum_min_var'])
        self.tf_range_sum_max_var.set(save_conf['tf_range_sum_max_var'])
        self.tf_contains_codes_var.set(save_conf['tf_contains_codes_var'])
    def __save_select_conf(self):
        """Snapshot every filter widget into the JSON config file."""
        save_conf = {'cbb_select_mode_var': self.cbb_select_mode_var.get(),
                     'ckb_is_allow_odd_var': self.ckb_is_allow_odd_var.get(),
                     'cbb_select_odd_var': self.cbb_select_odd_var.get(),
                     'tf_range_min_var': self.tf_range_min_var.get(), 'tf_range_max_var': self.tf_range_max_var.get(),
                     'tf_repeat_cnt_var': self.tf_repeat_cnt_var.get(),
                     'rb_lost_repeat_cnt': self.rb_lost_repeat_cnt.get(),
                     'tf_range_sum_min_var': self.tf_range_sum_min_var.get(),
                     'tf_range_sum_max_var': self.tf_range_sum_max_var.get(),
                     'tf_contains_codes_var': self.tf_contains_codes_var.get()}
        with open(self.DB_SELECTED_CONF_FILE, 'w') as fd:
            json.dump(save_conf, fd, indent=4, separators=(',', ': '))
# Application entry point: builds the Tk UI and enters the main loop.
App.main()
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.views.generic import FormView, TemplateView, DeleteView, View, DetailView, ListView
from accounts.models import Profile
from .forms import AddForm
from .models import RequestCall, Friend
class AddView(FormView):
    """AJAX-aware friend-request form.

    POSTing a username validates it against the business rules below and,
    on success, creates a ``RequestCall`` from the current user to the
    target user.
    """
    form_class = AddForm
    template_name = 'friend/addfriend.html'
    success_url = '/form-success/'

    def form_invalid(self, form):
        """Return form errors as JSON for AJAX clients, HTML otherwise."""
        response = super(AddView, self).form_invalid(form)
        # NOTE(review): request.is_ajax() is removed in Django 4.0; newer
        # code should check the X-Requested-With header instead.
        if self.request.is_ajax():
            return JsonResponse(form.errors, status=400)
        else:
            return response

    def form_valid(self, form):
        """Validate the target username and create the RequestCall.

        Rejects self-requests, unknown users, existing friendships and
        duplicate pending requests in either direction.
        """
        response = super(AddView, self).form_valid(form)
        if self.request.is_ajax():
            uname = form.cleaned_data.get("uname")
            uname2 = self.request.user.username
            # .exists() issues a cheap EXISTS query instead of fetching and
            # counting whole rows as the original len(...) calls did.
            if uname == uname2:
                return JsonResponse({'msg': "You can not send a request to yourself"}, status=400)
            elif not User.objects.filter(username=uname).exists():
                return JsonResponse({'msg': "This is not a valid username"}, status=400)
            elif Friend.objects.filter(username=uname, profile=self.request.user.profile.id).exists():
                return JsonResponse({'msg': "You are already a friend"}, status=400)
            elif RequestCall.objects.filter(coming_from=uname2, coming_for=uname).exists():
                return JsonResponse({'msg': "Already sent a request"}, status=400)
            elif RequestCall.objects.filter(coming_from=uname, coming_for=uname2).exists():
                return JsonResponse({'msg': "You have already got a request"}, status=400)
            else:
                # Existence was verified above, so get() is safe here.
                target = User.objects.get(username=uname)
                req = RequestCall(
                    coming_from=uname2,
                    coming_for=uname,
                    coming_from_profile_id=self.request.user.profile.id,
                    coming_for_profile_id=target.profile.id,
                    coming_from_name=self.request.user.first_name + " " + self.request.user.last_name,
                    coming_for_name=target.first_name + " " + target.last_name,
                )
                req.save()
                data = {
                    'message': "Request Sent"
                }
                return JsonResponse(data)
        else:
            return response
class ReqView(TemplateView):
    """Show the friend requests sent by and received by the current user."""
    template_name = 'friend/requests.html'

    def get_context_data(self, **kwargs):
        """Add outgoing ('send_req') and incoming ('rec_req') requests."""
        context = super(ReqView, self).get_context_data(**kwargs)
        username = self.request.user.username
        context['send_req'] = RequestCall.objects.filter(coming_from=username)
        context['rec_req'] = RequestCall.objects.filter(coming_for=username)
        return context
class DelReqView(DeleteView):
    """Delete a pending friend request by primary key (GET-triggered)."""

    def get(self, request, *args, **kwargs):
        """Remove the RequestCall if it still exists; always confirm via JSON.

        The original used a bare ``except:``/``finally`` that swallowed every
        error (including real DB failures); only the expected "already gone"
        case is silenced now, keeping the endpoint idempotent.
        """
        try:
            RequestCall.objects.get(pk=self.kwargs['pk']).delete()
        except RequestCall.DoesNotExist:
            pass
        return JsonResponse({"message": "Deletion Confirmed"})
class AccReqView(DeleteView):
    """Accept a friend request: create reciprocal Friend rows, delete it."""

    def get(self, request, *args, **kwargs):
        """Create both sides of the friendship, then remove the request.

        The original's bare ``except:``/``finally`` hid every failure;
        only the expected "request or sender vanished" races are silenced.
        """
        try:
            r = RequestCall.objects.get(pk=self.kwargs['pk'])
            # Friendship is symmetric: one row for the acceptor ...
            Friend.objects.create(
                profile_id=self.request.user.profile.id,
                username=r.coming_from,
                name=r.coming_from_name,
                fid=int(r.coming_from_profile_id),
            )
            u = User.objects.get(username=r.coming_from)
            # ... and one row for the original sender.
            Friend.objects.create(
                profile_id=u.profile.id,
                username=r.coming_for,
                name=r.coming_for_name,
                fid=int(r.coming_for_profile_id),
            )
            r.delete()
        except (RequestCall.DoesNotExist, User.DoesNotExist):
            pass
        return JsonResponse({'message': 'Request accepted'})
class FriendsView(TemplateView):
    """List the current user's friends."""
    template_name = 'friend/friend-page.html'

    def get_context_data(self, **kwargs):
        """Expose the user's Friend rows as 'friends' in the context."""
        context = super(FriendsView, self).get_context_data(**kwargs)
        profile_id = self.request.user.profile.id
        context['friends'] = Friend.objects.filter(profile_id=profile_id)
        return context
import lshlink as lsh
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from sklearn import datasets
from scipy.cluster.hierarchy import dendrogram, linkage, cophenet
from scipy.spatial.distance import pdist
from functools import reduce, lru_cache
import datetime
import pickle
import pstats
import pandas as pd
import multiprocessing
from mpl_toolkits.mplot3d import Axes3D
def mp(build_hash_table, args):
    """Run ``build_hash_table`` over ``args`` in a process pool.

    Args:
        build_hash_table: picklable callable executed in worker processes.
        args: iterable of argument tuples, one per call.

    Returns:
        list: results of ``build_hash_table(*a)`` for each tuple ``a``
        (the original computed but discarded them).
    """
    # The context manager guarantees the pool is terminated even if
    # starmap raises; the original never joined the workers.
    with multiprocessing.Pool() as p:
        results = p.starmap(build_hash_table, args)
    return results
def mp_run_times(x1, y1, x2, y2, title = '', xlabel = '', ylabel = ''):
    """Plot run times of the multiprocessing vs. original hash-table builders.

    Each series is drawn as scatter points connected by a labelled line.
    """
    series = [
        (x1, y1, 'multiprocessing build_hash_tables()'),
        (x2, y2, 'original build_hash_tables()'),
    ]
    for xs, ys, label in series:
        plt.scatter(xs, ys)
        plt.plot(xs, ys, label = label)
    plt.legend(loc = 'upper left')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.gcf().set_size_inches(12, 6)
    plt.show();
def iris_run_times():
    """Plot hard-coded benchmark timings (Figure 4): run time vs. data size
    for LSH-link with two A settings and two single-linkage baselines.

    The y-values are seconds measured offline; the x-values are dataset
    sizes (replicated iris rows).
    """
    lsh_A14_y = [.46, 2.37, 12.4, 20, 37.2, 56.4, 94]
    lsh_A14_x = [150, 450, 1050, 1500, 1950, 2550, 3000]
    lsh_A20_x = [150, 450, 1050, 1500, 1950, 2550, 3000]
    lsh_A20_y = [.285, 1.31, 7.15, 13, 21.8, 36.1, 58.2]
    sci_x = [150, 450, 1050, 1500, 1950, 2550, 3000]
    sci_y = [0.000215, 0.00136, 0.00996, 0.0187, 0.0341, 0.0592, 0.0843]
    single_x = [150, 450, 1050, 1500, 1950, 2550, 3000]
    single_y = [0.530, 7.13, 61, 149, 302, 630, 996]
    plt.scatter(lsh_A14_x, lsh_A14_y)
    plt.plot(lsh_A14_x, lsh_A14_y, label = 'LSH, A = 1.4')
    plt.scatter(lsh_A20_x, lsh_A20_y)
    plt.plot(lsh_A20_x, lsh_A20_y, label = 'LSH, A = 2.0')
    plt.scatter(sci_x, sci_y)
    plt.plot(sci_x, sci_y, label = 'single-linkage (scipy)')
    plt.scatter(single_x, single_y)
    plt.plot(single_x, single_y, label = 'single-linkage (custom)')
    plt.legend(loc = 'upper left')
    plt.xlabel('data size')
    plt.ylabel('time (seconds)')
    plt.title('Figure 4: Run Time Comparisons for Single-Linkage vs. LSH')
    plt.gcf().set_size_inches(12, 6)
    plt.show();
def kmeans(k, data):
n = data.shape[0]
clusters = np.zeros(n)
updates = True
iterations = 0
# randomly initialize k cluster centers from input data
idx = list(np.random.choice(range(n), k))
centers = data[idx,]
while updates:
updates = False
iterations += 1
print('iteration: ' + str(iterations))
counter = 0
# assign each point to closest cluster center
for i in range(n):
point = data[i,]
closest_cluster = np.argmin(np.linalg.norm(point - centers, axis = 1))
if clusters[i] != closest_cluster:
clusters[i] = closest_cluster
updates = True
counter += 1
print('number of updates: %s' % str(counter))
# change each cluster center to be in the middle of its points
for j in range(k):
cluster_idx = np.where(clusters == j)[0]
centers[j] = np.mean(data[cluster_idx, :], axis = 0)
return(clusters, centers) |
from logger import logger
# Child logger for this buffer module. NOTE: this name shadows the stdlib
# `logging` module within this file.
logging = logger.getChild('sessions.twitter.buffers.users')
import core.sessions.buffers.field_metadata as meta
from core.sessions.buffers.buffer_defaults import buffer_defaults
from core.sessions.buffers.update_type import set_update_type
from core.sessions.buffers.buffers import Buffer
from main import Twitter
from core.sessions.buffers.buffers import Dismissable
class Users (Dismissable, Twitter):
    """Temporary, non-updatable buffer listing Twitter users for a username.

    Configures the per-user display fields and disables time-step
    navigation, which is meaningless for a user list.
    """
    rapid_access = ['id']
    def __init__ (self, username=None, *args, **kwargs):
        """Set up fields and flags; defaults to the session's own username."""
        if not username:
            username = kwargs['session'].username
        kwargs['count'] = 200
        kwargs['maxAPIPerUpdate'] = 15
        self.username = username
        super(Users, self).__init__(*args, **kwargs)
        self.default_template = 'default_followers_friends'
        self.store_args({'username':username})
        self.buffer_metadata['interval'] = self.session.config['updates']['friendsInterval']
        self.set_flag('updatable', False)
        self.set_flag('temp', True)
        # Field registry: (internal name, translated label, source attribute
        # or (source, transform...) tuple, plus type/filter metadata).
        self.set_field('name', _("Name"), None)
        self.set_field('screen_name', _("Screen name"), None)
        self.set_field('location', _("Location"), None)
        self.set_field('bio', _("Description"), 'description')
        self.set_field('protected', _("Protected"), None, field_type=meta.FT_BOOL, filter=False)
        self.set_field('followers_count', _("Follower count"), None, field_type=meta.FT_NUMERIC)
        self.set_field('friends_count', _("Friend count"), None, field_type=meta.FT_NUMERIC)
        self.set_field('tweets_count', _("Number of tweets"), 'statuses_count', field_type=meta.FT_NUMERIC)
        self.set_field('notifications', _("Device notifications"), None, field_type=meta.FT_BOOL, filter=False)
        self.set_field('local_time', _("User's local time"), ('utc_offset', self.user_local_time), filter=False)
        self.set_field('local_date', _("User's local date",), ('utc_offset', self.user_local_date), filter=False)
        self.set_field('last_tweet_time', _("Last Tweet Time"), (('status', 'created_at'), self.standardize_timestamp, self.actual_time), filter=False)
        self.set_field('last_tweet_date', _("Last Tweet Date"), (('status', 'created_at'), self.standardize_timestamp, self.actual_date), filter=False)
        self.set_field('last_tweet_when', _("Last Tweet Relative Time"), (('status', 'created_at'), self.standardize_timestamp, self.relative_time), filter=False)
    @buffer_defaults
    def get_message(self, index=None):
        """Return the last element of the retrieved post at ``index``."""
        return self.RetrievePost(index)[-1]
    @buffer_defaults
    def remove_item (self, index=None):
        """Unfollow the user and remove the row without announcing it."""
        self.session.interface.Unfollow()
        Buffer.remove_item(self, index=index, announce=False)
    def process_users(self, items):
        # User objects need no per-item post-processing.
        return items
    def get_next_item_time_step_index (self, step, index=0):
        # Time-step navigation is unsupported for user lists.
        raise AttributeError
    def get_prev_item_time_step_index (self, step, index=0):
        # Time-step navigation is unsupported for user lists.
        raise AttributeError
    def get_mentions (self, index=None):
        # User entries carry no mentions.
        return []
    def process_update(self, update, *args, **kwargs):
        # Reverse the incoming batch so items end up oldest-first.
        update.reverse()
        return update
    def extend(self, items, *args, **kwargs):
        """Replace the whole buffer contents, preserving the cursor index."""
        index = self.index
        self.clear()
        super(Users, self).extend(items)
        self.index = index
    @set_update_type
    def report_update(self, items, msg="", update_type=None, *args, **kwargs):
        """Announce an update, naming the user when it is not the current
        user's own list."""
        if not msg:
            if self.session.is_current_user(self.username):
                msg = _("%s updated") % self.item_name_plural
            else:
                msg = _("%s for %s updated") % (self.item_name_plural, self.username)
        super(Users, self).report_update(items, msg=msg, update_type=update_type)
|
"""This file contains code used in "Think Stats",
by <NAME>, available from greenteapress.com
Copyright 2011 <NAME>
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import numpy
import cPickle
import random
import brfss
import correlation
import Cdf
import myplot
import Pmf
import thinkstats
import rankit
import matplotlib.pyplot as pyplot
def MakeUniformPrior(t, num_points, label, spread=3.0):
"""Makes a prior distribution for mu and sigma based on a sample.
t: sample
num_points: number of values in each dimension
label: string label for the new Pmf
spread: number of standard errors to include
Returns: Pmf that maps from (mu, sigma) to prob.
"""
# estimate mean and stddev of t
n = len(t)
xbar, S2 = thinkstats.MeanVar(t)
sighat = math.sqrt(S2)
print xbar, sighat, sighat / xbar
# compute standard error for mu and the range of ms
stderr_xbar = sighat / math.sqrt(n)
mspread = spread * stderr_xbar
ms = numpy.linspace(xbar-mspread, xbar+mspread, num_points)
# compute standard error for sigma and the range of ss
stderr_sighat = sighat / math.sqrt(2 * (n-1))
sspread = spread * stderr_sighat
ss = numpy.linspace(sighat-sspread, sighat+sspread, num_points)
# populate the PMF
pmf = Pmf.Pmf(name=label)
for m in ms:
for s in ss:
pmf.Set((m, s), 1)
return ms, ss, pmf
def LogUpdate(suite, evidence):
"""Updates a suite of hypotheses based on new evidence.
Modifies the suite directly; if you want to keep the original, make
a copy.
Args:
suite: Pmf object
evidence: whatever kind of object Likelihood expects
"""
for hypo in suite.Values():
likelihood = LogLikelihood(evidence, hypo)
suite.Incr(hypo, likelihood)
print suite.Total()
def LogLikelihood(evidence, hypo):
    """Computes the log likelihood of the evidence under the hypothesis.

    Args:
        evidence: a list of values
        hypo: tuple of hypothetical mu and sigma

    Returns:
        log likelihood of the sample given mu and sigma
    """
    mu, sigma = hypo
    sum_sq = Summation(evidence, mu)
    # Gaussian log-likelihood, with the constant -n/2*log(2*pi) dropped.
    return -len(evidence) * math.log(sigma) - sum_sq / (2 * sigma ** 2)
def Summation(t, mu, cache={}):
    """Computes the sum of (x-mu)**2 for x in t.

    Results are memoized in ``cache`` — a deliberately shared mutable
    default keyed on the (t, mu) pair — so ``t`` must be hashable
    (i.e. a tuple).

    t: tuple of values
    mu: hypothetical mean
    cache: cache of previous results
    """
    key = (t, mu)
    if key not in cache:
        cache[key] = sum((x - mu) ** 2 for x in t)
    return cache[key]
def EstimateParameters(t, label, num_points=31):
    """Computes the posterior distibution of mu and sigma.

    Starts from a uniform prior, updates in log space for numerical
    stability, then converts back and normalizes.

    t: sequence of values
    label: string label for the suite of hypotheses
    num_points: number of values in each dimension

    Returns
        xs: sequence of hypothetical values for mu
        ys: sequence of hypothetical values for sigma
        suite: Pmf that maps from (mu, sigma) to prob
    """
    xs, ys, suite = MakeUniformPrior(t, num_points, label)
    suite.Log()
    # t is converted to a tuple so Summation can cache on it
    LogUpdate(suite, tuple(t))
    suite.Exp()
    suite.Normalize()
    return xs, ys, suite
def ComputeMarginals(suite):
    """Computes the marginal distributions for mu and sigma.

    suite: Pmf that maps (mu, sigma) to prob

    Returns: pair of Pmf objects, one for mu and one for sigma
    """
    pmf_m = Pmf.Pmf()
    pmf_s = Pmf.Pmf()
    # Sum the joint probability along each axis.
    for hypo, prob in suite.Items():
        mu, sigma = hypo
        pmf_m.Incr(mu, prob)
        pmf_s.Incr(sigma, prob)
    return pmf_m, pmf_s
def ComputeCoefVariation(suite):
    """Computes the distribution of CV (sigma / mu).

    suite: Pmf that maps (mu, sigma) to prob

    Returns: Pmf object for CV.
    """
    pmf = Pmf.Pmf()
    # Each hypothesis contributes its probability mass at cv = sigma/mu.
    for hypo, prob in suite.Items():
        mu, sigma = hypo
        pmf.Incr(sigma / mu, prob)
    return pmf
def ProbBigger(pmf1, pmf2):
    """Returns the probability that a value from one pmf exceeds another.

    Computed as the total joint probability over all pairs (v1, v2) drawn
    independently from pmf1 and pmf2 with v1 > v2.
    """
    return sum(p1 * p2
               for v1, p1 in pmf1.Items()
               for v2, p2 in pmf2.Items()
               if v1 > v2)
def PlotPosterior(xs, ys, suite, pcolor=False, contour=True):
    """Makes a contour plot of the joint posterior and saves it to file.

    xs: sequence of values (mu grid)
    ys: sequence of values (sigma grid)
    suite: Pmf that maps (x, y) to z
    pcolor: also draw a pseudocolor plot
    contour: draw contour lines
    """
    X, Y = numpy.meshgrid(xs, ys)
    # vectorize the Pmf lookup so it can be evaluated on the whole grid
    func = lambda x, y: suite.Prob((x, y))
    prob = numpy.vectorize(func)
    Z = prob(X, Y)
    pyplot.clf()
    if pcolor:
        pyplot.pcolor(X, Y, Z)
    if contour:
        pyplot.contour(X, Y, Z)
    myplot.Save(root='bayes_height_posterior_%s' % suite.name,
                title='Posterior joint distribution',
                xlabel='Mean height (cm)',
                ylabel='Stddev (cm)')
def PlotCoefVariation(suites):
    """Plot the posterior CDFs of CV and print pairwise comparisons.

    suites: map from label to Pmf of (mu, sigma); assumes the labels
    'male' and 'female' are present.
    """
    pyplot.clf()
    pmfs = {}
    for label, suite in suites.iteritems():
        pmf = ComputeCoefVariation(suite)
        cdf = Cdf.MakeCdfFromPmf(pmf, label)
        myplot.Cdf(cdf)
        pmfs[label] = pmf
    myplot.Save(root='bayes_height_cv',
                title='Coefficient of variation',
                xlabel='cv',
                ylabel='CDF')
    print 'female bigger', ProbBigger(pmfs['female'], pmfs['male'])
    print 'male bigger', ProbBigger(pmfs['male'], pmfs['female'])
def PlotCdfs(samples):
    """Make CDFs showing the distribution of outliers (heights < 150 cm)."""
    cdfs = []
    for label, sample in samples.iteritems():
        outliers = [x for x in sample if x < 150]
        cdf = Cdf.MakeCdfFromList(outliers, label)
        cdfs.append(cdf)
    myplot.Clf()
    myplot.Cdfs(cdfs)
    myplot.Save(root='bayes_height_cdfs',
                title='CDF of height',
                xlabel='Reported height (cm)',
                ylabel='CDF')
def NormalProbPlot(samples):
    """Makes a normal probability plot for each sample in samples.

    samples: map from label ('male'/'female') to sequence of heights
    """
    pyplot.clf()
    markers = dict(male='b', female='g')
    for label, sample in samples.iteritems():
        NormalPlot(sample, label, markers[label], jitter=0.0)
    myplot.Save(show=True,
                #root='bayes_height_normal',
                title='Normal probability plot',
                xlabel='Standard normal',
                ylabel='Reported height (cm)')
def NormalPlot(ys, label, color='b', jitter=0.0, **line_options):
    """Makes a normal probability plot.

    Plots the sorted data against sorted standard-normal samples, plus a
    least-squares fit line.

    Args:
        ys: sequence of values
        label: string label for the plotted line
        color: color string passed along to pyplot.plot
        jitter: float magnitude of jitter added to the ys
        line_options: dictionary of options for pyplot.plot
    """
    n = len(ys)
    xs = [random.gauss(0.0, 1.0) for i in range(n)]
    xs.sort()
    ys = [y + random.uniform(-jitter, +jitter) for y in ys]
    ys.sort()
    inter, slope = correlation.LeastSquares(xs, ys)
    fit = correlation.FitLine(xs, inter, slope)
    pyplot.plot(*fit, color=color, linewidth=0.5, alpha=0.5)
    pyplot.plot(sorted(xs), sorted(ys),
                color=color,
                marker='.',
                label=label,
                markersize=3,
                alpha=0.1,
                **line_options)
def PlotMarginals(suite):
    """Plot the marginal CDFs for a 2-D joint (mu, sigma) distribution."""
    pmf_m, pmf_s = ComputeMarginals(suite)
    pyplot.clf()
    pyplot.figure(1, figsize=(7, 4))
    # left panel: marginal of mu
    pyplot.subplot(1, 2, 1)
    cdf_m = Cdf.MakeCdfFromPmf(pmf_m, 'mu')
    myplot.Cdf(cdf_m)
    pyplot.xlabel('Mean height (cm)')
    pyplot.ylabel('CDF')
    # right panel: marginal of sigma
    pyplot.subplot(1, 2, 2)
    cdf_s = Cdf.MakeCdfFromPmf(pmf_s, 'sigma')
    myplot.Cdf(cdf_s)
    pyplot.xlabel('Std Dev height (cm)')
    pyplot.ylabel('CDF')
    myplot.Save(root='bayes_height_marginals_%s' % suite.name)
def PlotAges(resp):
    """Plot the CDF of respondent ages."""
    ages = [r.age for r in resp.records]
    cdf = Cdf.MakeCdfFromList(ages)
    myplot.Clf()
    myplot.Cdf(cdf)
    myplot.Show()
def DumpHeights(data_dir='.', n=10000):
    """Read the BRFSS dataset, extract the heights and pickle them.

    data_dir: directory containing the BRFSS data file
    n: maximum number of records to read

    Writes a dict mapping sex code (1/2) to lists of heights (htm3) into
    'bayes_height_data.pkl'.
    """
    resp = brfss.Respondents()
    resp.ReadRecords(data_dir, n)
    #PlotAges(resp)
    d = {1: [], 2: []}
    # plain loop instead of the original side-effect list comprehension
    for r in resp.records:
        if r.htm3 != 'NA':
            d[r.sex].append(r.htm3)
    # `with` guarantees the file is closed even if dump raises
    with open('bayes_height_data.pkl', 'wb') as fp:
        cPickle.dump(d, fp)
def LoadHeights():
    """Read the pickled height data.

    Returns: dict mapping sex code to list of heights.
    """
    # Pickle files are binary: the original opened in text mode ('r'),
    # which corrupts the stream on Windows and fails under Python 3.
    with open('bayes_height_data.pkl', 'rb') as fp:
        d = cPickle.load(fp)
    return d
def Winsorize(xs, p=0.01):
    """Compresses outliers by clamping values to the p and 1-p quantiles.

    xs: sequence of values
    p: tail probability clipped at each end

    Returns: new list with extreme values replaced by the quantile bounds.
    """
    cdf = Cdf.MakeCdfFromList(xs)
    low, high = cdf.Value(p), cdf.Value(1-p)
    print low, high
    # report which values will be clamped
    outliers = [x for x in xs if x < low or x > high]
    outliers.sort()
    print outliers
    wxs = [min(max(low, x), high) for x in xs]
    return wxs
def main():
    """Estimate the posterior joint distribution of (mu, sigma) of adult
    heights by sex and plot posteriors, marginals and CV distributions."""
    if False:
        random.seed(16)
        t = [random.gauss(3, 5) for i in range(100000)]
        # NOTE(review): dead branch — EstimateParameters requires a `label`
        # argument that is missing here; would raise TypeError if enabled.
        EstimateParameters(t)
        return
    #DumpHeights(n=1000000)
    d = LoadHeights()
    labels = {1:'male', 2:'female'}
    samples = {}
    suites = {}
    for key, t in d.iteritems():
        label = labels[key]
        print label, len(t)
        # clip extreme reported heights before estimation
        t = Winsorize(t, 0.0001)
        samples[label] = t
        xs, ys, suite = EstimateParameters(t, label)
        suites[label] = suite
        PlotPosterior(xs, ys, suite)
        PlotMarginals(suite)
    #PlotCdfs(samples)
    #NormalProbPlot(samples)
    PlotCoefVariation(suites)
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 12:14:23 2020
Elemento de placa de Reissner-Mildlin com 4 nós e interpolação linear
Resultados coerentes dos deslocamentos, mas reações de apoio estranhas...
Momentos e cortes muito estranhos... ??!?!?!?!
@author: markinho
"""
import sympy as sp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d, Axes3D
# ---- symbolic setup: 4-node Reissner-Mindlin plate element ----------------
nosElemento = 4  # nodes per element
grausLiberdadeNo = 3  # degrees of freedom per node
grausLiberdade = grausLiberdadeNo*nosElemento
x, y, l = sp.symbols('x, y, l')
E, G, nu = sp.symbols('E, G, nu')
t = sp.Symbol('t') # plate thickness
# shape functions, precomputed and loaded from disk
N = sp.Matrix(np.load('funcoesN%d.npy' % nosElemento, allow_pickle=True))
Blx = sp.diff(N, x)
Bly = sp.diff(N, y)
####!!!!! STILL SOLVING WITHOUT THE JACOBIAN !!!
u = sp.Matrix(sp.symbols('u0:%d' % grausLiberdade))
Bb = []
Bs = []
# assemble per-node bending (BB) and shear (BS) strain-displacement blocks
for i in range(nosElemento):
    BB = np.array([ [0, Blx[i], 0],
                    [0, 0, Bly[i]],
                    [0, Bly[i], Blx[i]] ])
    BS = np.array([ [Blx[i], -N[i], 0],
                    [Bly[i], 0, -N[i]] ])
    Bb.append(np.transpose(BB))
    Bs.append(np.transpose(BS))
Bb = np.transpose(np.array(Bb).reshape(12, 3))
Bs = np.transpose(np.array(Bs).reshape(12, 2))
# constitutive matrices: plane-stress bending (Db) and shear (Ds)
Db = E/(1 - nu**2)*np.array([ [1, nu, 0],
                              [nu, 1, 0],
                              [0, 0, (1 - nu)/2]])
Ds = G*np.eye(2)
Bb_mult = sp.Matrix(np.matmul(np.matmul(Bb.T, Db), Bb))
Bs_mult = sp.Matrix(np.matmul(np.matmul(Bs.T, Ds), Bs))
# integrating to obtain the stiffness matrices !!!!! should this be multiplied by the Jacobian determinant L/2 ?????
Kb = t*sp.integrate( sp.integrate( Bb_mult, (x, -l*sp.Rational(1, 2), l*sp.Rational(1, 2)) ), (y, -l*sp.Rational(1, 2), l*sp.Rational(1, 2)) )#*L*sp.Rational(1, 2)
Ks = t*sp.integrate( sp.integrate( Bs_mult, (x, -l*sp.Rational(1, 2), l*sp.Rational(1, 2)) ), (y, -l*sp.Rational(1, 2), l*sp.Rational(1, 2)) )#*L*sp.Rational(1, 2)
# Using numeric values
Ep = 20000.
Gp = 7700.
nup = 0.3
L = 1. # must be 1 since the Jacobian was not used ???
tp = 50. # cm
Kbp = np.array(Kb.subs({E: Ep, nu: nup, l: L, t: tp }), dtype = float) #### use lambdify!!!
Ksp = np.array(Ks.subs({G: Gp, nu: nup, l: L, t: tp }), dtype = float)
F = np.array([-10., 0., 0., -10., 0., 0. ])
# constraining the plate: clamped on the left edge (negative x side)
Ku = np.delete(np.delete(Kbp, [3, 4, 5, 9, 10, 11], axis=0), [3, 4, 5, 9, 10, 11], axis=1) + np.delete(np.delete(Ksp, [3, 4, 5, 9, 10, 11], axis=0), [3, 4, 5, 9, 10, 11], axis=1)
Kr = np.delete(np.delete(Kbp, [3, 4, 5, 9, 10, 11], axis=0), [0, 1, 2, 6, 7, 8], axis=1) + np.delete(np.delete(Ksp, [3, 4, 5, 9, 10, 11], axis=0), [0, 1, 2, 6, 7, 8], axis=1)
U = np.linalg.solve(Ku, F)  # free-DOF displacements
Ra = np.matmul(Kr, U)  # support reactions
Ug = np.zeros(12)
Ug[[3, 4, 5, 9, 10, 11]] = U  # scatter solved DOFs into the global vector
epsilon_b = sp.Matrix(np.matmul(Bb, Ug)).subs({E: Ep, nu: nup, l: L, t: tp })
epsilon_s = sp.Matrix(np.matmul(Bs, Ug)).subs({E: Ep, nu: nup, l: L, t: tp })
Ms = (t**3/12*Db*epsilon_b).subs({E: Ep, nu: nup, l: L, t: tp })  # bending moments
Qs = (5/6*t*Ds*epsilon_s).subs({G: Gp, nu: nup, l: L, t: tp })  # shear forces (5/6 = shear correction factor)
# generating the moment and shear plots -----------------------------------------------------------------------------
# defining values for x, y:
numerico_xy = np.linspace(-0.5*L, 0.5*L, 50) # numeric x and y values; a single vector suffices because the element is square
# building numeric functions for Ms and Qs
Mx = sp.utilities.lambdify([x, y], Ms[0], "numpy")
My = sp.utilities.lambdify([x, y], Ms[1], "numpy")
Mxy = sp.utilities.lambdify([x, y], Ms[2], "numpy")
Qx = sp.utilities.lambdify([x, y], Qs[0], "numpy")
Qy = sp.utilities.lambdify([x, y], Qs[1], "numpy")
# building the grid for the plots
grid_x, grid_y = np.meshgrid(numerico_xy, numerico_xy)
# plot generation with matplotlib (one 3-D surface figure per field)
fig = plt.figure(figsize=(16, 12), dpi=100)
ax = fig.add_subplot(1, 1, 1, projection='3d') # all in one plot
surfMx = ax.plot_surface(grid_x, grid_y, Mx(grid_x, grid_y), cmap=cm.jet, linewidth=0, antialiased=False)
fig.colorbar(surfMx, shrink=0.7)
plt.show()
fig = plt.figure(figsize=(16, 12), dpi=100)
ax = fig.add_subplot(1, 1, 1, projection='3d') # all in one plot
surfMy = ax.plot_surface(grid_x, grid_y, My(grid_x, grid_y), cmap=cm.jet, linewidth=0, antialiased=False)
fig.colorbar(surfMy, shrink=0.7)
plt.show()
fig = plt.figure(figsize=(16, 12), dpi=100)
ax = fig.add_subplot(1, 1, 1, projection='3d') # all in one plot
surfMxy = ax.plot_surface(grid_x, grid_y, Mxy(grid_x, grid_y), cmap=cm.jet, linewidth=0, antialiased=False)
fig.colorbar(surfMxy, shrink=0.7)
plt.show()
fig = plt.figure(figsize=(16, 12), dpi=100)
ax = fig.add_subplot(1, 1, 1, projection='3d') # all in one plot
surfQx = ax.plot_surface(grid_x, grid_y, Qx(grid_x, grid_y), cmap=cm.jet, linewidth=0, antialiased=False)
fig.colorbar(surfQx, shrink=0.7)
plt.show()
fig = plt.figure(figsize=(16, 12), dpi=100)
ax = fig.add_subplot(1, 1, 1, projection='3d') # all in one plot
surfQy = ax.plot_surface(grid_x, grid_y, Qy(grid_x, grid_y), cmap=cm.jet, linewidth=0, antialiased=False)
fig.colorbar(surfQy, shrink=0.7)
plt.show()
#------------------------------------------------------------------------------------------------------------------
|
#!/usr/bin/env python
"""Basic pipeline building blocks.
This modules provides the basic building blocks in a JIP pipeline and a way
to search and find them at run-time. The basic buiding blocks are instances
of :py:class:`Tool`. The JIP library comes with two sub-classes that can be
used to create tool implementations:
:py:class:`ScriptTool`
This sub-class of `Tool` integrates file or script based tool
implementations which can be served from stand-alone script files
:py:class:`PythonTool`
In contrast to the script tool, this `Tool` extension allows to create
`Tool` instances from other, possibly non-related, python classes. The
easiest way to used this is with the :py:class:`jip.tools.tool` decorator,
which allows you to take arbitrary python classes and *make* them jip
tools.
In addition to the `Tool` implementations, this module provides the
:py:class:`Scanner` class, which is used to find tool implementations either
form disk or from an arbitrary python module. This class is supposed to be
used as a *singleton* and an configured instance is available in the main
`jip` module, exposed as `jip.scanner`. The scanner class itself is
configured either through the :py:mod:`jip.configuration`, or through
environment variables. The :py:class:`Scanner` documentation covers both
the environment variables that can be used as well as the configuration
properties.
"""
import cPickle
import copy
import inspect
from textwrap import dedent
from os import remove, getcwd, getenv, listdir
from os.path import exists, basename, dirname, abspath
import os
import sys
import types
import shutil
import jip.templates
from jip.options import Options, TYPE_OUTPUT, TYPE_INPUT, Option
from jip.templates import render_template, set_global_context
from jip.utils import list_dir
from jip.logger import getLogger
import jip.profiles
log = getLogger('jip.tools')
# the pickle template to store a pyton tool
_pickel_template = """
python -c '
import sys
import cPickle
import jip
import jip.tools
import types
jip._disable_module_search = True
source="".join([l for l in sys.stdin]).decode("base64")
data = cPickle.loads(source)
deco = jip.tools.tool()
tool = jip.tools.PythonTool(
data["instance"],
deco
)
tool._options = data["options"]
if isinstance(tool, types.FunctionType):
tool()
else:
tool.run()
'<< __EOF__
%s__EOF__
"""
#########################################################
# Exceptions
#########################################################
class ValidationError(Exception):
    """Exception raised in validation steps. The exception
    carries the source tool and a message.
    """
    def __init__(self, source, message):
        self.source = source
        self.message = message

    def __repr__(self):
        # Imported lazily to avoid a hard dependency at module load time.
        import jip.cli
        if not self.source:
            return "%s" % (
                jip.cli.colorize(self.message, jip.cli.RED)
            )
        return "%s: %s" % (
            jip.cli.colorize(self.source, jip.cli.RED),
            jip.cli.colorize(self.message, jip.cli.BLUE)
        )

    def __str__(self):
        return self.__repr__()
class ToolNotFoundException(Exception):
    """Raised in case a tool is not found by the scanner."""
#########################################################
# decorators
#########################################################
class tool(object):
    """Decorate functions and classes and convert them to tools.
    The @jip.tool decorator turns classes and functions into valid JIP
    tools. The simplest way to use this decorator is to annotate a python
    function that returns a string. This string is then interpreted as a
    JIP script template. The functions docstring is used, similar to
    JIP scripts, to parse command line options and tool input and
    output parameters. For example::
        @tool()
        def mytool():
            '''
            Send a greeting
            usage:
                mytool <name>
            '''
            return 'echo "hello ${name}"'
    This creates a single *bash* interpreted script and exposes a tool,
    `mytool`, into the JIP environment. You can use the decorators
    arguments to further customize the tool specification, i.e. specify
    a different name. If you want to use a different interpreter, you can
    return a tuple where the first element is the interpreter name and the
    second is the script template.
    :param name: specify a tool name. If no name is specified, the name
                 of the decorated function or class is used as the tool
                 name
    :param inputs: specify a list of option names that are treated
                   as input options
    :param outputs: specify a list of option names that are treated as output
                    options
    :param argparse: specify the name of the function or a function reference
                     that take an ``ArgumentParser`` instance and populates
                     it. This takes precedence over the doc string if the
                     function exists.
    :param get_command: name of the function or a function reference that
                        implements the tools ``get_command`` function
    :param validate: name of the function or a function reference that
                     implements the tools ``validate`` function
    :param setup: name of the function or a function reference that
                  implements the tools ``setup`` function
    :param init: name of the function or a function reference that
                 implements the tools ``init`` function
    :param run: name of the function or a function reference that
                implements the tools ``run`` function
    :param pipeline: name of the function or a function reference that
                     implements the tools ``pipeline`` function
    :param is_done: name of the function or a function reference that
                    implements the tools ``is_done`` function
    :param cleanup: name of the function or a function reference that
                    implements the tools ``cleanup`` function
    :param help: name of the function or a function reference that
                 implements the tools ``help`` function
    :param add_outputs: takes a list of values to add hidden output
                        options
    :param check_files: takes a list of option names that will be passed
                        through file checks on validation
    """
    def __init__(self, name=None, inputs=None, outputs=None,
                 argparse='register', get_command='get_command',
                 validate='validate',
                 setup='setup',
                 init='init',
                 run='run',
                 pipeline='pipeline',
                 is_done='is_done',
                 cleanup='cleanup',
                 help='help',
                 add_outputs=None,
                 check_files=None,
                 ensure=None,
                 pytool=False,
                 force_pipeline=False):
        self.name = name
        self.inputs = inputs
        self.outputs = outputs
        self.argparse = argparse
        self.add_outputs = add_outputs
        self._check_files = check_files
        self._ensure = ensure
        self._pytool = pytool
        self._force_pipeline = force_pipeline
        ################################################################
        # tool delegates
        # each delegate is either a callable or the name of a method
        # that is resolved on the decorated instance at call time
        ################################################################
        self._validate = validate if validate else "validate"
        self._setup = setup if setup else "setup"
        self._init = init if init else "init"
        self._is_done = is_done if is_done else "is_done"
        self._pipeline = pipeline if pipeline else "pipeline"
        self._get_command = get_command if get_command else "get_command"
        self._cleanup = cleanup if cleanup else "cleanup"
        self._help = help if help else "help"
        self._run = run if run else "run"
    def __call__(self, *args):
        """Apply the decorator: wrap the class or function in a
        :class:`PythonTool` and register it in ``Scanner.registry``
        under the tool name. Returns the decorated object unchanged."""
        cls = args[0]
        log.debug("Decorated tool or pipeline: %s", cls)
        # check the name
        if self.name is None:
            if isinstance(cls, types.FunctionType):
                self.name = cls.func_name
            else:
                self.name = cls.__name__
        # overwrite the string representation
        is_class = False
        if not isinstance(cls, types.FunctionType):
            cls.__repr__ = lambda x: self.name
            is_class = True
        if is_class:
            # patch __setattr__ so that assigning to an attribute that
            # holds an Option updates the option value instead of
            # replacing the Option instance
            old = None
            if hasattr(cls, '__setattr__'):
                old = cls.__setattr__
            def setatr(slf, name, value):
                ov = slf.__dict__.get(name, None)
                if ov is not None and isinstance(ov, Option):
                    ov.set(value)
                else:
                    if old:
                        old(slf, name, value)
                    else:
                        if name in slf.__dict__:
                            slf.__dict__[name] = value
                        else:
                            raise AttributeError()
            cls.__setattr__ = setatr
        tool_instance = PythonTool(cls, self, self.add_outputs)
        Scanner.registry[self.name] = tool_instance
        log.debug("Registered tool from module: %s", self.name)
        return cls
    ################################################################
    # tool delegates
    ################################################################
    def _update_delegate(self, wrapper, instance):
        """Inject helper functions and the wrapper's options into the
        decorated instance (without overriding existing attributes)."""
        # helper function to expose a name function directly
        def set_name(name):
            # set the job name
            wrapper.job.name = name
        # inject helper functions
        helper_function = {
            "name": set_name,
            "job": wrapper.job,
            "profile": wrapper.job,
            "add_output": wrapper.options.add_output,
            "add_input": wrapper.options.add_input,
            "add_option": wrapper.options.add_option,
            'r': render_template,
            'render_template': render_template,
            'options': wrapper.options,
            'opts': wrapper.options,
            'args': wrapper.args,
            'ensure': wrapper.ensure,
            'check_file': wrapper.check_file,
            'validation_error': wrapper.validation_error
        }
        for k, v in helper_function.iteritems():
            if not hasattr(instance, k):
                instance.__dict__[k] = v
        # inject options if they don't exist
        for o in wrapper.options:
            if not hasattr(instance, o.name):
                instance.__dict__[o.name] = o
    def __call_delegate(self, fun, wrapper, instance):
        """Resolve ``fun`` (a callable or a method name on ``instance``,
        falling back to the main ``Tool`` implementation) and call it,
        delegating either to the instance or to the wrapper."""
        if not callable(fun):
            name = fun
            try:
                fun = getattr(instance, name)
            except:
                # don't double validate, the python tool will call the
                # Tool validate already
                if name == 'validate':
                    return
                # try to get the function from the main Tool implementation
                fun = getattr(Tool, name)
        if fun:
            # make sure the instance is aware of the options
            # (__self__ is py3-style bound methods, im_self is py2)
            if (hasattr(fun, "__self__") and fun.__self__ is not None) or \
                    (hasattr(fun, "im_self") and fun.im_self is not None):
                self._update_delegate(wrapper, instance)
                # force options and args
                instance.options = wrapper.options
                instance.opts = wrapper.options
                instance.args = wrapper.args
                return fun()
            else:
                # function based implementation
                self._update_delegate(wrapper, wrapper)
                return fun(wrapper)
    def validate(self, wrapper, instance):
        """Run the validate delegate plus any configured ``check_files``
        and ``ensure`` checks; wraps any error in a ValidationError."""
        try:
            r = self.__call_delegate(self._validate, wrapper, instance)
            if self._check_files:
                for check in self._check_files:
                    wrapper.check_file(check)
            if self._ensure:
                for e in self._ensure:
                    wrapper.ensure(e[0], e[1], None if len(e) < 3 else e[2])
            return r
        except Exception as err:
            if not isinstance(err, ValidationError):
                log.debug("Validation error: %s", str(err).strip())
                err = ValidationError(wrapper, str(err))
            raise err
    def setup(self, wrapper, instance):
        return self.__call_delegate(self._setup, wrapper, instance)
    def init(self, wrapper, instance):
        return self.__call_delegate(self._init, wrapper, instance)
    def is_done(self, wrapper, instance):
        return self.__call_delegate(self._is_done, wrapper, instance)
    def pipeline(self, wrapper, instance):
        return self.__call_delegate(self._pipeline, wrapper, instance)
    def get_command(self, wrapper, instance):
        """Build the ``(interpreter, rendered_script)`` tuple for the
        decorated tool. Returns ``(None, None)`` if no command results."""
        interp = "bash"
        cmd = None
        if not self._pytool and not isinstance(instance, types.FunctionType):
            cmds = self.__call_delegate(self._get_command, wrapper,
                                        instance)
        else:
            if self._pytool:
                # this is a python tool that wraps a class or function.
                # In order to get a single command, we pickle the
                # wrapped instance and the options and then push it
                # through the pickle template
                data = {
                    "instance": instance,
                    "options": wrapper.options
                }
                r = ('bash', _pickel_template %
                     (cPickle.dumps(data).encode("base64")))
                return r
            else:
                # this is not a python tool function but a function
                # that will return a template
                argspec = inspect.getargspec(instance)
                if len(argspec[0]) > 0:
                    cmds = instance(wrapper)
                else:
                    cmds = instance()
        # a (interpreter, template) tuple may be returned instead of
        # a plain template string
        if isinstance(cmds, (list, tuple)):
            interp = cmds[0]
            cmd = cmds[1]
        else:
            cmd = cmds
        if interp and cmd:
            block = Block(content=cmd, interpreter=interp)
            return interp, block.render(wrapper)
        return None, None
    def cleanup(self, wrapper, instance):
        return self.__call_delegate(self._cleanup, wrapper, instance)
    def run(self, wrapper, instance):
        return self.__call_delegate(self._run, wrapper, instance)
    def help(self, wrapper, instance):
        return self.__call_delegate(self._help, wrapper, instance)
class pytool(tool):
    """Decorator that exposes a single python function as a tool.

    The decorated function is wrapped in a :class:`PythonTool` instance
    and must accept a single parameter ``self`` that gives access to the
    tools options.
    """
    def __init__(self, *args, **kwargs):
        # delegate to the generic tool decorator with pytool forced on
        kwargs = dict(kwargs, pytool=True)
        tool.__init__(self, *args, **kwargs)
class pipeline(tool):
    """Decorator that marks a single python function as a pipeline."""
    def __init__(self, *args, **kwargs):
        # delegate to the generic tool decorator with pipeline mode forced
        kwargs = dict(kwargs, force_pipeline=True)
        tool.__init__(self, *args, **kwargs)
class Scanner():
    """
    This class holds a script/tool cache.
    The cache is organized in dicts, e.g. the script_cache, which
    stores name->instance pairs pointing from the name of the tool
    to its cached instance. The find implementations will return
    clones of the instances in the cache.
    """
    # module-level registry populated by the @tool/@pytool/@pipeline
    # decorators; shared by all Scanner instances
    registry = {}
    def __init__(self, jip_path=None, jip_modules=None):
        self.initialized = False
        # name -> tool instance (or file path for lazily loaded scripts)
        self.instances = {}
        self.jip_path = jip_path if jip_path else ""
        self.jip_modules = jip_modules if jip_modules else []
        self.jip_file_paths = set([])
        self.__scanned = False
        self.__scanned_files = None
    def find(self, name, path=None, is_pipeline=False):
        """Finds a tool by its name or file name.
        If the given name points to an existing file, the file is loaded
        as a script tool and returned. Otherwise, a default search is
        triggered, optionally including the specified path.

        Arguments may be embedded in the name after a first space, e.g.
        ``"mytool -i input.txt"``; they are parsed onto the clone.

        :returns: a new instance of the tool
        :rtype: :class:`Tool`
        :raises ToolNotFoundException: if the tool could not be found
        """
        if name is None:
            return None
        # split off any arguments embedded in the tool name
        s = name.split(" ", 1)
        args = None
        if len(s) > 1:
            import shlex
            name = s[0]
            args = shlex.split(s[1])
        if exists(name) and os.path.isfile(name):
            ## the passed argument is a file. Try to load it as a
            ## script and add the files directory to the search path
            tool = ScriptTool.from_file(name, is_pipeline=is_pipeline)
            self._register_tool(name, tool)
            self.jip_file_paths.add(dirname(name))
            clone = tool.clone()
            clone.init()
            if args:
                log.debug("Scanner | Parsing arguments passed "
                          "through tool name")
                clone.parse_args(args)
            return clone
        if not self.initialized:
            self.scan()
            self.initialized = True
        self.instances.update(Scanner.registry)
        tool = self.instances.get(name, None)
        if tool is None:
            # also try the name with the .jip script extension
            tool = self.instances.get(name + ".jip", None)
            if tool is None:
                raise ToolNotFoundException("No tool named '%s' found!" % name)
        if isinstance(tool, basestring):
            ## the tool is not loaded, load the script,
            ## and add it to the cache
            tool = ScriptTool.from_file(tool, is_pipeline=is_pipeline)
            self._register_tool(name, tool)
        log.debug("Scanner | Cloning tool %s [%s]", tool, tool.__hash__())
        clone = tool.clone()
        clone.init()
        if args:
            log.debug("Scanner | Parsing arguments passed through tool name")
            clone.parse_args(args)
        return clone
    def scan(self, path=None):
        """Searches for scripts and python modules in the configured
        locations and returns a dictionary of the detected instances.

        :param path: optional path value to define a folder to scan
        :returns: dict of tools
        """
        log.debug("Searching for JIP tools")
        if self.instances is None:
            self.instances = {}
        self.scan_files(parent=path)
        self.scan_modules()
        for n, m in Scanner.registry.iteritems():
            self._register_tool(n, m)
        return self.instances
    def _register_tool(self, name, tool):
        """Store the tool in the instance cache and, if a ``.spec`` file
        exists next to the tool's source file, load it as the tool's
        execution profile."""
        self.instances[name] = tool
        # check and load profile for the given tool
        if tool.path:
            spec_file = tool.path
            # replace extension with .spec
            try:
                i = spec_file.rindex(".")
                if i >= 0:
                    spec_file = spec_file[:i] + ".spec"
                    log.debug("Checking for spec file at: %s", spec_file)
                    if os.path.exists(spec_file):
                        log.info("Loading spec for %s from %s",
                                 name, spec_file)
                        profile = jip.profiles.Profile.from_file(spec_file)
                        tool._job = profile
            except Exception as err:
                # a broken spec file must not prevent tool registration
                log.error("Error while loading spec for %s: %s", name, err,
                          exc_info=True)
    def scan_files(self, parent=None):
        """Scan files for jip tools. This function detects files with
        the ``.jip`` extension in the default search locations.

        :param parent: optional parent folder
        :returns: dict mapping file basenames to found file paths
        """
        # results are cached unless an explicit parent folder is given
        if parent is None and self.__scanned_files is not None:
            return self.__scanned_files
        import re
        pattern = re.compile(r'^.*(.jip)$')
        files = {}
        if parent:
            for path in self.__search(parent, pattern):
                self.instances[basename(path)] = path
                files[basename(path)] = path
        #check cwd
        for path in self.__search(getcwd(), pattern, False):
            self.instances[basename(path)] = path
            files[basename(path)] = path
        # configured search path plus the JIP_PATH environment variable
        jip_path = "%s:%s" % (self.jip_path, getenv("JIP_PATH", ""))
        for folder in jip_path.split(":") + list(self.jip_file_paths):
            for path in self.__search(folder, pattern):
                self.instances[basename(path)] = path
                files[basename(path)] = path
        if parent is None:
            self.__scanned_files = files
        return files
    def __search(self, folder, pattern, recursive=True):
        """Yield files below ``folder`` whose path matches ``pattern``."""
        log.debug("Searching folder: %s", folder)
        for path in list_dir(folder, recursive=recursive):
            if pattern.match(path) and os.path.isfile(path):
                log.debug("Found tool: %s", path)
                yield path
    def add_module(self, path):
        """Add a module or a python file to the list of modules that are
        scanned for tools.

        :param: path to the module that will be added to the search path
        """
        self.jip_modules.append(path)
        # invalidate the module scan cache
        self.__scanned = False
    def add_folder(self, path):
        """Add a folder to the list of folders that are
        scanned for tools.

        :param: path to the folder that will be added to the search path
        """
        self.jip_file_paths.add(path)
        # invalidate all caches so the next find() triggers a rescan
        self.__scanned = False
        self.__scanned_files = None
        self.initialized = False
    def scan_modules(self):
        """Loads the python modules specified in the JIP configuration.
        This will register any functions and classes decorated with
        one of the JIP decorators.
        """
        if self.__scanned:
            return
        path = getenv("JIP_MODULES", "")
        log.debug("Scanning modules")
        for module in path.split(":") + self.jip_modules + ['jip.scripts']:
            try:
                if module:
                    log.debug("Importing module: %s", module)
                    __import__(module)
            except ImportError, e:
                # not an importable module name; fall back to loading
                # the entry as a file path
                log.debug("Error while importing module: %s. "
                          "Trying file import", str(e))
                if exists(module):
                    self._load_from_file(module)
        self.__scanned = True
    def _load_from_file(self, path):
        """Try to load a module from the given file. No module is loaded
        if the file does not exist. Otherwise, a full module name is guessed
        by checking for __init__.py files upwards, the package parent is
        put on ``sys.path`` and the module is imported.

        :param path: the path to the module file
        """
        if not exists(path):
            return
        name, parent_dir = self._guess_module_name(path)
        log.debug("Importing module from file: %s %s %s", name, path,
                  parent_dir)
        sys.path.insert(0, parent_dir)
        mod = __import__(name)
        log.debug("Imported module from file %s : %s", path, mod)
        #imp.load_source(name, path)
    def _guess_module_name(self, path):
        """Guess the absolute module name for the given file by checking for
        __init__.py files in the current folder structure and upwards.

        :returns: tuple of (dotted module name, package parent directory)
        """
        path = abspath(path)
        base = basename(path)
        if base.endswith('.py'):
            base = base[:-3]
        name = [base]
        def _load_package_name(current, module_name):
            # walk upwards while the folder is a package
            inits = filter(lambda x: x == '__init__.py', listdir(current))
            if inits:
                module_name.append(basename(current))
                return _load_package_name(dirname(current), module_name)
            return module_name, current
        # check if this is in a package
        name, parent_dir = _load_package_name(dirname(path), name)
        name.reverse()
        return ".".join(name), parent_dir
class Block(object):
    """Base class for executable blocks that can render themselves to scripts
    and provide information about the interpreter that should be used to
    run the script.
    """
    def __init__(self, content=None, interpreter=None, interpreter_args=None,
                 lineno=0):
        # line offset of this block within its source script
        self._lineno = lineno
        self.interpreter = interpreter
        self._process = None
        self.content = content
        if self.content is None:
            self.content = []
        self.interpreter_args = interpreter_args
    def run(self, tool, stdin=None, stdout=None):
        """Execute this block: render the content to a temporary script
        file and launch it with the configured interpreter (bash by
        default). Returns the started subprocess.Popen instance.
        """
        import subprocess
        import jip
        # write template to named temp file and run with interpreter
        script_file = jip.create_temp_file()
        try:
            script_file.write(self.render(tool))
            script_file.close()
            cmd = [self.interpreter if self.interpreter else "bash"]
            if self.interpreter_args:
                cmd += self.interpreter_args
            # NOTE(review): this sets self.process while __init__ and
            # terminate() use self._process -- the two attributes are
            # never reconciled; confirm which one callers rely on
            self.process = subprocess.Popen(
                cmd + [script_file.name],
                stdin=stdin,
                stdout=stdout
            )
            return self.process
        except OSError, err:
            # catch the errno 2 No such file or directory, which indicates the
            # interpreter is not available
            if err.errno == 2:
                raise Exception("Interpreter %s not found!" % self.interpreter)
            raise err
    def render(self, tool):
        """Renders this blocks content within the context of the given tool

        :param tool: the tool
        :returns: rendered block content
        :rtype: string
        """
        content = self.content
        if isinstance(content, (list, tuple)):
            content = "\n".join(content)
        # template context: raw option values plus tool meta data
        ctx = dict(tool.options.to_dict(raw=True))
        ctx['tool'] = tool
        ctx['__file__'] = tool.path
        ctx['args'] = tool.options.to_dict()
        ctx['options'] = tool.options.to_cmd
        return render_template(content, **ctx)
    def terminate(self):
        """
        Terminate currently running blocks
        """
        # NOTE(review): run() assigns self.process, but this guard checks
        # self._process (set to None in __init__ and never updated), and
        # _popen/is_alive() are multiprocessing.Process APIs while run()
        # creates a subprocess.Popen -- verify this path is ever exercised
        if self._process is not None:
            if self._process._popen is not None:
                self._process.terminate()
                import time
                # sleep and check job states a few times before we do a hard
                # kill
                for t in [0.01, 0.05, 0.10, 2, 3]:
                    time.sleep(t)
                    if not self.process.is_alive():
                        break
                if self.process.is_alive():
                    # kill it
                    import os
                    import signal
                    os.kill(self.process._popen.pid, signal.SIGKILL)
    def __str__(self):
        return "Block['%s']" % self.interpreter
class PythonBlockUtils(object):
    """Utility functions that are exposed in template blocks and template
    functions.

    The block utilities store a reference to the *local* and *global*
    environment, to the current *tool* and to the current *pipeline*.
    """
    def __init__(self, tool, local_env):
        self.tool = tool
        self._pipeline = None
        self._local_env = local_env
        self._global_env = None
        # reuse the tool's pipeline if it already carries one
        if hasattr(tool, "_pipeline"):
            self._pipeline = tool._pipeline
    @property
    def pipeline(self):
        """The current pipeline, created lazily on first access."""
        from jip import Pipeline
        if self._pipeline is None:
            self._pipeline = Pipeline()
            self._pipeline._utils = self
        return self._pipeline
    def check_file(self, name):
        """Checks for the existence of a file referenced by an option.
        Please note that this does **not** take a file name, but the name
        of an option. This function is preferred over a simple check
        using ``os.path.exists()`` because it also checks for job dependencies.
        This is important because a mandatory file might not *yet* exist
        within the context of a pipeline, but it will be created at runtime
        in a previous step.

        Raises a validation error if the option is not a dependency and
        its validation fails.

        :param name: the options name
        """
        opt = self.tool.options[name]
        # options fed by another job are created at runtime; only
        # validate options that are not incoming dependencies
        if not opt.is_dependency():
            self.tool.options[name].validate()
    def validation_error(self, message, *args):
        """Quickly raise a validation error with a custom message.
        This function simply raises a ValidationError. You can use it
        in a custom validation implementation to quickly fail the validation

        :param message: the message
        :param args: argument interpolated into the message
        :raises ValidationError: always
        """
        raise ValidationError(self.tool, message % args)
    def set(self, name, value):
        """Set an options value.

        :param name: the options name
        :type name: string
        :param value: the new value
        """
        self.tool.options[name].value = value
    def run(self, _name, **kwargs):
        """Searches for a tool with the specified name and adds it as a
        new :py:class:`~jip.pipelines.Node` to the current pipeline.
        All specified keyword argument are passed as option values to
        the tool.
        Delegates to the pipelines :py:meth:`jip.pipelines.Pipeline.run`
        method.

        :param _name: the name of the tool
        :type _name: string
        :param kwargs: additional argument passed to the tool as options
        :returns: a new node that executes the specified tool and is added
                  to the current pipeline
        :rtype: :py:class:`jip.pipelines.Node`
        """
        return self.pipeline.run(_name, **kwargs)
    def job(self, *args, **kwargs):
        """Create and returns a new :class:`~jip.pipelines.Job`.
        The job instance can be used to customize the execution environment
        for *the next* job. For example::
            job("Test", threads=2).run('mytool', ...)
        This is a typical usage in a pipeline context, where a new job
        environment is created and then applied to a new 'mytool' pipeline
        node.

        :param args: job arguments
        :param kwargs: job keyword arguments
        :returns: a new job instance
        :rtype: :class:`jip.pipelines.Job`
        """
        return self.pipeline.job(*args, **kwargs)
    def name(self, name):
        """Set the runtime name of a pipeline.
        The runtime name of the pipeline is stored in the database and is
        used as a general identifier for a pipeline run.
        **Note** that this sets the name of the *pipeline* if used in a
        pipeline context, otherwise it sets the name of the tool/job.
        Within a pipeline context, the name can be changed using a
        :py:func:`job`::
            job("my job").run(...)
        or after the node was created:
            myrun = run(...)
            myrun.job.name = "my job"

        :param name: the name of the pipeline
        :type name: string
        """
        self.tool._job.name = name
    def bash(self, command, **kwargs):
        """Create a *bash* job that executes a bash command.
        This is a fast way to build pipelines that execute shell commands. The
        function wraps the given command string in the *bash tool* that
        is defined with ``input``, ``output``, and ``outfile``. Input and
        output default to stdin and stdout. Note that you can access your
        local context within the command string. Take for example the following
        pipeline script::
            name = "Joe"
            bash("echo 'Hello ${name}'")
        This will work as expected. The command template can access local
        variables. Please keep in mind that the tools context takes precedence
        over the script context. That means that::
            input="myfile.txt"
            bash("wc -l ${input}")
        in this example, the command ``wc -l`` will be rendered and wait for
        input on stdin. The bash command has an ``input`` option and that takes
        precedence before the globally defined ``input`` variable. This is true
        for ``input``, ``output``, and ``outfile``, even if they are not
        explicitly set.
        You can however access variables defined in the global context using
        the `_ctx`::
            input="myfile.txt"
            bash("wc -l ${_ctx.input}")
        will indeed render and execute ``wc -l myfile.txt``.

        :param command: the bash command to execute
        :type command: string
        :param kwargs: arguments passed into the context used to render the
                       bash command. ``input``, ``output``, and ``outfile`` are
                       passed as options to the *bash* tool that is used to
                       run the command
        :returns: a new pipeline node that represents the bash job
        :rtype: :class:`jip.pipelines.Node`
        """
        bash_node = self.pipeline.run('bash', cmd=command, **kwargs)
        return bash_node
    def _update_global_env(self, env):
        """Merge ``env`` into the stored global environment."""
        if not self._global_env:
            self._global_env = {}
        self._global_env.update(env)
    def _update_context(self, ctx, kwargs=None, base_node=None):
        """Build a template render context: layer the global environment
        under ``ctx``, apply ``kwargs``, and wrap pipeline nodes so that
        referencing them resolves to their default output option (and
        records a dependency on ``base_node`` when rendered)."""
        if self._global_env:
            for k, v in self._global_env.iteritems():
                if k not in ctx:
                    ctx[k] = v
        if kwargs:
            ctx.update(kwargs)
        ## update all Nodes with their default output options
        if base_node is not None:
            from jip.pipelines import Node
            class OptionWrapper(object):
                # lazily resolves a node's option; rendering the wrapper
                # (str()) registers the inter-node dependency
                def __init__(self, node, option):
                    self.node = node
                    self.option = option
                def __str__(self):
                    if base_node != self.node:
                        base_node.depends_on(self.node)
                        if self.option.option_type != jip.options.TYPE_OPTION:
                            log.debug("Adding additional input option "
                                      "for node %s : %s",
                                      base_node, self.option.name)
                            self.node._tool.options.make_absolute(
                                self.node._job.working_dir
                            )
                            base_node._additional_input_options.add(
                                self.option
                            )
                    return str(self.option)
                def __getattr__(self, name):
                    return OptionWrapper(
                        self.node, self.node._tool.options[name]
                    )
            for k in ctx.keys():
                v = ctx[k]
                if isinstance(v, Node):
                    try:
                        ctx[k] = OptionWrapper(
                            v,
                            v._tool.options.get_default_output()
                        )
                    except LookupError:
                        # no default output option
                        pass
        return ctx
class PythonBlock(Block):
    """Extends block and runs the content as embedded python.
    """
    def __init__(self, content=None, lineno=0):
        Block.__init__(self, content=content, lineno=lineno)
        self.interpreter = "__embedded__"
    def run(self, tool, stdin=None, stdout=None):
        """Execute this block as an embedded python script.

        The block content is executed with ``exec`` inside an environment
        that exposes the tool, its options and the
        :class:`PythonBlockUtils` helpers, plus all known tools as
        callables. Returns the resulting environment dict.
        """
        log.debug("Block: run python block for: %s", tool)
        #tmpl = self.render(tool)
        content = self.content
        if isinstance(content, (list, tuple)):
            content = "\n".join(content)
        local_env = locals()
        utils = PythonBlockUtils(tool, local_env)
        profile = jip.profiles.Profile()
        if hasattr(tool, '_job'):
            profile = tool._job
        # names available to the executed block
        env = {
            "tool": tool,
            "args": tool.options.to_dict(),
            "opts": tool.options,
            "options": tool.options,
            "check_file": utils.check_file,
            "ensure": tool.ensure,
            "run": utils.run,
            "validation_error": utils.validation_error,
            "bash": utils.bash,
            "job": utils.job,
            "name": utils.name,
            "add_output": tool.options.add_output,
            "add_input": tool.options.add_input,
            "add_option": tool.options.add_option,
            "set": utils.set,
            'r': render_template,
            'render_template': render_template,
            'utils': utils,
            'profile': profile,
            'basename': basename,
            'dirname': dirname,
            'abspath': abspath,
            'pwd': getcwd(),
            'exists': exists,
            '__file__': tool.path if tool.path else None
        }
        # link known tools into the context
        from jip import scanner
        from functools import partial
        scanner.scan_modules()
        for name, cls in scanner.registry.iteritems():
            if not name in env:
                env[name] = partial(utils.run, name)
        for name, path in scanner.scan_files().iteritems():
            k = name
            if k.endswith(".jip"):
                k = k[:-4]
            if not k in env:
                env[k] = partial(utils.run, name)
        # link options to context
        for o in tool.options:
            if not o.name in env:
                n = o.name.replace("-", "_").replace(" ", "_")
                env[n] = o
        utils._global_env = env
        # NOTE(review): old_global_context is captured but never restored
        # after set_global_context -- confirm whether restoring it was
        # intended
        old_global_context = jip.templates.global_context
        set_global_context(env)
        try:
            exec content in local_env, env
        except Exception as e:
            # shift reported line numbers to the enclosing script
            if hasattr(e, 'lineno'):
                e.lineno += self._lineno
            raise
        # auto naming for tools
        from jip.pipelines import Node
        for k, v in env.iteritems():
            if isinstance(v, Node):
                if v._job.name is None:
                    v._job.name = k
        # reset index
        log.debug("Block: block for: %s executed", tool)
        return env
    def terminate(self):
        """The terminate function on a python block does nothing. A
        Python block can not be terminated directly."""
        pass
    def __str__(self):
        return "PythonBlock"
class Tool(object):
"""The base class for all implementation of executable units.
This class provides all the building block to integrated new tool
implementations that can be executed, submitted and integrated in pipelines
to construct more complex setups.
A `Tool` in a JIP setup is considered to be a container for the executions
meta-data, i.e. options and files that are needed to the actual run. The
main function of the `Tool` class is it :py:meth:`get_command`
function, which returns a tuple `(interpreter, command)`, where the
`interpreter` is a string like "bash" or "perl" or even a *path* to some
interpreter executable that will be used to execute the `command`. The
command itself is the string representation of the content of a script that
will be passed to the `interpreter` at execution time. Please note that
the :py:meth:`get_command` functions command part is supposed to be
fully *rendered*, it will not be modified any further. The JIP default
tool classes that are used, for example, to provide script to the system,
are already integrated with the :py:mod:`jip.templates` system, but you can
easily use the rendering function directly to create more dynamic commands
that can adopt easily to changed in the configuration of a tool.
The class exposes a name and a path to a source file as properties. Both
are optional and can be omitted in order to implement anonymous tools. In
addition to these *meta* data, the tools :py:meth:`__init__` function
allows you to provide a *options_source*. This object is used to create the
:py:class:`jip.options.Options` that cover the runtime configuration of a
tool. The options are initialize lazily on first access using the
`options_source` provided at initialization time. This object can be either
a string or an instance of an `argparse.ArgumentParser`. Both styles of
providing tool options are described in the :py:mod:`jip.options` module.
"""
    def __init__(self, options_source=None, name=None):
        """Initialize a tool instance. If no options_source is given
        the class docstring is used as the options source.

        :param options_source: either a string or an argparser instance
                               defaults to the class docstring
        :param name: the name of this tool
        """
        #: the tools name
        self._name = name
        #: path to the tools source file
        self.path = None
        # options are created lazily from _options_source on first
        # access of the `options` property
        self._options = None
        self._options_source = options_source
        # execution profile, created lazily by the `job` property
        self._job = None
        self._is_pipeline = False
    def setup(self):
        """Setup method that can be implemented to manipulate tool options
        before rendering and validation. Note that options here might still
        contain template strings. You are also allowed to set option values
        to template strings. The default implementation does nothing.

        :raises Exception: in case of a critical error
        """
        pass
    def init(self):
        """Initialization method that can be implemented to initialize the tool
        instance and, for example, add options. ``init`` is called once for
        the tool instance and the logic within the ``init`` is not allowed to
        rely on any values set or applied to the tool. The default
        implementation does nothing.

        :raises Exception: in case of a critical error
        """
        pass
    @property
    def name(self):
        """The name of this tool."""
        return self._name
    @name.setter
    def name(self, name):
        self._name = name
    @property
    def job(self):
        """The tools :class:`jip.profiles.Profile` instance, created
        lazily on first access."""
        if self._job is None:
            self._job = jip.profiles.Profile()
        return self._job
    def profile(self):
        """Returns the tools profile (see :py:attr:`job`)."""
        return self.job
    @property
    def options(self):
        """Access this tools :py:class:`jip.options.Options` instance.
        The tools options are the main way to interact with and configure a
        tool instance either from outside or from within a pipeline.
        The options are parsed lazily from the options source on first
        access; ``None`` if no options source was provided.
        """
        if self._options is None:
            if self._options_source is not None:
                self._options = self._parse_options(self._options_source)
        return self._options
    @property
    def args(self):
        """Returns a dictionary from the option names to the option values.
        """
        return self.options.to_dict()
    def parse_args(self, args):
        """Parses the given arguments. An exception is raised if
        an error occurs during argument parsing.

        :param args: the argument list
        :type args: list of strings
        """
        self.options.parse(args)
    def _parse_options(self, options_source, inputs=None, outputs=None):
        """Initialize the options from the docstring or an argparser.
        In addition to the options, the function tries to deduce a tool
        name if none was specified at construction time.
        Optional inputs and outputs lists can be specified. Both must
        be lists of strings containing option names. If the option is found
        the option type is set accordingly to input or output. This is
        useful if the options are not organized in groups and the
        parser can not automatically identify the options type.

        :param options_source: either a docstring or an argparser instance
        :type options_source: string or argparse.ArgumentParser
        :param inputs: list of option names that will be marked as inputs
        :type inputs: list of strings
        :param outputs: list of option names that will be marked as outputs
        :type outputs: list of strings
        """
        if options_source is None:
            raise Exception("No docstring or argument parser provided!")
        opts = None
        if not isinstance(options_source, basestring):
            opts = Options.from_argparse(options_source, source=self,
                                         inputs=inputs, outputs=outputs)
        else:
            opts = Options.from_docopt(options_source, source=self,
                                       inputs=inputs, outputs=outputs)
        if self.name is None:
            # fall back to the first word of the usage string as tool name
            import re
            match = re.match(r'usage:\s*\n*(\w+).*', opts.usage(),
                             re.IGNORECASE | re.MULTILINE)
            if match:
                self.name = match.groups()[0]
        return opts
    def validate(self):
        """The default implementation validates all options that belong to
        this tool and checks that all options that are of `TYPE_INPUT`
        reference existing files.
        The method raises a :py:class:`ValidationError` in case an option could
        not be validated or an input file does not exist.
        """
        log.debug("Default options validation for %s", self)
        try:
            self.options.validate()
        except Exception, e:
            log.debug("Validation error: %s", str(e).strip())
            raise ValidationError(self, str(e))
        for opt in self.options.get_by_type(TYPE_INPUT):
            # only check inputs owned by this tool
            if opt.source is not None and opt.source != self:
                continue
            # dependency inputs are produced by an upstream job and may
            # not exist yet
            if opt.is_dependency():
                continue
            for value in opt._value:
                if isinstance(value, basestring):
                    if not exists(value):
                        raise ValidationError(self,
                                              "Input file not found: %s" %
                                              value)
def validation_error(self, message, *args):
    """Quickly fail validation with a custom message.

    :param message: the message, optionally containing ``%`` placeholders
    :param args: arguments interpolated into the message
    :raises ValidationError: always
    """
    formatted = message % args
    raise ValidationError(self, formatted)
def ensure(self, option_name, check, message=None):
    """Check a given option value using the check pattern or function and
    raise a ValidationError in case the pattern does not match or the
    function does return False.

    In case of list values, please note that in case check is a pattern,
    all values are checked independently. If check is a function, the
    list is passed on as is if the option takes list values, otherwise,
    the check function is called for each value independently.

    Note also that you should not use this function to check for file
    existence. Use the `check_file()` function on the option or on the
    tool instead. `check_file` checks for incoming dependencies in
    pipelines, in which case the file does not exist _yet_ but it
    will be created by a parent job.

    :param option_name: the name of the option to check
    :param check: either a string that is interpreted as a regexp pattern
                  or a function that takes the option's value as a single
                  parameter and returns True if the value is valid
    :param message: optional custom error message
    :raises ValidationError: if the check fails
    """
    o = self.options[option_name]
    if isinstance(check, basestring):
        # regexp pattern: every value has to match independently
        import re
        for v in o.value:
            if not re.match(check, str(v)):
                self.validation_error(
                    message if message else "check failed for %s" % str(v)
                )
        return
    elif callable(check):
        if o.nargs == 0 or o.nargs == 1:
            # single-value option: check each value independently
            for v in o.value:
                if not check(v):
                    self.validation_error(
                        message if message
                        else "check failed for %s" % str(v)
                    )
        else:
            # list option: pass the full value list to the check function.
            # BUG FIX: the original default message referenced the loop
            # variable ``v`` which is undefined in this branch (NameError);
            # report the checked value instead.
            if not check(o.value):
                self.validation_error(
                    message if message
                    else "check failed for %s" % str(o.value)
                )
        return
    raise Exception("Ensure check parameter has to be a "
                    "function or a pattern")
def check_file(self, option_name):
    """Delegate to the named option's ``check_file()`` and convert any
    :class:`ValueError` into a validation error for this tool.

    :param option_name: the name of the option
    """
    option = self.options[option_name]
    try:
        option.check_file()
    except ValueError as err:
        self.validation_error(str(err))
def is_done(self):
    """Return True if this tool has at least one output file and every
    output file already exists on disk."""
    outfiles = set(self.get_output_files())
    if not outfiles:
        return False
    return all(exists(outfile) for outfile in outfiles)
def pipeline(self):
    """Create and return the pipeline that will run this tool.

    The base implementation defines no pipeline and returns ``None``.
    """
    return None
def get_command(self):
    """Return a tuple of (interpreter, template) where the interpreter is
    the name of the interpreter used to run the filled template and the
    template is a string that will be rendered.

    NOTE: the original docstring claimed (template, interpreter), but the
    value returned here is ("bash", <script>), matching the
    (interpreter, template) order used by ScriptTool.get_command.
    The script unpickles this tool (Python 2 cPickle/base64) and runs it.
    """
    return "bash", _pickel_template % \
        (cPickle.dumps(self).encode("base64"))
def cleanup(self):
    """Remove all non-sticky output files of this tool (files via
    ``remove``, directories via ``shutil.rmtree``)."""
    outfiles = list(self.get_output_files(sticky=False))
    log.debug("Tool cleanup check files: %s", outfiles)
    for outfile in outfiles:
        if not exists(outfile):
            continue
        log.warning("Tool cleanup! Removing: %s", outfile)
        if os.path.isfile(outfile):
            remove(outfile)
        elif os.path.isdir(outfile):
            shutil.rmtree(outfile)
def get_output_files(self, sticky=True):
    """Yield all output file names for the options of this tool.

    Only TYPE_OUTPUT options with string values are considered, and only
    options owned by this tool (source is None or self). Values are
    globbed; when a glob matches, the matches are yielded instead of the
    raw value.

    :param sticky: when False, options marked sticky are skipped
    :type sticky: boolean
    :returns: generator of file names
    """
    for opt in self.options.get_by_type(TYPE_OUTPUT):
        foreign = opt.source and opt.source != self
        if foreign or (not sticky and opt.sticky):
            continue
        values = opt.value
        if not isinstance(values, (list, tuple)):
            values = [values]
        for value in values:
            if not isinstance(value, basestring):
                continue
            import glob
            matches = glob.glob(value)
            if matches:
                for match in matches:
                    yield match
            else:
                yield value
def get_input_files(self):
    """Yield all input file names for the options of this tool.

    Only TYPE_INPUT options with string (raw) values are considered, and
    only options owned by this tool (source is None or self).

    :returns: generator of file names
    """
    for opt in self.options.get_by_type(TYPE_INPUT):
        if opt.source and opt.source != self:
            continue
        raw = opt.raw()
        candidates = raw if isinstance(raw, (list, tuple)) else [raw]
        for candidate in candidates:
            if isinstance(candidate, basestring):
                yield candidate
def help(self):
    """Return the help text for this tool, delegating to the options
    help and stripping common leading whitespace."""
    raw_help = self.options.help()
    return dedent(raw_help)
def __repr__(self):
    """Tool name, or a placeholder when no name was set."""
    return self.name or "<Unknown>"
def __str__(self):
    """Same representation as __repr__."""
    return repr(self)
def clone(self, counter=None):
    """Clone this tool instance and return the clone.

    If the optional ``counter`` is provided, the name of the cloned tool
    is updated using ``.<counter>`` as a suffix.

    :param counter: optional integer appended to the clone's name
    :returns: the cloned tool
    """
    cloned_tool = copy.copy(self)
    cloned_tool._options = self.options.copy()
    if cloned_tool.name and counter is not None:
        # BUG FIX: the original passed str(counter) to a %d conversion,
        # which raises TypeError for every non-None counter; pass the
        # counter itself
        cloned_tool.name = "%s.%d" % (cloned_tool.name, counter)
    cloned_tool._options._help = self.options._help
    cloned_tool._options._usage = self.options._usage
    # update the options source so the clone owns its own options
    cloned_tool._options.source = cloned_tool
    for o in cloned_tool._options:
        o.source = cloned_tool
    log.debug("Tool | cloned instance %s [%s->%s]",
              self, self.__hash__(), cloned_tool.__hash__())
    return cloned_tool
class PythonTool(Tool):
    """An extension of the tool class that is initialized
    with a decorated class to simplify the process of implementing
    Tools in python.
    """

    def __init__(self, cls, decorator, add_outputs=None):
        """Initialize a new python tool

        :param cls: the wrapped class or function
        :type cls: class
        :param decorator: an instance of the :class:`jip.tool` decorator
        :type decorator: jip.tool
        :param add_outputs: list of additional names that will be added
                            to the list of output options
        """
        Tool.__init__(self)
        self.decorator = decorator
        self.cls = cls
        self.name = decorator.name
        # instantiate the wrapped class; plain functions are used as-is.
        # The bare except deliberately falls back to the class/function
        # itself when instantiation fails.
        try:
            if not isinstance(cls, types.FunctionType):
                self.instance = cls()
            else:
                self.instance = cls
        except:
            self.instance = cls
        # best-effort source file lookup, used for debugging only
        try:
            self.path = inspect.getsourcefile(cls)
        except:
            log.debug("Unable to find source file for %s", self.name)
        ################################################################
        # Load options either through a argparser function that was
        # specified by name in the decorator or load them from the
        # docstring of the instance
        ################################################################
        self._options_source = None
        self._add_outputs = add_outputs
        self._is_pipeline = decorator._force_pipeline

    def clone(self, counter=None):
        # clone via Tool.clone, then re-instantiate the wrapped class for
        # the clone (falling back to the class/function on failure)
        cloned_tool = Tool.clone(self, counter=counter)
        try:
            if not isinstance(self.cls, types.FunctionType):
                cloned_tool.instance = self.cls()
            else:
                cloned_tool.instance = self.cls
        except:
            cloned_tool.instance = self.cls
        return cloned_tool

    @property
    def options(self):
        # lazily build and cache the Options instance
        if self._options is not None:
            return self._options
        if self.decorator.argparse and hasattr(self.instance,
                                               self.decorator.argparse):
            #initialize the options from argparse
            import argparse

            class PrintDefaultsFormatter(argparse.HelpFormatter):
                # help formatter that appends "(default: ...)" to the
                # help text, special-casing the standard streams
                def _get_help_string(self, action):
                    help = action.help
                    if '%(default)' not in action.help and \
                       '(default: ' not in action.help:
                        if action.default is not argparse.SUPPRESS:
                            defaulting_nargs = [argparse.OPTIONAL,
                                                argparse.ZERO_OR_MORE]
                            if action.option_strings or \
                               action.nargs in defaulting_nargs:
                                # NOTE: ``file`` is the Python 2 builtin
                                # file type
                                if isinstance(action.default, file):
                                    if action.default == sys.stdout:
                                        help += ' (default: stdout)'
                                    elif action.default == sys.stdin:
                                        help += ' (default: stdin)'
                                    elif action.default == sys.stderr:
                                        help += ' (default: stderr)'
                                    else:
                                        help += ' (default: <stream>)'
                                else:
                                    help += ' (default: %(default)s)'
                    return help

            self._options_source = argparse.ArgumentParser(
                prog=self.name,
                formatter_class=PrintDefaultsFormatter
            )
            # let the tool's argparse hook populate the parser
            init_parser = getattr(self.instance, self.decorator.argparse)
            init_parser(self._options_source)
        else:
            # initialize options from doc string
            import textwrap
            if self.instance.__doc__ is not None:
                self._options_source = textwrap.dedent(self.instance.__doc__)
            else:
                self._options_source = ""
        # create the options
        self._options = self._parse_options(self._options_source,
                                            inputs=self.decorator.inputs,
                                            outputs=self.decorator.outputs)
        ## add additional output arguments
        if self._add_outputs is not None:
            for arg in self._add_outputs:
                if isinstance(arg, (list, tuple)):
                    # (name, default) tuples: only the name is used here;
                    # the default is resolved later in init()
                    arg = arg[0]
                self._options.add(Option(
                    arg,
                    option_type=TYPE_OUTPUT,
                    nargs=1,
                    hidden=True
                ))
        return self._options

    def run(self):
        # expose the options and the tool on the instance before running
        self.instance.options = self.options
        self.instance.tool_instance = self
        if isinstance(self.instance, types.FunctionType):
            # check if the function takes a parameter
            argspec = inspect.getargspec(self.instance)
            if len(argspec[0]) > 0:
                self.instance(self)
            else:
                self.instance()
        else:
            self.decorator.run(self, self.instance)

    def validate(self):
        # run the decorator validation first, then the default checks
        r = self.decorator.validate(self, self.instance)
        Tool.validate(self)
        return r

    def setup(self):
        return self.decorator.setup(self, self.instance)

    def init(self):
        # resolve (possibly callable) default values for the additional
        # output options before delegating to the decorator
        if self._add_outputs is not None:
            for arg in self._add_outputs:
                if isinstance(arg, (list, tuple)):
                    value = arg[1]
                    arg = arg[0]
                    if callable(value):
                        try:
                            value = value(self)
                        except Exception as err:
                            log.debug("Error evaluating output value: %s",
                                      str(err), exc_info=True)
                    self.options[arg].set(value)
        return self.decorator.init(self, self.instance)

    def is_done(self):
        return self.decorator.is_done(self, self.instance)

    def pipeline(self):
        if self.decorator._force_pipeline and isinstance(self.instance,
                                                         types.FunctionType):
            # force pipeline generation. Call the instance function
            # and check if the returned value is a pipeline or a string
            # strings go into a pipeline block for evaluation, pipelines
            # are returned unmodified
            # check if the function takes a parameter
            argspec = inspect.getargspec(self.instance)
            r = None
            if len(argspec[0]) > 0:
                r = self.instance(self)
            else:
                r = self.instance()
            if isinstance(r, basestring):
                # create a pipeline block and evaluate it
                block = PythonBlock(r)
                e = block.run(self)
                return e['utils']._pipeline
            else:
                return r
        return self.decorator.pipeline(self, self.instance)

    def help(self):
        return self.decorator.help(self, self.instance)

    def cleanup(self):
        return self.decorator.cleanup(self, self.instance)

    def get_command(self):
        return self.decorator.get_command(self, self.instance)
class ScriptTool(Tool):
    """An extension of the tool class that is initialized
    with a docstring and operates on Blocks that can be loaded
    from a script file or from string.

    If specified as initializer parameters, both the validation and the
    pipeline block will be handled with special care.
    Pipeline blocks currently can only be embedded python blocks. Therefore
    the interpreter has to be 'python'. Validation blocks where the
    interpreter is 'python' will be converted to embedded python blocks. This
    allows the validation process to modify the tool and its arguments during
    validation.
    """

    def __init__(self, docstring, command_block=None, setup_block=None,
                 init_block=None, validation_block=None, pipeline_block=None):
        Tool.__init__(self, docstring)
        self.command_block = command_block
        self.validation_block = validation_block
        self.pipeline_block = pipeline_block
        self.setup_block = setup_block
        self.init_block = init_block
        # pipeline blocks must be python; they and any python
        # validation/setup/init blocks are converted to PythonBlocks so
        # they run embedded in this process
        if self.pipeline_block:
            if self.pipeline_block.interpreter is not None and \
               self.pipeline_block.interpreter != 'python':
                raise Exception("Pipeline blocks have to be implemented in "
                                "python! Sorry about that, but its really a "
                                "nice language :)")
            self.pipeline_block = PythonBlock(
                lineno=self.pipeline_block._lineno,
                content=self.pipeline_block.content
            )
        if self.validation_block and \
           (self.validation_block.interpreter is None or
            self.validation_block.interpreter == 'python'):
            self.validation_block = PythonBlock(
                lineno=self.validation_block._lineno,
                content=self.validation_block.content
            )
        if self.setup_block:
            self.setup_block = PythonBlock(
                lineno=self.setup_block._lineno,
                content=self.setup_block.content
            )
        if self.init_block:
            self.init_block = PythonBlock(
                lineno=self.init_block._lineno,
                content=self.init_block.content
            )
        if not self.command_block and not self.pipeline_block:
            raise Exception("No executable or pipeline block found!")
        self._is_pipeline = self.pipeline_block is not None

    def pipeline(self):
        if self.pipeline_block:
            r = self.pipeline_block.run(self)
            # NOTE(review): this reads ``pipeline`` while
            # PythonTool.pipeline reads ``_pipeline`` from the same
            # 'utils' object — confirm which attribute is correct
            return r['utils'].pipeline
        return Tool.pipeline(self)

    def run(self):
        if self.command_block:
            self.command_block.run(self)

    def validate(self):
        # run the embedded validation block first, then default checks
        if self.validation_block:
            self.validation_block.run(self)
        Tool.validate(self)

    def init(self):
        if self.init_block:
            self.init_block.run(self)
        Tool.init(self)

    def setup(self):
        if self.setup_block:
            self.setup_block.run(self)
        Tool.setup(self)

    def get_command(self):
        # (interpreter, rendered template), or (None, None) for
        # pipeline-only scripts
        if self.command_block:
            return self.command_block.interpreter, \
                self.command_block.render(self)
        return None, None

    @classmethod
    def from_string(cls, content):
        """Load a script tool from a string."""
        from jip.parser import load
        return load(content, script_class=cls)

    @classmethod
    def from_file(cls, path, is_pipeline=False):
        """Load a script tool from a file."""
        log.debug("Load script from file: %s", path)
        from jip.parser import loads
        s = loads(path, script_class=cls, is_pipeline=is_pipeline)
        return s
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import base64
import codecs
import hashlib
import json
import os.path
import multiprocessing
import random
import re
import shutil
import subprocess
import sys
import stat
import tempfile
import urllib.request
from shutil import copyfile
from urllib.parse import urlparse
def downstream_projects():
    """Map of downstream project display names to their git repository and
    the Buildkite postsubmit pipeline configuration URL."""
    config_base = ("https://raw.githubusercontent.com/bazelbuild/"
                   "continuous-integration/master/buildkite/pipelines/")

    def project(repository, pipeline_slug):
        # every http_config follows the <slug>-postsubmit.json pattern
        return {
            "git_repository": repository,
            "http_config": config_base + pipeline_slug + "-postsubmit.json",
        }

    bazelbuild = "https://github.com/bazelbuild/"
    google = "https://github.com/google/"
    return {
        "BUILD_file_generator": project(bazelbuild + "BUILD_file_generator.git", "BUILD_file_generator"),
        "bazel-toolchains": project(bazelbuild + "bazel-toolchains.git", "bazel-toolchains"),
        "buildtools": project(bazelbuild + "buildtools.git", "buildtools"),
        "CLion Plugin": project(bazelbuild + "intellij.git", "clion"),
        "Eclipse Plugin": project(bazelbuild + "eclipse.git", "eclipse"),
        "Gerrit": project("https://gerrit.googlesource.com/gerrit.git", "gerrit"),
        "Google Logging": project(google + "glog.git", "glog"),
        "IntelliJ Plugin": project(bazelbuild + "intellij.git", "intellij"),
        "migration-tooling": project(bazelbuild + "migration-tooling.git", "migration-tooling"),
        "protobuf": project(google + "protobuf.git", "protobuf"),
        "re2": project(google + "re2.git", "re2"),
        "rules_appengine": project(bazelbuild + "rules_appengine.git", "rules_appengine"),
        "rules_closure": project(bazelbuild + "rules_closure.git", "rules_closure"),
        "rules_d": project(bazelbuild + "rules_d.git", "rules_d"),
        "rules_go": project(bazelbuild + "rules_go.git", "rules_go"),
        "rules_groovy": project(bazelbuild + "rules_groovy.git", "rules_groovy"),
        "rules_gwt": project(bazelbuild + "rules_gwt.git", "rules_gwt"),
        "rules_jsonnet": project(bazelbuild + "rules_jsonnet.git", "rules_jsonnet"),
        "rules_k8s": project(bazelbuild + "rules_k8s.git", "rules_k8s"),
        "rules_nodejs": project(bazelbuild + "rules_nodejs.git", "rules_nodejs"),
        "rules_perl": project(bazelbuild + "rules_perl.git", "rules_perl"),
        "rules_python": project(bazelbuild + "rules_python.git", "rules_python"),
        "rules_rust": project(bazelbuild + "rules_rust.git", "rules_rust"),
        "rules_sass": project(bazelbuild + "rules_sass.git", "rules_sass"),
        "rules_scala": project(bazelbuild + "rules_scala.git", "rules_scala"),
        "rules_typescript": project(bazelbuild + "rules_typescript.git", "rules_typescript"),
        # Enable once https://github.com/bazelbuild/continuous-integration/issues/191 is resolved:
        # "rules_webtesting": project(bazelbuild + "rules_webtesting.git", "rules_webtesting"),
        "skydoc": project(bazelbuild + "skydoc.git", "skydoc"),
        "subpar": project(google + "subpar.git", "subpar"),
        "TensorFlow": project("https://github.com/tensorflow/tensorflow.git", "tensorflow"),
        "TensorFlow Serving": project("https://github.com/tensorflow/serving.git", "tensorflow-serving"),
    }
def python_binary(platform=None):
    """Name of the Python interpreter binary to invoke on a platform."""
    return "python.exe" if platform == "windows" else "python3.6"
def bazelcipy_url():
    """URL of the canonical, latest version of this script."""
    return ("https://raw.githubusercontent.com/bazelbuild/"
            "continuous-integration/master/buildkite/bazelci.py")
def eprint(*args, **kwargs):
    """Print the arguments to stderr, then terminate the process with
    exit status 1.

    Uses ``sys.exit`` instead of the ``exit`` helper: the latter is only
    injected by the ``site`` module and is absent under ``python -S`` or
    in frozen/embedded interpreters.
    """
    print(*args, file=sys.stderr, **kwargs)
    sys.exit(1)
def platforms_info():
    """Return a map of all supported platform ids to their human readable
    name and the buildkite-agent working directory on that platform."""
    linux_agent_dir = "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}"
    return {
        "ubuntu1404": {
            "name": "Ubuntu 14.04",
            "agent-directory": linux_agent_dir,
        },
        "ubuntu1604": {
            "name": "Ubuntu 16.04",
            "agent-directory": linux_agent_dir,
        },
        "macos": {
            "name": "macOS",
            "agent-directory": "/usr/local/var/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}",
        },
        "windows": {
            "name": "Windows",
            "agent-directory": "d:/build/${BUILDKITE_AGENT_NAME}",
        },
    }
def flaky_test_meme_url():
    """Return a random meme image URL shown when flaky tests are detected.

    The original list contained the same URL twice, which made the random
    choice pointless; keep a single entry (add more to re-enable variety).
    """
    urls = ["https://storage.googleapis.com/bazel-buildkite-memes/flaky_tests_1.jpg"]
    return random.choice(urls)
def downstream_projects_root(platform):
    """Return (creating it if needed) the directory under which downstream
    project checkouts live for this agent/platform."""
    downstream_projects_dir = os.path.expandvars(
        "${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects")
    path = os.path.join(agent_directory(platform), downstream_projects_dir)
    # exist_ok avoids the race between the exists() check and makedirs()
    # that the original if-not-exists guard was subject to
    os.makedirs(path, exist_ok=True)
    return path
def agent_directory(platform):
    """Expanded buildkite-agent working directory for the platform."""
    info = platforms_info()[platform]
    return os.path.expandvars(info["agent-directory"])
def supported_platforms():
    """Set of all supported platform identifiers."""
    return set(platforms_info())
def platform_name(platform):
    """Human readable display name for a platform identifier."""
    info = platforms_info()
    return info[platform]["name"]
def fetch_configs(http_url):
    """Load the build configuration.

    Fetches it from http_url when given, otherwise reads the local
    .bazelci/config.json.

    :returns: the json configuration as a python data structure
    """
    if http_url is not None:
        with urllib.request.urlopen(http_url) as resp:
            utf8_reader = codecs.getreader("utf-8")
            return json.load(utf8_reader(resp))
    with open(".bazelci/config.json", "r") as local_config:
        return json.load(local_config)
def print_collapsed_group(name):
    """Emit a collapsed Buildkite log group header ("---")."""
    print("\n--- {}\n".format(name))
def print_expanded_group(name):
    """Emit an expanded Buildkite log group header ("+++")."""
    print("\n+++ {}\n".format(name))
def execute_commands(config, platform, git_repository, use_but, save_but,
                     build_only, test_only):
    """Run one CI job: optional clone, setup, build and test phases.

    :param config: platform configuration dict (keys: shell_commands,
        run_targets, build_flags, build_targets, test_flags, test_targets)
    :param platform: platform identifier, see platforms_info()
    :param git_repository: optional downstream repository to clone and
        chdir into before running
    :param use_but: download and use the "Bazel under test" artifact
    :param save_but: upload the built bazel binary as an artifact
    :param build_only: skip the test phase
    :param test_only: skip the build phase
    """
    exit_code = -1  # -1 means "no test phase ran"; only then do we not exit
    tmpdir = None
    bazel_binary = "bazel"
    try:
        if git_repository:
            clone_git_repository(git_repository, platform)
        cleanup(platform)
        tmpdir = tempfile.mkdtemp()
        if use_but:
            print_collapsed_group("Downloading Bazel under test")
            bazel_binary = download_bazel_binary(tmpdir, platform)
        print_bazel_version_info(bazel_binary)
        # setup phase: shell commands, then bazel run targets
        execute_shell_commands(config.get("shell_commands", None))
        execute_bazel_run(bazel_binary, config.get("run_targets", None))
        if not test_only:
            execute_bazel_build(bazel_binary, config.get("build_flags", []),
                                config.get("build_targets", None))
            if save_but:
                upload_bazel_binary()
        if not build_only:
            bep_file = os.path.join(tmpdir, "build_event_json_file.json")
            exit_code = execute_bazel_test(bazel_binary, config.get("test_flags", []),
                                           config.get("test_targets", None), bep_file)
            print_test_summary(bep_file)
            if has_flaky_tests(bep_file) and exit_code == 0:
                # Fail the pipeline if there were any flaky tests.
                exit_code = 1
            upload_test_logs(bep_file, tmpdir)
    finally:
        if tmpdir:
            shutil.rmtree(tmpdir)
        cleanup(platform)
        if exit_code > -1:
            exit(exit_code)
def show_image(url, alt):
    """Emit the Buildkite escape sequence that renders an inline image."""
    print("\033]1338;url='\"{}\"';alt='\"{}\"'\a\n".format(url, alt))
def print_test_summary(bep_file):
    """Print expanded log groups listing the failed, timed out and flaky
    test targets found in a build event protocol (BEP) json file."""
    failed = test_logs_for_status(bep_file, status="FAILED")
    if failed:
        print_expanded_group("Failed Tests")
        for label, _ in failed:
            print(label)
    timed_out = test_logs_for_status(bep_file, status="TIMEOUT")
    # BUG FIX: the original tested ``failed`` here, so the "Timed out"
    # group appeared iff there were *failed* tests, not timed out ones
    if timed_out:
        print_expanded_group("Timed out Tests")
        for label, _ in timed_out:
            print(label)
    flaky = test_logs_for_status(bep_file, status="FLAKY")
    if flaky:
        print_expanded_group("Flaky Tests")
        show_image(flaky_test_meme_url(), "Flaky Tests")
        for label, _ in flaky:
            print(label)
def has_flaky_tests(bep_file):
    """True if the BEP file reports at least one FLAKY test."""
    flaky = test_logs_for_status(bep_file, status="FLAKY")
    return bool(flaky)
def print_bazel_version_info(bazel_binary):
    """Log version and info output of the given bazel binary."""
    print_collapsed_group("Bazel Info")
    for sub_command in ("version", "info"):
        execute_command([bazel_binary, sub_command])
def upload_bazel_binary():
    """Upload the freshly built bazel binary as a Buildkite artifact."""
    print_collapsed_group("Uploading Bazel under test")
    execute_command(
        ["buildkite-agent", "artifact", "upload", "bazel-bin/src/bazel"])
def download_bazel_binary(dest_dir, platform):
    """Download the bazel binary built by the matching build-only step of
    this pipeline and make it executable.

    :returns: path to the downloaded bazel binary
    """
    source_step = create_label(
        platform_name(platform), "Bazel", build_only=True, test_only=False)
    execute_command(["buildkite-agent", "artifact", "download",
                     "bazel-bin/src/bazel", dest_dir, "--step", source_step])
    bazel_binary_path = os.path.join(dest_dir, "bazel-bin/src/bazel")
    # ensure the owner-executable bit is set on the downloaded file
    current_mode = os.stat(bazel_binary_path).st_mode
    os.chmod(bazel_binary_path, current_mode | stat.S_IEXEC)
    return bazel_binary_path
def clone_git_repository(git_repository, platform):
    """Clone (or reset an existing clone of) the downstream repository and
    chdir into the checkout.

    Existing checkouts are hard-reset to the remote HEAD — including all
    submodules — and cleaned of untracked files.
    """
    root = downstream_projects_root(platform)
    project_name = re.search("/([^/]+)\.git$", git_repository).group(1)
    clone_path = os.path.join(root, project_name)
    print_collapsed_group("Fetching " + project_name + " sources")
    if os.path.exists(clone_path):
        os.chdir(clone_path)
        # the remote URL may have changed since the clone was created
        execute_command(["git", "remote", "set-url", "origin", git_repository])
        execute_command(["git", "clean", "-fdqx"])
        execute_command(["git", "submodule", "foreach", "--recursive", "git", "clean", "-fdqx"])
        # sync to the latest commit of HEAD. Unlike git pull this also
        # works after a force push.
        execute_command(["git", "fetch", "origin"])
        remote_head = subprocess.check_output(["git", "symbolic-ref", "refs/remotes/origin/HEAD"])
        remote_head = remote_head.decode("utf-8")
        remote_head = remote_head.rstrip()
        execute_command(["git", "reset", remote_head, "--hard"])
        execute_command(["git", "submodule", "sync", "--recursive"])
        execute_command(["git", "submodule", "update", "--init", "--recursive", "--force"])
        execute_command(["git", "submodule", "foreach", "--recursive", "git", "reset", "--hard"])
        execute_command(["git", "clean", "-fdqx"])
        execute_command(["git", "submodule", "foreach", "--recursive", "git", "clean", "-fdqx"])
    else:
        execute_command(["git", "clone", "--recurse-submodules", git_repository, clone_path])
        os.chdir(clone_path)
def cleanup(platform):
    """Expunge the local bazel cache when run inside a workspace."""
    print_collapsed_group("Cleanup")
    if not os.path.exists("WORKSPACE"):
        return
    execute_command(["bazel", "clean", "--expunge"])
def execute_shell_commands(commands):
    """Run the configured setup shell commands as one joined script."""
    if not commands:
        return
    print_collapsed_group("Setup (Shell Commands)")
    joined_script = "\n".join(commands)
    execute_command([joined_script], shell=True)
def execute_bazel_run(bazel_binary, targets):
    """``bazel run`` each configured setup target in order."""
    if not targets:
        return
    print_collapsed_group("Setup (Run Targets)")
    base_command = [bazel_binary, "run", "--curses=yes",
                    "--color=yes", "--verbose_failures"]
    for target in targets:
        execute_command(base_command + [target])
def execute_bazel_build(bazel_binary, flags, targets):
    """``bazel build`` the configured targets with the common CI flags."""
    if not targets:
        return
    print_expanded_group("Build")
    command = [bazel_binary, "build", "--curses=yes", "--color=yes",
               "--keep_going", "--verbose_failures",
               "--jobs=" + str(multiprocessing.cpu_count())]
    execute_command(command + flags + targets)
def execute_bazel_test(bazel_binary, flags, targets, bep_file):
    """``bazel test`` the configured targets, writing build events to
    bep_file.

    :returns: the bazel exit code (0 when there are no targets)
    """
    if not targets:
        return 0
    print_expanded_group("Test")
    jobs = str(multiprocessing.cpu_count())
    command = [bazel_binary, "test",
               "--curses=yes", "--color=yes", "--keep_going",
               "--verbose_failures", "--flaky_test_attempts=3",
               "--build_tests_only", "--jobs=" + jobs,
               "--local_test_jobs=" + jobs,
               "--build_event_json_file=" + bep_file]
    return execute_command(command + flags + targets, fail_if_nonzero=False)
def upload_test_logs(bep_file, tmpdir):
    """Copy interesting test logs into tmpdir and upload them as
    Buildkite artifacts; a no-op when the BEP file does not exist."""
    if not os.path.exists(bep_file):
        return
    test_logs = test_logs_to_upload(bep_file, tmpdir)
    if not test_logs:
        return
    previous_cwd = os.getcwd()
    try:
        os.chdir(tmpdir)
        print_collapsed_group("Uploading test logs")
        execute_command(["buildkite-agent", "artifact", "upload", "*/**/*.log"])
    finally:
        # always restore the working directory
        os.chdir(previous_cwd)
def test_logs_to_upload(bep_file, tmpdir):
    """Copy the test logs of failed, timed-out and flaky targets into
    tmpdir, renamed after the target that created them so that log and
    target are easy to associate.

    :returns: list of the new file paths
    """
    interesting = (test_logs_for_status(bep_file, status="FAILED")
                   + test_logs_for_status(bep_file, status="TIMEOUT")
                   + test_logs_for_status(bep_file, status="FLAKY"))
    new_paths = []
    for label, logs in interesting:
        # several logs means several attempts: number them from 1;
        # a single log keeps attempt 0 (plain "test.log")
        attempt = 1 if len(logs) > 1 else 0
        for log_file in logs:
            destination = test_label_to_path(tmpdir, label, attempt)
            os.makedirs(os.path.dirname(destination), exist_ok=True)
            copyfile(log_file, destination)
            new_paths.append(destination)
            attempt += 1
    return new_paths
def test_label_to_path(tmpdir, label, attempt):
# remove leading //
path = label[2:]
path = path.replace(":", "/")
if attempt == 0:
path = os.path.join(path, "test.log")
else:
path = os.path.join(path, "attempt_" + str(attempt) + ".log")
return os.path.join(tmpdir, path)
def test_logs_for_status(bep_file, status):
targets = []
raw_data = ""
with open(bep_file) as f:
raw_data = f.read()
decoder = json.JSONDecoder()
pos = 0
while pos < len(raw_data):
bep_obj, size = decoder.raw_decode(raw_data[pos:])
if "testSummary" in bep_obj:
test_target = bep_obj["id"]["testSummary"]["label"]
test_status = bep_obj["testSummary"]["overallStatus"]
if test_status == status:
outputs = bep_obj["testSummary"]["failed"]
test_logs = []
for output in outputs:
test_logs.append(urlparse(output["uri"]).path)
targets.append((test_target, test_logs))
pos += size + 1
return targets
def execute_command(args, shell=False, fail_if_nonzero=True):
    """Echo a command, run it, and return its exit code.

    When fail_if_nonzero is True, a non-zero exit raises
    subprocess.CalledProcessError instead of returning.
    """
    print(" ".join(args))
    completed = subprocess.run(args, shell=shell, check=fail_if_nonzero)
    return completed.returncode
def print_project_pipeline(platform_configs, project_name, http_config,
                           git_repository, use_but):
    """Emit a Buildkite pipeline with one runner step per platform."""
    pipeline_steps = [
        runner_step(platform, project_name, http_config, git_repository,
                    use_but)
        for platform in platform_configs
    ]
    print_pipeline(pipeline_steps)
def runner_step(platform, project_name=None, http_config=None,
                git_repository=None, use_but=False, save_but=False, build_only=False,
                test_only=False):
    """Render the Buildkite yaml for a single runner step on a platform.

    The step first re-downloads bazelci.py, then invokes its ``runner``
    command with flags mirroring this function's arguments.
    """
    command = python_binary(platform) + " bazelci.py runner --platform=" + platform
    if http_config:
        command = command + " --http_config=" + http_config
    if git_repository:
        command = command + " --git_repository=" + git_repository
    if use_but:
        command = command + " --use_but"
    if save_but:
        command = command + " --save_but"
    if build_only:
        command = command + " --build_only"
    if test_only:
        command = command + " --test_only"
    label = create_label(platform_name(platform),
                         project_name, build_only, test_only)
    # NOTE(review): yaml indentation reconstructed; the source view had
    # leading whitespace stripped
    return """
  - label: \"{0}\"
    command: \"{1}\\n{2}\"
    agents:
      - \"os={3}\"""".format(label, fetch_bazelcipy_command(), command, platform)
def print_pipeline(steps):
    """Print a Buildkite pipeline document: a 'steps:' header followed by
    each pre-rendered step block."""
    lines = ["steps:"]
    lines.extend(steps)
    print("\n".join(lines))
def wait_step():
    """Return a Buildkite 'wait' step: later steps only start once all
    earlier steps have finished."""
    # NOTE(review): leading indentation inside the literal reconstructed from
    # a whitespace-mangled source — verify against the repository version.
    return """
  - wait"""
def http_config_flag(http_config):
    """Render the --http_config option, or an empty string when unset."""
    if http_config is None:
        return ""
    return "--http_config=" + http_config
def fetch_bazelcipy_command():
    """Shell command that downloads bazelci.py from its canonical URL."""
    return "curl -s " + bazelcipy_url() + " -o bazelci.py"
def upload_project_pipeline_step(project_name, git_repository, http_config):
    # Build a Buildkite step (run on a pipeline agent) that generates the
    # downstream project's pipeline with --use_but (use the Bazel binary
    # built upstream) and pipes it into "buildkite-agent pipeline upload".
    pipeline_command = ("{0} bazelci.py project_pipeline --project_name=\\\"{1}\\\" " +
                        "--use_but --git_repository={2}").format(python_binary(), project_name,
                                                                 git_repository)
    if http_config:
        pipeline_command = pipeline_command + " --http_config=" + http_config
    pipeline_command = pipeline_command + " | buildkite-agent pipeline upload"
    # NOTE(review): YAML indentation inside this literal reconstructed from a
    # whitespace-mangled source — verify against the repository version.
    return """
  - label: \"Setup {0}\"
    command: \"{1}\\n{2}\"
    agents:
      - \"pipeline=true\"""".format(project_name, fetch_bazelcipy_command(),
                                    pipeline_command)
def create_label(platform_name, project_name=None, build_only=False,
                 test_only=False):
    """Compose a human-readable step label, e.g. "Build rules_go (macos)".

    test_only wins over build_only when both are set, matching the original
    assignment order.
    """
    prefix = ""
    if build_only:
        prefix = "Build "
    if test_only:
        prefix = "Test "
    if project_name:
        return prefix + "{0} ({1})".format(project_name, platform_name)
    return prefix + platform_name
def bazel_build_step(platform, project_name, http_config=None,
                     build_only=False, test_only=False):
    # Buildkite step that builds or tests Bazel itself on *platform*.
    # build_only additionally passes --save_but so the built binary is
    # uploaded for reuse by downstream steps.
    pipeline_command = python_binary(platform) + " bazelci.py runner"
    if build_only:
        pipeline_command = pipeline_command + " --build_only --save_but"
    if test_only:
        pipeline_command = pipeline_command + " --test_only"
    if http_config:
        pipeline_command = pipeline_command + " --http_config=" + http_config
    label = create_label(platform_name(platform), project_name, build_only=build_only,
                         test_only=test_only)
    pipeline_command = pipeline_command + " --platform=" + platform
    # NOTE(review): YAML indentation inside this literal reconstructed from a
    # whitespace-mangled source — verify against the repository version.
    return """
  - label: \"{0}\"
    command: \"{1}\\n{2}\"
    agents:
      - \"os={3}\"""".format(label, fetch_bazelcipy_command(),
                             pipeline_command, platform)
def publish_bazel_binaries_step():
    # Buildkite step (pipeline agent) that invokes the publish_binaries
    # subcommand below to upload the freshly built Bazel binaries to GCS.
    command = python_binary() + " bazelci.py publish_binaries"
    # NOTE(review): YAML indentation inside this literal reconstructed from a
    # whitespace-mangled source — verify against the repository version.
    return """
  - label: \"Publish Bazel Binaries\"
    command: \"{0}\\n{1}\"
    agents:
      - \"pipeline=true\"""".format(fetch_bazelcipy_command(), command)
def print_bazel_postsubmit_pipeline(configs, http_config):
    """Emit the Bazel postsubmit pipeline: build Bazel everywhere, publish
    the binaries, test everywhere, then trigger each downstream project."""
    if not configs:
        eprint("Bazel postsubmit pipeline configuration is empty.")
    if set(configs.keys()) != set(supported_platforms()):
        eprint("Bazel postsubmit pipeline needs to build Bazel on all " +
               "supported platforms.")
    steps = [bazel_build_step(platform, "Bazel", http_config, build_only=True)
             for platform in configs]
    steps.append(wait_step())
    # todo move this to the end with a wait step.
    steps.append(publish_bazel_binaries_step())
    steps.extend(bazel_build_step(platform, "Bazel", http_config, test_only=True)
                 for platform in configs)
    for project, project_config in downstream_projects().items():
        steps.append(upload_project_pipeline_step(
            project,
            project_config["git_repository"],
            project_config.get("http_config", None)))
    print_pipeline(steps)
def bazelci_builds_download_url(platform, build_number):
    """Public HTTPS URL of the Bazel binary built for *platform* at *build_number*."""
    return "https://storage.googleapis.com/bazel-builds/artifacts/%s/%s/bazel" % (
        platform, build_number)
def bazelci_builds_upload_url(platform, build_number):
    """gs:// destination for the Bazel binary of *platform* at *build_number*."""
    return "gs://bazel-builds/artifacts/%s/%s/bazel" % (platform, build_number)
def bazelci_builds_metadata_url():
    # GCS object describing the latest fully tested Bazel build; read by
    # latest_generation_and_build_number() and rewritten by
    # try_publish_binaries().
    return "gs://bazel-builds/metadata/latest_fully_tested.json"
def latest_generation_and_build_number():
    # Read the metadata object from GCS and return (generation, build_number).
    # The GCS generation is later used for an atomic compare-and-swap upload.
    output = None
    attempt = 0
    while attempt < 5:
        # "gsutil stat" reports the object's generation and MD5 checksum;
        # retry until the metadata and the subsequently fetched content agree
        # (the object may be replaced between the two reads).
        output = subprocess.check_output(
            ["gsutil", "stat", bazelci_builds_metadata_url()])
        match = re.search("Generation:[ ]*([0-9]+)", output.decode("utf-8"))
        if not match:
            # NOTE(review): eprint presumably aborts the process; otherwise
            # match.group(1) below raises AttributeError — confirm eprint.
            eprint("Couldn't parse generation. gsutil output format changed?")
        generation = match.group(1)
        match = re.search("Hash \(md5\):[ ]*([^\s]+)", output.decode("utf-8"))
        if not match:
            eprint("Couldn't parse md5 hash. gsutil output format changed?")
        expected_md5hash = base64.b64decode(match.group(1))
        output = subprocess.check_output(
            ["gsutil", "cat", bazelci_builds_metadata_url()])
        hasher = hashlib.md5()
        hasher.update(output)
        actual_md5hash = hasher.digest()
        if expected_md5hash == actual_md5hash:
            break
        attempt = attempt + 1
    # NOTE(review): if all 5 attempts see an MD5 mismatch, the last (possibly
    # inconsistent) content is still parsed here — confirm this is intended.
    info = json.loads(output.decode("utf-8"))
    return (generation, info["build_number"])
def sha256_hexdigest(filename):
    """Return the SHA-256 hex digest of *filename*'s contents, streaming the
    file in 64 KiB chunks so arbitrarily large files fit in memory."""
    digest = hashlib.sha256()
    with open(filename, 'rb') as stream:
        while True:
            chunk = stream.read(65536)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def try_publish_binaries(build_number, expected_generation):
    # Upload the per-platform Bazel binaries plus an info.json manifest to
    # GCS.  The manifest upload is guarded by x-goog-if-generation-match so
    # it only succeeds if nobody replaced the metadata since
    # *expected_generation* was read (optimistic concurrency).
    # Returns True when the conditional manifest upload succeeded.
    tmpdir = tempfile.mkdtemp()
    try:
        info = {
            "build_number": build_number,
            "git_commit": os.environ["BUILDKITE_COMMIT"],
            "platforms": {}
        }
        for platform in supported_platforms():
            bazel_binary_path = download_bazel_binary(tmpdir, platform)
            execute_command(["gsutil", "cp", "-a", "public-read", bazel_binary_path,
                             bazelci_builds_upload_url(platform, build_number)])
            info["platforms"][platform] = {
                "url": bazelci_builds_download_url(platform, build_number),
                "sha256": sha256_hexdigest(bazel_binary_path),
            }
        info_file = os.path.join(tmpdir, "info.json")
        with open(info_file, mode="w", encoding="utf-8") as fp:
            json.dump(info, fp)
        # NOTE(review): execute_command defaults to fail_if_nonzero=True, so a
        # generation mismatch would raise CalledProcessError rather than make
        # this return False — confirm whether fail_if_nonzero=False was meant.
        exitcode = execute_command(["gsutil", "-h", "x-goog-if-generation-match:" + expected_generation,
                                    "-h", "Content-Type:application/json", "cp", "-a",
                                    "public-read", info_file, bazelci_builds_metadata_url()])
        return exitcode == 0
    finally:
        # Always clean up the scratch directory, even on failure.
        shutil.rmtree(tmpdir)
def publish_binaries():
    """
    Publish Bazel binaries to GCS.
    """
    # Retry loop around an optimistic-concurrency update: re-read the latest
    # metadata generation, then attempt the conditional upload, up to 5 times.
    attempt = 0
    while attempt < 5:
        latest_generation, latest_build_number = latest_generation_and_build_number()
        current_build_number = os.environ.get("BUILDKITE_BUILD_NUMBER", None)
        if not current_build_number:
            # NOTE(review): eprint presumably aborts; otherwise int(None)
            # below raises TypeError — confirm eprint's behaviour.
            eprint("Not running inside Buildkite")
        current_build_number = int(current_build_number)
        # Never overwrite newer published binaries with an older build's.
        if current_build_number <= latest_build_number:
            print(("Current build '{0}' is not newer than latest published '{1}'. " +
                   "Skipping publishing of binaries.").format(current_build_number,
                                                              latest_build_number))
            break
        if try_publish_binaries(current_build_number, latest_generation):
            print("Successfully updated '{0}' to binaries from build {1}."
                  .format(bazelci_builds_metadata_url(), current_build_number))
            break
        attempt = attempt + 1
if __name__ == "__main__":
    # Command-line front-end: each subcommand maps onto one of the
    # pipeline-generation / runner / publishing entry points above.
    parser = argparse.ArgumentParser(description='Bazel Continuous Integration Script')
    subparsers = parser.add_subparsers(dest="subparsers_name")
    bazel_postsubmit_pipeline = subparsers.add_parser("bazel_postsubmit_pipeline")
    bazel_postsubmit_pipeline.add_argument("--http_config", type=str)
    bazel_postsubmit_pipeline.add_argument("--git_repository", type=str)
    project_pipeline = subparsers.add_parser("project_pipeline")
    project_pipeline.add_argument("--project_name", type=str)
    project_pipeline.add_argument("--http_config", type=str)
    project_pipeline.add_argument("--git_repository", type=str)
    # type=bool with nargs="?" / const=True lets a bare "--use_but" act as a flag.
    project_pipeline.add_argument("--use_but", type=bool, nargs="?", const=True)
    runner = subparsers.add_parser("runner")
    runner.add_argument("--platform", action="store", choices=list(supported_platforms()))
    runner.add_argument("--http_config", type=str)
    runner.add_argument("--git_repository", type=str)
    runner.add_argument("--use_but", type=bool, nargs="?", const=True)
    runner.add_argument("--save_but", type=bool, nargs="?", const=True)
    runner.add_argument("--build_only", type=bool, nargs="?", const=True)
    runner.add_argument("--test_only", type=bool, nargs="?", const=True)
    # NOTE(review): this rebinds "runner"; harmless since the
    # publish_binaries subcommand takes no arguments.
    runner = subparsers.add_parser("publish_binaries")
    args = parser.parse_args()
    if args.subparsers_name == "bazel_postsubmit_pipeline":
        configs = fetch_configs(args.http_config)
        print_bazel_postsubmit_pipeline(configs.get("platforms", None),
                                        args.http_config)
    elif args.subparsers_name == "project_pipeline":
        configs = fetch_configs(args.http_config)
        print_project_pipeline(configs.get("platforms", None), args.project_name,
                               args.http_config, args.git_repository, args.use_but)
    elif args.subparsers_name == "runner":
        configs = fetch_configs(args.http_config)
        execute_commands(configs.get("platforms", None)[args.platform],
                         args.platform, args.git_repository, args.use_but, args.save_but,
                         args.build_only, args.test_only)
    elif args.subparsers_name == "publish_binaries":
        publish_binaries()
    else:
        parser.print_help()
|
'''
Create raw data pickle file
data_raw is a dict mapping image_filename -> [{'class': class_int, 'box_coords': (x1, y1, x2, y2)}, {...}, ...]
'''
import numpy as np
import pickle
import re
import os
from PIL import Image

# Script config
RESIZE_IMAGE = True  # resize the images and write to 'resized_images/'
GRAYSCALE = True  # convert image to grayscale? this option is only valid if RESIZE_IMAGE==True (FIXME)
TARGET_W, TARGET_H = 400, 260  # 1.74 is weighted avg ratio, but 1.65 aspect ratio is close enough (1.65 was for stop signs)

###########################
# Execute main script
###########################

# First get mapping from sign name string to integer label
sign_map = {'stop': 1, 'pedestrianCrossing': 2}  # only 2 sign classes (background class is 0)
'''
sign_map = {}  # sign_name -> integer_label
with open('signnames.csv', 'r') as f:
    for line in f:
        line = line[:-1]  # strip newline at the end
        integer_label, sign_name = line.split(',')
        sign_map[sign_name] = int(integer_label)
'''

# Create raw data pickle file
data_raw = {}

# For speed, put entire contents of mergedAnnotations.csv in memory
merged_annotations = []
with open('mergedAnnotations.csv', 'r') as f:
    for line in f:
        line = line[:-1]  # strip trailing newline
        merged_annotations.append(line)

# Create pickle file to represent dataset
image_files = os.listdir('annotations')
for image_file in image_files:
    # Find box coordinates for all signs in this image
    class_list = []
    box_coords_list = []
    for line in merged_annotations:
        # NOTE(review): image_file is used as a regex pattern here; filenames
        # containing regex metacharacters would need re.escape — confirm the
        # dataset's naming scheme makes this safe.
        if re.search(image_file, line):
            fields = line.split(';')

            # Get sign name and assign class label
            sign_name = fields[1]
            if sign_name != 'stop' and sign_name != 'pedestrianCrossing':
                continue  # ignore signs that are neither stop nor pedestrianCrossing signs
            sign_class = sign_map[sign_name]
            class_list.append(sign_class)

            # Annotation fields 2..5 are the box corners (x1, y1, x2, y2).
            box_coords = np.array([int(x) for x in fields[2:6]])

            if RESIZE_IMAGE:
                # Resize the images and write to 'resized_images/'
                image = Image.open('annotations/' + image_file)
                orig_w, orig_h = image.size
                if GRAYSCALE:
                    image = image.convert('L')  # 8-bit grayscale
                image = image.resize((TARGET_W, TARGET_H), Image.LANCZOS)  # high-quality downsampling filter

                resized_dir = 'resized_images_%dx%d/' % (TARGET_W, TARGET_H)
                if not os.path.exists(resized_dir):
                    os.makedirs(resized_dir)
                image.save(os.path.join(resized_dir, image_file))

                # Rescale box coordinates to the resized image.
                x_scale = TARGET_W / orig_w
                y_scale = TARGET_H / orig_h

                ulc_x, ulc_y, lrc_x, lrc_y = box_coords
                new_box_coords = (ulc_x * x_scale, ulc_y * y_scale, lrc_x * x_scale, lrc_y * y_scale)
                new_box_coords = [round(x) for x in new_box_coords]
                box_coords = np.array(new_box_coords)

            box_coords_list.append(box_coords)

    if len(class_list) == 0:
        continue  # ignore images with no signs-of-interest
    class_list = np.array(class_list)
    box_coords_list = np.array(box_coords_list)

    # Create the list of dicts
    the_list = []
    for i in range(len(box_coords_list)):
        d = {'class': class_list[i], 'box_coords': box_coords_list[i]}
        the_list.append(d)
    data_raw[image_file] = the_list

with open('data_raw_%dx%d.p' % (TARGET_W, TARGET_H), 'wb') as f:
    pickle.dump(data_raw, f)
|
import numpy

# Module-level state translated from Fortran COMMON blocks; the original
# array shapes are kept in the trailing comments.
# NOTE(review): "global" at module level is a no-op in Python — most of these
# lines only document the names; only ELEV/IONSUM/IFLSUM are initialised.
# Lists are over-allocated by one element because the translated code keeps
# Fortran's 1-based indexing and ignores index 0.
global ELEV  # =[[0 for x in range(17)]for y in range(79)]
ELEV=[[0 for x in range(17)]for y in range(79)]
global NSDEG  # [17]
global AA  # [17]
global BB  # [17]
global SCR,SCR1
#COMMON/MIXC/
global PRSH  # (6,3,17,17)
global ESH  # (6,3,17)
global AUG  # (6,3,17,17,17)
global RAD  # [6,3,17,17]
global PRSHBT  # (6,3,17)
global IZ  # [6,3]
global INIOCC  # (6,3,17)
global ISHLMX  # (6,3)
global AMZ  # [6,3]
#COMMON/UPD/
global NOCC  # (6,3,17)
global AUGR  # (6,3,17,17,17)
global RADR  # (6,3,17,17)
#COMMON/CALCAS2/
global IONSUM0  # (10)
global IFLSUM0  # (10)
global ESTORE0  # (10,28)
global EPHOTON0  # (10,28)
global DRXE0  # (10,28)
global DRYE0  # (10,28)
global DRZE0  # (10,28)
global DRX0  # (10,28)
global DRY0  # (10,28)
global DRZ0  # (10,28)
#COMMON/CALCAS3/
global IONSUM  # (10)
IONSUM=[0 for x in range(11)]
global IFLSUM  # (10)
IFLSUM=[0 for x in range(11)]
global ESTORE  # (10,28)
global EPHOTON  # (10,28)
global DRXE  # (10,28)
global DRYE  # (10,28)
global DRZE  # (10,28)
global DRX  # (10,28)
global DRY  # (10,28)
global DRZ  # [10,28]
def CALC3(NVAC,KGAS,LGAS,ELECEN,ISHELL,L1):
    # Machine translation of a Fortran subroutine: follow atomic de-excitation
    # cascades (photoelectron, shakeoff, fluorescence, Auger/Coster-Kronig)
    # for gas KGAS / molecular component LGAS, initial energy ELECEN and a
    # vacancy in shell ISHELL, accumulating results in the module globals.
    #
    # NOTE(review): this translation is not runnable as-is — the helpers
    # SHAKE, UPDATE, VACANCY, ANGGEN, DRCOS, DRAND48 and the "sys" module are
    # undefined here, several Fortran out-parameters (THET, JVAC, ILAST,
    # DRXX/DRYY/DRZZ, ESHK, RDUM) are read before assignment, some indexing
    # still uses Fortran tuple syntax (e.g. ELEV[ISHELL,IZ[KGAS,LGAS]] raises
    # TypeError on nested lists), and the GOTO* nested functions emulate
    # Fortran GOTO via calls/recursion without sharing ELEFT/INIT state.
    # Indentation was reconstructed from a whitespace-mangled source.
    # IMPLICIT #real*8(A-H,O-Z)
    # IMPLICIT #integer*8(I-N)
    # COMMON/GENCAS/ELEV(17,79),NSDEG(17),AA(17),BB(17),SCR,SCR1
    # COMMON/MIXC/PRSH(6,3,17,17),ESH(6,3,17),AUG(6,3,17,17,17),
    # /RAD(6,3,17,17),PRSHBT(6,3,17),IZ(6,3),INIOCC(6,3,17),ISHLMX(6,3),
    # /AMZ(6,3)
    # COMMON/UPD/NOCC(6,3,17),AUGR(6,3,17,17,17),RADR(6,3,17,17)
    # COMMON/CALCAS2/IONSUM0(10),IFLSUM0(10),ESTORE0(10,28),
    # /EPHOTON0(10,28),DRXE0(10,28),DRYE0(10,28),DRZE0(10,28),
    # /DRX0(10,28),DRY0(10,28),DRZ0(10,28)
    # COMMON/CALCAS3/IONSUM(10),IFLSUM(10),ESTORE(10,28),EPHOTON(10,28),
    # /DRXE(10,28),DRYE(10,28),DRZE(10,28),DRX(10,28),DRY(10,28),
    # /DRZ(10,28)
    # DIMENSION TEMP(17),TEMP1(289)
    #CHARACTER*6
    # SCR="",
    # SCR1=""
    #COMMON/GENCAS/
    global ELEV  # [17,79]
    global NSDEG  # [17]
    global AA  # [17]
    global BB  # [17]
    global SCR,SCR1
    #COMMON/MIXC/
    global PRSH  # (6,3,17,17)
    global ESH  # (6,3,17)
    global AUG  # (6,3,17,17,17)
    global RAD  # [6,3,17,17]
    global PRSHBT  # (6,3,17)
    global IZ  # [6,3]
    global INIOCC  # (6,3,17)
    global ISHLMX  # (6,3)
    global AMZ  # [6,3]
    #COMMON/UPD/
    global NOCC  # (6,3,17)
    global AUGR  # (6,3,17,17,17)
    global RADR  # (6,3,17,17)
    #COMMON/CALCAS2/
    global IONSUM0  # (10)
    global IFLSUM0  # (10)
    global ESTORE0  # (10,28)
    global EPHOTON0  # (10,28)
    global DRXE0  # (10,28)
    global DRYE0  # (10,28)
    global DRZE0  # (10,28)
    global DRX0  # (10,28)
    global DRY0  # (10,28)
    global DRZ0  # (10,28)
    #COMMON/CALCAS3/
    global IONSUM  # (10)
    global IFLSUM  # (10)
    global ESTORE  # (10,28)
    global EPHOTON  # (10,28)
    global DRXE  # (10,28)
    global DRYE  # (10,28)
    global DRZE  # (10,28)
    global DRX  # (10,28)
    global DRY  # (10,28)
    global DRZ  # [10,28]
    # Scratch arrays for the cumulative transition-probability sums.
    TEMP=[0 for x in range(18)]
    TEMP1=[0 for x in range(289)]
    #
    # CALCULATE CASCADE IN GAS KGAS AND MOLECULAR COMPONENT LGAS
    # WITH INTIAL ENERGY DEPOSIT ELECEN AND SHELL VACANCY CREATED AT ISHELL
    #
    ISTART=IONSUM[NVAC]
    ISTARTF=IFLSUM[NVAC]
    API=numpy.arccos(-1.00)  # pi
    TWOPI=2.00*API

    def GOTO100():
        # Fortran label 100: (re)start the cascade from the photoelectron.
        ELEFT=ELECEN
        INIT=1
        # SET STARTING ARRAY NOCC EQUAL TO INIOCC
        for I in range(1,17):
            NOCC[KGAS][LGAS][I]=INIOCC[KGAS][LGAS][I]
        IONSUM[NVAC]=ISTART+1
        IFLSUM[NVAC]=ISTARTF
        # STORE PHOTOELECTRON ENERGY AND ANGLE
        ESTORE[NVAC][IONSUM[NVAC]]=ELECEN-ELEV[ISHELL][IZ[KGAS][LGAS]]
        ELECN=ESTORE[NVAC][IONSUM[NVAC]]
        ELEFT=ELEFT-ELECN
        NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]-1
        # USE PHOTOELECTRON ANGULAR DISTRIBUTION
        APE=AA[ISHELL]
        BPE=BB[ISHELL]
        # NOTE(review): THET was a Fortran out-parameter of ANGGEN.
        ANGGEN(APE,BPE,THET)
        if(THET < 0.0):
            THET=THET+API
        R3=DRAND48(RDUM)
        PHI=TWOPI*R3
        DRCOS(DRX0[NVAC][L1],DRY0[NVAC][L1],DRZ0[NVAC][L1],THET,PHI,DRXX,DRYY,DRZZ)
        DRXE[NVAC][IONSUM[NVAC]]=DRXX
        DRYE[NVAC][IONSUM[NVAC]]=DRYY
        DRZE[NVAC][IONSUM[NVAC]]=DRZZ

    # LOOP AROUND CASCADE
    def GOTO4():
        # Fortran label 4: one cascade iteration — test for shakeoff, then
        # hand over to GOTO2 for the fluorescence/Auger choice.
        # CHECK FOR ELECTRON SHAKEOFF
        IDUM=1
        if(INIT > 1):
            ELECN=ESTORE[NVAC][IONSUM[NVAC]]
        INSUM=IONSUM[NVAC]
        SHAKE(ISHELL,ELECN,KGAS,LGAS,ESHK,IDUM,INSUM,JVAC)
        # CALCULATE ENERGY OF ELECTRON
        if(JVAC == 0):
            GOTO2()
        # ELECTRON + SHAKEOFF
        # NOTE(review): Fortran tuple-style indexing left over below.
        ELECN=ELECN-ESHK-ELEV[JVAC,IZ[KGAS,LGAS]]
        ESTORE[NVAC][IONSUM[NVAC]]=ELECN
        IONSUM[NVAC]=IONSUM[NVAC]+1
        # MAXIMUM ION CHARGE STATE =28
        if(IONSUM[NVAC]> 28) :
            print(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=',IONSUM[NVAC])
            sys.exit()
        # endif
        ESTORE[NVAC][IONSUM[NVAC]]=ESHK
        ELEFT=ELEFT-ESHK-ELEV[JVAC][IZ[KGAS][LGAS]]
        if(ELEFT < 0.0):
            GOTO100()
        # RANDOM EMISSION ANGLE
        R3=DRAND48(RDUM)
        THET=numpy.arccos(1.0-2.0*R3)
        R4=DRAND48(RDUM)
        PHI=TWOPI*R4
        DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
        DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
        DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)

    def GOTO2():
        # Fortran label 2: pick a fluorescence or Auger transition for the
        # current vacancy and recurse until no transitions remain.
        UPDATE(KGAS,LGAS,ISHELL)
        INIT=2
        # CHOOSE FLUORESCENCE OR AUGER TRANSITION
        TSUM=0.0
        for I in range(1,17):
            TSUM=TSUM+RADR[KGAS][LGAS][ISHELL][I]
            for J in range(1,17):
                TSUM=TSUM+AUGR[KGAS][LGAS][ISHELL][I][J]
        # NO MORE TRANSITIONS POSSIBLE
        if(TSUM == 0.0):
            return
        # NORMALISE TO 1.0
        for I in range(1,17):
            RADR[KGAS][LGAS][ISHELL][I]=RADR[KGAS][LGAS][ISHELL][I]/TSUM
            for J in range(1,17):
                AUGR[KGAS][LGAS][ISHELL][I][J]=AUGR[KGAS][LGAS][ISHELL][I][J]/TSUM
        # CREATE CUMULATIVE SUM ARRAY
        # NOTE(review): call syntax RADR(...) is a Fortran leftover; Python
        # would need RADR[KGAS][LGAS][ISHELL][1] here.
        TEMP[1]=RADR(KGAS,LGAS,ISHELL,1)
        for I in range(2,17):
            TEMP[I]=RADR[KGAS][LGAS][ISHELL][I]+TEMP[I-1]
        TEMP1[1]=AUGR[KGAS][LGAS][ISHELL][1][1]
        for I in range(2,17):
            TEMP1[I]=AUGR[KGAS][LGAS][ISHELL][I][1]+TEMP1[I-1]
        for J in range(1,16):
            for I in range(1,17):
                TEMP1[I+(J*17)]=AUGR[KGAS][LGAS][ISHELL][I][(J+1)]+TEMP1[I+(J*17)-1]
        # FIND FLUORESCENCE OR AUGER TRANSITION
        R1=DRAND48(RDUM)
        for I in range(1,17):
            if(R1 < TEMP[I]) :
                # STORE PHOTON ENERGY AND UPDATE NOCC
                IFLSUM[NVAC]=IFLSUM[NVAC]+1
                EPHOTON[NVAC][IFLSUM[NVAC]]=ELEV[ISHELL,IZ[KGAS,LGAS]]-ELEV[I,IZ[KGAS,LGAS]]
                ELEFT=ELEFT-EPHOTON[NVAC][IFLSUM[NVAC]]
                if(ELEFT < 0.0):
                    GOTO100()
                # RANDOM EMISSION DIRECTION
                R3=DRAND48(RDUM)
                THET=numpy.arccos(1.0-2.0*R3)
                R4=DRAND48(RDUM)
                PHI=TWOPI*R4
                DRX[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                DRY[NVAC][IFLSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                DRZ[NVAC][IFLSUM[NVAC]]=numpy.cos(THET)
                NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
                NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
                # FIND LOWEST VACANCY
                VACANCY(KGAS,LGAS,ISHELL,ILAST)
                if(ILAST == 1):
                    # NO MORE TRANSITIONS POSSIBLE
                    return
                # endif
                GOTO2()
            # endif
        # NOTE(review): bare "CONTINUE" (Fortran loop label) is a NameError
        # if reached; preserved unchanged.
        CONTINUE
        counter1=1
        counter2=1
        while(counter1):
            counter1=0
            R2=R1-TEMP[17]
            for J in range(1,17):
                if(counter1):
                    break
                for I in range(1,17):
                    if(R2 < TEMP1[I+((J-1)*17)]) :
                        # AUGER OR COSTER KRONIG
                        # STORE EJECTED ELECTRON AND UPDATE NOCC
                        ETEMP=ELEV[ISHELL,IZ[KGAS,LGAS]]-(ELEV[I,IZ[KGAS,LGAS]]+ELEV[I,IZ[KGAS,LGAS]+1])*0.5-(ELEV[J,IZ[KGAS,LGAS]]+ELEV[J,IZ[KGAS,LGAS]+1])*0.5
                        if(ETEMP < 0.0):
                            # DO NOT ALLOW NEGATIVE ENERGY TRANSITIONS
                            while(counter2):
                                R1=DRAND48(RDUM)
                                if(R1 < TEMP[17]):
                                    counter2=1
                                    counter1=1
                                    break
                        # endif
                        IONSUM[NVAC]=IONSUM[NVAC]+1
                        if(IONSUM[NVAC]> 28) :
                            print(' 3RD GEN ION CHARGE LIMITED TO 28 IONSUM=', IONSUM[NVAC])
                            sys.exit()
                        # endif
                        ESTORE[NVAC][IONSUM[NVAC]]=ETEMP
                        ELEFT=ELEFT-ETEMP
                        if(ELEFT < 0.0):
                            GOTO100()
                        # RANDOM EMISSION DIRECTION
                        R3=DRAND48(RDUM)
                        THET=numpy.arccos(1.0-2.0*R3)
                        R4=DRAND48(RDUM)
                        PHI=TWOPI*R4
                        DRXE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.cos(PHI)
                        DRYE[NVAC][IONSUM[NVAC]]=numpy.sin(THET)*numpy.sin(PHI)
                        DRZE[NVAC][IONSUM[NVAC]]=numpy.cos(THET)
                        NOCC[KGAS][LGAS][ISHELL]=NOCC[KGAS][LGAS][ISHELL]+1
                        NOCC[KGAS][LGAS][I]=NOCC[KGAS][LGAS][I]-1
                        NOCC[KGAS][LGAS][J]=NOCC[KGAS][LGAS][J]-1
                        # FIND LOWEST VACANCY
                        VACANCY(KGAS,LGAS,ISHELL,ILAST)
                        if(ILAST == 1):
                            # NO MORE TRANSITIONS POSSIBLE
                            # NOTE(review): "returnSCR" looks like a mangled
                            # plain "return" fused with a trailing comment.
                            returnSCR="",
                            # SCR1=""
                        # endif
                        GOTO4()
                    # endif
        # NOTE(review): placement of this error exit relative to the retry
        # loop above was ambiguous in the mangled source (Fortran fall-through
        # after the labelled loop).
        print(' ERROR IN CASCADE 3')
        sys.exit()
    # end
CALC3(1,1,1,1,1,1)
import itertools
from dataclasses import dataclass
from typing import List, TypeVar, Tuple, Optional, Iterator, Set
from adventofcode.util.exceptions import SolutionNotFoundException
from adventofcode.util.helpers import solution_timer
from adventofcode.util.input_helpers import get_input_for_day
T = TypeVar("T")
def flatten(values: List[List[T]]) -> Iterator[T]:
    """Lazily yield every element of a nested list in row-major order.

    chain.from_iterable already iterates each row, so the per-row
    ``[p for p in row]`` copy in the original was redundant work.
    """
    return itertools.chain.from_iterable(values)
def parse_inputs(input_data: List[str]) -> List[List["Octopus"]]:
    """Turn the puzzle's lines of digits into a grid of Octopus objects,
    each tagged with its (x, y) position."""
    grid = []
    for y, line in enumerate(input_data):
        row = [Octopus(int(char), x, y) for x, char in enumerate(line)]
        grid.append(row)
    return grid
def get_neighbors(
        array: List[List[T]],
        position: Tuple[int, int],
        n: int = 3,
        fill_value: Optional[T] = None
) -> List[List[Optional[T]]]:
    """
    Return the n x n box of values centred on *position* (given as (x, y)).

    Cells of the box that fall outside *array* are replaced by *fill_value*.

    :raises IndexError: if *position* itself is outside the array.
    :raises ValueError: if *n* is even (an even box has no centre cell).
    """
    x, y = position
    if y < 0 or len(array) <= y:
        raise IndexError(f"{y=} out of bounds.")
    elif x < 0 or len(array[0]) <= x:
        raise IndexError(f"{x=} out of bounds.")
    if n % 2 == 0:
        raise ValueError(f"{n=} cannot be even.")

    # Calculate the 'bounding box' with (x, y) as the center
    half = n // 2
    height = len(array)
    width = len(array[0])

    # Bug fix: the original compared x-coordinates against len(array) (the
    # number of ROWS), which is wrong for non-square grids, and its
    # left/right border branches were mutually exclusive, letting negative
    # indices wrap around when the box crossed both borders.  Clamp every
    # cell against the row width instead.
    neighbors = []
    for row_y in range(y - half, y + half + 1):
        if not 0 <= row_y < height:
            # Entire row of the box lies above or below the array.
            neighbors.append([fill_value] * n)
            continue
        neighbors.append([
            array[row_y][col_x] if 0 <= col_x < width else fill_value
            for col_x in range(x - half, x + half + 1)
        ])
    return neighbors
@dataclass
class Octopus:
    """One grid cell of the puzzle: its current energy plus coordinates.

    Equality is field-based (from @dataclass); hashing is position-based,
    so two octopi at the same cell hash alike regardless of energy.
    """
    value: int
    x: int
    y: int

    @property
    def position(self):
        """The (x, y) tuple locating this octopus in the grid."""
        return self.x, self.y

    def reset(self):
        """Drop the energy back to zero (done right after a flash)."""
        self.value = 0

    def __repr__(self):
        return repr(self.value)

    def __hash__(self):
        return hash(self.position)
class Swarm:
    """The whole grid of octopi, tracking the running total of flashes."""

    def __init__(self, octopi: List[List[Octopus]]):
        self.octopi = octopi
        self.flashes = 0
        # Total octopus count; used to detect a fully synchronised step.
        self.size = len(list(flatten(octopi)))

    def _find_starting_points(self, exclude: Optional[Set[Octopus]] = None) -> Set[Octopus]:
        """
        Return the octopi whose value exceeds 9 (i.e. that will flash),
        excluding any that have already flashed this step.

        Note: the flash threshold is strictly greater than 9, not >= 9 as the
        original docstring claimed.
        """
        if exclude is None:
            exclude = set()
        return {o for o in flatten(self.octopi) if o.value > 9 and o not in exclude}

    def _increment_neighbors(self, octopus: Octopus):
        """Add one energy unit to every in-bounds cell of the 3x3 box around
        *octopus* (including the flasher itself, which is harmless because it
        is reset to 0 at the end of the step)."""
        for neighbor in flatten(get_neighbors(self.octopi, octopus.position)):
            if neighbor is None:
                continue
            neighbor.value += 1

    def simulate(self):
        """Advance one step: everyone gains energy, flashes cascade until no
        new octopus crosses the threshold, then all flashers reset to 0."""
        for octopus in flatten(self.octopi):
            octopus.value += 1
        # Repeatedly propagate energy from flashers until no octopus that
        # has not yet flashed exceeds the threshold.
        starters = self._find_starting_points()
        have_flashed = set()
        while starters:
            for octopus in starters:
                self._increment_neighbors(octopus)
            have_flashed.update(starters)
            starters = self._find_starting_points(have_flashed)
        for octopus in have_flashed:
            octopus.value = 0
        self.flashes += len(have_flashed)
        return self.octopi

    def simulate_n(self, n: int):
        """Run *n* simulation steps."""
        for _ in range(n):
            self.simulate()

    def find_synchronisation_step(self):
        """Simulate until every octopus flashes in the same step and return
        that (1-based) step number."""
        diff = 0
        step = 0
        # When all octopi flash, self.flashes grows by exactly self.size.
        while diff != self.size:
            previous = self.flashes
            self.simulate()
            step += 1
            diff = self.flashes - previous
        return step

    def __repr__(self):
        text = ""
        for row in self.octopi:
            text += ''.join(map(str, row)) + '\n'
        return text
@solution_timer(2021, 11, 1)
def part_one(input_data: List[str]):
    """Count the total number of flashes during the first 100 steps."""
    swarm = Swarm(parse_inputs(input_data))
    swarm.simulate_n(100)
    answer = swarm.flashes
    if not answer:
        raise SolutionNotFoundException(2021, 11, 1)
    return answer
@solution_timer(2021, 11, 2)
def part_two(input_data: List[str]):
    """Find the first step in which every octopus flashes simultaneously."""
    swarm = Swarm(parse_inputs(input_data))
    answer = swarm.find_synchronisation_step()
    if not answer:
        raise SolutionNotFoundException(2021, 11, 2)
    return answer
if __name__ == '__main__':
    # Run both puzzle parts against the day-11 input.
    data = get_input_for_day(2021, 11)
    part_one(data)
    part_two(data)
|
<filename>ttskit/waveglow/mel2samp.py
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************\
from pathlib import Path
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(Path(__file__).stem)
import os
import random
import argparse
import json
import torch
import torch.utils.data
import sys
from scipy.io.wavfile import read
import librosa
import numpy as np
import traceback
# We're using the audio processing from TacoTron2 to make sure it matches
from ttskit.mellotron.layers import TacotronSTFT
MAX_WAV_VALUE = 32768.0
def files_to_list(filename):
    """
    Read a filelist (one "<wav path>\t<text>" entry per line, paths relative
    to the filelist's directory) and return the paths that exist on disk.
    """
    with open(filename, encoding='utf-8') as f:
        lines = f.readlines()
    base_dir = os.path.dirname(filename)
    existing = []
    for entry in lines:
        candidate = os.path.join(base_dir, entry.split('\t')[0])
        if os.path.isfile(candidate):
            existing.append(candidate)
    return existing
def load_wav_to_torch(full_path, sr_force=None):
    """
    Load a wav file into a float torch tensor at its native sample rate,
    resampling to *sr_force* when given.  Returns (tensor, sample_rate).
    """
    data, sampling_rate = librosa.load(full_path, sr=None)
    if sr_force is not None and sampling_rate != sr_force:
        data = librosa.resample(data, orig_sr=sampling_rate, target_sr=sr_force)
    if max(data) > 1 or min(data) < -1:
        logger.info(
            'Wave max or min value out of -1 to 1. Max value is {} and min value is {}.'.format(max(data), min(data)))
    # FIXME: volume normalisation — peak-normalise to 0.9, with a 0.01 floor
    # guarding against division blow-up on near-silent clips.
    peak = max(np.max(np.abs(data)), 0.01)
    data = 0.9 * data / peak
    return torch.from_numpy(data).float(), (sr_force or sampling_rate)
class Mel2Samp(torch.utils.data.Dataset):
    """
    Dataset pairing a random fixed-length audio segment with its mel
    spectrogram, computed via the Tacotron2 STFT so features match training.
    """

    def __init__(self, training_files, segment_length, filter_length,
                 hop_length, win_length, sampling_rate, mel_fmin, mel_fmax):
        # *training_files* is a filelist path; an unreadable path yields an
        # empty dataset rather than an error.
        if os.path.isfile(str(training_files)):
            self.audio_files = files_to_list(training_files)
            self.ids = list(range(len(self.audio_files)))
        else:
            self.audio_files = []
            self.ids = []
        # Fixed seed -> deterministic shuffle across runs.
        random.seed(1234)
        random.shuffle(self.audio_files)
        self.stft = TacotronSTFT(filter_length=filter_length,
                                 hop_length=hop_length,
                                 win_length=win_length,
                                 sampling_rate=sampling_rate,
                                 mel_fmin=mel_fmin, mel_fmax=mel_fmax)
        self.segment_length = segment_length
        self.sampling_rate = sampling_rate

    def get_mel(self, audio):
        """Compute the mel spectrogram of a 1-D audio tensor."""
        audio_norm = audio  # already normalised to [-1, 1] at load time
        audio_norm = audio_norm.unsqueeze(0)
        audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
        melspec = self.stft.mel_spectrogram(audio_norm)
        melspec = torch.squeeze(melspec, 0)
        return melspec

    def get_item(self, index):
        """Load one file and return a (mel, audio) training pair.

        :raises ValueError: if the loaded sample rate differs from the target.
        """
        filename = self.audio_files[index]
        audio, sampling_rate = load_wav_to_torch(filename, sr_force=self.sampling_rate)
        if sampling_rate != self.sampling_rate:
            raise ValueError("{} SR doesn't match target {} SR".format(
                sampling_rate, self.sampling_rate))
        # Take a random segment of exactly segment_length samples,
        # zero-padding clips that are too short.
        if audio.size(0) >= self.segment_length:
            max_audio_start = audio.size(0) - self.segment_length
            audio_start = random.randint(0, max_audio_start)
            audio = audio[audio_start:audio_start + self.segment_length]
        else:
            audio = torch.nn.functional.pad(
                audio, (0, self.segment_length - audio.size(0)), 'constant').data
        mel = self.get_mel(audio)
        return (mel, audio)

    def __getitem__(self, index):
        # Fault-tolerant loading for long training runs: on failure, log and
        # retry with a random other index instead of killing the run.
        tmp = index
        while True:
            try:
                return self.get_item(tmp)
            # Was a bare "except:", which also swallowed KeyboardInterrupt /
            # SystemExit; Exception keeps those interruptible.
            except Exception:
                # Original format string had one placeholder for two args;
                # log both the requested and the retried index.
                logger.info('The index <{}> (retry <{}>) loaded failed!'.format(index, tmp))
                traceback.print_exc()
                tmp = np.random.choice(self.ids)

    def __len__(self):
        return len(self.audio_files)
# ===================================================================
# Takes directory of clean audio and makes directory of spectrograms
# Useful for making test sets
# ===================================================================
if __name__ == "__main__":
    # Get defaults so it can work with no Sacred
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', "--filelist_path", required=True)
    parser.add_argument('-c', '--config', type=str,
                        help='JSON file for configuration')
    parser.add_argument('-o', '--output_dir', type=str,
                        help='Output directory')
    args = parser.parse_args()

    # Build the dataset from the "data_config" section of the JSON config.
    with open(args.config) as f:
        data = f.read()
    data_config = json.loads(data)["data_config"]
    mel2samp = Mel2Samp(**data_config)

    filepaths = files_to_list(args.filelist_path)

    # Make directory if it doesn't exist
    if not os.path.isdir(args.output_dir):
        os.makedirs(args.output_dir)
        os.chmod(args.output_dir, 0o775)

    # Save each file's mel spectrogram next to its name as <name>.pt.
    for filepath in filepaths:
        audio, sr = load_wav_to_torch(filepath)
        melspectrogram = mel2samp.get_mel(audio)
        filename = os.path.basename(filepath)
        new_filepath = args.output_dir + '/' + filename + '.pt'
        print(new_filepath)
        torch.save(melspectrogram, new_filepath)
|
from bs4 import BeautifulSoup
from dedoc.readers.docx_reader.styles_extractor import StylesExtractor
from dedoc.readers.docx_reader.properties_extractor import change_paragraph_properties, change_run_properties
from dedoc.readers.docx_reader.data_structures import BaseProperties
from typing import List, Dict, Union
import re
from dedoc.readers.docx_reader.windows_font_mapping import mapping
# Map of OOXML w:numFmt values to the FIRST item of each numbering sequence;
# get_next_item() below derives the shift-th item from these starting points.
numFmtList = {"decimal": "1",  # 1, 2, 3, ..., 10, 11, 12, ...
              "lowerLetter": "a",  # a, b, c, ..., y, z, aa, bb, cc, ..., yy, zz, aaa, bbb, ccc, ...
              "lowerRoman": "i",  # i, ii, iii, iv, ..., xviii, xix, xx, xxi, ...
              "none": "",
              "russianLower": "а",  # а, б, в, ..., ю, я, аа, бб, вв, ..., юю, яя, ааа, ббб, ввв, ...
              "russianUpper": "А",  # А, Б, В, ..., Ю, Я, АА, ББ, ВВ, ..., ЮЮ, ЯЯ, ААА, БББ, ВВВ, ...
              "upperLetter": "A",  # A, B, C, ..., Y, Z, AA, BB, CC, ..., YY, ZZ, AAA, BBB, CCC, ...
              "upperRoman": "I",  # I, II, III, IV, ..., XVIII, XIX, XX, XXI, ...
              }
def get_next_item(num_fmt: str,
                  shift: int) -> str:
    """
    computes the next item of the list sequence
    :param num_fmt: some value from numFmtList
    :param shift: 0-based shift from the beginning of list numbering
    :return: string representation of the numbering item
    """
    if num_fmt == "none":
        return numFmtList[num_fmt]
    if num_fmt == "decimal":
        return str(int(numFmtList[num_fmt]) + shift)
    if num_fmt == "lowerLetter" or num_fmt == "upperLetter":
        # a..z, then aa..zz, then aaa... — letter repeated once per 26-block
        shift1, shift2 = shift % 26, shift // 26 + 1
        return chr(ord(numFmtList[num_fmt]) + shift1) * shift2
    if num_fmt == "russianLower" or num_fmt == "russianUpper":
        # same scheme over the 32-letter Russian alphabet
        shift1, shift2 = shift % 32, shift // 32 + 1
        return chr(ord(numFmtList[num_fmt]) + shift1) * shift2
    if num_fmt == "lowerRoman" or num_fmt == "upperRoman":
        # Roman numbering is 1-based: shift 0 must yield "i", shift 3 "iv".
        # Fixes two defects in the original: it converted `shift` itself
        # (so shift 0 produced ""), and it lacked the subtractive pairs
        # (iv, ix, xl, ...), contradicting the sequence documented in
        # numFmtList ("i, ii, iii, iv, ..., xviii, xix, ...").
        value = shift + 1
        mapping = [(1000, 'm'), (900, 'cm'), (500, 'd'), (400, 'cd'),
                   (100, 'c'), (90, 'xc'), (50, 'l'), (40, 'xl'),
                   (10, 'x'), (9, 'ix'), (5, 'v'), (4, 'iv'), (1, 'i')]
        result = ""
        for number, letters in mapping:
            cnt, value = value // number, value % number
            if num_fmt == "upperRoman":
                # .upper() handles two-letter subtractive tokens correctly,
                # unlike the original per-character chr/ord arithmetic.
                letters = letters.upper()
            result += letters * cnt
        return result
# Map of OOXML w:suff values to the separator character inserted between the
# list number and the paragraph text ("tab" is the OOXML default).
getSuffix = {"nothing": "",
             "space": " ",
             "tab": "\t"}
class AbstractNum:
    def __init__(self,
                 tree: BeautifulSoup,
                 styles_extractor: StylesExtractor):
        """
        :param tree: BeautifulSoup tree with abstractNum content
        :param styles_extractor: StylesExtractor
        """
        self.styles_extractor = styles_extractor
        self.abstract_num_id = tree['w:abstractNumId']
        self.properties = {}  # properties for all levels {"styleLink", "restart"}
        if tree.numStyleLink:
            # styleLink -> abstractNumId of the other numbering
            self.properties['styleLink'] = tree.numStyleLink['w:val']
        else:
            self.properties['styleLink'] = None
        try:
            # NOTE(review): if the attribute is present but falsy (e.g. ""),
            # 'restart' is never assigned in this branch — confirm intended.
            if tree['w15:restartNumberingAfterBreak']:
                self.properties['restart'] = bool(int(tree['w15:restartNumberingAfterBreak']))
        except KeyError:
            # Attribute absent: numbering does not restart after a break.
            self.properties['restart'] = False
        # properties for each list level {level number: properties}
        # properties = {"lvlText", "numFmt", "start", "lvlRestart", "restart", "suff", "styleId", "pPr", "rPr"}
        self.levels = {}
def parse(self,
lvl_list: List[BeautifulSoup]):
"""
save information about levels in self.levels
:param lvl_list: list with BeautifulSoup trees which contain information about levels
"""
# isLgl (only mention)
# lvlText (val="some text %num some text")
# numFmt (val="bullet", "decimal")
# pPr -> ind
# pStyle -> pPr
# rPr -> sz, bold, italic, underlined
# start (w:val="1")
# suff (w:val="nothing", "tab" - default, "space")
# lvlRestart (w:val="0")
# restart - startOverride for each level
for lvl in lvl_list:
ilvl = lvl['w:ilvl']
if ilvl not in self.levels:
self.levels[ilvl] = {}
if lvl.lvlText and lvl.lvlText['w:val']:
# some characters in bullets are displayed incorrectly
# replace them with the unicode equivalent
# use mapping between hexadecimal code of windows characters and unicode characters
# if hexadecimal code was found in mapping dictionary use it's unicode equivalent
if hex(ord(lvl.lvlText['w:val'][0])) in mapping:
self.levels[ilvl]['lvlText'] = mapping[hex(ord(lvl.lvlText['w:val'][0]))]
else:
self.levels[ilvl]['lvlText'] = lvl.lvlText['w:val']
elif 'lvlText' not in self.levels[ilvl]:
self.levels[ilvl]['lvlText'] = ""
if lvl.isLgl:
self.levels[ilvl]['numFmt'] = 'decimal'
else:
if lvl.numFmt:
self.levels[ilvl]['numFmt'] = lvl.numFmt['w:val']
elif 'numFmt' not in self.levels[ilvl]:
self.levels[ilvl]['numFmt'] = 'none'
if lvl.start:
self.levels[ilvl]['start'] = int(lvl.start['w:val'])
elif 'start' not in self.levels[ilvl]:
self.levels[ilvl]['start'] = 1
if lvl.lvlRestart:
self.levels[ilvl]['lvlRestart'] = bool(int(lvl.lvlRestart['w:val']))
elif 'lvlRestart' not in self.levels[ilvl]:
self.levels[ilvl]['lvlRestart'] = True
if 'restart' not in self.levels[ilvl]:
self.levels[ilvl]['restart'] = self.properties['restart']
if lvl.suff:
self.levels[ilvl]['suff'] = getSuffix[lvl.suff['w:val']]
elif 'suff' not in self.levels[ilvl]:
self.levels[ilvl]['suff'] = getSuffix["tab"]
# extract information from paragraphs and runs properties
if lvl.pStyle:
self.levels[ilvl]['styleId'] = lvl.pStyle['w:val']
elif 'styleId' not in self.levels[ilvl]:
self.levels[ilvl]['styleId'] = None
# paragraph -> run
if lvl.pPr:
self.levels[ilvl]['pPr'] = lvl.pPr
elif 'pPr' not in self.levels[ilvl]:
self.levels[ilvl]['pPr'] = None
if lvl.rPr:
self.levels[ilvl]['rPr'] = lvl.rPr
elif 'rPr' not in self.levels[ilvl]:
self.levels[ilvl]['rPr'] = None
if lvl.startOverride:
self.levels[ilvl]['restart'] = True
self.levels[ilvl]['start'] = int(lvl.startOverride['w:val'])
class Num(AbstractNum):
    """
    Parsed representation of a docx <w:num> element: links a numId to an
    abstractNum definition and applies the per-level lvlOverride elements.
    """
    def __init__(self,
                 num_id: str,
                 abstract_num_list: Dict[str, BeautifulSoup],
                 num_list: Dict[str, BeautifulSoup],
                 styles_extractor: StylesExtractor):
        """
        :param num_id: numId for num element
        :param abstract_num_list: dictionary with abstractNum BeautifulSoup trees
        :param abstract_num_list: keyed by w:abstractNumId
        :param num_list: dictionary with num BeautifulSoup trees, keyed by w:numId
        :param styles_extractor: StylesExtractor
        """
        self.num_id = num_id
        num_tree = num_list[num_id]
        abstract_num_tree = abstract_num_list[num_tree.abstractNumId['w:val']]
        super().__init__(abstract_num_tree, styles_extractor)  # create properties
        # follow the numStyleLink chain: each link names (via w:styleLink) another
        # abstractNum whose definition should be used instead of this one
        while self.properties['styleLink']:
            linked_tree = None
            for abstract_num in abstract_num_list.values():
                if abstract_num.find('w:styleLink', attrs={'w:val': self.properties['styleLink']}):
                    linked_tree = abstract_num
                    break
            if linked_tree is None:
                # BUGFIX: a dangling styleLink previously made this loop spin
                # forever (nothing changed between iterations); stop and keep
                # the last successfully resolved definition instead
                break
            abstract_num_tree = linked_tree
            super().__init__(abstract_num_tree, styles_extractor)
        self.parse(abstract_num_tree.find_all('w:lvl'))
        # override some of abstractNum properties
        if num_tree.lvlOverride:
            lvl_list = num_tree.find_all('w:lvlOverride')
            self.parse(lvl_list)

    def get_level_info(self,
                       level_num: str) -> Dict[str, Union[str, bool, int]]:
        """
        :param level_num: ilvl for getting information for specific level
        :return: a copy of the properties dict for the level
        :raises KeyError: if the level is unknown
        """
        return self.levels[level_num].copy()
class NumberingExtractor:
    """
    Computes the textual numbering (e.g. "1.2.", "a)", bullets) for docx list
    paragraphs, tracking per-list counters across successive parse() calls.
    """
    def __init__(self,
                 xml: BeautifulSoup,
                 styles_extractor: StylesExtractor):
        """
        :param xml: BeautifulSoup tree with numberings
        :param styles_extractor: StylesExtractor
        """
        # NOTE(review): when xml is falsy the constructor returns early and the
        # instance is left without any attributes -- confirm callers never use
        # such an instance
        if xml:
            self.numbering = xml.numbering
            if not self.numbering:
                raise Exception("there are no numbering")
        else:
            return
        if styles_extractor:
            self.styles_extractor = styles_extractor
        else:
            raise Exception("styles extractor must not be empty")
        # {(abstractNumId, ilvl): current number for list element}
        self.numerations = {}
        self.prev_num_id = None
        self.prev_abstract_num_id = None
        # {abstractNumId: ilvl} previous ilvl for list element with given numId
        self.prev_ilvl = {}
        # {abstractNumId: numId} previous numId for list element with given numId and abstractNumId
        self.prev_numId = {}
        # {(abstractNumId, ilvl): shift for wrong numeration}
        self.shifts = {}
        # the number of levels for current list
        self.levels_count = 1
        abstract_num_list = {abstract_num['w:abstractNumId']: abstract_num
                             for abstract_num in xml.find_all('w:abstractNum')}
        num_list = {num['w:numId']: num for num in xml.find_all('w:num')}
        # dictionary with num properties
        self.num_list = {num_id: Num(num_id, abstract_num_list, num_list, styles_extractor) for num_id in num_list}

    def _get_list_text(self,
                       ilvl: str,
                       num_id: str) -> str:
        """
        counts list item number and it's text
        :param ilvl: string with list ilvl
        :param num_id: string with list numId
        :return: text of the list numeration (empty string for unknown numId)
        """
        if num_id not in self.num_list:
            return ""
        abstract_num_id = self.num_list[num_id].abstract_num_id
        # checking the correctness of the list numeration: some documents
        # reference an ilvl that has no counters for its parent levels, so a
        # shift down to the deepest usable level is computed once and cached
        if (abstract_num_id, ilvl) in self.shifts:
            ilvl = str(int(ilvl) - self.shifts[(abstract_num_id, ilvl)])
        else:
            correct_ilvl = int(ilvl)
            correct = False
            while correct_ilvl > 0 and not correct:
                lvl_info = self.num_list[num_id].get_level_info(str(correct_ilvl))
                # placeholders like %1, %2 reference the counters of other levels
                levels = re.findall(r'%\d+', lvl_info['lvlText'])
                try:
                    for level in levels:
                        if int(level[1:]) - 1 == correct_ilvl:
                            continue
                        self._get_next_number(num_id, level[1:])
                except KeyError:
                    # a referenced counter is missing -> try one level higher
                    correct_ilvl -= 1
                correct = True
            self.shifts[(abstract_num_id, ilvl)] = int(ilvl) - correct_ilvl
            ilvl = str(correct_ilvl)
        lvl_info = self.num_list[num_id].get_level_info(ilvl)
        # there is the other list
        if self.prev_abstract_num_id and self.prev_num_id and self.prev_abstract_num_id != abstract_num_id \
                and self.num_list[self.prev_num_id].properties['restart']:
            del self.prev_ilvl[self.prev_abstract_num_id]
        # there is the information about this list
        if abstract_num_id in self.prev_ilvl:
            prev_ilvl = self.prev_ilvl[abstract_num_id]
            # startOverride:
            if lvl_info['restart']:
                if abstract_num_id in self.prev_numId:
                    prev_num_id = self.prev_numId[abstract_num_id]
                else:
                    prev_num_id = None
                if prev_num_id and prev_num_id != num_id:
                    self.numerations[(abstract_num_id, ilvl)] = lvl_info['start']
            # it's a new deeper level
            if prev_ilvl < ilvl and lvl_info['lvlRestart'] or (abstract_num_id, ilvl) not in self.numerations:
                self.numerations[(abstract_num_id, ilvl)] = lvl_info['start']
            # it's a continue of the old level (current level <= previous level)
            else:
                self.numerations[(abstract_num_id, ilvl)] += 1
        # there isn't the information about this list
        else:
            self.numerations[(abstract_num_id, ilvl)] = lvl_info['start']
        self.prev_ilvl[abstract_num_id] = ilvl
        self.prev_numId[abstract_num_id] = num_id
        self.prev_abstract_num_id = abstract_num_id
        self.prev_num_id = num_id
        # substitute each %N placeholder with the current counter text
        text = lvl_info['lvlText']
        levels = re.findall(r'%\d+', text)
        self.levels_count = len(levels)
        for level in levels:
            # level = '%level'
            level = level[1:]
            try:
                next_number = self._get_next_number(num_id, level)
            except KeyError as err:
                # TODO handle very strange list behaviour
                # if we haven't found given abstractNumId we set counter = 1
                self.numerations[tuple(err.args[0])] = 1
                next_number = self._get_next_number(num_id, level)
            text = re.sub(r'%\d+', next_number, text, count=1)
        text += lvl_info['suff']
        return text

    def _get_next_number(self,
                         num_id: str,
                         level: str):
        """
        computes the shift from the first item for given list and text of next item according to the shift
        :param num_id: string with list numId
        :param level: list level = ilvl + 1
        :return: text of the next item in numbering
        :raises KeyError: if no counter exists yet for (abstractNumId, ilvl)
        """
        abstract_num_id = self.num_list[num_id].abstract_num_id
        # level = ilvl + 1
        ilvl = str(int(level) - 1)
        lvl_info = self.num_list[num_id].get_level_info(ilvl)
        # bullets have constant text, no counting needed
        if lvl_info['numFmt'] == "bullet":
            return lvl_info['lvlText']
        shift = self.numerations[(abstract_num_id, ilvl)] - 1
        num_fmt = get_next_item(lvl_info['numFmt'], shift)
        return num_fmt

    def parse(self,
              xml: BeautifulSoup,
              paragraph_properties: BaseProperties,
              run_properties: BaseProperties):
        """
        parses numPr content and extracts properties for paragraph for given numId and list level
        changes old_paragraph properties according to list properties
        changes run_properties adding text of numeration and it's properties
        :param xml: BeautifulSoup tree with numPr from document.xml or styles.xml (style content)
        :param paragraph_properties: Paragraph for changing
        :param run_properties: Run for changing
        """
        if not xml:
            return
        ilvl, num_id = xml.ilvl, xml.numId
        if not num_id or num_id['w:val'] not in self.num_list:
            return
        else:
            num_id = num_id['w:val']
        if not ilvl:
            try:
                # no explicit ilvl: resolve it through the paragraph's styleId
                style_id = xml['w:styleId']
                num = self.num_list[num_id]
                # find link on this styleId in the levels list
                for level_num, level in num.levels.items():
                    if 'styleId' in level and level['styleId'] == style_id:
                        ilvl = level_num
            except KeyError:
                return
        else:
            ilvl = ilvl['w:val']
        lvl_info = self.num_list[num_id].get_level_info(ilvl)
        text = self._get_list_text(ilvl, num_id)
        # apply the level's style, paragraph and run properties
        if lvl_info['styleId']:
            self.styles_extractor.parse(lvl_info['styleId'], paragraph_properties, "numbering")
        if lvl_info['pPr']:
            change_paragraph_properties(paragraph_properties, lvl_info['pPr'])
        if lvl_info['rPr']:
            change_run_properties(run_properties, lvl_info['rPr'])
            change_run_properties(paragraph_properties, lvl_info['rPr'])
        run_properties.text = text
        paragraph_properties.list_level = self.levels_count
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# @Author : <NAME>
# @Email : <EMAIL>
import cmath
from typing import List
def refine_celegans_posture(neurons: List[List],
                            ccords: List
                            ):
    """Correct the posture of C. elegans: center, rotate head-up, flip, re-center.

    :param neurons: per-neuron lists of points in Cartesian coordinates (x, y, z)
    :param ccords: 5 points (x, y):
        [mass_of_center, anterior_y, posterior_y, ventral_x, dorsal_x]
        NOTE(review): the original docstring listed dorsal before ventral while
        the code unpacks ventral before dorsal; the code's order is documented
        here -- confirm against callers.
    :return: new nested lists with refined coordinates (inputs are not mutated)
    """
    # the original made shallow copies of neurons/ccords here; both were dead
    # (neurons is rebound below and unpacking does not mutate ccords)
    mass_of_center, anterior_y, posterior_y, ventral_x, dorsal_x = ccords
    # 1 Zero-centered: subtract the center of mass from x/y (z untouched)
    neurons = [[[pt[0] - mass_of_center[0], pt[1] - mass_of_center[1], pt[2]] for pt in pts] for pts in neurons]
    anterior_y = [a - b for a, b in zip(anterior_y, mass_of_center)]
    posterior_y = [a - b for a, b in zip(posterior_y, mass_of_center)]
    ventral_x = [a - b for a, b in zip(ventral_x, mass_of_center)]
    # 2 Transfer tail-head direction into y-axis positive direction
    # 2.1 Coordinate transformation: Cartesian -> Polar: (x, y, z) -> (rho, phi, z), (x, y) -> (rho, phi)
    neurons = [[[*cmath.polar(complex(pt[0], pt[1])), pt[2]] for pt in pts] for pts in neurons]
    posterior_y = [*cmath.polar(complex(*posterior_y))]
    anterior_y = [*cmath.polar(complex(*anterior_y))]
    ventral_x = [*cmath.polar(complex(*ventral_x))]
    # 2.2 Rotation operation: subtract the angle that maps the head onto +y
    tail_head_phi = anterior_y[1]
    pos_y_phi = tail_head_phi - cmath.pi / 2
    neurons = [[[pt[0], pt[1] - pos_y_phi, pt[2]] for pt in pts] for pts in neurons]
    posterior_y[1] = posterior_y[1] - pos_y_phi
    anterior_y[1] = anterior_y[1] - pos_y_phi
    ventral_x[1] = ventral_x[1] - pos_y_phi
    # 2.3 Coordinate transformation: Polar -> Cartesian: (rho, phi, z) -> (x, y, z), (rho, phi) -> (x, y)
    neurons = [[[cmath.rect(pt[0], pt[1]).real, cmath.rect(pt[0], pt[1]).imag, pt[2]] for pt in pts] for pts in neurons]
    ventral_pt = [cmath.rect(*ventral_x).real, cmath.rect(*ventral_x).imag]
    # 3 Flip ventral-dorsal direction into x-axis positive direction
    neurons = [[[-pt[0], pt[1], pt[2]] for pt in pts] for pts in neurons] if ventral_pt[0] < 0 else neurons
    # 4 Robust transition: re-center y on the mean per-neuron y ...
    transition_y = sum([sum([pt[1] for pt in pts]) / len(pts) for pts in neurons]) / len(neurons)
    neurons = [[[pt[0], pt[1] - transition_y, pt[2]] for pt in pts] for pts in neurons]
    # ... and x on the mean x of points near the body axis (|y| <= 10);
    # the +1e-5 guards against empty filtered point lists
    temp = [list(filter(lambda pt: abs(pt[1]) <= 10.0, pts)) for pts in neurons]
    transition_x = sum([sum([pt[0] for pt in pts]) / (len(pts) + 1e-5) for pts in temp]) / (len(temp) + 1e-5)
    neurons = [[[pt[0] - transition_x, pt[1], pt[2]] for pt in pts] for pts in neurons]
    return neurons
|
import numpy as np
from netCDF4 import Dataset
from datetime import datetime
from datetime import timedelta
import os
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from tools_BAIU import get_lonlat, prep_proj_multi, get_arain, get_var, def_cmap, draw_rec
from scipy.interpolate import griddata
# quick=True shows plots interactively; False writes PNGs to disk
quick = True
#quick = False
# TC flag: the second assignment deliberately disables it
TC = True
TC = False
def main( stime=datetime( 2018, 6, 30, 0), vtime_ref=datetime( 2018, 7, 6, 0 ),
          vtime=datetime( 2018, 7, 5, 0 ), nvar="RAIN", nvar2="MSLP",
          hpa=950, hpa2=950 ):
    """Rank the 50 ensemble members by area-mean rain at vtime_ref, then plot
    one map per member (shaded nvar, contoured nvar2) valid at vtime.

    :param stime: forecast initialization time
    :param vtime_ref: valid time used for the ranking metric
    :param vtime: valid time of the plotted fields
    :param nvar: shaded variable name
    :param nvar2: contoured variable name
        NOTE(review): the nvar2 is None branch skips def_cmap, but var2_/levs2
        are still used below -- confirm nvar2 is always given
    :param hpa: pressure level [hPa] for nvar
    :param hpa2: pressure level [hPa] for nvar2
    """
    TOP = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/BAIU2018_5.3.6"
    adt_h = 24
    adt = timedelta( hours=adt_h )
    # lon/lat box over which the ranking rain is averaged
    slon = 130.0
    elon = 137.5
    slat = 33.0
    elat = 36.0
    INFO = {"TOP": TOP, }
    lon2d, lat2d = get_lonlat( INFO, stime=stime )
    rain_l = np.zeros( 50 )
    # get reference: area-mean rain for each of the 50 members
    for m in range( 50 ):
        #rain_ = get_arain( INFO, stime=stime, vtime=vtime_ref, adt=adt, m=m+1 )
        rain_ = get_var( INFO, nvar="RAIN", stime=stime, vtime=vtime_ref, m=m+1, adt=adt )
        rain_l[m] = np.mean( rain_[ (lon2d >= slon ) & (lon2d <= elon) & (lat2d >= slat) & (lat2d <= elat) ] )
    print( rain_l )
    #print( np.sort( rain_l )[::-1] )
    #print( np.argsort( rain_l )[::-1] )
    # member indices sorted by descending area-mean rain
    mem_l = np.argsort( rain_l )[::-1]
    rain_l = np.sort( rain_l )[::-1]
    print( mem_l )
    print( rain_l )
    print( "" )
    # map extent for plotting
    lons = 105 + 6
    lone = 165 - 6
    late = 50
    lats = 16
    dth = 24
    cmap, levs, unit, extend, nvar_, fac = def_cmap( nvar=nvar, hpa=hpa )
    if nvar2 is not None:
        cmap2, levs2, unit2, extend2, nvar2_, fac2 = def_cmap( nvar=nvar2, hpa=hpa2 )
    bbox = {'facecolor':'w', 'alpha':1.0, 'pad':2}
    # contour style; lighter lines when PW shading would hide black contours
    lw = 0.5
    lc = 'k'
    if nvar == "PW":
        lc = 'gainsboro' #darkgray'
        lw = 1.0
    # plot from the driest member (rank 50) up to the wettest (rank 1)
    for i, m in enumerate( mem_l[::-1] ):
        rank = 50 - i
        print( m )
        fig, ax1 = plt.subplots( 1, 1, figsize=( 5, 4.4 ) )
        fig.subplots_adjust( left=0.05, bottom=0.03, right=0.9, top=0.95,
                             wspace=0.1, hspace=0.3)
        m_l = prep_proj_multi('merc', [ ax1 ], ll_lon=lons, ur_lon=lone,
                              ll_lat=lats, ur_lat=late, fs=6 )
        x2d, y2d = m_l[0](lon2d, lat2d)
        ptit = "Rank: {0:0=2}, mem: {1:0=2}".format( rank, m )
        var_ = get_var( INFO, nvar=nvar, stime=stime, vtime=vtime, m=m+1,
                        adt=timedelta( hours=dth ), hpa=hpa )
        SHADE = ax1.contourf( x2d, y2d, var_*fac,
                              cmap=cmap, levels=levs, extend=extend )
        var2_ = get_var( INFO, nvar=nvar2, stime=stime, vtime=vtime, m=m+1,
                         adt=timedelta( hours=dth ), hpa=hpa2 )
        CONT = ax1.contour( x2d, y2d, var2_*fac2, colors=lc,
                            linewidths=lw, levels=levs2 )
        ax1.clabel( CONT, fontsize=6, fmt='%.0f' )
        # outline the averaging box used for the ranking
        draw_rec( m_l[0], ax1, slon, elon, slat, elat,
                  c='k', lw=1.0, ls='solid' )
        ax1.text( 0.5, 1.01, ptit,
                  fontsize=10, transform=ax1.transAxes,
                  horizontalalignment='center',
                  verticalalignment='bottom',
                  zorder=5,
                  bbox = bbox )
        ptit2 = "Init:{0:}\nvalid:{1:}".format( stime.strftime('%HUTC %m/%d'),
                                                vtime.strftime('%HUTC %m/%d') )
        ax1.text( 0.99, 0.98, ptit2,
                  fontsize=10, transform=ax1.transAxes,
                  ha='right',
                  va='top',
                  zorder=5,
                  bbox = bbox )
        # vertical colorbar to the right of the map
        pos = ax1.get_position()
        cb_width = 0.01
        cb_height = pos.height*0.9
        ax_cb = fig.add_axes( [pos.x1+0.005, pos.y1-cb_height, cb_width, cb_height] )
        cb = plt.colorbar( SHADE, cax=ax_cb,
                           orientation = 'vertical', ticks=levs[::2] )
        cb.ax.tick_params( labelsize=7 )
        ax1.text( 0.95, 1.01, unit,
                  fontsize=9, transform=ax1.transAxes,
                  horizontalalignment='left',
                  verticalalignment='bottom', )
        # NOTE(review): '%H%m%d' is hour+month+day (no minutes) -- confirm intended
        ofig = "1p_{0:}_{1:}_{2:}_{3:0=2}_{4:0=2}".format( nvar_, nvar2_,
                                                           vtime.strftime('v%H%m%d'), rank, m )
        print( ofig )
        if not quick:
            opath = "png/1p_mem" + stime.strftime('_s%H%m%d')
            os.makedirs(opath, exist_ok=True)
            ofig = os.path.join(opath, ofig + ".png")
            plt.savefig(ofig,bbox_inches="tight", pad_inches = 0.1)
            print(ofig)
            plt.clf()
        else:
            print(ofig)
            plt.show()
# --- experiment configuration; repeated assignments are deliberate toggles
# and only the LAST assignment of each name takes effect ---
stime = datetime( 2018, 6, 30, 0)
stime = datetime( 2018, 7, 1, 0)
#stime = datetime( 2018, 7, 2, 0)
stime = datetime( 2018, 6, 28, 0)  # effective initialization time
vtime_ref = datetime( 2018, 7, 6, 0 )
vtime_l = [
            datetime( 2018, 7, 3, 0, 0 ),
            datetime( 2018, 7, 4, 0, 0 ),
            datetime( 2018, 7, 5, 0, 0 ),
            datetime( 2018, 7, 6, 0, 0 ),
          ]
nvar_l = [
           "PW",
           "RAIN",
           "THE",
           "QV",
           "QV",
           "T",
           "T",
         ]
nvar2_l = [
           "MSLP",
           "MSLP",
           "Z",
           "MSLP",
           "Z",
           "Z",
           "Z",
          ]
# pressure levels paired with nvar_l entries by index
hpa_l = [ 950, 950, 950, 950, 500, 500, 300 ]
#nvar_l = [
#           #"RH",
#           "U",
#           "U",
#           #"QV",
#         ]
#
#nvar2_l = [
#           "Z",
#           "Z",
#           #"MSLP",
#          ]
#
##hpa_l = [ 850, 500, 300 ]
#hpa_l = [ 850, 850, ]
#hpa_l = [ 500, 500, ]
#hpa_l = [ 300, 300, ]
# the lists below OVERRIDE the longer ones above: only RAIN/MSLP is plotted
nvar_l = [
           "RAIN",
         ]
nvar2_l = [
           "MSLP",
          ]
vtime_l = [
#            datetime( 2018, 7, 1, 0, 0 ),
#            datetime( 2018, 7, 2, 0, 0 ),
#            datetime( 2018, 7, 3, 0, 0 ),
#            datetime( 2018, 7, 4, 0, 0 ),
#            datetime( 2018, 7, 5, 0, 0 ),
            datetime( 2018, 7, 6, 0, 0 ),
          ]
# driver: one run of main() per (vtime, variable pair)
for vtime in vtime_l:
    for i, nvar in enumerate( nvar_l ):
        nvar2 = nvar2_l[i]
        hpa = hpa_l[i]
        # use the same level for the contoured variable
        hpa2 = hpa
        main( stime=stime, vtime_ref=vtime_ref, vtime=vtime, nvar=nvar, nvar2=nvar2,
              hpa=hpa, hpa2=hpa2 )
|
<gh_stars>1-10
"""Test the Panasonic Viera config flow."""
from unittest.mock import patch
from panasonic_viera import SOAPError
from openpeerpower import config_entries
from openpeerpower.components.panasonic_viera.const import (
ATTR_DEVICE_INFO,
DEFAULT_NAME,
DOMAIN,
ERROR_INVALID_PIN_CODE,
)
from openpeerpower.const import CONF_PIN
from .conftest import (
MOCK_BASIC_DATA,
MOCK_CONFIG_DATA,
MOCK_DEVICE_INFO,
MOCK_ENCRYPTION_DATA,
get_mock_remote,
)
from tests.common import MockConfigEntry
async def test_flow_non_encrypted(opp):
    """Test flow without encryption."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    mock_remote = get_mock_remote(encrypted=False)
    # with a non-encrypted TV the user step completes the flow directly
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            MOCK_BASIC_DATA,
        )
    assert result["type"] == "create_entry"
    assert result["title"] == DEFAULT_NAME
    assert result["data"] == {**MOCK_CONFIG_DATA, ATTR_DEVICE_INFO: MOCK_DEVICE_INFO}
async def test_flow_not_connected_error(opp):
    """Test flow with connection error."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # constructing RemoteControl times out -> the user form reappears with an error
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        side_effect=TimeoutError,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            MOCK_BASIC_DATA,
        )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_unknown_abort(opp):
    """Test flow with unknown error abortion."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # an unexpected exception in RemoteControl aborts the flow entirely
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        side_effect=Exception,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            MOCK_BASIC_DATA,
        )
    assert result["type"] == "abort"
    assert result["reason"] == "unknown"
async def test_flow_encrypted_not_connected_pin_code_request(opp):
    """Test flow with encryption and PIN code request connection error abortion during pairing request step."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # the PIN request itself times out -> abort before the pairing form
    mock_remote = get_mock_remote(encrypted=True, request_error=TimeoutError)
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            MOCK_BASIC_DATA,
        )
    assert result["type"] == "abort"
    assert result["reason"] == "cannot_connect"
async def test_flow_encrypted_unknown_pin_code_request(opp):
    """Test flow with encryption and PIN code request unknown error abortion during pairing request step."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # an unexpected error while requesting the PIN -> abort with "unknown"
    mock_remote = get_mock_remote(encrypted=True, request_error=Exception)
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            MOCK_BASIC_DATA,
        )
    assert result["type"] == "abort"
    assert result["reason"] == "unknown"
async def test_flow_encrypted_valid_pin_code(opp):
    """Test flow with encryption and valid PIN code."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    mock_remote = get_mock_remote(
        encrypted=True,
        app_id="mock-app-id",
        encryption_key="mock-encryption-key",
    )
    # user step succeeds and the flow advances to the pairing (PIN) step
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            MOCK_BASIC_DATA,
        )
        assert result["type"] == "form"
        assert result["step_id"] == "pairing"
        # a valid PIN completes the entry with the encryption credentials
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "1234"},
        )
    assert result["type"] == "create_entry"
    assert result["title"] == DEFAULT_NAME
    assert result["data"] == {
        **MOCK_CONFIG_DATA,
        **MOCK_ENCRYPTION_DATA,
        ATTR_DEVICE_INFO: MOCK_DEVICE_INFO,
    }
async def test_flow_encrypted_invalid_pin_code_error(opp):
    """Test flow with encryption and invalid PIN code error during pairing step."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # authorize raises SOAPError -> the PIN is treated as invalid
    mock_remote = get_mock_remote(encrypted=True, authorize_error=SOAPError)
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            MOCK_BASIC_DATA,
        )
    assert result["type"] == "form"
    assert result["step_id"] == "pairing"
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "0000"},
        )
    # the pairing form is shown again with the invalid-PIN error
    assert result["type"] == "form"
    assert result["step_id"] == "pairing"
    assert result["errors"] == {"base": ERROR_INVALID_PIN_CODE}
async def test_flow_encrypted_not_connected_abort(opp):
    """Test flow with encryption and PIN code connection error abortion during pairing step."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # authorize times out -> abort instead of re-showing the pairing form
    mock_remote = get_mock_remote(encrypted=True, authorize_error=TimeoutError)
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            MOCK_BASIC_DATA,
        )
        assert result["type"] == "form"
        assert result["step_id"] == "pairing"
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "0000"},
        )
    assert result["type"] == "abort"
    assert result["reason"] == "cannot_connect"
async def test_flow_encrypted_unknown_abort(opp):
    """Test flow with encryption and PIN code unknown error abortion during pairing step."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    # an unexpected error during authorize -> abort with "unknown"
    mock_remote = get_mock_remote(encrypted=True, authorize_error=Exception)
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            MOCK_BASIC_DATA,
        )
        assert result["type"] == "form"
        assert result["step_id"] == "pairing"
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "0000"},
        )
    assert result["type"] == "abort"
    assert result["reason"] == "unknown"
async def test_flow_non_encrypted_already_configured_abort(opp):
    """Test flow without encryption and existing config entry abortion."""
    # pre-register an entry with the same unique_id so the flow must abort
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="0.0.0.0",
        data=MOCK_CONFIG_DATA,
    ).add_to_opp(opp)
    result = await opp.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_USER},
        data=MOCK_BASIC_DATA,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_flow_encrypted_already_configured_abort(opp):
    """Test flow with encryption and existing config entry abortion."""
    # pre-register an encrypted entry with the same unique_id
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="0.0.0.0",
        data={**MOCK_CONFIG_DATA, **MOCK_ENCRYPTION_DATA},
    ).add_to_opp(opp)
    result = await opp.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_USER},
        data=MOCK_BASIC_DATA,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_imported_flow_non_encrypted(opp):
    """Test imported flow without encryption."""
    mock_remote = get_mock_remote(encrypted=False)
    # a YAML import with a reachable, non-encrypted TV creates the entry directly
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=MOCK_CONFIG_DATA,
        )
    assert result["type"] == "create_entry"
    assert result["title"] == DEFAULT_NAME
    assert result["data"] == {**MOCK_CONFIG_DATA, ATTR_DEVICE_INFO: MOCK_DEVICE_INFO}
async def test_imported_flow_encrypted_valid_pin_code(opp):
    """Test imported flow with encryption and valid PIN code."""
    mock_remote = get_mock_remote(
        encrypted=True,
        app_id="mock-app-id",
        encryption_key="mock-encryption-key",
    )
    # an encrypted TV makes the import fall through to the pairing step
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=MOCK_CONFIG_DATA,
        )
        assert result["type"] == "form"
        assert result["step_id"] == "pairing"
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "1234"},
        )
    assert result["type"] == "create_entry"
    assert result["title"] == DEFAULT_NAME
    assert result["data"] == {
        **MOCK_CONFIG_DATA,
        **MOCK_ENCRYPTION_DATA,
        ATTR_DEVICE_INFO: MOCK_DEVICE_INFO,
    }
async def test_imported_flow_encrypted_invalid_pin_code_error(opp):
    """Test imported flow with encryption and invalid PIN code error during pairing step."""
    # authorize raises SOAPError -> the PIN is treated as invalid
    mock_remote = get_mock_remote(encrypted=True, authorize_error=SOAPError)
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=MOCK_CONFIG_DATA,
        )
    assert result["type"] == "form"
    assert result["step_id"] == "pairing"
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "0000"},
        )
    # the pairing form reappears with the invalid-PIN error
    assert result["type"] == "form"
    assert result["step_id"] == "pairing"
    assert result["errors"] == {"base": ERROR_INVALID_PIN_CODE}
async def test_imported_flow_encrypted_not_connected_abort(opp):
    """Test imported flow with encryption and PIN code connection error abortion during pairing step."""
    # authorize times out -> abort instead of re-showing the pairing form
    mock_remote = get_mock_remote(encrypted=True, authorize_error=TimeoutError)
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=MOCK_CONFIG_DATA,
        )
        assert result["type"] == "form"
        assert result["step_id"] == "pairing"
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "0000"},
        )
    assert result["type"] == "abort"
    assert result["reason"] == "cannot_connect"
async def test_imported_flow_encrypted_unknown_abort(opp):
    """Test imported flow with encryption and PIN code unknown error abortion during pairing step."""
    # an unexpected error during authorize -> abort with "unknown"
    mock_remote = get_mock_remote(encrypted=True, authorize_error=Exception)
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await opp.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=MOCK_CONFIG_DATA,
        )
        assert result["type"] == "form"
        assert result["step_id"] == "pairing"
        result = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "0000"},
        )
    assert result["type"] == "abort"
    assert result["reason"] == "unknown"
async def test_imported_flow_not_connected_error(opp):
    """Test imported flow with connection error abortion."""
    # connection timeout on import drops the user back to the manual form
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        side_effect=TimeoutError,
    ):
        result = await opp.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=MOCK_CONFIG_DATA,
        )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": "cannot_connect"}
async def test_imported_flow_unknown_abort(opp):
    """Test imported flow with unknown error abortion."""
    # an unexpected exception during import aborts the flow entirely
    with patch(
        "openpeerpower.components.panasonic_viera.config_flow.RemoteControl",
        side_effect=Exception,
    ):
        result = await opp.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=MOCK_CONFIG_DATA,
        )
    assert result["type"] == "abort"
    assert result["reason"] == "unknown"
async def test_imported_flow_non_encrypted_already_configured_abort(opp):
    """Test imported flow without encryption and existing config entry abortion."""
    # pre-register an entry with the same unique_id so the import must abort
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="0.0.0.0",
        data=MOCK_CONFIG_DATA,
    ).add_to_opp(opp)
    result = await opp.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_IMPORT},
        data=MOCK_BASIC_DATA,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_imported_flow_encrypted_already_configured_abort(opp):
    """Test imported flow with encryption and existing config entry abortion."""
    # pre-register an encrypted entry with the same unique_id
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="0.0.0.0",
        data={**MOCK_CONFIG_DATA, **MOCK_ENCRYPTION_DATA},
    ).add_to_opp(opp)
    result = await opp.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_IMPORT},
        data=MOCK_BASIC_DATA,
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
|
# ImageNet-CoG Benchmark
# Copyright 2021-present NAVER Corp.
# 3-Clause BSD License
import argparse
import copy
import logging
import math
import os
import shutil
import time
import optuna
import torch as th
import feature_ops
import metrics
import utils
from iterators import TorchIterator
from meters import AverageMeter, ProgressMeter
logger = logging.getLogger()
class LogReg:
"""
Logistic regression classifier with mini-batch SGD.
"""
    def __init__(self, args, cfg):
        """
        :param args: runtime arguments (feature paths, device, val flag, ...)
        :param cfg: configuration object with a CLF section
            (NORM_FTS, VAL_PERC, N_SHOT, ...)
        """
        self.args = args
        self.cfg = cfg
        # load the training set features
        trainset = feature_ops.load_feature_set(
            args.train_features_path, "train", cfg.CLF.NORM_FTS
        )
        if args.val:
            # randomly split the training set into train + val
            logger.info("Splitting the training set into train and val")
            trainset, testset = feature_ops.split_trainset(trainset, cfg.CLF.VAL_PERC)
        else:
            # load the test set
            testset = feature_ops.load_feature_set(args.test_features_path, "test", cfg.CLF.NORM_FTS)
        if cfg.CLF.N_SHOT > 0:
            # optionally subsample the training set to N_SHOT images per class
            logger.info(
                "Simulating few-shot learning setting, {} images per class.".format(
                    cfg.CLF.N_SHOT
                )
            )
            trainset = feature_ops.make_fewshot_dataset(trainset, cfg.CLF.N_SHOT)
        self.trainset = trainset
        self.testset = testset
        self.trainset.print_info()
        self.testset.print_info()
        # determine number of classes from 1-D label tensors; train and test
        # must contain exactly the same label set
        if len(list(self.trainset.y.shape)) == 1:
            classes = th.unique(self.trainset.y)
            assert th.all(classes == th.unique(self.testset.y))
            args.n_classes = classes.size(0)
        # move all features to the device
        if args.device == "cuda":
            feature_ops.move_data_to_cuda([self.trainset, self.testset])
def __call__(self, trial=None):
"""
The function called by Optuna.
"""
# empty the cache allocated in the previous call
th.cuda.empty_cache()
args = copy.deepcopy(self.args)
cfg = self.cfg
x_train = self.trainset.x
y_train = self.trainset.y
x_test = self.testset.x
y_test = self.testset.y
# create training and test set iterators
train_iter = TorchIterator((x_train, y_train), cfg.CLF.BATCH_SIZE, shuffle=True)
test_iter = TorchIterator((x_test, y_test), cfg.CLF.BATCH_SIZE, shuffle=False)
# define logistic classifier
model = th.nn.Linear(x_train.size(1), args.n_classes).to(args.device)
crit = th.nn.CrossEntropyLoss().to(args.device)
# sample a learning rate and weight decay
if trial is not None:
lr_intv = cfg.CLF.LR_INTV
wd_intv = cfg.CLF.WD_INTV
args.lr = trial.suggest_loguniform("lr", lr_intv[0], lr_intv[1])
args.wd = trial.suggest_loguniform("wd", wd_intv[0], wd_intv[1])
optim = th.optim.SGD(
model.parameters(), lr=args.lr, momentum=args.mom, weight_decay=args.wd
)
args.exp_dir = os.path.join(
args.output_dir,
"{}-lr-{}_wd-{}".format("val" if args.val else "final", args.lr, args.wd),
)
os.makedirs(args.exp_dir, exist_ok=True)
# write the model definition into exp_dir
utils.write_to_file(str(model), os.path.join(args.exp_dir, "model.txt"))
# logs computed during training / evaluation
args.logs = {
"train/loss": [],
"train/top1": [],
"train/top5": [],
"test/loss": [],
"test/top1": [],
"test/top5": [],
"lr": [],
}
# predictions over the evaluation sets
args.preds = []
for epoch in range(cfg.CLF.N_EPOCHS):
if not args.val:
logger.info(f"**Epoch:{epoch}**")
args.epoch = epoch
train_stat = train(train_iter, model, crit, optim, epoch, args)
validate(test_iter, model, crit, args)
adjust_learning_rate(optim, args, cfg)
# if something went wrong during training
# e.g. SGD diverged
if train_stat == -1:
break
# save the logs
utils.save_pickle(args.logs, f"{args.exp_dir}/logs.pkl")
# save the predictions
utils.save_pickle(args.preds, f"{args.exp_dir}/preds.pkl")
# save the whole args, for ease of access
utils.save_pickle(vars(args), f"{args.exp_dir}/args.pkl")
# save also the final model
th.save(
{
"model": model.state_dict(),
},
f"{args.exp_dir}/model.pth",
)
# return the last test accuracy
return args.logs["test/top1"][-1]
def train(train_loader, model, criterion, optimizer, epoch, args):
    """
    Train the classifier for one epoch.

    Returns 0 on success, or -1 if the loss became non-finite (e.g. SGD
    diverged), in which case the epoch is aborted and nothing is logged.
    Appends the epoch-average loss/top1/top5 to ``args.logs``.
    """
    batch_time = AverageMeter("Time", ":6.3f")
    losses = AverageMeter("Loss", ":.4e")
    top1 = AverageMeter("Acc@1", ":6.2f")
    top5 = AverageMeter("Acc@5", ":6.2f")
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(epoch),
    )
    # switch to train mode
    model.train()
    end = time.time()
    for i, (fts, lbls) in enumerate(train_loader):
        fts = fts.to(args.device)
        lbls = lbls.to(args.device)
        # compute output
        output = model(fts)
        loss = criterion(output, lbls)
        # bail out early on NaN/Inf loss so the caller can stop the trial
        if not th.isfinite(loss):
            logger.info("Loss ({}) is not finite, terminating".format(loss.item()))
            optimizer.zero_grad()
            return -1
        # measure accuracy and record loss
        acc1, acc5 = metrics.accuracy(output, lbls, topk=(1, 5))
        losses.update(loss.item(), fts.size(0))
        top1.update(acc1.item(), fts.size(0))
        top5.update(acc5.item(), fts.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # progress output is suppressed during hyper-parameter search (args.val)
        if (not args.val) and (i % args.print_freq == 0):
            progress.display(i)
    args.logs["train/loss"].append(losses.avg)
    args.logs["train/top1"].append(top1.avg)
    args.logs["train/top5"].append(top5.avg)
    return 0
def validate(val_loader, model, criterion, args):
    """
    Evaluate ``model`` on ``val_loader``.

    Appends the average loss/top1/top5 to ``args.logs`` and the per-sample
    predicted classes (as a CPU tensor) to ``args.preds``.
    """
    losses = AverageMeter("Loss", ":.4e")
    top1 = AverageMeter("Acc@1", ":6.2f")
    top5 = AverageMeter("Acc@5", ":6.2f")
    # switch to evaluate mode
    model.eval()
    # keep predictions per class, initialized to -1 so unfilled slots are detectable
    # NOTE(review): multiplying the int32 tensor by the float -1. promotes
    # `preds` to a float tensor, so the int32 dtype is effectively discarded —
    # confirm whether integer predictions were intended (`* -1` would keep int32).
    preds = th.ones(len(val_loader.tensors[0]), dtype=th.int32, device=args.device) * -1.
    six = 0
    with th.no_grad():
        for i, (fts, lbls) in enumerate(val_loader):
            fts = fts.to(args.device)
            lbls = lbls.to(args.device)
            bs = fts.size(0)
            # compute output
            output = model(fts)
            loss = criterion(output, lbls)
            # store the predicted classes
            preds[six:six + bs] = th.argmax(output, dim=1)
            six += bs
            # measure accuracy and record loss
            # NOTE(review): indexes acc1[0]/acc5[0] while train() uses
            # acc1.item() directly — presumably both work on 0-dim/1-elem
            # tensors returned by metrics.accuracy; verify consistency.
            acc1, acc5 = metrics.accuracy(output, lbls, topk=(1, 5))
            losses.update(loss.item(), bs)
            top1.update(acc1[0].item(), bs)
            top5.update(acc5[0].item(), bs)
    # make sure that there is no invalid prediction
    assert th.all(preds >= 0).item()
    args.preds.append(preds.detach().cpu())
    args.logs["test/loss"].append(losses.avg)
    args.logs["test/top1"].append(top1.avg)
    args.logs["test/top5"].append(top5.avg)
    if not args.val:
        logger.info(
            " * Acc@1:{top1.avg:.3f} - Acc@5:{top5.avg:.3f}".format(
                top1=top1, top5=top5
            )
        )
def adjust_learning_rate(optimizer, args, cfg):
    """Decay the learning rate based on cosine schedule.

    Anneals from ``args.lr`` at epoch 0 towards 0 at ``cfg.CLF.N_EPOCHS``,
    writes the new rate into every parameter group, and records it in
    ``args.logs["lr"]``.
    """
    cosine_factor = 0.5 * (1.0 + math.cos(math.pi * args.epoch / cfg.CLF.N_EPOCHS))
    new_lr = args.lr * cosine_factor
    for group in optimizer.param_groups:
        group["lr"] = new_lr
    args.logs["lr"].append(new_lr)
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
    """Serialize ``state`` to ``filename``.

    When ``is_best`` is true, also mirror the file to ``model_best.pth.tar``
    so the best checkpoint so far is always available under a stable name.
    """
    th.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, "model_best.pth.tar")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=utils.none_or_string_flag,
help='Name of the model in the <model_title>_<architecture_name> form.'
'See the table of models in ./prepare_models/README.md for all the model names we support.'
'This is an optional argument that needs to be set along with --models_root_dir and --dataset.'
'When these three arguments are set, the script will load features from:'
'<models_root_dir>/<model_title>/<architecture_name>/<dataset>/features_*/X_Y.pth.'
'If you would like to load pre-extracted features from somewhere else'
'then ignore this argument and provide the --train_features_dir and --test_features_dir arguments accordingly')
parser.add_argument('--models_root_dir', type=utils.none_or_string_flag,
help='Root directory for all models, see prepare_models/README.md for a detailed explanation.'
'This is an optional argument that needs to be set along with --model and --dataset.'
'Please see the help message for the --model argument as well.')
parser.add_argument("--dataset", type=utils.none_or_string_flag,
help="On which dataset to learn classifiers"
'Possible values are ("in1k", "cog_l1", "cog_l2", "cog_l3", "cog_l4", "cog_l5")'
'This is an optional argument that needs to be set along with --models_root_dir and --model.'
'Please see the help message for the --model argument as well.')
parser.add_argument('--train_features_dir', type=utils.none_or_string_flag,
help='Path to the directory containing pre-extracted training set features.'
'We expect a features file "X_Y.pth" under <train_features_dir>.'
'This is an optional argument that needs to be set if --models_root_dir, --model and --dataset are not set.')
parser.add_argument('--test_features_dir', type=utils.none_or_string_flag,
help='Path to the directory containing pre-extracted test set features.'
'We expect a features file "X_Y.pth" under <test_features_dir>.'
'This is an optional argument that needs to be set if --models_root_dir, --model and --dataset are not set.')
parser.add_argument('--output_dir', type=utils.none_or_string_flag,
help='Where to log program logs.'
'This is an optional argument that needs to be set if --models_root_dir is not set.'
'If not provided, we try to save the logs under'
'<models_root_dir>/<model_title>/<architecture_name>/<dataset>/eval_logreg/seed*')
# learning rate and momentum are tuned in this program, do not manually set.
parser.add_argument("--lr", type=float, default=0.0, help="initial learning rate")
parser.add_argument("--wd", type=float, default=0.0, help="weight decay")
parser.add_argument("--mom", type=float, default=0.9, help="momentum")
# program-related options
parser.add_argument("--print_freq", default=100, type=int, help="print frequency (default: 10)")
parser.add_argument("--device", type=str, default="cuda")
# optionally to overwrite the default config
parser.add_argument("opts", default=None,
help="see configs/default.py for all options",
nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.device == "cuda" and not th.cuda.is_available():
print("CUDA is not available, I will run on CPU.")
args.device = "cpu"
# load the config file
# create output directory,
# locate pre-extracted features,
# initialize program logger,
# save args and cfg
# this function sets the following arg variables:
# - train_features_path, type=str
# - test_features_path, type=str
# - output_dir, type=str
args, cfg = utils.init_program(args, _for="logreg")
# tune hyper-parameters with optuna
logger.info("Running Optuna...")
hps_sampler = optuna.samplers.TPESampler(multivariate=True, seed=cfg.EVAL.SEED)
study = optuna.create_study(sampler=hps_sampler, direction="maximize")
args.val = True
logreg = LogReg(args, cfg)
study.optimize(logreg, n_trials=cfg.CLF.N_TRIALS, n_jobs=1, show_progress_bar=False)
utils.save_pickle(study, os.path.join(args.output_dir, "study.pkl"))
logger.info("")
logger.info("*" * 50)
logger.info("Hyper-parameter search ended")
logger.info("best_trial:")
logger.info(str(study.best_trial))
logger.info("best_params:")
logger.info(str(study.best_params))
logger.info("*" * 50)
logger.info("")
# train the final classifier with the tuned hyper-parameters
del logreg
th.cuda.empty_cache()
args.lr = study.best_params["lr"]
args.wd = study.best_params["wd"]
args.val = False
logreg = LogReg(args, cfg)
logreg()
|
<reponame>OlafTitz/vdrnfofs<filename>vdrnfofs/vdrnfofs.py
# -*- coding: utf-8 -*-
#
# VDR-NFO-FS creates a file system for VDR recordings, which maps each
# recording to a single mpg-file and nfo-file containing some meta data.
#
# Copyright (c) 2010 - 2011 by <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author/copyright holder nor the names of
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import stat
import errno
import sys
import fuse
import traceback
import logging
from concatenated_file_reader import *
from vdr import *
from filesystemnodes import *
from nodecache import *
fuse.fuse_python_api = (0, 2)
def format_exception_info(level = 6):
    """Return a human-readable summary of the exception currently being handled.

    ``level`` limits the number of traceback frames included.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    frames = traceback.format_tb(exc_tb, level)
    return 'Error: %s \nDescription: %s \nTraceback: %s' % (exc_type.__name__, exc_value, '\n'.join(frames))
def get_node(video, path):
    """Map a virtual filesystem path to a node object.

    ``/<name>_<datedir>.mpg`` / ``.nfo`` map to MpgNode/NfoNode backed by the
    VDR recording directory ``<video>/<name>/<datedir>``; any other path maps
    to a DirNode if the corresponding real directory exists. Returns None for
    paths that do not correspond to an existing recording or directory.
    """
    stem, extension = os.path.splitext(path)
    if extension in ['.mpg', '.nfo']:
        sep = stem.rfind('_')
        if sep > 0:
            # split "<name>_<datedir>" back into the two path components
            recording_dir = '/'.join((video, stem[1:sep], stem[sep + 1:]))
            if not os.path.isdir(recording_dir):
                return None
            if extension == '.mpg':
                return MpgNode(recording_dir)
            if extension == '.nfo':
                return NfoNode(recording_dir)
    else:
        real_dir = video + path
        if os.path.isdir(real_dir):
            return DirNode(real_dir)
    return None
class VdrNfoFsFile:
    """File object handed out by python-fuse for the virtual .mpg/.nfo files.

    The class attribute ``video_root`` is injected by VdrNfoFs.main() before
    the filesystem is mounted.
    """
    def __init__(self, path, flags, *mode):
        self.path = path
        # resolve the virtual path to its backing node (None if unknown)
        self.node = get_node(VdrNfoFsFile.video_root, path)
        # fuse hints: allow the kernel page cache, no direct I/O
        self.keep_cache = True
        self.direct_io = False
    def read(self, size, offset):
        # Return up to `size` bytes at `offset`, or -ENOENT for unknown paths.
        try:
            if not self.node:
                return -errno.ENOENT
            return self.node.read(offset, size)
        except:
            logging.error('VdrFuseFs: Unexpected error for read(%s): %s' % (self.path, format_exception_info()))
    def release(self, flags):
        # Let the node close any underlying file handles.
        self.node.release()
    # Remaining python-fuse file hooks are intentionally unimplemented
    # (the filesystem is read-only):
    # def write(self, buf, offset):
    # return 0
    # def _fflush(self):
    # if 'w' in self.file.mode or 'a' in self.file.mode:
    # self.file.flush()
    # def fsync(self, isfsyncfile):
    # def flush(self):
    # def fgetattr(self):
    # return 0
    # def ftruncate(self, len):
    # def lock(self, cmd, owner, **kw):
class VdrNfoFs(fuse.Fuse):
    """FUSE filesystem exposing VDR recordings as virtual .mpg/.nfo files.

    The ``video``, ``log`` and ``loglevel`` attributes are filled in by the
    fuse option parser (see main()); nodes are memoized in a NodeCache.
    """
    def __init__(self, *args, **kw):
        fuse.Fuse.__init__(self, *args, **kw)
        self.video = ''
        self.log = ''
        self.loglevel = 'info'
        self.cache = NodeCache()
    def getattr(self, path):
        # Stat a virtual path via its node; -ENOENT when it doesn't resolve.
        try:
            node = self.cache.get(path, lambda x: get_node(self.video, x))
            if node:
                return node.get_stat()
            return -errno.ENOENT
        except:
            logging.error('VdrFuseFs: Unexpected error for getattr(%s): %s' % (path, format_exception_info()))
    def readdir(self, path, offset):
        # Yield '.', '..' and one entry per child of the resolved node.
        try:
            yield fuse.Direntry('.')
            yield fuse.Direntry('..')
            node = self.cache.get(path, lambda x: get_node(self.video, x))
            if node:
                for item in node.content():
                    yield fuse.Direntry(item.file_system_name())
        except:
            logging.error('VdrFuseFs: Unexpected error for readdir(%s): %s' % (path, format_exception_info()))
    def main(self, *a, **kw):
        # Configure logging (file or console), publish the video root to the
        # file class, then hand control to the fuse main loop.
        if self.log and self.log != None:
            logging.basicConfig(filename=self.log, level=getattr(logging, self.loglevel.upper()))
        else:
            logging.basicConfig(level=self.loglevel.upper())
        logging.info('Starting vdrnfofs')
        VdrNfoFsFile.video_root = self.video
        self.file_class = VdrNfoFsFile
        return fuse.Fuse.main(self, *a, **kw)
def main():
    """Parse mount options and start the VDR-NFO-FS filesystem."""
    usage = "\nVDR-NFO-FS - access VDR recordings as mpg and nfo files\n" + fuse.Fuse.fusage
    version = "%prog " + fuse.__version__
    fs = VdrNfoFs(version=version, usage=usage, dash_s_do='setsingle')
    fs.multithreaded = False
    # -o style mount options; parsed values land as attributes on `fs`
    for name, default, text in (
        ("video", '', "The video directory containing the VDR recordings"),
        ("log", '', "The log file (default = console)"),
        ("loglevel", 'info', "The log level (debug, info, warning or error)"),
    ):
        fs.parser.add_option(mountopt=name, default=default, help=text)
    fs.parse(values=fs, errex=1)
    fs.main()

if __name__ == '__main__':
    main()
|
# imported from github.com/ravana69/PornHub to userbot by @heyworld
# please don't nuke my credits 😓
import asyncio
import logging
import os
import time
from datetime import datetime
from urllib.parse import quote
import bs4
import requests
from justwatch import JustWatch
from telethon import *
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl import functions
from telethon.tl.functions.contacts import UnblockRequest
from telethon.tl.types import (
ChatBannedRights,
UserStatusEmpty,
UserStatusLastMonth,
UserStatusLastWeek,
UserStatusOffline,
UserStatusOnline,
UserStatusRecently,
)
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, SUDO_USERS, TEMP_DOWNLOAD_DIRECTORY
from userbot.utils import edit_delete, edit_or_reply, ayiin_cmd
# Plain latin letters and their "weeb" (CJK look-alike) counterparts,
# index-aligned so normiefont[i] maps to weebyfont[i].
normiefont = list("abcdefghijklmnopqrstuvwxyz")
weebyfont = [
    "卂", "乃", "匚", "刀", "乇", "下", "厶", "卄", "工", "丁",
    "长", "乚", "从", "𠘨", "口", "尸", "㔿", "尺", "丂", "丅",
    "凵", "リ", "山", "乂", "丫", "乙",
]
logger = logging.getLogger(__name__)
thumb_image_path = TEMP_DOWNLOAD_DIRECTORY + "/thumb_image.jpg"
name = "Profile Photos"
@ayiin_cmd(pattern="app(?: |$)(.*)")
async def apk(e):
xx = await edit_or_reply(e, "`Processing...`")
try:
app_name = e.pattern_match.group(1)
remove_space = app_name.split(" ")
final_name = "+".join(remove_space)
page = requests.get(
"https://play.google.com/store/search?q=" + final_name + "&c=apps"
)
str(page.status_code)
soup = bs4.BeautifulSoup(page.content, "lxml", from_encoding="utf-8")
results = soup.findAll("div", "ZmHEEd")
app_name = (
results[0].findNext(
"div",
"Vpfmgd").findNext(
"div",
"WsMG1c nnK0zc").text)
app_dev = results[0].findNext(
"div", "Vpfmgd").findNext(
"div", "KoLSrc").text
app_dev_link = (
"https://play.google.com" +
results[0].findNext(
"div",
"Vpfmgd").findNext(
"a",
"mnKHRc")["href"])
app_rating = (
results[0]
.findNext("div", "Vpfmgd")
.findNext("div", "pf5lIe")
.find("div")["aria-label"]
)
app_link = (
"https://play.google.com"
+ results[0]
.findNext("div", "Vpfmgd")
.findNext("div", "vU6FJ p63iDd")
.a["href"]
)
app_icon = (
results[0]
.findNext("div", "Vpfmgd")
.findNext("div", "uzcko")
.img["data-src"]
)
app_details = "<a href='" + app_icon + "'>📲​</a>"
app_details += "<b>" + app_name + "</b>"
app_details += ("\n\n<b>Developer :</b> <a href='" +
app_dev_link + "'>" + app_dev + "</a>")
app_details += "\n<b>Rating :</b> " + app_rating.replace(
"Rated ", "⭐ "
).replace(" out of ", "/").replace(" stars", "", 1).replace(
" stars", "⭐ "
).replace(
"five", "5"
)
app_details += ("\n<b>Features :</b> <a href='" +
app_link + "'>View in Play Store</a>")
app_details += "\n\n===> Support @AyiinSupport <==="
await xx.edit(app_details, link_preview=True, parse_mode="HTML")
except IndexError:
await edit_delete(
xx, "**Pencarian tidak ditemukan. Mohon masukkan** `Nama app yang valid`"
)
except Exception as err:
await edit_delete(xx, "Exception Occured:- " + str(err))
@ayiin_cmd(pattern="calc(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
input = event.pattern_match.group(1) # get input
exp = "Given expression is " + input # report back input
xx = await edit_or_reply(event, "`Processing...`")
# lazy workaround to add support for two digits
final_input = tuple(input)
term1part1 = final_input[0]
term1part2 = final_input[1]
term1 = str(term1part1) + str(term1part2)
final_term1 = int(term1)
operator = str(final_input[2])
term2part1 = final_input[3]
term2part2 = final_input[4]
term2 = str(term2part1) + str(term2part2)
final_term2 = int(term2)
# actual calculations go here
if input == "help":
await xx.edit(
"Syntax .calc <term1><operator><term2>\nFor eg .calc 02*02 or 99*99 (the zeros are important) (two terms and two digits max)"
)
elif operator == "*":
await xx.edit("Solution -->\n" + exp + "\n" + str(final_term1 * final_term2))
elif operator == "-":
await xx.edit("Solution -->\n" + exp + "\n" + str(final_term1 - final_term2))
elif operator == "+":
await xx.edit("Solution -->\n" + exp + "\n" + str(final_term1 + final_term2))
elif operator == "/":
await xx.edit("Solution -->\n" + exp + "\n" + str(final_term1 / final_term2))
elif operator == "%":
await xx.edit("Solution -->\n" + exp + "\n" + str(final_term1 % final_term2))
else:
await xx.edit("**Ketik** `$help calc` **bila butuh bantuan**")
@ayiin_cmd(pattern="xcd(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
xkcd_id = None
xx = await edit_or_reply(event, "`Processing...`")
if input_str:
if input_str.isdigit():
xkcd_id = input_str
else:
xkcd_search_url = "https://relevantxkcd.appspot.com/process?"
queryresult = requests.get(
xkcd_search_url,
params={
"action": "xkcd",
"query": quote(input_str)}).text
xkcd_id = queryresult.split(" ")[2].lstrip("\n")
if xkcd_id is None:
xkcd_url = "https://xkcd.com/info.0.json"
else:
xkcd_url = "https://xkcd.com/{}/info.0.json".format(xkcd_id)
r = requests.get(xkcd_url)
if r.ok:
data = r.json()
year = data.get("year")
month = data["month"].zfill(2)
day = data["day"].zfill(2)
xkcd_link = "https://xkcd.com/{}".format(data.get("num"))
safe_title = data.get("safe_title")
data.get("transcript")
alt = data.get("alt")
img = data.get("img")
data.get("title")
output_str = """[\u2060]({})**{}**
[XKCD ]({})
Title: {}
Alt: {}
Day: {}
Month: {}
Year: {}""".format(
img, input_str, xkcd_link, safe_title, alt, day, month, year
)
await xx.edit(output_str, link_preview=True)
else:
await edit_delete(xx, "xkcd n.{} not found!".format(xkcd_id))
@ayiin_cmd(pattern="remove(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
if event.is_private:
return False
if event.sender_id in SUDO_USERS:
return
input_str = event.pattern_match.group(1)
if input_str:
chat = await event.get_chat()
if not (chat.admin_rights or chat.creator):
await edit_delete(event, "`Anda Bukan Admin Disini!`")
return False
p = 0
b = 0
c = 0
d = 0
e = []
m = 0
n = 0
y = 0
w = 0
o = 0
q = 0
r = 0
xx = await edit_or_reply(event, "`Mencari Daftar Peserta....`")
async for i in event.client.iter_participants(event.chat_id):
p += 1
#
# Note that it's "reversed". You must set to ``True`` the permissions
# you want to REMOVE, and leave as ``None`` those you want to KEEP.
rights = ChatBannedRights(until_date=None, view_messages=True)
if isinstance(i.status, UserStatusEmpty):
y += 1
if "y" in input_str:
status, e = await ban_user(event.chat_id, i, rights)
if not status:
await edit_delete(
xx,
"**Saya memerlukan hak admin untuk melakukan tindakan ini!**",
)
e.append(str(e))
break
c += 1
if isinstance(i.status, UserStatusLastMonth):
m += 1
if "m" in input_str:
status, e = await ban_user(event.chat_id, i, rights)
if not status:
await edit_delete(
xx,
"**Saya memerlukan hak admin untuk melakukan tindakan ini!**",
)
e.append(str(e))
break
c += 1
if isinstance(i.status, UserStatusLastWeek):
w += 1
if "w" in input_str:
status, e = await ban_user(event.chat_id, i, rights)
if not status:
await edit_delete(
xx,
"**Saya memerlukan hak admin untuk melakukan tindakan ini!**",
)
e.append(str(e))
break
c += 1
if isinstance(i.status, UserStatusOffline):
o += 1
if "o" in input_str:
status, e = await ban_user(event.chat_id, i, rights)
if not status:
await edit_delete(
xx,
"**Saya memerlukan hak admin untuk melakukan tindakan ini!**",
)
e.append(str(e))
break
c += 1
if isinstance(i.status, UserStatusOnline):
q += 1
if "q" in input_str:
status, e = await ban_user(event.chat_id, i, rights)
if not status:
await event.edit(
"**Saya memerlukan hak admin untuk melakukan tindakan ini!**"
)
e.append(str(e))
break
c += 1
if isinstance(i.status, UserStatusRecently):
r += 1
if "r" in input_str:
status, e = await ban_user(event.chat_id, i, rights)
if not status:
await edit_delete(
xx,
"**Saya memerlukan hak admin untuk melakukan tindakan ini!**",
)
e.append(str(e))
break
c += 1
if i.bot:
b += 1
if "b" in input_str:
status, e = await ban_user(event.chat_id, i, rights)
if not status:
await edit_delete(
xx,
"**Saya memerlukan hak admin untuk melakukan tindakan ini!**",
)
e.append(str(e))
break
c += 1
elif i.deleted:
d += 1
if "d" in input_str:
status, e = await ban_user(event.chat_id, i, rights)
if not status:
await edit_delete(
xx,
"**Saya memerlukan hak admin untuk melakukan tindakan ini!**",
)
e.append(str(e))
else:
c += 1
elif i.status is None:
n += 1
if input_str:
required_string = """Kicked {} / {} users
Deleted Accounts: {}
UserStatusEmpty: {}
UserStatusLastMonth: {}
UserStatusLastWeek: {}
UserStatusOffline: {}
UserStatusOnline: {}
UserStatusRecently: {}
Bots: {}
None: {}"""
await xx.edit(required_string.format(c, p, d, y, m, w, o, q, r, b, n))
await asyncio.sleep(5)
await event.edit(
"""Total= {} users
Number Of Deleted Accounts= {}
Status: Empty= {}
: Last Month= {}
: Last Week= {}
: Offline= {}
: Online= {}
: Recently= {}
Number Of Bots= {}
Unidentified= {}""".format(
p, d, y, m, w, o, q, r, b, n
)
)
async def ban_user(chat_id, i, rights):
    """Apply ``rights`` to participant ``i`` in ``chat_id``.

    Returns (True, None) on success or (False, error-string) on failure.
    NOTE(review): ``client`` is not defined anywhere in this module, so this
    call raises NameError at runtime, which the except clause converts into
    (False, "name 'client' is not defined") — presumably this should be the
    userbot client or a client passed in by the caller; confirm and fix.
    """
    try:
        await client(functions.channels.EditBannedRequest(chat_id, i, rights))
        return True, None
    except Exception as exc:
        return False, str(exc)
@ayiin_cmd(pattern="rnupload(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
thumb = thumb_image_path if os.path.exists(thumb_image_path) else None
xx = await edit_or_reply(event, "`Rename & Upload in processing ....`")
input_str = event.pattern_match.group(1)
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
if event.reply_to_msg_id:
start = datetime.now()
end = datetime.now()
file_name = input_str
reply_message = await event.get_reply_message()
to_download_directory = TEMP_DOWNLOAD_DIRECTORY
downloaded_file_name = os.path.join(to_download_directory, file_name)
downloaded_file_name = await event.client.download_media(
reply_message,
downloaded_file_name,
)
ms_one = (end - start).seconds
if os.path.exists(downloaded_file_name):
time.time()
await event.client.send_file(
event.chat_id,
downloaded_file_name,
force_document=True,
supports_streaming=False,
allow_cache=False,
reply_to=event.message.id,
thumb=thumb,
)
end_two = datetime.now()
os.remove(downloaded_file_name)
ms_two = (end_two - end).seconds
await xx.edit(
"**Downloaded in** `{}` **seconds. Uploaded in** `{}` **seconds**".format(
ms_one, ms_two
)
)
else:
await edit_delete(xx, "File Not Found {}".format(input_str))
else:
await edit_delete(xx, "Syntax // .rnupload filename.extension <reply ke media>")
@ayiin_cmd(pattern="grab(?: |$)(.*)")
async def potocmd(event):
id = "".join(event.raw_text.split(maxsplit=2)[1:])
user = await event.get_reply_message()
chat = event.input_chat
xx = await edit_or_reply(event, "`Processing...`")
if user:
photos = await event.client.get_profile_photos(user.sender)
else:
photos = await event.client.get_profile_photos(chat)
if id.strip() == "":
try:
await event.client.send_file(event.chat_id, photos)
except a:
photo = await event.client.download_profile_photo(chat)
await event.client.send_file(event.chat_id, photo)
else:
try:
id = int(id)
if id <= 0:
return await edit_delete(
xx, "**Nomer ID Yang Anda Masukkan Tidak Valid**"
)
except BaseException:
return await edit_delete(xx, "**Lmao**")
if int(id) <= (len(photos)):
send_photos = await event.client.download_media(photos[id - 1])
await event.client.send_file(event.chat_id, send_photos)
await xx.delete()
else:
return await edit_delete(xx, "**Tidak Dapat Menemukan Foto Pengguna Ini**")
@ayiin_cmd(pattern="res(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
if not event.reply_to_msg_id:
return await edit_delete(event, "**Mohon Balas Ke Link.**")
reply_message = await event.get_reply_message()
if not reply_message.text:
return await edit_delete(event, "**Mohon Balas Ke Link.**")
chat = "@CheckRestrictionsBot"
xx = await edit_or_reply(event, "`Processing...`")
async with event.client.conversation(chat) as conv:
try:
response = conv.wait_event(
events.NewMessage(incoming=True, from_users=894227130)
)
await event.client.forward_messages(chat, reply_message)
response = await response
except YouBlockedUserError:
await event.client(UnblockRequest(chat))
await event.client.forward_messages(chat, reply_message)
response = await response
if response.text.startswith(""):
await event.edit("**Terjadi Error**")
else:
await xx.delete()
await event.client.send_message(event.chat_id, response.message)
await event.client.delete_message(chat, event.chat_id, response.message)
def get_stream_data(query):
    """Look up a movie/show on JustWatch and collect its streaming info.

    Returns a dict with title, thumbnail URL, release year/date, type,
    a provider-name -> URL mapping, and tmdb/imdb scores (when present).
    """
    stream_data = {}
    # Compatibility for Current Userge Users
    # NOTE(review): `Config` is not defined in this module, so this always
    # raises NameError and falls back to "IN" — confirm whether a real
    # config object was meant to be imported here.
    try:
        country = Config.WATCH_COUNTRY
    except Exception:
        country = "IN"
    # Cooking Data
    just_watch = JustWatch(country=country)
    results = just_watch.search_for_item(query=query)
    # take the top search hit only
    movie = results["items"][0]
    stream_data["title"] = movie["title"]
    # "{profile}" is a size placeholder in the poster URL template
    stream_data["movie_thumb"] = (
        "https://images.justwatch.com"
        + movie["poster"].replace("{profile}", "")
        + "s592"
    )
    stream_data["release_year"] = movie["original_release_year"]
    # prefer the cinema release date, then the localized one, else None
    try:
        print(movie["cinema_release_date"])
        stream_data["release_date"] = movie["cinema_release_date"]
    except KeyError:
        try:
            stream_data["release_date"] = movie["localized_release_date"]
        except KeyError:
            stream_data["release_date"] = None
    stream_data["type"] = movie["object_type"]
    available_streams = {}
    for provider in movie["offers"]:
        provider_ = get_provider(provider["urls"]["standard_web"])
        available_streams[provider_] = provider["urls"]["standard_web"]
    stream_data["providers"] = available_streams
    scoring = {}
    for scorer in movie["scoring"]:
        if scorer["provider_type"] == "tmdb:score":
            scoring["tmdb"] = scorer["value"]
        if scorer["provider_type"] == "imdb:score":
            scoring["imdb"] = scorer["value"]
    stream_data["score"] = scoring
    return stream_data
# Helper Functions
def pretty(name):
    """Capitalize a provider slug; 'play' is special-cased to its full name."""
    display = "Google Play Movies" if name == "play" else name
    return display[0].upper() + display[1:]
def get_provider(url):
    """Extract the provider slug from a streaming URL.

    Strips the scheme and optional "www." prefix, then returns everything
    before the first dot of what remains.
    """
    for prefix in ("https://www.", "https://", "http://www.", "http://"):
        url = url.replace(prefix, "")
    return url.split(".")[0]
@ayiin_cmd(pattern="watch(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
query = event.pattern_match.group(1)
xx = await edit_or_reply(event, "`Processing...`")
streams = get_stream_data(query)
title = streams["title"]
thumb_link = streams["movie_thumb"]
release_year = streams["release_year"]
release_date = streams["release_date"]
scores = streams["score"]
try:
imdb_score = scores["imdb"]
except KeyError:
imdb_score = None
try:
tmdb_score = scores["tmdb"]
except KeyError:
tmdb_score = None
stream_providers = streams["providers"]
if release_date is None:
release_date = release_year
output_ = f"**Movie:**\n`{title}`\n**Release Date:**\n`{release_date}`"
if imdb_score:
output_ = output_ + f"\n**IMDB: **{imdb_score}"
if tmdb_score:
output_ = output_ + f"\n**TMDB: **{tmdb_score}"
output_ = output_ + "\n\n**Available on:**\n"
for provider, link in stream_providers.items():
if "sonyliv" in link:
link = link.replace(" ", "%20")
output_ += f"[{pretty(provider)}]({link})\n"
await event.client.send_file(
event.chat_id,
caption=output_,
file=thumb_link,
force_document=False,
allow_cache=False,
silent=True,
)
await xx.delete()
# credits:
# Ported from Saitama Bot.
# By :- @PhycoNinja13b
# Modified by :- @kirito6969,@deleteduser420
@ayiin_cmd(pattern="weeb(?: |$)(.*)")
async def weebify(event):
args = event.pattern_match.group(1)
if not args:
get = await event.get_reply_message()
args = get.text
if not args:
await event.edit("**Teks Apa Yang Harus Saya Weebify Kan?**")
return
string = " ".join(args).lower()
for normiecharacter in string:
if normiecharacter in normiefont:
weebycharacter = weebyfont[normiefont.index(normiecharacter)]
string = string.replace(normiecharacter, weebycharacter)
await event.edit(string)
# Mathematical sans-serif bold letters, index-aligned with normiefont.
boldfont = list("𝗮𝗯𝗰𝗱𝗲𝗳𝗴𝗵𝗶𝗷𝗸𝗹𝗺𝗻𝗼𝗽𝗾𝗿𝘀𝘁𝘂𝘃𝘄𝘅𝘆𝘇")
@ayiin_cmd(pattern="bold(?: |$)(.*)")
async def thicc(bolded):
args = bolded.pattern_match.group(1)
if not args:
get = await bolded.get_reply_message()
args = get.text
if not args:
return await edit_delete(bolded, "**Teks Apa Yang Harus Saya Bold Kan?**")
xx = await edit_or_reply(bolded, "`Processing...`")
string = "".join(args).lower()
for normiecharacter in string:
if normiecharacter in normiefont:
boldcharacter = boldfont[normiefont.index(normiecharacter)]
string = string.replace(normiecharacter, boldcharacter)
await xx.edit(string)
# Mathematical bold fraktur letters, index-aligned with normiefont.
medievalbold = list("𝖆𝖇𝖈𝖉𝖊𝖋𝖌𝖍𝖎𝖏𝖐𝖑𝖒𝖓𝖔𝖕𝖖𝖗𝖘𝖙𝖚𝖛𝖜𝖝𝖞𝖟")
@ayiin_cmd(pattern="medibold(?: |$)(.*)")
async def mediv(medievalx):
args = medievalx.pattern_match.group(1)
if not args:
get = await medievalx.get_reply_message()
args = get.text
if not args:
return await edit_delete(
medievalx, "**Teks Apa Yang Harus Saya Medibold Kan?**"
)
xx = await edit_or_reply(medievalx, "`Processing...`")
string = "".join(args).lower()
for normiecharacter in string:
if normiecharacter in normiefont:
medievalcharacter = medievalbold[normiefont.index(normiecharacter)]
string = string.replace(normiecharacter, medievalcharacter)
await xx.edit(string)
# Mathematical double-struck letters, index-aligned with normiefont.
doublestruckt = list("𝕒𝕓𝕔𝕕𝕖𝕗𝕘𝕙𝕚𝕛𝕜𝕝𝕞𝕟𝕠𝕡𝕢𝕣𝕤𝕥𝕦𝕧𝕨𝕩𝕪𝕫")
@ayiin_cmd(pattern="doublestruck(?: |$)(.*)")
async def doublex(doublestrucktx):
args = doublestrucktx.pattern_match.group(1)
if not args:
get = await doublestrucktx.get_reply_message()
args = get.text
if not args:
return await edit_delete(
doublestrucktx, "**Teks Apa Yang Harus Saya Double Struck Kan?**"
)
xx = await edit_or_reply(doublestrucktx, "`Processing...`")
string = "".join(args).lower()
for normiecharacter in string:
if normiecharacter in normiefont:
strucktcharacter = doublestruckt[normiefont.index(normiecharacter)]
string = string.replace(normiecharacter, strucktcharacter)
await xx.edit(string)
# Mathematical Bold Script small letters: a contiguous Unicode run starting
# at U+1D4EA ("𝓪").
cursiveboldx = [chr(0x1D4EA + i) for i in range(26)]
@ayiin_cmd(pattern="curbold(?: |$)(.*)")
async def cursive2(cursivebolded):
args = cursivebolded.pattern_match.group(1)
if not args:
get = await cursivebolded.get_reply_message()
args = get.text
if not args:
await edit_delete.edit(
cursivebolded, "**Teks Apa Yang Harus Saya Cursive Bold Kan?**"
)
return
xx = await edit_or_reply(cursivebolded, "`Processing...`")
string = "".join(args).lower()
for normiecharacter in string:
if normiecharacter in normiefont:
cursiveboldcharacter = cursiveboldx[normiefont.index(
normiecharacter)]
string = string.replace(normiecharacter, cursiveboldcharacter)
await xx.edit(string)
# Mathematical Fraktur small letters: a contiguous Unicode run starting at
# U+1D51E ("𝔞").  (Only the Fraktur capitals have holes, e.g. ℭ and ℌ.)
medival2 = [chr(0x1D51E + i) for i in range(26)]
@ayiin_cmd(pattern="medi(?: |$)(.*)")
async def medival22(medivallite):
args = medivallite.pattern_match.group(1)
if not args:
get = await medivallite.get_reply_message()
args = get.text
if not args:
await edit_delete(medivallite, "****Teks Apa Yang Harus Saya Medival Kan?****")
return
xx = await edit_or_reply(medivallite, "`Processing...`")
string = "".join(args).lower()
for normiecharacter in string:
if normiecharacter in normiefont:
medivalxxcharacter = medival2[normiefont.index(normiecharacter)]
string = string.replace(normiecharacter, medivalxxcharacter)
await xx.edit(string)
# Mathematical Script small letters starting at U+1D4B6 ("𝒶").  Script e, g
# and o live outside this Unicode block (ℯ U+212F, ℊ U+210A, ℴ U+2134), and
# the original table used the *italic* letters 𝑒, 𝑔, 𝑜 in those slots, so
# patch the same three positions to keep the mapping identical.
cursive = [chr(0x1D4B6 + i) for i in range(26)]
cursive[4] = "\U0001D452"   # 𝑒 mathematical italic small e
cursive[6] = "\U0001D454"   # 𝑔 mathematical italic small g
cursive[14] = "\U0001D45C"  # 𝑜 mathematical italic small o
@ayiin_cmd(pattern="cur(?: |$)(.*)")
async def xcursive(cursivelite):
args = cursivelite.pattern_match.group(1)
if not args:
get = await cursivelite.get_reply_message()
args = get.text
if not args:
await edit_delete.edit(cursivelite, "**Teks Apa Yang Harus Saya Cursive Kan?**")
return
xx = await edit_or_reply(cursivelite, "`Processing...`")
string = "".join(args).lower()
for normiecharacter in string:
if normiecharacter in normiefont:
cursivecharacter = cursive[normiefont.index(normiecharacter)]
string = string.replace(normiecharacter, cursivecharacter)
await xx.edit(string)
CMD_HELP.update(
{
"watch": f"**Plugin : **`watch`\
\n\n • **Syntax :** `{cmd}watch` <nama movie/tv>\
\n • **Function : **Untuk Mengetahui Detail Tentang Film.\
"
}
)
CMD_HELP.update(
{
"randompp": f"**Plugin : **`randompp`\
\n\n • **Syntax :** `{cmd}randompp`\
\n • **Function : **Otomatis Mengganti Foto Profile Mu, Untuk Stop ini Ketik .restart\
"
}
)
CMD_HELP.update(
{
"glitch": f"**Plugin : **`glitch`\
\n\n • **Syntax :** `{cmd}glitch` <Reply Ke Media>\
\n • **Function : **Memberikan Glitch (Gif , Stickers , Gambar, Video) Ke Gif Dan Level Glitch 1 - 8.\nJika Tidak Memberikan Level Otomatis Default Ke Level 2\
"
}
)
CMD_HELP.update(
{
"grab": f"**Plugin : **`grab`\
\n\n • **Syntax :** `{cmd}grab` <reply ke user yang ingin di grab>\
\n • **Function : **Balas Ke Pesan Pengguna Telegram dan Ketik `{cmd}grab` Atau `{cmd}grab <count>` Untuk Mengambil Foto Profil.\
\n\n • **Syntax :** `{cmd}grab` <jumlah foto>\
\n • **Function : **Untuk Mengambil Foto Profil dengan jumlah foto yg di inginkan.\
"
}
)
CMD_HELP.update(
{
"bannedall": f"**Plugin : **`bannedall`.\
\n\n • **Syntax :** `{cmd}remove`\
\n • **Function : **Untuk Menganalisa user dari grup secara spesifik\
\n\n • **Syntax :** `{cmd}remove d`\
\n • **Function : **Untuk mengkik user dari grup secara spesifik\
\n\n • **Syntax :** `{cmd}remove y`\
\n • **Function : **Untuk Membanned Akun yang Terakhir Dilihat setahun yang lalu\
\n\n • **Syntax :** `{cmd}remove m`\
\n • **Function : **Untuk Membanned Akun yang Terakhir Dilihat sebulan yang lalu\
\n\n • **Syntax :** `{cmd}remove w`\
\n • **Function : **Untuk Membanned Akun yang Terakhir Dilihat seminggu yang lalu\
\n\n • **Syntax :** `{cmd}remove o`\
\n • **Function : **Untuk Membanned Akun yang sedang offline\
\n\n • **Syntax :** `{cmd}remove q`\
\n • **Function : **Untuk Membanned Akun yang sedang online\
\n\n • **Syntax :** `{cmd}remove r`\
\n • **Function : **Untuk Membanned Akun yang terakhir dilihat\
\n\n • **Syntax :** `{cmd}remove b`\
\n • **Function : **Untuk Membanned Bot yang ada di Grup chat\
\n\n • **Syntax :** `{cmd}remove n`\
\n • **Function : **Untuk Membanned Akun yang Last Seen A Long Time Ago\
\n\n **HATI HATI PLUGIN INI BERBAHAYA, MOHON GUNAKAN DENGAN BIJAK**\
"
}
)
CMD_HELP.update(
{
"rnupload": f"**Plugin : **`rnupload`\
\n\n • **Syntax :** `{cmd}rnupload`\
\n • **Function : **Untuk Rename dan Upload, Balas Ke Media Dan Ketik .rnupload xyz.jpg\
"
}
)
CMD_HELP.update(
{
"appmisc": f"`{cmd}app`\
\nUsage: ketik `{cmd}app namaapp` Dan Dapatkan Detail Informasi App.\
\n\n`.calc`\
\nUsage: `{cmd}calc <term1><operator><term2>\nUntuk eg {cmd}calc 02*02 Atau 99*99 (Angka Nol Penting) (Minimal Dua Suku Dan Dua Digit).\
\n\n`{cmd}xcd`\
\nUsage: Ketik xcd <query>.ps:Aku Sangat Bosan\
\n\n`{cmd}res`\
\nUsage: Ketik Username Akun,Channel,Group Atau Bot Bersama {cmd}res Dan Check Batasan\
\n\n`{cmd}weeb` <text>\
\nUsage:Teks Weebify\
\n\nKetik (`{cmd}bold <Teks>`,`{cmd}cur <Teks>`,`{cmd}curbold <Teks>`,`{cmd}medi <Teks>`,`{cmd}medibold <Teks>`,`{cmd}doublestruck <Teks>`)\
\nUsage: Buat Teks <Bold,Cursive,Cursivebold,Medival,Medivalbold,Gayishbold>\
\n\n`{cmd}glitchs` Balas Ke Media\
\nUsage: Memberikan Glitch (Gif , Stickers , Gambar, Video) Ke Sticker Dan Level Glitch 1 to 8.\
Jika Tidak Memberikan Level Otomatis Default Ke Level 2."
}
)
|
<reponame>john-james-sf/nlr<gh_stars>0
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ======================================================================================================================== #
# Project : Natural Language Recommendation #
# Version : 0.1.0 #
# File : \test_admin.py #
# Language : Python 3.7.11 #
# ------------------------------------------------------------------------------------------------------------------------ #
# Author : <NAME> #
# Company : nov8.ai #
# Email : <EMAIL> #
# URL : https://github.com/john-james-sf/nlr #
# ------------------------------------------------------------------------------------------------------------------------ #
# Created : Tuesday, November 9th 2021, 4:35:35 pm #
# Modified : Saturday, November 13th 2021, 9:26:14 am #
# Modifier : <NAME> (<EMAIL>) #
# ------------------------------------------------------------------------------------------------------------------------ #
# License : BSD 3-clause "New" or "Revised" License #
# Copyright: (c) 2021 nov8.ai #
# ======================================================================================================================== #
# %%
import os
import pytest
import pandas as pd
import logging
import inspect
from nlr.database.connect import MySQLDatabase, MySQLServer
from nlr.database.admin import DatabaseAdmin, TableAdmin
from nlr.database import DBNAME
from nlr.database.ddl import TABLES
# ------------------------------------------------------------------------------------------------------------------------ #
# Configure root logging once at import time so the ad-hoc runner at the
# bottom of this module emits progress messages; module-level logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DatabaseAdminTests:
    """Smoke tests for DatabaseAdmin.create/drop against a live MySQL server."""

    def test_drop(self):
        logger.info(f" Started {self.__class__.__name__} {inspect.stack()[0][3]}")
        admin = DatabaseAdmin()
        server = MySQLServer()
        with server() as connection:
            admin.drop(DBNAME, connection)
            assert not admin.exists(DBNAME, connection), \
                "Failure in {}".format(inspect.stack()[0][3])
        logger.info(f" Successfully completed {self.__class__.__name__} {inspect.stack()[0][3]}")

    def test_create(self):
        logger.info(f" Started {self.__class__.__name__} {inspect.stack()[0][3]}")
        admin = DatabaseAdmin()
        server = MySQLServer()
        with server() as connection:
            admin.create(DBNAME, connection, exist_ok=False)
            assert admin.exists(DBNAME, connection), \
                "Failure in {}".format(inspect.stack()[0][3])
        logger.info(f" Successfully completed {self.__class__.__name__} {inspect.stack()[0][3]}")

    def test_create_exist(self):
        # Creating a database that already exists with exist_ok=False:
        # should log an error but leave the database in place.
        logger.info(f" Started {self.__class__.__name__} {inspect.stack()[0][3]}")
        admin = DatabaseAdmin()
        server = MySQLServer()
        with server() as connection:
            admin.create(DBNAME, connection, exist_ok=False)
            assert admin.exists(DBNAME, connection), \
                "Failure in {}".format(inspect.stack()[0][3])
        logger.info(f" Successfully completed {self.__class__.__name__} {inspect.stack()[0][3]}")

    def test_create_exist_ok(self):
        # Same as above but with exist_ok=True: should log at info level only.
        logger.info(f" Started {self.__class__.__name__} {inspect.stack()[0][3]}")
        admin = DatabaseAdmin()
        server = MySQLServer()
        with server() as connection:
            admin.create(DBNAME, connection, exist_ok=True)
            assert admin.exists(DBNAME, connection), \
                "Failure in {}".format(inspect.stack()[0][3])
        logger.info(f" Successfully completed {self.__class__.__name__} {inspect.stack()[0][3]}")
class TableAdminTests:
    """Smoke tests for TableAdmin create/drop against the project database.

    Bug fix: the assertions (and one drop call) referenced an undefined name
    ``table``; the local variable is ``table_admin``, so every test raised
    NameError before reaching its assertion.
    """
    # Representative table used to verify drops.
    TABLE = 'datasources'

    def test_drop(self):
        logger.info(" Started {} {}".format(
            self.__class__.__name__, inspect.stack()[0][3]))
        table_admin = TableAdmin()
        dbconn = MySQLDatabase()
        with dbconn(DBNAME) as connection:
            table_admin.drop(TABLES, connection)
            assert not table_admin.exists(self.TABLE, connection), "Failure in {}".format(
                inspect.stack()[0][3])
        logger.info(" Successfully completed {} {}".format(
            self.__class__.__name__, inspect.stack()[0][3]))

    def test_create(self):
        logger.info(" Started {} {}".format(
            self.__class__.__name__, inspect.stack()[0][3]))
        table_admin = TableAdmin()
        dbconn = MySQLDatabase()
        with dbconn(DBNAME) as connection:
            table_admin.create(TABLES, connection)
            for name in TABLES.keys():
                assert table_admin.exists(name, connection), "Failure in {}".format(
                    inspect.stack()[0][3])
        logger.info(" Successfully completed {} {}".format(
            self.__class__.__name__, inspect.stack()[0][3]))

    def test_create_exist(self):
        # Creating tables that already exist with exist_ok=False: should log an error.
        logger.info(" Started {} {}".format(
            self.__class__.__name__, inspect.stack()[0][3]))
        table_admin = TableAdmin()
        dbconn = MySQLDatabase()
        with dbconn(DBNAME) as connection:
            table_admin.create(TABLES, connection, exist_ok=False)
            for name in TABLES.keys():
                assert table_admin.exists(name, connection), "Failure in {}".format(
                    inspect.stack()[0][3])
        logger.info(" Successfully completed {} {}".format(
            self.__class__.__name__, inspect.stack()[0][3]))

    def test_create_exist_ok(self):
        # Same but with exist_ok=True: should log at info level only.
        logger.info(" Started {} {}".format(
            self.__class__.__name__, inspect.stack()[0][3]))
        table_admin = TableAdmin()
        dbconn = MySQLDatabase()
        with dbconn(DBNAME) as connection:
            table_admin.create(TABLES, connection, exist_ok=True)
            for name in TABLES.keys():
                assert table_admin.exists(name, connection), "Failure in {}".format(
                    inspect.stack()[0][3])
        logger.info(" Successfully completed {} {}".format(
            self.__class__.__name__, inspect.stack()[0][3]))
if __name__ == "__main__":
t = DatabaseAdminTests()
t.test_drop()
t.test_create()
t.test_create_exist()
t.test_create_exist_ok()
t = TableAdminTests()
t.test_drop()
t.test_create()
t.test_create_exist()
t.test_create_exist_ok()
# %%
|
<filename>ofspy/test/test_context.py
"""
Copyright 2015 <NAME>, Massachusetts Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Test cases for L{ofspy.context}.
"""
import unittest
from ..federation import Federation
from ..simulator import Simulator
from ..context import Context
from ..surface import Surface
from ..orbit import Orbit
from ..demand import Demand
from ..valueSchedule import ValueSchedule
class ContextTestCase(unittest.TestCase):
    """Tests for L{ofspy.context.Context} propagation and event handling."""

    def setUp(self):
        self.default = Context(seed=0)
        # Locations: per sector one surface site plus LEO/MEO/GEO orbits, so
        # self.locs[4*s + 0..3] = SUR, LEO, MEO, GEO of sector s+1.
        self.locs = []
        for sector in range(6):
            self.locs.append(Surface(sector, name='SUR{0}'.format(sector + 1)))
            for altitude in ('LEO', 'MEO', 'GEO'):
                self.locs.append(
                    Orbit(sector, altitude,
                          name='{0}{1}'.format(altitude, sector + 1)))
        # Demands: (count, phenomenon, schedule points, penalty, name prefix).
        # Order matters: it matches the original hand-written sequence.
        self.evts = []
        demand_specs = [
            (8, 'SAR', [(1, 500), (4, 400)], -50, 'SAR1'),
            (12, 'SAR', [(2, 450), (5, 350)], -100, 'SAR2'),
            (23, 'SAR', [(3, 400), (6, 300)], -150, 'SAR3'),
            (8, 'VIS', [(1, 600), (4, 500)], -50, 'VIS1'),
            (17, 'VIS', [(2, 500), (5, 400)], -100, 'VIS2'),
            (8, 'VIS', [(3, 450), (6, 350)], -150, 'VIS3'),
        ]
        for num, phenom, points, penalty, prefix in demand_specs:
            for d in range(num):
                self.evts.append(
                    Demand(None, phenom, 1,
                           ValueSchedule(points, penalty),
                           name='{0}.{1}'.format(prefix, d + 1)))
        self.default = Context(locations=self.locs, events=self.evts,
                               federations=[Federation()], seed=0)
        self.sim = Simulator(entities=[self.default],
                             initTime=0, timeStep=1, maxTime=3)

    def tearDown(self):
        self.default = None
        self.locs = None
        self.evts = None

    def test_propagate(self):
        # (start index, elapsed time, expected index) into self.locs.
        cases = [
            (0, 0, 0), (0, 1, 0), (0, 2, 0),     # surface sites never move
            (1, 0, 1), (1, 1, 9), (1, 2, 17),    # LEO: +2 sectors per step
            (1, 3, 1), (1, 4, 9), (1, -1, 17),   # wraps around; -1 == +2 mod 3
            (2, 0, 2), (2, 1, 6), (2, 2, 10),    # MEO: +1 sector per step
            (3, 0, 3), (3, 1, 3), (3, 2, 3),     # GEO is stationary
        ]
        for start, elapsed, expected in cases:
            self.assertEqual(self.default.propagate(self.locs[start], elapsed),
                             self.locs[expected])

    def test_init(self):
        # Before init() all event queues are empty.
        self.assertEqual(self.default.currentEvents, [])
        self.assertEqual(self.default.futureEvents, [])
        self.assertEqual(self.default.pastEvents, [])
        self.default.init(self.sim)
        # After init() every event has been queued as a future event.
        self.assertEqual(self.default.currentEvents, [])
        self.assertNotEqual(self.default.futureEvents, [])
        self.assertEqual(len(self.default.futureEvents),
                         len(self.default.events))
        self.assertEqual(self.default.pastEvents, [])

    def test_tick(self):
        self.default.init(self.sim)
        self.default.tick(self.sim)

    def test_tock(self):
        self.default.init(self.sim)
        self.default.tick(self.sim)
        self.default.tock()
        # One tick/tock cycle reveals six events; the rest stay queued.
        self.assertEqual(len(self.default.currentEvents), 6)
        self.assertEqual(len(self.default.futureEvents),
                         len(self.default.events) - 6)
"""aospy.Run objects for simulations from the GFDL HiRAM model."""
import datetime
from aospy import Run
from aospy.data_loader import GFDLDataLoader
# Baseline control run; the aerosol-perturbation runs below reuse its
# data_loader as a template (only data_direc differs).
hiram_cont = Run(
    name='cont',
    description=(
        '1981-2000 HadISST climatological annual cycle of SSTs '
        'and sea ice repeated annually, with PD atmospheric composition.'
    ),
    data_loader=GFDLDataLoader(
        data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim/'
                    'gfdl.ncrc2-default-prod/pp'),
        data_start_date=datetime.datetime(1979, 1, 1),
        data_end_date=datetime.datetime(1995, 12, 31),
    ),
)
# Full aerosol-forced SST anomaly pattern.
hiram_aero = Run(
    name='aero',
    description=(
        '1981-2000 HadISST climatological annual cycle of SSTs and '
        'sea ice repeated annually, overlaid with annual cycle of '
        'equilibrium SST anomalies from a PI-to-PD aerosols '
        'simulation of AM2.1 with a mixed layer ocean. '
        'PD atmospheric composition.'
    ),
    data_loader=GFDLDataLoader(
        template=hiram_cont.data_loader,
        data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero/'
                    'gfdl.ncrc2-default-prod/pp'),
    ),
)
# Tropical-mean component of the aerosol SST anomaly only.
hiram_atm = Run(
    name='aero_tm',
    description=(
        '1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
        'repeated annually, overlaid with annual tropical mean equilibrium '
        'SST anomaly from a PI-to-PD aerosols simulation of AM2.1 with a '
        'mixed layer ocean. PD atmospheric composition.'
    ),
    data_loader=GFDLDataLoader(
        template=hiram_cont.data_loader,
        data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_'
                    'trop_mean/gfdl.ncrc2-default-prod/pp'),
    ),
)
# Aerosol SST anomaly with its tropical mean removed.
hiram_amtm = Run(
    name='aero_mtm',
    description=(
        '1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
        'repeated annually, subtracting annual tropical mean equilibrium SST '
        'anomaly from a PI-to-PD aerosols simulation of AM2.1 with a mixed '
        'layer ocean. PD atmospheric composition.'
    ),
    data_loader=GFDLDataLoader(
        template=hiram_cont.data_loader,
        data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_m_'
                    'trop_mean/gfdl.ncrc2-default-prod/pp'),
    ),
)
# Basin-restricted variants: anomaly applied in a single ocean basin.
hiram_apac = Run(
    name='aero_pac',
    description=(
        '1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
        'repeated annually, overlaid in Pacific Ocean only with annual cycle '
        'of equilibrium SST anomalies from a PI-to-PD aerosols simulation of '
        'AM2.1 with a mixed layer ocean. PD atmospheric composition.'
    ),
    data_loader=GFDLDataLoader(
        template=hiram_cont.data_loader,
        data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_pac/'
                    'gfdl.ncrc2-default-prod/pp'),
    ),
)
hiram_aatl = Run(
    name='aero_atl',
    description=(
        '1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
        'repeated annually, overlaid in Atlantic Ocean only with annual cycle '
        'of equilibrium SST anomalies from a PI-to-PD aerosols simulation of '
        'AM2.1 with a mixed layer ocean. PD atmospheric composition.'
    ),
    data_loader=GFDLDataLoader(
        template=hiram_cont.data_loader,
        data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_atl/'
                    'gfdl.ncrc2-default-prod/pp'),
    ),
)
# Indian-Ocean-only variant of the aerosol anomaly runs above.
hiram_aind = Run(
    name='aero_ind',
    description=(
        '1981-2000 HadISST climatological annual cycle of SSTs and sea ice '
        'repeated annually, overlaid in Indian Ocean only with annual cycle '
        'of equilibrium SST anomalies from a PI-to-PD aerosols simulation of '
        # Typo fix: was 'M2.1'; every sibling run's description says 'AM2.1'.
        'AM2.1 with a mixed layer ocean. PD atmospheric composition.'
    ),
    data_loader=GFDLDataLoader(
        template=hiram_cont.data_loader,
        data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_aero_ind/'
                    'gfdl.ncrc2-default-prod/pp'),
    ),
)
# Greenhouse-gas/ozone analogues of the aerosol runs above.
hiram_gas = Run(
    name='gas',
    description=(
        '1981-2000 HadISST climatological annual cycle of SSTs and '
        'sea ice repeated annually, overlaid with annual cycle of '
        'equilibrium SST anomalies from a PI-to-PD WMGG and ozone '
        'simulation of AM2.1 with a mixed layer ocean. '
        'PD atmospheric composition.'
    ),
    data_loader=GFDLDataLoader(
        template=hiram_cont.data_loader,
        data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_gas_rerun2/'
                    'gfdl.ncrc2-default-prod/pp'),
    ),
)
# Tropical-mean component only.
hiram_gtm = Run(
    name='gas_tm',
    description=(
        '1981-2000 HadISST climatological annual cycle of SSTs and sea '
        'ice repeated annually, overlaid with annual tropical mean '
        'equilibrium SST anomaly from a PI-to-PD WMGG and ozone simulation '
        'of AM2.1 with a mixed layer ocean. PD atmospheric composition.'
    ),
    data_loader=GFDLDataLoader(
        template=hiram_cont.data_loader,
        data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_gas_'
                    'trop_mean/gfdl.ncrc2-default-prod/pp'),
    ),
)
# Anomaly with its tropical mean removed.
hiram_gmtm = Run(
    name='gas_mtm',
    description=(
        '1981-2000 HadISST climatological annual cycle of SSTs and sea ice'
        'repeated annually, overlaid with annual cycle of equilibrium SST'
        'anomalies minus their annual tropical mean from a PI-to-PD WMGG &'
        'ozone simulation of AM2.1 with a mixed layer ocean. PD atmos'
        'composition.'
    ),
    data_loader=GFDLDataLoader(
        template=hiram_cont.data_loader,
        data_direc=('/archive/Yi.Ming/siena_201211/c180_hiram_clim_gas_m_'
                    'trop_mean/gfdl.ncrc2-default-prod/pp'),
    ),
)
# hiram_amip = Run(
# name='amip',
# ens_mem_prefix='/archive/hrao/ornl/cmip5/c180_hiram_',
# ens_mem_ext=['H1', 'H3'],
# ens_mem_suffix='/pp',
# data_dur=5,
# data_start_date=datetime.datetime(1979, 1, 1),
# data_end_date=datetime.datetime(2008, 12, 31),
# data_dir_struc='gfdl'
# )
|
#!/usr/bin/python -tt
# vim:set ts=4 sw=4 expandtab:
#
# NodeManager plugin for creating credentials in slivers
# (*) empower slivers to make API calls throught hmac
# (*) also create a ssh key - used by the OMF resource controller
# for authenticating itself with its Experiment Controller
# in order to avoid spamming the DB with huge amounts of such tags,
# (*) slices need to have the 'enable_hmac' tag set
# (*) or the 'omf_control' tag set, respectively
"""
Sliver authentication support for NodeManager.
"""
import os
import random
import string
import tempfile
import socket
import logger
import tools
def start():
    """NodeManager plugin hook: invoked once when the plugin is loaded."""
    logger.log("sliverauth: (dummy) plugin starting up...")
def GetSlivers(data, config, plc):
    """NodeManager plugin hook: called with fresh sliver data from PLC.

    For every non-system sliver on this node, provision an hmac key
    (slices tagged 'enable_hmac') and/or an ssh key pair (slices tagged
    'omf_control').
    """
    # Allow the whole plugin to be switched off via a config override.
    if 'OVERRIDES' in dir(config):
        if config.OVERRIDES.get('sliverauth') == '-1':
            logger.log("sliverauth: Disabled", 2)
            return
    if 'slivers' not in data:
        logger.log_missing_data("sliverauth.GetSlivers", 'slivers')
        return
    for sliver in data['slivers']:
        path = '/vservers/%s' % sliver['name']
        if not os.path.exists(path):
            # ignore all non-plc-instantiated slivers
            instantiation = sliver.get('instantiation','')
            if instantiation == 'plc-instantiated':
                # Fixed log typo: previously read "does not yet exist111".
                logger.log("sliverauth: plc-instantiated slice %s does not yet exist. IGNORING!" % sliver['name'])
                continue
            # NOTE(review): slivers that are NOT plc-instantiated fall
            # through and are still processed even though their directory
            # is missing — confirm this is intentional before changing it.
        system_slice = False
        for chunk in sliver['attributes']:
            if chunk['tagname'] == "system":
                # NOTE(review): if 'value' is a non-string falsy object the
                # .lower() call raises AttributeError; values are assumed to
                # be strings or one of (True, 1, '1').
                if chunk['value'] in (True, 1, '1') or chunk['value'].lower() == "true":
                    system_slice = True
        logger.log("sliverauth: system_slice is %s " % system_slice)
        for chunk in sliver['attributes']:
            if chunk['tagname']=='enable_hmac' and not system_slice:
                logger.log("sliverauth: slice %s is hmac" % sliver['name'])
                manage_hmac (plc, sliver)
            if chunk['tagname']=='omf_control':
                # Fixed log typo: "omf_cotrol" -> "omf_control".
                logger.log("sliverauth: slice %s is omf_control" % sliver['name'])
                manage_sshkey (plc, sliver)
def SetSliverTag(plc, slice, tagname, value):
    """Create or update slice tag *tagname* = *value* for *slice* on this node."""
    node_id = tools.node_id()
    slivertags=plc.GetSliceTags({"name":slice,"node_id":node_id,"tagname":tagname})
    if len(slivertags)==0:
        # looks like GetSlivers reports about delegated/nm-controller slices that do *not* belong to this node
        # and this is something that AddSliceTag does not like
        try:
            slivertag_id=plc.AddSliceTag(slice,tagname,value,node_id)
        except:
            # Deliberately broad catch: a failure here is expected for
            # delegated slices and must not abort the caller's loop.
            logger.log_exc ("sliverauth.SetSliverTag (probably delegated) slice=%(slice)s tag=%(tagname)s node_id=%(node_id)d"%locals())
            pass
    else:
        # Tag already exists: update it in place.
        slivertag_id=slivertags[0]['slice_tag_id']
        plc.UpdateSliceTag(slivertag_id,value)
def find_tag (sliver, tagname):
    """Return the value of the attribute named *tagname* on *sliver*.

    Checks the current 'tagname' key first and falls back to the legacy
    'name' key.  Returns None when no attribute matches.
    """
    for attribute in sliver['attributes']:
        if 'tagname' in attribute:
            label = attribute['tagname']
        else:
            # for legacy, try the old-fashioned 'name' as well
            label = attribute.get('name', '')
        if label == tagname:
            return attribute['value']
    return None
def manage_hmac (plc, sliver):
    """Ensure *sliver* has an 'hmac' slice tag and a matching on-disk key file."""
    hmac = find_tag (sliver, 'hmac')
    if not hmac:
        # let python do its thing
        random.seed()
        # Python 2 idioms (string.letters, xrange): this plugin targets py2.
        # NOTE(review): random is not a CSPRNG; os.urandom would be stronger
        # for key material.
        d = [random.choice(string.letters) for x in xrange(32)]
        hmac = "".join(d)
        SetSliverTag(plc,sliver['name'],'hmac',hmac)
        logger.log("sliverauth: %s: setting hmac" % sliver['name'])
    path = '/vservers/%s/etc/planetlab' % sliver['name']
    if os.path.exists(path):
        keyfile = '%s/key' % path
        # 0400 (py2 octal literal): key file readable by owner only.
        if (tools.replace_file_with_string(keyfile,hmac,chmod=0400)):
            logger.log ("sliverauth: (over)wrote hmac into %s " % keyfile)
# create the key if needed and returns the key contents
def generate_sshkey (sliver):
    """Create an RSA key pair for *sliver* if missing; return the public key text."""
    # initial version was storing stuff in the sliver directly
    # keyfile="/vservers/%s/home/%s/.ssh/id_rsa"%(sliver['name'],sliver['name'])
    # we're now storing this in the same place as the authorized_keys, which in turn
    # gets mounted to the user's home directory in the sliver
    keyfile="/home/%s/.ssh/id_rsa"%(sliver['name'])
    pubfile="%s.pub"%keyfile
    dotssh=os.path.dirname(keyfile)
    # create dir if needed
    if not os.path.isdir (dotssh):
        # 0700 (py2 octal literal): .ssh dir accessible by owner only.
        os.mkdir (dotssh, 0700)
        logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), dotssh ] )
    if not os.path.isfile (pubfile):
        # Key comment identifies the slice and host the key was made on.
        comment="%s@%s"%(sliver['name'],socket.gethostname())
        # -N '' means an empty passphrase (non-interactive generation).
        logger.log_call( [ 'ssh-keygen', '-t', 'rsa', '-N', '', '-f', keyfile , '-C', comment] )
        os.chmod (keyfile, 0400)
        logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), keyfile, pubfile ] )
    # file() is the py2 built-in; returns the stripped public key contents.
    return file(pubfile).read().strip()
# a sliver can get created, deleted and re-created
# the slice having the tag is not sufficient to skip key geneneration
def manage_sshkey (plc, sliver):
    """Ensure *sliver* has an ssh key pair and publish the public key as the
    'ssh_key' slice tag.

    The key is (re)generated unconditionally because a sliver can be
    destroyed and re-created; the presence of the tag alone is not enough.
    """
    # regardless of whether the tag is there or not, we need to grab the file
    # if it's lost b/c e.g. the sliver was destroyed we cannot save the tags content
    ssh_key = generate_sshkey(sliver)
    old_tag = find_tag (sliver, 'ssh_key')
    # Modernized: '<>' is obsolete (removed in py3); '!=' is identical in py2.
    if ssh_key != old_tag:
        SetSliverTag(plc, sliver['name'], 'ssh_key', ssh_key)
        logger.log ("sliverauth: %s: setting ssh_key" % sliver['name'])
|
<reponame>cqsl/Entanglement-Forging-with-GNN-models<filename>Forging_helper_Circuits_TFIM.py
#!/usr/bin/env python
# coding: utf-8
import netket as nk
from netket.operator.spin import sigmax,sigmaz
from netket import jax as nkjax
import jax
import jax.numpy as jnp
from functools import partial
# from jax import random
# from tqdm import tqdm
import pennylane as qml
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
from pennylane.operation import AnyWires, Operation
from pennylane.wires import Wires
from Forging_helper import *
def Cj_template(alpha, beta, idx1, idx2):
    """
    Apply the two-qubit combination unitary C_j used by entanglement forging:
    X^alpha (x) X^beta on wires (idx1, idx2), a CZ between them, then
    X^alpha (x) X^beta again (cf. eq. 11 of arXiv:2104.10220).

    For the training we use the U3 to implement X^alpha.
    The problem with this function is that it doesn't work properly for idx1 == idx2
    This does not cause problems for TFIM with nearest neighbour interactions
    But it can cause problems for the fermionic Hamiltonians.
    """
    qml.U3(np.pi*alpha, 0., np.pi*alpha, wires = idx1) # We use U3 because RX gives a phase
    qml.U3(np.pi*beta, 0., np.pi*beta, wires = idx2)
    if not idx1 == idx2:
        qml.CZ(wires = [idx1, idx2])
    else:
        # Degenerate case idx1 == idx2: replace the CZ with a phase gate
        # (see the caveat in the docstring above).
        qml.U1(((alpha-0.5)*2. + (beta-0.5)*2.)*np.pi/2, wires=idx1)
    # Second layer of X^alpha / X^beta, closing the conjugation.
    qml.U3(np.pi*alpha, 0., np.pi*alpha, wires = idx1)
    qml.U3(np.pi*beta, 0., np.pi*beta, wires = idx2)
def Cj_template_general(alpha, beta, O1 = qml.PauliX(0), O2 = qml.PauliX(1)):
    """
    Build the combination operator C_j for arbitrary observables O1, O2
    (supplied as matrices):

        0.5 * (I + (-1)^alpha O1 + (-1)^beta O2 - (-1)^(alpha+beta) O1 @ O2)
    """
    identity = jnp.eye(O1.shape[0])
    product = jnp.matmul(O1, O2)
    return 0.5*(identity + (-1.)**alpha*O1 + (-1.)**beta*O2 - (-1.)**(alpha + beta)*product)
def sample_NN(NN_params, chain_length = 128, sa = None, NN_model = None, n_qubits = 4):
    """
    Draw configurations from the autoregressive NN sampler and return them in
    both conventions: spins s in {-1, 1} and bits S in {0, 1}.

    NN_params: Parameters of classical model
    chain_length: Number of samples
    sa: Netket sampler
    NN_model: Netket NN model
    n_qubits: full-system size; samples cover n_qubits//2 sites (one half).
    """
    raw, _ = nk.sampler.ARDirectSampler.sample(
        sa, NN_model, NN_params, chain_length=chain_length)
    # One row per sample over the half-system; gradients are cut here since
    # the samples only feed the quantum circuits.
    spins = jax.lax.stop_gradient(raw.reshape(-1, n_qubits // 2))
    bits = ((spins + 1) / 2).astype(int)
    return spins, bits
def Circuits_Observable(params, inputs, Observable, n_qubits = 2):
    """
    Expectation value of *Observable* on one half-system ansatz circuit.

    params: Parameters of the VQE
    inputs: Samples of the NN
    Observable: Is either the Hamiltonian of the subsystem, if we calculate <H_A>
        or it is the operator O_1 O_2 (as a multiplication not a tensor product) as in
        eq 11 (https://arxiv.org/pdf/2104.10220.pdf)
    n_qubits: refers here to number of qubits of whole system
    """
    # Each subsystem circuit acts on half of the qubits.
    dev = qml.device('default.qubit.jax', wires=n_qubits//2)
    # Observable is a static argument for jax.jit (it is not a JAX type).
    @partial(jax.jit, static_argnums=2)
    @qml.qnode(dev, interface='jax', diff_method="backprop")
    def qnode(params, inputs, Observable):
        # Encode the classical sample: RX(pi*b) maps bit b in {0,1} to |b>
        # up to a global phase.
        for i in range(n_qubits//2):
            qml.RX(jnp.pi*inputs[i], wires=i)
        brick_wall_entangling(params)
        return qml.expval(Observable)
    return qnode(params, inputs, Observable)
def qnode_y_to_psi(inputs, n_qubits):
    """
    Translate a sample into a state vector with pennylane conversion.

    inputs: one bit-string sample for the half-system wires.
    n_qubits: number of qubits of the FULL system; the device itself uses
        n_qubits//2 wires.  (The original note claimed this was the
        subsystem size, which contradicts the division by two below.)
    """
    # Exact statevector simulation: shots=None.
    dev = qml.device('default.qubit.jax', wires=n_qubits//2, shots = None)
    @jax.jit
    @qml.qnode(dev, interface='jax')
    def circuit(inputs):
        # RX(pi*b) prepares |b> (up to a global phase) for each bit b.
        for i in range(n_qubits//2):
            qml.RX(jnp.pi*inputs[i], wires=i)
        return qml.state()
    return circuit(inputs)
def qnode_Y_given_X(params, inputs, key, alpha_beta, indices, n_qubits = 2):
    """
    Circuit to sample p(y|x) for operators ZiZj that act on different subsytems.
    params: Parameters of VQE
    inputs: Samples from NN model
    key: PRNG key for jax
    alpha_beta: list of tuple with [alpha, beta]
    indices: which index the operator Zi Zj is acting on., where i and j are mod(N)
        with N = n_qubits//2
    n_qubits: Is nr of qubits of full system!!
    """
    idx1, idx2 = indices
    alpha, beta = alpha_beta
    # Pre-bind the combination unitary C_j for these settings.
    U = partial(Cj_template, alpha, beta, idx1 = idx1, idx2 = idx2)
    # Finite-shot device: this qnode is used for sampling, not gradients.
    dev = qml.device('default.qubit.jax', wires=n_qubits//2, shots = 1000, prng_key = key)
    @partial(jax.jit, static_argnums = 2)
    @qml.qnode(dev, interface='jax', diff_method=None)
    def circuit(params, inputs, U):
        for i in range(n_qubits//2):
            qml.RX(jnp.pi*inputs[i], wires=i)
        brick_wall_entangling(params)
        U()
        # Undo the ansatz so samples are drawn from V† C_j V |x>.
        qml.adjoint(brick_wall_entangling)(params)
        return qml.sample()
    return circuit(params, inputs, U)
def qnode_Y_given_X_states(params, inputs, key, alpha_beta, indices, n_qubits = 2):
    """
    Same Circuit as to sample p(y|x) for operators ZiZj that act on different subsytems.
    But now it returns the state vector.
    params: Parameters of VQE
    inputs: Samples from NN model
    key: PRNG key for jax
    alpha_beta: list of tuple with [alpha, beta]
    indices: which index the operator Zi Zj is acting on., where i and j are mod(N)
        with N = n_qubits//2
    n_qubits: Is nr of qubits of full system!!
    """
    # Exact statevector device (no shots): differentiable via backprop.
    dev = qml.device('default.qubit.jax', wires=n_qubits//2)
    idx1, idx2 = indices
    alpha, beta = alpha_beta
    # Pre-bind the combination unitary C_j for these settings.
    U = partial(Cj_template, alpha, beta, idx1 = idx1, idx2 = idx2)
    @partial(jax.jit, static_argnums = 2)
    @qml.qnode(dev, interface='jax', diff_method="backprop")
    def circuit(params, inputs, U):
        for i in range(n_qubits//2):
            qml.RX(jnp.pi*inputs[i], wires=i)
        brick_wall_entangling(params)
        U()
        # Undo the ansatz: the returned state is V† C_j V |x>.
        qml.adjoint(brick_wall_entangling)(params)
        return qml.state()
    return circuit(params, inputs, U)
def qnode_Y_given_X_fermion(params, inputs, key, alpha_beta, Op_AB, n_qubits):
    """
    Sampling circuit for p(y|x) with a general two-operator term Op_AB
    (fermionic Hamiltonians); the combination operator is built as a matrix
    and applied as a unitary.

    Op_AB: indexable term where index 0 is the prefactor and indices 1, 2
        carry the two operators (their .matrix attributes are used).
    n_qubits: Is nr of qubits of full system!!
    """
    alpha, beta = alpha_beta
    m = Cj_template_general(alpha=alpha, beta = beta, O1 = jnp.array(Op_AB[1].matrix), O2 = jnp.array(Op_AB[2].matrix)) # index 0 is the prefactor
    U = partial(apply_unitary, M=m, wires = range(n_qubits//2))
    # Finite-shot device: this qnode is used for sampling, not gradients.
    dev = qml.device('default.qubit.jax', wires=n_qubits//2, shots = 1000, prng_key = key)
    @partial(jax.jit, static_argnums = 2)
    @qml.qnode(dev, interface='jax', diff_method=None)
    def circuit(params, inputs, U):
        for i in range(n_qubits//2):
            qml.RX(jnp.pi*inputs[i], wires=i)
        brick_wall_entangling(params)
        U()
        # Undo the ansatz so samples are drawn from V† C_j V |x>.
        qml.adjoint(brick_wall_entangling)(params)
        return qml.sample()
    return circuit(params, inputs, U)
def qnode_Y_given_X_states_fermion(params, inputs, key, alpha_beta, Op_AB, n_qubits):
    """
    Statevector variant of qnode_Y_given_X_fermion: same circuit, but returns
    qml.state() on an exact (shotless) device so it can be differentiated.

    Op_AB: indexable term where index 0 is the prefactor and indices 1, 2
        carry the two operators (their .matrix attributes are used).
    n_qubits: Is nr of qubits of full system!!
    """
    dev = qml.device('default.qubit.jax', wires=n_qubits//2)
    alpha, beta = alpha_beta
    m = Cj_template_general(alpha=alpha, beta = beta, O1 = jnp.array(Op_AB[1].matrix), O2 = jnp.array(Op_AB[2].matrix)) # index 0 is the prefactor
    U = partial(apply_unitary, M=m, wires = range(n_qubits//2))
    @partial(jax.jit, static_argnums = 2)
    @qml.qnode(dev, interface='jax', diff_method="backprop")
    def circuit(params, inputs, U):
        for i in range(n_qubits//2):
            qml.RX(jnp.pi*inputs[i], wires=i)
        brick_wall_entangling(params)
        U()
        # Undo the ansatz: the returned state is V† C_j V |x>.
        qml.adjoint(brick_wall_entangling)(params)
        return qml.state()
    return circuit(params, inputs, U)
""" ref:
https://gist.github.com/bjpirt/9666d8c623cb98e755c92f1fbeeb6118
https://groups.google.com/group/mearm/attach/18a4eb363ddaa/MeArmPiTechnicalOverviewV0-2DRAFT.pdf?part=0.1
"""
import math
import pigpio
from logging import getLogger, basicConfig, DEBUG
# Module-level logging configured at import time (DEBUG to stderr).
logger = getLogger(__name__)
basicConfig(
    level=DEBUG, format="%(asctime)s %(levelname)s %(name)s :%(message)s")
# Single shared connection to the local pigpio daemon.
pi = pigpio.pi()
# Margin (degrees) kept away from each servo's mechanical end stops.
safe_angle = 25
class Servo:
    """One hobby servo driven through the pigpio daemon.

    The config dict supplies the GPIO pin, the pulse-width range
    ('min'/'max', microseconds) and the matching angle range
    ('minAngle'/'maxAngle', degrees). Angles map linearly to pulse widths.
    """

    def __init__(self, config):
        self.pin = config['pin']
        self.min = config['min']  # pulse width at minAngle (us)
        self.max = config['max']  # pulse width at maxAngle (us)
        self.minAngle = config['minAngle']
        self.maxAngle = config['maxAngle']
        # Fix: start the bookkeeping at the centre so moveBy() works before
        # any moveTo(). Previously currentAngle did not exist until the
        # first absolute move, so an early relative move (or updateServo)
        # raised AttributeError. No hardware command is sent here.
        self.currentAngle = self.minAngle + (self.maxAngle - self.minAngle) / 2

    def moveTo(self, angle):
        """Move to an absolute angle (clamped to the safe range)."""
        self.moveToAngle(angle)

    def moveBy(self, angle):
        """Move relative to the current angle."""
        self.moveToAngle(self.currentAngle + angle)

    def moveToCentre(self):
        """Move to the middle of the configured angle range."""
        centre = self.minAngle + (self.maxAngle - self.minAngle) / 2
        self.moveToAngle(centre)

    def moveToAngle(self, angle):
        """ prevent MeArmPi from moving so far (angle: +- safe_angle) """
        if angle > self.maxAngle - safe_angle:
            angle = self.maxAngle - safe_angle
        if angle < self.minAngle + safe_angle:
            angle = self.minAngle + safe_angle
        self.currentAngle = angle
        self.updateServo()

    def updateServo(self):
        """Translate currentAngle into a pulse width and send it to pigpio."""
        pulseWidth = math.floor(self.min + ((float(
            self.currentAngle - self.minAngle) / float(
            self.maxAngle - self.minAngle)) * (self.max - self.min)))
        logger.debug("updateServo currentAngle:{} pin:{} pulseWidth:{}".format(
            self.currentAngle, self.pin, pulseWidth))
        pi.set_servo_pulsewidth(self.pin, pulseWidth)
class MeArm:
    """The four-servo MeArm Pi arm: lower, upper, base and grip.

    Pin numbers and pulse-width calibration follow the reference
    implementation at
    https://github.com/mimeindustries/mearm-js/blob/master/lib/MeArmPi.js
    """

    def __init__(self):
        self.lower = Servo({
            'pin': 17,
            'min': 1300,
            'max': 2400,
            'minAngle': 0,
            'maxAngle': 135
        })
        self.upper = Servo({
            'pin': 22,
            'min': 530,
            'max': 2000,
            'minAngle': 0,
            'maxAngle': 135
        })
        self.base = Servo({
            'pin': 4,
            'min': 530,
            'max': 2400,
            'minAngle': -90,
            'maxAngle': 90
        })
        self.grip = Servo({
            'pin': 10,
            'min': 1400,
            'max': 2400,
            'minAngle': 0,
            'maxAngle': 90
        })

    def moveToBase(self, angle):
        """Absolute move of the base servo."""
        self.base.moveTo(angle)

    def moveToPosition(self, lower, upper, base, grip):
        """Absolute move of all four servos at once."""
        self.lower.moveTo(lower)
        self.upper.moveTo(upper)
        self.base.moveTo(base)
        self.grip.moveTo(grip)

    def moveByPosition(self, lower, upper, base):
        """Relative move of the arm servos (grip is left unchanged)."""
        self.lower.moveBy(lower)
        self.upper.moveBy(upper)
        self.base.moveBy(base)

    def moveByBase(self, angle):
        """Relative move of the base servo."""
        self.base.moveBy(angle)

    def moveByUpper(self, angle):
        """Relative move of the upper servo."""
        self.upper.moveBy(angle)

    def moveByLower(self, angle):
        """Relative move of the lower servo."""
        self.lower.moveBy(angle)

    def moveToGrip(self, angle):
        """Absolute move of the grip servo."""
        self.grip.moveTo(angle)

    def moveToCentres(self):
        """Centre every servo (a safe 'home' pose)."""
        self.base.moveToCentre()
        self.lower.moveToCentre()
        self.upper.moveToCentre()
        self.grip.moveToCentre()
|
# 一个小程序,Secret Message. 文件夹alphabet中是英文26个字母及部分标点符号的照片。运行程序过中,提示用户输入文件夹名称(比如test_folder)以及Secret Message的内容(比如“love you python”)。运行完程序后,在test_folder文件夹下的照片是“love you python”这个Secret Message中所有的字母和标点符号,但是顺序是杂乱无章的,所以这个时候它仍然是SECRET message. 但是在test_folder_copy文件中的照片排列顺序是正确的,message不再secret,其中我们可以看到message的内容是“love you python”. 具体代码以及截屏见 1. https://discussions.udacity.com/t/secret-message-mini-project-f/16125/2449
# 2. https://github.com/roy2020china/BingDemo/blob/master/15_Secret_message.py
# There are images for "A~Z", " " and "," in file folder named "alphabet", which can be downloaded from https://s3.amazonaws.com/udacity-hosted-downloads/ud036/alphabet.zip . Images are downloaded and stored on my local machine at "/Users/ABC/Temp/alphabet/".
# Prompt user to provide a name for a new file folder, such as "test_folder". File path is "/Users/ABC/Temp/test_folder/"
# Prompt user to provide a hidden message, such as "love you python".
# Pick photos from alphabet according to the user's hidden message and move them to test_folder. In our case, chennai.jpg, dallas1.jpg, ithaca.jpg, beijing.jpg, madrid1.jpg, kiev1.jpg, dallas2.jpg, istanbul.jpg, madrid2.jpg, delhi.jpg, kiev2.jpg, hyderabad.jpg, bristol.jpg, dallas3.jpg, colombo.jpg will be selected and moved to test_folder.
# Re-arrange all the photos in test_folder in the order of "LOVE YOU PYTHON".
# Snap shots can be referred to at https://discussions.udacity.com/t/secret-message-mini-project-f/16125/2449
import os
import string
import shutil # copy one file from Folder A to Folder B; or copy one file folder recursively to Folder B
# Step 1.1: prompt user to input a folder name. create a folder.
def user_input_folder_name():
    """Prompt the user for a folder name and return it (Python 2 raw_input)."""
    folder_name = raw_input("Please give a name for the folder to be created:")
    print "Got the folder name. Thank you."
    return folder_name
def create_folder():
folder_name = user_input_folder_name()
while os.path.exists("/Users/ABC/Temp/" + folder_name) is True:
print "Please give a valid name"
folder_name = user_input_folder_name()
# create a folder with that name
folder_path = "/Users/ABC/Temp/" + folder_name
print "folder path is: " + folder_path
# os.makedirs("/Users/ABC/Temp/%s" % folder_name)
os.makedirs(folder_path)
return folder_path
# Step 1.2: prompt user to create a hidden message, rename those photo files which are used for more than once. Move those photo files into it
def user_input_hidden_message():
    """Prompt for the secret message and return it upper-cased."""
    hidden_message = raw_input("the hidden message that you want to print out after implementation of this program: ")
    # Upper-case so the message matches the A-Z keys of the image mapping.
    hidden_message = hidden_message.upper()
    return hidden_message
# a ~ z, plus . and blank space.
def get_alphabet_list():
    """Return the 26 upper-case letters as a list, 'A' through 'Z'."""
    return list(string.ascii_uppercase)
# Step 1.3: make a dictionary, key = "A~Z", value = file_name
def file_names_list():
    """Return the image file names in the alphabet folder, sorted.

    Fix: hidden entries such as ".DS_Store" are filtered out by name
    instead of blindly popping the first list element, and the result is
    sorted so the A-Z mapping no longer depends on the platform-specific
    ordering of os.listdir().
    """
    alphabet_dir = "/Users/ABC/Temp/alphabet"
    file_names = os.listdir(alphabet_dir)
    return sorted(name for name in file_names if not name.startswith('.'))
def dict_alphabet_filename():
    """Map each upper-case letter to its corresponding image file name."""
    letters = get_alphabet_list()
    file_names = file_names_list()
    return dict(zip(letters, file_names))
def dict_28_keyvaluepairs():
    """Letter-to-file mapping extended with space and period (28 keys)."""
    mapping = dict_alphabet_filename()
    mapping.update({" ": "madrid.jpg", ".": "los angles.jpg"})
    return mapping
# Step 1.4: select photo files according to the hidden message, rename it if it is used for more than once, move them to the folder.
def select_files_according_to_hidden_message(folder_path, hidden_message):
    """Copy one image per character of hidden_message into folder_path.

    Characters are mapped to image files through dict_28_keyvaluepairs().
    The first use of an image keeps its original name; a repeated character
    is copied under a uniquified name (the loop index inserted before the
    ".jpg" extension) so earlier copies are not overwritten.

    Returns the list of file names, in message order, now in folder_path.
    NOTE: characters outside A-Z, space and '.' raise KeyError.
    """
    dict_28 = dict_28_keyvaluepairs()
    hidden_message_file_names = []
    i = 0
    for each_item in hidden_message:
        file_name = dict_28[each_item]
        if file_name not in hidden_message_file_names:
            # First occurrence: copy under the original name.
            hidden_message_file_names.append(file_name)
            shutil.copy("/Users/ABC/Temp/alphabet/" + file_name, folder_path)
        else:
            # Repeat: insert the running index before the extension.
            file_name_index = file_name[:-4] + str(i) + file_name[-4:]
            hidden_message_file_names.append(file_name_index)
            shutil.copy("/Users/ABC/Temp/alphabet/" + file_name,
                        folder_path + "/" + file_name_index)
        i += 1
    return hidden_message_file_names
# Step 2: rename all files so that they will be displayed in the desired order, which in our case is "LOVE YOU PYTHON"
def rearrange_files_according_to_hidden_message(folder_path, hidden_message):
    """Copy folder_path to <folder_path>_copy and rename its files so an
    alphabetical listing shows them in message order.

    Fix: the index prefix is now zero-padded. A plain str(i) prefix sorts
    "10x" before "2x" lexicographically, which scrambled every message
    longer than ten characters.
    """
    # Fill folder_path with the per-character images (shuffled-looking).
    file_names = select_files_according_to_hidden_message(folder_path, hidden_message)
    copy_folder_path = folder_path + "_copy"
    # Work from the parent directory of both folders.
    os.chdir("/Users/ABC/Temp")
    shutil.copytree(folder_path, copy_folder_path)
    os.chdir(copy_folder_path)
    # Width of the padded index, e.g. 2 when there are 10-99 files.
    width = len(str(len(file_names)))
    for i, file_name in enumerate(file_names):
        os.rename(file_name, str(i).zfill(width) + file_name)
# Step 3: remove numbers from file names, in order to show the hidden message.
def main():
    """Interactively build the secret-message folders."""
    os.chdir('/Users/ABC/Temp')
    folder_path = create_folder()
    hidden_message = user_input_hidden_message()
    rearrange_files_according_to_hidden_message(folder_path, hidden_message)


if __name__ == "__main__":
    # Guarding the call lets this module be imported without side effects
    # (previously main() ran unconditionally at import time).
    main()
|
<filename>main.py
import string
from pymongo import MongoClient
from deep_translator import GoogleTranslator
import pysrt
from kivy.core.window import Window
from kivy.properties import StringProperty, ColorProperty
from kivy.utils import rgba
from kivymd.app import MDApp
from kivymd.theming import ThemableBehavior
from kivymd.toast import toast
from kivymd.uix.filemanager import MDFileManager
from kivymd.uix.list import OneLineIconListItem, MDList
from kivymd.uix.card import MDCardSwipe
from kivymd.uix.behaviors import TouchBehavior
import os
import dns
# Route DNS through Google's public resolver — presumably to make the
# MongoDB SRV lookup work on restrictive networks; TODO confirm.
res = dns.resolver.Resolver(configure=False)
res.nameservers = ["8.8.8.8"]
dns.resolver.default_resolver = res
# Connection string comes from the environment (no hard-coded credentials).
client = MongoClient(os.environ.get("MONGODB_URI"))
db = client["learn-with-srt"]
my_collection = db["users"]
# Navigation drawer entries: icon name -> label.
icons_item = {
    "folder": "My files",
    "star": "Starred",
    "history": "Recent",
    "upload": "Upload",
}
# Words collected from the loaded .srt file that are still unlearned.
srt_word_list = set([])
# Words the user already knows (file contents + per-user DB words merged in).
known_words = []
class Processes:
    """Helper operations: parse .srt files, track known words, translate."""

    def read_srt_file(self, path):
        """Collect unknown, non-numeric words from the subtitle file at path.

        Words are lower-cased and stripped of punctuation; anything already
        known or containing a digit is skipped. Results are added to the
        module-level srt_word_list set.
        """
        self.read_known_words()
        # Snapshot into a set: membership checks become O(1) instead of
        # scanning the (growing) known_words list for every subtitle word.
        known = set(known_words)
        # Hoist the translation table out of the loops.
        strip_punct = str.maketrans("", "", string.punctuation)
        subtitles = pysrt.open(path)
        for sub in subtitles:
            line = sub.text.replace("\n", " ")
            for word in line.split(" "):
                temp_word = word.translate(strip_punct).lower()
                # Skip empty leftovers (punctuation-only tokens), known
                # words, and anything containing a digit.
                if temp_word and temp_word not in known and not any(
                    map(str.isdigit, temp_word)
                ):
                    srt_word_list.add(temp_word)

    def read_known_words(self):
        """Append the contents of known_words.txt to the global list."""
        global known_words
        with open("known_words.txt") as txt_file:
            temp_words = txt_file.readlines()
        temp_words = [word.lower().replace("\n", "") for word in temp_words]
        known_words.extend(temp_words)

    def translate_word(self, word):
        """Translate word to Turkish via Google Translate (network call)."""
        return GoogleTranslator(source="auto", target="tr").translate(word)
class ItemDrawer(OneLineIconListItem):
    """A single navigation-drawer row (icon + label)."""
    icon = StringProperty()  # icon name shown next to the label
    text_color = ColorProperty()
    theme_text_color = StringProperty()

    def on_release(self):
        # Drawer actions are not implemented yet.
        toast("Coming Soon")
class DrawerList(ThemableBehavior, MDList):
    """Themed container list holding the ItemDrawer rows."""
    pass
class SwipeToLearnWord(MDCardSwipe, TouchBehavior):
    """Swipeable word card: tapping toggles original/translated text."""
    text = StringProperty()             # text currently shown on the card
    actual_word = StringProperty()      # original (source-language) word
    translated_word = StringProperty()  # cached translation, fetched lazily

    def translate_pressed_text(self):
        """Toggle the card between the original word and its translation."""
        # Fetch (and cache) the translation only on first use.
        if self.translated_word == "":
            self.translated_word = Processes().translate_word(self.actual_word)
        if self.text == self.actual_word:
            self.text = self.translated_word
        else:
            self.text = self.actual_word

    def on_long_touch(self, touch, *args):
        # Long-press behaviour intentionally not implemented.
        pass
class SrtApp(MDApp):
    """Main app: MongoDB-backed login/register plus a paged vocabulary
    list built from words extracted out of .srt subtitle files."""
    background_color = rgba("#2b2b31")
    # Slice bounds into srt_word_list for the page currently displayed
    # (five words per page).
    start_point = 0
    end_point = 5
    current_user = StringProperty()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.current_theme = "dark"
        # Listen for the system back button / Escape key (see events()).
        Window.bind(on_keyboard=self.events)
        self.manager_open = False
        self.file_manager = MDFileManager(
            exit_manager=self.exit_manager,
            select_path=self.select_path,
        )

    def build(self):
        """Set the app icon; the widget tree itself comes from the kv file."""
        self.icon = "icon.png"

    def login(self):
        """Check the typed credentials against the users collection."""
        email = self.root.ids.email.text
        password = self.root.ids.password.text
        is_login = False
        # The login field accepts either an e-mail address or a username.
        check = my_collection.find_one({"email": email})
        check2 = my_collection.find_one({"username": email})
        user = check if check is not None else check2
        if email == "" or password == "":
            toast("Please type your email and password")
        else:
            if user is not None:
                if user["password"] == password:
                    self.root.ids.screen_manager.current = "app"
                    self.current_user = user["username"]
                    # Merge the words this user has already learned.
                    global known_words
                    known_words.extend(user["words"])
                    is_login = True
                else:
                    toast("Email or password incorrect!")
            else:
                toast("Email or password incorrect!")
            # NOTE(review): on a failed login this toast fires a second time
            # (once above, once here) — confirm whether that is intentional.
            if not is_login:
                toast("Email or password incorrect!")
                self.root.ids.email.text = ""
                self.root.ids.password.text = ""

    def register(self):
        """Create a new user document if username and e-mail are both free."""
        name = self.root.ids.r_name.text
        username = self.root.ids.r_username.text
        email = self.root.ids.r_email.text
        password = self.root.ids.r_password.text
        check = my_collection.find_one({"username": username})
        check2 = my_collection.find_one({"email": email})
        if check is None and check2 is None:
            self.root.ids.screen_manager.current = "login"
            toast("Now, you can login the app")
            new_user = {
                "name": name,
                "username": username,
                "email": email,
                "password": password,
                "words": [],
            }
            my_collection.insert_one(new_user)
        else:
            if check is not None:
                toast("Username has already been taken")
                self.root.ids.r_username.text = ""
            else:
                toast("Email has already been taken")
                self.root.ids.r_email.text = ""

    def change_screen(self):
        """Jump from the login screen to the registration screen."""
        self.root.ids.screen_manager.current = "register"

    def file_manager_open(self):
        """Open the file chooser rooted at the app's data directory."""
        self.file_manager.show(self.user_data_dir)  # output manager to the screen
        self.manager_open = True

    def select_path(self, path):
        """Called by the file manager with the chosen .srt path."""
        self.exit_manager()
        Processes().read_srt_file(path)
        self.update_word_list()

    def exit_manager(self, *args):
        """Close the file manager overlay."""
        self.manager_open = False
        self.file_manager.close()

    def events(self, instance, keyboard, keycode, text, modifiers):
        """Called when buttons are pressed on the mobile device."""
        # 1001 / 27 are the Android back button and the Escape key.
        if keyboard in (1001, 27):
            if self.manager_open:
                self.file_manager.back()
        return True

    def on_start(self):
        """Populate the navigation drawer and show the first word page."""
        # add options to the list
        for icon_name in icons_item.keys():
            self.root.ids.md_list.add_widget(
                ItemDrawer(
                    icon=icon_name,
                    text=icons_item[icon_name],
                    theme_text_color="Custom",
                    text_color=rgba("#f4f4f4"),
                )
            )
        self.update_word_list()

    def update_word_list(self):
        """Rebuild the visible cards from the current page of srt_word_list."""
        self.root.ids.word_list.clear_widgets()
        for text in list(srt_word_list)[self.start_point : self.end_point]:
            self.root.ids.word_list.add_widget(
                SwipeToLearnWord(text=text, actual_word=text)
            )

    def on_swipe_complete(self, instance):
        """Mark the swiped word as learned, locally and (if new) in the DB."""
        self.root.ids.word_list.remove_widget(instance)
        global srt_word_list
        srt_word_list.remove(instance.actual_word)
        if instance.actual_word not in known_words:
            known_words.append(instance.actual_word)
            user = my_collection.find_one({"username": self.current_user})
            if instance.actual_word not in user["words"]:
                my_collection.update_one(
                    {"username": self.current_user},
                    {"$push": {"words": instance.actual_word}},
                )

    @staticmethod
    def btn_learn_words():
        # Planned feature, not implemented yet.
        toast("Coming Soon")

    def btn_previous_page(self):
        """Shift the visible five-word window back (clamped at the start)."""
        if self.start_point <= 5:
            self.start_point = 0
            if len(srt_word_list) >= 5:
                self.end_point = self.start_point + 5
            else:
                self.end_point = len(srt_word_list)
        else:
            self.start_point -= 5
            if len(srt_word_list) >= 5:
                self.end_point = self.start_point + 5
            else:
                self.end_point = len(srt_word_list)
        self.update_word_list()

    def btn_next_page(self):
        """Shift the visible five-word window forward (clamped at the end)."""
        if self.end_point >= len(srt_word_list):
            self.end_point = len(srt_word_list)
            if len(srt_word_list) >= 5:
                self.start_point = self.end_point - 5
            else:
                self.start_point = 0
        else:
            self.end_point += 5
            if len(srt_word_list) >= 5:
                self.start_point = self.end_point - 5
            else:
                self.start_point = 0
        self.update_word_list()

    def btn_yes(self):
        # Planned feature, not implemented yet.
        toast("Coming Soon")

    def btn_no(self):
        # Planned feature, not implemented yet.
        toast("Coming Soon")

    def switch_theme(self):
        """Toggle the stored theme flag (visual switch not implemented)."""
        if self.current_theme == "dark":
            toast("Coming Soon")
            self.current_theme = "light"
        else:
            toast("Coming Soon")
            self.current_theme = "dark"
# Start the Kivy event loop only when executed as a script.
if __name__ == "__main__":
    SrtApp().run()
|
<filename>sbol2/location.py
from .identified import Identified
from .constants import *
from .property import IntProperty
from .property import OwnedObject
from .property import ReferencedObject
from .property import URIProperty
from rdflib import URIRef
class Location(Identified):
    """The Location class specifies the strand orientation of a Component."""

    def __init__(self, uri=URIRef('example'), orientation=SBOL_ORIENTATION_INLINE,
                 *, type_uri=SBOL_LOCATION, version=VERSION_STRING):
        """Base class for Range, Cut and GenericLocation.

        :param uri: identity for this object
        :param orientation: orientation URI, inline by default
        :param type_uri: RDF type, supplied by the concrete subclass
        :param version: SBOL version string
        """
        super().__init__(type_uri=type_uri, uri=uri, version=version)
        # orientation is required ('1','1'); the sequence reference is
        # optional ('0','1').
        self.orientation = URIProperty(self, SBOL_ORIENTATION,
                                       '1', '1', [], orientation)
        self.sequence = ReferencedObject(self, SBOL_SEQUENCE_PROPERTY,
                                         SBOL_SEQUENCE, '0', '1', [])
class Range(Location):
    """A Range object specifies a region via discrete,
    inclusive start and end positions that correspond to indices
    for characters in the elements String of a Sequence.
    Note that the index of the first location is 1,
    as is typical practice in biology, rather than 0,
    as is typical practice in computer science."""

    def __init__(self, uri=URIRef('example'), start=1, end=2,
                 *, type_uri=SBOL_RANGE, version=VERSION_STRING):
        super().__init__(uri=uri, type_uri=type_uri, version=version)
        # Optional ('0','1') integer properties holding 1-based coordinates.
        self.start = IntProperty(self, SBOL_START, '0', '1', None, start)
        self.end = IntProperty(self, SBOL_END, '0', '1', None, end)

    def precedes(self, comparand):
        """Gap to comparand when this range ends before it starts, else 0."""
        if self.end >= comparand.start:
            return 0
        return comparand.start - self.end

    def follows(self, comparand):
        """Negative offset when this range starts after comparand ends, else 0."""
        if self.start <= comparand.end:
            return 0
        return comparand.end - self.start

    def adjoins(self, comparand):
        """1 when the ranges touch end-to-start with no gap, else 0."""
        touching = (comparand.end + 1 == self.start
                    or self.end + 1 == comparand.start)
        return 1 if touching else 0

    def contains(self, comparand):
        """comparand's length when it lies entirely inside this range, else 0."""
        inside = self.start <= comparand.start and comparand.end <= self.end
        return comparand.length() if inside else 0

    def overlaps(self, comparand):
        """Number of positions shared by the two ranges.

        Identical ranges deliberately report 0.
        """
        if self.start == comparand.start and self.end == comparand.end:
            return 0
        if (self.start < comparand.start and self.end < comparand.end
                and self.end >= comparand.start):
            # This range hangs off the left side of comparand.
            return self.end - comparand.start + 1
        if (self.start > comparand.start and self.end > comparand.end
                and self.start <= comparand.end):
            # This range hangs off the right side of comparand.
            return comparand.end - self.start + 1
        if comparand.contains(self):
            # Fully nested: the overlap is this range's own length.
            return comparand.contains(self)
        return 0

    def length(self):
        """Number of positions covered (inclusive coordinates)."""
        return self.end - self.start + 1
class Cut(Location):
    """The Cut class specifies a location between
    two coordinates of a Sequence's elements."""

    def __init__(self, uri=URIRef('example'), at=0,
                 *, type_uri=SBOL_CUT, version=VERSION_STRING):
        """
        :param uri: identity for this object
        :param at: integer coordinate of the cut (required property)
        """
        super().__init__(uri=uri, type_uri=type_uri, version=version)
        # 'at' is a required ('1','1') integer property.
        self.at = IntProperty(self, SBOL_AT, '1', '1', [], at)
class GenericLocation(Location):
    """the GenericLocation class is included as a starting point
    for specifying regions on Sequence objects with
    encoding properties other than IUPAC and potentially nonlinear structure.
    This class can also be used to set the orientation of a SequenceAnnotation
    and any associated Component when their parent ComponentDefinition is
    a partial design that lacks a Sequence."""

    def __init__(self, uri=URIRef('example'),
                 *, type_uri=SBOL_GENERIC_LOCATION, version=VERSION_STRING):
        # No properties beyond those inherited from Location.
        super().__init__(uri=uri, type_uri=type_uri, version=version)
class OwnedLocation(OwnedObject):
    """Container property owning Location objects (Range, Cut, GenericLocation)."""

    def __init__(self, property_owner, sbol_uri, lower_bound, upper_bound,
                 validation_rules=None, first_object=None):
        """Initialize a container and optionally put the first object in it.

        If validation rules are specified, they will be checked upon
        initialization. Contained objects are Location instances; use the
        create* helpers to add concrete subclasses.
        """
        super().__init__(property_owner, sbol_uri, Location, lower_bound, upper_bound,
                         validation_rules, first_object)

    def createRange(self, uri=URIRef('example')):
        """Create a new Range owned by this container and return it."""
        return self.create(uri, Range)

    def createCut(self, uri=URIRef('example')):
        """Create a new Cut owned by this container and return it."""
        return self.create(uri, Cut)

    def createGenericLocation(self, uri=URIRef('example')):
        """Create a new GenericLocation owned by this container and return it."""
        return self.create(uri, GenericLocation)

    def getRange(self, uri=''):
        """Return the contained Range identified by uri.

        :raises TypeError: if the found object is not a Range.
        """
        # Renamed local (was `range`) so the builtin is not shadowed.
        location = self.get(uri)
        if isinstance(location, Range):
            return location
        raise TypeError('Found object is not of type Range')

    def getCut(self, uri=''):
        """Return the contained Cut identified by uri.

        :raises TypeError: if the found object is not a Cut.
        """
        location = self.get(uri)
        if isinstance(location, Cut):
            return location
        raise TypeError('Found object is not of type Cut')
|
<reponame>ishine/DeepPhonemizer<filename>dp/model/predictor.py
from typing import Dict, List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
from dp import Prediction
from dp.model.model import load_checkpoint
from dp.model.utils import _get_len_util_stop
from dp.preprocessing.text import Preprocessor
from dp.preprocessing.utils import _batchify, _product
class Predictor:
    """ Performs model predictions on a batch of inputs. """

    def __init__(self,
                 model: torch.nn.Module,
                 preprocessor: Preprocessor) -> None:
        """
        Initializes a Predictor object with a trained transformer model and a preprocessor.

        Args:
            model (Model): Trained transformer model.
            preprocessor (Preprocessor): Preprocessor corresponding to the model configuration.
        """
        self.model = model
        self.text_tokenizer = preprocessor.text_tokenizer
        self.phoneme_tokenizer = preprocessor.phoneme_tokenizer

    def __call__(self,
                 words: List[str],
                 lang: str,
                 batch_size: int = 8) -> List[Prediction]:
        """
        Predicts phonemes for a list of words.

        Args:
          words (list): List of words to predict.
          lang (str): Language of texts.
          batch_size (int): Size of batch for model input to speed up inference.

        Returns:
          List[Prediction]: A list of result objects containing (word, phonemes, phoneme_tokens, token_probs, confidence)
        """
        predictions = dict()
        valid_texts = set()

        # handle words that result in an empty input to the model
        for word in words:
            input = self.text_tokenizer(sentence=word, language=lang)
            decoded = self.text_tokenizer.decode(
                sequence=input, remove_special_tokens=True)
            if len(decoded) == 0:
                # Nothing survives tokenization: map straight to empty output.
                predictions[word] = ([], [])
            else:
                valid_texts.add(word)

        # Sort by length so each batch holds similarly sized inputs,
        # keeping padding overhead low.
        valid_texts = sorted(list(valid_texts), key=lambda x: len(x))
        batch_pred = self._predict_batch(texts=valid_texts, batch_size=batch_size,
                                         language=lang)
        predictions.update(batch_pred)

        # Re-assemble results in the caller's original word order.
        output = []
        for word in words:
            tokens, probs = predictions[word]
            out_phons = self.phoneme_tokenizer.decode(
                sequence=tokens, remove_special_tokens=True)
            out_phons_tokens = self.phoneme_tokenizer.decode(
                sequence=tokens, remove_special_tokens=False)
            output.append(Prediction(word=word,
                                     phonemes=''.join(out_phons),
                                     phoneme_tokens=out_phons_tokens,
                                     confidence=_product(probs),
                                     token_probs=probs))

        return output

    def _predict_batch(self,
                       texts: List[str],
                       batch_size: int,
                       language: str) \
            -> Dict[str, Tuple[List[int], List[float]]]:
        """
        Returns dictionary with key = word and val = Tuple of (phoneme tokens, phoneme probs)
        """
        predictions = dict()
        text_batches = _batchify(texts, batch_size)
        for text_batch in text_batches:
            input_batch, lens_batch = [], []
            for text in text_batch:
                input = self.text_tokenizer(text, language)
                input_batch.append(torch.tensor(input))
                lens_batch.append(torch.tensor(len(input)))

            # Right-pad every sequence to the longest one in the batch.
            input_batch = pad_sequence(sequences=input_batch,
                                       batch_first=True, padding_value=0)
            lens_batch = torch.stack(lens_batch)
            start_indx = self.phoneme_tokenizer._get_start_index(language)
            start_inds = torch.tensor([start_indx]*input_batch.size(0)).to(input_batch.device)
            batch = {
                'text': input_batch,
                'text_len': lens_batch,
                'start_index': start_inds
            }
            # Inference only — no gradients needed.
            with torch.no_grad():
                output_batch, probs_batch = self.model.generate(batch)
            output_batch, probs_batch = output_batch.cpu(), probs_batch.cpu()

            for text, output, probs in zip(text_batch, output_batch, probs_batch):
                # Truncate each predicted sequence at its end token.
                seq_len = _get_len_util_stop(output, self.phoneme_tokenizer.end_index)
                predictions[text] = (output[:seq_len].tolist(), probs[:seq_len].tolist())

        return predictions

    @classmethod
    def from_checkpoint(cls, checkpoint_path: str, device='cpu') -> 'Predictor':
        """Initializes the predictor from a checkpoint (.pt file).

        Args:
          checkpoint_path (str): Path to the checkpoint file (.pt).
          device (str): Device to load the model on ('cpu' or 'cuda'). (Default value = 'cpu').

        Returns:
          Predictor: Predictor object.
        """
        model, checkpoint = load_checkpoint(checkpoint_path, device=device)
        preprocessor = checkpoint['preprocessor']
        return Predictor(model=model, preprocessor=preprocessor)
|
# -*- coding: utf-8 -*-
import unittest
from smsapi.exception import EndpointException, SendException
from smsapi.models import ResultCollection, RemoveMessageResult, InvalidNumber
from smsapi.sms.api import flash_force_params, fast_force_params
from tests import SmsApiTestCase
from tests.unit.doubles import api_response_fixture
class SmsApiTest(SmsApiTestCase):
    """Tests for the sms endpoint: sending variants, scheduled-message
    removal, and error propagation for invalid input."""

    def test_send_sms(self):
        number = '48100200300'
        args = {'to': number}
        result = self.client.sms.send(**args)
        self.assertSendResultForNumberEquals(number, result)
        self.assertParamsForwardedToRequestEquals(args)

    @api_response_fixture('send')
    def test_send_sms_with_custom_sender(self):
        number = '48100200300'
        any_sender_name = 'any-sender-name'
        result = self.client.sms.send(to=number, from_=any_sender_name)
        self.assertSendResultForNumberEquals(number, result)
        # `from_` must be forwarded as the API's `from` parameter.
        self.assertParamsForwardedToRequestEquals({'to': number, 'from': any_sender_name})

    @api_response_fixture('send_to_many_recipients')
    def test_send_sms_to_many_numbers(self):
        number_1, number_2 = '48100200300', '48100200301'
        result = self.client.sms.send(to=[number_1, number_2])
        self.assertSendResultForNumberEquals([number_1, number_2], result)
        # Recipient lists are serialized as a comma-separated string.
        self.assertParamsForwardedToRequestEquals({'to': '%s,%s' % (number_1, number_2)})

    @api_response_fixture('send_to_invalid_number')
    def test_send_sms_to_invalid_number(self):
        invalid_number = '48100200300'
        exception = None
        try:
            self.client.sms.send(to=invalid_number)
        except SendException as e:
            exception = e
        expected_exception = create_sms_exception_for_number(invalid_number)
        self.assertEqual(expected_exception, exception)

    def test_send_fast(self):
        number = '48100200300'
        args = {'to': number}
        result = self.client.sms.send_fast(**args)
        self.assertSendResultForNumberEquals(number, result)
        self.assertParamsForwardedToRequestEquals(args, fast_force_params)

    def test_send_flash(self):
        number = '48100200300'
        args = {'to': number}
        result = self.client.sms.send_flash(**args)
        self.assertSendResultForNumberEquals(number, result)
        self.assertParamsForwardedToRequestEquals(args, flash_force_params)

    def test_remove_scheduled_sms(self):
        sms_id = '1'
        args = {'id': sms_id}
        result = self.client.sms.remove_scheduled(id=sms_id)
        expected_result = ResultCollection(1, [RemoveMessageResult(id='1')])
        self.assertParamsForwardedToRequestEquals(args)
        self.assertEqual(expected_result, result)

    @api_response_fixture('remove_not_exists_sms')
    def test_remove_not_exists_sms(self):
        exception = None
        try:
            self.client.sms.remove_scheduled(id='1')
        except EndpointException as e:
            exception = e
        expected_exception = EndpointException(u'Not exists ID message', 301)
        self.assertEqual(expected_exception, exception)

    @api_response_fixture('send')
    def test_send_personalized_sms(self):
        # Per-recipient parameter lists are joined with '|'.
        args = {'to': '48100200300', 'message': 'some message [%1]', 'param1': ['p1', 'p2']}
        self.client.sms.send(**args)
        self.assertParamsForwardedToRequestEquals(args, {'param1': 'p1|p2'})

    @api_response_fixture('send')
    def test_send_sms_with_own_identifier(self):
        # Custom identifier lists are joined with '|' as well.
        args = {'to': '48100200300', 'idx': ['id1', 'id2']}
        self.client.sms.send(**args)
        self.assertParamsForwardedToRequestEquals(args, {'idx': 'id1|id2'})

    @api_response_fixture('send')
    def test_send_sms_to_group(self):
        self.client.sms.send_to_group(group='any')
        self.assertParamsForwardedToRequestEquals({'group': 'any'})

    def test_send_sms_as_utf8(self):
        number = '48100200300'
        args = {'to': number, 'encoding': 'utf-8'}
        result = self.client.sms.send(**args)
        self.assertSendResultForNumberEquals(number, result)
        self.assertParamsForwardedToRequestEquals(args)

    def test_send_test_sms(self):
        number = '48100200300'
        args = {'to': number, 'test': '1'}
        self.client.sms.send(**args)
        self.assertParamsForwardedToRequestEquals(args)
def create_sms_exception_for_number(number):
    """Build the SendException expected when `number` is rejected by the API."""
    error = SendException(u'No correct phone numbers', 13)
    error.add_invalid_number(InvalidNumber(number, number, u'Invalid phone number'))
    return error
def suite():
    """Return this module's tests as a TestSuite.

    unittest.makeSuite was deprecated in Python 3.11 and removed in 3.13;
    TestLoader.loadTestsFromTestCase is the supported replacement.
    """
    return unittest.TestLoader().loadTestsFromTestCase(SmsApiTest)
|
###################################
# 6.00.2x Problem Set 1: Space Cows
from ps1_partition import get_partitions
import time
# ===============================
# Part A: Transporting Space Cows
# ===============================
def load_cows(filename):
    """
    Read the contents of the given file. Assumes the file contents contain
    data in the form of comma-separated cow name, weight pairs, and return a
    dictionary containing cow names as keys and corresponding weights as
    values.

    Parameters:
    filename - the name of the data file as a string

    Returns:
    a dictionary of cow name (string), weight (int) pairs
    """
    cow_dict = dict()
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank / trailing lines
            line_data = line.split(',')
            cow_dict[line_data[0]] = int(line_data[1])
    return cow_dict
# Problem 1
def greedy_cow_transport(cows, limit=10):
    """
    Uses a greedy heuristic to determine an allocation of cows that attempts to
    minimize the number of spaceship trips needed to transport all the cows.

    The returned allocation of cows may or may not be optimal.
    The greedy heuristic should follow the following method:

    1. As long as the current trip can fit another cow, add the largest cow
    that will fit to the trip
    2. Once the trip is full, begin a new trip to transport the remaining cows

    Does not mutate the given dictionary of cows.

    Parameters:
    cows - a dictionary of name (string), weight (int) pairs
    limit - weight limit of the spaceship (an int)

    Returns:
    A list of lists, with each inner list containing the names of cows
    transported on a particular trip and the overall list containing all the
    trips
    """
    # Heaviest-first greedy, replacing the previous implementation that
    # re-scanned already-shipped weights and could over-fill a trip when
    # several cows shared the same weight (the limit was only checked once
    # per weight value, not per cow).
    remaining = sorted(cows.items(), key=lambda item: item[1], reverse=True)
    trips = []
    while remaining:
        trip = []
        total = 0
        leftover = []
        # One pass: greedily take every cow that still fits this trip.
        for name, weight in remaining:
            if total + weight <= limit:
                trip.append(name)
                total += weight
            else:
                leftover.append((name, weight))
        if not trip:
            # Every remaining cow exceeds the limit by itself; ship the
            # heaviest alone so the loop always makes progress.
            name, _ = leftover.pop(0)
            trip.append(name)
        trips.append(trip)
        remaining = leftover
    return trips
# Problem 2
def brute_force_cow_transport(cows, limit=10):
    """
    Find an allocation of cows that minimizes the number of spaceship trips
    by brute force: enumerate every partition of the herd (via
    get_partitions) and return the first one in which every trip respects
    the weight limit. Partitions are generated coarsest-first, so the first
    fully-valid one uses the fewest trips.

    Does not mutate the given dictionary of cows.

    Parameters:
    cows - a dictionary of name (string), weight (int) pairs
    limit - weight limit of the spaceship (an int)

    Returns:
    A list of lists, each inner list holding the cow names of one trip;
    None if no valid partition exists (some cow exceeds the limit).
    """
    valid_ships = []
    for partition in get_partitions(cows.keys()):
        valid_ships.clear()
        # Keep only the ships of this partition that fit under the limit.
        for ship in partition:
            ship_weight = sum(cows.get(name) for name in ship)
            if ship_weight <= limit:
                valid_ships.append(ship)
        # If nothing was filtered out, every trip is legal: done.
        if partition == valid_ships:
            return valid_ships
# Problem 3
def compare_cow_transport_algorithms():
    """
    Load ps1_cow_data.txt and run both greedy_cow_transport and
    brute_force_cow_transport on it with their default weight limit (10),
    printing each result and how long each call took in seconds.

    Returns:
    Does not return anything.
    """
    cows = load_cows("ps1_cow_data.txt")
    # Greedy algorithm, timed with a wall clock.
    start = time.time()
    print(greedy_cow_transport(cows))
    print("Speed - Greedy: {0:.7f}\n\n".format(time.time() - start))
    # Brute-force algorithm, timed the same way.
    start = time.time()
    print(brute_force_cow_transport(cows))
    print("Speed - Brutef: {0:.7f}\n\n".format(time.time() - start))
"""
Here is some test data for you to see the results of your algorithms with.
Do not submit this along with any of your answers. Uncomment the last two
lines to print the result of your problem.
"""
# cows = load_cows("ps1_cow_data.txt")
# limit = 10
# print(cows)
# print(greedy_cow_transport(cows, limit))
# print(brute_force_cow_transport(cows, limit))
'''
print("Problem 1 | Test 1:")
start = time.time()
cows = load_cows("ps1_cow_dataE1.txt")
limit = 100
print(greedy_cow_transport(cows, limit))
end = time.time()
print("Speed: {0}\n\n".format(end-start))
print("Problem 1 | Test 2:")
start = time.time()
cows = load_cows("ps1_cow_dataE2.txt")
limit = 100
print(greedy_cow_transport(cows, limit))
end = time.time()
print("Speed: {0}\n\n".format(end-start))
print("Problem 1 | Test 3:")
start = time.time()
cows = load_cows("ps1_cow_dataE3.txt")
limit = 120
print(greedy_cow_transport(cows, limit))
end = time.time()
print("Speed: {0}\n\n".format(end-start))
print("Problem 2 | Test 1:")
start = time.time()
# print("Correct: [['MooMoo', 'Horns', '<NAME>'], ['Milkshake', 'Lotus', 'Boo']]")
cows = load_cows("ps1_cow_dataE4.txt")
limit = 100
print(brute_force_cow_transport(cows, limit))
end = time.time()
print("Speed: {0}\n\n".format(end-start))
print("Problem 2 | Test 2:")
start = time.time()
# print("Correct: [['Buttercup'], ['Daisy'], ['Betsy']]")
cows = load_cows("ps1_cow_dataE5.txt")
limit = 75
print(brute_force_cow_transport(cows, limit))
end = time.time()
print("Speed: {0}\n\n".format(end-start))
print("Problem 2 | Test 3:")
start = time.time()
# print("Correct: [['Starlight', 'Betsy', 'Luna', 'Buttercup']]")
cows = load_cows("ps1_cow_dataE6.txt")
limit = 145
print(brute_force_cow_transport(cows, limit))
end = time.time()
print("Speed: {0}\n\n".format(end-start))
'''
compare_cow_transport_algorithms()
|
# Convenience functions to perform Image Magicks
from subprocess import run, PIPE
from glob import glob
from neuralstyle.utils import filename
def convert(origin, dest):
    """Transform the format of an image, writing a new file in the new format.

    ImageMagick infers the target format from the extension of `dest`.

    Raises:
        ValueError: if `origin` is a multilayer image.
        CalledProcessError: if the conversion command fails.
    """
    if ismultilayer(origin):
        raise ValueError("Cannot operate with multilayer images")
    # check=True for consistency with the other wrappers in this module:
    # fail loudly instead of silently producing no output file.
    # NOTE(review): the command is built by string interpolation with
    # shell=True, like the rest of this module — paths must be trusted.
    run("convert %s %s" % (origin, dest), shell=True, check=True)
def shape(imfile):
    """Return the dimensions of an image file as a list of ints [w, h]."""
    # ImageMagick prints "<width> <height>" (one pair per layer) to stdout.
    proc = run("convert " + imfile + ' -format "%w %h" info:', shell=True, check=True, stdout=PIPE)
    tokens = proc.stdout.decode("utf-8").split(" ")
    return [int(token) for token in tokens]
def resize(imfile, newsize):
    """Resize an image file in place.

    A single int rescales to that width while keeping the aspect ratio;
    a two-element tuple/list forces the image to exactly those dimensions
    (ImageMagick's "!" geometry flag).
    """
    if isinstance(newsize, int):
        geometry = str(newsize)
    else:
        geometry = str(newsize[0]) + "x" + str(newsize[1]) + "!"
    run("convert " + imfile + " -resize " + geometry + " " + imfile, shell=True)
def assertshape(imfile, shp):
    """Ensure an image file has the given shape, resizing it if it does not."""
    if shape(imfile) == shp:
        return
    resize(imfile, shp)
def choptiles(imfile, xtiles, ytiles, overlap, outname):
    """Chop an image into an xtiles-by-ytiles grid of overlapping tiles.

    Tiles are written as <outname>_<index>.png; returns the tile file names
    ordered by their numeric suffix.
    """
    command = 'convert {0} -crop {1}x{2}+{3}+{4}@ +repage +adjoin {5}_%d.png'.format(
        imfile, xtiles, ytiles, overlap, overlap, outname)
    run(command, shell=True, check=True)
    tiles = glob(outname + "_*.png")
    return sorted(tiles, key=lambda tile: int(filename(tile).split("_")[-1]))
def feather(imfile, outname):
    """Produce a feathered version of an image (borders faded to transparent).

    Note the output format must allow for an alpha channel.
    """
    # A Euclidean distance morphology on the alpha channel fades pixels near
    # the border; the backslash keeps "!" away from shell history expansion.
    command = ('convert %s -alpha set -virtual-pixel transparent -channel A '
               '-morphology Distance Euclidean:1,50\\! +channel %s' % (imfile, outname))
    run(command, shell=True, check=True)
def smush(tiles, xtiles, ytiles, smushw, smushh, outname):
    """Smush previously chopped tiles back together into a single image.

    `tiles` must be row-major and contain exactly xtiles*ytiles entries;
    smushw/smushh are the horizontal/vertical smush offsets.

    Raises ValueError when the tile count does not match the geometry.
    """
    if len(tiles) != xtiles * ytiles:
        raise ValueError("Geometry (%d,%d) is incompatible with given number of tiles (%d)"
                         % (xtiles, ytiles, len(tiles)))
    command = "convert -background transparent "
    # Each row is smushed horizontally inside a '(' ... ')' group...
    for row in range(ytiles):
        rowtiles = tiles[row * xtiles:(row + 1) * xtiles]
        rowcmd = "".join(" " + tile for tile in rowtiles)
        rowcmd += " +smush -%d -background transparent" % smushw
        command += " '(' %s ')'" % rowcmd
    # ...then all rows are smushed vertically into the output file.
    command += " -background none -background transparent -smush -%s %s" % (smushh, outname)
    run(command, shell=True, check=True)
def composite(imfiles, outname):
    """Blend several image files together into outname."""
    parts = ["composite"] + [imfile for imfile in imfiles] + [outname]
    run(" ".join(parts), shell=True, check=True)
def extractalpha(imfile, rgbfile, alphafile):
    """Split an image file into separate RGB (rgbfile) and alpha (alphafile) images.

    Raises ValueError for multilayer inputs.
    """
    if ismultilayer(imfile):
        raise ValueError("Cannot operate with multilayer images")
    # Alpha channel first, then the RGB channels with alpha switched off.
    run("convert -alpha extract {0} {1}".format(imfile, alphafile), shell=True, check=True)
    run("convert -alpha off {0} {1}".format(imfile, rgbfile), shell=True, check=True)
def mergealpha(rgbfile, alphafile, resfile):
    """Apply an alpha-channel image to an RGB image, writing the result to resfile.

    Raises ValueError when the two inputs differ in size.
    """
    rgbshape = shape(rgbfile)
    alphashape = shape(alphafile)
    if rgbshape != alphashape:
        raise ValueError("Cant merge RGB and alpha images of differing sizes: %s vs %s" %
                         (str(rgbshape), str(alphashape)))
    command = "convert %s %s -compose CopyOpacity -composite %s" % (rgbfile, alphafile, resfile)
    run(command, shell=True, check=True)
def equalimages(imfile1, imfile2):
    """Returns True if two image files have equal content, False if not"""
    # If sizes differ, the images are not equal
    if shape(imfile1) != shape(imfile2):
        return False
    # Run imagemagick comparison command
    # This command returns with 0 if images are equal, 1 if they are not, 2 in case of error
    command = "compare -metric rmse %s %s null:" % (imfile1, imfile2)
    # Deliberately no check=True: a nonzero exit code is meaningful here
    # (1 = images differ), not an error condition.
    result = run(command, shell=True)
    if result.returncode == 2:
        raise IOError("Error while calling imagemagick compare method")
    return result.returncode == 0
def ismultilayer(imfile):
    """Returns whether an image file contains multiple layers"""
    # shape() parses convert's "%w %h" output, which emits one width/height
    # pair per layer — more than two numbers means several layers.
    return len(shape(imfile)) > 2
|
<filename>src/ipssihelp/worker/migrations/0001_initial.py
# Generated by Django 3.0.3 on 2020-04-21 19:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the worker app (auto-generated by Django 3.0.3).

    Creates the Ad, Address, Category, Conversation, Message, Mission and
    User tables, then wires up their foreign keys, indexes and unique
    constraints. Auto-generated: do not hand-edit the operations below —
    create a follow-up migration instead.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Ad',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=128, verbose_name='Title')),
                ('description', models.TextField(blank=True, null=True, verbose_name='Description')),
                ('type', models.CharField(choices=[('supply', 'Supply'), ('demand', 'Demand')], default='supply', max_length=32, verbose_name='Type')),
                ('status', models.CharField(blank=True, choices=[('waiting', 'Waiting'), ('online', 'Online'), ('canceled', 'Canceled')], default='waiting', max_length=32, null=True, verbose_name='Status')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated date')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created date')),
            ],
            options={
                'verbose_name': 'Ad',
                'verbose_name_plural': 'Ads',
                'db_table': 'ad',
            },
        ),
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address1', models.CharField(max_length=255, verbose_name='Address 1')),
                ('address2', models.CharField(blank=True, max_length=255, null=True, verbose_name='Address 2')),
                ('postal_code', models.CharField(max_length=255, verbose_name='Postal Code')),
                ('city', models.CharField(max_length=255, verbose_name='City')),
                ('country', models.CharField(default='FR', help_text='ISO Alpha-2', max_length=128, verbose_name='Country')),
                ('latitude', models.DecimalField(blank=True, decimal_places=3, max_digits=8, null=True, verbose_name='Latitude')),
                ('longitude', models.DecimalField(blank=True, decimal_places=3, max_digits=8, null=True, verbose_name='Longitude')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Update date')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created date')),
            ],
            options={
                'verbose_name': 'Address',
                'verbose_name_plural': 'Addresses',
                'db_table': 'address',
            },
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='Title')),
                ('description', models.TextField(blank=True, null=True, verbose_name='Description')),
            ],
            options={
                'verbose_name': 'Category',
                'verbose_name_plural': 'Categories',
                'db_table': 'category',
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Conversation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated date')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created date')),
            ],
            options={
                'verbose_name': 'Conversation',
                'verbose_name_plural': 'Conversations',
                'db_table': 'conversation',
            },
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(blank=True, null=True, verbose_name='Content')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated date')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created date')),
            ],
            options={
                'verbose_name': 'Message',
                'verbose_name_plural': 'Messages',
                'db_table': 'message',
            },
        ),
        migrations.CreateModel(
            name='Mission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Update date')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created date')),
            ],
            options={
                'verbose_name': 'Mission',
                'verbose_name_plural': 'Missions',
                'db_table': 'mission',
            },
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='Email')),
                ('gender', models.CharField(blank=True, choices=[('w', 'Women'), ('m', 'Man'), ('o', 'Other')], max_length=1, null=True, verbose_name='Gender')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='First name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='Last name')),
                ('phone', models.CharField(blank=True, max_length=16, null=True, verbose_name='Phone')),
                ('birth_date', models.DateField(blank=True, null=True, verbose_name='Birth date')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Updated date')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created date')),
            ],
            options={
                'verbose_name': 'User',
                'verbose_name_plural': 'Users',
                'db_table': 'user',
            },
        ),
        migrations.AddIndex(
            model_name='user',
            index=models.Index(fields=['email', 'phone'], name='user_email_7cf9e9_idx'),
        ),
        migrations.AlterUniqueTogether(
            name='user',
            unique_together={('email', 'phone')},
        ),
        migrations.AddField(
            model_name='mission',
            name='ad',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='worker.Ad', verbose_name='Ad'),
        ),
        migrations.AddField(
            model_name='mission',
            name='customer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='worker.User', verbose_name='User'),
        ),
        migrations.AddField(
            model_name='message',
            name='conversation',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='worker.Conversation', verbose_name='Conversation'),
        ),
        migrations.AddField(
            model_name='message',
            name='sender',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='worker.User', verbose_name='User'),
        ),
        migrations.AddField(
            model_name='conversation',
            name='ad',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='worker.Ad', verbose_name='Ad'),
        ),
        migrations.AddIndex(
            model_name='category',
            index=models.Index(fields=['name'], name='category_name_d601b7_idx'),
        ),
        migrations.AddField(
            model_name='address',
            name='user',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.PROTECT, to='worker.User', verbose_name='User'),
        ),
        # NOTE(review): blank=True without null=True on this FK lets forms
        # leave it empty while the DB still requires a value — confirm intended.
        migrations.AddField(
            model_name='ad',
            name='category',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.PROTECT, to='worker.Category', verbose_name='Category'),
        ),
        migrations.AddField(
            model_name='ad',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='worker.User', verbose_name='User'),
        ),
        migrations.AddIndex(
            model_name='address',
            index=models.Index(fields=['address1', 'postal_code'], name='address_address_b207d7_idx'),
        ),
        migrations.AddIndex(
            model_name='ad',
            index=models.Index(fields=['title', 'status'], name='ad_title_fad397_idx'),
        ),
    ]
|
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
'''
import unittest
from numpy.testing import assert_array_equal
from pandas._testing import assert_frame_equal
import pprint
import numpy as np
import pandas as pd
from time import sleep
from shutil import rmtree
from pathlib import Path
from os.path import join
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from copy import deepcopy
from tempfile import gettempdir
class TestMultiScenarioOfDoeEval(unittest.TestCase):
    """
    MultiScenario and doe_eval processes test class
    """
    def setUp(self):
        '''
        Initialize third data needed for testing
        '''
        self.namespace = 'MyCase'
        self.study_name = f'{self.namespace}'
        self.repo = 'sos_trades_core.sos_processes.test'
        self.base_path = 'sos_trades_core.sos_wrapping.test_discs'
        self.exec_eng = ExecutionEngine(self.namespace)
        self.factory = self.exec_eng.factory
    def setup_my_usecase(self):
        '''
        Build and return the full input dictionary for the two-scenario
        DoE_Eval study: numerical values, eval input/output selections,
        the design space and the DoE algorithm options for each scenario.
        '''
        ######### Numerical values ####
        x_1 = 2
        x_2_a = 4
        x_2_b = 5
        a_1 = 3
        b_1 = 4
        a_2 = 6
        b_2 = 2
        constant = 3
        power = 2
        z_1 = 1.2
        z_2 = 1.5
        my_doe_algo = "lhs"
        n_samples = 4
        ######### Selection of variables and DS ####
        input_selection_z_scenario_1 = {
            'selected_input': [False, False, False, True, False, False],
            'full_name': ['x', 'a','b','multi_scenarios.scenario_1.Disc3.z','constant','power']}
        input_selection_z_scenario_1 = pd.DataFrame(input_selection_z_scenario_1)
        input_selection_z_scenario_2 = {
            'selected_input': [False, False, False, True, False, False],
            'full_name': ['x', 'a','b','multi_scenarios.scenario_2.Disc3.z','constant','power']}
        input_selection_z_scenario_2 = pd.DataFrame(input_selection_z_scenario_2)
        output_selection_o_scenario_1 = {
            'selected_output': [False, False, True],
            'full_name': ['indicator', 'y', 'multi_scenarios.scenario_1.o']}
        output_selection_o_scenario_1 = pd.DataFrame(output_selection_o_scenario_1)
        output_selection_o_scenario_2 = {
            'selected_output': [False, False, True],
            'full_name': ['indicator', 'y', 'multi_scenarios.scenario_2.o']}
        output_selection_o_scenario_2 = pd.DataFrame(output_selection_o_scenario_2)
        dspace_dict_z = {'variable': ['z'],
                         'lower_bnd': [0.],
                         'upper_bnd': [10.],
                         'enable_variable': [True],
                         'activated_elem': [[True]]}
        dspace_z = pd.DataFrame(dspace_dict_z)
        my_name_list = ['name_1', 'name_2']
        my_x_trade = [x_1, x_2_a]
        my_trade_variables = {'name_1.x': 'float'}
        ######### Fill the dictionary for dm ####
        dict_values = {}
        prefix = f'{self.study_name}.multi_scenarios'
        dict_values[f'{self.study_name}.name_2.x'] = x_2_b
        dict_values[f'{self.study_name}.name_1.a'] = a_1
        dict_values[f'{self.study_name}.name_2.a'] = a_2
        dict_values[f'{prefix}.name_1.x_trade'] = my_x_trade
        dict_values[f'{prefix}.trade_variables'] = my_trade_variables
        dict_values[f'{prefix}.name_list'] = my_name_list
        dict_values[ f'{prefix}.scenario_1.DoE_Eval.Disc1.name_1.b'] = b_1
        dict_values[ f'{prefix}.scenario_1.DoE_Eval.Disc1.name_2.b'] = b_2
        dict_values[ f'{prefix}.scenario_2.DoE_Eval.Disc1.name_1.b'] = b_1
        dict_values[ f'{prefix}.scenario_2.DoE_Eval.Disc1.name_2.b'] = b_2
        dict_values[f'{prefix}.scenario_1.DoE_Eval.Disc3.constant'] = constant
        dict_values[f'{prefix}.scenario_1.DoE_Eval.Disc3.power'] = power
        dict_values[f'{prefix}.scenario_1.Disc3.z'] = z_1 # reference value (computed in any case)
        dict_values[f'{prefix}.scenario_2.DoE_Eval.Disc3.constant'] = constant
        dict_values[f'{prefix}.scenario_2.DoE_Eval.Disc3.power'] = power
        dict_values[f'{prefix}.scenario_2.Disc3.z'] = z_2 # reference value (computed in any case)
        dict_values[f'{prefix}.scenario_1.DoE_Eval.sampling_algo'] = my_doe_algo
        dict_values[f'{prefix}.scenario_1.DoE_Eval.eval_inputs'] = input_selection_z_scenario_1
        dict_values[f'{prefix}.scenario_1.DoE_Eval.eval_outputs'] = output_selection_o_scenario_1
        dict_values[f'{prefix}.scenario_2.DoE_Eval.sampling_algo'] = my_doe_algo
        dict_values[f'{prefix}.scenario_2.DoE_Eval.eval_inputs'] = input_selection_z_scenario_2
        dict_values[f'{prefix}.scenario_2.DoE_Eval.eval_outputs'] = output_selection_o_scenario_2
        dict_values[f'{prefix}.scenario_1.DoE_Eval.design_space'] = dspace_z
        dict_values[f'{prefix}.scenario_1.DoE_Eval.algo_options'] = {'n_samples': n_samples}
        dict_values[f'{prefix}.scenario_2.DoE_Eval.design_space'] = dspace_z
        dict_values[f'{prefix}.scenario_2.DoE_Eval.algo_options'] = {'n_samples': n_samples}
        return dict_values
    def test_01_multi_scenario_of_doe_eval(self):
        '''
        Build the multi-scenario DoE_Eval process programmatically (build
        maps, namespaces and builders), load the usecase inputs, execute,
        and check the scattered x values and the computed y = a*x + b.
        '''
        # scatter build map
        ac_map = {'input_name': 'name_list',
                  'input_type': 'string_list',
                  'input_ns': 'ns_scatter_scenario',
                  'output_name': 'ac_name',
                  'scatter_ns': 'ns_ac',
                  'gather_ns': 'ns_scenario',
                  'ns_to_update': ['ns_data_ac']}
        self.exec_eng.smaps_manager.add_build_map('name_list', ac_map)
        # scenario build map
        scenario_map = {'input_name': 'scenario_list',
                        'input_type': 'string_list',
                        'input_ns': 'ns_scatter_scenario',
                        'output_name': 'scenario_name',
                        'scatter_ns': 'ns_scenario',
                        'gather_ns': 'ns_scatter_scenario',
                        'ns_to_update': ['ns_disc3', 'ns_doe_eval', 'ns_barrierr', 'ns_out_disc3']}
        self.exec_eng.smaps_manager.add_build_map(
            'scenario_list', scenario_map)
        # shared namespace
        self.exec_eng.ns_manager.add_ns('ns_barrierr', 'MyCase')
        self.exec_eng.ns_manager.add_ns(
            'ns_scatter_scenario', 'MyCase.multi_scenarios')
        self.exec_eng.ns_manager.add_ns(
            'ns_disc3', 'MyCase.multi_scenarios.Disc3')
        self.exec_eng.ns_manager.add_ns(
            'ns_out_disc3', 'MyCase.multi_scenarios')
        self.exec_eng.ns_manager.add_ns(
            'ns_data_ac', 'MyCase')
        self.exec_eng.ns_manager.add_ns(
            'ns_doe_eval', f'{self.exec_eng.study_name}.multi_scenarios.DoE_Eval')
        # instantiate factory # get instantiator from Discipline class
        builder_list = self.factory.get_builder_from_process(repo=self.repo,
                                                             mod_id='test_disc1_scenario')
        scatter_list = self.exec_eng.factory.create_multi_scatter_builder_from_list(
            'name_list', builder_list=builder_list, autogather=False)
        mod_list = f'{self.base_path}.disc3_scenario.Disc3'
        disc3_builder = self.exec_eng.factory.get_builder_from_module(
            'Disc3', mod_list)
        scatter_list.append(disc3_builder)
        doe_eval_builder = self.exec_eng.factory.create_evaluator_builder(
            'DoE_Eval', 'doe_eval', scatter_list)
        multi_scenarios = self.exec_eng.factory.create_multi_scenario_builder(
            'multi_scenarios', 'scenario_list', [doe_eval_builder], autogather=False)
        self.exec_eng.factory.set_builders_to_coupling_builder(
            multi_scenarios)
        self.exec_eng.configure()
        self.exec_eng.display_treeview_nodes()
        dict_values = self.setup_my_usecase()
        # Provide inputs and reconfigure
        self.exec_eng.load_study_from_input_dict(dict_values)
        self.exec_eng.display_treeview_nodes()
        self.exec_eng.display_treeview_nodes(True)
        # Run
        self.exec_eng.execute()
        # Check results
        prefix = f'{self.study_name}.multi_scenarios'
        x_2_b = dict_values[f'{self.study_name}.name_2.x']
        a_1 = dict_values[f'{self.study_name}.name_1.a']
        a_2 = dict_values[f'{self.study_name}.name_2.a']
        my_x_trade = dict_values[f'{prefix}.name_1.x_trade']
        x_1 = my_x_trade[0]
        x_2_a = my_x_trade[1]
        b_1 = dict_values[ f'{prefix}.scenario_1.DoE_Eval.Disc1.name_1.b']
        b_2 = dict_values[ f'{prefix}.scenario_1.DoE_Eval.Disc1.name_2.b']
        z_1 = dict_values[f'{prefix}.scenario_1.Disc3.z']
        z_2 = dict_values[f'{prefix}.scenario_2.Disc3.z']
        constant = dict_values[f'{prefix}.scenario_2.DoE_Eval.Disc3.constant']
        power = dict_values[f'{prefix}.scenario_2.DoE_Eval.Disc3.power']
        self.assertEqual(self.exec_eng.dm.get_value(
            f'{prefix}.scenario_1.name_1.x'), x_1)
        self.assertEqual(self.exec_eng.dm.get_value(
            f'{prefix}.scenario_2.name_1.x'),x_2_a)
        self.assertEqual(self.exec_eng.dm.get_value(
            f'{prefix}.scenario_1.DoE_Eval.name_1.y'),a_1 * x_1 + b_1)
        self.assertEqual(self.exec_eng.dm.get_value(
            f'{prefix}.scenario_1.DoE_Eval.name_2.y'),a_2 * x_2_b + b_2)
        self.assertEqual(self.exec_eng.dm.get_value(
            f'{prefix}.scenario_2.DoE_Eval.name_1.y'),a_1 * x_2_a + b_1)
        self.assertEqual(self.exec_eng.dm.get_value(
            f'{prefix}.scenario_2.DoE_Eval.name_2.y'),a_2 * x_2_b + b_2)
    def test_02_multi_scenario_of_doe_eval_from_process(self):
        '''
        Same study as test_01 but built from the registered process id
        (as the GUI would load it) instead of assembling builders by hand.
        '''
        # load process in GUI
        builders = self.factory.get_builder_from_process(
            repo=self.repo, mod_id='test_multiscenario_of_doe_eval')
        self.exec_eng.factory.set_builders_to_coupling_builder(builders)
        self.exec_eng.configure()
        self.exec_eng.display_treeview_nodes()
        dict_values = self.setup_my_usecase()
        # Provide inputs and reconfigure
        self.exec_eng.load_study_from_input_dict(dict_values)
        self.exec_eng.display_treeview_nodes()
        self.exec_eng.display_treeview_nodes(True)
        # Run
        self.exec_eng.execute()
|
import os
import unittest
import tempfile
import users
import app
import json
from db import DB
# The bcrypt hashing process was making the tests run slowly so I've statically set the data to save time
# test_create_users still hits the bcrypt code path
# NOTE(review): the hash literals were redacted ("<PASSWORD>") in this copy and
# the redaction also removed the opening quotes, leaving a syntax error.
# Restore the real bcrypt hashes in place of the placeholders.
hashed_passwords = [
    "<PASSWORD>",  # user1
    "<PASSWORD>",  # user2
    "<PASSWORD>",  # user3
]
# Three seed users (ids "1".."3") whose passwords reference the hashes above.
# NOTE(review): the email literal was redacted too; it must contain a %s for
# the formatting to work, so a plausible pattern is reconstructed here.
test_users = [{'id': str(n), 'username': 'user%s' % n, 'email': 'user%s@example.com' % n, 'password': hashed_passwords[n - 1]} for n in range(1, 4)]
# All the tests are prefixed with "test". I think this is a bit ugly, but it makes the unittest standard
# library pick them up. I could use pytest, but I wanted to avoid the extra dependency to keep running the tests easy.
class UserTest(unittest.TestCase):
    """HTTP-level tests for the user service, backed by a fresh temp database."""
    def setUp(self):
        # New temporary database per test, pre-seeded with the three test_users.
        self.user_service = app.app
        self.db_file, self.user_service.config['DATABASE'] = tempfile.mkstemp()
        self.db = DB(self.user_service.config['DATABASE'], app.bcrypt)
        users.init_db(self.db, self.user_service)
        for user in test_users:
            self.db.write('insert into users values(:id, :username, :email, :password)', user)
        self.user_service.config['TESTING'] = True
        self.app = self.user_service.test_client()
    def tearDown(self):
        os.close(self.db_file)
        os.unlink(self.user_service.config['DATABASE'])
    def create_user(self, user):
        """POST a user (given as a namedtuple-like object) and return the parsed response."""
        user_json = json.dumps(user._asdict())
        # Bug fix: raw_resp was referenced without ever issuing the request,
        # which raised NameError; the POST is reconstructed from the pattern
        # used in test_create_user.
        headers = [('Content-Type', 'application/json')]
        raw_resp = self.app.post('/users', headers=headers, data=user_json)
        return json.loads(raw_resp.data)
    def test_create_user(self):
        user_json = json.dumps({'username': 'test_user', 'email': '<EMAIL>', 'password': '<PASSWORD>'})
        headers = [('Content-Type', 'application/json')]
        raw_resp = self.app.post('/users', headers = headers, data = user_json)
        resp = json.loads(raw_resp.data)
        # assertEquals is a deprecated alias removed in Python 3.12: use assertEqual.
        self.assertEqual('test_user', resp['username'])
        self.assertEqual('<EMAIL>', resp['email'])
        self.assertIn('id', resp)
        self.assertNotIn('password', resp)
        hashed_password = self.db.read_all('select password from users where id = :id', {'id': resp['id']})[0][0]
        self.assertTrue(self.db.secure_hash_verify(hashed_password, '<PASSWORD>'), 'hashed password did not match password')
    def test_list_users(self):
        raw_resp = self.app.get('/users')
        resp_data = json.loads(raw_resp.data)
        self.assertIn('users', resp_data)
        # Renamed from "users" to avoid shadowing the imported users module.
        listed_users = resp_data['users']
        self.assertEqual(3, len(listed_users))
        for n in range(0, 3):
            expected_user = test_users[n]
            actual_user = listed_users[n]
            self.assertEqual(expected_user['id'], actual_user['id'])
            self.assertEqual(expected_user['username'], actual_user['username'])
            self.assertEqual(expected_user['email'], actual_user['email'])
    def test_list_user(self):
        raw_resp = self.app.get('/users/1')
        expected_user = test_users[0]
        actual_user = json.loads(raw_resp.data)
        self.assertEqual(expected_user['id'], actual_user['id'])
        self.assertEqual(expected_user['username'], actual_user['username'])
        self.assertEqual(expected_user['email'], actual_user['email'])
    def test_delete_user(self):
        headers = [('foobar', self.user_service.config['VALID_API_KEY'])]
        raw_resp = self.app.delete('/users/1', headers = headers)
        resp = json.loads(raw_resp.data)
        self.assertEqual('deleted', resp['message'])
    def test_fail_creating_user_with_existing_username(self):
        user_json = json.dumps({'username': 'user1', 'email': '<EMAIL>', 'password': '<PASSWORD>'})
        headers = [('Content-Type', 'application/json')]
        raw_resp = self.app.post('/users', headers = headers, data = user_json)
        resp = json.loads(raw_resp.data)
        self.assertEqual(400, raw_resp.status_code)
        self.assertEqual('Username or email already taken', resp['message'])
    def test_fail_creating_user_with_existing_email(self):
        user_json = json.dumps({'username': 'user2', 'email': test_users[0]['email'], 'password': '<PASSWORD>'})
        headers = [('Content-Type', 'application/json')]
        raw_resp = self.app.post('/users', headers = headers, data = user_json)
        resp = json.loads(raw_resp.data)
        self.assertEqual(400, raw_resp.status_code)
        self.assertEqual('Username or email already taken', resp['message'])
    def test_fail_creating_user_with_missing_fields(self):
        user_json = json.dumps({'email': '<EMAIL>', 'password': '<PASSWORD>'})
        headers = [('Content-Type', 'application/json')]
        raw_resp = self.app.post('/users', headers = headers, data = user_json)
        resp = json.loads(raw_resp.data)
        self.assertEqual(400, raw_resp.status_code)
        self.assertEqual('Expected username, email and password. Got password, email', resp['message'])
    def test_fail_listing_user_that_does_not_exist(self):
        raw_resp = self.app.get('/users/notreal')
        self.assertEqual(404, raw_resp.status_code)
    def test_fail_deleting_user_that_does_not_exist(self):
        headers = [('foobar', self.user_service.config['VALID_API_KEY'])]
        raw_resp = self.app.delete('/users/notreal', headers = headers)
        self.assertEqual(404, raw_resp.status_code)
    def test_fail_deleting_user_without_api_key(self):
        raw_resp = self.app.delete('/users/1')
        resp = json.loads(raw_resp.data)
        self.assertEqual(401, raw_resp.status_code)
        self.assertEqual('Missing or invalid foobar API key header', resp['message'])
    def test_fail_deleting_user_without_correct_api_key(self):
        headers = [('foobar', 'bad_api_key_value')]
        raw_resp = self.app.delete('/users/1', headers = headers)
        resp = json.loads(raw_resp.data)
        self.assertEqual(401, raw_resp.status_code)
        self.assertEqual('Missing or invalid foobar API key header', resp['message'])
# Run the test suite when executed directly.
# Fix: a stray " |" (file-separator artifact) was fused to the final line,
# which made the module a syntax error.
if __name__ == '__main__':
    unittest.main()
# increasing paths in an array
def f1_3(array):
    """Collect every contiguous non-decreasing subarray of length >= 2.

    Single forward scan over run boundaries; O(N^2) overall because each
    extension emits all subarrays ending at the current position.
    Returns None for arrays with fewer than two elements.
    """
    if len(array) <= 1:
        return
    paths = []
    run_start = 0
    previous = array[run_start]
    for pos in range(1, len(array)):
        current = array[pos]
        if current >= previous:
            # Emit every subarray inside the current run that ends at pos.
            for begin in range(run_start, pos):
                paths.append(array[begin:pos + 1])
        else:
            # The run is broken: restart it at the current position.
            run_start = pos
        previous = current
    return paths
def f1_2(array):
    """Collect every contiguous non-decreasing subarray of length >= 2.

    O(N^2): for each start index, extend forward while values keep
    non-decreasing, recording every prefix along the way.
    Returns None for arrays with fewer than two elements.
    """
    if len(array) <= 1:
        return
    collected = []
    for begin in range(len(array) - 1):
        chunk = [array[begin]]
        for pos in range(begin + 1, len(array)):
            if array[pos] < chunk[-1]:
                break
            chunk.append(array[pos])
            collected.append(chunk[:])
    return collected
def f1(array):
    """Collect every contiguous non-decreasing subarray of length >= 2.

    O(N^2): for each end index, walk backwards while values keep
    non-increasing (seen in reverse), recording each reversed extension.
    Returns None for arrays with fewer than two elements.
    """
    if len(array) <= 1:
        return
    collected = []
    for end in range(1, len(array)):
        backwards = [array[end]]
        for pos in range(end - 1, -1, -1):
            if array[pos] > backwards[-1]:
                break
            backwards.append(array[pos])
            collected.append(list(reversed(backwards)))
    return collected
# Demo: print the increasing subarrays found by each of the three variants
# (f1 emits them in end-index order, f1_2/f1_3 in start-index order).
array = [1, 3, 4, 2, 6]
print(f1(array))
print(f1_2(array))
print(f1_3(array))
# increasing paths in a tree
class Node(object):
    """A binary-tree node holding a value and optional left/right children."""
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
def f2(root):
    """Return all non-decreasing downward paths (length >= 2) in the tree.

    Returns None for an empty tree or a lone leaf.
    """
    if root is None or (root.left is None and root.right is None):
        return
    collected = []
    traverse(root, collected)
    return collected
def traverse(root, ans):
    """Visit every node (pre-order) and start a valid_path search there. O(N) visits."""
    valid_path(root, ans, [root.val])
    for child in (root.left, root.right):
        if child:
            traverse(child, ans)
def valid_path(root, ans, path):
    """Extend `path` downward through children whose values keep non-decreasing.

    Every valid extension (left child first, then right) is appended to `ans`.
    """
    for child in (root.left, root.right):
        if child and child.val >= root.val:
            extended = path + [child.val]
            ans.append(extended)
            valid_path(child, ans, extended)
def f2_2(root):
    """Return the non-decreasing downward paths found by get_paths.

    Unlike f2, paths restart only at the root and after a decreasing edge.
    Returns None for an empty tree or a lone leaf.
    """
    if root is None or (root.left is None and root.right is None):
        return
    collected = []
    get_paths(root, [root.val], collected)
    return collected
def get_paths(root, path, ans):
    """Recursively collect non-decreasing downward paths starting at `path`.

    When a child continues the run (child.val >= path[-1]) the extended path
    is recorded and the run continues; otherwise the run restarts at the
    child. The reversed-then-reversed list building of the original was
    equivalent to path + [child.val] and is simplified here.

    Bug fix: the right branch was guarded by `right.val >= path[-1]`, so its
    `else` clause was unreachable and any subtree below a decreasing right
    edge was never explored — asymmetric with the left branch. Both branches
    now mirror each other.
    """
    left, right = root.left, root.right
    if left:
        if left.val >= path[-1]:
            ans.append(path + [left.val])
            get_paths(left, path + [left.val], ans)
        else:
            get_paths(left, [left.val], ans)
    if right:
        if right.val >= path[-1]:
            ans.append(path + [right.val])
            get_paths(right, path + [right.val], ans)
        else:
            get_paths(right, [right.val], ans)
# Demo tree:        4
#                 /   \
#                5     9
#               / \   / \
#              3   6 8  10
# Fix: a stray " |" (file-separator artifact) was fused to the final print,
# which made the module a syntax error.
node_4 = Node(4)
node_5 = Node(5)
node_9 = Node(9)
node_3 = Node(3)
node_6 = Node(6)
node_8 = Node(8)
node_10 = Node(10)
node_4.left = node_5
node_4.right = node_9
node_5.left = node_3
node_5.right = node_6
node_9.left = node_8
node_9.right = node_10
print(f2(node_4))
print(f2_2(node_4))
##
# Copyright (c) 2006-2018 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
PyKerberos Function Description.
"""
class KrbError(Exception):
    """Base class for all Kerberos-related errors raised by this module."""
    pass
class BasicAuthError(KrbError):
    """Raised when basic (user name / password) authentication fails."""
    pass
class GSSError(KrbError):
    """Raised when a GSSAPI operation fails."""
    pass
# NOTE: the functions below are documentation stubs; the real implementations
# live in the PyKerberos C extension.
def checkPassword(user, pswd, service, default_realm):
    """
    This function provides a simple way to verify that a user name and password
    match those normally used for Kerberos authentication.
    It does this by checking that the supplied user name and password can be
    used to get a ticket for the supplied service.
    If the user name does not contain a realm, then the default realm supplied
    is used.
    For this to work properly the Kerberos must be configured properly on this
    machine.
    That will likely mean ensuring that the edu.mit.Kerberos preference file
    has the correct realms and KDCs listed.
    IMPORTANT: This method is vulnerable to KDC spoofing attacks and it should
    only be used for testing. Do not use this in any production system - your
    security could be compromised if you do.
    @param user: A string containing the Kerberos user name.
        A realm may be included by appending an C{"@"} followed by the realm
        string to the actual user id.
        If no realm is supplied, then the realm set in the default_realm
        argument will be used.
    @param pswd: A string containing the password for the user.
    @param service: A string containing the Kerberos service to check access
        for.
        This will be of the form C{"sss/xx.yy.zz"}, where C{"sss"} is the
        service identifier (e.g., C{"http"}, C{"krbtgt"}), and C{"xx.yy.zz"} is
        the hostname of the server.
    @param default_realm: A string containing the default realm to use if one
        is not supplied in the user argument.
        Note that Kerberos realms are normally all uppercase (e.g.,
        C{"EXAMPLE.COM"}).
    @return: True if authentication succeeds, false otherwise.
    """
def changePassword(user, oldpswd, newpswd):
    """
    This function allows changing the user password on the KDC.
    @param user: A string containing the Kerberos user name.
        A realm may be included by appending a C{"@"} followed by the realm
        string to the actual user id.
        If no realm is supplied, then the realm set in the default_realm
        argument will be used.
    @param oldpswd: A string containing the old (current) password for the
        user.
    @param newpswd: A string containing the new password for the user.
    @return: True if password changing succeeds, false otherwise.
    """
def getServerPrincipalDetails(service, hostname):
    """
    This function returns the service principal for the server given a service
    type and hostname.
    Details are looked up via the C{/etc/keytab} file.
    @param service: A string containing the Kerberos service type for the
        server.
    @param hostname: A string containing the hostname of the server.
    @return: A string containing the service principal.
    """
"""
GSSAPI Function Result Codes:
-1 : Error
0 : GSSAPI step continuation (only returned by 'Step' function)
1 : GSSAPI step complete, or function return OK
"""
# Some useful result codes
AUTH_GSS_CONTINUE = 0
AUTH_GSS_COMPLETE = 1
# Some useful gss flags
GSS_C_DELEG_FLAG = 1
GSS_C_MUTUAL_FLAG = 2
GSS_C_REPLAY_FLAG = 4
GSS_C_SEQUENCE_FLAG = 8
GSS_C_CONF_FLAG = 16
GSS_C_INTEG_FLAG = 32
GSS_C_ANON_FLAG = 64
GSS_C_PROT_READY_FLAG = 128
GSS_C_TRANS_FLAG = 256
def authGSSClientInit(service, **kwargs):
    """
    Initializes a context for GSSAPI client-side authentication with the given
    service principal.
    L{authGSSClientClean} must be called after this function returns an OK
    result to dispose of the context once all GSSAPI operations are complete.
    @param service: A string containing the service principal in the form
        C{"type@fqdn"}.
    @param principal: Optional string containing the client principal in the
        form C{"user@realm"}.
    @param gssflags: Optional integer used to set GSS flags.
        (e.g. C{GSS_C_DELEG_FLAG|GSS_C_MUTUAL_FLAG|GSS_C_SEQUENCE_FLAG} will
        allow for forwarding credentials to the remote host)
    @param delegated: Optional server context containing delegated credentials
    @param mech_oid: Optional GSS mech OID
    @return: A tuple of (result, context) where result is the result code (see
        above) and context is an opaque value that will need to be passed to
        subsequent functions.
    """
def authGSSClientClean(context):
    """
    Destroys the context for GSSAPI client-side authentication. This function
    is provided for compatibility with earlier versions of PyKerberos but does
    nothing. The context object destroys itself when it is reclaimed.
    @param context: The context object returned from L{authGSSClientInit}.
    @return: A result code (see above).
    """
def authGSSClientInquireCred(context):
    """
    Get the current user name, if any, without a client-side GSSAPI step.
    If the principal has already been authenticated via completed client-side
    GSSAPI steps then the user name of the authenticated principal is kept. The
    user name will be available via authGSSClientUserName.
    @param context: The context object returned from L{authGSSClientInit}.
    @return: A result code (see above).
    """
"""
Address Types for Channel Bindings
https://docs.oracle.com/cd/E19455-01/806-3814/6jcugr7dp/index.html#reference-9
"""
GSS_C_AF_UNSPEC = 0
GSS_C_AF_LOCAL = 1
GSS_C_AF_INET = 2
GSS_C_AF_IMPLINK = 3
GSS_C_AF_PUP = 4
GSS_C_AF_CHAOS = 5
GSS_C_AF_NS = 6
GSS_C_AF_NBS = 7
GSS_C_AF_ECMA = 8
GSS_C_AF_DATAKIT = 9
GSS_C_AF_CCITT = 10
GSS_C_AF_SNA = 11
GSS_C_AF_DECnet = 12
GSS_C_AF_DLI = 13
GSS_C_AF_LAT = 14
GSS_C_AF_HYLINK = 15
GSS_C_AF_APPLETALK = 16
GSS_C_AF_BSC = 17
GSS_C_AF_DSS = 18
GSS_C_AF_OSI = 19
GSS_C_AF_X25 = 21
GSS_C_AF_NULLADDR = 255
def channelBindings(**kwargs):
    """
    Builds a gss_channel_bindings_struct which can be used to pass onto
    L{authGSSClientStep} to bind onto the auth. Details on Channel Bindings
    can be found at https://tools.ietf.org/html/rfc5929. More details on the
    struct can be found at
    https://docs.oracle.com/cd/E19455-01/806-3814/overview-52/index.html
    @param initiator_addrtype: Optional integer used to set the
        initiator_addrtype, defaults to GSS_C_AF_UNSPEC if not set
    @param initiator_address: Optional byte string containing the
        initiator_address
    @param acceptor_addrtype: Optional integer used to set the
        acceptor_addrtype, defaults to GSS_C_AF_UNSPEC if not set
    @param acceptor_address: Optional byte string containing the
        acceptor_address
    @param application_data: Optional byte string containing the
        application_data. An example would be 'tls-server-end-point:{cert-hash}'
        where {cert-hash} is the hash of the server's certificate
    @return: A tuple of (result, gss_channel_bindings_struct) where result is
        the result code and gss_channel_bindings_struct is the channel bindings
        structure that can be passed onto L{authGSSClientStep}
    """
def authGSSClientStep(context, challenge, **kwargs):
    """
    Processes a single GSSAPI client-side step using the supplied server data.
    @param context: The context object returned from L{authGSSClientInit}.
    @param challenge: A string containing the base64-encoded server data (which
        may be empty for the first step).
    @param channel_bindings: Optional channel bindings to bind onto the auth
        request. This struct can be built using L{channelBindings}
        and if not specified it will pass along GSS_C_NO_CHANNEL_BINDINGS as
        a default.
    @return: A result code (see above).
    """
def authGSSClientResponse(context):
    """
    Get the client response from the last successful GSSAPI client-side step.
    @param context: The context object returned from L{authGSSClientInit}.
    @return: A string containing the base64-encoded client data to be sent to
        the server.
    """
def authGSSClientResponseConf(context):
    """
    Determine whether confidentiality was enabled in the previously unwrapped
    buffer.
    @param context: The context object returned from L{authGSSClientInit}.
    @return: C{1} if confidentiality was enabled in the previously unwrapped
        buffer, C{0} otherwise.
    """
def authGSSClientUserName(context):
    """
    Get the user name of the principal authenticated via the now complete
    GSSAPI client-side operations, or the current user name obtained via
    authGSSClientInquireCred. This method must only be called after
    authGSSClientStep or authGSSClientInquireCred return a complete response
    code.
    @param context: The context object returned from L{authGSSClientInit}.
    @return: A string containing the user name.
    """
def authGSSClientUnwrap(context, challenge):
    """
    Perform the client side GSSAPI unwrap step.
    @param context: The context object returned from L{authGSSClientInit}.
    @param challenge: A string containing the base64-encoded server data.
    @return: A result code (see above)
    """
def authGSSClientWrap(context, data, user=None, protect=0):
    """
    Perform the client side GSSAPI wrap step.
    @param context: The context object returned from L{authGSSClientInit}.
    @param data: The result of the L{authGSSClientResponse} after the
        L{authGSSClientUnwrap}.
    @param user: The user to authorize.
    @param protect: If C{0}, then just provide integrity protection.
        If C{1}, then provide confidentiality as well.
    @return: A result code (see above)
    """
def authGSSServerInit(service):
    """
    Initializes a context for GSSAPI server-side authentication with the given
    service principal.
    authGSSServerClean must be called after this function returns an OK result
    to dispose of the context once all GSSAPI operations are complete.
    @param service: A string containing the service principal in the form
        C{"type@fqdn"}. To initialize the context for the purpose of accepting
        delegated credentials, pass the literal string C{"DELEGATE"}.
    @return: A tuple of (result, context) where result is the result code (see
        above) and context is an opaque value that will need to be passed to
        subsequent functions.
    """
def authGSSServerClean(context):
    """
    Destroys the context for GSSAPI server-side authentication. This function
    is provided for compatibility with earlier versions of PyKerberos but does
    nothing. The context object destroys itself when it is reclaimed.
    @param context: The context object returned from L{authGSSServerInit}.
    @return: A result code (see above).
    """
def authGSSServerStep(context, challenge):
    """
    Processes a single GSSAPI server-side step using the supplied client data.
    @param context: The context object returned from L{authGSSServerInit}.
    @param challenge: A string containing the base64-encoded client data.
    @return: A result code (see above).
    """
def authGSSServerResponse(context):
    """
    Get the server response from the last successful GSSAPI server-side step.
    @param context: The context object returned from L{authGSSServerInit}.
    @return: A string containing the base64-encoded server data to be sent to
        the client.
    """
def authGSSServerHasDelegated(context):
    """
    Checks whether a server context has delegated credentials.
    @param context: The context object returned from L{authGSSServerInit}.
    @return: A bool saying whether delegated credentials are available.
    """
def authGSSServerUserName(context):
    """
    Get the user name of the principal trying to authenticate to the server.
    This method must only be called after L{authGSSServerStep} returns a
    complete or continue response code.
    @param context: The context object returned from L{authGSSServerInit}.
    @return: A string containing the user name.
    """
def authGSSServerTargetName(context):
    """
    Get the target name if the server did not supply its own credentials.
    This method must only be called after L{authGSSServerStep} returns a
    complete or continue response code.
    @param context: The context object returned from L{authGSSServerInit}.
    @return: A string containing the target name.
    """
def authGSSServerStoreDelegate(context):
    """
    Save the ticket sent to the server in the file C{/tmp/krb5_pyserv_XXXXXX}.
    This method must only be called after L{authGSSServerStep} returns a
    complete or continue response code.
    @param context: The context object returned from L{authGSSServerInit}.
    @return: A result code (see above).
    """
def authGSSServerCacheName(context):
    """
    Get the name of the credential cache created with
    L{authGSSServerStoreDelegate}.
    This method must only be called after L{authGSSServerStoreDelegate}.
    @param context: The context object returned from L{authGSSServerInit}.
    @return: A string containing the cache name.
    """
|
#!/usr/bin/env python2
"""Forest training."""
# pylint: disable=wrong-import-order, redefined-outer-name, line-too-long, invalid-name
import os
import sys
import os.path as path
import h5py
import numpy as np
import sys
import logging
import click
from up_tools.model import landmarks_91
import joblib
import pyximport; pyximport.install() # pylint: disable=multiple-statements
from conversions import ( # pylint: disable=import-error
axis_angle_to_matrix)
from clustertools.config import available_cpu_count
from clustertools.log import LOGFORMAT
sys.path.insert(0, path.join(path.dirname(__file__),
'..'))
from config import DIRECT3D_DATA_FP
# Module logger (configured in the __main__ guard below).
LOGGER = logging.getLogger(__name__)
# Output directory for the trained per-range forest models.
# NOTE: check-then-create is kept because this is a python2 script
# (os.makedirs has no exist_ok there).
OUT_DIR = path.join(path.dirname(__file__),
                    '..', 'models', '2dto3d',
                    'separate_regressors')
if not path.exists(OUT_DIR):
    os.makedirs(OUT_DIR)
# Inputs:
# * 2x91 absolute x, y coordinates
# Prediction targets:
# * betas: 10
# * pose: 72
# * trans: 3
# * rt: 3
# * t: 3
# * f: 1
# Landmark indices (into the 91-landmark set) belonging to the torso.
# NOTE(review): landmarks_91.rshoulder_back appears twice below; by symmetry
# with the lshoulder entries the second occurrence was probably meant to be
# rshoulder_top -- confirm against the landmark definition before changing.
torso_ids = [landmarks_91.rshoulder,
             landmarks_91.rshoulder_back,
             landmarks_91.rshoulder_front,
             landmarks_91.rshoulder_back,
             landmarks_91.lshoulder,
             landmarks_91.lshoulder_back,
             landmarks_91.lshoulder_front,
             landmarks_91.lshoulder_top,
             landmarks_91.shoulderblade_center,
             landmarks_91.solar_plexus,
             landmarks_91.lpapilla,
             landmarks_91.rpapilla,
             landmarks_91.belly_button,
             landmarks_91.rwaist,
             landmarks_91.lwaist,
             landmarks_91.waist_back,
             landmarks_91.lhip,
             landmarks_91.lhip_back,
             landmarks_91.lhip_front,
             landmarks_91.lhip_outer,
             landmarks_91.rhip,
             landmarks_91.rhip_back,
             landmarks_91.rhip_front,
             landmarks_91.rhip_outer]
# Landmark-index groups for the remaining body parts; used to select the
# relevant 2D inputs per regression target range in `lmset_to_use`.
head_ids = [landmarks_91.head_top,
            landmarks_91.head_back,
            landmarks_91.nose,
            landmarks_91.lear,
            landmarks_91.rear]
larm_ids = [landmarks_91.luarm_inner,
            landmarks_91.luarm_outer,
            landmarks_91.lelbow,
            landmarks_91.lelbow_bottom,
            landmarks_91.lelbow_inner,
            landmarks_91.lelbow_outer,
            landmarks_91.lelbow_top,
            landmarks_91.llarm_lower,
            landmarks_91.llarm_upper,
            landmarks_91.lwrist]
rarm_ids = [landmarks_91.ruarm_inner,
            landmarks_91.ruarm_outer,
            landmarks_91.relbow,
            landmarks_91.relbow_bottom,
            landmarks_91.relbow_inner,
            landmarks_91.relbow_outer,
            landmarks_91.relbow_top,
            landmarks_91.rlarm_lower,
            landmarks_91.rlarm_upper,
            landmarks_91.rwrist]
lleg_ids = [landmarks_91.luleg_back,
            landmarks_91.luleg_front,
            landmarks_91.luleg_outer,
            landmarks_91.luleg_inner,
            landmarks_91.lknee,
            landmarks_91.llleg_back,
            landmarks_91.llleg_front,
            landmarks_91.llleg_outer,
            landmarks_91.llleg_inner,
            landmarks_91.lankle,
            landmarks_91.lheel,
            landmarks_91.lankle_inner,
            landmarks_91.lankle_outer,
            landmarks_91.lbigtoe]
rleg_ids = [landmarks_91.ruleg_back,
            landmarks_91.ruleg_front,
            landmarks_91.ruleg_outer,
            landmarks_91.ruleg_inner,
            landmarks_91.rknee,
            landmarks_91.rlleg_back,
            landmarks_91.rlleg_front,
            landmarks_91.rlleg_outer,
            landmarks_91.rlleg_inner,
            landmarks_91.rankle,
            landmarks_91.rheel,
            landmarks_91.rankle_inner,
            landmarks_91.rankle_outer,
            landmarks_91.rbigtoe]
def create_featseltuple(ids):
    """Add the flat y coordinate to an index list.

    For each landmark index i, both i (x column) and i + 91 (y column)
    are included; the result is returned as a sorted tuple.
    """
    return tuple(sorted(idx + offset for idx in ids for offset in (0, 91)))
# Maps a (rangestart, rangeend) target slice of the annotation vector to the
# landmark subset used as regression input for that target (see get_data).
lmset_to_use = {
    (0, 10): torso_ids,  # shape
    (10, 13): torso_ids,  # root joint
    (13, 16): torso_ids + lleg_ids,  # luleg
    (16, 19): torso_ids + rleg_ids,  # ruleg
    (19, 22): torso_ids,  # spine
    (22, 25): lleg_ids,  # llleg
    (25, 28): rleg_ids,  # rlleg
    (28, 31): torso_ids,  # spine1
    (31, 34): lleg_ids,  # lfoot
    (34, 37): rleg_ids,  # rfoot
    (37, 40): torso_ids,  # spine2
    (40, 43): lleg_ids,  # ltoes
    (43, 46): rleg_ids,  # rtoes
    (46, 49): torso_ids + head_ids,  # neck
    (49, 52): torso_ids + larm_ids,  # lshoulder
    (52, 55): torso_ids + rarm_ids,  # rshoulder
    (55, 58): torso_ids + head_ids,  # head
    (58, 61): torso_ids + larm_ids,  # luarm
    (61, 64): torso_ids + rarm_ids,  # ruarm
    (64, 67): larm_ids,  # llarm
    (67, 70): rarm_ids,  # rlarm
    (70, 73): larm_ids,  # lhand
    (73, 76): rarm_ids,  # rhand
    (76, 79): larm_ids,  # lfingers
    (79, 82): rarm_ids,  # rfingers
    (82, 85): torso_ids  # depth
}
def normalize_axis_angle(anglevec):
    """Normalize angle periodicity in place.

    *anglevec* is a flat array of concatenated 3-vector axis-angle
    representations; each rotation's magnitude is reduced modulo pi while
    keeping its axis.

    Bug fix: a zero rotation vector (angle == 0) previously caused a
    division by zero, writing NaNs into the array; it is now left untouched.
    """
    assert len(anglevec) % 3 == 0
    for startpos in range(0, len(anglevec), 3):
        rep = anglevec[startpos:startpos + 3]
        angle = np.linalg.norm(rep)
        if angle == 0.:
            continue  # identity rotation: nothing to normalize
        angle_norm = np.fmod(angle, np.pi)
        anglevec[startpos:startpos + 3] = rep / angle * angle_norm
def preprocess(dta_arr):
    """Make the coordinates relative to mean position, apply the modulo operator to pose.

    Operates on *dta_arr* in place.  Columns [0, 182) hold the flattened 2x91
    landmark coordinates; columns [192, 264) hold the 72 axis-angle pose values.
    """
    for dta_idx in range(dta_arr.shape[0]):
        pose = dta_arr[dta_idx, :2*91].reshape((2, 91))
        mean = np.mean(pose, axis=1)
        # re-center landmarks around the image center
        # (513 presumably is the input image side length -- TODO confirm)
        dta_arr[dta_idx, :2*91] = (pose.T - mean + 513. / 2.).T.flat
        # wrap the 72 pose parameters (after 182 coords + 10 betas)
        normalize_axis_angle(dta_arr[dta_idx, 2*91+10:2*91+10+72])
def get_data(prefix, part_rangestart, finalize, debug_run):  # pylint: disable=too-many-branches
    """Get the data.

    Loads train/val (or train+val/test when *finalize* is set) HDF5 splits,
    preprocesses them, selects the feature columns relevant for the target
    range starting at *part_rangestart*, and returns
    (train_dta, train_annot, val_dta, val_annot).

    Fixes: the HDF5 files are now opened explicitly read-only (the h5py
    default mode is not read-only and can fail or lock the file) and are
    closed once their contents have been copied into numpy arrays (the
    original leaked all three file handles).
    """
    rangestart = part_rangestart
    # target 0 covers the 10 shape betas; every other target is a 3-vector
    rangeend = 10 if part_rangestart == 0 else part_rangestart + 3
    train_f = h5py.File(path.join(
        DIRECT3D_DATA_FP,
        '91', '500', prefix, 'train.hdf5'), 'r')
    train_dset = train_f['2dto3d']
    if debug_run:
        train_dta = np.array(train_dset[:10000])
    else:
        train_dta = np.array(train_dset)
    train_f.close()  # data fully copied into train_dta
    preprocess(train_dta)
    #add_noise(train_dta)
    val_f = h5py.File(path.join(
        DIRECT3D_DATA_FP,
        '91', '500', prefix, 'val.hdf5'), 'r')
    val_dset = val_f['2dto3d']
    if debug_run:
        val_dta = np.array(val_dset[:10])
    else:
        val_dta = np.array(val_dset)
    val_f.close()
    preprocess(val_dta)
    if finalize:
        # final model: train on train+val, evaluate on the test split
        train_dta = np.vstack((train_dta, val_dta))
        val_f = h5py.File(path.join(
            DIRECT3D_DATA_FP,
            '91', '500', prefix, 'test.hdf5'), 'r')
        val_dset = val_f['2dto3d']
        if debug_run:
            val_dta = np.array(val_dset[:10])
        else:
            val_dta = np.array(val_dset)
        val_f.close()
        preprocess(val_dta)
    # annotations live after the 182 coordinate columns
    train_annot = train_dta[:, 182+rangestart:182+rangeend]
    val_annot = val_dta[:, 182+rangestart:182+rangeend]
    # restrict the inputs to the landmark subset relevant for this target
    rel_ids = create_featseltuple(lmset_to_use[(rangestart, rangeend)])
    train_dta = train_dta[:, rel_ids]
    val_dta = val_dta[:, rel_ids]
    if rangestart > 0 and rangestart < 82:
        # rotation targets: regress rotation matrices instead of axis-angle
        train_annot = axis_angle_to_matrix(train_annot)
        val_annot = axis_angle_to_matrix(val_annot)
    return train_dta, train_annot, val_dta, val_annot
def sqdiff(rnge, val_dta, val_results, addoffs=0):
    """Error measure robust to angle orientations and mirroring.

    Compares each 3-vector in the ground-truth columns (offset by
    182 + addoffs in *val_dta*) with the corresponding prediction in
    *val_results*, taking the smaller of the plain and the sign-flipped
    distance, and returns the mean over all samples and vectors.
    """
    orig_ids = tuple(np.array(rnge) + 182 + addoffs)
    val_ids = tuple(np.array(rnge))
    assert len(orig_ids) == len(val_ids)
    assert len(orig_ids) % 3 == 0
    diffs = []
    for sample_idx in range(val_dta.shape[0]):
        for rot_idx in range(0, len(orig_ids), 3):
            reference = val_dta[sample_idx, orig_ids[rot_idx:rot_idx + 3]]
            estimate = val_results[sample_idx, val_ids[rot_idx:rot_idx + 3]]
            plain = np.linalg.norm(reference - estimate)
            mirrored = np.linalg.norm(reference + estimate)
            diffs.append(min(plain, mirrored))
    return np.mean(diffs)
@click.command()
@click.argument('train_prefix', type=click.STRING)
@click.argument('part_rangestart', type=click.INT)
@click.option('--finalize', type=click.BOOL, default=False, is_flag=True,
              help='Train on train+val, test on test.')
@click.option('--debug_run', type=click.BOOL, default=False, is_flag=True,
              help='Use only a small fraction of data for testing.')
def cli(train_prefix, part_rangestart,  # pylint: disable=too-many-branches, too-many-locals, too-many-statements, too-many-arguments
        finalize=False, debug_run=False):
    """Run a RotatingTree experiment."""
    rangestart = part_rangestart
    # Build the output model name from the configuration flags.
    pref = 'forest'
    pref += '_' + str(part_rangestart)
    if finalize:
        pref += '_final'
    if debug_run:
        pref += '_debug'
    out_fp = path.join(OUT_DIR, pref + '.z')
    LOGGER.info("Running for configuration `%s`.", out_fp)
    LOGGER.info("Loading data...")
    train_dta, train_annot, val_dta, val_annot = get_data(  # pylint: disable=unused-variable
        train_prefix, part_rangestart, finalize, debug_run)
    # Checks.
    if rangestart > 0 and rangestart < 82:
        # Rotation matrices: all entries must lie in [-1, 1].
        assert train_annot.max() <= 1.
        assert train_annot.min() >= -1.
        assert val_annot.max() <= 1.
        assert val_annot.min() >= -1.
    # Imported lazily so the module can be loaded without sklearn installed.
    import sklearn.ensemble
    rf = sklearn.ensemble.RandomForestRegressor(n_jobs=available_cpu_count())
    LOGGER.info("Fitting...")
    rf.fit(train_dta, train_annot)
    LOGGER.info("Writing results...")
    joblib.dump(rf, out_fp, compress=True)
    LOGGER.info("Done.")
|
<reponame>soran-ghaderi/Chromusic_search_engine<filename>tase/telegram/telegram_client.py
from enum import Enum
from typing import Optional, Coroutine, Union, List, Iterable
import pyrogram
from pyrogram.handlers.handler import Handler
from tase.my_logger import logger
from tase.telegram import handlers
from .methods.search_messages import search_messages
from ..configs import ClientConfig, ClientTypes
class UserClientRoles(Enum):
    """Roles a user (non-bot) Telegram client can take."""
    UNKNOWN = 0
    INDEXER = 1

    @staticmethod
    def _parse(role: str):
        """Map a role name to its member; UNKNOWN for unrecognized names."""
        return next(
            (member for member in UserClientRoles if member.name == role),
            UserClientRoles.UNKNOWN,
        )
class BotClientRoles(Enum):
    """Roles a bot Telegram client can take."""
    UNKNOWN = 0
    MAIN = 1

    @staticmethod
    def _parse(role: str):
        """Map a role name to its member; UNKNOWN for unrecognized names."""
        return next(
            (member for member in BotClientRoles if member.name == role),
            BotClientRoles.UNKNOWN,
        )
class TelegramClient:
    """Base wrapper around a :class:`pyrogram.Client`.

    Concrete subclasses (user vs. bot clients) implement `init_client` to
    construct the underlying pyrogram client with the right credentials.
    """
    # underlying pyrogram client; created lazily by init_client()
    _client: 'pyrogram.Client' = None
    # session name used by pyrogram
    name: 'str' = None
    api_id: 'int' = None
    api_hash: 'str' = None
    # directory where pyrogram stores session files
    workdir: 'str' = None
    telegram_id: int = None
    client_type: 'ClientTypes'

    def init_client(self):
        """Create self._client; implemented by subclasses."""
        pass

    def start(self):
        """Lazily initialize and start the underlying pyrogram client."""
        if self._client is None:
            self.init_client()
        logger.info("#" * 50)
        logger.info(self.name)
        logger.info("#" * 50)
        self._client.start()

    def stop(self) -> Coroutine:
        """Stop the underlying client (returns pyrogram's stop result)."""
        return self._client.stop()

    def is_connected(self) -> bool:
        """Whether the underlying client currently has a connection."""
        return self._client.is_connected

    def get_me(self) -> Optional['pyrogram.types.User']:
        """Return the authenticated account's own User object."""
        return self._client.get_me()

    def get_chat(self, chat_id: Union[int, str]) -> Union["pyrogram.types.Chat", "pyrogram.types.ChatPreview"]:
        """Fetch a chat (or chat preview) by id or username."""
        return self._client.get_chat(chat_id=chat_id)

    def get_session_name(self) -> str:
        """Return the pyrogram session name."""
        return self._client.name

    def add_handler(self, handler: "Handler", group: int = 0):
        """Register a single pyrogram update handler in *group*."""
        return self._client.add_handler(handler, group)

    def add_handlers(self, handlers_list: List['handlers.BaseHandler']):
        """Register every handler produced by the given BaseHandler objects."""
        for handler in handlers_list:
            for h in handler.init_handlers():
                # construct with filters only when the handler defines any
                self.add_handler(
                    h.cls(
                        callback=h.callback,
                        filters=h.filters,
                    ) if h.has_filter else h.cls(
                        callback=h.callback,
                    ),
                    h.group,
                )

    def iter_audios(
            self,
            chat_id: Union['str', 'int'],
            query: str = "",
            offset: int = 0,
            offset_id: int = 0,
            only_newer_messages: bool = True,
    ):
        """Yield audio messages of *chat_id* matching *query* (paged search)."""
        for message in search_messages(
                client=self._client,
                chat_id=chat_id,
                filter='audio',
                query=query,
                offset=offset,
                offset_id=offset_id,
                only_newer_messages=only_newer_messages,
        ):
            yield message

    @staticmethod
    def _parse(
            client_config: 'ClientConfig',
            workdir: str
    ) -> Optional['TelegramClient']:
        """Build the concrete client for *client_config*.

        NOTE(review): returns None implicitly for unknown client types (only
        an error is logged) -- callers must handle a None result.
        """
        if client_config.type == ClientTypes.USER:
            return UserTelegramClient(client_config, workdir)
        elif client_config.type == ClientTypes.BOT:
            return BotTelegramClient(client_config, workdir)
        else:
            # todo: raise error (unknown client type)
            logger.error("Unknown TelegramClient Type")

    def get_messages(
            self,
            chat_id: Union[int, str],
            message_ids: Union[int, Iterable[int]] = None
    ) -> Union["pyrogram.types.Message", List["pyrogram.types.Message"]]:
        """Fetch messages by id, always returning a list (even for one id)."""
        messages = self._client.get_messages(chat_id=chat_id, message_ids=message_ids)
        if messages and not isinstance(messages, list):
            messages = [messages]
        return messages
class UserTelegramClient(TelegramClient):
    """TelegramClient backed by a regular user account."""
    role: 'UserClientRoles'

    def __init__(self, client_config: 'ClientConfig', workdir: str):
        """Store credentials from *client_config*; the pyrogram client is
        created lazily in init_client()."""
        self.client_type = ClientTypes.USER
        self.workdir = workdir
        self.name = client_config.name
        self.api_id = client_config.api_id
        self.api_hash = client_config.api_hash
        self.role = UserClientRoles._parse(client_config.role)  # todo: check for unknown roles

    def init_client(self):
        """Construct the underlying pyrogram user client."""
        self._client = pyrogram.Client(
            name=self.name,
            api_id=self.api_id,
            api_hash=self.api_hash,
            workdir=self.workdir,
        )
class BotTelegramClient(TelegramClient):
    """TelegramClient backed by a bot account (requires a bot token)."""
    role: 'BotClientRoles'
    token: 'str'

    def __init__(self, client_config: 'ClientConfig', workdir: str):
        """Store credentials and bot token from *client_config*; the pyrogram
        client is created lazily in init_client()."""
        self.client_type = ClientTypes.BOT
        self.workdir = workdir
        self.name = client_config.name
        self.api_id = client_config.api_id
        self.api_hash = client_config.api_hash
        self.token = client_config.bot_token
        self.role = BotClientRoles._parse(client_config.role)  # todo: check for unknown roles

    def init_client(self):
        """Construct the underlying pyrogram bot client."""
        self._client = pyrogram.Client(
            name=self.name,
            api_id=self.api_id,
            api_hash=self.api_hash,
            bot_token=self.token,
            workdir=self.workdir,
        )
|
# -*- mode: python; coding: utf-8 -*-
import os
import datetime
import logging
import string
from drydrop_handler import DRY_ROOT
from drydrop.app.core.controller import BaseController
from drydrop.lib.json import json_parse
from drydrop.app.core.events import log_event
class HookController(BaseController):
    """Controller receiving VCS post-receive web hooks."""

    # see http://github.com/guides/post-receive-hooks
    def github(self):
        """Handle a GitHub post-receive hook.

        Parses the JSON payload, logs the pushed commits, and flushes every
        VFS resource touched by the changeset -- provided the configured
        source URL is inside the pushed branch's raw URL tree.

        Fixes: bare ``except:`` clauses (which also swallow SystemExit /
        KeyboardInterrupt) narrowed; py2-only ``string.join`` replaced by
        the portable ``','.join``; loop-invariant prefix computation hoisted
        out of the flush loop.
        """
        payload = self.params.get('payload', None)
        logging.info("Received github hook: %s", payload)
        if not payload:
            return
        data = json_parse(payload)
        paths = []
        names = []
        info = ""
        for commit in data['commits']:
            author = commit['author']['email']
            try:
                info += "<a target=\"_blank\" href=\"%s\">%s</a>: %s<br/>" % (commit['url'], commit['id'][:6], commit['message'].split("\n")[0])
            except Exception:  # malformed commit entry: best-effort summary
                info += "?<br/>"
            # collect distinct authors (replaces names.index + bare except)
            if author not in names:
                names.append(author)
            # gather all touched files; absent keys count as empty lists
            for key in ('added', 'removed', 'modified'):
                paths.extend(commit.get(key, []))
        before_url = "%s/commit/%s" % (data['repository']['url'], data['before'])
        after_url = "%s/commit/%s" % (data['repository']['url'], data['after'])
        # short commit ids for the log message; tolerate missing fields
        before = "?"
        try:
            before = data['before'][:6]
        except (KeyError, TypeError):
            pass
        after = "?"
        try:
            after = data['after'][:6]
        except (KeyError, TypeError):
            pass
        plural = ''
        if len(paths) != 1:
            plural = 's'
        authors = ','.join(names)  # was string.join(names, ','): python2-only
        log_event("Received github hook for commits <a target=\"_blank\" href=\"%s\">%s</a>..<a target=\"_blank\" href=\"%s\">%s</a> (%d change%s)" % (before_url, before, after_url, after, len(paths), plural), 0, authors, info)
        repo_url = data['repository']['url']  # like http://github.com/darwin/drydrop
        branch = data['ref'].split('/').pop()  # takes 'master' from 'refs/heads/master'
        root_url = "%s/raw/%s" % (repo_url, branch)  # creates http://github.com/darwin/drydrop/raw/master
        if not root_url.endswith('/'):
            root_url = root_url + '/'
        source_url = self.handler.settings.source
        if not source_url.endswith('/'):
            source_url = source_url + '/'
        # now we have:
        #   http://github.com/darwin/drydrop/raw/master/ in root_url
        #   http://github.com/darwin/drydrop/raw/master/tutorial/ in source_url
        # safety check
        if not source_url.startswith(root_url):
            log_event("<a target=\"_blank\" href=\"%s\"><code>%s</code></a><br/>is not affected by incoming changeset for<br/><a target=\"_blank\" href=\"%s\"><code>%s</code></a>" % (source_url, source_url, root_url, root_url), 0, authors)
            logging.info("Source url '%s' is not affected by incoming changeset for '%s'", source_url, root_url)
            return
        vfs = self.handler.vfs
        prefix = source_url[len(root_url):]  # e.g. 'tutorial/' (loop-invariant)
        for path in paths:
            if not path.startswith(prefix):
                logging.warning("Unexpected: path '%s' should begin with '%s'. Skipping file.", path, prefix)
            else:
                # path is something like tutorial/start.html
                path = path[len(prefix):]  # stripped to 'start.html'
                logging.info("Flushing resource %s", path)
                vfs.flush_resource(path)
"""
Dataset from Pandaset (Hesai)
"""
import pickle
import os
try:
import pandas as pd
import pandaset as ps
except:
pass
import numpy as np
from ..dataset import DatasetTemplate
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
import torch
def pose_dict_to_numpy(pose):
    """Convert a pandaset pose dict to a flat vector so it can be passed
    through the network.

    Order: [pos.x, pos.y, pos.z, heading.w, heading.x, heading.y, heading.z].
    """
    position = pose["position"]
    heading = pose["heading"]
    return ([position[axis] for axis in ("x", "y", "z")] +
            [heading[component] for component in ("w", "x", "y", "z")])
def pose_numpy_to_dict(pose):
    """Convert a flat pose vector back into a pandaset pose dict.

    Inverse of pose_dict_to_numpy: expects
    [pos.x, pos.y, pos.z, heading.w, heading.x, heading.y, heading.z].
    """
    px, py, pz, hw, hx, hy, hz = (pose[i] for i in range(7))
    return {
        'position': {'x': px, 'y': py, 'z': pz},
        'heading': {'w': hw, 'x': hx, 'y': hy, 'z': hz},
    }
class PandasetDataset(DatasetTemplate):
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        """
        Args:
            root_path: dataset root; falls back to dataset_cfg.DATA_PATH
            dataset_cfg: dataset configuration node
            class_names: list of class names to train on
            training: whether this instance serves the training split
            logger: optional logger
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        if root_path is None:
            root_path = self.dataset_cfg.DATA_PATH
        # pandaset devkit handle onto the raw dataset directory
        self.dataset = ps.DataSet(os.path.join(root_path, 'dataset'))
        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
        self.pandaset_infos = []
        self.include_pandaset_infos(self.mode)
    def include_pandaset_infos(self, mode):
        """Load the pickled info files configured for *mode* into
        self.pandaset_infos; missing files are silently skipped."""
        if self.logger is not None:
            self.logger.info('Loading PandaSet dataset')
        pandaset_infos = []
        for info_path in self.dataset_cfg.INFO_PATH[mode]:
            info_path = os.path.join(self.root_path, info_path)
            if not os.path.exists(info_path):
                continue
            with open(info_path, 'rb') as f:
                infos = pickle.load(f)
                pandaset_infos.extend(infos)
        self.pandaset_infos.extend(pandaset_infos)
        if self.logger is not None:
            # reports only the infos added in this call
            self.logger.info('Total samples for PandaSet dataset ({}): {}'.format(self.mode, len(pandaset_infos)))
    def set_split(self, split):
        """Switch the active split.

        NOTE(review): this only updates self.sequences/self.split; it does
        not reload self.pandaset_infos -- confirm callers reload infos.
        """
        self.sequences = self.dataset_cfg.SEQUENCES[split]
        self.split = split
    def __len__(self):
        """Number of loaded frame infos (dataset samples)."""
        return len(self.pandaset_infos)
    def __getitem__(self, index):
        """
        To support a custom dataset, implement this function to load the raw data (and labels), then transform them to
        the unified normative coordinate (x pointing forward, z pointing upwards) and call the function self.prepare_data() to process the data and send them
        to the model.
        Args:
            index: sample index into self.pandaset_infos
        Returns:
            data_dict produced by self.prepare_data()
        """
        info = self.pandaset_infos[index]
        seq_idx = info['sequence']
        pose = self._get_pose(info)
        points = self._get_lidar_points(info, pose)
        boxes, labels, zrot_world_to_ego = self._get_annotations(info, pose)
        pose_np = pose_dict_to_numpy(pose)
        input_dict = {'points': points,
                      'gt_boxes': boxes,
                      'gt_names': labels,
                      'sequence': int(seq_idx),
                      'frame_idx': info['frame_idx'],
                      'zrot_world_to_ego': zrot_world_to_ego,
                      'pose': pose_dict_to_numpy(pose)
                      }
        # seq_idx is converted to int because strings can't be passed to
        # the gpu in pytorch
        # zrot_world_to_ego is propagated in order to be able to transform the
        # predicted yaws back to world coordinates
        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict
    def _get_pose(self, info):
        """Return the lidar pose dict for the frame described by *info*."""
        seq_idx = info['sequence']
        # get pose for world to ego frame transformation
        if self.dataset[seq_idx].lidar.poses is None:
            # poses are loaded lazily by the pandaset devkit (private API)
            self.dataset[seq_idx].lidar._load_poses()
        pose = self.dataset[seq_idx].lidar.poses[info['frame_idx']]
        return pose
    def _get_lidar_points(self, info, pose):
        """
        Get lidar in the unified normative coordinate system for a given frame.
        The intensity is normalized to fit the [0-1] range (pandaset intensity
        is in the [0-255] range).
        Returns an (N, 4) float32 array: x, y, z, intensity.
        """
        # get lidar points
        lidar_frame = pd.read_pickle(info['lidar_path'])
        # get points for the required lidar(s) only (-1 keeps all devices)
        device = self.dataset_cfg.get('LIDAR_DEVICE', 0)
        if device != -1:
            lidar_frame = lidar_frame[lidar_frame.d == device]
        world_points = lidar_frame.to_numpy()
        # There seems to be issues with the automatic deletion of pandas datasets sometimes
        del lidar_frame
        points_loc = world_points[:, :3]
        points_int = world_points[:, 3]
        # normalize intensity from [0, 255] to [0, 1]
        points_int = points_int / 255
        ego_points = ps.geometry.lidar_points_to_ego(points_loc, pose)
        # Pandaset ego coordinates are:
        # - x pointing to the right
        # - y pointing to the front
        # - z pointing up
        # Normative coordinates are:
        # - x pointing foreward
        # - y pointings to the left
        # - z pointing to the top
        # So a transformation is required to the match the normative coordinates
        ego_points = ego_points[:, [1, 0, 2]]  # switch x and y
        ego_points[:, 1] = - ego_points[:, 1]  # revert y axis
        return np.append(ego_points, np.expand_dims(points_int, axis=1), axis=1).astype(np.float32)
    def _get_annotations(self,info, pose):
        """
        Get box informations in the unified normative coordinate system for a
        given frame.

        Args:
            info: info dict with 'cuboids_path' (see get_infos)
            pose: pose entry for this frame (see _get_pose)

        Returns:
            ego_boxes: (N, 7) float32 array [x, y, z, dx, dy, dz, yaw] in the
                normative ego frame
            labels: (N,) array of class names, remapped through
                dataset_cfg.TRAINING_CATEGORIES when a mapping exists
            zrot_world_to_ego: rotation (rad) of the world y axis around the
                z axis; needed to map predicted yaws back to world coordinates
        """
        # get boxes
        cuboids = pd.read_pickle(info["cuboids_path"])
        device = self.dataset_cfg.get('LIDAR_DEVICE', 0)
        if device != -1:
            # keep cuboids that are seen by the given device
            # (sensor_id is either the device id or -1 for "both",
            # so drop only boxes belonging exclusively to the other device)
            cuboids = cuboids[cuboids["cuboids.sensor_id"] != 1 - device]
        xs = cuboids['position.x'].to_numpy()
        ys = cuboids['position.y'].to_numpy()
        zs = cuboids['position.z'].to_numpy()
        dxs = cuboids['dimensions.x'].to_numpy()
        dys = cuboids['dimensions.y'].to_numpy()
        dzs = cuboids['dimensions.z'].to_numpy()
        yaws = cuboids['yaw'].to_numpy()
        labels = cuboids['label'].to_numpy()
        del cuboids  # There seem to be issues with the automatic deletion of pandas datasets sometimes
        # remap raw pandaset labels to training categories; unknown labels
        # are kept unchanged
        labels = np.array([self.dataset_cfg.TRAINING_CATEGORIES.get(lab, lab)
                           for lab in labels])
        # Compute the center points coordinates in ego coordinates
        centers = np.vstack([xs, ys, zs]).T
        ego_centers = ps.geometry.lidar_points_to_ego(centers, pose)
        # Compute the yaw in ego coordinates
        # The following implementation supposes that the pitch of the car is
        # negligible compared to its yaw, in order to be able to express the
        # bbox coordinates in the ego coordinate system with an {axis aligned
        # box + yaw} only representation
        yaxis_points_from_pose = ps.geometry.lidar_points_to_ego(np.array([[0, 0, 0], [0, 1., 0]]), pose)
        yaxis_from_pose = yaxis_points_from_pose[1, :] - yaxis_points_from_pose[0, :]
        # z component of the transformed unit y vector == sin(pitch)
        if yaxis_from_pose[-1] >= 10**-1:
            if self.logger is not None:
                self.logger.warning("The car's pitch is supposed to be negligible " +
                                    "sin(pitch) is >= 10**-1 ({})".format(yaxis_from_pose[-1]))
        # rotation angle in rads of the y axis around the z axis
        zrot_world_to_ego = np.arctan2(-yaxis_from_pose[0], yaxis_from_pose[1])
        ego_yaws = yaws + zrot_world_to_ego
        # Pandaset ego coordinates are:
        # - x pointing to the right
        # - y pointing to the front
        # - z pointing up
        # Normative coordinates are:
        # - x pointing forward
        # - y pointing to the left
        # - z pointing to the top
        # So a transformation is required to match the normative coordinates
        ego_xs = ego_centers[:, 1]
        ego_ys = -ego_centers[:, 0]
        ego_zs = ego_centers[:, 2]
        # dimensions swap with the axes; they stay >= 0 so no sign change
        ego_dxs = dys
        ego_dys = dxs  # stays >= 0
        ego_dzs = dzs
        ego_boxes = np.vstack([ego_xs, ego_ys, ego_zs, ego_dxs, ego_dys, ego_dzs, ego_yaws]).T
        return ego_boxes.astype(np.float32), labels, zrot_world_to_ego
    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        To support a custom dataset, implement this function to receive the predicted results from the model, and then
        transform the unified normative coordinate to your required coordinate, and optionally save them to disk.

        Args:
            batch_dict: dict of original data from the dataloader
            pred_dicts: dict of predicted results from the model
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path: if it is not None, save the results to this path

        Returns:
            annos: list with one dict per sample holding the predictions as a
                pandas DataFrame in pandaset world coordinates, plus 'name',
                'frame_idx' and 'sequence' bookkeeping fields.
        """
        def generate_single_sample_dataframe(batch_index, box_dict, zrot_world_to_ego, pose):
            # Convert one sample's predictions from the normative ego frame
            # back to pandaset world coordinates, packed as a DataFrame in the
            # pandaset cuboids schema.
            pred_boxes = box_dict["pred_boxes"].cpu().numpy()
            pred_scores = box_dict["pred_scores"].cpu().numpy()
            pred_labels = box_dict["pred_labels"].cpu().numpy()
            zrot = zrot_world_to_ego.cpu().numpy()
            pose_dict = pose_numpy_to_dict(pose.cpu().numpy())
            xs = pred_boxes[:, 0]
            ys = pred_boxes[:, 1]
            zs = pred_boxes[:, 2]
            dxs = pred_boxes[:, 3]
            dys = pred_boxes[:, 4]
            dzs = pred_boxes[:, 5]
            yaws = pred_boxes[:, 6]
            names = np.array(class_names)[pred_labels - 1]  # Predicted labels start on 1
            # convert from normative coordinates to pandaset ego coordinates
            # (inverse of the axis swap performed in _get_lidar_points)
            ego_xs = - ys
            ego_ys = xs
            ego_zs = zs
            ego_dxs = dys
            ego_dys = dxs
            ego_dzs = dzs
            ego_yaws = yaws
            # convert from pandaset ego coordinates to world coordinates
            # for the moment, a simplified estimation of the ego yaw is computed in __getitem__
            # which sets ego_yaw = world_yaw + zrot_world_to_ego
            world_yaws = ego_yaws - zrot
            ego_centers = np.vstack([ego_xs, ego_ys, ego_zs]).T
            world_centers = ps.geometry.ego_to_lidar_points(ego_centers, pose_dict)
            world_xs = world_centers[:, 0]
            world_ys = world_centers[:, 1]
            world_zs = world_centers[:, 2]
            # dx, dy, dz remain unchanged as the bbox orientation is handled by
            # the yaw information
            data_dict = {'position.x': world_xs,
                         'position.y': world_ys,
                         'position.z': world_zs,
                         'dimensions.x': ego_dxs,
                         'dimensions.y': ego_dys,
                         'dimensions.z': ego_dzs,
                         'yaw': world_yaws % (2 * np.pi),
                         'label': names,
                         'score': pred_scores
                         }
            return pd.DataFrame(data_dict)

        annos = []
        for index, box_dict in enumerate(pred_dicts):
            frame_idx = batch_dict['frame_idx'][index]
            seq_idx = batch_dict['sequence'][index]
            zrot = batch_dict['zrot_world_to_ego'][index]
            pose = batch_dict['pose'][index]
            single_pred_df = generate_single_sample_dataframe(index, box_dict, zrot, pose)
            single_pred_dict = {'preds': single_pred_df,
                                # 'name' ensures testing the number of detections in a compatible format as kitti
                                'name': single_pred_df['label'].tolist(),
                                'frame_idx': frame_idx,
                                'sequence': str(seq_idx).zfill(3)}
            # seq_idx was converted to int in self.__getitem__ because strings
            # can't be passed to the gpu in pytorch.
            # To convert it back to a string, we assume that the sequence is
            # provided in pandaset format with 3 digits
            if output_path is not None:
                # mirror the pandaset directory layout:
                # <output>/<seq>/predictions/cuboids/<frame>.pkl.gz
                frame_id = str(int(frame_idx)).zfill(2)
                seq_id = str(int(seq_idx)).zfill(3)
                cur_det_file = os.path.join(output_path, seq_id, 'predictions',
                                            'cuboids', ("{}.pkl.gz".format(frame_id)))
                os.makedirs(os.path.dirname(cur_det_file), exist_ok=True)
                single_pred_df.to_pickle(cur_det_file)
            annos.append(single_pred_dict)
        return annos
def get_infos(self):
"""
Generate the dataset infos dict for each sample of the dataset.
For each sample, this dict contains:
- the sequence index
- the frame index
- the path to the lidar data
- the path to the bounding box annotations
"""
infos = []
for seq in self.sequences:
s = self.dataset[seq]
s.load_lidar()
if len(s.lidar.data) > 100:
raise ValueError("The implementation for this dataset assumes that each sequence is " +
"no longer than 100 frames. The current sequence has {}".format(len(s.lidar.data)))
info = [{'sequence': seq,
'frame_idx': ii,
'lidar_path': os.path.join(self.root_path, 'dataset', seq, 'lidar', ("{:02d}.pkl.gz".format(ii))),
'cuboids_path': os.path.join(self.root_path, 'dataset', seq,
'annotations', 'cuboids', ("{:02d}.pkl.gz".format(ii)))
} for ii in range(len(s.lidar.data))]
infos.extend(info)
del self.dataset._sequences[seq]
return infos
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
database_save_path = os.path.join(self.root_path,
'gt_database' if split == 'train' else 'gt_database_{}'.format(split))
db_info_save_path = os.path.join(self.root_path,
'pandaset_dbinfos_{}.pkl'.format(split))
os.makedirs(database_save_path, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
for k in range(len(infos)):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
sample_idx = info['frame_idx']
pose = self._get_pose(info)
points = self._get_lidar_points(info, pose)
gt_boxes, names, _ = self._get_annotations(info, pose)
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
tmp_name = names[i].replace("/", "").replace(" ", "")
filename = '%s_%s_%d.bin' % (sample_idx, tmp_name, i)
filepath = os.path.join(database_save_path, filename)
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'wb') as f:
gt_points.tofile(f)
if (used_classes is None) or names[i] in used_classes:
db_path = os.path.relpath(filepath, self.root_path) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
'difficulty': -1}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
def evaluation(self, det_annos, class_names, **kwargs):
self.logger.warning('Evaluation is not implemented for Pandaset as there is no official one. ' +
'Returning an empty evaluation result.')
ap_result_str = ''
ap_dict = {}
return ap_result_str, ap_dict
def create_pandaset_infos(dataset_cfg, class_names, data_path, save_path):
    """Pre-compute and pickle the per-sample info dicts for every split,
    then build the ground-truth database used for data augmentation.

    See PandasetDataset.get_infos for the content of each info dict.
    """
    dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names,
                              root_path=data_path, training=False)
    for split in ["train", "val", "test"]:
        print("---------------- Start to generate {} data infos ---------------".format(split))
        dataset.set_split(split)
        split_infos = dataset.get_infos()
        info_file = os.path.join(save_path, 'pandaset_infos_{}.pkl'.format(split))
        with open(info_file, 'wb') as handle:
            pickle.dump(split_infos, handle)
        print("Pandaset info {} file is saved to {}".format(split, info_file))
    print('------------Start create groundtruth database for data augmentation-----------')
    # fresh instance: get_infos() above dropped the cached sequences
    dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names,
                              root_path=data_path, training=False)
    dataset.set_split("train")
    dataset.create_groundtruth_database(
        os.path.join(save_path, 'pandaset_infos_train.pkl'),
        split="train"
    )
    print('---------------Data preparation Done---------------')
if __name__ == '__main__':
    import sys
    # CLI entry point:
    #   python pandaset_dataset.py create_pandaset_infos <dataset_cfg.yaml>
    # pre-generates the info pickles and the ground-truth database.
    if sys.argv.__len__() > 1 and sys.argv[1] == 'create_pandaset_infos':
        import yaml
        from pathlib import Path
        from easydict import EasyDict
        # sys.argv[2] is the dataset yaml config path
        dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2])))
        # repository root, three levels above this file
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        create_pandaset_infos(
            dataset_cfg=dataset_cfg,
            class_names=['Car', 'Pedestrian', 'Cyclist'],
            data_path=ROOT_DIR / 'data' / 'pandaset',
            save_path=ROOT_DIR / 'data' / 'pandaset'
        )
|
<reponame>zhupangithub/WEBERP<filename>Code/odooerp/odoo-8.0/openerp/addons/website_event_sale/controllers/main.py
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website_event.controllers.main import website_event
from openerp.addons.website_sale.controllers.main import get_pricelist, website_sale
from openerp.tools.translate import _
class website_event(website_event):
    """website_event controller extended with event ticket sales support
    (Odoo 8 / Python 2)."""

    @http.route(['/event/<model("event.event"):event>/register'], type='http', auth="public", website=True)
    def event_register(self, event, **post):
        """Render the full event page with ticket prices taken from the
        visitor's pricelist."""
        pricelist_id = int(get_pricelist())
        values = {
            'event': event.with_context(pricelist=pricelist_id),
            'main_object': event.with_context(pricelist=pricelist_id),
            'range': range,
        }
        return request.website.render("website_event.event_description_full", values)

    @http.route(['/event/cart/update'], type='http', auth="public", methods=['POST'], website=True)
    def cart_update(self, event_id, **post):
        """Add the posted ticket quantities to the current sale order, then
        redirect to checkout (or back to the event page when nothing was
        ordered). Form fields are named "ticket-<ticket_id>"."""
        cr, uid, context = request.cr, request.uid, request.context
        ticket_obj = request.registry.get('event.event.ticket')
        sale = False
        for key, value in post.items():
            quantity = int(value or "0")
            if not quantity:
                continue
            sale = True
            # keys look like "ticket-<id>"; anything else yields ticket_id None
            ticket_id = key.split("-")[0] == 'ticket' and int(key.split("-")[1]) or None
            ticket = ticket_obj.browse(cr, SUPERUSER_ID, ticket_id, context=context)
            order = request.website.sale_get_order(force_create=1)
            order.with_context(event_ticket_id=ticket.id)._cart_update(product_id=ticket.product_id.id, add_qty=quantity)
        if not sale:
            return request.redirect("/event/%s" % event_id)
        return request.redirect("/shop/checkout")

    def _add_event(self, event_name="New Event", context=None, **kwargs):
        """Create a new event pre-configured with a default 'Subscription'
        ticket.

        BUGFIX: ``context`` previously defaulted to a shared mutable ``{}``,
        so the default-ticket key written below leaked between calls. A
        fresh dict is now created per call when no context is given.
        """
        if context is None:
            context = {}
        try:
            dummy, res_id = request.registry.get('ir.model.data').get_object_reference(request.cr, request.uid, 'event_sale', 'product_product_event')
            context['default_event_ticket_ids'] = [[0, 0, {
                'name': _('Subscription'),
                'product_id': res_id,
                'deadline': False,
                'seats_max': 1000,
                'price': 0,
            }]]
        except ValueError:
            # the event_sale demo product is not installed; create the event
            # without a default ticket
            pass
        return super(website_event, self)._add_event(event_name, context, **kwargs)
class website_sale(website_sale):
    """website_sale controller extended so event-ticket order lines are
    priced with the ticket's (possibly reduced) price."""

    @http.route(['/shop/get_unit_price'], type='json', auth="public", methods=['POST'], website=True)
    def get_unit_price(self, product_ids, add_qty, use_order_pricelist=False, **kw):
        """Return unit prices per product id, substituting the event-ticket
        price for the order line's ticket product when a line_id is given."""
        cr, context, pool = request.cr, request.context, request.registry
        ticket_prices = {}
        if 'line_id' in kw:
            line = pool['sale.order.line'].browse(cr, SUPERUSER_ID, kw['line_id'])
            if line.event_ticket_id:
                pricelist = line.order_id.pricelist_id
                if pricelist:
                    ticket = pool['event.event.ticket'].browse(
                        cr, SUPERUSER_ID, line.event_ticket_id.id,
                        context=dict(context, pricelist=pricelist.id))
                else:
                    ticket = line.event_ticket_id
                ticket_prices = {ticket.product_id.id: ticket.price_reduce or ticket.price}
                # the ticket product is priced here, not by the generic lookup
                product_ids.remove(ticket.product_id.id)
        other_prices = super(website_sale, self).get_unit_price(product_ids, add_qty, use_order_pricelist, **kw)
        merged = dict(ticket_prices)
        merged.update(other_prices)
        return merged
|
<reponame>gocept/batou_ext<gh_stars>1-10
"""Helper for Jenkins pipeline deployments."""
import configparser
import argparse
import json
import subprocess
import sys
def git_resolve(url, version):
    """Resolve *version* (sha, tag or branch) against the remote *url*.

    A full 40-character hex sha is returned unchanged without touching the
    network. Otherwise ``git ls-remote`` is queried, preferring the peeled
    tag reference (``<version>^{}``) and falling back to the plain ref.
    """
    def _is_full_sha(candidate):
        if len(candidate) != 40:
            return False
        try:
            int(candidate, 16)
        except ValueError:
            return False
        return True

    if _is_full_sha(version):
        return version
    # Symbolic name: try the peeled ("dereferenced") tag form first.
    proc = subprocess.Popen(['git', 'ls-remote', url, version + '^{}'],
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    if not out:
        # not an annotated tag -- fall back to the generic lookup
        proc = subprocess.Popen(['git', 'ls-remote', url, version],
                                stdout=subprocess.PIPE)
        out, _ = proc.communicate()
    return out.decode('ascii').split('\t', 1)[0]
class VersionsUpdater:
    """Update component versions in a ``versions.ini`` file.

    The mapping of component name to requested version arrives as a JSON
    string. Each component section may declare an ``update`` option that
    selects how the version is applied: ``git-resolve`` (default) resolves
    the version against the section's git ``url``; ``pass:<option>`` writes
    the value verbatim into ``<option>``.
    """

    UPDATERS = {
        'git-resolve': 'update_git',
        'pass': 'update_pass_value',
    }

    def __init__(self, versions_file, version_mapping_json):
        self.version_mapping = json.loads(version_mapping_json)
        self.versions_file = versions_file
        # FIX: SafeConfigParser was deprecated in Python 3.2 and removed in
        # 3.12; ConfigParser is the drop-in replacement.
        self.config = configparser.ConfigParser()
        self.config.read(self.versions_file)

    def __call__(self):
        """Apply every requested update and write the file back."""
        for service, version in sorted(self.version_mapping.items()):
            if not version:
                # leave empty to keep current version
                continue
            self.update(service, version)
        with open(self.versions_file, 'w') as f:
            self.config.write(f)

    def update(self, service, version):
        """Dispatch to the updater configured for *service*."""
        update_mode = self.config[service].get('update', 'git-resolve')
        update_mode = update_mode.split(':', 1)
        mode = update_mode[0]
        # everything after the first ':' is passed to the updater verbatim
        args = ''.join(update_mode[1:])
        func = getattr(self, self.UPDATERS[mode])
        func(service, version, args)

    def update_git(self, service, version, extra_args):
        """Resolve *version* against the section's git ``url`` and store both
        the symbolic version and the resolved revision.

        Raises:
            ValueError: if the remote cannot resolve the version.
        """
        resolved = git_resolve(self.config.get(service, 'url'), version)
        if not resolved:
            raise ValueError('%s: Could not resolve version %s.' %
                             (service, version))
        log('%s: resolved version %s to: %s', service, version, resolved)
        self.config.set(service, 'revision', resolved)
        self.config.set(service, 'version', version)

    def update_pass_value(self, service, version, extra_args):
        """Write *version* verbatim into the option named by *extra_args*."""
        self.config[service][extra_args] = version
def log(msg, *args):
    """Write *msg* %-formatted with *args* to stdout and flush immediately,
    so output is not lost when the pipeline buffers it."""
    sys.stdout.write((msg % args) + '\n')
    sys.stdout.flush()
def list_components(versions_file, verbose=False):
    """Print the components of *versions_file* to stdout as JSON.

    With ``verbose`` each component is emitted as a dict of all its options
    plus a ``name`` key; otherwise just the sorted section names.
    """
    # FIX: SafeConfigParser was deprecated in Python 3.2 and removed in 3.12;
    # ConfigParser is the drop-in replacement.
    config = configparser.ConfigParser()
    config.read(versions_file)
    components = sorted(config.sections())
    if verbose:
        result = []
        for component in components:
            c = dict(config.items(component))
            c['name'] = component
            result.append(c)
    else:
        result = components
    print(json.dumps(result, sort_keys=True))
def set_versions(versions_file, version_mapping_json):
    """Apply the JSON version mapping to *versions_file* via VersionsUpdater."""
    updater = VersionsUpdater(versions_file, version_mapping_json)
    updater()
def main():
    """Command-line entry point.

    Sub-commands:
        list-components  -- print available components as JSON
        set-versions     -- apply a JSON version mapping to versions.ini
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    p = subparsers.add_parser(
        'list-components',
        help='List available components where versions can be set')
    p.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='Return all options from versions.ini, not only component names')
    p.add_argument('versions_file', help='Name of "versions.ini"')
    p.set_defaults(func=list_components)
    p = subparsers.add_parser('set-versions', help='Update versions')
    p.add_argument(
        'versions_file',
        help='Name of versions.ini. If exists it will be overwritten.')
    p.add_argument('version_mapping_json',
                   help='JSON: mapping of service: version')
    p.set_defaults(func=set_versions)
    args = parser.parse_args()
    # FIX: sub-commands are optional since Python 3.3, so `func` may be
    # missing; fail with a usage message instead of an AttributeError.
    func = getattr(args, 'func', None)
    if func is None:
        parser.error('a sub-command is required')
    # FIX: vars() exposes the namespace as a dict without relying on the
    # private Namespace._get_kwargs() API.
    func_args = dict(vars(args))
    del func_args['func']
    return func(**func_args)
if __name__ == '__main__':
    # script entry point (``main`` is also importable for console scripts)
    main()
|
<gh_stars>1-10
#!/usr/bin/env python
import sys, os, time, yaml
import pprint as pp
import subprocess, math
import numpy as np
import os.path, re
from scipy import stats
from argparse import ArgumentParser
import matplotlib.pyplot as plt
np.set_printoptions(edgeitems=10, linewidth=100000)
# peak BW for mrstem (GB) = 21333.33 * 4 at time of doing these runs
peakMemBW = 85.
myAlpha = .9
# num of dofs for each case for plotting
#785152, 3143168, 12577792, 50321408
meshLabelsPlot = [r'$0.78~\cdot~10^6$',
r'$3~\cdot~10^6$',
r'$12~\cdot~10^6$',
r'$50~\cdot~10^6$']
nThreads = [2, 8, 36]
colors = {2:'#009286', 8:'#ff9e11', 36:'#cd4d84'}
fSizes = [1,4,16,48]
#=====================================================================
def computeMetricValue(lineData, currValueF, metric, stat):
    """Return the requested metric/stat value from one row of the data file.

    Column layout of a row: [nThreads, M, ?, ?, mem(ave,min,max),
    cpu(ave,min,max), itertime(ave,min,max), looptime].

    Args:
        lineData: one row of the scaling data file (indexable)
        currValueF: unused; kept for interface compatibility with callers
        metric: 'mem', 'cpu', 'itertime' or 'looptime'
        stat: 'ave', 'min' or 'max' (ignored for 'looptime')

    Returns:
        The selected value, or None for an unknown metric/stat combination
        (preserves the original fall-through behaviour).
    """
    if metric == "looptime":
        # looptime has a single column; stat is irrelevant
        return lineData[13]
    # FIX: replace the silent if/elif fall-through with an explicit
    # column-lookup table; unknown combinations still yield None.
    columns = {
        ("mem", "ave"): 4, ("mem", "min"): 5, ("mem", "max"): 6,
        ("cpu", "ave"): 7, ("cpu", "min"): 8, ("cpu", "max"): 9,
        ("itertime", "ave"): 10, ("itertime", "min"): 11, ("itertime", "max"): 12,
    }
    index = columns.get((metric, stat))
    return lineData[index] if index is not None else None
#=====================================================================
def createDataDic(data, metric, stat):
    """Group metric values by thread count and trajectory count.

    Args:
        data: 2D array of rows [nThreads, M, ...] from the scaling data file
        metric/stat: forwarded to computeMetricValue

    Returns:
        {nThreads: {M: [values...]}} restricted to the module-level
        `nThreads` and `fSizes` selections.
    """
    # FIX: the accumulator was named `all`, shadowing the builtin.
    byThreads = {}
    for nt in nThreads:
        perF = {}
        for i in range(data.shape[0]):
            row = data[i, :]
            # number of threads and number of modes of this row
            rowThreads = int(row[0])
            rowF = int(row[1])
            if rowThreads == nt and rowF in fSizes:
                value = computeMetricValue(row, rowF, metric, stat)
                perF.setdefault(rowF, []).append(value)
        byThreads[nt] = perF
    return byThreads
#=====================================================================
def plotBarSet(ax, xLoc, width, f, dic, myColor):
    """Draw one group of bars (trajectory count *f*) at positions *xLoc*."""
    heights = dic[f]
    ax.bar(xLoc, heights, width, alpha=myAlpha, color=myColor,
           edgecolor='none', zorder=5)
#=====================================================================
def plotBar(dataDic, meshLabels, nThreads, metric, stat):
    """Render the grouped bar chart for one metric/stat and save it as PNG.

    Args:
        dataDic: {nThreads: {M: [values per mesh]}} from createDataDic
        meshLabels: tick labels (total dofs) for the secondary x axis
        nThreads: thread counts, used only for the legend entries
        metric: 'mem', 'cpu' or 'itertime' -- selects y axis scaling/labels
        stat: 'ave', 'min' or 'max' -- only affects the itertime y label

    Writes ./plots/fom_<metric>_<stat>.png and shows the figure.
    """
    # number of mesh sizes to deal with
    numMeshes = len(meshLabels)
    # Setting the positions and width for the bars
    posArray = range(numMeshes)
    pos = list(posArray)
    width = 0.45
    plt.rc('axes', axisbelow=True)
    fig, ax = plt.subplots(figsize=(9,6))
    plt.grid()
    # second x axis (bottom) for the mesh-size labels
    ax2 = ax.twiny()
    fig.subplots_adjust(bottom=0.25)
    # hand-tuned horizontal offsets separating the thread-count groups
    gigi = [0.25, 6.5, 12.75, 19.]
    xTicksBars, xTlabels = [], []
    count=0
    # NOTE: the comprehensions below rebind k locally; the outer `k`
    # (the thread count) is still what indexes `colors` in py3.
    for k,v in dataDic.items():
        for i,f in enumerate(fSizes):
            #x locations for the bars
            shift = width*i*3.5
            xLoc = [p+shift+0.455*count+gigi[k] for k,p in enumerate(pos)]
            plotBarSet(ax, xLoc, width, f, v, colors[k])
            xTicksBars += [p+shift+0.475+gigi[k] for k,p in enumerate(pos)]
            xTlabels += [str(f) for i in range(numMeshes)]
        count+=1
    # invisible off-plot bars whose only purpose is the legend entries
    for nt in nThreads:
        ax.bar(100, 1, width, alpha=myAlpha, color=colors[nt],
               edgecolor='none', zorder=-1, label='threads='+str(nt))
    l = ax.legend(loc="upper center", ncol=5, fontsize=13, frameon=False)
    for text in l.get_texts():
        text.set_color("gray")
    # remove the vertical lines of the grid
    ax.xaxis.grid(which="major", color='None', linestyle='-.', linewidth=0, zorder=0)
    ax.xaxis.set_ticks_position('bottom')
    ax.xaxis.set_label_position('bottom')
    ax.set_xticks(xTicksBars)
    ax.set_xticklabels(xTlabels, fontsize=15, color='gray')
    ax.xaxis.set_tick_params(rotation=0)
    ax.set_xlabel('Number of simultaneous trajectories (M)', fontsize=16, color='gray')
    ax.set_xlim(min(pos)-0.2, max(pos)+width*56)
    # per-metric y axis configuration
    if metric =="mem":
        ax.set_yscale('log')
        ax.set_ylabel("Memory Bandwith (GB/s)", fontsize=18)
        ax.set_ylim([1e-1, 1000])
        ax.set_yticks([1e-1, 1, 10, 100, 1000])
        ax.tick_params(axis='y', which='major', labelsize=15, color='gray')
        ax.tick_params(axis='y', which='minor', labelsize=13, color='gray')
        # # plot peak theoretical mem BW
        # ax.plot([min(pos)-0.2, max(pos)+width*70],
        #         [peakMemBW, peakMemBW], '--k', linewidth=1.2, zorder=7)
        # ax.text((min(pos)+width+max(pos)+width*75)*0.45,
        #         peakMemBW+12, 'Machine\'s theoretical peak', fontsize=15)
    elif metric=='cpu':
        ax.set_yscale('log')
        ax.set_ylabel("GFlops", fontsize=18, color='gray')
        ax.set_ylim([1e-1, 1e4])
        ax.set_yticks([1e-1, 1, 10, 1e2, 1e3, 1e4])
        ax.tick_params(axis='y', which='major', labelsize=15, color='gray')
        ax.tick_params(axis='y', which='minor', labelsize=13, color='gray')
    elif metric =="itertime":
        ax.set_yscale('log')
        ax.set_ylim([1e-1, 1e4])
        ax.tick_params(axis='y', which='major', labelsize=15, color='gray')
        ax.tick_params(axis='y', which='minor', labelsize=13, color='gray')
        if stat == 'ave': pref = 'Average'
        elif stat=='min': pref = 'Min'
        elif stat=='max': pref = 'Max'
        ax.set_ylabel(pref+" time (ms)/timestep", fontsize=18, color='gray')
    # ticks for the meshes (hand-tuned to sit under each group)
    meshTicks = [3.5, 11., 18.75, 26.35]
    ax2.set_xticks(meshTicks)
    ax2.xaxis.set_ticks_position('bottom')
    ax2.xaxis.set_label_position('bottom')
    ax2.spines['bottom'].set_position(('outward', 65))
    ax2.set_xlabel('Total degrees of freedom (N)', fontsize=16, color='gray')
    ax2.set_xticklabels(meshLabels, fontsize=16, color='gray')
    ax2.set_xlim(min(pos), max(pos)+width*60)
    ax2.set_axisbelow(True)
    ax.tick_params(axis='y', colors='gray')
    ax.xaxis.label.set_color('gray')
    ax.yaxis.label.set_color('gray')
    ax2.xaxis.label.set_color('gray')
    ax2.yaxis.label.set_color('gray')
    plt.tight_layout()
    fileName = "fom_"+metric+"_"+stat+".png"
    fig.savefig('./plots/'+fileName, format="png", bbox_inches='tight',
                dpi=300, transparent=True)
    plt.show()
#=====================================================================
def main(dataFile, metric, stat):
    """Load the scaling data file, group it, and produce the bar plot."""
    rawData = np.loadtxt(dataFile)
    grouped = createDataDic(rawData, metric, stat)
    pp.pprint(grouped)
    plotBar(grouped, meshLabelsPlot, nThreads, metric, stat)
    plt.show()
#////////////////////////////////////////////
if __name__== "__main__":
#////////////////////////////////////////////
parser = ArgumentParser()
# parser.add_argument("-file", "--file",
# dest="dataFile",
# help="where to get data from\n")
parser.add_argument("-metric", "--metric",
dest="metric", default="mem",
help="Choices: mem, cpu, itertime \n")
parser.add_argument("-stat", "--stat",
dest="stat", default="ave",
help="ave, min or max\n")
args = parser.parse_args()
assert(args.metric in ['mem', 'cpu', 'itertime'])
main('./data/fom_scaling_final.txt', args.metric, args.stat)
#////////////////////////////////////////////
|
##########################################################################
#
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import imath
import IECore
import Gaffer
import GafferUI
import GafferImage
class FormatPlugValueWidget( GafferUI.PlugValueWidget ) :
	"""PlugValueWidget for GafferImage.FormatPlug.

	Presents a menu of the registered formats plus a "Custom" entry. In
	custom mode, extra numeric widgets for the display window (min/max)
	and the pixel aspect become visible so the format can be edited freely.
	"""

	def __init__( self, plug, **kw ) :

		grid = GafferUI.GridContainer( spacing = 4 )
		GafferUI.PlugValueWidget.__init__( self, grid, plug, **kw )

		# Row 0 : the format-selection menu button, spanning both columns.
		self.__menuButton = GafferUI.MenuButton( menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) ) )
		grid[0:2,0] = self.__menuButton

		# Rows 1-3 : widgets shown only in custom mode.
		self.__minLabel = GafferUI.Label( "Min" )
		grid.addChild( self.__minLabel, index = ( 0, 1 ), alignment = ( GafferUI.HorizontalAlignment.Right, GafferUI.VerticalAlignment.Center ) )
		self.__minWidget = GafferUI.CompoundNumericPlugValueWidget( plug["displayWindow"]["min"] )
		grid[1,1] = self.__minWidget

		self.__maxLabel = GafferUI.Label( "Max" )
		grid.addChild( self.__maxLabel, index = ( 0, 2 ), alignment = ( GafferUI.HorizontalAlignment.Right, GafferUI.VerticalAlignment.Center ) )
		self.__maxWidget = GafferUI.CompoundNumericPlugValueWidget( plug["displayWindow"]["max"] )
		grid[1,2] = self.__maxWidget

		self.__pixelAspectLabel = GafferUI.Label( "Pixel Aspect" )
		grid.addChild( self.__pixelAspectLabel, index = ( 0, 3 ), alignment = ( GafferUI.HorizontalAlignment.Right, GafferUI.VerticalAlignment.Center ) )
		self.__pixelAspectWidget = GafferUI.NumericPlugValueWidget( plug["pixelAspect"] )
		grid[1,3] = self.__pixelAspectWidget

		# If the plug hasn't got an input, the PlugValueWidget base class assumes we're not
		# sensitive to contex changes and omits calls to _updateFromPlug(). But the default
		# format mechanism uses the context, so we must arrange to do updates ourselves when
		# necessary.
		self.__contextChangedConnection = self.getContext().changedSignal().connect( Gaffer.WeakMethod( self.__contextChanged ) )

		self._addPopupMenu( self.__menuButton )
		self._updateFromPlug()

	def setPlug( self, plug ) :
		"""Rewire the child widgets onto the corresponding child plugs."""
		self.__minWidget.setPlug( plug["displayWindow"]["min"] )
		self.__maxWidget.setPlug( plug["displayWindow"]["max"] )
		self.__pixelAspectWidget.setPlug( plug["pixelAspect"] )
		GafferUI.PlugValueWidget.setPlug( self, plug )

	def _updateFromPlug( self ) :
		"""Refresh the button label and custom-widget visibility from the plug."""
		self.__menuButton.setEnabled( self._editable() )

		text = ""
		mode = "standard"
		if self.getPlug() is not None :
			# "formatPlugValueWidget:mode" metadata records the user's wish
			# to edit in custom mode (see __applyCustomFormat).
			mode = Gaffer.Metadata.value( self.getPlug(), "formatPlugValueWidget:mode" )
			with self.getContext() :
				fmt = self.getPlug().getValue()
			text = self.__formatLabel( fmt )
			if fmt == GafferImage.Format() :
				# The empty display window of the default format is
				# confusing to look at, so turn off custom mode.
				mode = "standard"
			elif not GafferImage.Format.name( fmt ) :
				# If the chosen format hasn't been registered,
				# force custom mode even if it hasn't been
				# asked for explicitly.
				mode = "custom"

		self.__menuButton.setText( text if mode != "custom" else "Custom" )

		# Only show the "Min" row when the display window origin is non-zero;
		# otherwise "Max" doubles as a size field (relabelled below).
		nonZeroOrigin = fmt.getDisplayWindow().min() != imath.V2i( 0 )
		for widget in ( self.__minLabel, self.__minWidget ) :
			widget.setVisible( mode == "custom" and nonZeroOrigin )
		for widget in ( self.__maxLabel, self.__maxWidget, self.__pixelAspectLabel, self.__pixelAspectWidget ) :
			widget.setVisible( mode == "custom" )
		self.__maxLabel.setText( "Max" if nonZeroOrigin else "Size" )

	def __menuDefinition( self ) :
		# Build the popup menu: one checkable entry per registered format,
		# plus the "Custom" entry after a divider.
		result = IECore.MenuDefinition()
		if self.getPlug() is None :
			return result

		formats = [ GafferImage.Format.format( n ) for n in GafferImage.Format.registeredFormats() ]
		if not self.getPlug().ancestor( Gaffer.ScriptNode ).isSame( self.getPlug().node() ) :
			# Non-script plugs may also fall back to the default format,
			# represented by the empty Format at the top of the menu.
			formats.insert( 0, GafferImage.Format() )

		currentFormat = self.getPlug().getValue()
		modeIsCustom = Gaffer.Metadata.value( self.getPlug(), "formatPlugValueWidget:mode" ) == "custom"
		for fmt in formats :
			result.append(
				"/" + self.__formatLabel( fmt ),
				{
					"command" : functools.partial( Gaffer.WeakMethod( self.__applyFormat ), fmt = fmt ),
					"checkBox" : fmt == currentFormat and not modeIsCustom,
				}
			)

		result.append( "/CustomDivider", { "divider" : True } )
		result.append(
			"/Custom",
			{
				"command" : Gaffer.WeakMethod( self.__applyCustomFormat ),
				"checkBox" : modeIsCustom or currentFormat not in formats,
			}
		)

		return result

	def __formatLabel( self, fmt ) :
		# Human-readable label for a format: "Default ( ... )" for the empty
		# format, "Name ( spec )" for registered ones, "Custom" otherwise.
		if fmt == GafferImage.Format() :
			return "Default ( %s )" % GafferImage.FormatPlug.getDefaultFormat( self.getContext() )
		else :
			name = GafferImage.Format.name( fmt )
			if name :
				return "%s ( %s )" % ( name, str( fmt ) )
			else :
				return "Custom"

	def __applyFormat( self, unused, fmt ) :
		# Menu command: set the plug to a registered format (standard mode).
		with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
			Gaffer.Metadata.registerValue( self.getPlug(), "formatPlugValueWidget:mode", "standard", persistent = False )
			self.getPlug().setValue( fmt )

	def __applyCustomFormat( self, unused ) :
		# Menu command: switch to custom editing mode.
		with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :

			with self.getContext() :
				if self.getPlug().getValue() == GafferImage.Format() :
					# Format is empty. It's kindof confusing to display that
					# to the user in the custom fields, so take the default
					# format and set it explicitly as a starting point for
					# editing.
					self.getPlug().setValue( GafferImage.FormatPlug.getDefaultFormat( self.getContext() ) )

			# When we first switch to custom mode, the current value will
			# actually be one of the registered formats. So we use this
			# metadata value to keep track of the user's desire to be in
			# custom mode despite of this fact. We use metadata rather than
			# a member variable so that undo will take us back to the non-custom
			# state automatically.
			Gaffer.Metadata.registerValue( self.getPlug(), "formatPlugValueWidget:mode", "custom", persistent = False )

	def __contextChanged( self, context, key ) :
		# The default format lives in the context, so re-pull the value
		# whenever it changes.
		if key == "image:defaultFormat" :
			self._updateFromPlug()

GafferUI.PlugValueWidget.registerType( GafferImage.FormatPlug, FormatPlugValueWidget )
|
#!/usr/bin/python
'''
saufh.py
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Readme:
Script to get measurements from the Sauter FS series dynamometer. It is possible
to select experiment range, delay between measurements and units.
Instructions:
Lay down the strain gauge in order to get a 0
Start the script to keep the sensor on
Mount the strain gauge in the desired setup
Start recording with the desired timeframe
Changelog:
V0 2018/12/03
'''
import serial #Serial library, required to comunicate with the cam
import time #Time library
import datetime #Timestamp library
from datetime import datetime as dtime
import ctypes #Conversion Type library
import os
import csv
import sys
#import pandas as pd #data manipulation library
#global variables:
homepath='/home/pi/'
# Device unit codes as used by the serial protocol (see talknerdy()).
units = {
    3: 'KN',
    4: 'TF',
    5: 'KLBF'
}
# Direction of the measured force, indexed by the sign bit from the device.
force = {
    0: 'Press',
    1: 'Pull'
}
# Sign character used when writing rows to the CSV file.
fsign = {
    0: '-',
    1: '+'
}
# One letter per hour of the day (0 -> 'a' ... 23 -> 'x'), used to build
# unique hourly file names in storeme(). Bug fix: the original literal
# repeated keys 18 and 19, which silently dropped the letters 's' and 't'
# and shifted the late-evening hour letters.
hours = {h: chr(ord('a') + h) for h in range(24)}
def queryme(question, default="yes"):
    """Ask a yes/no question on stdin and return the answer as a bool.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no" or None (meaning
        an answer is required of the user).
    Returns True for "yes" and False for "no".
    Raises ValueError if default is not "yes", "no" or None.
    """
    # Compatibility shim: raw_input() only exists on Python 2; fall back to
    # input() (which reads a plain string) on Python 3.
    try:
        read_line = raw_input
    except NameError:
        read_line = input
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = read_line().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def menu(s, test=False):
    '''
    Configuration Menu, serving as the human input interface.

    s: open serial port, passed to talknerdy() when setting device units.
    test: when True, skip all prompts and return defaults.
    Returns (timed, resolution, unit): experiment length in seconds, delay
    between measurements in seconds, and the device unit code (3=KN).
    '''
    num = 0
    unit = 0
    # Bug fix: this shortcut returned only (60, 0), which broke the
    # three-value unpacking `timed, resolution, unit = menu(...)` in main().
    if test == True:
        return 60, 0, 3
    while not (num in range(1, 4)):  # 1 included, 4 excluded
        if queryme("Use default time range (1 minute)"):
            timed = 1 * 60
            break
        else:
            num = input('Select time frame (1- 10 minutes, 2- 30 minutes, 3 -others): ')
            if num == 1:
                timed = 10 * 60
            elif num == 2:
                timed = 30 * 60
            elif num == 3:
                timed = input('Select time frame in minutes: ') * 60
            print("Selected {} minutes".format(timed / 60))
            if not queryme("Is this correct?"):
                num = 4  # any value outside 1-3 keeps the loop asking again
    selectedOK = True
    while selectedOK == True:
        if queryme("Use default device resolution?"):
            resolution = 0
            break
        else:
            resolution = input('Select time resolution in seconds: ')
            print("Selected {} seconds".format(resolution))
            if queryme("Is this correct?"):
                selectedOK = False  # confirmed, leave the loop
    selectedOK = True
    while selectedOK == True:
        if queryme("Use default device units? [KN]"):
            talknerdy(s, '3')
            unit = 3
            break
        else:
            while not (unit in range(3, 6)):
                unit = int(input('Select units [KN]=3 [TF]=4 [KLBF]=5: '))
                if unit in range(3, 6):
                    # Bug fix: the device protocol expects the unit code as a
                    # string (cf. the default branch above), not an int.
                    talknerdy(s, str(unit))
                else:
                    print('Please input a valid unit')
            if queryme("Is this correct?"):
                selectedOK = False  # confirmed, leave the loop
    return timed, resolution, unit
def init_serial(prt='ttyUSB0', baudrte=9600, timout=1):
    '''
    Defines the default serial port configuration.

    prt: device name under /dev (default 'ttyUSB0').
    baudrte: line speed in baud.
    timout: read timeout in seconds.
    Returns an open serial.Serial configured as 8N1.
    '''
    return serial.Serial(
        port='/dev/'+prt,
        baudrate=baudrte,
        timeout=timout,
        parity=serial.PARITY_NONE,
        stopbits=serial.STOPBITS_ONE,
        bytesize=serial.EIGHTBITS
    )
def talknerdy(s,m='9'):
    '''
    Send one command to the gauge and, for '9', read back a measurement.

    s: serial port to talk to
    m: message to send::
        * '2': set the unit to 'zero'
        * '3': use KN units; Kilo Newtons
        * '4': use TF units; Tone Force
        * '5': use KLBF units; Kilo Pound Force
        * '6': set tracking mode on the gauge
        * '7': set peak mode on the gauge
        * '9': Retrieve measurement, default message
    -> The default message is YXXX.XX where Y is the sign and XXX.XX the
    measurement in the selected units, by default Kilo Newtons
    Returns (sign, data); both are 0 for commands other than '9'.
    '''
    # Discard any stale bytes so the 7-byte reply aligns with this request.
    s.flushInput()
    s.write(m)
    sign=0
    data=0
    if m=='9':
        read = s.read(7)
        #print(read)
        # NOTE(review): Python 2 semantics assumed - read[0] is a
        # one-character str here; on Python 3 bytes indexing yields an int
        # and int(read[0]) would change meaning. Confirm target interpreter.
        sign=int(read[0])
        data= round(float(read[1:len(read)]),3)
    # Small settle delay between consecutive commands.
    time.sleep(.05)
    return sign,data
def storeme(ppath="force"):
    '''
    Ensure the output folder and the hourly CSV file exist.

    Checks if a local path exists within the home folder, otherwise creates
    one. Checks if a filename exists within the local path, otherwise creates
    one with a header row; an existing file will be appended to later.

    ppath: folder name created under `homepath` if missing.
    Returns (homep, ffile): folder path and CSV file name, e.g.
    FH2018223a.csv (FH + year + day-of-year + hour letter).
    '''
    # Folder update
    homep = homepath + ppath  # complete path relative to modifiers
    if not os.path.exists(homep):
        os.mkdir(homep)
    # Filename update
    st = dtime.now()
    ffile = 'FH' + '{0}{1}'.format(st.year, st.timetuple().tm_yday) + hours[st.hour] + '.csv'  # FH2018223a.csv
    if not os.path.exists(homep + '/' + ffile):  # file does not exist yet
        # The with-block closes the file; the original's explicit close()
        # inside the block was redundant.
        with open(homep + '/' + ffile, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['Timestamp', 'Sign', 'Force'])
    else:
        print('File ' + ffile + ' already exists, data will be appended')
    return homep, ffile
#Main script
def main():
    '''
    Entry point: configure via menu(), wait for the user to start,
    then record (timestamp, sign, force) rows to an hourly CSV until
    the selected timeframe elapses or the user interrupts.
    '''
    debug=False
    #Init the serial port
    ser=init_serial()
    try: timed,resolution,unit=menu(ser) #timed in minutes, resolution in seconds (0), true for mode Test
    except KeyboardInterrupt:
        print('\rExit ')
        exit()
    print (timed,resolution)
    print('Reading data...')
    #talknerdy(ser,'2') #Set the device to '0' #uncomment for self calibration after deployment.
    #Init recording file
    homep,filename= storeme()
    nowasthen=time.time()
    # Idle loop: keeps polling the sensor (so it stays awake) until the
    # user presses Ctrl+C to start the actual recording.
    try:
        while True:
            #Output message in terminal
            sign,data=talknerdy(ser) #Parsing default mesage, '9', to read data
            sys.stdout.write('\rWaiting for user defined start [ctl+C], elapsed time ' +str(round(time.time()-nowasthen))+ '         \r') #Notice that dictionaries are used to parse the unit name and the force direction
            sys.stdout.flush()
    except KeyboardInterrupt:
        sys.stdout.flush()
        print('\rRecording at file '+filename+'                                            ')
        pass
    #Init time pointers
    starttime=time.time()
    output=[]
    i=0
    timenow=time.time()-starttime
    # Recording loop: one row per iteration, appended to the CSV
    # immediately so data survives an interrupt or power loss.
    try:
        while (timenow)<timed : #Compared against selected timeframe
            if debug: print('Time since last measurement: ', timenow)
            #Retrieve the data
            sign,data=talknerdy(ser) #Parsing default mesage, '9', to read data
            #Create timestamp
            st=dtime.now()
            st='{0}/{1}/{2} {3}:{4}:{5}:{6}'.format(st.year,st.month,st.day,st.hour,st.minute,st.second,st.microsecond)
            #Create data row
            output.append([st,fsign[sign],data])
            #Output message in terminal
            sys.stdout.write('\r' +force[sign]+' force output '+str(output[i][2])+' '+units[unit] +' remaining time ' +str(round(timed-timenow))+ '         \r') #Notice that dictionaries are used to parse the unit name and the force direction
            sys.stdout.flush()
            #Update file
            with open(homep+'/'+filename, 'ab') as csvfile:
                writer=csv.writer(csvfile)
                writer.writerow(output[i])
                csvfile.close()
            #Update pointers
            i+=1
            time.sleep(resolution)
            timenow=time.time()-starttime
    except KeyboardInterrupt:
        sys.stdout.flush()
        print('\rFinished reading                                                        ')
        pass
    ser.close() #Close serial port
    print('\r')
    if debug: print('Total output',output)
if __name__ == "__main__":
    main()
|
import re
from django.utils.functional import memoize
from django.utils.crypto import get_random_string
from django.conf import settings
from django.core.urlresolvers import (
RegexURLResolver, NoReverseMatch,
get_callable, normalize, force_unicode,
get_urlconf, get_script_prefix,
get_ns_resolver, iri_to_uri,
)
def random_session_key(session, prefix=''):
    """Return a fresh 12-character random key.

    Keeps drawing candidates until (prefix + key) is not already a key
    in *session*, so the caller can store under it without collision.
    """
    while True:
        candidate = get_random_string(12)
        if (prefix + candidate) not in session:
            return candidate
_b_resolver_cache = {}  # Maps URLconf modules to RegexURLResolver instances.
class BRegexURLResolver(RegexURLResolver):
    """
    from django/core/urlresolvers.py
    modified as noted

    A forgiving resolver: extra positional args are truncated, and kwargs
    may be a superset of what a pattern needs, instead of raising
    NoReverseMatch. (Python 2 syntax throughout.)
    """
    def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
        if args and kwargs:
            raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
        try:
            lookup_view = get_callable(lookup_view, True)
        except (ImportError, AttributeError), e:
            raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
        possibilities = self.reverse_dict.getlist(lookup_view)
        prefix_norm, prefix_args = normalize(_prefix)[0]
        for possibility, pattern, defaults in possibilities:
            for result, params in possibility:
                if args:
                    # ## START MODS
                    # Accept *more* positional args than the pattern needs:
                    # keep only the leading expected ones.
                    expected_length = len(params) + len(prefix_args)
                    if len(args) < expected_length:
                        continue
                    args = args[:expected_length]
                    # ## END MODS
                    unicode_args = [force_unicode(val) for val in args]
                    candidate = (prefix_norm + result) % dict(zip(prefix_args + params, unicode_args))
                else:
                    # ## START MODS
                    # Only require that every parameter the pattern needs is
                    # covered by kwargs or defaults; ignore surplus kwargs.
                    if set(params + defaults.keys() + prefix_args) - set(kwargs.keys() + defaults.keys()):
                        continue
                    # ## END MODS
                    matches = True
                    for k, v in defaults.items():
                        if kwargs.get(k, v) != v:
                            matches = False
                            break
                    if not matches:
                        continue
                    unicode_kwargs = dict([(k, force_unicode(v)) for (k, v) in kwargs.items()])
                    candidate = (prefix_norm + result) % unicode_kwargs
                if re.search(u'^%s%s' % (_prefix, pattern), candidate, re.UNICODE):
                    return candidate
        # lookup_view can be URL label, or dotted path, or callable, Any of
        # these can be passed in at the top, but callables are not friendly in
        # error messages.
        m = getattr(lookup_view, '__module__', None)
        n = getattr(lookup_view, '__name__', None)
        if m is not None and n is not None:
            lookup_view_s = "%s.%s" % (m, n)
        else:
            lookup_view_s = lookup_view
        raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
                             "arguments '%s' not found."
                             % (lookup_view_s, args, kwargs))
def get_resolver(urlconf):
    """
    from django/core/urlresolvers.py
    use BRegexURLResolver

    urlconf: dotted path of the URLconf module, or None for the
    project-level ROOT_URLCONF.
    """
    if urlconf is None:
        urlconf = settings.ROOT_URLCONF
    return BRegexURLResolver(r'^/', urlconf)
# Memoized on the single positional arg so each urlconf gets one resolver.
get_resolver = memoize(get_resolver, _b_resolver_cache, 1)
def fuzzy_reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None):
    """
    from django/core/urlresolvers.py
    Unmodified reverse (just need to use our modified version of get_resolver)
    With the modified BRegexURLResolver retrieved through get_resolver this will
    not error when you pass in extra args (it assumes proper order and ignores
    trailing "extra" args) OR kwargs (it assumes you are passing at least the
    required keyworded arguments)
    It will still error if you pass both args AND kwargs at the same time.
    (Python 2 code: basestring, `except KeyError, e`.)
    """
    if urlconf is None:
        urlconf = get_urlconf()
    resolver = get_resolver(urlconf)
    args = args or []
    kwargs = kwargs or {}
    if prefix is None:
        prefix = get_script_prefix()
    if not isinstance(viewname, basestring):
        view = viewname
    else:
        # Split "ns1:ns2:name" into the view name and its namespace chain,
        # then walk the chain resolving each namespace in turn.
        parts = viewname.split(':')
        parts.reverse()
        view = parts[0]
        path = parts[1:]
        resolved_path = []
        ns_pattern = ''
        while path:
            ns = path.pop()
            # Lookup the name to see if it could be an app identifier
            try:
                app_list = resolver.app_dict[ns]
                # Yes! Path part matches an app in the current Resolver
                if current_app and current_app in app_list:
                    # If we are reversing for a particular app,
                    # use that namespace
                    ns = current_app
                elif ns not in app_list:
                    # The name isn't shared by one of the instances
                    # (i.e., the default) so just pick the first instance
                    # as the default.
                    ns = app_list[0]
            except KeyError:
                pass
            try:
                extra, resolver = resolver.namespace_dict[ns]
                resolved_path.append(ns)
                ns_pattern = ns_pattern + extra
            except KeyError, e:
                if resolved_path:
                    raise NoReverseMatch(
                        "%s is not a registered namespace inside '%s'" %
                        (e, ':'.join(resolved_path)))
                else:
                    raise NoReverseMatch("%s is not a registered namespace" %
                                         e)
        if ns_pattern:
            resolver = get_ns_resolver(ns_pattern, resolver)
    return iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs))
|
<reponame>Saketkr21/epiabm
#
# Infection due to contact in between people in different cells
#
import random
import numpy as np
import logging
import typing
from pyEpiabm.core import Cell, Parameters, Person
from pyEpiabm.property import InfectionStatus, SpatialInfection
from pyEpiabm.utility import DistanceFunctions, SpatialKernel
from .abstract_sweep import AbstractSweep
class SpatialSweep(AbstractSweep):
    """Class to run the inter-cell space infections
    as part of the sweep function. Runs through cells
    and calculates their infectiousness parameter and calculates
    a poisson variable of how many people each cell should
    infect. Then chooses other cells, and persons within that
    cell to assign as infectee. Then tests a infection event
    against each susceptible member of the place. The resulting
    exposed person is added to an infection queue.
    """
    def __call__(self, time: float):
        """
        Given a population structure, loops over cells and generates
        a random number of people to infect. Then decides which cells
        the infectees should be found in and considers whether an
        infection event occurs on individual and cell infectiousness
        and susceptibility.
        Parameters
        ----------
        time : float
            Current simulation time
        """
        # As this tracks intercell infections need to check number of
        # cells is more than one (edge case but worth having)
        if len(self._population.cells) == 1:
            return
        # If infection radius is set to zero no infections will occur so
        # break immediately to save time.
        if Parameters.instance().infection_radius == 0:
            return
        # Double loop over the whole population, checking infectiousness
        # status, and whether they are absent from their household.
        for cell in self._population.cells:
            # Check to ensure there is an infector in the cell
            total_infectors = cell.number_infectious()
            if total_infectors == 0:
                continue
            # Creates a list of posible infectee cells which excludes the
            # infector cell.
            poss_susc_cells = self._population.cells.copy()
            poss_susc_cells.remove(cell)
            possible_infectee_num = sum([cell2.compartment_counter.retrieve()
                                        [InfectionStatus.Susceptible]
                                        for cell2 in poss_susc_cells])
            if possible_infectee_num == 0:
                # Break the loop if no people outside the cell are susceptible.
                continue
            # If there are any infectors calculate number of infection events
            # given out in total by the cell
            ave_num_of_infections = SpatialInfection.cell_inf(cell, time)
            number_to_infect = np.random.poisson(ave_num_of_infections)
            # Sample at random from the cell to find an infector. Have
            # checked to ensure there is an infector present.
            possible_infectors = [person for person in cell.persons
                                  if person.is_infectious()]
            infector = random.choice(possible_infectors)
            # Two strategies: CovidSim-style kernel rejection sampling, or
            # simple inverse-distance weighted cell choice.
            if Parameters.instance().do_CovidSim:
                infectee_list = self.find_infectees_Covidsim(infector,
                                                             poss_susc_cells,
                                                             number_to_infect)
            else:
                infectee_list = self.find_infectees(cell,
                                                    poss_susc_cells,
                                                    number_to_infect)
            for infectee in infectee_list:
                self.do_infection_event(infector, infectee, time)
    def find_infectees(self, infector_cell: Cell,
                       possible_infectee_cells: typing.List[Cell],
                       number_to_infect: int):
        """Given a specific infector, a list of possible infectee cells,
        and the number of people needed to infect, follows a distance based
        implementation to create a list of infectees.
        Parameters
        ----------
        infector_cell : Cell
            Infector cell instance of Cell
        possible_infectee_cells : typing.List[Cell]
            List of possible cells to infect
        number_to_infect : int
            maximum number of people to infect
        Returns
        ----------
        infectee_list : typing.List[Person]
            List of exposed people to test an infection event
        """
        infectee_list = []
        # Chooses a list of cells (with replacement) for each infection
        # event to occur in. Specifically inter-cell infections
        # so can't be the same cell.
        distance_weights = []
        for cell2 in possible_infectee_cells:
            try:
                distance_weights.append(1/DistanceFunctions.dist(
                    infector_cell.location, cell2.location))
            except ZeroDivisionError:
                # If cells are on top of each other use nan placeholder
                distance_weights.append(np.nan)
        # Cells on top of each currently have a distance weight equal
        # to the maximum of all other weights.
        # Possibly want to do twice this.
        number_of_nans = sum(np.isnan(distance_weights))
        if number_of_nans == len(distance_weights):
            # All pairs coincide: fall back to uniform weights.
            distance_weights = [1 for _ in distance_weights]
        elif number_of_nans > 0:
            max_weight = np.nanmax(distance_weights)
            distance_weights = np.nan_to_num(distance_weights,
                                             nan=max_weight)
        # Use of the cutoff distance idea from CovidSim.
        # A weight of 1/d survives only if d < cutoff; others are zeroed.
        cutoff = Parameters.instance().infection_radius
        distance_weights = [weight if (cutoff > 1/weight) else 0
                            for weight in distance_weights]
        # Will catch the case if distance weights isn't configured
        # correctly and returns the wrong length.
        assert len(distance_weights) == len(possible_infectee_cells), (
            "Distance weights are not the same length as cell list")
        try:
            # Will catch a list of zeros
            if sum(distance_weights) == 0:
                raise(ValueError)
            cell_list = random.choices(possible_infectee_cells,
                                       weights=distance_weights,
                                       k=number_to_infect)
        except ValueError as e:
            logging.exception(f"{type(e).__name__}: no cells"
                              + f" within radius {cutoff} of"
                              + f" cell {infector_cell.id} at location"
                              + f" {infector_cell.location} - skipping cell.")
            # This returns an empty list so no infection events tested.
            return infectee_list
        # Each infection event corresponds to a infectee cell
        # on the cell list
        for infectee_cell in cell_list:
            # Sample at random from the infectee cell to find
            # an infectee
            infectee_list.append(random.sample(infectee_cell.persons, 1)[0])
        return infectee_list
    def find_infectees_Covidsim(self, infector: Person,
                                possible_infectee_cells: typing.List[Cell],
                                number_to_infect: int):
        """Given a specific infector, a list of possible infectee cells,
        and the number of people needed to infect, follows Covidsim's
        implementation to create a list of infectees.
        Parameters
        ----------
        infector : Person
            Infector instance of person
        possible_infectee_cells : typing.List[Cell]
            List of possible cells to infect
        number_to_infect : int
            Maximum number of people to infect
        Returns
        -------
        typing.List[Person]
            List of people to infect
        """
        current_cell = infector.microcell.cell
        infectee_list = []
        count = 0
        while number_to_infect > 0 and count < self._population.total_people():
            count += 1
            # Weighting for cell choice in Covidsim uses cum_trans and
            # invCDF arrays, which are equivalent to weighting by total
            # susceptibles*max_transmission. May want to add transmission
            # parameter later.
            # NOTE(review): dist() receives the Cell objects here, whereas
            # other call sites pass `.location` attributes - confirm
            # DistanceFunctions.dist accepts cells directly.
            weights = [cell2.compartment_counter.retrieve()
                       [InfectionStatus.Susceptible] * SpatialKernel.weighting(
                       DistanceFunctions.dist(cell2, current_cell))
                       for cell2 in possible_infectee_cells]
            infectee_cell = random.choices(possible_infectee_cells,
                                           weights=weights, k=1)[0]
            # Sample at random from the infectee cell to find
            # an infectee
            infectee = random.sample(infectee_cell.persons, 1)[0]
            # Covidsim tested each infection event by testing the ratio
            # of the spatial kernel applied to the distance between people
            # to the spatial kernel of the shortest distance between
            # their cells.
            infection_distance = DistanceFunctions.dist(
                infector.microcell.cell.location, infectee_cell.location)
            minimum_dist = DistanceFunctions.minimum_between_cells(
                infectee_cell, current_cell)
            infection_kernel = (SpatialKernel.weighting(infection_distance) /
                                SpatialKernel.weighting(minimum_dist))
            if (infection_kernel > random.random()):
                # Covidsim rejects the infection event if the distance
                # between infector/infectee is too large.
                infectee_list.append(infectee)
                number_to_infect -= 1
        # I can see an infinte loop here if there are no suitable
        # infectees. Have put in a count so no more loops than
        # total population.
        return infectee_list
    def do_infection_event(self, infector: Person, infectee: Person,
                           time: float):
        """Helper function which takes an infector and infectee,
        in different cells and tests whether contact between
        them will lead to an infection event.
        Parameters
        ----------
        infector : Person
            Infector instance of Person
        infectee : Person
            Infectee instance of Person
        time : float
            Current simulation time
        """
        if not infectee.is_susceptible():
            return
        # force of infection specific to cells and people
        # involved in the infection event
        force_of_infection = SpatialInfection.\
            space_foi(infector.microcell.cell, infectee.microcell.cell,
                      infector, infectee, time)
        # Compare a uniform random number to the force of
        # infection to see whether an infection event
        # occurs in this timestep between the given
        # persons.
        r = random.random()
        if r < force_of_infection:
            infectee.microcell.cell.enqueue_person(infectee)
|
<filename>single_train.py
import os
import torch
import pandas as pd
from transformers import AdamW
from annlp import fix_seed, ptm_path, get_device, Trainer, BertForMultiClassification, print_sentence_length
from sklearn.model_selection import train_test_split
def read_data(path, test_size=0.1, random_state=42):
    """Load a CSV with 'text'/'label' columns and split it for training.

    Labels are mapped to dense integer ids in first-seen order.
    Returns ((train_text, dev_text, train_label, dev_label), num_labels).
    """
    frame = pd.read_csv(path)
    texts = frame['text'].tolist()
    print_sentence_length(texts)
    unique_labels = frame['label'].unique()
    label_to_id = {name: idx for idx, name in enumerate(unique_labels)}
    label_ids = [label_to_id[name] for name in frame['label'].tolist()]
    split = train_test_split(texts, label_ids,
                             test_size=test_size, random_state=random_state)
    return split, len(unique_labels)
# Load one dataset; swap which line is active to train on another corpus.
# (train_text, dev_text, train_label, dev_label), num_labels = read_data('data/news_10.csv')
(train_text, dev_text, train_label, dev_label), num_labels = read_data('data/tc_opinion.csv')
# (train_text, dev_text, train_label, dev_label), num_labels = read_data('data/sentiment_hotel.csv')
# (train_text, dev_text, train_label, dev_label), num_labels = read_data('data/fudan_news.csv')
# (train_text, dev_text, train_label, dev_label), num_labels = read_data('data/iflytek.csv')
# (train_text, dev_text, train_label, dev_label), num_labels = read_data('data/sentiment.csv')
# (train_text, dev_text, train_label, dev_label), num_labels = read_data('data/weibo.csv')
class MyTrainer(Trainer):
    """Trainer bound to the module-level train/dev split for BERT
    multi-class text classification."""

    def get_train_data(self):
        """Tokenized training texts plus their integer labels."""
        return self.tokenizer_(train_text), train_label

    def get_dev_data(self):
        """Tokenized dev texts plus their integer labels."""
        return self.tokenizer_(dev_text), dev_label

    def configure_optimizer(self):
        """AdamW over all model parameters at the configured learning rate."""
        return AdamW(self.model.parameters(), lr=self.lr)

    def train_step(self, data, mode):
        """One supervised forward pass; returns (loss, logits, labels)."""
        ids = data['input_ids'].to(self.device)
        mask = data['attention_mask'].to(self.device)
        gold = data['labels'].to(self.device).long()
        result = self.model(ids, attention_mask=mask, labels=gold)
        return result.loss, result.logits, gold.cpu().numpy()

    def predict_step(self, data):
        """Forward pass without labels; returns argmax class ids (numpy)."""
        ids = data['input_ids'].to(self.device)
        mask = data['attention_mask'].to(self.device)
        result = self.model(ids, attention_mask=mask)
        return result.logits.argmax(dim=-1).cpu().numpy()
def main(mode, seed):
    """Run one training ('train') or prediction (any other mode) pass
    with the given random seed."""
    training = (mode == 'train')
    # Training runs train+dev; any other mode loads the saved model and tests.
    do_train, do_dev = training, training
    do_test = not training
    load_model = not training
    fix_seed(seed)
    max_length = 128
    batch_size = 32
    lr = 5e-5
    model_name = 'best_model.p'
    model_path = ptm_path('roberta')
    print(model_path)
    model = BertForMultiClassification.from_pretrained(model_path, num_labels=num_labels, loss=None)
    if load_model and os.path.exists(model_name):
        print('************load model************')
        model.load_state_dict(torch.load(model_name, map_location=get_device()))
    trainer = MyTrainer(model, batch_size=batch_size, lr=lr, max_length=max_length, model_path=model_path,
                        do_train=do_train, do_dev=do_dev, do_test=do_test, test_with_label=False,
                        save_model_name=model_name, attack=False, monitor='f1', epochs=5,
                        save_metric='all_data', mix=None, augmentation=False)
    trainer.configure_metrics(do_acc=True, do_f1=True, do_recall=True, do_precision=True, do_kappa=True,
                              print_report=False, average='macro')
    trainer.run()
if __name__ == '__main__':
    import sys
    # Repeat the run over five seeds to gauge variance across initializations.
    for seed in [100, 101, 102, 103, 104]:
        print('seed:', seed)
        main(sys.argv[1], seed=seed)
        print('*' * 100)
|
import torch
import torch.nn.functional as F
import numpy as np
import torch.nn
def dice_loss(input, target):
    '''
    Soft dice coefficient over sigmoid probabilities.
    :param input: raw logits (any shape)
    :param target: binary mask label, same number of elements
    :return: scalar soft-dice coefficient in [0, 1]
    '''
    input = torch.sigmoid(input)
    smooth = 1.0  # Laplace smoothing keeps the ratio defined for empty masks
    flat_input = input.view(-1)
    flat_target = target.view(-1)
    intersection = (flat_input * flat_target).sum()
    # Bug fix: the smooth term belongs once in the numerator (2*I + s),
    # not doubled (2*(I + s)); the old form could exceed 1 (e.g. 2.0 for
    # two empty masks), which broke -log(dice) in MixedLoss.
    return (2. * intersection + smooth) / (flat_input.sum() + flat_target.sum() + smooth)
class FocalLoss(torch.nn.Module):
    """Binary focal loss on raw logits: -w * (1 - p_t)**gamma * log(p_t).

    gamma: focusing exponent; larger values down-weight easy examples.
    size_average: return the batch mean when True, per-element losses
        (shape (N, 1)) otherwise.
    """
    def __init__(self, gamma=2, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.size_average = size_average

    def forward(self, input, target, class_weight=None):
        '''
        Compute the focal loss.
        :param input: raw logits (flattened internally)
        :param target: binary labels (0/1), same number of elements
        :param class_weight: optional [w_neg, w_pos]; defaults to [1, 1]
        :return: scalar mean loss, or (N, 1) tensor if size_average is False
        '''
        target = target.view(-1, 1).long()
        if class_weight is None:  # bug fix: identity test, not `== None`
            class_weight = [1] * 2
        prob = torch.sigmoid(input)  # F.sigmoid is deprecated
        prob = prob.view(-1, 1)
        prob = torch.cat((1 - prob, prob), 1)
        # Generalized: allocate on the input's device instead of hard-coding
        # .cuda(), so the loss also runs on CPU tensors (unchanged on GPU).
        select = torch.zeros(len(prob), 2, device=prob.device)
        select.scatter_(1, target, 1)
        class_weight = torch.tensor(class_weight, dtype=prob.dtype,
                                    device=prob.device).view(-1, 1)
        class_weight = torch.gather(class_weight, 0, target)
        # Pick p_t (probability of the true class) for each sample.
        prob = (prob * select).sum(1).view(-1, 1)
        prob = torch.clamp(prob, 1e-8, 1 - 1e-8)
        batch_loss = -class_weight * (torch.pow((1 - prob), self.gamma)) * prob.log()
        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss
        return loss
class MixedLoss(torch.nn.Module):
    """Combination loss: alpha * focal loss minus log of the soft dice
    coefficient (so maximizing dice lowers the loss)."""

    def __init__(self, alpha, gamma):
        super(MixedLoss, self).__init__()
        self.alpha = alpha
        self.focal = FocalLoss(gamma=gamma)

    def forward(self, input, target):
        focal_term = self.alpha * self.focal(input, target)
        dice_term = torch.log(dice_loss(input, target))
        return (focal_term - dice_term).mean()
class FocalLoss2d(torch.nn.Module):
    """Focal loss for dense prediction; binary (sigmoid) or multi-class
    (softmax over an NCHW logit map) selected via the `type` argument."""
    def __init__(self, gamma=2, size_average=True):
        print("focalLoss")  # kept: original announces construction on stdout
        super(FocalLoss2d, self).__init__()
        self.gamma = gamma
        self.size_average = size_average

    def forward(self, logit, target, class_weight=None, type='sigmoid'):
        '''
        :param logit: raw logits; any shape for 'sigmoid', NCHW for 'softmax'
        :param target: integer class labels (flattened internally)
        :param class_weight: optional per-class weights; defaults to all ones
        :param type: 'sigmoid' (binary) or 'softmax' (multi-class)
        :return: scalar mean loss, or per-element tensor if size_average False
        '''
        target = target.view(-1, 1).long()
        if type == 'sigmoid':
            if class_weight is None:
                class_weight = [1] * 2  # [0.5, 0.5]
            prob = torch.sigmoid(logit)  # F.sigmoid is deprecated
            prob = prob.view(-1, 1)
            prob = torch.cat((1 - prob, prob), 1)
            # Generalized: follow the input's device instead of hard-coded
            # .cuda(), so CPU tensors work too (unchanged on GPU).
            select = torch.zeros(len(prob), 2, device=prob.device)
            select.scatter_(1, target, 1.)
        elif type == 'softmax':
            B, C, H, W = logit.size()
            if class_weight is None:
                class_weight = [1] * C  # [1/C]*C
            logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, C)
            prob = F.softmax(logit, 1)
            select = torch.zeros(len(prob), C, device=prob.device)
            select.scatter_(1, target, 1.)
        class_weight = torch.tensor(class_weight, dtype=prob.dtype,
                                    device=prob.device).view(-1, 1)
        class_weight = torch.gather(class_weight, 0, target)
        # Pick p_t (probability of the true class) for each position.
        prob = (prob * select).sum(1).view(-1, 1)
        prob = torch.clamp(prob, 1e-8, 1 - 1e-8)  # clip by value
        batch_loss = - class_weight * (torch.pow((1 - prob), self.gamma)) * prob.log()
        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss
        return loss
|
<filename>softwares/blender_wizard/wizard_menu.py
# coding: utf-8
# Author: <NAME>
# Contact: <EMAIL>
# Blender modules
import bpy
import bpy.utils.previews
# Wizard modules
from blender_wizard import wizard_plugin
from blender_wizard import wizard_tools
# Blender add-on metadata, read by the add-on manager.
bl_info = {
    "name": "Wizard",
    "author": "<NAME>",
    "version": (2, 0),
    "blender": (2, 93, 3),
    "location": "View3D > Wizard",
    "description": "Provide Wizard's tools",
    "warning": "",
    "doc_url": "wizard-pipeline-manager.webflow.io",
    "category": "User",
}
class save_increment(bpy.types.Operator):
    '''Operator that saves a new file increment through Wizard'''
    bl_idname = "wizard.save_increment"
    bl_label = "Save"
    bl_description = "Save file in Wizard's hierarchy"
    def execute(self, context):
        wizard_plugin.save_increment()
        return {'FINISHED'}
class export(bpy.types.Operator):
    '''Operator that exports the current work through Wizard'''
    bl_idname = "wizard.export"
    bl_label = "Export data"
    bl_description = "Export file in Wizard's hierarchy"
    def execute(self, context):
        wizard_plugin.export()
        return {'FINISHED'}
class import_modeling(bpy.types.Operator):
    '''Operator that references the modeling stage through Wizard'''
    bl_idname = "wizard.import_modeling"
    bl_label = "Import modeling"
    # Bug fix: user-facing tooltip typo "modleing" -> "modeling".
    bl_description = "Import modeling ( hard )"
    def execute(self, context):
        wizard_plugin.reference_modeling()
        return {'FINISHED'}
class import_texturing(bpy.types.Operator):
    '''Operator that references the texturing stage through Wizard'''
    bl_idname = "wizard.import_texturing"
    bl_label = "Import texturing"
    bl_description = "Import texturing and create shader"
    def execute(self, context):
        wizard_plugin.reference_texturing()
        return {'FINISHED'}
class update_texturing(bpy.types.Operator):
    '''Operator that refreshes referenced texturing through Wizard'''
    bl_idname = "wizard.update_texturing"
    bl_label = "Update texturing"
    bl_description = "Update existing texturing in shaders"
    def execute(self, context):
        wizard_plugin.update_texturing()
        return {'FINISHED'}
class set_image_size(bpy.types.Operator):
    '''Operator that applies the Wizard project image size to the scene'''
    bl_idname = "wizard.set_image_size"
    bl_label = "Set image size"
    bl_description = "Apply wizard project image size"
    def execute(self, context):
        wizard_plugin.set_image_size()
        return {'FINISHED'}
class clear_all_materials(bpy.types.Operator):
    '''Operator that clears all materials of the selection'''
    bl_idname = "wizard.clear_all_materials"
    bl_label = "Clear all materials"
    bl_description = "Clear all materials of selected object and children"
    def execute(self, context):
        wizard_tools.clear_all_materials_of_selection()
        return {'FINISHED'}
class TOPBAR_MT_wizard_import_submenu(bpy.types.Menu):
    '''"Import" submenu listing the Wizard import operators'''
    bl_label = "Import"
    def draw(self, context):
        layout = self.layout
        layout.operator("wizard.import_modeling", icon_value=wizard_icons["modeling"].icon_id)
        layout.operator("wizard.import_texturing", icon_value=wizard_icons["texturing"].icon_id)
class TOPBAR_MT_wizard_update_submenu(bpy.types.Menu):
    '''"Update" submenu listing the Wizard update operators'''
    bl_label = "Update"
    def draw(self, context):
        layout = self.layout
        layout.operator("wizard.update_texturing", icon_value=wizard_icons["texturing"].icon_id)
class TOPBAR_MT_wizard_menu(bpy.types.Menu):
    '''Top-level "Wizard" menu shown in the editor top bar'''
    bl_label = "Wizard"
    def draw(self, context):
        layout = self.layout
        layout.operator("wizard.save_increment", icon_value=wizard_icons["save_increment"].icon_id)
        layout.separator()
        layout.operator("wizard.export", icon_value=wizard_icons["export"].icon_id)
        layout.menu("TOPBAR_MT_wizard_import_submenu", icon_value=wizard_icons["import"].icon_id)
        layout.menu("TOPBAR_MT_wizard_update_submenu", icon_value=wizard_icons["update"].icon_id)
        layout.separator()
        layout.operator("wizard.set_image_size", icon_value=wizard_icons["set_image_size"].icon_id)
        layout.operator("wizard.clear_all_materials", icon_value=wizard_icons["clear_all_materials"].icon_id)
def menu_draw(self, context):
    # Draw hook appended to TOPBAR_MT_editor_menus in register().
    self.layout.menu("TOPBAR_MT_wizard_menu")
# Every class to (un)register with Blender, in registration order.
classes = (save_increment,
           export,
           import_modeling,
           import_texturing,
           update_texturing,
           set_image_size,
           clear_all_materials,
           TOPBAR_MT_wizard_import_submenu,
           TOPBAR_MT_wizard_update_submenu,
           TOPBAR_MT_wizard_menu)
def register():
    """Register all operator/menu classes and load the Wizard icon previews."""
    # Register classes
    for cls in classes:
        bpy.utils.register_class(cls)
    bpy.types.TOPBAR_MT_editor_menus.append(TOPBAR_MT_wizard_menu.menu_draw)
    # Register icons
    global wizard_icons
    wizard_icons = bpy.utils.previews.new()
    wizard_icons.load("save_increment", 'icons/save_increment.png', 'IMAGE')
    wizard_icons.load("export", 'icons/export.png', 'IMAGE')
    wizard_icons.load("import", 'icons/import.png', 'IMAGE')
    wizard_icons.load("update", 'icons/update.png', 'IMAGE')
    wizard_icons.load("modeling", 'icons/modeling.png', 'IMAGE')
    wizard_icons.load("texturing", 'icons/texturing.png', 'IMAGE')
    wizard_icons.load("set_image_size", 'icons/set_image_size.png', 'IMAGE')
    wizard_icons.load("clear_all_materials", 'icons/remove_all_materials.png', 'IMAGE')
def unregister():
    """Remove the Wizard menu, unregister all classes and free the icons."""
    # Unregister classes
    bpy.types.TOPBAR_MT_editor_menus.remove(TOPBAR_MT_wizard_menu.menu_draw)
    for cls in classes:
        bpy.utils.unregister_class(cls)
    # Unregister icons
    # Bug fix: the global statement named `custom_icons`, which does not
    # exist; the module-level preview collection is `wizard_icons`.
    global wizard_icons
    bpy.utils.previews.remove(wizard_icons)
if __name__ == "__main__":
register() |
<gh_stars>0
import logging
import os
from flask import jsonify, redirect
from flask_themes2 import render_theme_template, static_file_url
from werkzeug.routing import BaseConverter
from cert_viewer import certificate_store_bridge
from cert_viewer import introduction_store_bridge
from cert_viewer import verifier_bridge
DEFAULT_THEME = 'default'
# Matches a hyphenated GUID (8-4-4-4-12 hex groups) or a 24-hex-char id.
GUID_REGEX = '([0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}|[0-9a-fA-F]{24})'
def update_app_config(app, config):
    """Copy viewer settings from *config* onto the Flask app config."""
    app.config.update(
        SECRET_KEY=config.secret_key,
        ISSUER_NAME=config.issuer_name,
        SITE_DESCRIPTION=config.site_description,
        ISSUER_LOGO_PATH=config.issuer_logo_path,
        ISSUER_EMAIL=config.issuer_email,
        THEME=config.theme,
    )
    # recent_certids is a comma-separated string; empty/None means none.
    recent = config.recent_certids
    app.config['RECENT_CERT_IDS'] = recent.split(',') if recent else []
def render(template, **context):
    """Render *template* under the application's configured theme."""
    from cert_viewer import app
    theme = app.config['THEME']
    return render_theme_template(theme, template, **context)
def configure_views(app, config):
    """Apply configuration and register all URL rules on *app*."""
    update_app_config(app, config)
    add_rules(app, config)
from flask.views import View
class GenericView(View):
    """Pluggable Flask view that simply renders a fixed themed template."""
    def __init__(self, template):
        # template: file name handed to the theme renderer on each request.
        self.template = template
        super(GenericView, self).__init__()
    def dispatch_request(self):
        return render(self.template)
def add_rules(app, config):
    """Register URL rules, view classes and error handlers on *app*."""
    from cert_viewer.views.award_view import AwardView
    from cert_viewer.views.json_award_view import JsonAwardView
    from cert_viewer.views.renderable_view import RenderableView
    from cert_viewer.views.issuer_view import IssuerView
    from cert_viewer.views.verify_view import VerifyView
    from cert_viewer.views.request_view import RequestView
    # NOTE(review): update_app_config is also called by configure_views;
    # this second call looks redundant - confirm before removing.
    update_app_config(app, config)
    app.url_map.converters['regex'] = RegexConverter
    app.add_url_rule('/', view_func=GenericView.as_view('index', template='index.html'))
    app.add_url_rule(rule='/<regex("{}"):certificate_uid>'.format(GUID_REGEX), endpoint='award',
                     view_func=AwardView.as_view(name='award', template='award.html',
                                                 view=certificate_store_bridge.award))
    app.add_url_rule('/certificate/<regex("{}"):certificate_uid>'.format(GUID_REGEX),
                     view_func=JsonAwardView.as_view('certificate', view=certificate_store_bridge.get_award_json))
    app.add_url_rule('/verify/<regex("{}"):certificate_uid>'.format(GUID_REGEX),
                     view_func=VerifyView.as_view('verify', view=verifier_bridge.verify))
    app.add_url_rule('/intro/', view_func=introduction_store_bridge.insert_introduction, methods=['POST', ])
    app.add_url_rule('/request', view_func=RequestView.as_view(name='request'))
    app.add_url_rule('/faq', view_func=GenericView.as_view('faq', template='faq.html'))
    app.add_url_rule('/bitcoinkeys', view_func=GenericView.as_view('bitcoinkeys', template='bitcoinkeys.html'))
    app.add_url_rule('/issuer/<issuer_file>', view_func=issuer_page)
    app.add_url_rule('/spec', view_func=spec)
    app.register_error_handler(404, page_not_found)
    app.register_error_handler(KeyError, key_error)
    app.register_error_handler(500, internal_server_error)
    app.register_error_handler(Exception, unhandled_exception)
from flasgger import Swagger
def spec():
    """Return the API spec for the app as a JSON response."""
    # NOTE(review): Swagger(app) returns a flasgger extension object, not a
    # plain spec dict; jsonify() of it is unlikely to serialize usefully —
    # confirm whether something like Swagger(app).get_apispecs() was intended.
    from cert_viewer import app
    return jsonify(Swagger(app))
def issuer_page(issuer_file):
    """Redirect to the themed static copy of an issuer file."""
    from cert_viewer import app
    relative_path = os.path.join('issuer/', issuer_file)
    target = static_file_url(theme=app.config['THEME'], filename=relative_path)
    return redirect(target, code=302)
class RegexConverter(BaseConverter):
    """URL converter whose matching pattern is supplied inline in the rule,
    e.g. ``/<regex("[0-9]+"):uid>``."""

    def __init__(self, url_map, *items):
        super(RegexConverter, self).__init__(url_map)
        # The first positional argument is the regex for this path segment;
        # any further arguments are ignored.
        self.regex = items[0]
# Errors
def page_not_found(error):
    """404 handler: log the miss and return a plain-text body."""
    logging.error('Page not found: %s', error, exc_info=True)
    body = 'This page does not exist'
    return body, 404
def key_error(error):
    """KeyError handler: report the missing key as a plain-text 404.

    *error* is the raised KeyError; its first argument is the missing key.
    """
    key = error.args[0]
    # Fix: the original log line read "Key not found not found" and ended
    # with a dangling "error: " that had no matching %s placeholder.
    logging.error('Key not found: %s', key, exc_info=True)
    # Use format() rather than '+' so non-string keys do not raise TypeError.
    message = 'Key not found: {0}'.format(key)
    return message, 404
def internal_server_error(error):
    """500 handler: log the failure and return a plain-text body."""
    logging.error('Server Error: %s', error, exc_info=True)
    message = 'Server error: {0}'.format(error)
    return message, 500
def unhandled_exception(e):
    """Catch-all handler: log with traceback and return a plain-text 500."""
    # Fix: logging.exception() already records exc_info, so passing
    # exc_info=True as well was redundant.
    logging.exception('Unhandled Exception: %s', e)
    return 'Unhandled exception: {0}'.format(e), 500
|
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
class MassSearchReplaceAnyInputTagVisitor(object):
    """Visitor that travels within a cms.Sequence, looks for a parameter and replace its value
    It will climb down within PSets, VPSets and VInputTags to find its target"""
    def __init__(self,paramSearch,paramReplace,verbose=False,moduleLabelOnly=False,skipLabelTest=False):
        # Normalize both tags so plain-string tags compare correctly.
        self._paramSearch = self.standardizeInputTagFmt(paramSearch)
        self._paramReplace = self.standardizeInputTagFmt(paramReplace)
        self._moduleName = ''
        self._verbose=verbose
        # When set, match on the module label alone and patch only that field.
        self._moduleLabelOnly=moduleLabelOnly
        # When set, enter() skips deriving a module label for reporting.
        self._skipLabelTest=skipLabelTest
    def doIt(self,pset,base):
        # Recursively walk *pset* replacing matching InputTags in place;
        # *base* is the dotted path used only for verbose reporting.
        if isinstance(pset, cms._Parameterizable):
            for name in pset.parameterNames_():
                # if I use pset.parameters_().items() I get copies of the parameter values
                # so I can't modify the nested pset
                value = getattr(pset,name)
                if isinstance(value, cms.PSet) or isinstance(value, cms.EDProducer) or isinstance(value, cms.EDAlias):
                    # EDProducer and EDAlias to support SwitchProducer
                    self.doIt(value,base+"."+name)
                elif isinstance(value, cms.VPSet):
                    for (i,ps) in enumerate(value): self.doIt(ps, "%s.%s[%d]"%(base,name,i) )
                elif isinstance(value, cms.VInputTag):
                    for (i,n) in enumerate(value):
                        # VInputTag can be declared as a list of strings, so ensure that n is formatted correctly
                        n = self.standardizeInputTagFmt(n)
                        if (n == self._paramSearch):
                            if self._verbose:print("Replace %s.%s[%d] %s ==> %s " % (base, name, i, self._paramSearch, self._paramReplace))
                            if not value.isTracked():
                                # Keep the replacement untracked to preserve the
                                # container's trackedness.
                                value[i] = cms.untracked.InputTag(self._paramReplace.getModuleLabel(),
                                                                  self._paramReplace.getProductInstanceLabel(),
                                                                  self._paramReplace.getProcessName())
                            else:
                                value[i] = self._paramReplace
                        elif self._moduleLabelOnly and n.moduleLabel == self._paramSearch.moduleLabel:
                            # Label-only match: swap just the module label,
                            # keeping instance label and process name.
                            nrep = n; nrep.moduleLabel = self._paramReplace.moduleLabel
                            if self._verbose:print("Replace %s.%s[%d] %s ==> %s " % (base, name, i, n, nrep))
                            value[i] = nrep
                elif isinstance(value, cms.InputTag):
                    if value == self._paramSearch:
                        if self._verbose:print("Replace %s.%s %s ==> %s " % (base, name, self._paramSearch, self._paramReplace))
                        from copy import deepcopy
                        if not value.isTracked():
                            # the existing value should stay untracked even if the given parameter is tracked
                            setattr(pset, name, cms.untracked.InputTag(self._paramReplace.getModuleLabel(),
                                                                       self._paramReplace.getProductInstanceLabel(),
                                                                       self._paramReplace.getProcessName()))
                        else:
                            setattr(pset, name, deepcopy(self._paramReplace) )
                    elif self._moduleLabelOnly and value.moduleLabel == self._paramSearch.moduleLabel:
                        from copy import deepcopy
                        # Deep-copy before mutating so other references to the
                        # original tag are left untouched.
                        repl = deepcopy(getattr(pset, name))
                        repl.moduleLabel = self._paramReplace.moduleLabel
                        setattr(pset, name, repl)
                        if self._verbose:print("Replace %s.%s %s ==> %s " % (base, name, value, repl))
    @staticmethod
    def standardizeInputTagFmt(inputTag):
        ''' helper function to ensure that the InputTag is defined as cms.InputTag(str) and not as a plain str '''
        if not isinstance(inputTag, cms.InputTag):
            return cms.InputTag(inputTag)
        return inputTag
    def enter(self,visitee):
        # Visitor hook: derive a label for verbose reporting, then process.
        label = ''
        if (not self._skipLabelTest):
            if hasattr(visitee,"hasLabel_") and visitee.hasLabel_():
                label = visitee.label_()
            else: label = '<Module not in a Process>'
        else:
            label = '<Module label not tested>'
        self.doIt(visitee, label)
    def leave(self,visitee):
        # Visitor hook: nothing to do on the way out.
        pass
def massSearchReplaceAnyInputTag(sequence, oldInputTag, newInputTag, verbose=False, moduleLabelOnly=False, skipLabelTest=False):
    """Replace InputTag oldInputTag with newInputTag, at any level of nesting within PSets, VPSets, VInputTags..."""
    visitor = MassSearchReplaceAnyInputTagVisitor(oldInputTag, newInputTag,
                                                  verbose=verbose,
                                                  moduleLabelOnly=moduleLabelOnly,
                                                  skipLabelTest=skipLabelTest)
    sequence.visit(visitor)
def massReplaceInputTag(process, old="rawDataCollector", new="rawDataRepacker", verbose=False, moduleLabelOnly=False, skipLabelTest=False):
    """Apply massSearchReplaceAnyInputTag to every Path, EndPath and scheduled Task of *process*."""
    def _replace(holder):
        # One replacement pass over a single sequence-like container.
        massSearchReplaceAnyInputTag(holder, old, new, verbose, moduleLabelOnly, skipLabelTest)

    for label in process.paths_().keys():
        _replace(getattr(process, label))
    for label in process.endpaths_().keys():
        _replace(getattr(process, label))
    schedule = process.schedule_()
    if schedule is not None:
        for task in schedule._tasks:
            _replace(task)
    return process
class MassSearchParamVisitor(object):
    """Visitor that travels within a cms.Sequence, looks for a parameter and returns a list of modules that have it"""

    def __init__(self, paramName, paramSearch):
        self._paramName = paramName
        self._paramSearch = paramSearch
        self._modules = []

    def enter(self, visitee):
        """Record *visitee* when it carries the parameter with the searched value."""
        if hasattr(visitee, self._paramName) and getattr(visitee, self._paramName) == self._paramSearch:
            self._modules.append(visitee)

    def leave(self, visitee):
        pass

    def modules(self):
        """Return the modules collected so far."""
        return self._modules
class MassSearchReplaceParamVisitor(object):
    """Visitor that travels within a cms.Sequence, looks for a parameter and replaces its value"""

    def __init__(self, paramName, paramSearch, paramValue, verbose=False):
        self._paramName = paramName
        self._paramSearch = paramSearch
        self._paramValue = paramValue
        self._verbose = verbose

    def doIt(self, mod, name):
        """Replace the parameter on *mod* when it matches the searched value."""
        if not hasattr(mod, self._paramName):
            return
        current = getattr(mod, self._paramName)
        if current == self._paramSearch:
            if self._verbose:
                print("Replaced %s.%s: %s => %s" % (name, self._paramName, current, self._paramValue))
            setattr(mod, self._paramName, self._paramValue)

    def enter(self, visitee):
        if isinstance(visitee, cms.SwitchProducer):
            # Recurse into each case module of the SwitchProducer.
            for modName in visitee.parameterNames_():
                self.doIt(getattr(visitee, modName), "%s.%s" % (str(visitee), modName))
        else:
            self.doIt(visitee, str(visitee))

    def leave(self, visitee):
        pass
def massSearchReplaceParam(sequence, paramName, paramOldValue, paramValue, verbose=False):
    """Replace every parameter *paramName* equal to *paramOldValue* with *paramValue* inside *sequence*."""
    visitor = MassSearchReplaceParamVisitor(paramName, paramOldValue, paramValue, verbose)
    sequence.visit(visitor)
def massReplaceParameter(process, name="label", old="rawDataCollector", new="rawDataRepacker", verbose=False):
    """Apply massSearchReplaceParam to every Path, EndPath and scheduled Task of *process*."""
    def _replace(holder):
        # One replacement pass over a single sequence-like container.
        massSearchReplaceParam(holder, name, old, new, verbose)

    for label in process.paths_().keys():
        _replace(getattr(process, label))
    for label in process.endpaths_().keys():
        _replace(getattr(process, label))
    schedule = process.schedule_()
    if schedule is not None:
        for task in schedule._tasks:
            _replace(task)
    return process
if __name__=="__main__":
import unittest
    class SwitchProducerTest(cms.SwitchProducer):
        """Minimal SwitchProducer subclass with four always-enabled cases,
        used only by the tests below."""
        def __init__(self, **kargs):
            # Each case maps to a callable; presumably returning
            # (enabled, priority) pairs — confirm against cms.SwitchProducer.
            super(SwitchProducerTest,self).__init__(
                dict(
                    test1 = lambda: (True, -10),
                    test2 = lambda: (True, -9),
                    test3 = lambda: (True, -8),
                    test4 = lambda: (True, -7)
                ), **kargs)
    class TestModuleCommand(unittest.TestCase):
        """Unit tests for the mass search/replace helpers defined above."""
        def testMassSearchReplaceAnyInputTag(self):
            """Tags are replaced at every nesting level; trackedness is kept."""
            p = cms.Process("test")
            p.a = cms.EDProducer("a", src=cms.InputTag("gen"))
            p.b = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.c = cms.EDProducer("ac", src=cms.InputTag("b"), usrc=cms.untracked.InputTag("b"),
                                 nested = cms.PSet(src = cms.InputTag("b"), src2 = cms.InputTag("c"), usrc = cms.untracked.InputTag("b")),
                                 nestedv = cms.VPSet(cms.PSet(src = cms.InputTag("b")), cms.PSet(src = cms.InputTag("d"))),
                                 unestedv = cms.untracked.VPSet(cms.untracked.PSet(src = cms.InputTag("b")), cms.untracked.PSet(src = cms.InputTag("d"))),
                                 vec = cms.VInputTag(cms.InputTag("a"), cms.InputTag("b"), cms.InputTag("c"), cms.InputTag("d")),
                                 uvec = cms.untracked.VInputTag(cms.untracked.InputTag("a"), cms.untracked.InputTag("b"), cms.untracked.InputTag("c"), cms.untracked.InputTag("d")),
                                 )
            p.sp = SwitchProducerTest(
                test1 = cms.EDProducer("a", src = cms.InputTag("b"),
                                       nested = cms.PSet(src = cms.InputTag("b"), src2 = cms.InputTag("c"), usrc = cms.untracked.InputTag("b"))
                                       ),
                test2 = cms.EDProducer("b", src = cms.InputTag("c"),
                                       nested = cms.PSet(src = cms.InputTag("b"), src2 = cms.InputTag("c"), usrc = cms.untracked.InputTag("b"))
                                       ),
            )
            p.s = cms.Sequence(p.a*p.b*p.c*p.sp)
            massSearchReplaceAnyInputTag(p.s, cms.InputTag("b"), cms.InputTag("new"))
            # p.b itself is named "b" but its *src* is "a", so it must not change.
            self.assertNotEqual(cms.InputTag("new"), p.b.src)
            self.assertEqual(cms.InputTag("new"), p.c.src)
            self.assertEqual(cms.InputTag("new"), p.c.usrc)
            self.assertEqual(cms.InputTag("new"), p.c.nested.src)
            self.assertEqual(cms.InputTag("new"), p.c.nested.usrc)
            self.assertFalse(p.c.nested.usrc.isTracked())
            self.assertNotEqual(cms.InputTag("new"), p.c.nested.src2)
            self.assertEqual(cms.InputTag("new"), p.c.nestedv[0].src)
            self.assertNotEqual(cms.InputTag("new"), p.c.nestedv[1].src)
            self.assertEqual(cms.InputTag("new"), p.c.unestedv[0].src)
            self.assertNotEqual(cms.InputTag("new"), p.c.unestedv[1].src)
            self.assertNotEqual(cms.InputTag("new"), p.c.vec[0])
            self.assertEqual(cms.InputTag("new"), p.c.vec[1])
            self.assertNotEqual(cms.InputTag("new"), p.c.vec[2])
            self.assertNotEqual(cms.InputTag("new"), p.c.vec[3])
            self.assertNotEqual(cms.InputTag("new"), p.c.uvec[0])
            self.assertEqual(cms.InputTag("new"), p.c.uvec[1])
            self.assertNotEqual(cms.InputTag("new"), p.c.uvec[2])
            self.assertNotEqual(cms.InputTag("new"), p.c.uvec[3])
            self.assertFalse(p.c.uvec[0].isTracked())
            self.assertFalse(p.c.uvec[1].isTracked())
            self.assertFalse(p.c.uvec[2].isTracked())
            self.assertFalse(p.c.uvec[3].isTracked())
            self.assertEqual(cms.InputTag("new"), p.sp.test1.src)
            self.assertEqual(cms.InputTag("new"), p.sp.test1.nested.src)
            self.assertEqual(cms.InputTag("c"), p.sp.test1.nested.src2)
            self.assertEqual(cms.untracked.InputTag("new"), p.sp.test1.nested.usrc)
            self.assertEqual(cms.InputTag("c"), p.sp.test2.src)
            self.assertEqual(cms.InputTag("new"), p.sp.test2.nested.src)
            self.assertEqual(cms.InputTag("c"), p.sp.test2.nested.src2)
            self.assertEqual(cms.untracked.InputTag("new"), p.sp.test2.nested.usrc)
        def testMassReplaceInputTag(self):
            """Process-wide replacement covers paths, endpaths and scheduled tasks,
            but not modules outside of them (p.d)."""
            # An empty process must come out unchanged.
            process1 = cms.Process("test")
            massReplaceInputTag(process1, "a", "b", False, False, False)
            self.assertEqual(process1.dumpPython(), cms.Process('test').dumpPython())
            p = cms.Process("test")
            p.a = cms.EDProducer("a", src=cms.InputTag("gen"))
            p.b = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.c = cms.EDProducer("ac", src=cms.InputTag("b"),
                                 nested = cms.PSet(src = cms.InputTag("a"), src2 = cms.InputTag("c"), usrc = cms.untracked.InputTag("a")),
                                 nestedv = cms.VPSet(cms.PSet(src = cms.InputTag("a")), cms.PSet(src = cms.InputTag("d"))),
                                 unestedv = cms.untracked.VPSet(cms.untracked.PSet(src = cms.InputTag("a")), cms.untracked.PSet(src = cms.InputTag("d"))),
                                 vec = cms.VInputTag(cms.InputTag("a"), cms.InputTag("b"), cms.InputTag("c"), cms.InputTag("d")),
                                 uvec = cms.untracked.VInputTag(cms.untracked.InputTag("a"), cms.untracked.InputTag("b"), cms.untracked.InputTag("c"), cms.InputTag("d")),
                                 )
            p.d = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.e = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.f = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.g = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.h = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.i = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.sp = SwitchProducerTest(
                test1 = cms.EDProducer("a", src = cms.InputTag("a"),
                                       nested = cms.PSet(src = cms.InputTag("a"), src2 = cms.InputTag("c"), usrc = cms.untracked.InputTag("a"))
                                       ),
                test2 = cms.EDProducer("b", src = cms.InputTag("c"),
                                       nested = cms.PSet(src = cms.InputTag("a"), src2 = cms.InputTag("c"), usrc = cms.untracked.InputTag("a"))
                                       ),
            )
            p.s1 = cms.Sequence(p.a*p.b*p.c*p.sp)
            p.path1 = cms.Path(p.s1)
            p.s2 = cms.Sequence(p.d)
            p.path2 = cms.Path(p.e)
            p.s3 = cms.Sequence(p.f)
            p.endpath1 = cms.EndPath(p.s3)
            p.endpath2 = cms.EndPath(p.g)
            p.t1 = cms.Task(p.h)
            p.t2 = cms.Task(p.i)
            p.schedule = cms.Schedule()
            p.schedule.associate(p.t1, p.t2)
            massReplaceInputTag(p, "a", "b", False, False, False)
            self.assertEqual(cms.InputTag("b"), p.b.src)
            self.assertEqual(cms.InputTag("b"), p.c.nested.src)
            self.assertEqual(cms.InputTag("b"), p.c.nested.usrc)
            self.assertFalse(p.c.nested.usrc.isTracked())
            self.assertEqual(cms.InputTag("b"), p.c.nestedv[0].src)
            self.assertEqual(cms.InputTag("b"), p.c.unestedv[0].src)
            self.assertEqual(cms.InputTag("b"), p.c.vec[0])
            self.assertEqual(cms.InputTag("c"), p.c.vec[2])
            self.assertEqual(cms.InputTag("b"), p.c.uvec[0])
            self.assertEqual(cms.InputTag("c"), p.c.uvec[2])
            self.assertFalse(p.c.uvec[0].isTracked())
            self.assertFalse(p.c.uvec[1].isTracked())
            self.assertFalse(p.c.uvec[2].isTracked())
            # p.d sits in sequence s2, which is not in any Path/EndPath/Task.
            self.assertEqual(cms.InputTag("a"), p.d.src)
            self.assertEqual(cms.InputTag("b"), p.e.src)
            self.assertEqual(cms.InputTag("b"), p.f.src)
            self.assertEqual(cms.InputTag("b"), p.g.src)
            self.assertEqual(cms.InputTag("b"), p.h.src)
            self.assertEqual(cms.InputTag("b"), p.i.src)
            self.assertEqual(cms.InputTag("b"), p.sp.test1.src)
            self.assertEqual(cms.InputTag("b"), p.sp.test1.nested.src)
            self.assertEqual(cms.InputTag("c"), p.sp.test1.nested.src2)
            self.assertEqual(cms.untracked.InputTag("b"), p.sp.test1.nested.usrc)
            self.assertEqual(cms.InputTag("c"), p.sp.test2.src)
            self.assertEqual(cms.InputTag("b"), p.sp.test2.nested.src)
            self.assertEqual(cms.InputTag("c"), p.sp.test2.nested.src2)
            self.assertEqual(cms.untracked.InputTag("b"), p.sp.test2.nested.usrc)
        def testMassSearchReplaceParam(self):
            """Only top-level parameters named 'src' are replaced, not nested ones."""
            p = cms.Process("test")
            p.a = cms.EDProducer("a", src=cms.InputTag("gen"))
            p.b = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.c = cms.EDProducer("ac", src=cms.InputTag("b"),
                                 nested = cms.PSet(src = cms.InputTag("c"), src2 = cms.InputTag("b"))
                                 )
            p.d = cms.EDProducer("ac", src=cms.untracked.InputTag("b"),
                                 nested = cms.PSet(src = cms.InputTag("c"), src2 = cms.InputTag("b"))
                                 )
            p.sp = SwitchProducerTest(
                test1 = cms.EDProducer("a", src = cms.InputTag("b"),
                                       nested = cms.PSet(src = cms.InputTag("b"))
                                       ),
                test2 = cms.EDProducer("b", src = cms.InputTag("b")),
            )
            p.s = cms.Sequence(p.a*p.b*p.c*p.d*p.sp)
            massSearchReplaceParam(p.s,"src",cms.InputTag("b"),"a")
            self.assertEqual(cms.InputTag("a"),p.c.src)
            self.assertEqual(cms.InputTag("c"),p.c.nested.src)
            self.assertEqual(cms.InputTag("b"),p.c.nested.src2)
            self.assertEqual(cms.untracked.InputTag("a"),p.d.src)
            self.assertEqual(cms.InputTag("c"),p.d.nested.src)
            self.assertEqual(cms.InputTag("b"),p.d.nested.src2)
            self.assertEqual(cms.InputTag("a"),p.sp.test1.src)
            self.assertEqual(cms.InputTag("b"),p.sp.test1.nested.src)
            self.assertEqual(cms.InputTag("a"),p.sp.test2.src)
        def testMassReplaceParam(self):
            """Process-wide parameter replacement honors paths, endpaths and tasks."""
            # An empty process must come out unchanged.
            process1 = cms.Process("test")
            massReplaceParameter(process1, "src", cms.InputTag("a"), "b", False)
            self.assertEqual(process1.dumpPython(), cms.Process("test").dumpPython())
            p = cms.Process("test")
            p.a = cms.EDProducer("a", src=cms.InputTag("gen"))
            p.b = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.c = cms.EDProducer("ac", src=cms.InputTag("b"),
                                 nested = cms.PSet(src = cms.InputTag("a"), src2 = cms.InputTag("c")),
                                 nestedv = cms.VPSet(cms.PSet(src = cms.InputTag("a")), cms.PSet(src = cms.InputTag("d"))),
                                 vec = cms.VInputTag(cms.InputTag("a"), cms.InputTag("b"), cms.InputTag("c"), cms.InputTag("d"))
                                 )
            p.d = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.e = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.f = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.g = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.h = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.i = cms.EDProducer("ab", src=cms.InputTag("a"))
            p.j = cms.EDProducer("ab", src=cms.untracked.InputTag("a"))
            p.sp = SwitchProducerTest(
                test1 = cms.EDProducer("a", src = cms.InputTag("a"),
                                       nested = cms.PSet(src = cms.InputTag("a"))
                                       ),
                test2 = cms.EDProducer("b", src = cms.InputTag("a")),
            )
            p.s1 = cms.Sequence(p.a*p.b*p.c*p.sp)
            p.path1 = cms.Path(p.s1)
            p.s2 = cms.Sequence(p.d)
            p.path2 = cms.Path(p.e)
            p.s3 = cms.Sequence(p.f)
            p.endpath1 = cms.EndPath(p.s3)
            p.endpath2 = cms.EndPath(p.g)
            p.t1 = cms.Task(p.h)
            p.t2 = cms.Task(p.i, p.j)
            p.schedule = cms.Schedule()
            p.schedule.associate(p.t1, p.t2)
            massReplaceParameter(p, "src",cms.InputTag("a"), "b", False)
            self.assertEqual(cms.InputTag("gen"), p.a.src)
            self.assertEqual(cms.InputTag("b"), p.b.src)
            self.assertEqual(cms.InputTag("a"), p.c.vec[0])
            self.assertEqual(cms.InputTag("c"), p.c.vec[2])
            self.assertEqual(cms.InputTag("a"), p.d.src)
            self.assertEqual(cms.InputTag("b"), p.e.src)
            self.assertEqual(cms.InputTag("b"), p.f.src)
            self.assertEqual(cms.InputTag("b"), p.g.src)
            self.assertEqual(cms.InputTag("b"), p.h.src)
            self.assertEqual(cms.InputTag("b"), p.i.src)
            self.assertEqual(cms.untracked.InputTag("b"), p.j.src)
            self.assertEqual(cms.InputTag("b"),p.sp.test1.src)
            self.assertEqual(cms.InputTag("a"),p.sp.test1.nested.src)
            self.assertEqual(cms.InputTag("b"),p.sp.test2.src)
unittest.main()
|
<gh_stars>1-10
import constants
import collections
def get_doas_from_category(category_prediction):
    """Collect every DOA (in degrees) whose class id equals *category_prediction*."""
    doa_list = []
    for doa, category in constants.class_ids.items():
        if category != category_prediction:
            continue
        angle = float(doa)
        doa_list.append(angle)
        if angle == 180.0:
            # NOTE(review): 180.0 is deliberately appended a second time,
            # matching the original behaviour — presumably because it is its
            # own mirror angle; confirm intent.
            doa_list.append(angle)
    return doa_list
def get_possible_doas(doa):
    """Return every DOA that shares the predicted class of *doa*."""
    category = constants.class_ids.get(str(doa))
    return get_doas_from_category(category)
def cylindrical(angle):
    """Normalize *angle* (degrees) into the range [0, 360).

    Generalizes the original single-correction version (which only handled
    angles in [-360, 720)): the modulo wraps any finite angle correctly, and
    Python's ``%`` already yields a non-negative result for negative inputs.
    Behaviour is unchanged for all inputs the original handled.
    """
    return angle % 360
def get_quadrant(val):
    """Map an angle in [0, 360) to its quadrant name, or None outside that range."""
    bounds = (
        (0, 90, "first_quadrant"),
        (90, 180, "second_quadrant"),
        (180, 270, "third_quadrant"),
        (270, 360, "fourth_quadrant"),
    )
    for lower, upper, label in bounds:
        if lower <= val < upper:
            return label
    return None
def check_if_twice(prediction_list, iteration):
    """Return a DOA that occurs exactly twice in the flattened predictions and
    lies in the most populated quadrant; None if no such value qualifies.

    On iteration 0 the boundary angles 0/360 and 180 are rejected (returns
    None) — presumably because they are ambiguous front/back values; confirm.
    """
    flatten_list = [j for sub in prediction_list for j in sub]
    # Per-quadrant population counts over all flattened predictions.
    first_quadrant = len([i for i in flatten_list if 0 <= i < 90])
    second_quadrant = len([i for i in flatten_list if 90 <= i < 180])
    third_quadrant = len([i for i in flatten_list if 180 <= i < 270])
    fourth_quadrant = len([i for i in flatten_list if 270 <= i < 360])
    max_q = max([first_quadrant, second_quadrant, third_quadrant, fourth_quadrant])
    # NOTE(review): this dict is keyed by the *counts*, so when two quadrants
    # have equal counts the later entry overwrites the earlier one — the tie
    # then resolves to the later quadrant.  Confirm this ordering is intended.
    quadrants = {first_quadrant: "first_quadrant", second_quadrant: "second_quadrant",
                 third_quadrant: "third_quadrant", fourth_quadrant: "fourth_quadrant"}
    max_quadrant = None
    for key, val in quadrants.items():
        if key == max_q:
            max_quadrant = val
    counter = collections.Counter(flatten_list)
    val = None
    if 2 in counter.values():
        # Pick the first value seen exactly twice that falls in max_quadrant.
        for key, value in counter.items():
            if value == 2 and (get_quadrant(key) == max_quadrant):
                val = key
                break
    if (val == 0.0 or val == 360.0) and iteration == 0:
        return None
    if val == 180.0 and iteration == 0:
        return None
    return val
def get_mean_prediction(prediction_list):
    """Average the flattened predictions that fall in the most populated quadrant.

    Ties between quadrants resolve to the earliest quadrant, matching the
    original elif chain.
    """
    values = [v for sub in prediction_list for v in sub]
    buckets = (
        [v for v in values if 0 <= v < 90],
        [v for v in values if 90 <= v < 180],
        [v for v in values if 180 <= v < 270],
        [v for v in values if 270 <= v < 360],
    )
    sizes = [len(bucket) for bucket in buckets]
    biggest = max(sizes)
    prediction = None
    for size, bucket in zip(sizes, buckets):
        if size == biggest:
            prediction = sum(bucket) / biggest
            break
    return prediction
|
# -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for the Scalars Plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os.path
from six import StringIO
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorboard.backend import application
from tensorboard.backend.event_processing import plugin_event_accumulator as event_accumulator # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.plugins import base_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.plugins.scalar import scalars_plugin
from tensorboard.plugins.scalar import summary
from tensorboard.util import test_util
tf.compat.v1.disable_v2_behavior()
class ScalarsPluginTest(tf.test.TestCase):
    """Integration tests for the scalars plugin, against both event-file runs
    and a SQLite-backed TensorBoard database."""

    _STEPS = 99

    _LEGACY_SCALAR_TAG = 'ancient-values'
    _SCALAR_TAG = 'simple-values'
    _HISTOGRAM_TAG = 'complicated-values'

    _DISPLAY_NAME = 'Walrus population'
    _DESCRIPTION = 'the *most* valuable statistic'
    _HTML_DESCRIPTION = '<p>the <em>most</em> valuable statistic</p>'

    _RUN_WITH_LEGACY_SCALARS = '_RUN_WITH_LEGACY_SCALARS'
    _RUN_WITH_SCALARS = '_RUN_WITH_SCALARS'
    _RUN_WITH_HISTOGRAM = '_RUN_WITH_HISTOGRAM'

    def __init__(self, *args, **kwargs):
        super(ScalarsPluginTest, self).__init__(*args, **kwargs)
        self.logdir = None
        self.plugin = None

    def set_up_with_runs(self, run_names):
        """Generate one event-file run per name and point the plugin at them."""
        self.logdir = self.get_temp_dir()
        for run_name in run_names:
            self.generate_run(run_name)
        multiplexer = event_multiplexer.EventMultiplexer(size_guidance={
            # don't truncate my test data, please
            event_accumulator.TENSORS: self._STEPS,
        })
        multiplexer.AddRunsFromDirectory(self.logdir)
        multiplexer.Reload()
        context = base_plugin.TBContext(logdir=self.logdir, multiplexer=multiplexer)
        self.plugin = scalars_plugin.ScalarsPlugin(context)

    def set_up_db(self):
        """Configure the plugin against a fresh SQLite TensorBoard database."""
        self.db_path = os.path.join(self.get_temp_dir(), 'db.db')
        self.db_uri = 'sqlite:' + self.db_path
        db_module, db_connection_provider = application.get_database_info(
            self.db_uri)
        context = base_plugin.TBContext(
            db_module=db_module,
            db_connection_provider=db_connection_provider,
            db_uri=self.db_uri)
        self.core_plugin = core_plugin.CorePlugin(context)
        self.plugin = scalars_plugin.ScalarsPlugin(context)

    def generate_run_to_db(self, experiment_name, run_name):
        """Write _STEPS scalar summaries for one run directly into the DB."""
        tf.compat.v1.reset_default_graph()
        global_step = tf.compat.v1.placeholder(tf.int64)
        db_writer = tf.contrib.summary.create_db_writer(
            db_uri=self.db_path,
            experiment_name=experiment_name,
            run_name=run_name,
            user_name='user')
        # (Removed an unused ``scalar_ops = None`` local from the original.)
        with db_writer.as_default(), tf.contrib.summary.always_record_summaries():
            tf.contrib.summary.scalar(self._SCALAR_TAG, 42, step=global_step)
        flush_op = tf.contrib.summary.flush(db_writer._resource)
        with tf.compat.v1.Session() as sess:
            sess.run(tf.contrib.summary.summary_writer_initializer_op())
            for step in xrange(self._STEPS):
                feed_dict = {global_step: step}
                sess.run(tf.contrib.summary.all_summary_ops(), feed_dict=feed_dict)
            sess.run(flush_op)

    def testRoutesProvided(self):
        """Tests that the plugin offers the correct routes."""
        self.set_up_with_runs([self._RUN_WITH_SCALARS])
        routes = self.plugin.get_plugin_apps()
        # Fix: ``collections.Callable`` was removed in Python 3.10 (the ABC
        # lives in ``collections.abc``); the builtin callable() check is
        # equivalent and version-proof.
        self.assertTrue(callable(routes['/scalars']))
        self.assertTrue(callable(routes['/tags']))

    def generate_run(self, run_name):
        """Write _STEPS summaries of the kind implied by *run_name* to logdir."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        placeholder = tf.compat.v1.placeholder(tf.float32, shape=[3])
        if run_name == self._RUN_WITH_LEGACY_SCALARS:
            tf.compat.v1.summary.scalar(self._LEGACY_SCALAR_TAG, tf.reduce_mean(input_tensor=placeholder))
        elif run_name == self._RUN_WITH_SCALARS:
            summary.op(self._SCALAR_TAG, tf.reduce_sum(input_tensor=placeholder),
                       display_name=self._DISPLAY_NAME,
                       description=self._DESCRIPTION)
        elif run_name == self._RUN_WITH_HISTOGRAM:
            tf.compat.v1.summary.histogram(self._HISTOGRAM_TAG, placeholder)
        else:
            assert False, 'Invalid run name: %r' % run_name
        summ = tf.compat.v1.summary.merge_all()
        subdir = os.path.join(self.logdir, run_name)
        with test_util.FileWriterCache.get(subdir) as writer:
            writer.add_graph(sess.graph)
            for step in xrange(self._STEPS):
                feed_dict = {placeholder: [1 + step, 2 + step, 3 + step]}
                s = sess.run(summ, feed_dict=feed_dict)
                writer.add_summary(s, global_step=step)

    def test_index(self):
        self.set_up_with_runs([self._RUN_WITH_LEGACY_SCALARS,
                               self._RUN_WITH_SCALARS,
                               self._RUN_WITH_HISTOGRAM])
        self.assertEqual({
            self._RUN_WITH_LEGACY_SCALARS: {
                self._LEGACY_SCALAR_TAG: {
                    'displayName': self._LEGACY_SCALAR_TAG,
                    'description': '',
                },
            },
            self._RUN_WITH_SCALARS: {
                '%s/scalar_summary' % self._SCALAR_TAG: {
                    'displayName': self._DISPLAY_NAME,
                    'description': self._HTML_DESCRIPTION,
                },
            },
            self._RUN_WITH_HISTOGRAM: {},
        }, self.plugin.index_impl())

    def _test_scalars_json(self, run_name, tag_name, should_work=True):
        """Fetch scalars as JSON for (run, tag); expect KeyError when not applicable."""
        self.set_up_with_runs([self._RUN_WITH_LEGACY_SCALARS,
                               self._RUN_WITH_SCALARS,
                               self._RUN_WITH_HISTOGRAM])
        if should_work:
            (data, mime_type) = self.plugin.scalars_impl(
                tag_name, run_name, None, scalars_plugin.OutputFormat.JSON)
            self.assertEqual('application/json', mime_type)
            self.assertEqual(len(data), self._STEPS)
        else:
            with self.assertRaises(KeyError):
                self.plugin.scalars_impl(self._SCALAR_TAG, run_name, None,
                                         scalars_plugin.OutputFormat.JSON)

    def _test_scalars_csv(self, run_name, tag_name, should_work=True):
        """Fetch scalars as CSV for (run, tag); expect KeyError when not applicable."""
        self.set_up_with_runs([self._RUN_WITH_LEGACY_SCALARS,
                               self._RUN_WITH_SCALARS,
                               self._RUN_WITH_HISTOGRAM])
        if should_work:
            (data, mime_type) = self.plugin.scalars_impl(
                tag_name, run_name, None, scalars_plugin.OutputFormat.CSV)
            self.assertEqual('text/csv', mime_type)
            s = StringIO(data)
            reader = csv.reader(s)
            self.assertEqual(['Wall time', 'Step', 'Value'], next(reader))
            self.assertEqual(len(list(reader)), self._STEPS)
        else:
            with self.assertRaises(KeyError):
                self.plugin.scalars_impl(self._SCALAR_TAG, run_name, None,
                                         scalars_plugin.OutputFormat.CSV)

    def test_scalars_json_with_legacy_scalars(self):
        self._test_scalars_json(self._RUN_WITH_LEGACY_SCALARS,
                                self._LEGACY_SCALAR_TAG)

    def test_scalars_json_with_scalars(self):
        self._test_scalars_json(self._RUN_WITH_SCALARS,
                                '%s/scalar_summary' % self._SCALAR_TAG)

    def test_scalars_json_with_histogram(self):
        self._test_scalars_json(self._RUN_WITH_HISTOGRAM, self._HISTOGRAM_TAG,
                                should_work=False)

    def test_scalars_csv_with_legacy_scalars(self):
        self._test_scalars_csv(self._RUN_WITH_LEGACY_SCALARS,
                               self._LEGACY_SCALAR_TAG)

    def test_scalars_csv_with_scalars(self):
        self._test_scalars_csv(self._RUN_WITH_SCALARS,
                               '%s/scalar_summary' % self._SCALAR_TAG)

    def test_scalars_csv_with_histogram(self):
        self._test_scalars_csv(self._RUN_WITH_HISTOGRAM, self._HISTOGRAM_TAG,
                               should_work=False)

    def test_active_with_legacy_scalars(self):
        self.set_up_with_runs([self._RUN_WITH_LEGACY_SCALARS])
        self.assertTrue(self.plugin.is_active())

    def test_active_with_scalars(self):
        self.set_up_with_runs([self._RUN_WITH_SCALARS])
        self.assertTrue(self.plugin.is_active())

    def test_active_with_histogram(self):
        self.set_up_with_runs([self._RUN_WITH_HISTOGRAM])
        self.assertFalse(self.plugin.is_active())

    def test_active_with_all(self):
        self.set_up_with_runs([self._RUN_WITH_LEGACY_SCALARS,
                               self._RUN_WITH_SCALARS,
                               self._RUN_WITH_HISTOGRAM])
        self.assertTrue(self.plugin.is_active())

    def test_scalars_db_without_exp(self):
        self.set_up_db()
        self.generate_run_to_db('exp1', self._RUN_WITH_SCALARS)
        (data, mime_type) = self.plugin.scalars_impl(
            self._SCALAR_TAG, self._RUN_WITH_SCALARS, None,
            scalars_plugin.OutputFormat.JSON)
        self.assertEqual('application/json', mime_type)
        # When querying DB-based backend without an experiment id, it returns all
        # scalars without an experiment id. Such scalar can only be generated using
        # raw SQL queries though.
        self.assertEqual(len(data), 0)

    def test_scalars_db_filter_by_experiment(self):
        self.set_up_db()
        self.generate_run_to_db('exp1', self._RUN_WITH_SCALARS)
        all_exps = self.core_plugin.list_experiments_impl()
        exp1 = next((x for x in all_exps if x.get('name') == 'exp1'), {})
        (data, mime_type) = self.plugin.scalars_impl(
            self._SCALAR_TAG, self._RUN_WITH_SCALARS, exp1.get('id'),
            scalars_plugin.OutputFormat.JSON)
        self.assertEqual('application/json', mime_type)
        self.assertEqual(len(data), self._STEPS)

    def test_scalars_db_no_match(self):
        self.set_up_db()
        self.generate_run_to_db('exp1', self._RUN_WITH_SCALARS)
        # experiment_id is a number but we passed a string here.
        (data, mime_type) = self.plugin.scalars_impl(
            self._SCALAR_TAG, self._RUN_WITH_SCALARS, 'random_exp_id',
            scalars_plugin.OutputFormat.JSON)
        self.assertEqual('application/json', mime_type)
        self.assertEqual(len(data), 0)
if __name__ == '__main__':
tf.test.main()
|
import logging
from functools import wraps
from urllib.parse import urlencode, urlsplit, urlunsplit
from collections import (defaultdict, namedtuple)
import aiohttp
from aiohttp import web
from aiohttp.web_exceptions import HTTPUnauthorized
from aiohttp.test_utils import unused_port
_LOGGER = logging.getLogger(__name__)
class JsonError(Exception):
    """Exception carrying an OAuth-style JSON error payload.

    Attributes:
        status: HTTP status code to return.
        error: short machine-readable error code.
        description: human-readable error description.
    """
    def __init__(self, status, error, description):
        self.status = status
        self.error = error
        self.description = description
        # Fix: the original called ``super.__init__(...)`` — that invokes
        # __init__ on the builtin ``super`` type itself and raises TypeError.
        # It must be ``super().__init__(...)``.
        super().__init__("{}: {}".format(error, description))
def oauth_error_response(func):
    """Decorator for aiohttp handlers: convert JsonError into a JSON response.

    Fix: the original wrapper was synchronous, so for an ``async def`` handler
    it only created the coroutine inside ``try`` — a JsonError raised while
    the coroutine actually ran was never caught.  The wrapper is now a
    coroutine that awaits the handler, so the except clause sees the error.
    """
    @wraps(func)
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except JsonError as e:
            data = {
                'error' : e.error,
                'error_description': e.description
            }
            return web.json_response(data = data, status = e.status)
    return wrapper
System = namedtuple('System', ['parameters', 'notifications'])
class Uplink:
    """Fake in-process Uplink REST server for tests.

    Serves OAuth token/authorize endpoints and a minimal systems API
    (notifications, get/put parameters) on a local aiohttp server.
    """

    def __init__(self, loop):
        self.loop = loop
        self.app = web.Application()
        self.app.router.add_routes([
            web.post('/oauth/token', self.on_oauth_token),
            web.post('/oauth/authorize', self.on_oauth_authorize),
            web.get('/api/v1/systems/{systemId}/notifications', self.on_notifications),
            web.get('/api/v1/systems/{systemId}/parameters', self.on_get_parameters),
            web.put('/api/v1/systems/{systemId}/parameters', self.on_put_parameters),
        ])
        self.handler = None
        self.server = None
        self.base = None
        self.redirect = None
        # systemId -> System(parameters, notifications)
        self.systems = {}
        # handler name -> invocation count, for assertions in tests
        self.requests = defaultdict(int)
        # access token -> validity flag; flipped to False by expire_tokens()
        self.tokens = {}
        self.counter = 0

    def get_counted(self, prefix):
        """Return a unique '<prefix>_<n>' string from a running counter."""
        self.counter = self.counter + 1
        return '{}_{}'.format(prefix, self.counter)

    def requests_update(self, fun):
        """Record one invocation of handler `fun`."""
        _LOGGER.debug(fun)
        self.requests[fun] = self.requests[fun] + 1

    async def start(self):
        """Start the server on an unused local port; set base/redirect URLs."""
        print("Starting fake uplink")
        port = unused_port()
        host = '127.0.0.1'
        self.handler = self.app.make_handler()
        self.server = await self.loop.create_server(self.handler,
                                                    host,
                                                    port)
        self.base = 'http://{}:{}'.format(host, port)
        self.redirect = '{}/redirect'.format(self.base)

    async def stop(self):
        """Shut down the server, the application and the handler."""
        _LOGGER.info("Stopping fake uplink")
        self.server.close()
        await self.server.wait_closed()
        await self.app.shutdown()
        await self.handler.shutdown()

    @oauth_error_response
    async def on_oauth_token(self, request):
        """OAuth token endpoint supporting both supported grant types."""
        self.requests_update('on_oauth_token')
        data = await request.post()
        if data['grant_type'] == 'authorization_code':
            token = self.get_counted('dummyaccesstoken')
            self.tokens[token] = True
            data = {
                'access_token' : token,
                'expires_in'   : 300,
                'refresh_token': '<PASSWORD>',
                'scopes'       : 'READSYSTEM',
                'token_type'   : 'bearer',
            }
            return web.json_response(data = data)
        elif data['grant_type'] == 'refresh_token':
            if data['refresh_token'] == 'dummy<PASSWORD>ken':
                token = self.get_counted('dummyaccesstoken')
                self.tokens[token] = True
                data = {
                    'access_token' : token,
                    'expires_in'   : 300,
                    'refresh_token': '<PASSWORD>',
                    'scopes'       : 'READSYSTEM',
                    'token_type'   : 'bearer',
                }
                return web.json_response(data = data)
            else:
                raise Exception("unexpected refresh token")
        else:
            raise JsonError(400, "invalid_request", 'unknown grant_type: {}'.format(data['grant_type']))

    @oauth_error_response
    async def on_oauth_authorize(self, request):
        """OAuth authorize endpoint; redirects back with a dummy code."""
        self.requests_update('on_oauth_authorize')
        await request.post()
        query = request.query
        _LOGGER.info(query)
        assert 'redirect_uri' in query
        assert 'response_type' in query
        assert 'scope' in query
        assert 'state' in query
        url = list(urlsplit(query['redirect_uri']))
        url[3] = urlencode({
            'state': query['state'],
            'code' : 'dummycode',
        })
        raise aiohttp.web.HTTPFound(urlunsplit(url))

    def expire_tokens(self):
        """Invalidate every token handed out so far."""
        for t in self.tokens:
            self.tokens[t] = False

    def add_system(self, systemid):
        """Register an empty fake system under `systemid`."""
        self.systems[systemid] = System({}, {})

    def add_parameter(self, systemid, parameter):
        """Attach `parameter` (keyed by its 'name') to a registered system."""
        self.systems[systemid].parameters[parameter['name']] = parameter

    def add_notification(self, systemid, notification):
        """Attach `notification` (keyed by 'notificationId') to a system."""
        self.systems[systemid].notifications[notification['notificationId']] = notification

    async def check_auth(self, request):
        """Raise HTTPUnauthorized unless a known, unexpired bearer token is sent."""
        auth = request.headers.get('AUTHORIZATION')
        # Bug fix: a missing Authorization header made .get() return None and
        # None.startswith(...) raised AttributeError instead of a clean 401.
        if not auth or not auth.startswith('Bearer '):
            raise HTTPUnauthorized()
        token = auth[7:]
        # Unknown token or expired (False) token -> 401.
        if not self.tokens.get(token):
            raise HTTPUnauthorized()

    async def on_notifications(self, request):
        """Return the registered notifications for a system as one page."""
        self.requests_update('on_notifications')
        await self.check_auth(request)
        systemid = int(request.match_info['systemId'])
        notifications = self.systems[systemid].notifications
        return web.json_response({
            "page": 1,
            "itemsPerPage": 2,
            "numItems": len(notifications),
            "objects": list(notifications.values())
        })

    async def on_get_parameters(self, request):
        """Return the requested parameters (query key 'parameterIds')."""
        self.requests_update('on_get_parameters')
        await self.check_auth(request)
        systemid = int(request.match_info['systemId'])
        parameters = request.query.getall('parameterIds')
        return web.json_response(
            [self.systems[systemid].parameters[str(p)] for p in parameters]
        )

    async def on_put_parameters(self, request):
        """Acknowledge each parameter in the JSON body's 'settings' as DONE."""
        self.requests_update('on_put_parameters')
        await self.check_auth(request)
        systemid = int(request.match_info['systemId'])
        data = await request.json()
        response = []
        for key in data['settings'].keys():
            response.append({
                'status'   : 'DONE',
                'parameter': self.systems[systemid].parameters[str(key)]
            })
        return web.json_response(response)
|
import os
import pickle
import tarfile
from functools import partial
from abc import abstractmethod, ABCMeta
from cakechat.utils.logger import get_logger, WithLogger
_logger = get_logger(__name__)
DEFAULT_CSV_DELIMITER = ','
class AbstractFileResolver(object, metaclass=ABCMeta):
    """Base class for resolving a file that may be missing locally.

    Subclasses implement _resolve() to obtain the file when it is not
    already present on disk.
    """

    def __init__(self, file_path):
        self._file_path = file_path

    @property
    def file_path(self):
        """Path of the file this resolver is responsible for."""
        return self._file_path

    def resolve(self):
        """Return True if the file already exists or could be resolved."""
        return os.path.exists(self._file_path) or self._resolve()

    @abstractmethod
    def _resolve(self):
        """Attempt to obtain the missing file; return True on success."""
class DummyFileResolver(AbstractFileResolver):
    """
    Does nothing if file does not exist locally
    """

    def _resolve(self):
        # No fallback action: a locally missing file is simply unresolved.
        return False
class PackageResolver(WithLogger):
    """Resolves a directory by locating and unpacking a packaged archive.

    When the target path is absent, the archive file '<path>.<ext>' is
    resolved via the configured file resolver and extracted in place.
    """

    def __init__(self, package_path, package_file_resolver_factory, package_file_ext, package_extractor):
        """
        :param package_path: directory expected to exist after resolution
        :param package_file_resolver_factory: factory creating a resolver for the archive file
        :param package_file_ext: archive file extension (without a leading dot)
        :param package_extractor: callable(archive_path, destination_path) that unpacks the archive
        :return:
        """
        WithLogger.__init__(self)
        self._package_path = package_path
        self._package_file_resolver_factory = package_file_resolver_factory
        self._package_file_ext = package_file_ext
        self._package_extractor = package_extractor

    @staticmethod
    def init_resolver(**kwargs):
        """Pre-bind constructor arguments; returns a partial of this class.

        :param kwargs: arguments to fix (e.g. package_file_resolver_factory)
        :return: partially initialized class object
        """
        return partial(PackageResolver, **kwargs)

    def resolve(self):
        """Return True if the package directory exists or was extracted."""
        if os.path.exists(self._package_path):
            return True
        archive_path = '{}.{}'.format(self._package_path, self._package_file_ext)
        archive_resolver = self._package_file_resolver_factory(archive_path)
        if not archive_resolver.resolve():
            return False
        self._logger.info('Extracting package {}'.format(archive_resolver.file_path))
        self._package_extractor(archive_resolver.file_path, self._package_path)
        return True
def load_file(file_path, filter_empty_lines=True):
    """Read a UTF-8 text file into a list of stripped lines.

    :param file_path: path of the text file to read
    :param filter_empty_lines: when True, drop lines that are empty after stripping
    :return: list of lines
    """
    with open(file_path, 'r', encoding='utf-8') as fh:
        stripped = [line.strip() for line in fh]
    if not filter_empty_lines:
        return stripped
    return [line for line in stripped if line]
def ensure_dir(dir_name):
if dir_name and not os.path.exists(dir_name):
os.makedirs(dir_name)
def serialize(filename, data, protocol=pickle.HIGHEST_PROTOCOL):
    """Pickle `data` to `filename`, creating parent directories as needed."""
    ensure_dir(os.path.dirname(filename))
    with open(filename, 'wb') as out:
        pickle.dump(data, out, protocol)
def deserialize(filename):
    """Load and return a pickled object from `filename`."""
    with open(filename, 'rb') as fh:
        return pickle.load(fh)
def get_persisted(factory, persisted_file_name, **kwargs):
    """
    Loads cache if exists, otherwise calls factory and stores the results in the specified cache file.
    **kwargs are passed to the serialize() function
    :param factory:
    :param persisted_file_name:
    :return:
    """
    # Consistency fix: this function was a byte-for-byte duplicate of
    # get_cached() below; delegate so the caching logic lives in one place.
    return get_cached(factory, persisted_file_name, **kwargs)
def is_non_empty_file(file_path):
    """Return True when `file_path` is a regular file with size > 0."""
    if not os.path.isfile(file_path):
        return False
    return os.stat(file_path).st_size != 0
class FileNotFoundException(Exception):
    """Raised when a required file cannot be found or resolved."""
    pass
def extract_tar(source_path, destination_path, compression_type='gz'):
    """
    Extract a tar archive into `destination_path`.

    :param source_path: path of the tar archive
    :param destination_path: directory to extract into
    :param compression_type: None (plain tar), 'gz', 'bz2' or 'xz'
    :return:
    """
    # Bug fix: the old expression produced mode 'r:r' for a plain tar
    # (compression_type=None), which tarfile.open() rejects; an
    # uncompressed archive needs mode 'r:'.
    mode = 'r:{}'.format(compression_type) if compression_type else 'r:'
    # NOTE(review): extractall() trusts member paths — do not use this on
    # untrusted archives without validating members (path traversal risk).
    with tarfile.open(source_path, mode) as fh:
        fh.extractall(path=destination_path)
def ensure_file(file_name, mode, encoding=None):
    """Open `file_name`, first creating its parent directory if needed."""
    parent_dir = os.path.dirname(file_name)
    ensure_dir(parent_dir)
    return open(file_name, mode, encoding=encoding)
def get_cached(factory, cache_file_name, **kwargs):
    """Load a pickled cache if present; otherwise build and persist it.

    **kwargs are forwarded to serialize().
    :param factory: zero-argument callable producing the data
    :param cache_file_name: path of the cache file
    :return: the cached or freshly built data
    """
    if not os.path.exists(cache_file_name):
        _logger.info('Creating {}'.format(cache_file_name))
        fresh = factory()
        serialize(cache_file_name, fresh, **kwargs)
        return fresh
    _logger.info('Loading {}'.format(cache_file_name))
    return deserialize(cache_file_name)
|
# Copyright 2019 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for OpenStack Cinder volume driver
"""
import uuid
import mock
from oslo_utils import units
from cinder import context
from cinder import db
from cinder import test
from cinder.tests.unit.consistencygroup.fake_cgsnapshot import (
fake_cgsnapshot_obj as fake_cgsnapshot)
from cinder.tests.unit.consistencygroup.fake_consistencygroup import (
fake_consistencyobject_obj as fake_cgroup)
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.fake_snapshot import fake_snapshot_obj as fake_snapshot
from cinder.tests.unit.fake_volume import fake_volume_obj as fake_volume
from cinder.volume import configuration as conf
from cinder.volume.drivers.nexenta.ns5 import iscsi
from cinder.volume.drivers.nexenta.ns5 import jsonrpc
class TestNexentaISCSIDriver(test.TestCase):
def setUp(self):
    """Build a fully mocked driver configuration and instantiate the driver."""
    super(TestNexentaISCSIDriver, self).setUp()
    self.ctxt = context.get_admin_context()
    self.cfg = mock.Mock(spec=conf.Configuration)
    self.cfg.volume_backend_name = 'nexenta_iscsi'
    self.cfg.nexenta_group_snapshot_template = 'group-snapshot-%s'
    self.cfg.nexenta_origin_snapshot_template = 'origin-snapshot-%s'
    self.cfg.nexenta_dataset_description = ''
    self.cfg.nexenta_host = '1.1.1.1'
    self.cfg.nexenta_user = 'admin'
    self.cfg.nexenta_password = '<PASSWORD>'
    self.cfg.nexenta_rest_port = 8443
    self.cfg.nexenta_use_https = False
    self.cfg.nexenta_iscsi_target_portal_port = 3260
    self.cfg.nexenta_target_prefix = 'iqn:cinder'
    self.cfg.nexenta_target_group_prefix = 'cinder'
    self.cfg.nexenta_ns5_blocksize = 32
    self.cfg.nexenta_sparse = True
    self.cfg.nexenta_lu_writebackcache_disabled = True
    self.cfg.nexenta_dataset_compression = 'on'
    self.cfg.nexenta_dataset_dedup = 'off'
    self.cfg.reserved_percentage = 20
    self.cfg.nexenta_host_group_prefix = 'hg'
    # Originally assigned twice ('cinder' then 'pool'); only the later
    # value ever took effect, so the dead first assignment was removed.
    self.cfg.nexenta_volume = 'pool'
    # Originally assigned twice with the same value; set once.
    self.cfg.driver_ssl_cert_verify = False
    self.cfg.nexenta_luns_per_target = 20
    self.cfg.nexenta_iscsi_target_portals = '1.1.1.1:3260,2.2.2.2:3260'
    self.cfg.nexenta_iscsi_target_host_group = 'all'
    self.cfg.nexenta_rest_address = '1.1.1.1'
    self.cfg.nexenta_rest_backoff_factor = 1
    self.cfg.nexenta_rest_retry_count = 3
    self.cfg.nexenta_rest_connect_timeout = 1
    self.cfg.nexenta_rest_read_timeout = 1
    self.cfg.nexenta_volume_group = 'vg'
    self.cfg.safe_get = self.fake_safe_get
    self.nef_mock = mock.Mock()
    self.mock_object(jsonrpc, 'NefRequest',
                     return_value=self.nef_mock)
    self.drv = iscsi.NexentaISCSIDriver(
        configuration=self.cfg)
    self.drv.db = db
    self.drv.do_setup(self.ctxt)
def fake_safe_get(self, key):
    """Mimic Configuration.safe_get: unknown options yield None."""
    return getattr(self.cfg, key, None)
# Deterministic stand-in for uuid.uuid4. Takes no `self` on purpose: it is
# installed via @mock.patch('uuid.uuid4', fake_uuid4) below, which calls it
# with no arguments.
def fake_uuid4():
    return uuid.UUID('38d18a48-b791-4046-b523-a84aad966310')
def test_do_setup(self):
    # do_setup() should complete without raising and return None.
    self.assertIsNone(self.drv.do_setup(self.ctxt))
# Covers three paths: existing volume group + online service (ok), missing
# volume group that gets created (ok), and offline iscsit service (raises).
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefServices.get')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumeGroups.create')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumeGroups.get')
def test_check_for_setup_error(self, volume_group_get,
                               volume_group_create,
                               service_get):
    path = self.drv.root_path
    bs = self.cfg.nexenta_ns5_blocksize * units.Ki
    name = 'iscsit'
    state = 'online'
    volume_group_get.return_value = {'path': path}
    service_get.return_value = {'name': name, 'state': state}
    self.assertIsNone(self.drv.check_for_setup_error())
    volume_group_get.assert_called_with(path)
    service_get.assert_called_with(name)
    # Missing volume group -> driver must create it with the configured
    # block size.
    volume_group_get.side_effect = jsonrpc.NefException({
        'message': 'Failed to open dataset',
        'code': 'ENOENT'
    })
    volume_group_create.return_value = {}
    self.assertIsNone(self.drv.check_for_setup_error())
    volume_group_get.assert_called_with(path)
    payload = {'path': path, 'volumeBlockSize': bs}
    volume_group_create.assert_called_with(payload)
    service_get.assert_called_with(name)
    # Offline iscsit service -> setup error.
    state = 'offline'
    volume_group_get.return_value = {'path': path}
    service_get.return_value = {'name': name, 'state': state}
    self.assertRaises(jsonrpc.NefException,
                      self.drv.check_for_setup_error)
# create_volume must send path, byte size, block size, compression and
# sparseness from the configuration to NefVolumes.create.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.create')
def test_create_volume(self, create_volume):
    volume = fake_volume(self.ctxt)
    self.assertIsNone(self.drv.create_volume(volume))
    path = self.drv._get_volume_path(volume)
    size = volume['size'] * units.Gi
    bs = self.cfg.nexenta_ns5_blocksize * units.Ki
    payload = {
        'path': path,
        'volumeSize': size,
        'volumeBlockSize': bs,
        'compressionMode': self.cfg.nexenta_dataset_compression,
        'sparseVolume': self.cfg.nexenta_sparse
    }
    create_volume.assert_called_with(payload)
# delete_volume must delete the dataset together with its snapshots.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.delete')
def test_delete_volume(self, delete_volume):
    volume = fake_volume(self.ctxt)
    self.assertIsNone(self.drv.delete_volume(volume))
    path = self.drv._get_volume_path(volume)
    payload = {'snapshots': True}
    delete_volume.assert_called_with(path, payload)
# extend_volume must set the new size (in bytes) on the existing dataset.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.set')
def test_extend_volume(self, extend_volume):
    volume = fake_volume(self.ctxt)
    size = volume['size'] * 2
    self.assertIsNone(self.drv.extend_volume(volume, size))
    path = self.drv._get_volume_path(volume)
    size = size * units.Gi
    payload = {'volumeSize': size}
    extend_volume.assert_called_with(path, payload)
# delete_snapshot must issue a deferred deletion for the snapshot path.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.delete')
def test_delete_snapshot(self, delete_snapshot):
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    delete_snapshot.return_value = {}
    self.assertIsNone(self.drv.delete_snapshot(snapshot))
    path = self.drv._get_snapshot_path(snapshot)
    payload = {'defer': True}
    delete_snapshot.assert_called_with(path, payload)
def test_snapshot_revert_use_temp_snapshot(self):
    """The driver reverts natively, so no temporary snapshot is used."""
    result = self.drv.snapshot_revert_use_temp_snapshot()
    self.assertEqual(False, result)
# revert_to_snapshot must roll the volume dataset back to the named snapshot.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.rollback')
def test_revert_to_snapshot(self, rollback_volume):
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    rollback_volume.return_value = {}
    self.assertIsNone(
        self.drv.revert_to_snapshot(self.ctxt, volume, snapshot)
    )
    path = self.drv._get_volume_path(volume)
    payload = {'snapshot': snapshot['name']}
    rollback_volume.assert_called_with(path, payload)
# Clone = temp origin snapshot + create-from-snapshot; also verifies that
# failures from either step are propagated as NefException.
@mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.'
            'NexentaISCSIDriver.delete_snapshot')
@mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.'
            'NexentaISCSIDriver.create_volume_from_snapshot')
@mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.'
            'NexentaISCSIDriver.create_snapshot')
def test_create_cloned_volume(self, create_snapshot, create_volume,
                              delete_snapshot):
    volume = fake_volume(self.ctxt)
    clone_spec = {'id': fake.VOLUME2_ID}
    clone = fake_volume(self.ctxt, **clone_spec)
    create_snapshot.return_value = {}
    create_volume.return_value = {}
    delete_snapshot.return_value = {}
    self.assertIsNone(self.drv.create_cloned_volume(clone, volume))
    # The intermediate snapshot is named from the origin template and the
    # clone's id.
    snapshot = {
        'name': self.drv.origin_snapshot_template % clone['id'],
        'volume_id': volume['id'],
        'volume_name': volume['name'],
        'volume_size': volume['size']
    }
    create_snapshot.assert_called_with(snapshot)
    create_volume.assert_called_with(clone, snapshot)
    create_volume.side_effect = jsonrpc.NefException({
        'message': 'Failed to create volume',
        'code': 'EBUSY'
    })
    self.assertRaises(jsonrpc.NefException,
                      self.drv.create_cloned_volume,
                      clone, volume)
    create_snapshot.side_effect = jsonrpc.NefException({
        'message': 'Failed to open dataset',
        'code': 'ENOENT'
    })
    self.assertRaises(jsonrpc.NefException,
                      self.drv.create_cloned_volume,
                      clone, volume)
# create_snapshot must create a snapshot at the computed snapshot path.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.create')
def test_create_snapshot(self, create_snapshot):
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    create_snapshot.return_value = {}
    self.assertIsNone(self.drv.create_snapshot(snapshot))
    path = self.drv._get_snapshot_path(snapshot)
    payload = {'path': path}
    create_snapshot.assert_called_with(payload)
# create_volume_from_snapshot must clone the snapshot to the new path and
# then grow the clone to the requested size.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver.extend_volume')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.clone')
def test_create_volume_from_snapshot(self, clone_snapshot,
                                     extend_volume):
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    clone_size = 10
    clone_spec = {
        'id': fake.VOLUME2_ID,
        'size': clone_size
    }
    clone = fake_volume(self.ctxt, **clone_spec)
    snapshot_path = self.drv._get_snapshot_path(snapshot)
    clone_path = self.drv._get_volume_path(clone)
    clone_snapshot.return_value = {}
    extend_volume.return_value = None
    self.assertIsNone(
        self.drv.create_volume_from_snapshot(clone, snapshot)
    )
    clone_payload = {'targetPath': clone_path}
    clone_snapshot.assert_called_with(snapshot_path, clone_payload)
    extend_volume.assert_called_with(clone, clone_size)
# Exercises the multipath attach path: an existing LUN mapping is reused
# and the returned connection info fans out over all configured portals.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefLunMappings.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._create_target_group')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._create_target')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._target_group_props')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_host_portals')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_host_group')
@mock.patch('uuid.uuid4', fake_uuid4)
def test_initialize_connection(self, get_host_group, get_host_portals,
                               get_target_group_props, create_target,
                               create_target_group, list_mappings):
    volume = fake_volume(self.ctxt)
    host_iqn = 'iqn:cinder-client'
    target_iqn = 'iqn:cinder-target'
    connector = {'initiator': host_iqn, 'multipath': True}
    host_group = 'cinder-host-group'
    target_group = 'cinder-target-group'
    target_portals = self.cfg.nexenta_iscsi_target_portals.split(',')
    get_host_group.return_value = host_group
    get_host_portals.return_value = {
        target_iqn: target_portals
    }
    list_mappings.return_value = [{
        'id': '309F9B9013CF627A00000000',
        'lun': 0,
        'hostGroup': host_group,
        'targetGroup': target_group
    }]
    get_target_group_props.return_value = {
        target_iqn: target_portals
    }
    create_target.return_value = {}
    create_target_group.return_value = {}
    result = self.drv.initialize_connection(volume, connector)
    # One LUN/IQN entry per portal is expected for multipath.
    expected = {
        'driver_volume_type': 'iscsi',
        'data': {
            'target_discovered': False,
            'encrypted': False,
            'qos_specs': None,
            'target_luns': [0] * len(target_portals),
            'access_mode': 'rw',
            'volume_id': volume['id'],
            'target_portals': target_portals,
            'target_iqns': [target_iqn] * len(target_portals)
        }
    }
    self.assertEqual(expected, result)
# terminate_connection must drop the LUN mapping for the connector's host
# group and return an empty iscsi payload.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefLunMappings.delete')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefLunMappings.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_host_group')
def test_terminate_connection(self, get_host_group,
                              list_mappings, delete_mapping):
    volume = fake_volume(self.ctxt)
    host_group = 'cinder-host-group'
    target_group = 'cinder-target-group'
    connector = {'initiator': 'iqn:test'}
    get_host_group.return_value = host_group
    list_mappings.return_value = [{
        'id': '309F9B9013CF627A00000000',
        'lun': 0,
        'hostGroup': host_group,
        'targetGroup': target_group
    }]
    delete_mapping.return_value = {}
    expected = {'driver_volume_type': 'iscsi', 'data': {}}
    result = self.drv.terminate_connection(volume, connector)
    self.assertEqual(expected, result)
def test_create_export(self):
    # Export management is a no-op for this driver.
    volume = fake_volume(self.ctxt)
    connector = {'initiator': 'iqn:test'}
    self.assertIsNone(
        self.drv.create_export(self.ctxt, volume, connector)
    )
def test_ensure_export(self):
    # Export management is a no-op for this driver.
    volume = fake_volume(self.ctxt)
    self.assertIsNone(
        self.drv.ensure_export(self.ctxt, volume)
    )
def test_remove_export(self):
    # Export management is a no-op for this driver.
    volume = fake_volume(self.ctxt)
    self.assertIsNone(
        self.drv.remove_export(self.ctxt, volume)
    )
# get_volume_stats(refresh=True) must query the volume group capacity
# fields and return the driver's cached stats dict.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumeGroups.get')
def test_get_volume_stats(self, get_volume_group):
    available = 100
    used = 75
    get_volume_group.return_value = {
        'bytesAvailable': available * units.Gi,
        'bytesUsed': used * units.Gi
    }
    result = self.drv.get_volume_stats(True)
    payload = {'fields': 'bytesAvailable,bytesUsed'}
    get_volume_group.assert_called_with(self.drv.root_path, payload)
    self.assertEqual(self.drv._stats, result)
# _update_volume_stats must translate volume-group capacity and driver
# configuration into the full Cinder stats dictionary.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumeGroups.get')
def test_update_volume_stats(self, get_volume_group):
    available = 8
    used = 2
    get_volume_group.return_value = {
        'bytesAvailable': available * units.Gi,
        'bytesUsed': used * units.Gi
    }
    location_info = '%(driver)s:%(host)s:%(pool)s/%(group)s' % {
        'driver': self.drv.__class__.__name__,
        'host': self.cfg.nexenta_host,
        'pool': self.cfg.nexenta_volume,
        'group': self.cfg.nexenta_volume_group,
    }
    expected = {
        'vendor_name': 'Nexenta',
        'dedup': self.cfg.nexenta_dataset_dedup,
        'compression': self.cfg.nexenta_dataset_compression,
        'description': self.cfg.nexenta_dataset_description,
        'driver_version': self.drv.VERSION,
        'storage_protocol': 'iSCSI',
        'sparsed_volumes': self.cfg.nexenta_sparse,
        'total_capacity_gb': used + available,
        'free_capacity_gb': available,
        'reserved_percentage': self.cfg.reserved_percentage,
        'QoS_support': False,
        'multiattach': True,
        'consistencygroup_support': True,
        'consistent_group_snapshot_enabled': True,
        'volume_backend_name': self.cfg.volume_backend_name,
        'location_info': location_info,
        'iscsi_target_portal_port': (
            self.cfg.nexenta_iscsi_target_portal_port),
        'nef_url': self.cfg.nexenta_rest_address,
        'nef_port': self.cfg.nexenta_rest_port
    }
    self.assertIsNone(self.drv._update_volume_stats())
    self.assertEqual(expected, self.drv._stats)
def test__get_volume_path(self):
    # Volume path is '<pool>/<volume group>/<volume name>'.
    volume = fake_volume(self.ctxt)
    result = self.drv._get_volume_path(volume)
    expected = '%s/%s/%s' % (self.cfg.nexenta_volume,
                             self.cfg.nexenta_volume_group,
                             volume['name'])
    self.assertEqual(expected, result)
def test__get_snapshot_path(self):
    # Snapshot path is '<pool>/<volume group>/<volume name>@<snapshot name>'.
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    result = self.drv._get_snapshot_path(snapshot)
    expected = '%s/%s/%s@%s' % (self.cfg.nexenta_volume,
                                self.cfg.nexenta_volume_group,
                                snapshot['volume_name'],
                                snapshot['name'])
    self.assertEqual(expected, result)
def test__get_target_group_name(self):
    # The target-group name mirrors the target IQN with the group prefix.
    target_iqn = '%s-test' % self.cfg.nexenta_target_prefix
    result = self.drv._get_target_group_name(target_iqn)
    expected = '%s-test' % self.cfg.nexenta_target_group_prefix
    self.assertEqual(expected, result)
def test__get_target_name(self):
    # Inverse of _get_target_group_name: group name -> target IQN.
    target_group = '%s-test' % self.cfg.nexenta_target_group_prefix
    result = self.drv._get_target_name(target_group)
    expected = '%s-test' % self.cfg.nexenta_target_prefix
    self.assertEqual(expected, result)
# _get_host_addresses must strip the CIDR suffix from each static address.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefNetAddresses.list')
def test__get_host_addresses(self, list_addresses):
    expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
    return_value = []
    for address in expected:
        return_value.append({
            'addressType': 'static',
            'address': '%s/24' % address
        })
    list_addresses.return_value = return_value
    result = self.drv._get_host_addresses()
    self.assertEqual(expected, result)
# Only addresses listed in nexenta_iscsi_target_portals (1.1.1.1, 2.2.2.2)
# become portals; 3.3.3.3 is filtered out.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_host_addresses')
def test__get_host_portals(self, list_addresses):
    list_addresses.return_value = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
    expected = ['1.1.1.1:3260', '2.2.2.2:3260']
    result = self.drv._get_host_portals()
    self.assertEqual(expected, result)
# _target_group_props must map each member target IQN of the group to the
# host portals it serves.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefTargets.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefTargetsGroups.list')
def test__target_group_props(self, list_target_groups, list_targets):
    host_portals = ['1.1.1.1:3260', '2.2.2.2:3260']
    target_group = 'cinder-test'
    list_target_groups.return_value = [{
        'name': target_group,
        'members': [
            'iqn:cinder-test'
        ]
    }]
    list_targets.return_value = [{
        'name': 'iqn:cinder-test',
        'portals': [
            {
                'address': '1.1.1.1',
                'port': 3260
            },
            {
                'address': '2.2.2.2',
                'port': 3260
            }
        ]
    }]
    expected = {'iqn:cinder-test': host_portals}
    result = self.drv._target_group_props(target_group, host_portals)
    self.assertEqual(expected, result)
# _create_target_group must create a group with the given name and members.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefTargetsGroups.create')
def test__create_target_group(self, create_target_group):
    name = 'name'
    members = ['a', 'b', 'c']
    create_target_group.return_value = {}
    self.assertIsNone(self.drv._create_target_group(name, members))
    payload = {'name': name, 'members': members}
    create_target_group.assert_called_with(payload)
# _update_target_group must replace the member list of an existing group.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefTargetsGroups.set')
def test__update_target_group(self, update_target_group):
    name = 'name'
    members = ['a', 'b', 'c']
    update_target_group.return_value = {}
    self.assertIsNone(self.drv._update_target_group(name, members))
    payload = {'members': members}
    update_target_group.assert_called_with(name, payload)
# _delete_lun_mapping must delete the mapping by its id.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefLunMappings.delete')
def test__delete_lun_mapping(self, delete_mapping):
    name = 'name'
    delete_mapping.return_value = {}
    self.assertIsNone(self.drv._delete_lun_mapping(name))
    delete_mapping.assert_called_with(name)
# _create_target must convert 'host:port' portal strings into the
# structured {'address', 'port'} payload.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefTargets.create')
def test__create_target(self, create_target):
    name = 'name'
    portals = ['1.1.1.1:3260', '2.2.2.2:3260']
    create_target.return_value = {}
    self.assertIsNone(self.drv._create_target(name, portals))
    payload = {
        'name': name,
        'portals': [
            {
                'address': '1.1.1.1',
                'port': 3260
            },
            {
                'address': '2.2.2.2',
                'port': 3260
            }
        ]
    }
    create_target.assert_called_with(payload)
# _get_host_group must return the name of the group containing the member.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefHostGroups.list')
def test__get_host_group(self, get_hostgroup):
    member = 'member1'
    get_hostgroup.return_value = [
        {
            'name': 'name1',
            'members': [
                'member1',
                'member2',
                'member3'
            ]
        },
        {
            'name': 'name2',
            'members': [
                'member4',
                'member5',
                'member6'
            ]
        }
    ]
    expected = 'name1'
    result = self.drv._get_host_group(member)
    self.assertEqual(expected, result)
# _create_host_group must create a group with the given name and members.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefHostGroups.create')
def test__create_host_group(self, create_host_group):
    name = 'name'
    members = ['a', 'b', 'c']
    create_host_group.return_value = {}
    self.assertIsNone(self.drv._create_host_group(name, members))
    payload = {'name': name, 'members': members}
    create_host_group.assert_called_with(payload)
def test__s2d(self):
    # _s2d: 'host:port' strings -> {'address', 'port'} dicts.
    portals = ['1.1.1.1:3260', '2.2.2.2:3260']
    expected = [
        {
            'address': '1.1.1.1',
            'port': 3260
        },
        {
            'address': '2.2.2.2',
            'port': 3260
        }
    ]
    result = self.drv._s2d(portals)
    self.assertEqual(expected, result)
def test__d2s(self):
    # _d2s: inverse of _s2d, dicts -> 'host:port' strings.
    portals = [
        {
            'address': '1.1.1.1',
            'port': 3260
        },
        {
            'address': '2.2.2.2',
            'port': 3260
        }
    ]
    expected = ['1.1.1.1:3260', '2.2.2.2:3260']
    result = self.drv._d2s(portals)
    self.assertEqual(expected, result)
def test_create_consistencygroup(self):
    # Group creation requires no backend action; an empty model is returned.
    cgroup = fake_cgroup(self.ctxt)
    result = self.drv.create_consistencygroup(self.ctxt, cgroup)
    expected = {}
    self.assertEqual(expected, result)
# Deleting a group deletes each member volume; returns (model, volumes).
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver.delete_volume')
def test_delete_consistencygroup(self, delete_volume):
    cgroup = fake_cgroup(self.ctxt)
    volume1 = fake_volume(self.ctxt)
    volume2_spec = {'id': fake.VOLUME2_ID}
    volume2 = fake_volume(self.ctxt, **volume2_spec)
    volumes = [volume1, volume2]
    delete_volume.return_value = {}
    result = self.drv.delete_consistencygroup(self.ctxt,
                                              cgroup,
                                              volumes)
    expected = ({}, [])
    self.assertEqual(expected, result)
def test_update_consistencygroup(self):
    # Membership updates are metadata-only; no backend calls expected.
    cgroup = fake_cgroup(self.ctxt)
    volume1 = fake_volume(self.ctxt)
    volume2_spec = {'id': fake.VOLUME2_ID}
    volume2 = fake_volume(self.ctxt, **volume2_spec)
    volume3_spec = {'id': fake.VOLUME3_ID}
    volume3 = fake_volume(self.ctxt, **volume3_spec)
    volume4_spec = {'id': fake.VOLUME4_ID}
    volume4 = fake_volume(self.ctxt, **volume4_spec)
    add_volumes = [volume1, volume2]
    remove_volumes = [volume3, volume4]
    result = self.drv.update_consistencygroup(self.ctxt,
                                              cgroup,
                                              add_volumes,
                                              remove_volumes)
    expected = ({}, [], [])
    self.assertEqual(expected, result)
# Group snapshot flow: one recursive snapshot of the volume group, each
# per-volume snapshot renamed to its Cinder name, then the group-level
# snapshot deleted (deferred, recursive).
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.delete')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.rename')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.create')
def test_create_cgsnapshot(self, create_snapshot,
                           rename_snapshot,
                           delete_snapshot):
    cgsnapshot = fake_cgsnapshot(self.ctxt)
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    snapshots = [snapshot]
    cgsnapshot_name = (
        self.cfg.nexenta_group_snapshot_template % cgsnapshot['id'])
    cgsnapshot_path = '%s@%s' % (self.drv.root_path, cgsnapshot_name)
    snapshot_path = '%s/%s@%s' % (self.drv.root_path,
                                  snapshot['volume_name'],
                                  cgsnapshot_name)
    create_snapshot.return_value = {}
    rename_snapshot.return_value = {}
    delete_snapshot.return_value = {}
    result = self.drv.create_cgsnapshot(self.ctxt,
                                        cgsnapshot,
                                        snapshots)
    create_payload = {'path': cgsnapshot_path, 'recursive': True}
    create_snapshot.assert_called_with(create_payload)
    rename_payload = {'newName': snapshot['name']}
    rename_snapshot.assert_called_with(snapshot_path, rename_payload)
    delete_payload = {'defer': True, 'recursive': True}
    delete_snapshot.assert_called_with(cgsnapshot_path, delete_payload)
    expected = ({}, [])
    self.assertEqual(expected, result)
# Deleting a group snapshot deletes each member snapshot individually.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver.delete_snapshot')
def test_delete_cgsnapshot(self, delete_snapshot):
    cgsnapshot = fake_cgsnapshot(self.ctxt)
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    snapshots = [snapshot]
    delete_snapshot.return_value = {}
    result = self.drv.delete_cgsnapshot(self.ctxt,
                                        cgsnapshot,
                                        snapshots)
    delete_snapshot.assert_called_with(snapshot)
    expected = ({}, [])
    self.assertEqual(expected, result)
# Creating a group from a cgsnapshot clones each member snapshot into the
# corresponding new volume.
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver.create_volume_from_snapshot')
def test_create_consistencygroup_from_src_snapshots(self, create_volume):
    cgroup = fake_cgroup(self.ctxt)
    cgsnapshot = fake_cgsnapshot(self.ctxt)
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    snapshots = [snapshot]
    clone_spec = {'id': fake.VOLUME2_ID}
    clone = fake_volume(self.ctxt, **clone_spec)
    clones = [clone]
    create_volume.return_value = {}
    result = self.drv.create_consistencygroup_from_src(self.ctxt, cgroup,
                                                       clones, cgsnapshot,
                                                       snapshots, None,
                                                       None)
    create_volume.assert_called_with(clone, snapshot)
    expected = ({}, [])
    self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.delete')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver.create_volume_from_snapshot')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.create')
def test_create_consistencygroup_from_src_volumes(self,
                                                  create_snapshot,
                                                  create_volume,
                                                  delete_snapshot):
    """CG cloned from live source volumes via a temporary recursive snapshot.

    Note: mock arguments arrive bottom-up relative to the decorator stack,
    hence create_snapshot first and delete_snapshot last.
    """
    src_cgroup = fake_cgroup(self.ctxt)
    dst_cgroup_spec = {'id': fake.CONSISTENCY_GROUP2_ID}
    dst_cgroup = fake_cgroup(self.ctxt, **dst_cgroup_spec)
    src_volume = fake_volume(self.ctxt)
    src_volumes = [src_volume]
    dst_volume_spec = {'id': fake.VOLUME2_ID}
    dst_volume = fake_volume(self.ctxt, **dst_volume_spec)
    dst_volumes = [dst_volume]
    create_snapshot.return_value = {}
    create_volume.return_value = {}
    delete_snapshot.return_value = {}
    # cgsnapshot/snapshots are None: the volume-source path is exercised.
    result = self.drv.create_consistencygroup_from_src(self.ctxt,
                                                       dst_cgroup,
                                                       dst_volumes,
                                                       None, None,
                                                       src_cgroup,
                                                       src_volumes)
    snapshot_name = (
        self.cfg.nexenta_origin_snapshot_template % dst_cgroup['id'])
    snapshot_path = '%s@%s' % (self.drv.root_path, snapshot_name)
    # A recursive snapshot of the whole pool root is taken...
    create_payload = {'path': snapshot_path, 'recursive': True}
    create_snapshot.assert_called_with(create_payload)
    snapshot = {
        'name': snapshot_name,
        'volume_id': src_volume['id'],
        'volume_name': src_volume['name'],
        'volume_size': src_volume['size']
    }
    # ...each destination volume is cloned from it...
    create_volume.assert_called_with(dst_volume, snapshot)
    # ...and the temporary snapshot is deferred-deleted afterwards.
    delete_payload = {'defer': True, 'recursive': True}
    delete_snapshot.assert_called_with(snapshot_path, delete_payload)
    expected = ({}, [])
    self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.list')
def test__get_existing_volume(self, list_volumes):
    """_get_existing_volume resolves a manage ref to name/path/size (GiB)."""
    volume = fake_volume(self.ctxt)
    parent = self.drv.root_path
    name = volume['name']
    size = volume['size']
    path = self.drv._get_volume_path(volume)
    # NEF reports volumeSize in bytes; driver converts back to GiB.
    list_volumes.return_value = [{
        'name': name,
        'path': path,
        'volumeSize': size * units.Gi
    }]
    result = self.drv._get_existing_volume({'source-name': name})
    payload = {
        'parent': parent,
        'fields': 'name,path,volumeSize',
        'name': name
    }
    list_volumes.assert_called_with(payload)
    expected = {
        'name': name,
        'path': path,
        'size': size
    }
    self.assertEqual(expected, result)
def test__check_already_managed_snapshot(self):
    """A fresh fake snapshot must not be reported as already managed."""
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    result = self.drv._check_already_managed_snapshot(snapshot)
    expected = False
    self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.list')
def test__get_existing_snapshot(self, list_snapshots):
    """_get_existing_snapshot resolves a source-name ref to full metadata."""
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    name = snapshot['name']
    path = self.drv._get_snapshot_path(snapshot)
    parent = self.drv._get_volume_path(volume)
    list_snapshots.return_value = [{
        'name': name,
        'path': path
    }]
    # 'payload' is first the manage reference passed to the driver, and
    # is then rebound below as the expected NEF list() payload.
    payload = {'source-name': name}
    result = self.drv._get_existing_snapshot(snapshot, payload)
    payload = {
        'parent': parent,
        'fields': 'name,path',
        'recursive': False,
        'name': name
    }
    list_snapshots.assert_called_with(payload)
    expected = {
        'name': name,
        'path': path,
        'volume_name': volume['name'],
        'volume_size': volume['size']
    }
    self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.rename')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefLunMappings.list')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_existing_volume')
def test_manage_existing(self, get_existing_volume,
                         list_mappings, rename_volume):
    """manage_existing renames an unmapped backend volume into place."""
    existing_volume = fake_volume(self.ctxt)
    manage_volume_spec = {'id': fake.VOLUME2_ID}
    manage_volume = fake_volume(self.ctxt, **manage_volume_spec)
    existing_name = existing_volume['name']
    existing_path = self.drv._get_volume_path(existing_volume)
    existing_size = existing_volume['size']
    manage_path = self.drv._get_volume_path(manage_volume)
    get_existing_volume.return_value = {
        'name': existing_name,
        'path': existing_path,
        'size': existing_size
    }
    # No LUN mappings: the volume is safe to take over.
    list_mappings.return_value = []
    payload = {'source-name': existing_name}
    self.assertIsNone(self.drv.manage_existing(manage_volume, payload))
    get_existing_volume.assert_called_with(payload)
    payload = {'volume': existing_path}
    list_mappings.assert_called_with(payload)
    # The backend volume is renamed to the Cinder-managed path.
    payload = {'newPath': manage_path}
    rename_volume.assert_called_with(existing_path, payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.'
            'NexentaISCSIDriver._get_existing_volume')
def test_manage_existing_get_size(self, get_volume):
    """manage_existing_get_size returns the resolved backend size."""
    volume = fake_volume(self.ctxt)
    name = volume['name']
    size = volume['size']
    path = self.drv._get_volume_path(volume)
    get_volume.return_value = {
        'name': name,
        'path': path,
        'size': size
    }
    payload = {'source-name': name}
    result = self.drv.manage_existing_get_size(volume, payload)
    expected = size
    self.assertEqual(expected, result)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefVolumes.list')
def test_get_manageable_volumes(self, list_volumes):
    """An already-managed volume is reported as not safe to manage."""
    volume = fake_volume(self.ctxt)
    volumes = [volume]
    name = volume['name']
    size = volume['size']
    path = self.drv._get_volume_path(volume)
    guid = 12345
    parent = self.drv.root_path
    list_volumes.return_value = [{
        'name': name,
        'path': path,
        'guid': guid,
        'volumeSize': size * units.Gi
    }]
    # marker=None, limit=1, offset=0, sorted by size ascending.
    result = self.drv.get_manageable_volumes(volumes, None, 1,
                                             0, 'size', 'asc')
    payload = {
        'parent': parent,
        'fields': 'name,guid,path,volumeSize',
        'recursive': False
    }
    list_volumes.assert_called_with(payload)
    expected = [{
        'cinder_id': volume['id'],
        'extra_info': None,
        'reason_not_safe': 'Volume already managed',
        'reference': {
            'source-guid': guid,
            'source-name': volume['name']
        },
        'safe_to_manage': False,
        'size': volume['size']
    }]
    self.assertEqual(expected, result)
def test_unmanage(self):
    """unmanage is a no-op for this driver and returns None."""
    volume = fake_volume(self.ctxt)
    self.assertIsNone(self.drv.unmanage(volume))
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.rename')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_existing_snapshot')
def test_manage_existing_snapshot(self, get_existing_snapshot,
                                  rename_snapshot):
    """manage_existing_snapshot renames the backend snapshot into place."""
    volume = fake_volume(self.ctxt)
    existing_snapshot = fake_snapshot(self.ctxt)
    existing_snapshot.volume = volume
    manage_snapshot_spec = {'id': fake.SNAPSHOT2_ID}
    manage_snapshot = fake_snapshot(self.ctxt, **manage_snapshot_spec)
    manage_snapshot.volume = volume
    existing_name = existing_snapshot['name']
    manage_name = manage_snapshot['name']
    volume_name = volume['name']
    volume_size = volume['size']
    existing_path = self.drv._get_snapshot_path(existing_snapshot)
    get_existing_snapshot.return_value = {
        'name': existing_name,
        'path': existing_path,
        'volume_name': volume_name,
        'volume_size': volume_size
    }
    rename_snapshot.return_value = {}
    payload = {'source-name': existing_name}
    self.assertIsNone(
        self.drv.manage_existing_snapshot(manage_snapshot, payload)
    )
    get_existing_snapshot.assert_called_with(manage_snapshot, payload)
    # The backend snapshot is renamed to the Cinder-managed name.
    payload = {'newName': manage_name}
    rename_snapshot.assert_called_with(existing_path, payload)
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'iscsi.NexentaISCSIDriver._get_existing_snapshot')
def test_manage_existing_snapshot_get_size(self, get_snapshot):
    """manage_existing_snapshot_get_size returns the parent volume size."""
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    snapshot_name = snapshot['name']
    volume_name = volume['name']
    volume_size = volume['size']
    snapshot_path = self.drv._get_snapshot_path(snapshot)
    get_snapshot.return_value = {
        'name': snapshot_name,
        'path': snapshot_path,
        'volume_name': volume_name,
        'volume_size': volume_size
    }
    payload = {'source-name': snapshot_name}
    # NOTE(review): the volume (not the snapshot) is passed as the first
    # argument here — confirm this matches the driver's signature.
    result = self.drv.manage_existing_snapshot_get_size(volume, payload)
    expected = volume['size']
    self.assertEqual(expected, result)
@mock.patch('cinder.objects.VolumeList.get_all_by_host')
@mock.patch('cinder.volume.drivers.nexenta.ns5.'
            'jsonrpc.NefSnapshots.list')
def test_get_manageable_snapshots(self, list_snapshots, list_volumes):
    """An already-managed snapshot is reported as not safe to manage.

    Mock args are bottom-up: list_snapshots patches NefSnapshots.list,
    list_volumes patches VolumeList.get_all_by_host.
    """
    volume = fake_volume(self.ctxt)
    volumes = [volume]
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    snapshots = [snapshot]
    guid = 12345
    name = snapshot['name']
    path = self.drv._get_snapshot_path(snapshot)
    parent = self.drv._get_volume_path(volume)
    # Empty hprService/snaplistId mark the snapshot as not replication-
    # or snaplist-owned.
    list_snapshots.return_value = [{
        'name': name,
        'path': path,
        'guid': guid,
        'parent': parent,
        'hprService': '',
        'snaplistId': ''
    }]
    list_volumes.return_value = volumes
    result = self.drv.get_manageable_snapshots(snapshots, None, 1,
                                               0, 'size', 'asc')
    payload = {
        'parent': self.drv.root_path,
        'fields': 'name,guid,path,parent,hprService,snaplistId',
        'recursive': True
    }
    list_snapshots.assert_called_with(payload)
    expected = [{
        'cinder_id': snapshot['id'],
        'extra_info': None,
        'reason_not_safe': 'Snapshot already managed',
        'source_reference': {
            'name': volume['name']
        },
        'reference': {
            'source-guid': guid,
            'source-name': snapshot['name']
        },
        'safe_to_manage': False,
        'size': volume['size']
    }]
    self.assertEqual(expected, result)
def test_unmanage_snapshot(self):
    """unmanage_snapshot is a no-op for this driver and returns None."""
    volume = fake_volume(self.ctxt)
    snapshot = fake_snapshot(self.ctxt)
    snapshot.volume = volume
    self.assertIsNone(self.drv.unmanage_snapshot(snapshot))
|
<reponame>calebsander/RuddockWebsite
import flask
import http
from datetime import datetime # ugh
from decimal import Decimal
from ruddock.resources import Permissions
from ruddock.decorators import login_required, get_args_from_form
from ruddock.modules.budget import blueprint, helpers
from .helpers import PaymentType
@blueprint.route('/')
@login_required(Permissions.BUDGET)
def route_portal():
    """Render the budget portal landing page."""
    template = 'budget_portal.html'
    return flask.render_template(template)
@blueprint.route('/summary')
@login_required(Permissions.BUDGET)
def route_summary():
    """Displays account and budget summaries."""
    # Resolve the requested fiscal year from ?fyear=..., falling back to
    # the current one; is_current flags whether it is the active year.
    fyear, is_current = helpers.select_fyear_info(
        flask.request.args.get("fyear", None)
    )
    fyear_id = fyear["fyear_id"]
    fyear_num = fyear["fyear_num"]
    # (year number, year id) pairs for the dropdown, newest first.
    fyear_options = [(r["fyear_num"], r["fyear_id"]) for r in helpers.get_fyears()]
    fyear_options.sort(key=lambda x: x[0], reverse=True)
    a_summary = helpers.get_account_summary()
    b_summary = helpers.get_budget_summary(fyear_id)
    return flask.render_template('summary.html',
                                 a_summary=a_summary,
                                 b_summary=b_summary,
                                 fyear_num=fyear_num,
                                 is_current=is_current,
                                 fyear_options=fyear_options)
@blueprint.route('/expenses')
@login_required(Permissions.BUDGET)
def route_expenses():
    """Render the list of all recorded expenses."""
    context = {
        'expenses': helpers.get_transactions(),
        'ptypes': PaymentType.get_all(),
    }
    return flask.render_template('expenses.html', **context)
@blueprint.route('/payments')
@login_required(Permissions.BUDGET)
def route_payments():
    """Render the list of all recorded payments."""
    context = {
        'payments': helpers.get_payments(),
        'ptypes': PaymentType.get_all(),
    }
    return flask.render_template('payments.html', **context)
@blueprint.route('/add_expense')
@login_required(Permissions.BUDGET)
def route_add_expense():
    """Provides an interface for submitting an expense."""
    # Resolve the requested fiscal year (same pattern as route_summary).
    fyear, is_current = helpers.select_fyear_info(
        flask.request.args.get("fyear", None)
    )
    fyear_id = fyear["fyear_id"]
    fyear_num = fyear["fyear_num"]
    fyear_options = [(r["fyear_num"], r["fyear_id"]) for r in helpers.get_fyears()]
    fyear_options.sort(key=lambda x: x[0], reverse=True)
    # Get the lists for the dropdown menus
    budgets_list = helpers.get_budget_list(fyear_id)
    payment_types = PaymentType.get_all()
    accounts = helpers.get_accounts()
    payees = helpers.get_payees()
    return flask.render_template('add_expense.html',
                                 budgets=budgets_list,
                                 payment_types=payment_types,
                                 accounts=accounts,
                                 payees=payees,
                                 fyear_options=fyear_options,
                                 fyear_num=fyear_num,
                                 is_current=is_current)
@blueprint.route('/add_expense/submit', methods=['POST'])
@login_required(Permissions.BUDGET)
@get_args_from_form()
def route_submit_expense(budget_id, date_incurred, amount, description,
    payee_id, new_payee, payment_type, account_id, check_no, defer_payment):
    """Sends the expense to the database."""
    # HTML checkboxes arrive with a value or are absent entirely; coerce
    # presence into a proper bool.
    defer_payment = defer_payment is not None
    # Server side validation
    valid_expense = helpers.validate_expense(budget_id, date_incurred, amount,
        description)
    valid_payment = helpers.validate_payment(payment_type,
        account_id, check_no)
    valid_payee = helpers.validate_payee(payee_id, new_payee)
    # Payment details only matter when paying now; the payee only matters
    # when deferring (second tuple element gates each predicate).
    errs = helpers.test_predicates((
        (valid_expense, True, "Invalid expense."),
        (valid_payment, not defer_payment, "Invalid payment."),
        (valid_payee, defer_payment, "Invalid payee." )
    ))
    if errs:
        return flask.redirect(flask.url_for("budget.route_add_expense"))
    transaction = flask.g.db.begin()
    try:
        # Deferred payment: leave the payment ID null (the corresponding
        # payment hasn't been made yet) but require a payee; a brand-new
        # payee is inserted into the database first if needed.
        # Immediate payment: record a new payment and use its ID; the
        # payee is not stored on the expense in that case.
        if defer_payment:
            payee_id = payee_id or helpers.record_new_payee(new_payee)
            payment_id = None
        else:
            date_written = date_incurred  # non-deferred payments are instant
            # Checks post when deposited; everything else posts immediately.
            date_posted = None if payment_type == PaymentType.CHECK.value else date_written
            payee_id = None
            payment_id = helpers.record_payment(
                account_id, payment_type, amount, date_written, date_posted,
                payee_id, check_no)
        # Either way, record the expense
        helpers.record_expense(
            budget_id, date_incurred, description, amount, payment_id, payee_id)
        transaction.commit()
        flask.flash("Expense recorded successfully!")
    except Exception:
        transaction.rollback()
        flask.flash("An unexpected error occurred. Please find an IMSS rep.")
    return flask.redirect(flask.url_for("budget.route_add_expense"))
@blueprint.route('/expenses/<int:expense_id>')
@login_required(Permissions.BUDGET)
def route_show_expense(expense_id):
    """
    Displays an expense and allows you to edit it.

    404s when the expense ID does not exist.
    """
    expense = helpers.get_expense(expense_id)
    if expense is None:
        flask.abort(http.client.NOT_FOUND)
    # Budget options come from the expense's own fiscal year.
    budgets_list = helpers.get_budget_list(expense["fyear_id"])
    payment_types = PaymentType.get_all()
    payees = helpers.get_payees()
    return flask.render_template(
        'edit_expense.html',
        expense=expense,
        budgets=budgets_list,
        payment_types=payment_types,
        payees=payees
    )
@blueprint.route('/expense/edit', methods=['POST'])
@login_required(Permissions.BUDGET)
@get_args_from_form()
def route_edit_expense(expense_id, budget_id, date_incurred, amount, description, payee_id, new_payee):
    """Changes the given expense."""
    expense = helpers.get_expense(expense_id)
    if expense is None:
        flask.abort(http.client.NOT_FOUND)
    existing_payment = expense["payment_id"] is not None
    valid_expense = helpers.validate_expense(budget_id, date_incurred, amount,
        description)
    # NOTE(review): Decimal(amount)/int(payee_id) will raise on empty or
    # malformed form input before validation runs — confirm the form
    # guarantees these fields are present.
    amount_unchanged = expense["cost"] == Decimal(amount)
    payee_unchanged = expense["payee_id"] == int(payee_id)
    # Can't change payment info if there's a linked payment
    # TODO soften this so debit purchases aren't a PITA
    errs = helpers.test_predicates((
        (valid_expense, True, "Invalid expense."),
        (amount_unchanged, existing_payment, "Can't change amount with linked payment."),
        (payee_unchanged, existing_payment, "Can't change payee with linked payment."),
    ))
    if errs:
        return flask.redirect(flask.url_for("budget.route_show_expense", expense_id=expense_id))
    success = helpers.edit_expense(
        expense_id, budget_id, date_incurred, description, amount, payee_id
    )
    if success:
        flask.flash("Success!")
    else:
        flask.flash("Something went wrong during the edit, not sure what.")
    return flask.redirect(flask.url_for("budget.route_show_expense", expense_id=expense_id))
@blueprint.route('/expense/delete', methods=['POST'])
@login_required(Permissions.BUDGET)
@get_args_from_form()
def route_delete_expense(expense_id, budget_id, date_incurred, amount, description, payee_id, new_payee):
    """Deletes the given expense.

    Only the expense_id is used; the other form fields arrive because the
    edit form posts them all.
    """
    expense = helpers.get_expense(expense_id)
    if expense is None:
        flask.abort(http.client.NOT_FOUND)
    existing_payment = expense["payment_id"] is not None
    # Can't delete if there's a linked payment
    # TODO soften this so debit purchases aren't a PITA
    if existing_payment:
        flask.flash("Cannot delete expense if there is a linked payment.")
        return flask.redirect(flask.url_for("budget.route_show_expense", expense_id=expense_id))
    helpers.delete_expense(expense_id)
    flask.flash("Success!")
    return flask.redirect(flask.url_for("budget.route_expenses"))
@blueprint.route('/unpaid')
@login_required(Permissions.BUDGET)
def route_unpaid():
    """
    Displays unpaid expenses, and allows the user to create payments for them.
    """
    # Today's date pre-fills the "date written" field in the template.
    today = datetime.now().strftime("%Y-%m-%d")
    context = {
        'payment_types': PaymentType.get_all(),
        'accounts': helpers.get_accounts(),
        'expense_groups': helpers.get_unpaid_expenses(),
        'today': today,
    }
    return flask.render_template('unpaid.html', **context)
@blueprint.route('/unpaid/submit', methods=['POST'])
@login_required(Permissions.BUDGET)
@get_args_from_form()
def route_submit_unpaid(payee_id, payment_type, account_id, check_no,
    date_written):
    """Sends the payment to the database.

    Records one payment covering all of the payee's unpaid expenses and
    marks them as paid, inside a single DB transaction.
    """
    # The date posted is the same as the date written unless we're using a
    # check — checks only post once deposited, so they start with a null
    # date_posted.
    # BUG FIX: the condition used `!=`, which inverted the behavior and
    # contradicted both the comment above and route_submit_expense.
    date_posted = None if payment_type == PaymentType.CHECK.value else date_written
    # Server side validation
    total = helpers.get_unpaid_amount(payee_id)
    valid_payment = helpers.validate_payment(payment_type, account_id, check_no)
    has_expenses = (total is not None)
    errs = helpers.test_predicates((
        (valid_payment, True, "Invalid payment."),
        (has_expenses, True, "This payee has no expenses to reimburse."),
    ))
    if errs:
        return flask.redirect(flask.url_for("budget.route_unpaid"))
    # We use a transaction to make sure we don't submit halfway.
    transaction = flask.g.db.begin()
    try:
        payment_id = helpers.record_payment(
            account_id, payment_type, total, date_written, date_posted,
            payee_id, check_no)
        helpers.mark_as_paid(payee_id, payment_id)
        transaction.commit()
        flask.flash("Payment recorded successfully!")
    except Exception:
        transaction.rollback()
        flask.flash("An unexpected error occurred. Please find an IMSS rep.")
    return flask.redirect(flask.url_for("budget.route_unpaid"))
@blueprint.route('/checks')
@login_required(Permissions.BUDGET)
def route_checks():
    """Render all checks that have been written but not yet deposited."""
    context = {
        'checks': helpers.get_unposted_payments(),
        'ptypes': PaymentType.get_all(),
    }
    return flask.render_template('checks.html', **context)
@blueprint.route('/checks/submit', methods=['POST'])
@login_required(Permissions.BUDGET)
@get_args_from_form()
def route_process_check(payment_id, date_posted, action):
    """Records a check as deposited ("Post") or cancels it ("Void")."""
    # Server side validation: the payment must still be unposted, and a
    # post action needs a date.
    unposted_ids = [str(x["payment_id"]) for x in helpers.get_unposted_payments()]
    errs = helpers.test_predicates((
        (payment_id in unposted_ids, True, "Not a valid payment ID!"),
        (date_posted != "", action == "Post", "No date entered!" )
    ))
    # If any of the validation failed
    if errs:
        return flask.redirect(flask.url_for("budget.route_checks"))
    # Decide what to do
    if action == "Post":
        helpers.post_payment(payment_id, date_posted)
        flask.flash("Payment successfully posted!")
    elif action == "Void":
        helpers.void_payment(payment_id)
        flask.flash("Payment successfully voided!")
    else:
        flask.flash("Not a legitimate action!")
    return flask.redirect(flask.url_for("budget.route_checks"))
|
<gh_stars>0
"""Scrape locally saved virtual-racing HTML pages for participants, odds,
winner names and winning odds, dumping each list as JSON text files.

Fixes over the original script:
- removed unused imports, including the Windows-only `msilib.schema`
  import that crashed the script on other platforms, plus the unused
  `json.tool.main` and `unittest.mock.patch`;
- the four near-identical scrape loops are factored into helpers;
- paths are built with os.path.join instead of manual '\\' concatenation,
  making the script portable.
"""
import json
import os

from bs4 import BeautifulSoup as bs

mainPath = os.path.dirname(__file__)  # main path to root folder

# Folders holding the saved odds/results pages.
pathToOddDir = os.path.join(mainPath, 'pages', 'odds')
pathToResultsDir = os.path.join(mainPath, 'pages', 'results')

listOdds = [f for f in os.listdir(pathToOddDir) if f.endswith('.html')]
listResults = [f for f in os.listdir(pathToResultsDir) if f.endswith('.html')]

# Output files.
outDir = os.path.join(mainPath, 'out')
out_racers = os.path.join(outDir, 'racers.txt')
out_odds = os.path.join(outDir, 'odds.txt')
out_winner_name = os.path.join(outDir, 'winner_names.txt')
out_winner_odd = os.path.join(outDir, 'winner_odd.txt')


def _load_soup(directory, page):
    """Parse one saved HTML page into a BeautifulSoup tree."""
    with open(os.path.join(directory, page)) as f:
        return bs(f.read(), 'html.parser')


def _collect(directory, pages, extract):
    """Run extract(soup) over every page; pages are separated by '\\n'."""
    items = []
    for page in pages:
        soup = _load_soup(directory, page)
        items.extend(extract(soup))
        items.append('\n')
        print(items)  # progress output, as in the original script
    return items


def _dump(path, items):
    """Write the collected list to *path* as JSON text."""
    with open(path, 'w') as f:
        f.write(json.dumps(items))


def _all_texts(css_class):
    """Extractor: text of every div carrying *css_class*."""
    return lambda soup: [div.get_text()
                         for div in soup.find_all("div", class_=css_class)]


def _first_text(marker_class, value_class):
    """Extractor: text of the page's first *value_class* div, provided a
    *marker_class* div exists (mirrors the original limit=1 loops)."""
    def extract(soup):
        if soup.find_all("div", class_=marker_class, limit=1):
            return [soup.find("div", class_=value_class).get_text()]
        return []
    return extract


# Race participants per odds page.
_dump(out_racers,
      _collect(pathToOddDir, listOdds,
               _all_texts("vr-ParticipantVirtual_Name")))
# Odds per odds page.
_dump(out_odds,
      _collect(pathToOddDir, listOdds,
               _all_texts("vr-ParticipantVirtualOddsOnly gl-Participant_General")))
# Winner name per results page.
_dump(out_winner_name,
      _collect(pathToResultsDir, listResults,
               _first_text("vrr-ParticipantInfo", "vrr-ParticipantInfo_Runner")))
# Winning odds per results page.
_dump(out_winner_odd,
      _collect(pathToResultsDir, listResults,
               _first_text("vrr-OutrightParticipant gl-Market_General-cn1",
                           "vrr-Price")))
"""
oblique.py - Web Services Interface
Copyright 2008-9, <NAME>, inamidst.com
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
import re
import urllib
import willie.web as web
from willie.module import commands, example
# Default wiki page listing "command http://template" service definitions.
definitions = 'https://github.com/nslater/oblique/wiki'

r_item = re.compile(r'(?i)<li>(.*?)</li>')  # <li> entries on that page
r_tag = re.compile(r'<[^>]+>')  # any leftover HTML tag, for stripping
def mappings(uri):
    """Fetch *uri* and parse it into a {command: url_template} dict.

    Each wiki <li> is expected to contain "command template"; entries
    with a non-alphanumeric command or a template that does not start
    with http:// are skipped.
    """
    result = {}
    bytes = web.get(uri)
    for item in r_item.findall(bytes):
        item = r_tag.sub('', item).strip(' \t\r\n')
        if not ' ' in item:
            continue
        command, template = item.split(' ', 1)
        if not command.isalnum():
            continue
        if not template.startswith('http://'):
            continue
        # NOTE(review): this replace looks like a no-op; the upstream
        # module uses .replace('&amp;', '&'). Possibly an entity-decoding
        # artifact in this copy — confirm against upstream oblique.py.
        result[command] = template.replace('&', '&')
    return result
def service(bot, trigger, command, args):
    """Invoke the mapped web service and relay the first line of its reply.

    Substitutes ${args}, ${nick} and ${sender} into the stored URL
    template, requires a text/plain response, and says at most 350
    characters of the first output line.
    """
    t = o.services[command]
    template = t.replace('${args}', urllib.quote(args.encode('utf-8'), ''))
    template = template.replace('${nick}', urllib.quote(trigger.nick, ''))
    uri = template.replace('${sender}', urllib.quote(trigger.sender, ''))
    info = web.head(uri)
    # web.head apparently can return a list (headers first) — take the
    # headers dict in that case.
    if isinstance(info, list):
        info = info[0]
    if not 'text/plain' in info.get('content-type', '').lower():
        return bot.reply("Sorry, the service didn't respond in plain text.")
    bytes = web.get(uri)
    lines = bytes.splitlines()
    if not lines:
        return bot.reply("Sorry, the service didn't respond any output.")
    bot.say(lines[0][:350])
def refresh(bot):
    """Reload the service table; return (count, names newly added)."""
    # The wiki location can be overridden from the bot config.
    if hasattr(bot.config, 'services'):
        services = bot.config.services
    else:
        services = definitions
    # Snapshot the old table first so the added-set can be computed.
    old = o.services
    o.serviceURI = services
    o.services = mappings(o.serviceURI)
    return len(o.services), set(o.services) - set(old)
@commands('o')
@example('.o servicename arg1 arg2 arg3')
def o(bot, trigger):
    """Call a webservice."""
    # '.o urban X' is shorthand for the 'ud' (Urban Dictionary) service.
    if trigger.group(1) == 'urban':
        text = 'ud ' + trigger.group(2)
    else:
        text = trigger.group(2)
    # Lazily load the service table on first use, or on explicit refresh.
    if (not o.services) or (text == 'refresh'):
        length, added = refresh(bot)
    if text == 'refresh':
        msg = 'Okay, found %s services.' % length
        if added:
            msg += ' Added: ' + ', '.join(sorted(added)[:5])
            if len(added) > 5:
                msg += ', &c.'
        return bot.reply(msg)
    if not text:
        return bot.reply('Try %s for details.' % o.serviceURI)
    # Split "command args..." on the first space.
    if ' ' in text:
        command, args = text.split(' ', 1)
    else:
        command, args = text, ''
    command = command.lower()
    # '.o service NAME' shows the raw template for a service.
    if command == 'service':
        msg = o.services.get(args, 'No such service!')
        return bot.reply(msg)
    if not command in o.services:
        return bot.reply('Service not found in %s' % o.serviceURI)
    # Optional per-channel allow/deny list from the bot config; a leading
    # '!' entry turns the manifest into a blacklist.
    if hasattr(bot.config, 'external'):
        default = bot.config.external.get('*')
        manifest = bot.config.external.get(trigger.sender, default)
        if manifest:
            # NOTE(review): this local 'commands' shadows the imported
            # decorator of the same name — harmless here, but confusing.
            commands = set(manifest)
            if (command not in commands) and (manifest[0] != '!'):
                return bot.reply('Sorry, %s is not whitelisted' % command)
            elif (command in commands) and (manifest[0] == '!'):
                return bot.reply('Sorry, %s is blacklisted' % command)
    service(bot, trigger, command, args)
# Function attributes used as module-level state: the loaded service
# table and the URI it came from (both populated by refresh()).
o.services = {}
o.serviceURI = None
@commands('snippet')
def snippet(bot, trigger):
    """Google the query and run the top result through the 'py' service.

    The `py` string below is Python source executed remotely by the 'py'
    web service, not locally.
    """
    if not o.services:
        refresh(bot)
    search = urllib.quote(trigger.group(2).encode('utf-8'))
    # NOTE(review): the '&quot;' literal below may be an entity-encoding
    # artifact of this copy (upstream likely embeds a quoted '"') —
    # confirm against the original module before relying on it.
    py = ("BeautifulSoup.BeautifulSoup(re.sub('<.*?>|(?<= ) +', '', " +
          "''.join(chr(ord(c)) for c in " +
          "eval(urllib.urlopen('http://ajax.googleapis.com/ajax/serv" +
          "ices/search/web?v=1.0&q=" + search + "').read()" +
          ".replace('null', 'None'))['responseData']['resul" +
          "ts'][0]['content'].decode('unicode-escape')).replace(" +
          "'&quot;', '\x22')), convertEntities=True)")
    service(bot, trigger, 'py', py)
|
import firebase_admin
from firebase_admin import credentials, auth, messaging
from firebase_admin import firestore, storage
from House import House, Utility, Amenity, Profile, Tenant
from Landlord import Landlord
from Location import Location
from RepairRating import RepairRating
from Prediction import Prediction
import google
import datetime
from PIL import Image
class FirebaseAdmin():
cred = credentials.Certificate('./ServiceAccount.json')
firebase_admin.initialize_app(cred, {
'storageBucket' : 'roomr-222721.appspot.com'
})
def __init__(self):
    """Create per-instance Firestore handles.

    The Firebase app itself is initialized once at class-definition time
    (see the class body); this only builds client-side objects.
    """
    self._cred = credentials.Certificate('./ServiceAccount.json')
    self._db = firestore.client()
    self._transaction = self._db.transaction()
    # NOTE(review): hard-coded document reference; it is not used by any
    # method visible here — confirm before removing.
    self._houseRef = self._db.collection("House").document("11 Bronte Rd")
def get_db_instance(self):
    """Return the underlying Firestore client."""
    return self._db
def add_house(self, houseRequest):
    """Create a House document and link it to its landlord.

    houseRequest: dict with address, rent, size, bedNumber, bathNumber,
    landlordEmail, description, amenities and utilities keys.
    Returns a {"Result": ...} status dict.
    """
    # Refuse to overwrite a house that already exists at this address.
    if (self._db.collection("House").document(houseRequest["address"]).get().exists):
        return {"Result": "Error: House already exists"}
    houseRef = self._db.collection("House").document(houseRequest["address"])
    landlordRef = self._db.collection("Landlord").document(houseRequest["landlordEmail"])
    # Geocode the address for the map URL plus province/city fields.
    location = Location()
    locationData = location.get_location_from_address(houseRequest["address"])
    house = House(houseRequest["address"],
                  houseRequest["rent"],
                  houseRequest["size"],
                  houseRequest["bedNumber"],
                  houseRequest["bathNumber"],
                  houseRequest["landlordEmail"],
                  locationData["url"],
                  houseRequest["description"],
                  False,
                  houseRequest["amenities"],
                  houseRequest["utilities"])
    houseRef.set(house.get_dictionary())
    houseRef.update({
        "province": locationData["province"],
        "city": locationData["city"]
    })
    # Append the new house reference to the landlord's "houses" array.
    # KeyError means the landlord document has no "houses" field yet.
    houses = []
    landlordSnapshot = landlordRef.get()
    try:
        houseReferences = landlordSnapshot.get("houses")
        for reference in houseReferences:
            houses.append(reference)
        houses.append(houseRef.get().reference)
        landlordRef.update({"houses": houses})
    except KeyError:
        houses.append(houseRef.get().reference)
        landlordRef.update({"houses": houses})
    return {"Result": "House added successfully"}
def get_landlord_houses(self, email):
    """Return all of a landlord's houses as dicts, with their profiles
    and tenants attached.

    Returns a {"Result": ...} error dict when the landlord is missing or
    has no "houses" field.
    """
    landlordHouses = []
    landlordSnapshot = self._db.collection("Landlord").document(email).get()
    if not landlordSnapshot.exists:
        return {"Result": "Error: Landlord does not exist"}
    try:
        for reference in landlordSnapshot.get("houses"):
            houseReference = self._db.document(reference.path).get()
            house = House(houseReference.get("address"),
                          houseReference.get("rent"),
                          houseReference.get("size"),
                          houseReference.get("bedNumber"),
                          houseReference.get("bathNumber"),
                          houseReference.get("landlordEmail"),
                          houseReference.get("url"),
                          houseReference.get("description"),
                          houseReference.get("isPosted"),
                          houseReference.get("amenities"),
                          houseReference.get("utilities"))
            # "profiles" is optional; KeyError means none yet.
            try:
                for profileReference in houseReference.get("profiles"):
                    profile = self._db.document(profileReference.path).get()
                    p = Profile(profile.get("firstName"),
                                profile.get("lastName"),
                                profile.get("email"),
                                profile.get("bio"))
                    house.add_leaf(p)
            except KeyError:
                pass
            # "tenants" is likewise optional.
            try:
                for tenantReference in houseReference.get("tenants"):
                    tenant = self._db.document(tenantReference.path).get()
                    t = Tenant(tenant.get("firstName"),
                               tenant.get("lastName"),
                               tenant.get("password"),
                               tenant.get("<PASSWORD>"),
                               tenant.get("tenantEmail"),
                               tenant.get("landlordEmail"),
                               tenant.get("tenantRating"))
                    house.add_leaf(t)
            except KeyError:
                pass
            landlordHouses.append(house.get_dictionary())
    except KeyError:
        return {"Result": "No houses found"}
    return landlordHouses
def write_temp_landlord(self, landlord, uuid):
    """Store a pending landlord registration keyed by email, tagging it
    with the verification uuid checked later by verify_temp_landlord."""
    tempLandlordRef = self._db.collection("TempLandlord").document(landlord["email"])
    tempLandlordRef.set(landlord)
    tempLandlordRef.update({"uuid": str(uuid)})
def addUserToAuthentication(self, email, password):
    """Create a Firebase Auth user with the given credentials."""
    auth.create_user(email = email, password = password)
def verify_temp_landlord(self, email, token):
    """Promote a pending landlord to a real account when *token* matches.

    Scans every TempLandlord document for a matching uuid; on a match,
    creates the Firebase Auth user, copies the record into Landlord and
    deletes the pending document. Returns a {"Result": ...} status dict.

    Bug fix: the error return used to sit in an else-branch inside the
    loop, so only the FIRST pending document was ever checked before
    "Invalid token" was returned. Now all documents are checked and the
    failure is reported only after the loop completes.
    """
    documents = self._db.collection("TempLandlord").get()
    for document in documents:
        if str(document.get("uuid")) == token:
            auth.create_user(email=email, password=document.get("password"))
            landlord = Landlord(document.get("firstName"),
                                document.get("lastName"),
                                document.get("email"),
                                document.get("password"),
                                document.get("<PASSWORD>"))
            self._db.collection("Landlord").document(email).set(
                landlord.get_dictionary())
            self._db.collection("TempLandlord").document(email).delete()
            return {"Result": "User successfully verified"}
    return {"Result": "Error: Invalid token"}
def get_landlord_by_email(self, email):
    """Find the landlord whose "email" field matches; return its dict,
    or a {"Result": ...} error dict when there is no match."""
    matches = (doc for doc in self._db.collection("Landlord").get()
               if str(doc.get("email")) == email)
    for document in matches:
        landlord = Landlord(document.get("firstName"),
                            document.get("lastName"),
                            document.get("email"),
                            document.get("password"),
                            document.get("<PASSWORD>"))
        return landlord.get_dictionary()
    return {"Result": "Error: Landlord not found"}
def get_tenant_by_email(self, email):
    """Find the tenant whose "tenantEmail" field matches; return its
    dict, or a {"Result": ...} error dict when there is no match."""
    matches = (doc for doc in self._db.collection("Tenant").get()
               if str(doc.get("tenantEmail")) == email)
    for document in matches:
        tenant = Tenant(document.get("firstName"),
                        document.get("lastName"),
                        document.get("password"),
                        document.get("<PASSWORD>"),
                        document.get("tenantEmail"),
                        document.get("landlordEmail"),
                        document.get("tenantRating"))
        return tenant.get_dictionary()
    return {"Result": "Error: Tenant not found"}
def write_profile(self, profile):
    """Create a Profile document keyed by email; refuse duplicates.

    Returns a {"Result": ...} status dict either way.
    """
    doc_ref = self._db.collection("Profile").document(profile["email"])
    # Guard clause: never overwrite an existing profile.
    if doc_ref.get().exists:
        return {"Result": "Error: Profile already exists"}
    doc_ref.set(profile)
    return {"Result": "Profile successfully added"}
def update_profile(self, profile):
    """Update an existing Profile document in place; silently does
    nothing when the profile does not exist."""
    profileSnapshot = self._db.collection("Profile").document(profile["email"]).get()
    if profileSnapshot.exists:
        # NOTE(review): documents are keyed by email, so this inner check
        # appears tautological unless the stored "email" field can drift
        # from the document id — confirm before simplifying.
        if profile["email"] == profileSnapshot.get("email"):
            self._db.collection("Profile").document(profile["email"]).update(profile)
def post_listing(self, house):
    """Set a house's isPosted flag from the given house dict (used to
    publish or unpublish a listing)."""
    houseReference = self._db.collection("House").document(house["address"])
    houseReference.update({"isPosted": house["isPosted"]})
def addPictureToStorage(self, image, blob_path="Repairs/Test.png"):
    """Upload PNG bytes to Cloud Storage and make them publicly readable.

    image: PNG image content accepted by upload_from_string.
    blob_path: destination object path. Generalized from the previously
        hard-coded "Repairs/Test.png", which is kept as the default so
        existing callers behave identically.
    Returns the blob's public URL (also printed, as before).
    """
    bucket = storage.bucket()
    blob = bucket.blob(blob_path)
    blob.upload_from_string(
        image,
        content_type='image/png'
    )
    blob.make_public()
    print(blob.public_url)
    return blob.public_url
def addProfileToHouse(self, profile):
    """Append a reference to the profile's document onto its house's
    "profiles" array (creating the array if absent)."""
    profileReferences = []
    houseSnapshot = self._db.collection("House").document(profile["houseAddress"]).get()
    # KeyError means the house has no "profiles" field yet.
    try:
        for reference in houseSnapshot.get("profiles"):
            profileReferences.append(reference)
    except KeyError:
        self._db.collection("House").document(profile["houseAddress"]).update({"profiles": []})
    profileRef = self._db.collection("Profile").document(profile["email"])
    profileReferences.append(profileRef)
    self._db.collection("House").document(profile["houseAddress"]).update({"profiles": profileReferences})
def search_houses(self, province, city, price, amenities):
filteredHouses = []
houseCollection = self._db.collection("House").get()
for houseDocument in houseCollection:
if houseDocument.get("isPosted") == True:
if houseDocument.get("province") == province:
if houseDocument.get("city") == city:
if houseDocument.get("rent") <= price:
if houseDocument.get("amenities") == amenities:
house = House(houseDocument.get("address"),
houseDocument.get("rent"),
houseDocument.get("size"),
houseDocument.get("bedNumber"),
houseDocument.get("bathNumber"),
houseDocument.get("landlordEmail"),
houseDocument.get("url"),
houseDocument.get("description"),
houseDocument.get("isPosted"),
houseDocument.get("amenities"),
houseDocument.get("utilities"))
try:
for profileReference in houseDocument.get("profiles"):
profile = self._db.document(profileReference.path).get()
p = Profile(profile.get("firstName"),
profile.get("lastName"),
profile.get("email"),
profile.get("bio"))
house.add_leaf(p)
except KeyError:
pass
try:
for tenantReference in houseDocument.get("tenants"):
tenant = self._db.document(tenantReference.path).get()
t = Tenant(tenant.get("firstName"),
tenant.get("lastName"),
tenant.get("password"),
tenant.get("password2"),
tenant.get("tenantEmail"),
tenant.get("landlordEmail"),
tenant.get("tenantRating"))
house.add_leaf(t)
except KeyError:
pass
filteredHouses.append(house.get_dictionary())
return filteredHouses
def convertProfileToTenant(self, profile):
houseReference = self._db.collection("House").document(profile["houseAddress"]).get()
address = profile["houseAddress"]
del profile["houseAddress"]
profileReferences = []
tenantReferences = []
for profileReference in houseReference.get("profiles"):
if self._db.document(profileReference.path).get().to_dict() == profile:
del profile["bio"]
profile["tenantEmail"] = profile["email"]
del profile["email"]
self._db.collection("Tenant").document(profile["tenantEmail"]).set(profile)
tenantReferences.append(self._db.collection("Tenant").document(profile["tenantEmail"]).get().reference)
else:
profileReferences.append(profileReference)
for houseDocument in self._db.collection("House").stream():
houseProfileList = houseDocument.get("profiles")
for profileReference in houseProfileList:
profileSnapshot = self._db.document(profileReference.path).get()
if profileSnapshot.get("email") == profile["tenantEmail"]:
houseProfileList.remove(profileReference)
houseDocument.reference.update({"profiles": houseProfileList})
self._db.collection("Tenant").document(profile["tenantEmail"]).update({"houseReference": self._db.collection("House").document(address)})
self._db.collection("House").document(address).update({"profiles": profileReferences, "tenants": tenantReferences})
def sign_up_tenant(self, tenant):
tenantReferance = self._db.collection("Tenant").document(tenant["tenantEmail"]).get()
if (tenantReferance.get("firstName") == tenant["firstName"] and tenantReferance.get("lastName") == tenant["lastName"]):
self._db.collection("Tenant").document(tenant["tenantEmail"]).update(tenant)
self.addUserToAuthentication(tenant["tenantEmail"], tenant["password"])
def add_payment_landlord(self, paymentId):
paymentRef = self._db.collection("Payment").document(paymentId).get()
payment = paymentRef.to_dict()
payments = []
landlordRef = self._db.collection("Landlord").document(payment["landlordEmail"]).get()
try:
paymentNotifications = landlordRef.get("payments")
print(paymentNotifications)
except KeyError:
self._db.collection("Landlord").document(payment["landlordEmail"]).update({"payments": [payment]})
return
for paymentNotification in paymentNotifications:
payments.append(paymentNotification)
payments.append(payment)
self._db.collection("Landlord").document(payment["landlordEmail"]).update({"payments": payments})
def add_pending_payment(self, payment, paymentId):
self._db.collection("Payment").document(paymentId).set(payment)
def get_landlord_payments(self, landlordEmail):
landlordRef = self._db.collection("Landlord").document(landlordEmail).get()
return landlordRef.get("payments")
def get_tenant_house(self, tenantEmail):
tenantSnapshot = self._db.collection("Tenant").document(tenantEmail).get()
reference = tenantSnapshot.get("houseReference")
print(reference)
houseSnapshot = self._db.document(reference.path).get()
house = House(houseSnapshot.get("address"),
houseSnapshot.get("rent"),
houseSnapshot.get("size"),
houseSnapshot.get("bedNumber"),
houseSnapshot.get("bathNumber"),
houseSnapshot.get("landlordEmail"),
houseSnapshot.get("url"),
houseSnapshot.get("description"),
houseSnapshot.get("isPosted"),
houseSnapshot.get("amenities"),
houseSnapshot.get("utilities"))
for profileReference in houseSnapshot.get("profiles"):
profile = self._db.document(profileReference.path).get()
p = Profile(profile.get("firstName"),
profile.get("lastName"),
profile.get("email"),
profile.get("bio"))
house.add_leaf(p)
try:
for tenantReference in houseSnapshot.get("tenants"):
tenant = self._db.document(tenantReference.path).get()
t = Tenant(tenant.get("firstName"),
tenant.get("lastName"),
tenant.get("password"),
tenant.get("<PASSWORD>"),
tenant.get("tenantEmail"),
tenant.get("landlordEmail"),
tenant.get("tenantRating"))
house.add_leaf(t)
except KeyError:
print("Error: Tenant key error")
return house.get_dictionary()
    def get_tenants_from_houses(self, email):
        """Return one entry per house of the landlord, each holding the
        house address and a tenant name.

        NOTE(review): "tenantName" is overwritten on every iteration of the
        inner loop, so only the last tenant of each house is reported —
        confirm whether all tenants were intended.
        """
        landlordSnapshot = self._db.collection("Landlord").document(email).get()
        houses = landlordSnapshot.get("houses")
        results = []
        for house in houses:
            result = {}
            houseSnapshot = self._db.document(house.path).get()
            if houseSnapshot:
                result["houseAddress"] = houseSnapshot.get("address")
                for tenantReference in houseSnapshot.get("tenants"):
                    tenantSnapshot = self._db.document(tenantReference.path).get()
                    result["tenantName"] = tenantSnapshot.get("firstName") + " " + tenantSnapshot.get("lastName")
                results.append(result)
        return results
def send_repair_notification(self, landLordEmail):
# This registration token comes from the client FCM SDKs.
landlordSnapshot = self._db.collection("Landlord").document(landLordEmail).get()
registration_token = landlordSnapshot.get("token")
print(registration_token)
notification = messaging.Notification(title="RoomR", body="Reminder, New Repairs has been posted.")
message = messaging.Message(
notification=notification,
token=registration_token
)
# Send a message to the device corresponding to the provided
# registration token.
response = messaging.send(message)
print(response)
def send_payment_notification(self, tenantEmail):
# This registration token comes from the client FCM SDKs.
tenantSnapshot = self._db.collection("Tenant").document(tenantEmail).get()
registration_token = tenantSnapshot.get("token")
print(registration_token)
#registration_token = '<KEY> <KEY>'
notification = messaging.Notification(title="RoomR", body="Reminder, Rent is coming up.")
message = messaging.Message(
notification=notification,
token=registration_token
)
# Send a message to the device corresponding to the provided
# registration token.
response = messaging.send(message)
print(response)
def is_uid_valid(self, token):
try:
auth.get_user(token)
return True
except auth.AuthError:
return False
def update_house(self, locationRequest):
houseReference = self._db.collection("House").document(locationRequest["houseAddress"])
locaction = Location()
print(locaction.retry_location(locationRequest["houseAddress"]))
houseReference.update({"url": locaction.retry_location(locationRequest["houseAddress"]), "province": locationRequest["province"], "city": locationRequest["city"]})
def remove_house(self, house):
houseReference = self._db.collection("House").document(house["address"])
houseReference.delete()
landlordSnapshot = self._db.collection("Landlord").document(house["landlordEmail"]).get()
houses = landlordSnapshot.get("houses")
for houseRef in houses:
if houseRef.path == houseReference.path:
houses.remove(houseRef)
self._db.collection("Landlord").document(house["landlordEmail"]).update({"houses": houses})
def update_repairs(self, houseAddress, date, repairs):
self._db.collection('House').document(houseAddress).collection("Repairs").document(date).update(repairs)
    def update_repairs_landlord(self, houseAddress, date, dateUpdated):
        """Recompute and persist the landlord's repair rating after a repair
        is handled, and record the dated rating in the history collection.

        :param houseAddress: address of the house the repair belongs to
        :param date: date the repair was created
        :param dateUpdated: date the repair was updated/handled
        """
        repairRating = 0.00
        #Getting Landlord Email.
        houseSnapshot = self._db.collection("House").document(houseAddress).get()
        landLordEmail = houseSnapshot.get("landlordEmail")
        #Check if rating exist if not make one, if it does pull it and get the rating.
        landlordDocument = self._db.collection("Landlord").document(landLordEmail).get()
        landlordDictionary = landlordDocument.to_dict()
        if("repairrating" in landlordDictionary):
            repairRating = float(landlordDictionary["repairrating"])
        else:
            # First-ever rating for this landlord defaults to a perfect 5.
            repairRating = 5.00
            # NOTE(review): this initial write stores the rating as a
            # string, while the later write below stores a float — confirm
            # the intended field type.
            ratingRepair = {u"repairrating": str(repairRating)}
            self._db.collection("Landlord").document(landLordEmail).update(ratingRepair)
        #Object of RepairRating
        repairRatingObj = RepairRating(date, dateUpdated, landLordEmail)
        calculatedRating = repairRatingObj.calculateRating()
        #Get value from repairRatingCLass add it to the repairRating and update.
        #repairRating = RepairRating.getRating()
        repairRating = repairRating + calculatedRating
        # Clamp the adjusted rating into the [0.00, 5.00] scale.
        if(repairRating >= 5.00):
            repairRating = 5.00
        if(repairRating <= 0.00):
            repairRating = 0.00
        repairRating = round(repairRating, 2)
        ratingRepair = {u"repairrating": float(repairRating)}
        self._db.collection("Landlord").document(landLordEmail).update(ratingRepair)
        #date has to be in month and year format.
        date = repairRatingObj.getDateUpdated()
        rating = {u"date": date, u"rating": float(repairRating)}
        self._db.collection("RepairRatings").document(landLordEmail).collection("Rating").document(date).set(rating)
def getRepairRatingHistory(self, landlordEmail):
ratingHistoryList = []
try:
docs = self._db.collection("RepairRatings").document(landlordEmail).collection("Rating")
doctsToStream = docs.stream()
for doc in doctsToStream:
dictonaryOfRatingHistory = doc.to_dict()
ratingHistoryList.append(dictonaryOfRatingHistory)
except google.cloud.exceptions.NotFound:
ratingHistoryList.append({'error':'Not such repair Rating for this Landlord.'})
print(u'No Such Document')
return ratingHistoryList
def get_repairs_for_house(self, houseAddress):
documents = self._db.collection("House").document(houseAddress).collection("Repairs")
#To get photo we will just send back the link to the photo and from the app it will get the photos.
repairList = []
try:
docs = documents.stream()
for doc in docs:
dictionayOfRepair = doc.to_dict()
repairList.append(dictionayOfRepair)
except google.cloud.exceptions.NotFound:
repairList.append({'error':'Not such repairs for this house.'})
print(u'No Such Document')
return repairList
def predict_image(self, imgUrl, language):
prediction = Prediction(imgUrl,language)
return prediction.getWordsRelatedToImage()
def get_landlord_rating(self, houseAddress):
houseSnapshot = self._db.collection("House").document(houseAddress).get()
landLordEmail = houseSnapshot.get("landlordEmail")
landlordDocument = self._db.collection("Landlord").document(landLordEmail).get()
rating = landlordDocument.get("repairrating")
return rating
def add_repair(self, houseAddress, date, repairs):
self._db.collection('House').document(houseAddress).collection("Repairs").document(date).set(repairs) |
<reponame>MISTCARRYYOU/PythonPDEVS
# Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Controller used as a specific simulation kernel
"""
from pypdevs.basesimulator import BaseSimulator
from pypdevs.logger import *
import threading
import pypdevs.accurate_time as time
import pypdevs.middleware as middleware
from pypdevs.DEVS import CoupledDEVS, AtomicDEVS
from pypdevs.util import DEVSException
from pypdevs.activityVisualisation import visualizeLocations
from pypdevs.realtime.threadingBackend import ThreadingBackend
from pypdevs.realtime.asynchronousComboGenerator import AsynchronousComboGenerator
class Controller(BaseSimulator):
    """
    The controller class, which is a special kind of normal simulation kernel. This should always run on the node labeled 0.
    It contains some functions that are only required to be ran on a single node, such as GVT initiation
    """
    def __init__(self, name, model, server):
        """
        Constructor

        :param name: name of the controller
        :param model: model to host at the kernel
        :param server: the server to make requests on
        """
        BaseSimulator.__init__(self, name, model, server)
        self.waiting_lock = threading.Lock()
        self.accumulator = {}
        self.no_finish_ring = threading.Lock()
        self.no_finish_ring.acquire()
        self.location_cell_view = False
        self.graph = None
        self.allocations = None
        self.running_irreversible = None
        self.initial_allocator = None
        self.prev_termination_time = 0.0

    def __setstate__(self, retdict):
        """
        For pickling

        :param retdict: dictionary containing attributes and their value
        """
        BaseSimulator.__setstate__(self, retdict)
        self.waiting_lock = threading.Lock()
        self.no_finish_ring = threading.Lock()
        self.no_finish_ring.acquire()

    def GVTdone(self):
        """
        Notify this simulation kernel that the GVT calculation is finished
        """
        self.wait_for_gvt.set()

    def isFinished(self, running):
        """
        Checks if all kernels have indicated that they have finished simulation.
        If each kernel has indicated this, a final (expensive) check happens to
        prevent premature termination.

        :param running: the number of kernels that is simulating
        :returns: bool -- whether or not simulation is already finished
        """
        # NOTE make sure that GVT algorithm is not running at the moment, otherwise we deadlock!
        # it might be possible that the GVT algorithm starts immediately after the wait(), causing deadlock again
        # Now we are sure that the GVT algorithm is not running when we start this
        # It seems that we should be finished, so just ACK this with every simulation kernel before proceeding
        # it might be possible that the kernel's 'notifyRun' command is still on the way, making the simulation
        # stop too soon.
        self.no_finish_ring.acquire()
        msgcount = self.finishRing(0, 0, True)
        if msgcount == -1:
            # One of the nodes was still busy
            self.no_finish_ring.release()
            return False
        else:
            msgcount2 = self.finishRing(0, 0, True)
            # If they are equal, we are done
            ret = msgcount == msgcount2
            if not ret:
                self.no_finish_ring.release()
            else:
                self.waiting = 0
            return ret

    def waitFinish(self, running):
        """
        Wait until the specified number of kernels have all told that simulation
        finished.

        :param running: the number of kernels that is simulating
        """
        while 1:
            time.sleep(1)
            # Make sure that no relocations are running
            if self.isFinished(running):
                # All simulation kernels have told us that they are idle at the moment
                break
        self.run_gvt = False
        self.event_gvt.set()
        self.gvt_thread.join()

    def startGVTThread(self, gvt_interval):
        """
        Start the GVT thread

        :param gvt_interval: the interval between two successive GVT runs
        """
        # We seem to be the controller
        # Start up the GVT algorithm then
        self.event_gvt = threading.Event()
        self.run_gvt = True
        self.gvt_thread = threading.Thread(target=Controller.threadGVT,
                                           args=[self, gvt_interval])
        self.gvt_thread.daemon = True
        self.gvt_thread.start()

    def threadGVT(self, freq):
        """
        Run the GVT algorithm, this method should be called in its own thread,
        because it will block

        :param freq: the time to sleep between two GVT calculations
        """
        # Wait for the simulation to have done something useful before we start
        self.event_gvt.wait(freq)
        # Maybe simulation already finished...
        while self.run_gvt:
            self.receiveControl([float('inf'),
                                 float('inf'),
                                 self.accumulator,
                                 {}],
                                True)
            # Wait until the lock is released elsewhere
            print("Waiting for clear")
            self.wait_for_gvt.wait()
            self.wait_for_gvt.clear()
            # Limit the GVT algorithm, otherwise this will flood the ring
            print("Cleared")
            self.event_gvt.wait(freq)

    def getVCDVariables(self):
        """
        Generate a list of all variables that exist in the current scope

        :returns: list -- all VCD variables in the current scope
        """
        variables = []
        for d in self.total_model.component_set:
            variables.extend(d.getVCDVariables())
        return variables

    def simulate_sync(self):
        """
        Synchronous simulation call, identical to the normal call, with the exception that it will be a blocking call as only "simulate" is marked as oneway.
        """
        BaseSimulator.simulate_sync(self)
        self.no_finish_ring.acquire()

    def simulate(self):
        """
        Run the actual simulation on the controller. This will simply 'intercept' the call to the original simulate and perform location visualisation when necessary.
        """
        self.checkForTemporaryIrreversible()
        self.no_finish_ring.release()
        if self.location_cell_view:
            from pypdevs.activityVisualisation import visualizeLocations
            visualizeLocations(self)
        # Call superclass (the actual simulation)
        BaseSimulator.simulate(self)
        self.prev_termination_time = self.termination_time[0]

    def getEventGraph(self):
        """
        Fetch a graph containing all connections and the number of events between the nodes. This is only useful when an initial allocator is chosen.

        :returns: dict -- containing source and destination, it will return the amount of events passed between them
        """
        return self.runAllocator()[0]

    def getInitialAllocations(self):
        """
        Get a list of all initial allocations. Will call the allocator to get the result.

        :returns: list -- containing all nodes and the models they host
        """
        return self.runAllocator()[1]

    def runAllocator(self):
        """
        Actually extract the graph of exchanged messages and run the allocator with this information.
        Results are cached.

        :returns: tuple -- the event graph and the allocations
        """
        # Only run this code once
        if self.graph is None and self.allocations is None:
            # It seems this is the first time
            if self.initial_allocator is None:
                # No allocator was defined, or it has already issued its allocation code, which resulted into 'nothing'
                self.graph = None
                self.allocations = None
            else:
                from pypdevs.util import constructGraph, saveLocations
                self.graph = constructGraph(self.model)
                # BUG FIX: the attribute is 'initial_allocator' (set in
                # __init__ and setAllocator); 'initialAllocator' raised
                # AttributeError here.
                allocs = self.initial_allocator.allocate(self.model.component_set,
                                                         self.getEventGraph(),
                                                         self.kernels,
                                                         self.total_activities)
                self.allocations = allocs
                self.initial_allocator = None
                saveLocations("locationsave.txt",
                              self.allocations,
                              self.model_ids)
        return self.graph, self.allocations

    def setCellLocationTracer(self, x, y, location_cell_view):
        """
        Sets the Location tracer and all its configuration parameters

        :param x: the horizontal size of the grid
        :param y: the vertical size of the grid
        :param location_cell_view: whether or not to enable it
        """
        self.x_size = x
        self.y_size = y
        self.location_cell_view = location_cell_view

    def setRelocator(self, relocator):
        """
        Sets the relocator to the one provided by the user

        :param relocator: the relocator to use
        """
        self.relocator = relocator
        # Perform run-time configuration
        try:
            self.relocator.setController(self)
        except AttributeError:
            pass

    def setActivityTracking(self, at):
        """
        Sets the use of activity tracking, which will simply output the activity of all models at the end of the simulation

        :param at: whether or not to enable activity tracking
        """
        self.activity_tracking = at

    def setClassicDEVS(self, classic_DEVS):
        """
        Sets the use of Classic DEVS instead of Parallel DEVS.

        :param classicDEVS: whether or not to use Classic DEVS
        """
        # Do this once, to prevent checks for the classic DEVS formalism
        if classic_DEVS:
            # Methods, so CamelCase
            self.coupledOutputGeneration = self.coupledOutputGenerationClassic

    def setAllocator(self, initial_allocator):
        """
        Sets the use of an initial relocator.

        :param initial_allocator: whether or not to use an initial allocator
        """
        self.initial_allocator = initial_allocator
        if initial_allocator is not None:
            # Methods, so CamelCase
            self.atomicOutputGeneration_backup = self.atomicOutputGeneration
            self.atomicOutputGeneration = self.atomicOutputGenerationEventTracing

    def setDSDEVS(self, dsdevs):
        """
        Whether or not to check for DSDEVS events

        :param dsdevs: dsdevs boolean
        """
        self.use_DSDEVS = dsdevs

    def setRealtime(self, input_references):
        """
        Sets the use of realtime simulation.

        :param input_references: dictionary containing the string to port mapping
        """
        self.realtime = True
        self.realtime_port_references = input_references

    def setTerminationCondition(self, termination_condition):
        """
        Sets the termination condition of this simulation kernel.
        As soon as the condition is valid, it willl signal all nodes that they have to stop simulation as soon as they have progressed up to this simulation time.

        :param termination_condition: a function that accepts two parameters: *time* and *model*. Function returns whether or not to halt simulation
        """
        self.termination_condition = termination_condition
        self.termination_time_check = False

    def findAndPerformRelocations(self, gvt, activities, horizon):
        """
        First requests the relocator for relocations to perform, and afterwards actually perform them.

        :param gvt: the current GVT
        :param activities: list containing all activities of all nodes
        :param horizon: the horizon used in this activity tracking
        """
        # Now start moving all models according to the provided relocation directives
        relocate = self.relocator.getRelocations(gvt, activities, horizon)
        if relocate:
            self.performRelocationsInit(relocate)

    def performRelocationsInit(self, relocate):
        """
        Perform the relocations specified in the parameter. Split of from the 'findAndPerformRelocations', to make it possible for other parts of the code
        to perform relocations too.

        :param relocate: dictionary containing the model_id as key and the value is the node to send it to
        """
        relocate = {key: relocate[key]
                    for key in relocate
                    if self.model_ids[key].location != relocate[key] and
                    self.model_ids[key].relocatable}
        if not relocate:
            return
        if self.running_irreversible is not None:
            self.getProxy(self.running_irreversible).unsetIrreversible()
            self.running_irreversible = None
        while not self.no_finish_ring.acquire(False):
            if not self.run_gvt:
                self.GVTdone()
                return
            time.sleep(0)
        kernels = {}
        self.locked_kernels = set()
        relocation_rules = {}
        for model_id in relocate:
            source = self.model_ids[model_id].location
            destination = relocate[model_id]
            if source == destination:
                continue
            kernels[source] = kernels.get(source, 0) + 1
            kernels[destination] = kernels.get(destination, 0) + 1
            if kernels[source] == 1:
                # We are the first to lock it, so actually send the lock
                self.getProxy(source).requestMigrationLock()
            if kernels[destination] == 1:
                # We are the first to lock it, so actually send the lock
                self.getProxy(destination).requestMigrationLock()
            relocation_rules.setdefault((source, destination), set()).add(model_id)
        while relocation_rules:
            # Busy loop until everything is done
            # BUG FIX: iterate over a snapshot of the keys, as entries are
            # deleted from the dict inside the loop; mutating a dict while
            # iterating its view raises RuntimeError in Python 3.
            for source, destination in list(relocation_rules.keys()):
                if (source in self.locked_kernels and
                        destination in self.locked_kernels):
                    models = relocation_rules[(source, destination)]
                    self.getProxy(source).migrateTo(destination, models)
                    del relocation_rules[(source, destination)]
                    kernels[source] -= len(models)
                    kernels[destination] -= len(models)
                    if kernels[source] == 0:
                        self.getProxy(source).migrationUnlock()
                    if kernels[destination] == 0:
                        self.getProxy(destination).migrationUnlock()
        # OK, now check whether we need to visualize all locations or not
        if self.location_cell_view:
            visualizeLocations(self)
        # Possibly some node is now hosting all models, so allow this node to become irreversible for some time.
        self.checkForTemporaryIrreversible()
        # Allow the finishring algorithm again
        self.no_finish_ring.release()

    def checkForTemporaryIrreversible(self):
        """
        Checks if one node is hosting all the models. If this is the case, this node will gain 'temporary irreversibility',
        allowing it to skip state saving and thus avoiding the main overhead associated with time warp.
        """
        # Check whether or not everything is located at a single node now
        if self.relocator.useLastStateOnly():
            # If this is the case, we will be unable to know which state to save the activity for
            # So disable it for now
            # This does offer a slight negative impact, though it isn't really worth fixing for the time being
            return
        if isinstance(self.destinations[0], int):
            current_kernel = self.destinations[0]
        else:
            current_kernel = 0
        for kernel in self.destinations:
            if isinstance(kernel, int):
                loc = kernel
            else:
                loc = 0
            if loc != current_kernel:
                break
        else:
            # We didn't break, so one of the nodes runs all at once
            self.getProxy(current_kernel).setIrreversible()
            self.running_irreversible = current_kernel

    def notifyLocked(self, remote):
        """
        Notify this kernel that the model is locked

        :param remote: the node that is locked
        """
        self.locked_kernels.add(remote)

    def dsRemovePort(self, port):
        """
        Remove a port from the simulation

        :param port: the port to remove
        """
        for iport in port.inline:
            iport.outline = [p for p in iport.outline if p != port]
        for oport in port.outline:
            oport.inline = [p for p in oport.inline if p != port]
        self.dc_altered.add(port)

    def dsDisconnectPorts(self, p1, p2):
        """
        Disconnect two ports

        :param p1: source port
        :param p2: target port
        """
        self.dc_altered.add(p1)

    def dsConnectPorts(self, p1, p2):
        """
        Connect two ports

        :param p1: source port
        :param p2: target port
        """
        self.dc_altered.add(p1)

    def dsUnscheduleModel(self, model):
        """
        Dynamic Structure change: remove an existing model

        :param model: the model to remove
        """
        if isinstance(model, CoupledDEVS):
            for m in model.component_set:
                # BUG FIX: the original passed a stray second argument
                # (False), but this method only accepts the model and the
                # call raised TypeError.
                self.dsUnscheduleModel(m)
            for port in model.IPorts:
                self.dsRemovePort(port)
            for port in model.OPorts:
                self.dsRemovePort(port)
        elif isinstance(model, AtomicDEVS):
            self.model.component_set.remove(model)
            self.model.models.remove(model)
            # The model is removed, so remove it from the scheduler
            self.model.scheduler.unschedule(model)
            self.model_ids[model.model_id] = None
            self.destinations[model.model_id] = None
            self.model.local_model_ids.remove(model.model_id)
            for port in model.IPorts:
                self.dsRemovePort(port)
            for port in model.OPorts:
                self.dsRemovePort(port)
        else:
            raise DEVSException("Unknown model to schedule: %s" % model)

    def dsScheduleModel(self, model):
        """
        Dynamic Structure change: create a new model

        :param model: the model to add
        """
        if isinstance(model, CoupledDEVS):
            model.full_name = model.parent.full_name + "." + model.getModelName()
            for m in model.component_set:
                self.dsScheduleModel(m)
            for p in model.IPorts:
                self.dc_altered.add(p)
            for p in model.OPorts:
                self.dc_altered.add(p)
        elif isinstance(model, AtomicDEVS):
            model.model_id = len(self.model_ids)
            model.full_name = model.parent.full_name + "." + model.getModelName()
            model.location = self.name
            self.model_ids.append(model)
            self.destinations.append(model)
            self.model.component_set.append(model)
            self.model.models.append(model)
            self.model.local_model_ids.add(model.model_id)
            self.atomicInit(model, self.current_clock)
            p = model.parent
            model.select_hierarchy = [model]
            while p != None:
                model.select_hierarchy = [p] + model.select_hierarchy
                p = p.parent
            if model.time_next[0] == self.current_clock[0]:
                # If scheduled for 'now', update the age manually
                model.time_next = (model.time_next[0], self.current_clock[1])
            # It is a new model, so add it to the scheduler too
            self.model.scheduler.schedule(model)
            for p in model.IPorts:
                self.dc_altered.add(p)
            for p in model.OPorts:
                self.dc_altered.add(p)
        else:
            raise DEVSException("Unknown model to schedule: %s" % model)

    def setRealTime(self, subsystem, generator_file, ports, scale, listeners, args=None):
        """
        Set the use of realtime simulation

        :param subsystem: defines the subsystem to use
        :param generator_file: filename to use for generating external inputs
        :param ports: input port references
        :param scale: the scale factor for realtime simulation
        :param listeners: the ports on which we should listen for output
        :param args: additional arguments for the realtime backend
        """
        # Mutable default ([]) replaced by a None sentinel to avoid sharing
        # one list instance across calls.
        if args is None:
            args = []
        self.realtime = True
        self.threading_backend = ThreadingBackend(subsystem, args)
        self.rt_zerotime = time.time()
        # BUG FIX: the original named this local 'async', which is a
        # reserved keyword in Python 3.7+ and a SyntaxError.
        combo_generator = AsynchronousComboGenerator(generator_file, self.threading_backend)
        self.asynchronous_generator = combo_generator
        self.realtime_starttime = time.time()
        self.portmap = ports
        self.model.listeners = listeners
        self.realtime_scale = scale

    def gameLoop(self):
        """
        Perform all computations up to the current time. Only applicable for the game loop realtime backend.
        """
        self.threading_backend.step()

    def realtimeInterrupt(self, string):
        """
        Create an interrupt from other Python code instead of using stdin or the file

        :param string: the value to inject
        """
        self.threading_backend.interrupt(string)

    def stateChange(self, model_id, variable, value):
        """
        Notification function for when a variable's value is altered. It will notify the node that is responsible for simulation of this model AND also notify the tracers of the event.

        :param model_id: the model_id of the model whose variable was changed
        :param variable: the name of the variable that was changed (as a string)
        :param value: the new value of the variable
        """
        # Call the node that hosts this model and order it to recompute timeAdvance
        proxy = self.getProxy(self.model_ids[model_id].location)
        proxy.recomputeTA(model_id, self.prev_termination_time)
        self.tracers.tracesUser(self.prev_termination_time,
                                self.model_ids[model_id],
                                variable,
                                value)
|
<gh_stars>0
"""This module contains commands related to Phabricator."""
import json # FIX THIS
import requests # FIX THIS
from sopel.module import commands, example, interval, rule
from sopel.config.types import StaticSection, ValidatedAttribute
import sys
class PhabricatorSection(StaticSection):
    """Sopel config section holding the Phabricator connection settings."""
    # Host of the Phabricator installation; used without a scheme in
    # 'https://{host}/api/...' request URLs.
    host = ValidatedAttribute('host', str)
    # Conduit API token sent as 'api.token' with every request.
    api_token = ValidatedAttribute('api_token', str)
    # Saved query key (usage not shown in this chunk).
    querykey = ValidatedAttribute('querykey', str)
    # Whether to broadcast high-priority tasks automatically.
    highpri_notify = ValidatedAttribute('highpri_notify', bool)
    # Channel that receives the high-priority notifications.
    highpri_channel = ValidatedAttribute('highpri_channel', str)
def setup(bot):
    """Register the [phabricator] config section when the module loads."""
    bot.config.define_section('phabricator', PhabricatorSection)
def configure(config):
    """Interactively prompt for every [phabricator] setting during
    `sopel --configure-modules`.
    """
    config.define_section('phabricator', PhabricatorSection, validate=False)
    config.phabricator.configure_setting('host', 'What is the URL of your Phabricator installation?')
    config.phabricator.configure_setting('api_token', 'Please enter a Phabricator API token.')
    config.phabricator.configure_setting('querykey', 'Please enter a Phabricator query key.')
    config.phabricator.configure_setting('highpri_notify', 'Would you like to enable automatic notification of high priority tasks? (true/false)')
    # The original prompt ended with an unbalanced "(...": close the
    # parenthesis in the user-facing text.
    config.phabricator.configure_setting('highpri_channel',
                                         'If you enabled high priority notifications, what channel would you like them sent to? (notifications will be sent once every week.)')
BOLD = '\x02'  # IRC bold formatting control character
HIGHPRIO_NOTIF_TASKS_PER_PAGE = 5  # tasks listed per notification page
HIGHPRIO_TASKS_NOTIFICATION_INTERVAL = 7 * 24 * 60 * 60  # every week
MESSAGES_INTERVAL = 2  # seconds (to avoid excess flood)
# Module-level state for the periodic high-priority notifier.
startup_tasks_notifications = False
priotasks_notify = []
def searchphab(bot, channel, task=1):
    """Look up a Maniphest task and announce a one-line summary in *channel*.

    :param bot: the sopel bot instance (provides settings and say())
    :param channel: channel or nick to deliver the summary to
    :param task: the task number to look up (without the leading "T")
    """
    payload = {
        'api.token': bot.settings.phabricator.api_token,
        'constraints[ids][0]': task
    }
    response = requests.post(
        url='https://{0}/api/maniphest.search'.format(bot.settings.phabricator.host),
        data=payload)
    try:
        # "result" is None on a Conduit error, so the chained .get("data")
        # raises AttributeError; an empty "data" list raises IndexError.
        result = response.json().get("result").get("data")[0]
    except AttributeError:
        bot.say("An error occurred while parsing the result.", channel)
        return
    except IndexError:
        bot.say("Sorry, but I couldn't find information for the task you searched.", channel)
        return
    except Exception:
        # Narrowed from a bare except; typo "occured" fixed.
        bot.say("An unknown error occurred.", channel)
        return
    # Resolve the owner's username.
    params = {
        'api.token': bot.settings.phabricator.api_token,
        'constraints[phids][0]': result.get("fields").get("ownerPHID")
    }
    response2 = requests.post(
        url='https://{0}/api/user.search'.format(bot.settings.phabricator.host),
        data=params)
    try:
        response2 = response2.json()
    except json.decoder.JSONDecodeError as e:
        bot.say(response2.text, '#ZppixBot-Logs')
        bot.say(str(e), '#ZppixBot-Logs')
        # BUG FIX: previously execution fell through and the raw Response
        # object was used below, crashing with AttributeError.
        return
    # Resolve the author's username.
    params2 = {
        'api.token': bot.settings.phabricator.api_token,
        'constraints[phids][0]': result.get("fields").get("authorPHID")
    }
    response3 = requests.post(
        url='https://{0}/api/user.search'.format(bot.settings.phabricator.host),
        data=params2)
    response3 = response3.json()
    if result.get("fields").get("ownerPHID") is None:
        owner = None
    else:
        owner = response2.get("result").get("data")[0].get("fields").get("username")
    author = response3.get("result").get("data")[0].get("fields").get("username")
    priority = result.get("fields").get("priority").get("name")
    status = result.get("fields").get("status").get("name")
    # Use the configured host rather than a hard-coded one so the announced
    # link matches the installation that was actually queried.
    output = 'https://{0}/T{1} - '.format(bot.settings.phabricator.host, str(result["id"]))
    output = '{0}{2}{1}{2}, '.format(output, str(result.get('fields').get('name')), BOLD)
    output = output + 'authored by {1}{0}{1}, '.format(author, BOLD)
    output = output + 'assigned to {1}{0}{1}, '.format(owner, BOLD)
    output = output + 'Priority: {1}{0}{1}, '.format(priority, BOLD)
    output = output + 'Status: {1}{0}{1}'.format(status, BOLD)
    bot.say(output, channel)
def gethighpri(limit=True, channel='#miraheze', bot=None):
    """Announce the open high-priority tasks returned by the saved query.

    :param limit: when True, stop after HIGHPRIO_NOTIF_TASKS_PER_PAGE tasks
        and point users at the web UI for the rest
    :param channel: channel to announce into
    :param bot: the sopel bot instance
    """
    payload = {
        'api.token': bot.settings.phabricator.api_token,
        'queryKey': bot.settings.phabricator.querykey,  # mFzMevK.KRMZ for mhphab
    }
    response = requests.post(
        url='https://{0}/api/maniphest.search'.format(bot.settings.phabricator.host),
        data=payload)
    result = response.json().get("result")
    # "result" is None on a Conduit error; "data" may be missing or empty.
    data = result.get("data") if result else None
    if not data:
        # BUG FIX: previously an empty result list produced no output at all;
        # also fixed "They are" -> "There are".
        bot.say("There are no high priority tasks that I can process, good job!", channel)
        return
    for index, task_entry in enumerate(data):
        # BUG FIX: the old "x > 5" check announced six tasks before breaking;
        # honor the HIGHPRIO_NOTIF_TASKS_PER_PAGE constant instead.
        if limit and index >= HIGHPRIO_NOTIF_TASKS_PER_PAGE:
            bot.say("There are more than {0} tasks. Please see {1} for the rest or use .highpri".format(
                HIGHPRIO_NOTIF_TASKS_PER_PAGE, bot.settings.phabricator.host), channel)
            break
        searchphab(bot=bot, channel=channel, task=task_entry.get("id"))
@commands('task')
@example('.task 1')
def phabtask(bot, trigger):
    """Get a summary of the Phabricator task number you provide."""
    query = trigger.group(2)
    # BUG FIX: ".task" with no argument left query as None and crashed with
    # AttributeError on .startswith().
    if not query:
        bot.say("Please provide a task number, e.g. .task 1", trigger.sender)
        return
    if query.startswith('T'):
        task_id = query.split('T')[1]
    else:
        task_id = query
    searchphab(bot=bot, channel=trigger.sender, task=task_id)
@rule('T[1-9][0-9]*')
def phabtask2(bot, trigger):
    """Get a Miraheze phabricator link to a the task number you provide."""
    # Strip the leading "T" from the matched token and look the task up.
    searchphab(bot=bot, channel=trigger.sender, task=trigger.split('T')[1])
@interval(HIGHPRIO_TASKS_NOTIFICATION_INTERVAL)
def high_priority_tasks_notification(bot):
    """Send high priority tasks notifications."""
    # BUG FIX: the docstring used to sit inside the if-body as a dead string
    # statement; it now documents the function (and feeds sopel's help).
    if bot.settings.phabricator.highpri_notify is True:
        gethighpri(channel=bot.settings.phabricator.highpri_channel, bot=bot)
@commands('highpri')
@example('.highpri')
def forcehighpri(bot, trigger):
    """List all open high priority tasks in the current channel."""
    gethighpri(limit=False, channel=trigger.sender, bot=bot)
|
"""Replay Orchestrator
.. moduleauthor:: <NAME> <<EMAIL>>, <NAME> (<EMAIL>)
"""
import json
import threading
import logging
import queue
import time
import sys
import srcs.lib.logger as logger
import srcs.lib.defines as defines
# Configure the root logger to emit DEBUG-level records to stdout.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
# BUG FIX: the handler was created and configured but never attached, so the
# stdout stream handler had no effect.
root.addHandler(handler)
class ReplayOrchestrator(threading.Thread):
    """This thread orchestrates pcap replay by communicating with all replay drivers.
    """
    def __init__(self, net_power_obj, replay_plan_file):
        """Initialization of replay orchestrator
        :param net_power_obj: defined in srcs/core/netpower.py
        :param replay_plan_file: Absolute path to file containing the replay plan which specifies which replay driver
        is incharge of each pcap to be replayed
        """
        threading.Thread.__init__(self)
        # Commands arrive from the main melody process through this queue.
        self.thread_cmd_queue = queue.Queue()
        self.replay_plan_file = replay_plan_file
        # NOTE(review): never populated in this class - presumably managed elsewhere; confirm.
        self.shared_bufs = {}
        self.log = logger.Logger(
            "/tmp/replay_orchestrator_log.txt", "Replay Orchestrator")
        # Loaded lazily from replay_plan_file inside run().
        self.replay_plan = None
        self.start_time = None
        self.net_power_obj = net_power_obj
    def get_curr_cmd(self):
        """Block until the next command is queued and return it."""
        curr_cmd = self.thread_cmd_queue.get(block=True)
        return curr_cmd
    def signal_pcap_replay_trigger(self, node_id, pcap_file_path):
        """Signals the pcap driver on "node_id" to run the pcap
        :param node_id: mininet host name
        :type node_id: str
        :param pcap_file_path: Absolute path to the pcap to be replayed
        :type pcap_file_path: str
        :return: None
        """
        ret = 0
        # Busy-retry until the shared-buffer write succeeds (write returns <= 0 on failure).
        while ret <= 0:
            ret = self.net_power_obj.shared_buf_array.write(
                f"{node_id}-replay-main-cmd-channel-buffer", pcap_file_path, 0)
        self.log.info(f"Signalled start of replay phase to node: {node_id}")
    def send_command(self, cmd):
        """Queue *cmd* for the orchestrator loop (called from other threads)."""
        self.thread_cmd_queue.put(cmd)
    def cancel_thread(self):
        """Ask the orchestrator loop to exit cleanly on its next iteration."""
        self.thread_cmd_queue.put(defines.EXIT_CMD)
    def are_two_pcap_stages_conflicting(self, stage_1_nodes, stage_2_nodes):
        """Checks if two sets have nodes have any intersections
        :param stage_1_nodes: list of host names
        :param stage_2_nodes: list of host names
        :return: True if the intersection is not NULL, else False
        """
        for node_id in stage_1_nodes:
            if node_id in stage_2_nodes:
                return True
        return False
    def is_pcap_stage_relevant(self, involved_hosts):
        """Checks if any node involved in the pcap doesn't even exist in the emulation
        :param involved_hosts: list of mininet host names
        :return: True if all nodes involved are valid, else False
        """
        relevant_hosts = [host.name for host in self.net_power_obj.network_configuration.mininet_obj.hosts]
        is_relevant = True
        for node_id in involved_hosts["involved_nodes"]:
            if node_id not in relevant_hosts:
                is_relevant = False
        return is_relevant
    def trigger_replay(self, node_ids, pcap_file):
        """Sends a trigger command to all nodes specified in node_ids
        :param node_ids: list of mininet host names
        :param pcap_file: pcap file to replay among those hosts
        :return: None
        """
        for node_id in node_ids:
            self.signal_pcap_replay_trigger(node_id, pcap_file)
    def run(self):
        """Listens for queued commands from the main melody process and sends triggers to the appropriate replay drivers
        """
        n_pending_requests = 0
        self.log.info("Replay Orchestrator Started ...")
        with open(self.replay_plan_file, "r") as f:
            self.replay_plan = json.load(f)
        # Hosts participating in replays that were triggered but have not finished yet.
        cumulative_involved_replay_hosts = []
        nxt_replay_pcap_no = 0
        while True:
            # Non-blocking poll so the loop can also reap finished replays below.
            try:
                cmd = self.thread_cmd_queue.get(block=False)
            except queue.Empty:
                cmd = None
            if cmd == defines.EXIT_CMD:
                self.net_power_obj.enable_TCP_RST()
                self.log.info(
                    "Emulation ended. Stopping replay orchestrator ...")
                sys.exit(defines.EXIT_SUCCESS)
            elif cmd == defines.TRIGGER_CMD:
                if nxt_replay_pcap_no >= len(self.replay_plan):
                    self.log.info("All pcaps replayed!. Ignoring TRIGGER ...")
                    time.sleep(1.0)
                    continue
                # Skip stages that involve hosts absent from this emulation.
                if not self.is_pcap_stage_relevant(self.replay_plan[nxt_replay_pcap_no]):
                    nxt_replay_pcap_no += 1
                    time.sleep(1.0)
                    continue
                # Trigger immediately only when the stage shares no hosts with
                # any running replay and nothing is already queued behind it.
                if not self.are_two_pcap_stages_conflicting(
                        cumulative_involved_replay_hosts, self.replay_plan[nxt_replay_pcap_no]["involved_nodes"])\
                        and n_pending_requests == 0:
                    self.log.info(
                        "Signalled Start of Next Replay Pcap: " + \
                        self.replay_plan[nxt_replay_pcap_no]["pcap_file_path"])
                    self.net_power_obj.disable_TCP_RST()
                    self.trigger_replay(
                        self.replay_plan[nxt_replay_pcap_no]["involved_nodes"],
                        self.replay_plan[nxt_replay_pcap_no]["pcap_file_path"])
                    for node_id in self.replay_plan[nxt_replay_pcap_no][
                            "involved_nodes"]:
                        cumulative_involved_replay_hosts.append(node_id)
                    nxt_replay_pcap_no += 1
                else:
                    # Conflicting stage: defer it until the current batch drains.
                    n_pending_requests += 1
            # Once the running batch is fully drained, flush deferred requests
            # in plan order until the next conflict.
            if (len(cumulative_involved_replay_hosts) == 0 and
                    n_pending_requests > 0):
                self.log.info("End of Last Replay batch. Begin processing next "
                              "batch of pending requests ...")
                while n_pending_requests > 0:
                    if not self.are_two_pcap_stages_conflicting(
                            cumulative_involved_replay_hosts,
                            self.replay_plan[nxt_replay_pcap_no]["involved_nodes"]):
                        for node_id in self.replay_plan[nxt_replay_pcap_no][
                                "involved_nodes"]:
                            cumulative_involved_replay_hosts.append(node_id)
                        self.log.info(
                            "Signalled Start of Next Replay Pcap: " + \
                            self.replay_plan[nxt_replay_pcap_no]["pcap_file_path"])
                        self.trigger_replay(
                            self.replay_plan[nxt_replay_pcap_no]["involved_nodes"],
                            self.replay_plan[nxt_replay_pcap_no]["pcap_file_path"])
                        nxt_replay_pcap_no += 1
                        n_pending_requests -= 1
                    else:
                        break
            # Reap finished replays: each driver writes SIGNAL_FINISH_CMD back
            # into its command-channel buffer when its pcap completes.
            i = 0
            while i < len(cumulative_involved_replay_hosts):
                dummy_id, replay_status = \
                    self.net_power_obj.shared_buf_array.read(
                        f"{cumulative_involved_replay_hosts[i]}-replay-main-cmd-channel-buffer")
                if replay_status == defines.SIGNAL_FINISH_CMD:
                    cumulative_involved_replay_hosts.pop(i)
                else:
                    i += 1
            time.sleep(0.1)
|
#!/usr/bin/python3
import sys, getopt
import os
import subprocess
import os.path
# --- Command-line controlled flags (set in main()) ---
keep_change = False   # -k: keep generated code even when generation/build fails
debug = False         # -d: show subprocess output instead of discarding it
clean = False         # -c: wipe build artifacts before generating
code_generated = False  # NOTE(review): shadowed by a local in generate_and_build_code; confirm it is needed at module scope
skip_build = False    # -s: generate only, skip `swift build`
# Where generated SDKs are written, and where the test-server swaggers live.
generated_directory = r'./test/integration/generated/'
swagger_directory = r'./node_modules/@microsoft.azure/autorest.testserver/swagger/'
# ANSI escape codes used to highlight failures in terminal output.
warning_color = '\033[91m'
end_color = '\033[0m'
# Swagger base names known to generate and build successfully (the default set).
working_files = [
    "body-array",
    "body-boolean",
    "body-byte",
    "body-date",
    "body-datetime",
    "body-datetime-rfc1123",
    "body-dictionary",
    "body-duration",
    "body-file",
    "body-number",
    "body-integer",
    "body-string",
    "body-time",
    "custom-baseUrl",
    "custom-baseUrl-more-options",
    "extensible-enums-swagger",
    "head",
    "header",
    "model-flattening",
    "paging",
    "report",
    "required-optional",
    "url",
    "url-multi-collectionFormat",
    "validation",
    "xms-error-responses"
]
def get_all_files():
    """Return the base names (no extension) of every .json swagger file
    found in the configured swagger directory."""
    return [os.path.splitext(entry)[0]
            for entry in os.listdir(swagger_directory)
            if entry.endswith(".json")]
def revert_generated_code(file):
    """Discard changes to the generated output for *file*.

    Tries `git restore` first; if git reports an error (e.g. the path is
    untracked), falls back to deleting the directory outright.
    """
    global warning_color
    global end_color
    global generated_directory
    print(warning_color + "Revert the generated code." + end_color)
    # BUG FIX: stdout was printed below but never captured (only stderr was
    # piped), so the success branch always printed "None".
    git_restore_call = subprocess.run(["git", "restore", '{generated_directory}{file}'.format(file=file, generated_directory=generated_directory)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    if "error" in git_restore_call.stderr:
        print(warning_color + "Revert the generated code failed. Remove the directory" + end_color)
        os.system('rm -Rf {generated_directory}{file}'.format(file=file, generated_directory=generated_directory))
    else:
        print(git_restore_call.stdout)
def execute_command(command):
    """Run *command* through the shell and return its os.system status.

    In debug mode the command's combined output goes to the terminal;
    otherwise it is discarded entirely.
    """
    redirection = ' 2>&1' if debug else ' > /dev/null 2>&1'
    return os.system('%s%s' % (command, redirection))
def check_xcode_project_exists():
    """Return True when the current directory contains an .xcodeproj bundle."""
    return any(entry.endswith('.xcodeproj') for entry in os.listdir('.'))
def generate_and_build_code(fileList):
    """Run autorest for each swagger in *fileList* and optionally build it.

    :param fileList: swagger base names (without ".json") to process.
    Failed generations/builds are reverted unless --keep-change was given.
    """
    global clean
    global keep_change
    global warning_color
    global end_color
    global generated_directory
    for file in fileList:
        print('== Generate code for test server swagger {file}.json =='.format(file=file))
        if clean:
            # Start from a pristine state so stale artifacts cannot mask failures.
            print("Remove `Package.resolved` and `.build` and `Generated` directories.")
            os.system('rm {generated_directory}{file}/Package.resolved'.format(file=file, generated_directory=generated_directory))
            os.system('rm -Rf {generated_directory}{file}/.build'.format(file=file, generated_directory=generated_directory))
            os.system('rm -Rf {generated_directory}{file}/Source/Generated'.format(file=file, generated_directory=generated_directory))
        autorest_command = "autorest --input-file={swagger_directory}{file}.json --output-folder={generated_directory}{file} --namespace={file} --use=.".format(file=file, swagger_directory=swagger_directory, generated_directory=generated_directory)
        print("Autorest command: %s" % autorest_command)
        return_value = execute_command(autorest_command)
        # NOTE: code_generated is deliberately a local here; the module-level
        # variable of the same name was never updated by this function anyway.
        if return_value == 0:
            print("autorest code generation succeed.")
            code_generated = True
        else:
            print(warning_color + "autorest code generation failed." + end_color)
            code_generated = False
            if not keep_change:  # idiom fix: was `keep_change == 0` on a bool
                revert_generated_code(file)
        if code_generated and not skip_build:
            # Build generated code
            os.chdir('{generated_directory}{file}'.format(file=file, generated_directory=generated_directory))
            build_command = "swift build"
            return_value = execute_command(build_command)
            if return_value == 0:
                print("swift build succeed.")
                os.chdir('../../../..')
            else:
                print(warning_color + "swift build failed." + end_color)
                os.chdir('../../../..')
                if not keep_change:
                    revert_generated_code(file)
def main(argv):
    """Parse command-line options and drive generation/build.

    :param argv: the argument vector minus the program name (sys.argv[1:]).
    """
    all_files = False
    global clean
    global debug
    global keep_change
    global skip_build
    input_file = ''
    try:
        # BUG FIX: long options that take a value must end with '=' for
        # getopt, otherwise `--input-file name` was rejected.
        opts, args = getopt.getopt(argv, "acdksi:", ["all-files", "clean", "debug", "keep-change", "skip-build", "input-file="])
    except getopt.GetoptError as error:
        print("Error: {}".format(error))
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-a", "--all-files"):
            all_files = True
        if opt in ("-c", "--clean"):
            clean = True
        if opt in ("-d", "--debug"):
            debug = True
        if opt in ("-k", "--keep-change"):
            keep_change = True
        if opt in ("-i", "--input-file"):
            # BUG FIX: use the parsed option value, not argv[1], so -i works
            # regardless of where it appears on the command line.
            input_file = arg
        if opt in ("-s", "--skip-build"):
            skip_build = True
    print("== make install ==")
    execute_command("make install")
    if input_file != '':
        generate_and_build_code([input_file])
    elif all_files:
        generate_and_build_code(get_all_files())
    else:
        generate_and_build_code(working_files)
# Script entry point: forward the CLI arguments (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
|
#!/usr/bin/env python
#
# Copyright 2014 - 2016 The BCE Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the license.txt file.
#
import bce.parser.common.error as _pe
import bce.parser.molecule.ast.bfs as _ast_bfs
import bce.parser.molecule.abbreviation as _ml_abbr
import bce.parser.molecule.error as _ml_error
import bce.parser.molecule.ast.base as _ast_base
import bce.locale.msg_id as _msg_id
import bce.option as _opt
class MergeUtil:
    """Accumulator mapping atom symbols to coefficients.

    Supports per-atom accumulation, scaling, merging with another
    accumulator, and simplification of the stored coefficients.
    """
    def __init__(self):
        """Create an empty accumulator."""
        self.__data = {}
    def __len__(self):
        """Get the count of atoms in the atom dictionary.
        :rtype : int
        :return: The count.
        """
        return len(self.__data)
    def get_keys(self):
        """Get all keys of the atom dictionary.
        :rtype : list[str]
        :return: A list that contains all keys.
        """
        return list(self.__data)
    def multiply(self, coeff):
        """Multiply the coefficient of each atom with specified coefficient.
        :param coeff: The coefficient.
        """
        for symbol in self.__data:
            self.__data[symbol] = self.__data[symbol] * coeff
    def add(self, key, value):
        """Add an atom.
        :type key: str
        :param key: The atom symbol.
        :param value: The coefficient.
        """
        if key not in self.__data:
            self.__data[key] = value
        else:
            self.__data[key] = self.__data[key] + value
    def merge(self, another, coeff):
        """Merge with another instance.
        :type another: MergeUtil
        :param another: Another instance.
        :param coeff: The merge coefficient.
        """
        for symbol, value in another.__data.items():
            self.add(symbol, value * coeff)
    def simplify(self):
        """Simplify.
        :rtype : list[str]
        :return: A list that contains all atoms which were eliminated after simplifying.
        """
        eliminated = []
        # Simplify each coefficient in place, remembering the ones that
        # collapse to zero.
        for symbol in self.__data:
            reduced = self.__data[symbol].simplify()
            self.__data[symbol] = reduced
            if reduced.is_zero:
                eliminated.append(symbol)
        # Drop the zero-coefficient atoms afterwards (never during iteration).
        for symbol in eliminated:
            self.__data.pop(symbol)
        return eliminated
    def get_data(self):
        """Get the atom dictionary.
        :rtype : dict
        :return: The data.
        """
        return self.__data
def _macro_simplify(expression, mu_obj, node, options):
    """Simplify *mu_obj* in place; raise if any real atom was eliminated.

    :type expression: str
    :type mu_obj: MergeUtil
    :type node: _ast_base._ASTNodeBaseML
    :type options: _opt.Option
    :param expression: The origin expression.
    :param mu_obj: The MergeUtil object.
    :param node: The work node.
    :param options: The BCE options.
    """
    removed = mu_obj.simplify()
    # Pre-create the atom-eliminated error; it is only raised when a non-"e"
    # symbol (i.e. a real atom, not the electron marker) was eliminated.
    err = _pe.Error(_ml_error.PE_ML_ATOM_ELIMINATED,
                    _msg_id.MSG_PE_ML_ATOM_ELIMINATED_DESCRIPTION,
                    options)
    has_eliminated_atom = False
    for symbol in removed:
        if symbol == "e":
            continue
        has_eliminated_atom = True
        # Attach one traceback entry per eliminated atom.
        err.push_traceback_ex(expression,
                              node.get_starting_position_in_source_text(),
                              node.get_ending_position_in_source_text(),
                              _msg_id.MSG_PE_ML_ATOM_ELIMINATED_TB_MESSAGE,
                              {"$1": symbol})
    if has_eliminated_atom:
        raise err
def parse_ast(expression, root_node, options):
    """Parse an AST.
    :type expression: str
    :type root_node: _ast_base.ASTNodeHydrateGroup | _ast_base.ASTNodeMolecule
    :type options: _opt.Option
    :param expression: The origin expression.
    :param root_node: The root node of the AST.
    :param options: The BCE options.
    :rtype : dict
    :return: The parsed atoms dictionary.
    """
    # Get the iteration order (BFS from the root, reversed so children are
    # always processed before their parents).
    work_list = _ast_bfs.do_bfs(root_node, True)
    # Initialize the parsed node container, keyed by id() of each AST node.
    parsed = {}
    """:type : dict[int, MergeUtil]"""
    # Iterate nodes from the leaves to the root.
    for work_node in work_list:
        if work_node.is_hydrate_group() or work_node.is_molecule():
            assert isinstance(work_node, _ast_base.ASTNodeHydrateGroup) or \
                isinstance(work_node, _ast_base.ASTNodeMolecule)
            # Get the prefix number.
            coeff = work_node.get_prefix_number()
            # Initialize a new merge utility.
            build = MergeUtil()
            # Process the electronics.
            # The charge is tracked as a pseudo-atom "e" in the dictionary.
            if work_node.is_molecule():
                el_charge = work_node.get_electronic_count().simplify()
                if not el_charge.is_zero:
                    build.add("e", el_charge * coeff)
            # Iterate all children.
            for child_id in range(0, len(work_node)):
                # Get child node and its parsing result.
                child = work_node[child_id]
                child_parsed = parsed[id(child)]
                # Content check: an empty molecule inside a hydrate group
                # (e.g. a dangling ".") is a syntax error; report where.
                if work_node.is_hydrate_group() and len(child_parsed) == 0:
                    assert isinstance(child, _ast_base.ASTNodeMolecule)
                    err = _pe.Error(_ml_error.PE_ML_NO_CONTENT,
                                    _msg_id.MSG_PE_ML_NO_CONTENT_DESCRIPTION,
                                    options)
                    if child_id == 0:
                        err.push_traceback_ex(expression,
                                              child.get_ending_position_in_source_text() + 1,
                                              child.get_ending_position_in_source_text() + 1,
                                              _msg_id.MSG_PE_ML_NO_CONTENT_BEFORE)
                    elif child_id == len(work_node) - 1:
                        err.push_traceback_ex(expression,
                                              child.get_starting_position_in_source_text() - 1,
                                              child.get_starting_position_in_source_text() - 1,
                                              _msg_id.MSG_PE_ML_NO_CONTENT_AFTER)
                    else:
                        err.push_traceback_ex(expression,
                                              child.get_starting_position_in_source_text() - 1,
                                              child.get_ending_position_in_source_text() + 1,
                                              _msg_id.MSG_PE_ML_NO_CONTENT_INSIDE)
                    raise err
                # Merge.
                build.merge(child_parsed, coeff)
            # Do simplifying.
            _macro_simplify(expression, build, work_node, options)
            # Save the parsed result.
            parsed[id(work_node)] = build
        elif work_node.is_atom():
            assert isinstance(work_node, _ast_base.ASTNodeAtom)
            # Get suffix number.
            coeff = work_node.get_suffix_number()
            # Initialize a new merge utility.
            build = MergeUtil()
            # Add the atom.
            build.add(work_node.get_atom_symbol(), coeff)
            # Save the parsed result.
            parsed[id(work_node)] = build
        elif work_node.is_parenthesis():
            assert isinstance(work_node, _ast_base.ASTNodeParenthesisWrapper)
            # Get suffix number.
            coeff = work_node.get_suffix_number()
            # Initialize a new merge utility.
            build = MergeUtil()
            # Get inner node and its parsing result.
            inner_parsed = parsed[id(work_node.get_inner_node())]
            # Content check: "()" with nothing inside is an error.
            if len(inner_parsed) == 0:
                err = _pe.Error(_ml_error.PE_ML_NO_CONTENT,
                                _msg_id.MSG_PE_ML_NO_CONTENT_DESCRIPTION,
                                options)
                err.push_traceback_ex(expression,
                                      work_node.get_starting_position_in_source_text(),
                                      work_node.get_right_parenthesis_position(),
                                      _msg_id.MSG_PE_ML_NO_CONTENT_INSIDE)
                raise err
            # Merge.
            build.merge(inner_parsed, coeff)
            # Do simplifying.
            _macro_simplify(expression, build, work_node, options)
            # Save the parsed result.
            parsed[id(work_node)] = build
        elif work_node.is_abbreviation():
            assert isinstance(work_node, _ast_base.ASTNodeAbbreviation)
            # Get the abbreviation symbol.
            abbr_symbol = work_node.get_abbreviation_symbol()
            # Check symbol length.
            if len(abbr_symbol) == 0:
                err = _pe.Error(_ml_error.PE_ML_NO_CONTENT,
                                _msg_id.MSG_PE_ML_NO_CONTENT_DESCRIPTION,
                                options)
                err.push_traceback_ex(expression,
                                      work_node.get_starting_position_in_source_text(),
                                      work_node.get_right_parenthesis_position(),
                                      _msg_id.MSG_PE_ML_NO_CONTENT_INSIDE)
                raise err
            # Initialize the resolving result container.
            abbr_resolved = None
            # Try to resolve in the user defined dictionary.
            # User definitions deliberately take precedence over the built-ins.
            if options.is_user_abbreviation_dictionary_enabled():
                user_dict = options.get_user_abbreviation_dictionary()
                if abbr_symbol in user_dict:
                    abbr_resolved = user_dict[abbr_symbol]
            # Try to resolve in system dictionary if it hasn't been resolved.
            if abbr_resolved is None and abbr_symbol in _ml_abbr.ABBREVIATIONS:
                abbr_resolved = _ml_abbr.ABBREVIATIONS[abbr_symbol]
            # Raise an error if the abbreviation can't be resolved.
            if abbr_resolved is None:
                err = _pe.Error(_ml_error.PE_ML_UNSUPPORTED_ABBREVIATION,
                                _msg_id.MSG_PE_ML_UNSUPPORTED_ABBREVIATION_DESCRIPTION,
                                options)
                err.push_traceback_ex(expression,
                                      work_node.get_starting_position_in_source_text() + 1,
                                      work_node.get_right_parenthesis_position() - 1,
                                      _msg_id.MSG_PE_ML_UNSUPPORTED_ABBREVIATION_TB_MESSAGE)
                raise err
            # Initialize a new merge utility.
            build = MergeUtil()
            # Get the suffix number.
            coeff = work_node.get_suffix_number()
            # Add atoms.
            # NOTE(review): abbr_resolved is presumably a dict of
            # atom symbol -> coefficient - confirm against _ml_abbr.
            for atom_symbol in abbr_resolved:
                build.add(atom_symbol, abbr_resolved[atom_symbol] * coeff)
            # Do simplifying.
            _macro_simplify(expression, build, work_node, options)
            # Save the parsed result.
            parsed[id(work_node)] = build
        else:
            raise RuntimeError("Never reach this condition.")
    # Get the parsing result of the root node.
    root_node_parsed = parsed[id(root_node)]
    # Content check: a completely empty expression parses to nothing.
    if len(root_node_parsed) == 0:
        err = _pe.Error(_ml_error.PE_ML_NO_CONTENT,
                        _msg_id.MSG_PE_ML_NO_CONTENT_DESCRIPTION,
                        options)
        err.push_traceback_ex(expression,
                              0,
                              len(expression) - 1,
                              _msg_id.MSG_PE_ML_NO_CONTENT_INSIDE)
        raise err
    return root_node_parsed.get_data()
|
import numpy as np
np.random.seed(0)  # fix the global RNG so runs are reproducible
import pandas as pd
import matplotlib.pyplot as plt
import gym
# Build and seed the Taxi-v3 environment.
env = gym.make('Taxi-v3')
env.seed(0)
print('观察空间 = {}'.format(env.observation_space))  # observation space
print('动作空间 = {}'.format(env.action_space))  # action space
print('状态数量 = {}'.format(env.observation_space.n))  # number of states
print('动作数量 = {}'.format(env.action_space.n))  # number of actions
state = env.reset()
# Decode the flat state index into (taxi row, taxi col, passenger loc, destination idx).
taxirow, taxicol, passloc, destidx = env.unwrapped.decode(state)
print(taxirow, taxicol, passloc, destidx)
print('的士位置 = {}'.format((taxirow, taxicol)))  # taxi position
print('乘客位置 = {}'.format(env.unwrapped.locs[passloc]))  # passenger location
print('目标位置 = {}'.format(env.unwrapped.locs[destidx]))  # destination location
env.render()
class SARSAAgent:
    """Tabular SARSA agent with an epsilon-greedy behaviour policy."""
    def __init__(self, env, gamma=0.9, learning_rate=0.2, epsilon=.01):
        self.gamma = gamma
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.action_n = env.action_space.n
        # One Q-value per (state, action) pair, initialised to zero.
        self.q = np.zeros((env.observation_space.n, env.action_space.n))
    def decide(self, state):
        """Return the greedy action w.p. 1-epsilon, else a random action."""
        explore = np.random.uniform() <= self.epsilon
        if explore:
            return np.random.randint(self.action_n)
        return self.q[state].argmax()
    def learn(self, state, action, reward, next_state, done, next_action):
        """On-policy TD(0) update using the actually chosen next action."""
        target = reward
        if not done:
            target += self.gamma * self.q[next_state, next_action]
        self.q[state, action] += self.learning_rate * (target - self.q[state, action])
def play_sarsa(env, agent, train=False, render=False):
    """Run one episode with a SARSA-style agent and return the total reward.

    :param env: gym-like environment exposing reset()/step()
    :param agent: agent exposing decide(state) and
        learn(state, action, reward, next_state, done, next_action)
    :param train: when True, update the agent after every step
    :param render: when True, render the environment each step
    """
    episode_reward = 0
    observation = env.reset()
    action = agent.decide(observation)
    while True:
        if render:
            env.render()
        next_observation, reward, done, _ = env.step(action)
        episode_reward += reward
        next_action = agent.decide(next_observation)  # meaningless at terminal states
        if train:
            # BUG FIX: SARSA's update needs the on-policy next_action; it was
            # previously omitted, raising TypeError with SARSAAgent.learn.
            agent.learn(observation, action, reward, next_observation, done,
                        next_action)
        if done:
            break
        observation, action = next_observation, next_action
    return episode_reward
agent = SARSAAgent(env)
# Training
episodes = 3000
episode_rewards = []
for episode in range(episodes):
    episode_reward = play_sarsa(env, agent, train=True)
    episode_rewards.append(episode_reward)
plt.plot(episode_rewards)
# Evaluation
agent.epsilon = 0.  # disable exploration (pure greedy policy)
episode_rewards = [play_sarsa(env, agent) for _ in range(100)]
print('平均回合奖励 = {} / {} = {}'.format(sum(episode_rewards),len(episode_rewards), np.mean(episode_rewards)))
print(pd.DataFrame(agent.q))
# Derive the deterministic greedy policy as a one-hot matrix over actions.
policy = np.eye(agent.action_n)[agent.q.argmax(axis=-1)]
print(pd.DataFrame(policy))
class ExpectedSARSAAgent:
    """Tabular Expected SARSA agent with an epsilon-greedy behaviour policy."""
    def __init__(self, env, gamma=0.9, learning_rate=0.1, epsilon=.01):
        self.gamma = gamma
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.action_n = env.action_space.n
        self.q = np.zeros((env.observation_space.n, env.action_space.n))
    def decide(self, state):
        """Return the greedy action w.p. 1-epsilon, else a random action."""
        if np.random.uniform() <= self.epsilon:
            return np.random.randint(self.action_n)
        return self.q[state].argmax()
    def learn(self, state, action, reward, next_state, done):
        """TD(0) update towards the expected next-state value under the policy."""
        # Expectation over the epsilon-greedy policy: uniform part + greedy part.
        expected_v = (self.q[next_state].mean() * self.epsilon +
                      self.q[next_state].max() * (1. - self.epsilon))
        target = reward + self.gamma * expected_v * (1. - done)
        self.q[state, action] += self.learning_rate * (target - self.q[state, action])
def play_qlearning(env, agent, train=False, render=False):
    """Run one episode with a Q-learning-style agent; return the total reward.

    The agent only needs decide(state) and, when training,
    learn(state, action, reward, next_state, done).
    """
    total_reward = 0
    state = env.reset()
    done = False
    while not done:
        if render:
            env.render()
        action = agent.decide(state)
        next_state, reward, done, _ = env.step(action)
        total_reward += reward
        if train:
            agent.learn(state, action, reward, next_state, done)
        state = next_state
    return total_reward
agent = ExpectedSARSAAgent(env)
# Training
episodes = 5000
episode_rewards = []
for episode in range(episodes):
    episode_reward = play_qlearning(env, agent, train=True)
    episode_rewards.append(episode_reward)
plt.plot(episode_rewards)
# Evaluation
agent.epsilon = 0.  # disable exploration (pure greedy policy)
episode_rewards = [play_qlearning(env, agent) for _ in range(100)]
print('平均回合奖励 = {} / {} = {}'.format(sum(episode_rewards),
        len(episode_rewards), np.mean(episode_rewards)))
class QLearningAgent:
    """Tabular Q-learning agent with an epsilon-greedy behaviour policy."""
    def __init__(self, env, gamma=0.9, learning_rate=0.1, epsilon=.01):
        self.gamma = gamma
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.action_n = env.action_space.n
        self.q = np.zeros((env.observation_space.n, env.action_space.n))
    def decide(self, state):
        """Return the greedy action w.p. 1-epsilon, else a random action."""
        if np.random.uniform() <= self.epsilon:
            return np.random.randint(self.action_n)
        return self.q[state].argmax()
    def learn(self, state, action, reward, next_state, done):
        """Off-policy TD(0) update towards the greedy bootstrap target."""
        target = reward
        if not done:
            target += self.gamma * self.q[next_state].max()
        self.q[state, action] += self.learning_rate * (target - self.q[state, action])
agent = QLearningAgent(env)
# Training
episodes = 4000
episode_rewards = []
for episode in range(episodes):
    episode_reward = play_qlearning(env,agent, train=True)
    episode_rewards.append(episode_reward)
plt.plot(episode_rewards)
# Evaluation with exploration disabled (pure greedy policy)
agent.epsilon = 0
episode_rewards = [play_qlearning(env,agent) for _ in range(100)]
print('平均回合奖励 = {} / {} = {}'.format(sum(episode_rewards),
        len(episode_rewards), np.mean(episode_rewards)))
class DoubleQLearningAgent:
    """Tabular double Q-learning agent (two Q tables to reduce maximization bias)."""
    def __init__(self, env, gamma=0.9, learning_rate=0.1, epsilon=.01):
        self.gamma = gamma
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.action_n = env.action_space.n
        # Two independent Q tables; each update trains one using the other's estimate.
        self.q0 = np.zeros((env.observation_space.n, env.action_space.n))
        self.q1 = np.zeros((env.observation_space.n, env.action_space.n))
    def decide(self, state):
        """Act epsilon-greedily on the sum of both Q tables."""
        if np.random.uniform() <= self.epsilon:
            return np.random.randint(self.action_n)
        combined = self.q0 + self.q1
        return combined[state].argmax()
    def learn(self, state, action, reward, next_state, done):
        """Update one randomly chosen table with the other's value estimate."""
        # Randomly swap roles so each table is updated half the time.
        if np.random.randint(2):
            self.q0, self.q1 = self.q1, self.q0
        greedy_action = self.q0[next_state].argmax()
        target = reward + self.gamma * self.q1[next_state, greedy_action] * (1. - done)
        self.q0[state, action] += self.learning_rate * (target - self.q0[state, action])
agent = DoubleQLearningAgent(env)
# Training
episodes = 9000
episode_rewards = []
for episode in range(episodes):
    episode_reward = play_qlearning(env, agent, train=True)
    episode_rewards.append(episode_reward)
plt.plot(episode_rewards)
# Evaluation
agent.epsilon = 0.  # disable exploration (pure greedy policy)
episode_rewards = [play_qlearning(env, agent) for _ in range(100)]
print('平均回合奖励 = {} / {} = {}'.format(sum(episode_rewards),
        len(episode_rewards), np.mean(episode_rewards)))
class SARSALambdaAgent(SARSAAgent):
    """SARSA(lambda): SARSA augmented with accumulating eligibility traces."""
    def __init__(self, env, lambd=0.6, beta=1.,
            gamma=0.9, learning_rate=0.1, epsilon=.01):
        super().__init__(env, gamma=gamma, learning_rate=learning_rate,
                epsilon=epsilon)
        self.lambd = lambd
        self.beta = beta
        # Eligibility trace per (state, action) pair.
        self.e = np.zeros((env.observation_space.n, env.action_space.n))
    def learn(self, state, action, reward, next_state, done, next_action):
        """TD update applied to the whole table, weighted by the traces."""
        # Decay all traces, then reinforce the visited pair.
        self.e *= self.lambd * self.gamma
        self.e[state, action] = 1. + self.beta * self.e[state, action]
        # Value update: every entry moves in proportion to its trace.
        target = reward
        if not done:
            target += self.gamma * self.q[next_state, next_action]
        self.q += self.learning_rate * self.e * (target - self.q[state, action])
        if done:
            # Traces are episode-local; clear them at episode end.
            self.e *= 0.
agent = SARSALambdaAgent(env)
# Training
episodes = 5000
episode_rewards = []
for episode in range(episodes):
    episode_reward = play_sarsa(env, agent, train=True)
    episode_rewards.append(episode_reward)
plt.plot(episode_rewards)
# Evaluation
agent.epsilon = 0.  # disable exploration (pure greedy policy)
episode_rewards = [play_sarsa(env, agent, train=False) for _ in range(100)]
print('平均回合奖励 = {} / {} = {}'.format(sum(episode_rewards),
len(episode_rewards), np.mean(episode_rewards))) |
<reponame>cty9999/VITAE-mm-pi
#!/usr/local/bin/python
import dynclipy
task = dynclipy.main()
# avoid errors due to no $DISPLAY environment variable available when running sc.pl.paga
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import numpy as np
import h5py
import json
import scanpy as sc
import anndata
import numba
import warnings
import time
# Wall-clock timing checkpoints reported back through dynclipy at the end.
checkpoints = {}

# ____________________________________________________________________________
# Load data ####
counts = task["counts"]
parameters = task["parameters"]
start_id = task["priors"]["start_id"]
# Several start cells may be given; DPT needs a single root, so take the first.
if isinstance(start_id, list):
    start_id = start_id[0]
if "groups_id" in task["priors"]:
    groups_id = task["priors"]['groups_id']
else:
    groups_id = None

# create dataset; when a prior grouping is supplied, reuse it as the
# "louvain" clustering instead of computing clusters below
if groups_id is not None:
    obs = pd.DataFrame(groups_id)
    obs.index = groups_id["cell_id"]
    obs["louvain"] = obs["group_id"].astype("category")
    adata = anndata.AnnData(counts)
    adata.obs = obs
else:
    adata = anndata.AnnData(counts)
# NOTE(review): recorded *before* the preprocessing section despite the
# name "afterpreproc" — confirm this placement is intended.
checkpoints["method_afterpreproc"] = time.time()

# ____________________________________________________________________________
# Basic preprocessing ####

# normalisation & filtering
if counts.shape[1] < 100 and parameters["filter_features"]:
    print("You have less than 100 features, but the filter_features parameter is true. This will likely result in an error. Disable filter_features to avoid this")
if parameters["filter_features"]:
    n_top_genes = min(2000, counts.shape[1])
    sc.pp.recipe_zheng17(adata, n_top_genes=n_top_genes)

# precalculating some dimensionality reductions
sc.tl.pca(adata, n_comps=parameters["n_comps"])
with warnings.catch_warnings():
    # silence numba deprecation chatter triggered inside scanpy's neighbors
    warnings.simplefilter('ignore', numba.errors.NumbaDeprecationWarning)
    sc.pp.neighbors(adata, n_neighbors=parameters["n_neighbors"])

# denoise the graph by recomputing it in the first few diffusion components
if parameters["n_dcs"] != 0:
    sc.tl.diffmap(adata, n_comps=parameters["n_dcs"])

# ____________________________________________________________________________
# Cluster, infer trajectory, infer pseudotime, compute dimension reduction ###

# add grouping if not provided
if groups_id is None:
    sc.tl.louvain(adata, resolution=parameters["resolution"])

# run paga
sc.tl.paga(adata)
# compute a layout for the paga graph
# - this simply uses a Fruchterman-Reingold layout, a tree layout or any other
#   popular graph layout is also possible
# - to obtain a clean visual representation, one can discard low-confidence
#   edges using the parameter threshold
sc.pl.paga(adata, threshold=0.01, layout='fr', show=False)

# run dpt for pseudotime information that is overlayed with paga;
# the root cell index is looked up from the prior start_id
adata.uns['iroot'] = np.where(adata.obs.index == start_id)[0][0]
if parameters["n_dcs"] == 0:
    sc.tl.diffmap(adata)
sc.tl.dpt(adata, n_dcs=min(adata.obsm["X_diffmap"].shape[1], 10))

# run umap for a dimension-reduced embedding, use the positions of the paga
# graph to initialize this embedding
if parameters["embedding_type"] == 'umap':
    sc.tl.umap(adata, init_pos='paga')
    dimred_name = 'X_umap'
else:
    sc.tl.draw_graph(adata, init_pos='paga')
    dimred_name = "X_draw_graph_" + parameters["embedding_type"]
checkpoints["method_aftermethod"] = time.time()
# ____________________________________________________________________________
# Process & save output ####

# grouping: cluster assignment per cell
grouping = pd.DataFrame({"cell_id": adata.obs.index, "group_id": adata.obs.louvain})

# milestone network: edges of PAGA's connectivity tree between clusters
milestone_network = pd.DataFrame(
    adata.uns["paga"]["connectivities_tree"].todense(),
    index=adata.obs.louvain.cat.categories,
    columns=adata.obs.louvain.cat.categories
).stack().reset_index()
milestone_network.columns = ["from", "to", "length"]
milestone_network = milestone_network.query("length > 0").reset_index(drop=True)
milestone_network["directed"] = False
print(milestone_network)

# dimred: per-cell embedding coordinates
dimred = pd.DataFrame([x for x in adata.obsm[dimred_name].T]).T
dimred.columns = ["comp_" + str(i + 1) for i in range(dimred.shape[1])]
dimred["cell_id"] = adata.obs.index

# branch progressions: the scaled dpt_pseudotime within every cluster
branch_progressions = adata.obs
# replace unreachable pseudotime (inf) with maximal pseudotime (1)
branch_progressions["dpt_pseudotime"] = branch_progressions["dpt_pseudotime"].replace([np.inf, -np.inf], 1)
branch_progressions["percentage"] = branch_progressions.groupby("louvain")["dpt_pseudotime"].apply(lambda x: (x - x.min()) / (x.max() - x.min())).fillna(0.5)
branch_progressions["cell_id"] = adata.obs.index
# BUG FIX: np.str (a deprecated alias of the builtin) was removed in
# NumPy 1.24, making the original .astype(np.str) an AttributeError.
# The builtin str is the documented, behavior-identical replacement.
branch_progressions["branch_id"] = branch_progressions["louvain"].astype(str)
branch_progressions = branch_progressions[["cell_id", "branch_id", "percentage"]]

# branches:
# - length = difference between max and min dpt_pseudotime within every cluster
# - directed = not yet correctly inferred
branches = adata.obs.groupby("louvain").apply(lambda x: x["dpt_pseudotime"].max() - x["dpt_pseudotime"].min()).reset_index()
branches.columns = ["branch_id", "length"]
branches["branch_id"] = branches["branch_id"].astype(str)  # np.str removed in NumPy 1.24
branches["directed"] = True
print(branches)

# branch network: determine order of from and to based on difference in
# average pseudotime.  .copy() so the .at writes below operate on an
# independent frame rather than a view of milestone_network (avoids
# pandas SettingWithCopy ambiguity).
branch_network = milestone_network[["from", "to"]].copy()
average_pseudotime = adata.obs.groupby("louvain")["dpt_pseudotime"].mean()
for i, (branch_from, branch_to) in enumerate(zip(branch_network["from"], branch_network["to"])):
    # orient each edge from lower to higher average pseudotime
    if average_pseudotime[branch_from] > average_pseudotime[branch_to]:
        branch_network.at[i, "to"] = branch_from
        branch_network.at[i, "from"] = branch_to
print(branch_network)

# save
dataset = dynclipy.wrap_data(cell_ids=adata.obs.index)
dataset.add_branch_trajectory(
    grouping=grouping,
    branch_progressions=branch_progressions,
    branches=branches,
    branch_network=branch_network
)
dataset.add_dimred(dimred=dimred)
dataset.add_timings(checkpoints)
dataset.write_output(task["output"])
|
<filename>catfacts.py
#!/usr/bin/env python3
import random
import time
import email
import smtplib
import imaplib
import textwrap
import shutil
import tempfile
import traceback
import sys
import argparse
import os.path
import re
import configparser
import logging
from email.utils import parseaddr
from email.mime.text import MIMEText
# SMS length budget.
# 160 is the standard, but I've had texts cut off with it
# it might include the length of the email we're sending from
# 140 should be safe?
# this may be because it includes the name of the sender
TEXT_MESSAGE_SIZE = 140

# Pause between the parts of a multi-part text message:
# too small and message parts arrive not in order
# too long and message parts arrive more separately...
DELAY_BETWEEN_MESSAGE_PARTS = 10  # in seconds

# messages sent when inviting someone to cat facts
# assumed to be small enough for one text message
_INVITE_MESSAGE = "Thank you for signing up for Cat Facts! You will now "\
    "receive {rlist} fun facts about CATS! >o<"
INVITE_MESSAGES = {
    'hourly': _INVITE_MESSAGE.format(rlist='hourly'),
    'daily': _INVITE_MESSAGE.format(rlist='daily'),
}
UNSUBSCRIBE_MESSAGE = "Unsubscribe? You gotta be kitten me! "\
    "You are now unsubscribed from Cat Facts."
INVITE_USAGE_MESSAGE = "<invalid arguments> to invite someone, either send " \
    "'invite sms <number> <provider>' or 'invite email <address>'"

# Maps carrier name -> email-to-SMS gateway domain.
text_gateways = {
    # "carriername" : "whatever.com"
    'verizon': 'vtext.com',
    'att': 'txt.att.net',
    'at&t': 'txt.att.net',
    'sprint': 'messaging.sprintpcs.com',
    'alltel': 'message.alltel.com',
    'boost': 'myboostmobile.com',
    'nextel': 'messaging.nextel.com',
    't-mobile': 'tmomail.net',
    'tmobile': 'tmomail.net',
    'uscellular': 'email.uscc.net',
    'vmobile': 'vmobl.com'
}

# like the above, but in reverse
# includes some additional domains that we may be sent messages from
reverse_gateways = {
    'vtext.com': 'verizon',
    'vzwpix.com': 'verizon',
    'txt.att.net': 'att',
    'messaging.sprintpcs.com': 'sprint',
    'message.alltel.com': 'alltel',
    'myboostmobile.com': 'boost',
    'messaging.nextel.com': 'nextel',
    'tmomail.net': 't-mobile',
    'email.uscc.net': 'uscellular',
    'vmobl.com': 'vmobile',
}

# Log everything (DEBUG and up) to LOG.txt with timestamps.
LOG_FORMAT = "[%(asctime)s] %(message)s"
LOG_FILE = "LOG.txt"
logging.basicConfig(format=LOG_FORMAT, filename=LOG_FILE, level=logging.DEBUG)
def get_phone_email(phone_number, provider):
    """Return the email-to-SMS gateway address for *phone_number*.

    Raises NotImplementedError when *provider* has no known gateway.
    """
    # membership test directly on the dict (the original built a throwaway
    # list of the keys just to scan it linearly)
    if provider in text_gateways:
        return phone_number + '@' + text_gateways[provider]
    raise NotImplementedError("I don't know how to handle " + provider +
                              " yet as a provider")
def is_bad_address(address):
    """Return True for senders we must never reply to.

    Matches mailer-daemon, postmaster and no-reply style addresses so the
    bot does not get into a loop emailing other automated systems.
    """
    pattern = re.compile(r"mailer.*daemon|post.*master|no.*reply",
                         re.IGNORECASE)
    return pattern.search(address) is not None
def split_text(text):
    """Break *text* into SMS-sized chunks of TEXT_MESSAGE_SIZE characters."""
    chunks = textwrap.wrap(text, width=TEXT_MESSAGE_SIZE)
    return chunks
def get_phone_recipients(rlist):
    """Read the SMS recipients of list *rlist* from sms/<rlist>.txt.

    Returns a list of (number, provider) tuples.  Lines starting with '#'
    are treated as commented out and skipped.
    """
    file_path = os.path.join('sms', rlist + '.txt')
    lines = get_nonwhitespace_lines_from_file(file_path)
    # each active line holds "<number> <provider>"; only the first two
    # whitespace-separated fields matter
    return [tuple(line.split()[0:2]) for line in lines if line[0] != '#']
def get_email_recipients(rlist):
    """Read the email recipients of list *rlist* from email/<rlist>.txt.

    Lines starting with '#' are treated as commented out and skipped.
    """
    file_path = os.path.join('email', rlist + '.txt')
    lines = get_nonwhitespace_lines_from_file(file_path)
    return [line for line in lines if line[0] != '#']
def get_nonwhitespace_lines_from_file(filename):
    """Return the file's lines with trailing newlines stripped, skipping
    lines that are empty or whitespace-only.

    Uses a context manager so the handle is closed even if reading raises
    (the original left the file open on error).
    """
    with open(filename, mode='r') as f:
        lines = f.readlines()
    return [line.rstrip('\n') for line in lines
            if not (line == '' or line.isspace())]
def get_random_fact():
    """Pick one random cat fact from facts.txt."""
    return random.choice(get_nonwhitespace_lines_from_file('facts.txt'))
def get_random_promo():
    """Pick one random promotional tagline from promos.txt."""
    return random.choice(get_nonwhitespace_lines_from_file('promos.txt'))
def get_username_and_password():
    """Read the mail credentials from the [Login] section of config.cfg."""
    config = configparser.ConfigParser()
    config.read('config.cfg')
    return (config.get('Login', 'username'),
            config.get('Login', 'password'))
def get_alert_recipient():
    """Read the admin alert address from the [Alert] section of config.cfg."""
    config = configparser.ConfigParser()
    config.read('config.cfg')
    recipient = config.get('Alert', 'recipient')
    return recipient
def login_to_gmail(username, password):
    """Open an authenticated SMTPS session to Gmail and return it."""
    # Gmail speaks implicit TLS on 465 (587 would require STARTTLS instead).
    server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
    server.ehlo()
    server.login(username, password)
    return server
def logout(mail_server):
    # Close the SMTP session opened by login_to_gmail().
    mail_server.close()
def mail(username, to, text, subject, mail_server):
    """Send *text* from *username* to *to* through *mail_server*.

    A falsy *subject* (None/empty) leaves the Subject header unset, which
    is what the SMS gateways get.
    """
    message = MIMEText(text)
    message['From'] = username
    message['To'] = to
    if subject:
        message['Subject'] = subject
    logging.info('Sending email to %s', to)
    mail_server.sendmail(username, to, message.as_string())
def send_invite(username, email_or_number, provider, mail_server,
                rlist='daily'):
    """Send the welcome message for list *rlist* ('daily' or 'hourly').

    *provider* == 'email' means *email_or_number* is already an address;
    any other provider routes the number through its SMS gateway (and
    omits the subject, as SMS messages carry none).
    """
    logging.info('Sending invite to %s %s', email_or_number, provider)
    if provider == 'email':
        address, subject = email_or_number, "Cat Facts"
    else:
        address = get_phone_email(email_or_number, provider)
        subject = None
    mail(username, address, INVITE_MESSAGES[rlist], subject, mail_server)
def send_fact(rlist):
    """Send one random cat fact to everyone on list *rlist*.

    Email recipients receive the fact in a single message; phone
    recipients receive it split into SMS-sized parts, with a delay
    between parts so they arrive in order.
    """
    message = get_random_fact()
    # 40% of facts carry a promotional tagline appended to them
    if random.random() < 0.4:
        message = ' '.join((message, get_random_promo()))
    username, password = get_username_and_password()
    mail_server = login_to_gmail(username, password)
    # send all emails
    logging.info('Sending over email: %s', message)
    for address in get_email_recipients(rlist):
        mail(username, address, message, "Cat Facts", mail_server)
    # send all texts, part by part
    phone_recipients = get_phone_recipients(rlist)
    for part in split_text(message):
        logging.info('Sending over SMS: %s', part)
        for number, provider in phone_recipients:
            mail(username, get_phone_email(number, provider), part, None,
                 mail_server)
        time.sleep(DELAY_BETWEEN_MESSAGE_PARTS)
def get_number_and_provider(email):
    """Get a phone number and service provider from an email address.

    Gateway addresses look like "<number>@<gateway-domain>"; the number
    is the local part and the provider is looked up from the domain.
    Returns (None, None) when the address is not a known SMS gateway.
    """
    number, sep, domain = email.partition("@")
    if not sep:
        # no '@' at all -- not an email address we can split
        return None, None
    provider = reverse_gateways.get(domain)
    if provider is None:
        return None, None
    return number, provider
def add_phone_recipient_to_file(number, provider, rlist='daily'):
    """Append "<number> <provider>" to sms/<rlist>.txt.

    Uses a context manager so the handle is flushed and closed even if
    the write raises (the original never closed it on error).
    """
    with open(os.path.join('sms', rlist + '.txt'), 'a') as f:
        f.write("%s %s\n" % (number, provider))
def add_email_recipient_to_file(email, rlist='daily'):
    """Append *email* to email/<rlist>.txt.

    Uses a context manager so the handle is flushed and closed even if
    the write raises (the original never closed it on error).
    """
    with open(os.path.join('email', rlist + '.txt'), 'a') as f:
        f.write(email + '\n')
def get_reply_message():
    """Build the fallback reply: a random fact plus unsubscribe help."""
    promo = "Remember, text \"UnSuBsCrIbE\" at any time to unsubscribe "\
        "from Cat Facts!"
    return ' '.join(('<command not recognized>', get_random_fact(), promo))
def remove_matching_lines_from_file(text, filename):
    """Remove every line of *filename* that contains *text*.

    Surviving lines are copied to a temporary file which then replaces
    the original.  Context managers close both handles even on error
    (the original leaked them if anything raised mid-copy).
    """
    with open(filename, 'r') as f, \
            tempfile.NamedTemporaryFile(mode='w+t', delete=False) as tmpfile:
        for line in f:
            if text not in line:
                tmpfile.write(line)
        tmpname = tmpfile.name
    # replace the original file with the temporary (filtered) file
    shutil.move(tmpname, filename)
def get_command_from_text(text):
    """Parse a message body for a bot command.

    Returns (command, arguments); (None, []) when no command word is
    found.  Matching is case-insensitive and by substring, so e.g.
    "please UNSUBSCRIBE me" works.  'invite' additionally parses its
    sms/email arguments.
    """
    # parse everything in lowercase
    text = text.lower()
    command = None
    arguments = []
    # NOTE: the check order matters and is preserved from the original --
    # e.g. a text containing both "daily" and "invite" resolves to 'daily'.
    for possible_command in ['unsubscribe', 'daily', 'hourly', 'invite',
                             'nuke_everything']:
        if possible_command in text:
            command = possible_command
            if command == 'invite':
                # "invite sms <10-digit number> <provider>".
                # Raw strings fix the original's non-raw "\S", which is
                # an invalid escape sequence (SyntaxWarning on modern
                # Python); the pattern itself is unchanged.
                match = re.search(r"invite sms [0-9]{10} \S+", text)
                if match:
                    words = match.group().split()
                    arguments = ['sms', words[2], words[3]]
                    break
                # "invite email <address>"
                match = re.search(r"invite email \S+", text)
                if match:
                    arguments = ['email', match.group().split()[2]]
                    break
            break
    return command, arguments
def get_command_from_message(message):
    """Scan each text/plain part of *message* for a command.

    Returns the first (command, arguments) found, else (None, []).
    """
    for part in message.walk():
        # BUG FIX: the original tested `in ('text/plain')` -- without a
        # trailing comma that is a *substring* test against a string, not
        # tuple membership, so e.g. a content type of 'text' would match.
        if part.get_content_type() in ('text/plain',):  # TODO: parse 'text/html'
            message_text = part.get_payload()
            command, arguments = get_command_from_text(message_text)
            if command:
                return command, arguments
    # no command found in any part
    return None, []
def make_alert_message(message):
    """Summarize *message* for the admin: sender plus all plain-text parts."""
    pieces = [' '.join(('Message from', str(message['From']), ': '))]
    for part in message.walk():
        if part.get_content_type() in ('text/plain',):
            pieces.append(part.get_payload())
    return ''.join(pieces)
def nuke_everything():
    """Empty every recipient list (daily+hourly, for both sms and email)."""
    for rlist in ('daily', 'hourly'):
        for recipient_type in ('sms', 'email'):
            # opening with mode 'w' truncates the file; the context
            # manager also closes the handle, which the original never did
            with open(os.path.join(recipient_type, rlist + '.txt'), 'w'):
                pass
def remove_recipient_from_files(recipient, recipient_type):
    """Drop *recipient* from both the hourly and daily *recipient_type* lists.

    For 'sms', *recipient* is a (number, provider) pair and matching is
    done on the number alone; otherwise it is an email address string.
    """
    logging.info('removing recipient %s %s', recipient, recipient_type)
    needle = recipient[0] if recipient_type == 'sms' else recipient
    for rlist in ('hourly', 'daily'):
        file_path = os.path.join(recipient_type, rlist + '.txt')
        remove_matching_lines_from_file(needle, file_path)
def reply():
    """Process the inbox: handle commands, subscribe newcomers, archive mail.

    For every message currently in the Gmail inbox:
      * skip mailer-daemon/postmaster/no-reply senders,
      * forward an alert copy of the message to the admin,
      * dispatch any recognized command (unsubscribe / daily / hourly /
        invite / nuke_everything),
      * subscribe unknown senders (default list: daily),
      * then delete the message over IMAP so it is not processed twice
        (gmail archives IMAP-deleted mail by default).
    """
    username, password = get_username_and_password()
    # current subscribers, so we can tell new senders from existing ones
    email_recipients = []
    phone_recipients = []
    for rlist in ('daily', 'hourly'):
        email_recipients.extend(get_email_recipients(rlist))
        phone_recipients.extend(get_phone_recipients(rlist))
    imap_mail = imaplib.IMAP4_SSL('imap.gmail.com')
    imap_mail.login(username, password)
    imap_mail.list()
    imap_mail.select("INBOX")
    # get all mail in the inbox
    result, data = imap_mail.uid('search', None, 'ALL')
    # login to smtp in preparation for sending mail
    mail_server = login_to_gmail(username, password)
    # `uid` instead of the original `id`, which shadowed the builtin
    for uid in data[0].split():
        uid = uid.decode("utf-8")
        # fetch the raw message for this uid
        typ, msg_data = imap_mail.uid("fetch", uid, '(RFC822)')
        raw_email = msg_data[0][1]
        # get a nice interface for the mail via the email library
        message = email.message_from_bytes(raw_email)
        sender = message['From']
        # avoid blacklisted senders (noreply, mailer daemons, etc.)
        if is_bad_address(sender):
            continue
        # find out if the message includes a command
        command, arguments = get_command_from_message(message)
        # extract /just/ the plain address, e.g. "a@b.c" from "Foo <a@b.c>"
        sender = parseaddr(sender)[1]
        logging.info("Got email! Sender=%s", sender)
        # classify the sender: SMS-gateway address or plain email
        number, provider = get_number_and_provider(sender)
        logging.debug("Detected number:%s provider:%s", number, provider)
        recipient_type = 'sms' if number else 'email'
        # send alert that we got a message
        logging.info("Sending alert...")
        mail(username, get_alert_recipient(), make_alert_message(message), '',
             mail_server)
        if command == 'nuke_everything':
            nuke_everything()
        else:
            existing_recipient = ((number, provider) in phone_recipients
                                  or sender in email_recipients)
            if existing_recipient:
                if command == 'unsubscribe':
                    logging.info('This recipient is unsubscribing.')
                    # remove them from the files AND from the in-memory
                    # lists, so a second mail in the same run is treated
                    # as coming from a non-subscriber
                    if recipient_type == 'sms':
                        remove_recipient_from_files((number, provider),
                                                    recipient_type)
                        phone_recipients.remove((number, provider))
                    else:
                        remove_recipient_from_files(sender, recipient_type)
                        email_recipients.remove(sender)
                    logging.info('Replying with unsubscription message...')
                    mail(username, sender, UNSUBSCRIBE_MESSAGE, None,
                         mail_server)
                elif command in ('hourly', 'daily'):
                    logging.info('This person wants %s cat facts', command)
                    # remove the user from any list they were on before
                    if recipient_type == 'sms':
                        remove_recipient_from_files((number, provider),
                                                    recipient_type)
                    else:
                        # BUG FIX: the original passed `email` here -- the
                        # *module* imported at the top of the file, not the
                        # sender's address -- so list switches for email
                        # subscribers never removed the old entry.
                        remove_recipient_from_files(sender, recipient_type)
                    # add the user to their new list
                    if recipient_type == 'sms':
                        add_phone_recipient_to_file(number, provider,
                                                    rlist=command)
                    else:
                        add_email_recipient_to_file(sender, rlist=command)
                    logging.info('Replying with message...')
                    text = "You will now receive %s cat facts." % command
                    mail(username, sender, text, None, mail_server)
                elif command == 'invite':
                    logging.info('This person wants to invite someone...')
                    if len(arguments) == 0:
                        logging.info("Insufficient arguments")
                        logging.info("Sending invite usage message")
                        mail(username, sender, INVITE_USAGE_MESSAGE, None,
                             mail_server)
                    else:
                        method = arguments[0]
                        if method == 'sms':
                            logging.info('This person wants to invite via sms')
                            number, provider = arguments[1:3]
                            if provider in text_gateways:
                                logging.info('Inviting the number...')
                                invite_number(number, provider)
                            else:
                                logging.info(
                                    "Invite fail: do not know provider %s",
                                    provider)
                                mail(username, sender,
                                     "Invite fail: do not know provider %s"
                                     % provider,
                                     None, mail_server)
                        elif method == 'email':
                            logging.info(
                                'This person wants to invite via email.')
                            email_address = arguments[1]
                            logging.info('Inviting the email...')
                            invite_email(email_address)
                else:
                    # no command was detected: reply with a fact and the
                    # unsubscribe help text, split into SMS-sized parts
                    logging.info("Replying with command not found message...")
                    for reply_part in split_text(get_reply_message()):
                        logging.info(reply_part)
                        mail(username, sender, reply_part, None, mail_server)
                        time.sleep(DELAY_BETWEEN_MESSAGE_PARTS)
            else:
                # we don't know this person
                if command == 'unsubscribe':
                    # silently ignore an unsubscribe from a non-subscriber
                    pass
                else:
                    # a new person: subscribe them, honoring any list
                    # preference in their message (default: daily facts)
                    rlist = command if command in ('hourly', 'daily') \
                        else 'daily'
                    if recipient_type == 'sms':
                        add_phone_recipient_to_file(number, provider,
                                                    rlist=rlist)
                        phone_recipients.append((number, provider))
                        # welcome message matching the list they joined
                        # (the original always sent the 'daily' text)
                        send_invite(username, number, provider, mail_server,
                                    rlist=rlist)
                    else:
                        add_email_recipient_to_file(sender, rlist=rlist)
                        email_recipients.append(sender)
                        send_invite(username, sender, 'email', mail_server,
                                    rlist=rlist)
        # move the email to archive so we don't reply to it again
        # (in gmail, by default, mail deleted via imap is archived)
        imap_mail.uid('store', uid, '+FLAGS', r'(\Deleted)')
        imap_mail.expunge()
    # logout of smtp
    logout(mail_server)
    # logout of imap
    imap_mail.close()
    imap_mail.logout()
def invite_number(number, provider, rlist='daily'):
    """Subscribe *number* to list *rlist* and text them a welcome message."""
    username, password = get_username_and_password()
    mail_server = login_to_gmail(username, password)
    add_phone_recipient_to_file(number, provider, rlist)
    # pass rlist through so the welcome text names the list they actually
    # joined (the original always sent the 'daily' message)
    send_invite(username, number, provider, mail_server, rlist=rlist)
def invite_email(email, rlist='daily'):
    """Subscribe *email* to list *rlist* and send them a welcome message."""
    username, password = get_username_and_password()
    mail_server = login_to_gmail(username, password)
    add_email_recipient_to_file(email, rlist)
    # pass rlist through so the welcome text names the list they actually
    # joined (the original always sent the 'daily' message)
    send_invite(username, email, 'email', mail_server, rlist=rlist)
def main():
    """Command-line entry point: send facts, process replies, or invite."""
    parser = argparse.ArgumentParser(
        description="Send cat facts via email and sms")
    actions = parser.add_subparsers(dest="action")

    # `send <list>` -- mail a fact to everyone on the named list
    p_send = actions.add_parser("send", help="send messages")
    p_send.add_argument(
        "list", help="list of users to send to",
        type=str)

    # `reply` -- scan the inbox and answer/subscribe senders
    actions.add_parser(
        "reply", help="read messages and send replies")

    # `invite [-l LIST] (sms NUMBER PROVIDER | email ADDRESS)`
    p_invite = actions.add_parser(
        "invite", help="invite users to cat facts")
    p_invite.add_argument(
        '-l', '--list', help="list to add the user to", default='daily',
        choices=['hourly', 'daily'])
    methods = p_invite.add_subparsers(dest="method")
    p_sms = methods.add_parser(
        "sms", help="invite a cell phone number")
    p_sms.add_argument(
        "number", help="phone number to invite")
    p_sms.add_argument(
        "provider", help="phone service provider of number")
    p_email = methods.add_parser(
        "email", help="invite an email address")
    p_email.add_argument(
        "address", help="email address to send facts to")

    args = parser.parse_args()
    if args.action == 'send':
        logging.info('Sending to %s...', args.list)
        send_fact(args.list)
    elif args.action == 'reply':
        logging.info('Replying...')
        reply()
    elif args.action == 'invite':
        logging.info('Inviting via %s...', args.method)
        if args.method == 'sms':
            invite_number(args.number, args.provider, rlist=args.list)
        elif args.method == 'email':
            invite_email(args.address, rlist=args.list)
    logging.info("Finished running.")
if __name__ == '__main__':
    try:
        main()
    except:
        # make sure any exceptions are logged to LOG.txt before
        # propagating; the bare except is deliberate since we re-raise
        logging.exception("Got exception")
        raise
|
import attitude_utils as attu
import env_utils as envu
import numpy as np
from time import time
class Dynamics_model_6dof(object):
    """6-DOF rigid-body dynamics: advances an agent's state one step.

    The dynamics model takes an agent model object (and later an obstacle
    object) and modifies the state of the agent.  The agent object
    instantiates an engine model that maps body-frame thrust and torque to
    the inertial frame.  Note that each agent can have its own inertial
    frame, which can be centered on the agent's target.

    Environmental dynamics are not modeled yet beyond point-mass gravity
    and additive thruster noise.

    The agent model maintains a state vector:
        position                              [0:3]
        velocity                              [3:6]
        body frame rotational velocity (w_bn) [6:9]
        mass                                  [9]
        attitude in target frame              [10:]  (size depends on the
                                                      attitude parameterization)
    """

    def __init__(self, h=0.5, noise_u=None, noise_sd=None, M=5.9722e24):
        """
        Args:
            h:        integration step size (s).
            noise_u:  mean of the thruster acceleration noise (3-vector;
                      defaults to zeros).
            noise_sd: std-dev of the thruster acceleration noise (3-vector;
                      defaults to zeros, i.e. noise disabled).
            M:        central body mass (kg); the default is Earth's mass.
        """
        self.h = h
        self.g_o = 9.81
        self.M = M
        # None sentinels replace the original mutable np.zeros(3) defaults
        # (shared arrays across instances); behavior is unchanged.
        self.noise_sd = np.zeros(3) if noise_sd is None else noise_sd
        self.noise_u = np.zeros(3) if noise_u is None else noise_u
        # running statistics of the largest disturbance seen, for debugging
        self.max_disturbance = np.zeros(3)
        self.max_norm_disturbance = 0.
        self.cnt = 0
        self.G = 6.674e-11  # gravitational constant (m^3 kg^-1 s^-2)
        print('6dof dynamics model')

    def next(self, t, agent):
        """Advance *agent*'s state by one step of size self.h (RK4)."""
        # inertia scales with the remaining (propellant) mass
        J = agent.inertia_tensor * agent.state['mass'] / agent.nominal_mass
        w = agent.state['w']
        x = agent.get_state_dynamics()
        old_v = x[3:6].copy()
        # get force and torque in body frame
        BT, F, L, mdot = agent.actuator_model.get_action()
        # convert force to acceleration
        acc_body_frame = F / agent.state['mass']
        # thruster noise expressed as acceleration.
        # NOTE(review): clipping at a lower bound of 0 makes the sampled
        # noise non-negative -- confirm -3*noise_sd was not intended.
        noise = (self.noise_u + np.clip(self.noise_sd * np.random.normal(size=3), 0, 3 * self.noise_sd)) / agent.state['mass']
        # gravity needs the spacecraft position in the body-centered frame
        radial_dist = np.linalg.norm(agent.state['position'])
        pos_dvec = agent.state['position'] / radial_dist
        # The attitude is BN (body w.r.t. inertial), whose DCM maps
        # inertial->body; the parameterization supplies the body->inertial
        # map used to express the thrust acceleration inertially.
        dcm_NB = agent.attitude_parameterization.get_body_to_inertial_DCM(agent.state['attitude'])
        acc_inertial_frame = dcm_NB.dot(acc_body_frame)
        thrust = acc_inertial_frame * agent.state['mass']
        # BUG FIX: Newtonian gravity falls off with the *square* of the
        # distance; the original wrote `/ radial_dist*2`, which divides
        # once and then multiplies by 2.
        # NOTE(review): pos_dvec points away from the central body, so
        # this g is radially outward -- confirm the intended sign
        # convention for the gravity disturbance.
        g = self.G * self.M * pos_dvec / radial_dist**2
        disturbance = g + noise
        self.max_disturbance = np.maximum(self.max_disturbance, np.abs(disturbance))
        self.max_norm_disturbance = np.maximum(self.max_norm_disturbance, np.linalg.norm(disturbance))
        if self.cnt % 300000 == 0:
            print('Dynamics: Max Disturbance (m/s^2): ', self.max_disturbance, np.linalg.norm(self.max_disturbance))
        self.cnt += 1
        acc_inertial_frame += disturbance
        # Euler's rotational equations of motion give the body-rate derivative
        Jinv = np.linalg.inv(J)
        w_tilde = attu.skew(w)
        wdot = -Jinv.dot(w_tilde).dot(J).dot(w) + Jinv.dot(L)
        # differential kinematic equation for the attitude derivative:
        # integrate w_bt (body rotation relative to the target frame) to get
        # the agent attitude in the target frame; w_bn is stored in the agent
        # (rotation in the inertial frame caused by thruster torque) and the
        # reward function tries to drive w_bt to zero
        w_bt = w
        qdot = agent.attitude_parameterization.qdot(agent.state['attitude'], w_bt)
        # 4th-order Runge-Kutta integration of the equations of motion
        ode = lambda t, x: self.eqom(t, x, acc_inertial_frame, qdot, wdot, mdot)
        x_next = envu.rk4(t, x, ode, self.h)
        # normalize quaternions after integration
        attitude = agent.attitude_parameterization.fix_attitude(x_next[10:])
        agent.state['position'] = x_next[0:3]
        agent.state['velocity'] = x_next[3:6]
        agent.state['w'] = x_next[6:9]
        # mass can never integrate below the vehicle's dry mass
        agent.state['mass'] = np.clip(x_next[9], agent.dry_mass, None)
        agent.state['attitude'] = attitude
        agent.state['attitude_321'] = agent.attitude_parameterization.q2Euler321(attitude)
        # debug dump when the velocity blows up
        if np.any(agent.state['velocity'] > 6000.):
            print('dynamics: ', agent.name)
            print('\t: old_v', old_v)
            print('\t: v', agent.state['velocity'])
            print('\t: inertial acc', acc_inertial_frame)
            print('\t: body acc', acc_body_frame)
            print('\t: mass ', agent.state['mass'])
            print('\t: mdot ', mdot)
            print('\t: force ', F)
        agent.state['thrust'] = thrust
        agent.state['bf_thrust'] = BT
        agent.state['torque'] = L
        return x_next

    def eqom(self, t, x, acc, qdot, wdot, mdot):
        """Right-hand side of the state ODE (time derivative of x)."""
        xdot = np.zeros(10 + qdot.shape[0])
        xdot[0:3] = x[3:6]   # position' = velocity
        xdot[3:6] = acc      # velocity' = inertial acceleration
        xdot[6:9] = wdot     # body-rate' (Euler's equations, computed upstream)
        xdot[9] = mdot       # mass' (propellant flow rate)
        xdot[10:] = qdot     # attitude kinematics
        return xdot
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.